From 03c603970748844cbc188f1e0dc6179fa1a1e83d Mon Sep 17 00:00:00 2001
From: John Spray
Date: Thu, 16 May 2024 09:26:34 +0100
Subject: [PATCH 001/220] pageserver: refine tenant_id->shard lookup (#7762)

## Problem

This is tech debt from when shard splitting was implemented: the edge case of a client reconnecting at the moment of a split was not handled gracefully. During shard splits there were edge cases where we could incorrectly return NotFound to a getpage@lsn request, prompting an unwanted reconnect/backoff from the client.

It is already the case that, during splits, parent shards are marked InProgress before child shards are created, so `resolve_attached_shard` will not match on them, thereby implicitly preferring child shards (good). However, we were not handling InProgress elegantly in general: `get_active_tenant_with_timeout` was previously mostly dead code. It inspected the slot found by `resolve_attached_shard` and possibly waited for InProgress, but that path is never taken, because since ef7c9c2ccc1a385f74455f45b54faa5b101065e6 the resolve function only ever returns attached slots.

Closes: https://github.com/neondatabase/neon/issues/7044

## Summary of changes

- Change the return value of `resolve_attached_shard` to distinguish between a true NotFound case and the case where we skipped slots that were InProgress.
- Rework `get_active_tenant_with_timeout` to loop over calls to `resolve_attached_shard`, waiting whenever it sees an InProgress result.

The resulting behavior during a shard split is:

- If we look up a shard early in a split, when the parent is InProgress but the children aren't created yet, we wait for the parent to be shut down. This corresponds to the part of the split where we wait for LSNs to catch up: the request sees a small delay, but is handled cleanly.
- If we look up a shard while child shards are already present, we match on those shards rather than the parent, as intended.
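For illustration, the retry pattern the rework adopts looks roughly like the sketch below. This is a minimal, self-contained sketch with stand-in types (a `tokio::sync::Notify` in place of `utils::completion::Barrier`, a string in place of `Tenant`), not the pageserver code itself; the real implementation in `page_service.rs` in the diff below also handles connection cancellation. Distinguishing InProgress from NotFound is what lets the caller wait out a split instead of surfacing a spurious NotFound.

```rust
use std::{sync::Arc, time::Duration};
use tokio::sync::Notify;

// Stand-ins for the pageserver types: only the control flow matters here.
enum ShardResolveResult {
    Found(Arc<String>),      // would be Arc<Tenant>
    NotFound,
    InProgress(Arc<Notify>), // would be a completion Barrier
}

async fn resolve_with_wait(
    mut resolve: impl FnMut() -> ShardResolveResult,
    timeout: Duration,
) -> Result<Arc<String>, &'static str> {
    let deadline = tokio::time::Instant::now() + timeout;
    loop {
        match resolve() {
            ShardResolveResult::Found(shard) => return Ok(shard),
            ShardResolveResult::NotFound => return Err("tenant not found"),
            ShardResolveResult::InProgress(barrier) => {
                // A slot is mid-transition (e.g. a split): wait for it to settle,
                // bounded by the deadline, then retry the lookup instead of
                // bouncing the client.
                if tokio::time::timeout_at(deadline, barrier.notified())
                    .await
                    .is_err()
                {
                    return Err("timed out waiting for InProgress slot");
                }
            }
        }
    }
}
```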
--- pageserver/src/bin/pageserver.rs | 25 ++-- pageserver/src/page_service.rs | 125 +++++++++++------ pageserver/src/tenant/mgr.rs | 222 +++++++++++-------------------- 3 files changed, 176 insertions(+), 196 deletions(-) diff --git a/pageserver/src/bin/pageserver.rs b/pageserver/src/bin/pageserver.rs index a04195e12b..ba5b2608bd 100644 --- a/pageserver/src/bin/pageserver.rs +++ b/pageserver/src/bin/pageserver.rs @@ -647,17 +647,20 @@ fn start_pageserver( None, "libpq endpoint listener", true, - async move { - page_service::libpq_listener_main( - conf, - broker_client, - pg_auth, - pageserver_listener, - conf.pg_auth_type, - libpq_ctx, - task_mgr::shutdown_token(), - ) - .await + { + let tenant_manager = tenant_manager.clone(); + async move { + page_service::libpq_listener_main( + tenant_manager, + broker_client, + pg_auth, + pageserver_listener, + conf.pg_auth_type, + libpq_ctx, + task_mgr::shutdown_token(), + ) + .await + } }, ); } diff --git a/pageserver/src/page_service.rs b/pageserver/src/page_service.rs index f6b251283c..35aba044b2 100644 --- a/pageserver/src/page_service.rs +++ b/pageserver/src/page_service.rs @@ -32,6 +32,7 @@ use std::str; use std::str::FromStr; use std::sync::Arc; use std::time::Duration; +use std::time::Instant; use tokio::io::AsyncWriteExt; use tokio::io::{AsyncRead, AsyncWrite}; use tokio_util::io::StreamReader; @@ -49,7 +50,6 @@ use utils::{ use crate::auth::check_permission; use crate::basebackup; use crate::basebackup::BasebackupError; -use crate::config::PageServerConf; use crate::context::{DownloadBehavior, RequestContext}; use crate::import_datadir::import_wal_from_tar; use crate::metrics; @@ -59,13 +59,15 @@ use crate::span::debug_assert_current_span_has_tenant_and_timeline_id; use crate::span::debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id; use crate::task_mgr; use crate::task_mgr::TaskKind; -use crate::tenant::mgr; -use crate::tenant::mgr::get_active_tenant_with_timeout; use crate::tenant::mgr::GetActiveTenantError; +use crate::tenant::mgr::GetTenantError; +use crate::tenant::mgr::ShardResolveResult; use crate::tenant::mgr::ShardSelector; +use crate::tenant::mgr::TenantManager; use crate::tenant::timeline::WaitLsnError; use crate::tenant::GetTimelineError; use crate::tenant::PageReconstructError; +use crate::tenant::Tenant; use crate::tenant::Timeline; use crate::trace::Tracer; use pageserver_api::key::rel_block_to_key; @@ -135,7 +137,7 @@ async fn read_tar_eof(mut reader: (impl AsyncRead + Unpin)) -> anyhow::Result<() /// Listens for connections, and launches a new handler task for each. /// pub async fn libpq_listener_main( - conf: &'static PageServerConf, + tenant_manager: Arc, broker_client: storage_broker::BrokerClientChannel, auth: Option>, listener: TcpListener, @@ -180,7 +182,7 @@ pub async fn libpq_listener_main( "serving compute connection task", false, page_service_conn_main( - conf, + tenant_manager.clone(), broker_client.clone(), local_auth, socket, @@ -203,7 +205,7 @@ pub async fn libpq_listener_main( #[instrument(skip_all, fields(peer_addr))] async fn page_service_conn_main( - conf: &'static PageServerConf, + tenant_manager: Arc, broker_client: storage_broker::BrokerClientChannel, auth: Option>, socket: tokio::net::TcpStream, @@ -260,7 +262,8 @@ async fn page_service_conn_main( // and create a child per-query context when it invokes process_query. // But it's in a shared crate, so, we store connection_ctx inside PageServerHandler // and create the per-query context in process_query ourselves. 
- let mut conn_handler = PageServerHandler::new(conf, broker_client, auth, connection_ctx); + let mut conn_handler = + PageServerHandler::new(tenant_manager, broker_client, auth, connection_ctx); let pgbackend = PostgresBackend::new_from_io(socket, peer_addr, auth_type, None)?; match pgbackend @@ -291,11 +294,12 @@ struct HandlerTimeline { } struct PageServerHandler { - _conf: &'static PageServerConf, broker_client: storage_broker::BrokerClientChannel, auth: Option>, claims: Option, + tenant_manager: Arc, + /// The context created for the lifetime of the connection /// services by this PageServerHandler. /// For each query received over the connection, @@ -381,13 +385,13 @@ impl From for QueryError { impl PageServerHandler { pub fn new( - conf: &'static PageServerConf, + tenant_manager: Arc, broker_client: storage_broker::BrokerClientChannel, auth: Option>, connection_ctx: RequestContext, ) -> Self { PageServerHandler { - _conf: conf, + tenant_manager, broker_client, auth, claims: None, @@ -552,13 +556,9 @@ impl PageServerHandler { { debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id(); - let tenant = mgr::get_active_tenant_with_timeout( - tenant_id, - ShardSelector::First, - ACTIVE_TENANT_TIMEOUT, - &task_mgr::shutdown_token(), - ) - .await?; + let tenant = self + .get_active_tenant_with_timeout(tenant_id, ShardSelector::First, ACTIVE_TENANT_TIMEOUT) + .await?; // Make request tracer if needed let mut tracer = if tenant.get_trace_read_requests() { @@ -726,13 +726,9 @@ impl PageServerHandler { // Create empty timeline info!("creating new timeline"); - let tenant = get_active_tenant_with_timeout( - tenant_id, - ShardSelector::Zero, - ACTIVE_TENANT_TIMEOUT, - &task_mgr::shutdown_token(), - ) - .await?; + let tenant = self + .get_active_tenant_with_timeout(tenant_id, ShardSelector::Zero, ACTIVE_TENANT_TIMEOUT) + .await?; let timeline = tenant .create_empty_timeline(timeline_id, base_lsn, pg_version, &ctx) .await?; @@ -1370,18 +1366,69 @@ impl PageServerHandler { timeline_id: TimelineId, selector: ShardSelector, ) -> Result, GetActiveTimelineError> { - let tenant = get_active_tenant_with_timeout( - tenant_id, - selector, - ACTIVE_TENANT_TIMEOUT, - &task_mgr::shutdown_token(), - ) - .await - .map_err(GetActiveTimelineError::Tenant)?; + let tenant = self + .get_active_tenant_with_timeout(tenant_id, selector, ACTIVE_TENANT_TIMEOUT) + .await + .map_err(GetActiveTimelineError::Tenant)?; let timeline = tenant.get_timeline(timeline_id, true)?; set_tracing_field_shard_id(&timeline); Ok(timeline) } + + /// Get a shard's [`Tenant`] in its active state, if present. If we don't find the shard and some + /// slots for this tenant are `InProgress` then we will wait. + /// If we find the [`Tenant`] and it's not yet in state [`TenantState::Active`], we will wait. + /// + /// `timeout` is used as a total timeout for the whole wait operation. + async fn get_active_tenant_with_timeout( + &self, + tenant_id: TenantId, + shard_selector: ShardSelector, + timeout: Duration, + ) -> Result, GetActiveTenantError> { + let wait_start = Instant::now(); + let deadline = wait_start + timeout; + + // Resolve TenantId to TenantShardId. This is usually a quick one-shot thing, the loop is + // for handling the rare case that the slot we're accessing is InProgress. 
+ let tenant_shard = loop { + let resolved = self + .tenant_manager + .resolve_attached_shard(&tenant_id, shard_selector); + match resolved { + ShardResolveResult::Found(tenant_shard) => break tenant_shard, + ShardResolveResult::NotFound => { + return Err(GetActiveTenantError::NotFound(GetTenantError::NotFound( + tenant_id, + ))); + } + ShardResolveResult::InProgress(barrier) => { + // We can't authoritatively answer right now: wait for InProgress state + // to end, then try again + tokio::select! { + _ = self.await_connection_cancelled() => { + return Err(GetActiveTenantError::Cancelled) + }, + _ = barrier.wait() => { + // The barrier completed: proceed around the loop to try looking up again + }, + _ = tokio::time::sleep(deadline.duration_since(Instant::now())) => { + return Err(GetActiveTenantError::WaitForActiveTimeout { + latest_state: None, + wait_time: timeout, + }); + } + } + } + }; + }; + + tracing::debug!("Waiting for tenant to enter active state..."); + tenant_shard + .wait_to_become_active(deadline.duration_since(Instant::now())) + .await?; + Ok(tenant_shard) + } } #[async_trait::async_trait] @@ -1771,13 +1818,13 @@ where self.check_permission(Some(tenant_id))?; - let tenant = get_active_tenant_with_timeout( - tenant_id, - ShardSelector::Zero, - ACTIVE_TENANT_TIMEOUT, - &task_mgr::shutdown_token(), - ) - .await?; + let tenant = self + .get_active_tenant_with_timeout( + tenant_id, + ShardSelector::Zero, + ACTIVE_TENANT_TIMEOUT, + ) + .await?; pgb.write_message_noflush(&BeMessage::RowDescription(&[ RowDescriptor::int8_col(b"checkpoint_distance"), RowDescriptor::int8_col(b"checkpoint_timeout"), diff --git a/pageserver/src/tenant/mgr.rs b/pageserver/src/tenant/mgr.rs index 5abda7b64e..1d8e2cf6d3 100644 --- a/pageserver/src/tenant/mgr.rs +++ b/pageserver/src/tenant/mgr.rs @@ -16,10 +16,9 @@ use std::cmp::Ordering; use std::collections::{BTreeMap, HashMap}; use std::ops::Deref; use std::sync::Arc; -use std::time::{Duration, Instant}; +use std::time::Duration; use sysinfo::SystemExt; use tokio::fs; -use utils::timeout::{timeout_cancellable, TimeoutCancellableError}; use anyhow::Context; use once_cell::sync::Lazy; @@ -119,6 +118,7 @@ pub(crate) enum TenantsMapRemoveResult { /// When resolving a TenantId to a shard, we may be looking for the 0th /// shard, or we might be looking for whichever shard holds a particular page. +#[derive(Copy, Clone)] pub(crate) enum ShardSelector { /// Only return the 0th shard, if it is present. If a non-0th shard is present, /// ignore it. @@ -169,6 +169,14 @@ impl TenantStartupMode { } } +/// Result type for looking up a TenantId to a specific shard +pub(crate) enum ShardResolveResult { + NotFound, + Found(Arc), + // Wait for this barrrier, then query again + InProgress(utils::completion::Barrier), +} + impl TenantsMap { /// Convenience function for typical usage, where we want to get a `Tenant` object, for /// working with attached tenants. If the TenantId is in the map but in Secondary state, @@ -182,51 +190,6 @@ impl TenantsMap { } } - /// A page service client sends a TenantId, and to look up the correct Tenant we must - /// resolve this to a fully qualified TenantShardId. 
- fn resolve_attached_shard( - &self, - tenant_id: &TenantId, - selector: ShardSelector, - ) -> Option { - let mut want_shard = None; - match self { - TenantsMap::Initializing => None, - TenantsMap::Open(m) | TenantsMap::ShuttingDown(m) => { - for slot in m.range(TenantShardId::tenant_range(*tenant_id)) { - // Ignore all slots that don't contain an attached tenant - let tenant = match &slot.1 { - TenantSlot::Attached(t) => t, - _ => continue, - }; - - match selector { - ShardSelector::First => return Some(*slot.0), - ShardSelector::Zero if slot.0.shard_number == ShardNumber(0) => { - return Some(*slot.0) - } - ShardSelector::Page(key) => { - // First slot we see for this tenant, calculate the expected shard number - // for the key: we will use this for checking if this and subsequent - // slots contain the key, rather than recalculating the hash each time. - if want_shard.is_none() { - want_shard = Some(tenant.shard_identity.get_shard_number(&key)); - } - - if Some(tenant.shard_identity.number) == want_shard { - return Some(*slot.0); - } - } - _ => continue, - } - } - - // Fall through: we didn't find an acceptable shard - None - } - } - } - /// Only for use from DeleteTenantFlow. This method directly removes a TenantSlot from the map. /// /// The normal way to remove a tenant is using a SlotGuard, which will gracefully remove the guarded @@ -2053,6 +2016,72 @@ impl TenantManager { Ok(reparented) } + + /// A page service client sends a TenantId, and to look up the correct Tenant we must + /// resolve this to a fully qualified TenantShardId. + /// + /// During shard splits: we shall see parent shards in InProgress state and skip them, and + /// instead match on child shards which should appear in Attached state. Very early in a shard + /// split, or in other cases where a shard is InProgress, we will return our own InProgress result + /// to instruct the caller to wait for that to finish before querying again. + pub(crate) fn resolve_attached_shard( + &self, + tenant_id: &TenantId, + selector: ShardSelector, + ) -> ShardResolveResult { + let tenants = self.tenants.read().unwrap(); + let mut want_shard = None; + let mut any_in_progress = None; + + match &*tenants { + TenantsMap::Initializing => ShardResolveResult::NotFound, + TenantsMap::Open(m) | TenantsMap::ShuttingDown(m) => { + for slot in m.range(TenantShardId::tenant_range(*tenant_id)) { + // Ignore all slots that don't contain an attached tenant + let tenant = match &slot.1 { + TenantSlot::Attached(t) => t, + TenantSlot::InProgress(barrier) => { + // We might still find a usable shard, but in case we don't, remember that + // we saw at least one InProgress slot, so that we can distinguish this case + // from a simple NotFound in our return value. + any_in_progress = Some(barrier.clone()); + continue; + } + _ => continue, + }; + + match selector { + ShardSelector::First => return ShardResolveResult::Found(tenant.clone()), + ShardSelector::Zero if slot.0.shard_number == ShardNumber(0) => { + return ShardResolveResult::Found(tenant.clone()) + } + ShardSelector::Page(key) => { + // First slot we see for this tenant, calculate the expected shard number + // for the key: we will use this for checking if this and subsequent + // slots contain the key, rather than recalculating the hash each time. 
+ if want_shard.is_none() { + want_shard = Some(tenant.shard_identity.get_shard_number(&key)); + } + + if Some(tenant.shard_identity.number) == want_shard { + return ShardResolveResult::Found(tenant.clone()); + } + } + _ => continue, + } + } + + // Fall through: we didn't find a slot that was in Attached state & matched our selector. If + // we found one or more InProgress slot, indicate to caller that they should retry later. Otherwise + // this requested shard simply isn't found. + if let Some(barrier) = any_in_progress { + ShardResolveResult::InProgress(barrier) + } else { + ShardResolveResult::NotFound + } + } + } + } } #[derive(Debug, thiserror::Error)] @@ -2101,105 +2130,6 @@ pub(crate) enum GetActiveTenantError { Broken(String), } -/// Get a [`Tenant`] in its active state. If the tenant_id is currently in [`TenantSlot::InProgress`] -/// state, then wait for up to `timeout`. If the [`Tenant`] is not currently in [`TenantState::Active`], -/// then wait for up to `timeout` (minus however long we waited for the slot). -pub(crate) async fn get_active_tenant_with_timeout( - tenant_id: TenantId, - shard_selector: ShardSelector, - timeout: Duration, - cancel: &CancellationToken, -) -> Result, GetActiveTenantError> { - enum WaitFor { - Barrier(utils::completion::Barrier), - Tenant(Arc), - } - - let wait_start = Instant::now(); - let deadline = wait_start + timeout; - - let (wait_for, tenant_shard_id) = { - let locked = TENANTS.read().unwrap(); - - // Resolve TenantId to TenantShardId - let tenant_shard_id = locked - .resolve_attached_shard(&tenant_id, shard_selector) - .ok_or(GetActiveTenantError::NotFound(GetTenantError::NotFound( - tenant_id, - )))?; - - let peek_slot = tenant_map_peek_slot(&locked, &tenant_shard_id, TenantSlotPeekMode::Read) - .map_err(GetTenantError::MapState)?; - match peek_slot { - Some(TenantSlot::Attached(tenant)) => { - match tenant.current_state() { - TenantState::Active => { - // Fast path: we don't need to do any async waiting. 
- return Ok(tenant.clone()); - } - _ => { - tenant.activate_now(); - (WaitFor::Tenant(tenant.clone()), tenant_shard_id) - } - } - } - Some(TenantSlot::Secondary(_)) => { - return Err(GetActiveTenantError::NotFound(GetTenantError::NotActive( - tenant_shard_id, - ))) - } - Some(TenantSlot::InProgress(barrier)) => { - (WaitFor::Barrier(barrier.clone()), tenant_shard_id) - } - None => { - return Err(GetActiveTenantError::NotFound(GetTenantError::NotFound( - tenant_id, - ))) - } - } - }; - - let tenant = match wait_for { - WaitFor::Barrier(barrier) => { - tracing::debug!("Waiting for tenant InProgress state to pass..."); - timeout_cancellable( - deadline.duration_since(Instant::now()), - cancel, - barrier.wait(), - ) - .await - .map_err(|e| match e { - TimeoutCancellableError::Timeout => GetActiveTenantError::WaitForActiveTimeout { - latest_state: None, - wait_time: wait_start.elapsed(), - }, - TimeoutCancellableError::Cancelled => GetActiveTenantError::Cancelled, - })?; - { - let locked = TENANTS.read().unwrap(); - let peek_slot = - tenant_map_peek_slot(&locked, &tenant_shard_id, TenantSlotPeekMode::Read) - .map_err(GetTenantError::MapState)?; - match peek_slot { - Some(TenantSlot::Attached(tenant)) => tenant.clone(), - _ => { - return Err(GetActiveTenantError::NotFound(GetTenantError::NotActive( - tenant_shard_id, - ))) - } - } - } - } - WaitFor::Tenant(tenant) => tenant, - }; - - tracing::debug!("Waiting for tenant to enter active state..."); - tenant - .wait_to_become_active(deadline.duration_since(Instant::now())) - .await?; - Ok(tenant) -} - #[derive(Debug, thiserror::Error)] pub(crate) enum DeleteTimelineError { #[error("Tenant {0}")] From 923cf91aa4c1986c47fba0158fff0ac9bf8225ce Mon Sep 17 00:00:00 2001 From: Andrew Rudenko Date: Thu, 16 May 2024 12:04:16 +0200 Subject: [PATCH 002/220] compute_ctl: catalog API endpoints (#7575) ## Problem There are two cloud's features that require extra compute endpoints. 1. We are running pg_dump to get DB schemas. Currently, we are using a special service for this. But it would be great to execute pg_dump in an isolated environment. And we already have such an environment, it's our compute! And likely enough pg_dump already exists there too! (see https://github.com/neondatabase/cloud/issues/11644#issuecomment-2084617832) 2. We need to have a way to get databases and roles from compute after time travel (see https://github.com/neondatabase/cloud/issues/12109) ## Summary of changes It adds two API endpoints to compute_ctl HTTP API that target both of the aforementioned cases. 
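As a rough illustration of how the new endpoints are meant to be consumed: the paths and response shapes match the OpenAPI spec added below, but the base URL/port and the use of `reqwest`, `serde_json`, `tokio` and `anyhow` here are assumptions for the sketch, not part of this change.

```rust
// Hypothetical client-side smoke test; compute_ctl is assumed to be listening
// on localhost:3080 for its external HTTP API.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let base = "http://localhost:3080";

    // GET /dbs_and_roles returns a JSON catalog of roles and databases.
    let catalog: serde_json::Value = reqwest::get(format!("{base}/dbs_and_roles"))
        .await?
        .error_for_status()?
        .json()
        .await?;
    println!("roles: {}", catalog["roles"]);

    // GET /database_schema streams a plain-text pg_dump of the given database;
    // a nonexistent database yields 404.
    let ddl = reqwest::get(format!("{base}/database_schema?database=postgres"))
        .await?
        .error_for_status()?
        .text()
        .await?;
    assert!(ddl.contains("PostgreSQL database dump"));
    Ok(())
}
```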
--------- Co-authored-by: Tristan Partin --- Cargo.lock | 2 + compute_tools/Cargo.toml | 2 + compute_tools/src/catalog.rs | 116 ++++++++++++++++++++ compute_tools/src/http/api.rs | 47 ++++++++ compute_tools/src/http/openapi_spec.yaml | 112 +++++++++++++++++++ compute_tools/src/lib.rs | 1 + libs/compute_api/src/responses.rs | 8 +- test_runner/fixtures/endpoint/__init__.py | 0 test_runner/fixtures/endpoint/http.py | 23 ++++ test_runner/fixtures/neon_fixtures.py | 8 ++ test_runner/regress/test_compute_catalog.py | 34 ++++++ 11 files changed, 352 insertions(+), 1 deletion(-) create mode 100644 compute_tools/src/catalog.rs create mode 100644 test_runner/fixtures/endpoint/__init__.py create mode 100644 test_runner/fixtures/endpoint/http.py create mode 100644 test_runner/regress/test_compute_catalog.py diff --git a/Cargo.lock b/Cargo.lock index 961101b151..b1f53404ea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1233,8 +1233,10 @@ dependencies = [ "serde_json", "signal-hook", "tar", + "thiserror", "tokio", "tokio-postgres", + "tokio-stream", "tokio-util", "toml_edit", "tracing", diff --git a/compute_tools/Cargo.toml b/compute_tools/Cargo.toml index 759a117ee9..8f96530a9d 100644 --- a/compute_tools/Cargo.toml +++ b/compute_tools/Cargo.toml @@ -27,10 +27,12 @@ reqwest = { workspace = true, features = ["json"] } tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } tokio-postgres.workspace = true tokio-util.workspace = true +tokio-stream.workspace = true tracing.workspace = true tracing-opentelemetry.workspace = true tracing-subscriber.workspace = true tracing-utils.workspace = true +thiserror.workspace = true url.workspace = true compute_api.workspace = true diff --git a/compute_tools/src/catalog.rs b/compute_tools/src/catalog.rs new file mode 100644 index 0000000000..4fefa831e0 --- /dev/null +++ b/compute_tools/src/catalog.rs @@ -0,0 +1,116 @@ +use compute_api::{ + responses::CatalogObjects, + spec::{Database, Role}, +}; +use futures::Stream; +use postgres::{Client, NoTls}; +use std::{path::Path, process::Stdio, result::Result, sync::Arc}; +use tokio::{ + io::{AsyncBufReadExt, BufReader}, + process::Command, + task, +}; +use tokio_stream::{self as stream, StreamExt}; +use tokio_util::codec::{BytesCodec, FramedRead}; +use tracing::warn; + +use crate::{ + compute::ComputeNode, + pg_helpers::{get_existing_dbs, get_existing_roles}, +}; + +pub async fn get_dbs_and_roles(compute: &Arc) -> anyhow::Result { + let connstr = compute.connstr.clone(); + task::spawn_blocking(move || { + let mut client = Client::connect(connstr.as_str(), NoTls)?; + let roles: Vec; + { + let mut xact = client.transaction()?; + roles = get_existing_roles(&mut xact)?; + } + let databases: Vec = get_existing_dbs(&mut client)?.values().cloned().collect(); + + Ok(CatalogObjects { roles, databases }) + }) + .await? +} + +#[derive(Debug, thiserror::Error)] +pub enum SchemaDumpError { + #[error("Database does not exist.")] + DatabaseDoesNotExist, + #[error("Failed to execute pg_dump.")] + IO(#[from] std::io::Error), +} + +// It uses the pg_dump utility to dump the schema of the specified database. +// The output is streamed back to the caller and supposed to be streamed via HTTP. +// +// Before return the result with the output, it checks that pg_dump produced any output. +// If not, it tries to parse the stderr output to determine if the database does not exist +// and special error is returned. +// +// To make sure that the process is killed when the caller drops the stream, we use tokio kill_on_drop feature. 
+pub async fn get_database_schema( + compute: &Arc, + dbname: &str, +) -> Result>, SchemaDumpError> { + let pgbin = &compute.pgbin; + let basepath = Path::new(pgbin).parent().unwrap(); + let pgdump = basepath.join("pg_dump"); + let mut connstr = compute.connstr.clone(); + connstr.set_path(dbname); + let mut cmd = Command::new(pgdump) + .arg("--schema-only") + .arg(connstr.as_str()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .kill_on_drop(true) + .spawn()?; + + let stdout = cmd.stdout.take().ok_or_else(|| { + std::io::Error::new(std::io::ErrorKind::Other, "Failed to capture stdout.") + })?; + + let stderr = cmd.stderr.take().ok_or_else(|| { + std::io::Error::new(std::io::ErrorKind::Other, "Failed to capture stderr.") + })?; + + let mut stdout_reader = FramedRead::new(stdout, BytesCodec::new()); + let stderr_reader = BufReader::new(stderr); + + let first_chunk = match stdout_reader.next().await { + Some(Ok(bytes)) if !bytes.is_empty() => bytes, + Some(Err(e)) => { + return Err(SchemaDumpError::IO(e)); + } + _ => { + let mut lines = stderr_reader.lines(); + if let Some(line) = lines.next_line().await? { + if line.contains(&format!("FATAL: database \"{}\" does not exist", dbname)) { + return Err(SchemaDumpError::DatabaseDoesNotExist); + } + warn!("pg_dump stderr: {}", line) + } + tokio::spawn(async move { + while let Ok(Some(line)) = lines.next_line().await { + warn!("pg_dump stderr: {}", line) + } + }); + + return Err(SchemaDumpError::IO(std::io::Error::new( + std::io::ErrorKind::Other, + "failed to start pg_dump", + ))); + } + }; + let initial_stream = stream::once(Ok(first_chunk.freeze())); + // Consume stderr and log warnings + tokio::spawn(async move { + let mut lines = stderr_reader.lines(); + while let Ok(Some(line)) = lines.next_line().await { + warn!("pg_dump stderr: {}", line) + } + }); + Ok(initial_stream.chain(stdout_reader.map(|res| res.map(|b| b.freeze())))) +} diff --git a/compute_tools/src/http/api.rs b/compute_tools/src/http/api.rs index 128783b477..0286429cf2 100644 --- a/compute_tools/src/http/api.rs +++ b/compute_tools/src/http/api.rs @@ -5,17 +5,21 @@ use std::net::SocketAddr; use std::sync::Arc; use std::thread; +use crate::catalog::SchemaDumpError; +use crate::catalog::{get_database_schema, get_dbs_and_roles}; use crate::compute::forward_termination_signal; use crate::compute::{ComputeNode, ComputeState, ParsedSpec}; use compute_api::requests::ConfigurationRequest; use compute_api::responses::{ComputeStatus, ComputeStatusResponse, GenericAPIError}; use anyhow::Result; +use hyper::header::CONTENT_TYPE; use hyper::service::{make_service_fn, service_fn}; use hyper::{Body, Method, Request, Response, Server, StatusCode}; use tokio::task; use tracing::{error, info, warn}; use tracing_utils::http::OtelName; +use utils::http::request::must_get_query_param; fn status_response_from_state(state: &ComputeState) -> ComputeStatusResponse { ComputeStatusResponse { @@ -133,6 +137,34 @@ async fn routes(req: Request, compute: &Arc) -> Response { + info!("serving /dbs_and_roles GET request",); + match get_dbs_and_roles(compute).await { + Ok(res) => render_json(Body::from(serde_json::to_string(&res).unwrap())), + Err(_) => { + render_json_error("can't get dbs and roles", StatusCode::INTERNAL_SERVER_ERROR) + } + } + } + + (&Method::GET, "/database_schema") => { + let database = match must_get_query_param(&req, "database") { + Err(e) => return e.into_response(), + Ok(database) => database, + }; + info!("serving /database_schema GET request with database: {database}",); + match 
get_database_schema(compute, &database).await { + Ok(res) => render_plain(Body::wrap_stream(res)), + Err(SchemaDumpError::DatabaseDoesNotExist) => { + render_json_error("database does not exist", StatusCode::NOT_FOUND) + } + Err(e) => { + error!("can't get schema dump: {}", e); + render_json_error("can't get schema dump", StatusCode::INTERNAL_SERVER_ERROR) + } + } + } + // download extension files from remote extension storage on demand (&Method::POST, route) if route.starts_with("/extension_server/") => { info!("serving {:?} POST request", route); @@ -303,10 +335,25 @@ fn render_json_error(e: &str, status: StatusCode) -> Response { }; Response::builder() .status(status) + .header(CONTENT_TYPE, "application/json") .body(Body::from(serde_json::to_string(&error).unwrap())) .unwrap() } +fn render_json(body: Body) -> Response { + Response::builder() + .header(CONTENT_TYPE, "application/json") + .body(body) + .unwrap() +} + +fn render_plain(body: Body) -> Response { + Response::builder() + .header(CONTENT_TYPE, "text/plain") + .body(body) + .unwrap() +} + async fn handle_terminate_request(compute: &Arc) -> Result<(), (String, StatusCode)> { { let mut state = compute.state.lock().unwrap(); diff --git a/compute_tools/src/http/openapi_spec.yaml b/compute_tools/src/http/openapi_spec.yaml index d2ec54299f..b0ddaeae2b 100644 --- a/compute_tools/src/http/openapi_spec.yaml +++ b/compute_tools/src/http/openapi_spec.yaml @@ -68,6 +68,51 @@ paths: schema: $ref: "#/components/schemas/Info" + /dbs_and_roles: + get: + tags: + - Info + summary: Get databases and roles in the catalog. + description: "" + operationId: getDbsAndRoles + responses: + 200: + description: Compute schema objects + content: + application/json: + schema: + $ref: "#/components/schemas/DbsAndRoles" + + /database_schema: + get: + tags: + - Info + summary: Get schema dump + parameters: + - name: database + in: query + description: Database name to dump. + required: true + schema: + type: string + example: "postgres" + description: Get schema dump in SQL format. + operationId: getDatabaseSchema + responses: + 200: + description: Schema dump + content: + text/plain: + schema: + type: string + description: Schema dump in SQL format. + 404: + description: Non existing database. 
+ content: + application/json: + schema: + $ref: "#/components/schemas/GenericError" + /check_writability: post: tags: @@ -229,6 +274,73 @@ components: num_cpus: type: integer + DbsAndRoles: + type: object + description: Databases and Roles + required: + - roles + - databases + properties: + roles: + type: array + items: + $ref: "#/components/schemas/Role" + databases: + type: array + items: + $ref: "#/components/schemas/Database" + + Database: + type: object + description: Database + required: + - name + - owner + - restrict_conn + - invalid + properties: + name: + type: string + owner: + type: string + options: + type: array + items: + $ref: "#/components/schemas/GenericOption" + restrict_conn: + type: boolean + invalid: + type: boolean + + Role: + type: object + description: Role + required: + - name + properties: + name: + type: string + encrypted_password: + type: string + options: + type: array + items: + $ref: "#/components/schemas/GenericOption" + + GenericOption: + type: object + description: Schema Generic option + required: + - name + - vartype + properties: + name: + type: string + value: + type: string + vartype: + type: string + ComputeState: type: object required: diff --git a/compute_tools/src/lib.rs b/compute_tools/src/lib.rs index eac808385c..18c228ba54 100644 --- a/compute_tools/src/lib.rs +++ b/compute_tools/src/lib.rs @@ -8,6 +8,7 @@ pub mod configurator; pub mod http; #[macro_use] pub mod logger; +pub mod catalog; pub mod compute; pub mod extension_server; pub mod monitor; diff --git a/libs/compute_api/src/responses.rs b/libs/compute_api/src/responses.rs index fd0c90d447..d05d625b0a 100644 --- a/libs/compute_api/src/responses.rs +++ b/libs/compute_api/src/responses.rs @@ -3,7 +3,7 @@ use chrono::{DateTime, Utc}; use serde::{Deserialize, Serialize, Serializer}; -use crate::spec::ComputeSpec; +use crate::spec::{ComputeSpec, Database, Role}; #[derive(Serialize, Debug, Deserialize)] pub struct GenericAPIError { @@ -113,6 +113,12 @@ pub struct ComputeMetrics { pub total_ext_download_size: u64, } +#[derive(Clone, Debug, Default, Serialize)] +pub struct CatalogObjects { + pub roles: Vec, + pub databases: Vec, +} + /// Response of the `/computes/{compute_id}/spec` control-plane API. /// This is not actually a compute API response, so consider moving /// to a different place. 
diff --git a/test_runner/fixtures/endpoint/__init__.py b/test_runner/fixtures/endpoint/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test_runner/fixtures/endpoint/http.py b/test_runner/fixtures/endpoint/http.py new file mode 100644 index 0000000000..42f0539c19 --- /dev/null +++ b/test_runner/fixtures/endpoint/http.py @@ -0,0 +1,23 @@ +import requests +from requests.adapters import HTTPAdapter + + +class EndpointHttpClient(requests.Session): + def __init__( + self, + port: int, + ): + super().__init__() + self.port = port + + self.mount("http://", HTTPAdapter()) + + def dbs_and_roles(self): + res = self.get(f"http://localhost:{self.port}/dbs_and_roles") + res.raise_for_status() + return res.json() + + def database_schema(self, database: str): + res = self.get(f"http://localhost:{self.port}/database_schema?database={database}") + res.raise_for_status() + return res.text diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index a6fd4792dd..b4761f103b 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -48,6 +48,7 @@ from urllib3.util.retry import Retry from fixtures import overlayfs from fixtures.broker import NeonBroker from fixtures.common_types import Lsn, TenantId, TenantShardId, TimelineId +from fixtures.endpoint.http import EndpointHttpClient from fixtures.log_helper import log from fixtures.metrics import Metrics, MetricsGetter, parse_metrics from fixtures.pageserver.allowed_errors import ( @@ -3373,6 +3374,13 @@ class Endpoint(PgProtocol): self.active_safekeepers: List[int] = list(map(lambda sk: sk.id, env.safekeepers)) # path to conf is /endpoints//pgdata/postgresql.conf + def http_client( + self, auth_token: Optional[str] = None, retries: Optional[Retry] = None + ) -> EndpointHttpClient: + return EndpointHttpClient( + port=self.http_port, + ) + def create( self, branch_name: str, diff --git a/test_runner/regress/test_compute_catalog.py b/test_runner/regress/test_compute_catalog.py new file mode 100644 index 0000000000..dd36190fcd --- /dev/null +++ b/test_runner/regress/test_compute_catalog.py @@ -0,0 +1,34 @@ +import requests +from fixtures.neon_fixtures import NeonEnv + + +def test_compute_catalog(neon_simple_env: NeonEnv): + env = neon_simple_env + env.neon_cli.create_branch("test_config", "empty") + + endpoint = env.endpoints.create_start("test_config", config_lines=["log_min_messages=debug1"]) + client = endpoint.http_client() + + objects = client.dbs_and_roles() + + # Assert that 'cloud_admin' role exists in the 'roles' list + assert any( + role["name"] == "cloud_admin" for role in objects["roles"] + ), "The 'cloud_admin' role is missing" + + # Assert that 'postgres' database exists in the 'databases' list + assert any( + db["name"] == "postgres" for db in objects["databases"] + ), "The 'postgres' database is missing" + + ddl = client.database_schema(database="postgres") + + assert "-- PostgreSQL database dump" in ddl + + try: + client.database_schema(database="nonexistentdb") + raise AssertionError("Expected HTTPError was not raised") + except requests.exceptions.HTTPError as e: + assert ( + e.response.status_code == 404 + ), f"Expected 404 status code, but got {e.response.status_code}" From 790c05d67543a2f183193eff66de4e90dbe2e7f9 Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Thu, 16 May 2024 12:05:50 +0100 Subject: [PATCH 003/220] proxy: swap tungstenite for a simpler impl (#7353) ## Problem I wanted to do a deep dive of the tungstenite codebase. 
tokio-tungstenite is incredibly convoluted... In my searching I found [fastwebsockets by deno](https://github.com/denoland/fastwebsockets), but it wasn't quite sufficient. This also removes the default 16MB/64MB frame/message size limitation. framed-websockets solves this by inserting continuation frames for partially received messages, so the whole message does not need to be entirely read into memory. ## Summary of changes I took the fastwebsockets code as a starting off point and rewrote it to be simpler, server-only, and be poll-based to support our Read/Write wrappers. I have replaced our tungstenite code with my framed-websockets fork. --- Cargo.lock | 109 ++++++++----------- Cargo.toml | 7 +- proxy/Cargo.toml | 4 +- proxy/src/serverless.rs | 9 +- proxy/src/serverless/websocket.rs | 93 ++++++++-------- test_runner/regress/test_proxy_websockets.py | 9 +- 6 files changed, 107 insertions(+), 124 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b1f53404ea..e1edd53fea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -708,7 +708,7 @@ dependencies = [ "sha1", "sync_wrapper", "tokio", - "tokio-tungstenite 0.20.0", + "tokio-tungstenite", "tower", "tower-layer", "tower-service", @@ -979,6 +979,12 @@ version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" +[[package]] +name = "bytemuck" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78834c15cb5d5efe3452d58b1e8ba890dd62d21907f867f383358198e56ebca5" + [[package]] name = "byteorder" version = "1.4.3" @@ -1598,7 +1604,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6943ae99c34386c84a470c499d3414f66502a41340aa895406e0d2e4a207b91d" dependencies = [ "cfg-if", - "hashbrown 0.14.0", + "hashbrown 0.14.5", "lock_api", "once_cell", "parking_lot_core 0.9.8", @@ -1999,6 +2005,27 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "framed-websockets" +version = "0.1.0" +source = "git+https://github.com/neondatabase/framed-websockets#34eff3d6f8cfccbc5f35e4f65314ff7328621127" +dependencies = [ + "base64 0.21.1", + "bytemuck", + "bytes", + "futures-core", + "futures-sink", + "http-body-util", + "hyper 1.2.0", + "hyper-util", + "pin-project", + "rand 0.8.5", + "sha1", + "thiserror", + "tokio", + "tokio-util", +] + [[package]] name = "fs2" version = "0.4.3" @@ -2277,9 +2304,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.14.0" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash", "allocator-api2", @@ -2287,11 +2314,11 @@ dependencies = [ [[package]] name = "hashlink" -version = "0.8.4" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" dependencies = [ - "hashbrown 0.14.0", + "hashbrown 0.14.5", ] [[package]] @@ -2600,21 +2627,6 @@ dependencies = [ "tokio-native-tls", ] -[[package]] -name = "hyper-tungstenite" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a343d17fe7885302ed7252767dc7bb83609a874b6ff581142241ec4b73957ad" -dependencies = [ - "http-body-util", - "hyper 1.2.0", - "hyper-util", - 
"pin-project-lite", - "tokio", - "tokio-tungstenite 0.21.0", - "tungstenite 0.21.0", -] - [[package]] name = "hyper-util" version = "0.1.3" @@ -2692,7 +2704,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ad227c3af19d4914570ad36d30409928b75967c298feb9ea1969db3a610bb14e" dependencies = [ "equivalent", - "hashbrown 0.14.0", + "hashbrown 0.14.5", ] [[package]] @@ -2954,7 +2966,7 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc" dependencies = [ - "hashbrown 0.14.0", + "hashbrown 0.14.5", ] [[package]] @@ -3007,7 +3019,7 @@ checksum = "652bc741286361c06de8cb4d89b21a6437f120c508c51713663589eeb9928ac5" dependencies = [ "bytes", "crossbeam-utils", - "hashbrown 0.14.0", + "hashbrown 0.14.5", "itoa", "lasso", "measured-derive", @@ -3569,7 +3581,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49203cdcae0030493bad186b28da2fa25645fa276a51b6fec8010d281e02ef79" dependencies = [ "dlv-list", - "hashbrown 0.14.0", + "hashbrown 0.14.5", ] [[package]] @@ -3896,7 +3908,7 @@ dependencies = [ "ahash", "bytes", "chrono", - "hashbrown 0.14.0", + "hashbrown 0.14.5", "num", "num-bigint", "paste", @@ -4380,9 +4392,10 @@ dependencies = [ "dashmap", "env_logger", "fallible-iterator", + "framed-websockets", "futures", "git-version", - "hashbrown 0.13.2", + "hashbrown 0.14.5", "hashlink", "hex", "hmac", @@ -4392,7 +4405,6 @@ dependencies = [ "humantime", "hyper 0.14.26", "hyper 1.2.0", - "hyper-tungstenite", "hyper-util", "indexmap 2.0.1", "ipnet", @@ -4437,7 +4449,6 @@ dependencies = [ "smol_str", "socket2 0.5.5", "subtle", - "sync_wrapper", "task-local-extensions", "thiserror", "tikv-jemalloc-ctl", @@ -4446,6 +4457,7 @@ dependencies = [ "tokio-postgres", "tokio-postgres-rustls", "tokio-rustls 0.25.0", + "tokio-tungstenite", "tokio-util", "tower-service", "tracing", @@ -6382,19 +6394,7 @@ dependencies = [ "futures-util", "log", "tokio", - "tungstenite 0.20.1", -] - -[[package]] -name = "tokio-tungstenite" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c83b561d025642014097b66e6c1bb422783339e0909e4429cde4749d1990bc38" -dependencies = [ - "futures-util", - "log", - "tokio", - "tungstenite 0.21.0", + "tungstenite", ] [[package]] @@ -6408,7 +6408,7 @@ dependencies = [ "futures-io", "futures-sink", "futures-util", - "hashbrown 0.14.0", + "hashbrown 0.14.5", "pin-project-lite", "tokio", "tracing", @@ -6690,25 +6690,6 @@ dependencies = [ "utf-8", ] -[[package]] -name = "tungstenite" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ef1a641ea34f399a848dea702823bbecfb4c486f911735368f1f137cb8257e1" -dependencies = [ - "byteorder", - "bytes", - "data-encoding", - "http 1.1.0", - "httparse", - "log", - "rand 0.8.5", - "sha1", - "thiserror", - "url", - "utf-8", -] - [[package]] name = "twox-hash" version = "1.6.3" @@ -7504,7 +7485,7 @@ dependencies = [ "futures-sink", "futures-util", "getrandom 0.2.11", - "hashbrown 0.14.0", + "hashbrown 0.14.5", "hex", "hmac", "hyper 0.14.26", diff --git a/Cargo.toml b/Cargo.toml index 3ccdabee18..b59a5dcd6d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -81,13 +81,14 @@ enum-map = "2.4.2" enumset = "1.0.12" fail = "0.5.0" fallible-iterator = "0.2" +framed-websockets = { version = "0.1.0", git = "https://github.com/neondatabase/framed-websockets" } fs2 = "0.4.3" futures = "0.3" futures-core = "0.3" futures-util = "0.3" 
git-version = "0.3" -hashbrown = "0.13" -hashlink = "0.8.4" +hashbrown = "0.14" +hashlink = "0.9.1" hdrhistogram = "7.5.2" hex = "0.4" hex-literal = "0.4" @@ -98,7 +99,7 @@ http-types = { version = "2", default-features = false } humantime = "2.1" humantime-serde = "1.1.1" hyper = "0.14" -hyper-tungstenite = "0.13.0" +tokio-tungstenite = "0.20.0" indexmap = "2" inotify = "0.10.2" ipnet = "2.9.0" diff --git a/proxy/Cargo.toml b/proxy/Cargo.toml index 3002006aed..5f9b0aa75b 100644 --- a/proxy/Cargo.toml +++ b/proxy/Cargo.toml @@ -26,6 +26,7 @@ clap.workspace = true consumption_metrics.workspace = true dashmap.workspace = true env_logger.workspace = true +framed-websockets.workspace = true futures.workspace = true git-version.workspace = true hashbrown.workspace = true @@ -35,7 +36,6 @@ hmac.workspace = true hostname.workspace = true http.workspace = true humantime.workspace = true -hyper-tungstenite.workspace = true hyper.workspace = true hyper1 = { package = "hyper", version = "1.2", features = ["server"] } hyper-util = { version = "0.1", features = ["server", "http1", "http2", "tokio"] } @@ -76,7 +76,6 @@ smol_str.workspace = true smallvec.workspace = true socket2.workspace = true subtle.workspace = true -sync_wrapper.workspace = true task-local-extensions.workspace = true thiserror.workspace = true tikv-jemallocator.workspace = true @@ -106,6 +105,7 @@ workspace_hack.workspace = true [dev-dependencies] camino-tempfile.workspace = true fallible-iterator.workspace = true +tokio-tungstenite.workspace = true rcgen.workspace = true rstest.workspace = true tokio-postgres-rustls.workspace = true diff --git a/proxy/src/serverless.rs b/proxy/src/serverless.rs index f634ab4e98..24ee749e6e 100644 --- a/proxy/src/serverless.rs +++ b/proxy/src/serverless.rs @@ -102,7 +102,7 @@ pub async fn task_main( let connections = tokio_util::task::task_tracker::TaskTracker::new(); connections.close(); // allows `connections.wait to complete` - let server = Builder::new(hyper_util::rt::TokioExecutor::new()); + let server = Builder::new(TokioExecutor::new()); while let Some(res) = run_until_cancelled(ws_listener.accept(), &cancellation_token).await { let (conn, peer_addr) = res.context("could not accept TCP stream")?; @@ -255,7 +255,6 @@ async fn connection_handler( .in_current_span() .map_ok_or_else(api_error_into_response, |r| r), ); - async move { let res = handler.await; cancel_request.disarm(); @@ -301,7 +300,7 @@ async fn request_handler( .map(|s| s.to_string()); // Check if the request is a websocket upgrade request. - if hyper_tungstenite::is_upgrade_request(&request) { + if framed_websockets::upgrade::is_upgrade_request(&request) { let ctx = RequestMonitoring::new( session_id, peer_addr, @@ -312,7 +311,7 @@ async fn request_handler( let span = ctx.span.clone(); info!(parent: &span, "performing websocket upgrade"); - let (response, websocket) = hyper_tungstenite::upgrade(&mut request, None) + let (response, websocket) = framed_websockets::upgrade::upgrade(&mut request) .map_err(|e| ApiError::BadRequest(e.into()))?; ws_connections.spawn( @@ -334,7 +333,7 @@ async fn request_handler( ); // Return the response so the spawned future can continue. 
- Ok(response) + Ok(response.map(|_: http_body_util::Empty| Full::new(Bytes::new()))) } else if request.uri().path() == "/sql" && *request.method() == Method::POST { let ctx = RequestMonitoring::new( session_id, diff --git a/proxy/src/serverless/websocket.rs b/proxy/src/serverless/websocket.rs index 649bec2c7c..61d6d60dbe 100644 --- a/proxy/src/serverless/websocket.rs +++ b/proxy/src/serverless/websocket.rs @@ -7,10 +7,11 @@ use crate::{ proxy::{handle_client, ClientMode}, rate_limiter::EndpointRateLimiter, }; -use bytes::{Buf, Bytes}; +use bytes::{Buf, BufMut, Bytes, BytesMut}; +use framed_websockets::{Frame, OpCode, WebSocketServer}; use futures::{Sink, Stream}; -use hyper::upgrade::Upgraded; -use hyper_tungstenite::{tungstenite::Message, HyperWebsocket, WebSocketStream}; +use hyper1::upgrade::OnUpgrade; +use hyper_util::rt::TokioIo; use pin_project_lite::pin_project; use std::{ @@ -21,25 +22,23 @@ use std::{ use tokio::io::{self, AsyncBufRead, AsyncRead, AsyncWrite, ReadBuf}; use tracing::warn; -// TODO: use `std::sync::Exclusive` once it's stabilized. -// Tracking issue: https://github.com/rust-lang/rust/issues/98407. -use sync_wrapper::SyncWrapper; - pin_project! { /// This is a wrapper around a [`WebSocketStream`] that /// implements [`AsyncRead`] and [`AsyncWrite`]. - pub struct WebSocketRw { + pub struct WebSocketRw { #[pin] - stream: SyncWrapper>, - bytes: Bytes, + stream: WebSocketServer, + recv: Bytes, + send: BytesMut, } } impl WebSocketRw { - pub fn new(stream: WebSocketStream) -> Self { + pub fn new(stream: WebSocketServer) -> Self { Self { - stream: stream.into(), - bytes: Bytes::new(), + stream, + recv: Bytes::new(), + send: BytesMut::new(), } } } @@ -50,22 +49,24 @@ impl AsyncWrite for WebSocketRw { cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { - let mut stream = self.project().stream.get_pin_mut(); + let this = self.project(); + let mut stream = this.stream; + this.send.put(buf); ready!(stream.as_mut().poll_ready(cx).map_err(io_error))?; - match stream.as_mut().start_send(Message::Binary(buf.into())) { + match stream.as_mut().start_send(Frame::binary(this.send.split())) { Ok(()) => Poll::Ready(Ok(buf.len())), Err(e) => Poll::Ready(Err(io_error(e))), } } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let stream = self.project().stream.get_pin_mut(); + let stream = self.project().stream; stream.poll_flush(cx).map_err(io_error) } fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let stream = self.project().stream.get_pin_mut(); + let stream = self.project().stream; stream.poll_close(cx).map_err(io_error) } } @@ -76,13 +77,10 @@ impl AsyncRead for WebSocketRw { cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll> { - if buf.remaining() > 0 { - let bytes = ready!(self.as_mut().poll_fill_buf(cx))?; - let len = std::cmp::min(bytes.len(), buf.remaining()); - buf.put_slice(&bytes[..len]); - self.consume(len); - } - + let bytes = ready!(self.as_mut().poll_fill_buf(cx))?; + let len = std::cmp::min(bytes.len(), buf.remaining()); + buf.put_slice(&bytes[..len]); + self.consume(len); Poll::Ready(Ok(())) } } @@ -94,31 +92,27 @@ impl AsyncBufRead for WebSocketRw { let mut this = self.project(); loop { - if !this.bytes.chunk().is_empty() { - let chunk = (*this.bytes).chunk(); + if !this.recv.chunk().is_empty() { + let chunk = (*this.recv).chunk(); return Poll::Ready(Ok(chunk)); } - let res = ready!(this.stream.as_mut().get_pin_mut().poll_next(cx)); + let res = ready!(this.stream.as_mut().poll_next(cx)); match 
res.transpose().map_err(io_error)? { - Some(message) => match message { - Message::Ping(_) => {} - Message::Pong(_) => {} - Message::Text(text) => { + Some(message) => match message.opcode { + OpCode::Ping => {} + OpCode::Pong => {} + OpCode::Text => { // We expect to see only binary messages. let error = "unexpected text message in the websocket"; - warn!(length = text.len(), error); + warn!(length = message.payload.len(), error); return Poll::Ready(Err(io_error(error))); } - Message::Frame(_) => { - // This case is impossible according to Frame's doc. - panic!("unexpected raw frame in the websocket"); + OpCode::Binary | OpCode::Continuation => { + debug_assert!(this.recv.is_empty()); + *this.recv = message.payload.freeze(); } - Message::Binary(chunk) => { - assert!(this.bytes.is_empty()); - *this.bytes = Bytes::from(chunk); - } - Message::Close(_) => return EOF, + OpCode::Close => return EOF, }, None => return EOF, } @@ -126,19 +120,21 @@ impl AsyncBufRead for WebSocketRw { } fn consume(self: Pin<&mut Self>, amount: usize) { - self.project().bytes.advance(amount); + self.project().recv.advance(amount); } } pub async fn serve_websocket( config: &'static ProxyConfig, mut ctx: RequestMonitoring, - websocket: HyperWebsocket, + websocket: OnUpgrade, cancellation_handler: Arc, endpoint_rate_limiter: Arc, hostname: Option, ) -> anyhow::Result<()> { let websocket = websocket.await?; + let websocket = WebSocketServer::after_handshake(TokioIo::new(websocket)); + let conn_gauge = Metrics::get() .proxy .client_connections @@ -177,15 +173,16 @@ pub async fn serve_websocket( mod tests { use std::pin::pin; + use framed_websockets::WebSocketServer; use futures::{SinkExt, StreamExt}; - use hyper_tungstenite::{ - tungstenite::{protocol::Role, Message}, - WebSocketStream, - }; use tokio::{ io::{duplex, AsyncReadExt, AsyncWriteExt}, task::JoinSet, }; + use tokio_tungstenite::{ + tungstenite::{protocol::Role, Message}, + WebSocketStream, + }; use super::WebSocketRw; @@ -210,9 +207,7 @@ mod tests { }); js.spawn(async move { - let mut rw = pin!(WebSocketRw::new( - WebSocketStream::from_raw_socket(stream2, Role::Server, None).await - )); + let mut rw = pin!(WebSocketRw::new(WebSocketServer::after_handshake(stream2))); let mut buf = vec![0; 1024]; let n = rw.read(&mut buf).await.unwrap(); diff --git a/test_runner/regress/test_proxy_websockets.py b/test_runner/regress/test_proxy_websockets.py index 6d1cb9765a..6211446a40 100644 --- a/test_runner/regress/test_proxy_websockets.py +++ b/test_runner/regress/test_proxy_websockets.py @@ -135,7 +135,14 @@ async def test_websockets_pipelined(static_proxy: NeonProxy): query_message = "SELECT 1".encode("utf-8") + b"\0" length2 = (4 + len(query_message)).to_bytes(4, byteorder="big") await websocket.send( - [length0, startup_message, b"p", length1, auth_message, b"Q", length2, query_message] + length0 + + startup_message + + b"p" + + length1 + + auth_message + + b"Q" + + length2 + + query_message ) startup_response = await websocket.recv() From ec069dc45ec6c0ef9b500dc0f433a0415b7e26db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arpad=20M=C3=BCller?= Date: Thu, 16 May 2024 16:48:49 +0200 Subject: [PATCH 004/220] tiered compaction: introduce PAGE_SZ constant and use it (#7785) pointed out by @problame : we use the literal 8192 instead of a properly defined constant. replace the literal by a PAGE_SZ constant. 
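A quick standalone sanity check (not part of the patch) of the size-to-key-range conversion that the simulator now expresses through the constant:

```rust
const PAGE_SZ: u64 = 8192;

fn main() {
    // 1024 MB of logical size corresponds to this many 8 KiB pages/keys,
    // i.e. the upper bound of the simulated key range.
    let logical_size_mb = 1024u64;
    let keys = (logical_size_mb * 1024 * 1024) / PAGE_SZ;
    assert_eq!(keys, 131_072);
    println!("key range: 0..{keys:#X}");
}
```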
--- pageserver/compaction/src/bin/compaction-simulator.rs | 3 ++- pageserver/compaction/src/compact_tiered.rs | 6 +++--- pageserver/compaction/src/helpers.rs | 2 ++ pageserver/compaction/src/simulator.rs | 3 ++- 4 files changed, 9 insertions(+), 5 deletions(-) diff --git a/pageserver/compaction/src/bin/compaction-simulator.rs b/pageserver/compaction/src/bin/compaction-simulator.rs index 1fd69407d3..c308694ae1 100644 --- a/pageserver/compaction/src/bin/compaction-simulator.rs +++ b/pageserver/compaction/src/bin/compaction-simulator.rs @@ -1,4 +1,5 @@ use clap::{Parser, Subcommand}; +use pageserver_compaction::helpers::PAGE_SZ; use pageserver_compaction::simulator::MockTimeline; use rand::Rng; use std::io::Write; @@ -51,7 +52,7 @@ async fn simulate(cmd: &SimulateCmd, results_path: &Path) -> anyhow::Result<()> let mut executor = MockTimeline::new(); // Convert the logical size in MB into a key range. - let key_range = 0..((cmd.logical_size * 1024 * 1024) / 8192); + let key_range = 0..((cmd.logical_size * 1024 * 1024) / PAGE_SZ); //let key_range = u64::MIN..u64::MAX; println!( "starting simulation with key range {:016X}-{:016X}", diff --git a/pageserver/compaction/src/compact_tiered.rs b/pageserver/compaction/src/compact_tiered.rs index a8f184af24..33c9948f45 100644 --- a/pageserver/compaction/src/compact_tiered.rs +++ b/pageserver/compaction/src/compact_tiered.rs @@ -25,7 +25,7 @@ use std::collections::{HashSet, VecDeque}; use std::ops::Range; use crate::helpers::{ - accum_key_values, keyspace_total_size, merge_delta_keys_buffered, overlaps_with, + accum_key_values, keyspace_total_size, merge_delta_keys_buffered, overlaps_with, PAGE_SZ, }; use crate::interface::*; use utils::lsn::Lsn; @@ -379,7 +379,7 @@ where .get_keyspace(&job.key_range, job.lsn_range.end, ctx) .await?, &self.shard_identity, - ) * 8192; + ) * PAGE_SZ; let wal_size = job .input_layers @@ -441,7 +441,7 @@ where let mut window = KeyspaceWindow::new( E::Key::MIN..E::Key::MAX, keyspace, - self.target_file_size / 8192, + self.target_file_size / PAGE_SZ, ); while let Some(key_range) = window.choose_next_image(&self.shard_identity) { new_jobs.push(CompactionJob:: { diff --git a/pageserver/compaction/src/helpers.rs b/pageserver/compaction/src/helpers.rs index 2c922b0a49..8ed1d16082 100644 --- a/pageserver/compaction/src/helpers.rs +++ b/pageserver/compaction/src/helpers.rs @@ -16,6 +16,8 @@ use std::pin::Pin; use std::task::{ready, Poll}; use utils::lsn::Lsn; +pub const PAGE_SZ: u64 = 8192; + pub fn keyspace_total_size( keyspace: &CompactionKeySpace, shard_identity: &ShardIdentity, diff --git a/pageserver/compaction/src/simulator.rs b/pageserver/compaction/src/simulator.rs index 3543df64fa..a7c8bd5c1f 100644 --- a/pageserver/compaction/src/simulator.rs +++ b/pageserver/compaction/src/simulator.rs @@ -14,6 +14,7 @@ use std::ops::Range; use std::sync::Arc; use std::sync::Mutex; +use crate::helpers::PAGE_SZ; use crate::helpers::{merge_delta_keys, overlaps_with}; use crate::interface; @@ -509,7 +510,7 @@ impl interface::CompactionJobExecutor for MockTimeline { let new_layer = Arc::new(MockImageLayer { key_range: key_range.clone(), lsn_range: lsn..lsn, - file_size: accum_size * 8192, + file_size: accum_size * PAGE_SZ, deleted: Mutex::new(false), }); info!( From 4c5afb7b1000768b4ec6dd3db362c1159a189aaa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arpad=20M=C3=BCller?= Date: Thu, 16 May 2024 19:35:13 +0200 Subject: [PATCH 005/220] Remove SSO_ACCOUNT_ID from scrubber docs and BucketConfig (#7774) As of #6202 we support `AWS_PROFILE` as well, 
which is more convenient. Change the docs to using it instead of `SSO_ACCOUNT_ID`. Also, remove `SSO_ACCOUNT_ID` from BucketConfig as it is confusing to the code's reader: it's not the "main" way of setting up authentication for the scrubber any more. It is a breaking change for the on-disk format as we persist `sso_account_id` to disk, but it was quite inconsistent with the other methods which are not persistet. Also, I don't think we want to support the case where one version writes the json and another version reads it. Related: #7667 --- s3_scrubber/README.md | 12 +++++++----- s3_scrubber/src/lib.rs | 24 ++++-------------------- 2 files changed, 11 insertions(+), 25 deletions(-) diff --git a/s3_scrubber/README.md b/s3_scrubber/README.md index c1deab8852..8a96542ada 100644 --- a/s3_scrubber/README.md +++ b/s3_scrubber/README.md @@ -9,11 +9,13 @@ and `safekeeper`, and does housekeeping such as cleaning up objects for tenants #### S3 -Do `aws sso login --profile dev` to get the SSO access to the bucket to clean, get the SSO_ACCOUNT_ID for your profile (`cat ~/.aws/config` may help). +Do `aws sso login --profile dev` to get the SSO access to the bucket to clean. +Also, set the following environment variables: -- `SSO_ACCOUNT_ID`: Credentials id to use for accessing S3 buckets +- `AWS_PROFILE`: Profile name to use for accessing S3 buckets (e.g. `dev`) - `REGION`: A region where the bucket is located at. - `BUCKET`: Bucket name +- `BUCKET_PREFIX` (optional): Prefix inside the bucket #### Console API @@ -43,7 +45,7 @@ processing by the `purge-garbage` subcommand. Example: -`env SSO_ACCOUNT_ID=123456 REGION=eu-west-1 BUCKET=my-dev-bucket CLOUD_ADMIN_API_TOKEN=${NEON_CLOUD_ADMIN_API_STAGING_KEY} CLOUD_ADMIN_API_URL=[url] cargo run --release -- find-garbage --node-kind=pageserver --depth=tenant --output-path=eu-west-1-garbage.json` +`env AWS_PROFILE=dev REGION=eu-west-1 BUCKET=my-dev-bucket CLOUD_ADMIN_API_TOKEN=${NEON_CLOUD_ADMIN_API_STAGING_KEY} CLOUD_ADMIN_API_URL=[url] cargo run --release -- find-garbage --node-kind=pageserver --depth=tenant --output-path=eu-west-1-garbage.json` #### `purge-garbage` @@ -59,7 +61,7 @@ to pass them on the command line Example: -`env SSO_ACCOUNT_ID=123456 cargo run --release -- purge-garbage --node-kind=pageserver --depth=tenant --input-path=eu-west-1-garbage.json` +`env AWS_PROFILE=dev cargo run --release -- purge-garbage --node-kind=pageserver --depth=tenant --input-path=eu-west-1-garbage.json` Add the `--delete` argument before `purge-garbage` to enable deletion. This is intentionally not provided inline in the example above to avoid accidents. Without the `--delete` flag @@ -72,7 +74,7 @@ Errors are logged to stderr and summary to stdout. 
For pageserver: ``` -env SSO_ACCOUNT_ID=123456 REGION=eu-west-1 BUCKET=my-dev-bucket CLOUD_ADMIN_API_TOKEN=${NEON_CLOUD_ADMIN_API_STAGING_KEY} CLOUD_ADMIN_API_URL=[url] cargo run --release -- scan-metadata --node-kind pageserver +env AWS_PROFILE=dev REGION=eu-west-1 BUCKET=my-dev-bucket CLOUD_ADMIN_API_TOKEN=${NEON_CLOUD_ADMIN_API_STAGING_KEY} CLOUD_ADMIN_API_URL=[url] cargo run --release -- scan-metadata --node-kind pageserver Timelines: 31106 With errors: 3 diff --git a/s3_scrubber/src/lib.rs b/s3_scrubber/src/lib.rs index 7966fb6a88..e0f99ecd9c 100644 --- a/s3_scrubber/src/lib.rs +++ b/s3_scrubber/src/lib.rs @@ -200,30 +200,15 @@ impl RootTarget { } #[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] pub struct BucketConfig { pub region: String, pub bucket: String, pub prefix_in_bucket: Option, - - /// Use SSO if this is set, else rely on AWS_* environment vars - pub sso_account_id: Option, -} - -impl Display for BucketConfig { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!( - f, - "{}/{}/{}", - self.sso_account_id.as_deref().unwrap_or(""), - self.region, - self.bucket - ) - } } impl BucketConfig { pub fn from_env() -> anyhow::Result { - let sso_account_id = env::var("SSO_ACCOUNT_ID").ok(); let region = env::var("REGION").context("'REGION' param retrieval")?; let bucket = env::var("BUCKET").context("'BUCKET' param retrieval")?; let prefix_in_bucket = env::var("BUCKET_PREFIX").ok(); @@ -232,7 +217,6 @@ impl BucketConfig { region, bucket, prefix_in_bucket, - sso_account_id, }) } } @@ -276,7 +260,7 @@ pub fn init_logging(file_name: &str) -> WorkerGuard { guard } -pub fn init_s3_client(account_id: Option, bucket_region: Region) -> Client { +pub fn init_s3_client(bucket_region: Region) -> Client { let credentials_provider = { // uses "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY" let chain = CredentialsProviderChain::first_try( @@ -290,7 +274,7 @@ pub fn init_s3_client(account_id: Option, bucket_region: Region) -> Clie ); // Use SSO if we were given an account ID - match account_id { + match std::env::var("SSO_ACCOUNT_ID").ok() { Some(sso_account) => chain.or_else( "sso", SsoCredentialsProvider::builder() @@ -334,7 +318,7 @@ fn init_remote( ) -> anyhow::Result<(Arc, RootTarget)> { let bucket_region = Region::new(bucket_config.region); let delimiter = "/".to_string(); - let s3_client = Arc::new(init_s3_client(bucket_config.sso_account_id, bucket_region)); + let s3_client = Arc::new(init_s3_client(bucket_region)); let s3_root = match node_kind { NodeKind::Pageserver => RootTarget::Pageserver(S3Target { From 4b8809b280b04c92d0c9e2cfb21cbc230de7995b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arpad=20M=C3=BCller?= Date: Thu, 16 May 2024 22:25:19 +0200 Subject: [PATCH 006/220] Tiered compaction: improvements to the windows (#7787) Tiered compaction employs two sliding windows over the keyspace: `KeyspaceWindow` for the image layer generation and `Window` for the delta layer generation. Do some fixes to both windows: * The distinction between the two windows is not very clear. Do the absolute minimum to mention where they are used in the rustdoc description of the struct. Maybe we should rename them (say `WindowForImage` and `WindowForDelta`) or merge them into one window implementation. * Require the keys to strictly increase. The `accum_key_values` already combines the key, so there is no logic needed in `Window::feed` for the same key repeating. 
This is a follow-up to address the request in https://github.com/neondatabase/neon/pull/7671#pullrequestreview-2051995541 * In `choose_next_delta`, we claimed in the comment to use 1.25 as the factor but it was 1.66 instead. Fix this discrepancy by using `*5/4` as the two operations. --- pageserver/compaction/src/compact_tiered.rs | 24 +++++++++++---------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/pageserver/compaction/src/compact_tiered.rs b/pageserver/compaction/src/compact_tiered.rs index 33c9948f45..20f88868f9 100644 --- a/pageserver/compaction/src/compact_tiered.rs +++ b/pageserver/compaction/src/compact_tiered.rs @@ -663,8 +663,8 @@ where } } -// Sliding window through keyspace and values -// This is used by over_with_images to decide on good split points +/// Sliding window through keyspace and values for image layer +/// This is used by [`LevelCompactionState::cover_with_images`] to decide on good split points struct KeyspaceWindow { head: KeyspaceWindowHead, @@ -804,9 +804,9 @@ struct WindowElement { accum_size: u64, } -// Sliding window through keyspace and values -// -// This is used to decide what layer to write next, from the beginning of the window. +/// Sliding window through keyspace and values for delta layer tiling +/// +/// This is used to decide which delta layer to write next. struct Window { elems: VecDeque>, @@ -830,11 +830,13 @@ where fn feed(&mut self, key: K, size: u64) { let last_size; if let Some(last) = self.elems.back_mut() { - assert!(last.last_key <= key); - if key == last.last_key { - last.accum_size += size; - return; - } + // We require the keys to be strictly increasing for the window. + // Keys should already have been deduplicated by `accum_key_values` + assert!( + last.last_key < key, + "last_key(={}) >= key(={key})", + last.last_key + ); last_size = last.accum_size; } else { last_size = 0; @@ -922,7 +924,7 @@ where // If we're willing to stretch it up to 1.25 target size, could we // gobble up the rest of the work? This avoids creating very small // "tail" layers at the end of the keyspace - if !has_more && self.remain_size() < target_size * 5 / 3 { + if !has_more && self.remain_size() < target_size * 5 / 4 { self.commit_upto(self.elems.len()); } else { let delta_split_at = self.find_size_split(target_size); From 6d951e69d636dc1fbfda2bc0282547fcae19ec81 Mon Sep 17 00:00:00 2001 From: Christian Schwarz Date: Fri, 17 May 2024 12:24:02 +0200 Subject: [PATCH 007/220] test_suite: patch, don't replace, the `tenant_config` field, where appropriate (#7771) Before this PR, the changed tests would overwrite the entire `tenant_config` because `pageserver_config_override` is merged non-recursively into the `ps_cfg`. This meant they would override the `PAGESERVER_DEFAULT_TENANT_CONFIG_COMPACTION_ALGORITHM`, impacting our matrix build for `compaction_algorithm=Tiered|Legacy` in https://github.com/neondatabase/neon/pull/7748. I found the tests fixed in this PR using the `NEON_PAGESERVER_PANIC_ON_UNSPECIFIED_COMPACTION_ALGORITHM` env var that I added in #7748. Therefore, I think this is an exhaustive fix. This is better than just searching the code base for `tenant_config`, which is what I had sketched in #7747. 
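To make the failure mode concrete, here is a minimal sketch (with a hypothetical compaction default; not part of this patch) of the non-recursive merge described above: a plain toml string override replaces the whole `tenant_config` table, silently dropping whatever defaults the test matrix had set.

```python
# Minimal sketch of how a string override clobbers nested tenant settings.
import toml

# Hypothetical default installed by the compaction_algorithm test matrix.
ps_cfg = {"tenant_config": {"compaction_algorithm": {"kind": "Tiered"}}}

override = toml.loads("tenant_config = {pitr_interval = '0 sec'}")
for key, value in override.items():
    ps_cfg[key] = value  # non-recursive: the nested table is replaced wholesale

assert ps_cfg["tenant_config"] == {"pitr_interval": "0 sec"}  # Tiered default is gone
```

The changes below avoid this by switching tests to `initial_tenant_conf` where possible, and by letting `pageserver_config_override` be a callable that patches individual keys (e.g. via `setdefault("tenant_config", {})`) instead of replacing the table.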
refs #7749 --- test_runner/fixtures/neon_fixtures.py | 14 +++++++---- test_runner/regress/test_branch_behind.py | 3 +-- test_runner/regress/test_gc_aggressive.py | 9 +++---- test_runner/regress/test_old_request_lsn.py | 3 +-- test_runner/regress/test_pageserver_api.py | 2 ++ test_runner/regress/test_pitr_gc.py | 6 ++--- test_runner/regress/test_recovery.py | 8 +++--- test_runner/regress/test_tenant_conf.py | 27 +++++++++++++-------- test_runner/regress/test_tenant_size.py | 11 ++++++--- test_runner/regress/test_timeline_size.py | 20 +++++++++------ test_runner/regress/test_wal_receiver.py | 13 +++++++--- 11 files changed, 70 insertions(+), 46 deletions(-) diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index b4761f103b..23f30804b4 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -455,7 +455,7 @@ class NeonEnvBuilder: test_overlay_dir: Optional[Path] = None, pageserver_remote_storage: Optional[RemoteStorage] = None, # toml that will be decomposed into `--config-override` flags during `pageserver --init` - pageserver_config_override: Optional[str] = None, + pageserver_config_override: Optional[str | Callable[[Dict[str, Any]], None]] = None, num_safekeepers: int = 1, num_pageservers: int = 1, # Use non-standard SK ids to check for various parsing bugs @@ -1127,10 +1127,14 @@ class NeonEnv: ) if config.pageserver_config_override is not None: - for o in config.pageserver_config_override.split(";"): - override = toml.loads(o) - for key, value in override.items(): - ps_cfg[key] = value + if callable(config.pageserver_config_override): + config.pageserver_config_override(ps_cfg) + else: + assert isinstance(config.pageserver_config_override, str) + for o in config.pageserver_config_override.split(";"): + override = toml.loads(o) + for key, value in override.items(): + ps_cfg[key] = value # Create a corresponding NeonPageserver object self.pageservers.append( diff --git a/test_runner/regress/test_branch_behind.py b/test_runner/regress/test_branch_behind.py index ac2fc79be4..0a5336f5a2 100644 --- a/test_runner/regress/test_branch_behind.py +++ b/test_runner/regress/test_branch_behind.py @@ -11,8 +11,7 @@ from fixtures.utils import print_gc_result, query_scalar # def test_branch_behind(neon_env_builder: NeonEnvBuilder): # Disable pitr, because here we want to test branch creation after GC - neon_env_builder.pageserver_config_override = "tenant_config={pitr_interval = '0 sec'}" - env = neon_env_builder.init_start() + env = neon_env_builder.init_start(initial_tenant_conf={"pitr_interval": "0 sec"}) error_regexes = [ ".*invalid branch start lsn.*", diff --git a/test_runner/regress/test_gc_aggressive.py b/test_runner/regress/test_gc_aggressive.py index e5067bba8b..44133f2350 100644 --- a/test_runner/regress/test_gc_aggressive.py +++ b/test_runner/regress/test_gc_aggressive.py @@ -67,8 +67,7 @@ async def update_and_gc(env: NeonEnv, endpoint: Endpoint, timeline: TimelineId): # def test_gc_aggressive(neon_env_builder: NeonEnvBuilder): # Disable pitr, because here we want to test branch creation after GC - neon_env_builder.pageserver_config_override = "tenant_config={pitr_interval = '0 sec'}" - env = neon_env_builder.init_start() + env = neon_env_builder.init_start(initial_tenant_conf={"pitr_interval": "0 sec"}) timeline = env.neon_cli.create_branch("test_gc_aggressive", "main") endpoint = env.endpoints.create_start("test_gc_aggressive") @@ -94,13 +93,11 @@ def test_gc_aggressive(neon_env_builder: NeonEnvBuilder): # 
def test_gc_index_upload(neon_env_builder: NeonEnvBuilder): - # Disable time-based pitr, we will use LSN-based thresholds in the manual GC calls - neon_env_builder.pageserver_config_override = "tenant_config={pitr_interval = '0 sec'}" num_index_uploads = 0 neon_env_builder.enable_pageserver_remote_storage(RemoteStorageKind.LOCAL_FS) - - env = neon_env_builder.init_start() + # Disable time-based pitr, we will use LSN-based thresholds in the manual GC calls + env = neon_env_builder.init_start(initial_tenant_conf={"pitr_interval": "0 sec"}) tenant_id = env.initial_tenant timeline_id = env.neon_cli.create_branch("test_gc_index_upload", "main") endpoint = env.endpoints.create_start("test_gc_index_upload") diff --git a/test_runner/regress/test_old_request_lsn.py b/test_runner/regress/test_old_request_lsn.py index 43b0bb56f0..f1dd3fb67d 100644 --- a/test_runner/regress/test_old_request_lsn.py +++ b/test_runner/regress/test_old_request_lsn.py @@ -16,8 +16,7 @@ from fixtures.utils import print_gc_result, query_scalar # def test_old_request_lsn(neon_env_builder: NeonEnvBuilder): # Disable pitr, because here we want to test branch creation after GC - neon_env_builder.pageserver_config_override = "tenant_config={pitr_interval = '0 sec'}" - env = neon_env_builder.init_start() + env = neon_env_builder.init_start(initial_tenant_conf={"pitr_interval": "0 sec"}) env.neon_cli.create_branch("test_old_request_lsn", "main") endpoint = env.endpoints.create_start("test_old_request_lsn") diff --git a/test_runner/regress/test_pageserver_api.py b/test_runner/regress/test_pageserver_api.py index 80a1d72f4a..abbea59113 100644 --- a/test_runner/regress/test_pageserver_api.py +++ b/test_runner/regress/test_pageserver_api.py @@ -42,6 +42,8 @@ def test_pageserver_init_node_id(neon_simple_env: NeonEnv, neon_binpath: Path): "listen_http_addr", "pg_auth_type", "http_auth_type", + # TODO: only needed for NEON_PAGESERVER_PANIC_ON_UNSPECIFIED_COMPACTION_ALGORITHM in https://github.com/neondatabase/neon/pull/7748 + # "tenant_config", ] required_config_overrides = [ f"--config-override={toml.dumps({k: ps_config[k]})}" for k in required_config_keys diff --git a/test_runner/regress/test_pitr_gc.py b/test_runner/regress/test_pitr_gc.py index 6434f431a4..7e676b5515 100644 --- a/test_runner/regress/test_pitr_gc.py +++ b/test_runner/regress/test_pitr_gc.py @@ -10,11 +10,9 @@ from fixtures.utils import print_gc_result, query_scalar # def test_pitr_gc(neon_env_builder: NeonEnvBuilder): # Set pitr interval such that we need to keep the data - neon_env_builder.pageserver_config_override = ( - "tenant_config={pitr_interval = '1 day', gc_horizon = 0}" + env = neon_env_builder.init_start( + initial_tenant_conf={"pitr_interval": "1 day", "gc_horizon": "0"} ) - - env = neon_env_builder.init_start() endpoint_main = env.endpoints.create_start("main") main_pg_conn = endpoint_main.connect() diff --git a/test_runner/regress/test_recovery.py b/test_runner/regress/test_recovery.py index ab5c8be256..e21f9bb6f6 100644 --- a/test_runner/regress/test_recovery.py +++ b/test_runner/regress/test_recovery.py @@ -10,9 +10,11 @@ from fixtures.neon_fixtures import NeonEnvBuilder # def test_pageserver_recovery(neon_env_builder: NeonEnvBuilder): # Override default checkpointer settings to run it more often - neon_env_builder.pageserver_config_override = "tenant_config={checkpoint_distance = 1048576}" - - env = neon_env_builder.init_start() + env = neon_env_builder.init_start( + initial_tenant_conf={ + "checkpoint_distance": "1048576", + } + ) 
env.pageserver.is_testing_enabled_or_skip() # We expect the pageserver to exit, which will cause storage storage controller diff --git a/test_runner/regress/test_tenant_conf.py b/test_runner/regress/test_tenant_conf.py index a345464208..2cbb036c0d 100644 --- a/test_runner/regress/test_tenant_conf.py +++ b/test_runner/regress/test_tenant_conf.py @@ -1,5 +1,6 @@ import json from contextlib import closing +from typing import Any, Dict import psycopg2.extras from fixtures.common_types import Lsn @@ -14,16 +15,22 @@ from fixtures.utils import wait_until def test_tenant_config(neon_env_builder: NeonEnvBuilder): """Test per tenant configuration""" - # set some non-default global config - neon_env_builder.pageserver_config_override = """ -page_cache_size=444; -wait_lsn_timeout='111 s'; -[tenant_config] -checkpoint_distance = 10000 -compaction_target_size = 1048576 -evictions_low_residence_duration_metric_threshold = "2 days" -eviction_policy = { "kind" = "LayerAccessThreshold", period = "20s", threshold = "23 hours" } -""" + + def set_some_nondefault_global_config(ps_cfg: Dict[str, Any]): + ps_cfg["page_cache_size"] = 444 + ps_cfg["wait_lsn_timeout"] = "111 s" + + tenant_config = ps_cfg.setdefault("tenant_config", {}) + tenant_config["checkpoint_distance"] = 10000 + tenant_config["compaction_target_size"] = 1048576 + tenant_config["evictions_low_residence_duration_metric_threshold"] = "2 days" + tenant_config["eviction_policy"] = { + "kind": "LayerAccessThreshold", + "period": "20s", + "threshold": "23 hours", + } + + neon_env_builder.pageserver_config_override = set_some_nondefault_global_config env = neon_env_builder.init_start() # we configure eviction but no remote storage, there might be error lines diff --git a/test_runner/regress/test_tenant_size.py b/test_runner/regress/test_tenant_size.py index 7894f6933d..d3a228dbeb 100644 --- a/test_runner/regress/test_tenant_size.py +++ b/test_runner/regress/test_tenant_size.py @@ -502,9 +502,14 @@ def test_get_tenant_size_with_multiple_branches( gc_horizon = 128 * 1024 - neon_env_builder.pageserver_config_override = f"tenant_config={{compaction_period='0s', gc_period='0s', pitr_interval='0sec', gc_horizon={gc_horizon}}}" - - env = neon_env_builder.init_start() + env = neon_env_builder.init_start( + initial_tenant_conf={ + "compaction_period": "0s", + "gc_period": "0s", + "pitr_interval": "0sec", + "gc_horizon": gc_horizon, + } + ) # FIXME: we have a race condition between GC and delete timeline. GC might fail with this # error. 
Similar to https://github.com/neondatabase/neon/issues/2671 diff --git a/test_runner/regress/test_timeline_size.py b/test_runner/regress/test_timeline_size.py index 18063bf104..a6d06df3b6 100644 --- a/test_runner/regress/test_timeline_size.py +++ b/test_runner/regress/test_timeline_size.py @@ -415,11 +415,12 @@ def test_timeline_physical_size_post_compaction(neon_env_builder: NeonEnvBuilder # Disable background compaction as we don't want it to happen after `get_physical_size` request # and before checking the expected size on disk, which makes the assertion failed - neon_env_builder.pageserver_config_override = ( - "tenant_config={checkpoint_distance=100000, compaction_period='10m'}" + env = neon_env_builder.init_start( + initial_tenant_conf={ + "checkpoint_distance": "100000", + "compaction_period": "10m", + } ) - - env = neon_env_builder.init_start() pageserver_http = env.pageserver.http_client() new_timeline_id = env.neon_cli.create_branch("test_timeline_physical_size_post_compaction") @@ -462,9 +463,14 @@ def test_timeline_physical_size_post_gc(neon_env_builder: NeonEnvBuilder): # Disable background compaction and GC as we don't want it to happen after `get_physical_size` request # and before checking the expected size on disk, which makes the assertion failed - neon_env_builder.pageserver_config_override = "tenant_config={checkpoint_distance=100000, compaction_period='0s', gc_period='0s', pitr_interval='1s'}" - - env = neon_env_builder.init_start() + env = neon_env_builder.init_start( + initial_tenant_conf={ + "checkpoint_distance": "100000", + "compaction_period": "0s", + "gc_period": "0s", + "pitr_interval": "1s", + } + ) pageserver_http = env.pageserver.http_client() new_timeline_id = env.neon_cli.create_branch("test_timeline_physical_size_post_gc") diff --git a/test_runner/regress/test_wal_receiver.py b/test_runner/regress/test_wal_receiver.py index d9265dcbcd..6582b34218 100644 --- a/test_runner/regress/test_wal_receiver.py +++ b/test_runner/regress/test_wal_receiver.py @@ -1,4 +1,5 @@ import time +from typing import Any, Dict from fixtures.common_types import Lsn, TenantId from fixtures.log_helper import log @@ -42,10 +43,14 @@ def test_pageserver_lsn_wait_error_start(neon_env_builder: NeonEnvBuilder): # Kills one of the safekeepers and ensures that only the active ones are printed in the state. def test_pageserver_lsn_wait_error_safekeeper_stop(neon_env_builder: NeonEnvBuilder): # Trigger WAL wait timeout faster - neon_env_builder.pageserver_config_override = """ - wait_lsn_timeout = "1s" - tenant_config={walreceiver_connect_timeout = "2s", lagging_wal_timeout = "2s"} - """ + def customize_pageserver_toml(ps_cfg: Dict[str, Any]): + ps_cfg["wait_lsn_timeout"] = "1s" + tenant_config = ps_cfg.setdefault("tenant_config", {}) + tenant_config["walreceiver_connect_timeout"] = "2s" + tenant_config["lagging_wal_timeout"] = "2s" + + neon_env_builder.pageserver_config_override = customize_pageserver_toml + # Have notable SK ids to ensure we check logs for their presence, not some other random numbers neon_env_builder.safekeepers_id_start = 12345 neon_env_builder.num_safekeepers = 3 From c1390bfc3bbd3a7f00df334a39220ca312fc888e Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Fri, 17 May 2024 13:25:01 +0300 Subject: [PATCH 008/220] chore: update defaults for timeline_detach_ancestor (#7779) by having 100 copy operations in flight twe climb up to 2500 requests per min or 41/s. This is still probably less than is allowed, but fast enough for our purposes. 
--- pageserver/src/tenant/timeline/detach_ancestor.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pageserver/src/tenant/timeline/detach_ancestor.rs b/pageserver/src/tenant/timeline/detach_ancestor.rs index 7f59758c87..4d8e570181 100644 --- a/pageserver/src/tenant/timeline/detach_ancestor.rs +++ b/pageserver/src/tenant/timeline/detach_ancestor.rs @@ -56,7 +56,7 @@ impl Default for Options { fn default() -> Self { Self { rewrite_concurrency: std::num::NonZeroUsize::new(2).unwrap(), - copy_concurrency: std::num::NonZeroUsize::new(10).unwrap(), + copy_concurrency: std::num::NonZeroUsize::new(100).unwrap(), } } } From a8e6d259cb49d1bf156dfc2215b92c04d1e8a08f Mon Sep 17 00:00:00 2001 From: John Spray Date: Fri, 17 May 2024 13:24:03 +0100 Subject: [PATCH 009/220] pageserver: fixes for layer path changes (#7786) ## Problem - When a layer with legacy local path format is evicted and then re-downloaded, a panic happened because the path downloaded by remote storage didn't match the path stored in Layer. - While investigating, I also realized that secondary locations would have a similar issue with evictions. Closes: #7783 ## Summary of changes - Make remote timeline client take local paths as an input: it should not have its own ideas about local paths, instead it just uses the layer path that the Layer has. - Make secondary state store an explicit local path, populated on scan of local disk at startup. This provides the same behavior as for Layer, that our local_layer_path is a _default_, but the layer path can actually be anything (e.g. an old style one). - Add tests for both cases. --- pageserver/src/disk_usage_eviction_task.rs | 8 +- .../src/tenant/remote_timeline_client.rs | 2 + .../tenant/remote_timeline_client/download.rs | 11 +-- pageserver/src/tenant/secondary.rs | 54 +++--------- pageserver/src/tenant/secondary/downloader.rs | 47 ++++++++--- pageserver/src/tenant/storage_layer/layer.rs | 1 + .../regress/test_pageserver_generations.py | 84 +++++++++++++++---- 7 files changed, 119 insertions(+), 88 deletions(-) diff --git a/pageserver/src/disk_usage_eviction_task.rs b/pageserver/src/disk_usage_eviction_task.rs index ebeb8bbb20..7f25e49570 100644 --- a/pageserver/src/disk_usage_eviction_task.rs +++ b/pageserver/src/disk_usage_eviction_task.rs @@ -535,17 +535,11 @@ pub(crate) async fn disk_usage_eviction_task_iteration_impl( } EvictionLayer::Secondary(layer) => { let file_size = layer.metadata.file_size(); - let tenant_manager = tenant_manager.clone(); js.spawn(async move { layer .secondary_tenant - .evict_layer( - tenant_manager.get_conf(), - layer.timeline_id, - layer.name, - layer.metadata, - ) + .evict_layer(layer.timeline_id, layer.name) .await; Ok(file_size) }); diff --git a/pageserver/src/tenant/remote_timeline_client.rs b/pageserver/src/tenant/remote_timeline_client.rs index c5462dac43..07d6af696c 100644 --- a/pageserver/src/tenant/remote_timeline_client.rs +++ b/pageserver/src/tenant/remote_timeline_client.rs @@ -518,6 +518,7 @@ impl RemoteTimelineClient { &self, layer_file_name: &LayerName, layer_metadata: &LayerFileMetadata, + local_path: &Utf8Path, cancel: &CancellationToken, ctx: &RequestContext, ) -> anyhow::Result { @@ -536,6 +537,7 @@ impl RemoteTimelineClient { self.timeline_id, layer_file_name, layer_metadata, + local_path, cancel, ctx, ) diff --git a/pageserver/src/tenant/remote_timeline_client/download.rs b/pageserver/src/tenant/remote_timeline_client/download.rs index f3c9e64533..70c5cae05e 100644 --- 
a/pageserver/src/tenant/remote_timeline_client/download.rs +++ b/pageserver/src/tenant/remote_timeline_client/download.rs @@ -21,7 +21,6 @@ use crate::config::PageServerConf; use crate::context::RequestContext; use crate::span::debug_assert_current_span_has_tenant_and_timeline_id; use crate::tenant::remote_timeline_client::{remote_layer_path, remote_timelines_path}; -use crate::tenant::storage_layer::layer::local_layer_path; use crate::tenant::storage_layer::LayerName; use crate::tenant::Generation; use crate::virtual_file::{on_fatal_io_error, MaybeFatalIo, VirtualFile}; @@ -50,19 +49,13 @@ pub async fn download_layer_file<'a>( timeline_id: TimelineId, layer_file_name: &'a LayerName, layer_metadata: &'a LayerFileMetadata, + local_path: &Utf8Path, cancel: &CancellationToken, ctx: &RequestContext, ) -> Result { debug_assert_current_span_has_tenant_and_timeline_id(); let timeline_path = conf.timeline_path(&tenant_shard_id, &timeline_id); - let local_path = local_layer_path( - conf, - &tenant_shard_id, - &timeline_id, - layer_file_name, - &layer_metadata.generation, - ); let remote_path = remote_layer_path( &tenant_shard_id.tenant_id, @@ -82,7 +75,7 @@ pub async fn download_layer_file<'a>( // For more context about durable_rename check this email from postgres mailing list: // https://www.postgresql.org/message-id/56583BDD.9060302@2ndquadrant.com // If pageserver crashes the temp file will be deleted on startup and re-downloaded. - let temp_file_path = path_with_suffix_extension(&local_path, TEMP_DOWNLOAD_EXTENSION); + let temp_file_path = path_with_suffix_extension(local_path, TEMP_DOWNLOAD_EXTENSION); let bytes_amount = download_retry( || async { download_object(storage, &remote_path, &temp_file_path, cancel, ctx).await }, diff --git a/pageserver/src/tenant/secondary.rs b/pageserver/src/tenant/secondary.rs index 7075044baf..252b6eb11b 100644 --- a/pageserver/src/tenant/secondary.rs +++ b/pageserver/src/tenant/secondary.rs @@ -6,11 +6,9 @@ mod scheduler; use std::{sync::Arc, time::SystemTime}; use crate::{ - config::PageServerConf, context::RequestContext, disk_usage_eviction_task::DiskUsageEvictionInfo, task_mgr::{self, TaskKind, BACKGROUND_RUNTIME}, - virtual_file::MaybeFatalIo, }; use self::{ @@ -21,9 +19,8 @@ use self::{ use super::{ config::{SecondaryLocationConfig, TenantConfOpt}, mgr::TenantManager, - remote_timeline_client::LayerFileMetadata, span::debug_assert_current_span_has_tenant_id, - storage_layer::{layer::local_layer_path, LayerName}, + storage_layer::LayerName, }; use pageserver_api::{ @@ -178,13 +175,7 @@ impl SecondaryTenant { /// Cancellation safe, but on cancellation the eviction will go through #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%timeline_id, name=%name))] - pub(crate) async fn evict_layer( - self: &Arc, - conf: &PageServerConf, - timeline_id: TimelineId, - name: LayerName, - metadata: LayerFileMetadata, - ) { + pub(crate) async fn evict_layer(self: &Arc, timeline_id: TimelineId, name: LayerName) { debug_assert_current_span_has_tenant_id(); let guard = match self.gate.enter() { @@ -197,41 +188,11 @@ impl SecondaryTenant { let now = SystemTime::now(); - let local_path = local_layer_path( - conf, - &self.tenant_shard_id, - &timeline_id, - &name, - &metadata.generation, - ); - let this = self.clone(); // spawn it to be cancellation safe tokio::task::spawn_blocking(move || { let _guard = guard; - // We tolerate ENOENT, because between planning eviction and executing - // it, the 
secondary downloader could have seen an updated heatmap that - // resulted in a layer being deleted. - // Other local I/O errors are process-fatal: these should never happen. - let deleted = std::fs::remove_file(local_path); - - let not_found = deleted - .as_ref() - .is_err_and(|x| x.kind() == std::io::ErrorKind::NotFound); - - let deleted = if not_found { - false - } else { - deleted - .map(|()| true) - .fatal_err("Deleting layer during eviction") - }; - - if !deleted { - // skip updating accounting and putting perhaps later timestamp - return; - } // Update the timeline's state. This does not have to be synchronized with // the download process, because: @@ -250,8 +211,15 @@ impl SecondaryTenant { // of the cache. let mut detail = this.detail.lock().unwrap(); if let Some(timeline_detail) = detail.timelines.get_mut(&timeline_id) { - timeline_detail.on_disk_layers.remove(&name); - timeline_detail.evicted_at.insert(name, now); + let removed = timeline_detail.on_disk_layers.remove(&name); + + // We might race with removal of the same layer during downloads, if it was removed + // from the heatmap. If we see that the OnDiskState is gone, then no need to + // do a physical deletion or store in evicted_at. + if let Some(removed) = removed { + removed.remove_blocking(); + timeline_detail.evicted_at.insert(name, now); + } } }) .await diff --git a/pageserver/src/tenant/secondary/downloader.rs b/pageserver/src/tenant/secondary/downloader.rs index 46a3d7e81f..8f27220771 100644 --- a/pageserver/src/tenant/secondary/downloader.rs +++ b/pageserver/src/tenant/secondary/downloader.rs @@ -111,6 +111,7 @@ struct SecondaryDownloader { pub(super) struct OnDiskState { metadata: LayerFileMetadata, access_time: SystemTime, + local_path: Utf8PathBuf, } impl OnDiskState { @@ -121,12 +122,26 @@ impl OnDiskState { _ame: LayerName, metadata: LayerFileMetadata, access_time: SystemTime, + local_path: Utf8PathBuf, ) -> Self { Self { metadata, access_time, + local_path, } } + + // This is infallible, because all errors are either acceptable (ENOENT), or totally + // unexpected (fatal). + pub(super) fn remove_blocking(&self) { + // We tolerate ENOENT, because between planning eviction and executing + // it, the secondary downloader could have seen an updated heatmap that + // resulted in a layer being deleted. + // Other local I/O errors are process-fatal: these should never happen. + std::fs::remove_file(&self.local_path) + .or_else(fs_ext::ignore_not_found) + .fatal_err("Deleting secondary layer") + } } #[derive(Debug, Clone, Default)] @@ -816,20 +831,12 @@ impl<'a> TenantDownloader<'a> { if cfg!(debug_assertions) { // Debug for https://github.com/neondatabase/neon/issues/6966: check that the files we think // are already present on disk are really there. 
- let local_path = local_layer_path( - self.conf, - tenant_shard_id, - &timeline.timeline_id, - &layer.name, - &layer.metadata.generation, - ); - - match tokio::fs::metadata(&local_path).await { + match tokio::fs::metadata(&on_disk.local_path).await { Ok(meta) => { tracing::debug!( "Layer {} present at {}, size {}", layer.name, - local_path, + on_disk.local_path, meta.len(), ); } @@ -837,7 +844,7 @@ impl<'a> TenantDownloader<'a> { tracing::warn!( "Layer {} not found at {} ({})", layer.name, - local_path, + on_disk.local_path, e ); debug_assert!(false); @@ -926,6 +933,13 @@ impl<'a> TenantDownloader<'a> { v.get_mut().access_time = t.access_time; } Entry::Vacant(e) => { + let local_path = local_layer_path( + self.conf, + tenant_shard_id, + &timeline.timeline_id, + &t.name, + &t.metadata.generation, + ); e.insert(OnDiskState::new( self.conf, tenant_shard_id, @@ -933,6 +947,7 @@ impl<'a> TenantDownloader<'a> { t.name, LayerFileMetadata::from(&t.metadata), t.access_time, + local_path, )); } } @@ -955,6 +970,14 @@ impl<'a> TenantDownloader<'a> { &self.secondary_state.cancel ); + let local_path = local_layer_path( + self.conf, + tenant_shard_id, + timeline_id, + &layer.name, + &layer.metadata.generation, + ); + // Note: no backoff::retry wrapper here because download_layer_file does its own retries internally let downloaded_bytes = match download_layer_file( self.conf, @@ -963,6 +986,7 @@ impl<'a> TenantDownloader<'a> { *timeline_id, &layer.name, &LayerFileMetadata::from(&layer.metadata), + &local_path, &self.secondary_state.cancel, ctx, ) @@ -1116,6 +1140,7 @@ async fn init_timeline_state( name, LayerFileMetadata::from(&remote_meta.metadata), remote_meta.access_time, + file_path, ), ); } diff --git a/pageserver/src/tenant/storage_layer/layer.rs b/pageserver/src/tenant/storage_layer/layer.rs index e8c712c4c6..97b349f635 100644 --- a/pageserver/src/tenant/storage_layer/layer.rs +++ b/pageserver/src/tenant/storage_layer/layer.rs @@ -1108,6 +1108,7 @@ impl LayerInner { .download_layer_file( &self.desc.layer_name(), &self.metadata(), + &self.path, &timeline.cancel, ctx, ) diff --git a/test_runner/regress/test_pageserver_generations.py b/test_runner/regress/test_pageserver_generations.py index 9b97254410..0235cf6d20 100644 --- a/test_runner/regress/test_pageserver_generations.py +++ b/test_runner/regress/test_pageserver_generations.py @@ -25,6 +25,7 @@ from fixtures.neon_fixtures import ( S3Scrubber, generate_uploads_and_deletions, ) +from fixtures.pageserver.common_types import parse_layer_file_name from fixtures.pageserver.http import PageserverApiException from fixtures.pageserver.utils import ( assert_tenant_state, @@ -632,39 +633,86 @@ def test_upgrade_generationless_local_file_paths( generation numbers: it should accept these layer files, and avoid doing a delete/download cycle on them. 
""" - env = neon_env_builder.init_start(initial_tenant_conf=TENANT_CONF) - tenant_id = env.initial_tenant - timeline_id = env.initial_timeline + neon_env_builder.num_pageservers = 2 + env = neon_env_builder.init_configs() + env.start() + + tenant_id = TenantId.generate() + timeline_id = TimelineId.generate() + env.neon_cli.create_tenant( + tenant_id, timeline_id, conf=TENANT_CONF, placement_policy='{"Attached":1}' + ) workload = Workload(env, tenant_id, timeline_id) workload.init() workload.write_rows(1000) - env.pageserver.stop() + attached_pageserver = env.get_tenant_pageserver(tenant_id) + secondary_pageserver = list([ps for ps in env.pageservers if ps.id != attached_pageserver.id])[ + 0 + ] + + attached_pageserver.http_client().tenant_heatmap_upload(tenant_id) + secondary_pageserver.http_client().tenant_secondary_download(tenant_id) # Rename the local paths to legacy format, to simulate what - # we would see when upgrading - timeline_dir = env.pageserver.timeline_dir(tenant_id, timeline_id) - files_renamed = 0 - for filename in os.listdir(timeline_dir): - path = os.path.join(timeline_dir, filename) - log.info(f"Found file {path}") - if path.endswith("-v1-00000001"): - new_path = path[:-12] - os.rename(path, new_path) - log.info(f"Renamed {path} -> {new_path}") - files_renamed += 1 + # we would see when upgrading. Do this on both attached and secondary locations, as we will + # test the behavior of both. + for pageserver in env.pageservers: + pageserver.stop() + timeline_dir = pageserver.timeline_dir(tenant_id, timeline_id) + files_renamed = 0 + for filename in os.listdir(timeline_dir): + path = os.path.join(timeline_dir, filename) + log.info(f"Found file {path}") + if path.endswith("-v1-00000001"): + new_path = path[:-12] + os.rename(path, new_path) + log.info(f"Renamed {path} -> {new_path}") + files_renamed += 1 - assert files_renamed > 0 + assert files_renamed > 0 - env.pageserver.start() + pageserver.start() workload.validate() # Assert that there were no on-demand downloads assert ( - env.pageserver.http_client().get_metric_value( + attached_pageserver.http_client().get_metric_value( "pageserver_remote_ondemand_downloaded_layers_total" ) == 0 ) + + # Do a secondary download and ensure there were no layer downloads + secondary_pageserver.http_client().tenant_secondary_download(tenant_id) + assert ( + secondary_pageserver.http_client().get_metric_value( + "pageserver_secondary_download_layer_total" + ) + == 0 + ) + + # Check that when we evict and promote one of the legacy-named layers, everything works as + # expected + local_layers = list( + ( + parse_layer_file_name(path.name), + os.path.join(attached_pageserver.timeline_dir(tenant_id, timeline_id), path), + ) + for path in attached_pageserver.list_layers(tenant_id, timeline_id) + ) + (victim_layer_name, victim_path) = local_layers[0] + assert os.path.exists(victim_path) + + attached_pageserver.http_client().evict_layer( + tenant_id, timeline_id, victim_layer_name.to_str() + ) + assert not os.path.exists(victim_path) + + attached_pageserver.http_client().download_layer( + tenant_id, timeline_id, victim_layer_name.to_str() + ) + # We should download into the same local path we started with + assert os.path.exists(victim_path) From af99c959ef460326b35716239c09b3a572c43b4c Mon Sep 17 00:00:00 2001 From: John Spray Date: Fri, 17 May 2024 16:44:33 +0100 Subject: [PATCH 010/220] storage controller: use SERIALIZABLE isolation level (#7792) ## Problem The storage controller generally assumes that things like updating generation numbers 
are atomic: it should use a strict isolation level. ## Summary of changes - Wrap all database operations in a SERIALIZABLE transaction. - Retry serialization failures, as these do not indicate problems and are normal when plenty of concurrent work is happening. Using this isolation level for all reads is overkill, but much simpler than reasoning about it on a per-operation basis, and does not hurt performance. Tested this with a modified version of storage_controller_many_tenants test with 128k shards, to check that our performance is still fine: it is. --- storage_controller/src/persistence.rs | 230 ++++++++++++++------------ 1 file changed, 126 insertions(+), 104 deletions(-) diff --git a/storage_controller/src/persistence.rs b/storage_controller/src/persistence.rs index dca37166ba..67c05296d5 100644 --- a/storage_controller/src/persistence.rs +++ b/storage_controller/src/persistence.rs @@ -173,7 +173,7 @@ impl Persistence { /// Wraps `with_conn` in order to collect latency and error metrics async fn with_measured_conn(&self, op: DatabaseOperation, func: F) -> DatabaseResult where - F: FnOnce(&mut PgConnection) -> DatabaseResult + Send + 'static, + F: Fn(&mut PgConnection) -> DatabaseResult + Send + 'static, R: Send + 'static, { let latency = &METRICS_REGISTRY @@ -199,13 +199,48 @@ impl Persistence { /// Call the provided function in a tokio blocking thread, with a Diesel database connection. async fn with_conn(&self, func: F) -> DatabaseResult where - F: FnOnce(&mut PgConnection) -> DatabaseResult + Send + 'static, + F: Fn(&mut PgConnection) -> DatabaseResult + Send + 'static, R: Send + 'static, { + // A generous allowance for how many times we may retry serializable transactions + // before giving up. This is not expected to be hit: it is a defensive measure in case we + // somehow engineer a situation where duelling transactions might otherwise live-lock. + const MAX_RETRIES: usize = 128; + let mut conn = self.connection_pool.get()?; - tokio::task::spawn_blocking(move || -> DatabaseResult { func(&mut conn) }) - .await - .expect("Task panic") + tokio::task::spawn_blocking(move || -> DatabaseResult { + let mut retry_count = 0; + loop { + match conn.build_transaction().serializable().run(|c| func(c)) { + Ok(r) => break Ok(r), + Err( + err @ DatabaseError::Query(diesel::result::Error::DatabaseError( + diesel::result::DatabaseErrorKind::SerializationFailure, + _, + )), + ) => { + retry_count += 1; + if retry_count > MAX_RETRIES { + tracing::error!( + "Exceeded max retries on SerializationFailure errors: {err:?}" + ); + break Err(err); + } else { + // Retry on serialization errors: these are expected, because even though our + // transactions don't fight for the same rows, they will occasionally collide + // on index pages (e.g. 
increment_generation for unrelated shards can collide) + tracing::debug!( + "Retrying transaction on serialization failure {err:?}" + ); + continue; + } + } + Err(e) => break Err(e), + } + } + }) + .await + .expect("Task panic") } /// When a node is first registered, persist it before using it for anything @@ -358,14 +393,11 @@ impl Persistence { self.with_measured_conn( DatabaseOperation::InsertTenantShards, move |conn| -> DatabaseResult<()> { - conn.transaction(|conn| -> QueryResult<()> { - for tenant in &shards { - diesel::insert_into(tenant_shards) - .values(tenant) - .execute(conn)?; - } - Ok(()) - })?; + for tenant in &shards { + diesel::insert_into(tenant_shards) + .values(tenant) + .execute(conn)?; + } Ok(()) }, ) @@ -533,8 +565,11 @@ impl Persistence { let update = ShardUpdate { generation: input_generation.map(|g| g.into().unwrap() as i32), placement_policy: input_placement_policy + .as_ref() .map(|p| serde_json::to_string(&p).unwrap()), - config: input_config.map(|c| serde_json::to_string(&c).unwrap()), + config: input_config + .as_ref() + .map(|c| serde_json::to_string(&c).unwrap()), scheduling_policy: input_scheduling_policy .map(|p| serde_json::to_string(&p).unwrap()), }; @@ -581,55 +616,51 @@ impl Persistence { ) -> DatabaseResult<()> { use crate::schema::tenant_shards::dsl::*; self.with_measured_conn(DatabaseOperation::BeginShardSplit, move |conn| -> DatabaseResult<()> { - conn.transaction(|conn| -> DatabaseResult<()> { - // Mark parent shards as splitting + // Mark parent shards as splitting - let updated = diesel::update(tenant_shards) - .filter(tenant_id.eq(split_tenant_id.to_string())) - .filter(shard_count.eq(old_shard_count.literal() as i32)) - .set((splitting.eq(1),)) - .execute(conn)?; - if u8::try_from(updated) - .map_err(|_| DatabaseError::Logical( - format!("Overflow existing shard count {} while splitting", updated)) - )? != old_shard_count.count() { - // Perhaps a deletion or another split raced with this attempt to split, mutating - // the parent shards that we intend to split. In this case the split request should fail. - return Err(DatabaseError::Logical( - format!("Unexpected existing shard count {updated} when preparing tenant for split (expected {})", old_shard_count.count()) - )); + let updated = diesel::update(tenant_shards) + .filter(tenant_id.eq(split_tenant_id.to_string())) + .filter(shard_count.eq(old_shard_count.literal() as i32)) + .set((splitting.eq(1),)) + .execute(conn)?; + if u8::try_from(updated) + .map_err(|_| DatabaseError::Logical( + format!("Overflow existing shard count {} while splitting", updated)) + )? != old_shard_count.count() { + // Perhaps a deletion or another split raced with this attempt to split, mutating + // the parent shards that we intend to split. In this case the split request should fail. 
+ return Err(DatabaseError::Logical( + format!("Unexpected existing shard count {updated} when preparing tenant for split (expected {})", old_shard_count.count()) + )); + } + + // FIXME: spurious clone to sidestep closure move rules + let parent_to_children = parent_to_children.clone(); + + // Insert child shards + for (parent_shard_id, children) in parent_to_children { + let mut parent = crate::schema::tenant_shards::table + .filter(tenant_id.eq(parent_shard_id.tenant_id.to_string())) + .filter(shard_number.eq(parent_shard_id.shard_number.0 as i32)) + .filter(shard_count.eq(parent_shard_id.shard_count.literal() as i32)) + .load::(conn)?; + let parent = if parent.len() != 1 { + return Err(DatabaseError::Logical(format!( + "Parent shard {parent_shard_id} not found" + ))); + } else { + parent.pop().unwrap() + }; + for mut shard in children { + // Carry the parent's generation into the child + shard.generation = parent.generation; + + debug_assert!(shard.splitting == SplitState::Splitting); + diesel::insert_into(tenant_shards) + .values(shard) + .execute(conn)?; } - - // FIXME: spurious clone to sidestep closure move rules - let parent_to_children = parent_to_children.clone(); - - // Insert child shards - for (parent_shard_id, children) in parent_to_children { - let mut parent = crate::schema::tenant_shards::table - .filter(tenant_id.eq(parent_shard_id.tenant_id.to_string())) - .filter(shard_number.eq(parent_shard_id.shard_number.0 as i32)) - .filter(shard_count.eq(parent_shard_id.shard_count.literal() as i32)) - .load::(conn)?; - let parent = if parent.len() != 1 { - return Err(DatabaseError::Logical(format!( - "Parent shard {parent_shard_id} not found" - ))); - } else { - parent.pop().unwrap() - }; - for mut shard in children { - // Carry the parent's generation into the child - shard.generation = parent.generation; - - debug_assert!(shard.splitting == SplitState::Splitting); - diesel::insert_into(tenant_shards) - .values(shard) - .execute(conn)?; - } - } - - Ok(()) - })?; + } Ok(()) }) @@ -647,22 +678,18 @@ impl Persistence { self.with_measured_conn( DatabaseOperation::CompleteShardSplit, move |conn| -> DatabaseResult<()> { - conn.transaction(|conn| -> QueryResult<()> { - // Drop parent shards - diesel::delete(tenant_shards) - .filter(tenant_id.eq(split_tenant_id.to_string())) - .filter(shard_count.eq(old_shard_count.literal() as i32)) - .execute(conn)?; + // Drop parent shards + diesel::delete(tenant_shards) + .filter(tenant_id.eq(split_tenant_id.to_string())) + .filter(shard_count.eq(old_shard_count.literal() as i32)) + .execute(conn)?; - // Clear sharding flag - let updated = diesel::update(tenant_shards) - .filter(tenant_id.eq(split_tenant_id.to_string())) - .set((splitting.eq(0),)) - .execute(conn)?; - debug_assert!(updated > 0); - - Ok(()) - })?; + // Clear sharding flag + let updated = diesel::update(tenant_shards) + .filter(tenant_id.eq(split_tenant_id.to_string())) + .set((splitting.eq(0),)) + .execute(conn)?; + debug_assert!(updated > 0); Ok(()) }, @@ -681,39 +708,34 @@ impl Persistence { self.with_measured_conn( DatabaseOperation::AbortShardSplit, move |conn| -> DatabaseResult { - let aborted = - conn.transaction(|conn| -> DatabaseResult { - // Clear the splitting state on parent shards - let updated = diesel::update(tenant_shards) - .filter(tenant_id.eq(split_tenant_id.to_string())) - .filter(shard_count.ne(new_shard_count.literal() as i32)) - .set((splitting.eq(0),)) - .execute(conn)?; + // Clear the splitting state on parent shards + let updated = 
diesel::update(tenant_shards) + .filter(tenant_id.eq(split_tenant_id.to_string())) + .filter(shard_count.ne(new_shard_count.literal() as i32)) + .set((splitting.eq(0),)) + .execute(conn)?; - // Parent shards are already gone: we cannot abort. - if updated == 0 { - return Ok(AbortShardSplitStatus::Complete); - } + // Parent shards are already gone: we cannot abort. + if updated == 0 { + return Ok(AbortShardSplitStatus::Complete); + } - // Sanity check: if parent shards were present, their cardinality should - // be less than the number of child shards. - if updated >= new_shard_count.count() as usize { - return Err(DatabaseError::Logical(format!( - "Unexpected parent shard count {updated} while aborting split to \ + // Sanity check: if parent shards were present, their cardinality should + // be less than the number of child shards. + if updated >= new_shard_count.count() as usize { + return Err(DatabaseError::Logical(format!( + "Unexpected parent shard count {updated} while aborting split to \ count {new_shard_count:?} on tenant {split_tenant_id}" - ))); - } + ))); + } - // Erase child shards - diesel::delete(tenant_shards) - .filter(tenant_id.eq(split_tenant_id.to_string())) - .filter(shard_count.eq(new_shard_count.literal() as i32)) - .execute(conn)?; + // Erase child shards + diesel::delete(tenant_shards) + .filter(tenant_id.eq(split_tenant_id.to_string())) + .filter(shard_count.eq(new_shard_count.literal() as i32)) + .execute(conn)?; - Ok(AbortShardSplitStatus::Aborted) - })?; - - Ok(aborted) + Ok(AbortShardSplitStatus::Aborted) }, ) .await From c84656a53e92ca4628ffa8061e34102263576f43 Mon Sep 17 00:00:00 2001 From: John Spray Date: Fri, 17 May 2024 17:01:24 +0100 Subject: [PATCH 011/220] pageserver: implement auto-splitting (#7681) ## Problem Currently tenants are only split into multiple shards if a human being calls the API to do it. Issue: #7388 ## Summary of changes - Add a pageserver API for returning the top tenants by size - Add a step to the controller's background loop where if there is no reconciliation or optimization to be done, it looks for things to split. - Add a test that runs pgbench on many tenants concurrently, and checks that splitting happens as expected as tenants grow, without interrupting the client I/O. This PR is quite basic: there is a tasklist in https://github.com/neondatabase/neon/issues/7388 for further work. This PR is meant to be safe (off by default), and sufficient to enable our staging environment to run lots of sharded tenants without a human having to set them up. 
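For illustration, here is a rough sketch (not part of this patch) of exercising the new top-tenants endpoint by hand. The pageserver address, port, and the absence of an auth token are assumptions about a local setup; the request and response field names follow `TopTenantShardsRequest` / `TopTenantShardItem` below.

```python
# Sketch: ask one pageserver for its largest tenant shards by logical size.
import requests

resp = requests.post(
    "http://localhost:9898/v1/top_tenants",  # assumed local management API address
    json={
        "order_by": "max_logical_size",  # or "resident_size"
        "limit": 10,
        "where_shards_lt": None,  # no filter on the existing shard count
        "where_gt": None,  # no lower bound on the size metric
    },
)
resp.raise_for_status()
for shard in resp.json()["shards"]:
    print(shard["id"], shard["max_logical_size"])
```

The storage controller drives the same endpoint from its background loop when `--split-threshold` is set.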
--- control_plane/src/local_env.rs | 4 + control_plane/src/storage_controller.rs | 4 + libs/pageserver_api/src/models.rs | 49 +++ libs/pageserver_api/src/shard.rs | 2 +- pageserver/client/src/mgmt_api.rs | 12 + pageserver/src/http/routes.rs | 98 ++++++ pageserver/src/metrics.rs | 64 ++-- pageserver/src/tenant.rs | 26 ++ .../src/tenant/remote_timeline_client.rs | 6 +- storage_controller/src/main.rs | 5 + storage_controller/src/pageserver_client.rs | 14 +- storage_controller/src/service.rs | 114 ++++++- test_runner/fixtures/pageserver/http.py | 15 + .../performance/test_sharding_autosplit.py | 280 ++++++++++++++++++ test_runner/regress/test_sharding.py | 42 +++ 15 files changed, 689 insertions(+), 46 deletions(-) create mode 100644 test_runner/performance/test_sharding_autosplit.py diff --git a/control_plane/src/local_env.rs b/control_plane/src/local_env.rs index d13884198e..0edcf1be4e 100644 --- a/control_plane/src/local_env.rs +++ b/control_plane/src/local_env.rs @@ -152,6 +152,9 @@ pub struct NeonStorageControllerConf { /// Heartbeat timeout before marking a node offline #[serde(with = "humantime_serde")] pub max_unavailable: Duration, + + /// Threshold for auto-splitting a tenant into shards + pub split_threshold: Option, } impl NeonStorageControllerConf { @@ -164,6 +167,7 @@ impl Default for NeonStorageControllerConf { fn default() -> Self { Self { max_unavailable: Self::DEFAULT_MAX_UNAVAILABLE_INTERVAL, + split_threshold: None, } } } diff --git a/control_plane/src/storage_controller.rs b/control_plane/src/storage_controller.rs index f1c43f4036..96e8276f4d 100644 --- a/control_plane/src/storage_controller.rs +++ b/control_plane/src/storage_controller.rs @@ -305,6 +305,10 @@ impl StorageController { )); } + if let Some(split_threshold) = self.config.split_threshold.as_ref() { + args.push(format!("--split-threshold={split_threshold}")) + } + background_process::start_process( COMMAND, &self.env.base_data_dir, diff --git a/libs/pageserver_api/src/models.rs b/libs/pageserver_api/src/models.rs index 7cf54bf32a..d52fb5e93d 100644 --- a/libs/pageserver_api/src/models.rs +++ b/libs/pageserver_api/src/models.rs @@ -824,6 +824,55 @@ pub struct TenantScanRemoteStorageResponse { pub shards: Vec, } +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "snake_case")] +pub enum TenantSorting { + ResidentSize, + MaxLogicalSize, +} + +impl Default for TenantSorting { + fn default() -> Self { + Self::ResidentSize + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct TopTenantShardsRequest { + // How would you like to sort the tenants? + pub order_by: TenantSorting, + + // How many results? + pub limit: usize, + + // Omit tenants with more than this many shards (e.g. 
if this is the max number of shards + // that the caller would ever split to) + pub where_shards_lt: Option, + + // Omit tenants where the ordering metric is less than this (this is an optimization to + // let us quickly exclude numerous tiny shards) + pub where_gt: Option, +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct TopTenantShardItem { + pub id: TenantShardId, + + /// Total size of layers on local disk for all timelines in this tenant + pub resident_size: u64, + + /// Total size of layers in remote storage for all timelines in this tenant + pub physical_size: u64, + + /// The largest logical size of a timeline within this tenant + pub max_logical_size: u64, +} + +#[derive(Serialize, Deserialize, Debug, Default)] +pub struct TopTenantShardsResponse { + pub shards: Vec, +} + pub mod virtual_file { #[derive( Copy, diff --git a/libs/pageserver_api/src/shard.rs b/libs/pageserver_api/src/shard.rs index ff6d3d91b6..43d9b2e48c 100644 --- a/libs/pageserver_api/src/shard.rs +++ b/libs/pageserver_api/src/shard.rs @@ -125,7 +125,7 @@ impl ShardCount { /// `v` may be zero, or the number of shards in the tenant. `v` is what /// [`Self::literal`] would return. - pub fn new(val: u8) -> Self { + pub const fn new(val: u8) -> Self { Self(val) } } diff --git a/pageserver/client/src/mgmt_api.rs b/pageserver/client/src/mgmt_api.rs index 6df8b2170d..5904713da9 100644 --- a/pageserver/client/src/mgmt_api.rs +++ b/pageserver/client/src/mgmt_api.rs @@ -486,6 +486,18 @@ impl Client { .map_err(Error::ReceiveBody) } + pub async fn top_tenant_shards( + &self, + request: TopTenantShardsRequest, + ) -> Result { + let uri = format!("{}/v1/top_tenants", self.mgmt_api_endpoint); + self.request(Method::POST, uri, request) + .await? + .json() + .await + .map_err(Error::ReceiveBody) + } + pub async fn layer_map_info( &self, tenant_shard_id: TenantShardId, diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index 0a98d32f02..b8d5c67ce0 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -1,6 +1,8 @@ //! //! Management HTTP API //! 
+use std::cmp::Reverse; +use std::collections::BinaryHeap; use std::collections::HashMap; use std::str::FromStr; use std::sync::Arc; @@ -24,7 +26,11 @@ use pageserver_api::models::TenantScanRemoteStorageShard; use pageserver_api::models::TenantShardLocation; use pageserver_api::models::TenantShardSplitRequest; use pageserver_api::models::TenantShardSplitResponse; +use pageserver_api::models::TenantSorting; use pageserver_api::models::TenantState; +use pageserver_api::models::TopTenantShardItem; +use pageserver_api::models::TopTenantShardsRequest; +use pageserver_api::models::TopTenantShardsResponse; use pageserver_api::models::{ DownloadRemoteLayersTaskSpawnRequest, LocationConfigMode, TenantAttachRequest, TenantLoadRequest, TenantLocationConfigRequest, @@ -2323,6 +2329,97 @@ async fn get_utilization( .map_err(ApiError::InternalServerError) } +/// Report on the largest tenants on this pageserver, for the storage controller to identify +/// candidates for splitting +async fn post_top_tenants( + mut r: Request, + _cancel: CancellationToken, +) -> Result, ApiError> { + check_permission(&r, None)?; + let request: TopTenantShardsRequest = json_request(&mut r).await?; + let state = get_state(&r); + + fn get_size_metric(sizes: &TopTenantShardItem, order_by: &TenantSorting) -> u64 { + match order_by { + TenantSorting::ResidentSize => sizes.resident_size, + TenantSorting::MaxLogicalSize => sizes.max_logical_size, + } + } + + #[derive(Eq, PartialEq)] + struct HeapItem { + metric: u64, + sizes: TopTenantShardItem, + } + + impl PartialOrd for HeapItem { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } + } + + /// Heap items have reverse ordering on their metric: this enables using BinaryHeap, which + /// supports popping the greatest item but not the smallest. + impl Ord for HeapItem { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + Reverse(self.metric).cmp(&Reverse(other.metric)) + } + } + + let mut top_n: BinaryHeap = BinaryHeap::with_capacity(request.limit); + + // FIXME: this is a lot of clones to take this tenant list + for (tenant_shard_id, tenant_slot) in state.tenant_manager.list() { + if let Some(shards_lt) = request.where_shards_lt { + // Ignore tenants which already have >= this many shards + if tenant_shard_id.shard_count >= shards_lt { + continue; + } + } + + let sizes = match tenant_slot { + TenantSlot::Attached(tenant) => tenant.get_sizes(), + TenantSlot::Secondary(_) | TenantSlot::InProgress(_) => { + continue; + } + }; + let metric = get_size_metric(&sizes, &request.order_by); + + if let Some(gt) = request.where_gt { + // Ignore tenants whose metric is <= the lower size threshold, to do less sorting work + if metric <= gt { + continue; + } + }; + + match top_n.peek() { + None => { + // Top N list is empty: candidate becomes first member + top_n.push(HeapItem { metric, sizes }); + } + Some(i) if i.metric > metric && top_n.len() < request.limit => { + // Lowest item in list is greater than our candidate, but we aren't at limit yet: push to end + top_n.push(HeapItem { metric, sizes }); + } + Some(i) if i.metric > metric => { + // List is at limit and lowest value is greater than our candidate, drop it. + } + Some(_) => top_n.push(HeapItem { metric, sizes }), + } + + while top_n.len() > request.limit { + top_n.pop(); + } + } + + json_response( + StatusCode::OK, + TopTenantShardsResponse { + shards: top_n.into_iter().map(|i| i.sizes).collect(), + }, + ) +} + /// Common functionality of all the HTTP API handlers. 
/// /// - Adds a tracing span to each request (by `request_span`) @@ -2609,5 +2706,6 @@ pub fn make_router( ) .put("/v1/io_engine", |r| api_handler(r, put_io_engine_handler)) .get("/v1/utilization", |r| api_handler(r, get_utilization)) + .post("/v1/top_tenants", |r| api_handler(r, post_top_tenants)) .any(handler_404)) } diff --git a/pageserver/src/metrics.rs b/pageserver/src/metrics.rs index ffcd08b4b3..5315f0b936 100644 --- a/pageserver/src/metrics.rs +++ b/pageserver/src/metrics.rs @@ -2098,7 +2098,7 @@ pub(crate) struct TimelineMetrics { pub garbage_collect_histo: StorageTimeMetrics, pub find_gc_cutoffs_histo: StorageTimeMetrics, pub last_record_gauge: IntGauge, - resident_physical_size_gauge: UIntGauge, + pub resident_physical_size_gauge: UIntGauge, /// copy of LayeredTimeline.current_logical_size pub current_logical_size_gauge: UIntGauge, pub aux_file_size_gauge: IntGauge, @@ -2312,6 +2312,7 @@ use pin_project_lite::pin_project; use std::collections::HashMap; use std::num::NonZeroUsize; use std::pin::Pin; +use std::sync::atomic::AtomicU64; use std::sync::{Arc, Mutex}; use std::task::{Context, Poll}; use std::time::{Duration, Instant}; @@ -2321,35 +2322,35 @@ use crate::task_mgr::TaskKind; use crate::tenant::mgr::TenantSlot; /// Maintain a per timeline gauge in addition to the global gauge. -struct PerTimelineRemotePhysicalSizeGauge { - last_set: u64, +pub(crate) struct PerTimelineRemotePhysicalSizeGauge { + last_set: AtomicU64, gauge: UIntGauge, } impl PerTimelineRemotePhysicalSizeGauge { fn new(per_timeline_gauge: UIntGauge) -> Self { Self { - last_set: per_timeline_gauge.get(), + last_set: AtomicU64::new(0), gauge: per_timeline_gauge, } } - fn set(&mut self, sz: u64) { + pub(crate) fn set(&self, sz: u64) { self.gauge.set(sz); - if sz < self.last_set { - REMOTE_PHYSICAL_SIZE_GLOBAL.sub(self.last_set - sz); + let prev = self.last_set.swap(sz, std::sync::atomic::Ordering::Relaxed); + if sz < prev { + REMOTE_PHYSICAL_SIZE_GLOBAL.sub(prev - sz); } else { - REMOTE_PHYSICAL_SIZE_GLOBAL.add(sz - self.last_set); + REMOTE_PHYSICAL_SIZE_GLOBAL.add(sz - prev); }; - self.last_set = sz; } - fn get(&self) -> u64 { + pub(crate) fn get(&self) -> u64 { self.gauge.get() } } impl Drop for PerTimelineRemotePhysicalSizeGauge { fn drop(&mut self) { - REMOTE_PHYSICAL_SIZE_GLOBAL.sub(self.last_set); + REMOTE_PHYSICAL_SIZE_GLOBAL.sub(self.last_set.load(std::sync::atomic::Ordering::Relaxed)); } } @@ -2357,7 +2358,7 @@ pub(crate) struct RemoteTimelineClientMetrics { tenant_id: String, shard_id: String, timeline_id: String, - remote_physical_size_gauge: Mutex>, + pub(crate) remote_physical_size_gauge: PerTimelineRemotePhysicalSizeGauge, calls: Mutex>, bytes_started_counter: Mutex>, bytes_finished_counter: Mutex>, @@ -2365,38 +2366,27 @@ pub(crate) struct RemoteTimelineClientMetrics { impl RemoteTimelineClientMetrics { pub fn new(tenant_shard_id: &TenantShardId, timeline_id: &TimelineId) -> Self { + let tenant_id_str = tenant_shard_id.tenant_id.to_string(); + let shard_id_str = format!("{}", tenant_shard_id.shard_slug()); + let timeline_id_str = timeline_id.to_string(); + + let remote_physical_size_gauge = PerTimelineRemotePhysicalSizeGauge::new( + REMOTE_PHYSICAL_SIZE + .get_metric_with_label_values(&[&tenant_id_str, &shard_id_str, &timeline_id_str]) + .unwrap(), + ); + RemoteTimelineClientMetrics { - tenant_id: tenant_shard_id.tenant_id.to_string(), - shard_id: format!("{}", tenant_shard_id.shard_slug()), - timeline_id: timeline_id.to_string(), + tenant_id: tenant_id_str, + shard_id: shard_id_str, + 
timeline_id: timeline_id_str, calls: Mutex::new(HashMap::default()), bytes_started_counter: Mutex::new(HashMap::default()), bytes_finished_counter: Mutex::new(HashMap::default()), - remote_physical_size_gauge: Mutex::new(None), + remote_physical_size_gauge, } } - pub(crate) fn remote_physical_size_set(&self, sz: u64) { - let mut guard = self.remote_physical_size_gauge.lock().unwrap(); - let gauge = guard.get_or_insert_with(|| { - PerTimelineRemotePhysicalSizeGauge::new( - REMOTE_PHYSICAL_SIZE - .get_metric_with_label_values(&[ - &self.tenant_id, - &self.shard_id, - &self.timeline_id, - ]) - .unwrap(), - ) - }); - gauge.set(sz); - } - - pub(crate) fn remote_physical_size_get(&self) -> u64 { - let guard = self.remote_physical_size_gauge.lock().unwrap(); - guard.as_ref().map(|gauge| gauge.get()).unwrap_or(0) - } - pub fn remote_operation_time( &self, file_kind: &RemoteOpFileKind, diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 026cbc107c..54b63f7042 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -21,6 +21,7 @@ use futures::FutureExt; use futures::StreamExt; use pageserver_api::models; use pageserver_api::models::TimelineState; +use pageserver_api::models::TopTenantShardItem; use pageserver_api::models::WalRedoManagerStatus; use pageserver_api::shard::ShardIdentity; use pageserver_api::shard::ShardStripeSize; @@ -2196,6 +2197,31 @@ impl Tenant { Ok(()) } + + pub(crate) fn get_sizes(&self) -> TopTenantShardItem { + let mut result = TopTenantShardItem { + id: self.tenant_shard_id, + resident_size: 0, + physical_size: 0, + max_logical_size: 0, + }; + + for timeline in self.timelines.lock().unwrap().values() { + result.resident_size += timeline.metrics.resident_physical_size_gauge.get(); + + result.physical_size += timeline + .remote_client + .metrics + .remote_physical_size_gauge + .get(); + result.max_logical_size = std::cmp::max( + result.max_logical_size, + timeline.metrics.current_logical_size_gauge.get(), + ); + } + + result + } } /// Given a Vec of timelines and their ancestors (timeline_id, ancestor_id), diff --git a/pageserver/src/tenant/remote_timeline_client.rs b/pageserver/src/tenant/remote_timeline_client.rs index 07d6af696c..3a1113cf01 100644 --- a/pageserver/src/tenant/remote_timeline_client.rs +++ b/pageserver/src/tenant/remote_timeline_client.rs @@ -317,7 +317,7 @@ pub struct RemoteTimelineClient { upload_queue: Mutex, - metrics: Arc, + pub(crate) metrics: Arc, storage_impl: GenericRemoteStorage, @@ -461,11 +461,11 @@ impl RemoteTimelineClient { } else { 0 }; - self.metrics.remote_physical_size_set(size); + self.metrics.remote_physical_size_gauge.set(size); } pub fn get_remote_physical_size(&self) -> u64 { - self.metrics.remote_physical_size_get() + self.metrics.remote_physical_size_gauge.get() } // diff --git a/storage_controller/src/main.rs b/storage_controller/src/main.rs index f1454af533..ce8f8d0cdd 100644 --- a/storage_controller/src/main.rs +++ b/storage_controller/src/main.rs @@ -66,6 +66,10 @@ struct Cli { #[arg(long)] max_unavailable_interval: Option, + /// Size threshold for automatically splitting shards (disabled by default) + #[arg(long)] + split_threshold: Option, + /// Maximum number of reconcilers that may run in parallel #[arg(long)] reconciler_concurrency: Option, @@ -255,6 +259,7 @@ async fn async_main() -> anyhow::Result<()> { reconciler_concurrency: args .reconciler_concurrency .unwrap_or(RECONCILER_CONCURRENCY_DEFAULT), + split_threshold: args.split_threshold, }; // After loading secrets & config, but before 
starting anything else, apply database migrations diff --git a/storage_controller/src/pageserver_client.rs b/storage_controller/src/pageserver_client.rs index 25b6b67e12..769aba80ca 100644 --- a/storage_controller/src/pageserver_client.rs +++ b/storage_controller/src/pageserver_client.rs @@ -2,7 +2,7 @@ use pageserver_api::{ models::{ LocationConfig, LocationConfigListResponse, PageserverUtilization, SecondaryProgress, TenantScanRemoteStorageResponse, TenantShardSplitRequest, TenantShardSplitResponse, - TimelineCreateRequest, TimelineInfo, + TimelineCreateRequest, TimelineInfo, TopTenantShardsRequest, TopTenantShardsResponse, }, shard::TenantShardId, }; @@ -234,4 +234,16 @@ impl PageserverClient { self.inner.get_utilization().await ) } + + pub(crate) async fn top_tenant_shards( + &self, + request: TopTenantShardsRequest, + ) -> Result { + measured_request!( + "top_tenants", + crate::metrics::Method::Post, + &self.node_id_label, + self.inner.top_tenant_shards(request).await + ) + } } diff --git a/storage_controller/src/service.rs b/storage_controller/src/service.rs index ae7e8d3d7d..f914f4e0bb 100644 --- a/storage_controller/src/service.rs +++ b/storage_controller/src/service.rs @@ -32,10 +32,10 @@ use pageserver_api::{ TenantPolicyRequest, TenantShardMigrateRequest, TenantShardMigrateResponse, UtilizationScore, }, - models::{SecondaryProgress, TenantConfigRequest}, + models::{SecondaryProgress, TenantConfigRequest, TopTenantShardsRequest}, }; use reqwest::StatusCode; -use tracing::instrument; +use tracing::{instrument, Instrument}; use crate::pageserver_client::PageserverClient; use pageserver_api::{ @@ -222,6 +222,10 @@ pub struct Config { /// How many Reconcilers may be spawned concurrently pub reconciler_concurrency: usize, + + /// How large must a shard grow in bytes before we split it? + /// None disables auto-splitting. + pub split_threshold: Option, } impl From for ApiError { @@ -699,7 +703,7 @@ impl Service { /// e.g. a tenant create/attach/migrate must eventually be retried: this task is responsible /// for those retries. #[instrument(skip_all)] - async fn background_reconcile(&self) { + async fn background_reconcile(self: &Arc) { self.startup_complete.clone().wait().await; const BACKGROUND_RECONCILE_PERIOD: Duration = Duration::from_secs(20); @@ -711,7 +715,11 @@ impl Service { let reconciles_spawned = self.reconcile_all(); if reconciles_spawned == 0 { // Run optimizer only when we didn't find any other work to do - self.optimize_all().await; + let optimizations = self.optimize_all().await; + if optimizations == 0 { + // Run new splits only when no optimizations are pending + self.autosplit_tenants().await; + } } } _ = self.cancel.cancelled() => return @@ -4766,6 +4774,104 @@ impl Service { validated_work } + /// Look for shards which are oversized and in need of splitting + async fn autosplit_tenants(self: &Arc) { + let Some(split_threshold) = self.config.split_threshold else { + // Auto-splitting is disabled + return; + }; + + let nodes = self.inner.read().unwrap().nodes.clone(); + + const SPLIT_TO_MAX: ShardCount = ShardCount::new(8); + + let mut top_n = Vec::new(); + + // Call into each node to look for big tenants + let top_n_request = TopTenantShardsRequest { + // We currently split based on logical size, for simplicity: logical size is a signal of + // the user's intent to run a large database, whereas physical/resident size can be symptoms + // of compaction issues. Eventually we should switch to using resident size to bound the + // disk space impact of one shard. 
+            order_by: models::TenantSorting::MaxLogicalSize,
+            limit: 10,
+            where_shards_lt: Some(SPLIT_TO_MAX),
+            where_gt: Some(split_threshold),
+        };
+        for node in nodes.values() {
+            let request_ref = &top_n_request;
+            match node
+                .with_client_retries(
+                    |client| async move {
+                        let request = request_ref.clone();
+                        client.top_tenant_shards(request.clone()).await
+                    },
+                    &self.config.jwt_token,
+                    3,
+                    3,
+                    Duration::from_secs(5),
+                    &self.cancel,
+                )
+                .await
+            {
+                Some(Ok(node_top_n)) => {
+                    top_n.extend(node_top_n.shards.into_iter());
+                }
+                Some(Err(mgmt_api::Error::Cancelled)) => {
+                    continue;
+                }
+                Some(Err(e)) => {
+                    tracing::warn!("Failed to fetch top N tenants from {node}: {e}");
+                    continue;
+                }
+                None => {
+                    // Node is shutting down
+                    continue;
+                }
+            };
+        }
+
+        // Pick the biggest tenant to split first
+        top_n.sort_by_key(|i| std::cmp::Reverse(i.resident_size));
+        let Some(split_candidate) = top_n.into_iter().next() else {
+            tracing::debug!("No split-eligible shards found");
+            return;
+        };
+
+        // We spawn a task to run this, so it's exactly like some external API client requesting it. We don't
+        // want to block the background reconcile loop on this.
+        tracing::info!("Auto-splitting tenant for size threshold {split_threshold}: current size {split_candidate:?}");
+
+        let this = self.clone();
+        tokio::spawn(
+            async move {
+                match this
+                    .tenant_shard_split(
+                        split_candidate.id.tenant_id,
+                        TenantShardSplitRequest {
+                            // Always split to the max number of shards: this avoids stepping through
+                            // intervening shard counts and encountering the overhead of a split+cleanup
+                            // each time as a tenant grows, and is not too expensive because our max shard
+                            // count is relatively low anyway.
+                            // This policy will be adjusted in future once we support higher shard count.
+                            new_shard_count: SPLIT_TO_MAX.literal(),
+                            new_stripe_size: Some(ShardParameters::DEFAULT_STRIPE_SIZE),
+                        },
+                    )
+                    .await
+                {
+                    Ok(_) => {
+                        tracing::info!("Successful auto-split");
+                    }
+                    Err(e) => {
+                        tracing::error!("Auto-split failed: {e}");
+                    }
+                }
+            }
+            .instrument(tracing::info_span!("auto_split", tenant_id=%split_candidate.id.tenant_id)),
+        );
+    }
+
+    /// Useful for tests: run whatever work a background [`Self::reconcile_all`] would have done, but
+    /// also wait for any generated Reconcilers to complete. Calling this until it returns zero should
+    /// put the system into a quiescent state where future background reconciliations won't do anything.
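
The heap in `post_top_tenants` and the candidate pick in `autosplit_tenants` both reduce to one pattern: keep only the N largest items while streaming over many shards. Below is a minimal, self-contained sketch of that pattern, assuming nothing beyond the Rust standard library; the `top_n` helper and the sample sizes are illustrative and not code from this patch.

    use std::cmp::Reverse;
    use std::collections::BinaryHeap;

    // Keep the `limit` largest values seen so far. `BinaryHeap` pops its greatest
    // element, so storing `Reverse(v)` makes that "greatest" element the smallest
    // retained value, which is exactly the one to evict when the heap overflows.
    fn top_n(values: impl IntoIterator<Item = u64>, limit: usize) -> Vec<u64> {
        let mut heap: BinaryHeap<Reverse<u64>> = BinaryHeap::with_capacity(limit + 1);
        for v in values {
            heap.push(Reverse(v));
            if heap.len() > limit {
                heap.pop(); // evicts the smallest retained value
            }
        }
        heap.into_iter().map(|Reverse(v)| v).collect()
    }

    fn main() {
        let shard_sizes = [512_u64, 10_240, 4_096, 2_048, 8_192];
        let mut top = top_n(shard_sizes, 3);
        top.sort_unstable_by(|a, b| b.cmp(a)); // biggest first, as the split loop wants
        assert_eq!(top, vec![10_240, 8_192, 4_096]);
        println!("top-3 candidates by size: {top:?}");
    }

Wrapping the metric in `Reverse` turns the max-heap's `pop` into "evict the smallest retained item", which is why the handler above can cap the heap at `request.limit` with a single pop per overflow, and why the controller only has to sort the small merged per-node result before choosing a split candidate.
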
diff --git a/test_runner/fixtures/pageserver/http.py b/test_runner/fixtures/pageserver/http.py index 0b2963d89c..4d563a532b 100644 --- a/test_runner/fixtures/pageserver/http.py +++ b/test_runner/fixtures/pageserver/http.py @@ -890,3 +890,18 @@ class PageserverHttpClient(requests.Session, MetricsGetter): assert current_logical_size == non_incremental assert isinstance(current_logical_size, int) return current_logical_size + + def top_tenants( + self, order_by: str, limit: int, where_shards_lt: int, where_gt: int + ) -> dict[Any, Any]: + res = self.post( + f"http://localhost:{self.port}/v1/top_tenants", + json={ + "order_by": order_by, + "limit": limit, + "where_shards_lt": where_shards_lt, + "where_gt": where_gt, + }, + ) + self.verbose_error(res) + return res.json() # type: ignore diff --git a/test_runner/performance/test_sharding_autosplit.py b/test_runner/performance/test_sharding_autosplit.py new file mode 100644 index 0000000000..9cd83f0959 --- /dev/null +++ b/test_runner/performance/test_sharding_autosplit.py @@ -0,0 +1,280 @@ +import concurrent.futures +import re +from pathlib import Path + +import pytest +from fixtures.common_types import TenantId, TimelineId +from fixtures.log_helper import log +from fixtures.neon_fixtures import ( + NeonEnvBuilder, + PgBin, + tenant_get_shards, +) + + +@pytest.mark.timeout(600) +def test_sharding_autosplit(neon_env_builder: NeonEnvBuilder, pg_bin: PgBin): + """ + Check that sharding, including auto-splitting, "just works" under pgbench workloads. + + This is not a benchmark, but it lives in the same place as benchmarks in order to be run + on a dedicated node that can sustain some significant throughput. + + Other tests validate the details of shard splitting, error cases etc. This test is + the sanity check that it all really works as expected with realistic amounts of data + and under load. + + Success conditions: + - Tenants auto-split when their capacity grows + - Client workloads are not interrupted while that happens + """ + + neon_env_builder.num_pageservers = 8 + neon_env_builder.storage_controller_config = { + # Split tenants at 500MB: it's up to the storage controller how it interprets this (logical + # sizes, physical sizes, etc). We will write this much data logically, therefore other sizes + # will reliably be greater. + "split_threshold": 1024 * 1024 * 500 + } + + tenant_conf = { + # We want layer rewrites to happen as soon as possible (this is the most stressful + # case for the system), so set PITR interval to something tiny. + "pitr_interval": "5s", + # Scaled down thresholds. We will run at ~1GB scale but would like to emulate + # the behavior of a system running at ~100GB scale. 
+ "checkpoint_distance": f"{1024 * 1024}", + "compaction_threshold": "1", + "compaction_target_size": f"{1024 * 1024}", + "image_creation_threshold": "2", + "image_layer_creation_check_threshold": "0", + } + + env = neon_env_builder.init_start() + + for ps in env.pageservers: + ps.allowed_errors.extend( + [ + # We shut down pageservers while they might have some compaction work going on + ".*Compaction failed.*shutting down.*" + ] + ) + + env.storage_controller.allowed_errors.extend( + [ + # The neon_local functionality for updating computes is flaky for unknown reasons + ".*Local notification hook failed.*", + ".*Marking shard.*for notification retry.*", + ".*Failed to notify compute.*", + ] + ) + + # Total tenants + tenant_count = 4 + + # Transaction rate: we set this rather than running at full-speed because we + # might run on a slow node that doesn't cope well with many full-speed pgbenches running concurrently. + transaction_rate = 100 + + class TenantState: + def __init__(self, timeline_id, endpoint): + self.timeline_id = timeline_id + self.endpoint = endpoint + + # Create tenants + tenants = {} + for tenant_id in set(TenantId.generate() for _i in range(0, tenant_count)): + timeline_id = TimelineId.generate() + env.neon_cli.create_tenant(tenant_id, timeline_id, conf=tenant_conf) + endpoint = env.endpoints.create("main", tenant_id=tenant_id) + tenants[tenant_id] = TenantState(timeline_id, endpoint) + endpoint.start() + + def run_pgbench_init(endpoint): + pg_bin.run_capture( + [ + "pgbench", + "-s50", + "-i", + f"postgres://cloud_admin@localhost:{endpoint.pg_port}/postgres", + ] + ) + + def check_pgbench_output(out_path: str): + """ + When we run pgbench, we want not just an absence of errors, but also continuous evidence + of I/O progressing: our shard splitting and migration should not interrrupt the benchmark. + """ + matched_lines = 0 + stderr = Path(f"{out_path}.stderr").read_text() + + low_watermark = None + + # Apply this as a threshold for what we consider an unacceptable interruption to I/O + min_tps = transaction_rate // 10 + + for line in stderr.split("\n"): + match = re.match(r"progress: ([0-9\.]+) s, ([0-9\.]+) tps, .* ([0-9]+) failed", line) + if match is None: + # Fall back to older-version pgbench output (omits failure count) + match = re.match(r"progress: ([0-9\.]+) s, ([0-9\.]+) tps, .*", line) + if match is None: + continue + else: + (_time, tps) = match.groups() + tps = float(tps) + failed = 0 + else: + (_time, tps, failed) = match.groups() # type: ignore + tps = float(tps) + failed = int(failed) + + matched_lines += 1 + + if failed > 0: + raise RuntimeError( + f"pgbench on tenant {endpoint.tenant_id} run at {out_path} has failed > 0" + ) + + if low_watermark is None or low_watermark > tps: + low_watermark = tps + + # Temporarily disabled: have seen some 0 tps regions on Hetzner runners, but not + # at the same time as a shard split. 
+            # if tps < min_tps:
+            #     raise RuntimeError(
+            #         f"pgbench on tenant {endpoint.tenant_id} run at {out_path} has tps < {min_tps}"
+            #     )
+
+        log.info(f"Checked {matched_lines} progress lines, lowest TPS was {low_watermark}")
+
+        if matched_lines == 0:
+            raise RuntimeError(f"pgbench output at {out_path} contained no progress lines")
+
+    def run_pgbench_main(endpoint):
+        out_path = pg_bin.run_capture(
+            [
+                "pgbench",
+                "-s50",
+                "-T",
+                "180",
+                "-R",
+                f"{transaction_rate}",
+                "-P",
+                "1",
+                f"postgres://cloud_admin@localhost:{endpoint.pg_port}/postgres",
+            ]
+        )
+
+        check_pgbench_output(out_path)
+
+    def run_pgbench_read(endpoint):
+        out_path = pg_bin.run_capture(
+            [
+                "pgbench",
+                "-s50",
+                "-T",
+                "30",
+                "-R",
+                f"{transaction_rate}",
+                "-S",
+                "-P",
+                "1",
+                f"postgres://cloud_admin@localhost:{endpoint.pg_port}/postgres",
+            ]
+        )
+
+        check_pgbench_output(out_path)
+
+    with concurrent.futures.ThreadPoolExecutor(max_workers=tenant_count) as pgbench_threads:
+        pgbench_futs = []
+        for tenant_state in tenants.values():
+            fut = pgbench_threads.submit(run_pgbench_init, tenant_state.endpoint)
+            pgbench_futs.append(fut)
+
+        log.info("Waiting for pgbench inits")
+        for fut in pgbench_futs:
+            fut.result()
+
+        pgbench_futs = []
+        for tenant_state in tenants.values():
+            fut = pgbench_threads.submit(run_pgbench_main, tenant_state.endpoint)
+            pgbench_futs.append(fut)
+
+        log.info("Waiting for pgbench read/write pass")
+        for fut in pgbench_futs:
+            fut.result()
+
+    def assert_all_split():
+        for tenant_id in tenants.keys():
+            shards = tenant_get_shards(env, tenant_id)
+            assert len(shards) == 8
+
+    # This is not a wait_until, because we wanted the splits to happen _while_ pgbench is running: otherwise
+    # this test is not properly doing its job of validating that splits work nicely under load.
+    assert_all_split()
+
+    env.storage_controller.assert_log_contains(".*Successful auto-split.*")
+
+    # Log timeline sizes, useful for debug, and implicitly validates that the shards
+    # are available in the places the controller thinks they should be.
+    for tenant_id, tenant_state in tenants.items():
+        (shard_zero_id, shard_zero_ps) = tenant_get_shards(env, tenant_id)[0]
+        timeline_info = shard_zero_ps.http_client().timeline_detail(
+            shard_zero_id, tenant_state.timeline_id
+        )
+        log.info(f"{shard_zero_id} timeline: {timeline_info}")
+
+    # Run compaction for all tenants, restart endpoint so that on subsequent reads we will
+    # definitely hit pageserver for reads. 
This compaction pass is expected to drop unwanted
+    # layers but not do any rewrites (we're still in the same generation)
+    for tenant_id, tenant_state in tenants.items():
+        tenant_state.endpoint.stop()
+        for shard_id, shard_ps in tenant_get_shards(env, tenant_id):
+            shard_ps.http_client().timeline_gc(shard_id, tenant_state.timeline_id, gc_horizon=None)
+            shard_ps.http_client().timeline_compact(shard_id, tenant_state.timeline_id)
+        tenant_state.endpoint.start()
+
+    with concurrent.futures.ThreadPoolExecutor(max_workers=tenant_count) as pgbench_threads:
+        pgbench_futs = []
+        for tenant_state in tenants.values():
+            fut = pgbench_threads.submit(run_pgbench_read, tenant_state.endpoint)
+            pgbench_futs.append(fut)
+
+        log.info("Waiting for pgbench read pass")
+        for fut in pgbench_futs:
+            fut.result()
+
+    env.storage_controller.consistency_check()
+
+    # Restart the storage controller
+    env.storage_controller.stop()
+    env.storage_controller.start()
+
+    env.storage_controller.consistency_check()
+
+    # Restart all pageservers
+    for ps in env.pageservers:
+        ps.stop()
+        ps.start()
+
+    # Freshen gc_info in Timeline, so that when compaction runs in the background in the
+    # subsequent pgbench period, the last_gc_cutoff is updated and enables the conditions for a rewrite to pass.
+    for tenant_id, tenant_state in tenants.items():
+        for shard_id, shard_ps in tenant_get_shards(env, tenant_id):
+            shard_ps.http_client().timeline_gc(shard_id, tenant_state.timeline_id, gc_horizon=None)
+
+    # One last check that data remains readable after everything has restarted
+    with concurrent.futures.ThreadPoolExecutor(max_workers=tenant_count) as pgbench_threads:
+        pgbench_futs = []
+        for tenant_state in tenants.values():
+            fut = pgbench_threads.submit(run_pgbench_read, tenant_state.endpoint)
+            pgbench_futs.append(fut)
+
+        log.info("Waiting for pgbench read pass")
+        for fut in pgbench_futs:
+            fut.result()
+
+    # Assert that some rewrites happened
+    # TODO: uncomment this after https://github.com/neondatabase/neon/pull/7531 is merged
+    # assert any(ps.log_contains(".*Rewriting layer after shard split.*") for ps in env.pageservers)
diff --git a/test_runner/regress/test_sharding.py b/test_runner/regress/test_sharding.py
index 87544af598..1bfeec6f4b 100644
--- a/test_runner/regress/test_sharding.py
+++ b/test_runner/regress/test_sharding.py
@@ -1326,3 +1326,45 @@ def test_sharding_unlogged_relation(neon_env_builder: NeonEnvBuilder):
 
     # Ensure that post-endpoint-restart modifications are ingested happily by pageserver
     wait_for_last_flush_lsn(env, ep, tenant_id, timeline_id)
+
+
+def test_top_tenants(neon_env_builder: NeonEnvBuilder):
+    """
+    The top_tenants API is used in shard auto-splitting to find candidates. 
+ """ + + env = neon_env_builder.init_configs() + neon_env_builder.start() + + tenants = [] + n_tenants = 8 + for i in range(0, n_tenants): + tenant_id = TenantId.generate() + timeline_id = TimelineId.generate() + env.neon_cli.create_tenant(tenant_id, timeline_id) + + # Write a different amount of data to each tenant + w = Workload(env, tenant_id, timeline_id) + w.init() + w.write_rows(i * 1000) + w.stop() + + logical_size = env.pageserver.http_client().timeline_detail(tenant_id, timeline_id)[ + "current_logical_size" + ] + tenants.append((tenant_id, timeline_id, logical_size)) + + log.info(f"Created {tenant_id}/{timeline_id} with size {logical_size}") + + # Ask for 1 largest tenant + top_1 = env.pageserver.http_client().top_tenants("max_logical_size", 1, 8, 0) + assert len(top_1["shards"]) == 1 + assert top_1["shards"][0]["id"] == str(tenants[-1][0]) + assert top_1["shards"][0]["max_logical_size"] == tenants[-1][2] + + # Apply a lower bound limit + top = env.pageserver.http_client().top_tenants( + "max_logical_size", 100, 8, where_gt=tenants[3][2] + ) + assert len(top["shards"]) == n_tenants - 4 + assert set(i["id"] for i in top["shards"]) == set(str(i[0]) for i in tenants[4:]) From aaf60819fa479e37a4b477b20e1fbcee2d5a046f Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Fri, 17 May 2024 15:22:49 -0400 Subject: [PATCH 012/220] feat(pageserver): persist aux file policy in index part (#7668) Part of https://github.com/neondatabase/neon/issues/7462 ## Summary of changes Tenant config is not persisted unless it's attached on the storage controller. In this pull request, we persist the aux file policy flag in the `index_part.json`. Admins can set `switch_aux_file_policy` in the storage controller or using the page server API. Upon the first aux file gets written, the write path will compare the aux file policy target with the current policy. If it is switch-able, we will do the switch. Otherwise, the original policy will be used. The test cases show what the admins can do / cannot do. The `last_aux_file_policy` is stored in `IndexPart`. Updates to the persisted policy are done via `schedule_index_upload_for_aux_file_policy_update`. On the write path, the writer will update the field. --------- Signed-off-by: Alex Chi Z Co-authored-by: Joonas Koivunen --- libs/pageserver_api/src/models.rs | 134 ++++++++ pageserver/ctl/src/main.rs | 1 + pageserver/src/http/routes.rs | 2 + pageserver/src/pgdatadir_mapping.rs | 33 +- pageserver/src/tenant.rs | 296 ++++++++++++++++++ pageserver/src/tenant/config.rs | 4 +- .../src/tenant/remote_timeline_client.rs | 13 + .../tenant/remote_timeline_client/index.rs | 87 ++++- pageserver/src/tenant/timeline.rs | 9 +- pageserver/src/tenant/timeline/delete.rs | 2 + pageserver/src/tenant/upload_queue.rs | 6 + test_runner/regress/test_aux_files.py | 72 +++++ 12 files changed, 648 insertions(+), 11 deletions(-) create mode 100644 test_runner/regress/test_aux_files.py diff --git a/libs/pageserver_api/src/models.rs b/libs/pageserver_api/src/models.rs index d52fb5e93d..80ca696313 100644 --- a/libs/pageserver_api/src/models.rs +++ b/libs/pageserver_api/src/models.rs @@ -10,6 +10,7 @@ use std::{ io::{BufRead, Read}, num::{NonZeroU64, NonZeroUsize}, str::FromStr, + sync::atomic::AtomicUsize, time::{Duration, SystemTime}, }; @@ -308,13 +309,88 @@ pub struct TenantConfig { pub switch_aux_file_policy: Option, } +/// The policy for the aux file storage. It can be switched through `switch_aux_file_policy` +/// tenant config. 
When the first aux file written, the policy will be persisted in the +/// `index_part.json` file and has a limited migration path. +/// +/// Currently, we only allow the following migration path: +/// +/// Unset -> V1 +/// -> V2 +/// -> CrossValidation -> V2 #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] pub enum AuxFilePolicy { + /// V1 aux file policy: store everything in AUX_FILE_KEY V1, + /// V2 aux file policy: store in the AUX_FILE keyspace V2, + /// Cross validation runs both formats on the write path and does validation + /// on the read path. CrossValidation, } +impl AuxFilePolicy { + pub fn is_valid_migration_path(from: Option, to: Self) -> bool { + matches!( + (from, to), + (None, _) | (Some(AuxFilePolicy::CrossValidation), AuxFilePolicy::V2) + ) + } + + /// If a tenant writes aux files without setting `switch_aux_policy`, this value will be used. + pub fn default_tenant_config() -> Self { + Self::V1 + } +} + +/// The aux file policy memory flag. Users can store `Option` into this atomic flag. 0 == unspecified. +pub struct AtomicAuxFilePolicy(AtomicUsize); + +impl AtomicAuxFilePolicy { + pub fn new(policy: Option) -> Self { + Self(AtomicUsize::new( + policy.map(AuxFilePolicy::to_usize).unwrap_or_default(), + )) + } + + pub fn load(&self) -> Option { + match self.0.load(std::sync::atomic::Ordering::Acquire) { + 0 => None, + other => Some(AuxFilePolicy::from_usize(other)), + } + } + + pub fn store(&self, policy: Option) { + self.0.store( + policy.map(AuxFilePolicy::to_usize).unwrap_or_default(), + std::sync::atomic::Ordering::Release, + ); + } +} + +impl AuxFilePolicy { + pub fn to_usize(self) -> usize { + match self { + Self::V1 => 1, + Self::CrossValidation => 2, + Self::V2 => 3, + } + } + + pub fn try_from_usize(this: usize) -> Option { + match this { + 1 => Some(Self::V1), + 2 => Some(Self::CrossValidation), + 3 => Some(Self::V2), + _ => None, + } + } + + pub fn from_usize(this: usize) -> Self { + Self::try_from_usize(this).unwrap() + } +} + impl FromStr for AuxFilePolicy { type Err = anyhow::Error; @@ -604,6 +680,9 @@ pub struct TimelineInfo { pub state: TimelineState, pub walreceiver_status: String, + + /// The last aux file policy being used on this timeline + pub last_aux_file_policy: Option, } #[derive(Debug, Clone, Serialize, Deserialize)] @@ -1505,4 +1584,59 @@ mod tests { assert_eq!(actual, expected, "example on {line}"); } } + + #[test] + fn test_aux_file_migration_path() { + assert!(AuxFilePolicy::is_valid_migration_path( + None, + AuxFilePolicy::V1 + )); + assert!(AuxFilePolicy::is_valid_migration_path( + None, + AuxFilePolicy::V2 + )); + assert!(AuxFilePolicy::is_valid_migration_path( + None, + AuxFilePolicy::CrossValidation + )); + // Self-migration is not a valid migration path, and the caller should handle it by itself. 
+ assert!(!AuxFilePolicy::is_valid_migration_path( + Some(AuxFilePolicy::V1), + AuxFilePolicy::V1 + )); + assert!(!AuxFilePolicy::is_valid_migration_path( + Some(AuxFilePolicy::V2), + AuxFilePolicy::V2 + )); + assert!(!AuxFilePolicy::is_valid_migration_path( + Some(AuxFilePolicy::CrossValidation), + AuxFilePolicy::CrossValidation + )); + // Migrations not allowed + assert!(!AuxFilePolicy::is_valid_migration_path( + Some(AuxFilePolicy::CrossValidation), + AuxFilePolicy::V1 + )); + assert!(!AuxFilePolicy::is_valid_migration_path( + Some(AuxFilePolicy::V1), + AuxFilePolicy::V2 + )); + assert!(!AuxFilePolicy::is_valid_migration_path( + Some(AuxFilePolicy::V2), + AuxFilePolicy::V1 + )); + assert!(!AuxFilePolicy::is_valid_migration_path( + Some(AuxFilePolicy::V2), + AuxFilePolicy::CrossValidation + )); + assert!(!AuxFilePolicy::is_valid_migration_path( + Some(AuxFilePolicy::V1), + AuxFilePolicy::CrossValidation + )); + // Migrations allowed + assert!(AuxFilePolicy::is_valid_migration_path( + Some(AuxFilePolicy::CrossValidation), + AuxFilePolicy::V2 + )); + } } diff --git a/pageserver/ctl/src/main.rs b/pageserver/ctl/src/main.rs index 1fb75584fc..e92c352dab 100644 --- a/pageserver/ctl/src/main.rs +++ b/pageserver/ctl/src/main.rs @@ -219,6 +219,7 @@ fn handle_metadata( let mut meta = TimelineMetadata::from_bytes(&metadata_bytes)?; println!("Current metadata:\n{meta:?}"); let mut update_meta = false; + // TODO: simplify this part if let Some(disk_consistent_lsn) = disk_consistent_lsn { meta = TimelineMetadata::new( *disk_consistent_lsn, diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index b8d5c67ce0..7efd48afc7 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -439,6 +439,8 @@ async fn build_timeline_info_common( state, walreceiver_status, + + last_aux_file_policy: timeline.last_aux_file_policy.load(), }; Ok(info) } diff --git a/pageserver/src/pgdatadir_mapping.rs b/pageserver/src/pgdatadir_mapping.rs index 1092d64d33..402f075365 100644 --- a/pageserver/src/pgdatadir_mapping.rs +++ b/pageserver/src/pgdatadir_mapping.rs @@ -35,7 +35,7 @@ use std::ops::ControlFlow; use std::ops::Range; use strum::IntoEnumIterator; use tokio_util::sync::CancellationToken; -use tracing::{debug, trace, warn}; +use tracing::{debug, info, trace, warn}; use utils::bin_ser::DeserializeError; use utils::vec_map::{VecMap, VecMapOrdering}; use utils::{bin_ser::BeSer, lsn::Lsn}; @@ -718,10 +718,11 @@ impl Timeline { lsn: Lsn, ctx: &RequestContext, ) -> Result, PageReconstructError> { - match self.get_switch_aux_file_policy() { - AuxFilePolicy::V1 => self.list_aux_files_v1(lsn, ctx).await, - AuxFilePolicy::V2 => self.list_aux_files_v2(lsn, ctx).await, - AuxFilePolicy::CrossValidation => { + let current_policy = self.last_aux_file_policy.load(); + match current_policy { + Some(AuxFilePolicy::V1) | None => self.list_aux_files_v1(lsn, ctx).await, + Some(AuxFilePolicy::V2) => self.list_aux_files_v2(lsn, ctx).await, + Some(AuxFilePolicy::CrossValidation) => { let v1_result = self.list_aux_files_v1(lsn, ctx).await; let v2_result = self.list_aux_files_v2(lsn, ctx).await; match (v1_result, v2_result) { @@ -1469,7 +1470,27 @@ impl<'a> DatadirModification<'a> { content: &[u8], ctx: &RequestContext, ) -> anyhow::Result<()> { - let policy = self.tline.get_switch_aux_file_policy(); + let switch_policy = self.tline.get_switch_aux_file_policy(); + + let policy = { + let current_policy = self.tline.last_aux_file_policy.load(); + // Allowed switch path: + // * no aux files -> 
v1/v2/cross-validation + // * cross-validation->v2 + if AuxFilePolicy::is_valid_migration_path(current_policy, switch_policy) { + self.tline.last_aux_file_policy.store(Some(switch_policy)); + self.tline + .remote_client + .schedule_index_upload_for_aux_file_policy_update(Some(switch_policy))?; + info!(current=?current_policy, next=?switch_policy, "switching aux file policy"); + switch_policy + } else { + // This branch handles non-valid migration path, and the case that switch_policy == current_policy. + // And actually, because the migration path always allow unspecified -> *, this unwrap_or will never be hit. + current_policy.unwrap_or(AuxFilePolicy::default_tenant_config()) + } + }; + if let AuxFilePolicy::V2 | AuxFilePolicy::CrossValidation = policy { let key = aux_file::encode_aux_file_key(path); // retrieve the key from the engine diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 54b63f7042..d42b9082b7 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -20,6 +20,7 @@ use futures::stream::FuturesUnordered; use futures::FutureExt; use futures::StreamExt; use pageserver_api::models; +use pageserver_api::models::AuxFilePolicy; use pageserver_api::models::TimelineState; use pageserver_api::models::TopTenantShardItem; use pageserver_api::models::WalRedoManagerStatus; @@ -529,6 +530,7 @@ impl Tenant { index_part: Option, metadata: TimelineMetadata, ancestor: Option>, + last_aux_file_policy: Option, _ctx: &RequestContext, ) -> anyhow::Result<()> { let tenant_id = self.tenant_shard_id; @@ -539,6 +541,10 @@ impl Tenant { ancestor.clone(), resources, CreateTimelineCause::Load, + // This could be derived from ancestor branch + index part. Though the only caller of `timeline_init_and_sync` is `load_remote_timeline`, + // there will potentially be other caller of this function in the future, and we don't know whether `index_part` or `ancestor` takes precedence. + // Therefore, we pass this field explicitly for now, and remove it once we fully migrate to aux file v2. + last_aux_file_policy, )?; let disk_consistent_lsn = timeline.get_disk_consistent_lsn(); anyhow::ensure!( @@ -553,6 +559,10 @@ impl Tenant { if let Some(index_part) = index_part.as_ref() { timeline.remote_client.init_upload_queue(index_part)?; + + timeline + .last_aux_file_policy + .store(index_part.last_aux_file_policy()); } else { // No data on the remote storage, but we have local metadata file. We can end up // here with timeline_create being interrupted before finishing index part upload. 
@@ -1173,12 +1183,15 @@ impl Tenant { None }; + let last_aux_file_policy = index_part.last_aux_file_policy(); + self.timeline_init_and_sync( timeline_id, resources, Some(index_part), remote_metadata, ancestor, + last_aux_file_policy, ctx, ) .await @@ -1358,6 +1371,7 @@ impl Tenant { create_guard, initdb_lsn, None, + None, ) .await } @@ -2441,6 +2455,7 @@ impl Tenant { ancestor: Option>, resources: TimelineResources, cause: CreateTimelineCause, + last_aux_file_policy: Option, ) -> anyhow::Result> { let state = match cause { CreateTimelineCause::Load => { @@ -2469,6 +2484,7 @@ impl Tenant { resources, pg_version, state, + last_aux_file_policy, self.cancel.child_token(), ); @@ -3119,6 +3135,7 @@ impl Tenant { timeline_create_guard, start_lsn + 1, Some(Arc::clone(src_timeline)), + src_timeline.last_aux_file_policy.load(), ) .await?; @@ -3312,6 +3329,7 @@ impl Tenant { timeline_create_guard, pgdata_lsn, None, + None, ) .await?; @@ -3383,6 +3401,7 @@ impl Tenant { create_guard: TimelineCreateGuard<'a>, start_lsn: Lsn, ancestor: Option>, + last_aux_file_policy: Option, ) -> anyhow::Result { let tenant_shard_id = self.tenant_shard_id; @@ -3398,6 +3417,7 @@ impl Tenant { ancestor, resources, CreateTimelineCause::Load, + last_aux_file_policy, ) .context("Failed to create timeline data structure")?; @@ -5621,4 +5641,280 @@ mod tests { Ok(()) } + + #[tokio::test] + async fn test_branch_copies_dirty_aux_file_flag() { + let harness = TenantHarness::create("test_branch_copies_dirty_aux_file_flag").unwrap(); + + // the default aux file policy to switch is v1 if not set by the admins + assert_eq!( + harness.tenant_conf.switch_aux_file_policy, + AuxFilePolicy::V1 + ); + let (tenant, ctx) = harness.load().await; + + let mut lsn = Lsn(0x08); + + let tline: Arc = tenant + .create_test_timeline(TIMELINE_ID, lsn, DEFAULT_PG_VERSION, &ctx) + .await + .unwrap(); + + // no aux file is written at this point, so the persistent flag should be unset + assert_eq!(tline.last_aux_file_policy.load(), None); + + { + lsn += 8; + let mut modification = tline.begin_modification(lsn); + modification + .put_file("pg_logical/mappings/test1", b"first", &ctx) + .await + .unwrap(); + modification.commit(&ctx).await.unwrap(); + } + + // there is no tenant manager to pass the configuration through, so lets mimic it + tenant.set_new_location_config( + AttachedTenantConf::try_from(LocationConf::attached_single( + TenantConfOpt { + switch_aux_file_policy: Some(AuxFilePolicy::V2), + ..Default::default() + }, + tenant.generation, + &pageserver_api::models::ShardParameters::default(), + )) + .unwrap(), + ); + + assert_eq!( + tline.get_switch_aux_file_policy(), + AuxFilePolicy::V2, + "wanted state has been updated" + ); + assert_eq!( + tline.last_aux_file_policy.load(), + Some(AuxFilePolicy::V1), + "aux file is written with switch_aux_file_policy unset (which is v1), so we should keep v1" + ); + + // we can read everything from the storage + let files = tline.list_aux_files(lsn, &ctx).await.unwrap(); + assert_eq!( + files.get("pg_logical/mappings/test1"), + Some(&bytes::Bytes::from_static(b"first")) + ); + + { + lsn += 8; + let mut modification = tline.begin_modification(lsn); + modification + .put_file("pg_logical/mappings/test2", b"second", &ctx) + .await + .unwrap(); + modification.commit(&ctx).await.unwrap(); + } + + assert_eq!( + tline.last_aux_file_policy.load(), + Some(AuxFilePolicy::V1), + "keep v1 storage format when new files are written" + ); + + let files = tline.list_aux_files(lsn, &ctx).await.unwrap(); + assert_eq!( + 
files.get("pg_logical/mappings/test2"), + Some(&bytes::Bytes::from_static(b"second")) + ); + + let child = tenant + .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(lsn), &ctx) + .await + .unwrap(); + + // child copies the last flag even if that is not on remote storage yet + assert_eq!(child.get_switch_aux_file_policy(), AuxFilePolicy::V2); + assert_eq!(child.last_aux_file_policy.load(), Some(AuxFilePolicy::V1)); + + let files = child.list_aux_files(lsn, &ctx).await.unwrap(); + assert_eq!(files.get("pg_logical/mappings/test1"), None); + assert_eq!(files.get("pg_logical/mappings/test2"), None); + + // even if we crash here without flushing parent timeline with it's new + // last_aux_file_policy we are safe, because child was never meant to access ancestor's + // files. the ancestor can even switch back to V1 because of a migration safely. + } + + #[tokio::test] + async fn aux_file_policy_switch() { + let mut harness = TenantHarness::create("aux_file_policy_switch").unwrap(); + harness.tenant_conf.switch_aux_file_policy = AuxFilePolicy::CrossValidation; // set to cross-validation mode + let (tenant, ctx) = harness.load().await; + + let mut lsn = Lsn(0x08); + + let tline: Arc = tenant + .create_test_timeline(TIMELINE_ID, lsn, DEFAULT_PG_VERSION, &ctx) + .await + .unwrap(); + + assert_eq!( + tline.last_aux_file_policy.load(), + None, + "no aux file is written so it should be unset" + ); + + { + lsn += 8; + let mut modification = tline.begin_modification(lsn); + modification + .put_file("pg_logical/mappings/test1", b"first", &ctx) + .await + .unwrap(); + modification.commit(&ctx).await.unwrap(); + } + + // there is no tenant manager to pass the configuration through, so lets mimic it + tenant.set_new_location_config( + AttachedTenantConf::try_from(LocationConf::attached_single( + TenantConfOpt { + switch_aux_file_policy: Some(AuxFilePolicy::V2), + ..Default::default() + }, + tenant.generation, + &pageserver_api::models::ShardParameters::default(), + )) + .unwrap(), + ); + + assert_eq!( + tline.get_switch_aux_file_policy(), + AuxFilePolicy::V2, + "wanted state has been updated" + ); + assert_eq!( + tline.last_aux_file_policy.load(), + Some(AuxFilePolicy::CrossValidation), + "dirty index_part.json reflected state is yet to be updated" + ); + + // we can still read the auxfile v1 before we ingest anything new + let files = tline.list_aux_files(lsn, &ctx).await.unwrap(); + assert_eq!( + files.get("pg_logical/mappings/test1"), + Some(&bytes::Bytes::from_static(b"first")) + ); + + { + lsn += 8; + let mut modification = tline.begin_modification(lsn); + modification + .put_file("pg_logical/mappings/test2", b"second", &ctx) + .await + .unwrap(); + modification.commit(&ctx).await.unwrap(); + } + + assert_eq!( + tline.last_aux_file_policy.load(), + Some(AuxFilePolicy::V2), + "ingesting a file should apply the wanted switch state when applicable" + ); + + let files = tline.list_aux_files(lsn, &ctx).await.unwrap(); + assert_eq!( + files.get("pg_logical/mappings/test1"), + Some(&bytes::Bytes::from_static(b"first")), + "cross validation writes to both v1 and v2 so this should be available in v2" + ); + assert_eq!( + files.get("pg_logical/mappings/test2"), + Some(&bytes::Bytes::from_static(b"second")) + ); + + // mimic again by trying to flip it from V2 to V1 (not switched to while ingesting a file) + tenant.set_new_location_config( + AttachedTenantConf::try_from(LocationConf::attached_single( + TenantConfOpt { + switch_aux_file_policy: Some(AuxFilePolicy::V1), + ..Default::default() + }, + 
tenant.generation, + &pageserver_api::models::ShardParameters::default(), + )) + .unwrap(), + ); + + { + lsn += 8; + let mut modification = tline.begin_modification(lsn); + modification + .put_file("pg_logical/mappings/test2", b"third", &ctx) + .await + .unwrap(); + modification.commit(&ctx).await.unwrap(); + } + + assert_eq!( + tline.get_switch_aux_file_policy(), + AuxFilePolicy::V1, + "wanted state has been updated again, even if invalid request" + ); + + assert_eq!( + tline.last_aux_file_policy.load(), + Some(AuxFilePolicy::V2), + "ingesting a file should apply the wanted switch state when applicable" + ); + + let files = tline.list_aux_files(lsn, &ctx).await.unwrap(); + assert_eq!( + files.get("pg_logical/mappings/test1"), + Some(&bytes::Bytes::from_static(b"first")) + ); + assert_eq!( + files.get("pg_logical/mappings/test2"), + Some(&bytes::Bytes::from_static(b"third")) + ); + + // mimic again by trying to flip it from from V1 to V2 (not switched to while ingesting a file) + tenant.set_new_location_config( + AttachedTenantConf::try_from(LocationConf::attached_single( + TenantConfOpt { + switch_aux_file_policy: Some(AuxFilePolicy::V2), + ..Default::default() + }, + tenant.generation, + &pageserver_api::models::ShardParameters::default(), + )) + .unwrap(), + ); + + { + lsn += 8; + let mut modification = tline.begin_modification(lsn); + modification + .put_file("pg_logical/mappings/test3", b"last", &ctx) + .await + .unwrap(); + modification.commit(&ctx).await.unwrap(); + } + + assert_eq!(tline.get_switch_aux_file_policy(), AuxFilePolicy::V2); + + assert_eq!(tline.last_aux_file_policy.load(), Some(AuxFilePolicy::V2)); + + let files = tline.list_aux_files(lsn, &ctx).await.unwrap(); + assert_eq!( + files.get("pg_logical/mappings/test1"), + Some(&bytes::Bytes::from_static(b"first")) + ); + assert_eq!( + files.get("pg_logical/mappings/test2"), + Some(&bytes::Bytes::from_static(b"third")) + ); + assert_eq!( + files.get("pg_logical/mappings/test3"), + Some(&bytes::Bytes::from_static(b"last")) + ); + } } diff --git a/pageserver/src/tenant/config.rs b/pageserver/src/tenant/config.rs index a743ce3c16..a695363cdc 100644 --- a/pageserver/src/tenant/config.rs +++ b/pageserver/src/tenant/config.rs @@ -373,6 +373,8 @@ pub struct TenantConf { /// Switch to a new aux file policy. Switching this flag requires the user has not written any aux file into /// the storage before, and this flag cannot be switched back. Otherwise there will be data corruptions. + /// There is a `last_aux_file_policy` flag which gets persisted in `index_part.json` once the first aux + /// file is written. 
pub switch_aux_file_policy: AuxFilePolicy, } @@ -574,7 +576,7 @@ impl Default for TenantConf { lazy_slru_download: false, timeline_get_throttle: crate::tenant::throttle::Config::disabled(), image_layer_creation_check_threshold: DEFAULT_IMAGE_LAYER_CREATION_CHECK_THRESHOLD, - switch_aux_file_policy: AuxFilePolicy::V1, + switch_aux_file_policy: AuxFilePolicy::default_tenant_config(), } } } diff --git a/pageserver/src/tenant/remote_timeline_client.rs b/pageserver/src/tenant/remote_timeline_client.rs index 3a1113cf01..d3adae6841 100644 --- a/pageserver/src/tenant/remote_timeline_client.rs +++ b/pageserver/src/tenant/remote_timeline_client.rs @@ -189,6 +189,7 @@ use camino::Utf8Path; use chrono::{NaiveDateTime, Utc}; pub(crate) use download::download_initdb_tar_zst; +use pageserver_api::models::AuxFilePolicy; use pageserver_api::shard::{ShardIndex, TenantShardId}; use scopeguard::ScopeGuard; use tokio_util::sync::CancellationToken; @@ -611,6 +612,17 @@ impl RemoteTimelineClient { Ok(()) } + /// Launch an index-file upload operation in the background, with only aux_file_policy flag updated. + pub(crate) fn schedule_index_upload_for_aux_file_policy_update( + self: &Arc, + last_aux_file_policy: Option, + ) -> anyhow::Result<()> { + let mut guard = self.upload_queue.lock().unwrap(); + let upload_queue = guard.initialized_mut()?; + upload_queue.last_aux_file_policy = last_aux_file_policy; + self.schedule_index_upload(upload_queue); + Ok(()) + } /// /// Launch an index-file upload operation in the background, if necessary. /// @@ -1851,6 +1863,7 @@ impl RemoteTimelineClient { dangling_files: HashMap::default(), shutting_down: false, shutdown_ready: Arc::new(tokio::sync::Semaphore::new(0)), + last_aux_file_policy: initialized.last_aux_file_policy, }; let upload_queue = std::mem::replace( diff --git a/pageserver/src/tenant/remote_timeline_client/index.rs b/pageserver/src/tenant/remote_timeline_client/index.rs index b114d6aa10..032dda7ff3 100644 --- a/pageserver/src/tenant/remote_timeline_client/index.rs +++ b/pageserver/src/tenant/remote_timeline_client/index.rs @@ -5,6 +5,7 @@ use std::collections::HashMap; use chrono::NaiveDateTime; +use pageserver_api::models::AuxFilePolicy; use serde::{Deserialize, Serialize}; use utils::id::TimelineId; @@ -88,6 +89,16 @@ pub struct IndexPart { #[serde(default)] pub(crate) lineage: Lineage, + + /// Describes the kind of aux files stored in the timeline. + /// + /// The value is modified during file ingestion when the latest wanted value communicated via tenant config is applied if it is acceptable. + /// A V1 setting after V2 files have been committed is not accepted. + /// + /// None means no aux files have been written to the storage before the point + /// when this flag is introduced. + #[serde(skip_serializing_if = "Option::is_none", default)] + pub(crate) last_aux_file_policy: Option, } impl IndexPart { @@ -101,10 +112,11 @@ impl IndexPart { /// is always generated from the keys of `layer_metadata`) /// - 4: timeline_layers is fully removed. /// - 5: lineage was added - const LATEST_VERSION: usize = 5; + /// - 6: last_aux_file_policy is added. + const LATEST_VERSION: usize = 6; // Versions we may see when reading from a bucket. 
- pub const KNOWN_VERSIONS: &'static [usize] = &[1, 2, 3, 4, 5]; + pub const KNOWN_VERSIONS: &'static [usize] = &[1, 2, 3, 4, 5, 6]; pub const FILE_NAME: &'static str = "index_part.json"; @@ -113,6 +125,7 @@ impl IndexPart { disk_consistent_lsn: Lsn, metadata: TimelineMetadata, lineage: Lineage, + last_aux_file_policy: Option, ) -> Self { let layer_metadata = layers_and_metadata .iter() @@ -126,6 +139,7 @@ impl IndexPart { metadata, deleted_at: None, lineage, + last_aux_file_policy, } } @@ -155,8 +169,13 @@ impl IndexPart { example_metadata.disk_consistent_lsn(), example_metadata, Default::default(), + Some(AuxFilePolicy::V1), ) } + + pub(crate) fn last_aux_file_policy(&self) -> Option { + self.last_aux_file_policy + } } impl From<&UploadQueueInitialized> for IndexPart { @@ -165,7 +184,13 @@ impl From<&UploadQueueInitialized> for IndexPart { let metadata = uq.latest_metadata.clone(); let lineage = uq.latest_lineage.clone(); - Self::new(&uq.latest_files, disk_consistent_lsn, metadata, lineage) + Self::new( + &uq.latest_files, + disk_consistent_lsn, + metadata, + lineage, + uq.last_aux_file_policy, + ) } } @@ -299,6 +324,7 @@ mod tests { metadata: TimelineMetadata::from_bytes(&[113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]).unwrap(), deleted_at: None, lineage: Lineage::default(), + last_aux_file_policy: None, }; let part = IndexPart::from_s3_bytes(example.as_bytes()).unwrap(); @@ -340,6 +366,7 @@ mod tests { metadata: TimelineMetadata::from_bytes(&[113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]).unwrap(), 
deleted_at: None, lineage: Lineage::default(), + last_aux_file_policy: None, }; let part = IndexPart::from_s3_bytes(example.as_bytes()).unwrap(); @@ -383,6 +410,7 @@ mod tests { deleted_at: Some(chrono::NaiveDateTime::parse_from_str( "2023-07-31T09:00:00.123000000", "%Y-%m-%dT%H:%M:%S.%f").unwrap()), lineage: Lineage::default(), + last_aux_file_policy: None, }; let part = IndexPart::from_s3_bytes(example.as_bytes()).unwrap(); @@ -428,6 +456,7 @@ mod tests { .unwrap(), deleted_at: None, lineage: Lineage::default(), + last_aux_file_policy: None, }; let empty_layers_parsed = IndexPart::from_s3_bytes(empty_layers_json.as_bytes()).unwrap(); @@ -468,6 +497,7 @@ mod tests { metadata: TimelineMetadata::from_bytes(&[113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]).unwrap(), deleted_at: Some(parse_naive_datetime("2023-07-31T09:00:00.123000000")), lineage: Lineage::default(), + last_aux_file_policy: None, }; let part = IndexPart::from_s3_bytes(example.as_bytes()).unwrap(); @@ -511,6 +541,57 @@ mod tests { reparenting_history: vec![TimelineId::from_str("e1bfd8c633d713d279e6fcd2bcc15b6d").unwrap()], original_ancestor: Some((TimelineId::from_str("e2bfd8c633d713d279e6fcd2bcc15b6d").unwrap(), Lsn::from_str("0/15A7618").unwrap(), parse_naive_datetime("2024-05-07T18:52:36.322426563"))), }, + last_aux_file_policy: None, + }; + + let part = IndexPart::from_s3_bytes(example.as_bytes()).unwrap(); + assert_eq!(part, expected); + } + + #[test] + fn v6_indexpart_is_parsed() { + let example = r#"{ + "version":6, + "layer_metadata":{ + "000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9": { "file_size": 25600000 }, + "000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51": { "file_size": 9007199254741001 } + }, + "disk_consistent_lsn":"0/16960E8", + 
"metadata_bytes":[113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], + "deleted_at": "2023-07-31T09:00:00.123", + "lineage":{ + "original_ancestor":["e2bfd8c633d713d279e6fcd2bcc15b6d","0/15A7618","2024-05-07T18:52:36.322426563"], + "reparenting_history":["e1bfd8c633d713d279e6fcd2bcc15b6d"] + }, + "last_aux_file_policy": "V2" + }"#; + + let expected = IndexPart { + version: 6, + layer_metadata: HashMap::from([ + ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".parse().unwrap(), IndexLayerMetadata { + file_size: 25600000, + generation: Generation::none(), + shard: ShardIndex::unsharded() + }), + ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap(), IndexLayerMetadata { + // serde_json should always parse this but this might be a double with jq for + // example. 
+ file_size: 9007199254741001, + generation: Generation::none(), + shard: ShardIndex::unsharded() + }) + ]), + disk_consistent_lsn: "0/16960E8".parse::().unwrap(), + metadata: TimelineMetadata::from_bytes(&[113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]).unwrap(), + deleted_at: Some(chrono::NaiveDateTime::parse_from_str( + "2023-07-31T09:00:00.123000000", "%Y-%m-%dT%H:%M:%S.%f").unwrap()), + lineage: Lineage { + reparenting_history_truncated: false, + reparenting_history: vec![TimelineId::from_str("e1bfd8c633d713d279e6fcd2bcc15b6d").unwrap()], + original_ancestor: Some((TimelineId::from_str("e2bfd8c633d713d279e6fcd2bcc15b6d").unwrap(), Lsn::from_str("0/15A7618").unwrap(), parse_naive_datetime("2024-05-07T18:52:36.322426563"))), + }, + last_aux_file_policy: Some(AuxFilePolicy::V2), }; let part = IndexPart::from_s3_bytes(example.as_bytes()).unwrap(); diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index df9bc9b35b..1fb1928079 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -23,7 +23,7 @@ use pageserver_api::{ }, keyspace::{KeySpaceAccum, SparseKeyPartitioning}, models::{ - AuxFilePolicy, CompactionAlgorithm, DownloadRemoteLayersTaskInfo, + AtomicAuxFilePolicy, AuxFilePolicy, CompactionAlgorithm, DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskSpawnRequest, EvictionPolicy, InMemoryLayerInfo, LayerMapInfo, TimelineState, }, @@ -413,7 +413,11 @@ pub struct Timeline { /// Keep aux directory cache to avoid it's reconstruction on each update pub(crate) aux_files: tokio::sync::Mutex, + /// Size estimator for aux file v2 pub(crate) aux_file_size_estimator: AuxFileSizeEstimator, + + /// Indicate whether aux file v2 storage is enabled. 
+ pub(crate) last_aux_file_policy: AtomicAuxFilePolicy, } pub struct WalReceiverInfo { @@ -2133,6 +2137,7 @@ impl Timeline { resources: TimelineResources, pg_version: u32, state: TimelineState, + aux_file_policy: Option, cancel: CancellationToken, ) -> Arc { let disk_consistent_lsn = metadata.disk_consistent_lsn(); @@ -2257,6 +2262,8 @@ impl Timeline { }), aux_file_size_estimator: AuxFileSizeEstimator::new(aux_file_metrics), + + last_aux_file_policy: AtomicAuxFilePolicy::new(aux_file_policy), }; result.repartition_threshold = result.get_checkpoint_distance() / REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE; diff --git a/pageserver/src/tenant/timeline/delete.rs b/pageserver/src/tenant/timeline/delete.rs index 901f5149b3..b5dfc86e77 100644 --- a/pageserver/src/tenant/timeline/delete.rs +++ b/pageserver/src/tenant/timeline/delete.rs @@ -280,6 +280,8 @@ impl DeleteTimelineFlow { // Important. We dont pass ancestor above because it can be missing. // Thus we need to skip the validation here. CreateTimelineCause::Delete, + // Aux file policy is not needed for deletion, assuming deletion does not read aux keyspace + None, ) .context("create_timeline_struct")?; diff --git a/pageserver/src/tenant/upload_queue.rs b/pageserver/src/tenant/upload_queue.rs index a2f761fa94..c0cc8f3124 100644 --- a/pageserver/src/tenant/upload_queue.rs +++ b/pageserver/src/tenant/upload_queue.rs @@ -8,6 +8,7 @@ use std::collections::{HashMap, VecDeque}; use std::fmt::Debug; use chrono::NaiveDateTime; +use pageserver_api::models::AuxFilePolicy; use std::sync::Arc; use tracing::info; use utils::lsn::AtomicLsn; @@ -60,6 +61,9 @@ pub(crate) struct UploadQueueInitialized { /// Part of the flattened "next" `index_part.json`. pub(crate) latest_lineage: Lineage, + /// The last aux file policy used on this timeline. + pub(crate) last_aux_file_policy: Option, + /// `disk_consistent_lsn` from the last metadata file that was successfully /// uploaded. `Lsn(0)` if nothing was uploaded yet. /// Unlike `latest_files` or `latest_metadata`, this value is never ahead. 
@@ -189,6 +193,7 @@ impl UploadQueue { dangling_files: HashMap::new(), shutting_down: false, shutdown_ready: Arc::new(tokio::sync::Semaphore::new(0)), + last_aux_file_policy: Default::default(), }; *self = UploadQueue::Initialized(state); @@ -239,6 +244,7 @@ impl UploadQueue { dangling_files: HashMap::new(), shutting_down: false, shutdown_ready: Arc::new(tokio::sync::Semaphore::new(0)), + last_aux_file_policy: index_part.last_aux_file_policy(), }; *self = UploadQueue::Initialized(state); diff --git a/test_runner/regress/test_aux_files.py b/test_runner/regress/test_aux_files.py new file mode 100644 index 0000000000..be9c41a867 --- /dev/null +++ b/test_runner/regress/test_aux_files.py @@ -0,0 +1,72 @@ +from fixtures.log_helper import log +from fixtures.neon_fixtures import ( + NeonEnvBuilder, + logical_replication_sync, +) + + +def test_aux_v2_config_switch(neon_env_builder: NeonEnvBuilder, vanilla_pg): + env = neon_env_builder.init_start() + endpoint = env.endpoints.create_start("main") + client = env.pageserver.http_client() + + tenant_id = env.initial_tenant + timeline_id = env.initial_timeline + + tenant_config = client.tenant_config(tenant_id).effective_config + tenant_config["switch_aux_file_policy"] = "V2" + client.set_tenant_config(tenant_id, tenant_config) + # aux file v2 is enabled on the write path, so for now, it should be unset (or null) + assert ( + client.timeline_detail(tenant_id=tenant_id, timeline_id=timeline_id)["last_aux_file_policy"] + is None + ) + + pg_conn = endpoint.connect() + cur = pg_conn.cursor() + + cur.execute("create table t(pk integer primary key, payload integer)") + cur.execute( + "CREATE TABLE replication_example(id SERIAL PRIMARY KEY, somedata int, text varchar(120));" + ) + cur.execute("create publication pub1 for table t, replication_example") + + # now start subscriber, aux files will be created at this point. TODO: find better ways of testing aux files (i.e., neon_test_utils) + # instead of going through the full logical replication process. 
+ vanilla_pg.start() + vanilla_pg.safe_psql("create table t(pk integer primary key, payload integer)") + vanilla_pg.safe_psql( + "CREATE TABLE replication_example(id SERIAL PRIMARY KEY, somedata int, text varchar(120), testcolumn1 int, testcolumn2 int, testcolumn3 int);" + ) + connstr = endpoint.connstr().replace("'", "''") + log.info(f"ep connstr is {endpoint.connstr()}, subscriber connstr {vanilla_pg.connstr()}") + vanilla_pg.safe_psql(f"create subscription sub1 connection '{connstr}' publication pub1") + + # Wait logical replication channel to be established + logical_replication_sync(vanilla_pg, endpoint) + vanilla_pg.stop() + endpoint.stop() + + with env.pageserver.http_client() as client: + # aux file v2 flag should be enabled at this point + assert client.timeline_detail(tenant_id, timeline_id)["last_aux_file_policy"] == "V2" + with env.pageserver.http_client() as client: + tenant_config = client.tenant_config(tenant_id).effective_config + tenant_config["switch_aux_file_policy"] = "V1" + client.set_tenant_config(tenant_id, tenant_config) + # the flag should still be enabled + assert ( + client.timeline_detail(tenant_id=tenant_id, timeline_id=timeline_id)[ + "last_aux_file_policy" + ] + == "V2" + ) + env.pageserver.restart() + with env.pageserver.http_client() as client: + # aux file v2 flag should be persisted + assert ( + client.timeline_detail(tenant_id=tenant_id, timeline_id=timeline_id)[ + "last_aux_file_policy" + ] + == "V2" + ) From e1a9669d05374ea27685a1cf527676fe01df7722 Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Fri, 17 May 2024 16:04:02 -0400 Subject: [PATCH 013/220] feat(pagebench): add aux file bench (#7746) part of https://github.com/neondatabase/neon/issues/7462 ## Summary of changes This pull request adds two APIs to the pageserver management API: list_aux_files and ingest_aux_files. The aux file pagebench is intended to be used on an empty timeline because the data do not go through the safekeeper. LSNs are advanced by 8 for each ingestion, to avoid invariant checks inside the pageserver. For now, I only care about space amplification / read amplification, so the bench is designed in a very simple way: ingest 10000 files, and I will manually dump the layer map to analyze. 
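For illustration, a minimal sketch of driving the two new endpoints through the management API client methods added in this patch (`ingest_aux_files` / `list_aux_files`); the tenant/timeline IDs, file names and error handling below are placeholders, not part of the bench itself:

```rust
use std::collections::HashMap;

use pageserver_api::shard::TenantShardId;
use utils::id::{TenantId, TimelineId};
use utils::lsn::Lsn;

// Sketch only: exercises the ingest/list endpoints added in this patch against
// a caller-supplied tenant/timeline. The bench itself does the same in batches.
async fn ingest_and_list(
    client: &pageserver_client::mgmt_api::Client,
    tenant_id: TenantId,
    timeline_id: TimelineId,
) -> anyhow::Result<()> {
    // Address the tenant via its unsharded id, as the bench does.
    let tenant_shard_id = TenantShardId::unsharded(tenant_id);

    // Each ingest call advances the last-record LSN by 8 on the server side,
    // so repeated calls do not trip the pageserver's LSN invariant checks.
    let files: HashMap<String, String> = (0..100)
        .map(|i| (format!("pg_logical/mappings/{i:03}"), format!("{i:08}")))
        .collect();
    client
        .ingest_aux_files(tenant_shard_id, timeline_id, files)
        .await?;

    // List just below Lsn::MAX to see everything ingested so far.
    let listed = client
        .list_aux_files(tenant_shard_id, timeline_id, Lsn(Lsn::MAX.0 - 1))
        .await?;
    println!("{} aux files stored", listed.len());
    Ok(())
}
```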
--------- Signed-off-by: Alex Chi Z --- libs/pageserver_api/src/models.rs | 10 +++ pageserver/client/src/mgmt_api.rs | 57 +++++++++++++ pageserver/pagebench/src/cmd/aux_files.rs | 98 +++++++++++++++++++++++ pageserver/pagebench/src/main.rs | 3 + pageserver/src/http/routes.rs | 75 +++++++++++++++++ 5 files changed, 243 insertions(+) create mode 100644 pageserver/pagebench/src/cmd/aux_files.rs diff --git a/libs/pageserver_api/src/models.rs b/libs/pageserver_api/src/models.rs index 80ca696313..451ee1a13c 100644 --- a/libs/pageserver_api/src/models.rs +++ b/libs/pageserver_api/src/models.rs @@ -841,6 +841,16 @@ pub struct DownloadRemoteLayersTaskSpawnRequest { pub max_concurrent_downloads: NonZeroUsize, } +#[derive(Debug, Serialize, Deserialize)] +pub struct IngestAuxFilesRequest { + pub aux_files: HashMap, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct ListAuxFilesRequest { + pub lsn: Lsn, +} + #[derive(Debug, Serialize, Deserialize, Clone)] pub struct DownloadRemoteLayersTaskInfo { pub task_id: String, diff --git a/pageserver/client/src/mgmt_api.rs b/pageserver/client/src/mgmt_api.rs index 5904713da9..69b86d9c46 100644 --- a/pageserver/client/src/mgmt_api.rs +++ b/pageserver/client/src/mgmt_api.rs @@ -1,8 +1,12 @@ +use std::collections::HashMap; + +use bytes::Bytes; use pageserver_api::{models::*, shard::TenantShardId}; use reqwest::{IntoUrl, Method, StatusCode}; use utils::{ http::error::HttpErrorBody, id::{TenantId, TimelineId}, + lsn::Lsn, }; pub mod util; @@ -561,4 +565,57 @@ impl Client { }), } } + + pub async fn ingest_aux_files( + &self, + tenant_shard_id: TenantShardId, + timeline_id: TimelineId, + aux_files: HashMap, + ) -> Result { + let uri = format!( + "{}/v1/tenant/{}/timeline/{}/ingest_aux_files", + self.mgmt_api_endpoint, tenant_shard_id, timeline_id + ); + let resp = self + .request_noerror(Method::POST, &uri, IngestAuxFilesRequest { aux_files }) + .await?; + match resp.status() { + StatusCode::OK => Ok(true), + status => Err(match resp.json::().await { + Ok(HttpErrorBody { msg }) => Error::ApiError(status, msg), + Err(_) => { + Error::ReceiveErrorBody(format!("Http error ({}) at {}.", status.as_u16(), uri)) + } + }), + } + } + + pub async fn list_aux_files( + &self, + tenant_shard_id: TenantShardId, + timeline_id: TimelineId, + lsn: Lsn, + ) -> Result> { + let uri = format!( + "{}/v1/tenant/{}/timeline/{}/list_aux_files", + self.mgmt_api_endpoint, tenant_shard_id, timeline_id + ); + let resp = self + .request_noerror(Method::POST, &uri, ListAuxFilesRequest { lsn }) + .await?; + match resp.status() { + StatusCode::OK => { + let resp: HashMap = resp.json().await.map_err(|e| { + Error::ApiError(StatusCode::INTERNAL_SERVER_ERROR, format!("{e}")) + })?; + Ok(resp) + } + status => Err(match resp.json::().await { + Ok(HttpErrorBody { msg }) => Error::ApiError(status, msg), + Err(_) => { + Error::ReceiveErrorBody(format!("Http error ({}) at {}.", status.as_u16(), uri)) + } + }), + } + } } diff --git a/pageserver/pagebench/src/cmd/aux_files.rs b/pageserver/pagebench/src/cmd/aux_files.rs new file mode 100644 index 0000000000..eb5b242a5f --- /dev/null +++ b/pageserver/pagebench/src/cmd/aux_files.rs @@ -0,0 +1,98 @@ +use pageserver_api::models::{AuxFilePolicy, TenantConfig, TenantConfigRequest}; +use pageserver_api::shard::TenantShardId; +use utils::id::TenantTimelineId; +use utils::lsn::Lsn; + +use std::collections::HashMap; +use std::sync::Arc; + +/// Ingest aux files into the pageserver. 
+#[derive(clap::Parser)] +pub(crate) struct Args { + #[clap(long, default_value = "http://localhost:9898")] + mgmt_api_endpoint: String, + #[clap(long, default_value = "postgres://postgres@localhost:64000")] + page_service_connstring: String, + #[clap(long)] + pageserver_jwt: Option, + + targets: Option>, +} + +pub(crate) fn main(args: Args) -> anyhow::Result<()> { + let rt = tokio::runtime::Builder::new_multi_thread() + .enable_all() + .build() + .unwrap(); + + let main_task = rt.spawn(main_impl(args)); + rt.block_on(main_task).unwrap() +} + +async fn main_impl(args: Args) -> anyhow::Result<()> { + let args: &'static Args = Box::leak(Box::new(args)); + + let mgmt_api_client = Arc::new(pageserver_client::mgmt_api::Client::new( + args.mgmt_api_endpoint.clone(), + args.pageserver_jwt.as_deref(), + )); + + // discover targets + let timelines: Vec = crate::util::cli::targets::discover( + &mgmt_api_client, + crate::util::cli::targets::Spec { + limit_to_first_n_targets: None, + targets: { + if let Some(targets) = &args.targets { + if targets.len() != 1 { + anyhow::bail!("must specify exactly one target"); + } + Some(targets.clone()) + } else { + None + } + }, + }, + ) + .await?; + + let timeline = timelines[0]; + let tenant_shard_id = TenantShardId::unsharded(timeline.tenant_id); + let timeline_id = timeline.timeline_id; + + println!("operating on timeline {}", timeline); + + mgmt_api_client + .tenant_config(&TenantConfigRequest { + tenant_id: timeline.tenant_id, + config: TenantConfig { + switch_aux_file_policy: Some(AuxFilePolicy::V2), + ..Default::default() + }, + }) + .await?; + + for batch in 0..100 { + let items = (0..100) + .map(|id| { + ( + format!("pg_logical/mappings/{:03}.{:03}", batch, id), + format!("{:08}", id), + ) + }) + .collect::>(); + let file_cnt = items.len(); + mgmt_api_client + .ingest_aux_files(tenant_shard_id, timeline_id, items) + .await?; + println!("ingested {file_cnt} files"); + } + + let files = mgmt_api_client + .list_aux_files(tenant_shard_id, timeline_id, Lsn(Lsn::MAX.0 - 1)) + .await?; + + println!("{} files found", files.len()); + + anyhow::Ok(()) +} diff --git a/pageserver/pagebench/src/main.rs b/pageserver/pagebench/src/main.rs index 743102d853..5527557450 100644 --- a/pageserver/pagebench/src/main.rs +++ b/pageserver/pagebench/src/main.rs @@ -14,6 +14,7 @@ mod util { /// The pagebench CLI sub-commands, dispatched in [`main`] below. 
mod cmd { + pub(super) mod aux_files; pub(super) mod basebackup; pub(super) mod getpage_latest_lsn; pub(super) mod ondemand_download_churn; @@ -27,6 +28,7 @@ enum Args { GetPageLatestLsn(cmd::getpage_latest_lsn::Args), TriggerInitialSizeCalculation(cmd::trigger_initial_size_calculation::Args), OndemandDownloadChurn(cmd::ondemand_download_churn::Args), + AuxFiles(cmd::aux_files::Args), } fn main() { @@ -46,6 +48,7 @@ fn main() { cmd::trigger_initial_size_calculation::main(args) } Args::OndemandDownloadChurn(args) => cmd::ondemand_download_churn::main(args), + Args::AuxFiles(args) => cmd::aux_files::main(args), } .unwrap() } diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index 7efd48afc7..0eab6510ca 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -16,6 +16,8 @@ use hyper::header; use hyper::StatusCode; use hyper::{Body, Request, Response, Uri}; use metrics::launch_timestamp::LaunchTimestamp; +use pageserver_api::models::IngestAuxFilesRequest; +use pageserver_api::models::ListAuxFilesRequest; use pageserver_api::models::LocationConfig; use pageserver_api::models::LocationConfigListResponse; use pageserver_api::models::ShardParameters; @@ -2331,6 +2333,71 @@ async fn get_utilization( .map_err(ApiError::InternalServerError) } +async fn list_aux_files( + mut request: Request, + _cancel: CancellationToken, +) -> Result, ApiError> { + let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?; + let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?; + let body: ListAuxFilesRequest = json_request(&mut request).await?; + check_permission(&request, Some(tenant_shard_id.tenant_id))?; + + let state = get_state(&request); + + let timeline = + active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id) + .await?; + + let process = || async move { + let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download); + let files = timeline.list_aux_files(body.lsn, &ctx).await?; + Ok::<_, anyhow::Error>(files) + }; + + match process().await { + Ok(st) => json_response(StatusCode::OK, st), + Err(err) => json_response( + StatusCode::INTERNAL_SERVER_ERROR, + ApiError::InternalServerError(err).to_string(), + ), + } +} + +async fn ingest_aux_files( + mut request: Request, + _cancel: CancellationToken, +) -> Result, ApiError> { + let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?; + let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?; + let body: IngestAuxFilesRequest = json_request(&mut request).await?; + check_permission(&request, Some(tenant_shard_id.tenant_id))?; + + let state = get_state(&request); + + let timeline = + active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id) + .await?; + + let process = || async move { + let mut modification = timeline.begin_modification(Lsn( + timeline.get_last_record_lsn().0 + 8 + ) /* advance LSN by 8 */); + let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download); + for (fname, content) in body.aux_files { + modification + .put_file(&fname, content.as_bytes(), &ctx) + .await?; + } + modification.commit(&ctx).await?; + Ok::<_, anyhow::Error>(()) + }; + + match process().await { + Ok(st) => json_response(StatusCode::OK, st), + Err(err) => Err(ApiError::InternalServerError(err)), + } +} + /// Report on the largest tenants on this pageserver, for the storage controller to identify /// candidates for splitting async 
fn post_top_tenants( @@ -2708,6 +2775,14 @@ pub fn make_router( ) .put("/v1/io_engine", |r| api_handler(r, put_io_engine_handler)) .get("/v1/utilization", |r| api_handler(r, get_utilization)) + .post( + "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/ingest_aux_files", + |r| testing_api_handler("ingest_aux_files", r, ingest_aux_files), + ) + .post( + "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/list_aux_files", + |r| testing_api_handler("list_aux_files", r, list_aux_files), + ) .post("/v1/top_tenants", |r| api_handler(r, post_top_tenants)) .any(handler_404)) } From 5caee4ca54ea16905ccc3e7f60b3221f33a74b91 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Sun, 19 May 2024 20:49:49 +0300 Subject: [PATCH 014/220] Fix calculation in test The comment says that this checks if there's enough space on the page for logical message *and* an XLOG_SWITCH. So the sizes of the logical message and the XLOG_SWITCH record should be added together, not subtracted. I saw a panic in the test that led me to investigate and notice this (https://neon-github-public-dev.s3.amazonaws.com/reports/pr-7803/9142396223/index.html): RuntimeError: Run ['/tmp/neon/bin/wal_craft', 'in-existing', 'last_wal_record_xlog_switch_ends_on_page_boundary', "host=localhost port=16165 user=cloud_admin dbname=postgres options='-cstatement_timeout=120s '"] failed: stdout: stderr: thread 'main' panicked at libs/postgres_ffi/wal_craft/src/lib.rs:370:27: attempt to subtract with overflow stack backtrace: 0: rust_begin_unwind at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/std/src/panicking.rs:645:5 1: core::panicking::panic_fmt at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/core/src/panicking.rs:72:14 2: core::panicking::panic at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/core/src/panicking.rs:145:5 3: ::craft:: at libs/postgres_ffi/wal_craft/src/lib.rs:370:27 4: wal_craft::main::{closure#0} at libs/postgres_ffi/wal_craft/src/bin/wal_craft.rs:21:17 5: wal_craft::main at libs/postgres_ffi/wal_craft/src/bin/wal_craft.rs:66:47 6: core::result::Result<(), anyhow::Error> as core::ops::function::FnOnce<()>>::call_once at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/core/src/ops/function.rs:250:5 note: Some details are omitted, run with `RUST_BACKTRACE=full` for a verbose backtrace. --- libs/postgres_ffi/wal_craft/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/postgres_ffi/wal_craft/src/lib.rs b/libs/postgres_ffi/wal_craft/src/lib.rs index 262068cbda..b6769629a8 100644 --- a/libs/postgres_ffi/wal_craft/src/lib.rs +++ b/libs/postgres_ffi/wal_craft/src/lib.rs @@ -359,7 +359,7 @@ impl Crafter for LastWalRecordXlogSwitchEndsOnPageBoundary { // Is there enough space on the page for another logical message and an // XLOG_SWITCH? If not, start over. let page_remain = XLOG_BLCKSZ as u64 - u64::from(after_lsn) % XLOG_BLCKSZ as u64; - if page_remain < base_size - XLOG_SIZE_OF_XLOG_RECORD as u64 { + if page_remain < base_size + XLOG_SIZE_OF_XLOG_RECORD as u64 { continue; } From a5ecca976ec3abf97c8c3db4f78c230b4553b509 Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Sun, 19 May 2024 20:45:53 +0100 Subject: [PATCH 015/220] proxy: bump parquet (#7782) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Summary of changes Updates the parquet lib. 
one change left that we need is in an open PR against upstream, hopefully we can remove the git dependency by 52.0.0 https://github.com/apache/arrow-rs/pull/5773 I'm not sure why the parquet files got a little bit bigger. I tested them and they still open fine. 🤷 side effect of the update, chrono updated and added yet another deprecation warning (hence why the safekeepers change) --- Cargo.lock | 29 ++++++++++---- Cargo.toml | 8 ++-- proxy/src/context/parquet.rs | 73 +++++++++++++++++++----------------- safekeeper/src/broker.rs | 2 +- workspace_hack/Cargo.toml | 4 +- 5 files changed, 67 insertions(+), 49 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e1edd53fea..e6060c82f5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1072,9 +1072,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.31" +version = "0.4.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" dependencies = [ "android-tzdata", "iana-time-zone", @@ -1082,7 +1082,7 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-targets 0.48.0", + "windows-targets 0.52.4", ] [[package]] @@ -1109,7 +1109,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "defaa24ecc093c77630e6c15e17c51f5e187bf35ee514f4e2d67baaa96dae22b" dependencies = [ "ciborium-io", - "half", + "half 1.8.2", ] [[package]] @@ -2278,6 +2278,17 @@ version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" +[[package]] +name = "half" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +dependencies = [ + "cfg-if", + "crunchy", + "num-traits", +] + [[package]] name = "hash32" version = "0.3.1" @@ -3902,12 +3913,13 @@ dependencies = [ [[package]] name = "parquet" -version = "49.0.0" -source = "git+https://github.com/neondatabase/arrow-rs?branch=neon-fix-bugs#8a0bc58aa67b98aabbd8eee7c6ca4281967ff9e9" +version = "51.0.0" +source = "git+https://github.com/apache/arrow-rs?branch=master#2534976a564be3d2d56312dc88fb1b6ed4cef829" dependencies = [ "ahash", "bytes", "chrono", + "half 2.4.1", "hashbrown 0.14.5", "num", "num-bigint", @@ -3916,12 +3928,13 @@ dependencies = [ "thrift", "twox-hash", "zstd", + "zstd-sys", ] [[package]] name = "parquet_derive" -version = "49.0.0" -source = "git+https://github.com/neondatabase/arrow-rs?branch=neon-fix-bugs#8a0bc58aa67b98aabbd8eee7c6ca4281967ff9e9" +version = "51.0.0" +source = "git+https://github.com/apache/arrow-rs?branch=master#2534976a564be3d2d56312dc88fb1b6ed4cef829" dependencies = [ "parquet", "proc-macro2", diff --git a/Cargo.toml b/Cargo.toml index b59a5dcd6d..2a7dea447e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -122,8 +122,8 @@ opentelemetry = "0.20.0" opentelemetry-otlp = { version = "0.13.0", default_features=false, features = ["http-proto", "trace", "http", "reqwest-client"] } opentelemetry-semantic-conventions = "0.12.0" parking_lot = "0.12" -parquet = { version = "49.0.0", default-features = false, features = ["zstd"] } -parquet_derive = "49.0.0" +parquet = { version = "51.0.0", default-features = false, features = ["zstd"] } +parquet_derive = "51.0.0" pbkdf2 = { version = "0.12.1", features = ["simple", "std"] } pin-project-lite = "0.2" procfs = "0.14" @@ -244,8 +244,8 @@ tonic-build 
= "0.9" tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", branch="neon" } # bug fixes for UUID -parquet = { git = "https://github.com/neondatabase/arrow-rs", branch = "neon-fix-bugs" } -parquet_derive = { git = "https://github.com/neondatabase/arrow-rs", branch = "neon-fix-bugs" } +parquet = { git = "https://github.com/apache/arrow-rs", branch = "master" } +parquet_derive = { git = "https://github.com/apache/arrow-rs", branch = "master" } ################# Binary contents sections diff --git a/proxy/src/context/parquet.rs b/proxy/src/context/parquet.rs index 8104fe6087..392821c430 100644 --- a/proxy/src/context/parquet.rs +++ b/proxy/src/context/parquet.rs @@ -307,7 +307,7 @@ where } async fn upload_parquet( - w: SerializedFileWriter>, + mut w: SerializedFileWriter>, len: i64, storage: &GenericRemoteStorage, ) -> anyhow::Result> { @@ -319,11 +319,15 @@ async fn upload_parquet( // I don't know how compute intensive this is, although it probably isn't much... better be safe than sorry. // finish method only available on the fork: https://github.com/apache/arrow-rs/issues/5253 - let (writer, metadata) = tokio::task::spawn_blocking(move || w.finish()) + let (mut buffer, metadata) = + tokio::task::spawn_blocking(move || -> parquet::errors::Result<_> { + let metadata = w.finish()?; + let buffer = std::mem::take(w.inner_mut().get_mut()); + Ok((buffer, metadata)) + }) .await .unwrap()?; - let mut buffer = writer.into_inner(); let data = buffer.split().freeze(); let compression = len as f64 / len_uncompressed as f64; @@ -474,10 +478,11 @@ mod tests { RequestData { session_id: uuid::Builder::from_random_bytes(rng.gen()).into_uuid(), peer_addr: Ipv4Addr::from(rng.gen::<[u8; 4]>()).to_string(), - timestamp: chrono::NaiveDateTime::from_timestamp_millis( + timestamp: chrono::DateTime::from_timestamp_millis( rng.gen_range(1703862754..1803862754), ) - .unwrap(), + .unwrap() + .naive_utc(), application_name: Some("test".to_owned()), username: Some(hex::encode(rng.gen::<[u8; 4]>())), endpoint_id: Some(hex::encode(rng.gen::<[u8; 16]>())), @@ -560,15 +565,15 @@ mod tests { assert_eq!( file_stats, [ - (1315008, 3, 6000), - (1315001, 3, 6000), - (1315061, 3, 6000), - (1315018, 3, 6000), - (1315148, 3, 6000), - (1314990, 3, 6000), - (1314782, 3, 6000), - (1315018, 3, 6000), - (438575, 1, 2000) + (1315314, 3, 6000), + (1315307, 3, 6000), + (1315367, 3, 6000), + (1315324, 3, 6000), + (1315454, 3, 6000), + (1315296, 3, 6000), + (1315088, 3, 6000), + (1315324, 3, 6000), + (438713, 1, 2000) ] ); @@ -598,11 +603,11 @@ mod tests { assert_eq!( file_stats, [ - (1221738, 5, 10000), - (1227888, 5, 10000), - (1229682, 5, 10000), - (1229044, 5, 10000), - (1220322, 5, 10000) + (1222212, 5, 10000), + (1228362, 5, 10000), + (1230156, 5, 10000), + (1229518, 5, 10000), + (1220796, 5, 10000) ] ); @@ -634,11 +639,11 @@ mod tests { assert_eq!( file_stats, [ - (1207385, 5, 10000), - (1207116, 5, 10000), - (1207409, 5, 10000), - (1207397, 5, 10000), - (1207652, 5, 10000) + (1207859, 5, 10000), + (1207590, 5, 10000), + (1207883, 5, 10000), + (1207871, 5, 10000), + (1208126, 5, 10000) ] ); @@ -663,15 +668,15 @@ mod tests { assert_eq!( file_stats, [ - (1315008, 3, 6000), - (1315001, 3, 6000), - (1315061, 3, 6000), - (1315018, 3, 6000), - (1315148, 3, 6000), - (1314990, 3, 6000), - (1314782, 3, 6000), - (1315018, 3, 6000), - (438575, 1, 2000) + (1315314, 3, 6000), + (1315307, 3, 6000), + (1315367, 3, 6000), + (1315324, 3, 6000), + (1315454, 3, 6000), + (1315296, 3, 6000), + (1315088, 3, 6000), + (1315324, 3, 
6000), + (438713, 1, 2000) ] ); @@ -708,7 +713,7 @@ mod tests { // files are smaller than the size threshold, but they took too long to fill so were flushed early assert_eq!( file_stats, - [(659240, 2, 3001), (658954, 2, 3000), (658750, 2, 2999)] + [(659462, 2, 3001), (659176, 2, 3000), (658972, 2, 2999)] ); tmpdir.close().unwrap(); diff --git a/safekeeper/src/broker.rs b/safekeeper/src/broker.rs index 98f58d3e49..ea16ce450f 100644 --- a/safekeeper/src/broker.rs +++ b/safekeeper/src/broker.rs @@ -319,7 +319,7 @@ async fn task_stats(stats: Arc) { let now = BrokerStats::now_millis(); if now > last_pulled && now - last_pulled > warn_duration.as_millis() as u64 { - let ts = chrono::NaiveDateTime::from_timestamp_millis(last_pulled as i64).expect("invalid timestamp"); + let ts = chrono::DateTime::from_timestamp_millis(last_pulled as i64).expect("invalid timestamp"); info!("no broker updates for some time, last update: {:?}", ts); } } diff --git a/workspace_hack/Cargo.toml b/workspace_hack/Cargo.toml index b605757f64..7582562450 100644 --- a/workspace_hack/Cargo.toml +++ b/workspace_hack/Cargo.toml @@ -51,7 +51,7 @@ num-bigint = { version = "0.4" } num-integer = { version = "0.1", features = ["i128"] } num-traits = { version = "0.2", features = ["i128", "libm"] } once_cell = { version = "1" } -parquet = { git = "https://github.com/neondatabase/arrow-rs", branch = "neon-fix-bugs", default-features = false, features = ["zstd"] } +parquet = { git = "https://github.com/apache/arrow-rs", branch = "master", default-features = false, features = ["zstd"] } prost = { version = "0.11" } rand = { version = "0.8", features = ["small_rng"] } regex = { version = "1" } @@ -102,7 +102,7 @@ num-bigint = { version = "0.4" } num-integer = { version = "0.1", features = ["i128"] } num-traits = { version = "0.2", features = ["i128", "libm"] } once_cell = { version = "1" } -parquet = { git = "https://github.com/neondatabase/arrow-rs", branch = "neon-fix-bugs", default-features = false, features = ["zstd"] } +parquet = { git = "https://github.com/apache/arrow-rs", branch = "master", default-features = false, features = ["zstd"] } prost = { version = "0.11" } regex = { version = "1" } regex-automata = { version = "0.4", default-features = false, features = ["dfa-onepass", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } From 291fcb9e4f7086237c409580b1fd3accf46d5ba3 Mon Sep 17 00:00:00 2001 From: John Spray Date: Mon, 20 May 2024 09:25:25 +0100 Subject: [PATCH 016/220] pageserver: use the heatmap upload interval to set the secondary download interval (#7793) ## Problem The heatmap upload period is configurable, but secondary mode downloads were using a fixed download period. Closes: #6200 ## Summary of changes - Use the upload period in the heatmap to adjust the download period. In practice, this will reduce the frequency of downloads from its current 60 second period to what heatmaps use, which is 5-10m depending on environment. This is an improvement rather than being optimal: we could be smarter about periods, and schedule downloads to occur around the time we expect the next upload, rather than just using the same period, but that's something we can address in future if it comes up. 
--- pageserver/src/tenant/secondary/downloader.rs | 84 ++++++++++--------- .../regress/test_pageserver_secondary.py | 18 ++-- 2 files changed, 58 insertions(+), 44 deletions(-) diff --git a/pageserver/src/tenant/secondary/downloader.rs b/pageserver/src/tenant/secondary/downloader.rs index 8f27220771..609e1431cf 100644 --- a/pageserver/src/tenant/secondary/downloader.rs +++ b/pageserver/src/tenant/secondary/downloader.rs @@ -62,14 +62,10 @@ use super::{ CommandRequest, DownloadCommand, }; -/// For each tenant, how long must have passed since the last download_tenant call before -/// calling it again. This is approximately the time by which local data is allowed -/// to fall behind remote data. -/// -/// TODO: this should just be a default, and the actual period should be controlled -/// via the heatmap itself -/// `` -const DOWNLOAD_FRESHEN_INTERVAL: Duration = Duration::from_millis(60000); +/// For each tenant, default period for how long must have passed since the last download_tenant call before +/// calling it again. This default is replaced with the value of [`HeatMapTenant::upload_period_ms`] after first +/// download, if the uploader populated it. +const DEFAULT_DOWNLOAD_INTERVAL: Duration = Duration::from_millis(60000); /// Range of concurrency we may use when downloading layers within a timeline. This is independent /// for each tenant we're downloading: the concurrency of _tenants_ is defined separately in @@ -152,14 +148,22 @@ pub(super) struct SecondaryDetailTimeline { pub(super) evicted_at: HashMap, } +// Aspects of a heatmap that we remember after downloading it +#[derive(Clone, Debug)] +struct DownloadSummary { + etag: Etag, + #[allow(unused)] + mtime: SystemTime, + upload_period: Duration, +} + /// This state is written by the secondary downloader, it is opaque /// to TenantManager #[derive(Debug)] pub(super) struct SecondaryDetail { pub(super) config: SecondaryLocationConfig, - last_download: Option, - last_etag: Option, + last_download: Option, next_download: Option, pub(super) timelines: HashMap, } @@ -189,7 +193,6 @@ impl SecondaryDetail { Self { config, last_download: None, - last_etag: None, next_download: None, timelines: HashMap::new(), } @@ -243,9 +246,8 @@ impl SecondaryDetail { struct PendingDownload { secondary_state: Arc, - last_download: Option, + last_download: Option, target_time: Option, - period: Option, } impl scheduler::PendingJob for PendingDownload { @@ -295,10 +297,17 @@ impl JobGenerator SchedulingResult { @@ -331,11 +340,11 @@ impl JobGenerator next_download { @@ -343,7 +352,6 @@ impl JobGenerator TenantDownloader<'a> { let tenant_shard_id = self.secondary_state.get_tenant_shard_id(); // We will use the etag from last successful download to make the download conditional on changes - let last_etag = self + let last_download = self .secondary_state .detail .lock() .unwrap() - .last_etag + .last_download .clone(); // Download the tenant's heatmap @@ -539,7 +540,7 @@ impl<'a> TenantDownloader<'a> { etag: heatmap_etag, bytes: heatmap_bytes, } = match tokio::select!( - bytes = self.download_heatmap(last_etag.as_ref()) => {bytes?}, + bytes = self.download_heatmap(last_download.as_ref().map(|d| &d.etag)) => {bytes?}, _ = self.secondary_state.cancel.cancelled() => return Ok(()) ) { HeatMapDownload::Unmodified => { @@ -599,7 +600,14 @@ impl<'a> TenantDownloader<'a> { // Only update last_etag after a full successful download: this way will not skip // the next download, even if the heatmap's actual etag is unchanged. 
- self.secondary_state.detail.lock().unwrap().last_etag = Some(heatmap_etag); + self.secondary_state.detail.lock().unwrap().last_download = Some(DownloadSummary { + etag: heatmap_etag, + mtime: heatmap_mtime, + upload_period: heatmap + .upload_period_ms + .map(|ms| Duration::from_millis(ms as u64)) + .unwrap_or(DEFAULT_DOWNLOAD_INTERVAL), + }); Ok(()) } diff --git a/test_runner/regress/test_pageserver_secondary.py b/test_runner/regress/test_pageserver_secondary.py index fdc09a063d..127340a1e7 100644 --- a/test_runner/regress/test_pageserver_secondary.py +++ b/test_runner/regress/test_pageserver_secondary.py @@ -575,7 +575,10 @@ def test_secondary_background_downloads(neon_env_builder: NeonEnvBuilder): tenant_timelines = {} # This mirrors a constant in `downloader.rs` - freshen_interval_secs = 60 + default_download_period_secs = 60 + + # The upload period, which will also be the download once the secondary has seen its first heatmap + upload_period_secs = 20 for _i in range(0, tenant_count): tenant_id = TenantId.generate() @@ -587,7 +590,7 @@ def test_secondary_background_downloads(neon_env_builder: NeonEnvBuilder): placement_policy='{"Attached":1}', # Run with a low heatmap period so that we can avoid having to do synthetic API calls # to trigger the upload promptly. - conf={"heatmap_period": "1s"}, + conf={"heatmap_period": f"{upload_period_secs}s"}, ) env.neon_cli.create_timeline("main2", tenant_id, timeline_b) @@ -597,7 +600,7 @@ def test_secondary_background_downloads(neon_env_builder: NeonEnvBuilder): # Wait long enough that the background downloads should happen; we expect all the inital layers # of all the initial timelines to show up on the secondary location of each tenant. - time.sleep(freshen_interval_secs * 1.5) + time.sleep(default_download_period_secs * 1.5) for tenant_id, timelines in tenant_timelines.items(): attached_to_id = env.storage_controller.locate(tenant_id)[0]["node_id"] @@ -613,8 +616,8 @@ def test_secondary_background_downloads(neon_env_builder: NeonEnvBuilder): # Delete the second timeline: this should be reflected later on the secondary env.storage_controller.pageserver_api().timeline_delete(tenant_id, timelines[1]) - # Wait long enough for the secondary locations to see the deletion - time.sleep(freshen_interval_secs * 1.5) + # Wait long enough for the secondary locations to see the deletion: 2x period plus a grace factor + time.sleep(upload_period_secs * 2.5) for tenant_id, timelines in tenant_timelines.items(): attached_to_id = env.storage_controller.locate(tenant_id)[0]["node_id"] @@ -626,6 +629,9 @@ def test_secondary_background_downloads(neon_env_builder: NeonEnvBuilder): assert ps_secondary.list_layers(tenant_id, timelines[0]) # This one was deleted + log.info( + f"Checking for secondary timeline deletion {tenant_id}/{timeline_id} on node {ps_secondary.id}" + ) assert not ps_secondary.list_layers(tenant_id, timelines[1]) t_end = time.time() @@ -640,7 +646,7 @@ def test_secondary_background_downloads(neon_env_builder: NeonEnvBuilder): download_rate = (total_heatmap_downloads / tenant_count) / (t_end - t_start) - expect_download_rate = 1.0 / freshen_interval_secs + expect_download_rate = 1.0 / upload_period_secs log.info(f"Download rate: {download_rate * 60}/min vs expected {expect_download_rate * 60}/min") assert download_rate < expect_download_rate * 2 From a7b84cca5aef3dacc90895f39f0e0ad2b6c950cc Mon Sep 17 00:00:00 2001 From: Peter Bendel Date: Mon, 20 May 2024 12:07:25 +0200 Subject: [PATCH 017/220] Upgrade of pgvector to 0.7.0 (#7726) Upgrade 
pgvector to 0.7.0. This PR is based on Heikki's PR #6753 and just uses pgvector 0.7.0 instead of 0.6.0 I have now done all planned manual tests. The pull request is ready to be reviewed and merged and can be deployed in production together / after swap enablement. See (https://github.com/neondatabase/autoscaling/issues/800) Fixes https://github.com/neondatabase/neon/issues/6516 Fixes https://github.com/neondatabase/neon/issues/7780 ## Documentation input for usage recommendations ### maintenance_work_mem In Neon `maintenance_work_mem` is very small by default (depends on configured RAM for your compute but can be as low as 64 MB). To optimize pgvector index build time you may have to bump it up according to your working set size (size of tuples for vector index creation). You can do so in the current session using `SET maintenance_work_mem='10 GB';` The target value you choose should fit into the memory of your compute size and not exceed 50-60% of available RAM. The value above has been successfully used on a 7CU endpoint. ### max_parallel_maintenance_workers max_parallel_maintenance_workers is also small by default (2). For efficient parallel pgvector index creation you have to bump it up with `SET max_parallel_maintenance_workers = 7` to make use of all the CPUs available, assuming you have configured your endpoint to use 7CU. ## ID input for changelog pgvector extension in Neon has been upgraded from version 0.5.1 to version 0.7.0. Please see https://github.com/pgvector/pgvector/ for documentation of new capabilities in pgvector version 0.7.0 If you have existing databases with pgvector 0.5.1 already installed there is a slight difference in behavior in the following corner cases even if you don't run `ALTER EXTENSION UPDATE`: ### L2 distance from NULL::vector For the following script, comparing the NULL::vector to non-null vectors the resulting output changes: ```sql SET enable_seqscan = off; CREATE TABLE t (val vector(3)); INSERT INTO t (val) VALUES ('[0,0,0]'), ('[1,2,3]'), ('[1,1,1]'), (NULL); CREATE INDEX ON t USING hnsw (val vector_l2_ops); INSERT INTO t (val) VALUES ('[1,2,4]'); SELECT * FROM t ORDER BY val <-> (SELECT NULL::vector); ``` and now the output is ``` val --------- [1,1,1] [1,2,4] [1,2,3] [0,0,0] (4 rows) ``` For the following script ```sql SET enable_seqscan = off; CREATE TABLE t (val vector(3)); INSERT INTO t (val) VALUES ('[0,0,0]'), ('[1,2,3]'), ('[1,1,1]'), (NULL); CREATE INDEX ON t USING ivfflat (val vector_l2_ops) WITH (lists = 1); INSERT INTO t (val) VALUES ('[1,2,4]'); SELECT * FROM t ORDER BY val <-> (SELECT NULL::vector); ``` the output now is ``` val --------- [0,0,0] [1,2,3] [1,1,1] [1,2,4] (4 rows) ``` ### changed error messages If you provide invalid literals for datatype vector you may get improved/changed error messages, for example: ```sql neondb=> SELECT '[4e38,1]'::vector; ERROR: "4e38" is out of range for type vector LINE 1: SELECT '[4e38,1]'::vector; ^ ``` --------- Co-authored-by: Heikki Linnakangas --- .dockerignore | 1 + Dockerfile.compute-node | 7 +++- patches/pgvector.patch | 78 ++++++++++++++++++++++++++++++++++++++ pgxn/neon/pagestore_smgr.c | 19 +++++++++- 4 files changed, 101 insertions(+), 4 deletions(-) create mode 100644 patches/pgvector.patch diff --git a/.dockerignore b/.dockerignore index f7a6232ba1..1258532db8 100644 --- a/.dockerignore +++ b/.dockerignore @@ -17,6 +17,7 @@ !libs/ !neon_local/ !pageserver/ +!patches/ !pgxn/ !proxy/ !s3_scrubber/ diff --git a/Dockerfile.compute-node b/Dockerfile.compute-node index 
bd4534ce1d..5bf3246f34 100644 --- a/Dockerfile.compute-node +++ b/Dockerfile.compute-node @@ -241,9 +241,12 @@ RUN wget https://github.com/df7cb/postgresql-unit/archive/refs/tags/7.7.tar.gz - FROM build-deps AS vector-pg-build COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/ -RUN wget https://github.com/pgvector/pgvector/archive/refs/tags/v0.5.1.tar.gz -O pgvector.tar.gz && \ - echo "cc7a8e034a96e30a819911ac79d32f6bc47bdd1aa2de4d7d4904e26b83209dc8 pgvector.tar.gz" | sha256sum --check && \ +COPY patches/pgvector.patch /pgvector.patch + +RUN wget https://github.com/pgvector/pgvector/archive/refs/tags/v0.7.0.tar.gz -O pgvector.tar.gz && \ + echo "1b5503a35c265408b6eb282621c5e1e75f7801afc04eecb950796cfee2e3d1d8 pgvector.tar.gz" | sha256sum --check && \ mkdir pgvector-src && cd pgvector-src && tar xvzf ../pgvector.tar.gz --strip-components=1 -C . && \ + patch -p1 < /pgvector.patch && \ make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \ make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \ echo 'trusted = true' >> /usr/local/pgsql/share/extension/vector.control diff --git a/patches/pgvector.patch b/patches/pgvector.patch new file mode 100644 index 0000000000..84ac6644c5 --- /dev/null +++ b/patches/pgvector.patch @@ -0,0 +1,78 @@ +From 0b0194a57bd0f3598bd57dbedd0df3932330169d Mon Sep 17 00:00:00 2001 +From: Heikki Linnakangas +Date: Fri, 2 Feb 2024 22:26:45 +0200 +Subject: [PATCH 1/1] Make v0.6.0 work with Neon + +Now that the WAL-logging happens as a separate step at the end of the +build, we need a few neon-specific hints to make it work. +--- + src/hnswbuild.c | 36 ++++++++++++++++++++++++++++++++++++ + 1 file changed, 36 insertions(+) + +diff --git a/src/hnswbuild.c b/src/hnswbuild.c +index 680789b..ec54dea 100644 +--- a/src/hnswbuild.c ++++ b/src/hnswbuild.c +@@ -840,9 +840,17 @@ HnswParallelBuildMain(dsm_segment *seg, shm_toc *toc) + + hnswarea = shm_toc_lookup(toc, PARALLEL_KEY_HNSW_AREA, false); + ++#ifdef NEON_SMGR ++ smgr_start_unlogged_build(RelationGetSmgr(indexRel)); ++#endif ++ + /* Perform inserts */ + HnswParallelScanAndInsert(heapRel, indexRel, hnswshared, hnswarea, false); + ++#ifdef NEON_SMGR ++ smgr_finish_unlogged_build_phase_1(RelationGetSmgr(indexRel)); ++#endif ++ + /* Close relations within worker */ + index_close(indexRel, indexLockmode); + table_close(heapRel, heapLockmode); +@@ -1089,13 +1097,41 @@ BuildIndex(Relation heap, Relation index, IndexInfo *indexInfo, + SeedRandom(42); + #endif + ++#ifdef NEON_SMGR ++ smgr_start_unlogged_build(RelationGetSmgr(index)); ++#endif ++ + InitBuildState(buildstate, heap, index, indexInfo, forkNum); + + BuildGraph(buildstate, forkNum); + ++#ifdef NEON_SMGR ++ smgr_finish_unlogged_build_phase_1(RelationGetSmgr(index)); ++#endif ++ + if (RelationNeedsWAL(index)) ++ { + log_newpage_range(index, forkNum, 0, RelationGetNumberOfBlocks(index), true); + ++#ifdef NEON_SMGR ++ { ++#if PG_VERSION_NUM >= 160000 ++ RelFileLocator rlocator = RelationGetSmgr(index)->smgr_rlocator.locator; ++#else ++ RelFileNode rlocator = RelationGetSmgr(index)->smgr_rnode.node; ++#endif ++ ++ SetLastWrittenLSNForBlockRange(XactLastRecEnd, rlocator, ++ MAIN_FORKNUM, 0, RelationGetNumberOfBlocks(index)); ++ SetLastWrittenLSNForRelation(XactLastRecEnd, rlocator, MAIN_FORKNUM); ++ } ++#endif ++ } ++ ++#ifdef NEON_SMGR ++ smgr_end_unlogged_build(RelationGetSmgr(index)); ++#endif ++ + FreeBuildState(buildstate); + } + +-- +2.39.2 + diff --git a/pgxn/neon/pagestore_smgr.c 
b/pgxn/neon/pagestore_smgr.c index e3b841f526..249ad313b0 100644 --- a/pgxn/neon/pagestore_smgr.c +++ b/pgxn/neon/pagestore_smgr.c @@ -45,6 +45,7 @@ */ #include "postgres.h" +#include "access/parallel.h" #include "access/xact.h" #include "access/xlog.h" #include "access/xlogdefs.h" @@ -2822,10 +2823,14 @@ neon_start_unlogged_build(SMgrRelation reln) reln->smgr_relpersistence = RELPERSISTENCE_UNLOGGED; /* + * Create the local file. In a parallel build, the leader is expected to + * call this first and do it. + * * FIXME: should we pass isRedo true to create the tablespace dir if it * doesn't exist? Is it needed? */ - mdcreate(reln, MAIN_FORKNUM, false); + if (!IsParallelWorker()) + mdcreate(reln, MAIN_FORKNUM, false); } /* @@ -2849,7 +2854,17 @@ neon_finish_unlogged_build_phase_1(SMgrRelation reln) Assert(unlogged_build_phase == UNLOGGED_BUILD_PHASE_1); Assert(reln->smgr_relpersistence == RELPERSISTENCE_UNLOGGED); - unlogged_build_phase = UNLOGGED_BUILD_PHASE_2; + /* + * In a parallel build, (only) the leader process performs the 2nd + * phase. + */ + if (IsParallelWorker()) + { + unlogged_build_rel = NULL; + unlogged_build_phase = UNLOGGED_BUILD_NOT_IN_PROGRESS; + } + else + unlogged_build_phase = UNLOGGED_BUILD_PHASE_2; } /* From e3f51abadf835784471034c583605c6b41154f2c Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 13 May 2024 13:18:18 +0300 Subject: [PATCH 018/220] safekeeper: close connection when COPY stream ends. We can't gracefully exit COPY mode (and don't need that), so close connection to prevent further attempts to use it. --- libs/postgres_backend/src/lib.rs | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/libs/postgres_backend/src/lib.rs b/libs/postgres_backend/src/lib.rs index 260018ad89..6c41b7f347 100644 --- a/libs/postgres_backend/src/lib.rs +++ b/libs/postgres_backend/src/lib.rs @@ -820,10 +820,11 @@ impl PostgresBackend { Ok(ProcessMsgResult::Continue) } - /// Log as info/error result of handling COPY stream and send back - /// ErrorResponse if that makes sense. Shutdown the stream if we got - /// Terminate. TODO: transition into waiting for Sync msg if we initiate the - /// close. + /// - Log as info/error result of handling COPY stream and send back + /// ErrorResponse if that makes sense. + /// - Shutdown the stream if we got Terminate. + /// - Then close the connection because we don't handle exiting from COPY + /// stream normally. pub async fn handle_copy_stream_end(&mut self, end: CopyStreamHandlerEnd) { use CopyStreamHandlerEnd::*; @@ -849,10 +850,6 @@ impl PostgresBackend { } } - if let Terminate = &end { - self.state = ProtoState::Closed; - } - let err_to_send_and_errcode = match &end { ServerInitiated(_) => Some((end.to_string(), SQLSTATE_SUCCESSFUL_COMPLETION)), Other(_) => Some((format!("{end:#}"), SQLSTATE_INTERNAL_ERROR)), @@ -882,6 +879,12 @@ impl PostgresBackend { error!("failed to send ErrorResponse: {}", ee); } } + + // Proper COPY stream finishing to continue using the connection is not + // implemented at the server side (we don't need it so far). To prevent + // further usages of the connection, close it. + self.framed.shutdown().await.ok(); + self.state = ProtoState::Closed; } } From de8dfee4bda97deb8a2f02e4e4cc0c7641f56d09 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 13 May 2024 13:19:12 +0300 Subject: [PATCH 019/220] safekeeper: log LSNs on walreceiver/walsender exit. Useful for observability. 
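The shape of the change, as a minimal sketch: the result-logging future is wrapped in a `tracing` span carrying the timeline's current term and LSNs, so the walreceiver/walsender exit line includes them. The helper name and its flat argument list are illustrative; the patch builds the span fields from `get_safekeeper_info`:

```rust
use tracing::{info_span, Instrument};
use utils::lsn::Lsn;

// Sketch only: attach term/LSN fields to whatever future emits the final
// "stream ended" log line, so the message is annotated with timeline state.
async fn log_stream_end_with_lsns<F>(
    handle_end: F,
    term: u64,
    last_log_term: u64,
    flush_lsn: Lsn,
    commit_lsn: Lsn,
) where
    F: std::future::Future<Output = ()>,
{
    handle_end
        .instrument(info_span!(
            "",
            term = %term,
            last_log_term = %last_log_term,
            flush_lsn = %flush_lsn,
            commit_lsn = %commit_lsn
        ))
        .await;
}
```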
--- safekeeper/src/receive_wal.rs | 29 ++++++++++++++++++++++------- safekeeper/src/send_wal.rs | 11 +++++++---- 2 files changed, 29 insertions(+), 11 deletions(-) diff --git a/safekeeper/src/receive_wal.rs b/safekeeper/src/receive_wal.rs index 015b53bb2e..0356def7df 100644 --- a/safekeeper/src/receive_wal.rs +++ b/safekeeper/src/receive_wal.rs @@ -183,9 +183,19 @@ impl SafekeeperPostgresHandler { &mut self, pgb: &mut PostgresBackend, ) -> Result<(), QueryError> { - if let Err(end) = self.handle_start_wal_push_guts(pgb).await { + let mut tli: Option> = None; + if let Err(end) = self.handle_start_wal_push_guts(pgb, &mut tli).await { // Log the result and probably send it to the client, closing the stream. - pgb.handle_copy_stream_end(end).await; + let handle_end_fut = pgb.handle_copy_stream_end(end); + // If we managed to create the timeline, augment logging with current LSNs etc. + if let Some(tli) = tli { + let info = tli.get_safekeeper_info(&self.conf).await; + handle_end_fut + .instrument(info_span!("", term=%info.term, last_log_term=%info.last_log_term, flush_lsn=%Lsn(info.flush_lsn), commit_lsn=%Lsn(info.commit_lsn))) + .await; + } else { + handle_end_fut.await; + } } Ok(()) } @@ -193,6 +203,7 @@ impl SafekeeperPostgresHandler { pub async fn handle_start_wal_push_guts( &mut self, pgb: &mut PostgresBackend, + tli: &mut Option>, ) -> Result<(), CopyStreamHandlerEnd> { // Notify the libpq client that it's allowed to send `CopyData` messages pgb.write_message(&BeMessage::CopyBothResponse).await?; @@ -222,13 +233,17 @@ impl SafekeeperPostgresHandler { // Read first message and create timeline if needed. let res = network_reader.read_first_message().await; - let res = if let Ok((tli, next_msg)) = res { + let network_res = if let Ok((timeline, next_msg)) = res { let pageserver_feedback_rx: tokio::sync::broadcast::Receiver = - tli.get_walreceivers().pageserver_feedback_tx.subscribe(); + timeline + .get_walreceivers() + .pageserver_feedback_tx + .subscribe(); + *tli = Some(timeline.clone()); tokio::select! { // todo: add read|write .context to these errors - r = network_reader.run(msg_tx, msg_rx, reply_tx, tli.clone(), next_msg) => r, + r = network_reader.run(msg_tx, msg_rx, reply_tx, timeline.clone(), next_msg) => r, r = network_write(pgb, reply_rx, pageserver_feedback_rx) => r, } } else { @@ -244,13 +259,13 @@ impl SafekeeperPostgresHandler { match acceptor_handle { None => { // failed even before spawning; read_network should have error - Err(res.expect_err("no error with WalAcceptor not spawn")) + Err(network_res.expect_err("no error with WalAcceptor not spawn")) } Some(handle) => { let wal_acceptor_res = handle.await; // If there was any network error, return it. - res?; + network_res?; // Otherwise, WalAcceptor thread must have errored. match wal_acceptor_res { diff --git a/safekeeper/src/send_wal.rs b/safekeeper/src/send_wal.rs index 59a8c595ab..ecaae9cfe7 100644 --- a/safekeeper/src/send_wal.rs +++ b/safekeeper/src/send_wal.rs @@ -340,12 +340,16 @@ impl SafekeeperPostgresHandler { start_pos: Lsn, term: Option, ) -> Result<(), QueryError> { + let tli = GlobalTimelines::get(self.ttid).map_err(|e| QueryError::Other(e.into()))?; if let Err(end) = self - .handle_start_replication_guts(pgb, start_pos, term) + .handle_start_replication_guts(pgb, start_pos, term, tli.clone()) .await { + let info = tli.get_safekeeper_info(&self.conf).await; // Log the result and probably send it to the client, closing the stream. 
- pgb.handle_copy_stream_end(end).await; + pgb.handle_copy_stream_end(end) + .instrument(info_span!("", term=%info.term, last_log_term=%info.last_log_term, flush_lsn=%Lsn(info.flush_lsn), commit_lsn=%Lsn(info.flush_lsn))) + .await; } Ok(()) } @@ -355,10 +359,9 @@ impl SafekeeperPostgresHandler { pgb: &mut PostgresBackend, start_pos: Lsn, term: Option, + tli: Arc, ) -> Result<(), CopyStreamHandlerEnd> { let appname = self.appname.clone(); - let tli = - GlobalTimelines::get(self.ttid).map_err(|e| CopyStreamHandlerEnd::Other(e.into()))?; // Use a guard object to remove our entry from the timeline when we are done. let ws_guard = Arc::new(tli.get_walsenders().register( From 7701ca45dd2215ecca8b8c3de50926ae9b520ffd Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Mon, 20 May 2024 12:08:45 -0400 Subject: [PATCH 020/220] feat(pageserver): generate image layers for sparse keyspace (#7567) Part of https://github.com/neondatabase/neon/issues/7462 Sparse keyspace does not generate image layers for now. This pull request adds support for generating image layers for sparse keyspace. ## Summary of changes * Use the scan interface to generate compaction data for sparse keyspace. * Track num of delta layers reads during scan. * Read-trigger compaction: when a scan on the keyspace touches too many delta files, generate an image layer. There are one hard-coded threshold for now: max delta layers we want to touch for a scan. * L0 compaction does not need to compute holes for metadata keyspace. Know issue: the scan interface currently reads past the image layer, which causes `delta_layer_accessed` keeps increasing even if image layers are generated. The pull request to fix that will be separate, and orthogonal to this one. --------- Signed-off-by: Alex Chi Z --- pageserver/src/pgdatadir_mapping.rs | 6 +- pageserver/src/tenant.rs | 105 +++++- pageserver/src/tenant/storage_layer.rs | 19 +- pageserver/src/tenant/timeline.rs | 339 +++++++++++++------ pageserver/src/tenant/timeline/compaction.rs | 35 +- 5 files changed, 363 insertions(+), 141 deletions(-) diff --git a/pageserver/src/pgdatadir_mapping.rs b/pageserver/src/pgdatadir_mapping.rs index 402f075365..b4fc4a08ee 100644 --- a/pageserver/src/pgdatadir_mapping.rs +++ b/pageserver/src/pgdatadir_mapping.rs @@ -40,7 +40,11 @@ use utils::bin_ser::DeserializeError; use utils::vec_map::{VecMap, VecMapOrdering}; use utils::{bin_ser::BeSer, lsn::Lsn}; -const MAX_AUX_FILE_DELTAS: usize = 1024; +/// Max delta records appended to the AUX_FILES_KEY (for aux v1). The write path will write a full image once this threshold is reached. +pub const MAX_AUX_FILE_DELTAS: usize = 1024; + +/// Max number of aux-file-related delta layers. The compaction will create a new image layer once this threshold is reached. 
+pub const MAX_AUX_FILE_V2_DELTAS: usize = 64; #[derive(Debug)] pub enum LsnForTimestamp { diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index d42b9082b7..e598e9d2e3 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -4777,7 +4777,12 @@ mod tests { info!("Doing vectored read on {:?}", read); let vectored_res = tline - .get_vectored_impl(read.clone(), reads_lsn, ValuesReconstructState::new(), &ctx) + .get_vectored_impl( + read.clone(), + reads_lsn, + &mut ValuesReconstructState::new(), + &ctx, + ) .await; tline .validate_get_vectored_impl(&vectored_res, read, reads_lsn, &ctx) @@ -4826,7 +4831,7 @@ mod tests { .get_vectored_impl( aux_keyspace.clone(), read_lsn, - ValuesReconstructState::new(), + &mut ValuesReconstructState::new(), &ctx, ) .await; @@ -4971,7 +4976,7 @@ mod tests { .get_vectored_impl( read.clone(), current_lsn, - ValuesReconstructState::new(), + &mut ValuesReconstructState::new(), &ctx, ) .await?; @@ -5106,7 +5111,7 @@ mod tests { ranges: vec![child_gap_at_key..child_gap_at_key.next()], }, query_lsn, - ValuesReconstructState::new(), + &mut ValuesReconstructState::new(), &ctx, ) .await; @@ -5547,7 +5552,7 @@ mod tests { .await?; const NUM_KEYS: usize = 1000; - const STEP: usize = 100; // random update + scan base_key + idx * STEP + const STEP: usize = 10000; // random update + scan base_key + idx * STEP let cancel = CancellationToken::new(); @@ -5580,7 +5585,7 @@ mod tests { let keyspace = KeySpace::single(base_key..base_key.add((NUM_KEYS * STEP) as u32)); - for _ in 0..10 { + for iter in 0..=10 { // Read all the blocks for (blknum, last_lsn) in updated.iter().enumerate() { test_key.field6 = (blknum * STEP) as u32; @@ -5595,7 +5600,7 @@ mod tests { .get_vectored_impl( keyspace.clone(), lsn, - ValuesReconstructState::default(), + &mut ValuesReconstructState::default(), &ctx, ) .await? 
@@ -5631,17 +5636,91 @@ mod tests { updated[blknum] = lsn; } - // Perform a cycle of flush, compact, and GC - tline.freeze_and_flush().await?; - tline.compact(&cancel, EnumSet::empty(), &ctx).await?; - tenant - .gc_iteration(Some(tline.timeline_id), 0, Duration::ZERO, &cancel, &ctx) - .await?; + // Perform two cycles of flush, compact, and GC + for round in 0..2 { + tline.freeze_and_flush().await?; + tline + .compact( + &cancel, + if iter % 5 == 0 && round == 0 { + let mut flags = EnumSet::new(); + flags.insert(CompactFlags::ForceImageLayerCreation); + flags.insert(CompactFlags::ForceRepartition); + flags + } else { + EnumSet::empty() + }, + &ctx, + ) + .await?; + tenant + .gc_iteration(Some(tline.timeline_id), 0, Duration::ZERO, &cancel, &ctx) + .await?; + } } Ok(()) } + #[tokio::test] + async fn test_metadata_compaction_trigger() -> anyhow::Result<()> { + let harness = TenantHarness::create("test_metadata_compaction_trigger")?; + let (tenant, ctx) = harness.load().await; + let tline = tenant + .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx) + .await?; + + let cancel = CancellationToken::new(); + + let mut base_key = Key::from_hex("000000000033333333444444445500000000").unwrap(); + base_key.field1 = AUX_KEY_PREFIX; + let test_key = base_key; + let mut lsn = Lsn(0x10); + + for _ in 0..20 { + lsn = Lsn(lsn.0 + 0x10); + let mut writer = tline.writer().await; + writer + .put( + test_key, + lsn, + &Value::Image(test_img(&format!("{} at {}", 0, lsn))), + &ctx, + ) + .await?; + writer.finish_write(lsn); + drop(writer); + tline.freeze_and_flush().await?; // force create a delta layer + } + + let before_num_l0_delta_files = tline + .layers + .read() + .await + .layer_map() + .get_level0_deltas()? + .len(); + + tline.compact(&cancel, EnumSet::empty(), &ctx).await?; + + let after_num_l0_delta_files = tline + .layers + .read() + .await + .layer_map() + .get_level0_deltas()? + .len(); + + assert!(after_num_l0_delta_files < before_num_l0_delta_files, "after_num_l0_delta_files={after_num_l0_delta_files}, before_num_l0_delta_files={before_num_l0_delta_files}"); + + assert_eq!( + tline.get(test_key, lsn, &ctx).await?, + test_img(&format!("{} at {}", 0, lsn)) + ); + + Ok(()) + } + #[tokio::test] async fn test_branch_copies_dirty_aux_file_flag() { let harness = TenantHarness::create("test_branch_copies_dirty_aux_file_flag").unwrap(); diff --git a/pageserver/src/tenant/storage_layer.rs b/pageserver/src/tenant/storage_layer.rs index 94a5e9ec47..4c8a518551 100644 --- a/pageserver/src/tenant/storage_layer.rs +++ b/pageserver/src/tenant/storage_layer.rs @@ -113,12 +113,17 @@ impl From for ValueReconstructState { } } -/// Bag of data accumulated during a vectored get +/// Bag of data accumulated during a vectored get. pub(crate) struct ValuesReconstructState { + /// The keys will be removed after `get_vectored` completes. The caller outside `Timeline` + /// should not expect to get anything from this hashmap. pub(crate) keys: HashMap>, keys_done: KeySpaceRandomAccum, + + // Statistics that are still accessible as a caller of `get_vectored_impl`. 
layers_visited: u32, + delta_layers_visited: u32, } impl ValuesReconstructState { @@ -127,6 +132,7 @@ impl ValuesReconstructState { keys: HashMap::new(), keys_done: KeySpaceRandomAccum::new(), layers_visited: 0, + delta_layers_visited: 0, } } @@ -140,8 +146,17 @@ impl ValuesReconstructState { } } - pub(crate) fn on_layer_visited(&mut self) { + pub(crate) fn on_layer_visited(&mut self, layer: &ReadableLayer) { self.layers_visited += 1; + if let ReadableLayer::PersistentLayer(layer) = layer { + if layer.layer_desc().is_delta() { + self.delta_layers_visited += 1; + } + } + } + + pub(crate) fn get_delta_layers_visited(&self) -> u32 { + self.delta_layers_visited } pub(crate) fn get_layers_visited(&self) -> u32 { diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 1fb1928079..e6b58b7166 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -18,8 +18,8 @@ use fail::fail_point; use once_cell::sync::Lazy; use pageserver_api::{ key::{ - AUX_FILES_KEY, METADATA_KEY_BEGIN_PREFIX, METADATA_KEY_END_PREFIX, NON_INHERITED_RANGE, - NON_INHERITED_SPARSE_RANGE, + AUX_FILES_KEY, KEY_SIZE, METADATA_KEY_BEGIN_PREFIX, METADATA_KEY_END_PREFIX, + NON_INHERITED_RANGE, NON_INHERITED_SPARSE_RANGE, }, keyspace::{KeySpaceAccum, SparseKeyPartitioning}, models::{ @@ -60,7 +60,6 @@ use std::{ ops::ControlFlow, }; -use crate::tenant::timeline::init::LocalLayerFileMetadata; use crate::{ aux_file::AuxFileSizeEstimator, tenant::{ @@ -89,6 +88,9 @@ use crate::{ metrics::ScanLatencyOngoingRecording, tenant::timeline::logical_size::CurrentLogicalSize, }; use crate::{pgdatadir_mapping::LsnForTimestamp, tenant::tasks::BackgroundLoopKind}; +use crate::{ + pgdatadir_mapping::MAX_AUX_FILE_V2_DELTAS, tenant::timeline::init::LocalLayerFileMetadata, +}; use crate::{ pgdatadir_mapping::{AuxFilesDirectory, DirectoryKind}, virtual_file::{MaybeFatalIo, VirtualFile}, @@ -782,6 +784,11 @@ pub(crate) enum ShutdownMode { Hard, } +struct ImageLayerCreationOutcome { + image: Option, + next_start_key: Key, +} + /// Public interface functions impl Timeline { /// Get the LSN where this branch was created @@ -883,7 +890,7 @@ impl Timeline { } let vectored_res = self - .get_vectored_impl(keyspace.clone(), lsn, reconstruct_state, ctx) + .get_vectored_impl(keyspace.clone(), lsn, &mut reconstruct_state, ctx) .await; if self.conf.validate_vectored_get { @@ -1028,7 +1035,12 @@ impl Timeline { } GetVectoredImpl::Vectored => { let vectored_res = self - .get_vectored_impl(keyspace.clone(), lsn, ValuesReconstructState::new(), ctx) + .get_vectored_impl( + keyspace.clone(), + lsn, + &mut ValuesReconstructState::new(), + ctx, + ) .await; if self.conf.validate_vectored_get { @@ -1116,7 +1128,7 @@ impl Timeline { .get_vectored_impl( keyspace.clone(), lsn, - ValuesReconstructState::default(), + &mut ValuesReconstructState::default(), ctx, ) .await; @@ -1193,7 +1205,7 @@ impl Timeline { &self, keyspace: KeySpace, lsn: Lsn, - mut reconstruct_state: ValuesReconstructState, + reconstruct_state: &mut ValuesReconstructState, ctx: &RequestContext, ) -> Result>, GetVectoredError> { let get_kind = if keyspace.total_raw_size() == 1 { @@ -1205,7 +1217,7 @@ impl Timeline { let get_data_timer = crate::metrics::GET_RECONSTRUCT_DATA_TIME .for_get_kind(get_kind) .start_timer(); - self.get_vectored_reconstruct_data(keyspace, lsn, &mut reconstruct_state, ctx) + self.get_vectored_reconstruct_data(keyspace, lsn, reconstruct_state, ctx) .await?; get_data_timer.stop_and_record(); @@ -1214,7 +1226,8 @@ impl Timeline 
{ .start_timer(); let mut results: BTreeMap> = BTreeMap::new(); let layers_visited = reconstruct_state.get_layers_visited(); - for (key, res) in reconstruct_state.keys { + + for (key, res) in std::mem::take(&mut reconstruct_state.keys) { match res { Err(err) => { results.insert(key, Err(err)); @@ -3448,7 +3461,7 @@ impl Timeline { unmapped_keyspace = keyspace_to_read; cont_lsn = next_cont_lsn; - reconstruct_state.on_layer_visited(); + reconstruct_state.on_layer_visited(&layer_to_read); } else { break; } @@ -4134,6 +4147,176 @@ impl Timeline { false } + /// Create image layers for Postgres data. Assumes the caller passes a partition that is not too large, + /// so that at most one image layer will be produced from this function. + async fn create_image_layer_for_rel_blocks( + self: &Arc, + partition: &KeySpace, + mut image_layer_writer: ImageLayerWriter, + lsn: Lsn, + ctx: &RequestContext, + img_range: Range, + start: Key, + ) -> Result { + let mut wrote_keys = false; + + let mut key_request_accum = KeySpaceAccum::new(); + for range in &partition.ranges { + let mut key = range.start; + while key < range.end { + // Decide whether to retain this key: usually we do, but sharded tenants may + // need to drop keys that don't belong to them. If we retain the key, add it + // to `key_request_accum` for later issuing a vectored get + if self.shard_identity.is_key_disposable(&key) { + debug!( + "Dropping key {} during compaction (it belongs on shard {:?})", + key, + self.shard_identity.get_shard_number(&key) + ); + } else { + key_request_accum.add_key(key); + } + + let last_key_in_range = key.next() == range.end; + key = key.next(); + + // Maybe flush `key_rest_accum` + if key_request_accum.raw_size() >= Timeline::MAX_GET_VECTORED_KEYS + || (last_key_in_range && key_request_accum.raw_size() > 0) + { + let results = self + .get_vectored(key_request_accum.consume_keyspace(), lsn, ctx) + .await?; + + for (img_key, img) in results { + let img = match img { + Ok(img) => img, + Err(err) => { + // If we fail to reconstruct a VM or FSM page, we can zero the + // page without losing any actual user data. That seems better + // than failing repeatedly and getting stuck. + // + // We had a bug at one point, where we truncated the FSM and VM + // in the pageserver, but the Postgres didn't know about that + // and continued to generate incremental WAL records for pages + // that didn't exist in the pageserver. Trying to replay those + // WAL records failed to find the previous image of the page. + // This special case allows us to recover from that situation. + // See https://github.com/neondatabase/neon/issues/2601. + // + // Unfortunately we cannot do this for the main fork, or for + // any metadata keys, keys, as that would lead to actual data + // loss. + if is_rel_fsm_block_key(img_key) || is_rel_vm_block_key(img_key) { + warn!("could not reconstruct FSM or VM key {img_key}, filling with zeros: {err:?}"); + ZERO_PAGE.clone() + } else { + return Err(CreateImageLayersError::PageReconstructError(err)); + } + } + }; + + // Write all the keys we just read into our new image layer. + image_layer_writer.put_image(img_key, img, ctx).await?; + wrote_keys = true; + } + } + } + } + + if wrote_keys { + // Normal path: we have written some data into the new image layer for this + // partition, so flush it to disk. 
+ let image_layer = image_layer_writer.finish(self, ctx).await?; + Ok(ImageLayerCreationOutcome { + image: Some(image_layer), + next_start_key: img_range.end, + }) + } else { + // Special case: the image layer may be empty if this is a sharded tenant and the + // partition does not cover any keys owned by this shard. In this case, to ensure + // we don't leave gaps between image layers, leave `start` where it is, so that the next + // layer we write will cover the key range that we just scanned. + tracing::debug!("no data in range {}-{}", img_range.start, img_range.end); + Ok(ImageLayerCreationOutcome { + image: None, + next_start_key: start, + }) + } + } + + /// Create an image layer for metadata keys. This function produces one image layer for all metadata + /// keys for now. Because metadata keys cannot exceed basebackup size limit, the image layer for it + /// would not be too large to fit in a single image layer. + #[allow(clippy::too_many_arguments)] + async fn create_image_layer_for_metadata_keys( + self: &Arc, + partition: &KeySpace, + mut image_layer_writer: ImageLayerWriter, + lsn: Lsn, + ctx: &RequestContext, + img_range: Range, + mode: ImageLayerCreationMode, + ) -> Result { + assert!(!matches!(mode, ImageLayerCreationMode::Initial)); + + // Metadata keys image layer creation. + let mut reconstruct_state = ValuesReconstructState::default(); + let data = self + .get_vectored_impl(partition.clone(), lsn, &mut reconstruct_state, ctx) + .await?; + let (data, total_kb_retrieved, total_key_retrieved) = { + let mut new_data = BTreeMap::new(); + let mut total_kb_retrieved = 0; + let mut total_key_retrieved = 0; + for (k, v) in data { + let v = v.map_err(CreateImageLayersError::PageReconstructError)?; + total_kb_retrieved += KEY_SIZE + v.len(); + total_key_retrieved += 1; + new_data.insert(k, v); + } + (new_data, total_kb_retrieved / 1024, total_key_retrieved) + }; + let delta_file_accessed = reconstruct_state.get_delta_layers_visited(); + + let trigger_generation = delta_file_accessed as usize >= MAX_AUX_FILE_V2_DELTAS; + info!( + "generate image layers for metadata keys: trigger_generation={trigger_generation}, \ + delta_file_accessed={delta_file_accessed}, total_kb_retrieved={total_kb_retrieved}, \ + total_key_retrieved={total_key_retrieved}" + ); + if !trigger_generation && mode == ImageLayerCreationMode::Try { + return Ok(ImageLayerCreationOutcome { + image: None, + next_start_key: img_range.end, + }); + } + let has_keys = !data.is_empty(); + for (k, v) in data { + // Even if the value is empty (deleted), we do not delete it for now until we can ensure vectored get + // considers this situation properly. + // if v.is_empty() { + // continue; + // } + + // No need to handle sharding b/c metadata keys are always on the 0-th shard. + + // TODO: split image layers to avoid too large layer files. Too large image files are not handled + // on the normal data path either. 
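The `trigger_generation` check above reduces to a small rule: in `Try` mode, regenerate the metadata image layer only once a scan had to visit at least `MAX_AUX_FILE_V2_DELTAS` delta layers, while `Force` always regenerates (`Initial` is excluded by the assert earlier). A standalone restatement with hypothetical names, not part of the patch:

```rust
// Hypothetical helper, not in the patch: the decision the code above makes inline.
#[derive(PartialEq, Eq)]
enum Mode {
    Try,
    Force, // `Initial` is ruled out earlier by an assert
}

/// Rebuild the metadata image layer only when enough delta layers have piled up,
/// unless creation is forced.
fn should_create_metadata_image(delta_layers_visited: usize, max_deltas: usize, mode: Mode) -> bool {
    mode == Mode::Force || delta_layers_visited >= max_deltas
}

fn main() {
    assert!(should_create_metadata_image(16, 16, Mode::Try));
    assert!(!should_create_metadata_image(3, 16, Mode::Try));
    assert!(should_create_metadata_image(0, 16, Mode::Force));
}
```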
+ image_layer_writer.put_image(k, v, ctx).await?; + } + Ok(ImageLayerCreationOutcome { + image: if has_keys { + let image_layer = image_layer_writer.finish(self, ctx).await?; + Some(image_layer) + } else { + tracing::debug!("no data in range {}-{}", img_range.start, img_range.end); + None + }, + next_start_key: img_range.end, + }) + } + #[tracing::instrument(skip_all, fields(%lsn, %mode))] async fn create_image_layers( self: &Arc, @@ -4175,19 +4358,17 @@ impl Timeline { for partition in partitioning.parts.iter() { let img_range = start..partition.ranges.last().unwrap().end; - - if partition.overlaps(&Key::metadata_key_range()) { - // TODO(chi): The next patch will correctly create image layers for metadata keys, and it would be a - // rather big change. Keep this patch small for now. - match mode { - ImageLayerCreationMode::Force | ImageLayerCreationMode::Try => { - // skip image layer creation anyways for metadata keys. - start = img_range.end; - continue; - } - ImageLayerCreationMode::Initial => { - return Err(CreateImageLayersError::Other(anyhow::anyhow!("no image layer should be created for metadata keys when flushing frozen layers"))); - } + let compact_metadata = partition.overlaps(&Key::metadata_key_range()); + if compact_metadata { + for range in &partition.ranges { + assert!( + range.start.field1 >= METADATA_KEY_BEGIN_PREFIX + && range.end.field1 <= METADATA_KEY_END_PREFIX, + "metadata keys must be partitioned separately" + ); + } + if mode == ImageLayerCreationMode::Initial { + return Err(CreateImageLayersError::Other(anyhow::anyhow!("no image layer should be created for metadata keys when flushing frozen layers"))); } } else if let ImageLayerCreationMode::Try = mode { // check_for_image_layers = false -> skip @@ -4198,7 +4379,7 @@ impl Timeline { } } - let mut image_layer_writer = ImageLayerWriter::new( + let image_layer_writer = ImageLayerWriter::new( self.conf, self.timeline_id, self.tenant_shard_id, @@ -4214,87 +4395,39 @@ impl Timeline { ))) }); - let mut wrote_keys = false; + if !compact_metadata { + let ImageLayerCreationOutcome { + image, + next_start_key, + } = self + .create_image_layer_for_rel_blocks( + partition, + image_layer_writer, + lsn, + ctx, + img_range, + start, + ) + .await?; - let mut key_request_accum = KeySpaceAccum::new(); - for range in &partition.ranges { - let mut key = range.start; - while key < range.end { - // Decide whether to retain this key: usually we do, but sharded tenants may - // need to drop keys that don't belong to them. If we retain the key, add it - // to `key_request_accum` for later issuing a vectored get - if self.shard_identity.is_key_disposable(&key) { - debug!( - "Dropping key {} during compaction (it belongs on shard {:?})", - key, - self.shard_identity.get_shard_number(&key) - ); - } else { - key_request_accum.add_key(key); - } - - let last_key_in_range = key.next() == range.end; - key = key.next(); - - // Maybe flush `key_rest_accum` - if key_request_accum.raw_size() >= Timeline::MAX_GET_VECTORED_KEYS - || (last_key_in_range && key_request_accum.raw_size() > 0) - { - let results = self - .get_vectored(key_request_accum.consume_keyspace(), lsn, ctx) - .await?; - - for (img_key, img) in results { - let img = match img { - Ok(img) => img, - Err(err) => { - // If we fail to reconstruct a VM or FSM page, we can zero the - // page without losing any actual user data. That seems better - // than failing repeatedly and getting stuck. 
- // - // We had a bug at one point, where we truncated the FSM and VM - // in the pageserver, but the Postgres didn't know about that - // and continued to generate incremental WAL records for pages - // that didn't exist in the pageserver. Trying to replay those - // WAL records failed to find the previous image of the page. - // This special case allows us to recover from that situation. - // See https://github.com/neondatabase/neon/issues/2601. - // - // Unfortunately we cannot do this for the main fork, or for - // any metadata keys, keys, as that would lead to actual data - // loss. - if is_rel_fsm_block_key(img_key) || is_rel_vm_block_key(img_key) - { - warn!("could not reconstruct FSM or VM key {img_key}, filling with zeros: {err:?}"); - ZERO_PAGE.clone() - } else { - return Err(CreateImageLayersError::PageReconstructError( - err, - )); - } - } - }; - - // Write all the keys we just read into our new image layer. - image_layer_writer.put_image(img_key, img, ctx).await?; - wrote_keys = true; - } - } - } - } - - if wrote_keys { - // Normal path: we have written some data into the new image layer for this - // partition, so flush it to disk. - start = img_range.end; - let image_layer = image_layer_writer.finish(self, ctx).await?; - image_layers.push(image_layer); + start = next_start_key; + image_layers.extend(image); } else { - // Special case: the image layer may be empty if this is a sharded tenant and the - // partition does not cover any keys owned by this shard. In this case, to ensure - // we don't leave gaps between image layers, leave `start` where it is, so that the next - // layer we write will cover the key range that we just scanned. - tracing::debug!("no data in range {}-{}", img_range.start, img_range.end); + let ImageLayerCreationOutcome { + image, + next_start_key, + } = self + .create_image_layer_for_metadata_keys( + partition, + image_layer_writer, + lsn, + ctx, + img_range, + mode, + ) + .await?; + start = next_start_key; + image_layers.extend(image); } } diff --git a/pageserver/src/tenant/timeline/compaction.rs b/pageserver/src/tenant/timeline/compaction.rs index ed48b4c9cb..2eff469591 100644 --- a/pageserver/src/tenant/timeline/compaction.rs +++ b/pageserver/src/tenant/timeline/compaction.rs @@ -116,9 +116,13 @@ impl Timeline { // 3. Create new image layers for partitions that have been modified // "enough". - let dense_layers = self + let mut partitioning = dense_partitioning; + partitioning + .parts + .extend(sparse_partitioning.into_dense().parts); + let image_layers = self .create_image_layers( - &dense_partitioning, + &partitioning, lsn, if flags.contains(CompactFlags::ForceImageLayerCreation) { ImageLayerCreationMode::Force @@ -130,24 +134,8 @@ impl Timeline { .await .map_err(anyhow::Error::from)?; - // For now, nothing will be produced... - let sparse_layers = self - .create_image_layers( - &sparse_partitioning.clone().into_dense(), - lsn, - if flags.contains(CompactFlags::ForceImageLayerCreation) { - ImageLayerCreationMode::Force - } else { - ImageLayerCreationMode::Try - }, - &image_ctx, - ) - .await - .map_err(anyhow::Error::from)?; - assert!(sparse_layers.is_empty()); - - self.upload_new_image_layers(dense_layers)?; - dense_partitioning.parts.len() + self.upload_new_image_layers(image_layers)?; + partitioning.parts.len() } Err(err) => { // no partitioning? This is normal, if the timeline was just created @@ -499,8 +487,11 @@ impl Timeline { for &DeltaEntry { key: next_key, .. 
} in all_keys.iter() { if let Some(prev_key) = prev { - // just first fast filter - if next_key.to_i128() - prev_key.to_i128() >= min_hole_range { + // just first fast filter, do not create hole entries for metadata keys. The last hole in the + // compaction is the gap between data key and metadata keys. + if next_key.to_i128() - prev_key.to_i128() >= min_hole_range + && !Key::is_metadata_key(&prev_key) + { let key_range = prev_key..next_key; // Measuring hole by just subtraction of i128 representation of key range boundaries // has not so much sense, because largest holes will corresponds field1/field2 changes. From 2d7091871f82b6e9d598fdaed8cc28bdc27b9a16 Mon Sep 17 00:00:00 2001 From: Andy Hattemer Date: Mon, 20 May 2024 12:15:43 -0400 Subject: [PATCH 021/220] Update banner image in Readme (#7801) Update the readme banner with updated branding. --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 00a90f4483..ea0a289502 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,6 @@ -[![Neon](https://user-images.githubusercontent.com/13738772/236813940-dcfdcb5b-69d3-449b-a686-013febe834d4.png)](https://neon.tech) +[![Neon](https://github.com/neondatabase/neon/assets/11527560/f15a17f0-836e-40c5-b35d-030606a6b660)](https://neon.tech) + + # Neon From 6810d2aa53b7b7646013d2f236d155a4f1b4721d Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Mon, 20 May 2024 14:24:18 -0400 Subject: [PATCH 022/220] feat(pageserver): do not read past image layers for vectored get (#7773) ## Problem Part of https://github.com/neondatabase/neon/issues/7462 On metadata keyspace, vectored get will not stop if a key is not found, and will read past the image layer. However, the semantics is different from single get, because if a key does not exist in the image layer, it means that the key does not exist in the past, or have been deleted. This pull request fixed it by recording image layer coverage during the vectored get process and stop when the full keyspace is covered by an image layer. A corresponding test case is added to ensure generating image layer reduces the number of delta layers. This optimization (or bug fix) also applies to rel block keyspaces. If a key is missing, we can know it's missing once the first image layer is reached. Page server will not attempt to read lower layers, which potentially incurs layer downloads + evictions. --------- Signed-off-by: Alex Chi Z --- libs/pageserver_api/src/keyspace.rs | 2 +- pageserver/src/tenant.rs | 372 +++++++++++++++++- pageserver/src/tenant/storage_layer.rs | 26 +- .../src/tenant/storage_layer/image_layer.rs | 4 + pageserver/src/tenant/timeline.rs | 79 +++- 5 files changed, 461 insertions(+), 22 deletions(-) diff --git a/libs/pageserver_api/src/keyspace.rs b/libs/pageserver_api/src/keyspace.rs index c0c4710a00..12c6dc3a6d 100644 --- a/libs/pageserver_api/src/keyspace.rs +++ b/libs/pageserver_api/src/keyspace.rs @@ -307,7 +307,7 @@ impl KeySpace { } /// Merge another keyspace into the current one. - /// Note: the keyspaces must not ovelap (enforced via assertions) + /// Note: the keyspaces must not overlap (enforced via assertions). To merge overlapping key ranges, use `KeySpaceRandomAccum`. 
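The commit message above rests on one rule: an image layer is a complete snapshot of its key range, so a key absent from a covering image layer is absent (or deleted) at that LSN and must not trigger a descent into the ancestor timeline. A self-contained toy model of that rule, using plain integer keys and hypothetical names rather than the pageserver's `KeySpace`/`ValuesReconstructState` machinery shown later in this patch:

```rust
use std::ops::Range;

// Toy model (not the pageserver's types): decide, per requested key, whether to keep
// searching the ancestor timeline or to report the key as missing/deleted.
fn classify_remaining(
    requested: Range<u64>,
    completed: &[u64],         // keys already reconstructed on this timeline
    image_covered: Range<u64>, // key range covered by a visited image layer
) -> (Vec<u64>, Vec<u64>) {
    let mut descend_to_ancestor = Vec::new();
    let mut definitely_missing = Vec::new();
    for key in requested {
        if completed.contains(&key) {
            continue;
        }
        if image_covered.contains(&key) {
            // The image layer snapshots this whole range: absence means deleted or never existed.
            definitely_missing.push(key);
        } else {
            descend_to_ancestor.push(key);
        }
    }
    (descend_to_ancestor, definitely_missing)
}

fn main() {
    let (descend, missing) = classify_remaining(0..6, &[0, 1], 0..4);
    assert_eq!(descend, vec![4, 5]); // not image-covered: the ancestor may still hold them
    assert_eq!(missing, vec![2, 3]); // image-covered but absent: surface MissingKey instead
}
```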
pub fn merge(&mut self, other: &KeySpace) { let all_ranges = self .ranges diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index e598e9d2e3..1a66f2c919 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -3968,7 +3968,7 @@ mod tests { use crate::tenant::harness::*; use crate::tenant::timeline::CompactFlags; use crate::DEFAULT_PG_VERSION; - use bytes::BytesMut; + use bytes::{Bytes, BytesMut}; use hex_literal::hex; use pageserver_api::key::{AUX_KEY_PREFIX, NON_INHERITED_RANGE}; use pageserver_api::keyspace::KeySpace; @@ -5996,4 +5996,374 @@ mod tests { Some(&bytes::Bytes::from_static(b"last")) ); } + + #[tokio::test] + async fn test_metadata_image_creation() -> anyhow::Result<()> { + let harness = TenantHarness::create("test_metadata_image_creation")?; + let (tenant, ctx) = harness.load().await; + let tline = tenant + .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx) + .await?; + + const NUM_KEYS: usize = 1000; + const STEP: usize = 10000; // random update + scan base_key + idx * STEP + + let cancel = CancellationToken::new(); + + let mut base_key = Key::from_hex("000000000033333333444444445500000000").unwrap(); + base_key.field1 = AUX_KEY_PREFIX; + let mut test_key = base_key; + let mut lsn = Lsn(0x10); + + async fn scan_with_statistics( + tline: &Timeline, + keyspace: &KeySpace, + lsn: Lsn, + ctx: &RequestContext, + ) -> anyhow::Result<(BTreeMap>, usize)> { + let mut reconstruct_state = ValuesReconstructState::default(); + let res = tline + .get_vectored_impl(keyspace.clone(), lsn, &mut reconstruct_state, ctx) + .await?; + Ok((res, reconstruct_state.get_delta_layers_visited() as usize)) + } + + #[allow(clippy::needless_range_loop)] + for blknum in 0..NUM_KEYS { + lsn = Lsn(lsn.0 + 0x10); + test_key.field6 = (blknum * STEP) as u32; + let mut writer = tline.writer().await; + writer + .put( + test_key, + lsn, + &Value::Image(test_img(&format!("{} at {}", blknum, lsn))), + &ctx, + ) + .await?; + writer.finish_write(lsn); + drop(writer); + } + + let keyspace = KeySpace::single(base_key..base_key.add((NUM_KEYS * STEP) as u32)); + + for iter in 1..=10 { + for _ in 0..NUM_KEYS { + lsn = Lsn(lsn.0 + 0x10); + let blknum = thread_rng().gen_range(0..NUM_KEYS); + test_key.field6 = (blknum * STEP) as u32; + let mut writer = tline.writer().await; + writer + .put( + test_key, + lsn, + &Value::Image(test_img(&format!("{} at {}", blknum, lsn))), + &ctx, + ) + .await?; + writer.finish_write(lsn); + drop(writer); + } + + tline.freeze_and_flush().await?; + + if iter % 5 == 0 { + let (_, before_delta_file_accessed) = + scan_with_statistics(&tline, &keyspace, lsn, &ctx).await?; + tline + .compact( + &cancel, + { + let mut flags = EnumSet::new(); + flags.insert(CompactFlags::ForceImageLayerCreation); + flags.insert(CompactFlags::ForceRepartition); + flags + }, + &ctx, + ) + .await?; + let (_, after_delta_file_accessed) = + scan_with_statistics(&tline, &keyspace, lsn, &ctx).await?; + assert!(after_delta_file_accessed < before_delta_file_accessed, "after_delta_file_accessed={after_delta_file_accessed}, before_delta_file_accessed={before_delta_file_accessed}"); + // Given that we already produced an image layer, there should be no delta layer needed for the scan, but still setting a low threshold there for unforeseen circumstances. 
+ assert!( + after_delta_file_accessed <= 2, + "after_delta_file_accessed={after_delta_file_accessed}" + ); + } + } + + Ok(()) + } + + #[tokio::test] + async fn test_vectored_missing_data_key_reads() -> anyhow::Result<()> { + let harness = TenantHarness::create("test_vectored_missing_data_key_reads")?; + let (tenant, ctx) = harness.load().await; + let tline = tenant + .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx) + .await?; + + let cancel = CancellationToken::new(); + + let base_key = Key::from_hex("000000000033333333444444445500000000").unwrap(); + let base_key_child = Key::from_hex("000000000033333333444444445500000001").unwrap(); + let base_key_nonexist = Key::from_hex("000000000033333333444444445500000002").unwrap(); + + let mut lsn = Lsn(0x20); + + { + let mut writer = tline.writer().await; + writer + .put(base_key, lsn, &Value::Image(test_img("data key 1")), &ctx) + .await?; + writer.finish_write(lsn); + drop(writer); + + tline.freeze_and_flush().await?; // this will create a image layer + } + + let child = tenant + .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(lsn), &ctx) + .await + .unwrap(); + + lsn.0 += 0x10; + + { + let mut writer = child.writer().await; + writer + .put( + base_key_child, + lsn, + &Value::Image(test_img("data key 2")), + &ctx, + ) + .await?; + writer.finish_write(lsn); + drop(writer); + + child.freeze_and_flush().await?; // this will create a delta + + { + // update the partitioning to include the test key space, otherwise they + // will be dropped by image layer creation + let mut guard = child.partitioning.lock().await; + let ((partitioning, _), partition_lsn) = &mut *guard; + partitioning + .parts + .push(KeySpace::single(base_key..base_key_nonexist)); // exclude the nonexist key + *partition_lsn = lsn; + } + + child + .compact( + &cancel, + { + let mut set = EnumSet::empty(); + set.insert(CompactFlags::ForceImageLayerCreation); + set + }, + &ctx, + ) + .await?; // force create an image layer for the keys, TODO: check if the image layer is created + } + + async fn get_vectored_impl_wrapper( + tline: &Arc, + key: Key, + lsn: Lsn, + ctx: &RequestContext, + ) -> Result, GetVectoredError> { + let mut reconstruct_state = ValuesReconstructState::new(); + let mut res = tline + .get_vectored_impl( + KeySpace::single(key..key.next()), + lsn, + &mut reconstruct_state, + ctx, + ) + .await?; + Ok(res.pop_last().map(|(k, v)| { + assert_eq!(k, key); + v.unwrap() + })) + } + + // test vectored get on parent timeline + assert_eq!( + get_vectored_impl_wrapper(&tline, base_key, lsn, &ctx).await?, + Some(test_img("data key 1")) + ); + assert!(get_vectored_impl_wrapper(&tline, base_key_child, lsn, &ctx) + .await + .unwrap_err() + .is_missing_key_error()); + assert!( + get_vectored_impl_wrapper(&tline, base_key_nonexist, lsn, &ctx) + .await + .unwrap_err() + .is_missing_key_error() + ); + + // test vectored get on child timeline + assert_eq!( + get_vectored_impl_wrapper(&child, base_key, lsn, &ctx).await?, + Some(test_img("data key 1")) + ); + assert_eq!( + get_vectored_impl_wrapper(&child, base_key_child, lsn, &ctx).await?, + Some(test_img("data key 2")) + ); + assert!( + get_vectored_impl_wrapper(&child, base_key_nonexist, lsn, &ctx) + .await + .unwrap_err() + .is_missing_key_error() + ); + + Ok(()) + } + + #[tokio::test] + async fn test_vectored_missing_metadata_key_reads() -> anyhow::Result<()> { + let harness = TenantHarness::create("test_vectored_missing_metadata_key_reads")?; + let (tenant, ctx) = harness.load().await; + let tline = tenant + 
.create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx) + .await?; + + let cancel = CancellationToken::new(); + + let mut base_key = Key::from_hex("000000000033333333444444445500000000").unwrap(); + let mut base_key_child = Key::from_hex("000000000033333333444444445500000001").unwrap(); + let mut base_key_nonexist = Key::from_hex("000000000033333333444444445500000002").unwrap(); + base_key.field1 = AUX_KEY_PREFIX; + base_key_child.field1 = AUX_KEY_PREFIX; + base_key_nonexist.field1 = AUX_KEY_PREFIX; + + let mut lsn = Lsn(0x20); + + { + let mut writer = tline.writer().await; + writer + .put( + base_key, + lsn, + &Value::Image(test_img("metadata key 1")), + &ctx, + ) + .await?; + writer.finish_write(lsn); + drop(writer); + + tline.freeze_and_flush().await?; // this will create an image layer + + tline + .compact( + &cancel, + { + let mut set = EnumSet::empty(); + set.insert(CompactFlags::ForceImageLayerCreation); + set.insert(CompactFlags::ForceRepartition); + set + }, + &ctx, + ) + .await?; // force create an image layer for metadata keys + tenant + .gc_iteration(Some(tline.timeline_id), 0, Duration::ZERO, &cancel, &ctx) + .await?; + } + + let child = tenant + .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(lsn), &ctx) + .await + .unwrap(); + + lsn.0 += 0x10; + + { + let mut writer = child.writer().await; + writer + .put( + base_key_child, + lsn, + &Value::Image(test_img("metadata key 2")), + &ctx, + ) + .await?; + writer.finish_write(lsn); + drop(writer); + + child.freeze_and_flush().await?; + + child + .compact( + &cancel, + { + let mut set = EnumSet::empty(); + set.insert(CompactFlags::ForceImageLayerCreation); + set.insert(CompactFlags::ForceRepartition); + set + }, + &ctx, + ) + .await?; // force create an image layer for metadata keys + tenant + .gc_iteration(Some(child.timeline_id), 0, Duration::ZERO, &cancel, &ctx) + .await?; + } + + async fn get_vectored_impl_wrapper( + tline: &Arc, + key: Key, + lsn: Lsn, + ctx: &RequestContext, + ) -> Result, GetVectoredError> { + let mut reconstruct_state = ValuesReconstructState::new(); + let mut res = tline + .get_vectored_impl( + KeySpace::single(key..key.next()), + lsn, + &mut reconstruct_state, + ctx, + ) + .await?; + Ok(res.pop_last().map(|(k, v)| { + assert_eq!(k, key); + v.unwrap() + })) + } + + // test vectored get on parent timeline + assert_eq!( + get_vectored_impl_wrapper(&tline, base_key, lsn, &ctx).await?, + Some(test_img("metadata key 1")) + ); + assert_eq!( + get_vectored_impl_wrapper(&tline, base_key_child, lsn, &ctx).await?, + None + ); + assert_eq!( + get_vectored_impl_wrapper(&tline, base_key_nonexist, lsn, &ctx).await?, + None + ); + + // test vectored get on child timeline + assert_eq!( + get_vectored_impl_wrapper(&child, base_key, lsn, &ctx).await?, + None + ); + assert_eq!( + get_vectored_impl_wrapper(&child, base_key_child, lsn, &ctx).await?, + Some(test_img("metadata key 2")) + ); + assert_eq!( + get_vectored_impl_wrapper(&child, base_key_nonexist, lsn, &ctx).await?, + None + ); + + Ok(()) + } } diff --git a/pageserver/src/tenant/storage_layer.rs b/pageserver/src/tenant/storage_layer.rs index 4c8a518551..9ccf20c0d4 100644 --- a/pageserver/src/tenant/storage_layer.rs +++ b/pageserver/src/tenant/storage_layer.rs @@ -113,14 +113,17 @@ impl From for ValueReconstructState { } } -/// Bag of data accumulated during a vectored get. +/// Bag of data accumulated during a vectored get.. pub(crate) struct ValuesReconstructState { /// The keys will be removed after `get_vectored` completes. 
The caller outside `Timeline` /// should not expect to get anything from this hashmap. pub(crate) keys: HashMap>, - + /// The keys which are already retrieved keys_done: KeySpaceRandomAccum, + /// The keys covered by the image layers + keys_with_image_coverage: Option>, + // Statistics that are still accessible as a caller of `get_vectored_impl`. layers_visited: u32, delta_layers_visited: u32, @@ -131,6 +134,7 @@ impl ValuesReconstructState { Self { keys: HashMap::new(), keys_done: KeySpaceRandomAccum::new(), + keys_with_image_coverage: None, layers_visited: 0, delta_layers_visited: 0, } @@ -186,6 +190,16 @@ impl ValuesReconstructState { } } + /// On hitting image layer, we can mark all keys in this range as done, because + /// if the image layer does not contain a key, it is deleted/never added. + pub(crate) fn on_image_layer_visited(&mut self, key_range: &Range) { + let prev_val = self.keys_with_image_coverage.replace(key_range.clone()); + assert_eq!( + prev_val, None, + "should consume the keyspace before the next iteration" + ); + } + /// Update the state collected for a given key. /// Returns true if this was the last value needed for the key and false otherwise. /// @@ -248,8 +262,12 @@ impl ValuesReconstructState { /// Returns the key space describing the keys that have /// been marked as completed since the last call to this function. - pub(crate) fn consume_done_keys(&mut self) -> KeySpace { - self.keys_done.consume_keyspace() + /// Returns individual keys done, and the image layer coverage. + pub(crate) fn consume_done_keys(&mut self) -> (KeySpace, Option>) { + ( + self.keys_done.consume_keyspace(), + self.keys_with_image_coverage.take(), + ) } } diff --git a/pageserver/src/tenant/storage_layer/image_layer.rs b/pageserver/src/tenant/storage_layer/image_layer.rs index 6ea452b993..becd1e7a6d 100644 --- a/pageserver/src/tenant/storage_layer/image_layer.rs +++ b/pageserver/src/tenant/storage_layer/image_layer.rs @@ -158,6 +158,7 @@ pub struct ImageLayerInner { index_start_blk: u32, index_root_blk: u32, + key_range: Range, lsn: Lsn, file: VirtualFile, @@ -419,6 +420,7 @@ impl ImageLayerInner { file, file_id, max_vectored_read_bytes, + key_range: actual_summary.key_range, })) } @@ -478,6 +480,8 @@ impl ImageLayerInner { self.do_reads_and_update_state(reads, reconstruct_state, ctx) .await; + reconstruct_state.on_image_layer_visited(&self.key_range); + Ok(()) } diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index e6b58b7166..7f2a41d90c 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -21,7 +21,7 @@ use pageserver_api::{ AUX_FILES_KEY, KEY_SIZE, METADATA_KEY_BEGIN_PREFIX, METADATA_KEY_END_PREFIX, NON_INHERITED_RANGE, NON_INHERITED_SPARSE_RANGE, }, - keyspace::{KeySpaceAccum, SparseKeyPartitioning}, + keyspace::{KeySpaceAccum, KeySpaceRandomAccum, SparseKeyPartitioning}, models::{ AtomicAuxFilePolicy, AuxFilePolicy, CompactionAlgorithm, DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskSpawnRequest, EvictionPolicy, InMemoryLayerInfo, LayerMapInfo, @@ -348,8 +348,8 @@ pub struct Timeline { // though let's keep them both for better error visibility. pub initdb_lsn: Lsn, - /// When did we last calculate the partitioning? - partitioning: tokio::sync::Mutex<((KeyPartitioning, SparseKeyPartitioning), Lsn)>, + /// When did we last calculate the partitioning? Make it pub to test cases. 
+ pub(super) partitioning: tokio::sync::Mutex<((KeyPartitioning, SparseKeyPartitioning), Lsn)>, /// Configuration: how often should the partitioning be recalculated. repartition_threshold: u64, @@ -483,6 +483,11 @@ impl GcCutoffs { } } +pub(crate) struct TimelineVisitOutcome { + completed_keyspace: KeySpace, + image_covered_keyspace: KeySpace, +} + /// An error happened in a get() operation. #[derive(thiserror::Error, Debug)] pub(crate) enum PageReconstructError { @@ -507,6 +512,13 @@ pub(crate) enum PageReconstructError { MissingKey(MissingKeyError), } +impl GetVectoredError { + #[cfg(test)] + pub(crate) fn is_missing_key_error(&self) -> bool { + matches!(self, Self::MissingKey(_)) + } +} + #[derive(Debug)] pub struct MissingKeyError { key: Key, @@ -3300,12 +3312,15 @@ impl Timeline { let mut cont_lsn = Lsn(request_lsn.0 + 1); - loop { + let missing_keyspace = loop { if self.cancel.is_cancelled() { return Err(GetVectoredError::Cancelled); } - let completed = Self::get_vectored_reconstruct_data_timeline( + let TimelineVisitOutcome { + completed_keyspace: completed, + image_covered_keyspace, + } = Self::get_vectored_reconstruct_data_timeline( timeline, keyspace.clone(), cont_lsn, @@ -3324,12 +3339,31 @@ impl Timeline { ranges: vec![NON_INHERITED_RANGE, NON_INHERITED_SPARSE_RANGE], }); - // Keyspace is fully retrieved, no ancestor timeline, or metadata scan (where we do not look - // into ancestor timelines). TODO: is there any other metadata which we want to inherit? - if keyspace.total_raw_size() == 0 || timeline.ancestor_timeline.is_none() { - break; + // Keyspace is fully retrieved + if keyspace.is_empty() { + break None; } + // Not fully retrieved but no ancestor timeline. + if timeline.ancestor_timeline.is_none() { + break Some(keyspace); + } + + // Now we see if there are keys covered by the image layer but does not exist in the + // image layer, which means that the key does not exist. + + // The block below will stop the vectored search if any of the keys encountered an image layer + // which did not contain a snapshot for said key. Since we have already removed all completed + // keys from `keyspace`, we expect there to be no overlap between it and the image covered key + // space. If that's not the case, we had at least one key encounter a gap in the image layer + // and stop the search as a result of that. + let removed = keyspace.remove_overlapping_with(&image_covered_keyspace); + if !removed.is_empty() { + break Some(removed); + } + // If we reached this point, `remove_overlapping_with` should not have made any change to the + // keyspace. + // Take the min to avoid reconstructing a page with data newer than request Lsn. 
cont_lsn = std::cmp::min(Lsn(request_lsn.0 + 1), Lsn(timeline.ancestor_lsn.0 + 1)); timeline_owned = timeline @@ -3337,14 +3371,14 @@ impl Timeline { .await .map_err(GetVectoredError::GetReadyAncestorError)?; timeline = &*timeline_owned; - } + }; - if keyspace.total_raw_size() != 0 { + if let Some(missing_keyspace) = missing_keyspace { return Err(GetVectoredError::MissingKey(MissingKeyError { - key: keyspace.start().unwrap(), /* better if we can store the full keyspace */ + key: missing_keyspace.start().unwrap(), /* better if we can store the full keyspace */ shard: self .shard_identity - .get_shard_number(&keyspace.start().unwrap()), + .get_shard_number(&missing_keyspace.start().unwrap()), cont_lsn, request_lsn, ancestor_lsn: Some(timeline.ancestor_lsn), @@ -3369,6 +3403,9 @@ impl Timeline { /// /// At each iteration pop the top of the fringe (the layer with the highest Lsn) /// and get all the required reconstruct data from the layer in one go. + /// + /// Returns the completed keyspace and the keyspaces with image coverage. The caller + /// decides how to deal with these two keyspaces. async fn get_vectored_reconstruct_data_timeline( timeline: &Timeline, keyspace: KeySpace, @@ -3376,20 +3413,27 @@ impl Timeline { reconstruct_state: &mut ValuesReconstructState, cancel: &CancellationToken, ctx: &RequestContext, - ) -> Result { + ) -> Result { let mut unmapped_keyspace = keyspace.clone(); let mut fringe = LayerFringe::new(); let mut completed_keyspace = KeySpace::default(); + let mut image_covered_keyspace = KeySpaceRandomAccum::new(); loop { if cancel.is_cancelled() { return Err(GetVectoredError::Cancelled); } - let keys_done_last_step = reconstruct_state.consume_done_keys(); + let (keys_done_last_step, keys_with_image_coverage) = + reconstruct_state.consume_done_keys(); unmapped_keyspace.remove_overlapping_with(&keys_done_last_step); completed_keyspace.merge(&keys_done_last_step); + if let Some(keys_with_image_coverage) = keys_with_image_coverage { + unmapped_keyspace + .remove_overlapping_with(&KeySpace::single(keys_with_image_coverage.clone())); + image_covered_keyspace.add_range(keys_with_image_coverage); + } // Do not descent any further if the last layer we visited // completed all keys in the keyspace it inspected. This is not @@ -3467,7 +3511,10 @@ impl Timeline { } } - Ok(completed_keyspace) + Ok(TimelineVisitOutcome { + completed_keyspace, + image_covered_keyspace: image_covered_keyspace.consume_keyspace(), + }) } /// # Cancel-safety From 6f3e043a76dd47a18180eec627ad5f5bbade6186 Mon Sep 17 00:00:00 2001 From: Sasha Krassovsky Date: Mon, 20 May 2024 17:00:47 -0700 Subject: [PATCH 023/220] Add some more replication slot metrics (#7761) ## Problem We want to add alerts for when people's replication slots break, and also metrics for retained WAL so that we can make warn customers when their storage gets bloated. ## Summary of changes Adds the metrics. 
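The collector queries added below cast LSNs to `FLOAT8` because Prometheus only carries float64 samples; as the comment in the collector file notes, that is lossless for integers up to 2^53. A quick standalone check of that boundary (illustrative only, not part of the patch):

```rust
fn main() {
    // Every integer up to 2^53 = 9_007_199_254_740_992 is exactly representable as f64.
    let exact = 1u64 << 53;
    assert_eq!((exact as f64) as u64, exact);

    // 2^53 + 1 is the first integer that is not; it rounds back down to 2^53.
    let inexact = exact + 1;
    assert_ne!((inexact as f64) as u64, inexact);

    // 2^53 bytes is roughly 8 PiB of WAL, far beyond any realistic LSN today,
    // so exporting LSNs as float64 gauges does not lose precision in practice.
}
```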
Addresses https://github.com/neondatabase/neon/issues/7593 --- vm-image-spec.yaml | 34 +++++++++++++++++++++++++++++++--- 1 file changed, 31 insertions(+), 3 deletions(-) diff --git a/vm-image-spec.yaml b/vm-image-spec.yaml index fa7cd014bf..0f9d56e466 100644 --- a/vm-image-spec.yaml +++ b/vm-image-spec.yaml @@ -254,8 +254,8 @@ files: select case when pg_catalog.pg_is_in_recovery() - then pg_last_wal_replay_lsn() - else pg_current_wal_lsn() + then (pg_last_wal_replay_lsn() - '0/0')::FLOAT8 + else (pg_current_wal_lsn() - '0/0')::FLOAT8 end as lsn; - metric_name: replication_delay_bytes @@ -294,6 +294,9 @@ files: query: | SELECT checkpoints_timed FROM pg_stat_bgwriter; + # In all the below metrics, we cast LSNs to floats because Prometheus only supports floats. + # It's probably fine because float64 can store integers from -2^53 to +2^53 exactly. + # Number of slots is limited by max_replication_slots, so collecting position for all of them shouldn't be bad. - metric_name: logical_slot_restart_lsn type: gauge @@ -302,7 +305,32 @@ files: - slot_name values: [restart_lsn] query: | - select slot_name, restart_lsn from pg_replication_slots where slot_type = 'logical'; + select slot_name, (restart_lsn - '0/0')::FLOAT8 from pg_replication_slots where slot_type = 'logical'; + + - metric_name: retained_wal + type: gauge + help: 'Retained WAL in inactive replication slots' + key_labels: + - slot_name + values: [retained_wal] + query: | + SELECT slot_name, pg_wal_lsn_diff(pg_current_wal_lsn(), restart_lsn)::FLOAT8 AS retained_wal + FROM pg_replication_slots + WHERE active = false; + + - metric_name: wal_is_lost + type: gauge + help: 'Whether or not the replication slot\'s wal_status is lost' + key_labels: + - slot_name + values: [wal_status_is_lost] + query: | + SELECT slot_name, + CASE + WHEN wal_status = 'lost' THEN 1 + ELSE 0 + END AS wal_status_is_lost + FROM pg_replication_slots; - filename: neon_collector_autoscaling.yml content: | collector_name: neon_collector_autoscaling From baeb58432f77809f68eb648481e81ed5af15a8b0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 21 May 2024 10:48:17 +0000 Subject: [PATCH 024/220] build(deps): bump requests from 2.31.0 to 2.32.0 (#7816) --- poetry.lock | 21 ++++++++++++++++----- pyproject.toml | 2 +- 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/poetry.lock b/poetry.lock index ef9f572b17..25c0c7398d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2405,6 +2405,7 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, 
@@ -2529,13 +2530,13 @@ files = [ [[package]] name = "requests" -version = "2.31.0" +version = "2.32.0" description = "Python HTTP for Humans." optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, - {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, + {file = "requests-2.32.0-py3-none-any.whl", hash = "sha256:f2c3881dddb70d056c5bd7600a4fae312b2a300e39be6a118d30b90bd27262b5"}, + {file = "requests-2.32.0.tar.gz", hash = "sha256:fa5490319474c82ef1d2c9bc459d3652e3ae4ef4c4ebdd18a21145a47ca4b6b8"}, ] [package.dependencies] @@ -2959,6 +2960,16 @@ files = [ {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8ad85f7f4e20964db4daadcab70b47ab05c7c1cf2a7c1e51087bfaa83831854c"}, {file = "wrapt-1.14.1-cp310-cp310-win32.whl", hash = "sha256:a9a52172be0b5aae932bef82a79ec0a0ce87288c7d132946d645eba03f0ad8a8"}, {file = "wrapt-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:6d323e1554b3d22cfc03cd3243b5bb815a51f5249fdcbb86fda4bf62bab9e164"}, + {file = "wrapt-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ecee4132c6cd2ce5308e21672015ddfed1ff975ad0ac8d27168ea82e71413f55"}, + {file = "wrapt-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2020f391008ef874c6d9e208b24f28e31bcb85ccff4f335f15a3251d222b92d9"}, + {file = "wrapt-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2feecf86e1f7a86517cab34ae6c2f081fd2d0dac860cb0c0ded96d799d20b335"}, + {file = "wrapt-1.14.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:240b1686f38ae665d1b15475966fe0472f78e71b1b4903c143a842659c8e4cb9"}, + {file = "wrapt-1.14.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9008dad07d71f68487c91e96579c8567c98ca4c3881b9b113bc7b33e9fd78b8"}, + {file = "wrapt-1.14.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6447e9f3ba72f8e2b985a1da758767698efa72723d5b59accefd716e9e8272bf"}, + {file = "wrapt-1.14.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:acae32e13a4153809db37405f5eba5bac5fbe2e2ba61ab227926a22901051c0a"}, + {file = "wrapt-1.14.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:49ef582b7a1152ae2766557f0550a9fcbf7bbd76f43fbdc94dd3bf07cc7168be"}, + {file = "wrapt-1.14.1-cp311-cp311-win32.whl", hash = "sha256:358fe87cc899c6bb0ddc185bf3dbfa4ba646f05b1b0b9b5a27c2cb92c2cea204"}, + {file = "wrapt-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:26046cd03936ae745a502abf44dac702a5e6880b2b01c29aea8ddf3353b68224"}, {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:43ca3bbbe97af00f49efb06e352eae40434ca9d915906f77def219b88e85d907"}, {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:6b1a564e6cb69922c7fe3a678b9f9a3c54e72b469875aa8018f18b4d1dd1adf3"}, {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:00b6d4ea20a906c0ca56d84f93065b398ab74b927a7a3dbd470f6fc503f95dc3"}, @@ -3196,4 +3207,4 @@ cffi = ["cffi (>=1.11)"] [metadata] lock-version = "2.0" python-versions = "^3.9" -content-hash = "dcde14c58a32bda5f123319a069352c458b3719f3c62977991eebb9803a46a9e" +content-hash = "16ebd6a46768be7f67dbdb4ee5903b167d94edc9965f29252f038c67e9e907b0" diff --git a/pyproject.toml b/pyproject.toml index ac7f9b061c..131d1121f7 100644 --- a/pyproject.toml +++ 
b/pyproject.toml @@ -10,7 +10,7 @@ pytest = "^7.4.4" psycopg2-binary = "^2.9.6" typing-extensions = "^4.6.1" PyJWT = {version = "^2.1.0", extras = ["crypto"]} -requests = "^2.31.0" +requests = "^2.32.0" pytest-xdist = "^3.3.1" asyncpg = "^0.29.0" aiopg = "^1.4.0" From 4ce6e2d2fc83ff8664eef2f80912cade71240669 Mon Sep 17 00:00:00 2001 From: John Spray Date: Tue, 21 May 2024 13:46:04 +0100 Subject: [PATCH 025/220] pageserver: fix secondary progress stats when layers are 404 (#7814) ## Problem Noticed this issue in staging. When a tenant is under somewhat heavy timeline creation/deletion thrashing, it becomes quite common for secondary downloads to encounter 404s downloading layers. This is tolerated by design, because heatmaps are not guaranteed to be up to date with what layers/timelines actually exist. However, we were not updating the SecondaryProgress structure in this case, so after such a download pass, we would leave a SecondaryProgress state with lower "downloaded" stats than "total" stats. This causes the storage controller to consider this secondary location inelegible for optimization actions such as we do after shard splits This issue has relative low impact because a typical tenant will eventually upload a heatmap where we do download all the layers and thereby enable the controller to progress with migrations -- the heavy thrashing of timeline creation/deletion is an artifact of our nightly stress tests. ## Summary of changes - In the layer 404 case, subtract the skipped layer's stats from the totals, so that at the end of this download pass we should still end up in a complete state. - When updating `last_downloaded`, do a sanity check that our progress is complete. In debug builds, assert out if this is not the case. In prod builds, correct the stats and log a warning. --- pageserver/src/tenant/secondary/downloader.rs | 92 +++++++++++++------ 1 file changed, 63 insertions(+), 29 deletions(-) diff --git a/pageserver/src/tenant/secondary/downloader.rs b/pageserver/src/tenant/secondary/downloader.rs index 609e1431cf..870475eb57 100644 --- a/pageserver/src/tenant/secondary/downloader.rs +++ b/pageserver/src/tenant/secondary/downloader.rs @@ -569,6 +569,39 @@ impl<'a> TenantDownloader<'a> { heatmap.timelines.len() ); + // Get or initialize the local disk state for the timelines we will update + let mut timeline_states = HashMap::new(); + for timeline in &heatmap.timelines { + let timeline_state = self + .secondary_state + .detail + .lock() + .unwrap() + .timelines + .get(&timeline.timeline_id) + .cloned(); + + let timeline_state = match timeline_state { + Some(t) => t, + None => { + // We have no existing state: need to scan local disk for layers first. + let timeline_state = + init_timeline_state(self.conf, tenant_shard_id, timeline).await; + + // Re-acquire detail lock now that we're done with async load from local FS + self.secondary_state + .detail + .lock() + .unwrap() + .timelines + .insert(timeline.timeline_id, timeline_state.clone()); + timeline_state + } + }; + + timeline_states.insert(timeline.timeline_id, timeline_state); + } + // Clean up any local layers that aren't in the heatmap. We do this first for all timelines, on the general // principle that deletions should be done before writes wherever possible, and so that we can use this // phase to initialize our SecondaryProgress. 
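The hunks below implement both halves of this: subtracting a 404'd layer from the totals when it is skipped, and a final sanity check when `last_downloaded` is updated. A minimal self-contained model of that invariant, with a simplified stand-in for `SecondaryProgress`:

```rust
/// Simplified stand-in for the pageserver's `SecondaryProgress` stats.
#[derive(Debug)]
struct Progress {
    layers_downloaded: u64,
    layers_total: u64,
    bytes_downloaded: u64,
    bytes_total: u64,
}

impl Progress {
    /// A heatmap layer turned out to be gone remotely (404): drop it from the totals so the
    /// download pass can still end in a complete state.
    fn on_layer_missing(&mut self, layer_bytes: u64) {
        self.layers_total = self.layers_total.saturating_sub(1);
        self.bytes_total = self.bytes_total.saturating_sub(layer_bytes);
    }

    /// The invariant checked before the heatmap etag is recorded as fully downloaded.
    fn is_complete(&self) -> bool {
        self.layers_downloaded == self.layers_total && self.bytes_downloaded == self.bytes_total
    }
}

fn main() {
    let mut p = Progress {
        layers_downloaded: 9,
        layers_total: 10,
        bytes_downloaded: 900,
        bytes_total: 1000,
    };
    // The tenth layer raced with compaction/GC and no longer exists in remote storage.
    p.on_layer_missing(100);
    assert!(p.is_complete());
}
```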
@@ -579,6 +612,10 @@ impl<'a> TenantDownloader<'a> { // Download the layers in the heatmap for timeline in heatmap.timelines { + let timeline_state = timeline_states + .remove(&timeline.timeline_id) + .expect("Just populated above"); + if self.secondary_state.cancel.is_cancelled() { tracing::debug!( "Cancelled before downloading timeline {}", @@ -588,7 +625,7 @@ impl<'a> TenantDownloader<'a> { } let timeline_id = timeline.timeline_id; - self.download_timeline(timeline, ctx) + self.download_timeline(timeline, timeline_state, ctx) .instrument(tracing::info_span!( "secondary_download_timeline", tenant_id=%tenant_shard_id.tenant_id, @@ -609,6 +646,22 @@ impl<'a> TenantDownloader<'a> { .unwrap_or(DEFAULT_DOWNLOAD_INTERVAL), }); + // Robustness: we should have updated progress properly, but in case we didn't, make sure + // we don't leave the tenant in a state where we claim to have successfully downloaded + // everything, but our progress is incomplete. The invariant here should be that if + // we have set `last_download` to this heatmap's etag, then the next time we see that + // etag we can safely do no work (i.e. we must be complete). + let mut progress = self.secondary_state.progress.lock().unwrap(); + debug_assert!(progress.layers_downloaded == progress.layers_total); + debug_assert!(progress.bytes_downloaded == progress.bytes_total); + if progress.layers_downloaded != progress.layers_total + || progress.bytes_downloaded != progress.bytes_total + { + tracing::warn!("Correcting drift in progress stats ({progress:?})"); + progress.layers_downloaded = progress.layers_total; + progress.bytes_downloaded = progress.bytes_total; + } + Ok(()) } @@ -784,6 +837,7 @@ impl<'a> TenantDownloader<'a> { async fn download_timeline( &self, timeline: HeatMapTimeline, + timeline_state: SecondaryDetailTimeline, ctx: &RequestContext, ) -> Result<(), UpdateError> { debug_assert_current_span_has_tenant_and_timeline_id(); @@ -792,34 +846,6 @@ impl<'a> TenantDownloader<'a> { // Accumulate updates to the state let mut touched = Vec::new(); - // Clone a view of what layers already exist on disk - let timeline_state = self - .secondary_state - .detail - .lock() - .unwrap() - .timelines - .get(&timeline.timeline_id) - .cloned(); - - let timeline_state = match timeline_state { - Some(t) => t, - None => { - // We have no existing state: need to scan local disk for layers first. - let timeline_state = - init_timeline_state(self.conf, tenant_shard_id, &timeline).await; - - // Re-acquire detail lock now that we're done with async load from local FS - self.secondary_state - .detail - .lock() - .unwrap() - .timelines - .insert(timeline.timeline_id, timeline_state.clone()); - timeline_state - } - }; - tracing::debug!(timeline_id=%timeline.timeline_id, "Downloading layers, {} in heatmap", timeline.layers.len()); let mut download_futs = Vec::new(); @@ -1009,6 +1035,14 @@ impl<'a> TenantDownloader<'a> { "Skipped downloading missing layer {}, raced with compaction/gc?", layer.name ); + + // If the layer is 404, adjust the progress statistics to reflect that we will not download it. 
+ let mut progress = self.secondary_state.progress.lock().unwrap(); + progress.layers_total = progress.layers_total.saturating_sub(1); + progress.bytes_total = progress + .bytes_total + .saturating_sub(layer.metadata.file_size); + return Ok(None); } Err(e) => return Err(e.into()), From 478cc37a70c4357e7817e6c8834264be4d3e3bfd Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 21 May 2024 14:32:29 +0300 Subject: [PATCH 026/220] Propagate standby apply LSN to pageserver to hold off GC. To avoid pageserver gc'ing data needed by standby, propagate standby apply LSN through standby -> safekeeper -> broker -> pageserver flow and hold off GC for it. Iteration of GC resets the value to remove the horizon when standby goes away -- pushes are assumed to happen at least once between gc iterations. As a safety guard max allowed lag compared to normal GC horizon is hardcoded as 10GB. Add test for the feature. Co-authored-by: Konstantin Knizhnik --- libs/safekeeper_api/src/models.rs | 3 + pageserver/src/tenant/timeline.rs | 28 +++++- .../walreceiver/connection_manager.rs | 13 +++ safekeeper/src/broker.rs | 1 + safekeeper/src/http/routes.rs | 1 + safekeeper/src/send_wal.rs | 85 +++++++++++++++---- safekeeper/src/timeline.rs | 7 +- storage_broker/benches/rps.rs | 1 + storage_broker/proto/broker.proto | 3 + storage_broker/src/bin/storage_broker.rs | 1 + test_runner/fixtures/neon_fixtures.py | 11 +++ test_runner/regress/test_hot_standby.py | 47 +++++++--- 12 files changed, 169 insertions(+), 32 deletions(-) diff --git a/libs/safekeeper_api/src/models.rs b/libs/safekeeper_api/src/models.rs index ce5a1e411e..2fbc333075 100644 --- a/libs/safekeeper_api/src/models.rs +++ b/libs/safekeeper_api/src/models.rs @@ -50,6 +50,9 @@ pub struct SkTimelineInfo { pub safekeeper_connstr: Option, #[serde(default)] pub http_connstr: Option, + // Minimum of all active RO replicas flush LSN + #[serde(default = "lsn_invalid")] + pub standby_horizon: Lsn, } #[derive(Debug, Clone, Deserialize, Serialize)] diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 7f2a41d90c..2c43c26359 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -269,6 +269,8 @@ pub struct Timeline { // Atomic would be more appropriate here. last_freeze_ts: RwLock, + pub(crate) standby_horizon: AtomicLsn, + // WAL redo manager. `None` only for broken tenants. walredo_mgr: Option>, @@ -2279,6 +2281,8 @@ impl Timeline { compaction_lock: tokio::sync::Mutex::default(), gc_lock: tokio::sync::Mutex::default(), + standby_horizon: AtomicLsn::new(0), + timeline_get_throttle: resources.timeline_get_throttle, aux_files: tokio::sync::Mutex::new(AuxFilesState { @@ -4844,7 +4848,29 @@ impl Timeline { (horizon_cutoff, pitr_cutoff, retain_lsns) }; - let new_gc_cutoff = Lsn::min(horizon_cutoff, pitr_cutoff); + let mut new_gc_cutoff = Lsn::min(horizon_cutoff, pitr_cutoff); + let standby_horizon = self.standby_horizon.load(); + // Hold GC for the standby, but as a safety guard do it only within some + // reasonable lag. 
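A standalone distillation of the guard described above (hypothetical function over plain `u64` values; the real code below works on `Lsn` with `checked_sub`): the standby horizon holds back the GC cutoff only while the standby is within the allowed lag.

```rust
const MAX_ALLOWED_STANDBY_LAG: u64 = 10 << 30; // 10 GiB, matching the patch's 10u64 << 30

/// Hypothetical distillation: clamp the planned cutoff to the standby's apply LSN
/// only if the standby is not lagging unreasonably far behind.
fn effective_gc_cutoff(planned_cutoff: u64, standby_horizon: Option<u64>) -> u64 {
    match standby_horizon {
        Some(horizon) if planned_cutoff.saturating_sub(horizon) < MAX_ALLOWED_STANDBY_LAG => {
            planned_cutoff.min(horizon)
        }
        _ => planned_cutoff, // no replica feedback, or the replica is too far behind to hold GC
    }
}

fn main() {
    assert_eq!(effective_gc_cutoff(1_000, Some(900)), 900); // close behind: hold GC for it
    assert_eq!(effective_gc_cutoff(100 << 30, Some(1 << 30)), 100 << 30); // >10 GiB behind: ignore
    assert_eq!(effective_gc_cutoff(1_000, None), 1_000); // no standby connected
}
```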
+ if standby_horizon != Lsn::INVALID { + if let Some(standby_lag) = new_gc_cutoff.checked_sub(standby_horizon) { + const MAX_ALLOWED_STANDBY_LAG: u64 = 10u64 << 30; // 10 GB + if standby_lag.0 < MAX_ALLOWED_STANDBY_LAG { + new_gc_cutoff = Lsn::min(standby_horizon, new_gc_cutoff); + trace!("holding off GC for standby apply LSN {}", standby_horizon); + } else { + warn!( + "standby is lagging for more than {}MB, not holding gc for it", + MAX_ALLOWED_STANDBY_LAG / 1024 / 1024 + ) + } + } + } + + // Reset standby horizon to ignore it if it is not updated till next GC. + // It is an easy way to unset it when standby disappears without adding + // more conf options. + self.standby_horizon.store(Lsn::INVALID); let res = self .gc_timeline(horizon_cutoff, pitr_cutoff, retain_lsns, new_gc_cutoff) diff --git a/pageserver/src/tenant/timeline/walreceiver/connection_manager.rs b/pageserver/src/tenant/timeline/walreceiver/connection_manager.rs index 991e4ac045..a3c7adae44 100644 --- a/pageserver/src/tenant/timeline/walreceiver/connection_manager.rs +++ b/pageserver/src/tenant/timeline/walreceiver/connection_manager.rs @@ -705,6 +705,7 @@ impl ConnectionManagerState { commit_lsn: info.commit_lsn, safekeeper_connstr: info.safekeeper_connstr, availability_zone: info.availability_zone, + standby_horizon: info.standby_horizon, } } MessageType::SafekeeperDiscoveryResponse => { @@ -725,6 +726,17 @@ impl ConnectionManagerState { WALRECEIVER_BROKER_UPDATES.inc(); + trace!( + "safekeeper info update: standby_horizon(cutoff)={}", + timeline_update.standby_horizon + ); + if timeline_update.standby_horizon != 0 { + // ignore reports from safekeepers not connected to replicas + self.timeline + .standby_horizon + .store(Lsn(timeline_update.standby_horizon)); + } + let new_safekeeper_id = NodeId(timeline_update.safekeeper_id); let old_entry = self.wal_stream_candidates.insert( new_safekeeper_id, @@ -1094,6 +1106,7 @@ mod tests { commit_lsn, safekeeper_connstr: safekeeper_connstr.to_owned(), availability_zone: None, + standby_horizon: 0, }, latest_update, } diff --git a/safekeeper/src/broker.rs b/safekeeper/src/broker.rs index ea16ce450f..46a51438ea 100644 --- a/safekeeper/src/broker.rs +++ b/safekeeper/src/broker.rs @@ -186,6 +186,7 @@ async fn discover_loop(conf: SafeKeeperConf, stats: Arc) -> Result< commit_lsn: sk_info.commit_lsn, safekeeper_connstr: sk_info.safekeeper_connstr, availability_zone: sk_info.availability_zone, + standby_horizon: 0, }; // note this is a blocking call diff --git a/safekeeper/src/http/routes.rs b/safekeeper/src/http/routes.rs index 30d0081a47..808bb1e490 100644 --- a/safekeeper/src/http/routes.rs +++ b/safekeeper/src/http/routes.rs @@ -350,6 +350,7 @@ async fn record_safekeeper_info(mut request: Request) -> Result Self { + StandbyFeedback { + reply: StandbyReply::empty(), + hs_feedback: HotStandbyFeedback::empty(), + } + } } /// WalSenders registry. Timeline holds it (wrapped in Arc). @@ -162,8 +171,8 @@ impl WalSenders { } /// Get aggregated hot standby feedback (we send it to compute). - pub fn get_hotstandby(self: &Arc) -> HotStandbyFeedback { - self.mutex.lock().agg_hs_feedback + pub fn get_hotstandby(self: &Arc) -> StandbyFeedback { + self.mutex.lock().agg_standby_feedback } /// Record new pageserver feedback, update aggregated values. 
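The aggregation added below in `update_reply_feedback` keeps the minimum valid write/flush/apply LSN across the walsenders that report standby feedback, with `Lsn::INVALID` acting as the unset sentinel, so the `standby_horizon` pushed to the broker is the apply LSN of the furthest-behind replica. A condensed model using `Option` in place of the sentinel (hypothetical helper, not part of the patch):

```rust
/// Hypothetical helper: the same "minimum of the valid values" fold, with Option<u64>
/// standing in for Lsn and None standing in for the Lsn::INVALID sentinel.
fn aggregate_apply_lsn(replica_apply_lsns: &[Option<u64>]) -> Option<u64> {
    replica_apply_lsns.iter().flatten().copied().min()
}

fn main() {
    // Two hot standbys report apply LSNs; a third walsender reports nothing.
    assert_eq!(aggregate_apply_lsn(&[Some(0x30), None, Some(0x20)]), Some(0x20));
    // With no standby feedback at all the horizon stays unset and does not hold back GC.
    assert_eq!(aggregate_apply_lsn(&[None, None]), None);
}
```

Taking the minimum rather than any single reporter's value is what makes the resulting horizon safe for every connected standby at once.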
@@ -184,6 +193,10 @@ impl WalSenders { fn record_standby_reply(self: &Arc, id: WalSenderId, reply: &StandbyReply) { let mut shared = self.mutex.lock(); let slot = shared.get_slot_mut(id); + debug!( + "Record standby reply: ts={} apply_lsn={}", + reply.reply_ts, reply.apply_lsn + ); match &mut slot.feedback { ReplicationFeedback::Standby(sf) => sf.reply = *reply, ReplicationFeedback::Pageserver(_) => { @@ -208,7 +221,7 @@ impl WalSenders { }) } } - shared.update_hs_feedback(); + shared.update_reply_feedback(); } /// Get remote_consistent_lsn reported by the pageserver. Returns None if @@ -226,13 +239,13 @@ impl WalSenders { fn unregister(self: &Arc, id: WalSenderId) { let mut shared = self.mutex.lock(); shared.slots[id] = None; - shared.update_hs_feedback(); + shared.update_reply_feedback(); } } struct WalSendersShared { // aggregated over all walsenders value - agg_hs_feedback: HotStandbyFeedback, + agg_standby_feedback: StandbyFeedback, // last feedback ever received from any pageserver, empty if none last_ps_feedback: PageserverFeedback, // total counter of pageserver feedbacks received @@ -243,7 +256,7 @@ struct WalSendersShared { impl WalSendersShared { fn new() -> Self { WalSendersShared { - agg_hs_feedback: HotStandbyFeedback::empty(), + agg_standby_feedback: StandbyFeedback::empty(), last_ps_feedback: PageserverFeedback::empty(), ps_feedback_counter: 0, slots: Vec::new(), @@ -260,10 +273,11 @@ impl WalSendersShared { self.slots[id].as_mut().expect("walsender doesn't exist") } - /// Update aggregated hot standy feedback. We just take min of valid xmins + /// Update aggregated hot standy and normal reply feedbacks. We just take min of valid xmins /// and ts. - fn update_hs_feedback(&mut self) { + fn update_reply_feedback(&mut self) { let mut agg = HotStandbyFeedback::empty(); + let mut reply_agg = StandbyReply::empty(); for ws_state in self.slots.iter().flatten() { if let ReplicationFeedback::Standby(standby_feedback) = ws_state.feedback { let hs_feedback = standby_feedback.hs_feedback; @@ -276,7 +290,7 @@ impl WalSendersShared { } else { agg.xmin = hs_feedback.xmin; } - agg.ts = min(agg.ts, hs_feedback.ts); + agg.ts = max(agg.ts, hs_feedback.ts); } if hs_feedback.catalog_xmin != INVALID_FULL_TRANSACTION_ID { if agg.catalog_xmin != INVALID_FULL_TRANSACTION_ID { @@ -284,11 +298,43 @@ impl WalSendersShared { } else { agg.catalog_xmin = hs_feedback.catalog_xmin; } - agg.ts = min(agg.ts, hs_feedback.ts); + agg.ts = max(agg.ts, hs_feedback.ts); + } + let reply = standby_feedback.reply; + if reply.write_lsn != Lsn::INVALID { + if reply_agg.write_lsn != Lsn::INVALID { + reply_agg.write_lsn = Lsn::min(reply_agg.write_lsn, reply.write_lsn); + } else { + reply_agg.write_lsn = reply.write_lsn; + } + } + if reply.flush_lsn != Lsn::INVALID { + if reply_agg.flush_lsn != Lsn::INVALID { + reply_agg.flush_lsn = Lsn::min(reply_agg.flush_lsn, reply.flush_lsn); + } else { + reply_agg.flush_lsn = reply.flush_lsn; + } + } + if reply.apply_lsn != Lsn::INVALID { + if reply_agg.apply_lsn != Lsn::INVALID { + reply_agg.apply_lsn = Lsn::min(reply_agg.apply_lsn, reply.apply_lsn); + } else { + reply_agg.apply_lsn = reply.apply_lsn; + } + } + if reply.reply_ts != 0 { + if reply_agg.reply_ts != 0 { + reply_agg.reply_ts = TimestampTz::min(reply_agg.reply_ts, reply.reply_ts); + } else { + reply_agg.reply_ts = reply.reply_ts; + } } } } - self.agg_hs_feedback = agg; + self.agg_standby_feedback = StandbyFeedback { + reply: reply_agg, + hs_feedback: agg, + }; } } @@ -793,8 +839,11 @@ mod tests { fn 
test_hs_feedback_no_valid() { let mut wss = WalSendersShared::new(); push_feedback(&mut wss, hs_feedback(1, INVALID_FULL_TRANSACTION_ID)); - wss.update_hs_feedback(); - assert_eq!(wss.agg_hs_feedback.xmin, INVALID_FULL_TRANSACTION_ID); + wss.update_reply_feedback(); + assert_eq!( + wss.agg_standby_feedback.hs_feedback.xmin, + INVALID_FULL_TRANSACTION_ID + ); } #[test] @@ -803,7 +852,7 @@ mod tests { push_feedback(&mut wss, hs_feedback(1, INVALID_FULL_TRANSACTION_ID)); push_feedback(&mut wss, hs_feedback(1, 42)); push_feedback(&mut wss, hs_feedback(1, 64)); - wss.update_hs_feedback(); - assert_eq!(wss.agg_hs_feedback.xmin, 42); + wss.update_reply_feedback(); + assert_eq!(wss.agg_standby_feedback.hs_feedback.xmin, 42); } } diff --git a/safekeeper/src/timeline.rs b/safekeeper/src/timeline.rs index 64f764f191..e97247dc7c 100644 --- a/safekeeper/src/timeline.rs +++ b/safekeeper/src/timeline.rs @@ -248,6 +248,7 @@ impl SharedState { &self, ttid: &TenantTimelineId, conf: &SafeKeeperConf, + standby_apply_lsn: Lsn, ) -> SafekeeperTimelineInfo { SafekeeperTimelineInfo { safekeeper_id: conf.my_id.0, @@ -270,6 +271,7 @@ impl SharedState { backup_lsn: self.sk.state.inmem.backup_lsn.0, local_start_lsn: self.sk.state.local_start_lsn.0, availability_zone: conf.availability_zone.clone(), + standby_horizon: standby_apply_lsn.0, } } @@ -663,7 +665,7 @@ impl Timeline { // if this is AppendResponse, fill in proper hot standby feedback. if let Some(AcceptorProposerMessage::AppendResponse(ref mut resp)) = rmsg { - resp.hs_feedback = self.walsenders.get_hotstandby(); + resp.hs_feedback = self.walsenders.get_hotstandby().hs_feedback; } commit_lsn = shared_state.sk.state.inmem.commit_lsn; @@ -716,7 +718,8 @@ impl Timeline { /// Get safekeeper info for broadcasting to broker and other peers. pub async fn get_safekeeper_info(&self, conf: &SafeKeeperConf) -> SafekeeperTimelineInfo { let shared_state = self.write_shared_state().await; - shared_state.get_safekeeper_info(&self.ttid, conf) + let standby_apply_lsn = self.walsenders.get_hotstandby().reply.apply_lsn; + shared_state.get_safekeeper_info(&self.ttid, conf, standby_apply_lsn) } /// Update timeline state with peer safekeeper data. diff --git a/storage_broker/benches/rps.rs b/storage_broker/benches/rps.rs index d66cbefa45..1a6fb7fedf 100644 --- a/storage_broker/benches/rps.rs +++ b/storage_broker/benches/rps.rs @@ -147,6 +147,7 @@ async fn publish(client: Option, n_keys: u64) { http_connstr: "zenith-1-sk-1.local:7677".to_owned(), local_start_lsn: 0, availability_zone: None, + standby_horizon: 0, }; counter += 1; yield info; diff --git a/storage_broker/proto/broker.proto b/storage_broker/proto/broker.proto index 7d1b63d23f..a420fd9c66 100644 --- a/storage_broker/proto/broker.proto +++ b/storage_broker/proto/broker.proto @@ -42,6 +42,7 @@ message SafekeeperTimelineInfo { uint64 remote_consistent_lsn = 7; uint64 peer_horizon_lsn = 8; uint64 local_start_lsn = 9; + uint64 standby_horizon = 14; // A connection string to use for WAL receiving. string safekeeper_connstr = 10; // HTTP endpoint connection string @@ -105,4 +106,6 @@ message SafekeeperDiscoveryResponse { string safekeeper_connstr = 4; // Availability zone of a safekeeper. 
optional string availability_zone = 5; + // Replica apply LSN + uint64 standby_horizon = 6; } diff --git a/storage_broker/src/bin/storage_broker.rs b/storage_broker/src/bin/storage_broker.rs index 8c88b61abc..0a4af543ab 100644 --- a/storage_broker/src/bin/storage_broker.rs +++ b/storage_broker/src/bin/storage_broker.rs @@ -736,6 +736,7 @@ mod tests { http_connstr: "neon-1-sk-1.local:7677".to_owned(), local_start_lsn: 0, availability_zone: None, + standby_horizon: 0, }) } diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index 23f30804b4..41377e2db2 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -4288,6 +4288,17 @@ def wait_replica_caughtup(primary: Endpoint, secondary: Endpoint): time.sleep(1) +def log_replica_lag(primary: Endpoint, secondary: Endpoint): + last_replay_lsn = Lsn( + secondary.safe_psql_scalar("SELECT pg_last_wal_replay_lsn()", log_query=False) + ) + primary_lsn = Lsn( + primary.safe_psql_scalar("SELECT pg_current_wal_flush_lsn()", log_query=False) + ) + lag = primary_lsn - last_replay_lsn + log.info(f"primary_lsn={primary_lsn}, replay_lsn={last_replay_lsn}, lag={lag}") + + def wait_for_last_flush_lsn( env: NeonEnv, endpoint: Endpoint, diff --git a/test_runner/regress/test_hot_standby.py b/test_runner/regress/test_hot_standby.py index 179cc273ec..31f436cb4c 100644 --- a/test_runner/regress/test_hot_standby.py +++ b/test_runner/regress/test_hot_standby.py @@ -1,9 +1,20 @@ import os import re +import threading import time +from functools import partial +import pytest from fixtures.log_helper import log -from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder, tenant_get_shards, wait_replica_caughtup +from fixtures.neon_fixtures import ( + NeonEnv, + NeonEnvBuilder, + PgBin, + log_replica_lag, + tenant_get_shards, + wait_replica_caughtup, +) +from fixtures.utils import wait_until # Check for corrupted WAL messages which might otherwise go unnoticed if @@ -104,19 +115,28 @@ def test_2_replicas_start(neon_simple_env: NeonEnv): wait_replica_caughtup(primary, secondary2) -# We had an issue that a standby server made GetPage requests with an -# old LSN, based on the last-written LSN cache, to avoid waits in the -# pageserver. However, requesting a page with a very old LSN, such -# that the GC horizon has already advanced past it, results in an -# error from the pageserver: -# "Bad request: tried to request a page version that was garbage collected" +# Test two different scenarios related to gc of data needed by hot standby. # -# To avoid that, the compute<-> pageserver protocol was updated so -# that that the standby now sends two LSNs, the old last-written LSN -# and the current replay LSN. +# When pause_apply is False, standby is mostly caught up with the primary. +# However, in compute <-> pageserver protocol version 1 only one LSN had been +# sent to the pageserver in page request, and to avoid waits in the pageserver +# it was last-written LSN cache value. If page hasn't been updated for a long +# time that resulted in an error from the pageserver: "Bad request: tried to +# request a page version that was garbage collected". For primary this wasn't a +# problem because pageserver always bumped LSN to the newest one; for standy +# that would be incorrect since we might get page fresher then apply LSN. 
Hence, +# in protocol version v2 two LSNs were introduced: main request_lsn (apply LSN +# in case of standby) and not_modified_since which could be used as an +# optimization to avoid waiting. # # https://github.com/neondatabase/neon/issues/6211 -def test_hot_standby_gc(neon_env_builder: NeonEnvBuilder): +# +# When pause_apply is True we model standby lagging behind primary (e.g. due to +# high max_standby_streaming_delay). To prevent pageserver from removing data +# still needed by the standby apply LSN is propagated in standby -> safekeepers +# -> broker -> pageserver flow so that pageserver could hold off gc for it. +@pytest.mark.parametrize("pause_apply", [False, True]) +def test_hot_standby_gc(neon_env_builder: NeonEnvBuilder, pause_apply: bool): tenant_conf = { # set PITR interval to be small, so we can do GC "pitr_interval": "0 s", @@ -160,6 +180,9 @@ def test_hot_standby_gc(neon_env_builder: NeonEnvBuilder): # so we still remember the LSNs of the pages. s_cur.execute("SELECT clear_buffer_cache()") + if pause_apply: + s_cur.execute("SELECT pg_wal_replay_pause()") + # Do other stuff on the primary, to advance the WAL p_cur.execute("CREATE TABLE test2 AS SELECT generate_series(1, 1000000) AS g") @@ -176,6 +199,8 @@ def test_hot_standby_gc(neon_env_builder: NeonEnvBuilder): # generates use old not_modified_since LSNs, older than # the GC cutoff, but new request LSNs. (In protocol # version 1 there was only one LSN, and this failed.) + log_replica_lag(primary, secondary) s_cur.execute("SELECT COUNT(*) FROM test") + log_replica_lag(primary, secondary) res = s_cur.fetchone() assert res[0] == 10000 From f54c3b96e08f78e56d1f82f8305994e9cc8f867f Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 21 May 2024 14:42:25 +0300 Subject: [PATCH 027/220] Fix bugs in hot standby feedback propagation and add test for it. 
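
The aggregation rule the walproposer side converges on here can be sketched roughly as: ignore invalid xmins and take the minimum of the valid ones. A small illustrative sketch of that rule (plain Python, not code from this tree; `INVALID` stands in for InvalidFullTransactionId):

```python
INVALID = 0  # stand-in for InvalidFullTransactionId / INVALID_FULL_TRANSACTION_ID

def combine_xmins(xmins):
    # Skip invalid entries and take the minimum of the valid ones; if nothing
    # valid was reported, the aggregate stays invalid.
    valid = [x for x in xmins if x != INVALID]
    return min(valid) if valid else INVALID

# Mirrors the existing send_wal.rs unit tests: invalid feedback is ignored,
# and the smaller of 42 and 64 wins.
assert combine_xmins([INVALID, 42, 64]) == 42
assert combine_xmins([INVALID, INVALID]) == INVALID
```
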
Co-authored-by: Konstantin Knizhnik --- pgxn/neon/walproposer_pg.c | 19 +++--- safekeeper/src/send_wal.rs | 9 ++- test_runner/regress/test_hot_standby.py | 88 +++++++++++++++++++++++++ 3 files changed, 104 insertions(+), 12 deletions(-) diff --git a/pgxn/neon/walproposer_pg.c b/pgxn/neon/walproposer_pg.c index e5ef93b456..492a46fd54 100644 --- a/pgxn/neon/walproposer_pg.c +++ b/pgxn/neon/walproposer_pg.c @@ -1852,34 +1852,30 @@ static void CombineHotStanbyFeedbacks(HotStandbyFeedback *hs, WalProposer *wp) { hs->ts = 0; - hs->xmin.value = ~0; /* largest unsigned value */ - hs->catalog_xmin.value = ~0; /* largest unsigned value */ + hs->xmin = InvalidFullTransactionId; + hs->catalog_xmin = InvalidFullTransactionId; for (int i = 0; i < wp->n_safekeepers; i++) { - if (wp->safekeeper[i].appendResponse.hs.ts != 0) + + if (wp->safekeeper[i].state == SS_ACTIVE) { HotStandbyFeedback *skhs = &wp->safekeeper[i].appendResponse.hs; if (FullTransactionIdIsNormal(skhs->xmin) - && FullTransactionIdPrecedes(skhs->xmin, hs->xmin)) + && (!FullTransactionIdIsValid(hs->xmin) || FullTransactionIdPrecedes(skhs->xmin, hs->xmin))) { hs->xmin = skhs->xmin; hs->ts = skhs->ts; } if (FullTransactionIdIsNormal(skhs->catalog_xmin) - && FullTransactionIdPrecedes(skhs->catalog_xmin, hs->xmin)) + && (!FullTransactionIdIsValid(hs->catalog_xmin) || FullTransactionIdPrecedes(skhs->catalog_xmin, hs->catalog_xmin))) { hs->catalog_xmin = skhs->catalog_xmin; hs->ts = skhs->ts; } } } - - if (hs->xmin.value == ~0) - hs->xmin = InvalidFullTransactionId; - if (hs->catalog_xmin.value == ~0) - hs->catalog_xmin = InvalidFullTransactionId; } /* @@ -1946,9 +1942,10 @@ walprop_pg_process_safekeeper_feedback(WalProposer *wp, Safekeeper *sk) } CombineHotStanbyFeedbacks(&hsFeedback, wp); - if (hsFeedback.ts != 0 && memcmp(&hsFeedback, &agg_hs_feedback, sizeof hsFeedback) != 0) + if (memcmp(&hsFeedback, &agg_hs_feedback, sizeof hsFeedback) != 0) { agg_hs_feedback = hsFeedback; + elog(DEBUG2, "ProcessStandbyHSFeedback(xmin=%d, catalog_xmin=%d", XidFromFullTransactionId(hsFeedback.xmin), XidFromFullTransactionId(hsFeedback.catalog_xmin)); ProcessStandbyHSFeedback(hsFeedback.ts, XidFromFullTransactionId(hsFeedback.xmin), EpochFromFullTransactionId(hsFeedback.xmin), diff --git a/safekeeper/src/send_wal.rs b/safekeeper/src/send_wal.rs index 4edd09a318..5a9745e1c9 100644 --- a/safekeeper/src/send_wal.rs +++ b/safekeeper/src/send_wal.rs @@ -756,8 +756,15 @@ impl ReplyReader { match msg.first().cloned() { Some(HOT_STANDBY_FEEDBACK_TAG_BYTE) => { // Note: deserializing is on m[1..] because we skip the tag byte. - let hs_feedback = HotStandbyFeedback::des(&msg[1..]) + let mut hs_feedback = HotStandbyFeedback::des(&msg[1..]) .context("failed to deserialize HotStandbyFeedback")?; + // TODO: xmin/catalog_xmin are serialized by walreceiver.c in this way: + // pq_sendint32(&reply_message, xmin); + // pq_sendint32(&reply_message, xmin_epoch); + // So it is two big endian 32-bit words in low endian order! 
+ hs_feedback.xmin = (hs_feedback.xmin >> 32) | (hs_feedback.xmin << 32); + hs_feedback.catalog_xmin = + (hs_feedback.catalog_xmin >> 32) | (hs_feedback.catalog_xmin << 32); self.ws_guard .walsenders .record_hs_feedback(self.ws_guard.id, &hs_feedback); diff --git a/test_runner/regress/test_hot_standby.py b/test_runner/regress/test_hot_standby.py index 31f436cb4c..244d482c18 100644 --- a/test_runner/regress/test_hot_standby.py +++ b/test_runner/regress/test_hot_standby.py @@ -204,3 +204,91 @@ def test_hot_standby_gc(neon_env_builder: NeonEnvBuilder, pause_apply: bool): log_replica_lag(primary, secondary) res = s_cur.fetchone() assert res[0] == 10000 + + +def run_pgbench(connstr: str, pg_bin: PgBin): + log.info(f"Start a pgbench workload on pg {connstr}") + # s10 is about 150MB of data. In debug mode init takes about 15s on SSD. + pg_bin.run_capture(["pgbench", "-i", "-s10", connstr]) + log.info("pgbench init done") + pg_bin.run_capture(["pgbench", "-T60", connstr]) + + +# assert that pgbench_accounts and its index are created. +def pgbench_accounts_initialized(ep): + ep.safe_psql_scalar("select 'pgbench_accounts_pkey'::regclass") + + +# Test that hot_standby_feedback works in neon (it is forwarded through +# safekeepers). That is, ensure queries on standby don't fail during load on +# primary under the following conditions: +# - pgbench bombards primary with updates. +# - On the secondary we run long select of the updated table. +# - Set small max_standby_streaming_delay: hs feedback should prevent conflicts +# so apply doesn't need to wait. +# - Do agressive vacuum on primary which still shouldn't create conflicts. +# Actually this appears to be redundant due to microvacuum existence. +# +# Without hs feedback enabled we'd see 'User query might have needed to see row +# versions that must be removed.' errors. +def test_hot_standby_feedback(neon_env_builder: NeonEnvBuilder, pg_bin: PgBin): + env = neon_env_builder.init_start() + agressive_vacuum_conf = [ + "log_autovacuum_min_duration = 0", + "autovacuum_naptime = 10s", + "autovacuum_vacuum_threshold = 25", + "autovacuum_vacuum_scale_factor = 0.1", + "autovacuum_vacuum_cost_delay = -1", + ] + with env.endpoints.create_start( + branch_name="main", endpoint_id="primary", config_lines=agressive_vacuum_conf + ) as primary: + # It would be great to have more strict max_standby_streaming_delay=0s here, but then sometimes it fails with + # 'User was holding shared buffer pin for too long.'. + with env.endpoints.new_replica_start( + origin=primary, + endpoint_id="secondary", + config_lines=[ + "max_standby_streaming_delay=2s", + "neon.protocol_version=2", + "hot_standby_feedback=true", + ], + ) as secondary: + log.info( + f"primary connstr is {primary.connstr()}, secondary connstr {secondary.connstr()}" + ) + t = threading.Thread(target=run_pgbench, args=(primary.connstr(), pg_bin)) + t.start() + # Wait until pgbench_accounts is created + filled on replica *and* + # index is created. Otherwise index creation would conflict with + # read queries and hs feedback won't save us. + wait_until(60, 1.0, partial(pgbench_accounts_initialized, secondary)) + + # Test should fail if hs feedback is disabled anyway, but cross + # check that walproposer sets some xmin. 
+ def xmin_is_not_null(): + slot_xmin = primary.safe_psql_scalar( + "select xmin from pg_replication_slots where slot_name = 'wal_proposer_slot'", + log_query=False, + ) + log.info(f"xmin is {slot_xmin}") + assert int(slot_xmin) > 0 + + wait_until(10, 1.0, xmin_is_not_null) + for _ in range(1, 5): + # in debug mode takes about 5-7s + balance = secondary.safe_psql_scalar("select sum(abalance) from pgbench_accounts") + log.info(f"balance={balance}") + log_replica_lag(primary, secondary) + t.join() + + # check xmin is reset when standby is gone + def xmin_is_null(): + slot_xmin = primary.safe_psql_scalar( + "select xmin from pg_replication_slots where slot_name = 'wal_proposer_slot'", + log_query=False, + ) + log.info(f"xmin is {slot_xmin}") + assert slot_xmin is None + + wait_until(10, 1.0, xmin_is_null) From f2771a99b7cc286a936ecc27949d305d1fe7103c Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 20 May 2024 14:28:36 +0300 Subject: [PATCH 028/220] Add metric for pageserver standby horizon. Co-authored-by: Konstantin Knizhnik --- pageserver/src/metrics.rs | 15 +++++++++++++++ pageserver/src/tenant/timeline.rs | 3 +++ .../timeline/walreceiver/connection_manager.rs | 4 ++++ test_runner/fixtures/metrics.py | 1 + 4 files changed, 23 insertions(+) diff --git a/pageserver/src/metrics.rs b/pageserver/src/metrics.rs index 5315f0b936..27e25d8e32 100644 --- a/pageserver/src/metrics.rs +++ b/pageserver/src/metrics.rs @@ -525,6 +525,15 @@ static LAST_RECORD_LSN: Lazy = Lazy::new(|| { .expect("failed to define a metric") }); +static STANDBY_HORIZON: Lazy = Lazy::new(|| { + register_int_gauge_vec!( + "pageserver_standby_horizon", + "Standby apply LSN for which GC is hold off, by timeline.", + &["tenant_id", "shard_id", "timeline_id"] + ) + .expect("failed to define a metric") +}); + static RESIDENT_PHYSICAL_SIZE: Lazy = Lazy::new(|| { register_uint_gauge_vec!( "pageserver_resident_physical_size", @@ -2098,6 +2107,7 @@ pub(crate) struct TimelineMetrics { pub garbage_collect_histo: StorageTimeMetrics, pub find_gc_cutoffs_histo: StorageTimeMetrics, pub last_record_gauge: IntGauge, + pub standby_horizon_gauge: IntGauge, pub resident_physical_size_gauge: UIntGauge, /// copy of LayeredTimeline.current_logical_size pub current_logical_size_gauge: UIntGauge, @@ -2167,6 +2177,9 @@ impl TimelineMetrics { let last_record_gauge = LAST_RECORD_LSN .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id]) .unwrap(); + let standby_horizon_gauge = STANDBY_HORIZON + .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id]) + .unwrap(); let resident_physical_size_gauge = RESIDENT_PHYSICAL_SIZE .get_metric_with_label_values(&[&tenant_id, &shard_id, &timeline_id]) .unwrap(); @@ -2212,6 +2225,7 @@ impl TimelineMetrics { find_gc_cutoffs_histo, load_layer_map_histo, last_record_gauge, + standby_horizon_gauge, resident_physical_size_gauge, current_logical_size_gauge, aux_file_size_gauge, @@ -2246,6 +2260,7 @@ impl TimelineMetrics { let timeline_id = &self.timeline_id; let shard_id = &self.shard_id; let _ = LAST_RECORD_LSN.remove_label_values(&[tenant_id, shard_id, timeline_id]); + let _ = STANDBY_HORIZON.remove_label_values(&[tenant_id, shard_id, timeline_id]); { RESIDENT_PHYSICAL_SIZE_GLOBAL.sub(self.resident_physical_size_get()); let _ = RESIDENT_PHYSICAL_SIZE.remove_label_values(&[tenant_id, shard_id, timeline_id]); diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 2c43c26359..b0e5275b5f 100644 --- a/pageserver/src/tenant/timeline.rs +++ 
b/pageserver/src/tenant/timeline.rs @@ -4871,6 +4871,9 @@ impl Timeline { // It is an easy way to unset it when standby disappears without adding // more conf options. self.standby_horizon.store(Lsn::INVALID); + self.metrics + .standby_horizon_gauge + .set(Lsn::INVALID.0 as i64); let res = self .gc_timeline(horizon_cutoff, pitr_cutoff, retain_lsns, new_gc_cutoff) diff --git a/pageserver/src/tenant/timeline/walreceiver/connection_manager.rs b/pageserver/src/tenant/timeline/walreceiver/connection_manager.rs index a3c7adae44..1d2ffec08f 100644 --- a/pageserver/src/tenant/timeline/walreceiver/connection_manager.rs +++ b/pageserver/src/tenant/timeline/walreceiver/connection_manager.rs @@ -735,6 +735,10 @@ impl ConnectionManagerState { self.timeline .standby_horizon .store(Lsn(timeline_update.standby_horizon)); + self.timeline + .metrics + .standby_horizon_gauge + .set(timeline_update.standby_horizon as i64); } let new_safekeeper_id = NodeId(timeline_update.safekeeper_id); diff --git a/test_runner/fixtures/metrics.py b/test_runner/fixtures/metrics.py index 8fa67e75c9..8b8075f8c1 100644 --- a/test_runner/fixtures/metrics.py +++ b/test_runner/fixtures/metrics.py @@ -142,6 +142,7 @@ PAGESERVER_PER_TENANT_METRICS: Tuple[str, ...] = ( "pageserver_resident_physical_size", "pageserver_io_operations_bytes_total", "pageserver_last_record_lsn", + "pageserver_standby_horizon", "pageserver_smgr_query_seconds_bucket", "pageserver_smgr_query_seconds_count", "pageserver_smgr_query_seconds_sum", From d43dcceef9bac464cd2b80bd8f40cfae496c7f62 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 20 May 2024 18:41:39 +0300 Subject: [PATCH 029/220] Minimize hot standby feedback xmins to next_xid. Hot standby feedback xmins can be greater than next_xid due to sparse update of nextXid on pageserver (to do less writes it advances next xid on 1024). ProcessStandbyHSFeedback ignores such xids from the future; to fix, minimize received xmin to next_xid. Co-authored-by: Konstantin Knizhnik --- pgxn/neon/walproposer_pg.c | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/pgxn/neon/walproposer_pg.c b/pgxn/neon/walproposer_pg.c index 492a46fd54..316e23a72e 100644 --- a/pgxn/neon/walproposer_pg.c +++ b/pgxn/neon/walproposer_pg.c @@ -1944,13 +1944,26 @@ walprop_pg_process_safekeeper_feedback(WalProposer *wp, Safekeeper *sk) CombineHotStanbyFeedbacks(&hsFeedback, wp); if (memcmp(&hsFeedback, &agg_hs_feedback, sizeof hsFeedback) != 0) { + FullTransactionId xmin = hsFeedback.xmin; + FullTransactionId catalog_xmin = hsFeedback.catalog_xmin; + FullTransactionId next_xid = ReadNextFullTransactionId(); + /* + * Page server is updating nextXid in checkpoint each 1024 transactions, + * so feedback xmin can be actually larger then nextXid and + * function TransactionIdInRecentPast return false in this case, + * preventing update of slot's xmin. 
+ */ + if (FullTransactionIdPrecedes(next_xid, xmin)) + xmin = next_xid; + if (FullTransactionIdPrecedes(next_xid, catalog_xmin)) + catalog_xmin = next_xid; agg_hs_feedback = hsFeedback; elog(DEBUG2, "ProcessStandbyHSFeedback(xmin=%d, catalog_xmin=%d", XidFromFullTransactionId(hsFeedback.xmin), XidFromFullTransactionId(hsFeedback.catalog_xmin)); ProcessStandbyHSFeedback(hsFeedback.ts, - XidFromFullTransactionId(hsFeedback.xmin), - EpochFromFullTransactionId(hsFeedback.xmin), - XidFromFullTransactionId(hsFeedback.catalog_xmin), - EpochFromFullTransactionId(hsFeedback.catalog_xmin)); + XidFromFullTransactionId(xmin), + EpochFromFullTransactionId(xmin), + XidFromFullTransactionId(catalog_xmin), + EpochFromFullTransactionId(catalog_xmin)); } CheckGracefulShutdown(wp); From d9d471e3c43c9e47b0ffc1ce9a796bd15aacf36c Mon Sep 17 00:00:00 2001 From: Tristan Partin Date: Tue, 14 May 2024 13:49:04 -0500 Subject: [PATCH 030/220] Add some Python typing in a few test files --- test_runner/fixtures/neon_fixtures.py | 7 ++++++- test_runner/regress/test_pg_regress.py | 22 +++++++++++++++------- 2 files changed, 21 insertions(+), 8 deletions(-) diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index 41377e2db2..5c865baa54 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -2721,7 +2721,12 @@ class PgBin: env.update(env_add) return env - def run(self, command: List[str], env: Optional[Env] = None, cwd: Optional[str] = None): + def run( + self, + command: List[str], + env: Optional[Env] = None, + cwd: Optional[Union[str, Path]] = None, + ): """ Run one of the postgres binaries. diff --git a/test_runner/regress/test_pg_regress.py b/test_runner/regress/test_pg_regress.py index 2b1b7fff34..c00a8ff6b7 100644 --- a/test_runner/regress/test_pg_regress.py +++ b/test_runner/regress/test_pg_regress.py @@ -1,8 +1,10 @@ # # This file runs pg_regress-based tests. # +from __future__ import annotations + from pathlib import Path -from typing import Optional +from typing import TYPE_CHECKING import pytest from fixtures.neon_fixtures import ( @@ -11,6 +13,12 @@ from fixtures.neon_fixtures import ( ) from fixtures.remote_storage import s3_storage +if TYPE_CHECKING: + from typing import Optional + + from fixtures.neon_fixtures import PgBin + from pytest import CaptureFixture + # Run the main PostgreSQL regression tests, in src/test/regress. # @@ -19,8 +27,8 @@ def test_pg_regress( neon_env_builder: NeonEnvBuilder, test_output_dir: Path, build_type: str, - pg_bin, - capsys, + pg_bin: PgBin, + capsys: CaptureFixture[str], base_dir: Path, pg_distrib_dir: Path, shard_count: Optional[int], @@ -86,8 +94,8 @@ def test_pg_regress( def test_isolation( neon_env_builder: NeonEnvBuilder, test_output_dir: Path, - pg_bin, - capsys, + pg_bin: PgBin, + capsys: CaptureFixture[str], base_dir: Path, pg_distrib_dir: Path, shard_count: Optional[int], @@ -142,8 +150,8 @@ def test_isolation( def test_sql_regress( neon_env_builder: NeonEnvBuilder, test_output_dir: Path, - pg_bin, - capsys, + pg_bin: PgBin, + capsys: CaptureFixture[str], base_dir: Path, pg_distrib_dir: Path, shard_count: Optional[int], From e8b8ebfa1da3f528b5ebfb0a5aff7b946a8eabc1 Mon Sep 17 00:00:00 2001 From: Tristan Partin Date: Fri, 17 May 2024 13:39:33 -0500 Subject: [PATCH 031/220] Allow check_restored_datadir_content to ignore certain files Some files may have known differences that we are okay with. 
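
For illustration, a hypothetical caller could use the new `ignored_files` parameter roughly like this (the relation paths are invented for the example; the real consumer is the pg_regress test change later in this series):

```python
# Sketch only: compare the endpoint's datadir against a basebackup, but skip
# files that are expected to differ (paths here are made up for illustration).
check_restored_datadir_content(
    test_output_dir,
    env,
    endpoint,
    ignored_files=[
        "base/16385/16402",       # main fork of an unlogged relation
        "base/16385/16402_init",  # its init fork
    ],
)
```
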
--- test_runner/fixtures/neon_fixtures.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index 5c865baa54..7a660b64ed 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -4150,7 +4150,12 @@ def list_files_to_compare(pgdata_dir: Path) -> List[str]: # pg is the existing and running compute node, that we want to compare with a basebackup -def check_restored_datadir_content(test_output_dir: Path, env: NeonEnv, endpoint: Endpoint): +def check_restored_datadir_content( + test_output_dir: Path, + env: NeonEnv, + endpoint: Endpoint, + ignored_files: Optional[list[str]] = None, +): pg_bin = PgBin(test_output_dir, env.pg_distrib_dir, env.pg_version) # Get the timeline ID. We need it for the 'basebackup' command @@ -4203,6 +4208,10 @@ def check_restored_datadir_content(test_output_dir: Path, env: NeonEnv, endpoint if not f.startswith("pg_xact") and not f.startswith("pg_multixact") ] + if ignored_files: + pgdata_files = [f for f in pgdata_files if f not in ignored_files] + restored_files = [f for f in restored_files if f not in ignored_files] + # check that file sets are equal assert pgdata_files == restored_files From 9a4b896636465d4d24e5b11c6a7bdb6aed83f23f Mon Sep 17 00:00:00 2001 From: Tristan Partin Date: Fri, 17 May 2024 13:50:36 -0500 Subject: [PATCH 032/220] Use a constant for database name in test_pg_regress --- test_runner/regress/test_pg_regress.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test_runner/regress/test_pg_regress.py b/test_runner/regress/test_pg_regress.py index c00a8ff6b7..302f94064c 100644 --- a/test_runner/regress/test_pg_regress.py +++ b/test_runner/regress/test_pg_regress.py @@ -33,6 +33,8 @@ def test_pg_regress( pg_distrib_dir: Path, shard_count: Optional[int], ): + DBNAME = "regression" + """ :param shard_count: if None, create an unsharded tenant. Otherwise create a tenant with this many shards. @@ -50,7 +52,7 @@ def test_pg_regress( # Connect to postgres and create a database called "regression". endpoint = env.endpoints.create_start("main") - endpoint.safe_psql("CREATE DATABASE regression") + endpoint.safe_psql(f"CREATE DATABASE {DBNAME}") # Create some local directories for pg_regress to run in. runpath = test_output_dir / "regress" From 8030b8e4c5f50a2320888433a39e550bfa1ec22e Mon Sep 17 00:00:00 2001 From: Tristan Partin Date: Fri, 17 May 2024 13:56:02 -0500 Subject: [PATCH 033/220] Fix test_pg_regress for unlogged relations Previously we worked around file comparison issues by dropping unlogged relations in the pg_regress tests, but this would lead to an unnecessary diff when compared to upstream in our Postgres fork. Instead, we can precompute the files that we know will be different, and ignore them. 
--- test_runner/regress/test_pg_regress.py | 65 +++++++++++++++++++++++++- 1 file changed, 63 insertions(+), 2 deletions(-) diff --git a/test_runner/regress/test_pg_regress.py b/test_runner/regress/test_pg_regress.py index 302f94064c..885a94a557 100644 --- a/test_runner/regress/test_pg_regress.py +++ b/test_runner/regress/test_pg_regress.py @@ -4,13 +4,14 @@ from __future__ import annotations from pathlib import Path -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, cast import pytest from fixtures.neon_fixtures import ( NeonEnvBuilder, check_restored_datadir_content, ) +from fixtures.pg_version import PgVersion from fixtures.remote_storage import s3_storage if TYPE_CHECKING: @@ -87,7 +88,67 @@ def test_pg_regress( with capsys.disabled(): pg_bin.run(pg_regress_command, env=env_vars, cwd=runpath) - check_restored_datadir_content(test_output_dir, env, endpoint) + ignored_files: Optional[list[str]] = None + + # Neon handles unlogged relations in a special manner. During a + # basebackup, we ship the init fork as the main fork. This presents a + # problem in that the endpoint's data directory and the basebackup will + # have differences and will fail the eventual file comparison. + # + # Unlogged tables were introduced in version 9.1. ALTER TABLE grew + # support for setting the persistence of a table in 9.5. The reason that + # this doesn't affect versions < 15 (but probably would between 9.1 and + # 9.5) is that all the regression tests that deal with unlogged tables + # up until that point dropped the unlogged tables or set them to logged + # at some point during the test. + # + # In version 15, Postgres grew support for unlogged sequences, and with + # that came a few more regression tests. These tests did not all drop + # the unlogged tables/sequences prior to finishing. + # + # But unlogged sequences came with a bug in that, sequences didn't + # inherit the persistence of their "parent" tables if they had one. This + # was fixed and backported to 15, thus exacerbating our problem a bit. + # + # So what we can do is just ignore file differences between the data + # directory and basebackup for unlogged relations. + results = cast( + "list[tuple[str, str]]", + endpoint.safe_psql( + """ + SELECT + relkind, + pg_relation_filepath( + pg_filenode_relation(reltablespace, relfilenode) + ) AS unlogged_relation_paths + FROM pg_class + WHERE relpersistence = 'u' + """, + dbname=DBNAME, + ), + ) + + unlogged_relation_files: list[str] = [] + for r in results: + unlogged_relation_files.append(r[1]) + # This is related to the following Postgres commit: + # + # commit ccadf73163ca88bdaa74b8223d4dde05d17f550b + # Author: Heikki Linnakangas + # Date: 2023-08-23 09:21:31 -0500 + # + # Use the buffer cache when initializing an unlogged index. + # + # This patch was backpatched to 16. Without it, the LSN in the + # page header would be 0/0 in the data directory, which wouldn't + # match the LSN generated during the basebackup, thus creating + # a difference. + if env.pg_version <= PgVersion.V15 and r[0] == "i": + unlogged_relation_files.append(f"{r[1]}_init") + + ignored_files = unlogged_relation_files + + check_restored_datadir_content(test_output_dir, env, endpoint, ignored_files=ignored_files) # Run the PostgreSQL "isolation" tests, in src/test/isolation. 
From 781352bd8e4dca5676168fe67521b178fa91a0ec Mon Sep 17 00:00:00 2001 From: Tristan Partin Date: Mon, 20 May 2024 10:01:58 -0500 Subject: [PATCH 034/220] Upgrade Postgres v14 to 14.12 --- vendor/postgres-v14 | 2 +- vendor/revisions.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/vendor/postgres-v14 b/vendor/postgres-v14 index d6f7e2c604..21ec61d539 160000 --- a/vendor/postgres-v14 +++ b/vendor/postgres-v14 @@ -1 +1 @@ -Subproject commit d6f7e2c604bfc7cbc4c46bcea0a8e800f4bc778a +Subproject commit 21ec61d539d22a81fe811c2d79e26436820bc3f4 diff --git a/vendor/revisions.json b/vendor/revisions.json index c5b55762fa..ec82786109 100644 --- a/vendor/revisions.json +++ b/vendor/revisions.json @@ -1,5 +1,5 @@ { "v16": ["16.2", "8ef3c33aa01631e17cb24a122776349fcc777b46"], "v15": ["15.6", "f0d6b0ef7581bd78011832e23d8420a7d2c8a83a"], - "v14": ["14.11", "d6f7e2c604bfc7cbc4c46bcea0a8e800f4bc778a"] + "v14": ["14.12", "21ec61d539d22a81fe811c2d79e26436820bc3f4"] } From 9d081851ec846523d3893f8e66e8fb5112c1e7ca Mon Sep 17 00:00:00 2001 From: Tristan Partin Date: Mon, 20 May 2024 10:03:09 -0500 Subject: [PATCH 035/220] Upgrade Postgres v15 to 15.7 --- vendor/postgres-v15 | 2 +- vendor/revisions.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/vendor/postgres-v15 b/vendor/postgres-v15 index f0d6b0ef75..e2dbd63345 160000 --- a/vendor/postgres-v15 +++ b/vendor/postgres-v15 @@ -1 +1 @@ -Subproject commit f0d6b0ef7581bd78011832e23d8420a7d2c8a83a +Subproject commit e2dbd63345c584de75173c27951f111249ae0016 diff --git a/vendor/revisions.json b/vendor/revisions.json index ec82786109..d022e6f455 100644 --- a/vendor/revisions.json +++ b/vendor/revisions.json @@ -1,5 +1,5 @@ { "v16": ["16.2", "8ef3c33aa01631e17cb24a122776349fcc777b46"], - "v15": ["15.6", "f0d6b0ef7581bd78011832e23d8420a7d2c8a83a"], + "v15": ["15.7", "e2dbd63345c584de75173c27951f111249ae0016"], "v14": ["14.12", "21ec61d539d22a81fe811c2d79e26436820bc3f4"] } From e3415706b7b23a30f21051a8063bc257a79e3953 Mon Sep 17 00:00:00 2001 From: Tristan Partin Date: Mon, 20 May 2024 10:04:04 -0500 Subject: [PATCH 036/220] Upgrade Postgres v16 to 16.3 --- libs/walproposer/src/walproposer.rs | 4 ++-- vendor/postgres-v16 | 2 +- vendor/revisions.json | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/libs/walproposer/src/walproposer.rs b/libs/walproposer/src/walproposer.rs index fb815607a7..f7b72b205f 100644 --- a/libs/walproposer/src/walproposer.rs +++ b/libs/walproposer/src/walproposer.rs @@ -496,9 +496,9 @@ mod tests { // TODO: When updating Postgres versions, this test will cause // problems. Postgres version in message needs updating. 
// - // Greeting(ProposerGreeting { protocol_version: 2, pg_version: 160002, proposer_id: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], system_id: 0, timeline_id: 9e4c8f36063c6c6e93bc20d65a820f3d, tenant_id: 9e4c8f36063c6c6e93bc20d65a820f3d, tli: 1, wal_seg_size: 16777216 }) + // Greeting(ProposerGreeting { protocol_version: 2, pg_version: 160003, proposer_id: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], system_id: 0, timeline_id: 9e4c8f36063c6c6e93bc20d65a820f3d, tenant_id: 9e4c8f36063c6c6e93bc20d65a820f3d, tli: 1, wal_seg_size: 16777216 }) vec![ - 103, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 2, 113, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 103, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 3, 113, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 158, 76, 143, 54, 6, 60, 108, 110, 147, 188, 32, 214, 90, 130, 15, 61, 158, 76, 143, 54, 6, 60, 108, 110, 147, 188, 32, 214, 90, 130, 15, 61, 1, 0, 0, 0, 0, 0, 0, 1, diff --git a/vendor/postgres-v16 b/vendor/postgres-v16 index 8ef3c33aa0..c271017c6c 160000 --- a/vendor/postgres-v16 +++ b/vendor/postgres-v16 @@ -1 +1 @@ -Subproject commit 8ef3c33aa01631e17cb24a122776349fcc777b46 +Subproject commit c271017c6c4846be59948766baec2ba4ace5dc9c diff --git a/vendor/revisions.json b/vendor/revisions.json index d022e6f455..a3af9331fe 100644 --- a/vendor/revisions.json +++ b/vendor/revisions.json @@ -1,5 +1,5 @@ { - "v16": ["16.2", "8ef3c33aa01631e17cb24a122776349fcc777b46"], + "v16": ["16.3", "c271017c6c4846be59948766baec2ba4ace5dc9c"], "v15": ["15.7", "e2dbd63345c584de75173c27951f111249ae0016"], "v14": ["14.12", "21ec61d539d22a81fe811c2d79e26436820bc3f4"] } From 1988ad8db702a24afc19ce4332e1199531441a8c Mon Sep 17 00:00:00 2001 From: Tristan Partin Date: Fri, 17 May 2024 13:42:51 -0500 Subject: [PATCH 037/220] Extend test_unlogged to include a sequence Unlogged sequences were added in v15, so let's just test to make sure they work on Neon. --- test_runner/regress/test_unlogged.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/test_runner/regress/test_unlogged.py b/test_runner/regress/test_unlogged.py index 708bf0dfeb..137d28b9fa 100644 --- a/test_runner/regress/test_unlogged.py +++ b/test_runner/regress/test_unlogged.py @@ -1,4 +1,5 @@ from fixtures.neon_fixtures import NeonEnv, fork_at_current_lsn +from fixtures.pg_version import PgVersion # @@ -17,7 +18,8 @@ def test_unlogged(neon_simple_env: NeonEnv): cur.execute("CREATE UNLOGGED TABLE iut (id int);") # create index to test unlogged index relation as well cur.execute("CREATE UNIQUE INDEX iut_idx ON iut (id);") - cur.execute("INSERT INTO iut values (42);") + cur.execute("ALTER TABLE iut ADD COLUMN seq int GENERATED ALWAYS AS IDENTITY;") + cur.execute("INSERT INTO iut (id) values (42);") # create another compute to fetch inital empty contents from pageserver fork_at_current_lsn(env, endpoint, "test_unlogged_basebackup", "test_unlogged") @@ -26,7 +28,15 @@ def test_unlogged(neon_simple_env: NeonEnv): conn2 = endpoint2.connect() cur2 = conn2.cursor() # after restart table should be empty but valid - cur2.execute("PREPARE iut_plan (int) AS INSERT INTO iut VALUES ($1)") + cur2.execute("PREPARE iut_plan (int) AS INSERT INTO iut (id) VALUES ($1)") cur2.execute("EXECUTE iut_plan (43);") cur2.execute("SELECT * FROM iut") - assert cur2.fetchall() == [(43,)] + results = cur2.fetchall() + # Unlogged sequences were introduced in v15. 
On <= v14, the sequence created + # for the GENERATED ALWAYS AS IDENTITY column is logged, and hence it keeps + # the old value (2) on restart. While on v15 and above, it's unlogged, so it + # gets reset to 1. + if env.pg_version <= PgVersion.V14: + assert results == [(43, 2)] + else: + assert results == [(43, 1)] From 353afe4fe7dc2c5a131aa01d06d10d0d229fd84a Mon Sep 17 00:00:00 2001 From: John Spray Date: Tue, 21 May 2024 16:13:54 +0100 Subject: [PATCH 038/220] neon_local: run controller's postgres with fsync=off (#7817) ## Problem In `test_storage_controller_many_tenants` we [occasionally](https://neon-github-public-dev.s3.amazonaws.com/reports/main/9155810417/index.html#/testresult/8fbdf57a0e859c2d) see it hit the retry limit on serializable transactions. That's likely due to a combination of relative slow fsync on the hetzner nodes running the test, and the way the test does lots of parallel timeline creations, putting high load on the drive. Running the storage controller's db with fsync=off may help here. ## Summary of changes - Set `fsync=off` in the postgres config for the database used by the storage controller in tests --- control_plane/src/storage_controller.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/control_plane/src/storage_controller.rs b/control_plane/src/storage_controller.rs index 96e8276f4d..b6b7ea7762 100644 --- a/control_plane/src/storage_controller.rs +++ b/control_plane/src/storage_controller.rs @@ -243,9 +243,13 @@ impl StorageController { anyhow::bail!("initdb failed with status {status}"); } + // Write a minimal config file: + // - Specify the port, since this is chosen dynamically + // - Switch off fsync, since we're running on lightweight test environments and when e.g. scale testing + // the storage controller we don't want a slow local disk to interfere with that. tokio::fs::write( &pg_data_path.join("postgresql.conf"), - format!("port = {}", self.postgres_port), + format!("port = {}\nfsync=off\n", self.postgres_port), ) .await?; }; From a8a88ba7bcf99505b1552858db0f5415974b8f8a Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Tue, 21 May 2024 20:08:43 +0300 Subject: [PATCH 039/220] test(detach_ancestor): ensure L0 compaction in history is ok (#7813) detaching a timeline from its ancestor can leave the resulting timeline with more L0 layers than the compaction threshold. most of the time, the detached timeline has made progress, and next time the L0 -> L1 compaction happens near the original branch point and not near the last_record_lsn. add a test to ensure that inheriting the historical L0s does not change fullbackup. additionally: - add `wait_until_completed` to test-only timeline checkpoint and compact HTTP endpoints. with `?wait_until_completed=true` the endpoints will wait until the remote client has completed uploads. 
- for delta layers, describe L0-ness with the `/layer` endpoint Cc: #6994 --- libs/pageserver_api/src/models.rs | 2 + pageserver/src/http/routes.rs | 11 ++ pageserver/src/tenant/storage_layer/layer.rs | 1 + .../src/tenant/storage_layer/layer_name.rs | 16 +- test_runner/fixtures/pageserver/http.py | 20 ++- test_runner/regress/test_compaction.py | 3 +- test_runner/regress/test_layer_eviction.py | 4 +- test_runner/regress/test_ondemand_download.py | 1 - .../regress/test_pageserver_layer_rolling.py | 2 +- test_runner/regress/test_sharding.py | 1 - .../regress/test_timeline_detach_ancestor.py | 157 ++++++++++++++++++ test_runner/regress/test_timeline_size.py | 2 +- 12 files changed, 200 insertions(+), 20 deletions(-) diff --git a/libs/pageserver_api/src/models.rs b/libs/pageserver_api/src/models.rs index 451ee1a13c..05a444d738 100644 --- a/libs/pageserver_api/src/models.rs +++ b/libs/pageserver_api/src/models.rs @@ -789,6 +789,8 @@ pub enum HistoricLayerInfo { lsn_end: Lsn, remote: bool, access_stats: LayerAccessStats, + + l0: bool, }, Image { layer_file_name: String, diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index 0eab6510ca..ec3b1141f3 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -1736,6 +1736,8 @@ async fn timeline_compact_handler( if Some(true) == parse_query_param::<_, bool>(&request, "force_image_layer_creation")? { flags |= CompactFlags::ForceImageLayerCreation; } + let wait_until_uploaded = + parse_query_param::<_, bool>(&request, "wait_until_uploaded")?.unwrap_or(false); async { let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download); @@ -1744,6 +1746,9 @@ async fn timeline_compact_handler( .compact(&cancel, flags, &ctx) .await .map_err(|e| ApiError::InternalServerError(e.into()))?; + if wait_until_uploaded { + timeline.remote_client.wait_completion().await.map_err(ApiError::InternalServerError)?; + } json_response(StatusCode::OK, ()) } .instrument(info_span!("manual_compaction", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id)) @@ -1768,6 +1773,8 @@ async fn timeline_checkpoint_handler( if Some(true) == parse_query_param::<_, bool>(&request, "force_image_layer_creation")? 
{ flags |= CompactFlags::ForceImageLayerCreation; } + let wait_until_uploaded = + parse_query_param::<_, bool>(&request, "wait_until_uploaded")?.unwrap_or(false); async { let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download); @@ -1781,6 +1788,10 @@ async fn timeline_checkpoint_handler( .await .map_err(|e| ApiError::InternalServerError(e.into()))?; + if wait_until_uploaded { + timeline.remote_client.wait_completion().await.map_err(ApiError::InternalServerError)?; + } + json_response(StatusCode::OK, ()) } .instrument(info_span!("manual_checkpoint", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id)) diff --git a/pageserver/src/tenant/storage_layer/layer.rs b/pageserver/src/tenant/storage_layer/layer.rs index 97b349f635..45d61ce048 100644 --- a/pageserver/src/tenant/storage_layer/layer.rs +++ b/pageserver/src/tenant/storage_layer/layer.rs @@ -1264,6 +1264,7 @@ impl LayerInner { lsn_end: lsn_range.end, remote: !resident, access_stats, + l0: crate::tenant::layer_map::LayerMap::is_l0(self.layer_desc()), } } else { let lsn = self.desc.image_layer_lsn(); diff --git a/pageserver/src/tenant/storage_layer/layer_name.rs b/pageserver/src/tenant/storage_layer/layer_name.rs index c733404693..da26e1eeb7 100644 --- a/pageserver/src/tenant/storage_layer/layer_name.rs +++ b/pageserver/src/tenant/storage_layer/layer_name.rs @@ -347,37 +347,33 @@ impl<'de> serde::de::Visitor<'de> for LayerNameVisitor { mod test { use super::*; #[test] - fn image_layer_parse() -> anyhow::Result<()> { + fn image_layer_parse() { let expected = LayerName::Image(ImageLayerName { key_range: Key::from_i128(0) ..Key::from_hex("000000067F00000001000004DF0000000006").unwrap(), lsn: Lsn::from_hex("00000000014FED58").unwrap(), }); - let parsed = LayerName::from_str("000000000000000000000000000000000000-000000067F00000001000004DF0000000006__00000000014FED58-v1-00000001").map_err(|s| anyhow::anyhow!(s))?; + let parsed = LayerName::from_str("000000000000000000000000000000000000-000000067F00000001000004DF0000000006__00000000014FED58-v1-00000001").unwrap(); assert_eq!(parsed, expected,); // Omitting generation suffix is valid - let parsed = LayerName::from_str("000000000000000000000000000000000000-000000067F00000001000004DF0000000006__00000000014FED58").map_err(|s| anyhow::anyhow!(s))?; + let parsed = LayerName::from_str("000000000000000000000000000000000000-000000067F00000001000004DF0000000006__00000000014FED58").unwrap(); assert_eq!(parsed, expected,); - - Ok(()) } #[test] - fn delta_layer_parse() -> anyhow::Result<()> { + fn delta_layer_parse() { let expected = LayerName::Delta(DeltaLayerName { key_range: Key::from_i128(0) ..Key::from_hex("000000067F00000001000004DF0000000006").unwrap(), lsn_range: Lsn::from_hex("00000000014FED58").unwrap() ..Lsn::from_hex("000000000154C481").unwrap(), }); - let parsed = LayerName::from_str("000000000000000000000000000000000000-000000067F00000001000004DF0000000006__00000000014FED58-000000000154C481-v1-00000001").map_err(|s| anyhow::anyhow!(s))?; + let parsed = LayerName::from_str("000000000000000000000000000000000000-000000067F00000001000004DF0000000006__00000000014FED58-000000000154C481-v1-00000001").unwrap(); assert_eq!(parsed, expected); // Omitting generation suffix is valid - let parsed = LayerName::from_str("000000000000000000000000000000000000-000000067F00000001000004DF0000000006__00000000014FED58-000000000154C481").map_err(|s| anyhow::anyhow!(s))?; + let parsed = 
LayerName::from_str("000000000000000000000000000000000000-000000067F00000001000004DF0000000006__00000000014FED58-000000000154C481").unwrap(); assert_eq!(parsed, expected); - - Ok(()) } } diff --git a/test_runner/fixtures/pageserver/http.py b/test_runner/fixtures/pageserver/http.py index 4d563a532b..f1f96f6d5f 100644 --- a/test_runner/fixtures/pageserver/http.py +++ b/test_runner/fixtures/pageserver/http.py @@ -56,20 +56,30 @@ class InMemoryLayerInfo: class HistoricLayerInfo: kind: str layer_file_name: str - layer_file_size: Optional[int] + layer_file_size: int lsn_start: str lsn_end: Optional[str] remote: bool + # None for image layers, true if pageserver thinks this is an L0 delta layer + l0: Optional[bool] @classmethod def from_json(cls, d: Dict[str, Any]) -> HistoricLayerInfo: + # instead of parsing the key range lets keep the definition of "L0" in pageserver + l0_ness = d.get("l0") + assert l0_ness is None or isinstance(l0_ness, bool) + + size = d["layer_file_size"] + assert isinstance(size, int) + return HistoricLayerInfo( kind=d["kind"], layer_file_name=d["layer_file_name"], - layer_file_size=d.get("layer_file_size"), + layer_file_size=size, lsn_start=d["lsn_start"], lsn_end=d.get("lsn_end"), remote=d["remote"], + l0=l0_ness, ) @@ -583,6 +593,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter): timeline_id: TimelineId, force_repartition=False, force_image_layer_creation=False, + wait_until_uploaded=False, ): self.is_testing_enabled_or_skip() query = {} @@ -590,6 +601,8 @@ class PageserverHttpClient(requests.Session, MetricsGetter): query["force_repartition"] = "true" if force_image_layer_creation: query["force_image_layer_creation"] = "true" + if wait_until_uploaded: + query["wait_until_uploaded"] = "true" log.info(f"Requesting compact: tenant {tenant_id}, timeline {timeline_id}") res = self.put( @@ -656,6 +669,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter): timeline_id: TimelineId, force_repartition=False, force_image_layer_creation=False, + wait_until_uploaded=False, ): self.is_testing_enabled_or_skip() query = {} @@ -663,6 +677,8 @@ class PageserverHttpClient(requests.Session, MetricsGetter): query["force_repartition"] = "true" if force_image_layer_creation: query["force_image_layer_creation"] = "true" + if wait_until_uploaded: + query["wait_until_uploaded"] = "true" log.info(f"Requesting checkpoint: tenant {tenant_id}, timeline {timeline_id}") res = self.put( diff --git a/test_runner/regress/test_compaction.py b/test_runner/regress/test_compaction.py index 93a16620a3..6a3515e1bd 100644 --- a/test_runner/regress/test_compaction.py +++ b/test_runner/regress/test_compaction.py @@ -165,7 +165,6 @@ def test_sharding_compaction( image_layer_sizes[layer.layer_file_name] = layer.layer_file_size # Pageserver should assert rather than emit an empty layer file, but double check here - assert layer.layer_file_size is not None assert layer.layer_file_size > 0 shard_has_image_layers.append(len(image_layer_sizes) > 1) @@ -178,7 +177,7 @@ def test_sharding_compaction( # # We only do this check with tiny stripes, because large stripes may not give all shards enough # data to have statistically significant image layers - avg_size = sum(v for v in image_layer_sizes.values()) / len(image_layer_sizes) # type: ignore + avg_size = sum(v for v in image_layer_sizes.values()) / len(image_layer_sizes) log.info(f"Shard {shard_id} average image layer size: {avg_size}") assert avg_size > compaction_target_size / 2 diff --git a/test_runner/regress/test_layer_eviction.py 
b/test_runner/regress/test_layer_eviction.py index 0ef4f6d95b..193149ea03 100644 --- a/test_runner/regress/test_layer_eviction.py +++ b/test_runner/regress/test_layer_eviction.py @@ -272,14 +272,14 @@ def test_gc_of_remote_layers(neon_env_builder: NeonEnvBuilder): resident_physical_size_metric == 0 ), "ensure that resident_physical_size metric is zero" assert resident_physical_size_metric == sum( - layer.layer_file_size or 0 for layer in info.historic_layers if not layer.remote + layer.layer_file_size for layer in info.historic_layers if not layer.remote ), "ensure that resident_physical_size metric corresponds to layer map dump" remote_physical_size_metric = ps_http.get_timeline_metric( tenant_id, timeline_id, "pageserver_remote_physical_size" ) assert remote_physical_size_metric == sum( - layer.layer_file_size or 0 for layer in info.historic_layers if layer.remote + layer.layer_file_size for layer in info.historic_layers if layer.remote ), "ensure that remote_physical_size metric corresponds to layer map dump" log.info("before runnning GC, ensure that remote_physical size is zero") diff --git a/test_runner/regress/test_ondemand_download.py b/test_runner/regress/test_ondemand_download.py index b51754c9e0..b137fb3a5c 100644 --- a/test_runner/regress/test_ondemand_download.py +++ b/test_runner/regress/test_ondemand_download.py @@ -540,7 +540,6 @@ def test_compaction_downloads_on_demand_without_image_creation(neon_env_builder: for layer in layers.historic_layers: log.info(f"pre-compact: {layer}") - assert layer.layer_file_size is not None, "we must know layer file sizes" layer_sizes += layer.layer_file_size pageserver_http.evict_layer(tenant_id, timeline_id, layer.layer_file_name) diff --git a/test_runner/regress/test_pageserver_layer_rolling.py b/test_runner/regress/test_pageserver_layer_rolling.py index aab0536f5a..66b6185aaa 100644 --- a/test_runner/regress/test_pageserver_layer_rolling.py +++ b/test_runner/regress/test_pageserver_layer_rolling.py @@ -287,7 +287,7 @@ def test_total_size_limit(neon_env_builder: NeonEnvBuilder): total_historic_bytes += sum( layer.layer_file_size for layer in layer_map.historic_layers - if layer.layer_file_size is not None and Lsn(layer.lsn_start) > initdb_lsn + if Lsn(layer.lsn_start) > initdb_lsn ) total_ephemeral_layers += len(layer_map.in_memory_layers) diff --git a/test_runner/regress/test_sharding.py b/test_runner/regress/test_sharding.py index 1bfeec6f4b..bbb1ad0c6d 100644 --- a/test_runner/regress/test_sharding.py +++ b/test_runner/regress/test_sharding.py @@ -632,7 +632,6 @@ def test_sharding_ingest_layer_sizes( historic_layers = sorted(layer_map.historic_layers, key=lambda layer: layer.lsn_start) for layer in historic_layers: - assert layer.layer_file_size is not None if layer.layer_file_size < expect_layer_size // 2: classification = "Small" small_layer_count += 1 diff --git a/test_runner/regress/test_timeline_detach_ancestor.py b/test_runner/regress/test_timeline_detach_ancestor.py index 8406de8bc1..3e435caeee 100644 --- a/test_runner/regress/test_timeline_detach_ancestor.py +++ b/test_runner/regress/test_timeline_detach_ancestor.py @@ -482,6 +482,163 @@ def test_detached_receives_flushes_while_being_detached(neon_env_builder: NeonEn env.pageserver.allowed_errors.extend(SHUTDOWN_ALLOWED_ERRORS) +def test_compaction_induced_by_detaches_in_history( + neon_env_builder: NeonEnvBuilder, test_output_dir, pg_distrib_dir, pg_bin: PgBin +): + """ + Assuming the tree of timelines: + + root + |- child1 + |- ... 
+ |- wanted_detached_child + + Each detach can add N more L0 per level, this is actually unbounded because + compaction can be arbitrarily delayed (or detach happen right before one + starts). If "wanted_detached_child" has already made progress and compacted + L1s, we want to make sure "compaction in the history" does not leave the + timeline broken. + """ + + psql_env = {"LD_LIBRARY_PATH": str(pg_distrib_dir / "lib")} + + env = neon_env_builder.init_start( + initial_tenant_conf={ + # we want to create layers manually so we don't branch on arbitrary + # Lsn, but we also do not want to compact L0 -> L1. + "compaction_threshold": "99999", + "compaction_period": "0s", + # shouldn't matter, but just in case + "gc_period": "0s", + } + ) + env.pageserver.allowed_errors.extend(SHUTDOWN_ALLOWED_ERRORS) + client = env.pageserver.http_client() + + def delta_layers(timeline_id: TimelineId): + # shorthand for more readable formatting + return client.layer_map_info(env.initial_tenant, timeline_id).delta_layers() + + with env.endpoints.create_start("main", tenant_id=env.initial_tenant) as ep: + ep.safe_psql("create table integers (i bigint not null);") + ep.safe_psql("insert into integers (i) values (42)") + branch_lsn = wait_for_last_flush_lsn(env, ep, env.initial_tenant, env.initial_timeline) + + client.timeline_checkpoint(env.initial_tenant, env.initial_timeline) + + assert len(delta_layers(env.initial_timeline)) == 2 + + more_good_numbers = range(0, 3) + + branches: List[Tuple[str, TimelineId]] = [("main", env.initial_timeline)] + + for num in more_good_numbers: + branch_name = f"br-{len(branches)}" + branch_timeline_id = env.neon_cli.create_branch( + branch_name, + ancestor_branch_name=branches[-1][0], + tenant_id=env.initial_tenant, + ancestor_start_lsn=branch_lsn, + ) + branches.append((branch_name, branch_timeline_id)) + + with env.endpoints.create_start(branches[-1][0], tenant_id=env.initial_tenant) as ep: + ep.safe_psql( + f"insert into integers (i) select i from generate_series({num}, {num + 100}) as s(i)" + ) + branch_lsn = wait_for_last_flush_lsn(env, ep, env.initial_tenant, branch_timeline_id) + client.timeline_checkpoint(env.initial_tenant, branch_timeline_id) + + assert len(delta_layers(branch_timeline_id)) == 1 + + # now fill in the final, most growing timeline + + branch_name, branch_timeline_id = branches[-1] + with env.endpoints.create_start(branch_name, tenant_id=env.initial_tenant) as ep: + ep.safe_psql("insert into integers (i) select i from generate_series(50, 500) s(i)") + + last_suffix = None + for suffix in range(0, 4): + ep.safe_psql(f"create table other_table_{suffix} as select * from integers") + wait_for_last_flush_lsn(env, ep, env.initial_tenant, branch_timeline_id) + client.timeline_checkpoint(env.initial_tenant, branch_timeline_id) + last_suffix = suffix + + assert last_suffix is not None + + assert len(delta_layers(branch_timeline_id)) == 5 + + client.patch_tenant_config_client_side( + env.initial_tenant, {"compaction_threshold": 5}, None + ) + + client.timeline_compact(env.initial_tenant, branch_timeline_id) + + # one more layer + ep.safe_psql(f"create table other_table_{last_suffix + 1} as select * from integers") + wait_for_last_flush_lsn(env, ep, env.initial_tenant, branch_timeline_id) + + # we need to wait here, because the detaches will do implicit tenant restart, + # and we could get unexpected layer counts + client.timeline_checkpoint(env.initial_tenant, branch_timeline_id, wait_until_uploaded=True) + + assert len([filter(lambda x: x.l0, 
delta_layers(branch_timeline_id))]) == 1 + + skip_main = branches[1:] + branch_lsn = client.timeline_detail(env.initial_tenant, branch_timeline_id)["ancestor_lsn"] + + # take the fullbackup before and after inheriting the new L0s + fullbackup_before = test_output_dir / "fullbackup-before.tar" + cmd = [ + "psql", + "--no-psqlrc", + env.pageserver.connstr(), + "-c", + f"fullbackup {env.initial_tenant} {branch_timeline_id} {branch_lsn}", + "-o", + str(fullbackup_before), + ] + pg_bin.run_capture(cmd, env=psql_env) + + for _, timeline_id in skip_main: + reparented = client.detach_ancestor(env.initial_tenant, timeline_id) + assert reparented == set(), "we have no earlier branches at any level" + + post_detach_l0s = list(filter(lambda x: x.l0, delta_layers(branch_timeline_id))) + assert len(post_detach_l0s) == 5, "should had inherited 4 L0s, have 5 in total" + + # checkpoint does compaction, which in turn decides to run, because + # there is now in total threshold number L0s even if they are not + # adjacent in Lsn space: + # + # inherited flushed during this checkpoint + # \\\\ / + # 1234X5---> lsn + # | + # l1 layers from "fill in the final, most growing timeline" + # + # branch_lsn is between 4 and first X. + client.timeline_checkpoint(env.initial_tenant, branch_timeline_id) + + post_compact_l0s = list(filter(lambda x: x.l0, delta_layers(branch_timeline_id))) + assert len(post_compact_l0s) == 1, "only the consecutive inherited L0s should be compacted" + + fullbackup_after = test_output_dir / "fullbackup_after.tar" + cmd = [ + "psql", + "--no-psqlrc", + env.pageserver.connstr(), + "-c", + f"fullbackup {env.initial_tenant} {branch_timeline_id} {branch_lsn}", + "-o", + str(fullbackup_after), + ] + pg_bin.run_capture(cmd, env=psql_env) + + # we don't need to skip any files, because zenith.signal will be identical + tar_cmp(fullbackup_before, fullbackup_after, set()) + + # TODO: # - after starting the operation, tenant is deleted # - after starting the operation, pageserver is shutdown, restarted diff --git a/test_runner/regress/test_timeline_size.py b/test_runner/regress/test_timeline_size.py index a6d06df3b6..db5297870e 100644 --- a/test_runner/regress/test_timeline_size.py +++ b/test_runner/regress/test_timeline_size.py @@ -656,7 +656,7 @@ def get_physical_size_values( client = env.pageserver.http_client() res.layer_map_file_size_sum = sum( - layer.layer_file_size or 0 + layer.layer_file_size for layer in client.layer_map_info(tenant_id, timeline_id).historic_layers ) From e3f6a07ca300549181042752c5943c71424e476a Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Tue, 21 May 2024 13:33:29 -0400 Subject: [PATCH 040/220] chore(pageserver): remove metrics for in-memory ingestion (#7823) The metrics was added in https://github.com/neondatabase/neon/pull/7515/ to observe if https://github.com/neondatabase/neon/pull/7467 introduces any perf regressions. The change was deployed on 5/7 and no changes are observed in the metrics. So it's safe to remove the metrics now. 
Signed-off-by: Alex Chi Z --- pageserver/src/metrics.rs | 7 ------- pageserver/src/pgdatadir_mapping.rs | 5 ----- 2 files changed, 12 deletions(-) diff --git a/pageserver/src/metrics.rs b/pageserver/src/metrics.rs index 27e25d8e32..4f2c75d308 100644 --- a/pageserver/src/metrics.rs +++ b/pageserver/src/metrics.rs @@ -1867,7 +1867,6 @@ pub(crate) struct WalIngestMetrics { pub(crate) records_received: IntCounter, pub(crate) records_committed: IntCounter, pub(crate) records_filtered: IntCounter, - pub(crate) time_spent_on_ingest: Histogram, } pub(crate) static WAL_INGEST: Lazy = Lazy::new(|| WalIngestMetrics { @@ -1891,12 +1890,6 @@ pub(crate) static WAL_INGEST: Lazy = Lazy::new(|| WalIngestMet "Number of WAL records filtered out due to sharding" ) .expect("failed to define a metric"), - time_spent_on_ingest: register_histogram!( - "pageserver_wal_ingest_put_value_seconds", - "Actual time spent on ingesting a record", - redo_histogram_time_buckets!(), - ) - .expect("failed to define a metric"), }); pub(crate) static WAL_REDO_TIME: Lazy = Lazy::new(|| { diff --git a/pageserver/src/pgdatadir_mapping.rs b/pageserver/src/pgdatadir_mapping.rs index b4fc4a08ee..f9d8c1020d 100644 --- a/pageserver/src/pgdatadir_mapping.rs +++ b/pageserver/src/pgdatadir_mapping.rs @@ -9,7 +9,6 @@ use super::tenant::{PageReconstructError, Timeline}; use crate::context::RequestContext; use crate::keyspace::{KeySpace, KeySpaceAccum}; -use crate::metrics::WAL_INGEST; use crate::span::debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id; use crate::walrecord::NeonWalRecord; use crate::{aux_file, repository::*}; @@ -1702,8 +1701,6 @@ impl<'a> DatadirModification<'a> { pub async fn commit(&mut self, ctx: &RequestContext) -> anyhow::Result<()> { let mut writer = self.tline.writer().await; - let timer = WAL_INGEST.time_spent_on_ingest.start_timer(); - let pending_nblocks = self.pending_nblocks; self.pending_nblocks = 0; @@ -1743,8 +1740,6 @@ impl<'a> DatadirModification<'a> { writer.update_directory_entries_count(kind, count as u64); } - timer.observe_duration(); - Ok(()) } From 679e031cf641d45d95705c61e121aa40781cc86f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arpad=20M=C3=BCller?= Date: Tue, 21 May 2024 23:31:20 +0200 Subject: [PATCH 041/220] Add dummy lsn lease http and page service APIs (#7815) We want to introduce a concept of temporary and expiring LSN leases. This adds both a http API as well as one for the page service to obtain temporary LSN leases. This adds a dummy implementation to unblock integration work of this API. A functional implementation of the lease feature is deferred to a later step. Fixes #7808 Co-authored-by: Joonas Koivunen --- libs/pageserver_api/src/models.rs | 16 ++++++ libs/pageserver_api/src/shard.rs | 8 +++ pageserver/src/http/openapi_spec.yml | 40 +++++++++++++++ pageserver/src/http/routes.rs | 30 ++++++++++++ pageserver/src/page_service.rs | 73 ++++++++++++++++++++++++++++ pageserver/src/tenant/mgr.rs | 9 +++- pageserver/src/tenant/timeline.rs | 16 +++++- 7 files changed, 190 insertions(+), 2 deletions(-) diff --git a/libs/pageserver_api/src/models.rs b/libs/pageserver_api/src/models.rs index 05a444d738..55e9c48421 100644 --- a/libs/pageserver_api/src/models.rs +++ b/libs/pageserver_api/src/models.rs @@ -162,6 +162,22 @@ impl std::fmt::Debug for TenantState { } } +/// A temporary lease to a specific lsn inside a timeline. +/// Access to the lsn is guaranteed by the pageserver until the expiration indicated by `valid_until`. 
+#[serde_as] +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +pub struct LsnLease { + #[serde_as(as = "SystemTimeAsRfc3339Millis")] + pub valid_until: SystemTime, +} + +serde_with::serde_conv!( + SystemTimeAsRfc3339Millis, + SystemTime, + |time: &SystemTime| humantime::format_rfc3339_millis(*time).to_string(), + |value: String| -> Result<_, humantime::TimestampError> { humantime::parse_rfc3339(&value) } +); + /// The only [`TenantState`] variants we could be `TenantState::Activating` from. #[derive(Clone, Copy, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)] pub enum ActivatingFrom { diff --git a/libs/pageserver_api/src/shard.rs b/libs/pageserver_api/src/shard.rs index 43d9b2e48c..1c05a01926 100644 --- a/libs/pageserver_api/src/shard.rs +++ b/libs/pageserver_api/src/shard.rs @@ -559,6 +559,14 @@ impl ShardIdentity { } } + /// Obtains the shard number and count combined into a `ShardIndex`. + pub fn shard_index(&self) -> ShardIndex { + ShardIndex { + shard_count: self.count, + shard_number: self.number, + } + } + pub fn shard_slug(&self) -> String { if self.count > ShardCount(0) { format!("-{:02x}{:02x}", self.number.0, self.count.0) diff --git a/pageserver/src/http/openapi_spec.yml b/pageserver/src/http/openapi_spec.yml index 36c74ed140..107bcd4a22 100644 --- a/pageserver/src/http/openapi_spec.yml +++ b/pageserver/src/http/openapi_spec.yml @@ -257,6 +257,37 @@ paths: schema: $ref: "#/components/schemas/LsnByTimestampResponse" + /v1/tenant/{tenant_shard_id}/timeline/{timeline_id}/lsn_lease: + parameters: + - name: tenant_shard_id + in: path + required: true + schema: + type: string + - name: timeline_id + in: path + required: true + schema: + type: string + format: hex + post: + description: Obtain lease for the given LSN + parameters: + - name: lsn + in: query + required: true + schema: + type: string + format: hex + description: A LSN to obtain the lease for + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/LsnLease" + /v1/tenant/{tenant_id}/timeline/{timeline_id}/do_gc: parameters: - name: tenant_id @@ -980,6 +1011,15 @@ components: type: string enum: [past, present, future, nodata] + LsnLease: + type: object + required: + - valid_until + properties: + valid_until: + type: string + format: date-time + PageserverUtilization: type: object required: diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index ec3b1141f3..c75e4ca5a9 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -1701,6 +1701,32 @@ async fn handle_tenant_break( json_response(StatusCode::OK, ()) } +// Obtains an lsn lease on the given timeline. +async fn lsn_lease_handler( + request: Request, + _cancel: CancellationToken, +) -> Result, ApiError> { + let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?; + let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?; + check_permission(&request, Some(tenant_shard_id.tenant_id))?; + + let lsn: Lsn = parse_query_param(&request, "lsn")? 
+ .ok_or_else(|| ApiError::BadRequest(anyhow!("missing 'lsn' query parameter")))?; + + let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download); + + let state = get_state(&request); + + let timeline = + active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id) + .await?; + let result = timeline + .make_lsn_lease(lsn, &ctx) + .map_err(|e| ApiError::InternalServerError(e.context("lsn lease http handler")))?; + + json_response(StatusCode::OK, result) +} + // Run GC immediately on given timeline. async fn timeline_gc_handler( mut request: Request, @@ -2712,6 +2738,10 @@ pub fn make_router( "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/get_timestamp_of_lsn", |r| api_handler(r, get_timestamp_of_lsn_handler), ) + .post( + "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/lsn_lease", + |r| api_handler(r, lsn_lease_handler), + ) .put( "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/do_gc", |r| api_handler(r, timeline_gc_handler), diff --git a/pageserver/src/page_service.rs b/pageserver/src/page_service.rs index 35aba044b2..c066f56c17 100644 --- a/pageserver/src/page_service.rs +++ b/pageserver/src/page_service.rs @@ -19,6 +19,7 @@ use pageserver_api::models::{ }; use pageserver_api::shard::ShardIndex; use pageserver_api::shard::ShardNumber; +use pageserver_api::shard::TenantShardId; use postgres_backend::{is_expected_io_error, AuthType, PostgresBackend, QueryError}; use pq_proto::framed::ConnectionError; use pq_proto::FeStartupPacket; @@ -33,6 +34,7 @@ use std::str::FromStr; use std::sync::Arc; use std::time::Duration; use std::time::Instant; +use std::time::SystemTime; use tokio::io::AsyncWriteExt; use tokio::io::{AsyncRead, AsyncWrite}; use tokio_util::io::StreamReader; @@ -905,6 +907,39 @@ impl PageServerHandler { } } + #[instrument(skip_all, fields(shard_id, %lsn))] + async fn handle_make_lsn_lease( + &self, + pgb: &mut PostgresBackend, + tenant_shard_id: TenantShardId, + timeline_id: TimelineId, + lsn: Lsn, + ctx: &RequestContext, + ) -> Result<(), QueryError> + where + IO: AsyncRead + AsyncWrite + Send + Sync + Unpin, + { + let shard_selector = ShardSelector::Known(tenant_shard_id.to_index()); + let timeline = self + .get_active_tenant_timeline(tenant_shard_id.tenant_id, timeline_id, shard_selector) + .await?; + let lease = timeline.make_lsn_lease(lsn, ctx)?; + let valid_until = lease + .valid_until + .duration_since(SystemTime::UNIX_EPOCH) + .map_err(|e| QueryError::Other(e.into()))?; + + pgb.write_message_noflush(&BeMessage::RowDescription(&[RowDescriptor::text_col( + b"valid_until", + )]))? + .write_message_noflush(&BeMessage::DataRow(&[Some( + &valid_until.as_millis().to_be_bytes(), + )]))? 
+ .write_message_noflush(&BeMessage::CommandComplete(b"SELECT 1"))?; + + Ok(()) + } + #[instrument(skip_all, fields(shard_id))] async fn handle_get_rel_exists_request( &mut self, @@ -1802,6 +1837,44 @@ where // important because psycopg2 executes "SET datestyle TO 'ISO'" // on connect pgb.write_message_noflush(&BeMessage::CommandComplete(b"SELECT 1"))?; + } else if query_string.starts_with("lease lsn ") { + let (_, params_raw) = query_string.split_at("lease lsn ".len()); + let params = params_raw.split_whitespace().collect::>(); + if params.len() != 3 { + return Err(QueryError::Other(anyhow::anyhow!( + "invalid param number {} for lease lsn command", + params.len() + ))); + } + + let tenant_shard_id = TenantShardId::from_str(params[0]) + .with_context(|| format!("Failed to parse tenant id from {}", params[0]))?; + let timeline_id = TimelineId::from_str(params[1]) + .with_context(|| format!("Failed to parse timeline id from {}", params[1]))?; + + tracing::Span::current() + .record("tenant_id", field::display(tenant_shard_id)) + .record("timeline_id", field::display(timeline_id)); + + self.check_permission(Some(tenant_shard_id.tenant_id))?; + + // The caller is responsible for providing correct lsn. + let lsn = Lsn::from_str(params[2]) + .with_context(|| format!("Failed to parse Lsn from {}", params[2]))?; + + match self + .handle_make_lsn_lease(pgb, tenant_shard_id, timeline_id, lsn, &ctx) + .await + { + Ok(()) => pgb.write_message_noflush(&BeMessage::CommandComplete(b"SELECT 1"))?, + Err(e) => { + error!("error obtaining lsn lease for {lsn}: {e:?}"); + pgb.write_message_noflush(&BeMessage::ErrorResponse( + &e.to_string(), + Some(e.pg_error_code()), + ))? + } + }; } else if query_string.starts_with("show ") { // show let (_, params_raw) = query_string.split_at("show ".len()); diff --git a/pageserver/src/tenant/mgr.rs b/pageserver/src/tenant/mgr.rs index 1d8e2cf6d3..89fdf31849 100644 --- a/pageserver/src/tenant/mgr.rs +++ b/pageserver/src/tenant/mgr.rs @@ -7,7 +7,7 @@ use itertools::Itertools; use pageserver_api::key::Key; use pageserver_api::models::LocationConfigMode; use pageserver_api::shard::{ - ShardCount, ShardIdentity, ShardNumber, ShardStripeSize, TenantShardId, + ShardCount, ShardIdentity, ShardIndex, ShardNumber, ShardStripeSize, TenantShardId, }; use pageserver_api::upcall_api::ReAttachResponseTenant; use rand::{distributions::Alphanumeric, Rng}; @@ -127,6 +127,8 @@ pub(crate) enum ShardSelector { First, /// Pick the shard that holds this key Page(Key), + /// The shard ID is known: pick the given shard + Known(ShardIndex), } /// A convenience for use with the re_attach ControlPlaneClient function: rather @@ -2067,6 +2069,11 @@ impl TenantManager { return ShardResolveResult::Found(tenant.clone()); } } + ShardSelector::Known(shard) + if tenant.shard_identity.shard_index() == shard => + { + return ShardResolveResult::Found(tenant.clone()); + } _ => continue, } } diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index b0e5275b5f..1f8ee9ffc4 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -25,7 +25,7 @@ use pageserver_api::{ models::{ AtomicAuxFilePolicy, AuxFilePolicy, CompactionAlgorithm, DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskSpawnRequest, EvictionPolicy, InMemoryLayerInfo, LayerMapInfo, - TimelineState, + LsnLease, TimelineState, }, reltag::BlockNumber, shard::{ShardIdentity, ShardNumber, TenantShardId}, @@ -1532,6 +1532,20 @@ impl Timeline { Ok(()) } + /// Obtains a temporary lease blocking 
garbage collection for the given LSN + pub(crate) fn make_lsn_lease( + &self, + _lsn: Lsn, + _ctx: &RequestContext, + ) -> anyhow::Result { + const LEASE_LENGTH: Duration = Duration::from_secs(5 * 60); + let lease = LsnLease { + valid_until: SystemTime::now() + LEASE_LENGTH, + }; + // TODO: dummy implementation + Ok(lease) + } + /// Flush to disk all data that was written with the put_* functions #[instrument(skip(self), fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id))] pub(crate) async fn freeze_and_flush(&self) -> anyhow::Result<()> { From 00d66e8012c2d4b0df706fb5e89ab8351e4eb402 Mon Sep 17 00:00:00 2001 From: Em Sharnoff Date: Tue, 21 May 2024 16:52:48 -0700 Subject: [PATCH 042/220] compute_ctl: Fix handling of missing /neonvm/bin/resize-swap (#7832) The logic added in the original PR (#7434) only worked before sudo was used, because 'sudo foo' will only fail with NotFound if 'sudo' doesn't exist; if 'foo' doesn't exist, then sudo will fail with a normal error exit. This means that compute_ctl may fail to restart if it exits after successfully enabling swap. --- compute_tools/src/swap.rs | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/compute_tools/src/swap.rs b/compute_tools/src/swap.rs index c22b6bc14e..024c5b338e 100644 --- a/compute_tools/src/swap.rs +++ b/compute_tools/src/swap.rs @@ -1,3 +1,5 @@ +use std::path::Path; + use anyhow::{anyhow, Context}; use tracing::warn; @@ -17,17 +19,24 @@ pub fn resize_swap(size_bytes: u64) -> anyhow::Result<()> { .arg(size_bytes.to_string()) .spawn(); - if matches!(&child_result, Err(e) if e.kind() == std::io::ErrorKind::NotFound) { - warn!("ignoring \"not found\" error from resize-swap to avoid swapoff while compute is running"); - return Ok(()); - } - child_result .context("spawn() failed") .and_then(|mut child| child.wait().context("wait() failed")) .and_then(|status| match status.success() { true => Ok(()), - false => Err(anyhow!("process exited with {status}")), + false => { + // The command failed. Maybe it was because the resize-swap file doesn't exist? + // The --once flag causes it to delete itself on success so we don't disable swap + // while postgres is running; maybe this is fine. + match Path::new(RESIZE_SWAP_BIN).try_exists() { + Err(_) | Ok(true) => Err(anyhow!("process exited with {status}")), + // The path doesn't exist; we're actually ok + Ok(false) => { + warn!("ignoring \"not found\" error from resize-swap to avoid swapoff while compute is running"); + Ok(()) + }, + } + } }) // wrap any prior error with the overall context that we couldn't run the command .with_context(|| { From bd5cb9e86b2f8623b2528df9fa59daa21759e183 Mon Sep 17 00:00:00 2001 From: Arthur Petukhovsky Date: Wed, 22 May 2024 09:34:39 +0100 Subject: [PATCH 043/220] Implement timeline_manager for safekeeper background tasks (#7768) In safekeepers we have several background tasks. Previously `WAL backup` task was spawned by another task called `wal_backup_launcher`. That task received notifications via `wal_backup_launcher_rx` and decided to spawn or kill existing backup task associated with the timeline. This was inconvenient because each code segment that touched shared state was responsible for pushing notification into `wal_backup_launcher_tx` channel. This was error prone because it's easy to miss and could lead to deadlock in some cases, if notification pushing was done in the wrong order. 
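As a rough illustration of the direction of the change (the names here are simplified and are not the actual safekeeper types; the real implementation is the `timeline_manager.rs` module added below), the new pattern is built on `tokio::sync::watch`: code that mutates shared state only bumps a version counter, and a single per-timeline manager task reconciles background work when it observes a change:

```rust
use std::time::Duration;
use tokio::sync::watch;

/// Illustrative manager loop: re-evaluate which background tasks should run
/// whenever the shared-state version changes, coalescing bursts of updates.
async fn manager_loop(mut version_rx: watch::Receiver<u64>) {
    const REFRESH_INTERVAL: Duration = Duration::from_millis(300);
    loop {
        // Sleep first, then wait for the next bump, so we run at most one
        // reconciliation pass per interval even under frequent updates.
        tokio::time::sleep(REFRESH_INTERVAL).await;
        if version_rx.changed().await.is_err() {
            break; // sender dropped: the timeline is being torn down
        }
        // ...snapshot shared state here and start/stop WAL backup, update
        // the broker-active flag, etc...
    }
}

/// Mutation sites no longer need to know which background tasks care about
/// a change; they just bump the version (a cheap, non-blocking watch write).
fn notify_state_change(version_tx: &watch::Sender<u64>) {
    version_tx.send_modify(|v| *v += 1);
}
```

Because the bump is non-blocking, it can safely happen while the shared-state lock is still held (in this patch it is done in the `Drop` impl of `WriteGuardSharedState`), which sidesteps the ordering hazard described above.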
We also had a similar issue with `is_active` timeline flag. That flag was calculated based on the state and code modifying the state had to call function to update the flag. We had a few bugs related to that, when we forgot to update `is_active` flag in some places where it could change. To fix these issues, this PR adds a new `timeline_manager` background task associated with each timeline. This task is responsible for managing all background tasks, including `is_active` flag which is used for pushing broker messages. It is subscribed for updates in timeline state in a loop and decides to spawn/kill background tasks when needed. There is a new structure called `TimelinesSet`. It stores a set of `Arc` and allows to copy the set to iterate without holding the mutex. This is what replaced `is_active` flag for the broker. Now broker push task holds a reference to the `TimelinesSet` with active timelines and use it instead of iterating over all timelines and filtering by `is_active` flag. Also added some metrics for manager iterations and active backup tasks. Ideally manager should be doing not too many iterations and we should not have a lot of backup tasks spawned at the same time. Fixes #7751 --------- Co-authored-by: Arseny Sher --- safekeeper/src/bin/safekeeper.rs | 17 +- safekeeper/src/broker.rs | 11 +- safekeeper/src/lib.rs | 2 + safekeeper/src/metrics.rs | 35 ++- safekeeper/src/receive_wal.rs | 57 ++-- safekeeper/src/remove_wal.rs | 19 +- safekeeper/src/timeline.rs | 404 +++++++++++-------------- safekeeper/src/timeline_manager.rs | 153 ++++++++++ safekeeper/src/timelines_global_map.rs | 60 ++-- safekeeper/src/timelines_set.rs | 90 ++++++ safekeeper/src/wal_backup.rs | 201 +++++------- 11 files changed, 587 insertions(+), 462 deletions(-) create mode 100644 safekeeper/src/timeline_manager.rs create mode 100644 safekeeper/src/timelines_set.rs diff --git a/safekeeper/src/bin/safekeeper.rs b/safekeeper/src/bin/safekeeper.rs index 09c565ce71..aee3898ac7 100644 --- a/safekeeper/src/bin/safekeeper.rs +++ b/safekeeper/src/bin/safekeeper.rs @@ -20,7 +20,6 @@ use std::str::FromStr; use std::sync::Arc; use std::time::Duration; use storage_broker::Uri; -use tokio::sync::mpsc; use tracing::*; use utils::pid_file; @@ -30,13 +29,13 @@ use safekeeper::defaults::{ DEFAULT_HEARTBEAT_TIMEOUT, DEFAULT_HTTP_LISTEN_ADDR, DEFAULT_MAX_OFFLOADER_LAG_BYTES, DEFAULT_PARTIAL_BACKUP_TIMEOUT, DEFAULT_PG_LISTEN_ADDR, }; +use safekeeper::remove_wal; use safekeeper::wal_service; use safekeeper::GlobalTimelines; use safekeeper::SafeKeeperConf; use safekeeper::{broker, WAL_SERVICE_RUNTIME}; use safekeeper::{control_file, BROKER_RUNTIME}; use safekeeper::{http, WAL_REMOVER_RUNTIME}; -use safekeeper::{remove_wal, WAL_BACKUP_RUNTIME}; use safekeeper::{wal_backup, HTTP_RUNTIME}; use storage_broker::DEFAULT_ENDPOINT; use utils::auth::{JwtAuth, Scope, SwappableJwtAuth}; @@ -377,8 +376,6 @@ async fn start_safekeeper(conf: SafeKeeperConf) -> Result<()> { let timeline_collector = safekeeper::metrics::TimelineCollector::new(); metrics::register_internal(Box::new(timeline_collector))?; - let (wal_backup_launcher_tx, wal_backup_launcher_rx) = mpsc::channel(100); - wal_backup::init_remote_storage(&conf); // Keep handles to main tasks to die if any of them disappears. 
@@ -391,19 +388,9 @@ async fn start_safekeeper(conf: SafeKeeperConf) -> Result<()> { let current_thread_rt = conf .current_thread_runtime .then(|| Handle::try_current().expect("no runtime in main")); - let conf_ = conf.clone(); - let wal_backup_handle = current_thread_rt - .as_ref() - .unwrap_or_else(|| WAL_BACKUP_RUNTIME.handle()) - .spawn(wal_backup::wal_backup_launcher_task_main( - conf_, - wal_backup_launcher_rx, - )) - .map(|res| ("WAL backup launcher".to_owned(), res)); - tasks_handles.push(Box::pin(wal_backup_handle)); // Load all timelines from disk to memory. - GlobalTimelines::init(conf.clone(), wal_backup_launcher_tx).await?; + GlobalTimelines::init(conf.clone()).await?; let conf_ = conf.clone(); // Run everything in current thread rt, if asked. diff --git a/safekeeper/src/broker.rs b/safekeeper/src/broker.rs index 46a51438ea..7cc2142291 100644 --- a/safekeeper/src/broker.rs +++ b/safekeeper/src/broker.rs @@ -46,6 +46,8 @@ async fn push_loop(conf: SafeKeeperConf) -> anyhow::Result<()> { return Ok(()); } + let active_timelines_set = GlobalTimelines::get_global_broker_active_set(); + let mut client = storage_broker::connect(conf.broker_endpoint.clone(), conf.broker_keepalive_interval)?; let push_interval = Duration::from_millis(PUSH_INTERVAL_MSEC); @@ -57,15 +59,9 @@ async fn push_loop(conf: SafeKeeperConf) -> anyhow::Result<()> { // sensitive and there is no risk of deadlock as we don't await while // lock is held. let now = Instant::now(); - let all_tlis = GlobalTimelines::get_all(); + let all_tlis = active_timelines_set.get_all(); let mut n_pushed_tlis = 0; for tli in &all_tlis { - // filtering alternative futures::stream::iter(all_tlis) - // .filter(|tli| {let tli = tli.clone(); async move { tli.is_active().await}}).collect::>().await; - // doesn't look better, and I'm not sure how to do that without collect. - if !tli.is_active().await { - continue; - } let sk_info = tli.get_safekeeper_info(&conf).await; yield sk_info; BROKER_PUSHED_UPDATES.inc(); @@ -90,6 +86,7 @@ async fn push_loop(conf: SafeKeeperConf) -> anyhow::Result<()> { } /// Subscribe and fetch all the interesting data from the broker. 
+#[instrument(name = "broker pull", skip_all)] async fn pull_loop(conf: SafeKeeperConf, stats: Arc) -> Result<()> { let mut client = storage_broker::connect(conf.broker_endpoint, conf.broker_keepalive_interval)?; diff --git a/safekeeper/src/lib.rs b/safekeeper/src/lib.rs index 543714a54e..8d8d2cf23e 100644 --- a/safekeeper/src/lib.rs +++ b/safekeeper/src/lib.rs @@ -31,6 +31,8 @@ pub mod safekeeper; pub mod send_wal; pub mod state; pub mod timeline; +pub mod timeline_manager; +pub mod timelines_set; pub mod wal_backup; pub mod wal_backup_partial; pub mod wal_service; diff --git a/safekeeper/src/metrics.rs b/safekeeper/src/metrics.rs index 28ae042bb3..1e965393e3 100644 --- a/safekeeper/src/metrics.rs +++ b/safekeeper/src/metrics.rs @@ -11,8 +11,9 @@ use futures::Future; use metrics::{ core::{AtomicU64, Collector, Desc, GenericCounter, GenericGaugeVec, Opts}, proto::MetricFamily, - register_int_counter, register_int_counter_pair_vec, register_int_counter_vec, Gauge, - IntCounter, IntCounterPairVec, IntCounterVec, IntGaugeVec, + register_int_counter, register_int_counter_pair, register_int_counter_pair_vec, + register_int_counter_vec, Gauge, IntCounter, IntCounterPair, IntCounterPairVec, IntCounterVec, + IntGaugeVec, }; use once_cell::sync::Lazy; @@ -162,6 +163,29 @@ pub static PARTIAL_BACKUP_UPLOADED_BYTES: Lazy = Lazy::new(|| { ) .expect("Failed to register safekeeper_partial_backup_uploaded_bytes_total counter") }); +pub static MANAGER_ITERATIONS_TOTAL: Lazy = Lazy::new(|| { + register_int_counter!( + "safekeeper_manager_iterations_total", + "Number of iterations of the timeline manager task" + ) + .expect("Failed to register safekeeper_manager_iterations_total counter") +}); +pub static MANAGER_ACTIVE_CHANGES: Lazy = Lazy::new(|| { + register_int_counter!( + "safekeeper_manager_active_changes_total", + "Number of timeline active status changes in the timeline manager task" + ) + .expect("Failed to register safekeeper_manager_active_changes_total counter") +}); +pub static WAL_BACKUP_TASKS: Lazy = Lazy::new(|| { + register_int_counter_pair!( + "safekeeper_wal_backup_tasks_started_total", + "Number of active WAL backup tasks", + "safekeeper_wal_backup_tasks_finished_total", + "Number of finished WAL backup tasks", + ) + .expect("Failed to register safekeeper_wal_backup_tasks_finished_total counter") +}); pub const LABEL_UNKNOWN: &str = "unknown"; @@ -614,8 +638,7 @@ impl Collector for TimelineCollector { self.written_wal_seconds.reset(); self.flushed_wal_seconds.reset(); - let timelines = GlobalTimelines::get_all(); - let timelines_count = timelines.len(); + let timelines_count = GlobalTimelines::get_all().len(); let mut active_timelines_count = 0; // Prometheus Collector is sync, and data is stored under async lock. 
To @@ -746,9 +769,9 @@ impl Collector for TimelineCollector { async fn collect_timeline_metrics() -> Vec { let mut res = vec![]; - let timelines = GlobalTimelines::get_all(); + let active_timelines = GlobalTimelines::get_global_broker_active_set().get_all(); - for tli in timelines { + for tli in active_timelines { if let Some(info) = tli.info_for_metrics().await { res.push(info); } diff --git a/safekeeper/src/receive_wal.rs b/safekeeper/src/receive_wal.rs index 0356def7df..03cfa882c4 100644 --- a/safekeeper/src/receive_wal.rs +++ b/safekeeper/src/receive_wal.rs @@ -45,6 +45,9 @@ const DEFAULT_FEEDBACK_CAPACITY: usize = 8; pub struct WalReceivers { mutex: Mutex, pageserver_feedback_tx: tokio::sync::broadcast::Sender, + + num_computes_tx: tokio::sync::watch::Sender, + num_computes_rx: tokio::sync::watch::Receiver, } /// Id under which walreceiver is registered in shmem. @@ -55,16 +58,21 @@ impl WalReceivers { let (pageserver_feedback_tx, _) = tokio::sync::broadcast::channel(DEFAULT_FEEDBACK_CAPACITY); + let (num_computes_tx, num_computes_rx) = tokio::sync::watch::channel(0usize); + Arc::new(WalReceivers { mutex: Mutex::new(WalReceiversShared { slots: Vec::new() }), pageserver_feedback_tx, + num_computes_tx, + num_computes_rx, }) } /// Register new walreceiver. Returned guard provides access to the slot and /// automatically deregisters in Drop. pub fn register(self: &Arc, conn_id: Option) -> WalReceiverGuard { - let slots = &mut self.mutex.lock().slots; + let mut shared = self.mutex.lock(); + let slots = &mut shared.slots; let walreceiver = WalReceiverState { conn_id, status: WalReceiverStatus::Voting, @@ -78,6 +86,9 @@ impl WalReceivers { slots.push(Some(walreceiver)); pos }; + + self.update_num(&shared); + WalReceiverGuard { id: pos, walreceivers: self.clone(), @@ -99,7 +110,18 @@ impl WalReceivers { /// Get number of walreceivers (compute connections). pub fn get_num(self: &Arc) -> usize { - self.mutex.lock().slots.iter().flatten().count() + self.mutex.lock().get_num() + } + + /// Get channel for number of walreceivers. + pub fn get_num_rx(self: &Arc) -> tokio::sync::watch::Receiver { + self.num_computes_rx.clone() + } + + /// Should get called after every update of slots. + fn update_num(self: &Arc, shared: &MutexGuard) { + let num = shared.get_num(); + self.num_computes_tx.send_replace(num); } /// Get state of all walreceivers. @@ -123,6 +145,7 @@ impl WalReceivers { fn unregister(self: &Arc, id: WalReceiverId) { let mut shared = self.mutex.lock(); shared.slots[id] = None; + self.update_num(&shared); } /// Broadcast pageserver feedback to connected walproposers. @@ -137,6 +160,13 @@ struct WalReceiversShared { slots: Vec>, } +impl WalReceiversShared { + /// Get number of walreceivers (compute connections). + fn get_num(&self) -> usize { + self.slots.iter().flatten().count() + } +} + #[derive(Debug, Clone, Serialize, Deserialize)] pub struct WalReceiverState { /// None means it is recovery initiated by us (this safekeeper). @@ -456,14 +486,7 @@ impl WalAcceptor { /// The main loop. Returns Ok(()) if either msg_rx or reply_tx got closed; /// it must mean that network thread terminated. async fn run(&mut self) -> anyhow::Result<()> { - // Register the connection and defer unregister. - // Order of the next two lines is important: we want first to remove our entry and then - // update status which depends on registered connections. 
- let _compute_conn_guard = ComputeConnectionGuard { - timeline: Arc::clone(&self.tli), - }; let walreceiver_guard = self.tli.get_walreceivers().register(self.conn_id); - self.tli.update_status_notify().await?; // After this timestamp we will stop processing AppendRequests and send a response // to the walproposer. walproposer sends at least one AppendRequest per second, @@ -529,19 +552,3 @@ impl WalAcceptor { } } } - -/// Calls update_status_notify in drop to update timeline status. -struct ComputeConnectionGuard { - timeline: Arc, -} - -impl Drop for ComputeConnectionGuard { - fn drop(&mut self) { - let tli = self.timeline.clone(); - tokio::spawn(async move { - if let Err(e) = tli.update_status_notify().await { - error!("failed to update timeline status: {}", e); - } - }); - } -} diff --git a/safekeeper/src/remove_wal.rs b/safekeeper/src/remove_wal.rs index 9dce06a886..98ce671182 100644 --- a/safekeeper/src/remove_wal.rs +++ b/safekeeper/src/remove_wal.rs @@ -7,29 +7,18 @@ use tracing::*; use crate::{GlobalTimelines, SafeKeeperConf}; -const ALLOW_INACTIVE_TIMELINES: bool = true; - -pub async fn task_main(conf: SafeKeeperConf) -> anyhow::Result<()> { +pub async fn task_main(_conf: SafeKeeperConf) -> anyhow::Result<()> { let wal_removal_interval = Duration::from_millis(5000); loop { let now = tokio::time::Instant::now(); - let mut active_timelines = 0; - let tlis = GlobalTimelines::get_all(); for tli in &tlis { - let is_active = tli.is_active().await; - if is_active { - active_timelines += 1; - } - if !ALLOW_INACTIVE_TIMELINES && !is_active { - continue; - } let ttid = tli.ttid; async { if let Err(e) = tli.maybe_persist_control_file().await { warn!("failed to persist control file: {e}"); } - if let Err(e) = tli.remove_old_wal(conf.wal_backup_enabled).await { + if let Err(e) = tli.remove_old_wal().await { error!("failed to remove WAL: {}", e); } } @@ -42,8 +31,8 @@ pub async fn task_main(conf: SafeKeeperConf) -> anyhow::Result<()> { if elapsed > wal_removal_interval { info!( - "WAL removal is too long, processed {} active timelines ({} total) in {:?}", - active_timelines, total_timelines, elapsed + "WAL removal is too long, processed {} timelines in {:?}", + total_timelines, elapsed ); } diff --git a/safekeeper/src/timeline.rs b/safekeeper/src/timeline.rs index e97247dc7c..da2e3f4538 100644 --- a/safekeeper/src/timeline.rs +++ b/safekeeper/src/timeline.rs @@ -8,13 +8,12 @@ use serde::{Deserialize, Serialize}; use tokio::fs; use std::cmp::max; +use std::ops::{Deref, DerefMut}; +use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use std::time::Duration; -use tokio::sync::{Mutex, MutexGuard}; -use tokio::{ - sync::{mpsc::Sender, watch}, - time::Instant, -}; +use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}; +use tokio::{sync::watch, time::Instant}; use tracing::*; use utils::http::error::ApiError; use utils::{ @@ -33,12 +32,13 @@ use crate::safekeeper::{ }; use crate::send_wal::WalSenders; use crate::state::{TimelineMemState, TimelinePersistentState}; +use crate::timelines_set::TimelinesSet; use crate::wal_backup::{self}; use crate::{control_file, safekeeper::UNKNOWN_SERVER_VERSION}; use crate::metrics::FullTimelineInfo; use crate::wal_storage::Storage as wal_storage_iface; -use crate::{debug_dump, wal_backup_partial, wal_storage}; +use crate::{debug_dump, timeline_manager, wal_backup_partial, wal_storage}; use crate::{GlobalTimelines, SafeKeeperConf}; /// Things safekeeper should know about timeline state on peers. 
@@ -51,8 +51,7 @@ pub struct PeerInfo { /// LSN of the last record. pub flush_lsn: Lsn, pub commit_lsn: Lsn, - /// Since which LSN safekeeper has WAL. TODO: remove this once we fill new - /// sk since backup_lsn. + /// Since which LSN safekeeper has WAL. pub local_start_lsn: Lsn, /// When info was received. Serde annotations are not very useful but make /// the code compile -- we don't rely on this field externally. @@ -97,25 +96,72 @@ impl PeersInfo { } } +pub type ReadGuardSharedState<'a> = RwLockReadGuard<'a, SharedState>; + +/// WriteGuardSharedState is a wrapper around `RwLockWriteGuard` that +/// automatically updates `watch::Sender` channels with state on drop. +pub struct WriteGuardSharedState<'a> { + tli: Arc, + guard: RwLockWriteGuard<'a, SharedState>, +} + +impl<'a> WriteGuardSharedState<'a> { + fn new(tli: Arc, guard: RwLockWriteGuard<'a, SharedState>) -> Self { + WriteGuardSharedState { tli, guard } + } +} + +impl<'a> Deref for WriteGuardSharedState<'a> { + type Target = SharedState; + + fn deref(&self) -> &Self::Target { + &self.guard + } +} + +impl<'a> DerefMut for WriteGuardSharedState<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.guard + } +} + +impl<'a> Drop for WriteGuardSharedState<'a> { + fn drop(&mut self) { + let term_flush_lsn = TermLsn::from((self.guard.sk.get_term(), self.guard.sk.flush_lsn())); + let commit_lsn = self.guard.sk.state.inmem.commit_lsn; + + let _ = self.tli.term_flush_lsn_watch_tx.send_if_modified(|old| { + if *old != term_flush_lsn { + *old = term_flush_lsn; + true + } else { + false + } + }); + + let _ = self.tli.commit_lsn_watch_tx.send_if_modified(|old| { + if *old != commit_lsn { + *old = commit_lsn; + true + } else { + false + } + }); + + // send notification about shared state update + self.tli.shared_state_version_tx.send_modify(|old| { + *old += 1; + }); + } +} + /// Shared state associated with database instance pub struct SharedState { /// Safekeeper object - sk: SafeKeeper, + pub(crate) sk: SafeKeeper, /// In memory list containing state of peers sent in latest messages from them. - peers_info: PeersInfo, - /// True when WAL backup launcher oversees the timeline, making sure WAL is - /// offloaded, allows to bother launcher less. - wal_backup_active: bool, - /// True whenever there is at least some pending activity on timeline: live - /// compute connection, pageserver is not caughtup (it must have latest WAL - /// for new compute start) or WAL backuping is not finished. Practically it - /// means safekeepers broadcast info to peers about the timeline, old WAL is - /// trimmed. - /// - /// TODO: it might be better to remove tli completely from GlobalTimelines - /// when tli is inactive instead of having this flag. - active: bool, - last_removed_segno: XLogSegNo, + pub(crate) peers_info: PeersInfo, + pub(crate) last_removed_segno: XLogSegNo, } impl SharedState { @@ -152,8 +198,6 @@ impl SharedState { Ok(Self { sk, peers_info: PeersInfo(vec![]), - wal_backup_active: false, - active: false, last_removed_segno: 0, }) } @@ -171,75 +215,10 @@ impl SharedState { Ok(Self { sk: SafeKeeper::new(control_store, wal_store, conf.my_id)?, peers_info: PeersInfo(vec![]), - wal_backup_active: false, - active: false, last_removed_segno: 0, }) } - fn is_active(&self, num_computes: usize) -> bool { - self.is_wal_backup_required(num_computes) - // FIXME: add tracking of relevant pageservers and check them here individually, - // otherwise migration won't work (we suspend too early). 
- || self.sk.state.inmem.remote_consistent_lsn < self.sk.state.inmem.commit_lsn - } - - /// Mark timeline active/inactive and return whether s3 offloading requires - /// start/stop action. If timeline is deactivated, control file is persisted - /// as maintenance task does that only for active timelines. - async fn update_status(&mut self, num_computes: usize, ttid: TenantTimelineId) -> bool { - let is_active = self.is_active(num_computes); - if self.active != is_active { - info!( - "timeline {} active={} now, remote_consistent_lsn={}, commit_lsn={}", - ttid, - is_active, - self.sk.state.inmem.remote_consistent_lsn, - self.sk.state.inmem.commit_lsn - ); - if !is_active { - if let Err(e) = self.sk.state.flush().await { - warn!("control file save in update_status failed: {:?}", e); - } - } - } - self.active = is_active; - self.is_wal_backup_action_pending(num_computes) - } - - /// Should we run s3 offloading in current state? - fn is_wal_backup_required(&self, num_computes: usize) -> bool { - let seg_size = self.get_wal_seg_size(); - num_computes > 0 || - // Currently only the whole segment is offloaded, so compare segment numbers. - (self.sk.state.inmem.commit_lsn.segment_number(seg_size) > - self.sk.state.inmem.backup_lsn.segment_number(seg_size)) - } - - /// Is current state of s3 offloading is not what it ought to be? - fn is_wal_backup_action_pending(&self, num_computes: usize) -> bool { - let res = self.wal_backup_active != self.is_wal_backup_required(num_computes); - if res { - let action_pending = if self.is_wal_backup_required(num_computes) { - "start" - } else { - "stop" - }; - trace!( - "timeline {} s3 offloading action {} pending: num_computes={}, commit_lsn={}, backup_lsn={}", - self.sk.state.timeline_id, action_pending, num_computes, self.sk.state.inmem.commit_lsn, self.sk.state.inmem.backup_lsn - ); - } - res - } - - /// Returns whether s3 offloading is required and sets current status as - /// matching. - fn wal_backup_attend(&mut self, num_computes: usize) -> bool { - self.wal_backup_active = self.is_wal_backup_required(num_computes); - self.wal_backup_active - } - fn get_wal_seg_size(&self) -> usize { self.sk.state.server.wal_seg_size as usize } @@ -278,7 +257,7 @@ impl SharedState { /// Get our latest view of alive peers status on the timeline. /// We pass our own info through the broker as well, so when we don't have connection /// to the broker returned vec is empty. - fn get_peers(&self, heartbeat_timeout: Duration) -> Vec { + pub(crate) fn get_peers(&self, heartbeat_timeout: Duration) -> Vec { let now = Instant::now(); self.peers_info .0 @@ -294,18 +273,13 @@ impl SharedState { /// offloading. /// While it is safe to use inmem values for determining horizon, /// we use persistent to make possible normal states less surprising. 
- fn get_horizon_segno( - &self, - wal_backup_enabled: bool, - extra_horizon_lsn: Option, - ) -> XLogSegNo { + fn get_horizon_segno(&self, extra_horizon_lsn: Option) -> XLogSegNo { let state = &self.sk.state; use std::cmp::min; let mut horizon_lsn = min(state.remote_consistent_lsn, state.peer_horizon_lsn); - if wal_backup_enabled { - horizon_lsn = min(horizon_lsn, state.backup_lsn); - } + // we don't want to remove WAL that is not yet offloaded to s3 + horizon_lsn = min(horizon_lsn, state.backup_lsn); if let Some(extra_horizon_lsn) = extra_horizon_lsn { horizon_lsn = min(horizon_lsn, extra_horizon_lsn); } @@ -346,11 +320,6 @@ impl From for ApiError { pub struct Timeline { pub ttid: TenantTimelineId, - /// Sending here asks for wal backup launcher attention (start/stop - /// offloading). Sending ttid instead of concrete command allows to do - /// sending without timeline lock. - pub wal_backup_launcher_tx: Sender, - /// Used to broadcast commit_lsn updates to all background jobs. commit_lsn_watch_tx: watch::Sender, commit_lsn_watch_rx: watch::Receiver, @@ -362,10 +331,14 @@ pub struct Timeline { term_flush_lsn_watch_tx: watch::Sender, term_flush_lsn_watch_rx: watch::Receiver, + /// Broadcasts shared state updates. + shared_state_version_tx: watch::Sender, + shared_state_version_rx: watch::Receiver, + /// Safekeeper and other state, that should remain consistent and /// synchronized with the disk. This is tokio mutex as we write WAL to disk /// while holding it, ensuring that consensus checks are in order. - mutex: Mutex, + mutex: RwLock, walsenders: Arc, walreceivers: Arc, @@ -384,15 +357,15 @@ pub struct Timeline { /// with different speed. // TODO: add `Arc` here instead of adding each field separately. walsenders_keep_horizon: bool, + + // timeline_manager controlled state + pub(crate) broker_active: AtomicBool, + pub(crate) wal_backup_active: AtomicBool, } impl Timeline { /// Load existing timeline from disk. 
- pub fn load_timeline( - conf: &SafeKeeperConf, - ttid: TenantTimelineId, - wal_backup_launcher_tx: Sender, - ) -> Result { + pub fn load_timeline(conf: &SafeKeeperConf, ttid: TenantTimelineId) -> Result { let _enter = info_span!("load_timeline", timeline = %ttid.timeline_id).entered(); let shared_state = SharedState::restore(conf, &ttid)?; @@ -402,23 +375,27 @@ impl Timeline { shared_state.sk.get_term(), shared_state.sk.flush_lsn(), ))); + let (shared_state_version_tx, shared_state_version_rx) = watch::channel(0); let (cancellation_tx, cancellation_rx) = watch::channel(false); let walreceivers = WalReceivers::new(); Ok(Timeline { ttid, - wal_backup_launcher_tx, commit_lsn_watch_tx, commit_lsn_watch_rx, term_flush_lsn_watch_tx, term_flush_lsn_watch_rx, - mutex: Mutex::new(shared_state), + shared_state_version_tx, + shared_state_version_rx, + mutex: RwLock::new(shared_state), walsenders: WalSenders::new(walreceivers.clone()), walreceivers, cancellation_rx, cancellation_tx, timeline_dir: conf.timeline_dir(&ttid), walsenders_keep_horizon: conf.walsenders_keep_horizon, + broker_active: AtomicBool::new(false), + wal_backup_active: AtomicBool::new(false), }) } @@ -426,7 +403,6 @@ impl Timeline { pub fn create_empty( conf: &SafeKeeperConf, ttid: TenantTimelineId, - wal_backup_launcher_tx: Sender, server_info: ServerInfo, commit_lsn: Lsn, local_start_lsn: Lsn, @@ -434,25 +410,30 @@ impl Timeline { let (commit_lsn_watch_tx, commit_lsn_watch_rx) = watch::channel(Lsn::INVALID); let (term_flush_lsn_watch_tx, term_flush_lsn_watch_rx) = watch::channel(TermLsn::from((INVALID_TERM, Lsn::INVALID))); + let (shared_state_version_tx, shared_state_version_rx) = watch::channel(0); let (cancellation_tx, cancellation_rx) = watch::channel(false); + let state = TimelinePersistentState::new(&ttid, server_info, vec![], commit_lsn, local_start_lsn); let walreceivers = WalReceivers::new(); Ok(Timeline { ttid, - wal_backup_launcher_tx, commit_lsn_watch_tx, commit_lsn_watch_rx, term_flush_lsn_watch_tx, term_flush_lsn_watch_rx, - mutex: Mutex::new(SharedState::create_new(conf, &ttid, state)?), + shared_state_version_tx, + shared_state_version_rx, + mutex: RwLock::new(SharedState::create_new(conf, &ttid, state)?), walsenders: WalSenders::new(walreceivers.clone()), walreceivers, cancellation_rx, cancellation_tx, timeline_dir: conf.timeline_dir(&ttid), walsenders_keep_horizon: conf.walsenders_keep_horizon, + broker_active: AtomicBool::new(false), + wal_backup_active: AtomicBool::new(false), }) } @@ -463,8 +444,9 @@ impl Timeline { /// and state on disk should remain unchanged. pub async fn init_new( self: &Arc, - shared_state: &mut MutexGuard<'_, SharedState>, + shared_state: &mut WriteGuardSharedState<'_>, conf: &SafeKeeperConf, + broker_active_set: Arc, ) -> Result<()> { match fs::metadata(&self.timeline_dir).await { Ok(_) => { @@ -495,16 +477,29 @@ impl Timeline { return Err(e); } - self.bootstrap(conf); + self.bootstrap(conf, broker_active_set); Ok(()) } - /// Bootstrap new or existing timeline starting background stasks. - pub fn bootstrap(self: &Arc, conf: &SafeKeeperConf) { + /// Bootstrap new or existing timeline starting background tasks. + pub fn bootstrap( + self: &Arc, + conf: &SafeKeeperConf, + broker_active_set: Arc, + ) { + // Start manager task which will monitor timeline state and update + // background tasks. + tokio::spawn(timeline_manager::main_task( + self.clone(), + conf.clone(), + broker_active_set, + )); + // Start recovery task which always runs on the timeline. 
if conf.peer_recovery_enabled { tokio::spawn(recovery_main(self.clone(), conf.clone())); } + // TODO: migrate to timeline_manager if conf.is_wal_backup_enabled() && conf.partial_backup_enabled { tokio::spawn(wal_backup_partial::main_task(self.clone(), conf.clone())); } @@ -517,10 +512,9 @@ impl Timeline { /// deletion API endpoint is retriable. pub async fn delete( &self, - shared_state: &mut MutexGuard<'_, SharedState>, + shared_state: &mut WriteGuardSharedState<'_>, only_local: bool, - ) -> Result<(bool, bool)> { - let was_active = shared_state.active; + ) -> Result { self.cancel(shared_state); // TODO: It's better to wait for s3 offloader termination before @@ -534,18 +528,12 @@ impl Timeline { wal_backup::delete_timeline(&self.ttid).await?; } let dir_existed = delete_dir(&self.timeline_dir).await?; - Ok((dir_existed, was_active)) + Ok(dir_existed) } /// Cancel timeline to prevent further usage. Background tasks will stop /// eventually after receiving cancellation signal. - /// - /// Note that we can't notify backup launcher here while holding - /// shared_state lock, as this is a potential deadlock: caller is - /// responsible for that. Generally we should probably make WAL backup tasks - /// to shut down on their own, checking once in a while whether it is the - /// time. - fn cancel(&self, shared_state: &mut MutexGuard<'_, SharedState>) { + fn cancel(&self, shared_state: &mut WriteGuardSharedState<'_>) { info!("timeline {} is cancelled", self.ttid); let _ = self.cancellation_tx.send(true); // Close associated FDs. Nobody will be able to touch timeline data once @@ -569,30 +557,12 @@ impl Timeline { } /// Take a writing mutual exclusive lock on timeline shared_state. - pub async fn write_shared_state(&self) -> MutexGuard { - self.mutex.lock().await + pub async fn write_shared_state<'a>(self: &'a Arc) -> WriteGuardSharedState<'a> { + WriteGuardSharedState::new(self.clone(), self.mutex.write().await) } - async fn update_status(&self, shared_state: &mut SharedState) -> bool { - shared_state - .update_status(self.walreceivers.get_num(), self.ttid) - .await - } - - /// Update timeline status and kick wal backup launcher to stop/start offloading if needed. - pub async fn update_status_notify(&self) -> Result<()> { - if self.is_cancelled() { - bail!(TimelineError::Cancelled(self.ttid)); - } - let is_wal_backup_action_pending: bool = { - let mut shared_state = self.write_shared_state().await; - self.update_status(&mut shared_state).await - }; - if is_wal_backup_action_pending { - // Can fail only if channel to a static thread got closed, which is not normal at all. - self.wal_backup_launcher_tx.send(self.ttid).await?; - } - Ok(()) + pub async fn read_shared_state(&self) -> ReadGuardSharedState { + self.mutex.read().await } /// Returns true if walsender should stop sending WAL to pageserver. We @@ -604,7 +574,7 @@ impl Timeline { if self.is_cancelled() { return true; } - let shared_state = self.write_shared_state().await; + let shared_state = self.read_shared_state().await; if self.walreceivers.get_num() == 0 { return shared_state.sk.state.inmem.commit_lsn == Lsn(0) || // no data at all yet reported_remote_consistent_lsn >= shared_state.sk.state.inmem.commit_lsn; @@ -612,9 +582,9 @@ impl Timeline { false } - /// Ensure taht current term is t, erroring otherwise, and lock the state. - pub async fn acquire_term(&self, t: Term) -> Result> { - let ss = self.write_shared_state().await; + /// Ensure that current term is t, erroring otherwise, and lock the state. 
+ pub async fn acquire_term(&self, t: Term) -> Result { + let ss = self.read_shared_state().await; if ss.sk.state.acceptor_state.term != t { bail!( "failed to acquire term {}, current term {}", @@ -625,18 +595,6 @@ impl Timeline { Ok(ss) } - /// Returns whether s3 offloading is required and sets current status as - /// matching it. - pub async fn wal_backup_attend(&self) -> bool { - if self.is_cancelled() { - return false; - } - - self.write_shared_state() - .await - .wal_backup_attend(self.walreceivers.get_num()) - } - /// Returns commit_lsn watch channel. pub fn get_commit_lsn_watch_rx(&self) -> watch::Receiver { self.commit_lsn_watch_rx.clone() @@ -647,9 +605,14 @@ impl Timeline { self.term_flush_lsn_watch_rx.clone() } + /// Returns watch channel for SharedState update version. + pub fn get_state_version_rx(&self) -> watch::Receiver { + self.shared_state_version_rx.clone() + } + /// Pass arrived message to the safekeeper. pub async fn process_msg( - &self, + self: &Arc, msg: &ProposerAcceptorMessage, ) -> Result> { if self.is_cancelled() { @@ -657,8 +620,6 @@ impl Timeline { } let mut rmsg: Option; - let commit_lsn: Lsn; - let term_flush_lsn: TermLsn; { let mut shared_state = self.write_shared_state().await; rmsg = shared_state.sk.process_msg(msg).await?; @@ -667,43 +628,28 @@ impl Timeline { if let Some(AcceptorProposerMessage::AppendResponse(ref mut resp)) = rmsg { resp.hs_feedback = self.walsenders.get_hotstandby().hs_feedback; } - - commit_lsn = shared_state.sk.state.inmem.commit_lsn; - term_flush_lsn = - TermLsn::from((shared_state.sk.get_term(), shared_state.sk.flush_lsn())); } - self.term_flush_lsn_watch_tx.send(term_flush_lsn)?; - self.commit_lsn_watch_tx.send(commit_lsn)?; Ok(rmsg) } /// Returns wal_seg_size. pub async fn get_wal_seg_size(&self) -> usize { - self.write_shared_state().await.get_wal_seg_size() - } - - /// Returns true only if the timeline is loaded and active. - pub async fn is_active(&self) -> bool { - if self.is_cancelled() { - return false; - } - - self.write_shared_state().await.active + self.read_shared_state().await.get_wal_seg_size() } /// Returns state of the timeline. pub async fn get_state(&self) -> (TimelineMemState, TimelinePersistentState) { - let state = self.write_shared_state().await; + let state = self.read_shared_state().await; (state.sk.state.inmem.clone(), state.sk.state.clone()) } /// Returns latest backup_lsn. pub async fn get_wal_backup_lsn(&self) -> Lsn { - self.write_shared_state().await.sk.state.inmem.backup_lsn + self.read_shared_state().await.sk.state.inmem.backup_lsn } /// Sets backup_lsn to the given value. - pub async fn set_wal_backup_lsn(&self, backup_lsn: Lsn) -> Result<()> { + pub async fn set_wal_backup_lsn(self: &Arc, backup_lsn: Lsn) -> Result<()> { if self.is_cancelled() { bail!(TimelineError::Cancelled(self.ttid)); } @@ -717,40 +663,34 @@ impl Timeline { /// Get safekeeper info for broadcasting to broker and other peers. pub async fn get_safekeeper_info(&self, conf: &SafeKeeperConf) -> SafekeeperTimelineInfo { - let shared_state = self.write_shared_state().await; let standby_apply_lsn = self.walsenders.get_hotstandby().reply.apply_lsn; + let shared_state = self.read_shared_state().await; shared_state.get_safekeeper_info(&self.ttid, conf, standby_apply_lsn) } /// Update timeline state with peer safekeeper data. 
- pub async fn record_safekeeper_info(&self, sk_info: SafekeeperTimelineInfo) -> Result<()> { - let is_wal_backup_action_pending: bool; - let commit_lsn: Lsn; + pub async fn record_safekeeper_info( + self: &Arc, + sk_info: SafekeeperTimelineInfo, + ) -> Result<()> { { let mut shared_state = self.write_shared_state().await; shared_state.sk.record_safekeeper_info(&sk_info).await?; let peer_info = PeerInfo::from_sk_info(&sk_info, Instant::now()); shared_state.peers_info.upsert(&peer_info); - is_wal_backup_action_pending = self.update_status(&mut shared_state).await; - commit_lsn = shared_state.sk.state.inmem.commit_lsn; - } - self.commit_lsn_watch_tx.send(commit_lsn)?; - // Wake up wal backup launcher, if it is time to stop the offloading. - if is_wal_backup_action_pending { - self.wal_backup_launcher_tx.send(self.ttid).await?; } Ok(()) } /// Update in memory remote consistent lsn. - pub async fn update_remote_consistent_lsn(&self, candidate: Lsn) { + pub async fn update_remote_consistent_lsn(self: &Arc, candidate: Lsn) { let mut shared_state = self.write_shared_state().await; shared_state.sk.state.inmem.remote_consistent_lsn = max(shared_state.sk.state.inmem.remote_consistent_lsn, candidate); } pub async fn get_peers(&self, conf: &SafeKeeperConf) -> Vec { - let shared_state = self.write_shared_state().await; + let shared_state = self.read_shared_state().await; shared_state.get_peers(conf.heartbeat_timeout) } @@ -772,7 +712,7 @@ impl Timeline { /// depending on assembled quorum (e.g. classic picture 8 from Raft paper). /// Thus we don't try to predict it here. pub async fn recovery_needed(&self, heartbeat_timeout: Duration) -> RecoveryNeededInfo { - let ss = self.write_shared_state().await; + let ss = self.read_shared_state().await; let term = ss.sk.state.acceptor_state.term; let last_log_term = ss.sk.get_epoch(); let flush_lsn = ss.sk.flush_lsn(); @@ -843,12 +783,12 @@ impl Timeline { /// Returns flush_lsn. pub async fn get_flush_lsn(&self) -> Lsn { - self.write_shared_state().await.sk.wal_store.flush_lsn() + self.read_shared_state().await.sk.wal_store.flush_lsn() } /// Delete WAL segments from disk that are no longer needed. This is determined /// based on pageserver's remote_consistent_lsn and local backup_lsn/peer_lsn. - pub async fn remove_old_wal(&self, wal_backup_enabled: bool) -> Result<()> { + pub async fn remove_old_wal(self: &Arc) -> Result<()> { if self.is_cancelled() { bail!(TimelineError::Cancelled(self.ttid)); } @@ -864,9 +804,8 @@ impl Timeline { let horizon_segno: XLogSegNo; let remover = { - let shared_state = self.write_shared_state().await; - horizon_segno = - shared_state.get_horizon_segno(wal_backup_enabled, replication_horizon_lsn); + let shared_state = self.read_shared_state().await; + horizon_segno = shared_state.get_horizon_segno(replication_horizon_lsn); if horizon_segno <= 1 || horizon_segno <= shared_state.last_removed_segno { return Ok(()); // nothing to do } @@ -888,7 +827,7 @@ impl Timeline { /// passed after the last save. This helps to keep remote_consistent_lsn up /// to date so that storage nodes restart doesn't cause many pageserver -> /// safekeeper reconnections. - pub async fn maybe_persist_control_file(&self) -> Result<()> { + pub async fn maybe_persist_control_file(self: &Arc) -> Result<()> { self.write_shared_state() .await .sk @@ -896,38 +835,33 @@ impl Timeline { .await } - /// Gather timeline data for metrics. If the timeline is not active, returns - /// None, we do not collect these. + /// Gather timeline data for metrics. 
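+    /// Returns `None` only if the timeline is cancelled.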
pub async fn info_for_metrics(&self) -> Option { if self.is_cancelled() { return None; } let (ps_feedback_count, last_ps_feedback) = self.walsenders.get_ps_feedback_stats(); - let state = self.write_shared_state().await; - if state.active { - Some(FullTimelineInfo { - ttid: self.ttid, - ps_feedback_count, - last_ps_feedback, - wal_backup_active: state.wal_backup_active, - timeline_is_active: state.active, - num_computes: self.walreceivers.get_num() as u32, - last_removed_segno: state.last_removed_segno, - epoch_start_lsn: state.sk.epoch_start_lsn, - mem_state: state.sk.state.inmem.clone(), - persisted_state: state.sk.state.clone(), - flush_lsn: state.sk.wal_store.flush_lsn(), - wal_storage: state.sk.wal_store.get_metrics(), - }) - } else { - None - } + let state = self.read_shared_state().await; + Some(FullTimelineInfo { + ttid: self.ttid, + ps_feedback_count, + last_ps_feedback, + wal_backup_active: self.wal_backup_active.load(Ordering::Relaxed), + timeline_is_active: self.broker_active.load(Ordering::Relaxed), + num_computes: self.walreceivers.get_num() as u32, + last_removed_segno: state.last_removed_segno, + epoch_start_lsn: state.sk.epoch_start_lsn, + mem_state: state.sk.state.inmem.clone(), + persisted_state: state.sk.state.clone(), + flush_lsn: state.sk.wal_store.flush_lsn(), + wal_storage: state.sk.wal_store.get_metrics(), + }) } /// Returns in-memory timeline state to build a full debug dump. pub async fn memory_dump(&self) -> debug_dump::Memory { - let state = self.write_shared_state().await; + let state = self.read_shared_state().await; let (write_lsn, write_record_lsn, flush_lsn, file_open) = state.sk.wal_store.internal_state(); @@ -936,8 +870,8 @@ impl Timeline { is_cancelled: self.is_cancelled(), peers_info_len: state.peers_info.0.len(), walsenders: self.walsenders.get_all(), - wal_backup_active: state.wal_backup_active, - active: state.active, + wal_backup_active: self.wal_backup_active.load(Ordering::Relaxed), + active: self.broker_active.load(Ordering::Relaxed), num_computes: self.walreceivers.get_num() as u32, last_removed_segno: state.last_removed_segno, epoch_start_lsn: state.sk.epoch_start_lsn, @@ -951,7 +885,7 @@ impl Timeline { /// Apply a function to the control file state and persist it. pub async fn map_control_file( - &self, + self: &Arc, f: impl FnOnce(&mut TimelinePersistentState) -> Result, ) -> Result { let mut state = self.write_shared_state().await; diff --git a/safekeeper/src/timeline_manager.rs b/safekeeper/src/timeline_manager.rs new file mode 100644 index 0000000000..52ad915065 --- /dev/null +++ b/safekeeper/src/timeline_manager.rs @@ -0,0 +1,153 @@ +//! The timeline manager task is responsible for managing the timeline's background tasks. +//! It is spawned alongside each timeline and exits when the timeline is deleted. +//! It watches for changes in the timeline state and decides when to spawn or kill background tasks. +//! It also can manage some reactive state, like should the timeline be active for broker pushes or not. 
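+//!
+//! It wakes up on timeline cancellation, on shared state version bumps
+//! (throttled to at most one wakeup per `REFRESH_INTERVAL`), and on changes
+//! to the number of connected walreceivers.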
+ +use std::{sync::Arc, time::Duration}; + +use tracing::{info, instrument, warn}; +use utils::lsn::Lsn; + +use crate::{ + metrics::{MANAGER_ACTIVE_CHANGES, MANAGER_ITERATIONS_TOTAL}, + timeline::{PeerInfo, ReadGuardSharedState, Timeline}, + timelines_set::TimelinesSet, + wal_backup::{self, WalBackupTaskHandle}, + SafeKeeperConf, +}; + +pub struct StateSnapshot { + pub commit_lsn: Lsn, + pub backup_lsn: Lsn, + pub remote_consistent_lsn: Lsn, + pub peers: Vec, +} + +impl StateSnapshot { + /// Create a new snapshot of the timeline state. + fn new(read_guard: ReadGuardSharedState, heartbeat_timeout: Duration) -> Self { + Self { + commit_lsn: read_guard.sk.state.inmem.commit_lsn, + backup_lsn: read_guard.sk.state.inmem.backup_lsn, + remote_consistent_lsn: read_guard.sk.state.inmem.remote_consistent_lsn, + peers: read_guard.get_peers(heartbeat_timeout), + } + } +} + +/// Control how often the manager task should wake up to check updates. +/// There is no need to check for updates more often than this. +const REFRESH_INTERVAL: Duration = Duration::from_millis(300); + +/// This task gets spawned alongside each timeline and is responsible for managing the timeline's +/// background tasks. +#[instrument(name = "manager", skip_all, fields(ttid = %tli.ttid))] +pub async fn main_task( + tli: Arc, + conf: SafeKeeperConf, + broker_active_set: Arc, +) { + let mut cancellation_rx = match tli.get_cancellation_rx() { + Ok(rx) => rx, + Err(_) => { + info!("timeline canceled during task start"); + return; + } + }; + + scopeguard::defer! { + if tli.is_cancelled() { + info!("manager task finished"); + } else { + warn!("manager task finished prematurely"); + } + }; + + // sets whether timeline is active for broker pushes or not + let mut tli_broker_active = broker_active_set.guard(tli.clone()); + + let ttid = tli.ttid; + let wal_seg_size = tli.get_wal_seg_size().await; + let heartbeat_timeout = conf.heartbeat_timeout; + + let mut state_version_rx = tli.get_state_version_rx(); + + let walreceivers = tli.get_walreceivers(); + let mut num_computes_rx = walreceivers.get_num_rx(); + + // list of background tasks + let mut backup_task: Option = None; + + let last_state = 'outer: loop { + MANAGER_ITERATIONS_TOTAL.inc(); + + let state_snapshot = StateSnapshot::new(tli.read_shared_state().await, heartbeat_timeout); + let num_computes = *num_computes_rx.borrow(); + + let is_wal_backup_required = + wal_backup::is_wal_backup_required(wal_seg_size, num_computes, &state_snapshot); + + if conf.is_wal_backup_enabled() { + wal_backup::update_task( + &conf, + ttid, + is_wal_backup_required, + &state_snapshot, + &mut backup_task, + ) + .await; + } + + let is_active = is_wal_backup_required + || num_computes > 0 + || state_snapshot.remote_consistent_lsn < state_snapshot.commit_lsn; + + // update the broker timeline set + if tli_broker_active.set(is_active) { + // write log if state has changed + info!( + "timeline active={} now, remote_consistent_lsn={}, commit_lsn={}", + is_active, state_snapshot.remote_consistent_lsn, state_snapshot.commit_lsn, + ); + + MANAGER_ACTIVE_CHANGES.inc(); + + if !is_active { + // TODO: maybe use tokio::spawn? + if let Err(e) = tli.maybe_persist_control_file().await { + warn!("control file save in update_status failed: {:?}", e); + } + } + } + + // update the state in Arc + tli.wal_backup_active + .store(backup_task.is_some(), std::sync::atomic::Ordering::Relaxed); + tli.broker_active + .store(is_active, std::sync::atomic::Ordering::Relaxed); + + // wait until something changes. 
tx channels are stored under Arc, so they will not be + // dropped until the manager task is finished. + tokio::select! { + _ = cancellation_rx.changed() => { + // timeline was deleted + break 'outer state_snapshot; + } + _ = async { + // don't wake up on every state change, but at most every REFRESH_INTERVAL + tokio::time::sleep(REFRESH_INTERVAL).await; + let _ = state_version_rx.changed().await; + } => { + // state was updated + } + _ = num_computes_rx.changed() => { + // number of connected computes was updated + } + } + }; + + // shutdown background tasks + if conf.is_wal_backup_enabled() { + wal_backup::update_task(&conf, ttid, false, &last_state, &mut backup_task).await; + } +} diff --git a/safekeeper/src/timelines_global_map.rs b/safekeeper/src/timelines_global_map.rs index 079e706ff8..8d37bd6371 100644 --- a/safekeeper/src/timelines_global_map.rs +++ b/safekeeper/src/timelines_global_map.rs @@ -4,6 +4,7 @@ use crate::safekeeper::ServerInfo; use crate::timeline::{Timeline, TimelineError}; +use crate::timelines_set::TimelinesSet; use crate::SafeKeeperConf; use anyhow::{bail, Context, Result}; use camino::Utf8PathBuf; @@ -11,16 +12,16 @@ use once_cell::sync::Lazy; use serde::Serialize; use std::collections::HashMap; use std::str::FromStr; +use std::sync::atomic::Ordering; use std::sync::{Arc, Mutex}; -use tokio::sync::mpsc::Sender; use tracing::*; use utils::id::{TenantId, TenantTimelineId, TimelineId}; use utils::lsn::Lsn; struct GlobalTimelinesState { timelines: HashMap>, - wal_backup_launcher_tx: Option>, conf: Option, + broker_active_set: Arc, load_lock: Arc>, } @@ -36,11 +37,8 @@ impl GlobalTimelinesState { } /// Get dependencies for a timeline constructor. - fn get_dependencies(&self) -> (SafeKeeperConf, Sender) { - ( - self.get_conf().clone(), - self.wal_backup_launcher_tx.as_ref().unwrap().clone(), - ) + fn get_dependencies(&self) -> (SafeKeeperConf, Arc) { + (self.get_conf().clone(), self.broker_active_set.clone()) } /// Insert timeline into the map. Returns error if timeline with the same id already exists. @@ -65,8 +63,8 @@ impl GlobalTimelinesState { static TIMELINES_STATE: Lazy> = Lazy::new(|| { Mutex::new(GlobalTimelinesState { timelines: HashMap::new(), - wal_backup_launcher_tx: None, conf: None, + broker_active_set: Arc::new(TimelinesSet::default()), load_lock: Arc::new(tokio::sync::Mutex::new(TimelineLoadLock)), }) }); @@ -76,16 +74,11 @@ pub struct GlobalTimelines; impl GlobalTimelines { /// Inject dependencies needed for the timeline constructors and load all timelines to memory. - pub async fn init( - conf: SafeKeeperConf, - wal_backup_launcher_tx: Sender, - ) -> Result<()> { + pub async fn init(conf: SafeKeeperConf) -> Result<()> { // clippy isn't smart enough to understand that drop(state) releases the // lock, so use explicit block let tenants_dir = { let mut state = TIMELINES_STATE.lock().unwrap(); - assert!(state.wal_backup_launcher_tx.is_none()); - state.wal_backup_launcher_tx = Some(wal_backup_launcher_tx); state.conf = Some(conf); // Iterate through all directories and load tenants for all directories @@ -129,12 +122,9 @@ impl GlobalTimelines { /// this function is called during init when nothing else is running, so /// this is fine. 
async fn load_tenant_timelines(tenant_id: TenantId) -> Result<()> { - let (conf, wal_backup_launcher_tx) = { + let (conf, broker_active_set) = { let state = TIMELINES_STATE.lock().unwrap(); - ( - state.get_conf().clone(), - state.wal_backup_launcher_tx.as_ref().unwrap().clone(), - ) + state.get_dependencies() }; let timelines_dir = conf.tenant_dir(&tenant_id); @@ -147,7 +137,7 @@ impl GlobalTimelines { TimelineId::from_str(timeline_dir_entry.file_name().to_str().unwrap_or("")) { let ttid = TenantTimelineId::new(tenant_id, timeline_id); - match Timeline::load_timeline(&conf, ttid, wal_backup_launcher_tx.clone()) { + match Timeline::load_timeline(&conf, ttid) { Ok(timeline) => { let tli = Arc::new(timeline); TIMELINES_STATE @@ -155,8 +145,7 @@ impl GlobalTimelines { .unwrap() .timelines .insert(ttid, tli.clone()); - tli.bootstrap(&conf); - tli.update_status_notify().await.unwrap(); + tli.bootstrap(&conf, broker_active_set.clone()); } // If we can't load a timeline, it's most likely because of a corrupted // directory. We will log an error and won't allow to delete/recreate @@ -189,9 +178,9 @@ impl GlobalTimelines { _guard: &tokio::sync::MutexGuard<'a, TimelineLoadLock>, ttid: TenantTimelineId, ) -> Result> { - let (conf, wal_backup_launcher_tx) = TIMELINES_STATE.lock().unwrap().get_dependencies(); + let (conf, broker_active_set) = TIMELINES_STATE.lock().unwrap().get_dependencies(); - match Timeline::load_timeline(&conf, ttid, wal_backup_launcher_tx) { + match Timeline::load_timeline(&conf, ttid) { Ok(timeline) => { let tli = Arc::new(timeline); @@ -202,7 +191,7 @@ impl GlobalTimelines { .timelines .insert(ttid, tli.clone()); - tli.bootstrap(&conf); + tli.bootstrap(&conf, broker_active_set); Ok(tli) } @@ -221,6 +210,10 @@ impl GlobalTimelines { TIMELINES_STATE.lock().unwrap().get_conf().clone() } + pub fn get_global_broker_active_set() -> Arc { + TIMELINES_STATE.lock().unwrap().broker_active_set.clone() + } + /// Create a new timeline with the given id. If the timeline already exists, returns /// an existing timeline. pub async fn create( @@ -229,7 +222,7 @@ impl GlobalTimelines { commit_lsn: Lsn, local_start_lsn: Lsn, ) -> Result> { - let (conf, wal_backup_launcher_tx) = { + let (conf, broker_active_set) = { let state = TIMELINES_STATE.lock().unwrap(); if let Ok(timeline) = state.get(&ttid) { // Timeline already exists, return it. @@ -243,7 +236,6 @@ impl GlobalTimelines { let timeline = Arc::new(Timeline::create_empty( &conf, ttid, - wal_backup_launcher_tx, server_info, commit_lsn, local_start_lsn, @@ -264,7 +256,10 @@ impl GlobalTimelines { // Write the new timeline to the disk and start background workers. // Bootstrap is transactional, so if it fails, the timeline will be deleted, // and the state on disk should remain unchanged. - if let Err(e) = timeline.init_new(&mut shared_state, &conf).await { + if let Err(e) = timeline + .init_new(&mut shared_state, &conf, broker_active_set) + .await + { // Note: the most likely reason for init failure is that the timeline // directory already exists on disk. This happens when timeline is corrupted // and wasn't loaded from disk on startup because of that. We want to preserve @@ -281,8 +276,6 @@ impl GlobalTimelines { // We are done with bootstrap, release the lock, return the timeline. 
// {} block forces release before .await } - timeline.update_status_notify().await?; - timeline.wal_backup_launcher_tx.send(timeline.ttid).await?; Ok(timeline) } @@ -335,12 +328,13 @@ impl GlobalTimelines { let tli_res = TIMELINES_STATE.lock().unwrap().get(ttid); match tli_res { Ok(timeline) => { + let was_active = timeline.broker_active.load(Ordering::Relaxed); + // Take a lock and finish the deletion holding this mutex. let mut shared_state = timeline.write_shared_state().await; info!("deleting timeline {}, only_local={}", ttid, only_local); - let (dir_existed, was_active) = - timeline.delete(&mut shared_state, only_local).await?; + let dir_existed = timeline.delete(&mut shared_state, only_local).await?; // Remove timeline from the map. // FIXME: re-enable it once we fix the issue with recreation of deleted timelines @@ -349,7 +343,7 @@ impl GlobalTimelines { Ok(TimelineDeleteForceResult { dir_existed, - was_active, + was_active, // TODO: we probably should remove this field }) } Err(_) => { diff --git a/safekeeper/src/timelines_set.rs b/safekeeper/src/timelines_set.rs new file mode 100644 index 0000000000..ea8e23bb72 --- /dev/null +++ b/safekeeper/src/timelines_set.rs @@ -0,0 +1,90 @@ +use std::{collections::HashMap, sync::Arc}; + +use utils::id::TenantTimelineId; + +use crate::timeline::Timeline; + +/// Set of timelines, supports operations: +/// - add timeline +/// - remove timeline +/// - clone the set +/// +/// Usually used for keeping subset of timelines. For example active timelines that require broker push. +pub struct TimelinesSet { + timelines: std::sync::Mutex>>, +} + +impl Default for TimelinesSet { + fn default() -> Self { + Self { + timelines: std::sync::Mutex::new(HashMap::new()), + } + } +} + +impl TimelinesSet { + pub fn insert(&self, tli: Arc) { + self.timelines.lock().unwrap().insert(tli.ttid, tli); + } + + pub fn delete(&self, ttid: &TenantTimelineId) { + self.timelines.lock().unwrap().remove(ttid); + } + + /// If present is true, adds timeline to the set, otherwise removes it. + pub fn set_present(&self, tli: Arc, present: bool) { + if present { + self.insert(tli); + } else { + self.delete(&tli.ttid); + } + } + + pub fn is_present(&self, ttid: &TenantTimelineId) -> bool { + self.timelines.lock().unwrap().contains_key(ttid) + } + + /// Returns all timelines in the set. + pub fn get_all(&self) -> Vec> { + self.timelines.lock().unwrap().values().cloned().collect() + } + + /// Returns a timeline guard for easy presence control. + pub fn guard(self: &Arc, tli: Arc) -> TimelineSetGuard { + let is_present = self.is_present(&tli.ttid); + TimelineSetGuard { + timelines_set: self.clone(), + tli, + is_present, + } + } +} + +/// Guard is used to add or remove timeline from the set. +/// If the timeline present in set, it will be removed from it on drop. +/// Note: do not use more than one guard for the same timeline, it caches the presence state. +/// It is designed to be used in the manager task only. +pub struct TimelineSetGuard { + timelines_set: Arc, + tli: Arc, + is_present: bool, +} + +impl TimelineSetGuard { + /// Returns true if the state was changed. 
+ pub fn set(&mut self, present: bool) -> bool { + if present == self.is_present { + return false; + } + self.is_present = present; + self.timelines_set.set_present(self.tli.clone(), present); + true + } +} + +impl Drop for TimelineSetGuard { + fn drop(&mut self) { + // remove timeline from the map on drop + self.timelines_set.delete(&self.tli.ttid); + } +} diff --git a/safekeeper/src/wal_backup.rs b/safekeeper/src/wal_backup.rs index e496f07114..84680557f9 100644 --- a/safekeeper/src/wal_backup.rs +++ b/safekeeper/src/wal_backup.rs @@ -9,7 +9,7 @@ use utils::backoff; use utils::id::NodeId; use std::cmp::min; -use std::collections::{HashMap, HashSet}; +use std::collections::HashSet; use std::num::NonZeroU32; use std::pin::Pin; use std::sync::Arc; @@ -29,9 +29,10 @@ use tracing::*; use utils::{id::TenantTimelineId, lsn::Lsn}; -use crate::metrics::{BACKED_UP_SEGMENTS, BACKUP_ERRORS}; +use crate::metrics::{BACKED_UP_SEGMENTS, BACKUP_ERRORS, WAL_BACKUP_TASKS}; use crate::timeline::{PeerInfo, Timeline}; -use crate::{GlobalTimelines, SafeKeeperConf}; +use crate::timeline_manager::StateSnapshot; +use crate::{GlobalTimelines, SafeKeeperConf, WAL_BACKUP_RUNTIME}; use once_cell::sync::OnceCell; @@ -41,35 +42,84 @@ const UPLOAD_FAILURE_RETRY_MAX_MS: u64 = 5000; /// Default buffer size when interfacing with [`tokio::fs::File`]. const BUFFER_SIZE: usize = 32 * 1024; -/// Check whether wal backup is required for timeline. If yes, mark that launcher is -/// aware of current status and return the timeline. -async fn is_wal_backup_required(ttid: TenantTimelineId) -> Option> { - match GlobalTimelines::get(ttid).ok() { - Some(tli) => { - tli.wal_backup_attend().await; - Some(tli) - } - None => None, - } -} - -struct WalBackupTaskHandle { +pub struct WalBackupTaskHandle { shutdown_tx: Sender<()>, handle: JoinHandle<()>, } -struct WalBackupTimelineEntry { - timeline: Arc, - handle: Option, +/// Do we have anything to upload to S3, i.e. should safekeepers run backup activity? +pub fn is_wal_backup_required( + wal_seg_size: usize, + num_computes: usize, + state: &StateSnapshot, +) -> bool { + num_computes > 0 || + // Currently only the whole segment is offloaded, so compare segment numbers. + (state.commit_lsn.segment_number(wal_seg_size) > state.backup_lsn.segment_number(wal_seg_size)) } -async fn shut_down_task(ttid: TenantTimelineId, entry: &mut WalBackupTimelineEntry) { - if let Some(wb_handle) = entry.handle.take() { +/// Based on peer information determine which safekeeper should offload; if it +/// is me, run (per timeline) task, if not yet. OTOH, if it is not me and task +/// is running, kill it. 
+pub async fn update_task( + conf: &SafeKeeperConf, + ttid: TenantTimelineId, + need_backup: bool, + state: &StateSnapshot, + entry: &mut Option, +) { + let (offloader, election_dbg_str) = + determine_offloader(&state.peers, state.backup_lsn, ttid, conf); + let elected_me = Some(conf.my_id) == offloader; + + let should_task_run = need_backup && elected_me; + + // start or stop the task + if should_task_run != (entry.is_some()) { + if should_task_run { + info!("elected for backup: {}", election_dbg_str); + + let (shutdown_tx, shutdown_rx) = mpsc::channel(1); + let timeline_dir = conf.timeline_dir(&ttid); + + let async_task = backup_task_main( + ttid, + timeline_dir, + conf.workdir.clone(), + conf.backup_parallel_jobs, + shutdown_rx, + ); + + let handle = if conf.current_thread_runtime { + tokio::spawn(async_task) + } else { + WAL_BACKUP_RUNTIME.spawn(async_task) + }; + + *entry = Some(WalBackupTaskHandle { + shutdown_tx, + handle, + }); + } else { + if !need_backup { + // don't need backup at all + info!("stepping down from backup, need_backup={}", need_backup); + } else { + // someone else has been elected + info!("stepping down from backup: {}", election_dbg_str); + } + shut_down_task(entry).await; + } + } +} + +async fn shut_down_task(entry: &mut Option) { + if let Some(wb_handle) = entry.take() { // Tell the task to shutdown. Error means task exited earlier, that's ok. let _ = wb_handle.shutdown_tx.send(()).await; // Await the task itself. TODO: restart panicked tasks earlier. if let Err(e) = wb_handle.handle.await { - warn!("WAL backup task for {} panicked: {}", ttid, e); + warn!("WAL backup task panicked: {}", e); } } } @@ -126,49 +176,6 @@ fn determine_offloader( } } -/// Based on peer information determine which safekeeper should offload; if it -/// is me, run (per timeline) task, if not yet. OTOH, if it is not me and task -/// is running, kill it. -async fn update_task( - conf: &SafeKeeperConf, - ttid: TenantTimelineId, - entry: &mut WalBackupTimelineEntry, -) { - let alive_peers = entry.timeline.get_peers(conf).await; - let wal_backup_lsn = entry.timeline.get_wal_backup_lsn().await; - let (offloader, election_dbg_str) = - determine_offloader(&alive_peers, wal_backup_lsn, ttid, conf); - let elected_me = Some(conf.my_id) == offloader; - - if elected_me != (entry.handle.is_some()) { - if elected_me { - info!("elected for backup: {}", election_dbg_str); - - let (shutdown_tx, shutdown_rx) = mpsc::channel(1); - let timeline_dir = conf.timeline_dir(&ttid); - - let handle = tokio::spawn( - backup_task_main( - ttid, - timeline_dir, - conf.workdir.clone(), - conf.backup_parallel_jobs, - shutdown_rx, - ) - .in_current_span(), - ); - - entry.handle = Some(WalBackupTaskHandle { - shutdown_tx, - handle, - }); - } else { - info!("stepping down from backup: {}", election_dbg_str); - shut_down_task(ttid, entry).await; - } - } -} - static REMOTE_STORAGE: OnceCell> = OnceCell::new(); // Storage must be configured and initialized when this is called. @@ -190,67 +197,6 @@ pub fn init_remote_storage(conf: &SafeKeeperConf) { }); } -const CHECK_TASKS_INTERVAL_MSEC: u64 = 1000; - -/// Sits on wal_backup_launcher_rx and starts/stops per timeline wal backup -/// tasks. Having this in separate task simplifies locking, allows to reap -/// panics and separate elections from offloading itself. 
-pub async fn wal_backup_launcher_task_main( - conf: SafeKeeperConf, - mut wal_backup_launcher_rx: Receiver, -) -> anyhow::Result<()> { - info!( - "WAL backup launcher started, remote config {:?}", - conf.remote_storage - ); - - // Presence in this map means launcher is aware s3 offloading is needed for - // the timeline, but task is started only if it makes sense for to offload - // from this safekeeper. - let mut tasks: HashMap = HashMap::new(); - - let mut ticker = tokio::time::interval(Duration::from_millis(CHECK_TASKS_INTERVAL_MSEC)); - loop { - tokio::select! { - ttid = wal_backup_launcher_rx.recv() => { - // channel is never expected to get closed - let ttid = ttid.unwrap(); - if !conf.is_wal_backup_enabled() { - continue; /* just drain the channel and do nothing */ - } - async { - let timeline = is_wal_backup_required(ttid).await; - // do we need to do anything at all? - if timeline.is_some() != tasks.contains_key(&ttid) { - if let Some(timeline) = timeline { - // need to start the task - let entry = tasks.entry(ttid).or_insert(WalBackupTimelineEntry { - timeline, - handle: None, - }); - update_task(&conf, ttid, entry).await; - } else { - // need to stop the task - info!("stopping WAL backup task"); - let mut entry = tasks.remove(&ttid).unwrap(); - shut_down_task(ttid, &mut entry).await; - } - } - }.instrument(info_span!("WAL backup", ttid = %ttid)).await; - } - // For each timeline needing offloading, check if this safekeeper - // should do the job and start/stop the task accordingly. - _ = ticker.tick() => { - for (ttid, entry) in tasks.iter_mut() { - update_task(&conf, *ttid, entry) - .instrument(info_span!("WAL backup", ttid = %ttid)) - .await; - } - } - } - } -} - struct WalBackupTask { timeline: Arc, timeline_dir: Utf8PathBuf, @@ -261,6 +207,7 @@ struct WalBackupTask { } /// Offload single timeline. +#[instrument(name = "WAL backup", skip_all, fields(ttid = %ttid))] async fn backup_task_main( ttid: TenantTimelineId, timeline_dir: Utf8PathBuf, @@ -268,6 +215,8 @@ async fn backup_task_main( parallel_jobs: usize, mut shutdown_rx: Receiver<()>, ) { + let _guard = WAL_BACKUP_TASKS.guard(); + info!("started"); let res = GlobalTimelines::get(ttid); if let Err(e) = res { From 664f92dc6e6eac25380272fe4e466b4ddc4954bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arpad=20M=C3=BCller?= Date: Wed, 22 May 2024 12:43:03 +0200 Subject: [PATCH 044/220] Refactor PageServerHandler::process_query parsing (#7835) In the process_query function in page_service.rs there was some redundant duplication. Remove it and create a vector of whitespace separated parts at the start and then use `slice::strip_prefix`. Only use `starts_with` in the places with multiple whitespace separated parameters: here we want to preserve grep/rg ability. 
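A condensed illustration of the parsing shape described above — split on whitespace once, then use `slice::strip_prefix` per command and `get()` for optional trailing parameters. The command names match the patch, but the handler bodies and error type here are simplified stand-ins for the real page_service.rs code:

```rust
fn route(query_string: &str) -> Result<String, String> {
    // Split once up front; each branch then pattern-matches on the prefix.
    let parts = query_string.split_whitespace().collect::<Vec<_>>();

    if let Some(params) = parts.strip_prefix(&["pagestream_v2"]) {
        if params.len() != 2 {
            return Err("invalid param number for pagestream command".into());
        }
        Ok(format!("pagestream_v2 tenant={} timeline={}", params[0], params[1]))
    } else if let Some(params) = parts.strip_prefix(&["basebackup"]) {
        if params.len() < 2 {
            return Err("invalid param number for basebackup command".into());
        }
        // Optional third parameter read with `get` instead of manual length checks.
        let lsn = params.get(2).copied().unwrap_or("latest");
        Ok(format!("basebackup tenant={} timeline={} lsn={}", params[0], params[1], lsn))
    } else {
        Err(format!("unknown command {query_string:?}"))
    }
}

fn main() {
    for q in ["pagestream_v2 a b", "basebackup a b 0/16B9188", "bogus"] {
        println!("{q:?} -> {:?}", route(q));
    }
}
```

In the diff itself, the multi-word commands (`import basebackup`, `import wal`, `lease lsn`) keep `starts_with` and slice `&parts[2..]`, which preserves grep/rg-ability of the full command string.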
Followup of #7815, requested in https://github.com/neondatabase/neon/pull/7815#pullrequestreview-2068835674 --- pageserver/src/page_service.rs | 70 ++++++++++++---------------------- 1 file changed, 25 insertions(+), 45 deletions(-) diff --git a/pageserver/src/page_service.rs b/pageserver/src/page_service.rs index c066f56c17..d250864fd6 100644 --- a/pageserver/src/page_service.rs +++ b/pageserver/src/page_service.rs @@ -1521,9 +1521,8 @@ where let ctx = self.connection_ctx.attached_child(); debug!("process query {query_string:?}"); - if query_string.starts_with("pagestream_v2 ") { - let (_, params_raw) = query_string.split_at("pagestream_v2 ".len()); - let params = params_raw.split(' ').collect::>(); + let parts = query_string.split_whitespace().collect::>(); + if let Some(params) = parts.strip_prefix(&["pagestream_v2"]) { if params.len() != 2 { return Err(QueryError::Other(anyhow::anyhow!( "invalid param number for pagestream command" @@ -1548,9 +1547,7 @@ where ctx, ) .await?; - } else if query_string.starts_with("pagestream ") { - let (_, params_raw) = query_string.split_at("pagestream ".len()); - let params = params_raw.split(' ').collect::>(); + } else if let Some(params) = parts.strip_prefix(&["pagestream"]) { if params.len() != 2 { return Err(QueryError::Other(anyhow::anyhow!( "invalid param number for pagestream command" @@ -1575,10 +1572,7 @@ where ctx, ) .await?; - } else if query_string.starts_with("basebackup ") { - let (_, params_raw) = query_string.split_at("basebackup ".len()); - let params = params_raw.split_whitespace().collect::>(); - + } else if let Some(params) = parts.strip_prefix(&["basebackup"]) { if params.len() < 2 { return Err(QueryError::Other(anyhow::anyhow!( "invalid param number for basebackup command" @@ -1596,26 +1590,23 @@ where self.check_permission(Some(tenant_id))?; - let lsn = if params.len() >= 3 { + let lsn = if let Some(lsn_str) = params.get(2) { Some( - Lsn::from_str(params[2]) - .with_context(|| format!("Failed to parse Lsn from {}", params[2]))?, + Lsn::from_str(lsn_str) + .with_context(|| format!("Failed to parse Lsn from {lsn_str}"))?, ) } else { None }; - let gzip = if params.len() >= 4 { - if params[3] == "--gzip" { - true - } else { + let gzip = match params.get(3) { + Some(&"--gzip") => true, + None => false, + Some(third_param) => { return Err(QueryError::Other(anyhow::anyhow!( - "Parameter in position 3 unknown {}", - params[3], - ))); + "Parameter in position 3 unknown {third_param}", + ))) } - } else { - false }; let metric_recording = metrics::BASEBACKUP_QUERY_TIME.start_recording(&ctx); @@ -1639,10 +1630,7 @@ where res?; } // return pair of prev_lsn and last_lsn - else if query_string.starts_with("get_last_record_rlsn ") { - let (_, params_raw) = query_string.split_at("get_last_record_rlsn ".len()); - let params = params_raw.split_whitespace().collect::>(); - + else if let Some(params) = parts.strip_prefix(&["get_last_record_rlsn"]) { if params.len() != 2 { return Err(QueryError::Other(anyhow::anyhow!( "invalid param number for get_last_record_rlsn command" @@ -1684,10 +1672,7 @@ where .await?; } // same as basebackup, but result includes relational data as well - else if query_string.starts_with("fullbackup ") { - let (_, params_raw) = query_string.split_at("fullbackup ".len()); - let params = params_raw.split_whitespace().collect::>(); - + else if let Some(params) = parts.strip_prefix(&["fullbackup"]) { if params.len() < 2 { return Err(QueryError::Other(anyhow::anyhow!( "invalid param number for fullbackup command" @@ -1704,18 
+1689,18 @@ where .record("timeline_id", field::display(timeline_id)); // The caller is responsible for providing correct lsn and prev_lsn. - let lsn = if params.len() > 2 { + let lsn = if let Some(lsn_str) = params.get(2) { Some( - Lsn::from_str(params[2]) - .with_context(|| format!("Failed to parse Lsn from {}", params[2]))?, + Lsn::from_str(lsn_str) + .with_context(|| format!("Failed to parse Lsn from {lsn_str}"))?, ) } else { None }; - let prev_lsn = if params.len() > 3 { + let prev_lsn = if let Some(prev_lsn_str) = params.get(3) { Some( - Lsn::from_str(params[3]) - .with_context(|| format!("Failed to parse Lsn from {}", params[3]))?, + Lsn::from_str(prev_lsn_str) + .with_context(|| format!("Failed to parse Lsn from {prev_lsn_str}"))?, ) } else { None @@ -1748,8 +1733,7 @@ where // 2. Run: // cat my_backup/base.tar | psql -h $PAGESERVER \ // -c "import basebackup $TENANT $TIMELINE $START_LSN $END_LSN $PG_VERSION" - let (_, params_raw) = query_string.split_at("import basebackup ".len()); - let params = params_raw.split_whitespace().collect::>(); + let params = &parts[2..]; if params.len() != 5 { return Err(QueryError::Other(anyhow::anyhow!( "invalid param number for import basebackup command" @@ -1798,8 +1782,7 @@ where // // Files are scheduled to be persisted to remote storage, and the // caller should poll the http api to check when that is done. - let (_, params_raw) = query_string.split_at("import wal ".len()); - let params = params_raw.split_whitespace().collect::>(); + let params = &parts[2..]; if params.len() != 4 { return Err(QueryError::Other(anyhow::anyhow!( "invalid param number for import wal command" @@ -1838,8 +1821,7 @@ where // on connect pgb.write_message_noflush(&BeMessage::CommandComplete(b"SELECT 1"))?; } else if query_string.starts_with("lease lsn ") { - let (_, params_raw) = query_string.split_at("lease lsn ".len()); - let params = params_raw.split_whitespace().collect::>(); + let params = &parts[2..]; if params.len() != 3 { return Err(QueryError::Other(anyhow::anyhow!( "invalid param number {} for lease lsn command", @@ -1875,10 +1857,8 @@ where ))? } }; - } else if query_string.starts_with("show ") { + } else if let Some(params) = parts.strip_prefix(&["show"]) { // show - let (_, params_raw) = query_string.split_at("show ".len()); - let params = params_raw.split(' ').collect::>(); if params.len() != 1 { return Err(QueryError::Other(anyhow::anyhow!( "invalid param number for config command" From b43f6daa488de05691775a8908920d274fb53369 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Thu, 2 May 2024 15:53:30 +0300 Subject: [PATCH 045/220] One more iteration on making walcraft test more robust. Some WAL might be inserted on the page boundary before XLOG_SWITCH lands there, repeat construction in this case. 
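The property being retried on is easy to state in isolation: the LSN returned by `pg_switch_wal()` must land exactly one short page header past a page boundary; otherwise concurrently inserted WAL got there first and the construction has to be repeated. A self-contained sketch of that check follows — the constants are illustrative placeholders, the real values come from `postgres_ffi`:

```rust
// Placeholder constants for illustration; wal_craft takes the real ones from postgres_ffi.
const XLOG_BLCKSZ: usize = 8192;
const XLOG_SIZE_OF_XLOG_SHORT_PHD: usize = 24;

/// True if the XLOG_SWITCH record ended exactly where a new page's data starts,
/// i.e. right after the short page header of that page.
fn switch_ended_on_page_boundary(xlog_switch_record_end: u64) -> bool {
    xlog_switch_record_end as usize % XLOG_BLCKSZ == XLOG_SIZE_OF_XLOG_SHORT_PHD
}

fn main() {
    // First case: nothing slipped in, the record ends right after the page header.
    // Second case: some background WAL landed on the boundary, so craft again.
    for end in [2 * 8192 + 24, 2 * 8192 + 96] {
        println!("end={end} ok={}", switch_ended_on_page_boundary(end as u64));
    }
}
```

When the check fails, the updated crafter in the diff below logs a warning and repeats the whole construction instead of erroring out.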
--- libs/postgres_ffi/wal_craft/src/lib.rs | 46 ++++++++++++-------------- 1 file changed, 22 insertions(+), 24 deletions(-) diff --git a/libs/postgres_ffi/wal_craft/src/lib.rs b/libs/postgres_ffi/wal_craft/src/lib.rs index b6769629a8..6052f04d11 100644 --- a/libs/postgres_ffi/wal_craft/src/lib.rs +++ b/libs/postgres_ffi/wal_craft/src/lib.rs @@ -373,31 +373,29 @@ impl Crafter for LastWalRecordXlogSwitchEndsOnPageBoundary { "SELECT pg_logical_emit_message(false, 'swch', REPEAT('a', $1))", &[&(repeats as i32)], )?; - break; - } - info!( - "current_wal_insert_lsn={}, XLOG_SIZE_OF_XLOG_RECORD={}", - client.pg_current_wal_insert_lsn()?, - XLOG_SIZE_OF_XLOG_RECORD - ); + info!( + "current_wal_insert_lsn={}, XLOG_SIZE_OF_XLOG_RECORD={}", + client.pg_current_wal_insert_lsn()?, + XLOG_SIZE_OF_XLOG_RECORD + ); - // Emit the XLOG_SWITCH - let before_xlog_switch = client.pg_current_wal_insert_lsn()?; - let xlog_switch_record_end: PgLsn = client.query_one("SELECT pg_switch_wal()", &[])?.get(0); - let next_segment = PgLsn::from(0x0200_0000); - ensure!( - xlog_switch_record_end < next_segment, - "XLOG_SWITCH record ended on or after the expected segment boundary: {} > {}", - xlog_switch_record_end, - next_segment - ); - ensure!( - u64::from(xlog_switch_record_end) as usize % XLOG_BLCKSZ == XLOG_SIZE_OF_XLOG_SHORT_PHD, - "XLOG_SWITCH message ended not on page boundary: {}, offset = {}", - xlog_switch_record_end, - u64::from(xlog_switch_record_end) as usize % XLOG_BLCKSZ - ); - Ok(vec![before_xlog_switch, xlog_switch_record_end]) + // Emit the XLOG_SWITCH + let before_xlog_switch = client.pg_current_wal_insert_lsn()?; + let xlog_switch_record_end: PgLsn = + client.query_one("SELECT pg_switch_wal()", &[])?.get(0); + + if u64::from(xlog_switch_record_end) as usize % XLOG_BLCKSZ + != XLOG_SIZE_OF_XLOG_SHORT_PHD + { + warn!( + "XLOG_SWITCH message ended not on page boundary: {}, offset = {}, repeating", + xlog_switch_record_end, + u64::from(xlog_switch_record_end) as usize % XLOG_BLCKSZ + ); + continue; + } + return Ok(vec![before_xlog_switch, xlog_switch_record_end]); + } } } From ef96c82c9f189633629e6bfc051a78fbab12c9a6 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Tue, 21 May 2024 19:33:03 +0300 Subject: [PATCH 046/220] Fix zenith_test_evict mode and clear_buffer_cache() function Using InvalidateBuffer is wrong, because if the page is concurrently dirtied, it will throw away the dirty page without calling smgwrite(). In Neon, that means that the last-written LSN update for the page is missed. In v16, use the new InvalidateVictimBuffer() function that does what we need. In v15 and v14, backport the InvalidateVictimBuffer() function. 
Fixes issue https://github.com/neondatabase/neon/issues/7802 --- vendor/postgres-v14 | 2 +- vendor/postgres-v15 | 2 +- vendor/postgres-v16 | 2 +- vendor/revisions.json | 6 +++--- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/vendor/postgres-v14 b/vendor/postgres-v14 index 21ec61d539..0d30e28f74 160000 --- a/vendor/postgres-v14 +++ b/vendor/postgres-v14 @@ -1 +1 @@ -Subproject commit 21ec61d539d22a81fe811c2d79e26436820bc3f4 +Subproject commit 0d30e28f74f49fe6a27a6bd45dcfeb1060656b8f diff --git a/vendor/postgres-v15 b/vendor/postgres-v15 index e2dbd63345..74fb144890 160000 --- a/vendor/postgres-v15 +++ b/vendor/postgres-v15 @@ -1 +1 @@ -Subproject commit e2dbd63345c584de75173c27951f111249ae0016 +Subproject commit 74fb144890c4f955db1ef50ee1eeb9d8a6c2f69d diff --git a/vendor/postgres-v16 b/vendor/postgres-v16 index c271017c6c..3c2b9d576c 160000 --- a/vendor/postgres-v16 +++ b/vendor/postgres-v16 @@ -1 +1 @@ -Subproject commit c271017c6c4846be59948766baec2ba4ace5dc9c +Subproject commit 3c2b9d576c580e0b5b7108001f959b8c5b42e0a2 diff --git a/vendor/revisions.json b/vendor/revisions.json index a3af9331fe..2f16f334c5 100644 --- a/vendor/revisions.json +++ b/vendor/revisions.json @@ -1,5 +1,5 @@ { - "v16": ["16.3", "c271017c6c4846be59948766baec2ba4ace5dc9c"], - "v15": ["15.7", "e2dbd63345c584de75173c27951f111249ae0016"], - "v14": ["14.12", "21ec61d539d22a81fe811c2d79e26436820bc3f4"] + "v16": ["16.3", "3c2b9d576c580e0b5b7108001f959b8c5b42e0a2"], + "v15": ["15.7", "74fb144890c4f955db1ef50ee1eeb9d8a6c2f69d"], + "v14": ["14.12", "0d30e28f74f49fe6a27a6bd45dcfeb1060656b8f"] } From df9ab1b5e3962da4d98c7b1cd6a7fa20f4ff3902 Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Wed, 22 May 2024 15:43:21 +0300 Subject: [PATCH 047/220] refactor(test): duplication with fullbackup, tar content hashing (#7828) "taking a fullbackup" is an ugly multi-liner copypasted in multiple places, most recently with timeline ancestor detach tests. move it under `PgBin` which is not a great place, but better than yet another utility function. Additionally: - cleanup `psql_env` repetition (PgBin already configures that) - move the backup tar comparison as a yet another free utility function - use backup tar comparison in `test_import.py` where a size check was done previously - cleanup extra timeline creation from test Cc: #7715 --- test_runner/fixtures/neon_fixtures.py | 22 ++++ test_runner/fixtures/utils.py | 50 ++++++++ test_runner/regress/test_fullbackup.py | 23 ++-- test_runner/regress/test_import.py | 25 +--- test_runner/regress/test_next_xid.py | 11 +- .../regress/test_timeline_detach_ancestor.py | 120 +++--------------- 6 files changed, 106 insertions(+), 145 deletions(-) diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index 7a660b64ed..b02054a702 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -2788,6 +2788,28 @@ class PgBin: log.info(f"last checkpoint at {checkpoint_lsn}") return Lsn(checkpoint_lsn) + def take_fullbackup( + self, + pageserver: NeonPageserver, + tenant: TenantId, + timeline: TimelineId, + lsn: Lsn, + output: Path, + ): + """ + Request fullbackup from pageserver, store it at 'output'. 
+ """ + cmd = [ + "psql", + "--no-psqlrc", + pageserver.connstr(), + "-c", + f"fullbackup {tenant} {timeline} {lsn}", + "-o", + str(output), + ] + self.run_capture(cmd) + @pytest.fixture(scope="function") def pg_bin(test_output_dir: Path, pg_distrib_dir: Path, pg_version: PgVersion) -> PgBin: diff --git a/test_runner/fixtures/utils.py b/test_runner/fixtures/utils.py index 16dc9e8cfb..70263245e7 100644 --- a/test_runner/fixtures/utils.py +++ b/test_runner/fixtures/utils.py @@ -4,10 +4,13 @@ import json import os import re import subprocess +import tarfile import threading import time +from hashlib import sha256 from pathlib import Path from typing import ( + IO, TYPE_CHECKING, Any, Callable, @@ -15,8 +18,10 @@ from typing import ( Iterable, List, Optional, + Set, Tuple, TypeVar, + Union, ) from urllib.parse import urlencode @@ -499,3 +504,48 @@ class AuxFileStore(str, enum.Enum): def __str__(self) -> str: return f"'aux-{self.value}'" + + +def assert_pageserver_backups_equal(left: Path, right: Path, skip_files: Set[str]): + """ + This is essentially: + + lines=$(comm -3 \ + <(mkdir left && cd left && tar xf "$left" && find . -type f -print0 | xargs sha256sum | sort -k2) \ + <(mkdir right && cd right && tar xf "$right" && find . -type f -print0 | xargs sha256sum | sort -k2) \ + | wc -l) + [ "$lines" = "0" ] + + But in a more mac friendly fashion. + """ + started_at = time.time() + + def hash_extracted(reader: Union[IO[bytes], None]) -> bytes: + assert reader is not None + digest = sha256(usedforsecurity=False) + while True: + buf = reader.read(64 * 1024) + if not buf: + break + digest.update(buf) + return digest.digest() + + def build_hash_list(p: Path) -> List[Tuple[str, bytes]]: + with tarfile.open(p) as f: + matching_files = (info for info in f if info.isreg() and info.name not in skip_files) + ret = list( + map(lambda info: (info.name, hash_extracted(f.extractfile(info))), matching_files) + ) + ret.sort(key=lambda t: t[0]) + return ret + + left_list, right_list = map(build_hash_list, [left, right]) + + try: + assert len(left_list) == len(right_list) + + for left_tuple, right_tuple in zip(left_list, right_list): + assert left_tuple == right_tuple + finally: + elapsed = time.time() - started_at + log.info(f"assert_pageserver_backups_equal completed in {elapsed}s") diff --git a/test_runner/regress/test_fullbackup.py b/test_runner/regress/test_fullbackup.py index e1e4f700d4..e6d51a77a6 100644 --- a/test_runner/regress/test_fullbackup.py +++ b/test_runner/regress/test_fullbackup.py @@ -1,7 +1,7 @@ import os from pathlib import Path -from fixtures.common_types import Lsn, TimelineId +from fixtures.common_types import Lsn from fixtures.log_helper import log from fixtures.neon_fixtures import ( NeonEnvBuilder, @@ -19,17 +19,16 @@ def test_fullbackup( neon_env_builder: NeonEnvBuilder, pg_bin: PgBin, port_distributor: PortDistributor, - pg_distrib_dir: Path, test_output_dir: Path, ): env = neon_env_builder.init_start() - env.neon_cli.create_branch("test_fullbackup") - endpoint_main = env.endpoints.create_start("test_fullbackup") + # endpoint needs to be alive until the fullbackup so that we have + # prev_record_lsn for the vanilla_pg to start in read-write mode + # for some reason this does not happen if endpoint is shutdown. 
+ endpoint_main = env.endpoints.create_start("main") with endpoint_main.cursor() as cur: - timeline = TimelineId(query_scalar(cur, "SHOW neon.timeline_id")) - # data loading may take a while, so increase statement timeout cur.execute("SET statement_timeout='300s'") cur.execute( @@ -41,17 +40,13 @@ def test_fullbackup( lsn = Lsn(query_scalar(cur, "SELECT pg_current_wal_insert_lsn()")) log.info(f"start_backup_lsn = {lsn}") - # Set LD_LIBRARY_PATH in the env properly, otherwise we may use the wrong libpq. - # PgBin sets it automatically, but here we need to pipe psql output to the tar command. - psql_env = {"LD_LIBRARY_PATH": str(pg_distrib_dir / "lib")} - # Get and unpack fullbackup from pageserver restored_dir_path = env.repo_dir / "restored_datadir" os.mkdir(restored_dir_path, 0o750) - query = f"fullbackup {env.initial_tenant} {timeline} {lsn}" tar_output_file = test_output_dir / "fullbackup.tar" - cmd = ["psql", "--no-psqlrc", env.pageserver.connstr(), "-c", query, "-o", str(tar_output_file)] - pg_bin.run_capture(cmd, env=psql_env) + pg_bin.take_fullbackup( + env.pageserver, env.initial_tenant, env.initial_timeline, lsn, tar_output_file + ) subprocess_capture( env.repo_dir, ["tar", "-xf", str(tar_output_file), "-C", str(restored_dir_path)] ) @@ -61,7 +56,7 @@ def test_fullbackup( # use resetwal to overwrite it pg_resetwal_path = os.path.join(pg_bin.pg_bin_path, "pg_resetwal") cmd = [pg_resetwal_path, "-D", str(restored_dir_path)] - pg_bin.run_capture(cmd, env=psql_env) + pg_bin.run_capture(cmd) # Restore from the backup and find the data we inserted port = port_distributor.get_port() diff --git a/test_runner/regress/test_import.py b/test_runner/regress/test_import.py index 1f1c8cc582..62229ebfe7 100644 --- a/test_runner/regress/test_import.py +++ b/test_runner/regress/test_import.py @@ -21,7 +21,7 @@ from fixtures.pageserver.utils import ( wait_for_upload, ) from fixtures.remote_storage import RemoteStorageKind -from fixtures.utils import subprocess_capture +from fixtures.utils import assert_pageserver_backups_equal, subprocess_capture def test_import_from_vanilla(test_output_dir, pg_bin, vanilla_pg, neon_env_builder): @@ -248,15 +248,9 @@ def _import( path to the backup archive file""" log.info(f"start_backup_lsn = {lsn}") - # Set LD_LIBRARY_PATH in the env properly, otherwise we may use the wrong libpq. - # PgBin sets it automatically, but here we need to pipe psql output to the tar command. 
- psql_env = {"LD_LIBRARY_PATH": str(pg_distrib_dir / "lib")} - # Get a fullbackup from pageserver - query = f"fullbackup { env.initial_tenant} {timeline} {lsn}" tar_output_file = test_output_dir / "fullbackup.tar" - cmd = ["psql", "--no-psqlrc", env.pageserver.connstr(), "-c", query, "-o", str(tar_output_file)] - pg_bin.run_capture(cmd, env=psql_env) + pg_bin.take_fullbackup(env.pageserver, env.initial_tenant, timeline, lsn, tar_output_file) # Stop the first pageserver instance, erase all its data env.endpoints.stop_all() @@ -305,22 +299,11 @@ def _import( assert endpoint.safe_psql("select count(*) from tbl") == [(expected_num_rows,)] # Take another fullbackup - query = f"fullbackup { tenant} {timeline} {lsn}" new_tar_output_file = test_output_dir / "fullbackup-new.tar" - cmd = [ - "psql", - "--no-psqlrc", - env.pageserver.connstr(), - "-c", - query, - "-o", - str(new_tar_output_file), - ] - pg_bin.run_capture(cmd, env=psql_env) + pg_bin.take_fullbackup(env.pageserver, tenant, timeline, lsn, new_tar_output_file) # Check it's the same as the first fullbackup - # TODO pageserver should be checking checksum - assert os.path.getsize(tar_output_file) == os.path.getsize(new_tar_output_file) + assert_pageserver_backups_equal(tar_output_file, new_tar_output_file, set()) # Check that gc works pageserver_http = env.pageserver.http_client() diff --git a/test_runner/regress/test_next_xid.py b/test_runner/regress/test_next_xid.py index 45c0e3e409..98fb06a0d6 100644 --- a/test_runner/regress/test_next_xid.py +++ b/test_runner/regress/test_next_xid.py @@ -5,7 +5,7 @@ from pathlib import Path from fixtures.common_types import Lsn, TenantId, TimelineId from fixtures.log_helper import log -from fixtures.neon_fixtures import NeonEnvBuilder, wait_for_wal_insert_lsn +from fixtures.neon_fixtures import NeonEnvBuilder, PgBin, wait_for_wal_insert_lsn from fixtures.pageserver.utils import ( wait_for_last_record_lsn, ) @@ -71,22 +71,17 @@ def test_next_xid(neon_env_builder: NeonEnvBuilder): def test_import_at_2bil( neon_env_builder: NeonEnvBuilder, test_output_dir: Path, - pg_distrib_dir: Path, - pg_bin, + pg_bin: PgBin, vanilla_pg, ): neon_env_builder.enable_pageserver_remote_storage(RemoteStorageKind.LOCAL_FS) env = neon_env_builder.init_start() ps_http = env.pageserver.http_client() - # Set LD_LIBRARY_PATH in the env properly, otherwise we may use the wrong libpq. - # PgBin sets it automatically, but here we need to pipe psql output to the tar command. - psql_env = {"LD_LIBRARY_PATH": str(pg_distrib_dir / "lib")} - # Reset the vanilla Postgres instance to somewhat before 2 billion transactions. 
pg_resetwal_path = os.path.join(pg_bin.pg_bin_path, "pg_resetwal") cmd = [pg_resetwal_path, "--next-transaction-id=2129920000", "-D", str(vanilla_pg.pgdatadir)] - pg_bin.run_capture(cmd, env=psql_env) + pg_bin.run_capture(cmd) vanilla_pg.start() vanilla_pg.safe_psql("create user cloud_admin with password 'postgres' superuser") diff --git a/test_runner/regress/test_timeline_detach_ancestor.py b/test_runner/regress/test_timeline_detach_ancestor.py index 3e435caeee..1563c161e0 100644 --- a/test_runner/regress/test_timeline_detach_ancestor.py +++ b/test_runner/regress/test_timeline_detach_ancestor.py @@ -1,25 +1,18 @@ import datetime import enum -import tarfile -import time from concurrent.futures import ThreadPoolExecutor -from hashlib import sha256 -from pathlib import Path from queue import Empty, Queue from threading import Barrier -from typing import IO, List, Set, Tuple, Union +from typing import List, Tuple import pytest from fixtures.common_types import Lsn, TimelineId from fixtures.log_helper import log -from fixtures.neon_fixtures import ( - NeonEnvBuilder, - PgBin, - wait_for_last_flush_lsn, -) +from fixtures.neon_fixtures import NeonEnvBuilder, PgBin, wait_for_last_flush_lsn from fixtures.pageserver.http import HistoricLayerInfo from fixtures.pageserver.utils import wait_timeline_detail_404 from fixtures.remote_storage import LocalFsStorage +from fixtures.utils import assert_pageserver_backups_equal def by_end_lsn(info: HistoricLayerInfo) -> Lsn: @@ -68,7 +61,6 @@ SHUTDOWN_ALLOWED_ERRORS = [ @pytest.mark.parametrize("write_to_branch_first", [True, False]) def test_ancestor_detach_branched_from( test_output_dir, - pg_distrib_dir, neon_env_builder: NeonEnvBuilder, pg_bin: PgBin, branchpoint: Branchpoint, @@ -80,7 +72,6 @@ def test_ancestor_detach_branched_from( """ env = neon_env_builder.init_start() - psql_env = {"LD_LIBRARY_PATH": str(pg_distrib_dir / "lib")} env.pageserver.allowed_errors.extend(SHUTDOWN_ALLOWED_ERRORS) client = env.pageserver.http_client() @@ -160,16 +151,9 @@ def test_ancestor_detach_branched_from( # run fullbackup to make sure there are no off by one errors # take this on the parent fullbackup_before = test_output_dir / "fullbackup-before.tar" - cmd = [ - "psql", - "--no-psqlrc", - env.pageserver.connstr(), - "-c", - f"fullbackup {env.initial_tenant} {env.initial_timeline} {branch_at}", - "-o", - str(fullbackup_before), - ] - pg_bin.run_capture(cmd, env=psql_env) + pg_bin.take_fullbackup( + env.pageserver, env.initial_tenant, env.initial_timeline, branch_at, fullbackup_before + ) all_reparented = client.detach_ancestor(env.initial_tenant, timeline_id) assert all_reparented == set() @@ -200,16 +184,9 @@ def test_ancestor_detach_branched_from( # take this on the detached, at same lsn fullbackup_after = test_output_dir / "fullbackup-after.tar" - cmd = [ - "psql", - "--no-psqlrc", - env.pageserver.connstr(), - "-c", - f"fullbackup {env.initial_tenant} {timeline_id} {branch_at}", - "-o", - str(fullbackup_after), - ] - pg_bin.run_capture(cmd, env=psql_env) + pg_bin.take_fullbackup( + env.pageserver, env.initial_tenant, timeline_id, branch_at, fullbackup_after + ) client.timeline_delete(env.initial_tenant, env.initial_timeline) wait_timeline_detail_404(client, env.initial_tenant, env.initial_timeline, 10, 1.0) @@ -218,52 +195,7 @@ def test_ancestor_detach_branched_from( # as there is always "PREV_LSN: invalid" for "before" skip_files = {"zenith.signal"} - tar_cmp(fullbackup_before, fullbackup_after, skip_files) - - -def tar_cmp(left: Path, right: Path, skip_files: 
Set[str]): - """ - This is essentially: - - lines=$(comm -3 \ - <(mkdir left && cd left && tar xf "$left" && find . -type f -print0 | xargs sha256sum | sort -k2) \ - <(mkdir right && cd right && tar xf "$right" && find . -type f -print0 | xargs sha256sum | sort -k2) \ - | wc -l) - [ "$lines" = "0" ] - - But in a more mac friendly fashion. - """ - started_at = time.time() - - def hash_extracted(reader: Union[IO[bytes], None]) -> bytes: - assert reader is not None - digest = sha256(usedforsecurity=False) - while True: - buf = reader.read(64 * 1024) - if not buf: - break - digest.update(buf) - return digest.digest() - - def build_hash_list(p: Path) -> List[Tuple[str, bytes]]: - with tarfile.open(p) as f: - matching_files = (info for info in f if info.isreg() and info.name not in skip_files) - ret = list( - map(lambda info: (info.name, hash_extracted(f.extractfile(info))), matching_files) - ) - ret.sort(key=lambda t: t[0]) - return ret - - left_list, right_list = map(build_hash_list, [left, right]) - - try: - assert len(left_list) == len(right_list) - - for left_tuple, right_tuple in zip(left_list, right_list): - assert left_tuple == right_tuple - finally: - elapsed = time.time() - started_at - log.info(f"tar_cmp completed in {elapsed}s") + assert_pageserver_backups_equal(fullbackup_before, fullbackup_after, skip_files) def test_ancestor_detach_reparents_earlier(neon_env_builder: NeonEnvBuilder): @@ -483,7 +415,7 @@ def test_detached_receives_flushes_while_being_detached(neon_env_builder: NeonEn def test_compaction_induced_by_detaches_in_history( - neon_env_builder: NeonEnvBuilder, test_output_dir, pg_distrib_dir, pg_bin: PgBin + neon_env_builder: NeonEnvBuilder, test_output_dir, pg_bin: PgBin ): """ Assuming the tree of timelines: @@ -500,8 +432,6 @@ def test_compaction_induced_by_detaches_in_history( timeline broken. 
""" - psql_env = {"LD_LIBRARY_PATH": str(pg_distrib_dir / "lib")} - env = neon_env_builder.init_start( initial_tenant_conf={ # we want to create layers manually so we don't branch on arbitrary @@ -589,16 +519,9 @@ def test_compaction_induced_by_detaches_in_history( # take the fullbackup before and after inheriting the new L0s fullbackup_before = test_output_dir / "fullbackup-before.tar" - cmd = [ - "psql", - "--no-psqlrc", - env.pageserver.connstr(), - "-c", - f"fullbackup {env.initial_tenant} {branch_timeline_id} {branch_lsn}", - "-o", - str(fullbackup_before), - ] - pg_bin.run_capture(cmd, env=psql_env) + pg_bin.take_fullbackup( + env.pageserver, env.initial_tenant, branch_timeline_id, branch_lsn, fullbackup_before + ) for _, timeline_id in skip_main: reparented = client.detach_ancestor(env.initial_tenant, timeline_id) @@ -624,19 +547,12 @@ def test_compaction_induced_by_detaches_in_history( assert len(post_compact_l0s) == 1, "only the consecutive inherited L0s should be compacted" fullbackup_after = test_output_dir / "fullbackup_after.tar" - cmd = [ - "psql", - "--no-psqlrc", - env.pageserver.connstr(), - "-c", - f"fullbackup {env.initial_tenant} {branch_timeline_id} {branch_lsn}", - "-o", - str(fullbackup_after), - ] - pg_bin.run_capture(cmd, env=psql_env) + pg_bin.take_fullbackup( + env.pageserver, env.initial_tenant, branch_timeline_id, branch_lsn, fullbackup_after + ) # we don't need to skip any files, because zenith.signal will be identical - tar_cmp(fullbackup_before, fullbackup_after, set()) + assert_pageserver_backups_equal(fullbackup_before, fullbackup_after, set()) # TODO: From d1d55bbd9fbdca34d7f96c391f4a0368fb8c6583 Mon Sep 17 00:00:00 2001 From: Alexander Bayandin Date: Wed, 22 May 2024 14:43:10 +0100 Subject: [PATCH 048/220] CI(report-benchmarks-failures): fix condition (#7820) ## Problem `report-benchmarks-failures` got skipped if a dependent job fails. ## Summary of changes - Fix the if-condition by adding `&& failures()` to it; it'll make the job run if the dependent job fails. 
--- .github/workflows/build_and_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 14d19f7ae3..d8ad6e26d0 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -548,7 +548,7 @@ jobs: report-benchmarks-failures: needs: [ benchmarks, create-test-report ] - if: github.ref_name == 'main' && needs.benchmarks.result == 'failure' + if: github.ref_name == 'main' && failure() && needs.benchmarks.result == 'failure' runs-on: ubuntu-latest steps: From ce44dfe3532b22689464cbeddbf392d05049b9c4 Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Wed, 22 May 2024 16:55:34 +0300 Subject: [PATCH 049/220] openapi: document timeline ancestor detach (#7650) The openapi description with the error descriptions: - 200 is used for "detached or has been detached previously" - 400 is used for "cannot be detached right now" -- it's an odd thing, but good enough - 404 is used for tenant or timeline not found - 409 is used for "can never be detached" (root timeline) - 500 is used for transient errors (basically ill-defined shutdown errors) - 503 is used for busy (other tenant ancestor detach underway, pageserver shutdown) Cc: #6994 --- pageserver/src/http/openapi_spec.yml | 87 +++++++++++++++++++ pageserver/src/http/routes.rs | 27 +++--- .../src/tenant/timeline/detach_ancestor.rs | 30 ++++++- .../regress/test_timeline_detach_ancestor.py | 36 +++++++- 4 files changed, 161 insertions(+), 19 deletions(-) diff --git a/pageserver/src/http/openapi_spec.yml b/pageserver/src/http/openapi_spec.yml index 107bcd4a22..e5eafc51f4 100644 --- a/pageserver/src/http/openapi_spec.yml +++ b/pageserver/src/http/openapi_spec.yml @@ -612,6 +612,80 @@ paths: schema: $ref: "#/components/schemas/Error" + /v1/tenant/{tenant_shard_id}/timeline/{timeline_id}/detach_ancestor: + parameters: + - name: tenant_shard_id + in: path + required: true + schema: + type: string + - name: timeline_id + in: path + ŕequired: true + schema: + type: string + + put: + description: | + Detach a timeline from its ancestor and reparent all ancestors timelines with lower `ancestor_lsn`. + Current implementation might not be retryable across failure cases, but will be enhanced in future. + Detaching should be expected to be expensive operation. Timeouts should be retried. + responses: + "200": + description: | + The timeline has been detached from it's ancestor (now or earlier), and at least the returned timelines have been reparented. + If any timelines were deleted after reparenting, they might not be on this list. + content: + application/json: + schema: + $ref: "#/components/schemas/AncestorDetached" + + "400": + description: | + Number of early checks meaning the timeline cannot be detached now: + - the ancestor of timeline has an ancestor: not supported, see RFC + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + + "404": + description: Tenant or timeline not found. + content: + application/json: + schema: + $ref: "#/components/schemas/NotFoundError" + + "409": + description: | + The timeline can never be detached: + - timeline has no ancestor, implying that the timeline has never had an ancestor + content: + application/json: + schema: + $ref: "#/components/schemas/ConflictError" + + "500": + description: | + Transient error, for example, pageserver shutdown happened while + processing the request but we were unable to distinguish that. Must + be retried. 
+ content: + application/json: + schema: + $ref: "#/components/schemas/Error" + + "503": + description: | + Temporarily unavailable, please retry. Possible reasons: + - another timeline detach for the same tenant is underway, please retry later + - detected shutdown error + content: + application/json: + schema: + $ref: "#/components/schemas/ServiceUnavailableError" + + /v1/tenant/: get: description: Get tenants list @@ -1077,6 +1151,19 @@ components: format: int64 description: How many bytes of layer content were in the latest layer heatmap + AncestorDetached: + type: object + required: + - reparented_timelines + properties: + reparented_timelines: + type: array + description: Set of reparented timeline ids + properties: + type: string + format: hex + description: TimelineId + Error: type: object diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index c75e4ca5a9..34b9806a26 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -74,6 +74,7 @@ use crate::tenant::storage_layer::LayerAccessStatsReset; use crate::tenant::storage_layer::LayerName; use crate::tenant::timeline::CompactFlags; use crate::tenant::timeline::Timeline; +use crate::tenant::GetTimelineError; use crate::tenant::SpawnMode; use crate::tenant::{LogicalSizeCalculationCause, PageReconstructError}; use crate::{config::PageServerConf, tenant::mgr}; @@ -279,6 +280,13 @@ impl From for ApiError { } } +impl From for ApiError { + fn from(gte: GetTimelineError) -> Self { + // Rationale: tenant is activated only after eligble timelines activate + ApiError::NotFound(gte.into()) + } +} + impl From for ApiError { fn from(e: GetActiveTenantError) -> ApiError { match e { @@ -643,9 +651,7 @@ async fn timeline_preserve_initdb_handler( .tenant_manager .get_attached_tenant_shard(tenant_shard_id)?; - let timeline = tenant - .get_timeline(timeline_id, false) - .map_err(|e| ApiError::NotFound(e.into()))?; + let timeline = tenant.get_timeline(timeline_id, false)?; timeline .preserve_initdb_archive() @@ -687,9 +693,7 @@ async fn timeline_detail_handler( tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?; - let timeline = tenant - .get_timeline(timeline_id, false) - .map_err(|e| ApiError::NotFound(e.into()))?; + let timeline = tenant.get_timeline(timeline_id, false)?; let timeline_info = build_timeline_info( &timeline, @@ -1901,14 +1905,11 @@ async fn timeline_detach_ancestor_handler( let ctx = RequestContext::new(TaskKind::DetachAncestor, DownloadBehavior::Download); let ctx = &ctx; - let timeline = tenant - .get_timeline(timeline_id, true) - .map_err(|e| ApiError::NotFound(e.into()))?; + let timeline = tenant.get_timeline(timeline_id, true)?; let (_guard, prepared) = timeline .prepare_to_detach_from_ancestor(&tenant, options, ctx) - .await - .map_err(|e| ApiError::InternalServerError(e.into()))?; + .await?; let res = state .tenant_manager @@ -2042,9 +2043,7 @@ async fn active_timeline_of_active_tenant( tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?; - tenant - .get_timeline(timeline_id, true) - .map_err(|e| ApiError::NotFound(e.into())) + Ok(tenant.get_timeline(timeline_id, true)?) 
} async fn always_panic_handler( diff --git a/pageserver/src/tenant/timeline/detach_ancestor.rs b/pageserver/src/tenant/timeline/detach_ancestor.rs index 4d8e570181..e6ddabe5b5 100644 --- a/pageserver/src/tenant/timeline/detach_ancestor.rs +++ b/pageserver/src/tenant/timeline/detach_ancestor.rs @@ -12,7 +12,7 @@ use crate::{ }; use tokio_util::sync::CancellationToken; use tracing::Instrument; -use utils::{completion, generation::Generation, id::TimelineId, lsn::Lsn}; +use utils::{completion, generation::Generation, http::error::ApiError, id::TimelineId, lsn::Lsn}; #[derive(Debug, thiserror::Error)] pub(crate) enum Error { @@ -41,6 +41,27 @@ pub(crate) enum Error { Unexpected(#[source] anyhow::Error), } +impl From for ApiError { + fn from(value: Error) -> Self { + match value { + e @ Error::NoAncestor => ApiError::Conflict(e.to_string()), + // TODO: ApiError converts the anyhow using debug formatting ... just stop using ApiError? + e @ Error::TooManyAncestors => ApiError::BadRequest(anyhow::anyhow!("{}", e)), + Error::ShuttingDown => ApiError::ShuttingDown, + Error::OtherTimelineDetachOngoing(_) => { + ApiError::ResourceUnavailable("other timeline detach is already ongoing".into()) + } + // All of these contain shutdown errors, in fact, it's the most common + e @ Error::FlushAncestor(_) + | e @ Error::RewrittenDeltaDownloadFailed(_) + | e @ Error::CopyDeltaPrefix(_) + | e @ Error::UploadRewritten(_) + | e @ Error::CopyFailed(_) + | e @ Error::Unexpected(_) => ApiError::InternalServerError(e.into()), + } + } +} + pub(crate) struct PreparedTimelineDetach { layers: Vec, } @@ -75,6 +96,11 @@ pub(super) async fn prepare( .as_ref() .map(|tl| (tl.clone(), detached.ancestor_lsn)) else { + // TODO: check if we have already been detached; for this we need to read the stored data + // on remote client, for that we need a follow-up which makes uploads cheaper and maintains + // a projection of the commited data. 
+ // + // the error is wrong per openapi return Err(NoAncestor); }; @@ -84,7 +110,7 @@ pub(super) async fn prepare( if ancestor.ancestor_timeline.is_some() { // non-technical requirement; we could flatten N ancestors just as easily but we chose - // not to + // not to, at least initially return Err(TooManyAncestors); } diff --git a/test_runner/regress/test_timeline_detach_ancestor.py b/test_runner/regress/test_timeline_detach_ancestor.py index 1563c161e0..f0b2f7d733 100644 --- a/test_runner/regress/test_timeline_detach_ancestor.py +++ b/test_runner/regress/test_timeline_detach_ancestor.py @@ -8,9 +8,13 @@ from typing import List, Tuple import pytest from fixtures.common_types import Lsn, TimelineId from fixtures.log_helper import log -from fixtures.neon_fixtures import NeonEnvBuilder, PgBin, wait_for_last_flush_lsn -from fixtures.pageserver.http import HistoricLayerInfo -from fixtures.pageserver.utils import wait_timeline_detail_404 +from fixtures.neon_fixtures import ( + NeonEnvBuilder, + PgBin, + wait_for_last_flush_lsn, +) +from fixtures.pageserver.http import HistoricLayerInfo, PageserverApiException +from fixtures.pageserver.utils import wait_tenant_status_404, wait_timeline_detail_404 from fixtures.remote_storage import LocalFsStorage from fixtures.utils import assert_pageserver_backups_equal @@ -555,6 +559,32 @@ def test_compaction_induced_by_detaches_in_history( assert_pageserver_backups_equal(fullbackup_before, fullbackup_after, set()) +def test_timeline_ancestor_errors(neon_env_builder: NeonEnvBuilder): + env = neon_env_builder.init_start() + env.pageserver.allowed_errors.extend(SHUTDOWN_ALLOWED_ERRORS) + + client = env.pageserver.http_client() + + with pytest.raises(PageserverApiException, match=".* no ancestors") as info: + client.detach_ancestor(env.initial_tenant, env.initial_timeline) + assert info.value.status_code == 409 + + first_branch = env.neon_cli.create_branch("first_branch") + second_branch = env.neon_cli.create_branch("second_branch", ancestor_branch_name="first_branch") + + # funnily enough this does not have a prefix + with pytest.raises(PageserverApiException, match="too many ancestors") as info: + client.detach_ancestor(env.initial_tenant, second_branch) + assert info.value.status_code == 400 + + client.tenant_delete(env.initial_tenant) + wait_tenant_status_404(client, env.initial_tenant, 10, 1) + + with pytest.raises(PageserverApiException) as e: + client.detach_ancestor(env.initial_tenant, first_branch) + assert e.value.status_code == 404 + + # TODO: # - after starting the operation, tenant is deleted # - after starting the operation, pageserver is shutdown, restarted From 8901ce9c99f068464e9ee15673f25569ff298f17 Mon Sep 17 00:00:00 2001 From: Tristan Partin Date: Tue, 21 May 2024 18:26:47 -0500 Subject: [PATCH 050/220] Fix typos in action definitions --- .github/actions/neon-branch-create/action.yml | 6 +++--- .github/actions/neon-branch-delete/action.yml | 8 ++++---- .github/actions/neon-project-create/action.yml | 12 ++++++------ .github/actions/neon-project-delete/action.yml | 6 +++--- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/.github/actions/neon-branch-create/action.yml b/.github/actions/neon-branch-create/action.yml index dea3fc2357..9f752d5a89 100644 --- a/.github/actions/neon-branch-create/action.yml +++ b/.github/actions/neon-branch-create/action.yml @@ -3,13 +3,13 @@ description: 'Create Branch using API' inputs: api_key: - desctiption: 'Neon API key' + description: 'Neon API key' required: true project_id: - desctiption: 'ID 
of the Project to create Branch in' + description: 'ID of the Project to create Branch in' required: true api_host: - desctiption: 'Neon API host' + description: 'Neon API host' default: console-stage.neon.build outputs: dsn: diff --git a/.github/actions/neon-branch-delete/action.yml b/.github/actions/neon-branch-delete/action.yml index 8acba7ad00..58141a4a3f 100644 --- a/.github/actions/neon-branch-delete/action.yml +++ b/.github/actions/neon-branch-delete/action.yml @@ -3,16 +3,16 @@ description: 'Delete Branch using API' inputs: api_key: - desctiption: 'Neon API key' + description: 'Neon API key' required: true project_id: - desctiption: 'ID of the Project which should be deleted' + description: 'ID of the Project which should be deleted' required: true branch_id: - desctiption: 'ID of the branch to delete' + description: 'ID of the branch to delete' required: true api_host: - desctiption: 'Neon API host' + description: 'Neon API host' default: console-stage.neon.build runs: diff --git a/.github/actions/neon-project-create/action.yml b/.github/actions/neon-project-create/action.yml index 7f0e599b97..4039a58b9e 100644 --- a/.github/actions/neon-project-create/action.yml +++ b/.github/actions/neon-project-create/action.yml @@ -3,22 +3,22 @@ description: 'Create Neon Project using API' inputs: api_key: - desctiption: 'Neon API key' + description: 'Neon API key' required: true region_id: - desctiption: 'Region ID, if not set the project will be created in the default region' + description: 'Region ID, if not set the project will be created in the default region' default: aws-us-east-2 postgres_version: - desctiption: 'Postgres version; default is 15' + description: 'Postgres version; default is 15' default: 15 api_host: - desctiption: 'Neon API host' + description: 'Neon API host' default: console-stage.neon.build provisioner: - desctiption: 'k8s-pod or k8s-neonvm' + description: 'k8s-pod or k8s-neonvm' default: 'k8s-pod' compute_units: - desctiption: '[Min, Max] compute units; Min and Max are used for k8s-neonvm with autoscaling, for k8s-pod values Min and Max should be equal' + description: '[Min, Max] compute units; Min and Max are used for k8s-neonvm with autoscaling, for k8s-pod values Min and Max should be equal' default: '[1, 1]' outputs: diff --git a/.github/actions/neon-project-delete/action.yml b/.github/actions/neon-project-delete/action.yml index b8ec6cac70..35e165fd61 100644 --- a/.github/actions/neon-project-delete/action.yml +++ b/.github/actions/neon-project-delete/action.yml @@ -3,13 +3,13 @@ description: 'Delete Neon Project using API' inputs: api_key: - desctiption: 'Neon API key' + description: 'Neon API key' required: true project_id: - desctiption: 'ID of the Project to delete' + description: 'ID of the Project to delete' required: true api_host: - desctiption: 'Neon API host' + description: 'Neon API host' default: console-stage.neon.build runs: From 900f39111507bc058bd541b66deddd66d6473443 Mon Sep 17 00:00:00 2001 From: Tristan Partin Date: Tue, 21 May 2024 18:27:30 -0500 Subject: [PATCH 051/220] Make postgres_version action input default to a string This is "required" by GitHub Actions, though they must do some coersion on their side. 
--- .github/actions/neon-project-create/action.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/neon-project-create/action.yml b/.github/actions/neon-project-create/action.yml index 4039a58b9e..16759ad038 100644 --- a/.github/actions/neon-project-create/action.yml +++ b/.github/actions/neon-project-create/action.yml @@ -10,7 +10,7 @@ inputs: default: aws-us-east-2 postgres_version: description: 'Postgres version; default is 15' - default: 15 + default: '15' api_host: description: 'Neon API host' default: console-stage.neon.build From 325f3784f9fe46b5152000ba50ae4a01c9fd68a4 Mon Sep 17 00:00:00 2001 From: Alexander Bayandin Date: Wed, 22 May 2024 16:02:20 +0100 Subject: [PATCH 052/220] CI(promote-images): simplify & fix the job (#7826) ## Problem Currently, `latest` tag is added to the images in several cases: ``` github.ref_name == 'main' || github.ref_name == 'release' || github.ref_name == 'release-proxy' ``` This leads to a race; the `latest` tag jumps back and forth depending on the branch that has built images. ## Summary of changes - Do not push `latest` images to prod ECR (we don't use it) - Use `docker buildx imagetools` instead of `crane` for tagging images - Unify `vm-compute-node-image` job with others and use dockerhub as a first source for images (sync images with ECR) - Tag images with `latest` only for commits in `main` --- .github/workflows/build_and_test.yml | 117 ++++++++++++--------------- 1 file changed, 52 insertions(+), 65 deletions(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index d8ad6e26d0..5056025457 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -883,22 +883,39 @@ jobs: curl -fL https://github.com/neondatabase/autoscaling/releases/download/$VM_BUILDER_VERSION/vm-builder -o vm-builder chmod +x vm-builder + # Use custom DOCKER_CONFIG directory to avoid conflicts with default settings + # The default value is ~/.docker + - name: Set custom docker config directory + run: | + mkdir -p .docker-custom + echo DOCKER_CONFIG=$(pwd)/.docker-custom >> $GITHUB_ENV + + - uses: docker/login-action@v3 + with: + username: ${{ secrets.NEON_DOCKERHUB_USERNAME }} + password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }} + # Note: we need a separate pull step here because otherwise vm-builder will try to pull, and # it won't have the proper authentication (written at v0.6.0) - name: Pulling compute-node image run: | - docker pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}} + docker pull neondatabase/compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }} - name: Build vm image run: | ./vm-builder \ -spec=vm-image-spec.yaml \ - -src=369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}} \ - -dst=369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}} + -src=neondatabase/compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }} \ + -dst=neondatabase/vm-compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }} - name: Pushing vm-compute-node image run: | - docker push 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}} + docker push neondatabase/vm-compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }} + + - name: Remove custom docker config directory 
+ if: always() + run: | + rm -rf .docker-custom test-images: needs: [ check-permissions, tag, neon-image, compute-node-image ] @@ -946,78 +963,48 @@ jobs: promote-images: needs: [ check-permissions, tag, test-images, vm-compute-node-image ] - runs-on: [ self-hosted, gen3, small ] - container: golang:1.19-bullseye - # Don't add if-condition here. - # The job should always be run because we have dependant other jobs that shouldn't be skipped + runs-on: ubuntu-latest + + env: + VERSIONS: v14 v15 v16 steps: - - name: Install Crane & ECR helper - run: | - go install github.com/google/go-containerregistry/cmd/crane@31786c6cbb82d6ec4fb8eb79cd9387905130534e # v0.11.0 - go install github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cli/docker-credential-ecr-login@69c85dc22db6511932bbf119e1a0cc5c90c69a7f # v0.6.0 + - uses: docker/login-action@v3 + with: + username: ${{ secrets.NEON_DOCKERHUB_USERNAME }} + password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }} - - name: Configure ECR login - run: | - mkdir /github/home/.docker/ - echo "{\"credsStore\":\"ecr-login\"}" > /github/home/.docker/config.json + - uses: docker/login-action@v3 + with: + registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com + username: ${{ secrets.AWS_ACCESS_KEY_DEV }} + password: ${{ secrets.AWS_SECRET_KEY_DEV }} - - name: Copy vm-compute-node images to Docker Hub + - name: Copy vm-compute-node images to ECR run: | - crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v14:${{needs.tag.outputs.build-tag}} vm-compute-node-v14 - crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v15:${{needs.tag.outputs.build-tag}} vm-compute-node-v15 - crane pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v16:${{needs.tag.outputs.build-tag}} vm-compute-node-v16 + for version in ${VERSIONS}; do + docker buildx imagetools create -t 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-${version}:${{ needs.tag.outputs.build-tag }} \ + neondatabase/vm-compute-node-${version}:${{ needs.tag.outputs.build-tag }} + done - name: Add latest tag to images - if: github.ref_name == 'main' || github.ref_name == 'release' || github.ref_name == 'release-proxy' + if: github.ref_name == 'main' run: | - crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}} latest - crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:${{needs.tag.outputs.build-tag}} latest - crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v14:${{needs.tag.outputs.build-tag}} latest - crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v14:${{needs.tag.outputs.build-tag}} latest - crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v15:${{needs.tag.outputs.build-tag}} latest - crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v15:${{needs.tag.outputs.build-tag}} latest - crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v16:${{needs.tag.outputs.build-tag}} latest - crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v16:${{needs.tag.outputs.build-tag}} latest + for repo in neondatabase 369495373322.dkr.ecr.eu-central-1.amazonaws.com; do + docker buildx imagetools create -t $repo/neon:latest \ + $repo/neon:${{ needs.tag.outputs.build-tag }} - - name: Push images to production ECR - if: github.ref_name == 'main' || github.ref_name == 'release'|| github.ref_name == 'release-proxy' - run: | - crane copy 
369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/neon:latest - crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:latest - crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v14:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v14:latest - crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v14:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v14:latest - crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v15:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v15:latest - crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v15:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v15:latest - crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v16:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v16:latest - crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v16:${{needs.tag.outputs.build-tag}} 093970136003.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v16:latest + docker buildx imagetools create -t $repo/compute-tools:latest \ + $repo/compute-tools:${{ needs.tag.outputs.build-tag }} - - name: Configure Docker Hub login - run: | - # ECR Credential Helper & Docker Hub don't work together in config, hence reset - echo "" > /github/home/.docker/config.json - crane auth login -u ${{ secrets.NEON_DOCKERHUB_USERNAME }} -p ${{ secrets.NEON_DOCKERHUB_PASSWORD }} index.docker.io + for version in ${VERSIONS}; do + docker buildx imagetools create -t $repo/compute-node-${version}:latest \ + $repo/compute-node-${version}:${{ needs.tag.outputs.build-tag }} - - name: Push vm-compute-node to Docker Hub - run: | - crane push vm-compute-node-v14 neondatabase/vm-compute-node-v14:${{needs.tag.outputs.build-tag}} - crane push vm-compute-node-v15 neondatabase/vm-compute-node-v15:${{needs.tag.outputs.build-tag}} - crane push vm-compute-node-v16 neondatabase/vm-compute-node-v16:${{needs.tag.outputs.build-tag}} - - - name: Push latest tags to Docker Hub - if: github.ref_name == 'main' || github.ref_name == 'release'|| github.ref_name == 'release-proxy' - run: | - crane tag neondatabase/neon:${{needs.tag.outputs.build-tag}} latest - crane tag neondatabase/compute-tools:${{needs.tag.outputs.build-tag}} latest - crane tag neondatabase/compute-node-v14:${{needs.tag.outputs.build-tag}} latest - crane tag neondatabase/vm-compute-node-v14:${{needs.tag.outputs.build-tag}} latest - crane tag neondatabase/compute-node-v15:${{needs.tag.outputs.build-tag}} latest - crane tag neondatabase/vm-compute-node-v15:${{needs.tag.outputs.build-tag}} latest - crane tag neondatabase/compute-node-v16:${{needs.tag.outputs.build-tag}} latest - crane tag neondatabase/vm-compute-node-v16:${{needs.tag.outputs.build-tag}} latest - - - name: Cleanup ECR folder - run: rm -rf ~/.ecr + docker buildx imagetools create -t $repo/vm-compute-node-${version}:latest \ + $repo/vm-compute-node-${version}:${{ needs.tag.outputs.build-tag }} + done + done trigger-custom-extensions-build-and-wait: needs: [ check-permissions, tag ] From a7f31f1a59aec1cdd66df71ffaf3b7ad176c4966 Mon Sep 17 00:00:00 2001 From: 
Alexander Bayandin Date: Wed, 22 May 2024 16:06:05 +0100 Subject: [PATCH 053/220] CI: build multi-arch images (#7696) ## Problem We don't build our docker images for ARM arch, and that makes it harder to run images on ARM (on MacBooks with Apple Silicon, for example). ## Summary of changes - Build `neondatabase/neon` for ARM and create a multi-arch image - Build `neondatabase/compute-node-vXX` for ARM and create a multi-arch image - Run `test-images` job on ARM as well --- .github/workflows/build_and_test.yml | 123 +++++++++++++++++----- docker-compose/compute_wrapper/Dockerfile | 2 +- docker-compose/docker_compose_test.sh | 2 - 3 files changed, 97 insertions(+), 30 deletions(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 5056025457..2ab1417d6d 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -723,9 +723,13 @@ jobs: uses: ./.github/workflows/trigger-e2e-tests.yml secrets: inherit - neon-image: + neon-image-arch: needs: [ check-permissions, build-build-tools-image, tag ] - runs-on: [ self-hosted, gen3, large ] + strategy: + matrix: + arch: [ x64, arm64 ] + + runs-on: ${{ fromJson(format('["self-hosted", "gen3", "{0}"]', matrix.arch == 'arm64' && 'large-arm64' || 'large')) }} steps: - name: Checkout @@ -747,12 +751,6 @@ jobs: username: ${{ secrets.NEON_DOCKERHUB_USERNAME }} password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }} - - uses: docker/login-action@v3 - with: - registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com - username: ${{ secrets.AWS_ACCESS_KEY_DEV }} - password: ${{ secrets.AWS_SECRET_KEY_DEV }} - - uses: docker/build-push-action@v5 with: context: . @@ -764,25 +762,52 @@ jobs: push: true pull: true file: Dockerfile - cache-from: type=registry,ref=neondatabase/neon:cache - cache-to: type=registry,ref=neondatabase/neon:cache,mode=max + cache-from: type=registry,ref=neondatabase/neon:cache-${{ matrix.arch }} + cache-to: type=registry,ref=neondatabase/neon:cache-${{ matrix.arch }},mode=max tags: | - 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}} - neondatabase/neon:${{needs.tag.outputs.build-tag}} + neondatabase/neon:${{ needs.tag.outputs.build-tag }}-${{ matrix.arch }} - name: Remove custom docker config directory if: always() run: | rm -rf .docker-custom - compute-node-image: - needs: [ check-permissions, build-build-tools-image, tag ] - runs-on: [ self-hosted, gen3, large ] + neon-image: + needs: [ neon-image-arch, tag ] + runs-on: ubuntu-latest + steps: + - uses: docker/login-action@v3 + with: + username: ${{ secrets.NEON_DOCKERHUB_USERNAME }} + password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }} + + - name: Create multi-arch image + run: | + docker buildx imagetools create -t neondatabase/neon:${{ needs.tag.outputs.build-tag }} \ + neondatabase/neon:${{ needs.tag.outputs.build-tag }}-x64 \ + neondatabase/neon:${{ needs.tag.outputs.build-tag }}-arm64 + + - uses: docker/login-action@v3 + with: + registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com + username: ${{ secrets.AWS_ACCESS_KEY_DEV }} + password: ${{ secrets.AWS_SECRET_KEY_DEV }} + + - name: Push multi-arch image to ECR + run: | + docker buildx imagetools create -t 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{ needs.tag.outputs.build-tag }} \ + neondatabase/neon:${{ needs.tag.outputs.build-tag }} + + compute-node-image-arch: + needs: [ check-permissions, build-build-tools-image, tag ] strategy: fail-fast: false matrix: version: [ v14, v15, v16 ] + arch: [ x64, arm64 ] + + 
runs-on: ${{ fromJson(format('["self-hosted", "gen3", "{0}"]', matrix.arch == 'arm64' && 'large-arm64' || 'large')) }} steps: - name: Checkout @@ -829,15 +854,14 @@ jobs: push: true pull: true file: Dockerfile.compute-node - cache-from: type=registry,ref=neondatabase/compute-node-${{ matrix.version }}:cache - cache-to: type=registry,ref=neondatabase/compute-node-${{ matrix.version }}:cache,mode=max + cache-from: type=registry,ref=neondatabase/compute-node-${{ matrix.version }}:cache-${{ matrix.arch }} + cache-to: type=registry,ref=neondatabase/compute-node-${{ matrix.version }}:cache-${{ matrix.arch }},mode=max tags: | - 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}} - neondatabase/compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}} + neondatabase/compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }}-${{ matrix.arch }} - name: Build compute-tools image # compute-tools are Postgres independent, so build it only once - if: ${{ matrix.version == 'v16' }} + if: matrix.version == 'v16' uses: docker/build-push-action@v5 with: target: compute-tools-image @@ -851,14 +875,57 @@ jobs: pull: true file: Dockerfile.compute-node tags: | - 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:${{ needs.tag.outputs.build-tag }} - neondatabase/compute-tools:${{ needs.tag.outputs.build-tag }} + neondatabase/compute-tools:${{ needs.tag.outputs.build-tag }}-${{ matrix.arch }} - name: Remove custom docker config directory if: always() run: | rm -rf .docker-custom + compute-node-image: + needs: [ compute-node-image-arch, tag ] + runs-on: ubuntu-latest + + strategy: + matrix: + version: [ v14, v15, v16 ] + + steps: + - uses: docker/login-action@v3 + with: + username: ${{ secrets.NEON_DOCKERHUB_USERNAME }} + password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }} + + - name: Create multi-arch compute-node image + run: | + docker buildx imagetools create -t neondatabase/compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }} \ + neondatabase/compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }}-x64 \ + neondatabase/compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }}-arm64 + + - name: Create multi-arch compute-tools image + if: matrix.version == 'v16' + run: | + docker buildx imagetools create -t neondatabase/compute-tools:${{ needs.tag.outputs.build-tag }} \ + neondatabase/compute-tools:${{ needs.tag.outputs.build-tag }}-x64 \ + neondatabase/compute-tools:${{ needs.tag.outputs.build-tag }}-arm64 + + - uses: docker/login-action@v3 + with: + registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com + username: ${{ secrets.AWS_ACCESS_KEY_DEV }} + password: ${{ secrets.AWS_SECRET_KEY_DEV }} + + - name: Push multi-arch compute-node-${{ matrix.version }} image to ECR + run: | + docker buildx imagetools create -t 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }} \ + neondatabase/compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }} + + - name: Push multi-arch compute-tools image to ECR + if: matrix.version == 'v16' + run: | + docker buildx imagetools create -t 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:${{ needs.tag.outputs.build-tag }} \ + neondatabase/compute-tools:${{ needs.tag.outputs.build-tag }} + vm-compute-node-image: needs: [ check-permissions, tag, compute-node-image ] runs-on: [ self-hosted, gen3, large ] @@ -866,9 +933,6 @@ jobs: fail-fast: false matrix: 
version: [ v14, v15, v16 ] - defaults: - run: - shell: sh -eu {0} env: VM_BUILDER_VERSION: v0.28.1 @@ -919,7 +983,12 @@ jobs: test-images: needs: [ check-permissions, tag, neon-image, compute-node-image ] - runs-on: [ self-hosted, gen3, small ] + strategy: + fail-fast: false + matrix: + arch: [ x64, arm64 ] + + runs-on: ${{ fromJson(format('["self-hosted", "gen3", "{0}"]', matrix.arch == 'arm64' && 'small-arm64' || 'small')) }} steps: - name: Checkout @@ -937,7 +1006,7 @@ jobs: - name: Verify image versions shell: bash # ensure no set -e for better error messages run: | - pageserver_version=$(docker run --rm 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}} "/bin/sh" "-c" "/usr/local/bin/pageserver --version") + pageserver_version=$(docker run --rm neondatabase/neon:${{ needs.tag.outputs.build-tag }} "/bin/sh" "-c" "/usr/local/bin/pageserver --version") echo "Pageserver version string: $pageserver_version" diff --git a/docker-compose/compute_wrapper/Dockerfile b/docker-compose/compute_wrapper/Dockerfile index f1b1986072..974dcd7f03 100644 --- a/docker-compose/compute_wrapper/Dockerfile +++ b/docker-compose/compute_wrapper/Dockerfile @@ -1,4 +1,4 @@ -ARG REPOSITORY=369495373322.dkr.ecr.eu-central-1.amazonaws.com +ARG REPOSITORY=neondatabase ARG COMPUTE_IMAGE=compute-node-v14 ARG TAG=latest diff --git a/docker-compose/docker_compose_test.sh b/docker-compose/docker_compose_test.sh index e18b0f9176..062fc6fc92 100755 --- a/docker-compose/docker_compose_test.sh +++ b/docker-compose/docker_compose_test.sh @@ -8,8 +8,6 @@ # Their defaults point at DockerHub `neondatabase/neon:latest` image.`, # to verify custom image builds (e.g pre-published ones). -# XXX: Current does not work on M1 macs due to x86_64 Docker images compiled only, and no seccomp support in M1 Docker emulation layer. - set -eux -o pipefail SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" From e015b2bf3eb96c7a5d8b9bf0253ae13e569f5747 Mon Sep 17 00:00:00 2001 From: John Spray Date: Wed, 22 May 2024 16:10:58 +0100 Subject: [PATCH 054/220] safekeeper: use CancellationToken instead of watch channel (#7836) ## Problem Safekeeper Timeline uses a channel for cancellation, but we have a dedicated type for that. ## Summary of changes - Use CancellationToken in Timeline --- safekeeper/src/recovery.rs | 10 ++------- safekeeper/src/timeline.rs | 31 +++++++--------------------- safekeeper/src/timeline_manager.rs | 10 +-------- safekeeper/src/wal_backup_partial.rs | 14 +++---------- 4 files changed, 13 insertions(+), 52 deletions(-) diff --git a/safekeeper/src/recovery.rs b/safekeeper/src/recovery.rs index e8fa6c55f4..dfa1892c40 100644 --- a/safekeeper/src/recovery.rs +++ b/safekeeper/src/recovery.rs @@ -37,17 +37,11 @@ use crate::{ #[instrument(name = "recovery task", skip_all, fields(ttid = %tli.ttid))] pub async fn recovery_main(tli: Arc, conf: SafeKeeperConf) { info!("started"); - let mut cancellation_rx = match tli.get_cancellation_rx() { - Ok(rx) => rx, - Err(_) => { - info!("timeline canceled during task start"); - return; - } - }; + let cancel = tli.cancel.clone(); select! 
{ _ = recovery_main_loop(tli, conf) => { unreachable!() } - _ = cancellation_rx.changed() => { + _ = cancel.cancelled() => { info!("stopped"); } } diff --git a/safekeeper/src/timeline.rs b/safekeeper/src/timeline.rs index da2e3f4538..89c157d514 100644 --- a/safekeeper/src/timeline.rs +++ b/safekeeper/src/timeline.rs @@ -6,6 +6,7 @@ use camino::Utf8PathBuf; use postgres_ffi::XLogSegNo; use serde::{Deserialize, Serialize}; use tokio::fs; +use tokio_util::sync::CancellationToken; use std::cmp::max; use std::ops::{Deref, DerefMut}; @@ -342,12 +343,8 @@ pub struct Timeline { walsenders: Arc, walreceivers: Arc, - /// Cancellation channel. Delete/cancel will send `true` here as a cancellation signal. - cancellation_tx: watch::Sender, - - /// Timeline should not be used after cancellation. Background tasks should - /// monitor this channel and stop eventually after receiving `true` from this channel. - cancellation_rx: watch::Receiver, + /// Delete/cancel will trigger this, background tasks should drop out as soon as it fires + pub(crate) cancel: CancellationToken, /// Directory where timeline state is stored. pub timeline_dir: Utf8PathBuf, @@ -376,7 +373,6 @@ impl Timeline { shared_state.sk.flush_lsn(), ))); let (shared_state_version_tx, shared_state_version_rx) = watch::channel(0); - let (cancellation_tx, cancellation_rx) = watch::channel(false); let walreceivers = WalReceivers::new(); Ok(Timeline { @@ -390,8 +386,7 @@ impl Timeline { mutex: RwLock::new(shared_state), walsenders: WalSenders::new(walreceivers.clone()), walreceivers, - cancellation_rx, - cancellation_tx, + cancel: CancellationToken::default(), timeline_dir: conf.timeline_dir(&ttid), walsenders_keep_horizon: conf.walsenders_keep_horizon, broker_active: AtomicBool::new(false), @@ -411,7 +406,6 @@ impl Timeline { let (term_flush_lsn_watch_tx, term_flush_lsn_watch_rx) = watch::channel(TermLsn::from((INVALID_TERM, Lsn::INVALID))); let (shared_state_version_tx, shared_state_version_rx) = watch::channel(0); - let (cancellation_tx, cancellation_rx) = watch::channel(false); let state = TimelinePersistentState::new(&ttid, server_info, vec![], commit_lsn, local_start_lsn); @@ -428,8 +422,7 @@ impl Timeline { mutex: RwLock::new(SharedState::create_new(conf, &ttid, state)?), walsenders: WalSenders::new(walreceivers.clone()), walreceivers, - cancellation_rx, - cancellation_tx, + cancel: CancellationToken::default(), timeline_dir: conf.timeline_dir(&ttid), walsenders_keep_horizon: conf.walsenders_keep_horizon, broker_active: AtomicBool::new(false), @@ -535,7 +528,7 @@ impl Timeline { /// eventually after receiving cancellation signal. fn cancel(&self, shared_state: &mut WriteGuardSharedState<'_>) { info!("timeline {} is cancelled", self.ttid); - let _ = self.cancellation_tx.send(true); + self.cancel.cancel(); // Close associated FDs. Nobody will be able to touch timeline data once // it is cancelled, so WAL storage won't be opened again. shared_state.sk.wal_store.close(); @@ -543,17 +536,7 @@ impl Timeline { /// Returns if timeline is cancelled. pub fn is_cancelled(&self) -> bool { - *self.cancellation_rx.borrow() - } - - /// Returns watch channel which gets value when timeline is cancelled. It is - /// guaranteed to have not cancelled value observed (errors otherwise). - pub fn get_cancellation_rx(&self) -> Result> { - let rx = self.cancellation_rx.clone(); - if *rx.borrow() { - bail!(TimelineError::Cancelled(self.ttid)); - } - Ok(rx) + self.cancel.is_cancelled() } /// Take a writing mutual exclusive lock on timeline shared_state. 
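For context, the cooperative-cancellation shape that the remaining hunks in this patch move to looks roughly like the sketch below. It uses tokio-util's `CancellationToken` directly; the `Timeline` struct, `background_task`, and `shutdown` names and the loop body are simplified stand-ins for illustration, not the safekeeper's actual task structure.

```rust
// Simplified sketch: a per-timeline CancellationToken replaces the
// watch::channel(bool) pair. Background tasks select on `cancelled()`
// and drop out as soon as `cancel()` fires anywhere.
use std::sync::Arc;
use tokio_util::sync::CancellationToken;

struct Timeline {
    cancel: CancellationToken,
}

async fn background_task(tli: Arc<Timeline>) {
    loop {
        tokio::select! {
            _ = tli.cancel.cancelled() => {
                // timeline was deleted/cancelled; stop promptly
                break;
            }
            _ = tokio::time::sleep(std::time::Duration::from_secs(1)) => {
                // ... periodic work ...
            }
        }
    }
}

fn shutdown(tli: &Timeline) {
    // idempotent; every holder of (a clone of) the token observes this
    tli.cancel.cancel();
}
```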
diff --git a/safekeeper/src/timeline_manager.rs b/safekeeper/src/timeline_manager.rs index 52ad915065..e74ba37ad8 100644 --- a/safekeeper/src/timeline_manager.rs +++ b/safekeeper/src/timeline_manager.rs @@ -47,14 +47,6 @@ pub async fn main_task( conf: SafeKeeperConf, broker_active_set: Arc, ) { - let mut cancellation_rx = match tli.get_cancellation_rx() { - Ok(rx) => rx, - Err(_) => { - info!("timeline canceled during task start"); - return; - } - }; - scopeguard::defer! { if tli.is_cancelled() { info!("manager task finished"); @@ -129,7 +121,7 @@ pub async fn main_task( // wait until something changes. tx channels are stored under Arc, so they will not be // dropped until the manager task is finished. tokio::select! { - _ = cancellation_rx.changed() => { + _ = tli.cancel.cancelled() => { // timeline was deleted break 'outer state_snapshot; } diff --git a/safekeeper/src/wal_backup_partial.rs b/safekeeper/src/wal_backup_partial.rs index 200096ac5c..29e944bff3 100644 --- a/safekeeper/src/wal_backup_partial.rs +++ b/safekeeper/src/wal_backup_partial.rs @@ -277,14 +277,6 @@ pub async fn main_task(tli: Arc, conf: SafeKeeperConf) { debug!("started"); let await_duration = conf.partial_backup_timeout; - let mut cancellation_rx = match tli.get_cancellation_rx() { - Ok(rx) => rx, - Err(_) => { - info!("timeline canceled during task start"); - return; - } - }; - // sleep for random time to avoid thundering herd { let randf64 = rand::thread_rng().gen_range(0.0..1.0); @@ -327,7 +319,7 @@ pub async fn main_task(tli: Arc, conf: SafeKeeperConf) { && flush_lsn_rx.borrow().term == seg.term { tokio::select! { - _ = cancellation_rx.changed() => { + _ = backup.tli.cancel.cancelled() => { info!("timeline canceled"); return; } @@ -340,7 +332,7 @@ pub async fn main_task(tli: Arc, conf: SafeKeeperConf) { // if we don't have any data and zero LSNs, wait for something while flush_lsn_rx.borrow().lsn == Lsn(0) { tokio::select! { - _ = cancellation_rx.changed() => { + _ = backup.tli.cancel.cancelled() => { info!("timeline canceled"); return; } @@ -357,7 +349,7 @@ pub async fn main_task(tli: Arc, conf: SafeKeeperConf) { // waiting until timeout expires OR segno changes 'inner: loop { tokio::select! { - _ = cancellation_rx.changed() => { + _ = backup.tli.cancel.cancelled() => { info!("timeline canceled"); return; } From 62aac6c8add432da1ab2d206f19323e808622e8d Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Wed, 22 May 2024 18:13:45 +0300 Subject: [PATCH 055/220] fix(Layer): carry gate until eviction is complete (#7838) the gate was accidentially being dropped before the final blocking phase, possibly explaining the resident physical size global problems during deletions. it could had caused more harm as well, but the path is not actively being tested because cplane no longer puts locationconfigs with higher generation number during normal operation which prompted the last wave of fixes. Cc: #7341. 
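The shape of the fix, as a rough sketch: whatever guard prevents shutdown from completing must be moved into the blocking closure, so it is released only after the final blocking phase finishes rather than when the async scope ends. The sketch below uses a tokio `Semaphore` permit as a stand-in for the pageserver's gate guard; `evict` and the closure body are illustrative only, not the actual `Layer` code.

```rust
// Simplified sketch: the guard that keeps shutdown from completing is moved
// into the blocking closure, so it is only released once the final blocking
// phase of eviction has finished.
use std::sync::Arc;
use tokio::sync::Semaphore;

async fn evict(gate: Arc<Semaphore>) -> anyhow::Result<()> {
    // Stand-in for "enter the timeline gate": fails or waits once shutdown
    // has started closing the gate.
    let guard = gate.acquire_owned().await?;

    tokio::task::spawn_blocking(move || {
        // `guard` is moved in here; dropping it before this point (as the
        // pre-fix code effectively did) would let shutdown proceed while the
        // blocking eviction work is still running.
        let _held = guard;
        // ... blocking work: remove the layer file, update residence metrics ...
    })
    .await?;

    Ok(())
}
```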
--- pageserver/src/tenant/storage_layer/layer.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/pageserver/src/tenant/storage_layer/layer.rs b/pageserver/src/tenant/storage_layer/layer.rs index 45d61ce048..8c64621710 100644 --- a/pageserver/src/tenant/storage_layer/layer.rs +++ b/pageserver/src/tenant/storage_layer/layer.rs @@ -12,7 +12,7 @@ use std::time::{Duration, SystemTime}; use tracing::Instrument; use utils::id::TimelineId; use utils::lsn::Lsn; -use utils::sync::heavier_once_cell; +use utils::sync::{gate, heavier_once_cell}; use crate::config::PageServerConf; use crate::context::{DownloadBehavior, RequestContext}; @@ -1333,7 +1333,7 @@ impl LayerInner { is_good_to_continue(&rx.borrow_and_update())?; - let Ok(_gate) = timeline.gate.enter() else { + let Ok(gate) = timeline.gate.enter() else { return Err(EvictionCancelled::TimelineGone); }; @@ -1421,7 +1421,7 @@ impl LayerInner { Self::spawn_blocking(move || { let _span = span.entered(); - let res = self.evict_blocking(&timeline, &permit); + let res = self.evict_blocking(&timeline, &gate, &permit); let waiters = self.inner.initializer_count(); @@ -1447,6 +1447,7 @@ impl LayerInner { fn evict_blocking( &self, timeline: &Timeline, + _gate: &gate::GateGuard, _permit: &heavier_once_cell::InitPermit, ) -> Result<(), EvictionCancelled> { // now accesses to `self.inner.get_or_init*` wait on the semaphore or the `_permit` From 3404e76a51311a595e28beb7d0962488cafaf143 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Wed, 22 May 2024 14:36:13 +0300 Subject: [PATCH 056/220] Fix confusion between 1-based Buffer and 0-based index (#7825) The code was working correctly, but was incorrectly using Buffer for a 0-based index into the BufferDesc array. --- pgxn/neon/pagestore_smgr.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pgxn/neon/pagestore_smgr.c b/pgxn/neon/pagestore_smgr.c index 249ad313b0..4361c74905 100644 --- a/pgxn/neon/pagestore_smgr.c +++ b/pgxn/neon/pagestore_smgr.c @@ -3216,7 +3216,7 @@ neon_redo_read_buffer_filter(XLogReaderState *record, uint8 block_id) BufferTag tag; uint32 hash; LWLock *partitionLock; - Buffer buffer; + int buf_id; bool no_redo_needed; if (old_redo_read_buffer_filter && old_redo_read_buffer_filter(record, block_id)) @@ -3254,9 +3254,9 @@ neon_redo_read_buffer_filter(XLogReaderState *record, uint8 block_id) else { /* Try to find the relevant buffer */ - buffer = BufTableLookup(&tag, hash); + buf_id = BufTableLookup(&tag, hash); - no_redo_needed = buffer < 0; + no_redo_needed = buf_id < 0; } /* In both cases st lwlsn past this WAL record */ SetLastWrittenLSNForBlock(end_recptr, rinfo, forknum, blkno); From 921756402673e9e2fa764fbacf33457f05bc3d59 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Wed, 22 May 2024 14:57:09 +0300 Subject: [PATCH 057/220] Fix issues with determining request LSN in read replica (#7795) Don't set last-written LSN of a page when the record is replayed, only when the page is evicted from cache. For comparison, we don't update the last-written LSN on every page modification on the primary either, only when the page is evicted. Do update the last-written LSN when the page update is skipped in WAL redo, however. In neon_get_request_lsns(), don't be surprised if the last-written LSN is equal to the record being replayed. Use the LSN of the record being replayed as the request LSN in that case. Add a long comment explaining how that can happen. In neon_wallog_page, update last-written LSN also when Shutdown has been requested. 
We might still fetch and evict pages for a while, after shutdown has been requested, so we better continue to do that correctly. Enable the check that we don't evict a page with zero LSN also in standby, but make it a LOG message instead of PANIC Fixes issue https://github.com/neondatabase/neon/issues/7791 --- pgxn/neon/pagestore_smgr.c | 142 +++++++++++++++++++++--- test_runner/regress/test_hot_standby.py | 60 ++++++++++ 2 files changed, 185 insertions(+), 17 deletions(-) diff --git a/pgxn/neon/pagestore_smgr.c b/pgxn/neon/pagestore_smgr.c index 4361c74905..41546eae85 100644 --- a/pgxn/neon/pagestore_smgr.c +++ b/pgxn/neon/pagestore_smgr.c @@ -1349,6 +1349,10 @@ PageIsEmptyHeapPage(char *buffer) return memcmp(buffer, empty_page.data, BLCKSZ) == 0; } +/* + * A page is being evicted from the shared buffer cache. Update the + * last-written LSN of the page, and WAL-log it if needed. + */ static void #if PG_MAJORVERSION_NUM < 16 neon_wallog_page(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, char *buffer, bool force) @@ -1357,12 +1361,7 @@ neon_wallog_page(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, co #endif { XLogRecPtr lsn = PageGetLSN((Page) buffer); - - if (ShutdownRequestPending) - return; - /* Don't log any pages if we're not allowed to do so. */ - if (!XLogInsertAllowed()) - return; + bool log_page; /* * Whenever a VM or FSM page is evicted, WAL-log it. FSM and (some) VM @@ -1371,9 +1370,21 @@ neon_wallog_page(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, co * correctness, the non-logged updates are not critical. But we want to * have a reasonably up-to-date VM and FSM in the page server. */ - if ((force || forknum == FSM_FORKNUM || forknum == VISIBILITYMAP_FORKNUM) && !RecoveryInProgress()) + log_page = false; + if (force) + { + Assert(XLogInsertAllowed()); + log_page = true; + } + else if (XLogInsertAllowed() && + !ShutdownRequestPending && + (forknum == FSM_FORKNUM || forknum == VISIBILITYMAP_FORKNUM)) + { + log_page = true; + } + + if (log_page) { - /* FSM is never WAL-logged and we don't care. */ XLogRecPtr recptr; recptr = log_newpage_copy(&InfoFromSMgrRel(reln), forknum, blocknum, @@ -1386,7 +1397,8 @@ neon_wallog_page(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, co RelFileInfoFmt(InfoFromSMgrRel(reln)), forknum, LSN_FORMAT_ARGS(lsn)))); } - else if (lsn == InvalidXLogRecPtr) + + if (lsn == InvalidXLogRecPtr) { /* * When PostgreSQL extends a relation, it calls smgrextend() with an @@ -1422,19 +1434,31 @@ neon_wallog_page(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, co RelFileInfoFmt(InfoFromSMgrRel(reln)), forknum))); } - else + else if (forknum != FSM_FORKNUM && forknum != VISIBILITYMAP_FORKNUM) { - ereport(PANIC, + /* + * Its a bad sign if there is a page with zero LSN in the buffer + * cache in a standby, too. However, PANICing seems like a cure + * worse than the disease, as the damage has likely already been + * done in the primary. So in a standby, make this an assertion, + * and in a release build just LOG the error and soldier on. We + * update the last-written LSN of the page with a conservative + * value in that case, which is the last replayed LSN. + */ + ereport(RecoveryInProgress() ? 
LOG : PANIC, (errmsg(NEON_TAG "Page %u of relation %u/%u/%u.%u is evicted with zero LSN", blocknum, RelFileInfoFmt(InfoFromSMgrRel(reln)), forknum))); + Assert(false); + + lsn = GetXLogReplayRecPtr(NULL); /* in standby mode, soldier on */ } } else { ereport(SmgrTrace, - (errmsg(NEON_TAG "Page %u of relation %u/%u/%u.%u is already wal logged at lsn=%X/%X", + (errmsg(NEON_TAG "Evicting page %u of relation %u/%u/%u.%u with lsn=%X/%X", blocknum, RelFileInfoFmt(InfoFromSMgrRel(reln)), forknum, LSN_FORMAT_ARGS(lsn)))); @@ -1527,8 +1551,92 @@ neon_get_request_lsns(NRelFileInfo rinfo, ForkNumber forknum, BlockNumber blkno) if (RecoveryInProgress()) { - /* Request the page at the last replayed LSN. */ - result.request_lsn = GetXLogReplayRecPtr(NULL); + /*--- + * In broad strokes, a replica always requests the page at the current + * replay LSN. But looking closer, what exactly is the replay LSN? Is + * it the last replayed record, or the record being replayed? And does + * the startup process performing the replay need to do something + * differently than backends running queries? Let's take a closer look + * at the different scenarios: + * + * 1. Startup process reads a page, last_written_lsn is old. + * + * Read the old version of the page. We will apply the WAL record on + * it to bring it up-to-date. + * + * We could read the new version, with the changes from this WAL + * record already applied, to offload the work of replaying the record + * to the pageserver. The pageserver might not have received the WAL + * record yet, though, so a read of the old page version and applying + * the record ourselves is likely faster. Also, the redo function + * might be surprised if the changes have already applied. That's + * normal during crash recovery, but not in hot standby. + * + * 2. Startup process reads a page, last_written_lsn == record we're + * replaying. + * + * Can this happen? There are a few theoretical cases when it might: + * + * A) The redo function reads the same page twice. We had already read + * and applied the changes once, and now we're reading it for the + * second time. That would be a rather silly thing for a redo + * function to do, and I'm not aware of any that would do it. + * + * B) The redo function modifies multiple pages, and it already + * applied the changes to one of the pages, released the lock on + * it, and is now reading a second page. Furthermore, the first + * page was already evicted from the buffer cache, and also from + * the last-written LSN cache, so that the per-relation or global + * last-written LSN was already updated. All the WAL redo functions + * hold the locks on pages that they modify, until all the changes + * have been modified (?), which would make that impossible. + * However, we skip the locking, if the page isn't currently in the + * page cache (see neon_redo_read_buffer_filter below). + * + * Even if the one of the above cases were possible in theory, they + * would also require the pages being modified by the redo function to + * be immediately evicted from the page cache. + * + * So this probably does not happen in practice. But if it does, we + * request the new version, including the changes from the record + * being replayed. That seems like the correct behavior in any case. + * + * 3. Backend process reads a page with old last-written LSN + * + * Nothing special here. Read the old version. + * + * 4. 
Backend process reads a page with last_written_lsn == record being replayed + * + * This can happen, if the redo function has started to run, and saw + * that the page isn't present in the page cache (see + * neon_redo_read_buffer_filter below). Normally, in a normal + * Postgres server, the redo function would hold a lock on the page, + * so we would get blocked waiting the redo function to release the + * lock. To emulate that, wait for the WAL replay of the record to + * finish. + */ + /* Request the page at the end of the last fully replayed LSN. */ + XLogRecPtr replay_lsn = GetXLogReplayRecPtr(NULL); + + if (last_written_lsn > replay_lsn) + { + /* GetCurrentReplayRecPtr was introduced in v15 */ +#if PG_VERSION_NUM >= 150000 + Assert(last_written_lsn == GetCurrentReplayRecPtr(NULL)); +#endif + + /* + * Cases 2 and 4. If this is a backend (case 4), the + * neon_read_at_lsn() call later will wait for the WAL record to be + * fully replayed. + */ + result.request_lsn = last_written_lsn; + } + else + { + /* cases 1 and 3 */ + result.request_lsn = replay_lsn; + } result.not_modified_since = last_written_lsn; result.effective_request_lsn = result.request_lsn; Assert(last_written_lsn <= result.request_lsn); @@ -3258,16 +3366,16 @@ neon_redo_read_buffer_filter(XLogReaderState *record, uint8 block_id) no_redo_needed = buf_id < 0; } - /* In both cases st lwlsn past this WAL record */ - SetLastWrittenLSNForBlock(end_recptr, rinfo, forknum, blkno); /* * we don't have the buffer in memory, update lwLsn past this record, also * evict page from file cache */ if (no_redo_needed) + { + SetLastWrittenLSNForBlock(end_recptr, rinfo, forknum, blkno); lfc_evict(rinfo, forknum, blkno); - + } LWLockRelease(partitionLock); diff --git a/test_runner/regress/test_hot_standby.py b/test_runner/regress/test_hot_standby.py index 244d482c18..cf7a1c56ee 100644 --- a/test_runner/regress/test_hot_standby.py +++ b/test_runner/regress/test_hot_standby.py @@ -1,3 +1,4 @@ +import asyncio import os import re import threading @@ -292,3 +293,62 @@ def test_hot_standby_feedback(neon_env_builder: NeonEnvBuilder, pg_bin: PgBin): assert slot_xmin is None wait_until(10, 1.0, xmin_is_null) + + +# Test race condition between WAL replay and backends performing queries +# https://github.com/neondatabase/neon/issues/7791 +def test_replica_query_race(neon_simple_env: NeonEnv): + env = neon_simple_env + + primary_ep = env.endpoints.create_start( + branch_name="main", + endpoint_id="primary", + ) + + with primary_ep.connect() as p_con: + with p_con.cursor() as p_cur: + p_cur.execute("CREATE EXTENSION neon_test_utils") + p_cur.execute("CREATE TABLE test AS SELECT 0 AS counter") + + standby_ep = env.endpoints.new_replica_start(origin=primary_ep, endpoint_id="standby") + time.sleep(1) + + # In primary, run a lot of UPDATEs on a single page + finished = False + writecounter = 1 + + async def primary_workload(): + nonlocal writecounter, finished + conn = await primary_ep.connect_async() + while writecounter < 10000: + writecounter += 1 + await conn.execute(f"UPDATE test SET counter = {writecounter}") + finished = True + + # In standby, at the same time, run queries on it. And repeatedly drop caches + async def standby_workload(): + nonlocal writecounter, finished + conn = await standby_ep.connect_async() + reads = 0 + while not finished: + readcounter = await conn.fetchval("SELECT counter FROM test") + + # Check that the replica is keeping up with the primary. 
In local + # testing, the lag between primary and standby is much smaller, in + # the ballpark of 2-3 counter values. But be generous in case there's + # some hiccup. + # assert(writecounter - readcounter < 1000) + assert readcounter <= writecounter + if reads % 100 == 0: + log.info(f"read {reads}: counter {readcounter}, last update {writecounter}") + reads += 1 + + await conn.execute("SELECT clear_buffer_cache()") + + async def both(): + await asyncio.gather( + primary_workload(), + standby_workload(), + ) + + asyncio.run(both()) From 37f81289c2d1b27f88316980fb6b307e7e46d409 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Wed, 22 May 2024 18:24:52 +0300 Subject: [PATCH 058/220] Make 'neon.protocol_version = 2' the default, take two (#7819) Once all the computes in production have restarted, we can remove protocol version 1 altogether. See issue #6211. This was done earlier already in commit 0115fe6cb2, but reverted before it was released to production in commit bbe730d7ca because of issue https://github.com/neondatabase/neon/issues/7692. That issue was fixed in commit 22afaea6e1, so we are ready to change the default again. --- pgxn/neon/libpagestore.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pgxn/neon/libpagestore.c b/pgxn/neon/libpagestore.c index b7b1e7ccbf..f5ce2caff3 100644 --- a/pgxn/neon/libpagestore.c +++ b/pgxn/neon/libpagestore.c @@ -49,7 +49,7 @@ char *neon_auth_token; int readahead_buffer_size = 128; int flush_every_n_requests = 8; -int neon_protocol_version = 1; +int neon_protocol_version = 2; static int n_reconnect_attempts = 0; static int max_reconnect_attempts = 60; @@ -860,7 +860,7 @@ pg_init_libpagestore(void) "Version of compute<->page server protocol", NULL, &neon_protocol_version, - 1, /* default to old protocol for now */ + 2, /* use protocol version 2 */ 1, /* min */ 2, /* max */ PGC_SU_BACKEND, From 64577cfddcdac89d649e1ba6db3a6f44e14e2eee Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Wed, 22 May 2024 12:41:13 -0400 Subject: [PATCH 059/220] feat(pageserver): auto-detect previous aux file policy (#7841) ## Problem If an existing user already has some aux v1 files, we don't want to switch them to the global tenant-level config. Part of #7462 --------- Signed-off-by: Alex Chi Z --- pageserver/src/pgdatadir_mapping.rs | 29 +++++++++++++ pageserver/src/tenant.rs | 67 ++++++++++++++++++++++++++++- 2 files changed, 95 insertions(+), 1 deletion(-) diff --git a/pageserver/src/pgdatadir_mapping.rs b/pageserver/src/pgdatadir_mapping.rs index f9d8c1020d..7dea687c46 100644 --- a/pageserver/src/pgdatadir_mapping.rs +++ b/pageserver/src/pgdatadir_mapping.rs @@ -1480,6 +1480,29 @@ impl<'a> DatadirModification<'a> { // Allowed switch path: // * no aux files -> v1/v2/cross-validation // * cross-validation->v2 + + let current_policy = if current_policy.is_none() { + // This path will only be hit once per tenant: we will decide the final policy in this code block. + // The next call to `put_file` will always have `last_aux_file_policy != None`. 
+ let lsn = Lsn::max(self.tline.get_last_record_lsn(), self.lsn); + let aux_files_key_v1 = self.tline.list_aux_files_v1(lsn, ctx).await?; + if aux_files_key_v1.is_empty() { + None + } else { + self.tline + .last_aux_file_policy + .store(Some(AuxFilePolicy::V1)); + self.tline + .remote_client + .schedule_index_upload_for_aux_file_policy_update(Some( + AuxFilePolicy::V1, + ))?; + Some(AuxFilePolicy::V1) + } + } else { + current_policy + }; + if AuxFilePolicy::is_valid_migration_path(current_policy, switch_policy) { self.tline.last_aux_file_policy.store(Some(switch_policy)); self.tline @@ -1775,6 +1798,12 @@ impl<'a> DatadirModification<'a> { self.tline.get(key, lsn, ctx).await } + /// Only used during unit tests, force putting a key into the modification. + #[cfg(test)] + pub(crate) fn put_for_test(&mut self, key: Key, val: Value) { + self.put(key, val); + } + fn put(&mut self, key: Key, val: Value) { let values = self.pending_updates.entry(key).or_default(); // Replace the previous value if it exists at the same lsn diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 1a66f2c919..caf26e0a0b 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -3964,18 +3964,20 @@ mod tests { use super::*; use crate::keyspace::KeySpaceAccum; + use crate::pgdatadir_mapping::AuxFilesDirectory; use crate::repository::{Key, Value}; use crate::tenant::harness::*; use crate::tenant::timeline::CompactFlags; use crate::DEFAULT_PG_VERSION; use bytes::{Bytes, BytesMut}; use hex_literal::hex; - use pageserver_api::key::{AUX_KEY_PREFIX, NON_INHERITED_RANGE}; + use pageserver_api::key::{AUX_FILES_KEY, AUX_KEY_PREFIX, NON_INHERITED_RANGE}; use pageserver_api::keyspace::KeySpace; use pageserver_api::models::CompactionAlgorithm; use rand::{thread_rng, Rng}; use tests::storage_layer::ValuesReconstructState; use tests::timeline::{GetVectoredError, ShutdownMode}; + use utils::bin_ser::BeSer; static TEST_KEY: Lazy = Lazy::new(|| Key::from_slice(&hex!("010000000033333333444444445500000001"))); @@ -5997,6 +5999,69 @@ mod tests { ); } + #[tokio::test] + async fn aux_file_policy_auto_detect() { + let mut harness = TenantHarness::create("aux_file_policy_auto_detect").unwrap(); + harness.tenant_conf.switch_aux_file_policy = AuxFilePolicy::V2; // set to cross-validation mode + let (tenant, ctx) = harness.load().await; + + let mut lsn = Lsn(0x08); + + let tline: Arc = tenant + .create_test_timeline(TIMELINE_ID, lsn, DEFAULT_PG_VERSION, &ctx) + .await + .unwrap(); + + assert_eq!( + tline.last_aux_file_policy.load(), + None, + "no aux file is written so it should be unset" + ); + + { + lsn += 8; + let mut modification = tline.begin_modification(lsn); + let buf = AuxFilesDirectory::ser(&AuxFilesDirectory { + files: vec![( + "test_file".to_string(), + Bytes::copy_from_slice(b"test_file"), + )] + .into_iter() + .collect(), + }) + .unwrap(); + modification.put_for_test(AUX_FILES_KEY, Value::Image(Bytes::from(buf))); + modification.commit(&ctx).await.unwrap(); + } + + { + lsn += 8; + let mut modification = tline.begin_modification(lsn); + modification + .put_file("pg_logical/mappings/test1", b"first", &ctx) + .await + .unwrap(); + modification.commit(&ctx).await.unwrap(); + } + + assert_eq!( + tline.last_aux_file_policy.load(), + Some(AuxFilePolicy::V1), + "keep using v1 because there are aux files writting with v1" + ); + + // we can still read the auxfile v1 + let files = tline.list_aux_files(lsn, &ctx).await.unwrap(); + assert_eq!( + files.get("pg_logical/mappings/test1"), + 
Some(&bytes::Bytes::from_static(b"first")) + ); + assert_eq!( + files.get("test_file"), + Some(&bytes::Bytes::from_static(b"test_file")) + ); + } + #[tokio::test] async fn test_metadata_image_creation() -> anyhow::Result<()> { let harness = TenantHarness::create("test_metadata_image_creation")?; From 9cfe08e3d9f1181a163322705ff41cbcfb11db3b Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Wed, 22 May 2024 18:05:43 +0100 Subject: [PATCH 060/220] proxy password threadpool (#7806) ## Problem Despite making password hashing async, it can still take time away from the network code. ## Summary of changes Introduce a custom threadpool, inspired by rayon. Features: ### Fairness Each task is tagged with it's endpoint ID. The more times we have seen the endpoint, the more likely we are to skip the task if it comes up in the queue. This is using a min-count-sketch estimator for the number of times we have seen the endpoint, resetting it every 1000+ steps. Since tasks are immediately rescheduled if they do not complete, the worker could get stuck in a "always work available loop". To combat this, we check the global queue every 61 steps to ensure all tasks quickly get a worker assigned to them. ### Balanced Using crossbeam_deque, like rayon does, we have workstealing out of the box. I've tested it a fair amount and it seems to balance the workload accordingly --- Cargo.lock | 20 +- Cargo.toml | 2 + proxy/Cargo.toml | 4 +- proxy/src/auth/backend.rs | 10 +- proxy/src/auth/backend/hacks.rs | 11 +- proxy/src/auth/flow.rs | 24 ++- proxy/src/bin/proxy.rs | 8 + proxy/src/config.rs | 2 + proxy/src/metrics.rs | 89 +++++++-- proxy/src/scram.rs | 18 +- proxy/src/scram/countmin.rs | 173 +++++++++++++++++ proxy/src/scram/exchange.rs | 49 ++--- proxy/src/scram/pbkdf2.rs | 89 +++++++++ proxy/src/scram/threadpool.rs | 321 ++++++++++++++++++++++++++++++++ proxy/src/serverless/backend.rs | 11 +- workspace_hack/Cargo.toml | 2 + 16 files changed, 759 insertions(+), 74 deletions(-) create mode 100644 proxy/src/scram/countmin.rs create mode 100644 proxy/src/scram/pbkdf2.rs create mode 100644 proxy/src/scram/threadpool.rs diff --git a/Cargo.lock b/Cargo.lock index e6060c82f5..d8f9021eb8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1471,26 +1471,21 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" dependencies = [ - "cfg-if", "crossbeam-epoch", "crossbeam-utils", ] [[package]] name = "crossbeam-epoch" -version = "0.9.14" +version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ - "autocfg", - "cfg-if", "crossbeam-utils", - "memoffset 0.8.0", - "scopeguard", ] [[package]] @@ -3961,9 +3956,9 @@ checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" [[package]] name = "pbkdf2" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0ca0b5a68607598bf3bad68f32227a8164f6254833f84eafaac409cd6746c31" +checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2" dependencies = [ "digest", "hmac", @@ -4386,6 +4381,7 @@ dependencies = [ name = "proxy" version = "0.1.0" 
dependencies = [ + "ahash", "anyhow", "async-compression", "async-trait", @@ -4402,6 +4398,7 @@ dependencies = [ "chrono", "clap", "consumption_metrics", + "crossbeam-deque", "dashmap", "env_logger", "fallible-iterator", @@ -7473,6 +7470,7 @@ dependencies = [ name = "workspace_hack" version = "0.1.0" dependencies = [ + "ahash", "anyhow", "aws-config", "aws-runtime", diff --git a/Cargo.toml b/Cargo.toml index 2a7dea447e..0887c039f8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -41,6 +41,7 @@ license = "Apache-2.0" ## All dependency versions, used in the project [workspace.dependencies] +ahash = "0.8" anyhow = { version = "1.0", features = ["backtrace"] } arc-swap = "1.6" async-compression = { version = "0.4.0", features = ["tokio", "gzip", "zstd"] } @@ -74,6 +75,7 @@ clap = { version = "4.0", features = ["derive"] } comfy-table = "6.1" const_format = "0.2" crc32c = "0.6" +crossbeam-deque = "0.8.5" crossbeam-utils = "0.8.5" dashmap = { version = "5.5.0", features = ["raw-api"] } either = "1.8" diff --git a/proxy/Cargo.toml b/proxy/Cargo.toml index 5f9b0aa75b..7da0763bc1 100644 --- a/proxy/Cargo.toml +++ b/proxy/Cargo.toml @@ -9,6 +9,7 @@ default = [] testing = [] [dependencies] +ahash.workspace = true anyhow.workspace = true async-compression.workspace = true async-trait.workspace = true @@ -24,6 +25,7 @@ camino.workspace = true chrono.workspace = true clap.workspace = true consumption_metrics.workspace = true +crossbeam-deque.workspace = true dashmap.workspace = true env_logger.workspace = true framed-websockets.workspace = true @@ -52,7 +54,6 @@ opentelemetry.workspace = true parking_lot.workspace = true parquet.workspace = true parquet_derive.workspace = true -pbkdf2 = { workspace = true, features = ["simple", "std"] } pin-project-lite.workspace = true postgres_backend.workspace = true pq_proto.workspace = true @@ -106,6 +107,7 @@ workspace_hack.workspace = true camino-tempfile.workspace = true fallible-iterator.workspace = true tokio-tungstenite.workspace = true +pbkdf2 = { workspace = true, features = ["simple", "std"] } rcgen.workspace = true rstest.workspace = true tokio-postgres-rustls.workspace = true diff --git a/proxy/src/auth/backend.rs b/proxy/src/auth/backend.rs index 6a906b299b..3555eba543 100644 --- a/proxy/src/auth/backend.rs +++ b/proxy/src/auth/backend.rs @@ -365,7 +365,10 @@ async fn authenticate_with_secret( config: &'static AuthenticationConfig, ) -> auth::Result { if let Some(password) = unauthenticated_password { - let auth_outcome = validate_password_and_exchange(&password, secret).await?; + let ep = EndpointIdInt::from(&info.endpoint); + + let auth_outcome = + validate_password_and_exchange(&config.thread_pool, ep, &password, secret).await?; let keys = match auth_outcome { crate::sasl::Outcome::Success(key) => key, crate::sasl::Outcome::Failure(reason) => { @@ -386,7 +389,7 @@ async fn authenticate_with_secret( // Currently, we use it for websocket connections (latency). if allow_cleartext { ctx.set_auth_method(crate::context::AuthMethod::Cleartext); - return hacks::authenticate_cleartext(ctx, info, client, secret).await; + return hacks::authenticate_cleartext(ctx, info, client, secret, config).await; } // Finally, proceed with the main auth flow (SCRAM-based). 
@@ -554,7 +557,7 @@ mod tests { context::RequestMonitoring, proxy::NeonOptions, rate_limiter::{EndpointRateLimiter, RateBucketInfo}, - scram::ServerSecret, + scram::{threadpool::ThreadPool, ServerSecret}, stream::{PqStream, Stream}, }; @@ -596,6 +599,7 @@ mod tests { } static CONFIG: Lazy = Lazy::new(|| AuthenticationConfig { + thread_pool: ThreadPool::new(1), scram_protocol_timeout: std::time::Duration::from_secs(5), rate_limiter_enabled: true, rate_limiter: AuthRateLimiter::new(&RateBucketInfo::DEFAULT_AUTH_SET), diff --git a/proxy/src/auth/backend/hacks.rs b/proxy/src/auth/backend/hacks.rs index f7241be4a9..6b0f5e1726 100644 --- a/proxy/src/auth/backend/hacks.rs +++ b/proxy/src/auth/backend/hacks.rs @@ -3,8 +3,10 @@ use super::{ }; use crate::{ auth::{self, AuthFlow}, + config::AuthenticationConfig, console::AuthSecret, context::RequestMonitoring, + intern::EndpointIdInt, sasl, stream::{self, Stream}, }; @@ -20,6 +22,7 @@ pub async fn authenticate_cleartext( info: ComputeUserInfo, client: &mut stream::PqStream>, secret: AuthSecret, + config: &'static AuthenticationConfig, ) -> auth::Result { warn!("cleartext auth flow override is enabled, proceeding"); ctx.set_auth_method(crate::context::AuthMethod::Cleartext); @@ -27,8 +30,14 @@ pub async fn authenticate_cleartext( // pause the timer while we communicate with the client let paused = ctx.latency_timer.pause(crate::metrics::Waiting::Client); + let ep = EndpointIdInt::from(&info.endpoint); + let auth_flow = AuthFlow::new(client) - .begin(auth::CleartextPassword(secret)) + .begin(auth::CleartextPassword { + secret, + endpoint: ep, + pool: config.thread_pool.clone(), + }) .await?; drop(paused); // cleartext auth is only allowed to the ws/http protocol. diff --git a/proxy/src/auth/flow.rs b/proxy/src/auth/flow.rs index 45bbad8cb2..59d1ac17f4 100644 --- a/proxy/src/auth/flow.rs +++ b/proxy/src/auth/flow.rs @@ -5,12 +5,14 @@ use crate::{ config::TlsServerEndPoint, console::AuthSecret, context::RequestMonitoring, - sasl, scram, + intern::EndpointIdInt, + sasl, + scram::{self, threadpool::ThreadPool}, stream::{PqStream, Stream}, }; use postgres_protocol::authentication::sasl::{SCRAM_SHA_256, SCRAM_SHA_256_PLUS}; use pq_proto::{BeAuthenticationSaslMessage, BeMessage, BeMessage as Be}; -use std::io; +use std::{io, sync::Arc}; use tokio::io::{AsyncRead, AsyncWrite}; use tracing::info; @@ -53,7 +55,11 @@ impl AuthMethod for PasswordHack { /// Use clear-text password auth called `password` in docs /// -pub struct CleartextPassword(pub AuthSecret); +pub struct CleartextPassword { + pub pool: Arc, + pub endpoint: EndpointIdInt, + pub secret: AuthSecret, +} impl AuthMethod for CleartextPassword { #[inline(always)] @@ -126,7 +132,13 @@ impl AuthFlow<'_, S, CleartextPassword> { .strip_suffix(&[0]) .ok_or(AuthErrorImpl::MalformedPassword("missing terminator"))?; - let outcome = validate_password_and_exchange(password, self.state.0).await?; + let outcome = validate_password_and_exchange( + &self.state.pool, + self.state.endpoint, + password, + self.state.secret, + ) + .await?; if let sasl::Outcome::Success(_) = &outcome { self.stream.write_message_noflush(&Be::AuthenticationOk)?; @@ -181,6 +193,8 @@ impl AuthFlow<'_, S, Scram<'_>> { } pub(crate) async fn validate_password_and_exchange( + pool: &ThreadPool, + endpoint: EndpointIdInt, password: &[u8], secret: AuthSecret, ) -> super::Result> { @@ -194,7 +208,7 @@ pub(crate) async fn validate_password_and_exchange( } // perform scram authentication as both client and server to validate the keys 
AuthSecret::Scram(scram_secret) => { - let outcome = crate::scram::exchange(&scram_secret, password).await?; + let outcome = crate::scram::exchange(pool, endpoint, &scram_secret, password).await?; let client_key = match outcome { sasl::Outcome::Success(client_key) => client_key, diff --git a/proxy/src/bin/proxy.rs b/proxy/src/bin/proxy.rs index be7d961b8c..30f2e6f4b7 100644 --- a/proxy/src/bin/proxy.rs +++ b/proxy/src/bin/proxy.rs @@ -27,6 +27,7 @@ use proxy::redis::cancellation_publisher::RedisPublisherClient; use proxy::redis::connection_with_credentials_provider::ConnectionWithCredentialsProvider; use proxy::redis::elasticache; use proxy::redis::notifications; +use proxy::scram::threadpool::ThreadPool; use proxy::serverless::cancel_set::CancelSet; use proxy::serverless::GlobalConnPoolOptions; use proxy::usage_metrics; @@ -132,6 +133,9 @@ struct ProxyCliArgs { /// timeout for scram authentication protocol #[clap(long, default_value = "15s", value_parser = humantime::parse_duration)] scram_protocol_timeout: tokio::time::Duration, + /// size of the threadpool for password hashing + #[clap(long, default_value_t = 4)] + scram_thread_pool_size: u8, /// Require that all incoming requests have a Proxy Protocol V2 packet **and** have an IP address associated. #[clap(long, default_value_t = false, value_parser = clap::builder::BoolishValueParser::new(), action = clap::ArgAction::Set)] require_client_ip: bool, @@ -489,6 +493,9 @@ async fn main() -> anyhow::Result<()> { /// ProxyConfig is created at proxy startup, and lives forever. fn build_config(args: &ProxyCliArgs) -> anyhow::Result<&'static ProxyConfig> { + let thread_pool = ThreadPool::new(args.scram_thread_pool_size); + Metrics::install(thread_pool.metrics.clone()); + let tls_config = match (&args.tls_key, &args.tls_cert) { (Some(key_path), Some(cert_path)) => Some(config::configure_tls( key_path, @@ -624,6 +631,7 @@ fn build_config(args: &ProxyCliArgs) -> anyhow::Result<&'static ProxyConfig> { client_conn_threshold: args.sql_over_http.sql_over_http_client_conn_threshold, }; let authentication_config = AuthenticationConfig { + thread_pool, scram_protocol_timeout: args.scram_protocol_timeout, rate_limiter_enabled: args.auth_rate_limit_enabled, rate_limiter: AuthRateLimiter::new(args.auth_rate_limit.clone()), diff --git a/proxy/src/config.rs b/proxy/src/config.rs index b7ab2c00f9..5a0c251ce2 100644 --- a/proxy/src/config.rs +++ b/proxy/src/config.rs @@ -2,6 +2,7 @@ use crate::{ auth::{self, backend::AuthRateLimiter}, console::locks::ApiLocks, rate_limiter::RateBucketInfo, + scram::threadpool::ThreadPool, serverless::{cancel_set::CancelSet, GlobalConnPoolOptions}, Host, }; @@ -61,6 +62,7 @@ pub struct HttpConfig { } pub struct AuthenticationConfig { + pub thread_pool: Arc, pub scram_protocol_timeout: tokio::time::Duration, pub rate_limiter_enabled: bool, pub rate_limiter: AuthRateLimiter, diff --git a/proxy/src/metrics.rs b/proxy/src/metrics.rs index 1590316925..e2a75a8720 100644 --- a/proxy/src/metrics.rs +++ b/proxy/src/metrics.rs @@ -1,11 +1,11 @@ -use std::sync::OnceLock; +use std::sync::{Arc, OnceLock}; use lasso::ThreadedRodeo; use measured::{ - label::StaticLabelSet, + label::{FixedCardinalitySet, LabelName, LabelSet, LabelValue, StaticLabelSet}, metric::{histogram::Thresholds, name::MetricName}, - Counter, CounterVec, FixedCardinalityLabel, Gauge, Histogram, HistogramVec, LabelGroup, - MetricGroup, + Counter, CounterVec, FixedCardinalityLabel, Gauge, GaugeVec, Histogram, HistogramVec, + LabelGroup, MetricGroup, }; use 
metrics::{CounterPairAssoc, CounterPairVec, HyperLogLog, HyperLogLogVec}; @@ -14,26 +14,36 @@ use tokio::time::{self, Instant}; use crate::console::messages::ColdStartInfo; #[derive(MetricGroup)] +#[metric(new(thread_pool: Arc))] pub struct Metrics { #[metric(namespace = "proxy")] + #[metric(init = ProxyMetrics::new(thread_pool))] pub proxy: ProxyMetrics, #[metric(namespace = "wake_compute_lock")] pub wake_compute_lock: ApiLockMetrics, } +static SELF: OnceLock = OnceLock::new(); impl Metrics { + pub fn install(thread_pool: Arc) { + SELF.set(Metrics::new(thread_pool)) + .ok() + .expect("proxy metrics must not be installed more than once"); + } + pub fn get() -> &'static Self { - static SELF: OnceLock = OnceLock::new(); - SELF.get_or_init(|| Metrics { - proxy: ProxyMetrics::default(), - wake_compute_lock: ApiLockMetrics::new(), - }) + #[cfg(test)] + return SELF.get_or_init(|| Metrics::new(Arc::new(ThreadPoolMetrics::new(0)))); + + #[cfg(not(test))] + SELF.get() + .expect("proxy metrics must be installed by the main() function") } } #[derive(MetricGroup)] -#[metric(new())] +#[metric(new(thread_pool: Arc))] pub struct ProxyMetrics { #[metric(flatten)] pub db_connections: CounterPairVec, @@ -129,6 +139,10 @@ pub struct ProxyMetrics { #[metric(namespace = "connect_compute_lock")] pub connect_compute_lock: ApiLockMetrics, + + #[metric(namespace = "scram_pool")] + #[metric(init = thread_pool)] + pub scram_pool: Arc, } #[derive(MetricGroup)] @@ -146,12 +160,6 @@ pub struct ApiLockMetrics { pub semaphore_acquire_seconds: Histogram<16>, } -impl Default for ProxyMetrics { - fn default() -> Self { - Self::new() - } -} - impl Default for ApiLockMetrics { fn default() -> Self { Self::new() @@ -553,3 +561,52 @@ pub enum RedisEventsCount { PasswordUpdate, AllowedIpsUpdate, } + +pub struct ThreadPoolWorkers(usize); +pub struct ThreadPoolWorkerId(pub usize); + +impl LabelValue for ThreadPoolWorkerId { + fn visit(&self, v: V) -> V::Output { + v.write_int(self.0 as i64) + } +} + +impl LabelGroup for ThreadPoolWorkerId { + fn visit_values(&self, v: &mut impl measured::label::LabelGroupVisitor) { + v.write_value(LabelName::from_str("worker"), self); + } +} + +impl LabelSet for ThreadPoolWorkers { + type Value<'a> = ThreadPoolWorkerId; + + fn dynamic_cardinality(&self) -> Option { + Some(self.0) + } + + fn encode(&self, value: Self::Value<'_>) -> Option { + (value.0 < self.0).then_some(value.0) + } + + fn decode(&self, value: usize) -> Self::Value<'_> { + ThreadPoolWorkerId(value) + } +} + +impl FixedCardinalitySet for ThreadPoolWorkers { + fn cardinality(&self) -> usize { + self.0 + } +} + +#[derive(MetricGroup)] +#[metric(new(workers: usize))] +pub struct ThreadPoolMetrics { + pub injector_queue_depth: Gauge, + #[metric(init = GaugeVec::with_label_set(ThreadPoolWorkers(workers)))] + pub worker_queue_depth: GaugeVec, + #[metric(init = CounterVec::with_label_set(ThreadPoolWorkers(workers)))] + pub worker_task_turns_total: CounterVec, + #[metric(init = CounterVec::with_label_set(ThreadPoolWorkers(workers)))] + pub worker_task_skips_total: CounterVec, +} diff --git a/proxy/src/scram.rs b/proxy/src/scram.rs index ed80675f8a..862facb4e5 100644 --- a/proxy/src/scram.rs +++ b/proxy/src/scram.rs @@ -6,11 +6,14 @@ //! * //! 
* +mod countmin; mod exchange; mod key; mod messages; +mod pbkdf2; mod secret; mod signature; +pub mod threadpool; pub use exchange::{exchange, Exchange}; pub use key::ScramKey; @@ -56,9 +59,13 @@ fn sha256<'a>(parts: impl IntoIterator) -> [u8; 32] { #[cfg(test)] mod tests { - use crate::sasl::{Mechanism, Step}; + use crate::{ + intern::EndpointIdInt, + sasl::{Mechanism, Step}, + EndpointId, + }; - use super::{Exchange, ServerSecret}; + use super::{threadpool::ThreadPool, Exchange, ServerSecret}; #[test] fn snapshot() { @@ -112,8 +119,13 @@ mod tests { } async fn run_round_trip_test(server_password: &str, client_password: &str) { + let pool = ThreadPool::new(1); + + let ep = EndpointId::from("foo"); + let ep = EndpointIdInt::from(ep); + let scram_secret = ServerSecret::build(server_password).await.unwrap(); - let outcome = super::exchange(&scram_secret, client_password.as_bytes()) + let outcome = super::exchange(&pool, ep, &scram_secret, client_password.as_bytes()) .await .unwrap(); diff --git a/proxy/src/scram/countmin.rs b/proxy/src/scram/countmin.rs new file mode 100644 index 0000000000..f2b794e5fe --- /dev/null +++ b/proxy/src/scram/countmin.rs @@ -0,0 +1,173 @@ +use std::hash::Hash; + +/// estimator of hash jobs per second. +/// +pub struct CountMinSketch { + // one for each depth + hashers: Vec, + width: usize, + depth: usize, + // buckets, width*depth + buckets: Vec, +} + +impl CountMinSketch { + /// Given parameters (ε, δ), + /// set width = ceil(e/ε) + /// set depth = ceil(ln(1/δ)) + /// + /// guarantees: + /// actual <= estimate + /// estimate <= actual + ε * N with probability 1 - δ + /// where N is the cardinality of the stream + pub fn with_params(epsilon: f64, delta: f64) -> Self { + CountMinSketch::new( + (std::f64::consts::E / epsilon).ceil() as usize, + (1.0_f64 / delta).ln().ceil() as usize, + ) + } + + fn new(width: usize, depth: usize) -> Self { + Self { + #[cfg(test)] + hashers: (0..depth) + .map(|i| { + // digits of pi for good randomness + ahash::RandomState::with_seeds( + 314159265358979323, + 84626433832795028, + 84197169399375105, + 82097494459230781 + i as u64, + ) + }) + .collect(), + #[cfg(not(test))] + hashers: (0..depth).map(|_| ahash::RandomState::new()).collect(), + width, + depth, + buckets: vec![0; width * depth], + } + } + + pub fn inc_and_return(&mut self, t: &T, x: u32) -> u32 { + let mut min = u32::MAX; + for row in 0..self.depth { + let col = (self.hashers[row].hash_one(t) as usize) % self.width; + + let row = &mut self.buckets[row * self.width..][..self.width]; + row[col] = row[col].saturating_add(x); + min = std::cmp::min(min, row[col]); + } + min + } + + pub fn reset(&mut self) { + self.buckets.clear(); + self.buckets.resize(self.width * self.depth, 0); + } +} + +#[cfg(test)] +mod tests { + use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng}; + + use super::CountMinSketch; + + fn eval_precision(n: usize, p: f64, q: f64) -> usize { + // fixed value of phi for consistent test + let mut rng = StdRng::seed_from_u64(16180339887498948482); + + #[allow(non_snake_case)] + let mut N = 0; + + let mut ids = vec![]; + + for _ in 0..n { + // number of insert operations + let n = rng.gen_range(1..100); + // number to insert at once + let m = rng.gen_range(1..4096); + + let id = uuid::Builder::from_random_bytes(rng.gen()).into_uuid(); + ids.push((id, n, m)); + + // N = sum(actual) + N += n * m; + } + + // q% of counts will be within p of the actual value + let mut sketch = CountMinSketch::with_params(p / N as f64, 1.0 - q); + + 
dbg!(sketch.buckets.len()); + + // insert a bunch of entries in a random order + let mut ids2 = ids.clone(); + while !ids2.is_empty() { + ids2.shuffle(&mut rng); + + let mut i = 0; + while i < ids2.len() { + sketch.inc_and_return(&ids2[i].0, ids2[i].1); + ids2[i].2 -= 1; + if ids2[i].2 == 0 { + ids2.remove(i); + } else { + i += 1; + } + } + } + + let mut within_p = 0; + for (id, n, m) in ids { + let actual = n * m; + let estimate = sketch.inc_and_return(&id, 0); + + // This estimate has the guarantee that actual <= estimate + assert!(actual <= estimate); + + // This estimate has the guarantee that estimate <= actual + εN with probability 1 - δ. + // ε = p / N, δ = 1 - q; + // therefore, estimate <= actual + p with probability q. + if estimate as f64 <= actual as f64 + p { + within_p += 1; + } + } + within_p + } + + #[test] + fn precision() { + assert_eq!(eval_precision(100, 100.0, 0.99), 100); + assert_eq!(eval_precision(1000, 100.0, 0.99), 1000); + assert_eq!(eval_precision(100, 4096.0, 0.99), 100); + assert_eq!(eval_precision(1000, 4096.0, 0.99), 1000); + + // seems to be more precise than the literature indicates? + // probably numbers are too small to truly represent the probabilities. + assert_eq!(eval_precision(100, 4096.0, 0.90), 100); + assert_eq!(eval_precision(1000, 4096.0, 0.90), 1000); + assert_eq!(eval_precision(100, 4096.0, 0.1), 98); + assert_eq!(eval_precision(1000, 4096.0, 0.1), 991); + } + + // returns memory usage in bytes, and the time complexity per insert. + fn eval_cost(p: f64, q: f64) -> (usize, usize) { + #[allow(non_snake_case)] + // N = sum(actual) + // Let's assume 1021 samples, all of 4096 + let N = 1021 * 4096; + let sketch = CountMinSketch::with_params(p / N as f64, 1.0 - q); + + let memory = std::mem::size_of::() * sketch.buckets.len(); + let time = sketch.depth; + (memory, time) + } + + #[test] + fn memory_usage() { + assert_eq!(eval_cost(100.0, 0.99), (2273580, 5)); + assert_eq!(eval_cost(4096.0, 0.99), (55520, 5)); + assert_eq!(eval_cost(4096.0, 0.90), (33312, 3)); + assert_eq!(eval_cost(4096.0, 0.1), (11104, 1)); + } +} diff --git a/proxy/src/scram/exchange.rs b/proxy/src/scram/exchange.rs index 89dd33e59f..d0adbc780e 100644 --- a/proxy/src/scram/exchange.rs +++ b/proxy/src/scram/exchange.rs @@ -4,15 +4,17 @@ use std::convert::Infallible; use hmac::{Hmac, Mac}; use sha2::Sha256; -use tokio::task::yield_now; use super::messages::{ ClientFinalMessage, ClientFirstMessage, OwnedServerFirstMessage, SCRAM_RAW_NONCE_LEN, }; +use super::pbkdf2::Pbkdf2; use super::secret::ServerSecret; use super::signature::SignatureBuilder; +use super::threadpool::ThreadPool; use super::ScramKey; use crate::config; +use crate::intern::EndpointIdInt; use crate::sasl::{self, ChannelBinding, Error as SaslError}; /// The only channel binding mode we currently support. 
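As a cross-check on the `memory_usage` assertions above: the sketch dimensions follow directly from the `(ε, δ)` sizing rule in `with_params`. A standalone sketch of that arithmetic, where the assumed stream weight `N = 1021 * 4096` is the one hard-coded in `eval_cost`:

```rust
// Reproduces the eval_cost(100.0, 0.99) numbers: width 113_679, depth 5,
// memory 2_273_580 bytes with u32 buckets.
fn main() {
    let n: f64 = 1021.0 * 4096.0; // assumed total stream weight (sum of all increments)
    let (p, q) = (100.0_f64, 0.99_f64); // "within p of the true count, q of the time"
    let epsilon = p / n; // ε
    let delta = 1.0 - q; // δ
    let width = (std::f64::consts::E / epsilon).ceil() as usize; // ceil(e/ε)
    let depth = (1.0_f64 / delta).ln().ceil() as usize; // ceil(ln(1/δ))
    let memory = std::mem::size_of::<u32>() * width * depth;
    println!("width = {width}, depth = {depth}, memory = {memory} bytes");
}
```

Tightening the error bound (smaller `p`) grows the width linearly, while tightening the failure probability only grows the depth logarithmically, which is why the `q = 0.99` case still needs just 5 rows.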
@@ -74,37 +76,18 @@ impl<'a> Exchange<'a> { } } -// copied from -async fn pbkdf2(str: &[u8], salt: &[u8], iterations: u32) -> [u8; 32] { - let hmac = Hmac::::new_from_slice(str).expect("HMAC is able to accept all key sizes"); - let mut prev = hmac - .clone() - .chain_update(salt) - .chain_update(1u32.to_be_bytes()) - .finalize() - .into_bytes(); - - let mut hi = prev; - - for i in 1..iterations { - prev = hmac.clone().chain_update(prev).finalize().into_bytes(); - - for (hi, prev) in hi.iter_mut().zip(prev) { - *hi ^= prev; - } - // yield every ~250us - // hopefully reduces tail latencies - if i % 1024 == 0 { - yield_now().await - } - } - - hi.into() -} - // copied from -async fn derive_client_key(password: &[u8], salt: &[u8], iterations: u32) -> ScramKey { - let salted_password = pbkdf2(password, salt, iterations).await; +async fn derive_client_key( + pool: &ThreadPool, + endpoint: EndpointIdInt, + password: &[u8], + salt: &[u8], + iterations: u32, +) -> ScramKey { + let salted_password = pool + .spawn_job(endpoint, Pbkdf2::start(password, salt, iterations)) + .await + .expect("job should not be cancelled"); let make_key = |name| { let key = Hmac::::new_from_slice(&salted_password) @@ -119,11 +102,13 @@ async fn derive_client_key(password: &[u8], salt: &[u8], iterations: u32) -> Scr } pub async fn exchange( + pool: &ThreadPool, + endpoint: EndpointIdInt, secret: &ServerSecret, password: &[u8], ) -> sasl::Result> { let salt = base64::decode(&secret.salt_base64)?; - let client_key = derive_client_key(password, &salt, secret.iterations).await; + let client_key = derive_client_key(pool, endpoint, password, &salt, secret.iterations).await; if secret.is_password_invalid(&client_key).into() { Ok(sasl::Outcome::Failure("password doesn't match")) diff --git a/proxy/src/scram/pbkdf2.rs b/proxy/src/scram/pbkdf2.rs new file mode 100644 index 0000000000..a803ba7e1b --- /dev/null +++ b/proxy/src/scram/pbkdf2.rs @@ -0,0 +1,89 @@ +use hmac::{ + digest::{consts::U32, generic_array::GenericArray}, + Hmac, Mac, +}; +use sha2::Sha256; + +pub struct Pbkdf2 { + hmac: Hmac, + prev: GenericArray, + hi: GenericArray, + iterations: u32, +} + +// inspired from +impl Pbkdf2 { + pub fn start(str: &[u8], salt: &[u8], iterations: u32) -> Self { + let hmac = + Hmac::::new_from_slice(str).expect("HMAC is able to accept all key sizes"); + + let prev = hmac + .clone() + .chain_update(salt) + .chain_update(1u32.to_be_bytes()) + .finalize() + .into_bytes(); + + Self { + hmac, + // one consumed for the hash above + iterations: iterations - 1, + hi: prev, + prev, + } + } + + pub fn cost(&self) -> u32 { + (self.iterations).clamp(0, 4096) + } + + pub fn turn(&mut self) -> std::task::Poll<[u8; 32]> { + let Self { + hmac, + prev, + hi, + iterations, + } = self; + + // only do 4096 iterations per turn before sharing the thread for fairness + let n = (*iterations).clamp(0, 4096); + for _ in 0..n { + *prev = hmac.clone().chain_update(*prev).finalize().into_bytes(); + + for (hi, prev) in hi.iter_mut().zip(*prev) { + *hi ^= prev; + } + } + + *iterations -= n; + if *iterations == 0 { + std::task::Poll::Ready((*hi).into()) + } else { + std::task::Poll::Pending + } + } +} + +#[cfg(test)] +mod tests { + use super::Pbkdf2; + use pbkdf2::pbkdf2_hmac_array; + use sha2::Sha256; + + #[test] + fn works() { + let salt = b"sodium chloride"; + let pass = b"Ne0n_!5_50_C007"; + + let mut job = Pbkdf2::start(pass, salt, 600000); + let hash = loop { + let std::task::Poll::Ready(hash) = job.turn() else { + continue; + }; + break hash; + }; + + let 
expected = pbkdf2_hmac_array::(pass, salt, 600000); + assert_eq!(hash, expected) + } +} diff --git a/proxy/src/scram/threadpool.rs b/proxy/src/scram/threadpool.rs new file mode 100644 index 0000000000..7701b869a3 --- /dev/null +++ b/proxy/src/scram/threadpool.rs @@ -0,0 +1,321 @@ +//! Custom threadpool implementation for password hashing. +//! +//! Requirements: +//! 1. Fairness per endpoint. +//! 2. Yield support for high iteration counts. + +use std::sync::{ + atomic::{AtomicU64, Ordering}, + Arc, +}; + +use crossbeam_deque::{Injector, Stealer, Worker}; +use itertools::Itertools; +use parking_lot::{Condvar, Mutex}; +use rand::Rng; +use rand::{rngs::SmallRng, SeedableRng}; +use tokio::sync::oneshot; + +use crate::{ + intern::EndpointIdInt, + metrics::{ThreadPoolMetrics, ThreadPoolWorkerId}, + scram::countmin::CountMinSketch, +}; + +use super::pbkdf2::Pbkdf2; + +pub struct ThreadPool { + queue: Injector, + stealers: Vec>, + parkers: Vec<(Condvar, Mutex)>, + /// bitpacked representation. + /// lower 8 bits = number of sleeping threads + /// next 8 bits = number of idle threads (searching for work) + counters: AtomicU64, + + pub metrics: Arc, +} + +#[derive(PartialEq)] +enum ThreadState { + Parked, + Active, +} + +impl ThreadPool { + pub fn new(n_workers: u8) -> Arc { + let workers = (0..n_workers).map(|_| Worker::new_fifo()).collect_vec(); + let stealers = workers.iter().map(|w| w.stealer()).collect_vec(); + + let parkers = (0..n_workers) + .map(|_| (Condvar::new(), Mutex::new(ThreadState::Active))) + .collect_vec(); + + let pool = Arc::new(Self { + queue: Injector::new(), + stealers, + parkers, + // threads start searching for work + counters: AtomicU64::new((n_workers as u64) << 8), + metrics: Arc::new(ThreadPoolMetrics::new(n_workers as usize)), + }); + + for (i, worker) in workers.into_iter().enumerate() { + let pool = Arc::clone(&pool); + std::thread::spawn(move || thread_rt(pool, worker, i)); + } + + pool + } + + pub fn spawn_job( + &self, + endpoint: EndpointIdInt, + pbkdf2: Pbkdf2, + ) -> oneshot::Receiver<[u8; 32]> { + let (tx, rx) = oneshot::channel(); + + let queue_was_empty = self.queue.is_empty(); + + self.metrics.injector_queue_depth.inc(); + self.queue.push(JobSpec { + response: tx, + pbkdf2, + endpoint, + }); + + // inspired from + let counts = self.counters.load(Ordering::SeqCst); + let num_awake_but_idle = (counts >> 8) & 0xff; + let num_sleepers = counts & 0xff; + + // If the queue is non-empty, then we always wake up a worker + // -- clearly the existing idle jobs aren't enough. Otherwise, + // check to see if we have enough idle workers. + if !queue_was_empty || num_awake_but_idle == 0 { + let num_to_wake = Ord::min(1, num_sleepers); + self.wake_any_threads(num_to_wake); + } + + rx + } + + #[cold] + fn wake_any_threads(&self, mut num_to_wake: u64) { + if num_to_wake > 0 { + for i in 0..self.parkers.len() { + if self.wake_specific_thread(i) { + num_to_wake -= 1; + if num_to_wake == 0 { + return; + } + } + } + } + } + + fn wake_specific_thread(&self, index: usize) -> bool { + let (condvar, lock) = &self.parkers[index]; + + let mut state = lock.lock(); + if *state == ThreadState::Parked { + condvar.notify_one(); + + // When the thread went to sleep, it will have incremented + // this value. When we wake it, its our job to decrement + // it. We could have the thread do it, but that would + // introduce a delay between when the thread was + // *notified* and when this counter was decremented. 
That + // might mislead people with new work into thinking that + // there are sleeping threads that they should try to + // wake, when in fact there is nothing left for them to + // do. + self.counters.fetch_sub(1, Ordering::SeqCst); + *state = ThreadState::Active; + + true + } else { + false + } + } + + fn steal(&self, rng: &mut impl Rng, skip: usize, worker: &Worker) -> Option { + // announce thread as idle + self.counters.fetch_add(256, Ordering::SeqCst); + + // try steal from the global queue + loop { + match self.queue.steal_batch_and_pop(worker) { + crossbeam_deque::Steal::Success(job) => { + self.metrics + .injector_queue_depth + .set(self.queue.len() as i64); + // no longer idle + self.counters.fetch_sub(256, Ordering::SeqCst); + return Some(job); + } + crossbeam_deque::Steal::Retry => continue, + crossbeam_deque::Steal::Empty => break, + } + } + + // try steal from our neighbours + loop { + let mut retry = false; + let start = rng.gen_range(0..self.stealers.len()); + let job = (start..self.stealers.len()) + .chain(0..start) + .filter(|i| *i != skip) + .find_map( + |victim| match self.stealers[victim].steal_batch_and_pop(worker) { + crossbeam_deque::Steal::Success(job) => Some(job), + crossbeam_deque::Steal::Empty => None, + crossbeam_deque::Steal::Retry => { + retry = true; + None + } + }, + ); + if job.is_some() { + // no longer idle + self.counters.fetch_sub(256, Ordering::SeqCst); + return job; + } + if !retry { + return None; + } + } + } +} + +fn thread_rt(pool: Arc, worker: Worker, index: usize) { + /// interval when we should steal from the global queue + /// so that tail latencies are managed appropriately + const STEAL_INTERVAL: usize = 61; + + /// How often to reset the sketch values + const SKETCH_RESET_INTERVAL: usize = 1021; + + let mut rng = SmallRng::from_entropy(); + + // used to determine whether we should temporarily skip tasks for fairness. + // 99% of estimates will overcount by no more than 4096 samples + let mut sketch = CountMinSketch::with_params(1.0 / (SKETCH_RESET_INTERVAL as f64), 0.01); + + let (condvar, lock) = &pool.parkers[index]; + + 'wait: loop { + // wait for notification of work + { + let mut lock = lock.lock(); + + // queue is empty + pool.metrics + .worker_queue_depth + .set(ThreadPoolWorkerId(index), 0); + + // subtract 1 from idle count, add 1 to sleeping count. + pool.counters.fetch_sub(255, Ordering::SeqCst); + + *lock = ThreadState::Parked; + condvar.wait(&mut lock); + } + + for i in 0.. { + let mut job = match worker + .pop() + .or_else(|| pool.steal(&mut rng, index, &worker)) + { + Some(job) => job, + None => continue 'wait, + }; + + pool.metrics + .worker_queue_depth + .set(ThreadPoolWorkerId(index), worker.len() as i64); + + // receiver is closed, cancel the task + if !job.response.is_closed() { + let rate = sketch.inc_and_return(&job.endpoint, job.pbkdf2.cost()); + + const P: f64 = 2000.0; + // probability decreases as rate increases. + // lower probability, higher chance of being skipped + // + // estimates (rate in terms of 4096 rounds): + // rate = 0 => probability = 100% + // rate = 10 => probability = 71.3% + // rate = 50 => probability = 62.1% + // rate = 500 => probability = 52.3% + // rate = 1021 => probability = 49.8% + // + // My expectation is that the pool queue will only begin backing up at ~1000rps + // in which case the SKETCH_RESET_INTERVAL represents 1 second. Thus, the rates above + // are in requests per second. 
+ let probability = P.ln() / (P + rate as f64).ln(); + if pool.queue.len() > 32 || rng.gen_bool(probability) { + pool.metrics + .worker_task_turns_total + .inc(ThreadPoolWorkerId(index)); + + match job.pbkdf2.turn() { + std::task::Poll::Ready(result) => { + let _ = job.response.send(result); + } + std::task::Poll::Pending => worker.push(job), + } + } else { + pool.metrics + .worker_task_skips_total + .inc(ThreadPoolWorkerId(index)); + + // skip for now + worker.push(job) + } + } + + // if we get stuck with a few long lived jobs in the queue + // it's better to try and steal from the queue too for fairness + if i % STEAL_INTERVAL == 0 { + let _ = pool.queue.steal_batch(&worker); + } + + if i % SKETCH_RESET_INTERVAL == 0 { + sketch.reset(); + } + } + } +} + +struct JobSpec { + response: oneshot::Sender<[u8; 32]>, + pbkdf2: Pbkdf2, + endpoint: EndpointIdInt, +} + +#[cfg(test)] +mod tests { + use crate::EndpointId; + + use super::*; + + #[tokio::test] + async fn hash_is_correct() { + let pool = ThreadPool::new(1); + + let ep = EndpointId::from("foo"); + let ep = EndpointIdInt::from(ep); + + let salt = [0x55; 32]; + let actual = pool + .spawn_job(ep, Pbkdf2::start(b"password", &salt, 4096)) + .await + .unwrap(); + + let expected = [ + 10, 114, 73, 188, 140, 222, 196, 156, 214, 184, 79, 157, 119, 242, 16, 31, 53, 242, + 178, 43, 95, 8, 225, 182, 122, 40, 219, 21, 89, 147, 64, 140, + ]; + assert_eq!(actual, expected) + } +} diff --git a/proxy/src/serverless/backend.rs b/proxy/src/serverless/backend.rs index 6b79c12316..52fc7b556a 100644 --- a/proxy/src/serverless/backend.rs +++ b/proxy/src/serverless/backend.rs @@ -15,6 +15,7 @@ use crate::{ }, context::RequestMonitoring, error::{ErrorKind, ReportableError, UserFacingError}, + intern::EndpointIdInt, proxy::{connect_compute::ConnectMechanism, retry::ShouldRetry}, rate_limiter::EndpointRateLimiter, Host, @@ -66,8 +67,14 @@ impl PoolingBackend { return Err(AuthError::auth_failed(&*user_info.user)); } }; - let auth_outcome = - crate::auth::validate_password_and_exchange(&conn_info.password, secret).await?; + let ep = EndpointIdInt::from(&conn_info.user_info.endpoint); + let auth_outcome = crate::auth::validate_password_and_exchange( + &config.thread_pool, + ep, + &conn_info.password, + secret, + ) + .await?; let res = match auth_outcome { crate::sasl::Outcome::Success(key) => { info!("user successfully authenticated"); diff --git a/workspace_hack/Cargo.toml b/workspace_hack/Cargo.toml index 7582562450..f364a6c2e0 100644 --- a/workspace_hack/Cargo.toml +++ b/workspace_hack/Cargo.toml @@ -13,6 +13,7 @@ publish = false ### BEGIN HAKARI SECTION [dependencies] +ahash = { version = "0.8" } anyhow = { version = "1", features = ["backtrace"] } aws-config = { version = "1", default-features = false, features = ["rustls", "sso"] } aws-runtime = { version = "1", default-features = false, features = ["event-stream", "http-02x", "sigv4a"] } @@ -85,6 +86,7 @@ zstd-safe = { version = "7", default-features = false, features = ["arrays", "le zstd-sys = { version = "2", default-features = false, features = ["legacy", "std", "zdict_builder"] } [build-dependencies] +ahash = { version = "0.8" } anyhow = { version = "1", features = ["backtrace"] } bytes = { version = "1", features = ["serde"] } cc = { version = "1", default-features = false, features = ["parallel"] } From ddd8ebd2536f90872da2678aecbd94a3e1b3f547 Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Wed, 22 May 2024 13:06:00 -0400 Subject: [PATCH 061/220] chore(pageserver): use kebab case for aux file flag 
(#7840) part of https://github.com/neondatabase/neon/issues/7462 --------- Signed-off-by: Alex Chi Z --- libs/pageserver_api/src/models.rs | 45 +++++++++++-------- test_runner/fixtures/neon_fixtures.py | 2 +- test_runner/fixtures/utils.py | 6 +-- .../regress/test_attach_tenant_config.py | 2 +- test_runner/regress/test_aux_files.py | 12 +++-- 5 files changed, 39 insertions(+), 28 deletions(-) diff --git a/libs/pageserver_api/src/models.rs b/libs/pageserver_api/src/models.rs index 55e9c48421..6b0403c8ab 100644 --- a/libs/pageserver_api/src/models.rs +++ b/libs/pageserver_api/src/models.rs @@ -9,7 +9,6 @@ use std::{ collections::HashMap, io::{BufRead, Read}, num::{NonZeroU64, NonZeroUsize}, - str::FromStr, sync::atomic::AtomicUsize, time::{Duration, SystemTime}, }; @@ -334,14 +333,28 @@ pub struct TenantConfig { /// Unset -> V1 /// -> V2 /// -> CrossValidation -> V2 -#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[derive( + Eq, + PartialEq, + Debug, + Copy, + Clone, + strum_macros::EnumString, + strum_macros::Display, + serde_with::DeserializeFromStr, + serde_with::SerializeDisplay, +)] +#[strum(serialize_all = "kebab-case")] pub enum AuxFilePolicy { /// V1 aux file policy: store everything in AUX_FILE_KEY + #[strum(ascii_case_insensitive)] V1, /// V2 aux file policy: store in the AUX_FILE keyspace + #[strum(ascii_case_insensitive)] V2, /// Cross validation runs both formats on the write path and does validation /// on the read path. + #[strum(ascii_case_insensitive)] CrossValidation, } @@ -407,23 +420,6 @@ impl AuxFilePolicy { } } -impl FromStr for AuxFilePolicy { - type Err = anyhow::Error; - - fn from_str(s: &str) -> Result { - let s = s.to_lowercase(); - if s == "v1" { - Ok(Self::V1) - } else if s == "v2" { - Ok(Self::V2) - } else if s == "crossvalidation" || s == "cross_validation" { - Ok(Self::CrossValidation) - } else { - anyhow::bail!("cannot parse {} to aux file policy", s) - } - } -} - #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] #[serde(tag = "kind")] pub enum EvictionPolicy { @@ -1405,6 +1401,7 @@ impl PagestreamBeMessage { #[cfg(test)] mod tests { use serde_json::json; + use std::str::FromStr; use super::*; @@ -1667,4 +1664,14 @@ mod tests { AuxFilePolicy::V2 )); } + + #[test] + fn test_aux_parse() { + assert_eq!(AuxFilePolicy::from_str("V2").unwrap(), AuxFilePolicy::V2); + assert_eq!(AuxFilePolicy::from_str("v2").unwrap(), AuxFilePolicy::V2); + assert_eq!( + AuxFilePolicy::from_str("cross-validation").unwrap(), + AuxFilePolicy::CrossValidation + ); + } } diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index b02054a702..796ae7217b 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -1625,7 +1625,7 @@ class NeonCli(AbstractNeonCli): args.extend(["-c", "switch_aux_file_policy:v1"]) if aux_file_v2 is AuxFileStore.CrossValidation: - args.extend(["-c", "switch_aux_file_policy:cross_validation"]) + args.extend(["-c", "switch_aux_file_policy:cross-validation"]) if set_default: args.append("--set-default") diff --git a/test_runner/fixtures/utils.py b/test_runner/fixtures/utils.py index 70263245e7..22bb43c580 100644 --- a/test_runner/fixtures/utils.py +++ b/test_runner/fixtures/utils.py @@ -495,9 +495,9 @@ def assert_no_errors(log_file, service, allowed_errors): @enum.unique class AuxFileStore(str, enum.Enum): - V1 = "V1" - V2 = "V2" - CrossValidation = "CrossValidation" + V1 = "v1" + V2 = "v2" + CrossValidation = "cross-validation" def __repr__(self) -> 
str: return f"'aux-{self.value}'" diff --git a/test_runner/regress/test_attach_tenant_config.py b/test_runner/regress/test_attach_tenant_config.py index 2ec375271c..531679c7af 100644 --- a/test_runner/regress/test_attach_tenant_config.py +++ b/test_runner/regress/test_attach_tenant_config.py @@ -190,7 +190,7 @@ def test_fully_custom_config(positive_env: NeonEnv): "trace_read_requests": True, "walreceiver_connect_timeout": "13m", "image_layer_creation_check_threshold": 1, - "switch_aux_file_policy": "CrossValidation", + "switch_aux_file_policy": "cross-validation", } ps_http = env.pageserver.http_client() diff --git a/test_runner/regress/test_aux_files.py b/test_runner/regress/test_aux_files.py index be9c41a867..5328aef156 100644 --- a/test_runner/regress/test_aux_files.py +++ b/test_runner/regress/test_aux_files.py @@ -1,5 +1,6 @@ from fixtures.log_helper import log from fixtures.neon_fixtures import ( + AuxFileStore, NeonEnvBuilder, logical_replication_sync, ) @@ -14,7 +15,7 @@ def test_aux_v2_config_switch(neon_env_builder: NeonEnvBuilder, vanilla_pg): timeline_id = env.initial_timeline tenant_config = client.tenant_config(tenant_id).effective_config - tenant_config["switch_aux_file_policy"] = "V2" + tenant_config["switch_aux_file_policy"] = AuxFileStore.V2 client.set_tenant_config(tenant_id, tenant_config) # aux file v2 is enabled on the write path, so for now, it should be unset (or null) assert ( @@ -49,7 +50,10 @@ def test_aux_v2_config_switch(neon_env_builder: NeonEnvBuilder, vanilla_pg): with env.pageserver.http_client() as client: # aux file v2 flag should be enabled at this point - assert client.timeline_detail(tenant_id, timeline_id)["last_aux_file_policy"] == "V2" + assert ( + client.timeline_detail(tenant_id, timeline_id)["last_aux_file_policy"] + == AuxFileStore.V2 + ) with env.pageserver.http_client() as client: tenant_config = client.tenant_config(tenant_id).effective_config tenant_config["switch_aux_file_policy"] = "V1" @@ -59,7 +63,7 @@ def test_aux_v2_config_switch(neon_env_builder: NeonEnvBuilder, vanilla_pg): client.timeline_detail(tenant_id=tenant_id, timeline_id=timeline_id)[ "last_aux_file_policy" ] - == "V2" + == AuxFileStore.V2 ) env.pageserver.restart() with env.pageserver.http_client() as client: @@ -68,5 +72,5 @@ def test_aux_v2_config_switch(neon_env_builder: NeonEnvBuilder, vanilla_pg): client.timeline_detail(tenant_id=tenant_id, timeline_id=timeline_id)[ "last_aux_file_policy" ] - == "V2" + == AuxFileStore.V2 ) From 014f822a789efd2a64fbcf79de5acd2f07018952 Mon Sep 17 00:00:00 2001 From: John Spray Date: Wed, 22 May 2024 19:17:47 +0100 Subject: [PATCH 062/220] tests: refine test_secondary_background_downloads (#7829) ## Problem This test relied on some sleeps, and was failing ~5% of the time. ## Summary of changes Use log-watching rather than straight waits, and make timeouts more generous for the CI environment. 
--- .../regress/test_pageserver_secondary.py | 51 +++++++++++++++++-- 1 file changed, 46 insertions(+), 5 deletions(-) diff --git a/test_runner/regress/test_pageserver_secondary.py b/test_runner/regress/test_pageserver_secondary.py index 127340a1e7..25a3f8521c 100644 --- a/test_runner/regress/test_pageserver_secondary.py +++ b/test_runner/regress/test_pageserver_secondary.py @@ -578,7 +578,7 @@ def test_secondary_background_downloads(neon_env_builder: NeonEnvBuilder): default_download_period_secs = 60 # The upload period, which will also be the download once the secondary has seen its first heatmap - upload_period_secs = 20 + upload_period_secs = 30 for _i in range(0, tenant_count): tenant_id = TenantId.generate() @@ -596,11 +596,26 @@ def test_secondary_background_downloads(neon_env_builder: NeonEnvBuilder): tenant_timelines[tenant_id] = [timeline_a, timeline_b] + def await_log(pageserver, deadline, expression): + """ + Wrapper around assert_log_contains that waits with a deadline rather than timeout + """ + now = time.time() + if now > deadline: + raise RuntimeError(f"Timed out waiting for {expression}") + else: + timeout = int(deadline - now) + 1 + try: + wait_until(timeout, 1, lambda: pageserver.assert_log_contains(expression)) # type: ignore + except: + log.error(f"Timed out waiting for '{expression}'") + raise + t_start = time.time() # Wait long enough that the background downloads should happen; we expect all the inital layers # of all the initial timelines to show up on the secondary location of each tenant. - time.sleep(default_download_period_secs * 1.5) + initial_download_deadline = time.time() + default_download_period_secs * 3 for tenant_id, timelines in tenant_timelines.items(): attached_to_id = env.storage_controller.locate(tenant_id)[0]["node_id"] @@ -608,8 +623,24 @@ def test_secondary_background_downloads(neon_env_builder: NeonEnvBuilder): # We only have two: the other one must be secondary ps_secondary = next(p for p in env.pageservers if p != ps_attached) + now = time.time() + if now > initial_download_deadline: + raise RuntimeError("Timed out waiting for initial secondary download") + else: + for timeline_id in timelines: + log.info( + f"Waiting for downloads of timeline {timeline_id} on secondary pageserver {ps_secondary.id}" + ) + await_log( + ps_secondary, + initial_download_deadline, + f".*{timeline_id}.*Wrote timeline_detail.*", + ) + for timeline_id in timelines: - log.info(f"Checking for secondary timeline {timeline_id} on node {ps_secondary.id}") + log.info( + f"Checking for secondary timeline downloads {timeline_id} on node {ps_secondary.id}" + ) # One or more layers should be present for all timelines assert ps_secondary.list_layers(tenant_id, timeline_id) @@ -617,7 +648,7 @@ def test_secondary_background_downloads(neon_env_builder: NeonEnvBuilder): env.storage_controller.pageserver_api().timeline_delete(tenant_id, timelines[1]) # Wait long enough for the secondary locations to see the deletion: 2x period plus a grace factor - time.sleep(upload_period_secs * 2.5) + deletion_deadline = time.time() + upload_period_secs * 3 for tenant_id, timelines in tenant_timelines.items(): attached_to_id = env.storage_controller.locate(tenant_id)[0]["node_id"] @@ -625,6 +656,16 @@ def test_secondary_background_downloads(neon_env_builder: NeonEnvBuilder): # We only have two: the other one must be secondary ps_secondary = next(p for p in env.pageservers if p != ps_attached) + expect_del_timeline = timelines[1] + log.info( + f"Waiting for deletion of timeline 
{expect_del_timeline} on secondary pageserver {ps_secondary.id}" + ) + await_log( + ps_secondary, + deletion_deadline, + f".*Timeline no longer in heatmap.*{expect_del_timeline}.*", + ) + # This one was not deleted assert ps_secondary.list_layers(tenant_id, timelines[0]) @@ -632,7 +673,7 @@ def test_secondary_background_downloads(neon_env_builder: NeonEnvBuilder): log.info( f"Checking for secondary timeline deletion {tenant_id}/{timeline_id} on node {ps_secondary.id}" ) - assert not ps_secondary.list_layers(tenant_id, timelines[1]) + assert not ps_secondary.list_layers(tenant_id, expect_del_timeline) t_end = time.time() From f98fdd20e390822a890f596a7932ca81b47e00ce Mon Sep 17 00:00:00 2001 From: John Spray Date: Wed, 22 May 2024 19:38:22 +0100 Subject: [PATCH 063/220] tests: add a couple of allow lists for shutdown cases (#7844) ## Problem Failures on some of our uglier shutdown log messages: https://neon-github-public-dev.s3.amazonaws.com/reports/main/9192662995/index.html#suites/07874de07c4a1c9effe0d92da7755ebf/51b365408678c66f/ ## Summary of changes - Allow-list these errors. --- test_runner/fixtures/pageserver/allowed_errors.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/test_runner/fixtures/pageserver/allowed_errors.py b/test_runner/fixtures/pageserver/allowed_errors.py index 91cd67d107..fa6e4eaafd 100755 --- a/test_runner/fixtures/pageserver/allowed_errors.py +++ b/test_runner/fixtures/pageserver/allowed_errors.py @@ -70,6 +70,7 @@ DEFAULT_PAGESERVER_ALLOWED_ERRORS = ( # this is expected given our collaborative shutdown approach for the UploadQueue ".*Compaction failed.*, retrying in .*: Other\\(queue is in state Stopped.*", ".*Compaction failed.*, retrying in .*: ShuttingDown", + ".*Compaction failed.*, retrying in .*: timeline shutting down.*", # Pageserver timeline deletion should be polled until it gets 404, so ignore it globally ".*Error processing HTTP request: NotFound: Timeline .* was not found", ".*took more than expected to complete.*", @@ -91,6 +92,10 @@ DEFAULT_PAGESERVER_ALLOWED_ERRORS = ( ".*WARN deletion backend: calling control plane generation validation API failed.*error sending request.*", # Can happen when the test shuts down the storage controller while it is calling the utilization API ".*WARN.*path=/v1/utilization .*request was dropped before completing", + # Can happen during shutdown + ".*scheduling deletion on drop failed: queue is in state Stopped.*", + # Can happen during shutdown + ".*ignoring failure to find gc cutoffs: timeline shutting down.*", ) From 4a278cce7ce5b7f32360e85fd41219df95cc9a86 Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Wed, 22 May 2024 15:05:26 -0400 Subject: [PATCH 064/220] chore(pageserver): add force aux file policy switch handler (#7842) For existing users, we want to allow doing a force switch for their aux file policy. 
Part of #7462 --------- Signed-off-by: Alex Chi Z --- pageserver/src/http/routes.rs | 78 ++++++++++++++++++----------- pageserver/src/pgdatadir_mapping.rs | 14 +----- pageserver/src/tenant.rs | 61 ++++++++++++++++++++++ pageserver/src/tenant/timeline.rs | 8 +++ 4 files changed, 119 insertions(+), 42 deletions(-) diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index 34b9806a26..7b55e88096 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -16,6 +16,7 @@ use hyper::header; use hyper::StatusCode; use hyper::{Body, Request, Response, Uri}; use metrics::launch_timestamp::LaunchTimestamp; +use pageserver_api::models::AuxFilePolicy; use pageserver_api::models::IngestAuxFilesRequest; use pageserver_api::models::ListAuxFilesRequest; use pageserver_api::models::LocationConfig; @@ -2307,6 +2308,31 @@ async fn post_tracing_event_handler( json_response(StatusCode::OK, ()) } +async fn force_aux_policy_switch_handler( + mut r: Request, + _cancel: CancellationToken, +) -> Result, ApiError> { + check_permission(&r, None)?; + let tenant_shard_id: TenantShardId = parse_request_param(&r, "tenant_shard_id")?; + let timeline_id: TimelineId = parse_request_param(&r, "timeline_id")?; + let policy: AuxFilePolicy = json_request(&mut r).await?; + + let state = get_state(&r); + + let tenant = state + .tenant_manager + .get_attached_tenant_shard(tenant_shard_id)?; + tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?; + let timeline = + active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id) + .await?; + timeline + .do_switch_aux_policy(policy) + .map_err(ApiError::InternalServerError)?; + + json_response(StatusCode::OK, ()) +} + async fn put_io_engine_handler( mut r: Request, _cancel: CancellationToken, @@ -2384,19 +2410,9 @@ async fn list_aux_files( active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id) .await?; - let process = || async move { - let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download); - let files = timeline.list_aux_files(body.lsn, &ctx).await?; - Ok::<_, anyhow::Error>(files) - }; - - match process().await { - Ok(st) => json_response(StatusCode::OK, st), - Err(err) => json_response( - StatusCode::INTERNAL_SERVER_ERROR, - ApiError::InternalServerError(err).to_string(), - ), - } + let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download); + let files = timeline.list_aux_files(body.lsn, &ctx).await?; + json_response(StatusCode::OK, files) } async fn ingest_aux_files( @@ -2414,24 +2430,22 @@ async fn ingest_aux_files( active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id) .await?; - let process = || async move { - let mut modification = timeline.begin_modification(Lsn( - timeline.get_last_record_lsn().0 + 8 - ) /* advance LSN by 8 */); - let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download); - for (fname, content) in body.aux_files { - modification - .put_file(&fname, content.as_bytes(), &ctx) - .await?; - } - modification.commit(&ctx).await?; - Ok::<_, anyhow::Error>(()) - }; - - match process().await { - Ok(st) => json_response(StatusCode::OK, st), - Err(err) => Err(ApiError::InternalServerError(err)), + let mut modification = timeline.begin_modification( + Lsn(timeline.get_last_record_lsn().0 + 8), /* advance LSN by 8 */ + ); + let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download); + for (fname, content) in body.aux_files { + modification + 
.put_file(&fname, content.as_bytes(), &ctx) + .await + .map_err(ApiError::InternalServerError)?; } + modification + .commit(&ctx) + .await + .map_err(ApiError::InternalServerError)?; + + json_response(StatusCode::OK, ()) } /// Report on the largest tenants on this pageserver, for the storage controller to identify @@ -2814,6 +2828,10 @@ pub fn make_router( |r| api_handler(r, timeline_collect_keyspace), ) .put("/v1/io_engine", |r| api_handler(r, put_io_engine_handler)) + .put( + "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/force_aux_policy_switch", + |r| api_handler(r, force_aux_policy_switch_handler), + ) .get("/v1/utilization", |r| api_handler(r, get_utilization)) .post( "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/ingest_aux_files", diff --git a/pageserver/src/pgdatadir_mapping.rs b/pageserver/src/pgdatadir_mapping.rs index 7dea687c46..afba34c6d1 100644 --- a/pageserver/src/pgdatadir_mapping.rs +++ b/pageserver/src/pgdatadir_mapping.rs @@ -1489,14 +1489,7 @@ impl<'a> DatadirModification<'a> { if aux_files_key_v1.is_empty() { None } else { - self.tline - .last_aux_file_policy - .store(Some(AuxFilePolicy::V1)); - self.tline - .remote_client - .schedule_index_upload_for_aux_file_policy_update(Some( - AuxFilePolicy::V1, - ))?; + self.tline.do_switch_aux_policy(AuxFilePolicy::V1)?; Some(AuxFilePolicy::V1) } } else { @@ -1504,10 +1497,7 @@ impl<'a> DatadirModification<'a> { }; if AuxFilePolicy::is_valid_migration_path(current_policy, switch_policy) { - self.tline.last_aux_file_policy.store(Some(switch_policy)); - self.tline - .remote_client - .schedule_index_upload_for_aux_file_policy_update(Some(switch_policy))?; + self.tline.do_switch_aux_policy(switch_policy)?; info!(current=?current_policy, next=?switch_policy, "switching aux file policy"); switch_policy } else { diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index caf26e0a0b..2bb199b228 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -5999,6 +5999,67 @@ mod tests { ); } + #[tokio::test] + async fn aux_file_policy_force_switch() { + let mut harness = TenantHarness::create("aux_file_policy_force_switch").unwrap(); + harness.tenant_conf.switch_aux_file_policy = AuxFilePolicy::V1; + let (tenant, ctx) = harness.load().await; + + let mut lsn = Lsn(0x08); + + let tline: Arc = tenant + .create_test_timeline(TIMELINE_ID, lsn, DEFAULT_PG_VERSION, &ctx) + .await + .unwrap(); + + assert_eq!( + tline.last_aux_file_policy.load(), + None, + "no aux file is written so it should be unset" + ); + + { + lsn += 8; + let mut modification = tline.begin_modification(lsn); + modification + .put_file("pg_logical/mappings/test1", b"first", &ctx) + .await + .unwrap(); + modification.commit(&ctx).await.unwrap(); + } + + tline.do_switch_aux_policy(AuxFilePolicy::V2).unwrap(); + + assert_eq!( + tline.last_aux_file_policy.load(), + Some(AuxFilePolicy::V2), + "dirty index_part.json reflected state is yet to be updated" + ); + + // lose all data from v1 + let files = tline.list_aux_files(lsn, &ctx).await.unwrap(); + assert_eq!(files.get("pg_logical/mappings/test1"), None); + + { + lsn += 8; + let mut modification = tline.begin_modification(lsn); + modification + .put_file("pg_logical/mappings/test2", b"second", &ctx) + .await + .unwrap(); + modification.commit(&ctx).await.unwrap(); + } + + // read data ingested in v2 + let files = tline.list_aux_files(lsn, &ctx).await.unwrap(); + assert_eq!( + files.get("pg_logical/mappings/test2"), + Some(&bytes::Bytes::from_static(b"second")) + ); + // lose all data from v1 + 
assert_eq!(files.get("pg_logical/mappings/test1"), None); + } + #[tokio::test] async fn aux_file_policy_auto_detect() { let mut harness = TenantHarness::create("aux_file_policy_auto_detect").unwrap(); diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 1f8ee9ffc4..63d03b3c68 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -4606,6 +4606,14 @@ impl Timeline { ) -> Result, anyhow::Error> { detach_ancestor::complete(self, tenant, prepared, ctx).await } + + /// Switch aux file policy and schedule upload to the index part. + pub(crate) fn do_switch_aux_policy(&self, policy: AuxFilePolicy) -> anyhow::Result<()> { + self.last_aux_file_policy.store(Some(policy)); + self.remote_client + .schedule_index_upload_for_aux_file_policy_update(Some(policy))?; + Ok(()) + } } /// Top-level failure to compact. From ff560a1113046ff29acf2723c0183db7c2d128f1 Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Wed, 22 May 2024 17:28:47 -0400 Subject: [PATCH 065/220] chore(pageserver): use kebab case for compaction algorithms (#7845) Signed-off-by: Alex Chi Z --- libs/pageserver_api/src/models.rs | 21 ++++++++++++++++--- pageserver/src/tenant.rs | 10 ++++++--- pageserver/src/tenant/config.rs | 13 ++++++++---- pageserver/src/tenant/timeline.rs | 14 +++++++------ .../regress/test_attach_tenant_config.py | 2 +- test_runner/regress/test_compaction.py | 4 ++-- 6 files changed, 45 insertions(+), 19 deletions(-) diff --git a/libs/pageserver_api/src/models.rs b/libs/pageserver_api/src/models.rs index 6b0403c8ab..9311dab33c 100644 --- a/libs/pageserver_api/src/models.rs +++ b/libs/pageserver_api/src/models.rs @@ -305,7 +305,7 @@ pub struct TenantConfig { pub compaction_period: Option, pub compaction_threshold: Option, // defer parsing compaction_algorithm, like eviction_policy - pub compaction_algorithm: Option, + pub compaction_algorithm: Option, pub gc_horizon: Option, pub gc_period: Option, pub image_creation_threshold: Option, @@ -438,13 +438,28 @@ impl EvictionPolicy { } } -#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] -#[serde(tag = "kind")] +#[derive( + Eq, + PartialEq, + Debug, + Copy, + Clone, + strum_macros::EnumString, + strum_macros::Display, + serde_with::DeserializeFromStr, + serde_with::SerializeDisplay, +)] +#[strum(serialize_all = "kebab-case")] pub enum CompactionAlgorithm { Legacy, Tiered, } +#[derive(Eq, PartialEq, Debug, Clone, Serialize, Deserialize)] +pub struct CompactionAlgorithmSettings { + pub kind: CompactionAlgorithm, +} + #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] pub struct EvictionPolicyLayerAccessThreshold { #[serde(with = "humantime_serde")] diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 2bb199b228..540eb10ed2 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -3973,7 +3973,7 @@ mod tests { use hex_literal::hex; use pageserver_api::key::{AUX_FILES_KEY, AUX_KEY_PREFIX, NON_INHERITED_RANGE}; use pageserver_api::keyspace::KeySpace; - use pageserver_api::models::CompactionAlgorithm; + use pageserver_api::models::{CompactionAlgorithm, CompactionAlgorithmSettings}; use rand::{thread_rng, Rng}; use tests::storage_layer::ValuesReconstructState; use tests::timeline::{GetVectoredError, ShutdownMode}; @@ -5169,7 +5169,9 @@ mod tests { compaction_algorithm: CompactionAlgorithm, ) -> anyhow::Result<()> { let mut harness = TenantHarness::create(name)?; - harness.tenant_conf.compaction_algorithm = compaction_algorithm; + 
harness.tenant_conf.compaction_algorithm = CompactionAlgorithmSettings { + kind: compaction_algorithm, + }; let (tenant, ctx) = harness.load().await; let tline = tenant .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx) @@ -5526,7 +5528,9 @@ mod tests { compaction_algorithm: CompactionAlgorithm, ) -> anyhow::Result<()> { let mut harness = TenantHarness::create(name)?; - harness.tenant_conf.compaction_algorithm = compaction_algorithm; + harness.tenant_conf.compaction_algorithm = CompactionAlgorithmSettings { + kind: compaction_algorithm, + }; let (tenant, ctx) = harness.load().await; let tline = tenant .create_test_timeline(TIMELINE_ID, Lsn(0x08), DEFAULT_PG_VERSION, &ctx) diff --git a/pageserver/src/tenant/config.rs b/pageserver/src/tenant/config.rs index a695363cdc..342d705954 100644 --- a/pageserver/src/tenant/config.rs +++ b/pageserver/src/tenant/config.rs @@ -11,6 +11,7 @@ use anyhow::bail; use pageserver_api::models::AuxFilePolicy; use pageserver_api::models::CompactionAlgorithm; +use pageserver_api::models::CompactionAlgorithmSettings; use pageserver_api::models::EvictionPolicy; use pageserver_api::models::{self, ThrottleConfig}; use pageserver_api::shard::{ShardCount, ShardIdentity, ShardNumber, ShardStripeSize}; @@ -320,7 +321,7 @@ pub struct TenantConf { pub compaction_period: Duration, // Level0 delta layer threshold for compaction. pub compaction_threshold: usize, - pub compaction_algorithm: CompactionAlgorithm, + pub compaction_algorithm: CompactionAlgorithmSettings, // Determines how much history is retained, to allow // branching and read replicas at an older point in time. // The unit is #of bytes of WAL. @@ -406,7 +407,7 @@ pub struct TenantConfOpt { #[serde(skip_serializing_if = "Option::is_none")] #[serde(default)] - pub compaction_algorithm: Option, + pub compaction_algorithm: Option, #[serde(skip_serializing_if = "Option::is_none")] #[serde(default)] @@ -497,7 +498,9 @@ impl TenantConfOpt { .unwrap_or(global_conf.compaction_threshold), compaction_algorithm: self .compaction_algorithm - .unwrap_or(global_conf.compaction_algorithm), + .as_ref() + .unwrap_or(&global_conf.compaction_algorithm) + .clone(), gc_horizon: self.gc_horizon.unwrap_or(global_conf.gc_horizon), gc_period: self.gc_period.unwrap_or(global_conf.gc_period), image_creation_threshold: self @@ -550,7 +553,9 @@ impl Default for TenantConf { compaction_period: humantime::parse_duration(DEFAULT_COMPACTION_PERIOD) .expect("cannot parse default compaction period"), compaction_threshold: DEFAULT_COMPACTION_THRESHOLD, - compaction_algorithm: DEFAULT_COMPACTION_ALGORITHM, + compaction_algorithm: CompactionAlgorithmSettings { + kind: DEFAULT_COMPACTION_ALGORITHM, + }, gc_horizon: DEFAULT_GC_HORIZON, gc_period: humantime::parse_duration(DEFAULT_GC_PERIOD) .expect("cannot parse default gc period"), diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 63d03b3c68..881e7f8f3c 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -23,9 +23,9 @@ use pageserver_api::{ }, keyspace::{KeySpaceAccum, KeySpaceRandomAccum, SparseKeyPartitioning}, models::{ - AtomicAuxFilePolicy, AuxFilePolicy, CompactionAlgorithm, DownloadRemoteLayersTaskInfo, - DownloadRemoteLayersTaskSpawnRequest, EvictionPolicy, InMemoryLayerInfo, LayerMapInfo, - LsnLease, TimelineState, + AtomicAuxFilePolicy, AuxFilePolicy, CompactionAlgorithm, CompactionAlgorithmSettings, + DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskSpawnRequest, EvictionPolicy, + 
InMemoryLayerInfo, LayerMapInfo, LsnLease, TimelineState, }, reltag::BlockNumber, shard::{ShardIdentity, ShardNumber, TenantShardId}, @@ -1700,7 +1700,7 @@ impl Timeline { return Ok(()); } - match self.get_compaction_algorithm() { + match self.get_compaction_algorithm_settings().kind { CompactionAlgorithm::Tiered => self.compact_tiered(cancel, ctx).await, CompactionAlgorithm::Legacy => self.compact_legacy(cancel, flags, ctx).await, } @@ -2096,12 +2096,14 @@ impl Timeline { .unwrap_or(self.conf.default_tenant_conf.image_creation_threshold) } - fn get_compaction_algorithm(&self) -> CompactionAlgorithm { + fn get_compaction_algorithm_settings(&self) -> CompactionAlgorithmSettings { let tenant_conf = &self.tenant_conf.load(); tenant_conf .tenant_conf .compaction_algorithm - .unwrap_or(self.conf.default_tenant_conf.compaction_algorithm) + .as_ref() + .unwrap_or(&self.conf.default_tenant_conf.compaction_algorithm) + .clone() } fn get_eviction_policy(&self) -> EvictionPolicy { diff --git a/test_runner/regress/test_attach_tenant_config.py b/test_runner/regress/test_attach_tenant_config.py index 531679c7af..8c60b454d8 100644 --- a/test_runner/regress/test_attach_tenant_config.py +++ b/test_runner/regress/test_attach_tenant_config.py @@ -162,7 +162,7 @@ def test_fully_custom_config(positive_env: NeonEnv): "checkpoint_distance": 10000, "checkpoint_timeout": "13m", "compaction_algorithm": { - "kind": "Tiered", + "kind": "tiered", }, "eviction_policy": { "kind": "LayerAccessThreshold", diff --git a/test_runner/regress/test_compaction.py b/test_runner/regress/test_compaction.py index 6a3515e1bd..4850a5c688 100644 --- a/test_runner/regress/test_compaction.py +++ b/test_runner/regress/test_compaction.py @@ -194,8 +194,8 @@ def test_sharding_compaction( class CompactionAlgorithm(str, enum.Enum): - LEGACY = "Legacy" - TIERED = "Tiered" + LEGACY = "legacy" + TIERED = "tiered" @pytest.mark.parametrize( From eb0c026aac95bf8aad1c115aabb5697e86594eac Mon Sep 17 00:00:00 2001 From: Oleg Vasilev Date: Thu, 23 May 2024 00:48:59 +0300 Subject: [PATCH 066/220] Bump vm-builder v0.28.1 -> v0.29.3 (#7849) One change: runner: allow coredump collection (#931) --- .github/workflows/build_and_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 2ab1417d6d..f8c011a0a5 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -934,7 +934,7 @@ jobs: matrix: version: [ v14, v15, v16 ] env: - VM_BUILDER_VERSION: v0.28.1 + VM_BUILDER_VERSION: v0.29.3 steps: - name: Checkout From a43a1ad1df11fc547fa16874f9776fc2e1bf4c9b Mon Sep 17 00:00:00 2001 From: John Spray Date: Thu, 23 May 2024 09:13:55 +0100 Subject: [PATCH 067/220] pageserver: fix API-driven secondary downloads possibly colliding with background downloads (#7848) ## Problem We've seen some strange behaviors when doing lots of migrations involving secondary locations. One of these was where a tenant was apparently stuck in the `Scheduler::running` list, but didn't appear to be making any progress. Another was a shutdown hang (https://github.com/neondatabase/cloud/issues/13576). 
## Summary of changes - Fix one issue (probably not the only one) where a tenant in the `pending` list could proceed to `spawn` even if the same tenant already had a running task via `handle_command` (this could have resulted in a weird value of SecondaryProgress) - Add various extra logging: - log before as well as after layer downloads so that it would be obvious if we were stuck in remote storage code (we shouldn't be, it has built in timeouts) - log the number of running + pending jobs from the scheduler every time it wakes up to do a scheduling iteration (~10s) -- this is quite chatty, but not compared with the volume of logs on a busy pageserver. It should give us confidence that the scheduler loop is still alive, and visibility of how many tasks the scheduler thinks are running. --- pageserver/src/tenant/secondary/downloader.rs | 7 ++++++- .../src/tenant/secondary/heatmap_uploader.rs | 2 +- pageserver/src/tenant/secondary/scheduler.rs | 20 ++++++++++++++++--- 3 files changed, 24 insertions(+), 5 deletions(-) diff --git a/pageserver/src/tenant/secondary/downloader.rs b/pageserver/src/tenant/secondary/downloader.rs index 870475eb57..de30c4dcb6 100644 --- a/pageserver/src/tenant/secondary/downloader.rs +++ b/pageserver/src/tenant/secondary/downloader.rs @@ -93,7 +93,7 @@ pub(super) async fn downloader_task( scheduler .run(command_queue, background_jobs_can_start, cancel) - .instrument(info_span!("secondary_downloads")) + .instrument(info_span!("secondary_download_scheduler")) .await } @@ -1013,6 +1013,11 @@ impl<'a> TenantDownloader<'a> { ); // Note: no backoff::retry wrapper here because download_layer_file does its own retries internally + tracing::info!( + "Starting download of layer {}, size {}", + layer.name, + layer.metadata.file_size + ); let downloaded_bytes = match download_layer_file( self.conf, self.remote_storage, diff --git a/pageserver/src/tenant/secondary/heatmap_uploader.rs b/pageserver/src/tenant/secondary/heatmap_uploader.rs index fddced3ead..9c7a9c4234 100644 --- a/pageserver/src/tenant/secondary/heatmap_uploader.rs +++ b/pageserver/src/tenant/secondary/heatmap_uploader.rs @@ -53,7 +53,7 @@ pub(super) async fn heatmap_uploader_task( scheduler .run(command_queue, background_jobs_can_start, cancel) - .instrument(info_span!("heatmap_uploader")) + .instrument(info_span!("heatmap_upload_scheduler")) .await } diff --git a/pageserver/src/tenant/secondary/scheduler.rs b/pageserver/src/tenant/secondary/scheduler.rs index 3d042f4513..0ec1c7872a 100644 --- a/pageserver/src/tenant/secondary/scheduler.rs +++ b/pageserver/src/tenant/secondary/scheduler.rs @@ -179,6 +179,13 @@ where // Schedule some work, if concurrency limit permits it self.spawn_pending(); + // This message is printed every scheduling iteration as proof of liveness when looking at logs + tracing::info!( + "Status: {} tasks running, {} pending", + self.running.len(), + self.pending.len() + ); + // Between scheduling iterations, we will: // - Drain any complete tasks and spawn pending tasks // - Handle incoming administrative commands @@ -258,7 +265,11 @@ where self.tasks.spawn(fut); - self.running.insert(tenant_shard_id, in_progress); + let replaced = self.running.insert(tenant_shard_id, in_progress); + debug_assert!(replaced.is_none()); + if replaced.is_some() { + tracing::warn!(%tenant_shard_id, "Unexpectedly spawned a task when one was already running") + } } /// For all pending tenants that are elegible for execution, spawn their task. 
@@ -268,7 +279,9 @@ where while !self.pending.is_empty() && self.running.len() < self.concurrency { // unwrap: loop condition includes !is_empty() let pending = self.pending.pop_front().unwrap(); - self.do_spawn(pending); + if !self.running.contains_key(pending.get_tenant_shard_id()) { + self.do_spawn(pending); + } } } @@ -321,7 +334,8 @@ where let tenant_shard_id = job.get_tenant_shard_id(); let barrier = if let Some(barrier) = self.get_running(tenant_shard_id) { - tracing::info!("Command already running, waiting for it"); + tracing::info!(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), + "Command already running, waiting for it"); barrier } else { let running = self.spawn_now(job); From 58e31fe0980060f7345f30c617fa30aea0c2d11e Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Thu, 23 May 2024 11:25:38 +0300 Subject: [PATCH 068/220] test_attach_tenant_config: add allowed error (#7839) [evidence] of quite rare flaky. the detach can cause this with the right timing. [evidence]: https://neon-github-public-dev.s3.amazonaws.com/reports/pr-7650/9191613501/index.html#suites/7745dadbd815ab87f5798aa881796f47/2190222925001078 --- test_runner/regress/test_attach_tenant_config.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/test_runner/regress/test_attach_tenant_config.py b/test_runner/regress/test_attach_tenant_config.py index 8c60b454d8..1d193b8999 100644 --- a/test_runner/regress/test_attach_tenant_config.py +++ b/test_runner/regress/test_attach_tenant_config.py @@ -17,9 +17,13 @@ def positive_env(neon_env_builder: NeonEnvBuilder) -> NeonEnv: neon_env_builder.enable_pageserver_remote_storage(RemoteStorageKind.LOCAL_FS) env = neon_env_builder.init_start() - # eviction might be the first one after an attach to access the layers - env.pageserver.allowed_errors.append( - ".*unexpectedly on-demand downloading remote layer .* for task kind Eviction" + env.pageserver.allowed_errors.extend( + [ + # eviction might be the first one after an attach to access the layers + ".*unexpectedly on-demand downloading remote layer .* for task kind Eviction", + # detach can happen before we get to validate the generation number + ".*deletion backend: Dropped remote consistent LSN updates for tenant.*", + ] ) assert isinstance(env.pageserver_remote_storage, LocalFsStorage) return env From 8f3c316bae85b14b351a4cbd0bbe3c13c5ca9770 Mon Sep 17 00:00:00 2001 From: Arthur Petukhovsky Date: Thu, 23 May 2024 09:45:24 +0100 Subject: [PATCH 069/220] Skip unnecessary shared state updates in safekeepers (#7851) I looked at the metrics from https://github.com/neondatabase/neon/pull/7768 on staging and it seems that manager does too many iterations. This is probably caused by background job `remove_wal.rs` which iterates over all timelines and tries to remove WAL and persist control file. This causes shared state updates and wakes up the manager. The fix is to skip notifying about the updates if nothing was updated. --- safekeeper/src/safekeeper.rs | 6 +++--- safekeeper/src/timeline.rs | 32 +++++++++++++++++++++----------- 2 files changed, 24 insertions(+), 14 deletions(-) diff --git a/safekeeper/src/safekeeper.rs b/safekeeper/src/safekeeper.rs index e671d4f36a..4b1481a397 100644 --- a/safekeeper/src/safekeeper.rs +++ b/safekeeper/src/safekeeper.rs @@ -827,10 +827,10 @@ where /// Persist control file if there is something to save and enough time /// passed after the last save. 
- pub async fn maybe_persist_inmem_control_file(&mut self) -> Result<()> { + pub async fn maybe_persist_inmem_control_file(&mut self) -> Result { const CF_SAVE_INTERVAL: Duration = Duration::from_secs(300); if self.state.pers.last_persist_at().elapsed() < CF_SAVE_INTERVAL { - return Ok(()); + return Ok(false); } let need_persist = self.state.inmem.commit_lsn > self.state.commit_lsn || self.state.inmem.backup_lsn > self.state.backup_lsn @@ -840,7 +840,7 @@ where self.state.flush().await?; trace!("saved control file: {CF_SAVE_INTERVAL:?} passed"); } - Ok(()) + Ok(need_persist) } /// Handle request to append WAL. diff --git a/safekeeper/src/timeline.rs b/safekeeper/src/timeline.rs index 89c157d514..0cc6153373 100644 --- a/safekeeper/src/timeline.rs +++ b/safekeeper/src/timeline.rs @@ -104,11 +104,16 @@ pub type ReadGuardSharedState<'a> = RwLockReadGuard<'a, SharedState>; pub struct WriteGuardSharedState<'a> { tli: Arc, guard: RwLockWriteGuard<'a, SharedState>, + skip_update: bool, } impl<'a> WriteGuardSharedState<'a> { fn new(tli: Arc, guard: RwLockWriteGuard<'a, SharedState>) -> Self { - WriteGuardSharedState { tli, guard } + WriteGuardSharedState { + tli, + guard, + skip_update: false, + } } } @@ -149,10 +154,12 @@ impl<'a> Drop for WriteGuardSharedState<'a> { } }); - // send notification about shared state update - self.tli.shared_state_version_tx.send_modify(|old| { - *old += 1; - }); + if !self.skip_update { + // send notification about shared state update + self.tli.shared_state_version_tx.send_modify(|old| { + *old += 1; + }); + } } } @@ -802,7 +809,11 @@ impl Timeline { // update last_removed_segno let mut shared_state = self.write_shared_state().await; - shared_state.last_removed_segno = horizon_segno; + if shared_state.last_removed_segno != horizon_segno { + shared_state.last_removed_segno = horizon_segno; + } else { + shared_state.skip_update = true; + } Ok(()) } @@ -811,11 +822,10 @@ impl Timeline { /// to date so that storage nodes restart doesn't cause many pageserver -> /// safekeeper reconnections. pub async fn maybe_persist_control_file(self: &Arc) -> Result<()> { - self.write_shared_state() - .await - .sk - .maybe_persist_inmem_control_file() - .await + let mut guard = self.write_shared_state().await; + let changed = guard.sk.maybe_persist_inmem_control_file().await?; + guard.skip_update = !changed; + Ok(()) } /// Gather timeline data for metrics. From cd6d811213158195dca2327571f7a3cfcc571061 Mon Sep 17 00:00:00 2001 From: Anna Khanova <32508607+khanova@users.noreply.github.com> Date: Thu, 23 May 2024 11:41:29 +0200 Subject: [PATCH 070/220] [proxy] Do not fail after parquet upload error (#7858) ## Problem If the parquet upload was unsuccessful, it will panic. ## Summary of changes Write error in logs instead. 
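
For illustration only, a minimal sketch of the pattern applied in the diff below (the function and variable names here are placeholders, not the proxy's actual types): the upload result is turned into an `Option<anyhow::Error>` with `.err()` and recorded with `tracing::warn!`, rather than propagated with `?`.

```rust
use anyhow::Context;

// Stand-in for the real remote-storage upload call (placeholder).
fn upload(data: &[u8]) -> anyhow::Result<()> {
    if data.is_empty() {
        anyhow::bail!("empty body");
    }
    Ok(())
}

fn upload_best_effort(id: u64, data: &[u8]) {
    // Keep the error as a value instead of returning it to the caller.
    let maybe_err = upload(data).context("request_data_upload").err();
    if let Some(err) = maybe_err {
        // Only record the failure; the caller carries on.
        tracing::warn!(%id, %err, "failed to upload request data");
    }
}
```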
--- proxy/src/context/parquet.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/proxy/src/context/parquet.rs b/proxy/src/context/parquet.rs index 392821c430..a213a32ca4 100644 --- a/proxy/src/context/parquet.rs +++ b/proxy/src/context/parquet.rs @@ -355,7 +355,7 @@ async fn upload_parquet( "{year:04}/{month:02}/{day:02}/{hour:02}/requests_{id}.parquet" ))?; let cancel = CancellationToken::new(); - backoff::retry( + let maybe_err = backoff::retry( || async { let stream = futures::stream::once(futures::future::ready(Ok(data.clone()))); storage @@ -372,7 +372,12 @@ async fn upload_parquet( .await .ok_or_else(|| anyhow::Error::new(TimeoutOrCancel::Cancel)) .and_then(|x| x) - .context("request_data_upload")?; + .context("request_data_upload") + .err(); + + if let Some(err) = maybe_err { + tracing::warn!(%id, %err, "failed to upload request data"); + } Ok(buffer.writer()) } From 545f7e8cd7fcca09134f5c1eb47c8ff323dfad22 Mon Sep 17 00:00:00 2001 From: John Spray Date: Thu, 23 May 2024 10:50:21 +0100 Subject: [PATCH 071/220] tests: fix an allow list entry (#7856) https://github.com/neondatabase/neon/pull/7844 typo'd one of the expressions: https://neon-github-public-dev.s3.amazonaws.com/reports/main/9196993886/index.html#suites/07874de07c4a1c9effe0d92da7755ebf/e420fbfdb193bf80/ --- test_runner/fixtures/pageserver/allowed_errors.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_runner/fixtures/pageserver/allowed_errors.py b/test_runner/fixtures/pageserver/allowed_errors.py index fa6e4eaafd..ad8bbe2021 100755 --- a/test_runner/fixtures/pageserver/allowed_errors.py +++ b/test_runner/fixtures/pageserver/allowed_errors.py @@ -70,7 +70,7 @@ DEFAULT_PAGESERVER_ALLOWED_ERRORS = ( # this is expected given our collaborative shutdown approach for the UploadQueue ".*Compaction failed.*, retrying in .*: Other\\(queue is in state Stopped.*", ".*Compaction failed.*, retrying in .*: ShuttingDown", - ".*Compaction failed.*, retrying in .*: timeline shutting down.*", + ".*Compaction failed.*, retrying in .*: Other\\(timeline shutting down.*", # Pageserver timeline deletion should be polled until it gets 404, so ignore it globally ".*Error processing HTTP request: NotFound: Timeline .* was not found", ".*took more than expected to complete.*", From 95a49f00752c8e22f5f912b26f76409e2b515804 Mon Sep 17 00:00:00 2001 From: Peter Bendel Date: Thu, 23 May 2024 12:08:06 +0200 Subject: [PATCH 072/220] remove march=native from pgvector Makefile's OPTFLAGS (#7854) ## Problem By default, pgvector compiles with `-march=native` on some platforms for best performance. However, this can lead to `Illegal instruction` errors if trying to run the compiled extension on a different machine. I had this problem when trying to run the Neon compute docker image on MacOS with Apple Silicon with Rosetta. see https://github.com/pgvector/pgvector/blob/ff9b22977e3ef19866d23a54332c8717f258e8db/README.md?plain=1#L1021 ## Summary of changes Pass OPTFLAGS="" to make. --- Dockerfile.compute-node | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/Dockerfile.compute-node b/Dockerfile.compute-node index 5bf3246f34..87fb218245 100644 --- a/Dockerfile.compute-node +++ b/Dockerfile.compute-node @@ -243,12 +243,15 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/ COPY patches/pgvector.patch /pgvector.patch +# By default, pgvector Makefile uses `-march=native`. We don't want that, +# because we build the images on different machines than where we run them. 
+# Pass OPTFLAGS="" to remove it. RUN wget https://github.com/pgvector/pgvector/archive/refs/tags/v0.7.0.tar.gz -O pgvector.tar.gz && \ echo "1b5503a35c265408b6eb282621c5e1e75f7801afc04eecb950796cfee2e3d1d8 pgvector.tar.gz" | sha256sum --check && \ mkdir pgvector-src && cd pgvector-src && tar xvzf ../pgvector.tar.gz --strip-components=1 -C . && \ patch -p1 < /pgvector.patch && \ - make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \ - make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \ + make -j $(getconf _NPROCESSORS_ONLN) OPTFLAGS="" PG_CONFIG=/usr/local/pgsql/bin/pg_config && \ + make -j $(getconf _NPROCESSORS_ONLN) OPTFLAGS="" install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \ echo 'trusted = true' >> /usr/local/pgsql/share/extension/vector.control ######################################################################################### From 49d7f9b5a4cab00797bd8c85bf116478d9c1dee0 Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Thu, 23 May 2024 14:44:08 +0300 Subject: [PATCH 073/220] test_import_from_pageserver_small: try to make less flaky (#7843) With #7828 and proper fullbackup testing the test became flaky ([evidence]). - produce better assertion messages in `assert_pageserver_backups_equal` - use read only endpoint to confirm the row count [evidence]: https://neon-github-public-dev.s3.amazonaws.com/reports/pr-7839/9192447962/index.html#suites/89cfa994d71769e01e3fc4f475a1f3fa/49009214d0f8b8ce --- test_runner/fixtures/utils.py | 25 ++++++++++++++++++------- test_runner/regress/test_import.py | 9 +++------ 2 files changed, 21 insertions(+), 13 deletions(-) diff --git a/test_runner/fixtures/utils.py b/test_runner/fixtures/utils.py index 22bb43c580..89e116df28 100644 --- a/test_runner/fixtures/utils.py +++ b/test_runner/fixtures/utils.py @@ -541,11 +541,22 @@ def assert_pageserver_backups_equal(left: Path, right: Path, skip_files: Set[str left_list, right_list = map(build_hash_list, [left, right]) - try: - assert len(left_list) == len(right_list) + assert len(left_list) == len( + right_list + ), f"unexpected number of files on tar files, {len(left_list)} != {len(right_list)}" - for left_tuple, right_tuple in zip(left_list, right_list): - assert left_tuple == right_tuple - finally: - elapsed = time.time() - started_at - log.info(f"assert_pageserver_backups_equal completed in {elapsed}s") + mismatching = set() + + for left_tuple, right_tuple in zip(left_list, right_list): + left_path, left_hash = left_tuple + right_path, right_hash = right_tuple + assert ( + left_path == right_path + ), f"file count matched, expected these to be same paths: {left_path}, {right_path}" + if left_hash != right_hash: + mismatching.add(left_path) + + assert len(mismatching) == 0, f"files with hash mismatch: {mismatching}" + + elapsed = time.time() - started_at + log.info(f"assert_pageserver_backups_equal completed in {elapsed}s") diff --git a/test_runner/regress/test_import.py b/test_runner/regress/test_import.py index 62229ebfe7..ac27a4cf36 100644 --- a/test_runner/regress/test_import.py +++ b/test_runner/regress/test_import.py @@ -163,7 +163,7 @@ def test_import_from_pageserver_small( num_rows = 3000 lsn = _generate_data(num_rows, endpoint) - _import(num_rows, lsn, env, pg_bin, timeline, env.pg_distrib_dir, test_output_dir) + _import(num_rows, lsn, env, pg_bin, timeline, test_output_dir) @pytest.mark.timeout(1800) @@ -193,9 +193,7 @@ def test_import_from_pageserver_multisegment( log.info(f"timeline logical size = {logical_size / 
(1024 ** 2)}MB") assert logical_size > 1024**3 # = 1GB - tar_output_file = _import( - num_rows, lsn, env, pg_bin, timeline, env.pg_distrib_dir, test_output_dir - ) + tar_output_file = _import(num_rows, lsn, env, pg_bin, timeline, test_output_dir) # Check if the backup data contains multiple segment files cnt_seg_files = 0 @@ -235,7 +233,6 @@ def _import( env: NeonEnv, pg_bin: PgBin, timeline: TimelineId, - pg_distrib_dir: Path, test_output_dir: Path, ) -> Path: """Test importing backup data to the pageserver. @@ -295,7 +292,7 @@ def _import( wait_for_upload(client, tenant, timeline, lsn) # Check it worked - endpoint = env.endpoints.create_start(endpoint_id, tenant_id=tenant) + endpoint = env.endpoints.create_start(endpoint_id, tenant_id=tenant, lsn=lsn) assert endpoint.safe_psql("select count(*) from tbl") == [(expected_num_rows,)] # Take another fullbackup From d5d15eb6eb184b589483882148ab177a73f8db64 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arpad=20M=C3=BCller?= Date: Thu, 23 May 2024 14:28:05 +0200 Subject: [PATCH 074/220] Warn if a blob in an image is larger than 256 MiB (#7852) We'd like to get some bits reserved in the length field of image layers for future usage (compression). This PR bases on the assumption that we don't have any blobs that require more than 28 bits (3 bytes + 4 bits) to store the length, but as a preparation, before erroring, we want to first emit warnings as if the assumption is wrong, such warnings are less disruptive than errors. A metric would be even less disruptive (log messages are more slow, if we have a LOT of such large blobs then it would take a lot of time to print them). At the same time, likely such 256 MiB blobs will occupy an entire layer file, as they are larger than our target size. For layer files we already log something, so there shouldn't be a large increase in overhead. Part of #5431 --- pageserver/src/tenant/blob_io.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pageserver/src/tenant/blob_io.rs b/pageserver/src/tenant/blob_io.rs index 24b4e4f3ea..2be8816cef 100644 --- a/pageserver/src/tenant/blob_io.rs +++ b/pageserver/src/tenant/blob_io.rs @@ -238,10 +238,13 @@ impl BlobWriter { io_buf, Err(Error::new( ErrorKind::Other, - format!("blob too large ({} bytes)", len), + format!("blob too large ({len} bytes)"), )), ); } + if len > 0x0fff_ffff { + tracing::warn!("writing blob above future limit ({len} bytes)"); + } let mut len_buf = (len as u32).to_be_bytes(); len_buf[0] |= 0x80; io_buf.extend_from_slice(&len_buf[..]); From e28e46f20be1f1bd298bbba6c5a131f435acde52 Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Thu, 23 May 2024 09:45:29 -0400 Subject: [PATCH 075/220] fix(pageserver): make wal connstr a connstr (#7846) The list timeline API gives something like `"wal_source_connstr":"PgConnectionConfig { host: Domain(\"safekeeper-5.us-east-2.aws.neon.build\"), port: 6500, password: Some(REDACTED-STRING) }"`, which is weird. This pull request makes it somehow like a connection string. This field is not used at least in the neon database, so I assume no one is reading or parsing it. 
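
With the `Display` implementation added below, the same field renders as a plain connection string with the password omitted — for the example above, something like `postgresql://safekeeper-5.us-east-2.aws.neon.build:6500`.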
Signed-off-by: Alex Chi Z --- libs/postgres_connection/src/lib.rs | 7 +++++++ pageserver/src/http/routes.rs | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/libs/postgres_connection/src/lib.rs b/libs/postgres_connection/src/lib.rs index ccf9108895..9f57f3d507 100644 --- a/libs/postgres_connection/src/lib.rs +++ b/libs/postgres_connection/src/lib.rs @@ -178,6 +178,13 @@ impl PgConnectionConfig { } } +impl fmt::Display for PgConnectionConfig { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + // The password is intentionally hidden and not part of this display string. + write!(f, "postgresql://{}:{}", self.host, self.port) + } +} + impl fmt::Debug for PgConnectionConfig { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { // We want `password: Some(REDACTED-STRING)`, not `password: Some("REDACTED-STRING")` diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index 7b55e88096..8a061f3ae1 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -395,7 +395,7 @@ async fn build_timeline_info_common( let guard = timeline.last_received_wal.lock().unwrap(); if let Some(info) = guard.as_ref() { ( - Some(format!("{:?}", info.wal_source_connconf)), // Password is hidden, but it's for statistics only. + Some(format!("{}", info.wal_source_connconf)), // Password is hidden, but it's for statistics only. Some(info.last_received_msg_lsn), Some(info.last_received_msg_ts), ) From 75a52ac7fd7996f5faeb3c68bd15a9811b0d84f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arpad=20M=C3=BCller?= Date: Thu, 23 May 2024 17:10:24 +0200 Subject: [PATCH 076/220] Use Timeline::create_image_layer_for_rel_blocks in tiered compaction (#7850) Reduces duplication between tiered and legacy compaction by using the `Timeline::create_image_layer_for_rel_blocks` function. This way, we also use vectored get in tiered compaction, so the change has two benefits in one. fixes #7659 --------- Co-authored-by: Alex Chi Z. 
--- pageserver/src/tenant/timeline/compaction.rs | 75 +++++++++----------- 1 file changed, 32 insertions(+), 43 deletions(-) diff --git a/pageserver/src/tenant/timeline/compaction.rs b/pageserver/src/tenant/timeline/compaction.rs index 2eff469591..db8adfc16c 100644 --- a/pageserver/src/tenant/timeline/compaction.rs +++ b/pageserver/src/tenant/timeline/compaction.rs @@ -9,7 +9,10 @@ use std::ops::{Deref, Range}; use std::sync::Arc; use super::layer_manager::LayerManager; -use super::{CompactFlags, DurationRecorder, ImageLayerCreationMode, RecordedDuration, Timeline}; +use super::{ + CompactFlags, CreateImageLayersError, DurationRecorder, ImageLayerCreationMode, + RecordedDuration, Timeline, +}; use anyhow::{anyhow, Context}; use enumset::EnumSet; @@ -22,14 +25,13 @@ use tracing::{debug, info, info_span, trace, warn, Instrument}; use utils::id::TimelineId; use crate::context::{AccessStatsBehavior, RequestContext, RequestContextBuilder}; +use crate::page_cache; use crate::tenant::storage_layer::{AsLayerDesc, PersistentLayerDesc}; -use crate::tenant::timeline::{drop_rlock, is_rel_fsm_block_key, is_rel_vm_block_key, Hole}; +use crate::tenant::timeline::{drop_rlock, Hole, ImageLayerCreationOutcome}; use crate::tenant::timeline::{DeltaLayerWriter, ImageLayerWriter}; use crate::tenant::timeline::{Layer, ResidentLayer}; use crate::tenant::DeltaLayer; -use crate::tenant::PageReconstructError; use crate::virtual_file::{MaybeFatalIo, VirtualFile}; -use crate::{page_cache, ZERO_PAGE}; use crate::keyspace::KeySpace; use crate::repository::Key; @@ -1150,10 +1152,10 @@ impl TimelineAdaptor { lsn: Lsn, key_range: &Range, ctx: &RequestContext, - ) -> Result<(), PageReconstructError> { + ) -> Result<(), CreateImageLayersError> { let timer = self.timeline.metrics.create_images_time_histo.start_timer(); - let mut image_layer_writer = ImageLayerWriter::new( + let image_layer_writer = ImageLayerWriter::new( self.timeline.conf, self.timeline.timeline_id, self.timeline.tenant_shard_id, @@ -1164,47 +1166,34 @@ impl TimelineAdaptor { .await?; fail_point!("image-layer-writer-fail-before-finish", |_| { - Err(PageReconstructError::Other(anyhow::anyhow!( + Err(CreateImageLayersError::Other(anyhow::anyhow!( "failpoint image-layer-writer-fail-before-finish" ))) }); - let keyspace_ranges = self.get_keyspace(key_range, lsn, ctx).await?; - for range in &keyspace_ranges { - let mut key = range.start; - while key < range.end { - let img = match self.timeline.get(key, lsn, ctx).await { - Ok(img) => img, - Err(err) => { - // If we fail to reconstruct a VM or FSM page, we can zero the - // page without losing any actual user data. That seems better - // than failing repeatedly and getting stuck. - // - // We had a bug at one point, where we truncated the FSM and VM - // in the pageserver, but the Postgres didn't know about that - // and continued to generate incremental WAL records for pages - // that didn't exist in the pageserver. Trying to replay those - // WAL records failed to find the previous image of the page. - // This special case allows us to recover from that situation. - // See https://github.com/neondatabase/neon/issues/2601. - // - // Unfortunately we cannot do this for the main fork, or for - // any metadata keys, keys, as that would lead to actual data - // loss. 
- if is_rel_fsm_block_key(key) || is_rel_vm_block_key(key) { - warn!("could not reconstruct FSM or VM key {key}, filling with zeros: {err:?}"); - ZERO_PAGE.clone() - } else { - return Err(err); - } - } - }; - image_layer_writer.put_image(key, img, ctx).await?; - key = key.next(); - } - } - let image_layer = image_layer_writer.finish(&self.timeline, ctx).await?; - self.new_images.push(image_layer); + let keyspace = KeySpace { + ranges: self.get_keyspace(key_range, lsn, ctx).await?, + }; + // TODO set proper (stateful) start. The create_image_layer_for_rel_blocks function mostly + let start = Key::MIN; + let ImageLayerCreationOutcome { + image, + next_start_key: _, + } = self + .timeline + .create_image_layer_for_rel_blocks( + &keyspace, + image_layer_writer, + lsn, + ctx, + key_range.clone(), + start, + ) + .await?; + + if let Some(image_layer) = image { + self.new_images.push(image_layer); + } timer.stop_and_record(); From 6b3164269cc3a6c577428071986447aadffc0008 Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Thu, 23 May 2024 11:30:43 -0400 Subject: [PATCH 077/220] chore(pageserver): reduce logging related to image layers (#7864) * Reduce the logging level for create image layers of metadata keys. (question: is it possible to adjust logging levels at runtime?) * Do a info logging of image layers only after the layer is created. Now there are a lot of cases where we create the image layer writer but then discarding that image layer because it does not contain any key. Therefore, I changed the new image layer logging to trace, and create image layer logging to info. Signed-off-by: Alex Chi Z --- pageserver/src/tenant/storage_layer/image_layer.rs | 4 ++-- pageserver/src/tenant/timeline.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pageserver/src/tenant/storage_layer/image_layer.rs b/pageserver/src/tenant/storage_layer/image_layer.rs index becd1e7a6d..67b489ce0d 100644 --- a/pageserver/src/tenant/storage_layer/image_layer.rs +++ b/pageserver/src/tenant/storage_layer/image_layer.rs @@ -650,7 +650,7 @@ impl ImageLayerWriterInner { lsn, }, ); - info!("new image layer {path}"); + trace!("creating image layer {}", path); let mut file = { VirtualFile::open_with_options( &path, @@ -770,7 +770,7 @@ impl ImageLayerWriterInner { // FIXME: why not carry the virtualfile here, it supports renaming? let layer = Layer::finish_creating(self.conf, timeline, desc, &self.path)?; - trace!("created image layer {}", layer.local_path()); + info!("created image layer {}", layer.local_path()); Ok(layer) } diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 881e7f8f3c..262e1896ce 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -4347,7 +4347,7 @@ impl Timeline { let delta_file_accessed = reconstruct_state.get_delta_layers_visited(); let trigger_generation = delta_file_accessed as usize >= MAX_AUX_FILE_V2_DELTAS; - info!( + debug!( "generate image layers for metadata keys: trigger_generation={trigger_generation}, \ delta_file_accessed={delta_file_accessed}, total_kb_retrieved={total_kb_retrieved}, \ total_key_retrieved={total_key_retrieved}" From 7cf726e36e3a9dab2516a861b914eda3d2cac2c4 Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Thu, 23 May 2024 23:24:31 +0300 Subject: [PATCH 078/220] refactor(rtc): remove the duplicate IndexLayerMetadata (#7860) Once upon a time, we used to have duplicated types for runtime IndexPart and whatever we stored. 
Because of the serde fixes in #5335 we have no need for duplicated IndexPart type anymore, but the `IndexLayerMetadata` stayed. - remove the type - remove LayerFileMetadata::file_size() in favor of direct field access Split off from #7833. Cc: #3072. --- pageserver/compaction/src/simulator.rs | 4 +- pageserver/ctl/src/index_part.rs | 4 +- pageserver/src/disk_usage_eviction_task.rs | 4 +- .../src/tenant/remote_timeline_client.rs | 6 +- .../tenant/remote_timeline_client/download.rs | 2 +- .../tenant/remote_timeline_client/index.rs | 92 ++++++------------- pageserver/src/tenant/secondary/downloader.rs | 14 ++- pageserver/src/tenant/secondary/heatmap.rs | 6 +- pageserver/src/tenant/storage_layer/layer.rs | 6 +- pageserver/src/tenant/timeline.rs | 8 +- pageserver/src/tenant/timeline/init.rs | 6 +- pageserver/src/tenant/upload_queue.rs | 9 +- s3_scrubber/src/checks.rs | 4 +- s3_scrubber/src/tenant_snapshot.rs | 15 ++- 14 files changed, 65 insertions(+), 115 deletions(-) diff --git a/pageserver/compaction/src/simulator.rs b/pageserver/compaction/src/simulator.rs index a7c8bd5c1f..776c537d03 100644 --- a/pageserver/compaction/src/simulator.rs +++ b/pageserver/compaction/src/simulator.rs @@ -380,8 +380,8 @@ impl interface::CompactionLayer for MockLayer { } fn file_size(&self) -> u64 { match self { - MockLayer::Delta(this) => this.file_size(), - MockLayer::Image(this) => this.file_size(), + MockLayer::Delta(this) => this.file_size, + MockLayer::Image(this) => this.file_size, } } fn short_id(&self) -> String { diff --git a/pageserver/ctl/src/index_part.rs b/pageserver/ctl/src/index_part.rs index 0d010eb009..2998b5c732 100644 --- a/pageserver/ctl/src/index_part.rs +++ b/pageserver/ctl/src/index_part.rs @@ -2,7 +2,7 @@ use std::collections::HashMap; use anyhow::Context; use camino::Utf8PathBuf; -use pageserver::tenant::remote_timeline_client::index::IndexLayerMetadata; +use pageserver::tenant::remote_timeline_client::index::LayerFileMetadata; use pageserver::tenant::storage_layer::LayerName; use pageserver::tenant::{metadata::TimelineMetadata, IndexPart}; use utils::lsn::Lsn; @@ -19,7 +19,7 @@ pub(crate) async fn main(cmd: &IndexPartCmd) -> anyhow::Result<()> { let des: IndexPart = IndexPart::from_s3_bytes(&bytes).context("deserialize")?; #[derive(serde::Serialize)] struct Output<'a> { - layer_metadata: &'a HashMap, + layer_metadata: &'a HashMap, disk_consistent_lsn: Lsn, timeline_metadata: &'a TimelineMetadata, } diff --git a/pageserver/src/disk_usage_eviction_task.rs b/pageserver/src/disk_usage_eviction_task.rs index 7f25e49570..90bd4294bb 100644 --- a/pageserver/src/disk_usage_eviction_task.rs +++ b/pageserver/src/disk_usage_eviction_task.rs @@ -534,7 +534,7 @@ pub(crate) async fn disk_usage_eviction_task_iteration_impl( }); } EvictionLayer::Secondary(layer) => { - let file_size = layer.metadata.file_size(); + let file_size = layer.metadata.file_size; js.spawn(async move { layer @@ -641,7 +641,7 @@ impl EvictionLayer { pub(crate) fn get_file_size(&self) -> u64 { match self { Self::Attached(l) => l.layer_desc().file_size, - Self::Secondary(sl) => sl.metadata.file_size(), + Self::Secondary(sl) => sl.metadata.file_size, } } } diff --git a/pageserver/src/tenant/remote_timeline_client.rs b/pageserver/src/tenant/remote_timeline_client.rs index d3adae6841..23904b9da4 100644 --- a/pageserver/src/tenant/remote_timeline_client.rs +++ b/pageserver/src/tenant/remote_timeline_client.rs @@ -1192,7 +1192,7 @@ impl RemoteTimelineClient { &self.storage_impl, uploaded.local_path(), &remote_path, - 
uploaded.metadata().file_size(), + uploaded.metadata().file_size, cancel, ) .await @@ -1573,7 +1573,7 @@ impl RemoteTimelineClient { &self.storage_impl, local_path, &remote_path, - layer_metadata.file_size(), + layer_metadata.file_size, &self.cancel, ) .measure_remote_op( @@ -1768,7 +1768,7 @@ impl RemoteTimelineClient { UploadOp::UploadLayer(_, m) => ( RemoteOpFileKind::Layer, RemoteOpKind::Upload, - RemoteTimelineClientMetricsCallTrackSize::Bytes(m.file_size()), + RemoteTimelineClientMetricsCallTrackSize::Bytes(m.file_size), ), UploadOp::UploadMetadata(_, _) => ( RemoteOpFileKind::Index, diff --git a/pageserver/src/tenant/remote_timeline_client/download.rs b/pageserver/src/tenant/remote_timeline_client/download.rs index 70c5cae05e..bd75f980e8 100644 --- a/pageserver/src/tenant/remote_timeline_client/download.rs +++ b/pageserver/src/tenant/remote_timeline_client/download.rs @@ -84,7 +84,7 @@ pub async fn download_layer_file<'a>( ) .await?; - let expected = layer_metadata.file_size(); + let expected = layer_metadata.file_size; if expected != bytes_amount { return Err(DownloadError::Other(anyhow!( "According to layer file metadata should have downloaded {expected} bytes but downloaded {bytes_amount} bytes into file {temp_file_path:?}", diff --git a/pageserver/src/tenant/remote_timeline_client/index.rs b/pageserver/src/tenant/remote_timeline_client/index.rs index 032dda7ff3..f5d939c747 100644 --- a/pageserver/src/tenant/remote_timeline_client/index.rs +++ b/pageserver/src/tenant/remote_timeline_client/index.rs @@ -17,46 +17,6 @@ use pageserver_api::shard::ShardIndex; use utils::lsn::Lsn; -/// Metadata gathered for each of the layer files. -/// -/// Fields have to be `Option`s because remote [`IndexPart`]'s can be from different version, which -/// might have less or more metadata depending if upgrading or rolling back an upgrade. -#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] -//#[cfg_attr(test, derive(Default))] -pub struct LayerFileMetadata { - file_size: u64, - - pub(crate) generation: Generation, - - pub(crate) shard: ShardIndex, -} - -impl From<&'_ IndexLayerMetadata> for LayerFileMetadata { - fn from(other: &IndexLayerMetadata) -> Self { - LayerFileMetadata { - file_size: other.file_size, - generation: other.generation, - shard: other.shard, - } - } -} - -impl LayerFileMetadata { - pub fn new(file_size: u64, generation: Generation, shard: ShardIndex) -> Self { - LayerFileMetadata { - file_size, - generation, - shard, - } - } - - pub fn file_size(&self) -> u64 { - self.file_size - } -} - -// TODO seems like another part of the remote storage file format -// compatibility issue, see https://github.com/neondatabase/neon/issues/3072 /// In-memory representation of an `index_part.json` file /// /// Contains the data about all files in the timeline, present remotely and its metadata. @@ -77,7 +37,7 @@ pub struct IndexPart { /// /// Older versions of `IndexPart` will not have this property or have only a part of metadata /// that latest version stores. - pub layer_metadata: HashMap, + pub layer_metadata: HashMap, // 'disk_consistent_lsn' is a copy of the 'disk_consistent_lsn' in the metadata. 
// It's duplicated for convenience when reading the serialized structure, but is @@ -127,10 +87,7 @@ impl IndexPart { lineage: Lineage, last_aux_file_policy: Option, ) -> Self { - let layer_metadata = layers_and_metadata - .iter() - .map(|(k, v)| (k.to_owned(), IndexLayerMetadata::from(v))) - .collect(); + let layer_metadata = layers_and_metadata.clone(); Self { version: Self::LATEST_VERSION, @@ -194,9 +151,12 @@ impl From<&UploadQueueInitialized> for IndexPart { } } -/// Serialized form of [`LayerFileMetadata`]. -#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] -pub struct IndexLayerMetadata { +/// Metadata gathered for each of the layer files. +/// +/// Fields have to be `Option`s because remote [`IndexPart`]'s can be from different version, which +/// might have less or more metadata depending if upgrading or rolling back an upgrade. +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)] +pub struct LayerFileMetadata { pub file_size: u64, #[serde(default = "Generation::none")] @@ -208,12 +168,12 @@ pub struct IndexLayerMetadata { pub shard: ShardIndex, } -impl From<&LayerFileMetadata> for IndexLayerMetadata { - fn from(other: &LayerFileMetadata) -> Self { - IndexLayerMetadata { - file_size: other.file_size, - generation: other.generation, - shard: other.shard, +impl LayerFileMetadata { + pub fn new(file_size: u64, generation: Generation, shard: ShardIndex) -> Self { + LayerFileMetadata { + file_size, + generation, + shard, } } } @@ -307,12 +267,12 @@ mod tests { // note this is not verified, could be anything, but exists for humans debugging.. could be the git version instead? version: 1, layer_metadata: HashMap::from([ - ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".parse().unwrap(), IndexLayerMetadata { + ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".parse().unwrap(), LayerFileMetadata { file_size: 25600000, generation: Generation::none(), shard: ShardIndex::unsharded() }), - ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap(), IndexLayerMetadata { + ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap(), LayerFileMetadata { // serde_json should always parse this but this might be a double with jq for // example. file_size: 9007199254741001, @@ -349,12 +309,12 @@ mod tests { // note this is not verified, could be anything, but exists for humans debugging.. could be the git version instead? version: 1, layer_metadata: HashMap::from([ - ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".parse().unwrap(), IndexLayerMetadata { + ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".parse().unwrap(), LayerFileMetadata { file_size: 25600000, generation: Generation::none(), shard: ShardIndex::unsharded() }), - ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap(), IndexLayerMetadata { + ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap(), LayerFileMetadata { // serde_json should always parse this but this might be a double with jq for // example. 
file_size: 9007199254741001, @@ -392,12 +352,12 @@ mod tests { // note this is not verified, could be anything, but exists for humans debugging.. could be the git version instead? version: 2, layer_metadata: HashMap::from([ - ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".parse().unwrap(), IndexLayerMetadata { + ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".parse().unwrap(), LayerFileMetadata { file_size: 25600000, generation: Generation::none(), shard: ShardIndex::unsharded() }), - ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap(), IndexLayerMetadata { + ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap(), LayerFileMetadata { // serde_json should always parse this but this might be a double with jq for // example. file_size: 9007199254741001, @@ -480,12 +440,12 @@ mod tests { let expected = IndexPart { version: 4, layer_metadata: HashMap::from([ - ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".parse().unwrap(), IndexLayerMetadata { + ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".parse().unwrap(), LayerFileMetadata { file_size: 25600000, generation: Generation::none(), shard: ShardIndex::unsharded() }), - ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap(), IndexLayerMetadata { + ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap(), LayerFileMetadata { // serde_json should always parse this but this might be a double with jq for // example. 
file_size: 9007199254741001, @@ -522,12 +482,12 @@ mod tests { let expected = IndexPart { version: 5, layer_metadata: HashMap::from([ - ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000014EF420-00000000014EF499".parse().unwrap(), IndexLayerMetadata { + ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000014EF420-00000000014EF499".parse().unwrap(), LayerFileMetadata { file_size: 23289856, generation: Generation::new(1), shard: ShardIndex::unsharded(), }), - ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000014EF499-00000000015A7619".parse().unwrap(), IndexLayerMetadata { + ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000014EF499-00000000015A7619".parse().unwrap(), LayerFileMetadata { file_size: 1015808, generation: Generation::new(1), shard: ShardIndex::unsharded(), @@ -569,12 +529,12 @@ mod tests { let expected = IndexPart { version: 6, layer_metadata: HashMap::from([ - ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".parse().unwrap(), IndexLayerMetadata { + ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".parse().unwrap(), LayerFileMetadata { file_size: 25600000, generation: Generation::none(), shard: ShardIndex::unsharded() }), - ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap(), IndexLayerMetadata { + ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap(), LayerFileMetadata { // serde_json should always parse this but this might be a double with jq for // example. file_size: 9007199254741001, diff --git a/pageserver/src/tenant/secondary/downloader.rs b/pageserver/src/tenant/secondary/downloader.rs index de30c4dcb6..789f1a0fa9 100644 --- a/pageserver/src/tenant/secondary/downloader.rs +++ b/pageserver/src/tenant/secondary/downloader.rs @@ -716,7 +716,7 @@ impl<'a> TenantDownloader<'a> { let mut layer_byte_count: u64 = timeline_state .on_disk_layers .values() - .map(|l| l.metadata.file_size()) + .map(|l| l.metadata.file_size) .sum(); // Remove on-disk layers that are no longer present in heatmap @@ -727,7 +727,7 @@ impl<'a> TenantDownloader<'a> { .get(layer_file_name) .unwrap() .metadata - .file_size(); + .file_size; let local_path = local_layer_path( self.conf, @@ -886,9 +886,7 @@ impl<'a> TenantDownloader<'a> { } } - if on_disk.metadata != LayerFileMetadata::from(&layer.metadata) - || on_disk.access_time != layer.access_time - { + if on_disk.metadata != layer.metadata || on_disk.access_time != layer.access_time { // We already have this layer on disk. Update its access time. 
tracing::debug!( "Access time updated for layer {}: {} -> {}", @@ -979,7 +977,7 @@ impl<'a> TenantDownloader<'a> { tenant_shard_id, &timeline.timeline_id, t.name, - LayerFileMetadata::from(&t.metadata), + t.metadata.clone(), t.access_time, local_path, )); @@ -1024,7 +1022,7 @@ impl<'a> TenantDownloader<'a> { *tenant_shard_id, *timeline_id, &layer.name, - &LayerFileMetadata::from(&layer.metadata), + &layer.metadata, &local_path, &self.secondary_state.cancel, ctx, @@ -1185,7 +1183,7 @@ async fn init_timeline_state( tenant_shard_id, &heatmap.timeline_id, name, - LayerFileMetadata::from(&remote_meta.metadata), + remote_meta.metadata.clone(), remote_meta.access_time, file_path, ), diff --git a/pageserver/src/tenant/secondary/heatmap.rs b/pageserver/src/tenant/secondary/heatmap.rs index 2da4a3b9d5..166483ba5d 100644 --- a/pageserver/src/tenant/secondary/heatmap.rs +++ b/pageserver/src/tenant/secondary/heatmap.rs @@ -1,6 +1,6 @@ use std::time::SystemTime; -use crate::tenant::{remote_timeline_client::index::IndexLayerMetadata, storage_layer::LayerName}; +use crate::tenant::{remote_timeline_client::index::LayerFileMetadata, storage_layer::LayerName}; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, DisplayFromStr, TimestampSeconds}; @@ -38,7 +38,7 @@ pub(crate) struct HeatMapTimeline { #[derive(Serialize, Deserialize)] pub(crate) struct HeatMapLayer { pub(super) name: LayerName, - pub(super) metadata: IndexLayerMetadata, + pub(super) metadata: LayerFileMetadata, #[serde_as(as = "TimestampSeconds")] pub(super) access_time: SystemTime, @@ -49,7 +49,7 @@ pub(crate) struct HeatMapLayer { impl HeatMapLayer { pub(crate) fn new( name: LayerName, - metadata: IndexLayerMetadata, + metadata: LayerFileMetadata, access_time: SystemTime, ) -> Self { Self { diff --git a/pageserver/src/tenant/storage_layer/layer.rs b/pageserver/src/tenant/storage_layer/layer.rs index 8c64621710..b2f3bdb552 100644 --- a/pageserver/src/tenant/storage_layer/layer.rs +++ b/pageserver/src/tenant/storage_layer/layer.rs @@ -161,7 +161,7 @@ impl Layer { timeline.tenant_shard_id, timeline.timeline_id, file_name, - metadata.file_size(), + metadata.file_size, ); let access_stats = LayerAccessStats::for_loading_layer(LayerResidenceStatus::Evicted); @@ -194,7 +194,7 @@ impl Layer { timeline.tenant_shard_id, timeline.timeline_id, file_name, - metadata.file_size(), + metadata.file_size, ); let access_stats = LayerAccessStats::for_loading_layer(LayerResidenceStatus::Resident); @@ -227,7 +227,7 @@ impl Layer { timeline .metrics - .resident_physical_size_add(metadata.file_size()); + .resident_physical_size_add(metadata.file_size); ResidentLayer { downloaded, owner } } diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 262e1896ce..342fc4fc59 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -1424,7 +1424,7 @@ impl Timeline { let layer_map = guard.layer_map(); let mut size = 0; for l in layer_map.iter_historic_layers() { - size += l.file_size(); + size += l.file_size; } size } @@ -2516,7 +2516,7 @@ impl Timeline { Ok(UseRemote { local, remote }) => { // Remote is authoritative, but we may still choose to retain // the local file if the contents appear to match - if local.metadata.file_size() == remote.file_size() { + if local.metadata.file_size == remote.file_size { // Use the local file, but take the remote metadata so that we pick up // the correct generation. 
UseLocal(LocalLayerFileMetadata { @@ -2556,7 +2556,7 @@ impl Timeline { let layer = match decision { UseLocal(local) => { - total_physical_size += local.metadata.file_size(); + total_physical_size += local.metadata.file_size; Layer::for_resident(conf, &this, local.local_path, name, local.metadata) .drop_eviction_guard() } @@ -3071,7 +3071,7 @@ impl Timeline { HeatMapLayer::new( layer.layer_desc().layer_name(), - (&layer.metadata()).into(), + layer.metadata(), last_activity_ts, ) }); diff --git a/pageserver/src/tenant/timeline/init.rs b/pageserver/src/tenant/timeline/init.rs index feadc79e5e..0cbaf39555 100644 --- a/pageserver/src/tenant/timeline/init.rs +++ b/pageserver/src/tenant/timeline/init.rs @@ -157,7 +157,7 @@ pub(super) fn reconcile( .map(|ip| ip.layer_metadata.iter()) .into_iter() .flatten() - .map(|(name, metadata)| (name, LayerFileMetadata::from(metadata))) + .map(|(name, metadata)| (name, metadata.clone())) .for_each(|(name, metadata)| { if let Some(existing) = discovered.get_mut(name) { existing.1 = Some(metadata); @@ -200,8 +200,8 @@ pub(super) fn cleanup_local_file_for_remote( local: &LocalLayerFileMetadata, remote: &LayerFileMetadata, ) -> anyhow::Result<()> { - let local_size = local.metadata.file_size(); - let remote_size = remote.file_size(); + let local_size = local.metadata.file_size; + let remote_size = remote.file_size; let path = &local.local_path; let file_name = path.file_name().expect("must be file path"); diff --git a/pageserver/src/tenant/upload_queue.rs b/pageserver/src/tenant/upload_queue.rs index c0cc8f3124..02f87303d1 100644 --- a/pageserver/src/tenant/upload_queue.rs +++ b/pageserver/src/tenant/upload_queue.rs @@ -213,10 +213,7 @@ impl UploadQueue { let mut files = HashMap::with_capacity(index_part.layer_metadata.len()); for (layer_name, layer_metadata) in &index_part.layer_metadata { - files.insert( - layer_name.to_owned(), - LayerFileMetadata::from(layer_metadata), - ); + files.insert(layer_name.to_owned(), layer_metadata.clone()); } info!( @@ -322,9 +319,7 @@ impl std::fmt::Display for UploadOp { write!( f, "UploadLayer({}, size={:?}, gen={:?})", - layer, - metadata.file_size(), - metadata.generation + layer, metadata.file_size, metadata.generation ) } UploadOp::UploadMetadata(_, lsn) => { diff --git a/s3_scrubber/src/checks.rs b/s3_scrubber/src/checks.rs index dd64a0a98f..134afa53da 100644 --- a/s3_scrubber/src/checks.rs +++ b/s3_scrubber/src/checks.rs @@ -2,7 +2,7 @@ use std::collections::{HashMap, HashSet}; use anyhow::Context; use aws_sdk_s3::{types::ObjectIdentifier, Client}; -use pageserver::tenant::remote_timeline_client::index::IndexLayerMetadata; +use pageserver::tenant::remote_timeline_client::index::LayerFileMetadata; use pageserver_api::shard::ShardIndex; use tracing::{error, info, warn}; use utils::generation::Generation; @@ -208,7 +208,7 @@ impl TenantObjectListing { &mut self, timeline_id: TimelineId, layer_file: &LayerName, - metadata: &IndexLayerMetadata, + metadata: &LayerFileMetadata, ) -> bool { let Some(shard_tl) = self.shard_timelines.get_mut(&(metadata.shard, timeline_id)) else { return false; diff --git a/s3_scrubber/src/tenant_snapshot.rs b/s3_scrubber/src/tenant_snapshot.rs index a24a1e92ae..450b337235 100644 --- a/s3_scrubber/src/tenant_snapshot.rs +++ b/s3_scrubber/src/tenant_snapshot.rs @@ -11,7 +11,7 @@ use async_stream::stream; use aws_sdk_s3::Client; use camino::Utf8PathBuf; use futures::{StreamExt, TryStreamExt}; -use pageserver::tenant::remote_timeline_client::index::IndexLayerMetadata; +use 
pageserver::tenant::remote_timeline_client::index::LayerFileMetadata; use pageserver::tenant::storage_layer::LayerName; use pageserver::tenant::IndexPart; use pageserver_api::shard::TenantShardId; @@ -49,8 +49,8 @@ impl SnapshotDownloader { &self, ttid: TenantShardTimelineId, layer_name: LayerName, - layer_metadata: IndexLayerMetadata, - ) -> anyhow::Result<(LayerName, IndexLayerMetadata)> { + layer_metadata: LayerFileMetadata, + ) -> anyhow::Result<(LayerName, LayerFileMetadata)> { // Note this is local as in a local copy of S3 data, not local as in the pageserver's local format. They use // different layer names (remote-style has the generation suffix) let local_path = self.output_path.join(format!( @@ -110,7 +110,7 @@ impl SnapshotDownloader { async fn download_layers( &self, ttid: TenantShardTimelineId, - layers: Vec<(LayerName, IndexLayerMetadata)>, + layers: Vec<(LayerName, LayerFileMetadata)>, ) -> anyhow::Result<()> { let layer_count = layers.len(); tracing::info!("Downloading {} layers for timeline {ttid}...", layer_count); @@ -161,10 +161,7 @@ impl SnapshotDownloader { ttid: TenantShardTimelineId, index_part: Box, index_part_generation: Generation, - ancestor_layers: &mut HashMap< - TenantShardTimelineId, - HashMap, - >, + ancestor_layers: &mut HashMap>, ) -> anyhow::Result<()> { let index_bytes = serde_json::to_string(&index_part).unwrap(); @@ -234,7 +231,7 @@ impl SnapshotDownloader { // happen if this tenant has been split at some point) let mut ancestor_layers: HashMap< TenantShardTimelineId, - HashMap, + HashMap, > = Default::default(); for shard in shards.into_iter().filter(|s| s.shard_count == shard_count) { From ea2e830707bcd5cf8b7fca25ae598d2937994c9e Mon Sep 17 00:00:00 2001 From: Sasha Krassovsky Date: Thu, 23 May 2024 13:35:59 -0700 Subject: [PATCH 079/220] Remove apostrophe (#7868) ## Problem ## Summary of changes ## Checklist before requesting a review - [ ] I have performed a self-review of my code. - [ ] If it is a core feature, I have added thorough tests. - [ ] Do we need to implement analytics? if so did you add the relevant metrics to the dashboard? - [ ] If this PR requires public announcement, mark it with /release-notes label and add several sentences in this section. ## Checklist before merging - [ ] Do not forget to reformat commit message to not include the above checklist --- vm-image-spec.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vm-image-spec.yaml b/vm-image-spec.yaml index 0f9d56e466..484a86fc21 100644 --- a/vm-image-spec.yaml +++ b/vm-image-spec.yaml @@ -320,7 +320,7 @@ files: - metric_name: wal_is_lost type: gauge - help: 'Whether or not the replication slot\'s wal_status is lost' + help: 'Whether or not the replication slot wal_status is lost' key_labels: - slot_name values: [wal_status_is_lost] From 0e4f1826805d040a23c25c54f3993a942755dbc2 Mon Sep 17 00:00:00 2001 From: MMeent Date: Thu, 23 May 2024 23:26:42 +0200 Subject: [PATCH 080/220] Rework PageStream connection state handling: (#7611) * Make PS connection startup use async APIs This allows for improved query cancellation when we start connections * Make PS connections have per-shard connection retry state. Previously they shared global backoff state, which is bad for quickly getting all connections started and/or back online. * Make sure we clean up most connection state on failed connections. Previously, we could technically leak some resources that we'd otherwise clean up. Now, the resources are correctly cleaned up. 
* pagestore_smgr.c now PANICs on unexpected response message types. Unexpected responses are likely a symptom of having a desynchronized view of the connection state. As a desynchronized connection state can cause corruption, we PANIC, as we don't know what data may have been written to buffers: the only solution is to fail fast & hope we didn't write wrong data. * Catch errors in sync pagestream request handling. Previously, if a query was cancelled after a message was sent to the pageserver, but before the data was received, the backend could forget that it sent the synchronous request, and let others deal with the repercussions. This could then lead to incorrect responses, or errors such as "unexpected response from page server with tag 0x68" --- pageserver/src/page_service.rs | 11 + pgxn/neon/libpagestore.c | 529 ++++++++++++------ pgxn/neon/pagestore_smgr.c | 91 ++- .../regress/test_pg_query_cancellation.py | 282 ++++++++++ 4 files changed, 729 insertions(+), 184 deletions(-) create mode 100644 test_runner/regress/test_pg_query_cancellation.py diff --git a/pageserver/src/page_service.rs b/pageserver/src/page_service.rs index d250864fd6..e9651165b1 100644 --- a/pageserver/src/page_service.rs +++ b/pageserver/src/page_service.rs @@ -260,6 +260,8 @@ async fn page_service_conn_main( socket.set_timeout(Some(std::time::Duration::from_millis(socket_timeout_ms))); let socket = std::pin::pin!(socket); + fail::fail_point!("ps::connection-start::pre-login"); + // XXX: pgbackend.run() should take the connection_ctx, // and create a child per-query context when it invokes process_query. // But it's in a shared crate, so, we store connection_ctx inside PageServerHandler @@ -603,6 +605,7 @@ impl PageServerHandler { }; trace!("query: {copy_data_bytes:?}"); + fail::fail_point!("ps::handle-pagerequest-message"); // Trace request if needed if let Some(t) = tracer.as_mut() { @@ -617,6 +620,7 @@ impl PageServerHandler { let (response, span) = match neon_fe_msg { PagestreamFeMessage::Exists(req) => { + fail::fail_point!("ps::handle-pagerequest-message::exists"); let span = tracing::info_span!("handle_get_rel_exists_request", rel = %req.rel, req_lsn = %req.request_lsn); ( self.handle_get_rel_exists_request(tenant_id, timeline_id, &req, &ctx) @@ -626,6 +630,7 @@ impl PageServerHandler { ) } PagestreamFeMessage::Nblocks(req) => { + fail::fail_point!("ps::handle-pagerequest-message::nblocks"); let span = tracing::info_span!("handle_get_nblocks_request", rel = %req.rel, req_lsn = %req.request_lsn); ( self.handle_get_nblocks_request(tenant_id, timeline_id, &req, &ctx) @@ -635,6 +640,7 @@ impl PageServerHandler { ) } PagestreamFeMessage::GetPage(req) => { + fail::fail_point!("ps::handle-pagerequest-message::getpage"); // shard_id is filled in by the handler let span = tracing::info_span!("handle_get_page_at_lsn_request", rel = %req.rel, blkno = %req.blkno, req_lsn = %req.request_lsn); ( @@ -645,6 +651,7 @@ impl PageServerHandler { ) } PagestreamFeMessage::DbSize(req) => { + fail::fail_point!("ps::handle-pagerequest-message::dbsize"); let span = tracing::info_span!("handle_db_size_request", dbnode = %req.dbnode, req_lsn = %req.request_lsn); ( self.handle_db_size_request(tenant_id, timeline_id, &req, &ctx) @@ -654,6 +661,7 @@ impl PageServerHandler { ) } PagestreamFeMessage::GetSlruSegment(req) => { + fail::fail_point!("ps::handle-pagerequest-message::slrusegment"); let span = tracing::info_span!("handle_get_slru_segment_request", kind = %req.kind, segno = %req.segno, req_lsn = %req.request_lsn); ( 
self.handle_get_slru_segment_request(tenant_id, timeline_id, &req, &ctx) @@ -1505,6 +1513,7 @@ where _pgb: &mut PostgresBackend, _sm: &FeStartupPacket, ) -> Result<(), QueryError> { + fail::fail_point!("ps::connection-start::startup-packet"); Ok(()) } @@ -1519,6 +1528,8 @@ where Err(QueryError::SimulatedConnectionError) }); + fail::fail_point!("ps::connection-start::process-query"); + let ctx = self.connection_ctx.attached_child(); debug!("process query {query_string:?}"); let parts = query_string.split_whitespace().collect::>(); diff --git a/pgxn/neon/libpagestore.c b/pgxn/neon/libpagestore.c index f5ce2caff3..a9c8d59c3a 100644 --- a/pgxn/neon/libpagestore.c +++ b/pgxn/neon/libpagestore.c @@ -51,7 +51,6 @@ int flush_every_n_requests = 8; int neon_protocol_version = 2; -static int n_reconnect_attempts = 0; static int max_reconnect_attempts = 60; static int stripe_size; @@ -95,18 +94,44 @@ static shmem_startup_hook_type prev_shmem_startup_hook; static PagestoreShmemState *pagestore_shared; static uint64 pagestore_local_counter = 0; +typedef enum PSConnectionState { + PS_Disconnected, /* no connection yet */ + PS_Connecting_Startup, /* connection starting up */ + PS_Connecting_PageStream, /* negotiating pagestream */ + PS_Connected, /* connected, pagestream established */ +} PSConnectionState; + /* This backend's per-shard connections */ typedef struct { - PGconn *conn; + TimestampTz last_connect_time; /* read-only debug value */ + TimestampTz last_reconnect_time; + uint32 delay_us; + int n_reconnect_attempts; /*--- - * WaitEventSet containing: - * - WL_SOCKET_READABLE on 'conn' - * - WL_LATCH_SET on MyLatch, and - * - WL_EXIT_ON_PM_DEATH. + * Pageserver connection state, i.e. + * disconnected: conn == NULL, wes == NULL; + * conn_startup: connection initiated, waiting for connection establishing + * conn_ps: PageStream query sent, waiting for confirmation + * connected: PageStream established */ - WaitEventSet *wes; + PSConnectionState state; + PGconn *conn; + /*--- + * WaitEventSet containing: + * - WL_SOCKET_READABLE on 'conn' + * - WL_LATCH_SET on MyLatch, and + * - WL_EXIT_ON_PM_DEATH. + */ + WaitEventSet *wes_read; + /*--- + * WaitEventSet containing: + * - WL_SOCKET_WRITABLE on 'conn' + * - WL_LATCH_SET on MyLatch, and + * - WL_EXIT_ON_PM_DEATH. + */ + WaitEventSet *wes_write; } PageServer; static PageServer page_servers[MAX_SHARDS]; @@ -303,119 +328,269 @@ get_shard_number(BufferTag *tag) return hash % n_shards; } +static inline void +CLEANUP_AND_DISCONNECT(PageServer *shard) +{ + if (shard->wes_read) + { + FreeWaitEventSet(shard->wes_read); + shard->wes_read = NULL; + } + if (shard->wes_write) + { + FreeWaitEventSet(shard->wes_write); + shard->wes_write = NULL; + } + if (shard->conn) + { + PQfinish(shard->conn); + shard->conn = NULL; + } + + shard->state = PS_Disconnected; +} + +/* + * Connect to a pageserver, or continue to try to connect if we're yet to + * complete the connection (e.g. due to receiving an earlier cancellation + * during connection start). + * Returns true if successfully connected; false if the connection failed. + * + * Throws errors in unrecoverable situations, or when this backend's query + * is canceled. 
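+ *
+ * The connection is driven by a small per-shard state machine: each call
+ * resumes from the shard's current state and falls through on success,
+ * PS_Disconnected -> PS_Connecting_Startup -> PS_Connecting_PageStream
+ * -> PS_Connected.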
+ */ static bool pageserver_connect(shardno_t shard_no, int elevel) { - char *query; - int ret; - const char *keywords[3]; - const char *values[3]; - int n; - PGconn *conn; - WaitEventSet *wes; + PageServer *shard = &page_servers[shard_no]; char connstr[MAX_PAGESERVER_CONNSTRING_SIZE]; - static TimestampTz last_connect_time = 0; - static uint64_t delay_us = MIN_RECONNECT_INTERVAL_USEC; - TimestampTz now; - uint64_t us_since_last_connect; - bool broke_from_loop = false; - - Assert(page_servers[shard_no].conn == NULL); - /* * Get the connection string for this shard. If the shard map has been * updated since we last looked, this will also disconnect any existing * pageserver connections as a side effect. + * Note that connstr is used both during connection start, and when we + * log the successful connection. */ load_shard_map(shard_no, connstr, NULL); - now = GetCurrentTimestamp(); - us_since_last_connect = now - last_connect_time; - if (us_since_last_connect < MAX_RECONNECT_INTERVAL_USEC) + switch (shard->state) { - pg_usleep(delay_us); - delay_us *= 2; - } - else + case PS_Disconnected: { - delay_us = MIN_RECONNECT_INTERVAL_USEC; - } + const char *keywords[3]; + const char *values[3]; + int n_pgsql_params; + TimestampTz now; + int64 us_since_last_attempt; - /* - * Connect using the connection string we got from the - * neon.pageserver_connstring GUC. If the NEON_AUTH_TOKEN environment - * variable was set, use that as the password. - * - * The connection options are parsed in the order they're given, so when - * we set the password before the connection string, the connection string - * can override the password from the env variable. Seems useful, although - * we don't currently use that capability anywhere. - */ - n = 0; - if (neon_auth_token) - { - keywords[n] = "password"; - values[n] = neon_auth_token; - n++; + /* Make sure we start with a clean slate */ + CLEANUP_AND_DISCONNECT(shard); + + neon_shard_log(shard_no, DEBUG5, "Connection state: Disconnected"); + + now = GetCurrentTimestamp(); + us_since_last_attempt = (int64) (now - shard->last_reconnect_time); + shard->last_reconnect_time = now; + + /* + * If we did other tasks between reconnect attempts, then we won't + * need to wait as long as a full delay. + */ + if (us_since_last_attempt < shard->delay_us) + { + pg_usleep(shard->delay_us - us_since_last_attempt); + } + + /* update the delay metric */ + shard->delay_us = Min(shard->delay_us * 2, MAX_RECONNECT_INTERVAL_USEC); + + /* + * Connect using the connection string we got from the + * neon.pageserver_connstring GUC. If the NEON_AUTH_TOKEN environment + * variable was set, use that as the password. + * + * The connection options are parsed in the order they're given, so when + * we set the password before the connection string, the connection string + * can override the password from the env variable. Seems useful, although + * we don't currently use that capability anywhere. 
+ */ + keywords[0] = "dbname"; + values[0] = connstr; + n_pgsql_params = 1; + + if (neon_auth_token) + { + keywords[1] = "password"; + values[1] = neon_auth_token; + n_pgsql_params++; + } + + keywords[n_pgsql_params] = NULL; + values[n_pgsql_params] = NULL; + + shard->conn = PQconnectStartParams(keywords, values, 1); + if (!shard->conn) + { + neon_shard_log(shard_no, elevel, "Failed to connect to pageserver: out of memory"); + return false; + } + + shard->wes_read = CreateWaitEventSet(TopMemoryContext, 3); + AddWaitEventToSet(shard->wes_read, WL_LATCH_SET, PGINVALID_SOCKET, + MyLatch, NULL); + AddWaitEventToSet(shard->wes_read, WL_EXIT_ON_PM_DEATH, PGINVALID_SOCKET, + NULL, NULL); + AddWaitEventToSet(shard->wes_read, WL_SOCKET_READABLE, PQsocket(shard->conn), NULL, NULL); + + shard->wes_write = CreateWaitEventSet(TopMemoryContext, 3); + AddWaitEventToSet(shard->wes_write, WL_LATCH_SET, PGINVALID_SOCKET, + MyLatch, NULL); + AddWaitEventToSet(shard->wes_write, WL_EXIT_ON_PM_DEATH, PGINVALID_SOCKET, + NULL, NULL); + AddWaitEventToSet(shard->wes_write, WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE, + PQsocket(shard->conn), + NULL, NULL); + + shard->state = PS_Connecting_Startup; + /* fallthrough */ } - keywords[n] = "dbname"; - values[n] = connstr; - n++; - keywords[n] = NULL; - values[n] = NULL; - n++; - conn = PQconnectdbParams(keywords, values, 1); - last_connect_time = GetCurrentTimestamp(); - - if (PQstatus(conn) == CONNECTION_BAD) + case PS_Connecting_Startup: { - char *msg = pchomp(PQerrorMessage(conn)); + char *pagestream_query; + int ps_send_query_ret; + bool connected = false; - PQfinish(conn); + neon_shard_log(shard_no, DEBUG5, "Connection state: Connecting_Startup"); - ereport(elevel, - (errcode(ERRCODE_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION), - errmsg(NEON_TAG "[shard %d] could not establish connection to pageserver", shard_no), - errdetail_internal("%s", msg))); - pfree(msg); - return false; - } - switch (neon_protocol_version) - { + do + { + WaitEvent event; + int poll_result = PQconnectPoll(shard->conn); + + switch (poll_result) + { + default: /* unknown/unused states are handled as a failed connection */ + case PGRES_POLLING_FAILED: + { + char *pqerr = PQerrorMessage(shard->conn); + char *msg = NULL; + neon_shard_log(shard_no, DEBUG5, "POLLING_FAILED"); + + if (pqerr) + msg = pchomp(pqerr); + + CLEANUP_AND_DISCONNECT(shard); + + if (msg) + { + neon_shard_log(shard_no, elevel, + "could not connect to pageserver: %s", + msg); + pfree(msg); + } + else + neon_shard_log(shard_no, elevel, + "could not connect to pageserver"); + + return false; + } + case PGRES_POLLING_READING: + /* Sleep until there's something to do */ + (void) WaitEventSetWait(shard->wes_read, -1L, &event, 1, + PG_WAIT_EXTENSION); + ResetLatch(MyLatch); + + /* query cancellation, backend shutdown */ + CHECK_FOR_INTERRUPTS(); + + /* PQconnectPoll() handles the socket polling state updates */ + + break; + case PGRES_POLLING_WRITING: + /* Sleep until there's something to do */ + (void) WaitEventSetWait(shard->wes_write, -1L, &event, 1, + PG_WAIT_EXTENSION); + ResetLatch(MyLatch); + + /* query cancellation, backend shutdown */ + CHECK_FOR_INTERRUPTS(); + + /* PQconnectPoll() handles the socket polling state updates */ + + break; + case PGRES_POLLING_OK: + neon_shard_log(shard_no, DEBUG5, "POLLING_OK"); + connected = true; + break; + } + } + while (!connected); + + /* No more polling needed; connection succeeded */ + shard->last_connect_time = GetCurrentTimestamp(); + + switch (neon_protocol_version) + { case 2: - 
query = psprintf("pagestream_v2 %s %s", neon_tenant, neon_timeline); + pagestream_query = psprintf("pagestream_v2 %s %s", neon_tenant, neon_timeline); break; case 1: - query = psprintf("pagestream %s %s", neon_tenant, neon_timeline); + pagestream_query = psprintf("pagestream %s %s", neon_tenant, neon_timeline); break; default: elog(ERROR, "unexpected neon_protocol_version %d", neon_protocol_version); - } - ret = PQsendQuery(conn, query); - pfree(query); - if (ret != 1) - { - PQfinish(conn); - neon_shard_log(shard_no, elevel, "could not send pagestream command to pageserver"); - return false; - } + } - wes = CreateWaitEventSet(TopMemoryContext, 3); - AddWaitEventToSet(wes, WL_LATCH_SET, PGINVALID_SOCKET, - MyLatch, NULL); - AddWaitEventToSet(wes, WL_EXIT_ON_PM_DEATH, PGINVALID_SOCKET, - NULL, NULL); - AddWaitEventToSet(wes, WL_SOCKET_READABLE, PQsocket(conn), NULL, NULL); + if (PQstatus(shard->conn) == CONNECTION_BAD) + { + char *msg = pchomp(PQerrorMessage(shard->conn)); - PG_TRY(); + CLEANUP_AND_DISCONNECT(shard); + + ereport(elevel, + (errcode(ERRCODE_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION), + errmsg(NEON_TAG "[shard %d] could not establish connection to pageserver", shard_no), + errdetail_internal("%s", msg))); + pfree(msg); + return false; + } + + ps_send_query_ret = PQsendQuery(shard->conn, pagestream_query); + pfree(pagestream_query); + if (ps_send_query_ret != 1) + { + CLEANUP_AND_DISCONNECT(shard); + + neon_shard_log(shard_no, elevel, "could not send pagestream command to pageserver"); + return false; + } + + shard->state = PS_Connecting_PageStream; + /* fallthrough */ + } + case PS_Connecting_PageStream: { - while (PQisBusy(conn)) + neon_shard_log(shard_no, DEBUG5, "Connection state: Connecting_PageStream"); + + if (PQstatus(shard->conn) == CONNECTION_BAD) + { + char *msg = pchomp(PQerrorMessage(shard->conn)); + CLEANUP_AND_DISCONNECT(shard); + ereport(elevel, + (errcode(ERRCODE_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION), + errmsg(NEON_TAG "[shard %d] could not establish connection to pageserver", shard_no), + errdetail_internal("%s", msg))); + pfree(msg); + return false; + } + + while (PQisBusy(shard->conn)) { WaitEvent event; /* Sleep until there's something to do */ - (void) WaitEventSetWait(wes, -1L, &event, 1, PG_WAIT_EXTENSION); + (void) WaitEventSetWait(shard->wes_read, -1L, &event, 1, PG_WAIT_EXTENSION); ResetLatch(MyLatch); CHECK_FOR_INTERRUPTS(); @@ -423,40 +598,37 @@ pageserver_connect(shardno_t shard_no, int elevel) /* Data available in socket? */ if (event.events & WL_SOCKET_READABLE) { - if (!PQconsumeInput(conn)) + if (!PQconsumeInput(shard->conn)) { - char *msg = pchomp(PQerrorMessage(conn)); - - PQfinish(conn); - FreeWaitEventSet(wes); + char *msg = pchomp(PQerrorMessage(shard->conn)); + CLEANUP_AND_DISCONNECT(shard); neon_shard_log(shard_no, elevel, "could not complete handshake with pageserver: %s", msg); - /* Returning from inside PG_TRY is bad, so we break/return later */ - broke_from_loop = true; - break; + pfree(msg); + return false; } } } - } - PG_CATCH(); - { - PQfinish(conn); - FreeWaitEventSet(wes); - PG_RE_THROW(); - } - PG_END_TRY(); - if (broke_from_loop) - { - return false; + shard->state = PS_Connected; + /* fallthrough */ } + case PS_Connected: + /* + * We successfully connected. Future connections to this PageServer + * will do fast retries again, with exponential backoff. 
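+ * (delay_us doubles after each failed attempt, capped at
+ * MAX_RECONNECT_INTERVAL_USEC; resetting it here restores the fast
+ * MIN_RECONNECT_INTERVAL_USEC retry for the next disconnect.)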
+ */ + shard->delay_us = MIN_RECONNECT_INTERVAL_USEC; - neon_shard_log(shard_no, LOG, "libpagestore: connected to '%s' with protocol version %d", connstr, neon_protocol_version); - page_servers[shard_no].conn = conn; - page_servers[shard_no].wes = wes; - - return true; + neon_shard_log(shard_no, DEBUG5, "Connection state: Connected"); + neon_shard_log(shard_no, LOG, "libpagestore: connected to '%s' with protocol version %d", connstr, neon_protocol_version); + return true; + default: + neon_shard_log(shard_no, ERROR, "libpagestore: invalid connection state %d", shard->state); + } + /* This shouldn't be hit */ + Assert(false); } /* @@ -476,7 +648,7 @@ retry: WaitEvent event; /* Sleep until there's something to do */ - (void) WaitEventSetWait(page_servers[shard_no].wes, -1L, &event, 1, PG_WAIT_EXTENSION); + (void) WaitEventSetWait(page_servers[shard_no].wes_read, -1L, &event, 1, PG_WAIT_EXTENSION); ResetLatch(MyLatch); CHECK_FOR_INTERRUPTS(); @@ -502,7 +674,8 @@ retry: /* * Reset prefetch and drop connection to the shard. - * It also drops connection to all other shards involved in prefetch. + * It also drops connection to all other shards involved in prefetch, through + * prefetch_on_ps_disconnect(). */ static void pageserver_disconnect(shardno_t shard_no) @@ -512,9 +685,6 @@ pageserver_disconnect(shardno_t shard_no) * whole prefetch queue, even for other pageservers. It should not * cause big problems, because connection loss is supposed to be a * rare event. - * - * Prefetch state should be reset even if page_servers[shard_no].conn == NULL, - * because prefetch request may be registered before connection is established. */ prefetch_on_ps_disconnect(); @@ -527,37 +697,36 @@ pageserver_disconnect(shardno_t shard_no) static void pageserver_disconnect_shard(shardno_t shard_no) { + PageServer *shard = &page_servers[shard_no]; /* * If anything goes wrong while we were sending a request, it's not clear * what state the connection is in. For example, if we sent the request * but didn't receive a response yet, we might receive the response some * time later after we have already sent a new unrelated request. Close * the connection to avoid getting confused. + * Similarly, even when we're in PS_DISCONNECTED, we may have junk to + * clean up: It is possible that we encountered an error allocating any + * of the wait event sets or the psql connection, or failed when we tried + * to attach wait events to the WaitEventSets. 
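+ * CLEANUP_AND_DISCONNECT copes with any of those partially-initialized
+ * states: it only frees the wait event sets and the connection if they
+ * were actually created.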
*/ - if (page_servers[shard_no].conn) - { - neon_shard_log(shard_no, LOG, "dropping connection to page server due to error"); - PQfinish(page_servers[shard_no].conn); - page_servers[shard_no].conn = NULL; - } - if (page_servers[shard_no].wes != NULL) - { - FreeWaitEventSet(page_servers[shard_no].wes); - page_servers[shard_no].wes = NULL; - } + CLEANUP_AND_DISCONNECT(shard); + + shard->state = PS_Disconnected; } static bool pageserver_send(shardno_t shard_no, NeonRequest *request) { StringInfoData req_buff; - PGconn *pageserver_conn = page_servers[shard_no].conn; + PageServer *shard = &page_servers[shard_no]; + PGconn *pageserver_conn; /* If the connection was lost for some reason, reconnect */ - if (pageserver_conn && PQstatus(pageserver_conn) == CONNECTION_BAD) + if (shard->state == PS_Connected && PQstatus(shard->conn) == CONNECTION_BAD) { neon_shard_log(shard_no, LOG, "pageserver_send disconnect bad connection"); pageserver_disconnect(shard_no); + pageserver_conn = NULL; } req_buff = nm_pack_request(request); @@ -571,17 +740,19 @@ pageserver_send(shardno_t shard_no, NeonRequest *request) * https://github.com/neondatabase/neon/issues/1138 So try to reestablish * connection in case of failure. */ - if (!page_servers[shard_no].conn) + if (shard->state != PS_Connected) { - while (!pageserver_connect(shard_no, n_reconnect_attempts < max_reconnect_attempts ? LOG : ERROR)) + while (!pageserver_connect(shard_no, shard->n_reconnect_attempts < max_reconnect_attempts ? LOG : ERROR)) { HandleMainLoopInterrupts(); - n_reconnect_attempts += 1; + shard->n_reconnect_attempts += 1; } - n_reconnect_attempts = 0; + shard->n_reconnect_attempts = 0; + } else { + Assert(shard->conn != NULL); } - pageserver_conn = page_servers[shard_no].conn; + pageserver_conn = shard->conn; /* * Send request. @@ -590,13 +761,17 @@ pageserver_send(shardno_t shard_no, NeonRequest *request) * should use async mode and check for interrupts while waiting. In * practice, our requests are small enough to always fit in the output and * TCP buffer. + * + * Note that this also will fail when the connection is in the + * PGRES_POLLING_WRITING state. It's kinda dirty to disconnect at this + * point, but on the grand scheme of things it's only a small issue. 
*/ if (PQputCopyData(pageserver_conn, req_buff.data, req_buff.len) <= 0) { char *msg = pchomp(PQerrorMessage(pageserver_conn)); pageserver_disconnect(shard_no); - neon_shard_log(shard_no, LOG, "pageserver_send disconnect because failed to send page request (try to reconnect): %s", msg); + neon_shard_log(shard_no, LOG, "pageserver_send disconnected: failed to send page request (try to reconnect): %s", msg); pfree(msg); pfree(req_buff.data); return false; @@ -611,6 +786,7 @@ pageserver_send(shardno_t shard_no, NeonRequest *request) neon_shard_log(shard_no, PageStoreTrace, "sent request: %s", msg); pfree(msg); } + return true; } @@ -619,58 +795,68 @@ pageserver_receive(shardno_t shard_no) { StringInfoData resp_buff; NeonResponse *resp; - PGconn *pageserver_conn = page_servers[shard_no].conn; + PageServer *shard = &page_servers[shard_no]; + PGconn *pageserver_conn = shard->conn; + /* read response */ + int rc; - if (!pageserver_conn) - return NULL; - - PG_TRY(); + if (shard->state != PS_Connected) { - /* read response */ - int rc; + neon_shard_log(shard_no, LOG, + "pageserver_receive: returning NULL for non-connected pageserver connection: 0x%02x", + shard->state); + return NULL; + } - rc = call_PQgetCopyData(shard_no, &resp_buff.data); - if (rc >= 0) + Assert(pageserver_conn); + + rc = call_PQgetCopyData(shard_no, &resp_buff.data); + if (rc >= 0) + { + /* call_PQgetCopyData handles rc == 0 */ + Assert(rc > 0); + + PG_TRY(); { resp_buff.len = rc; resp_buff.cursor = 0; resp = nm_unpack_response(&resp_buff); PQfreemem(resp_buff.data); - - if (message_level_is_interesting(PageStoreTrace)) - { - char *msg = nm_to_string((NeonMessage *) resp); - - neon_shard_log(shard_no, PageStoreTrace, "got response: %s", msg); - pfree(msg); - } } - else if (rc == -1) + PG_CATCH(); { - neon_shard_log(shard_no, LOG, "pageserver_receive disconnect because call_PQgetCopyData returns -1: %s", pchomp(PQerrorMessage(pageserver_conn))); + neon_shard_log(shard_no, LOG, "pageserver_receive: disconnect due malformatted response"); pageserver_disconnect(shard_no); - resp = NULL; + PG_RE_THROW(); } - else if (rc == -2) - { - char *msg = pchomp(PQerrorMessage(pageserver_conn)); + PG_END_TRY(); - pageserver_disconnect(shard_no); - neon_shard_log(shard_no, ERROR, "pageserver_receive disconnect because could not read COPY data: %s", msg); - } - else + if (message_level_is_interesting(PageStoreTrace)) { - pageserver_disconnect(shard_no); - neon_shard_log(shard_no, ERROR, "pageserver_receive disconnect because unexpected PQgetCopyData return value: %d", rc); + char *msg = nm_to_string((NeonMessage *) resp); + + neon_shard_log(shard_no, PageStoreTrace, "got response: %s", msg); + pfree(msg); } } - PG_CATCH(); + else if (rc == -1) { - neon_shard_log(shard_no, LOG, "pageserver_receive disconnect due to caught exception"); + neon_shard_log(shard_no, LOG, "pageserver_receive disconnect: psql end of copy data: %s", pchomp(PQerrorMessage(pageserver_conn))); pageserver_disconnect(shard_no); - PG_RE_THROW(); + resp = NULL; + } + else if (rc == -2) + { + char *msg = pchomp(PQerrorMessage(pageserver_conn)); + + pageserver_disconnect(shard_no); + neon_shard_log(shard_no, ERROR, "pageserver_receive disconnect: could not read COPY data: %s", msg); + } + else + { + pageserver_disconnect(shard_no); + neon_shard_log(shard_no, ERROR, "pageserver_receive disconnect: unexpected PQgetCopyData return value: %d", rc); } - PG_END_TRY(); return (NeonResponse *) resp; } @@ -681,7 +867,7 @@ pageserver_flush(shardno_t shard_no) { PGconn *pageserver_conn = 
page_servers[shard_no].conn; - if (!pageserver_conn) + if (page_servers[shard_no].state != PS_Connected) { neon_shard_log(shard_no, WARNING, "Tried to flush while disconnected"); } @@ -697,6 +883,7 @@ pageserver_flush(shardno_t shard_no) return false; } } + return true; } @@ -891,5 +1078,7 @@ pg_init_libpagestore(void) dbsize_hook = neon_dbsize; } + memset(page_servers, 0, sizeof(page_servers)); + lfc_init(); } diff --git a/pgxn/neon/pagestore_smgr.c b/pgxn/neon/pagestore_smgr.c index 41546eae85..ac505fe6fb 100644 --- a/pgxn/neon/pagestore_smgr.c +++ b/pgxn/neon/pagestore_smgr.c @@ -94,6 +94,10 @@ static char *hexdump_page(char *page); const int SmgrTrace = DEBUG5; +#define NEON_PANIC_CONNECTION_STATE(shard_no, elvl, message, ...) \ + neon_shard_log(shard_no, elvl, "Broken connection state: " message, \ + ##__VA_ARGS__) + page_server_api *page_server; /* unlogged relation build states */ @@ -526,6 +530,8 @@ prefetch_flush_requests(void) * * NOTE: this function may indirectly update MyPState->pfs_hash; which * invalidates any active pointers into the hash table. + * NOTE: callers should make sure they can handle query cancellations in this + * function's call path. */ static bool prefetch_wait_for(uint64 ring_index) @@ -561,6 +567,8 @@ prefetch_wait_for(uint64 ring_index) * * NOTE: this function may indirectly update MyPState->pfs_hash; which * invalidates any active pointers into the hash table. + * + * NOTE: this does IO, and can get canceled out-of-line. */ static bool prefetch_read(PrefetchRequest *slot) @@ -572,6 +580,14 @@ prefetch_read(PrefetchRequest *slot) Assert(slot->response == NULL); Assert(slot->my_ring_index == MyPState->ring_receive); + if (slot->status != PRFS_REQUESTED || + slot->response != NULL || + slot->my_ring_index != MyPState->ring_receive) + neon_shard_log(slot->shard_no, ERROR, + "Incorrect prefetch read: status=%d response=%llx my=%llu receive=%llu", + slot->status, (size_t) (void *) slot->response, + slot->my_ring_index, MyPState->ring_receive); + old = MemoryContextSwitchTo(MyPState->errctx); response = (NeonResponse *) page_server->receive(slot->shard_no); MemoryContextSwitchTo(old); @@ -589,6 +605,11 @@ prefetch_read(PrefetchRequest *slot) } else { + neon_shard_log(slot->shard_no, WARNING, + "No response from reading prefetch entry %llu: %u/%u/%u.%u block %u. 
This can be caused by a concurrent disconnect", + slot->my_ring_index, + RelFileInfoFmt(BufTagGetNRelFileInfo(slot->buftag)), + slot->buftag.forkNum, slot->buftag.blockNum); return false; } } @@ -603,6 +624,7 @@ void prefetch_on_ps_disconnect(void) { MyPState->ring_flush = MyPState->ring_unused; + while (MyPState->ring_receive < MyPState->ring_unused) { PrefetchRequest *slot; @@ -625,6 +647,7 @@ prefetch_on_ps_disconnect(void) slot->status = PRFS_TAG_REMAINS; MyPState->n_requests_inflight -= 1; MyPState->ring_receive += 1; + prefetch_set_unused(ring_index); } } @@ -691,6 +714,8 @@ static void prefetch_do_request(PrefetchRequest *slot, neon_request_lsns *force_request_lsns) { bool found; + uint64 mySlotNo = slot->my_ring_index; + NeonGetPageRequest request = { .req.tag = T_NeonGetPageRequest, /* lsn and not_modified_since are filled in below */ @@ -699,6 +724,8 @@ prefetch_do_request(PrefetchRequest *slot, neon_request_lsns *force_request_lsns .blkno = slot->buftag.blockNum, }; + Assert(mySlotNo == MyPState->ring_unused); + if (force_request_lsns) slot->request_lsns = *force_request_lsns; else @@ -711,7 +738,11 @@ prefetch_do_request(PrefetchRequest *slot, neon_request_lsns *force_request_lsns Assert(slot->response == NULL); Assert(slot->my_ring_index == MyPState->ring_unused); - while (!page_server->send(slot->shard_no, (NeonRequest *) &request)); + while (!page_server->send(slot->shard_no, (NeonRequest *) &request)) + { + Assert(mySlotNo == MyPState->ring_unused); + /* loop */ + } /* update prefetch state */ MyPState->n_requests_inflight += 1; @@ -722,7 +753,6 @@ prefetch_do_request(PrefetchRequest *slot, neon_request_lsns *force_request_lsns /* update slot state */ slot->status = PRFS_REQUESTED; - prfh_insert(MyPState->prf_hash, slot, &found); Assert(!found); } @@ -894,6 +924,10 @@ Retry: return ring_index; } +/* + * Note: this function can get canceled and use a long jump to the next catch + * context. Take care. + */ static NeonResponse * page_server_request(void const *req) { @@ -925,19 +959,38 @@ page_server_request(void const *req) * Current sharding model assumes that all metadata is present only at shard 0. * We still need to call get_shard_no() to check if shard map is up-to-date. */ - if (((NeonRequest *) req)->tag != T_NeonGetPageRequest || ((NeonGetPageRequest *) req)->forknum != MAIN_FORKNUM) + if (((NeonRequest *) req)->tag != T_NeonGetPageRequest || + ((NeonGetPageRequest *) req)->forknum != MAIN_FORKNUM) { shard_no = 0; } do { - while (!page_server->send(shard_no, (NeonRequest *) req) || !page_server->flush(shard_no)); - consume_prefetch_responses(); - resp = page_server->receive(shard_no); - } while (resp == NULL); - return resp; + PG_TRY(); + { + while (!page_server->send(shard_no, (NeonRequest *) req) + || !page_server->flush(shard_no)) + { + /* do nothing */ + } + consume_prefetch_responses(); + resp = page_server->receive(shard_no); + } + PG_CATCH(); + { + /* + * Cancellation in this code needs to be handled better at some + * point, but this currently seems fine for now. 
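+ * Dropping the connection here discards any response that may still be
+ * in flight for the canceled request, so a later request on this shard
+ * cannot read a stale reply that was meant for this one.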
+ */ + page_server->disconnect(shard_no); + PG_RE_THROW(); + } + PG_END_TRY(); + } while (resp == NULL); + + return resp; } @@ -1905,7 +1958,9 @@ neon_exists(SMgrRelation reln, ForkNumber forkNum) break; default: - neon_log(ERROR, "unexpected response from page server with tag 0x%02x in neon_exists", resp->tag); + NEON_PANIC_CONNECTION_STATE(-1, PANIC, + "Expected Exists (0x%02x) or Error (0x%02x) response to ExistsRequest, but got 0x%02x", + T_NeonExistsResponse, T_NeonErrorResponse, resp->tag); } pfree(resp); return exists; @@ -2357,7 +2412,7 @@ neon_read_at_lsn(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, /* * Try to find prefetched page in the list of received pages. */ - Retry: +Retry: entry = prfh_lookup(MyPState->prf_hash, (PrefetchRequest *) &buftag); if (entry != NULL) @@ -2443,7 +2498,9 @@ neon_read_at_lsn(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, ((NeonErrorResponse *) resp)->message))); break; default: - neon_log(ERROR, "unexpected response from page server with tag 0x%02x in neon_read_at_lsn", resp->tag); + NEON_PANIC_CONNECTION_STATE(slot->shard_no, PANIC, + "Expected GetPage (0x%02x) or Error (0x%02x) response to GetPageRequest, but got 0x%02x", + T_NeonGetPageResponse, T_NeonErrorResponse, resp->tag); } /* buffer was used, clean up for later reuse */ @@ -2714,7 +2771,9 @@ neon_nblocks(SMgrRelation reln, ForkNumber forknum) break; default: - neon_log(ERROR, "unexpected response from page server with tag 0x%02x in neon_nblocks", resp->tag); + NEON_PANIC_CONNECTION_STATE(-1, PANIC, + "Expected Nblocks (0x%02x) or Error (0x%02x) response to NblocksRequest, but got 0x%02x", + T_NeonNblocksResponse, T_NeonErrorResponse, resp->tag); } update_cached_relsize(InfoFromSMgrRel(reln), forknum, n_blocks); @@ -2767,7 +2826,9 @@ neon_dbsize(Oid dbNode) break; default: - neon_log(ERROR, "unexpected response from page server with tag 0x%02x in neon_dbsize", resp->tag); + NEON_PANIC_CONNECTION_STATE(-1, PANIC, + "Expected DbSize (0x%02x) or Error (0x%02x) response to DbSizeRequest, but got 0x%02x", + T_NeonDbSizeResponse, T_NeonErrorResponse, resp->tag); } neon_log(SmgrTrace, "neon_dbsize: db %u (request LSN %X/%08X): %ld bytes", @@ -3106,7 +3167,9 @@ neon_read_slru_segment(SMgrRelation reln, const char* path, int segno, void* buf break; default: - neon_log(ERROR, "unexpected response from page server with tag 0x%02x in neon_read_slru_segment", resp->tag); + NEON_PANIC_CONNECTION_STATE(-1, PANIC, + "Expected GetSlruSegment (0x%02x) or Error (0x%02x) response to GetSlruSegmentRequest, but got 0x%02x", + T_NeonGetSlruSegmentResponse, T_NeonErrorResponse, resp->tag); } pfree(resp); diff --git a/test_runner/regress/test_pg_query_cancellation.py b/test_runner/regress/test_pg_query_cancellation.py new file mode 100644 index 0000000000..bad2e5865e --- /dev/null +++ b/test_runner/regress/test_pg_query_cancellation.py @@ -0,0 +1,282 @@ +from contextlib import closing +from typing import Set + +import pytest +from fixtures.log_helper import log +from fixtures.neon_fixtures import Endpoint, NeonEnv, NeonPageserver +from fixtures.pageserver.http import PageserverHttpClient +from psycopg2.errors import QueryCanceled + +CRITICAL_PG_PS_WAIT_FAILPOINTS: Set[str] = { + "ps::connection-start::pre-login", + "ps::connection-start::startup-packet", + "ps::connection-start::process-query", + "ps::handle-pagerequest-message::exists", + "ps::handle-pagerequest-message::nblocks", + "ps::handle-pagerequest-message::getpage", + "ps::handle-pagerequest-message::dbsize", + # We don't yet 
have a good way to on-demand guarantee the download of an + # SLRU segment, so that's disabled for now. + # "ps::handle-pagerequest-message::slrusegment", +} + +PG_PS_START_FAILPOINTS = { + "ps::connection-start::pre-login", + "ps::connection-start::startup-packet", + "ps::connection-start::process-query", +} +SMGR_EXISTS = "ps::handle-pagerequest-message::exists" +SMGR_NBLOCKS = "ps::handle-pagerequest-message::nblocks" +SMGR_GETPAGE = "ps::handle-pagerequest-message::getpage" +SMGR_DBSIZE = "ps::handle-pagerequest-message::dbsize" + +""" +Test that we can handle connection delays and cancellations at various +unfortunate connection startup and request states. +""" + + +def test_cancellations(neon_simple_env: NeonEnv): + env = neon_simple_env + ps = env.pageserver + ps_http = ps.http_client() + ps_http.is_testing_enabled_or_skip() + + env.neon_cli.create_branch("test_config", "empty") + + # We don't want to have any racy behaviour with autovacuum IOs + ep = env.endpoints.create_start( + "test_config", + config_lines=[ + "autovacuum = off", + "shared_buffers = 128MB", + ], + ) + + with closing(ep.connect()) as conn: + with conn.cursor() as cur: + cur.execute( + """ + CREATE TABLE test1 AS + SELECT id, sha256(id::text::bytea) payload + FROM generate_series(1, 1024::bigint) p(id); + """ + ) + cur.execute( + """ + CREATE TABLE test2 AS + SELECT id, sha256(id::text::bytea) payload + FROM generate_series(1025, 2048::bigint) p(id); + """ + ) + cur.execute( + """ + VACUUM (ANALYZE, FREEZE) test1, test2; + """ + ) + cur.execute( + """ + CREATE EXTENSION pg_buffercache; + """ + ) + cur.execute( + """ + CREATE EXTENSION pg_prewarm; + """ + ) + + # data preparation is now complete, with 2 disjoint tables that aren't + # preloaded into any caches. + + ep.stop() + + for failpoint in CRITICAL_PG_PS_WAIT_FAILPOINTS: + connect_works_correctly(failpoint, ep, ps, ps_http) + + +ENABLED_FAILPOINTS: Set[str] = set() + + +def connect_works_correctly( + failpoint: str, ep: Endpoint, ps: NeonPageserver, ps_http: PageserverHttpClient +): + log.debug("Starting work on %s", failpoint) + # All queries we use should finish (incl. IO) within 500ms, + # including all their IO. + # This allows us to use `SET statement_timeout` to let the query + # timeout system cancel queries, rather than us having to go + # through the most annoying effort of manual query cancellation + # in psycopg2. 
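+ # A paused failpoint therefore surfaces as a QueryCanceled error raised by
+ # statement_timeout, which is what exec_may_cancel() asserts on below.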
+ options = "-cstatement_timeout=500ms -ceffective_io_concurrency=1" + + ep.start() + + def fp_enable(): + global ENABLED_FAILPOINTS + ps_http.configure_failpoints( + [ + (failpoint, "pause"), + ] + ) + ENABLED_FAILPOINTS = ENABLED_FAILPOINTS | {failpoint} + log.info( + 'Enabled failpoint "%s", current_active=%s', failpoint, ENABLED_FAILPOINTS, stacklevel=2 + ) + + def fp_disable(): + global ENABLED_FAILPOINTS + ps_http.configure_failpoints( + [ + (failpoint, "off"), + ] + ) + ENABLED_FAILPOINTS = ENABLED_FAILPOINTS - {failpoint} + log.info( + 'Disabled failpoint "%s", current_active=%s', + failpoint, + ENABLED_FAILPOINTS, + stacklevel=2, + ) + + def check_buffers(cur): + cur.execute( + """ + SELECT n.nspname AS nspname + , c.relname AS relname + , count(*) AS count + FROM pg_buffercache b + JOIN pg_class c + ON b.relfilenode = pg_relation_filenode(c.oid) AND + b.reldatabase = (SELECT oid FROM pg_database WHERE datname = current_database()) + JOIN pg_namespace n ON n.oid = c.relnamespace + WHERE c.oid IN ('test1'::regclass::oid, 'test2'::regclass::oid) + GROUP BY n.nspname, c.relname + ORDER BY 3 DESC + LIMIT 10 + """ + ) + return cur.fetchone() + + def exec_may_cancel(query, cursor, result, cancels): + if cancels: + with pytest.raises(QueryCanceled): + cursor.execute(query) + assert cursor.fetchone() == result + else: + cursor.execute(query) + assert cursor.fetchone() == result + + fp_disable() + + # Warm caches required for new connections, so that they can run without + # requiring catalog reads. + with closing(ep.connect()) as conn: + with conn.cursor() as cur: + cur.execute( + """ + SELECT 1; + """ + ) + assert cur.fetchone() == (1,) + + assert check_buffers(cur) is None + # Ensure all caches required for connection start are correctly + # filled, so that we don't have any "accidents" in this test run + # caused by changes in connection startup plans that require + # requests to the PageServer. + cur.execute( + """ + select array_agg(distinct (pg_prewarm(c.oid::regclass, 'buffer') >= 0)) + from pg_class c + where c.oid < 16384 AND c.relkind IN ('i', 'r'); + """ + ) + assert cur.fetchone() == ([True],) + + # Enable failpoint + fp_enable() + + with closing(ep.connect(options=options, autocommit=True)) as conn: + with conn.cursor() as cur: + cur.execute("SHOW statement_timeout;") + assert cur.fetchone() == ("500ms",) + assert check_buffers(cur) is None + exec_may_cancel( + """ + SELECT min(id) FROM test1; + """, + cur, + (1,), + failpoint in (CRITICAL_PG_PS_WAIT_FAILPOINTS - {SMGR_EXISTS, SMGR_DBSIZE}), + ) + + fp_disable() + + with closing(ep.connect(options=options, autocommit=True)) as conn: + with conn.cursor() as cur: + # Do a select on the data, putting some buffers into the prefetch + # queue. 
+ cur.execute( + """ + SELECT count(id) FROM (select * from test1 LIMIT 256) a; + """ + ) + assert cur.fetchone() == (256,) + + ps.stop() + ps.start() + fp_enable() + + exec_may_cancel( + """ + SELECT COUNT(id) FROM test1; + """, + cur, + (1024,), + failpoint + in (CRITICAL_PG_PS_WAIT_FAILPOINTS - {SMGR_EXISTS, SMGR_NBLOCKS, SMGR_DBSIZE}), + ) + + with closing(ep.connect(options=options, autocommit=True)) as conn: + with conn.cursor() as cur: + exec_may_cancel( + """ + SELECT COUNT(id) FROM test2; + """, + cur, + (1024,), + failpoint in (CRITICAL_PG_PS_WAIT_FAILPOINTS - {SMGR_EXISTS, SMGR_DBSIZE}), + ) + + fp_disable() + fp_enable() + + exec_may_cancel( + """ + SELECT 0 < pg_database_size(CURRENT_DATABASE()); + """, + cur, + (True,), + failpoint + in (CRITICAL_PG_PS_WAIT_FAILPOINTS - {SMGR_EXISTS, SMGR_GETPAGE, SMGR_NBLOCKS}), + ) + + fp_disable() + + cur.execute( + """ + SELECT count(id), count(distinct payload), min(id), max(id), sum(id) FROM test2; + """ + ) + + assert cur.fetchone() == (1024, 1024, 1025, 2048, 1573376) + + cur.execute( + """ + SELECT count(id), count(distinct payload), min(id), max(id), sum(id) FROM test1; + """ + ) + + assert cur.fetchone() == (1024, 1024, 1, 1024, 524800) + + ep.stop() From c1f4028fc0e76b0945d7eaaed5a460412ea7980e Mon Sep 17 00:00:00 2001 From: Roman Zaynetdinov Date: Fri, 24 May 2024 11:05:20 +0300 Subject: [PATCH 081/220] Export db size metrics for 10 user databases (#7857) ## Problem One database is too limiting. We have agreed to raise this limit to 10. ## Checklist before requesting a review - [x] I have performed a self-review of my code. - [ ] If it is a core feature, I have added thorough tests. - [ ] Do we need to implement analytics? if so did you add the relevant metrics to the dashboard? - [ ] If this PR requires public announcement, mark it with /release-notes label and add several sentences in this section. ## Checklist before merging - [ ] Do not forget to reformat commit message to not include the above checklist --- vm-image-spec.yaml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/vm-image-spec.yaml b/vm-image-spec.yaml index 484a86fc21..73a24c42d6 100644 --- a/vm-image-spec.yaml +++ b/vm-image-spec.yaml @@ -194,7 +194,7 @@ files: - metric_name: pg_stats_userdb type: gauge - help: 'Stats for the oldest non-system db' + help: 'Stats for several oldest non-system dbs' key_labels: - datname value_label: kind @@ -205,9 +205,8 @@ files: - inserted - updated - deleted - # We export stats for only one non-system database. Without this limit + # We export stats for 10 non-system database. Without this limit # it is too easy to abuse the system by creating lots of databases. - # We can try lifting this limit in the future after we understand the needs better. query: | select pg_database_size(datname) as db_size, deadlocks, tup_inserted as inserted, tup_updated as updated, tup_deleted as deleted, @@ -218,7 +217,7 @@ files: from pg_database where datname <> 'postgres' and not datistemplate order by oid - limit 1 + limit 10 ); - metric_name: max_cluster_size From 3860bc9c6c74ca4f84ff2f12d6c3521b3df99df2 Mon Sep 17 00:00:00 2001 From: John Spray Date: Fri, 24 May 2024 09:33:19 +0100 Subject: [PATCH 082/220] pageserver: post-shard-split layer rewrites (2/2) (#7531) ## Problem - After a shard split of a large existing tenant, child tenants can end up with oversized historic layers indefinitely, if those layers are prevented from being GC'd by branchpoints. 
This PR follows https://github.com/neondatabase/neon/pull/7531, and adds rewriting of layers that contain a mixture of needed & un-needed contents, in addition to dropping un-needed layers. Closes: https://github.com/neondatabase/neon/issues/7504 ## Summary of changes - Add methods to ImageLayer for reading back existing layers - Extend `compact_shard_ancestors` to rewrite layer files that contain a mixture of keys that we want and keys we do not, if unwanted keys are the majority of those in the file. - Amend initialization code to handle multiple layers with the same LayerName properly - Get rid of of renaming bad layer files to `.old` since that's now expected on restarts during rewrites. --- .../src/tenant/storage_layer/image_layer.rs | 200 +++++++++++++++++- pageserver/src/tenant/storage_layer/layer.rs | 32 ++- pageserver/src/tenant/timeline.rs | 84 +++----- pageserver/src/tenant/timeline/compaction.rs | 101 +++++++-- pageserver/src/tenant/timeline/init.rs | 183 ++++++++-------- .../src/tenant/timeline/layer_manager.rs | 27 ++- test_runner/fixtures/neon_fixtures.py | 4 +- test_runner/regress/test_sharding.py | 104 ++++++++- 8 files changed, 545 insertions(+), 190 deletions(-) diff --git a/pageserver/src/tenant/storage_layer/image_layer.rs b/pageserver/src/tenant/storage_layer/image_layer.rs index 67b489ce0d..8394b33f19 100644 --- a/pageserver/src/tenant/storage_layer/image_layer.rs +++ b/pageserver/src/tenant/storage_layer/image_layer.rs @@ -47,7 +47,7 @@ use hex; use itertools::Itertools; use pageserver_api::keyspace::KeySpace; use pageserver_api::models::LayerAccessKind; -use pageserver_api::shard::TenantShardId; +use pageserver_api::shard::{ShardIdentity, TenantShardId}; use rand::{distributions::Alphanumeric, Rng}; use serde::{Deserialize, Serialize}; use std::fs::File; @@ -473,7 +473,7 @@ impl ImageLayerInner { ctx: &RequestContext, ) -> Result<(), GetVectoredError> { let reads = self - .plan_reads(keyspace, ctx) + .plan_reads(keyspace, None, ctx) .await .map_err(GetVectoredError::Other)?; @@ -485,9 +485,15 @@ impl ImageLayerInner { Ok(()) } + /// Traverse the layer's index to build read operations on the overlap of the input keyspace + /// and the keys in this layer. + /// + /// If shard_identity is provided, it will be used to filter keys down to those stored on + /// this shard. async fn plan_reads( &self, keyspace: KeySpace, + shard_identity: Option<&ShardIdentity>, ctx: &RequestContext, ) -> anyhow::Result> { let mut planner = VectoredReadPlanner::new( @@ -507,7 +513,6 @@ impl ImageLayerInner { for range in keyspace.ranges.iter() { let mut range_end_handled = false; - let mut search_key: [u8; KEY_SIZE] = [0u8; KEY_SIZE]; range.start.write_to_byte_slice(&mut search_key); @@ -520,12 +525,22 @@ impl ImageLayerInner { let key = Key::from_slice(&raw_key[..KEY_SIZE]); assert!(key >= range.start); + let flag = if let Some(shard_identity) = shard_identity { + if shard_identity.is_key_disposable(&key) { + BlobFlag::Ignore + } else { + BlobFlag::None + } + } else { + BlobFlag::None + }; + if key >= range.end { planner.handle_range_end(offset); range_end_handled = true; break; } else { - planner.handle(key, self.lsn, offset, BlobFlag::None); + planner.handle(key, self.lsn, offset, flag); } } @@ -538,6 +553,50 @@ impl ImageLayerInner { Ok(planner.finish()) } + /// Given a key range, select the parts of that range that should be retained by the ShardIdentity, + /// then execute vectored GET operations, passing the results of all read keys into the writer. 
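+ ///
+ /// Keys that the ShardIdentity reports as disposable are planned with
+ /// `BlobFlag::Ignore` by `plan_reads` and never reach the writer; the
+ /// returned count is the number of keys actually written.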
+ pub(super) async fn filter( + &self, + shard_identity: &ShardIdentity, + writer: &mut ImageLayerWriter, + ctx: &RequestContext, + ) -> anyhow::Result { + // Fragment the range into the regions owned by this ShardIdentity + let plan = self + .plan_reads( + KeySpace { + // If asked for the total key space, plan_reads will give us all the keys in the layer + ranges: vec![Key::MIN..Key::MAX], + }, + Some(shard_identity), + ctx, + ) + .await?; + + let vectored_blob_reader = VectoredBlobReader::new(&self.file); + let mut key_count = 0; + for read in plan.into_iter() { + let buf_size = read.size(); + + let buf = BytesMut::with_capacity(buf_size); + let blobs_buf = vectored_blob_reader.read_blobs(&read, buf, ctx).await?; + + let frozen_buf = blobs_buf.buf.freeze(); + + for meta in blobs_buf.blobs.iter() { + let img_buf = frozen_buf.slice(meta.start..meta.end); + + key_count += 1; + writer + .put_image(meta.meta.key, img_buf, ctx) + .await + .context(format!("Storing key {}", meta.meta.key))?; + } + } + + Ok(key_count) + } + async fn do_reads_and_update_state( &self, reads: Vec, @@ -855,3 +914,136 @@ impl Drop for ImageLayerWriter { } } } + +#[cfg(test)] +mod test { + use bytes::Bytes; + use pageserver_api::{ + key::Key, + shard::{ShardCount, ShardIdentity, ShardNumber, ShardStripeSize}, + }; + use utils::{id::TimelineId, lsn::Lsn}; + + use crate::{tenant::harness::TenantHarness, DEFAULT_PG_VERSION}; + + use super::ImageLayerWriter; + + #[tokio::test] + async fn image_layer_rewrite() { + let harness = TenantHarness::create("test_image_layer_rewrite").unwrap(); + let (tenant, ctx) = harness.load().await; + + // The LSN at which we will create an image layer to filter + let lsn = Lsn(0xdeadbeef0000); + + let timeline_id = TimelineId::generate(); + let timeline = tenant + .create_test_timeline(timeline_id, lsn, DEFAULT_PG_VERSION, &ctx) + .await + .unwrap(); + + // This key range contains several 0x8000 page stripes, only one of which belongs to shard zero + let input_start = Key::from_hex("000000067f00000001000000ae0000000000").unwrap(); + let input_end = Key::from_hex("000000067f00000001000000ae0000020000").unwrap(); + let range = input_start..input_end; + + // Build an image layer to filter + let resident = { + let mut writer = ImageLayerWriter::new( + harness.conf, + timeline_id, + harness.tenant_shard_id, + &range, + lsn, + &ctx, + ) + .await + .unwrap(); + + let foo_img = Bytes::from_static(&[1, 2, 3, 4]); + let mut key = range.start; + while key < range.end { + writer.put_image(key, foo_img.clone(), &ctx).await.unwrap(); + + key = key.next(); + } + writer.finish(&timeline, &ctx).await.unwrap() + }; + let original_size = resident.metadata().file_size; + + // Filter for various shards: this exercises cases like values at start of key range, end of key + // range, middle of key range. 
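+ // With ShardCount(4) and ShardStripeSize(0x8000), the 0x20000-key input
+ // range works out to one stripe each for shards 0 and 2, none for shard 1,
+ // and two stripes for shard 3, matching the per-shard assertions below.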
+ for shard_number in 0..4 { + let mut filtered_writer = ImageLayerWriter::new( + harness.conf, + timeline_id, + harness.tenant_shard_id, + &range, + lsn, + &ctx, + ) + .await + .unwrap(); + + // TenantHarness gave us an unsharded tenant, but we'll use a sharded ShardIdentity + // to exercise filter() + let shard_identity = ShardIdentity::new( + ShardNumber(shard_number), + ShardCount::new(4), + ShardStripeSize(0x8000), + ) + .unwrap(); + + let wrote_keys = resident + .filter(&shard_identity, &mut filtered_writer, &ctx) + .await + .unwrap(); + let replacement = if wrote_keys > 0 { + Some(filtered_writer.finish(&timeline, &ctx).await.unwrap()) + } else { + None + }; + + // This exact size and those below will need updating as/when the layer encoding changes, but + // should be deterministic for a given version of the format, as we used no randomness generating the input. + assert_eq!(original_size, 1597440); + + match shard_number { + 0 => { + // We should have written out just one stripe for our shard identity + assert_eq!(wrote_keys, 0x8000); + let replacement = replacement.unwrap(); + + // We should have dropped some of the data + assert!(replacement.metadata().file_size < original_size); + assert!(replacement.metadata().file_size > 0); + + // Assert that we dropped ~3/4 of the data. + assert_eq!(replacement.metadata().file_size, 417792); + } + 1 => { + // Shard 1 has no keys in our input range + assert_eq!(wrote_keys, 0x0); + assert!(replacement.is_none()); + } + 2 => { + // Shard 2 has one stripes in the input range + assert_eq!(wrote_keys, 0x8000); + let replacement = replacement.unwrap(); + assert!(replacement.metadata().file_size < original_size); + assert!(replacement.metadata().file_size > 0); + assert_eq!(replacement.metadata().file_size, 417792); + } + 3 => { + // Shard 3 has two stripes in the input range + assert_eq!(wrote_keys, 0x10000); + let replacement = replacement.unwrap(); + assert!(replacement.metadata().file_size < original_size); + assert!(replacement.metadata().file_size > 0); + assert_eq!(replacement.metadata().file_size, 811008); + } + _ => unreachable!(), + } + } + } +} diff --git a/pageserver/src/tenant/storage_layer/layer.rs b/pageserver/src/tenant/storage_layer/layer.rs index b2f3bdb552..3ac799c69a 100644 --- a/pageserver/src/tenant/storage_layer/layer.rs +++ b/pageserver/src/tenant/storage_layer/layer.rs @@ -4,7 +4,7 @@ use pageserver_api::keyspace::KeySpace; use pageserver_api::models::{ HistoricLayerInfo, LayerAccessKind, LayerResidenceEventReason, LayerResidenceStatus, }; -use pageserver_api::shard::{ShardIndex, TenantShardId}; +use pageserver_api::shard::{ShardIdentity, ShardIndex, TenantShardId}; use std::ops::Range; use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use std::sync::{Arc, Weak}; @@ -23,10 +23,10 @@ use crate::tenant::timeline::GetVectoredError; use crate::tenant::{remote_timeline_client::LayerFileMetadata, Timeline}; use super::delta_layer::{self, DeltaEntry}; -use super::image_layer; +use super::image_layer::{self}; use super::{ - AsLayerDesc, LayerAccessStats, LayerAccessStatsReset, LayerName, PersistentLayerDesc, - ValueReconstructResult, ValueReconstructState, ValuesReconstructState, + AsLayerDesc, ImageLayerWriter, LayerAccessStats, LayerAccessStatsReset, LayerName, + PersistentLayerDesc, ValueReconstructResult, ValueReconstructState, ValuesReconstructState, }; use utils::generation::Generation; @@ -1802,16 +1802,15 @@ impl ResidentLayer { use LayerKind::*; let owner = &self.owner.0; - match self.downloaded.get(owner, 
ctx).await? { Delta(ref d) => { + // this is valid because the DownloadedLayer::kind is a OnceCell, not a + // Mutex, so we cannot go and deinitialize the value with OnceCell::take + // while it's being held. owner .access_stats .record_access(LayerAccessKind::KeyIter, ctx); - // this is valid because the DownloadedLayer::kind is a OnceCell, not a - // Mutex, so we cannot go and deinitialize the value with OnceCell::take - // while it's being held. delta_layer::DeltaLayerInner::load_keys(d, ctx) .await .with_context(|| format!("Layer index is corrupted for {self}")) @@ -1820,6 +1819,23 @@ impl ResidentLayer { } } + /// Read all they keys in this layer which match the ShardIdentity, and write them all to + /// the provided writer. Return the number of keys written. + #[tracing::instrument(level = tracing::Level::DEBUG, skip_all, fields(layer=%self))] + pub(crate) async fn filter<'a>( + &'a self, + shard_identity: &ShardIdentity, + writer: &mut ImageLayerWriter, + ctx: &RequestContext, + ) -> anyhow::Result { + use LayerKind::*; + + match self.downloaded.get(&self.owner.0, ctx).await? { + Delta(_) => anyhow::bail!(format!("cannot filter() on a delta layer {self}")), + Image(i) => i.filter(shard_identity, writer, ctx).await, + } + } + /// Returns the amount of keys and values written to the writer. pub(crate) async fn copy_delta_prefix( &self, diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 342fc4fc59..1bdbddd95f 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -41,6 +41,7 @@ use tokio_util::sync::CancellationToken; use tracing::*; use utils::{ bin_ser::BeSer, + fs_ext, sync::gate::{Gate, GateGuard}, vec_map::VecMap, }; @@ -60,6 +61,7 @@ use std::{ ops::ControlFlow, }; +use crate::pgdatadir_mapping::MAX_AUX_FILE_V2_DELTAS; use crate::{ aux_file::AuxFileSizeEstimator, tenant::{ @@ -88,9 +90,6 @@ use crate::{ metrics::ScanLatencyOngoingRecording, tenant::timeline::logical_size::CurrentLogicalSize, }; use crate::{pgdatadir_mapping::LsnForTimestamp, tenant::tasks::BackgroundLoopKind}; -use crate::{ - pgdatadir_mapping::MAX_AUX_FILE_V2_DELTAS, tenant::timeline::init::LocalLayerFileMetadata, -}; use crate::{ pgdatadir_mapping::{AuxFilesDirectory, DirectoryKind}, virtual_file::{MaybeFatalIo, VirtualFile}, @@ -2454,8 +2453,6 @@ impl Timeline { let span = tracing::Span::current(); // Copy to move into the task we're about to spawn - let generation = self.generation; - let shard = self.get_shard_index(); let this = self.myself.upgrade().expect("&self method holds the arc"); let (loaded_layers, needs_cleanup, total_physical_size) = tokio::task::spawn_blocking({ @@ -2469,11 +2466,14 @@ impl Timeline { for discovered in discovered { let (name, kind) = match discovered { - Discovered::Layer(layer_file_name, local_path, file_size) => { - discovered_layers.push((layer_file_name, local_path, file_size)); + Discovered::Layer(layer_file_name, local_metadata) => { + discovered_layers.push((layer_file_name, local_metadata)); continue; } - Discovered::IgnoredBackup => { + Discovered::IgnoredBackup(path) => { + std::fs::remove_file(path) + .or_else(fs_ext::ignore_not_found) + .fatal_err("Removing .old file"); continue; } Discovered::Unknown(file_name) => { @@ -2499,13 +2499,8 @@ impl Timeline { ); } - let decided = init::reconcile( - discovered_layers, - index_part.as_ref(), - disk_consistent_lsn, - generation, - shard, - ); + let decided = + init::reconcile(discovered_layers, index_part.as_ref(), disk_consistent_lsn); let mut 
loaded_layers = Vec::new(); let mut needs_cleanup = Vec::new(); @@ -2513,21 +2508,6 @@ impl Timeline { for (name, decision) in decided { let decision = match decision { - Ok(UseRemote { local, remote }) => { - // Remote is authoritative, but we may still choose to retain - // the local file if the contents appear to match - if local.metadata.file_size == remote.file_size { - // Use the local file, but take the remote metadata so that we pick up - // the correct generation. - UseLocal(LocalLayerFileMetadata { - metadata: remote, - local_path: local.local_path, - }) - } else { - init::cleanup_local_file_for_remote(&local, &remote)?; - UseRemote { local, remote } - } - } Ok(decision) => decision, Err(DismissedLayer::Future { local }) => { if let Some(local) = local { @@ -2545,6 +2525,11 @@ impl Timeline { // this file never existed remotely, we will have to do rework continue; } + Err(DismissedLayer::BadMetadata(local)) => { + init::cleanup_local_file_for_remote(&local)?; + // this file never existed remotely, we will have to do rework + continue; + } }; match &name { @@ -2555,14 +2540,12 @@ impl Timeline { tracing::debug!(layer=%name, ?decision, "applied"); let layer = match decision { - UseLocal(local) => { - total_physical_size += local.metadata.file_size; - Layer::for_resident(conf, &this, local.local_path, name, local.metadata) + Resident { local, remote } => { + total_physical_size += local.file_size; + Layer::for_resident(conf, &this, local.local_path, name, remote) .drop_eviction_guard() } - Evicted(remote) | UseRemote { remote, .. } => { - Layer::for_evicted(conf, &this, name, remote) - } + Evicted(remote) => Layer::for_evicted(conf, &this, name, remote), }; loaded_layers.push(layer); @@ -4725,11 +4708,16 @@ impl Timeline { async fn rewrite_layers( self: &Arc, - replace_layers: Vec<(Layer, ResidentLayer)>, - drop_layers: Vec, + mut replace_layers: Vec<(Layer, ResidentLayer)>, + mut drop_layers: Vec, ) -> anyhow::Result<()> { let mut guard = self.layers.write().await; + // Trim our lists in case our caller (compaction) raced with someone else (GC) removing layers: we want + // to avoid double-removing, and avoid rewriting something that was removed. + replace_layers.retain(|(l, _)| guard.contains(l)); + drop_layers.retain(|l| guard.contains(l)); + guard.rewrite_layers(&replace_layers, &drop_layers, &self.metrics); let upload_layers: Vec<_> = replace_layers.into_iter().map(|r| r.1).collect(); @@ -5604,26 +5592,6 @@ fn is_send() { _assert_send::>(); } -/// Add a suffix to a layer file's name: .{num}.old -/// Uses the first available num (starts at 0) -fn rename_to_backup(path: &Utf8Path) -> anyhow::Result<()> { - let filename = path - .file_name() - .ok_or_else(|| anyhow!("Path {path} don't have a file name"))?; - let mut new_path = path.to_owned(); - - for i in 0u32.. 
{ - new_path.set_file_name(format!("{filename}.{i}.old")); - if !new_path.exists() { - std::fs::rename(path, &new_path) - .with_context(|| format!("rename {path:?} to {new_path:?}"))?; - return Ok(()); - } - } - - bail!("couldn't find an unused backup number for {:?}", path) -} - #[cfg(test)] mod tests { use utils::{id::TimelineId, lsn::Lsn}; diff --git a/pageserver/src/tenant/timeline/compaction.rs b/pageserver/src/tenant/timeline/compaction.rs index db8adfc16c..07a12f535a 100644 --- a/pageserver/src/tenant/timeline/compaction.rs +++ b/pageserver/src/tenant/timeline/compaction.rs @@ -176,13 +176,24 @@ impl Timeline { async fn compact_shard_ancestors( self: &Arc, rewrite_max: usize, - _ctx: &RequestContext, + ctx: &RequestContext, ) -> anyhow::Result<()> { let mut drop_layers = Vec::new(); - let layers_to_rewrite: Vec = Vec::new(); + let mut layers_to_rewrite: Vec = Vec::new(); - // We will use the PITR cutoff as a condition for rewriting layers. - let pitr_cutoff = self.gc_info.read().unwrap().cutoffs.pitr; + // We will use the Lsn cutoff of the last GC as a threshold for rewriting layers: if a + // layer is behind this Lsn, it indicates that the layer is being retained beyond the + // pitr_interval, for example because a branchpoint references it. + // + // Holding this read guard also blocks [`Self::gc_timeline`] from entering while we + // are rewriting layers. + let latest_gc_cutoff = self.get_latest_gc_cutoff_lsn(); + + tracing::info!( + "latest_gc_cutoff: {}, pitr cutoff {}", + *latest_gc_cutoff, + self.gc_info.read().unwrap().cutoffs.pitr + ); let layers = self.layers.read().await; for layer_desc in layers.layer_map().iter_historic_layers() { @@ -241,9 +252,9 @@ impl Timeline { // Don't bother re-writing a layer if it is within the PITR window: it will age-out eventually // without incurring the I/O cost of a rewrite. - if layer_desc.get_lsn_range().end >= pitr_cutoff { - debug!(%layer, "Skipping rewrite of layer still in PITR window ({} >= {})", - layer_desc.get_lsn_range().end, pitr_cutoff); + if layer_desc.get_lsn_range().end >= *latest_gc_cutoff { + debug!(%layer, "Skipping rewrite of layer still in GC window ({} >= {})", + layer_desc.get_lsn_range().end, *latest_gc_cutoff); continue; } @@ -253,13 +264,10 @@ impl Timeline { continue; } - // Only rewrite layers if they would have different remote paths: either they belong to this - // shard but an old generation, or they belonged to another shard. This also implicitly - // guarantees that the layer is persistent in remote storage (as only remote persistent - // layers are carried across shard splits, any local-only layer would be in the current generation) - if layer.metadata().generation == self.generation - && layer.metadata().shard.shard_count == self.shard_identity.count - { + // Only rewrite layers if their generations differ. This guarantees: + // - that local rewrite is safe, as local layer paths will differ between existing layer and rewritten one + // - that the layer is persistent in remote storage, as we only see old-generation'd layer via loading from remote storage + if layer.metadata().generation == self.generation { debug!(%layer, "Skipping rewrite, is not from old generation"); continue; } @@ -272,18 +280,69 @@ impl Timeline { } // Fall through: all our conditions for doing a rewrite passed. 
- // TODO: implement rewriting - tracing::debug!(%layer, "Would rewrite layer"); + layers_to_rewrite.push(layer); } - // Drop the layers read lock: we will acquire it for write in [`Self::rewrite_layers`] + // Drop read lock on layer map before we start doing time-consuming I/O drop(layers); - // TODO: collect layers to rewrite - let replace_layers = Vec::new(); + let mut replace_image_layers = Vec::new(); + + for layer in layers_to_rewrite { + tracing::info!(layer=%layer, "Rewriting layer after shard split..."); + let mut image_layer_writer = ImageLayerWriter::new( + self.conf, + self.timeline_id, + self.tenant_shard_id, + &layer.layer_desc().key_range, + layer.layer_desc().image_layer_lsn(), + ctx, + ) + .await?; + + // Safety of layer rewrites: + // - We are writing to a different local file path than we are reading from, so the old Layer + // cannot interfere with the new one. + // - In the page cache, contents for a particular VirtualFile are stored with a file_id that + // is different for two layers with the same name (in `ImageLayerInner::new` we always + // acquire a fresh id from [`crate::page_cache::next_file_id`]. So readers do not risk + // reading the index from one layer file, and then data blocks from the rewritten layer file. + // - Any readers that have a reference to the old layer will keep it alive until they are done + // with it. If they are trying to promote from remote storage, that will fail, but this is the same + // as for compaction generally: compaction is allowed to delete layers that readers might be trying to use. + // - We do not run concurrently with other kinds of compaction, so the only layer map writes we race with are: + // - GC, which at worst witnesses us "undelete" a layer that they just deleted. + // - ingestion, which only inserts layers, therefore cannot collide with us. + let resident = layer.download_and_keep_resident().await?; + + let keys_written = resident + .filter(&self.shard_identity, &mut image_layer_writer, ctx) + .await?; + + if keys_written > 0 { + let new_layer = image_layer_writer.finish(self, ctx).await?; + tracing::info!(layer=%new_layer, "Rewrote layer, {} -> {} bytes", + layer.metadata().file_size, + new_layer.metadata().file_size); + + replace_image_layers.push((layer, new_layer)); + } else { + // Drop the old layer. Usually for this case we would already have noticed that + // the layer has no data for us with the ShardedRange check above, but + drop_layers.push(layer); + } + } + + // At this point, we have replaced local layer files with their rewritten form, but not yet uploaded + // metadata to reflect that. If we restart here, the replaced layer files will look invalid (size mismatch + // to remote index) and be removed. This is inefficient but safe. + fail::fail_point!("compact-shard-ancestors-localonly"); // Update the LayerMap so that readers will use the new layers, and enqueue it for writing to remote storage - self.rewrite_layers(replace_layers, drop_layers).await?; + self.rewrite_layers(replace_image_layers, drop_layers) + .await?; + + fail::fail_point!("compact-shard-ancestors-enqueued"); // We wait for all uploads to complete before finishing this compaction stage. This is not // necessary for correctness, but it simplifies testing, and avoids proceeding with another @@ -291,6 +350,8 @@ impl Timeline { // load. 
self.remote_client.wait_completion().await?; + fail::fail_point!("compact-shard-ancestors-persistent"); + Ok(()) } diff --git a/pageserver/src/tenant/timeline/init.rs b/pageserver/src/tenant/timeline/init.rs index 0cbaf39555..5bc67c7133 100644 --- a/pageserver/src/tenant/timeline/init.rs +++ b/pageserver/src/tenant/timeline/init.rs @@ -7,19 +7,20 @@ use crate::{ index::{IndexPart, LayerFileMetadata}, }, storage_layer::LayerName, - Generation, }, }; use anyhow::Context; use camino::{Utf8Path, Utf8PathBuf}; -use pageserver_api::shard::ShardIndex; -use std::{collections::HashMap, str::FromStr}; +use std::{ + collections::{hash_map, HashMap}, + str::FromStr, +}; use utils::lsn::Lsn; /// Identified files in the timeline directory. pub(super) enum Discovered { /// The only one we care about - Layer(LayerName, Utf8PathBuf, u64), + Layer(LayerName, LocalLayerFileMetadata), /// Old ephmeral files from previous launches, should be removed Ephemeral(String), /// Old temporary timeline files, unsure what these really are, should be removed @@ -27,7 +28,7 @@ pub(super) enum Discovered { /// Temporary on-demand download files, should be removed TemporaryDownload(String), /// Backup file from previously future layers - IgnoredBackup, + IgnoredBackup(Utf8PathBuf), /// Unrecognized, warn about these Unknown(String), } @@ -43,12 +44,15 @@ pub(super) fn scan_timeline_dir(path: &Utf8Path) -> anyhow::Result { let file_size = direntry.metadata()?.len(); - Discovered::Layer(file_name, direntry.path().to_owned(), file_size) + Discovered::Layer( + file_name, + LocalLayerFileMetadata::new(direntry.path().to_owned(), file_size), + ) } Err(_) => { if file_name.ends_with(".old") { // ignore these - Discovered::IgnoredBackup + Discovered::IgnoredBackup(direntry.path().to_owned()) } else if remote_timeline_client::is_temp_download_file(direntry.path()) { Discovered::TemporaryDownload(file_name) } else if is_ephemeral_file(&file_name) { @@ -71,37 +75,32 @@ pub(super) fn scan_timeline_dir(path: &Utf8Path) -> anyhow::Result Self { + pub fn new(local_path: Utf8PathBuf, file_size: u64) -> Self { Self { local_path, - metadata: LayerFileMetadata::new(file_size, generation, shard), + file_size, } } } -/// Decision on what to do with a layer file after considering its local and remote metadata. +/// For a layer that is present in remote metadata, this type describes how to handle +/// it during startup: it is either Resident (and we have some metadata about a local file), +/// or it is Evicted (and we only have remote metadata). #[derive(Clone, Debug)] pub(super) enum Decision { /// The layer is not present locally. Evicted(LayerFileMetadata), - /// The layer is present locally, but local metadata does not match remote; we must - /// delete it and treat it as evicted. - UseRemote { + /// The layer is present locally, and metadata matches: we may hook up this layer to the + /// existing file in local storage. + Resident { local: LocalLayerFileMetadata, remote: LayerFileMetadata, }, - /// The layer is present locally, and metadata matches. - UseLocal(LocalLayerFileMetadata), } /// A layer needs to be left out of the layer map. @@ -117,77 +116,81 @@ pub(super) enum DismissedLayer { /// In order to make crash safe updates to layer map, we must dismiss layers which are only /// found locally or not yet included in the remote `index_part.json`. LocalOnly(LocalLayerFileMetadata), + + /// The layer exists in remote storage but the local layer's metadata (e.g. 
file size) + /// does not match it + BadMetadata(LocalLayerFileMetadata), } /// Merges local discoveries and remote [`IndexPart`] to a collection of decisions. pub(super) fn reconcile( - discovered: Vec<(LayerName, Utf8PathBuf, u64)>, + local_layers: Vec<(LayerName, LocalLayerFileMetadata)>, index_part: Option<&IndexPart>, disk_consistent_lsn: Lsn, - generation: Generation, - shard: ShardIndex, ) -> Vec<(LayerName, Result)> { - use Decision::*; + let Some(index_part) = index_part else { + // If we have no remote metadata, no local layer files are considered valid to load + return local_layers + .into_iter() + .map(|(layer_name, local_metadata)| { + (layer_name, Err(DismissedLayer::LocalOnly(local_metadata))) + }) + .collect(); + }; - // name => (local_metadata, remote_metadata) - type Collected = - HashMap, Option)>; + let mut result = Vec::new(); - let mut discovered = discovered - .into_iter() - .map(|(layer_name, local_path, file_size)| { - ( - layer_name, - // The generation and shard here will be corrected to match IndexPart in the merge below, unless - // it is not in IndexPart, in which case using our current generation makes sense - // because it will be uploaded in this generation. - ( - Some(LocalLayerFileMetadata::new( - local_path, file_size, generation, shard, - )), - None, - ), - ) - }) - .collect::(); + let mut remote_layers = HashMap::new(); - // merge any index_part information, when available + // Construct Decisions for layers that are found locally, if they're in remote metadata. Otherwise + // construct DismissedLayers to get rid of them. + for (layer_name, local_metadata) in local_layers { + let Some(remote_metadata) = index_part.layer_metadata.get(&layer_name) else { + result.push((layer_name, Err(DismissedLayer::LocalOnly(local_metadata)))); + continue; + }; + + if remote_metadata.file_size != local_metadata.file_size { + result.push((layer_name, Err(DismissedLayer::BadMetadata(local_metadata)))); + continue; + } + + remote_layers.insert( + layer_name, + Decision::Resident { + local: local_metadata, + remote: remote_metadata.clone(), + }, + ); + } + + // Construct Decision for layers that were not found locally index_part - .as_ref() - .map(|ip| ip.layer_metadata.iter()) - .into_iter() - .flatten() - .map(|(name, metadata)| (name, metadata.clone())) + .layer_metadata + .iter() .for_each(|(name, metadata)| { - if let Some(existing) = discovered.get_mut(name) { - existing.1 = Some(metadata); - } else { - discovered.insert(name.to_owned(), (None, Some(metadata))); + if let hash_map::Entry::Vacant(entry) = remote_layers.entry(name.clone()) { + entry.insert(Decision::Evicted(metadata.clone())); } }); - discovered - .into_iter() - .map(|(name, (local, remote))| { - let decision = if name.is_in_future(disk_consistent_lsn) { - Err(DismissedLayer::Future { local }) - } else { - match (local, remote) { - (Some(local), Some(remote)) if local.metadata != remote => { - Ok(UseRemote { local, remote }) - } - (Some(x), Some(_)) => Ok(UseLocal(x)), - (None, Some(x)) => Ok(Evicted(x)), - (Some(x), None) => Err(DismissedLayer::LocalOnly(x)), - (None, None) => { - unreachable!("there must not be any non-local non-remote files") - } - } - }; + // For layers that were found in authoritative remote metadata, apply a final check that they are within + // the disk_consistent_lsn. 
+ result.extend(remote_layers.into_iter().map(|(name, decision)| { + if name.is_in_future(disk_consistent_lsn) { + match decision { + Decision::Evicted(_remote) => (name, Err(DismissedLayer::Future { local: None })), + Decision::Resident { + local, + remote: _remote, + } => (name, Err(DismissedLayer::Future { local: Some(local) })), + } + } else { + (name, Ok(decision)) + } + })); - (name, decision) - }) - .collect::>() + result } pub(super) fn cleanup(path: &Utf8Path, kind: &str) -> anyhow::Result<()> { @@ -196,25 +199,15 @@ pub(super) fn cleanup(path: &Utf8Path, kind: &str) -> anyhow::Result<()> { std::fs::remove_file(path).with_context(|| format!("failed to remove {kind} at {path}")) } -pub(super) fn cleanup_local_file_for_remote( - local: &LocalLayerFileMetadata, - remote: &LayerFileMetadata, -) -> anyhow::Result<()> { - let local_size = local.metadata.file_size; - let remote_size = remote.file_size; +pub(super) fn cleanup_local_file_for_remote(local: &LocalLayerFileMetadata) -> anyhow::Result<()> { + let local_size = local.file_size; let path = &local.local_path; - let file_name = path.file_name().expect("must be file path"); - tracing::warn!("removing local file {file_name:?} because it has unexpected length {local_size}; length in remote index is {remote_size}"); - if let Err(err) = crate::tenant::timeline::rename_to_backup(path) { - assert!( - path.exists(), - "we would leave the local_layer without a file if this does not hold: {path}", - ); - Err(err) - } else { - Ok(()) - } + tracing::warn!( + "removing local file {file_name:?} because it has unexpected length {local_size};" + ); + + std::fs::remove_file(path).with_context(|| format!("failed to remove layer at {path}")) } pub(super) fn cleanup_future_layer( @@ -236,8 +229,8 @@ pub(super) fn cleanup_local_only_file( ) -> anyhow::Result<()> { let kind = name.kind(); tracing::info!( - "found local-only {kind} layer {name}, metadata {:?}", - local.metadata + "found local-only {kind} layer {name} size {}", + local.file_size ); std::fs::remove_file(&local.local_path)?; Ok(()) diff --git a/pageserver/src/tenant/timeline/layer_manager.rs b/pageserver/src/tenant/timeline/layer_manager.rs index 248420e632..884b71df75 100644 --- a/pageserver/src/tenant/timeline/layer_manager.rs +++ b/pageserver/src/tenant/timeline/layer_manager.rs @@ -212,13 +212,34 @@ impl LayerManager { &mut self, rewrite_layers: &[(Layer, ResidentLayer)], drop_layers: &[Layer], - _metrics: &TimelineMetrics, + metrics: &TimelineMetrics, ) { let mut updates = self.layer_map.batch_update(); + for (old_layer, new_layer) in rewrite_layers { + debug_assert_eq!( + old_layer.layer_desc().key_range, + new_layer.layer_desc().key_range + ); + debug_assert_eq!( + old_layer.layer_desc().lsn_range, + new_layer.layer_desc().lsn_range + ); - // TODO: implement rewrites (currently this code path only used for drops) - assert!(rewrite_layers.is_empty()); + // Safety: we may never rewrite the same file in-place. Callers are responsible + // for ensuring that they only rewrite layers after something changes the path, + // such as an increment in the generation number. 
+ assert_ne!(old_layer.local_path(), new_layer.local_path()); + Self::delete_historic_layer(old_layer, &mut updates, &mut self.layer_fmgr); + + Self::insert_historic_layer( + new_layer.as_ref().clone(), + &mut updates, + &mut self.layer_fmgr, + ); + + metrics.record_new_file_metrics(new_layer.layer_desc().file_size); + } for l in drop_layers { Self::delete_historic_layer(l, &mut updates, &mut self.layer_fmgr); } diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index 796ae7217b..36aa18f1f9 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -2667,7 +2667,9 @@ class NeonPageserver(PgProtocol, LogUtils): tenant_id, generation=self.env.storage_controller.attach_hook_issue(tenant_id, self.id) ) - def list_layers(self, tenant_id: TenantId, timeline_id: TimelineId) -> list[Path]: + def list_layers( + self, tenant_id: Union[TenantId, TenantShardId], timeline_id: TimelineId + ) -> list[Path]: """ Inspect local storage on a pageserver to discover which layer files are present. diff --git a/test_runner/regress/test_sharding.py b/test_runner/regress/test_sharding.py index bbb1ad0c6d..545ba05b17 100644 --- a/test_runner/regress/test_sharding.py +++ b/test_runner/regress/test_sharding.py @@ -177,7 +177,16 @@ def test_sharding_split_unsharded( env.storage_controller.consistency_check() -def test_sharding_split_compaction(neon_env_builder: NeonEnvBuilder): +@pytest.mark.parametrize( + "failpoint", + [ + None, + "compact-shard-ancestors-localonly", + "compact-shard-ancestors-enqueued", + "compact-shard-ancestors-persistent", + ], +) +def test_sharding_split_compaction(neon_env_builder: NeonEnvBuilder, failpoint: Optional[str]): """ Test that after a split, we clean up parent layer data in the child shards via compaction. """ @@ -196,6 +205,11 @@ def test_sharding_split_compaction(neon_env_builder: NeonEnvBuilder): "image_layer_creation_check_threshold": "0", } + neon_env_builder.storage_controller_config = { + # Default neon_local uses a small timeout: use a longer one to tolerate longer pageserver restarts. + "max_unavailable": "300s" + } + env = neon_env_builder.init_start(initial_tenant_conf=TENANT_CONF) tenant_id = env.initial_tenant timeline_id = env.initial_timeline @@ -213,6 +227,10 @@ def test_sharding_split_compaction(neon_env_builder: NeonEnvBuilder): # Split one shard into two shards = env.storage_controller.tenant_shard_split(tenant_id, shard_count=2) + # Let all shards move into their stable locations, so that during subsequent steps we + # don't have reconciles in progress (simpler to reason about what messages we expect in logs) + env.storage_controller.reconcile_until_idle() + # Check we got the shard IDs we expected assert env.storage_controller.inspect(TenantShardId(tenant_id, 0, 2)) is not None assert env.storage_controller.inspect(TenantShardId(tenant_id, 1, 2)) is not None @@ -237,6 +255,90 @@ def test_sharding_split_compaction(neon_env_builder: NeonEnvBuilder): # Compaction shouldn't make anything unreadable workload.validate() + # Force a generation increase: layer rewrites are a long-term thing and only happen after + # the generation has increased. 
+ env.pageserver.stop() + env.pageserver.start() + + # Cleanup part 2: once layers are outside the PITR window, they will be rewritten if they are partially redundant + env.storage_controller.pageserver_api().set_tenant_config(tenant_id, {"pitr_interval": "0s"}) + env.storage_controller.reconcile_until_idle() + + for shard in shards: + ps = env.get_tenant_pageserver(shard) + + # Apply failpoints for the layer-rewriting phase: this is the area of code that has sensitive behavior + # across restarts, as we will have local layer files that temporarily disagree with the remote metadata + # for the same local layer file name. + if failpoint is not None: + ps.http_client().configure_failpoints((failpoint, "exit")) + + # Do a GC to update gc_info (compaction uses this to decide whether a layer is to be rewritten) + # Set gc_horizon=0 to let PITR horizon control GC cutoff exclusively. + ps.http_client().timeline_gc(shard, timeline_id, gc_horizon=0) + + # We will compare stats before + after compaction + detail_before = ps.http_client().timeline_detail(shard, timeline_id) + + # Invoke compaction: this should rewrite layers that are behind the pitr horizon + try: + ps.http_client().timeline_compact(shard, timeline_id) + except requests.ConnectionError as e: + if failpoint is None: + raise e + else: + log.info(f"Compaction failed (failpoint={failpoint}): {e}") + + if failpoint in ( + "compact-shard-ancestors-localonly", + "compact-shard-ancestors-enqueued", + ): + # If we left local files that don't match remote metadata, we expect warnings on next startup + env.pageserver.allowed_errors.append( + ".*removing local file .+ because it has unexpected length.*" + ) + + # Post-failpoint: we check that the pageserver comes back online happily. + env.pageserver.running = False + env.pageserver.start() + else: + assert failpoint is None # We shouldn't reach success path if a failpoint was set + + detail_after = ps.http_client().timeline_detail(shard, timeline_id) + + # Physical size should shrink because layers are smaller + assert detail_after["current_physical_size"] < detail_before["current_physical_size"] + + # Validate size statistics + for shard in shards: + ps = env.get_tenant_pageserver(shard) + timeline_info = ps.http_client().timeline_detail(shard, timeline_id) + reported_size = timeline_info["current_physical_size"] + layer_paths = ps.list_layers(shard, timeline_id) + measured_size = 0 + for p in layer_paths: + abs_path = ps.timeline_dir(shard, timeline_id) / p + measured_size += os.stat(abs_path).st_size + + log.info( + f"shard {shard} reported size {reported_size}, measured size {measured_size} ({len(layer_paths)} layers)" + ) + + if failpoint in ( + "compact-shard-ancestors-localonly", + "compact-shard-ancestors-enqueued", + ): + # If we injected a failure between local rewrite and remote upload, then after + # restart we may end up with neither version of the file on local disk (the new file + # is cleaned up because it doesn't matchc remote metadata). So local size isn't + # necessarily going to match remote physical size. 
+ continue + + assert measured_size == reported_size + + # Compaction shouldn't make anything unreadable + workload.validate() + def test_sharding_split_smoke( neon_env_builder: NeonEnvBuilder, From 1455f5a2612a95d1c8fe3f68311cb42fc4785523 Mon Sep 17 00:00:00 2001 From: John Spray Date: Fri, 24 May 2024 09:45:34 +0100 Subject: [PATCH 083/220] pageserver: revert concurrent secondary downloads, make DownloadStream always yield Err after cancel (#7866) ## Problem Ongoing hunt for secondary location shutdown hang issues. ## Summary of changes - Revert the functional changes from #7675 - Tweak a log in secondary downloads to make it more apparent when we drop out on cancellation - Modify DownloadStream's behavior to always return an Err after it has been cancelled. This _should_ not impact anything, but it makes the behavior simpler to reason about (e.g. even if the poll function somehow got called again, it could never end up in an un-cancellable state) Related #https://github.com/neondatabase/cloud/issues/13576 --- libs/remote_storage/src/azure_blob.rs | 5 - libs/remote_storage/src/lib.rs | 34 ----- libs/remote_storage/src/local_fs.rs | 14 +- libs/remote_storage/src/s3_bucket.rs | 8 +- libs/remote_storage/src/simulate_failures.rs | 6 +- pageserver/src/tenant/secondary/downloader.rs | 120 ++---------------- 6 files changed, 18 insertions(+), 169 deletions(-) diff --git a/libs/remote_storage/src/azure_blob.rs b/libs/remote_storage/src/azure_blob.rs index 220d4ef115..24c1248304 100644 --- a/libs/remote_storage/src/azure_blob.rs +++ b/libs/remote_storage/src/azure_blob.rs @@ -29,7 +29,6 @@ use http_types::{StatusCode, Url}; use tokio_util::sync::CancellationToken; use tracing::debug; -use crate::RemoteStorageActivity; use crate::{ error::Cancelled, s3_bucket::RequestKind, AzureConfig, ConcurrencyLimiter, Download, DownloadError, Listing, ListingMode, RemotePath, RemoteStorage, StorageMetadata, @@ -526,10 +525,6 @@ impl RemoteStorage for AzureBlobStorage { // https://learn.microsoft.com/en-us/azure/storage/blobs/point-in-time-restore-overview Err(TimeTravelError::Unimplemented) } - - fn activity(&self) -> RemoteStorageActivity { - self.concurrency_limiter.activity() - } } pin_project_lite::pin_project! { diff --git a/libs/remote_storage/src/lib.rs b/libs/remote_storage/src/lib.rs index f024021507..708662f20f 100644 --- a/libs/remote_storage/src/lib.rs +++ b/libs/remote_storage/src/lib.rs @@ -263,17 +263,6 @@ pub trait RemoteStorage: Send + Sync + 'static { done_if_after: SystemTime, cancel: &CancellationToken, ) -> Result<(), TimeTravelError>; - - /// Query how busy we currently are: may be used by callers which wish to politely - /// back off if there are already a lot of operations underway. - fn activity(&self) -> RemoteStorageActivity; -} - -pub struct RemoteStorageActivity { - pub read_available: usize, - pub read_total: usize, - pub write_available: usize, - pub write_total: usize, } /// DownloadStream is sensitive to the timeout and cancellation used with the original @@ -455,15 +444,6 @@ impl GenericRemoteStorage> { } } } - - pub fn activity(&self) -> RemoteStorageActivity { - match self { - Self::LocalFs(s) => s.activity(), - Self::AwsS3(s) => s.activity(), - Self::AzureBlob(s) => s.activity(), - Self::Unreliable(s) => s.activity(), - } - } } impl GenericRemoteStorage { @@ -794,9 +774,6 @@ struct ConcurrencyLimiter { // The helps to ensure we don't exceed the thresholds. 
write: Arc, read: Arc, - - write_total: usize, - read_total: usize, } impl ConcurrencyLimiter { @@ -825,21 +802,10 @@ impl ConcurrencyLimiter { Arc::clone(self.for_kind(kind)).acquire_owned().await } - fn activity(&self) -> RemoteStorageActivity { - RemoteStorageActivity { - read_available: self.read.available_permits(), - read_total: self.read_total, - write_available: self.write.available_permits(), - write_total: self.write_total, - } - } - fn new(limit: usize) -> ConcurrencyLimiter { Self { read: Arc::new(Semaphore::new(limit)), write: Arc::new(Semaphore::new(limit)), - read_total: limit, - write_total: limit, } } } diff --git a/libs/remote_storage/src/local_fs.rs b/libs/remote_storage/src/local_fs.rs index f12f6590a3..1f7bcfc982 100644 --- a/libs/remote_storage/src/local_fs.rs +++ b/libs/remote_storage/src/local_fs.rs @@ -23,8 +23,8 @@ use tokio_util::{io::ReaderStream, sync::CancellationToken}; use utils::crashsafe::path_with_suffix_extension; use crate::{ - Download, DownloadError, Listing, ListingMode, RemotePath, RemoteStorageActivity, - TimeTravelError, TimeoutOrCancel, REMOTE_STORAGE_PREFIX_SEPARATOR, + Download, DownloadError, Listing, ListingMode, RemotePath, TimeTravelError, TimeoutOrCancel, + REMOTE_STORAGE_PREFIX_SEPARATOR, }; use super::{RemoteStorage, StorageMetadata}; @@ -605,16 +605,6 @@ impl RemoteStorage for LocalFs { ) -> Result<(), TimeTravelError> { Err(TimeTravelError::Unimplemented) } - - fn activity(&self) -> RemoteStorageActivity { - // LocalFS has no concurrency limiting: give callers the impression that plenty of units are available - RemoteStorageActivity { - read_available: 16, - read_total: 16, - write_available: 16, - write_total: 16, - } - } } fn storage_metadata_path(original_path: &Utf8Path) -> Utf8PathBuf { diff --git a/libs/remote_storage/src/s3_bucket.rs b/libs/remote_storage/src/s3_bucket.rs index 0f6772b274..c3d6c75e20 100644 --- a/libs/remote_storage/src/s3_bucket.rs +++ b/libs/remote_storage/src/s3_bucket.rs @@ -47,8 +47,8 @@ use utils::backoff; use super::StorageMetadata; use crate::{ error::Cancelled, support::PermitCarrying, ConcurrencyLimiter, Download, DownloadError, - Listing, ListingMode, RemotePath, RemoteStorage, RemoteStorageActivity, S3Config, - TimeTravelError, TimeoutOrCancel, MAX_KEYS_PER_DELETE, REMOTE_STORAGE_PREFIX_SEPARATOR, + Listing, ListingMode, RemotePath, RemoteStorage, S3Config, TimeTravelError, TimeoutOrCancel, + MAX_KEYS_PER_DELETE, REMOTE_STORAGE_PREFIX_SEPARATOR, }; pub(super) mod metrics; @@ -975,10 +975,6 @@ impl RemoteStorage for S3Bucket { } Ok(()) } - - fn activity(&self) -> RemoteStorageActivity { - self.concurrency_limiter.activity() - } } /// On drop (cancellation) count towards [`metrics::BucketMetrics::cancelled_waits`]. 
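As an aside on the `DownloadStream` change summarized above (once cancelled, it always yields `Err`), the latching behaviour can be sketched as a small stream wrapper. This is only an illustrative sketch, not the real `DownloadStream` in `libs/remote_storage`: the wrapper name, the `io::Result<Bytes>` item type, and the error message are assumptions, and a real implementation would also poll `cancel.cancelled()` so the task is woken when cancellation fires.

```rust
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};

use bytes::Bytes;
use futures::Stream;
use tokio_util::sync::CancellationToken;

/// Wraps an inner byte stream and latches cancellation: after the token fires,
/// every subsequent poll returns an error, so a stray poll can never put the
/// stream back into an un-cancelled state.
struct CancelLatchedStream<S> {
    inner: S,
    cancel: CancellationToken,
    cancelled: bool,
}

impl<S> Stream for CancelLatchedStream<S>
where
    S: Stream<Item = io::Result<Bytes>> + Unpin,
{
    type Item = io::Result<Bytes>;

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let this = self.get_mut();

        // Latch: once cancellation has been observed, never consult the inner stream again.
        if this.cancelled || this.cancel.is_cancelled() {
            this.cancelled = true;
            return Poll::Ready(Some(Err(io::Error::new(
                io::ErrorKind::Other,
                "download cancelled",
            ))));
        }

        Pin::new(&mut this.inner).poll_next(cx)
    }
}
```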
diff --git a/libs/remote_storage/src/simulate_failures.rs b/libs/remote_storage/src/simulate_failures.rs index 66522e04ca..c467a2d196 100644 --- a/libs/remote_storage/src/simulate_failures.rs +++ b/libs/remote_storage/src/simulate_failures.rs @@ -12,7 +12,7 @@ use tokio_util::sync::CancellationToken; use crate::{ Download, DownloadError, GenericRemoteStorage, Listing, ListingMode, RemotePath, RemoteStorage, - RemoteStorageActivity, StorageMetadata, TimeTravelError, + StorageMetadata, TimeTravelError, }; pub struct UnreliableWrapper { @@ -213,8 +213,4 @@ impl RemoteStorage for UnreliableWrapper { .time_travel_recover(prefix, timestamp, done_if_after, cancel) .await } - - fn activity(&self) -> RemoteStorageActivity { - self.inner.activity() - } } diff --git a/pageserver/src/tenant/secondary/downloader.rs b/pageserver/src/tenant/secondary/downloader.rs index 789f1a0fa9..0ec1bd649b 100644 --- a/pageserver/src/tenant/secondary/downloader.rs +++ b/pageserver/src/tenant/secondary/downloader.rs @@ -45,10 +45,10 @@ use crate::tenant::{ use camino::Utf8PathBuf; use chrono::format::{DelayedFormat, StrftimeItems}; -use futures::{Future, StreamExt}; +use futures::Future; use pageserver_api::models::SecondaryProgress; use pageserver_api::shard::TenantShardId; -use remote_storage::{DownloadError, Etag, GenericRemoteStorage, RemoteStorageActivity}; +use remote_storage::{DownloadError, Etag, GenericRemoteStorage}; use tokio_util::sync::CancellationToken; use tracing::{info_span, instrument, warn, Instrument}; @@ -67,12 +67,6 @@ use super::{ /// download, if the uploader populated it. const DEFAULT_DOWNLOAD_INTERVAL: Duration = Duration::from_millis(60000); -/// Range of concurrency we may use when downloading layers within a timeline. This is independent -/// for each tenant we're downloading: the concurrency of _tenants_ is defined separately in -/// `PageServerConf::secondary_download_concurrency` -const MAX_LAYER_CONCURRENCY: usize = 16; -const MIN_LAYER_CONCURRENCY: usize = 1; - pub(super) async fn downloader_task( tenant_manager: Arc, remote_storage: GenericRemoteStorage, @@ -81,15 +75,14 @@ pub(super) async fn downloader_task( cancel: CancellationToken, root_ctx: RequestContext, ) { - // How many tenants' secondary download operations we will run concurrently - let tenant_concurrency = tenant_manager.get_conf().secondary_download_concurrency; + let concurrency = tenant_manager.get_conf().secondary_download_concurrency; let generator = SecondaryDownloader { tenant_manager, remote_storage, root_ctx, }; - let mut scheduler = Scheduler::new(generator, tenant_concurrency); + let mut scheduler = Scheduler::new(generator, concurrency); scheduler .run(command_queue, background_jobs_can_start, cancel) @@ -414,7 +407,7 @@ impl JobGenerator { - tracing::debug!("Shut down while downloading"); + tracing::info!("Shut down while downloading"); }, Err(UpdateError::Deserialize(e)) => { tracing::error!("Corrupt content while downloading tenant: {e}"); @@ -848,8 +841,6 @@ impl<'a> TenantDownloader<'a> { tracing::debug!(timeline_id=%timeline.timeline_id, "Downloading layers, {} in heatmap", timeline.layers.len()); - let mut download_futs = Vec::new(); - // Download heatmap layers that are not present on local disk, or update their // access time if they are already present. 
for layer in timeline.layers { @@ -922,31 +913,14 @@ impl<'a> TenantDownloader<'a> { } } - download_futs.push(self.download_layer( - tenant_shard_id, - &timeline.timeline_id, - layer, - ctx, - )); - } - - // Break up layer downloads into chunks, so that for each chunk we can re-check how much - // concurrency to use based on activity level of remote storage. - while !download_futs.is_empty() { - let chunk = - download_futs.split_off(download_futs.len().saturating_sub(MAX_LAYER_CONCURRENCY)); - - let concurrency = Self::layer_concurrency(self.remote_storage.activity()); - - let mut result_stream = futures::stream::iter(chunk).buffered(concurrency); - let mut result_stream = std::pin::pin!(result_stream); - while let Some(result) = result_stream.next().await { - match result { - Err(e) => return Err(e), - Ok(None) => { - // No error, but we didn't download the layer. Don't mark it touched - } - Ok(Some(layer)) => touched.push(layer), + match self + .download_layer(tenant_shard_id, &timeline.timeline_id, layer, ctx) + .await? + { + Some(layer) => touched.push(layer), + None => { + // Not an error but we didn't download it: remote layer is missing. Don't add it to the list of + // things to consider touched. } } } @@ -1081,19 +1055,6 @@ impl<'a> TenantDownloader<'a> { Ok(Some(layer)) } - - /// Calculate the currently allowed parallelism of layer download tasks, based on activity level of the remote storage - fn layer_concurrency(activity: RemoteStorageActivity) -> usize { - // When less than 75% of units are available, use minimum concurrency. Else, do a linear mapping - // of our concurrency range to the units available within the remaining 25%. - let clamp_at = (activity.read_total * 3) / 4; - if activity.read_available > clamp_at { - (MAX_LAYER_CONCURRENCY * (activity.read_available - clamp_at)) - / (activity.read_total - clamp_at) - } else { - MIN_LAYER_CONCURRENCY - } - } } /// Scan local storage and build up Layer objects based on the metadata in a HeatMapTimeline @@ -1217,58 +1178,3 @@ async fn init_timeline_state( detail } - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn layer_concurrency() { - // Totally idle - assert_eq!( - TenantDownloader::layer_concurrency(RemoteStorageActivity { - read_available: 16, - read_total: 16, - write_available: 16, - write_total: 16 - }), - MAX_LAYER_CONCURRENCY - ); - - // Totally busy - assert_eq!( - TenantDownloader::layer_concurrency(RemoteStorageActivity { - read_available: 0, - read_total: 16, - - write_available: 16, - write_total: 16 - }), - MIN_LAYER_CONCURRENCY - ); - - // Edge of the range at which we interpolate - assert_eq!( - TenantDownloader::layer_concurrency(RemoteStorageActivity { - read_available: 12, - read_total: 16, - - write_available: 16, - write_total: 16 - }), - MIN_LAYER_CONCURRENCY - ); - - // Midpoint of the range in which we interpolate - assert_eq!( - TenantDownloader::layer_concurrency(RemoteStorageActivity { - read_available: 14, - read_total: 16, - - write_available: 16, - write_total: 16 - }), - MAX_LAYER_CONCURRENCY / 2 - ); - } -} From a3f5b836772d54464e18302beb132f4c19b8adf8 Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Fri, 24 May 2024 16:07:58 +0300 Subject: [PATCH 084/220] chore: lower gate guard drop logging threshold to 100ms (#7862) We have some 1001ms cases, which do not yield gate guard context. 
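For context, the log being tuned here comes from a nag-on-slow-close pattern: the close future is raced against a short timeout, and only if it misses the deadline do we emit the "still closing" message. A minimal sketch of that pattern, assuming a `do_close` future and `tracing` for logging (the real `Gate::close` carries more context than shown here):

```rust
use std::time::Duration;

async fn close_with_nag<F>(do_close: F)
where
    F: std::future::Future<Output = ()>,
{
    let started_at = std::time::Instant::now();
    let mut do_close = std::pin::pin!(do_close);

    // If closing finishes within the nag threshold, stay quiet.
    let nag_after = Duration::from_millis(100);
    if tokio::time::timeout(nag_after, &mut do_close).await.is_ok() {
        return;
    }

    // Otherwise keep waiting, but leave a trace so slow closes show up in logs.
    tracing::info!("gate is still closing after {nag_after:?}, waiting...");
    do_close.await;
    tracing::info!("gate closed after {:?}", started_at.elapsed());
}
```

Lowering `nag_after` from 1s to 100ms simply widens the set of closes that leave such a trace.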
--- libs/utils/src/sync/gate.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/libs/utils/src/sync/gate.rs b/libs/utils/src/sync/gate.rs index c34176af57..156b99a010 100644 --- a/libs/utils/src/sync/gate.rs +++ b/libs/utils/src/sync/gate.rs @@ -135,7 +135,8 @@ impl Gate { let started_at = std::time::Instant::now(); let mut do_close = std::pin::pin!(self.do_close()); - let nag_after = Duration::from_secs(1); + // with 1s we rarely saw anything, let's try if we get more gate closing reasons with 100ms + let nag_after = Duration::from_millis(100); let Err(_timeout) = tokio::time::timeout(nag_after, &mut do_close).await else { return; From 71a7fd983e13d71508350c750b32081e0bb7832d Mon Sep 17 00:00:00 2001 From: Alexander Bayandin Date: Fri, 24 May 2024 14:11:51 +0100 Subject: [PATCH 085/220] CI(release): tune Storage & Compute release PR title (#7870) ## Problem A title for automatic proxy release PRs is `Proxy release`, and for storage & compute, it's just `Release` ## Summary of changes - Amend PR title for Storage & Compute releases to "Storage & Compute release" --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index b2c9a19588..fe24f6330e 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -53,7 +53,7 @@ jobs: GH_TOKEN: ${{ secrets.CI_ACCESS_TOKEN }} run: | cat << EOF > body.md - ## Release ${RELEASE_DATE} + ## Storage & Compute release ${RELEASE_DATE} **Please merge this Pull Request using 'Create a merge commit' button** EOF From 43f9a16e4608789a2c190aa5c32303a0d0182f30 Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Fri, 24 May 2024 17:56:12 +0100 Subject: [PATCH 086/220] proxy: fix websocket buffering (#7878) ## Problem Seems the websocket buffering was broken for large query responses only ## Summary of changes Move buffering until after the underlying stream is ready. Tested locally confirms this fixes the bug. 
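For illustration, the ordering that change establishes (ask the underlying sink whether it is ready before moving the caller's bytes into the send buffer) looks roughly like the sketch below. The `SinkWriter` type and the `Bytes`-valued sink are assumptions, not the proxy's actual websocket types; the point is only that when the sink returns `Pending`, nothing has been staged yet, so the same bytes cannot end up buffered twice.

```rust
use std::io;
use std::pin::Pin;
use std::task::{ready, Context, Poll};

use bytes::{BufMut, Bytes, BytesMut};
use futures::Sink;

struct SinkWriter<S> {
    sink: S,
    send: BytesMut,
}

impl<S> SinkWriter<S>
where
    S: Sink<Bytes, Error = io::Error> + Unpin,
{
    /// poll_write in the fixed order: readiness first, buffering second.
    fn poll_write(&mut self, cx: &mut Context<'_>, buf: &[u8]) -> Poll<io::Result<usize>> {
        // First make sure the sink can accept another item; on Pending we return
        // without having touched `send`, so a retry starts from a clean slate.
        ready!(Pin::new(&mut self.sink).poll_ready(cx))?;

        // Only now stage the bytes and hand them to the sink.
        self.send.put(buf);
        match Pin::new(&mut self.sink).start_send(self.send.split().freeze()) {
            Ok(()) => Poll::Ready(Ok(buf.len())),
            Err(e) => Poll::Ready(Err(e)),
        }
    }
}
```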
Also fixes the pg-sni-router missing metrics bug --- proxy/src/bin/pg_sni_router.rs | 3 +++ proxy/src/serverless/websocket.rs | 3 ++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/proxy/src/bin/pg_sni_router.rs b/proxy/src/bin/pg_sni_router.rs index fb16b76567..e1674049a6 100644 --- a/proxy/src/bin/pg_sni_router.rs +++ b/proxy/src/bin/pg_sni_router.rs @@ -9,6 +9,7 @@ use futures::future::Either; use itertools::Itertools; use proxy::config::TlsServerEndPoint; use proxy::context::RequestMonitoring; +use proxy::metrics::{Metrics, ThreadPoolMetrics}; use proxy::proxy::{copy_bidirectional_client_compute, run_until_cancelled}; use rustls::pki_types::PrivateKeyDer; use tokio::net::TcpListener; @@ -65,6 +66,8 @@ async fn main() -> anyhow::Result<()> { let _panic_hook_guard = utils::logging::replace_panic_hook_with_tracing_panic_hook(); let _sentry_guard = init_sentry(Some(GIT_VERSION.into()), &[]); + Metrics::install(Arc::new(ThreadPoolMetrics::new(0))); + let args = cli().get_matches(); let destination: String = args.get_one::("dest").unwrap().parse()?; diff --git a/proxy/src/serverless/websocket.rs b/proxy/src/serverless/websocket.rs index 61d6d60dbe..7d3153a3c1 100644 --- a/proxy/src/serverless/websocket.rs +++ b/proxy/src/serverless/websocket.rs @@ -51,9 +51,10 @@ impl AsyncWrite for WebSocketRw { ) -> Poll> { let this = self.project(); let mut stream = this.stream; - this.send.put(buf); ready!(stream.as_mut().poll_ready(cx).map_err(io_error))?; + + this.send.put(buf); match stream.as_mut().start_send(Frame::binary(this.send.split())) { Ok(()) => Poll::Ready(Ok(buf.len())), Err(e) => Poll::Ready(Err(io_error(e))), From 3797566c36c767155648ddde6c21ece4a24827cd Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 20 May 2024 16:21:57 +0300 Subject: [PATCH 087/220] safekeeper: test pull_timeline with WAL gc. Do pull_timeline while WAL is being removed. To this end - extract pausable_failpoint to utils, sprinkle pull_timeline with it - add 'checkpoint' sk http endpoint to force WAL removal. After fixing checking for pull file status code test fails so far which is expected. --- libs/utils/src/failpoint_support.rs | 27 +++++++ pageserver/src/tenant.rs | 27 +------ pageserver/src/tenant/delete.rs | 2 +- .../src/tenant/remote_timeline_client.rs | 1 + .../tenant/remote_timeline_client/upload.rs | 2 +- pageserver/src/tenant/tasks.rs | 2 +- pageserver/src/tenant/timeline.rs | 2 +- pageserver/src/tenant/timeline/delete.rs | 2 +- safekeeper/src/http/routes.rs | 24 ++++++ safekeeper/src/pull_timeline.rs | 10 +++ safekeeper/src/remove_wal.rs | 2 +- safekeeper/src/safekeeper.rs | 4 +- safekeeper/src/timeline.rs | 4 +- safekeeper/src/timeline_manager.rs | 2 +- test_runner/fixtures/common_types.py | 5 ++ test_runner/fixtures/neon_fixtures.py | 67 +++++++++++++++- test_runner/fixtures/safekeeper/http.py | 7 ++ test_runner/fixtures/utils.py | 22 ++++++ test_runner/regress/test_wal_acceptor.py | 79 ++++++++++++++++--- 19 files changed, 241 insertions(+), 50 deletions(-) diff --git a/libs/utils/src/failpoint_support.rs b/libs/utils/src/failpoint_support.rs index 8704b72921..870684b399 100644 --- a/libs/utils/src/failpoint_support.rs +++ b/libs/utils/src/failpoint_support.rs @@ -9,6 +9,33 @@ use serde::{Deserialize, Serialize}; use tokio_util::sync::CancellationToken; use tracing::*; +/// Declare a failpoint that can use the `pause` failpoint action. +/// We don't want to block the executor thread, hence, spawn_blocking + await. +#[macro_export] +macro_rules! 
pausable_failpoint { + ($name:literal) => { + if cfg!(feature = "testing") { + tokio::task::spawn_blocking({ + let current = tracing::Span::current(); + move || { + let _entered = current.entered(); + tracing::info!("at failpoint {}", $name); + fail::fail_point!($name); + } + }) + .await + .expect("spawn_blocking"); + } + }; + ($name:literal, $cond:expr) => { + if cfg!(feature = "testing") { + if $cond { + pausable_failpoint!($name) + } + } + }; +} + /// use with fail::cfg("$name", "return(2000)") /// /// The effect is similar to a "sleep(2000)" action, i.e. we sleep for the diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 540eb10ed2..e6bfd57a44 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -42,6 +42,7 @@ use utils::completion; use utils::crashsafe::path_with_suffix_extension; use utils::failpoint_support; use utils::fs_ext; +use utils::pausable_failpoint; use utils::sync::gate::Gate; use utils::sync::gate::GateGuard; use utils::timeout::timeout_cancellable; @@ -122,32 +123,6 @@ use utils::{ lsn::{Lsn, RecordLsn}, }; -/// Declare a failpoint that can use the `pause` failpoint action. -/// We don't want to block the executor thread, hence, spawn_blocking + await. -macro_rules! pausable_failpoint { - ($name:literal) => { - if cfg!(feature = "testing") { - tokio::task::spawn_blocking({ - let current = tracing::Span::current(); - move || { - let _entered = current.entered(); - tracing::info!("at failpoint {}", $name); - fail::fail_point!($name); - } - }) - .await - .expect("spawn_blocking"); - } - }; - ($name:literal, $cond:expr) => { - if cfg!(feature = "testing") { - if $cond { - pausable_failpoint!($name) - } - } - }; -} - pub mod blob_io; pub mod block_io; pub mod vectored_blob_io; diff --git a/pageserver/src/tenant/delete.rs b/pageserver/src/tenant/delete.rs index 3173a33dad..7c6640eaac 100644 --- a/pageserver/src/tenant/delete.rs +++ b/pageserver/src/tenant/delete.rs @@ -8,7 +8,7 @@ use tokio::sync::OwnedMutexGuard; use tokio_util::sync::CancellationToken; use tracing::{error, instrument, Instrument}; -use utils::{backoff, completion, crashsafe, fs_ext, id::TimelineId}; +use utils::{backoff, completion, crashsafe, fs_ext, id::TimelineId, pausable_failpoint}; use crate::{ config::PageServerConf, diff --git a/pageserver/src/tenant/remote_timeline_client.rs b/pageserver/src/tenant/remote_timeline_client.rs index 23904b9da4..73438a790f 100644 --- a/pageserver/src/tenant/remote_timeline_client.rs +++ b/pageserver/src/tenant/remote_timeline_client.rs @@ -197,6 +197,7 @@ pub(crate) use upload::upload_initdb_dir; use utils::backoff::{ self, exponential_backoff, DEFAULT_BASE_BACKOFF_SECONDS, DEFAULT_MAX_BACKOFF_SECONDS, }; +use utils::pausable_failpoint; use std::collections::{HashMap, VecDeque}; use std::sync::atomic::{AtomicU32, Ordering}; diff --git a/pageserver/src/tenant/remote_timeline_client/upload.rs b/pageserver/src/tenant/remote_timeline_client/upload.rs index caa843316f..e8e824f415 100644 --- a/pageserver/src/tenant/remote_timeline_client/upload.rs +++ b/pageserver/src/tenant/remote_timeline_client/upload.rs @@ -9,7 +9,7 @@ use std::time::SystemTime; use tokio::fs::{self, File}; use tokio::io::AsyncSeekExt; use tokio_util::sync::CancellationToken; -use utils::backoff; +use utils::{backoff, pausable_failpoint}; use super::Generation; use crate::tenant::remote_timeline_client::{ diff --git a/pageserver/src/tenant/tasks.rs b/pageserver/src/tenant/tasks.rs index ba2b8afd03..bf2d8a47b4 100644 --- a/pageserver/src/tenant/tasks.rs +++ 
b/pageserver/src/tenant/tasks.rs @@ -17,7 +17,7 @@ use crate::tenant::{Tenant, TenantState}; use rand::Rng; use tokio_util::sync::CancellationToken; use tracing::*; -use utils::{backoff, completion}; +use utils::{backoff, completion, pausable_failpoint}; static CONCURRENT_BACKGROUND_TASKS: once_cell::sync::Lazy = once_cell::sync::Lazy::new(|| { diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 1bdbddd95f..d4f6e25843 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -41,7 +41,7 @@ use tokio_util::sync::CancellationToken; use tracing::*; use utils::{ bin_ser::BeSer, - fs_ext, + fs_ext, pausable_failpoint, sync::gate::{Gate, GateGuard}, vec_map::VecMap, }; diff --git a/pageserver/src/tenant/timeline/delete.rs b/pageserver/src/tenant/timeline/delete.rs index b5dfc86e77..5ca8544d49 100644 --- a/pageserver/src/tenant/timeline/delete.rs +++ b/pageserver/src/tenant/timeline/delete.rs @@ -7,7 +7,7 @@ use anyhow::Context; use pageserver_api::{models::TimelineState, shard::TenantShardId}; use tokio::sync::OwnedMutexGuard; use tracing::{error, info, instrument, Instrument}; -use utils::{crashsafe, fs_ext, id::TimelineId}; +use utils::{crashsafe, fs_ext, id::TimelineId, pausable_failpoint}; use crate::{ config::PageServerConf, diff --git a/safekeeper/src/http/routes.rs b/safekeeper/src/http/routes.rs index 808bb1e490..4aacd3421d 100644 --- a/safekeeper/src/http/routes.rs +++ b/safekeeper/src/http/routes.rs @@ -287,6 +287,26 @@ async fn timeline_files_handler(request: Request) -> Result .map_err(|e| ApiError::InternalServerError(e.into())) } +/// Force persist control file and remove old WAL. +async fn timeline_checkpoint_handler(request: Request) -> Result, ApiError> { + check_permission(&request, None)?; + + let ttid = TenantTimelineId::new( + parse_request_param(&request, "tenant_id")?, + parse_request_param(&request, "timeline_id")?, + ); + + let tli = GlobalTimelines::get(ttid)?; + tli.maybe_persist_control_file(true) + .await + .map_err(ApiError::InternalServerError)?; + tli.remove_old_wal() + .await + .map_err(ApiError::InternalServerError)?; + + json_response(StatusCode::OK, ()) +} + /// Deactivates the timeline and removes its data directory. 
async fn timeline_delete_handler(mut request: Request) -> Result, ApiError> { let ttid = TenantTimelineId::new( @@ -553,6 +573,10 @@ pub fn make_router(conf: SafeKeeperConf) -> RouterBuilder "/v1/tenant/:tenant_id/timeline/:timeline_id/control_file", |r| request_span(r, patch_control_file_handler), ) + .post( + "/v1/tenant/:tenant_id/timeline/:timeline_id/checkpoint", + |r| request_span(r, timeline_checkpoint_handler), + ) // for tests .post("/v1/record_safekeeper_info/:tenant_id/:timeline_id", |r| { request_span(r, record_safekeeper_info) diff --git a/safekeeper/src/pull_timeline.rs b/safekeeper/src/pull_timeline.rs index 93b51f32c0..f7cc40f58a 100644 --- a/safekeeper/src/pull_timeline.rs +++ b/safekeeper/src/pull_timeline.rs @@ -11,6 +11,7 @@ use tracing::info; use utils::{ id::{TenantId, TenantTimelineId, TimelineId}, lsn::Lsn, + pausable_failpoint, }; use crate::{ @@ -162,6 +163,8 @@ async fn pull_timeline(status: TimelineStatus, host: String) -> Result filenames.remove(control_file_index); filenames.insert(0, "safekeeper.control".to_string()); + pausable_failpoint!("sk-pull-timeline-after-list-pausable"); + info!( "downloading {} files from safekeeper {}", filenames.len(), @@ -183,6 +186,13 @@ async fn pull_timeline(status: TimelineStatus, host: String) -> Result let mut file = tokio::fs::File::create(&file_path).await?; let mut response = client.get(&http_url).send().await?; + if response.status() != reqwest::StatusCode::OK { + bail!( + "pulling file {} failed: status is {}", + filename, + response.status() + ); + } while let Some(chunk) = response.chunk().await? { file.write_all(&chunk).await?; file.flush().await?; diff --git a/safekeeper/src/remove_wal.rs b/safekeeper/src/remove_wal.rs index 98ce671182..3400eee9b7 100644 --- a/safekeeper/src/remove_wal.rs +++ b/safekeeper/src/remove_wal.rs @@ -15,7 +15,7 @@ pub async fn task_main(_conf: SafeKeeperConf) -> anyhow::Result<()> { for tli in &tlis { let ttid = tli.ttid; async { - if let Err(e) = tli.maybe_persist_control_file().await { + if let Err(e) = tli.maybe_persist_control_file(false).await { warn!("failed to persist control file: {e}"); } if let Err(e) = tli.remove_old_wal().await { diff --git a/safekeeper/src/safekeeper.rs b/safekeeper/src/safekeeper.rs index 4b1481a397..2a620f5fef 100644 --- a/safekeeper/src/safekeeper.rs +++ b/safekeeper/src/safekeeper.rs @@ -827,9 +827,9 @@ where /// Persist control file if there is something to save and enough time /// passed after the last save. - pub async fn maybe_persist_inmem_control_file(&mut self) -> Result { + pub async fn maybe_persist_inmem_control_file(&mut self, force: bool) -> Result { const CF_SAVE_INTERVAL: Duration = Duration::from_secs(300); - if self.state.pers.last_persist_at().elapsed() < CF_SAVE_INTERVAL { + if !force && self.state.pers.last_persist_at().elapsed() < CF_SAVE_INTERVAL { return Ok(false); } let need_persist = self.state.inmem.commit_lsn > self.state.commit_lsn diff --git a/safekeeper/src/timeline.rs b/safekeeper/src/timeline.rs index 0cc6153373..f30c503382 100644 --- a/safekeeper/src/timeline.rs +++ b/safekeeper/src/timeline.rs @@ -821,9 +821,9 @@ impl Timeline { /// passed after the last save. This helps to keep remote_consistent_lsn up /// to date so that storage nodes restart doesn't cause many pageserver -> /// safekeeper reconnections. 
- pub async fn maybe_persist_control_file(self: &Arc) -> Result<()> { + pub async fn maybe_persist_control_file(self: &Arc, force: bool) -> Result<()> { let mut guard = self.write_shared_state().await; - let changed = guard.sk.maybe_persist_inmem_control_file().await?; + let changed = guard.sk.maybe_persist_inmem_control_file(force).await?; guard.skip_update = !changed; Ok(()) } diff --git a/safekeeper/src/timeline_manager.rs b/safekeeper/src/timeline_manager.rs index e74ba37ad8..ed544352f9 100644 --- a/safekeeper/src/timeline_manager.rs +++ b/safekeeper/src/timeline_manager.rs @@ -106,7 +106,7 @@ pub async fn main_task( if !is_active { // TODO: maybe use tokio::spawn? - if let Err(e) = tli.maybe_persist_control_file().await { + if let Err(e) = tli.maybe_persist_control_file(false).await { warn!("control file save in update_status failed: {:?}", e); } } diff --git a/test_runner/fixtures/common_types.py b/test_runner/fixtures/common_types.py index b5458b5c26..e9be765669 100644 --- a/test_runner/fixtures/common_types.py +++ b/test_runner/fixtures/common_types.py @@ -5,6 +5,8 @@ from typing import Any, Type, TypeVar, Union T = TypeVar("T", bound="Id") +DEFAULT_WAL_SEG_SIZE = 16 * 1024 * 1024 + @total_ordering class Lsn: @@ -67,6 +69,9 @@ class Lsn: def as_int(self) -> int: return self.lsn_int + def segment_lsn(self, seg_sz: int = DEFAULT_WAL_SEG_SIZE) -> "Lsn": + return Lsn(self.lsn_int - (self.lsn_int % seg_sz)) + @dataclass(frozen=True) class Key: diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index 36aa18f1f9..c9d0acb967 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -3771,7 +3771,7 @@ class SafekeeperPort: @dataclass -class Safekeeper: +class Safekeeper(LogUtils): """An object representing a running safekeeper daemon.""" env: NeonEnv @@ -3779,6 +3779,13 @@ class Safekeeper: id: int running: bool = False + def __init__(self, env: NeonEnv, port: SafekeeperPort, id: int, running: bool = False): + self.env = env + self.port = port + self.id = id + self.running = running + self.logfile = Path(self.data_dir) / f"safekeeper-{id}.log" + def start(self, extra_opts: Optional[List[str]] = None) -> "Safekeeper": assert self.running is False self.env.neon_cli.safekeeper_start(self.id, extra_opts=extra_opts) @@ -3839,11 +3846,38 @@ class Safekeeper: port=self.port.http, auth_token=auth_token, is_testing_enabled=is_testing_enabled ) + def get_timeline_start_lsn(self, tenant_id: TenantId, timeline_id: TimelineId) -> Lsn: + timeline_status = self.http_client().timeline_status(tenant_id, timeline_id) + timeline_start_lsn = timeline_status.timeline_start_lsn + log.info(f"sk {self.id} timeline start LSN: {timeline_start_lsn}") + return timeline_start_lsn + + def get_flush_lsn(self, tenant_id: TenantId, timeline_id: TimelineId) -> Lsn: + timeline_status = self.http_client().timeline_status(tenant_id, timeline_id) + flush_lsn = timeline_status.flush_lsn + log.info(f"sk {self.id} flush LSN: {flush_lsn}") + return flush_lsn + + def pull_timeline( + self, srcs: list[Safekeeper], tenant_id: TenantId, timeline_id: TimelineId + ) -> Dict[str, Any]: + """ + pull_timeline from srcs to self. 
+ """ + src_https = [f"http://localhost:{sk.port.http}" for sk in srcs] + res = self.http_client().pull_timeline( + {"tenant_id": str(tenant_id), "timeline_id": str(timeline_id), "http_hosts": src_https} + ) + src_ids = [sk.id for sk in srcs] + log.info(f"finished pulling timeline from {src_ids} to {self.id}") + return res + + @property def data_dir(self) -> str: return os.path.join(self.env.repo_dir, "safekeepers", f"sk{self.id}") def timeline_dir(self, tenant_id, timeline_id) -> str: - return os.path.join(self.data_dir(), str(tenant_id), str(timeline_id)) + return os.path.join(self.data_dir, str(tenant_id), str(timeline_id)) def list_segments(self, tenant_id, timeline_id) -> List[str]: """ @@ -3856,6 +3890,35 @@ class Safekeeper: segments.sort() return segments + def checkpoint_up_to(self, tenant_id: TenantId, timeline_id: TimelineId, lsn: Lsn): + """ + Assuming pageserver(s) uploaded to s3 up to `lsn`, + 1) wait for remote_consistent_lsn and wal_backup_lsn on safekeeper to reach it. + 2) checkpoint timeline on safekeeper, which should remove WAL before this LSN. + """ + cli = self.http_client() + + def are_lsns_advanced(): + stat = cli.timeline_status(tenant_id, timeline_id) + log.info( + f"waiting for remote_consistent_lsn and backup_lsn on sk {self.id} to reach {lsn}, currently remote_consistent_lsn={stat.remote_consistent_lsn}, backup_lsn={stat.backup_lsn}" + ) + assert stat.remote_consistent_lsn >= lsn and stat.backup_lsn >= lsn.segment_lsn() + + # xxx: max wait is long because we might be waiting for reconnection from + # pageserver to this safekeeper + wait_until(30, 1, are_lsns_advanced) + cli.checkpoint(tenant_id, timeline_id) + + def wait_until_paused(self, failpoint: str): + msg = f"at failpoint {failpoint}" + + def paused(): + log.info(f"waiting for hitting failpoint {failpoint}") + self.assert_log_contains(msg) + + wait_until(20, 0.5, paused) + class S3Scrubber: def __init__(self, env: NeonEnvBuilder, log_dir: Optional[Path] = None): diff --git a/test_runner/fixtures/safekeeper/http.py b/test_runner/fixtures/safekeeper/http.py index 82148d0556..a5480f557f 100644 --- a/test_runner/fixtures/safekeeper/http.py +++ b/test_runner/fixtures/safekeeper/http.py @@ -177,6 +177,13 @@ class SafekeeperHttpClient(requests.Session): ) res.raise_for_status() + def checkpoint(self, tenant_id: TenantId, timeline_id: TimelineId): + res = self.post( + f"http://localhost:{self.port}/v1/tenant/{tenant_id}/timeline/{timeline_id}/checkpoint", + json={}, + ) + res.raise_for_status() + # only_local doesn't remove segments in the remote storage. def timeline_delete( self, tenant_id: TenantId, timeline_id: TimelineId, only_local: bool = False diff --git a/test_runner/fixtures/utils.py b/test_runner/fixtures/utils.py index 89e116df28..c05cb3e744 100644 --- a/test_runner/fixtures/utils.py +++ b/test_runner/fixtures/utils.py @@ -560,3 +560,25 @@ def assert_pageserver_backups_equal(left: Path, right: Path, skip_files: Set[str elapsed = time.time() - started_at log.info(f"assert_pageserver_backups_equal completed in {elapsed}s") + + +class PropagatingThread(threading.Thread): + _target: Any + _args: Any + _kwargs: Any + """ + Simple Thread wrapper with join() propagating the possible exception in the thread. 
+ """ + + def run(self): + self.exc = None + try: + self.ret = self._target(*self._args, **self._kwargs) + except BaseException as e: + self.exc = e + + def join(self, timeout=None): + super(PropagatingThread, self).join(timeout) + if self.exc: + raise self.exc + return self.ret diff --git a/test_runner/regress/test_wal_acceptor.py b/test_runner/regress/test_wal_acceptor.py index ea66eeff63..0c37711f7a 100644 --- a/test_runner/regress/test_wal_acceptor.py +++ b/test_runner/regress/test_wal_acceptor.py @@ -23,7 +23,6 @@ from fixtures.log_helper import log from fixtures.metrics import parse_metrics from fixtures.neon_fixtures import ( Endpoint, - NeonEnv, NeonEnvBuilder, NeonPageserver, PgBin, @@ -48,7 +47,7 @@ from fixtures.remote_storage import ( ) from fixtures.safekeeper.http import SafekeeperHttpClient from fixtures.safekeeper.utils import are_walreceivers_absent -from fixtures.utils import get_dir_size, query_scalar, start_in_background +from fixtures.utils import PropagatingThread, get_dir_size, query_scalar, start_in_background def wait_lsn_force_checkpoint( @@ -360,7 +359,7 @@ def test_wal_removal(neon_env_builder: NeonEnvBuilder, auth_enabled: bool): # We will wait for first segment removal. Make sure they exist for starter. first_segments = [ - os.path.join(sk.data_dir(), str(tenant_id), str(timeline_id), "000000010000000000000001") + os.path.join(sk.data_dir, str(tenant_id), str(timeline_id), "000000010000000000000001") for sk in env.safekeepers ] assert all(os.path.exists(p) for p in first_segments) @@ -445,7 +444,7 @@ def is_flush_lsn_caught_up(sk: Safekeeper, tenant_id: TenantId, timeline_id: Tim def is_wal_trimmed(sk: Safekeeper, tenant_id: TenantId, timeline_id: TimelineId, target_size_mb): http_cli = sk.http_client() tli_status = http_cli.timeline_status(tenant_id, timeline_id) - sk_wal_size = get_dir_size(os.path.join(sk.data_dir(), str(tenant_id), str(timeline_id))) + sk_wal_size = get_dir_size(os.path.join(sk.data_dir, str(tenant_id), str(timeline_id))) sk_wal_size_mb = sk_wal_size / 1024 / 1024 log.info(f"Safekeeper id={sk.id} wal_size={sk_wal_size_mb:.2f}MB status={tli_status}") return sk_wal_size_mb <= target_size_mb @@ -591,10 +590,10 @@ def test_s3_wal_replay(neon_env_builder: NeonEnvBuilder): # save the last (partial) file to put it back after recreation; others will be fetched from s3 sk = env.safekeepers[0] - tli_dir = Path(sk.data_dir()) / str(tenant_id) / str(timeline_id) + tli_dir = Path(sk.data_dir) / str(tenant_id) / str(timeline_id) f_partial = Path([f for f in os.listdir(tli_dir) if f.endswith(".partial")][0]) f_partial_path = tli_dir / f_partial - f_partial_saved = Path(sk.data_dir()) / f_partial.name + f_partial_saved = Path(sk.data_dir) / f_partial.name f_partial_path.rename(f_partial_saved) pg_version = sk.http_client().timeline_status(tenant_id, timeline_id).pg_version @@ -616,7 +615,7 @@ def test_s3_wal_replay(neon_env_builder: NeonEnvBuilder): cli = sk.http_client() cli.timeline_create(tenant_id, timeline_id, pg_version, last_lsn) f_partial_path = ( - Path(sk.data_dir()) / str(tenant_id) / str(timeline_id) / f_partial_saved.name + Path(sk.data_dir) / str(tenant_id) / str(timeline_id) / f_partial_saved.name ) shutil.copy(f_partial_saved, f_partial_path) @@ -1631,7 +1630,7 @@ def test_delete_force(neon_env_builder: NeonEnvBuilder, auth_enabled: bool): with conn.cursor() as cur: cur.execute("CREATE TABLE t(key int primary key)") sk = env.safekeepers[0] - sk_data_dir = Path(sk.data_dir()) + sk_data_dir = Path(sk.data_dir) if not auth_enabled: sk_http = 
sk.http_client() sk_http_other = sk_http @@ -1724,9 +1723,6 @@ def test_delete_force(neon_env_builder: NeonEnvBuilder, auth_enabled: bool): def test_pull_timeline(neon_env_builder: NeonEnvBuilder): - def safekeepers_guc(env: NeonEnv, sk_names: List[int]) -> str: - return ",".join([f"localhost:{sk.port.pg}" for sk in env.safekeepers if sk.id in sk_names]) - def execute_payload(endpoint: Endpoint): with closing(endpoint.connect()) as conn: with conn.cursor() as cur: @@ -1812,6 +1808,67 @@ def test_pull_timeline(neon_env_builder: NeonEnvBuilder): show_statuses(env.safekeepers, tenant_id, timeline_id) +# Test pull_timeline while concurrently gc'ing WAL on safekeeper: +# 1) Start pull_timeline, listing files to fetch. +# 2) Write segment, do gc. +# 3) Finish pull_timeline. +# 4) Do some write, verify integrity with timeline_digest. +# Expected to fail while holding off WAL gc plus fetching commit_lsn WAL +# segment is not implemented. +@pytest.mark.xfail +def test_pull_timeline_gc(neon_env_builder: NeonEnvBuilder): + neon_env_builder.num_safekeepers = 3 + neon_env_builder.enable_safekeeper_remote_storage(default_remote_storage()) + env = neon_env_builder.init_start() + tenant_id = env.initial_tenant + timeline_id = env.initial_timeline + + (src_sk, dst_sk) = (env.safekeepers[0], env.safekeepers[2]) + + log.info("use only first 2 safekeepers, 3rd will be seeded") + endpoint = env.endpoints.create("main") + endpoint.active_safekeepers = [1, 2] + endpoint.start() + endpoint.safe_psql("create table t(key int, value text)") + endpoint.safe_psql("insert into t select generate_series(1, 1000), 'pear'") + + src_flush_lsn = src_sk.get_flush_lsn(tenant_id, timeline_id) + log.info(f"flush_lsn on src before pull_timeline: {src_flush_lsn}") + + dst_http = dst_sk.http_client() + # run pull_timeline which will halt before downloading files + dst_http.configure_failpoints(("sk-pull-timeline-after-list-pausable", "pause")) + pt_handle = PropagatingThread( + target=dst_sk.pull_timeline, args=([src_sk], tenant_id, timeline_id) + ) + pt_handle.start() + dst_sk.wait_until_paused("sk-pull-timeline-after-list-pausable") + + # ensure segment exists + endpoint.safe_psql("insert into t select generate_series(1, 180000), 'papaya'") + lsn = last_flush_lsn_upload(env, endpoint, tenant_id, timeline_id) + assert lsn > Lsn("0/2000000") + # Checkpoint timeline beyond lsn. + src_sk.checkpoint_up_to(tenant_id, timeline_id, lsn) + first_segment_p = os.path.join( + src_sk.timeline_dir(tenant_id, timeline_id), "000000010000000000000001" + ) + log.info(f"first segment exist={os.path.exists(first_segment_p)}") + + dst_http.configure_failpoints(("sk-pull-timeline-after-list-pausable", "off")) + pt_handle.join() + + timeline_start_lsn = src_sk.get_timeline_start_lsn(tenant_id, timeline_id) + dst_flush_lsn = dst_sk.get_flush_lsn(tenant_id, timeline_id) + log.info(f"flush_lsn on dst after pull_timeline: {dst_flush_lsn}") + assert dst_flush_lsn >= src_flush_lsn + digests = [ + sk.http_client().timeline_digest(tenant_id, timeline_id, timeline_start_lsn, dst_flush_lsn) + for sk in [src_sk, dst_sk] + ] + assert digests[0] == digests[1], f"digest on src is {digests[0]} but on dst is {digests[1]}" + + # In this test we check for excessive START_REPLICATION and START_WAL_PUSH queries # when compute is active, but there are no writes to the timeline. 
In that case # pageserver should maintain a single connection to safekeeper and don't attempt From b2d34a82b909c102d81cd301c870ebe9b11aec86 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Fri, 24 May 2024 22:00:08 +0300 Subject: [PATCH 088/220] Make python Safekeeper datadir Path instead of str. --- test_runner/fixtures/neon_fixtures.py | 8 ++++---- test_runner/fixtures/utils.py | 2 +- test_runner/regress/test_wal_acceptor.py | 14 ++++++-------- 3 files changed, 11 insertions(+), 13 deletions(-) diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index c9d0acb967..b8ef63faa9 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -3873,11 +3873,11 @@ class Safekeeper(LogUtils): return res @property - def data_dir(self) -> str: - return os.path.join(self.env.repo_dir, "safekeepers", f"sk{self.id}") + def data_dir(self) -> Path: + return self.env.repo_dir / "safekeepers" / f"sk{self.id}" - def timeline_dir(self, tenant_id, timeline_id) -> str: - return os.path.join(self.data_dir, str(tenant_id), str(timeline_id)) + def timeline_dir(self, tenant_id, timeline_id) -> Path: + return self.data_dir / str(tenant_id) / str(timeline_id) def list_segments(self, tenant_id, timeline_id) -> List[str]: """ diff --git a/test_runner/fixtures/utils.py b/test_runner/fixtures/utils.py index c05cb3e744..b55329e054 100644 --- a/test_runner/fixtures/utils.py +++ b/test_runner/fixtures/utils.py @@ -196,7 +196,7 @@ def query_scalar(cur: cursor, query: str) -> Any: # Traverse directory to get total size. -def get_dir_size(path: str) -> int: +def get_dir_size(path: Path) -> int: """Return size in bytes.""" totalbytes = 0 for root, _dirs, files in os.walk(path): diff --git a/test_runner/regress/test_wal_acceptor.py b/test_runner/regress/test_wal_acceptor.py index 0c37711f7a..cff13e74ee 100644 --- a/test_runner/regress/test_wal_acceptor.py +++ b/test_runner/regress/test_wal_acceptor.py @@ -359,7 +359,7 @@ def test_wal_removal(neon_env_builder: NeonEnvBuilder, auth_enabled: bool): # We will wait for first segment removal. Make sure they exist for starter. 
first_segments = [ - os.path.join(sk.data_dir, str(tenant_id), str(timeline_id), "000000010000000000000001") + sk.timeline_dir(tenant_id, timeline_id) / "000000010000000000000001" for sk in env.safekeepers ] assert all(os.path.exists(p) for p in first_segments) @@ -444,7 +444,7 @@ def is_flush_lsn_caught_up(sk: Safekeeper, tenant_id: TenantId, timeline_id: Tim def is_wal_trimmed(sk: Safekeeper, tenant_id: TenantId, timeline_id: TimelineId, target_size_mb): http_cli = sk.http_client() tli_status = http_cli.timeline_status(tenant_id, timeline_id) - sk_wal_size = get_dir_size(os.path.join(sk.data_dir, str(tenant_id), str(timeline_id))) + sk_wal_size = get_dir_size(sk.timeline_dir(tenant_id, timeline_id)) sk_wal_size_mb = sk_wal_size / 1024 / 1024 log.info(f"Safekeeper id={sk.id} wal_size={sk_wal_size_mb:.2f}MB status={tli_status}") return sk_wal_size_mb <= target_size_mb @@ -1131,8 +1131,8 @@ def cmp_sk_wal(sks: List[Safekeeper], tenant_id: TenantId, timeline_id: Timeline ) for f in mismatch: - f1 = os.path.join(sk0.timeline_dir(tenant_id, timeline_id), f) - f2 = os.path.join(sk.timeline_dir(tenant_id, timeline_id), f) + f1 = sk0.timeline_dir(tenant_id, timeline_id) / f + f2 = sk.timeline_dir(tenant_id, timeline_id) / f stdout_filename = f"{f2}.filediff" with open(stdout_filename, "w") as stdout_f: @@ -1630,7 +1630,7 @@ def test_delete_force(neon_env_builder: NeonEnvBuilder, auth_enabled: bool): with conn.cursor() as cur: cur.execute("CREATE TABLE t(key int primary key)") sk = env.safekeepers[0] - sk_data_dir = Path(sk.data_dir) + sk_data_dir = sk.data_dir if not auth_enabled: sk_http = sk.http_client() sk_http_other = sk_http @@ -1850,9 +1850,7 @@ def test_pull_timeline_gc(neon_env_builder: NeonEnvBuilder): assert lsn > Lsn("0/2000000") # Checkpoint timeline beyond lsn. src_sk.checkpoint_up_to(tenant_id, timeline_id, lsn) - first_segment_p = os.path.join( - src_sk.timeline_dir(tenant_id, timeline_id), "000000010000000000000001" - ) + first_segment_p = src_sk.timeline_dir(tenant_id, timeline_id) / "000000010000000000000001" log.info(f"first segment exist={os.path.exists(first_segment_p)}") dst_http.configure_failpoints(("sk-pull-timeline-after-list-pausable", "off")) From d61e9241035c3d03eb90a5722b0d38769f92e18a Mon Sep 17 00:00:00 2001 From: Konstantin Knizhnik Date: Mon, 27 May 2024 15:57:57 +0300 Subject: [PATCH 089/220] Fix connect to PS on MacOS/X (#7885) ## Problem After [0e4f1826805d040a23c25c54f3993a942755dbc2] which introduce async connect Neon is not able to connect to page server. ## Summary of changes Perform sync commit at MacOS/X ## Checklist before requesting a review - [ ] I have performed a self-review of my code. - [ ] If it is a core feature, I have added thorough tests. - [ ] Do we need to implement analytics? if so did you add the relevant metrics to the dashboard? - [ ] If this PR requires public announcement, mark it with /release-notes label and add several sentences in this section. ## Checklist before merging - [ ] Do not forget to reformat commit message to not include the above checklist --------- Co-authored-by: Konstantin Knizhnik --- pgxn/neon/libpagestore.c | 89 +++++++++++++++++++------------------- pgxn/neon/pagestore_smgr.c | 10 ++--- 2 files changed, 50 insertions(+), 49 deletions(-) diff --git a/pgxn/neon/libpagestore.c b/pgxn/neon/libpagestore.c index a9c8d59c3a..5eae2d8204 100644 --- a/pgxn/neon/libpagestore.c +++ b/pgxn/neon/libpagestore.c @@ -125,13 +125,6 @@ typedef struct * - WL_EXIT_ON_PM_DEATH. 
*/ WaitEventSet *wes_read; - /*--- - * WaitEventSet containing: - * - WL_SOCKET_WRITABLE on 'conn' - * - WL_LATCH_SET on MyLatch, and - * - WL_EXIT_ON_PM_DEATH. - */ - WaitEventSet *wes_write; } PageServer; static PageServer page_servers[MAX_SHARDS]; @@ -336,11 +329,6 @@ CLEANUP_AND_DISCONNECT(PageServer *shard) FreeWaitEventSet(shard->wes_read); shard->wes_read = NULL; } - if (shard->wes_write) - { - FreeWaitEventSet(shard->wes_write); - shard->wes_write = NULL; - } if (shard->conn) { PQfinish(shard->conn); @@ -436,22 +424,6 @@ pageserver_connect(shardno_t shard_no, int elevel) return false; } - shard->wes_read = CreateWaitEventSet(TopMemoryContext, 3); - AddWaitEventToSet(shard->wes_read, WL_LATCH_SET, PGINVALID_SOCKET, - MyLatch, NULL); - AddWaitEventToSet(shard->wes_read, WL_EXIT_ON_PM_DEATH, PGINVALID_SOCKET, - NULL, NULL); - AddWaitEventToSet(shard->wes_read, WL_SOCKET_READABLE, PQsocket(shard->conn), NULL, NULL); - - shard->wes_write = CreateWaitEventSet(TopMemoryContext, 3); - AddWaitEventToSet(shard->wes_write, WL_LATCH_SET, PGINVALID_SOCKET, - MyLatch, NULL); - AddWaitEventToSet(shard->wes_write, WL_EXIT_ON_PM_DEATH, PGINVALID_SOCKET, - NULL, NULL); - AddWaitEventToSet(shard->wes_write, WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE, - PQsocket(shard->conn), - NULL, NULL); - shard->state = PS_Connecting_Startup; /* fallthrough */ } @@ -460,13 +432,12 @@ pageserver_connect(shardno_t shard_no, int elevel) char *pagestream_query; int ps_send_query_ret; bool connected = false; - + int poll_result = PGRES_POLLING_WRITING; neon_shard_log(shard_no, DEBUG5, "Connection state: Connecting_Startup"); do { WaitEvent event; - int poll_result = PQconnectPoll(shard->conn); switch (poll_result) { @@ -497,25 +468,45 @@ pageserver_connect(shardno_t shard_no, int elevel) } case PGRES_POLLING_READING: /* Sleep until there's something to do */ - (void) WaitEventSetWait(shard->wes_read, -1L, &event, 1, - PG_WAIT_EXTENSION); - ResetLatch(MyLatch); - - /* query cancellation, backend shutdown */ - CHECK_FOR_INTERRUPTS(); - + while (true) + { + int rc = WaitLatchOrSocket(MyLatch, + WL_EXIT_ON_PM_DEATH | WL_LATCH_SET | WL_SOCKET_READABLE, + PQsocket(shard->conn), + 0, + PG_WAIT_EXTENSION); + elog(DEBUG5, "PGRES_POLLING_READING=>%d", rc); + if (rc & WL_LATCH_SET) + { + ResetLatch(MyLatch); + /* query cancellation, backend shutdown */ + CHECK_FOR_INTERRUPTS(); + } + if (rc & WL_SOCKET_READABLE) + break; + } /* PQconnectPoll() handles the socket polling state updates */ break; case PGRES_POLLING_WRITING: /* Sleep until there's something to do */ - (void) WaitEventSetWait(shard->wes_write, -1L, &event, 1, - PG_WAIT_EXTENSION); - ResetLatch(MyLatch); - - /* query cancellation, backend shutdown */ - CHECK_FOR_INTERRUPTS(); - + while (true) + { + int rc = WaitLatchOrSocket(MyLatch, + WL_EXIT_ON_PM_DEATH | WL_LATCH_SET | WL_SOCKET_WRITEABLE, + PQsocket(shard->conn), + 0, + PG_WAIT_EXTENSION); + elog(DEBUG5, "PGRES_POLLING_WRITING=>%d", rc); + if (rc & WL_LATCH_SET) + { + ResetLatch(MyLatch); + /* query cancellation, backend shutdown */ + CHECK_FOR_INTERRUPTS(); + } + if (rc & WL_SOCKET_WRITEABLE) + break; + } /* PQconnectPoll() handles the socket polling state updates */ break; @@ -524,12 +515,22 @@ pageserver_connect(shardno_t shard_no, int elevel) connected = true; break; } + poll_result = PQconnectPoll(shard->conn); + elog(DEBUG5, "PQconnectPoll=>%d", poll_result); } while (!connected); /* No more polling needed; connection succeeded */ shard->last_connect_time = GetCurrentTimestamp(); + shard->wes_read = 
CreateWaitEventSet(TopMemoryContext, 3); + AddWaitEventToSet(shard->wes_read, WL_LATCH_SET, PGINVALID_SOCKET, + MyLatch, NULL); + AddWaitEventToSet(shard->wes_read, WL_EXIT_ON_PM_DEATH, PGINVALID_SOCKET, + NULL, NULL); + AddWaitEventToSet(shard->wes_read, WL_SOCKET_READABLE, PQsocket(shard->conn), NULL, NULL); + + switch (neon_protocol_version) { case 2: diff --git a/pgxn/neon/pagestore_smgr.c b/pgxn/neon/pagestore_smgr.c index ac505fe6fb..0e4d210be8 100644 --- a/pgxn/neon/pagestore_smgr.c +++ b/pgxn/neon/pagestore_smgr.c @@ -584,9 +584,9 @@ prefetch_read(PrefetchRequest *slot) slot->response != NULL || slot->my_ring_index != MyPState->ring_receive) neon_shard_log(slot->shard_no, ERROR, - "Incorrect prefetch read: status=%d response=%llx my=%llu receive=%llu", - slot->status, (size_t) (void *) slot->response, - slot->my_ring_index, MyPState->ring_receive); + "Incorrect prefetch read: status=%d response=%p my=%lu receive=%lu", + slot->status, slot->response, + (long)slot->my_ring_index, (long)MyPState->ring_receive); old = MemoryContextSwitchTo(MyPState->errctx); response = (NeonResponse *) page_server->receive(slot->shard_no); @@ -606,8 +606,8 @@ prefetch_read(PrefetchRequest *slot) else { neon_shard_log(slot->shard_no, WARNING, - "No response from reading prefetch entry %llu: %u/%u/%u.%u block %u. This can be caused by a concurrent disconnect", - slot->my_ring_index, + "No response from reading prefetch entry %lu: %u/%u/%u.%u block %u. This can be caused by a concurrent disconnect", + (long)slot->my_ring_index, RelFileInfoFmt(BufTagGetNRelFileInfo(slot->buftag)), slot->buftag.forkNum, slot->buftag.blockNum); return false; From 4a0ce9512b5eb26b636006cda2488411d07bfc03 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 27 May 2024 17:35:46 +0300 Subject: [PATCH 090/220] Add safekeeper test truncating WAL. We do it as a part of more complicated tests like test_compute_restarts, but let's have a simple test as well. --- .../regress/test_wal_acceptor_async.py | 58 +++++++++++++++++++ 1 file changed, 58 insertions(+) diff --git a/test_runner/regress/test_wal_acceptor_async.py b/test_runner/regress/test_wal_acceptor_async.py index b5d86de574..715d22eed8 100644 --- a/test_runner/regress/test_wal_acceptor_async.py +++ b/test_runner/regress/test_wal_acceptor_async.py @@ -531,6 +531,64 @@ def test_recovery_uncommitted(neon_env_builder: NeonEnvBuilder): asyncio.run(run_recovery_uncommitted(env)) +async def run_wal_truncation(env: NeonEnv): + tenant_id = env.initial_tenant + timeline_id = env.initial_timeline + + (sk1, sk2, sk3) = env.safekeepers + + ep = env.endpoints.create_start("main") + ep.safe_psql("create table t (key int, value text)") + ep.safe_psql("insert into t select generate_series(1, 100), 'payload'") + + # insert with only one sk3 up to create tail of flushed but not committed WAL on it + sk1.stop() + sk2.stop() + conn = await ep.connect_async() + # query should hang, so execute in separate task + bg_query = asyncio.create_task( + conn.execute("insert into t select generate_series(1, 180000), 'Papaya'") + ) + sleep_sec = 2 + await asyncio.sleep(sleep_sec) + # it must still be not finished + assert not bg_query.done() + # note: destoy will kill compute_ctl, preventing it waiting for hanging sync-safekeepers. 
+ ep.stop_and_destroy() + + # stop sk3 as well + sk3.stop() + + # now start sk1 and sk2 and make them commit something + sk1.start() + sk2.start() + ep = env.endpoints.create_start( + "main", + ) + ep.safe_psql("insert into t select generate_series(1, 200), 'payload'") + + # start sk3 and wait for it to catch up + sk3.start() + flush_lsn = Lsn(ep.safe_psql_scalar("SELECT pg_current_wal_flush_lsn()")) + await wait_for_lsn(sk3, tenant_id, timeline_id, flush_lsn) + + timeline_start_lsn = sk1.get_timeline_start_lsn(tenant_id, timeline_id) + digests = [ + sk.http_client().timeline_digest(tenant_id, timeline_id, timeline_start_lsn, flush_lsn) + for sk in [sk1, sk2] + ] + assert digests[0] == digests[1], f"digest on sk1 is {digests[0]} but on sk3 is {digests[1]}" + + +# Simple deterministic test creating tail of WAL on safekeeper which is +# truncated when majority without this sk elects walproposer starting earlier. +def test_wal_truncation(neon_env_builder: NeonEnvBuilder): + neon_env_builder.num_safekeepers = 3 + env = neon_env_builder.init_start() + + asyncio.run(run_wal_truncation(env)) + + async def run_segment_init_failure(env: NeonEnv): env.neon_cli.create_branch("test_segment_init_failure") ep = env.endpoints.create_start("test_segment_init_failure") From fabeff822fac24b7ba45214e907295003874252a Mon Sep 17 00:00:00 2001 From: Peter Bendel Date: Tue, 28 May 2024 13:05:33 +0200 Subject: [PATCH 091/220] Performance test for pgvector HNSW index build and queries (#7873) ## Problem We want to regularly verify the performance of pgvector HNSW parallel index builds and parallel similarity search using HNSW indexes. The first release that considerably improved the index-build parallelism was pgvector 0.7.0 and we want to make sure that we do not regress by our neon compute VM settings (swap, memory over commit, pg conf etc.) ## Summary of changes Prepare a Neon project with 1 million openAI vector embeddings (vector size 1536). Run HNSW indexing operations in the regression test for the various distance metrics. Run similarity queries using pgbench with 100 concurrent clients. I have also added the relevant metrics to the grafana dashboards pgbench and olape --------- Co-authored-by: Alexander Bayandin --- .github/workflows/benchmarking.yml | 100 +++++++++++++++++- pyproject.toml | 1 + .../performance/pgvector/HNSW_build.sql | 47 ++++++++ .../performance/pgvector/IVFFLAT_build.sql | 52 +++++++++ test_runner/performance/pgvector/README.md | 38 +++++++ test_runner/performance/pgvector/loaddata.py | 72 +++++++++++++ ...ch_custom_script_pgvector_hsnw_queries.sql | 10 ++ .../pgvector/pgbench_hnsw_queries.sql | 13 +++ test_runner/performance/test_perf_olap.py | 34 ++++++ test_runner/performance/test_perf_pgbench.py | 31 ++++++ 10 files changed, 395 insertions(+), 3 deletions(-) create mode 100644 test_runner/performance/pgvector/HNSW_build.sql create mode 100644 test_runner/performance/pgvector/IVFFLAT_build.sql create mode 100644 test_runner/performance/pgvector/README.md create mode 100644 test_runner/performance/pgvector/loaddata.py create mode 100644 test_runner/performance/pgvector/pgbench_custom_script_pgvector_hsnw_queries.sql create mode 100644 test_runner/performance/pgvector/pgbench_hnsw_queries.sql diff --git a/.github/workflows/benchmarking.yml b/.github/workflows/benchmarking.yml index 1eaf05cd54..d5a375d704 100644 --- a/.github/workflows/benchmarking.yml +++ b/.github/workflows/benchmarking.yml @@ -38,6 +38,11 @@ on: description: 'AWS-RDS and AWS-AURORA normally only run on Saturday. 
Set this to true to run them on every workflow_dispatch' required: false default: false + run_only_pgvector_tests: + type: boolean + description: 'Run pgvector tests but no other tests. If not set, all tests including pgvector tests will be run' + required: false + default: false defaults: run: @@ -50,6 +55,7 @@ concurrency: jobs: bench: + if: ${{ github.event.inputs.run_only_pgvector_tests == 'false' || github.event.inputs.run_only_pgvector_tests == null }} env: TEST_PG_BENCH_DURATIONS_MATRIX: "300" TEST_PG_BENCH_SCALES_MATRIX: "10,100" @@ -120,6 +126,7 @@ jobs: SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} generate-matrices: + if: ${{ github.event.inputs.run_only_pgvector_tests == 'false' || github.event.inputs.run_only_pgvector_tests == null }} # Create matrices for the benchmarking jobs, so we run benchmarks on rds only once a week (on Saturday) # # Available platforms: @@ -197,6 +204,7 @@ jobs: echo "matrix=$(echo "$matrix" | jq --compact-output '.')" >> $GITHUB_OUTPUT pgbench-compare: + if: ${{ github.event.inputs.run_only_pgvector_tests == 'false' || github.event.inputs.run_only_pgvector_tests == null }} needs: [ generate-matrices ] strategy: @@ -343,6 +351,92 @@ jobs: env: SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} + pgbench-pgvector: + env: + TEST_PG_BENCH_DURATIONS_MATRIX: "15m" + TEST_PG_BENCH_SCALES_MATRIX: "1" + POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install + DEFAULT_PG_VERSION: 16 + TEST_OUTPUT: /tmp/test_output + BUILD_TYPE: remote + SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }} + PLATFORM: "neon-captest-pgvector" + + runs-on: [ self-hosted, us-east-2, x64 ] + container: + image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/build-tools:pinned + options: --init + + steps: + - uses: actions/checkout@v4 + + - name: Download Neon artifact + uses: ./.github/actions/download + with: + name: neon-${{ runner.os }}-release-artifact + path: /tmp/neon/ + prefix: latest + + - name: Add Postgres binaries to PATH + run: | + ${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin/pgbench --version + echo "${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin" >> $GITHUB_PATH + + - name: Set up Connection String + id: set-up-connstr + run: | + CONNSTR=${{ secrets.BENCHMARK_PGVECTOR_CONNSTR }} + + echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT + + QUERIES=("SELECT version()") + QUERIES+=("SHOW neon.tenant_id") + QUERIES+=("SHOW neon.timeline_id") + + for q in "${QUERIES[@]}"; do + psql ${CONNSTR} -c "${q}" + done + + - name: Benchmark pgvector hnsw indexing + uses: ./.github/actions/run-python-test-set + with: + build_type: ${{ env.BUILD_TYPE }} + test_selection: performance/test_perf_olap.py + run_in_parallel: false + save_perf_report: ${{ env.SAVE_PERF_REPORT }} + extra_params: -m remote_cluster --timeout 21600 -k test_pgvector_indexing + env: + VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}" + PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}" + BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }} + + - name: Benchmark pgvector hnsw queries + uses: ./.github/actions/run-python-test-set + with: + build_type: ${{ env.BUILD_TYPE }} + test_selection: performance + run_in_parallel: false + save_perf_report: ${{ env.SAVE_PERF_REPORT }} + extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_pgvector + env: + BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }} + VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}" + PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR 
}}" + + - name: Create Allure report + if: ${{ !cancelled() }} + uses: ./.github/actions/allure-report-generate + + - name: Post to a Slack channel + if: ${{ github.event.schedule && failure() }} + uses: slackapi/slack-github-action@v1 + with: + channel-id: "C033QLM5P7D" # dev-staging-stream + slack-message: "Periodic perf testing neon-captest-pgvector: ${{ job.status }}\n${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + env: + SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} + + clickbench-compare: # ClichBench DB for rds-aurora and rds-Postgres deployed to the same clusters # we use for performance testing in pgbench-compare. @@ -351,7 +445,7 @@ jobs: # # *_CLICKBENCH_CONNSTR: Genuine ClickBench DB with ~100M rows # *_CLICKBENCH_10M_CONNSTR: DB with the first 10M rows of ClickBench DB - if: ${{ !cancelled() }} + if: ${{ !cancelled() && (github.event.inputs.run_only_pgvector_tests == 'false' || github.event.inputs.run_only_pgvector_tests == null) }} needs: [ generate-matrices, pgbench-compare ] strategy: @@ -455,7 +549,7 @@ jobs: # We might change it after https://github.com/neondatabase/neon/issues/2900. # # *_TPCH_S10_CONNSTR: DB generated with scale factor 10 (~10 GB) - if: ${{ !cancelled() }} + if: ${{ !cancelled() && (github.event.inputs.run_only_pgvector_tests == 'false' || github.event.inputs.run_only_pgvector_tests == null) }} needs: [ generate-matrices, clickbench-compare ] strategy: @@ -557,7 +651,7 @@ jobs: SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} user-examples-compare: - if: ${{ !cancelled() }} + if: ${{ !cancelled() && (github.event.inputs.run_only_pgvector_tests == 'false' || github.event.inputs.run_only_pgvector_tests == null) }} needs: [ generate-matrices, tpch-compare ] strategy: diff --git a/pyproject.toml b/pyproject.toml index 131d1121f7..c7f1a07512 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,6 +54,7 @@ build-backend = "poetry.core.masonry.api" exclude = [ "^vendor/", "^target/", + "test_runner/performance/pgvector/loaddata.py", ] check_untyped_defs = true # Help mypy find imports when running against list of individual files. 
diff --git a/test_runner/performance/pgvector/HNSW_build.sql b/test_runner/performance/pgvector/HNSW_build.sql new file mode 100644 index 0000000000..9e6918b755 --- /dev/null +++ b/test_runner/performance/pgvector/HNSW_build.sql @@ -0,0 +1,47 @@ + +\set ECHO queries +\timing + +-- prepare test table +DROP TABLE IF EXISTS hnsw_test_table; +CREATE TABLE hnsw_test_table AS TABLE documents WITH NO DATA; +INSERT INTO hnsw_test_table SELECT * FROM documents; +CREATE INDEX ON hnsw_test_table (_id); -- needed later for random tuple queries +-- tune index build params +SET max_parallel_maintenance_workers = 7; +SET maintenance_work_mem = '8GB'; +-- create HNSW index for the supported distance metrics +CREATE INDEX ON hnsw_test_table USING hnsw (embeddings vector_cosine_ops); +CREATE INDEX ON hnsw_test_table USING hnsw (embeddings vector_ip_ops); +CREATE INDEX ON hnsw_test_table USING hnsw (embeddings vector_l1_ops); +CREATE INDEX ON hnsw_test_table USING hnsw ((binary_quantize(embeddings)::bit(1536)) bit_hamming_ops); +CREATE INDEX ON hnsw_test_table USING hnsw ((binary_quantize(embeddings)::bit(1536)) bit_jaccard_ops); +-- note: in a second psql session we can monitor the progress of the index build phases using +-- the following query: +-- SELECT phase, round(100.0 * blocks_done / nullif(blocks_total, 0), 1) AS "%" FROM pg_stat_progress_create_index; + +-- show all indexes built on the table +SELECT + idx.relname AS index_name, + tbl.relname AS table_name, + am.amname AS access_method, + a.attname AS column_name, + opc.opcname AS operator_class +FROM + pg_index i +JOIN + pg_class idx ON idx.oid = i.indexrelid +JOIN + pg_class tbl ON tbl.oid = i.indrelid +JOIN + pg_am am ON am.oid = idx.relam +JOIN + pg_attribute a ON a.attrelid = tbl.oid AND a.attnum = ANY(i.indkey) +JOIN + pg_opclass opc ON opc.oid = i.indclass[0] +WHERE + tbl.relname = 'hnsw_test_table' + AND a.attname = 'embeddings'; + +-- show table sizes +\dt+ diff --git a/test_runner/performance/pgvector/IVFFLAT_build.sql b/test_runner/performance/pgvector/IVFFLAT_build.sql new file mode 100644 index 0000000000..338980831a --- /dev/null +++ b/test_runner/performance/pgvector/IVFFLAT_build.sql @@ -0,0 +1,52 @@ + +\set ECHO queries +\timing + +-- prepare test table +DROP TABLE IF EXISTS ivfflat_test_table; +CREATE TABLE ivfflat_test_table AS TABLE documents WITH NO DATA; +INSERT INTO ivfflat_test_table SELECT * FROM documents; +CREATE INDEX ON ivfflat_test_table (_id); -- needed later for random tuple queries +-- tune index build params +SET max_parallel_maintenance_workers = 7; +SET maintenance_work_mem = '8GB'; +-- create ivfflat index for the supported distance metrics +-- the formulat for lists is # rows / 1000 or sqrt(# rows) if # rows > 1 million +-- we have 1 million embeddings of vector size 1536 in column embeddings of table documents +-- so we use 1000 lists +CREATE INDEX ON ivfflat_test_table USING ivfflat (embeddings vector_l2_ops) WITH (lists = 1000); +CREATE INDEX ON ivfflat_test_table USING ivfflat (embeddings vector_ip_ops) WITH (lists = 1000); +CREATE INDEX ON ivfflat_test_table USING ivfflat (embeddings vector_cosine_ops) WITH (lists = 1000); +CREATE INDEX ON ivfflat_test_table USING ivfflat (embeddings::halfvec(1536) halfvec_l2_ops) WITH (lists = 1000); +CREATE INDEX ON ivfflat_test_table + USING ivfflat ((binary_quantize(embeddings)::bit(1536)) bit_hamming_ops) WITH (lists = 1000); + +\d ivfflat_test_table + + +-- show all indexes built on the table +SELECT + idx.relname AS index_name, + tbl.relname AS table_name, + 
am.amname AS access_method, + a.attname AS column_name, + opc.opcname AS operator_class +FROM + pg_index i +JOIN + pg_class idx ON idx.oid = i.indexrelid +JOIN + pg_class tbl ON tbl.oid = i.indrelid +JOIN + pg_am am ON am.oid = idx.relam +JOIN + pg_attribute a ON a.attrelid = tbl.oid AND a.attnum = ANY(i.indkey) +JOIN + pg_opclass opc ON opc.oid = i.indclass[0] +WHERE + tbl.relname = 'ivfflat_test_table' + AND a.attname = 'embeddings'; +-- show table sizes +\dt+ + + diff --git a/test_runner/performance/pgvector/README.md b/test_runner/performance/pgvector/README.md new file mode 100644 index 0000000000..c55db12e74 --- /dev/null +++ b/test_runner/performance/pgvector/README.md @@ -0,0 +1,38 @@ +--- +dataset_info: + features: + - name: _id + dtype: string + - name: title + dtype: string + - name: text + dtype: string + - name: text-embedding-3-large-1536-embedding + sequence: float64 + splits: + - name: train + num_bytes: 12679725776 + num_examples: 1000000 + download_size: 9551862565 + dataset_size: 12679725776 +configs: +- config_name: default + data_files: + - split: train + path: data/train-* +license: mit +task_categories: +- feature-extraction +language: +- en +size_categories: +- 1M ") + + +def main(conn_str, directory_path): + # Connection to PostgreSQL + with psycopg2.connect(conn_str) as conn: + with conn.cursor() as cursor: + # Run SQL statements + cursor.execute("CREATE EXTENSION IF NOT EXISTS vector;") + register_vector(conn) + cursor.execute("DROP TABLE IF EXISTS documents;") + cursor.execute( + """ + CREATE TABLE documents ( + _id TEXT PRIMARY KEY, + title TEXT, + text TEXT, + embeddings vector(1536) -- text-embedding-3-large-1536-embedding (OpenAI) + ); + """ + ) + conn.commit() + + # List and sort Parquet files + parquet_files = sorted(Path(directory_path).glob("*.parquet")) + + for file in parquet_files: + print(f"Loading {file} into PostgreSQL") + df = pd.read_parquet(file) + + print(df.head()) + + data_list = [ + ( + row["_id"], + row["title"], + row["text"], + np.array(row["text-embedding-3-large-1536-embedding"]), + ) + for index, row in df.iterrows() + ] + # Use execute_values to perform batch insertion + execute_values( + cursor, + "INSERT INTO documents (_id, title, text, embeddings) VALUES %s", + data_list, + ) + # Commit after we insert all embeddings + conn.commit() + + print(f"Loaded {file} into PostgreSQL") + + +if __name__ == "__main__": + if len(sys.argv) != 3: + print_usage() + sys.exit(1) + + conn_str = sys.argv[1] + directory_path = sys.argv[2] + main(conn_str, directory_path) diff --git a/test_runner/performance/pgvector/pgbench_custom_script_pgvector_hsnw_queries.sql b/test_runner/performance/pgvector/pgbench_custom_script_pgvector_hsnw_queries.sql new file mode 100644 index 0000000000..886ae9645b --- /dev/null +++ b/test_runner/performance/pgvector/pgbench_custom_script_pgvector_hsnw_queries.sql @@ -0,0 +1,10 @@ +with x (x) as ( + select "embeddings" as x + from hnsw_test_table + TABLESAMPLE SYSTEM (1) + LIMIT 1 +) +SELECT title, "embeddings" <=> (select x from x) as distance +FROM hnsw_test_table +ORDER BY 2 +LIMIT 30; diff --git a/test_runner/performance/pgvector/pgbench_hnsw_queries.sql b/test_runner/performance/pgvector/pgbench_hnsw_queries.sql new file mode 100644 index 0000000000..5034063c1b --- /dev/null +++ b/test_runner/performance/pgvector/pgbench_hnsw_queries.sql @@ -0,0 +1,13 @@ +-- run with pooled connection +-- pgbench -T 300 -c 100 -j20 -f pgbench_hnsw_queries.sql 
-postgresql://neondb_owner:@ep-floral-thunder-w1gzhaxi-pooler.eu-west-1.aws.neon.build/neondb?sslmode=require" + +with x (x) as ( + select "embeddings" as x + from hnsw_test_table + TABLESAMPLE SYSTEM (1) + LIMIT 1 +) +SELECT title, "embeddings" <=> (select x from x) as distance +FROM hnsw_test_table +ORDER BY 2 +LIMIT 30; diff --git a/test_runner/performance/test_perf_olap.py b/test_runner/performance/test_perf_olap.py index 8a9509ea44..2367676e67 100644 --- a/test_runner/performance/test_perf_olap.py +++ b/test_runner/performance/test_perf_olap.py @@ -100,6 +100,25 @@ QUERIES: Tuple[LabelledQuery, ...] = ( ) # fmt: on +# A list of pgvector HNSW index builds to run. +# Please do not alter the label for the query, as it is used to identify it. +# +# Disable auto formatting for the list of queries so that it's easier to read +# fmt: off +PGVECTOR_QUERIES: Tuple[LabelledQuery, ...] = ( + LabelledQuery("PGV0", r"DROP TABLE IF EXISTS hnsw_test_table;"), + LabelledQuery("PGV1", r"CREATE TABLE hnsw_test_table AS TABLE documents WITH NO DATA;"), + LabelledQuery("PGV2", r"INSERT INTO hnsw_test_table SELECT * FROM documents;"), + LabelledQuery("PGV3", r"CREATE INDEX ON hnsw_test_table (_id);"), + LabelledQuery("PGV4", r"CREATE INDEX ON hnsw_test_table USING hnsw (embeddings vector_cosine_ops);"), + LabelledQuery("PGV5", r"CREATE INDEX ON hnsw_test_table USING hnsw (embeddings vector_ip_ops);"), + LabelledQuery("PGV6", r"CREATE INDEX ON hnsw_test_table USING hnsw (embeddings vector_l1_ops);"), + LabelledQuery("PGV7", r"CREATE INDEX ON hnsw_test_table USING hnsw ((binary_quantize(embeddings)::bit(1536)) bit_hamming_ops);"), + LabelledQuery("PGV8", r"CREATE INDEX ON hnsw_test_table USING hnsw ((binary_quantize(embeddings)::bit(1536)) bit_jaccard_ops);"), +) +# fmt: on + + EXPLAIN_STRING: str = "EXPLAIN (ANALYZE, VERBOSE, BUFFERS, COSTS, SETTINGS, FORMAT JSON)" @@ -245,3 +264,18 @@ def test_clickbench_collect_pg_stat_statements(remote_compare: RemoteCompare): log.info("Collecting pg_stat_statements") query = LabelledQuery("Q_COLLECT_PG_STAT_STATEMENTS", r"SELECT * from pg_stat_statements;") run_psql(remote_compare, query, times=1, explain=False) + + +@pytest.mark.parametrize("query", PGVECTOR_QUERIES) +@pytest.mark.remote_cluster +def test_pgvector_indexing(query: LabelledQuery, remote_compare: RemoteCompare): + """ + An pgvector test that tests HNSW index build performance and parallelism. + + The DB prepared manually in advance. 
+ See + - test_runner/performance/pgvector/README.md + - test_runner/performance/pgvector/loaddata.py + - test_runner/performance/pgvector/HNSW_build.sql + """ + run_psql(remote_compare, query, times=1, explain=False) diff --git a/test_runner/performance/test_perf_pgbench.py b/test_runner/performance/test_perf_pgbench.py index 2b8760dff2..d756d6eeca 100644 --- a/test_runner/performance/test_perf_pgbench.py +++ b/test_runner/performance/test_perf_pgbench.py @@ -17,6 +17,7 @@ class PgBenchLoadType(enum.Enum): INIT = "init" SIMPLE_UPDATE = "simple-update" SELECT_ONLY = "select-only" + PGVECTOR_HNSW = "pgvector-hnsw" def utc_now_timestamp() -> int: @@ -132,6 +133,26 @@ def run_test_pgbench(env: PgCompare, scale: int, duration: int, workload_type: P password=password, ) + if workload_type == PgBenchLoadType.PGVECTOR_HNSW: + # Run simple-update workload + run_pgbench( + env, + "pgvector-hnsw", + [ + "pgbench", + "-f", + "test_runner/performance/pgvector/pgbench_custom_script_pgvector_hsnw_queries.sql", + "-c100", + "-j20", + f"-T{duration}", + "-P2", + "--protocol=prepared", + "--progress-timestamp", + connstr, + ], + password=password, + ) + env.report_size() @@ -201,3 +222,13 @@ def test_pgbench_remote_simple_update(remote_compare: PgCompare, scale: int, dur @pytest.mark.remote_cluster def test_pgbench_remote_select_only(remote_compare: PgCompare, scale: int, duration: int): run_test_pgbench(remote_compare, scale, duration, PgBenchLoadType.SELECT_ONLY) + + +# The following test runs on an existing database that has pgvector extension installed +# and a table with 1 million embedding vectors loaded and indexed with HNSW. +# +# Run this pgbench tests against an existing remote Postgres cluster with the necessary setup. +@pytest.mark.parametrize("duration", get_durations_matrix()) +@pytest.mark.remote_cluster +def test_pgbench_remote_pgvector(remote_compare: PgCompare, duration: int): + run_test_pgbench(remote_compare, 1, duration, PgBenchLoadType.PGVECTOR_HNSW) From f9f69a2ee7fb11b9d713bd3f2c50c5be516253c9 Mon Sep 17 00:00:00 2001 From: Peter Bendel Date: Tue, 28 May 2024 16:21:09 +0200 Subject: [PATCH 092/220] clarify how to load the dbpedia vector embeddings into a postgres database (#7894) ## Problem Improve the readme for the data load step in the pgvector performance test. --- test_runner/performance/pgvector/README.md | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/test_runner/performance/pgvector/README.md b/test_runner/performance/pgvector/README.md index c55db12e74..83495d270a 100644 --- a/test_runner/performance/pgvector/README.md +++ b/test_runner/performance/pgvector/README.md @@ -1,3 +1,20 @@ +# Source of the dataset for pgvector tests + +This readme was copied from https://huggingface.co/datasets/Qdrant/dbpedia-entities-openai3-text-embedding-3-large-1536-1M + +## Download the parquet files + +```bash +brew install git-lfs +git-lfs clone https://huggingface.co/datasets/Qdrant/dbpedia-entities-openai3-text-embedding-3-large-1536-1M +``` + +## Load into postgres: + +see loaddata.py in this directory + +## Rest of dataset card as on huggingface + --- dataset_info: features: @@ -35,4 +52,4 @@ size_categories: - Created: February 2024. 
- Text used for Embedding: title (string) + text (string) - Embedding Model: OpenAI text-embedding-3-large -- This dataset was generated from the first 1M entries of https://huggingface.co/datasets/BeIR/dbpedia-entity, extracted by @KShivendu_ [here](https://huggingface.co/datasets/KShivendu/dbpedia-entities-openai-1M) \ No newline at end of file +- This dataset was generated from the first 1M entries of https://huggingface.co/datasets/BeIR/dbpedia-entity, extracted by @KShivendu_ \ No newline at end of file From 352b08d0be56c73ba9017a82cd6496aea7ba5758 Mon Sep 17 00:00:00 2001 From: John Spray Date: Tue, 28 May 2024 16:06:47 +0100 Subject: [PATCH 093/220] pageserver: fix a warning on secondary mode downloads after evictions (#7877) ## Problem In 4ce6e2d2fc we added a warning when progress stats don't look right at the end of a secondary download pass. This `Correcting drift in progress stats` warning fired in staging on a pageserver that had been doing some disk usage eviction. The impact is low because in the same place we log the warning, we also fix up the progress values. ## Summary of changes - When we skip downloading a layer because it was recently evicted, update the progress stats to ensure they still reach a clean complete state at the end of a download pass. - Also add a log for evicting secondary location layers, for symmetry with attached locations, so that we can clearly see when eviction has happened for a particular tenant's layers when investigating issues. This is a point fix -- the code would also benefit from being refactored so that there is some "download result" type with a Skip variant, to ensure that we are updating the progress stats uniformly for those cases. --- pageserver/src/tenant/secondary.rs | 1 + pageserver/src/tenant/secondary/downloader.rs | 18 +++++++++++------- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/pageserver/src/tenant/secondary.rs b/pageserver/src/tenant/secondary.rs index 252b6eb11b..af6840f525 100644 --- a/pageserver/src/tenant/secondary.rs +++ b/pageserver/src/tenant/secondary.rs @@ -187,6 +187,7 @@ impl SecondaryTenant { }; let now = SystemTime::now(); + tracing::info!("Evicting secondary layer"); let this = self.clone(); diff --git a/pageserver/src/tenant/secondary/downloader.rs b/pageserver/src/tenant/secondary/downloader.rs index 0ec1bd649b..5c915d6b53 100644 --- a/pageserver/src/tenant/secondary/downloader.rs +++ b/pageserver/src/tenant/secondary/downloader.rs @@ -909,6 +909,7 @@ impl<'a> TenantDownloader<'a> { strftime(&layer.access_time), strftime(evicted_at) ); + self.skip_layer(layer); continue; } } @@ -963,6 +964,15 @@ impl<'a> TenantDownloader<'a> { Ok(()) } + /// Call this during timeline download if a layer will _not_ be downloaded, to update progress statistics + fn skip_layer(&self, layer: HeatMapLayer) { + let mut progress = self.secondary_state.progress.lock().unwrap(); + progress.layers_total = progress.layers_total.saturating_sub(1); + progress.bytes_total = progress + .bytes_total + .saturating_sub(layer.metadata.file_size); + } + async fn download_layer( &self, tenant_shard_id: &TenantShardId, @@ -1012,13 +1022,7 @@ impl<'a> TenantDownloader<'a> { "Skipped downloading missing layer {}, raced with compaction/gc?", layer.name ); - - // If the layer is 404, adjust the progress statistics to reflect that we will not download it. 
- let mut progress = self.secondary_state.progress.lock().unwrap(); - progress.layers_total = progress.layers_total.saturating_sub(1); - progress.bytes_total = progress - .bytes_total - .saturating_sub(layer.metadata.file_size); + self.skip_layer(layer); return Ok(None); } From 14df69d0e38c2ab3e1b8f1bef4a6981c842fd913 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arpad=20M=C3=BCller?= Date: Tue, 28 May 2024 17:40:52 +0200 Subject: [PATCH 094/220] Drop postgres-native-tls in favour of tokio-postgres-rustls (#7883) Get rid of postgres-native-tls and openssl in favour of rustls in our dependency tree. Do further steps to completely remove native-tls and openssl. Among other advantages, this allows us to do static musl builds more easily: #7889 --- Cargo.lock | 154 ++------------------ Cargo.toml | 15 +- deny.toml | 7 + proxy/Cargo.toml | 5 +- proxy/src/compute.rs | 84 +++++++++-- s3_scrubber/Cargo.toml | 6 +- s3_scrubber/src/scan_safekeeper_metadata.rs | 20 ++- workspace_hack/Cargo.toml | 4 +- 8 files changed, 124 insertions(+), 171 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d8f9021eb8..b1a307dd19 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -776,7 +776,6 @@ dependencies = [ "pin-project", "serde", "time", - "tz-rs", "url", "uuid", ] @@ -1291,12 +1290,6 @@ dependencies = [ "tiny-keccak", ] -[[package]] -name = "const_fn" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbdcdcb6d86f71c5e97409ad45898af11cbc995b4ee8112d59095a28d376c935" - [[package]] name = "const_format" version = "0.2.30" @@ -1976,21 +1969,6 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - [[package]] name = "form_urlencoded" version = "1.1.0" @@ -2620,19 +2598,6 @@ dependencies = [ "tokio-io-timeout", ] -[[package]] -name = "hyper-tls" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" -dependencies = [ - "bytes", - "hyper 0.14.26", - "native-tls", - "tokio", - "tokio-native-tls", -] - [[package]] name = "hyper-util" version = "0.1.3" @@ -3168,24 +3133,6 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" -[[package]] -name = "native-tls" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" -dependencies = [ - "lazy_static", - "libc", - "log", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework", - "security-framework-sys", - "tempfile", -] - [[package]] name = "nix" version = "0.25.1" @@ -3356,15 +3303,6 @@ dependencies = [ "libc", ] -[[package]] -name = "num_threads" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" -dependencies 
= [ - "libc", -] - [[package]] name = "oauth2" version = "4.4.2" @@ -3414,50 +3352,12 @@ version = "11.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" -[[package]] -name = "openssl" -version = "0.10.60" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79a4c6c3a2b158f7f8f2a2fc5a969fa3a068df6fc9dbb4a43845436e3af7c800" -dependencies = [ - "bitflags 2.4.1", - "cfg-if", - "foreign-types", - "libc", - "once_cell", - "openssl-macros", - "openssl-sys", -] - -[[package]] -name = "openssl-macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.52", -] - [[package]] name = "openssl-probe" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" -[[package]] -name = "openssl-sys" -version = "0.9.96" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3812c071ba60da8b5677cc12bcb1d42989a65553772897a7e0355545a819838f" -dependencies = [ - "cc", - "libc", - "pkg-config", - "vcpkg", -] - [[package]] name = "opentelemetry" version = "0.20.0" @@ -4105,17 +4005,6 @@ dependencies = [ "tokio-postgres", ] -[[package]] -name = "postgres-native-tls" -version = "0.5.0" -source = "git+https://github.com/neondatabase/rust-postgres.git?branch=neon#20031d7a9ee1addeae6e0968e3899ae6bf01cee2" -dependencies = [ - "native-tls", - "tokio", - "tokio-native-tls", - "tokio-postgres", -] - [[package]] name = "postgres-protocol" version = "0.6.4" @@ -4423,7 +4312,6 @@ dependencies = [ "md5", "measured", "metrics", - "native-tls", "once_cell", "opentelemetry", "parking_lot 0.12.1", @@ -4431,7 +4319,6 @@ dependencies = [ "parquet_derive", "pbkdf2", "pin-project-lite", - "postgres-native-tls", "postgres-protocol", "postgres_backend", "pq_proto", @@ -4450,6 +4337,7 @@ dependencies = [ "rstest", "rustc-hash", "rustls 0.22.4", + "rustls-native-certs 0.7.0", "rustls-pemfile 2.1.1", "scopeguard", "serde", @@ -4479,7 +4367,6 @@ dependencies = [ "utils", "uuid", "walkdir", - "webpki-roots 0.25.2", "workspace_hack", "x509-parser", ] @@ -4786,20 +4673,21 @@ dependencies = [ "http 0.2.9", "http-body 0.4.5", "hyper 0.14.26", - "hyper-tls", + "hyper-rustls 0.24.0", "ipnet", "js-sys", "log", "mime", - "native-tls", "once_cell", "percent-encoding", "pin-project-lite", + "rustls 0.21.11", + "rustls-pemfile 1.0.2", "serde", "serde_json", "serde_urlencoded", "tokio", - "tokio-native-tls", + "tokio-rustls 0.24.0", "tokio-util", "tower-service", "url", @@ -4807,6 +4695,7 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams 0.3.0", "web-sys", + "webpki-roots 0.25.2", "winreg 0.50.0", ] @@ -5232,20 +5121,22 @@ dependencies = [ "hex", "histogram", "itertools", - "native-tls", + "once_cell", "pageserver", "pageserver_api", - "postgres-native-tls", "postgres_ffi", "rand 0.8.5", "remote_storage", "reqwest 0.12.4", + "rustls 0.22.4", + "rustls-native-certs 0.7.0", "serde", "serde_json", "serde_with", "thiserror", "tokio", "tokio-postgres", + "tokio-postgres-rustls", "tokio-rustls 0.25.0", "tokio-stream", "tokio-util", @@ -6189,8 +6080,6 @@ checksum = "8f3403384eaacbca9923fa06940178ac13e4edb725486d70e8e15881d0c836cc" dependencies = [ "itoa", "js-sys", - "libc", - "num_threads", "serde", "time-core", "time-macros", @@ -6300,16 
+6189,6 @@ dependencies = [ "syn 2.0.52", ] -[[package]] -name = "tokio-native-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" -dependencies = [ - "native-tls", - "tokio", -] - [[package]] name = "tokio-postgres" version = "0.7.7" @@ -6716,15 +6595,6 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" -[[package]] -name = "tz-rs" -version = "0.6.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33851b15c848fad2cf4b105c6bb66eb9512b6f6c44a4b13f57c53c73c707e2b4" -dependencies = [ - "const_fn", -] - [[package]] name = "uname" version = "0.1.1" @@ -7629,9 +7499,9 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" dependencies = [ "zeroize_derive", ] diff --git a/Cargo.toml b/Cargo.toml index 0887c039f8..58715db32b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -46,10 +46,10 @@ anyhow = { version = "1.0", features = ["backtrace"] } arc-swap = "1.6" async-compression = { version = "0.4.0", features = ["tokio", "gzip", "zstd"] } atomic-take = "1.1.0" -azure_core = "0.19" -azure_identity = "0.19" -azure_storage = "0.19" -azure_storage_blobs = "0.19" +azure_core = { version = "0.19", default-features = false, features = ["enable_reqwest_rustls", "hmac_rust"] } +azure_identity = { version = "0.19", default-features = false, features = ["enable_reqwest_rustls"] } +azure_storage = { version = "0.19", default-features = false, features = ["enable_reqwest_rustls"] } +azure_storage_blobs = { version = "0.19", default-features = false, features = ["enable_reqwest_rustls"] } flate2 = "1.0.26" async-stream = "0.3" async-trait = "0.1" @@ -114,7 +114,6 @@ md5 = "0.7.0" measured = { version = "0.0.21", features=["lasso"] } measured-process = { version = "0.0.21" } memoffset = "0.8" -native-tls = "0.2" nix = { version = "0.27", features = ["fs", "process", "socket", "signal", "poll"] } notify = "6.0.0" num_cpus = "1.15" @@ -191,7 +190,7 @@ url = "2.2" urlencoding = "2.1" uuid = { version = "1.6.1", features = ["v4", "v7", "serde"] } walkdir = "2.3.2" -webpki-roots = "0.25" +rustls-native-certs = "0.7" x509-parser = "0.15" ## TODO replace this with tracing @@ -200,7 +199,6 @@ log = "0.4" ## Libraries from neondatabase/ git forks, ideally with changes to be upstreamed postgres = { git = "https://github.com/neondatabase/rust-postgres.git", branch="neon" } -postgres-native-tls = { git = "https://github.com/neondatabase/rust-postgres.git", branch="neon" } postgres-protocol = { git = "https://github.com/neondatabase/rust-postgres.git", branch="neon" } postgres-types = { git = "https://github.com/neondatabase/rust-postgres.git", branch="neon" } tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", branch="neon" } @@ -241,8 +239,7 @@ tonic-build = "0.9" [patch.crates-io] -# This is only needed for proxy's tests. -# TODO: we should probably fork `tokio-postgres-rustls` instead. +# Needed to get `tokio-postgres-rustls` to depend on our fork. 
tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", branch="neon" } # bug fixes for UUID diff --git a/deny.toml b/deny.toml index 22e39a2ca3..469609c496 100644 --- a/deny.toml +++ b/deny.toml @@ -99,6 +99,13 @@ name = "async-executor" [[bans.deny]] name = "smol" +[[bans.deny]] +# We want to use rustls instead of the platform's native tls implementation. +name = "native-tls" + +[[bans.deny]] +name = "openssl" + # This section is considered when running `cargo deny check sources`. # More documentation about the 'sources' section can be found here: # https://embarkstudios.github.io/cargo-deny/checks/sources/cfg.html diff --git a/proxy/Cargo.toml b/proxy/Cargo.toml index 7da0763bc1..0b892e3277 100644 --- a/proxy/Cargo.toml +++ b/proxy/Cargo.toml @@ -82,6 +82,7 @@ thiserror.workspace = true tikv-jemallocator.workspace = true tikv-jemalloc-ctl = { workspace = true, features = ["use_std"] } tokio-postgres.workspace = true +tokio-postgres-rustls.workspace = true tokio-rustls.workspace = true tokio-util.workspace = true tokio = { workspace = true, features = ["signal"] } @@ -94,10 +95,8 @@ url.workspace = true urlencoding.workspace = true utils.workspace = true uuid.workspace = true -webpki-roots.workspace = true +rustls-native-certs.workspace = true x509-parser.workspace = true -native-tls.workspace = true -postgres-native-tls.workspace = true postgres-protocol.workspace = true redis.workspace = true diff --git a/proxy/src/compute.rs b/proxy/src/compute.rs index 4433b3c1c2..feb09d5638 100644 --- a/proxy/src/compute.rs +++ b/proxy/src/compute.rs @@ -10,11 +10,14 @@ use crate::{ }; use futures::{FutureExt, TryFutureExt}; use itertools::Itertools; +use once_cell::sync::OnceCell; use pq_proto::StartupMessageParams; -use std::{io, net::SocketAddr, time::Duration}; +use rustls::{client::danger::ServerCertVerifier, pki_types::InvalidDnsNameError}; +use std::{io, net::SocketAddr, sync::Arc, time::Duration}; use thiserror::Error; use tokio::net::TcpStream; use tokio_postgres::tls::MakeTlsConnect; +use tokio_postgres_rustls::MakeRustlsConnect; use tracing::{error, info, warn}; const COULD_NOT_CONNECT: &str = "Couldn't connect to compute node"; @@ -30,7 +33,7 @@ pub enum ConnectionError { CouldNotConnect(#[from] io::Error), #[error("{COULD_NOT_CONNECT}: {0}")] - TlsError(#[from] native_tls::Error), + TlsError(#[from] InvalidDnsNameError), #[error("{COULD_NOT_CONNECT}: {0}")] WakeComputeError(#[from] WakeComputeError), @@ -257,7 +260,7 @@ pub struct PostgresConnection { /// Socket connected to a compute node. pub stream: tokio_postgres::maybe_tls_stream::MaybeTlsStream< tokio::net::TcpStream, - postgres_native_tls::TlsStream, + tokio_postgres_rustls::RustlsStream, >, /// PostgreSQL connection parameters. 
pub params: std::collections::HashMap, @@ -282,12 +285,23 @@ impl ConnCfg { let (socket_addr, stream, host) = self.connect_raw(timeout).await?; drop(pause); - let tls_connector = native_tls::TlsConnector::builder() - .danger_accept_invalid_certs(allow_self_signed_compute) - .build() - .unwrap(); - let mut mk_tls = postgres_native_tls::MakeTlsConnector::new(tls_connector); - let tls = MakeTlsConnect::::make_tls_connect(&mut mk_tls, host)?; + let client_config = if allow_self_signed_compute { + // Allow all certificates for creating the connection + let verifier = Arc::new(AcceptEverythingVerifier) as Arc; + rustls::ClientConfig::builder() + .dangerous() + .with_custom_certificate_verifier(verifier) + } else { + let root_store = TLS_ROOTS.get_or_try_init(load_certs)?.clone(); + rustls::ClientConfig::builder().with_root_certificates(root_store) + }; + let client_config = client_config.with_no_client_auth(); + + let mut mk_tls = tokio_postgres_rustls::MakeRustlsConnect::new(client_config); + let tls = >::make_tls_connect( + &mut mk_tls, + host, + )?; // connect_raw() will not use TLS if sslmode is "disable" let pause = ctx.latency_timer.pause(crate::metrics::Waiting::Compute); @@ -340,6 +354,58 @@ fn filtered_options(params: &StartupMessageParams) -> Option { Some(options) } +fn load_certs() -> Result, io::Error> { + let der_certs = rustls_native_certs::load_native_certs()?; + let mut store = rustls::RootCertStore::empty(); + store.add_parsable_certificates(der_certs); + Ok(Arc::new(store)) +} +static TLS_ROOTS: OnceCell> = OnceCell::new(); + +#[derive(Debug)] +struct AcceptEverythingVerifier; +impl ServerCertVerifier for AcceptEverythingVerifier { + fn supported_verify_schemes(&self) -> Vec { + use rustls::SignatureScheme::*; + // The schemes for which `SignatureScheme::supported_in_tls13` returns true. 
+ vec![ + ECDSA_NISTP521_SHA512, + ECDSA_NISTP384_SHA384, + ECDSA_NISTP256_SHA256, + RSA_PSS_SHA512, + RSA_PSS_SHA384, + RSA_PSS_SHA256, + ED25519, + ] + } + fn verify_server_cert( + &self, + _end_entity: &rustls::pki_types::CertificateDer<'_>, + _intermediates: &[rustls::pki_types::CertificateDer<'_>], + _server_name: &rustls::pki_types::ServerName<'_>, + _ocsp_response: &[u8], + _now: rustls::pki_types::UnixTime, + ) -> Result { + Ok(rustls::client::danger::ServerCertVerified::assertion()) + } + fn verify_tls12_signature( + &self, + _message: &[u8], + _cert: &rustls::pki_types::CertificateDer<'_>, + _dss: &rustls::DigitallySignedStruct, + ) -> Result { + Ok(rustls::client::danger::HandshakeSignatureValid::assertion()) + } + fn verify_tls13_signature( + &self, + _message: &[u8], + _cert: &rustls::pki_types::CertificateDer<'_>, + _dss: &rustls::DigitallySignedStruct, + ) -> Result { + Ok(rustls::client::danger::HandshakeSignatureValid::assertion()) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/s3_scrubber/Cargo.toml b/s3_scrubber/Cargo.toml index dd5d453a2b..e56bd43fb8 100644 --- a/s3_scrubber/Cargo.toml +++ b/s3_scrubber/Cargo.toml @@ -22,8 +22,7 @@ serde_with.workspace = true workspace_hack.workspace = true utils.workspace = true async-stream.workspace = true -native-tls.workspace = true -postgres-native-tls.workspace = true +tokio-postgres-rustls.workspace = true postgres_ffi.workspace = true tokio-stream.workspace = true tokio-postgres.workspace = true @@ -31,6 +30,9 @@ tokio-util = { workspace = true } futures-util.workspace = true itertools.workspace = true camino.workspace = true +rustls.workspace = true +rustls-native-certs.workspace = true +once_cell.workspace = true tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } chrono = { workspace = true, default-features = false, features = ["clock", "serde"] } diff --git a/s3_scrubber/src/scan_safekeeper_metadata.rs b/s3_scrubber/src/scan_safekeeper_metadata.rs index 73dd49ceb5..24051b03de 100644 --- a/s3_scrubber/src/scan_safekeeper_metadata.rs +++ b/s3_scrubber/src/scan_safekeeper_metadata.rs @@ -1,7 +1,8 @@ -use std::{collections::HashSet, str::FromStr}; +use std::{collections::HashSet, str::FromStr, sync::Arc}; use aws_sdk_s3::Client; use futures::stream::{StreamExt, TryStreamExt}; +use once_cell::sync::OnceCell; use pageserver_api::shard::TenantShardId; use postgres_ffi::{XLogFileName, PG_TLI}; use serde::Serialize; @@ -70,9 +71,12 @@ pub async fn scan_safekeeper_metadata( "checking bucket {}, region {}, dump_db_table {}", bucket_config.bucket, bucket_config.region, dump_db_table ); - // Use the native TLS implementation (Neon requires TLS) - let tls_connector = - postgres_native_tls::MakeTlsConnector::new(native_tls::TlsConnector::new().unwrap()); + // Use rustls (Neon requires TLS) + let root_store = TLS_ROOTS.get_or_try_init(load_certs)?.clone(); + let client_config = rustls::ClientConfig::builder() + .with_root_certificates(root_store) + .with_no_client_auth(); + let tls_connector = tokio_postgres_rustls::MakeRustlsConnect::new(client_config); let (client, connection) = tokio_postgres::connect(&dump_db_connstr, tls_connector).await?; // The connection object performs the actual communication with the database, // so spawn it off to run on its own. 
@@ -234,3 +238,11 @@ async fn check_timeline( is_deleted: false, }) } + +fn load_certs() -> Result, std::io::Error> { + let der_certs = rustls_native_certs::load_native_certs()?; + let mut store = rustls::RootCertStore::empty(); + store.add_parsable_certificates(der_certs); + Ok(Arc::new(store)) +} +static TLS_ROOTS: OnceCell> = OnceCell::new(); diff --git a/workspace_hack/Cargo.toml b/workspace_hack/Cargo.toml index f364a6c2e0..df16c71789 100644 --- a/workspace_hack/Cargo.toml +++ b/workspace_hack/Cargo.toml @@ -59,7 +59,7 @@ regex = { version = "1" } regex-automata = { version = "0.4", default-features = false, features = ["dfa-onepass", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } regex-syntax = { version = "0.8" } reqwest-5ef9efb8ec2df382 = { package = "reqwest", version = "0.12", default-features = false, features = ["blocking", "json", "rustls-tls", "stream"] } -reqwest-a6292c17cd707f01 = { package = "reqwest", version = "0.11", default-features = false, features = ["blocking", "default-tls", "stream"] } +reqwest-a6292c17cd707f01 = { package = "reqwest", version = "0.11", default-features = false, features = ["blocking", "rustls-tls", "stream"] } rustls = { version = "0.21", features = ["dangerous_configuration"] } scopeguard = { version = "1" } serde = { version = "1", features = ["alloc", "derive"] } @@ -68,7 +68,7 @@ sha2 = { version = "0.10", features = ["asm"] } smallvec = { version = "1", default-features = false, features = ["const_new", "write"] } subtle = { version = "2" } sync_wrapper = { version = "0.1", default-features = false, features = ["futures"] } -time = { version = "0.3", features = ["local-offset", "macros", "serde-well-known"] } +time = { version = "0.3", features = ["macros", "serde-well-known"] } tokio = { version = "1", features = ["fs", "io-std", "io-util", "macros", "net", "process", "rt-multi-thread", "signal", "test-util"] } tokio-rustls = { version = "0.24" } tokio-util = { version = "0.7", features = ["codec", "compat", "io", "rt"] } From c8cebecabf75149211866cd8e8f07ec061ccc2a5 Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Wed, 29 May 2024 11:17:05 +0100 Subject: [PATCH 095/220] proxy: reintroduce dynamic limiter for compute lock (#7737) ## Problem Computes that are healthy can manage many connection attempts at a time. Unhealthy computes cannot. We initially handled this with a fixed concurrency limit, but it seems this inhibits pgbench. 
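For reference, the additive-increase/multiplicative-decrease (AIMD) rule adopted here can be summarised with the standalone sketch below. It is illustrative only: the field names mirror the `Aimd` config introduced in this patch, but the real implementation lives in `proxy/src/rate_limiter/limit_algorithm/aimd.rs` further down in the diff.

```rust
// Hedged sketch of the AIMD update rule (simplified; not the exact code in this patch).
#[derive(Clone, Copy)]
struct Aimd {
    min: usize,       // lower bound on the concurrency limit
    max: usize,       // upper bound on the concurrency limit
    inc: usize,       // additive increase applied on success
    dec: f32,         // multiplicative decrease factor applied on overload
    utilisation: f32, // only grow the limit when it is actually being used
}

impl Aimd {
    fn update(&self, old_limit: usize, in_flight: usize, overloaded: bool) -> usize {
        if overloaded {
            // Multiplicative decrease; floor so small limits still shrink.
            ((old_limit as f32 * self.dec).floor() as usize).clamp(self.min, self.max)
        } else if in_flight as f32 / old_limit as f32 > self.utilisation {
            // Additive increase, but only when the current limit is well utilised.
            (old_limit + self.inc).clamp(self.min, self.max)
        } else {
            old_limit
        }
    }
}

fn main() {
    let aimd = Aimd { min: 1, max: 500, inc: 10, dec: 0.9, utilisation: 0.8 };
    let mut limit = 100;
    limit = aimd.update(limit, 90, false); // healthy and busy: 100 -> 110
    limit = aimd.update(limit, 110, true); // overloaded: 110 -> 99
    println!("limit = {limit}");
}
```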
## Summary of changes Support AIMD for connect_to_compute lock to allow varying the concurrency limit based on compute health --- Cargo.lock | 1 + proxy/Cargo.toml | 1 + proxy/src/bin/proxy.rs | 17 +- proxy/src/config.rs | 67 ++++- proxy/src/console/provider.rs | 44 +-- proxy/src/console/provider/neon.rs | 2 +- proxy/src/proxy/connect_compute.rs | 4 +- proxy/src/rate_limiter.rs | 4 + proxy/src/rate_limiter/limit_algorithm.rs | 275 ++++++++++++++++++ .../src/rate_limiter/limit_algorithm/aimd.rs | 184 ++++++++++++ proxy/src/serverless/backend.rs | 4 +- 11 files changed, 563 insertions(+), 40 deletions(-) create mode 100644 proxy/src/rate_limiter/limit_algorithm.rs create mode 100644 proxy/src/rate_limiter/limit_algorithm/aimd.rs diff --git a/Cargo.lock b/Cargo.lock index b1a307dd19..794486e2e1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4302,6 +4302,7 @@ dependencies = [ "http 1.1.0", "http-body-util", "humantime", + "humantime-serde", "hyper 0.14.26", "hyper 1.2.0", "hyper-util", diff --git a/proxy/Cargo.toml b/proxy/Cargo.toml index 0b892e3277..288f7769fe 100644 --- a/proxy/Cargo.toml +++ b/proxy/Cargo.toml @@ -38,6 +38,7 @@ hmac.workspace = true hostname.workspace = true http.workspace = true humantime.workspace = true +humantime-serde.workspace = true hyper.workspace = true hyper1 = { package = "hyper", version = "1.2", features = ["server"] } hyper-util = { version = "0.1", features = ["server", "http1", "http2", "tokio"] } diff --git a/proxy/src/bin/proxy.rs b/proxy/src/bin/proxy.rs index 30f2e6f4b7..dffebf5580 100644 --- a/proxy/src/bin/proxy.rs +++ b/proxy/src/bin/proxy.rs @@ -557,14 +557,14 @@ fn build_config(args: &ProxyCliArgs) -> anyhow::Result<&'static ProxyConfig> { let config::ConcurrencyLockOptions { shards, - permits, + limiter, epoch, timeout, } = args.wake_compute_lock.parse()?; - info!(permits, shards, ?epoch, "Using NodeLocks (wake_compute)"); + info!(?limiter, shards, ?epoch, "Using NodeLocks (wake_compute)"); let locks = Box::leak(Box::new(console::locks::ApiLocks::new( "wake_compute_lock", - permits, + limiter, shards, timeout, epoch, @@ -603,14 +603,19 @@ fn build_config(args: &ProxyCliArgs) -> anyhow::Result<&'static ProxyConfig> { let config::ConcurrencyLockOptions { shards, - permits, + limiter, epoch, timeout, } = args.connect_compute_lock.parse()?; - info!(permits, shards, ?epoch, "Using NodeLocks (connect_compute)"); + info!( + ?limiter, + shards, + ?epoch, + "Using NodeLocks (connect_compute)" + ); let connect_compute_locks = console::locks::ApiLocks::new( "connect_compute_lock", - permits, + limiter, shards, timeout, epoch, diff --git a/proxy/src/config.rs b/proxy/src/config.rs index 5a0c251ce2..f4707a33aa 100644 --- a/proxy/src/config.rs +++ b/proxy/src/config.rs @@ -1,7 +1,7 @@ use crate::{ auth::{self, backend::AuthRateLimiter}, console::locks::ApiLocks, - rate_limiter::RateBucketInfo, + rate_limiter::{RateBucketInfo, RateLimitAlgorithm, RateLimiterConfig}, scram::threadpool::ThreadPool, serverless::{cancel_set::CancelSet, GlobalConnPoolOptions}, Host, @@ -580,14 +580,18 @@ impl RetryConfig { } /// Helper for cmdline cache options parsing. 
+#[derive(serde::Deserialize)] pub struct ConcurrencyLockOptions { /// The number of shards the lock map should have pub shards: usize, /// The number of allowed concurrent requests for each endpoitn - pub permits: usize, + #[serde(flatten)] + pub limiter: RateLimiterConfig, /// Garbage collection epoch + #[serde(deserialize_with = "humantime_serde::deserialize")] pub epoch: Duration, /// Lock timeout + #[serde(deserialize_with = "humantime_serde::deserialize")] pub timeout: Duration, } @@ -596,13 +600,18 @@ impl ConcurrencyLockOptions { pub const DEFAULT_OPTIONS_WAKE_COMPUTE_LOCK: &'static str = "permits=0"; /// Default options for [`crate::console::provider::ApiLocks`]. pub const DEFAULT_OPTIONS_CONNECT_COMPUTE_LOCK: &'static str = - "shards=64,permits=10,epoch=10m,timeout=10ms"; + "shards=64,permits=100,epoch=10m,timeout=10ms"; // pub const DEFAULT_OPTIONS_WAKE_COMPUTE_LOCK: &'static str = "shards=32,permits=4,epoch=10m,timeout=1s"; /// Parse lock options passed via cmdline. /// Example: [`Self::DEFAULT_OPTIONS_WAKE_COMPUTE_LOCK`]. fn parse(options: &str) -> anyhow::Result { + let options = options.trim(); + if options.starts_with('{') && options.ends_with('}') { + return Ok(serde_json::from_str(options)?); + } + let mut shards = None; let mut permits = None; let mut epoch = None; @@ -629,9 +638,13 @@ impl ConcurrencyLockOptions { shards = Some(2); } + let permits = permits.context("missing `permits`")?; let out = Self { shards: shards.context("missing `shards`")?, - permits: permits.context("missing `permits`")?, + limiter: RateLimiterConfig { + algorithm: RateLimitAlgorithm::Fixed, + initial_limit: permits, + }, epoch: epoch.context("missing `epoch`")?, timeout: timeout.context("missing `timeout`")?, }; @@ -657,6 +670,8 @@ impl FromStr for ConcurrencyLockOptions { #[cfg(test)] mod tests { + use crate::rate_limiter::Aimd; + use super::*; #[test] @@ -684,36 +699,68 @@ mod tests { fn test_parse_lock_options() -> anyhow::Result<()> { let ConcurrencyLockOptions { epoch, - permits, + limiter, shards, timeout, } = "shards=32,permits=4,epoch=10m,timeout=1s".parse()?; assert_eq!(epoch, Duration::from_secs(10 * 60)); assert_eq!(timeout, Duration::from_secs(1)); assert_eq!(shards, 32); - assert_eq!(permits, 4); + assert_eq!(limiter.initial_limit, 4); + assert_eq!(limiter.algorithm, RateLimitAlgorithm::Fixed); let ConcurrencyLockOptions { epoch, - permits, + limiter, shards, timeout, } = "epoch=60s,shards=16,timeout=100ms,permits=8".parse()?; assert_eq!(epoch, Duration::from_secs(60)); assert_eq!(timeout, Duration::from_millis(100)); assert_eq!(shards, 16); - assert_eq!(permits, 8); + assert_eq!(limiter.initial_limit, 8); + assert_eq!(limiter.algorithm, RateLimitAlgorithm::Fixed); let ConcurrencyLockOptions { epoch, - permits, + limiter, shards, timeout, } = "permits=0".parse()?; assert_eq!(epoch, Duration::ZERO); assert_eq!(timeout, Duration::ZERO); assert_eq!(shards, 2); - assert_eq!(permits, 0); + assert_eq!(limiter.initial_limit, 0); + assert_eq!(limiter.algorithm, RateLimitAlgorithm::Fixed); + + Ok(()) + } + + #[test] + fn test_parse_json_lock_options() -> anyhow::Result<()> { + let ConcurrencyLockOptions { + epoch, + limiter, + shards, + timeout, + } = r#"{"shards":32,"initial_limit":44,"aimd":{"min":5,"max":500,"inc":10,"dec":0.9,"utilisation":0.8},"epoch":"10m","timeout":"1s"}"# + .parse()?; + assert_eq!(epoch, Duration::from_secs(10 * 60)); + assert_eq!(timeout, Duration::from_secs(1)); + assert_eq!(shards, 32); + assert_eq!(limiter.initial_limit, 44); + assert_eq!( + limiter.algorithm, 
+ RateLimitAlgorithm::Aimd { + conf: Aimd { + min: 5, + max: 500, + dec: 0.9, + inc: 10, + utilisation: 0.8 + } + }, + ); Ok(()) } diff --git a/proxy/src/console/provider.rs b/proxy/src/console/provider.rs index 3b996cdbd1..4d074f98a5 100644 --- a/proxy/src/console/provider.rs +++ b/proxy/src/console/provider.rs @@ -15,11 +15,11 @@ use crate::{ error::ReportableError, intern::ProjectIdInt, metrics::ApiLockMetrics, + rate_limiter::{DynamicLimiter, Outcome, RateLimiterConfig, Token}, scram, EndpointCacheKey, }; use dashmap::DashMap; use std::{hash::Hash, sync::Arc, time::Duration}; -use tokio::sync::{OwnedSemaphorePermit, Semaphore}; use tokio::time::Instant; use tracing::info; @@ -443,8 +443,8 @@ impl ApiCaches { /// Various caches for [`console`](super). pub struct ApiLocks { name: &'static str, - node_locks: DashMap>, - permits: usize, + node_locks: DashMap>, + config: RateLimiterConfig, timeout: Duration, epoch: std::time::Duration, metrics: &'static ApiLockMetrics, @@ -452,8 +452,6 @@ pub struct ApiLocks { #[derive(Debug, thiserror::Error)] pub enum ApiLockError { - #[error("lock was closed")] - AcquireError(#[from] tokio::sync::AcquireError), #[error("permit could not be acquired")] TimeoutError(#[from] tokio::time::error::Elapsed), } @@ -461,7 +459,6 @@ pub enum ApiLockError { impl ReportableError for ApiLockError { fn get_error_kind(&self) -> crate::error::ErrorKind { match self { - ApiLockError::AcquireError(_) => crate::error::ErrorKind::Service, ApiLockError::TimeoutError(_) => crate::error::ErrorKind::RateLimit, } } @@ -470,7 +467,7 @@ impl ReportableError for ApiLockError { impl ApiLocks { pub fn new( name: &'static str, - permits: usize, + config: RateLimiterConfig, shards: usize, timeout: Duration, epoch: std::time::Duration, @@ -479,7 +476,7 @@ impl ApiLocks { Ok(Self { name, node_locks: DashMap::with_shard_amount(shards), - permits, + config, timeout, epoch, metrics, @@ -487,8 +484,10 @@ impl ApiLocks { } pub async fn get_permit(&self, key: &K) -> Result { - if self.permits == 0 { - return Ok(WakeComputePermit { permit: None }); + if self.config.initial_limit == 0 { + return Ok(WakeComputePermit { + permit: Token::disabled(), + }); } let now = Instant::now(); let semaphore = { @@ -500,24 +499,22 @@ impl ApiLocks { .entry(key.clone()) .or_insert_with(|| { self.metrics.semaphores_registered.inc(); - Arc::new(Semaphore::new(self.permits)) + DynamicLimiter::new(self.config) }) .clone() } }; - let permit = tokio::time::timeout_at(now + self.timeout, semaphore.acquire_owned()).await; + let permit = semaphore.acquire_deadline(now + self.timeout).await; self.metrics .semaphore_acquire_seconds .observe(now.elapsed().as_secs_f64()); - Ok(WakeComputePermit { - permit: Some(permit??), - }) + Ok(WakeComputePermit { permit: permit? 
}) } pub async fn garbage_collect_worker(&self) { - if self.permits == 0 { + if self.config.initial_limit == 0 { return; } let mut interval = @@ -547,12 +544,21 @@ impl ApiLocks { } pub struct WakeComputePermit { - // None if the lock is disabled - permit: Option, + permit: Token, } impl WakeComputePermit { pub fn should_check_cache(&self) -> bool { - self.permit.is_some() + !self.permit.is_disabled() + } + pub fn release(self, outcome: Outcome) { + self.permit.release(outcome) + } + pub fn release_result(self, res: Result) -> Result { + match res { + Ok(_) => self.release(Outcome::Success), + Err(_) => self.release(Outcome::Overload), + } + res } } diff --git a/proxy/src/console/provider/neon.rs b/proxy/src/console/provider/neon.rs index 7728d2cafa..5d691e5f15 100644 --- a/proxy/src/console/provider/neon.rs +++ b/proxy/src/console/provider/neon.rs @@ -301,7 +301,7 @@ impl super::Api for Api { } } - let mut node = self.do_wake_compute(ctx, user_info).await?; + let mut node = permit.release_result(self.do_wake_compute(ctx, user_info).await)?; ctx.set_project(node.aux.clone()); let cold_start_info = node.aux.cold_start_info; info!("woken up a compute node"); diff --git a/proxy/src/proxy/connect_compute.rs b/proxy/src/proxy/connect_compute.rs index c8528d0296..409d45b39a 100644 --- a/proxy/src/proxy/connect_compute.rs +++ b/proxy/src/proxy/connect_compute.rs @@ -84,8 +84,8 @@ impl ConnectMechanism for TcpMechanism<'_> { timeout: time::Duration, ) -> Result { let host = node_info.config.get_host()?; - let _permit = self.locks.get_permit(&host).await?; - node_info.connect(ctx, timeout).await + let permit = self.locks.get_permit(&host).await?; + permit.release_result(node_info.connect(ctx, timeout).await) } fn update_connect_config(&self, config: &mut compute::ConnCfg) { diff --git a/proxy/src/rate_limiter.rs b/proxy/src/rate_limiter.rs index c542267547..be9072dd8c 100644 --- a/proxy/src/rate_limiter.rs +++ b/proxy/src/rate_limiter.rs @@ -1,2 +1,6 @@ +mod limit_algorithm; mod limiter; +pub use limit_algorithm::{ + aimd::Aimd, DynamicLimiter, Outcome, RateLimitAlgorithm, RateLimiterConfig, Token, +}; pub use limiter::{BucketRateLimiter, EndpointRateLimiter, GlobalRateLimiter, RateBucketInfo}; diff --git a/proxy/src/rate_limiter/limit_algorithm.rs b/proxy/src/rate_limiter/limit_algorithm.rs new file mode 100644 index 0000000000..072fdb80b0 --- /dev/null +++ b/proxy/src/rate_limiter/limit_algorithm.rs @@ -0,0 +1,275 @@ +//! Algorithms for controlling concurrency limits. +use parking_lot::Mutex; +use std::{pin::pin, sync::Arc, time::Duration}; +use tokio::{ + sync::Notify, + time::{error::Elapsed, timeout_at, Instant}, +}; + +use self::aimd::Aimd; + +pub mod aimd; + +/// Whether a job succeeded or failed as a result of congestion/overload. +/// +/// Errors not considered to be caused by overload should be ignored. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum Outcome { + /// The job succeeded, or failed in a way unrelated to overload. + Success, + /// The job failed because of overload, e.g. it timed out or an explicit backpressure signal + /// was observed. + Overload, +} + +/// An algorithm for controlling a concurrency limit. +pub trait LimitAlgorithm: Send + Sync + 'static { + /// Update the concurrency limit in response to a new job completion. + fn update(&self, old_limit: usize, sample: Sample) -> usize; +} + +/// The result of a job (or jobs), including the [`Outcome`] (loss) and latency (delay). 
+#[derive(Debug, Clone, PartialEq, Eq, Copy)] +pub struct Sample { + pub(crate) latency: Duration, + /// Jobs in flight when the sample was taken. + pub(crate) in_flight: usize, + pub(crate) outcome: Outcome, +} + +#[derive(Clone, Copy, Debug, Default, serde::Deserialize, PartialEq)] +#[serde(rename_all = "snake_case")] +pub enum RateLimitAlgorithm { + #[default] + Fixed, + Aimd { + #[serde(flatten)] + conf: Aimd, + }, +} + +pub struct Fixed; + +impl LimitAlgorithm for Fixed { + fn update(&self, old_limit: usize, _sample: Sample) -> usize { + old_limit + } +} + +#[derive(Clone, Copy, Debug, serde::Deserialize, PartialEq)] +pub struct RateLimiterConfig { + #[serde(flatten)] + pub algorithm: RateLimitAlgorithm, + pub initial_limit: usize, +} + +impl RateLimiterConfig { + pub fn create_rate_limit_algorithm(self) -> Box { + match self.algorithm { + RateLimitAlgorithm::Fixed => Box::new(Fixed), + RateLimitAlgorithm::Aimd { conf } => Box::new(conf), + } + } +} + +pub struct LimiterInner { + alg: Box, + available: usize, + limit: usize, + in_flight: usize, +} + +impl LimiterInner { + fn update(&mut self, latency: Duration, outcome: Option) { + if let Some(outcome) = outcome { + let sample = Sample { + latency, + in_flight: self.in_flight, + outcome, + }; + self.limit = self.alg.update(self.limit, sample); + } + } + + fn take(&mut self, ready: &Notify) -> Option<()> { + if self.available > 1 { + self.available -= 1; + self.in_flight += 1; + + // tell the next in the queue that there is a permit ready + if self.available > 1 { + ready.notify_one(); + } + Some(()) + } else { + None + } + } +} + +/// Limits the number of concurrent jobs. +/// +/// Concurrency is limited through the use of [`Token`]s. Acquire a token to run a job, and release the +/// token once the job is finished. +/// +/// The limit will be automatically adjusted based on observed latency (delay) and/or failures +/// caused by overload (loss). +pub struct DynamicLimiter { + config: RateLimiterConfig, + inner: Mutex, + // to notify when a token is available + ready: Notify, +} + +/// A concurrency token, required to run a job. +/// +/// Release the token back to the [`DynamicLimiter`] after the job is complete. +pub struct Token { + start: Instant, + limiter: Option>, +} + +/// A snapshot of the state of the [`DynamicLimiter`]. +/// +/// Not guaranteed to be consistent under high concurrency. +#[derive(Debug, Clone, Copy)] +pub struct LimiterState { + limit: usize, + in_flight: usize, +} + +impl DynamicLimiter { + /// Create a limiter with a given limit control algorithm. + pub fn new(config: RateLimiterConfig) -> Arc { + let ready = Notify::new(); + ready.notify_one(); + + Arc::new(Self { + inner: Mutex::new(LimiterInner { + alg: config.create_rate_limit_algorithm(), + available: config.initial_limit, + limit: config.initial_limit, + in_flight: 0, + }), + ready, + config, + }) + } + + /// Try to acquire a concurrency [Token], waiting for `duration` if there are none available. + /// + /// Returns `None` if there are none available after `duration`. + pub async fn acquire_timeout(self: &Arc, duration: Duration) -> Result { + self.acquire_deadline(Instant::now() + duration).await + } + + /// Try to acquire a concurrency [Token], waiting until `deadline` if there are none available. + /// + /// Returns `None` if there are none available after `deadline`. + pub async fn acquire_deadline(self: &Arc, deadline: Instant) -> Result { + if self.config.initial_limit == 0 { + // If the rate limiter is disabled, we can always acquire a token. 
+ Ok(Token::disabled()) + } else { + let mut notified = pin!(self.ready.notified()); + let mut ready = notified.as_mut().enable(); + loop { + let mut limit = None; + if ready { + let mut inner = self.inner.lock(); + if inner.take(&self.ready).is_some() { + break Ok(Token::new(self.clone())); + } + limit = Some(inner.limit); + } + match timeout_at(deadline, notified.as_mut()).await { + Ok(()) => ready = true, + Err(e) => { + let limit = limit.unwrap_or_else(|| self.inner.lock().limit); + tracing::info!(limit, "could not acquire token in time"); + break Err(e); + } + } + } + } + } + + /// Return the concurrency [Token], along with the outcome of the job. + /// + /// The [Outcome] of the job, and the time taken to perform it, may be used + /// to update the concurrency limit. + /// + /// Set the outcome to `None` to ignore the job. + fn release_inner(&self, start: Instant, outcome: Option) { + tracing::info!("outcome is {:?}", outcome); + if self.config.initial_limit == 0 { + return; + } + + let mut inner = self.inner.lock(); + + inner.update(start.elapsed(), outcome); + if inner.in_flight < inner.limit { + inner.available = inner.limit - inner.in_flight; + // At least 1 permit is now available + self.ready.notify_one(); + } + + inner.in_flight -= 1; + } + + /// The current state of the limiter. + pub fn state(&self) -> LimiterState { + let inner = self.inner.lock(); + LimiterState { + limit: inner.limit, + in_flight: inner.in_flight, + } + } +} + +impl Token { + fn new(limiter: Arc) -> Self { + Self { + start: Instant::now(), + limiter: Some(limiter), + } + } + pub fn disabled() -> Self { + Self { + start: Instant::now(), + limiter: None, + } + } + + pub fn is_disabled(&self) -> bool { + self.limiter.is_none() + } + + pub fn release(mut self, outcome: Outcome) { + self.release_mut(Some(outcome)) + } + + pub fn release_mut(&mut self, outcome: Option) { + if let Some(limiter) = self.limiter.take() { + limiter.release_inner(self.start, outcome); + } + } +} + +impl Drop for Token { + fn drop(&mut self) { + self.release_mut(None) + } +} + +impl LimiterState { + /// The current concurrency limit. + pub fn limit(&self) -> usize { + self.limit + } + /// The number of jobs in flight. + pub fn in_flight(&self) -> usize { + self.in_flight + } +} diff --git a/proxy/src/rate_limiter/limit_algorithm/aimd.rs b/proxy/src/rate_limiter/limit_algorithm/aimd.rs new file mode 100644 index 0000000000..370d4be802 --- /dev/null +++ b/proxy/src/rate_limiter/limit_algorithm/aimd.rs @@ -0,0 +1,184 @@ +use std::usize; + +use super::{LimitAlgorithm, Outcome, Sample}; + +/// Loss-based congestion avoidance. +/// +/// Additive-increase, multiplicative decrease. +/// +/// Adds available currency when: +/// 1. no load-based errors are observed, and +/// 2. the utilisation of the current limit is high. +/// +/// Reduces available concurrency by a factor when load-based errors are detected. +#[derive(Clone, Copy, Debug, serde::Deserialize, PartialEq)] +pub struct Aimd { + /// Minimum limit for AIMD algorithm. + pub min: usize, + /// Maximum limit for AIMD algorithm. + pub max: usize, + /// Decrease AIMD decrease by value in case of error. + pub dec: f32, + /// Increase AIMD increase by value in case of success. + pub inc: usize, + /// A threshold below which the limit won't be increased. 
+ pub utilisation: f32, +} + +impl LimitAlgorithm for Aimd { + fn update(&self, old_limit: usize, sample: Sample) -> usize { + use Outcome::*; + match sample.outcome { + Success => { + let utilisation = sample.in_flight as f32 / old_limit as f32; + + if utilisation > self.utilisation { + let limit = old_limit + self.inc; + let increased_limit = limit.clamp(self.min, self.max); + if increased_limit > old_limit { + tracing::info!(increased_limit, "limit increased"); + } + + increased_limit + } else { + old_limit + } + } + Overload => { + let limit = old_limit as f32 * self.dec; + + // Floor instead of round, so the limit reduces even with small numbers. + // E.g. round(2 * 0.9) = 2, but floor(2 * 0.9) = 1 + let limit = limit.floor() as usize; + + limit.clamp(self.min, self.max) + } + } + } +} + +#[cfg(test)] +mod tests { + use std::time::Duration; + + use crate::rate_limiter::limit_algorithm::{ + DynamicLimiter, RateLimitAlgorithm, RateLimiterConfig, + }; + + use super::*; + + #[tokio::test(start_paused = true)] + async fn should_decrease_limit_on_overload() { + let config = RateLimiterConfig { + initial_limit: 10, + algorithm: RateLimitAlgorithm::Aimd { + conf: Aimd { + min: 1, + max: 1500, + inc: 10, + dec: 0.5, + utilisation: 0.8, + }, + }, + }; + + let limiter = DynamicLimiter::new(config); + + let token = limiter + .acquire_timeout(Duration::from_millis(1)) + .await + .unwrap(); + token.release(Outcome::Overload); + + assert_eq!(limiter.state().limit(), 5, "overload: decrease"); + } + + #[tokio::test(start_paused = true)] + async fn should_increase_limit_on_success_when_using_gt_util_threshold() { + let config = RateLimiterConfig { + initial_limit: 4, + algorithm: RateLimitAlgorithm::Aimd { + conf: Aimd { + min: 1, + max: 1500, + inc: 1, + dec: 0.5, + utilisation: 0.5, + }, + }, + }; + + let limiter = DynamicLimiter::new(config); + + let token = limiter + .acquire_timeout(Duration::from_millis(1)) + .await + .unwrap(); + let _token = limiter + .acquire_timeout(Duration::from_millis(1)) + .await + .unwrap(); + let _token = limiter + .acquire_timeout(Duration::from_millis(1)) + .await + .unwrap(); + + token.release(Outcome::Success); + assert_eq!(limiter.state().limit(), 5, "success: increase"); + } + + #[tokio::test(start_paused = true)] + async fn should_not_change_limit_on_success_when_using_lt_util_threshold() { + let config = RateLimiterConfig { + initial_limit: 4, + algorithm: RateLimitAlgorithm::Aimd { + conf: Aimd { + min: 1, + max: 1500, + inc: 10, + dec: 0.5, + utilisation: 0.5, + }, + }, + }; + + let limiter = DynamicLimiter::new(config); + + let token = limiter + .acquire_timeout(Duration::from_millis(1)) + .await + .unwrap(); + + token.release(Outcome::Success); + assert_eq!( + limiter.state().limit(), + 4, + "success: ignore when < half limit" + ); + } + + #[tokio::test(start_paused = true)] + async fn should_not_change_limit_when_no_outcome() { + let config = RateLimiterConfig { + initial_limit: 10, + algorithm: RateLimitAlgorithm::Aimd { + conf: Aimd { + min: 1, + max: 1500, + inc: 10, + dec: 0.5, + utilisation: 0.5, + }, + }, + }; + + let limiter = DynamicLimiter::new(config); + + let token = limiter + .acquire_timeout(Duration::from_millis(1)) + .await + .unwrap(); + drop(token); + assert_eq!(limiter.state().limit(), 10, "ignore"); + } +} diff --git a/proxy/src/serverless/backend.rs b/proxy/src/serverless/backend.rs index 52fc7b556a..a40c66a80d 100644 --- a/proxy/src/serverless/backend.rs +++ b/proxy/src/serverless/backend.rs @@ -232,9 +232,9 @@ impl ConnectMechanism for 
TokioMechanism { .connect_timeout(timeout); let pause = ctx.latency_timer.pause(crate::metrics::Waiting::Compute); - let (client, connection) = config.connect(tokio_postgres::NoTls).await?; + let res = config.connect(tokio_postgres::NoTls).await; drop(pause); - drop(permit); + let (client, connection) = permit.release_result(res)?; tracing::Span::current().record("pid", &tracing::field::display(client.get_process_id())); Ok(poll_client( From 7ac11d39421330b64b8dfa72b83439d51c05da0b Mon Sep 17 00:00:00 2001 From: Konstantin Knizhnik Date: Wed, 29 May 2024 22:18:09 +0300 Subject: [PATCH 096/220] Do not produce error if gin page is not restored in redo (#7876) ## Problem See https://github.com/neondatabase/cloud/issues/10845 ## Summary of changes Do not report error if GIN page is not restored ## Checklist before requesting a review - [ ] I have performed a self-review of my code. - [ ] If it is a core feature, I have added thorough tests. - [ ] Do we need to implement analytics? if so did you add the relevant metrics to the dashboard? - [ ] If this PR requires public announcement, mark it with /release-notes label and add several sentences in this section. ## Checklist before merging - [ ] Do not forget to reformat commit message to not include the above checklist --------- Co-authored-by: Konstantin Knizhnik --- test_runner/regress/test_gin_redo.py | 22 ++++++++++++++++++++++ vendor/postgres-v14 | 2 +- vendor/postgres-v15 | 2 +- vendor/postgres-v16 | 2 +- vendor/revisions.json | 6 +++--- 5 files changed, 28 insertions(+), 6 deletions(-) create mode 100644 test_runner/regress/test_gin_redo.py diff --git a/test_runner/regress/test_gin_redo.py b/test_runner/regress/test_gin_redo.py new file mode 100644 index 0000000000..9205882239 --- /dev/null +++ b/test_runner/regress/test_gin_redo.py @@ -0,0 +1,22 @@ +import time + +from fixtures.neon_fixtures import NeonEnv, wait_replica_caughtup + + +# +# Test that redo of XLOG_GIN_VACUUM_PAGE doesn't produce error +# +def test_gin_redo(neon_simple_env: NeonEnv): + env = neon_simple_env + + primary = env.endpoints.create_start(branch_name="main", endpoint_id="primary") + time.sleep(1) + secondary = env.endpoints.new_replica_start(origin=primary, endpoint_id="secondary") + con = primary.connect() + cur = con.cursor() + cur.execute("create table gin_test_tbl(id integer, i int4[])") + cur.execute("create index gin_test_idx on gin_test_tbl using gin (i)") + cur.execute("insert into gin_test_tbl select g,array[3, 1, g] from generate_series(1, 10000) g") + cur.execute("delete from gin_test_tbl where id % 2 = 0") + cur.execute("vacuum gin_test_tbl") + wait_replica_caughtup(primary, secondary) diff --git a/vendor/postgres-v14 b/vendor/postgres-v14 index 0d30e28f74..17e0f5ff4e 160000 --- a/vendor/postgres-v14 +++ b/vendor/postgres-v14 @@ -1 +1 @@ -Subproject commit 0d30e28f74f49fe6a27a6bd45dcfeb1060656b8f +Subproject commit 17e0f5ff4e1905691aa40e1e08f9b79b14c99652 diff --git a/vendor/postgres-v15 b/vendor/postgres-v15 index 74fb144890..c2c3d40534 160000 --- a/vendor/postgres-v15 +++ b/vendor/postgres-v15 @@ -1 +1 @@ -Subproject commit 74fb144890c4f955db1ef50ee1eeb9d8a6c2f69d +Subproject commit c2c3d40534db97d83dd7e185d1971e707fa2f445 diff --git a/vendor/postgres-v16 b/vendor/postgres-v16 index 3c2b9d576c..b228f20372 160000 --- a/vendor/postgres-v16 +++ b/vendor/postgres-v16 @@ -1 +1 @@ -Subproject commit 3c2b9d576c580e0b5b7108001f959b8c5b42e0a2 +Subproject commit b228f20372ebcabfd7946647cb7adbd38bacb14a diff --git a/vendor/revisions.json b/vendor/revisions.json 
index 2f16f334c5..5bf4e289ef 100644 --- a/vendor/revisions.json +++ b/vendor/revisions.json @@ -1,5 +1,5 @@ { - "v16": ["16.3", "3c2b9d576c580e0b5b7108001f959b8c5b42e0a2"], - "v15": ["15.7", "74fb144890c4f955db1ef50ee1eeb9d8a6c2f69d"], - "v14": ["14.12", "0d30e28f74f49fe6a27a6bd45dcfeb1060656b8f"] + "v16": ["16.3", "b228f20372ebcabfd7946647cb7adbd38bacb14a"], + "v15": ["15.7", "c2c3d40534db97d83dd7e185d1971e707fa2f445"], + "v14": ["14.12", "17e0f5ff4e1905691aa40e1e08f9b79b14c99652"] } From b0a954bde237f424381a76af5ffd046a9b0e85a7 Mon Sep 17 00:00:00 2001 From: a-masterov <72613290+a-masterov@users.noreply.github.com> Date: Thu, 30 May 2024 08:25:10 +0200 Subject: [PATCH 097/220] CI: switch ubuntu-latest with ubuntu-22.04 (#7256) (#7901) ## Problem We use ubuntu-latest as a default OS for running jobs. It can cause problems due to instability, so we should use the LTS version of Ubuntu. ## Summary of changes The image ubuntu-latest was changed with ubuntu-22.04 in workflows. ## Checklist before requesting a review - [x] I have performed a self-review of my code. - [ ] If it is a core feature, I have added thorough tests. - [ ] Do we need to implement analytics? if so did you add the relevant metrics to the dashboard? - [ ] If this PR requires public announcement, mark it with /release-notes label and add several sentences in this section. ## Checklist before merging - [ ] Do not forget to reformat commit message to not include the above checklist --- .github/workflows/actionlint.yml | 14 +++++++++++++- .github/workflows/approved-for-ci-run.yml | 6 +++--- .github/workflows/benchmarking.yml | 2 +- .github/workflows/build-build-tools-image.yml | 2 +- .github/workflows/build_and_test.yml | 12 ++++++------ .github/workflows/check-build-tools-image.yml | 2 +- .github/workflows/check-permissions.yml | 2 +- .github/workflows/cleanup-caches-by-a-branch.yml | 2 +- .github/workflows/pg_clients.yml | 2 +- .github/workflows/pin-build-tools-image.yml | 2 +- .github/workflows/release-notify.yml | 2 +- .github/workflows/release.yml | 4 ++-- .github/workflows/trigger-e2e-tests.yml | 6 +++--- 13 files changed, 35 insertions(+), 23 deletions(-) diff --git a/.github/workflows/actionlint.yml b/.github/workflows/actionlint.yml index f2736614bf..078c7f88c4 100644 --- a/.github/workflows/actionlint.yml +++ b/.github/workflows/actionlint.yml @@ -24,7 +24,7 @@ jobs: actionlint: needs: [ check-permissions ] - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v4 - uses: reviewdog/action-actionlint@v1 @@ -36,3 +36,15 @@ jobs: fail_on_error: true filter_mode: nofilter level: error + - run: | + PAT='^\s*runs-on:.*-latest' + if grep -ERq $PAT .github/workflows + then + grep -ERl $PAT .github/workflows |\ + while read -r f + do + l=$(grep -nE $PAT .github/workflows/release.yml | awk -F: '{print $1}' | head -1) + echo "::error file=$f,line=$l::Please, do not use ubuntu-latest images to run on, use LTS instead." 
+ done + exit 1 + fi diff --git a/.github/workflows/approved-for-ci-run.yml b/.github/workflows/approved-for-ci-run.yml index ab616d17e2..b14b66a439 100644 --- a/.github/workflows/approved-for-ci-run.yml +++ b/.github/workflows/approved-for-ci-run.yml @@ -44,7 +44,7 @@ jobs: contains(fromJSON('["opened", "synchronize", "reopened", "closed"]'), github.event.action) && contains(github.event.pull_request.labels.*.name, 'approved-for-ci-run') - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - run: gh pr --repo "${GITHUB_REPOSITORY}" edit "${PR_NUMBER}" --remove-label "approved-for-ci-run" @@ -60,7 +60,7 @@ jobs: github.event.action == 'labeled' && contains(github.event.pull_request.labels.*.name, 'approved-for-ci-run') - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - run: gh pr --repo "${GITHUB_REPOSITORY}" edit "${PR_NUMBER}" --remove-label "approved-for-ci-run" @@ -109,7 +109,7 @@ jobs: github.event.action == 'closed' && github.event.pull_request.head.repo.full_name != github.repository - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - name: Close PR and delete `ci-run/pr-${{ env.PR_NUMBER }}` branch diff --git a/.github/workflows/benchmarking.yml b/.github/workflows/benchmarking.yml index d5a375d704..57d24063bf 100644 --- a/.github/workflows/benchmarking.yml +++ b/.github/workflows/benchmarking.yml @@ -137,7 +137,7 @@ jobs: # - rds-postgres: RDS Postgres db.m5.large instance (2 vCPU, 8 GiB) with gp3 EBS storage env: RUN_AWS_RDS_AND_AURORA: ${{ github.event.inputs.run_AWS_RDS_AND_AURORA || 'false' }} - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 outputs: pgbench-compare-matrix: ${{ steps.pgbench-compare-matrix.outputs.matrix }} olap-compare-matrix: ${{ steps.olap-compare-matrix.outputs.matrix }} diff --git a/.github/workflows/build-build-tools-image.yml b/.github/workflows/build-build-tools-image.yml index bdf00bcaae..9aacb09d10 100644 --- a/.github/workflows/build-build-tools-image.yml +++ b/.github/workflows/build-build-tools-image.yml @@ -88,7 +88,7 @@ jobs: merge-images: needs: [ build-image ] - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 env: IMAGE_TAG: ${{ inputs.image-tag }} diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index f8c011a0a5..b9caf76060 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -35,7 +35,7 @@ jobs: cancel-previous-e2e-tests: needs: [ check-permissions ] if: github.event_name == 'pull_request' - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - name: Cancel previous e2e-tests runs for this PR @@ -549,7 +549,7 @@ jobs: report-benchmarks-failures: needs: [ benchmarks, create-test-report ] if: github.ref_name == 'main' && failure() && needs.benchmarks.result == 'failure' - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - uses: slackapi/slack-github-action@v1 @@ -774,7 +774,7 @@ jobs: neon-image: needs: [ neon-image-arch, tag ] - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - uses: docker/login-action@v3 @@ -884,7 +884,7 @@ jobs: compute-node-image: needs: [ compute-node-image-arch, tag ] - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 strategy: matrix: @@ -1032,7 +1032,7 @@ jobs: promote-images: needs: [ check-permissions, tag, test-images, vm-compute-node-image ] - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 env: VERSIONS: v14 v15 v16 @@ -1077,7 +1077,7 @@ jobs: trigger-custom-extensions-build-and-wait: needs: [ check-permissions, tag ] - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - name: Set PR's status to pending 
and request a remote CI test run: | diff --git a/.github/workflows/check-build-tools-image.yml b/.github/workflows/check-build-tools-image.yml index a1e22cf93f..97116940a0 100644 --- a/.github/workflows/check-build-tools-image.yml +++ b/.github/workflows/check-build-tools-image.yml @@ -19,7 +19,7 @@ permissions: {} jobs: check-image: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 outputs: tag: ${{ steps.get-build-tools-tag.outputs.image-tag }} found: ${{ steps.check-image.outputs.found }} diff --git a/.github/workflows/check-permissions.yml b/.github/workflows/check-permissions.yml index c3357c6cf8..9c42794797 100644 --- a/.github/workflows/check-permissions.yml +++ b/.github/workflows/check-permissions.yml @@ -16,7 +16,7 @@ permissions: {} jobs: check-permissions: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - name: Disallow CI runs on PRs from forks if: | diff --git a/.github/workflows/cleanup-caches-by-a-branch.yml b/.github/workflows/cleanup-caches-by-a-branch.yml index d8c225dedb..0c074e36dc 100644 --- a/.github/workflows/cleanup-caches-by-a-branch.yml +++ b/.github/workflows/cleanup-caches-by-a-branch.yml @@ -9,7 +9,7 @@ on: jobs: cleanup: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - name: Cleanup run: | diff --git a/.github/workflows/pg_clients.yml b/.github/workflows/pg_clients.yml index 50e3227a74..fef3aec754 100644 --- a/.github/workflows/pg_clients.yml +++ b/.github/workflows/pg_clients.yml @@ -20,7 +20,7 @@ concurrency: jobs: test-postgres-client-libs: # TODO: switch to gen2 runner, requires docker - runs-on: [ ubuntu-latest ] + runs-on: ubuntu-22.04 env: DEFAULT_PG_VERSION: 14 diff --git a/.github/workflows/pin-build-tools-image.yml b/.github/workflows/pin-build-tools-image.yml index d495a158e8..024594532f 100644 --- a/.github/workflows/pin-build-tools-image.yml +++ b/.github/workflows/pin-build-tools-image.yml @@ -26,7 +26,7 @@ permissions: {} jobs: tag-image: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 env: FROM_TAG: ${{ inputs.from-tag }} diff --git a/.github/workflows/release-notify.yml b/.github/workflows/release-notify.yml index ba396dba74..8bd10e993c 100644 --- a/.github/workflows/release-notify.yml +++ b/.github/workflows/release-notify.yml @@ -19,7 +19,7 @@ on: jobs: notify: - runs-on: [ ubuntu-latest ] + runs-on: ubuntu-22.04 steps: - uses: neondatabase/dev-actions/release-pr-notify@main diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index fe24f6330e..90a3aaaf2d 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -26,7 +26,7 @@ defaults: jobs: create-storage-release-branch: if: ${{ github.event.schedule == '0 6 * * MON' || format('{0}', inputs.create-storage-release-branch) == 'true' }} - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 permissions: contents: write # for `git push` @@ -65,7 +65,7 @@ jobs: create-proxy-release-branch: if: ${{ github.event.schedule == '0 6 * * THU' || format('{0}', inputs.create-proxy-release-branch) == 'true' }} - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 permissions: contents: write # for `git push` diff --git a/.github/workflows/trigger-e2e-tests.yml b/.github/workflows/trigger-e2e-tests.yml index 7111ee37fa..77928a343e 100644 --- a/.github/workflows/trigger-e2e-tests.yml +++ b/.github/workflows/trigger-e2e-tests.yml @@ -19,7 +19,7 @@ env: jobs: cancel-previous-e2e-tests: if: github.event_name == 'pull_request' - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - name: Cancel previous e2e-tests runs for this PR @@ -31,7 +31,7 @@ jobs: 
--field concurrency_group="${{ env.E2E_CONCURRENCY_GROUP }}" tag: - runs-on: [ ubuntu-latest ] + runs-on: ubuntu-22.04 outputs: build-tag: ${{ steps.build-tag.outputs.tag }} @@ -62,7 +62,7 @@ jobs: trigger-e2e-tests: needs: [ tag ] - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 env: TAG: ${{ needs.tag.outputs.build-tag }} steps: From 238fa47bee911d23730dc1e8e91defb0bf57dda9 Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Thu, 30 May 2024 11:09:27 +0100 Subject: [PATCH 098/220] proxy fix wake compute rate limit (#7902) ## Problem We were rate limiting wake_compute in the wrong place ## Summary of changes Move wake_compute rate limit to after the permit is acquired. Also makes a slight refactor on normalize, as it caught my eye --- proxy/src/auth/backend.rs | 2 +- proxy/src/console/provider/neon.rs | 19 ++++++++++--------- proxy/src/lib.rs | 21 ++++++++++++--------- 3 files changed, 23 insertions(+), 19 deletions(-) diff --git a/proxy/src/auth/backend.rs b/proxy/src/auth/backend.rs index 3555eba543..f757a15fbb 100644 --- a/proxy/src/auth/backend.rs +++ b/proxy/src/auth/backend.rs @@ -35,7 +35,7 @@ use crate::{ }, stream, url, }; -use crate::{scram, EndpointCacheKey, EndpointId, Normalize, RoleName}; +use crate::{scram, EndpointCacheKey, EndpointId, RoleName}; /// Alternative to [`std::borrow::Cow`] but doesn't need `T: ToOwned` as we don't need that functionality pub enum MaybeOwned<'a, T> { diff --git a/proxy/src/console/provider/neon.rs b/proxy/src/console/provider/neon.rs index 5d691e5f15..d72229b029 100644 --- a/proxy/src/console/provider/neon.rs +++ b/proxy/src/console/provider/neon.rs @@ -13,7 +13,7 @@ use crate::{ http, metrics::{CacheOutcome, Metrics}, rate_limiter::EndpointRateLimiter, - scram, EndpointCacheKey, Normalize, + scram, EndpointCacheKey, }; use crate::{cache::Cached, context::RequestMonitoring}; use futures::TryFutureExt; @@ -281,14 +281,6 @@ impl super::Api for Api { return Ok(cached); } - // check rate limit - if !self - .wake_compute_endpoint_rate_limiter - .check(user_info.endpoint.normalize().into(), 1) - { - return Err(WakeComputeError::TooManyConnections); - } - let permit = self.locks.get_permit(&key).await?; // after getting back a permit - it's possible the cache was filled @@ -301,6 +293,15 @@ impl super::Api for Api { } } + // check rate limit + if !self + .wake_compute_endpoint_rate_limiter + .check(user_info.endpoint.normalize_intern(), 1) + { + info!(key = &*key, "found cached compute node info"); + return Err(WakeComputeError::TooManyConnections); + } + let mut node = permit.release_result(self.do_wake_compute(ctx, user_info).await)?; ctx.set_project(node.aux.clone()); let cold_start_info = node.aux.cold_start_info; diff --git a/proxy/src/lib.rs b/proxy/src/lib.rs index 35c1616481..ea92eaaa55 100644 --- a/proxy/src/lib.rs +++ b/proxy/src/lib.rs @@ -3,6 +3,7 @@ use std::convert::Infallible; use anyhow::{bail, Context}; +use intern::{EndpointIdInt, EndpointIdTag, InternId}; use tokio::task::JoinError; use tokio_util::sync::CancellationToken; use tracing::warn; @@ -129,20 +130,22 @@ macro_rules! 
smol_str_wrapper { const POOLER_SUFFIX: &str = "-pooler"; -pub trait Normalize { - fn normalize(&self) -> Self; -} - -impl + From> Normalize for S { +impl EndpointId { fn normalize(&self) -> Self { - if self.as_ref().ends_with(POOLER_SUFFIX) { - let mut s = self.as_ref().to_string(); - s.truncate(s.len() - POOLER_SUFFIX.len()); - s.into() + if let Some(stripped) = self.as_ref().strip_suffix(POOLER_SUFFIX) { + stripped.into() } else { self.clone() } } + + fn normalize_intern(&self) -> EndpointIdInt { + if let Some(stripped) = self.as_ref().strip_suffix(POOLER_SUFFIX) { + EndpointIdTag::get_interner().get_or_intern(stripped) + } else { + self.into() + } + } } // 90% of role name strings are 20 characters or less. From fddd11dd1a4d86623d1683ecc7f9f679a5132f89 Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Thu, 30 May 2024 11:10:27 +0100 Subject: [PATCH 099/220] proxy: upload postgres connection options as json in the parquet upload (#7903) ## Problem https://github.com/neondatabase/cloud/issues/9943 ## Summary of changes Captures the postgres options, converts them to json, uploads them in parquet. --- libs/pq_proto/src/lib.rs | 7 ++- proxy/src/auth/backend/link.rs | 1 + proxy/src/auth/credentials.rs | 8 --- proxy/src/context.rs | 21 ++++++- proxy/src/context/parquet.rs | 83 +++++++++++++++++---------- proxy/src/proxy.rs | 2 + proxy/src/serverless/sql_over_http.rs | 13 +++-- 7 files changed, 89 insertions(+), 46 deletions(-) diff --git a/libs/pq_proto/src/lib.rs b/libs/pq_proto/src/lib.rs index 522b65f5d1..f8e578c6f2 100644 --- a/libs/pq_proto/src/lib.rs +++ b/libs/pq_proto/src/lib.rs @@ -50,12 +50,17 @@ pub enum FeStartupPacket { }, } -#[derive(Debug)] +#[derive(Debug, Clone, Default)] pub struct StartupMessageParams { params: HashMap, } impl StartupMessageParams { + /// Set parameter's value by its name. + pub fn insert(&mut self, name: &str, value: &str) { + self.params.insert(name.to_owned(), value.to_owned()); + } + /// Get parameter's value by its name. pub fn get(&self, name: &str) -> Option<&str> { self.params.get(name).map(|s| s.as_str()) diff --git a/proxy/src/auth/backend/link.rs b/proxy/src/auth/backend/link.rs index 415a4b7d85..5932e1337c 100644 --- a/proxy/src/auth/backend/link.rs +++ b/proxy/src/auth/backend/link.rs @@ -100,6 +100,7 @@ pub(super) async fn authenticate( .dbname(&db_info.dbname) .user(&db_info.user); + ctx.set_dbname(db_info.dbname.into()); ctx.set_user(db_info.user.into()); ctx.set_project(db_info.aux.clone()); info!("woken up a compute node"); diff --git a/proxy/src/auth/credentials.rs b/proxy/src/auth/credentials.rs index 783a1a5a21..d06f5614f1 100644 --- a/proxy/src/auth/credentials.rs +++ b/proxy/src/auth/credentials.rs @@ -11,7 +11,6 @@ use crate::{ }; use itertools::Itertools; use pq_proto::StartupMessageParams; -use smol_str::SmolStr; use std::{collections::HashSet, net::IpAddr, str::FromStr}; use thiserror::Error; use tracing::{info, warn}; @@ -96,13 +95,6 @@ impl ComputeUserInfoMaybeEndpoint { let get_param = |key| params.get(key).ok_or(MissingKey(key)); let user: RoleName = get_param("user")?.into(); - // record the values if we have them - ctx.set_application(params.get("application_name").map(SmolStr::from)); - ctx.set_user(user.clone()); - if let Some(dbname) = params.get("database") { - ctx.set_dbname(dbname.into()); - } - // Project name might be passed via PG's command-line options. 
let endpoint_option = params .options_raw() diff --git a/proxy/src/context.rs b/proxy/src/context.rs index dfd3ef108e..ff79ba8275 100644 --- a/proxy/src/context.rs +++ b/proxy/src/context.rs @@ -2,6 +2,7 @@ use chrono::Utc; use once_cell::sync::OnceCell; +use pq_proto::StartupMessageParams; use smol_str::SmolStr; use std::net::IpAddr; use tokio::sync::mpsc; @@ -46,6 +47,7 @@ pub struct RequestMonitoring { pub(crate) auth_method: Option, success: bool, pub(crate) cold_start_info: ColdStartInfo, + pg_options: Option, // extra // This sender is here to keep the request monitoring channel open while requests are taking place. @@ -102,6 +104,7 @@ impl RequestMonitoring { success: false, rejected: None, cold_start_info: ColdStartInfo::Unknown, + pg_options: None, sender: LOG_CHAN.get().and_then(|tx| tx.upgrade()), disconnect_sender: LOG_CHAN_DISCONNECT.get().and_then(|tx| tx.upgrade()), @@ -132,6 +135,18 @@ impl RequestMonitoring { self.latency_timer.cold_start_info(info); } + pub fn set_db_options(&mut self, options: StartupMessageParams) { + self.set_application(options.get("application_name").map(SmolStr::from)); + if let Some(user) = options.get("user") { + self.set_user(user.into()); + } + if let Some(dbname) = options.get("database") { + self.set_dbname(dbname.into()); + } + + self.pg_options = Some(options); + } + pub fn set_project(&mut self, x: MetricsAuxInfo) { if self.endpoint_id.is_none() { self.set_endpoint_id(x.endpoint_id.as_str().into()) @@ -155,8 +170,10 @@ impl RequestMonitoring { } } - pub fn set_application(&mut self, app: Option) { - self.application = app.or_else(|| self.application.clone()); + fn set_application(&mut self, app: Option) { + if let Some(app) = app { + self.application = Some(app); + } } pub fn set_dbname(&mut self, dbname: DbName) { diff --git a/proxy/src/context/parquet.rs b/proxy/src/context/parquet.rs index a213a32ca4..1355b7e1d8 100644 --- a/proxy/src/context/parquet.rs +++ b/proxy/src/context/parquet.rs @@ -13,7 +13,9 @@ use parquet::{ }, record::RecordWriter, }; +use pq_proto::StartupMessageParams; use remote_storage::{GenericRemoteStorage, RemotePath, TimeoutOrCancel}; +use serde::ser::SerializeMap; use tokio::{sync::mpsc, time}; use tokio_util::sync::CancellationToken; use tracing::{debug, info, Span}; @@ -87,6 +89,7 @@ pub struct RequestData { database: Option, project: Option, branch: Option, + pg_options: Option, auth_method: Option<&'static str>, error: Option<&'static str>, /// Success is counted if we form a HTTP response with sql rows inside @@ -101,6 +104,23 @@ pub struct RequestData { disconnect_timestamp: Option, } +struct Options<'a> { + options: &'a StartupMessageParams, +} + +impl<'a> serde::Serialize for Options<'a> { + fn serialize(&self, s: S) -> Result + where + S: serde::Serializer, + { + let mut state = s.serialize_map(None)?; + for (k, v) in self.options.iter() { + state.serialize_entry(k, v)?; + } + state.end() + } +} + impl From<&RequestMonitoring> for RequestData { fn from(value: &RequestMonitoring) -> Self { Self { @@ -113,6 +133,10 @@ impl From<&RequestMonitoring> for RequestData { database: value.dbname.as_deref().map(String::from), project: value.project.as_deref().map(String::from), branch: value.branch.as_deref().map(String::from), + pg_options: value + .pg_options + .as_ref() + .and_then(|options| serde_json::to_string(&Options { options }).ok()), auth_method: value.auth_method.as_ref().map(|x| match x { super::AuthMethod::Web => "web", super::AuthMethod::ScramSha256 => "scram_sha_256", @@ -494,6 +518,7 @@ mod tests { 
database: Some(hex::encode(rng.gen::<[u8; 16]>())), project: Some(hex::encode(rng.gen::<[u8; 16]>())), branch: Some(hex::encode(rng.gen::<[u8; 16]>())), + pg_options: None, auth_method: None, protocol: ["tcp", "ws", "http"][rng.gen_range(0..3)], region: "us-east-1", @@ -570,15 +595,15 @@ mod tests { assert_eq!( file_stats, [ - (1315314, 3, 6000), - (1315307, 3, 6000), - (1315367, 3, 6000), - (1315324, 3, 6000), - (1315454, 3, 6000), - (1315296, 3, 6000), - (1315088, 3, 6000), - (1315324, 3, 6000), - (438713, 1, 2000) + (1315874, 3, 6000), + (1315867, 3, 6000), + (1315927, 3, 6000), + (1315884, 3, 6000), + (1316014, 3, 6000), + (1315856, 3, 6000), + (1315648, 3, 6000), + (1315884, 3, 6000), + (438913, 1, 2000) ] ); @@ -608,11 +633,11 @@ mod tests { assert_eq!( file_stats, [ - (1222212, 5, 10000), - (1228362, 5, 10000), - (1230156, 5, 10000), - (1229518, 5, 10000), - (1220796, 5, 10000) + (1223214, 5, 10000), + (1229364, 5, 10000), + (1231158, 5, 10000), + (1230520, 5, 10000), + (1221798, 5, 10000) ] ); @@ -644,11 +669,11 @@ mod tests { assert_eq!( file_stats, [ - (1207859, 5, 10000), - (1207590, 5, 10000), - (1207883, 5, 10000), - (1207871, 5, 10000), - (1208126, 5, 10000) + (1208861, 5, 10000), + (1208592, 5, 10000), + (1208885, 5, 10000), + (1208873, 5, 10000), + (1209128, 5, 10000) ] ); @@ -673,15 +698,15 @@ mod tests { assert_eq!( file_stats, [ - (1315314, 3, 6000), - (1315307, 3, 6000), - (1315367, 3, 6000), - (1315324, 3, 6000), - (1315454, 3, 6000), - (1315296, 3, 6000), - (1315088, 3, 6000), - (1315324, 3, 6000), - (438713, 1, 2000) + (1315874, 3, 6000), + (1315867, 3, 6000), + (1315927, 3, 6000), + (1315884, 3, 6000), + (1316014, 3, 6000), + (1315856, 3, 6000), + (1315648, 3, 6000), + (1315884, 3, 6000), + (438913, 1, 2000) ] ); @@ -718,7 +743,7 @@ mod tests { // files are smaller than the size threshold, but they took too long to fill so were flushed early assert_eq!( file_stats, - [(659462, 2, 3001), (659176, 2, 3000), (658972, 2, 2999)] + [(659836, 2, 3001), (659550, 2, 3000), (659346, 2, 2999)] ); tmpdir.close().unwrap(); diff --git a/proxy/src/proxy.rs b/proxy/src/proxy.rs index 5824b70df9..95b46ae002 100644 --- a/proxy/src/proxy.rs +++ b/proxy/src/proxy.rs @@ -267,6 +267,8 @@ pub async fn handle_client( }; drop(pause); + ctx.set_db_options(params.clone()); + let hostname = mode.hostname(stream.get_ref()); let common_names = tls.map(|tls| &tls.common_names); diff --git a/proxy/src/serverless/sql_over_http.rs b/proxy/src/serverless/sql_over_http.rs index 5376bddfd3..9a7cdc8577 100644 --- a/proxy/src/serverless/sql_over_http.rs +++ b/proxy/src/serverless/sql_over_http.rs @@ -17,6 +17,7 @@ use hyper1::http::HeaderValue; use hyper1::Response; use hyper1::StatusCode; use hyper1::{HeaderMap, Request}; +use pq_proto::StartupMessageParams; use serde_json::json; use serde_json::Value; use tokio::time; @@ -192,13 +193,13 @@ fn get_conn_info( let mut options = Option::None; + let mut params = StartupMessageParams::default(); + params.insert("user", &username); + params.insert("database", &dbname); for (key, value) in pairs { - match &*key { - "options" => { - options = Some(NeonOptions::parse_options_raw(&value)); - } - "application_name" => ctx.set_application(Some(value.into())), - _ => {} + params.insert(&key, &value); + if key == "options" { + options = Some(NeonOptions::parse_options_raw(&value)); } } From 9a081c230f6b1d2bff7fea1e201631a7e7ee4328 Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Thu, 30 May 2024 12:02:38 +0100 Subject: [PATCH 100/220] proxy: lazily parse startup 
pg params (#7905) ## Problem proxy params being a `HashMap` when it contains just ``` application_name: psql database: neondb user: neondb_owner ``` is quite wasteful allocation wise. ## Summary of changes Keep the params in the wire protocol form, eg: ``` application_name\0psql\0database\0neondb\0user\0neondb_owner\0 ``` Using a linear search for the map is fast enough at small sizes, which is the normal case. --- Cargo.lock | 1 + libs/pq_proto/Cargo.toml | 1 + libs/pq_proto/src/lib.rs | 80 +++++++++++++++------------ proxy/src/serverless/sql_over_http.rs | 4 +- 4 files changed, 48 insertions(+), 38 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 794486e2e1..44edbabaf6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4113,6 +4113,7 @@ version = "0.1.0" dependencies = [ "byteorder", "bytes", + "itertools", "pin-project-lite", "postgres-protocol", "rand 0.8.5", diff --git a/libs/pq_proto/Cargo.toml b/libs/pq_proto/Cargo.toml index 6eeb3bafef..8afabe670e 100644 --- a/libs/pq_proto/Cargo.toml +++ b/libs/pq_proto/Cargo.toml @@ -7,6 +7,7 @@ license.workspace = true [dependencies] bytes.workspace = true byteorder.workspace = true +itertools.workspace = true pin-project-lite.workspace = true postgres-protocol.workspace = true rand.workspace = true diff --git a/libs/pq_proto/src/lib.rs b/libs/pq_proto/src/lib.rs index f8e578c6f2..cee3742017 100644 --- a/libs/pq_proto/src/lib.rs +++ b/libs/pq_proto/src/lib.rs @@ -7,8 +7,9 @@ pub mod framed; use byteorder::{BigEndian, ReadBytesExt}; use bytes::{Buf, BufMut, Bytes, BytesMut}; +use itertools::Itertools; use serde::{Deserialize, Serialize}; -use std::{borrow::Cow, collections::HashMap, fmt, io, str}; +use std::{borrow::Cow, fmt, io, str}; // re-export for use in utils pageserver_feedback.rs pub use postgres_protocol::PG_EPOCH; @@ -50,20 +51,37 @@ pub enum FeStartupPacket { }, } +#[derive(Debug, Clone, Default)] +pub struct StartupMessageParamsBuilder { + params: BytesMut, +} + +impl StartupMessageParamsBuilder { + /// Set parameter's value by its name. + /// name and value must not contain a \0 byte + pub fn insert(&mut self, name: &str, value: &str) { + self.params.put(name.as_bytes()); + self.params.put(&b"\0"[..]); + self.params.put(value.as_bytes()); + self.params.put(&b"\0"[..]); + } + + pub fn freeze(self) -> StartupMessageParams { + StartupMessageParams { + params: self.params.freeze(), + } + } +} + #[derive(Debug, Clone, Default)] pub struct StartupMessageParams { - params: HashMap, + params: Bytes, } impl StartupMessageParams { - /// Set parameter's value by its name. - pub fn insert(&mut self, name: &str, value: &str) { - self.params.insert(name.to_owned(), value.to_owned()); - } - /// Get parameter's value by its name. pub fn get(&self, name: &str) -> Option<&str> { - self.params.get(name).map(|s| s.as_str()) + self.iter().find_map(|(k, v)| (k == name).then_some(v)) } /// Split command-line options according to PostgreSQL's logic, @@ -117,15 +135,19 @@ impl StartupMessageParams { /// Iterate through key-value pairs in an arbitrary order. pub fn iter(&self) -> impl Iterator { - self.params.iter().map(|(k, v)| (k.as_str(), v.as_str())) + let params = + std::str::from_utf8(&self.params).expect("should be validated as utf8 already"); + params.split_terminator('\0').tuples() } // This function is mostly useful in tests. 
#[doc(hidden)] pub fn new<'a, const N: usize>(pairs: [(&'a str, &'a str); N]) -> Self { - Self { - params: pairs.map(|(k, v)| (k.to_owned(), v.to_owned())).into(), + let mut b = StartupMessageParamsBuilder::default(); + for (k, v) in pairs { + b.insert(k, v) } + b.freeze() } } @@ -350,35 +372,21 @@ impl FeStartupPacket { (major_version, minor_version) => { // StartupMessage - // Parse pairs of null-terminated strings (key, value). - // See `postgres: ProcessStartupPacket, build_startup_packet`. - let mut tokens = str::from_utf8(&msg) - .map_err(|_e| { - ProtocolError::BadMessage("StartupMessage params: invalid utf-8".to_owned()) - })? - .strip_suffix('\0') // drop packet's own null - .ok_or_else(|| { - ProtocolError::Protocol( - "StartupMessage params: missing null terminator".to_string(), - ) - })? - .split_terminator('\0'); - - let mut params = HashMap::new(); - while let Some(name) = tokens.next() { - let value = tokens.next().ok_or_else(|| { - ProtocolError::Protocol( - "StartupMessage params: key without value".to_string(), - ) - })?; - - params.insert(name.to_owned(), value.to_owned()); - } + let s = str::from_utf8(&msg).map_err(|_e| { + ProtocolError::BadMessage("StartupMessage params: invalid utf-8".to_owned()) + })?; + let s = s.strip_suffix('\0').ok_or_else(|| { + ProtocolError::Protocol( + "StartupMessage params: missing null terminator".to_string(), + ) + })?; FeStartupPacket::StartupMessage { major_version, minor_version, - params: StartupMessageParams { params }, + params: StartupMessageParams { + params: msg.slice_ref(s.as_bytes()), + }, } } }; diff --git a/proxy/src/serverless/sql_over_http.rs b/proxy/src/serverless/sql_over_http.rs index 9a7cdc8577..9d6a475aeb 100644 --- a/proxy/src/serverless/sql_over_http.rs +++ b/proxy/src/serverless/sql_over_http.rs @@ -17,7 +17,7 @@ use hyper1::http::HeaderValue; use hyper1::Response; use hyper1::StatusCode; use hyper1::{HeaderMap, Request}; -use pq_proto::StartupMessageParams; +use pq_proto::StartupMessageParamsBuilder; use serde_json::json; use serde_json::Value; use tokio::time; @@ -193,7 +193,7 @@ fn get_conn_info( let mut options = Option::None; - let mut params = StartupMessageParams::default(); + let mut params = StartupMessageParamsBuilder::default(); params.insert("user", &username); params.insert("database", &dbname); for (key, value) in pairs { From 167394a0735abb422e0f2b544af4b160edbd2f34 Mon Sep 17 00:00:00 2001 From: YukiSeino <36467282+SeinoYuki@users.noreply.github.com> Date: Thu, 30 May 2024 22:58:20 +0900 Subject: [PATCH 101/220] refacter : VirtualFile::open uses AsRef (#7908) ## Problem #7371 ## Summary of changes * The VirtualFile::open, open_with_options, and create methods use AsRef, similar to the standard library's std::fs APIs. --- pageserver/src/virtual_file.rs | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/pageserver/src/virtual_file.rs b/pageserver/src/virtual_file.rs index b68f3a0e89..04d9386fab 100644 --- a/pageserver/src/virtual_file.rs +++ b/pageserver/src/virtual_file.rs @@ -344,21 +344,21 @@ macro_rules! with_file { impl VirtualFile { /// Open a file in read-only mode. Like File::open. - pub async fn open( - path: &Utf8Path, + pub async fn open>( + path: P, ctx: &RequestContext, ) -> Result { - Self::open_with_options(path, OpenOptions::new().read(true), ctx).await + Self::open_with_options(path.as_ref(), OpenOptions::new().read(true), ctx).await } /// Create a new file for writing. If the file exists, it will be truncated. /// Like File::create. 
- pub async fn create( - path: &Utf8Path, + pub async fn create>( + path: P, ctx: &RequestContext, ) -> Result { Self::open_with_options( - path, + path.as_ref(), OpenOptions::new().write(true).create(true).truncate(true), ctx, ) @@ -370,12 +370,13 @@ impl VirtualFile { /// Note: If any custom flags were set in 'open_options' through OpenOptionsExt, /// they will be applied also when the file is subsequently re-opened, not only /// on the first time. Make sure that's sane! - pub async fn open_with_options( - path: &Utf8Path, + pub async fn open_with_options>( + path: P, open_options: &OpenOptions, _ctx: &RequestContext, /* TODO: carry a pointer to the metrics in the RequestContext instead of the parsing https://github.com/neondatabase/neon/issues/6107 */ ) -> Result { - let path_str = path.to_string(); + let path_ref = path.as_ref(); + let path_str = path_ref.to_string(); let parts = path_str.split('/').collect::>(); let (tenant_id, shard_id, timeline_id) = if parts.len() > 5 && parts[parts.len() - 5] == TENANTS_SEGMENT_NAME { @@ -401,7 +402,7 @@ impl VirtualFile { // where our caller doesn't get to use the returned VirtualFile before its // slot gets re-used by someone else. let file = observe_duration!(StorageIoOperation::Open, { - open_options.open(path.as_std_path()).await? + open_options.open(path_ref.as_std_path()).await? }); // Strip all options other than read and write. @@ -417,7 +418,7 @@ impl VirtualFile { let vfile = VirtualFile { handle: RwLock::new(handle), pos: 0, - path: path.to_path_buf(), + path: path_ref.to_path_buf(), open_options: reopen_options, tenant_id, shard_id, From 1eca8b8a6b56e82445d9d8354e7acfee97c80603 Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Thu, 30 May 2024 10:03:17 -0400 Subject: [PATCH 102/220] fix(pageserver): ensure to_i128 works for metadata keys (#7895) field2 of metadata keys can be 0xFFFF because of the mapping. Allow 0xFFFF for `to_i128`. An alternative is to encode 0xFFFF as 0xFFFFFFFF (which is allowed in the original `to_i128`). But checking the places where field2 is referenced, the rest part of the system does not seem to depend on this assertion. Signed-off-by: Alex Chi Z --- libs/pageserver_api/src/key.rs | 37 +++++++++++----------------------- 1 file changed, 12 insertions(+), 25 deletions(-) diff --git a/libs/pageserver_api/src/key.rs b/libs/pageserver_api/src/key.rs index 2511de00d5..b00d48498c 100644 --- a/libs/pageserver_api/src/key.rs +++ b/libs/pageserver_api/src/key.rs @@ -1,6 +1,5 @@ use anyhow::{bail, Result}; use byteorder::{ByteOrder, BE}; -use bytes::BufMut; use postgres_ffi::relfile_utils::{FSM_FORKNUM, VISIBILITYMAP_FORKNUM}; use postgres_ffi::{Oid, TransactionId}; use serde::{Deserialize, Serialize}; @@ -53,14 +52,8 @@ impl Key { /// Encode a metadata key to a storage key. pub fn from_metadata_key_fixed_size(key: &[u8; METADATA_KEY_SIZE]) -> Self { assert!(is_metadata_key_slice(key), "key not in metadata key range"); - Key { - field1: key[0], - field2: u16::from_be_bytes(key[1..3].try_into().unwrap()) as u32, - field3: u32::from_be_bytes(key[3..7].try_into().unwrap()), - field4: u32::from_be_bytes(key[7..11].try_into().unwrap()), - field5: key[11], - field6: u32::from_be_bytes(key[12..16].try_into().unwrap()), - } + // Metadata key space ends at 0x7F so it's fine to directly convert it to i128. + Self::from_i128(i128::from_be_bytes(*key)) } /// Encode a metadata key to a storage key. 
@@ -68,17 +61,6 @@ impl Key { Self::from_metadata_key_fixed_size(key.try_into().expect("expect 16 byte metadata key")) } - /// Extract a metadata key to a writer. The result should always be 16 bytes. - pub fn extract_metadata_key_to_writer(&self, mut writer: impl BufMut) { - writer.put_u8(self.field1); - assert!(self.field2 <= 0xFFFF); - writer.put_u16(self.field2 as u16); - writer.put_u32(self.field3); - writer.put_u32(self.field4); - writer.put_u8(self.field5); - writer.put_u32(self.field6); - } - /// Get the range of metadata keys. pub const fn metadata_key_range() -> Range { Key { @@ -121,7 +103,7 @@ impl Key { /// As long as Neon does not support tablespace (because of lack of access to local file system), /// we can assume that only some predefined namespace OIDs are used which can fit in u16 pub fn to_i128(&self) -> i128 { - assert!(self.field2 < 0xFFFF || self.field2 == 0xFFFFFFFF || self.field2 == 0x22222222); + assert!(self.field2 <= 0xFFFF || self.field2 == 0xFFFFFFFF || self.field2 == 0x22222222); (((self.field1 & 0x7F) as i128) << 120) | (((self.field2 & 0xFFFF) as i128) << 104) | ((self.field3 as i128) << 72) @@ -175,7 +157,7 @@ impl Key { } /// Convert a 18B slice to a key. This function should not be used for metadata keys because field2 is handled differently. - /// Use [`Key::from_metadata_key`] instead. + /// Use [`Key::from_i128`] instead if you want to handle 16B keys (i.e., metadata keys). pub fn from_slice(b: &[u8]) -> Self { Key { field1: b[0], @@ -188,7 +170,7 @@ impl Key { } /// Convert a key to a 18B slice. This function should not be used for metadata keys because field2 is handled differently. - /// Use [`Key::extract_metadata_key_to_writer`] instead. + /// Use [`Key::to_i128`] instead if you want to get a 16B key (i.e., metadata keys). pub fn write_to_byte_slice(&self, buf: &mut [u8]) { buf[0] = self.field1; BE::write_u32(&mut buf[1..5], self.field2); @@ -687,10 +669,15 @@ mod tests { let mut metadata_key = vec![AUX_KEY_PREFIX]; metadata_key.extend_from_slice(&[0xFF; 15]); let encoded_key = Key::from_metadata_key(&metadata_key); - let mut output_key = Vec::new(); - encoded_key.extract_metadata_key_to_writer(&mut output_key); + let output_key = encoded_key.to_i128().to_be_bytes(); assert_eq!(metadata_key, output_key); assert!(encoded_key.is_metadata_key()); assert!(is_metadata_key_slice(&metadata_key)); } + + #[test] + fn test_possible_largest_key() { + Key::from_i128(0x7FFF_FFFF_FFFF_FFFF_FFFF_FFFF_FFFF_FFFF); + // TODO: put this key into the system and see if anything breaks. + } } From 33395dcf4ef137b39d3ba8022e9e4d07e7de9ed4 Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Thu, 30 May 2024 10:31:57 -0400 Subject: [PATCH 103/220] perf(pageserver): postpone vectored get fringe keyspace construction (#7904) Perf shows a significant amount of time is spent on `Keyspace::merge`. This pull request postpones merging keyspace until retrieving the layer, which contributes to a 30x improvement in aux keyspace basebackup time. 
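Roughly, the shape of the change is the following. This is a hypothetical sketch with simplified types, not the actual `LayerFringe`/`KeySpaceRandomAccum` code: keyspaces are merely collected when a read is planned, and flattened into one sorted keyspace only once, when the read for that layer is popped. The timing output below shows the effect.

```
// Hypothetical sketch, simplified types: defer the merge until the read is consumed.
struct Fringe {
    pending: Vec<Vec<(u64, u64)>>, // key ranges queued for a single layer
}

impl Fringe {
    fn add(&mut self, ranges: Vec<(u64, u64)>) {
        // O(1): just remember the keyspace for later instead of merging now
        self.pending.push(ranges);
    }

    fn pop(&mut self) -> Vec<(u64, u64)> {
        // merge exactly once, when the layer is actually read
        let mut all: Vec<(u64, u64)> = self.pending.drain(..).flatten().collect();
        all.sort_unstable();
        all
    }
}

fn main() {
    let mut f = Fringe { pending: Vec::new() };
    f.add(vec![(10, 20)]);
    f.add(vec![(0, 5), (30, 40)]);
    assert_eq!(f.pop(), vec![(0, 5), (10, 20), (30, 40)]);
}
```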
``` --- old 10000 files found in 0.580569459s --- new 10000 files found in 0.02995075s ``` Signed-off-by: Alex Chi Z --- pageserver/pagebench/src/cmd/aux_files.rs | 17 ++++++++++++----- pageserver/src/tenant/storage_layer.rs | 17 +++++++++++++---- 2 files changed, 25 insertions(+), 9 deletions(-) diff --git a/pageserver/pagebench/src/cmd/aux_files.rs b/pageserver/pagebench/src/cmd/aux_files.rs index eb5b242a5f..bce3285606 100644 --- a/pageserver/pagebench/src/cmd/aux_files.rs +++ b/pageserver/pagebench/src/cmd/aux_files.rs @@ -5,6 +5,7 @@ use utils::lsn::Lsn; use std::collections::HashMap; use std::sync::Arc; +use std::time::Instant; /// Ingest aux files into the pageserver. #[derive(clap::Parser)] @@ -88,11 +89,17 @@ async fn main_impl(args: Args) -> anyhow::Result<()> { println!("ingested {file_cnt} files"); } - let files = mgmt_api_client - .list_aux_files(tenant_shard_id, timeline_id, Lsn(Lsn::MAX.0 - 1)) - .await?; - - println!("{} files found", files.len()); + for _ in 0..100 { + let start = Instant::now(); + let files = mgmt_api_client + .list_aux_files(tenant_shard_id, timeline_id, Lsn(Lsn::MAX.0 - 1)) + .await?; + println!( + "{} files found in {}s", + files.len(), + start.elapsed().as_secs_f64() + ); + } anyhow::Ok(()) } diff --git a/pageserver/src/tenant/storage_layer.rs b/pageserver/src/tenant/storage_layer.rs index 9ccf20c0d4..0b3f841ccf 100644 --- a/pageserver/src/tenant/storage_layer.rs +++ b/pageserver/src/tenant/storage_layer.rs @@ -318,7 +318,7 @@ pub(crate) struct LayerFringe { #[derive(Debug)] struct LayerKeyspace { layer: ReadableLayer, - target_keyspace: KeySpace, + target_keyspace: Vec, } impl LayerFringe { @@ -336,6 +336,7 @@ impl LayerFringe { }; let removed = self.layers.remove_entry(&read_desc.layer_id); + match removed { Some(( _, @@ -343,7 +344,15 @@ impl LayerFringe { layer, target_keyspace, }, - )) => Some((layer, target_keyspace, read_desc.lsn_range)), + )) => { + let mut keyspace = KeySpaceRandomAccum::new(); + for ks in target_keyspace { + for part in ks.ranges { + keyspace.add_range(part); + } + } + Some((layer, keyspace.consume_keyspace(), read_desc.lsn_range)) + } None => unreachable!("fringe internals are always consistent"), } } @@ -358,7 +367,7 @@ impl LayerFringe { let entry = self.layers.entry(layer_id.clone()); match entry { Entry::Occupied(mut entry) => { - entry.get_mut().target_keyspace.merge(&keyspace); + entry.get_mut().target_keyspace.push(keyspace); } Entry::Vacant(entry) => { self.planned_reads_by_lsn.push(ReadDesc { @@ -367,7 +376,7 @@ impl LayerFringe { }); entry.insert(LayerKeyspace { layer, - target_keyspace: keyspace, + target_keyspace: vec![keyspace], }); } } From f20a9e760fc7371c84c550adcb3fe6c553610c96 Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Thu, 30 May 2024 10:45:34 -0400 Subject: [PATCH 104/220] chore(pageserver): warn on delete non-existing file (#7847) Consider the following sequence of migration: ``` 1. user starts compute 2. force migrate to v2 3. user continues to write data ``` At the time of (3), the compute node is not aware that the page server does not contain replication states any more, and might continue to ingest neon-file records into the safekeeper. This will leave the pageserver store a partial replication state and cause some errors. For example, the compute could issue a deletion of some aux files in v1, but this file does not exist in v2. Therefore, we should ignore all these errors until everyone is migrated to v2. 
Also note that if we see this warning in prod, it is likely because we did not fully suspend users' compute when flipping the v1/v2 flag. Signed-off-by: Alex Chi Z --- pageserver/src/pgdatadir_mapping.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pageserver/src/pgdatadir_mapping.rs b/pageserver/src/pgdatadir_mapping.rs index afba34c6d1..4480c7df6e 100644 --- a/pageserver/src/pgdatadir_mapping.rs +++ b/pageserver/src/pgdatadir_mapping.rs @@ -1552,7 +1552,7 @@ impl<'a> DatadirModification<'a> { self.tline.aux_file_size_estimator.on_add(content.len()); new_files.push((path, content)); } - (None, true) => anyhow::bail!("removing non-existing aux file: {}", path), + (None, true) => warn!("removing non-existing aux file: {}", path), } let new_val = aux_file::encode_file_value(&new_files)?; self.put(key, Value::Image(new_val.into())); From c18b1c06460f1a55d947cdad267da4f71278655c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arpad=20M=C3=BCller?= Date: Thu, 30 May 2024 17:45:48 +0200 Subject: [PATCH 105/220] Update tokio-epoll-uring for linux-raw-sys (#7918) Updates the `tokio-epoll-uring` dependency. There is [only one change](https://github.com/neondatabase/tokio-epoll-uring/compare/342ddd197a060a8354e8f11f4d12994419fff939...08ccfa94ff5507727bf4d8d006666b5b192e04c6), the adoption of linux-raw-sys for `statx` instead of using libc. Part of #7889. --- Cargo.lock | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 44edbabaf6..96ba5c8ec3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2915,6 +2915,12 @@ version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" +[[package]] +name = "linux-raw-sys" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0b5399f6804fbab912acbd8878ed3532d506b7c951b8f9f164ef90fef39e3f4" + [[package]] name = "lock_api" version = "0.4.10" @@ -6157,7 +6163,7 @@ dependencies = [ [[package]] name = "tokio-epoll-uring" version = "0.1.0" -source = "git+https://github.com/neondatabase/tokio-epoll-uring.git?branch=main#342ddd197a060a8354e8f11f4d12994419fff939" +source = "git+https://github.com/neondatabase/tokio-epoll-uring.git?branch=main#08ccfa94ff5507727bf4d8d006666b5b192e04c6" dependencies = [ "futures", "nix 0.26.4", @@ -6669,11 +6675,12 @@ dependencies = [ [[package]] name = "uring-common" version = "0.1.0" -source = "git+https://github.com/neondatabase/tokio-epoll-uring.git?branch=main#342ddd197a060a8354e8f11f4d12994419fff939" +source = "git+https://github.com/neondatabase/tokio-epoll-uring.git?branch=main#08ccfa94ff5507727bf4d8d006666b5b192e04c6" dependencies = [ "bytes", "io-uring", "libc", + "linux-raw-sys 0.6.4", ] [[package]] From 98dadf854383f7d96cd6b30c87ba49b1b5a48d1e Mon Sep 17 00:00:00 2001 From: John Spray Date: Fri, 31 May 2024 09:18:58 +0100 Subject: [PATCH 106/220] pageserver: quieten some shutdown logs around logical size and flush (#7907) ## Problem Looking at several noisy shutdown logs: - In https://github.com/neondatabase/neon/issues/7861 we're hitting a log error with `InternalServerError(timeline shutting down\n'` on the checkpoint API handler. 
- In the field, we see initial_logical_size_calculation errors on shutdown, via DownloadError - In the field, we see errors logged from layer download code (independent of the error propagated) during shutdown Closes: https://github.com/neondatabase/neon/issues/7861 ## Summary of changes The theme of these changes is to avoid propagating anyhow::Errors for cases that aren't really unexpected error cases that we might want a stacktrace for, and avoid "Other" error variants unless we really do have unexpected error cases to propagate. - On the flush_frozen_layers path, use the `FlushLayerError` type throughout, rather than munging it into an anyhow::Error. Give FlushLayerError an explicit from_anyhow helper that checks for timeline cancellation, and uses it to give a Cancelled error instead of an Other error when the timeline is shutting down. - In logical size calculation, remove BackgroundCalculationError (this type was just a Cancelled variant and an Other variant), and instead use CalculateLogicalSizeError throughout. This can express a PageReconstructError, and has a From impl that translates cancel-like page reconstruct errors to Cancelled. - Replace CalculateLogicalSizeError's Other(anyhow::Error) variant case with a Decode(DeserializeError) variant, as this was the only kind of error we actually used in the Other case. - During layer download, drop out early if the timeline is shutting down, so that we don't do an `error!()` log of the shutdown error in this case. --- pageserver/src/http/routes.rs | 16 ++- pageserver/src/page_service.rs | 6 +- pageserver/src/pgdatadir_mapping.rs | 18 +++- pageserver/src/tenant.rs | 2 +- pageserver/src/tenant/storage_layer/layer.rs | 10 +- pageserver/src/tenant/timeline.rs | 100 ++++++++++-------- .../src/tenant/timeline/detach_ancestor.rs | 4 +- test_runner/regress/test_ondemand_download.py | 2 +- 8 files changed, 102 insertions(+), 56 deletions(-) diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index 8a061f3ae1..913d45d63c 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -74,6 +74,7 @@ use crate::tenant::size::ModelInputs; use crate::tenant::storage_layer::LayerAccessStatsReset; use crate::tenant::storage_layer::LayerName; use crate::tenant::timeline::CompactFlags; +use crate::tenant::timeline::CompactionError; use crate::tenant::timeline::Timeline; use crate::tenant::GetTimelineError; use crate::tenant::SpawnMode; @@ -1813,11 +1814,22 @@ async fn timeline_checkpoint_handler( timeline .freeze_and_flush() .await - .map_err(ApiError::InternalServerError)?; + .map_err(|e| { + match e { + tenant::timeline::FlushLayerError::Cancelled => ApiError::ShuttingDown, + other => ApiError::InternalServerError(other.into()), + + } + })?; timeline .compact(&cancel, flags, &ctx) .await - .map_err(|e| ApiError::InternalServerError(e.into()))?; + .map_err(|e| + match e { + CompactionError::ShuttingDown => ApiError::ShuttingDown, + CompactionError::Other(e) => ApiError::InternalServerError(e) + } + )?; if wait_until_uploaded { timeline.remote_client.wait_completion().await.map_err(ApiError::InternalServerError)?; diff --git a/pageserver/src/page_service.rs b/pageserver/src/page_service.rs index e9651165b1..35150b210e 100644 --- a/pageserver/src/page_service.rs +++ b/pageserver/src/page_service.rs @@ -66,6 +66,7 @@ use crate::tenant::mgr::GetTenantError; use crate::tenant::mgr::ShardResolveResult; use crate::tenant::mgr::ShardSelector; use crate::tenant::mgr::TenantManager; +use 
crate::tenant::timeline::FlushLayerError; use crate::tenant::timeline::WaitLsnError; use crate::tenant::GetTimelineError; use crate::tenant::PageReconstructError; @@ -830,7 +831,10 @@ impl PageServerHandler { // We only want to persist the data, and it doesn't matter if it's in the // shape of deltas or images. info!("flushing layers"); - timeline.freeze_and_flush().await?; + timeline.freeze_and_flush().await.map_err(|e| match e { + FlushLayerError::Cancelled => QueryError::Shutdown, + other => QueryError::Other(other.into()), + })?; info!("done"); Ok(()) diff --git a/pageserver/src/pgdatadir_mapping.rs b/pageserver/src/pgdatadir_mapping.rs index 4480c7df6e..0fc846e5f3 100644 --- a/pageserver/src/pgdatadir_mapping.rs +++ b/pageserver/src/pgdatadir_mapping.rs @@ -78,11 +78,19 @@ pub enum LsnForTimestamp { } #[derive(Debug, thiserror::Error)] -pub enum CalculateLogicalSizeError { +pub(crate) enum CalculateLogicalSizeError { #[error("cancelled")] Cancelled, + + /// Something went wrong while reading the metadata we use to calculate logical size + /// Note that cancellation variants of `PageReconstructError` are transformed to [`Self::Cancelled`] + /// in the `From` implementation for this variant. #[error(transparent)] - Other(#[from] anyhow::Error), + PageRead(PageReconstructError), + + /// Something went wrong deserializing metadata that we read to calculate logical size + #[error("decode error: {0}")] + Decode(#[from] DeserializeError), } #[derive(Debug, thiserror::Error)] @@ -110,7 +118,7 @@ impl From for CalculateLogicalSizeError { PageReconstructError::AncestorStopping(_) | PageReconstructError::Cancelled => { Self::Cancelled } - _ => Self::Other(pre.into()), + _ => Self::PageRead(pre), } } } @@ -763,7 +771,7 @@ impl Timeline { /// # Cancel-Safety /// /// This method is cancellation-safe. 
- pub async fn get_current_logical_size_non_incremental( + pub(crate) async fn get_current_logical_size_non_incremental( &self, lsn: Lsn, ctx: &RequestContext, @@ -772,7 +780,7 @@ impl Timeline { // Fetch list of database dirs and iterate them let buf = self.get(DBDIR_KEY, lsn, ctx).await?; - let dbdir = DbDirectory::des(&buf).context("deserialize db directory")?; + let dbdir = DbDirectory::des(&buf)?; let mut total_size: u64 = 0; for (spcnode, dbnode) in dbdir.dbdirs.keys() { diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index e6bfd57a44..311338554c 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -4154,7 +4154,7 @@ mod tests { .await?; writer.finish_write(lsn); } - tline.freeze_and_flush().await + tline.freeze_and_flush().await.map_err(|e| e.into()) } #[tokio::test] diff --git a/pageserver/src/tenant/storage_layer/layer.rs b/pageserver/src/tenant/storage_layer/layer.rs index 3ac799c69a..1ec13882da 100644 --- a/pageserver/src/tenant/storage_layer/layer.rs +++ b/pageserver/src/tenant/storage_layer/layer.rs @@ -366,7 +366,10 @@ impl Layer { .0 .get_or_maybe_download(true, Some(ctx)) .await - .map_err(|err| GetVectoredError::Other(anyhow::anyhow!(err)))?; + .map_err(|err| match err { + DownloadError::DownloadCancelled => GetVectoredError::Cancelled, + other => GetVectoredError::Other(anyhow::anyhow!(other)), + })?; self.0 .access_stats @@ -1158,6 +1161,11 @@ impl LayerInner { let consecutive_failures = 1 + self.consecutive_failures.fetch_add(1, Ordering::Relaxed); + if timeline.cancel.is_cancelled() { + // If we're shutting down, drop out before logging the error + return Err(e); + } + tracing::error!(consecutive_failures, "layer file download failed: {e:#}"); let backoff = utils::backoff::exponential_backoff_duration_seconds( diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index d4f6e25843..d07e2352fa 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -138,7 +138,7 @@ use super::{remote_timeline_client::index::IndexPart, storage_layer::LayerFringe use super::{remote_timeline_client::RemoteTimelineClient, storage_layer::ReadableLayer}; #[derive(Debug, PartialEq, Eq, Clone, Copy)] -pub(super) enum FlushLoopState { +pub(crate) enum FlushLoopState { NotStarted, Running { #[cfg(test)] @@ -577,7 +577,7 @@ impl PageReconstructError { } #[derive(thiserror::Error, Debug)] -enum CreateImageLayersError { +pub(crate) enum CreateImageLayersError { #[error("timeline shutting down")] Cancelled, @@ -591,17 +591,35 @@ enum CreateImageLayersError { Other(#[from] anyhow::Error), } -#[derive(thiserror::Error, Debug)] -enum FlushLayerError { +#[derive(thiserror::Error, Debug, Clone)] +pub(crate) enum FlushLayerError { /// Timeline cancellation token was cancelled #[error("timeline shutting down")] Cancelled, + /// We tried to flush a layer while the Timeline is in an unexpected state + #[error("cannot flush frozen layers when flush_loop is not running, state is {0:?}")] + NotRunning(FlushLoopState), + + // Arc<> the following non-clonable error types: we must be Clone-able because the flush error is propagated from the flush + // loop via a watch channel, where we can only borrow it. 
#[error(transparent)] - CreateImageLayersError(CreateImageLayersError), + CreateImageLayersError(Arc), #[error(transparent)] - Other(#[from] anyhow::Error), + Other(#[from] Arc), +} + +impl FlushLayerError { + // When crossing from generic anyhow errors to this error type, we explicitly check + // for timeline cancellation to avoid logging inoffensive shutdown errors as warn/err. + fn from_anyhow(timeline: &Timeline, err: anyhow::Error) -> Self { + if timeline.cancel.is_cancelled() { + Self::Cancelled + } else { + Self::Other(Arc::new(err)) + } + } } #[derive(thiserror::Error, Debug)] @@ -696,7 +714,7 @@ impl From for FlushLayerError { fn from(e: CreateImageLayersError) -> Self { match e { CreateImageLayersError::Cancelled => FlushLayerError::Cancelled, - any => FlushLayerError::CreateImageLayersError(any), + any => FlushLayerError::CreateImageLayersError(Arc::new(any)), } } } @@ -1547,13 +1565,13 @@ impl Timeline { /// Flush to disk all data that was written with the put_* functions #[instrument(skip(self), fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), timeline_id=%self.timeline_id))] - pub(crate) async fn freeze_and_flush(&self) -> anyhow::Result<()> { + pub(crate) async fn freeze_and_flush(&self) -> Result<(), FlushLayerError> { self.freeze_and_flush0().await } // This exists to provide a non-span creating version of `freeze_and_flush` we can call without // polluting the span hierarchy. - pub(crate) async fn freeze_and_flush0(&self) -> anyhow::Result<()> { + pub(crate) async fn freeze_and_flush0(&self) -> Result<(), FlushLayerError> { let to_lsn = self.freeze_inmem_layer(false).await; self.flush_frozen_layers_and_wait(to_lsn).await } @@ -2735,11 +2753,6 @@ impl Timeline { self.current_logical_size.initialized.add_permits(1); } - enum BackgroundCalculationError { - Cancelled, - Other(anyhow::Error), - } - let try_once = |attempt: usize| { let background_ctx = &background_ctx; let self_ref = &self; @@ -2757,10 +2770,10 @@ impl Timeline { (Some(permit), StartCircumstances::AfterBackgroundTasksRateLimit) } _ = self_ref.cancel.cancelled() => { - return Err(BackgroundCalculationError::Cancelled); + return Err(CalculateLogicalSizeError::Cancelled); } _ = cancel.cancelled() => { - return Err(BackgroundCalculationError::Cancelled); + return Err(CalculateLogicalSizeError::Cancelled); }, () = skip_concurrency_limiter.cancelled() => { // Some action that is part of a end user interaction requested logical size @@ -2787,18 +2800,7 @@ impl Timeline { .await { Ok(calculated_size) => Ok((calculated_size, metrics_guard)), - Err(CalculateLogicalSizeError::Cancelled) => { - Err(BackgroundCalculationError::Cancelled) - } - Err(CalculateLogicalSizeError::Other(err)) => { - if let Some(PageReconstructError::AncestorStopping(_)) = - err.root_cause().downcast_ref() - { - Err(BackgroundCalculationError::Cancelled) - } else { - Err(BackgroundCalculationError::Other(err)) - } - } + Err(e) => Err(e), } } }; @@ -2810,8 +2812,11 @@ impl Timeline { match try_once(attempt).await { Ok(res) => return ControlFlow::Continue(res), - Err(BackgroundCalculationError::Cancelled) => return ControlFlow::Break(()), - Err(BackgroundCalculationError::Other(e)) => { + Err(CalculateLogicalSizeError::Cancelled) => return ControlFlow::Break(()), + Err( + e @ (CalculateLogicalSizeError::Decode(_) + | CalculateLogicalSizeError::PageRead(_)), + ) => { warn!(attempt, "initial size calculation failed: {e:?}"); // exponential back-off doesn't make sense at these long intervals; // use fixed 
retry interval with generous jitter instead @@ -3717,7 +3722,9 @@ impl Timeline { return; } err @ Err( - FlushLayerError::Other(_) | FlushLayerError::CreateImageLayersError(_), + FlushLayerError::NotRunning(_) + | FlushLayerError::Other(_) + | FlushLayerError::CreateImageLayersError(_), ) => { error!("could not flush frozen layer: {err:?}"); break err.map(|_| ()); @@ -3763,7 +3770,10 @@ impl Timeline { /// `last_record_lsn` may be higher than the highest LSN of a frozen layer: if this is the case, /// it means no data will be written between the top of the highest frozen layer and to_lsn, /// e.g. because this tenant shard has ingested up to to_lsn and not written any data locally for that part of the WAL. - async fn flush_frozen_layers_and_wait(&self, last_record_lsn: Lsn) -> anyhow::Result<()> { + async fn flush_frozen_layers_and_wait( + &self, + last_record_lsn: Lsn, + ) -> Result<(), FlushLayerError> { let mut rx = self.layer_flush_done_tx.subscribe(); // Increment the flush cycle counter and wake up the flush task. @@ -3774,7 +3784,7 @@ impl Timeline { let flush_loop_state = { *self.flush_loop_state.lock().unwrap() }; if !matches!(flush_loop_state, FlushLoopState::Running { .. }) { - anyhow::bail!("cannot flush frozen layers when flush_loop is not running, state is {flush_loop_state:?}") + return Err(FlushLayerError::NotRunning(flush_loop_state)); } self.layer_flush_start_tx.send_modify(|(counter, lsn)| { @@ -3787,14 +3797,11 @@ impl Timeline { { let (last_result_counter, last_result) = &*rx.borrow(); if *last_result_counter >= my_flush_request { - if let Err(_err) = last_result { + if let Err(err) = last_result { // We already logged the original error in // flush_loop. We cannot propagate it to the caller // here, because it might not be Cloneable - anyhow::bail!( - "Could not flush frozen layer. Request id: {}", - my_flush_request - ); + return Err(err.clone()); } else { return Ok(()); } @@ -3803,7 +3810,7 @@ impl Timeline { trace!("waiting for flush to complete"); tokio::select! { rx_e = rx.changed() => { - rx_e?; + rx_e.map_err(|_| FlushLayerError::NotRunning(*self.flush_loop_state.lock().unwrap()))?; }, // Cancellation safety: we are not leaving an I/O in-flight for the flush, we're just ignoring // the notification from [`flush_loop`] that it completed. @@ -3875,7 +3882,8 @@ impl Timeline { EnumSet::empty(), ctx, ) - .await?; + .await + .map_err(|e| FlushLayerError::from_anyhow(self, e))?; if self.cancel.is_cancelled() { return Err(FlushLayerError::Cancelled); @@ -3899,7 +3907,8 @@ impl Timeline { Some(metadata_keyspace.0.ranges[0].clone()), ctx, ) - .await? + .await + .map_err(|e| FlushLayerError::from_anyhow(self, e))? } else { None }; @@ -3926,7 +3935,11 @@ impl Timeline { // Normal case, write out a L0 delta layer file. // `create_delta_layer` will not modify the layer map. // We will remove frozen layer and add delta layer in one atomic operation later. - let Some(layer) = self.create_delta_layer(&frozen_layer, None, ctx).await? else { + let Some(layer) = self + .create_delta_layer(&frozen_layer, None, ctx) + .await + .map_err(|e| FlushLayerError::from_anyhow(self, e))? 
+ else { panic!("delta layer cannot be empty if no filter is applied"); }; ( @@ -3959,7 +3972,8 @@ impl Timeline { if self.set_disk_consistent_lsn(disk_consistent_lsn) { // Schedule remote uploads that will reflect our new disk_consistent_lsn - self.schedule_uploads(disk_consistent_lsn, layers_to_upload)?; + self.schedule_uploads(disk_consistent_lsn, layers_to_upload) + .map_err(|e| FlushLayerError::from_anyhow(self, e))?; } // release lock on 'layers' }; diff --git a/pageserver/src/tenant/timeline/detach_ancestor.rs b/pageserver/src/tenant/timeline/detach_ancestor.rs index e6ddabe5b5..4fc89330ba 100644 --- a/pageserver/src/tenant/timeline/detach_ancestor.rs +++ b/pageserver/src/tenant/timeline/detach_ancestor.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use super::{layer_manager::LayerManager, Timeline}; +use super::{layer_manager::LayerManager, FlushLayerError, Timeline}; use crate::{ context::{DownloadBehavior, RequestContext}, task_mgr::TaskKind, @@ -23,7 +23,7 @@ pub(crate) enum Error { #[error("shutting down, please retry later")] ShuttingDown, #[error("flushing failed")] - FlushAncestor(#[source] anyhow::Error), + FlushAncestor(#[source] FlushLayerError), #[error("layer download failed")] RewrittenDeltaDownloadFailed(#[source] anyhow::Error), #[error("copying LSN prefix locally failed")] diff --git a/test_runner/regress/test_ondemand_download.py b/test_runner/regress/test_ondemand_download.py index b137fb3a5c..6fe23846c7 100644 --- a/test_runner/regress/test_ondemand_download.py +++ b/test_runner/regress/test_ondemand_download.py @@ -402,7 +402,7 @@ def test_download_remote_layers_api( env.pageserver.allowed_errors.extend( [ ".*download failed: downloading evicted layer file failed.*", - f".*initial_size_calculation.*{tenant_id}.*{timeline_id}.*initial size calculation failed: downloading evicted layer file failed", + f".*initial_size_calculation.*{tenant_id}.*{timeline_id}.*initial size calculation failed.*downloading evicted layer file failed", ] ) From e6db8069b0a5476504e723ff1b5bbe079afbac0b Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 28 May 2024 21:28:44 +0300 Subject: [PATCH 107/220] neon_walreader: check after local read that the segment still exists. Otherwise read might receive zeros/garbage if the file is recycled (renamed) for as a future segment. --- pgxn/neon/neon_walreader.c | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/pgxn/neon/neon_walreader.c b/pgxn/neon/neon_walreader.c index e43f4d9d96..60eb8e1fc9 100644 --- a/pgxn/neon/neon_walreader.c +++ b/pgxn/neon/neon_walreader.c @@ -184,8 +184,8 @@ NeonWALRead(NeonWALReader *state, char *buf, XLogRecPtr startptr, Size count, Ti } else if (state->wre_errno == ENOENT) { - nwr_log(LOG, "local read failed as segment at %X/%X doesn't exist, attempting remote", - LSN_FORMAT_ARGS(startptr)); + nwr_log(LOG, "local read at %X/%X len %zu failed as segment file doesn't exist, attempting remote", + LSN_FORMAT_ARGS(startptr), count); return NeonWALReadRemote(state, buf, startptr, count, tli); } else @@ -614,6 +614,7 @@ NeonWALReadLocal(NeonWALReader *state, char *buf, XLogRecPtr startptr, Size coun uint32 startoff; int segbytes; int readbytes; + XLogSegNo lastRemovedSegNo; startoff = XLogSegmentOffset(recptr, state->segcxt.ws_segsize); @@ -689,6 +690,23 @@ NeonWALReadLocal(NeonWALReader *state, char *buf, XLogRecPtr startptr, Size coun return false; } + /* + * Recheck that the segment hasn't been removed while we were reading + * it. 
+ */ + lastRemovedSegNo = XLogGetLastRemovedSegno(); + if (state->seg.ws_segno <= lastRemovedSegNo) + { + char fname[MAXFNAMELEN]; + + state->wre_errno = ENOENT; + + XLogFileName(fname, tli, state->seg.ws_segno, state->segcxt.ws_segsize); + snprintf(state->err_msg, sizeof(state->err_msg), "WAL segment %s has been removed during the read, lastRemovedSegNo " UINT64_FORMAT, + fname, lastRemovedSegNo); + return false; + } + /* Update state for read */ recptr += readbytes; nbytes -= readbytes; From af40bf3c2ee11f23ef10f8e2d99edc3024137a3b Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 28 May 2024 13:34:04 +0300 Subject: [PATCH 108/220] Fix term/epoch confusion in python tests. Call epoch last_log_term and add separate term field. --- test_runner/fixtures/safekeeper/http.py | 6 ++++-- test_runner/regress/test_wal_acceptor.py | 10 +++++----- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/test_runner/fixtures/safekeeper/http.py b/test_runner/fixtures/safekeeper/http.py index a5480f557f..11e6fef28f 100644 --- a/test_runner/fixtures/safekeeper/http.py +++ b/test_runner/fixtures/safekeeper/http.py @@ -19,7 +19,8 @@ class Walreceiver: @dataclass class SafekeeperTimelineStatus: - acceptor_epoch: int + term: int + last_log_term: int pg_version: int # Not exactly a PgVersion, safekeeper returns version as int, for example 150002 for 15.2 flush_lsn: Lsn commit_lsn: Lsn @@ -156,7 +157,8 @@ class SafekeeperHttpClient(requests.Session): resj = res.json() walreceivers = [Walreceiver(wr["conn_id"], wr["status"]) for wr in resj["walreceivers"]] return SafekeeperTimelineStatus( - acceptor_epoch=resj["acceptor_state"]["epoch"], + term=resj["acceptor_state"]["term"], + last_log_term=resj["acceptor_state"]["epoch"], pg_version=resj["pg_info"]["pg_version"], flush_lsn=Lsn(resj["flush_lsn"]), commit_lsn=Lsn(resj["commit_lsn"]), diff --git a/test_runner/regress/test_wal_acceptor.py b/test_runner/regress/test_wal_acceptor.py index cff13e74ee..7dca6c3ec2 100644 --- a/test_runner/regress/test_wal_acceptor.py +++ b/test_runner/regress/test_wal_acceptor.py @@ -841,7 +841,7 @@ def test_timeline_status(neon_env_builder: NeonEnvBuilder, auth_enabled: bool): # fetch something sensible from status tli_status = wa_http_cli.timeline_status(tenant_id, timeline_id) - epoch = tli_status.acceptor_epoch + term = tli_status.term timeline_start_lsn = tli_status.timeline_start_lsn if auth_enabled: @@ -862,8 +862,8 @@ def test_timeline_status(neon_env_builder: NeonEnvBuilder, auth_enabled: bool): endpoint.safe_psql("insert into t values(10)") tli_status = wa_http_cli.timeline_status(tenant_id, timeline_id) - epoch_after_reboot = tli_status.acceptor_epoch - assert epoch_after_reboot > epoch + term_after_reboot = tli_status.term + assert term_after_reboot > term # and timeline_start_lsn stays the same assert tli_status.timeline_start_lsn == timeline_start_lsn @@ -1104,11 +1104,11 @@ def cmp_sk_wal(sks: List[Safekeeper], tenant_id: TenantId, timeline_id: Timeline # First check that term / flush_lsn are the same: it is easier to # report/understand if WALs are different due to that. 
statuses = [sk_http_cli.timeline_status(tenant_id, timeline_id) for sk_http_cli in sk_http_clis] - term_flush_lsns = [(s.acceptor_epoch, s.flush_lsn) for s in statuses] + term_flush_lsns = [(s.last_log_term, s.flush_lsn) for s in statuses] for tfl, sk in zip(term_flush_lsns[1:], sks[1:]): assert ( term_flush_lsns[0] == tfl - ), f"(term, flush_lsn) are not equal on sks {sks[0].id} and {sk.id}: {term_flush_lsns[0]} != {tfl}" + ), f"(last_log_term, flush_lsn) are not equal on sks {sks[0].id} and {sk.id}: {term_flush_lsns[0]} != {tfl}" # check that WALs are identic. segs = [sk.list_segments(tenant_id, timeline_id) for sk in sks] From 1fcc2b37eba2e3be3f248c50713a6cda02a99453 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 28 May 2024 13:43:28 +0300 Subject: [PATCH 109/220] Add test checking term change during pull_timeline. --- test_runner/regress/test_wal_acceptor.py | 60 ++++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/test_runner/regress/test_wal_acceptor.py b/test_runner/regress/test_wal_acceptor.py index 7dca6c3ec2..dce30f5388 100644 --- a/test_runner/regress/test_wal_acceptor.py +++ b/test_runner/regress/test_wal_acceptor.py @@ -17,6 +17,7 @@ import psycopg2 import psycopg2.errors import psycopg2.extras import pytest +import requests from fixtures.broker import NeonBroker from fixtures.common_types import Lsn, TenantId, TimelineId from fixtures.log_helper import log @@ -1867,6 +1868,65 @@ def test_pull_timeline_gc(neon_env_builder: NeonEnvBuilder): assert digests[0] == digests[1], f"digest on src is {digests[0]} but on dst is {digests[1]}" +# Test pull_timeline while concurrently changing term on the donor: +# 1) Start pull_timeline, listing files to fetch. +# 2) Change term on the donor +# 3) Finish pull_timeline. +# +# Currently (until proper membership change procedure), we want to pull_timeline +# to fetch the log up to . This is unsafe if term +# changes during the procedure (unless timeline is locked all the time but we +# don't want that): recepient might end up with mix of WAL from different +# histories. Thus the schedule above is expected to fail. Later we'd allow +# pull_timeline to only initialize timeline to any valid state (up to +# commit_lsn), holding switch to fully new configuration until it recovers +# enough, so it won't be affected by term change anymore. +# +# Expected to fail while term check is not implemented. 
+@pytest.mark.xfail +def test_pull_timeline_term_change(neon_env_builder: NeonEnvBuilder): + neon_env_builder.num_safekeepers = 3 + neon_env_builder.enable_safekeeper_remote_storage(default_remote_storage()) + env = neon_env_builder.init_start() + tenant_id = env.initial_tenant + timeline_id = env.initial_timeline + + (src_sk, dst_sk) = (env.safekeepers[0], env.safekeepers[2]) + + log.info("use only first 2 safekeepers, 3rd will be seeded") + ep = env.endpoints.create("main") + ep.active_safekeepers = [1, 2] + ep.start() + ep.safe_psql("create table t(key int, value text)") + ep.safe_psql("insert into t select generate_series(1, 1000), 'pear'") + + dst_http = dst_sk.http_client() + # run pull_timeline which will halt before downloading files + dst_http.configure_failpoints(("sk-pull-timeline-after-list-pausable", "pause")) + pt_handle = PropagatingThread( + target=dst_sk.pull_timeline, args=([src_sk], tenant_id, timeline_id) + ) + pt_handle.start() + dst_sk.wait_until_paused("sk-pull-timeline-after-list-pausable") + + src_http = src_sk.http_client() + term_before = src_http.timeline_status(tenant_id, timeline_id).term + + # restart compute to bump term + ep.stop() + ep = env.endpoints.create("main") + ep.active_safekeepers = [1, 2] + ep.start() + ep.safe_psql("insert into t select generate_series(1, 100), 'pear'") + + term_after = src_http.timeline_status(tenant_id, timeline_id).term + assert term_after > term_before, f"term_after={term_after}, term_before={term_before}" + + dst_http.configure_failpoints(("sk-pull-timeline-after-list-pausable", "off")) + with pytest.raises(requests.exceptions.HTTPError): + pt_handle.join() + + # In this test we check for excessive START_REPLICATION and START_WAL_PUSH queries # when compute is active, but there are no writes to the timeline. In that case # pageserver should maintain a single connection to safekeeper and don't attempt From 7ec70b5eff731f6072e4804050d49f0b951ef872 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 28 May 2024 12:47:04 +0300 Subject: [PATCH 110/220] safekeeper: rename epoch to last_log_term. epoch is a historical and potentially confusing name. It semantically means lastLogTerm from the raft paper, so let's use it. This commit changes only internal namings, not public interface (http). 
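For readers less familiar with the raft terminology, the distinction is roughly the following. This is a hypothetical sketch with simplified types, not the safekeeper's actual term-history API: last_log_term is the term of the highest entry actually written to the log, which can lag behind the current (voted) term.

```
// Hypothetical sketch: last_log_term is derived from the term history and the
// flushed WAL position, while the current term only reflects voting.
struct TermLsn {
    term: u64,
    lsn: u64, // LSN at which this term starts writing WAL
}

fn last_log_term(term_history: &[TermLsn], flush_lsn: u64) -> u64 {
    term_history
        .iter()
        .take_while(|e| e.lsn <= flush_lsn) // history is sorted by start LSN
        .last()
        .map(|e| e.term)
        .unwrap_or(0)
}

fn main() {
    // Term 3 has been granted by voting, but no WAL has been written under it yet.
    let history = [TermLsn { term: 1, lsn: 0 }, TermLsn { term: 2, lsn: 100 }];
    let current_term = 3;
    assert_eq!(last_log_term(&history, 150), 2);
    assert_ne!(last_log_term(&history, 150), current_term);
}
```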
--- safekeeper/src/http/routes.rs | 8 +++--- safekeeper/src/json_ctrl.rs | 2 +- safekeeper/src/recovery.rs | 2 +- safekeeper/src/safekeeper.rs | 53 ++++++++++++++++++----------------- safekeeper/src/timeline.rs | 8 +++--- 5 files changed, 38 insertions(+), 35 deletions(-) diff --git a/safekeeper/src/http/routes.rs b/safekeeper/src/http/routes.rs index 4aacd3421d..593e102e35 100644 --- a/safekeeper/src/http/routes.rs +++ b/safekeeper/src/http/routes.rs @@ -85,11 +85,11 @@ impl From for TermLsn { } } -/// Augment AcceptorState with epoch for convenience +/// Augment AcceptorState with last_log_term for convenience #[derive(Debug, Serialize, Deserialize)] pub struct AcceptorStateStatus { pub term: Term, - pub epoch: Term, + pub epoch: Term, // aka last_log_term pub term_history: Vec, } @@ -130,7 +130,7 @@ async fn timeline_status_handler(request: Request) -> Result) -> Result { let ar_hdr = AppendRequestHeader { term: donor.term, - epoch_start_lsn: Lsn::INVALID, // unused + term_start_lsn: Lsn::INVALID, // unused begin_lsn: Lsn(xlog_data.wal_start()), end_lsn: Lsn(xlog_data.wal_start()) + xlog_data.data().len() as u64, commit_lsn: Lsn::INVALID, // do not attempt to advance, peer communication anyway does it diff --git a/safekeeper/src/safekeeper.rs b/safekeeper/src/safekeeper.rs index 2a620f5fef..4686c9aa8e 100644 --- a/safekeeper/src/safekeeper.rs +++ b/safekeeper/src/safekeeper.rs @@ -188,8 +188,8 @@ pub struct AcceptorState { } impl AcceptorState { - /// acceptor's epoch is the term of the highest entry in the log - pub fn get_epoch(&self, flush_lsn: Lsn) -> Term { + /// acceptor's last_log_term is the term of the highest entry in the log + pub fn get_last_log_term(&self, flush_lsn: Lsn) -> Term { let th = self.term_history.up_to(flush_lsn); match th.0.last() { Some(e) => e.term, @@ -305,9 +305,9 @@ pub struct AppendRequest { pub struct AppendRequestHeader { // safekeeper's current term; if it is higher than proposer's, the compute is out of date. pub term: Term, - // TODO: remove this field, it in unused -- LSN of term switch can be taken - // from ProposerElected (as well as from term history). - pub epoch_start_lsn: Lsn, + // TODO: remove this field from the protocol, it in unused -- LSN of term + // switch can be taken from ProposerElected (as well as from term history). + pub term_start_lsn: Lsn, /// start position of message in WAL pub begin_lsn: Lsn, /// end position of message in WAL @@ -326,9 +326,10 @@ pub struct AppendResponse { // Current term of the safekeeper; if it is higher than proposer's, the // compute is out of date. pub term: Term, - // NOTE: this is physical end of wal on safekeeper; currently it doesn't - // make much sense without taking epoch into account, as history can be - // diverged. + // Flushed end of wal on safekeeper; one should be always mindful from what + // term history this value comes, either checking history directly or + // observing term being set to one for which WAL truncation is known to have + // happened. pub flush_lsn: Lsn, // We report back our awareness about which WAL is committed, as this is // a criterion for walproposer --sync mode exit @@ -482,8 +483,8 @@ impl AcceptorProposerMessage { /// - messages from broker peers pub struct SafeKeeper { /// LSN since the proposer safekeeper currently talking to appends WAL; - /// determines epoch switch point. - pub epoch_start_lsn: Lsn, + /// determines last_log_term switch point. 
+ pub term_start_lsn: Lsn, pub state: TimelineState, // persistent state storage pub wal_store: WAL, @@ -511,7 +512,7 @@ where } Ok(SafeKeeper { - epoch_start_lsn: Lsn(0), + term_start_lsn: Lsn(0), state: TimelineState::new(state), wal_store, node_id, @@ -531,8 +532,10 @@ where self.state.acceptor_state.term } - pub fn get_epoch(&self) -> Term { - self.state.acceptor_state.get_epoch(self.flush_lsn()) + pub fn get_last_log_term(&self) -> Term { + self.state + .acceptor_state + .get_last_log_term(self.flush_lsn()) } /// wal_store wrapper avoiding commit_lsn <= flush_lsn violation when we don't have WAL yet. @@ -713,7 +716,7 @@ where // proceed, but to prevent commit_lsn surprisingly going down we should // either refuse the session (simpler) or skip the part we already have // from the stream (can be implemented). - if msg.term == self.get_epoch() && self.flush_lsn() > msg.start_streaming_at { + if msg.term == self.get_last_log_term() && self.flush_lsn() > msg.start_streaming_at { bail!("refusing ProposerElected which is going to overwrite correct WAL: term={}, flush_lsn={}, start_streaming_at={}; restarting the handshake should help", msg.term, self.flush_lsn(), msg.start_streaming_at) } @@ -788,7 +791,7 @@ where // Cache LSN where term starts to immediately fsync control file with // commit_lsn once we reach it -- sync-safekeepers finishes when // persisted commit_lsn on majority of safekeepers aligns. - self.epoch_start_lsn = match msg.term_history.0.last() { + self.term_start_lsn = match msg.term_history.0.last() { None => bail!("proposer elected with empty term history"), Some(term_lsn_start) => term_lsn_start.lsn, }; @@ -814,11 +817,11 @@ where self.state.inmem.commit_lsn = commit_lsn; - // If new commit_lsn reached epoch switch, force sync of control + // If new commit_lsn reached term switch, force sync of control // file: walproposer in sync mode is very interested when this // happens. Note: this is for sync-safekeepers mode only, as - // otherwise commit_lsn might jump over epoch_start_lsn. - if commit_lsn >= self.epoch_start_lsn && self.state.commit_lsn < self.epoch_start_lsn { + // otherwise commit_lsn might jump over term_start_lsn. + if commit_lsn >= self.term_start_lsn && self.state.commit_lsn < self.term_start_lsn { self.state.flush().await?; } @@ -933,7 +936,7 @@ where // Note: the check is too restrictive, generally we can update local // commit_lsn if our history matches (is part of) history of advanced // commit_lsn provider. - if sk_info.last_log_term == self.get_epoch() { + if sk_info.last_log_term == self.get_last_log_term() { self.update_commit_lsn(Lsn(sk_info.commit_lsn)).await?; } } @@ -1079,7 +1082,7 @@ mod tests { } #[tokio::test] - async fn test_epoch_switch() { + async fn test_last_log_term_switch() { let storage = InMemoryState { persisted_state: test_sk_state(), }; @@ -1089,7 +1092,7 @@ mod tests { let mut ar_hdr = AppendRequestHeader { term: 1, - epoch_start_lsn: Lsn(3), + term_start_lsn: Lsn(3), begin_lsn: Lsn(1), end_lsn: Lsn(2), commit_lsn: Lsn(0), @@ -1114,14 +1117,14 @@ mod tests { .await .unwrap(); - // check that AppendRequest before epochStartLsn doesn't switch epoch + // check that AppendRequest before term_start_lsn doesn't switch last_log_term. 
let resp = sk .process_msg(&ProposerAcceptorMessage::AppendRequest(append_request)) .await; assert!(resp.is_ok()); - assert_eq!(sk.get_epoch(), 0); + assert_eq!(sk.get_last_log_term(), 0); - // but record at epochStartLsn does the switch + // but record at term_start_lsn does the switch ar_hdr.begin_lsn = Lsn(2); ar_hdr.end_lsn = Lsn(3); append_request = AppendRequest { @@ -1133,7 +1136,7 @@ mod tests { .await; assert!(resp.is_ok()); sk.wal_store.truncate_wal(Lsn(3)).await.unwrap(); // imitate the complete record at 3 %) - assert_eq!(sk.get_epoch(), 1); + assert_eq!(sk.get_last_log_term(), 1); } #[test] diff --git a/safekeeper/src/timeline.rs b/safekeeper/src/timeline.rs index f30c503382..aa9ccfc21e 100644 --- a/safekeeper/src/timeline.rs +++ b/safekeeper/src/timeline.rs @@ -244,7 +244,7 @@ impl SharedState { timeline_id: ttid.timeline_id.as_ref().to_owned(), }), term: self.sk.state.acceptor_state.term, - last_log_term: self.sk.get_epoch(), + last_log_term: self.sk.get_last_log_term(), flush_lsn: self.sk.flush_lsn().0, // note: this value is not flushed to control file yet and can be lost commit_lsn: self.sk.state.inmem.commit_lsn.0, @@ -704,7 +704,7 @@ impl Timeline { pub async fn recovery_needed(&self, heartbeat_timeout: Duration) -> RecoveryNeededInfo { let ss = self.read_shared_state().await; let term = ss.sk.state.acceptor_state.term; - let last_log_term = ss.sk.get_epoch(); + let last_log_term = ss.sk.get_last_log_term(); let flush_lsn = ss.sk.flush_lsn(); // note that peers contain myself, but that's ok -- we are interested only in peers which are strictly ahead of us. let mut peers = ss.get_peers(heartbeat_timeout); @@ -844,7 +844,7 @@ impl Timeline { timeline_is_active: self.broker_active.load(Ordering::Relaxed), num_computes: self.walreceivers.get_num() as u32, last_removed_segno: state.last_removed_segno, - epoch_start_lsn: state.sk.epoch_start_lsn, + epoch_start_lsn: state.sk.term_start_lsn, mem_state: state.sk.state.inmem.clone(), persisted_state: state.sk.state.clone(), flush_lsn: state.sk.wal_store.flush_lsn(), @@ -867,7 +867,7 @@ impl Timeline { active: self.broker_active.load(Ordering::Relaxed), num_computes: self.walreceivers.get_num() as u32, last_removed_segno: state.last_removed_segno, - epoch_start_lsn: state.sk.epoch_start_lsn, + epoch_start_lsn: state.sk.term_start_lsn, mem_state: state.sk.state.inmem.clone(), write_lsn, write_record_lsn, From 5a394fde566eea238e317604a3e9b414bed04d1b Mon Sep 17 00:00:00 2001 From: John Spray Date: Fri, 31 May 2024 13:31:42 +0100 Subject: [PATCH 111/220] pageserver: avoid spurious "bad state" logs/errors during shutdown (#7912) ## Problem - Initial size calculations tend to fail with `Bad state (not active)` Closes: https://github.com/neondatabase/neon/issues/7911 ## Summary of changes - In `wait_lsn`, return WaitLsnError::Cancelled rather than BadState when the state is Stopping - Replace PageReconstructError's `Other` variant with a specific `BadState` variant - Avoid returning anyhow::Error from get_ready_ancestor_timeline -- this was only used for the case where there was no ancestor. All callers of this function had implicitly checked that the ancestor timeline exists before calling it, so they can pass in the ancestor instead of handling an error. 
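The general pattern, as a hypothetical sketch (simplified names, not the real pageserver error types): shutdown-like conditions get a dedicated variant, so request handlers can map them to a quiet "shutting down" response instead of logging them as unexpected internal errors.

```
// Hypothetical sketch: keep cancellation/shutdown as its own variant rather than
// folding it into a generic "other" error, so callers can handle it quietly.
use std::time::Duration;

#[derive(Debug)]
enum WaitLsnErr {
    Shutdown,
    BadState(&'static str), // e.g. "Broken" or "Loading"
    Timeout(Duration),
}

#[derive(Debug)]
enum ApiErr {
    ShuttingDown,
    InternalServerError(String),
}

fn map_err(e: WaitLsnErr) -> ApiErr {
    match e {
        WaitLsnErr::Shutdown => ApiErr::ShuttingDown,
        other => ApiErr::InternalServerError(format!("{other:?}")),
    }
}

fn main() {
    assert!(matches!(map_err(WaitLsnErr::Shutdown), ApiErr::ShuttingDown));
    assert!(matches!(
        map_err(WaitLsnErr::Timeout(Duration::from_secs(1))),
        ApiErr::InternalServerError(_)
    ));
}
```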
--- pageserver/src/page_service.rs | 4 +- pageserver/src/tenant.rs | 9 ++-- pageserver/src/tenant/timeline.rs | 89 +++++++++++++++---------------- 3 files changed, 51 insertions(+), 51 deletions(-) diff --git a/pageserver/src/page_service.rs b/pageserver/src/page_service.rs index 35150b210e..ae389826d5 100644 --- a/pageserver/src/page_service.rs +++ b/pageserver/src/page_service.rs @@ -373,7 +373,7 @@ impl From for PageStreamError { match value { e @ WaitLsnError::Timeout(_) => Self::LsnTimeout(e), WaitLsnError::Shutdown => Self::Shutdown, - WaitLsnError::BadState => Self::Reconnect("Timeline is not active".into()), + e @ WaitLsnError::BadState { .. } => Self::Reconnect(format!("{e}").into()), } } } @@ -383,7 +383,7 @@ impl From for QueryError { match value { e @ WaitLsnError::Timeout(_) => Self::Other(anyhow::Error::new(e)), WaitLsnError::Shutdown => Self::Shutdown, - WaitLsnError::BadState => Self::Reconnect, + WaitLsnError::BadState { .. } => Self::Reconnect, } } } diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 311338554c..0a9637884f 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -1507,7 +1507,7 @@ impl Tenant { .wait_lsn(*lsn, timeline::WaitLsnWaiter::Tenant, ctx) .await .map_err(|e| match e { - e @ (WaitLsnError::Timeout(_) | WaitLsnError::BadState) => { + e @ (WaitLsnError::Timeout(_) | WaitLsnError::BadState { .. }) => { CreateTimelineError::AncestorLsn(anyhow::anyhow!(e)) } WaitLsnError::Shutdown => CreateTimelineError::ShuttingDown, @@ -4308,9 +4308,10 @@ mod tests { // This needs to traverse to the parent, and fails. let err = newtline.get(*TEST_KEY, Lsn(0x50), &ctx).await.unwrap_err(); - assert!(err - .to_string() - .contains("will not become active. Current state: Broken")); + assert!(err.to_string().starts_with(&format!( + "Bad state on timeline {}: Broken", + tline.timeline_id + ))); Ok(()) } diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index d07e2352fa..b498876465 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -496,7 +496,7 @@ pub(crate) enum PageReconstructError { Other(#[from] anyhow::Error), #[error("Ancestor LSN wait error: {0}")] - AncestorLsnTimeout(#[from] WaitLsnError), + AncestorLsnTimeout(WaitLsnError), #[error("timeline shutting down")] Cancelled, @@ -651,11 +651,14 @@ pub(crate) enum GetReadyAncestorError { #[error("Ancestor LSN wait error: {0}")] AncestorLsnTimeout(#[from] WaitLsnError), + #[error("Bad state on timeline {timeline_id}: {state:?}")] + BadState { + timeline_id: TimelineId, + state: TimelineState, + }, + #[error("Cancelled")] Cancelled, - - #[error(transparent)] - Other(#[from] anyhow::Error), } #[derive(Clone, Copy)] @@ -690,8 +693,8 @@ pub(crate) enum WaitLsnError { Shutdown, // Called on an timeline not in active state or shutting down - #[error("Bad state (not active)")] - BadState, + #[error("Bad timeline state: {0:?}")] + BadState(TimelineState), // Timeout expired while waiting for LSN to catch up with goal. #[error("{0}")] @@ -756,8 +759,8 @@ impl From for PageReconstructError { match e { AncestorStopping(tid) => PageReconstructError::AncestorStopping(tid), AncestorLsnTimeout(wait_err) => PageReconstructError::AncestorLsnTimeout(wait_err), + bad_state @ BadState { .. 
} => PageReconstructError::Other(anyhow::anyhow!(bad_state)), Cancelled => PageReconstructError::Cancelled, - Other(other) => PageReconstructError::Other(other), } } } @@ -1466,10 +1469,11 @@ impl Timeline { who_is_waiting: WaitLsnWaiter<'_>, ctx: &RequestContext, /* Prepare for use by cancellation */ ) -> Result<(), WaitLsnError> { - if self.cancel.is_cancelled() { + let state = self.current_state(); + if self.cancel.is_cancelled() || matches!(state, TimelineState::Stopping) { return Err(WaitLsnError::Shutdown); - } else if !self.is_active() { - return Err(WaitLsnError::BadState); + } else if !matches!(state, TimelineState::Active) { + return Err(WaitLsnError::BadState(state)); } if cfg!(debug_assertions) { @@ -3193,17 +3197,21 @@ impl Timeline { } // Recurse into ancestor if needed - if is_inherited_key(key) && Lsn(cont_lsn.0 - 1) <= timeline.ancestor_lsn { - trace!( - "going into ancestor {}, cont_lsn is {}", - timeline.ancestor_lsn, - cont_lsn - ); + if let Some(ancestor_timeline) = timeline.ancestor_timeline.as_ref() { + if is_inherited_key(key) && Lsn(cont_lsn.0 - 1) <= timeline.ancestor_lsn { + trace!( + "going into ancestor {}, cont_lsn is {}", + timeline.ancestor_lsn, + cont_lsn + ); - timeline_owned = timeline.get_ready_ancestor_timeline(ctx).await?; - timeline = &*timeline_owned; - prev_lsn = None; - continue 'outer; + timeline_owned = timeline + .get_ready_ancestor_timeline(ancestor_timeline, ctx) + .await?; + timeline = &*timeline_owned; + prev_lsn = None; + continue 'outer; + } } let guard = timeline.layers.read().await; @@ -3352,10 +3360,10 @@ impl Timeline { break None; } - // Not fully retrieved but no ancestor timeline. - if timeline.ancestor_timeline.is_none() { + let Some(ancestor_timeline) = timeline.ancestor_timeline.as_ref() else { + // Not fully retrieved but no ancestor timeline. break Some(keyspace); - } + }; // Now we see if there are keys covered by the image layer but does not exist in the // image layer, which means that the key does not exist. @@ -3375,7 +3383,7 @@ impl Timeline { // Take the min to avoid reconstructing a page with data newer than request Lsn. cont_lsn = std::cmp::min(Lsn(request_lsn.0 + 1), Lsn(timeline.ancestor_lsn.0 + 1)); timeline_owned = timeline - .get_ready_ancestor_timeline(ctx) + .get_ready_ancestor_timeline(ancestor_timeline, ctx) .await .map_err(GetVectoredError::GetReadyAncestorError)?; timeline = &*timeline_owned; @@ -3547,13 +3555,9 @@ impl Timeline { async fn get_ready_ancestor_timeline( &self, + ancestor: &Arc, ctx: &RequestContext, ) -> Result, GetReadyAncestorError> { - let ancestor = match self.get_ancestor_timeline() { - Ok(timeline) => timeline, - Err(e) => return Err(GetReadyAncestorError::from(e)), - }; - // It's possible that the ancestor timeline isn't active yet, or // is active but hasn't yet caught up to the branch point. Wait // for it. @@ -3586,11 +3590,10 @@ impl Timeline { )); } Err(state) => { - return Err(GetReadyAncestorError::Other(anyhow::anyhow!( - "Timeline {} will not become active. 
Current state: {:?}", - ancestor.timeline_id, - &state, - ))); + return Err(GetReadyAncestorError::BadState { + timeline_id: ancestor.timeline_id, + state, + }); } } ancestor @@ -3599,21 +3602,17 @@ impl Timeline { .map_err(|e| match e { e @ WaitLsnError::Timeout(_) => GetReadyAncestorError::AncestorLsnTimeout(e), WaitLsnError::Shutdown => GetReadyAncestorError::Cancelled, - e @ WaitLsnError::BadState => GetReadyAncestorError::Other(anyhow::anyhow!(e)), + WaitLsnError::BadState(state) => GetReadyAncestorError::BadState { + timeline_id: ancestor.timeline_id, + state, + }, })?; - Ok(ancestor) + Ok(ancestor.clone()) } - pub(crate) fn get_ancestor_timeline(&self) -> anyhow::Result> { - let ancestor = self.ancestor_timeline.as_ref().with_context(|| { - format!( - "Ancestor is missing. Timeline id: {} Ancestor id {:?}", - self.timeline_id, - self.get_ancestor_timeline_id(), - ) - })?; - Ok(Arc::clone(ancestor)) + pub(crate) fn get_ancestor_timeline(&self) -> Option> { + self.ancestor_timeline.clone() } pub(crate) fn get_shard_identity(&self) -> &ShardIdentity { From 16b2e74037dfcacec2ceeef3bc597e75b435cc1b Mon Sep 17 00:00:00 2001 From: Arthur Petukhovsky Date: Fri, 31 May 2024 14:19:45 +0100 Subject: [PATCH 112/220] Add FullAccessTimeline guard in safekeepers (#7887) This is a preparation for https://github.com/neondatabase/neon/issues/6337. The idea is to add FullAccessTimeline, which will act as a guard for tasks requiring access to WAL files. Eviction will be blocked on these tasks and WAL won't be deleted from disk until there is at least one active FullAccessTimeline. To get FullAccessTimeline, tasks call `tli.full_access_guard().await?`. After eviction is implemented, this function will be responsible for downloading missing WAL file and waiting until the download finishes. This commit also contains other small refactorings: - Separate `get_tenant_dir` and `get_timeline_dir` functions for building a local path. This is useful for looking at usages and finding tasks requiring access to local filesystem. 
- `timeline_manager` is now responsible for spawning all background tasks - WAL removal task is now spawned instantly after horizon is updated --- libs/remote_storage/src/lib.rs | 4 +- pageserver/src/deletion_queue.rs | 2 +- safekeeper/src/bin/safekeeper.rs | 11 +- safekeeper/src/control_file.rs | 37 ++- safekeeper/src/copy_timeline.rs | 26 +- safekeeper/src/debug_dump.rs | 38 +-- safekeeper/src/http/routes.rs | 22 +- safekeeper/src/json_ctrl.rs | 20 +- safekeeper/src/lib.rs | 14 +- safekeeper/src/pull_timeline.rs | 6 +- safekeeper/src/receive_wal.rs | 19 +- safekeeper/src/recovery.rs | 100 ++++++- safekeeper/src/remove_wal.rs | 56 ++-- safekeeper/src/safekeeper.rs | 19 -- safekeeper/src/send_wal.rs | 21 +- safekeeper/src/timeline.rs | 379 +++++++++---------------- safekeeper/src/timeline_manager.rs | 331 +++++++++++++++++---- safekeeper/src/timelines_global_map.rs | 21 +- safekeeper/src/wal_backup.rs | 100 +++---- safekeeper/src/wal_backup_partial.rs | 27 +- safekeeper/src/wal_storage.rs | 21 +- test_runner/fixtures/common_types.py | 12 + test_runner/fixtures/neon_fixtures.py | 16 ++ 23 files changed, 726 insertions(+), 576 deletions(-) diff --git a/libs/remote_storage/src/lib.rs b/libs/remote_storage/src/lib.rs index 708662f20f..cb3df0985d 100644 --- a/libs/remote_storage/src/lib.rs +++ b/libs/remote_storage/src/lib.rs @@ -121,8 +121,8 @@ impl RemotePath { self.0.file_name() } - pub fn join(&self, segment: &Utf8Path) -> Self { - Self(self.0.join(segment)) + pub fn join(&self, path: impl AsRef) -> Self { + Self(self.0.join(path)) } pub fn get_path(&self) -> &Utf8PathBuf { diff --git a/pageserver/src/deletion_queue.rs b/pageserver/src/deletion_queue.rs index 8790a9b0a8..3960fc1b99 100644 --- a/pageserver/src/deletion_queue.rs +++ b/pageserver/src/deletion_queue.rs @@ -311,7 +311,7 @@ impl DeletionList { result.extend( timeline_layers .into_iter() - .map(|l| timeline_remote_path.join(&Utf8PathBuf::from(l))), + .map(|l| timeline_remote_path.join(Utf8PathBuf::from(l))), ); } } diff --git a/safekeeper/src/bin/safekeeper.rs b/safekeeper/src/bin/safekeeper.rs index aee3898ac7..7476654426 100644 --- a/safekeeper/src/bin/safekeeper.rs +++ b/safekeeper/src/bin/safekeeper.rs @@ -29,13 +29,12 @@ use safekeeper::defaults::{ DEFAULT_HEARTBEAT_TIMEOUT, DEFAULT_HTTP_LISTEN_ADDR, DEFAULT_MAX_OFFLOADER_LAG_BYTES, DEFAULT_PARTIAL_BACKUP_TIMEOUT, DEFAULT_PG_LISTEN_ADDR, }; -use safekeeper::remove_wal; +use safekeeper::http; use safekeeper::wal_service; use safekeeper::GlobalTimelines; use safekeeper::SafeKeeperConf; use safekeeper::{broker, WAL_SERVICE_RUNTIME}; use safekeeper::{control_file, BROKER_RUNTIME}; -use safekeeper::{http, WAL_REMOVER_RUNTIME}; use safekeeper::{wal_backup, HTTP_RUNTIME}; use storage_broker::DEFAULT_ENDPOINT; use utils::auth::{JwtAuth, Scope, SwappableJwtAuth}; @@ -441,14 +440,6 @@ async fn start_safekeeper(conf: SafeKeeperConf) -> Result<()> { .map(|res| ("broker main".to_owned(), res)); tasks_handles.push(Box::pin(broker_task_handle)); - let conf_ = conf.clone(); - let wal_remover_handle = current_thread_rt - .as_ref() - .unwrap_or_else(|| WAL_REMOVER_RUNTIME.handle()) - .spawn(remove_wal::task_main(conf_)) - .map(|res| ("WAL remover".to_owned(), res)); - tasks_handles.push(Box::pin(wal_remover_handle)); - set_build_info_metric(GIT_VERSION, BUILD_TAG); // TODO: update tokio-stream, convert to real async Stream with diff --git a/safekeeper/src/control_file.rs b/safekeeper/src/control_file.rs index fe9f2e6899..e9bb5202da 100644 --- a/safekeeper/src/control_file.rs +++ 
b/safekeeper/src/control_file.rs @@ -2,7 +2,7 @@ use anyhow::{bail, ensure, Context, Result}; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; -use camino::Utf8PathBuf; +use camino::{Utf8Path, Utf8PathBuf}; use tokio::fs::File; use tokio::io::AsyncWriteExt; use utils::crashsafe::durable_rename; @@ -12,9 +12,9 @@ use std::ops::Deref; use std::path::Path; use std::time::Instant; -use crate::control_file_upgrade::upgrade_control_file; use crate::metrics::PERSIST_CONTROL_FILE_SECONDS; use crate::state::TimelinePersistentState; +use crate::{control_file_upgrade::upgrade_control_file, timeline::get_timeline_dir}; use utils::{bin_ser::LeSer, id::TenantTimelineId}; use crate::SafeKeeperConf; @@ -43,7 +43,7 @@ pub trait Storage: Deref { pub struct FileStorage { // save timeline dir to avoid reconstructing it every time timeline_dir: Utf8PathBuf, - conf: SafeKeeperConf, + no_sync: bool, /// Last state persisted to disk. state: TimelinePersistentState, @@ -54,13 +54,12 @@ pub struct FileStorage { impl FileStorage { /// Initialize storage by loading state from disk. pub fn restore_new(ttid: &TenantTimelineId, conf: &SafeKeeperConf) -> Result { - let timeline_dir = conf.timeline_dir(ttid); - - let state = Self::load_control_file_conf(conf, ttid)?; + let timeline_dir = get_timeline_dir(conf, ttid); + let state = Self::load_control_file_from_dir(&timeline_dir)?; Ok(FileStorage { timeline_dir, - conf: conf.clone(), + no_sync: conf.no_sync, state, last_persist_at: Instant::now(), }) @@ -74,7 +73,7 @@ impl FileStorage { ) -> Result { let store = FileStorage { timeline_dir, - conf: conf.clone(), + no_sync: conf.no_sync, state, last_persist_at: Instant::now(), }; @@ -102,12 +101,9 @@ impl FileStorage { upgrade_control_file(buf, version) } - /// Load control file for given ttid at path specified by conf. - pub fn load_control_file_conf( - conf: &SafeKeeperConf, - ttid: &TenantTimelineId, - ) -> Result { - let path = conf.timeline_dir(ttid).join(CONTROL_FILE_NAME); + /// Load control file from given directory. 
+ pub fn load_control_file_from_dir(timeline_dir: &Utf8Path) -> Result { + let path = timeline_dir.join(CONTROL_FILE_NAME); Self::load_control_file(path) } @@ -203,7 +199,7 @@ impl Storage for FileStorage { })?; let control_path = self.timeline_dir.join(CONTROL_FILE_NAME); - durable_rename(&control_partial_path, &control_path, !self.conf.no_sync).await?; + durable_rename(&control_partial_path, &control_path, !self.no_sync).await?; // update internal state self.state = s.clone(); @@ -233,12 +229,13 @@ mod test { conf: &SafeKeeperConf, ttid: &TenantTimelineId, ) -> Result<(FileStorage, TimelinePersistentState)> { - fs::create_dir_all(conf.timeline_dir(ttid)) + let timeline_dir = get_timeline_dir(conf, ttid); + fs::create_dir_all(&timeline_dir) .await .expect("failed to create timeline dir"); Ok(( FileStorage::restore_new(ttid, conf)?, - FileStorage::load_control_file_conf(conf, ttid)?, + FileStorage::load_control_file_from_dir(&timeline_dir)?, )) } @@ -246,11 +243,11 @@ mod test { conf: &SafeKeeperConf, ttid: &TenantTimelineId, ) -> Result<(FileStorage, TimelinePersistentState)> { - fs::create_dir_all(conf.timeline_dir(ttid)) + let timeline_dir = get_timeline_dir(conf, ttid); + fs::create_dir_all(&timeline_dir) .await .expect("failed to create timeline dir"); let state = TimelinePersistentState::empty(); - let timeline_dir = conf.timeline_dir(ttid); let storage = FileStorage::create_new(timeline_dir, conf, state.clone())?; Ok((storage, state)) } @@ -291,7 +288,7 @@ mod test { .await .expect("failed to persist state"); } - let control_path = conf.timeline_dir(&ttid).join(CONTROL_FILE_NAME); + let control_path = get_timeline_dir(&conf, &ttid).join(CONTROL_FILE_NAME); let mut data = fs::read(&control_path).await.unwrap(); data[0] += 1; // change the first byte of the file to fail checksum validation fs::write(&control_path, &data) diff --git a/safekeeper/src/copy_timeline.rs b/safekeeper/src/copy_timeline.rs index 3023d4e2cb..51cf4db6b5 100644 --- a/safekeeper/src/copy_timeline.rs +++ b/safekeeper/src/copy_timeline.rs @@ -15,10 +15,10 @@ use crate::{ control_file::{FileStorage, Storage}, pull_timeline::{create_temp_timeline_dir, load_temp_timeline, validate_temp_timeline}, state::TimelinePersistentState, - timeline::{Timeline, TimelineError}, + timeline::{FullAccessTimeline, Timeline, TimelineError}, wal_backup::copy_s3_segments, wal_storage::{wal_file_paths, WalReader}, - GlobalTimelines, SafeKeeperConf, + GlobalTimelines, }; // we don't want to have more than 10 segments on disk after copy, because they take space @@ -46,12 +46,14 @@ pub async fn handle_request(request: Request) -> Result<()> { } } + let source_tli = request.source.full_access_guard().await?; + let conf = &GlobalTimelines::get_global_config(); let ttid = request.destination_ttid; let (_tmp_dir, tli_dir_path) = create_temp_timeline_dir(conf, ttid).await?; - let (mem_state, state) = request.source.get_state().await; + let (mem_state, state) = source_tli.get_state().await; let start_lsn = state.timeline_start_lsn; if start_lsn == Lsn::INVALID { bail!("timeline is not initialized"); @@ -60,7 +62,7 @@ pub async fn handle_request(request: Request) -> Result<()> { { let commit_lsn = mem_state.commit_lsn; - let flush_lsn = request.source.get_flush_lsn().await; + let flush_lsn = source_tli.get_flush_lsn().await; info!( "collected info about source timeline: start_lsn={}, backup_lsn={}, commit_lsn={}, flush_lsn={}", @@ -127,10 +129,8 @@ pub async fn handle_request(request: Request) -> Result<()> { .await?; copy_disk_segments( - conf, - 
&state, + &source_tli, wal_seg_size, - &request.source.ttid, new_backup_lsn, request.until_lsn, &tli_dir_path, @@ -159,21 +159,13 @@ pub async fn handle_request(request: Request) -> Result<()> { } async fn copy_disk_segments( - conf: &SafeKeeperConf, - persisted_state: &TimelinePersistentState, + tli: &FullAccessTimeline, wal_seg_size: usize, - source_ttid: &TenantTimelineId, start_lsn: Lsn, end_lsn: Lsn, tli_dir_path: &Utf8PathBuf, ) -> Result<()> { - let mut wal_reader = WalReader::new( - conf.workdir.clone(), - conf.timeline_dir(source_ttid), - persisted_state, - start_lsn, - true, - )?; + let mut wal_reader = tli.get_walreader(start_lsn).await?; let mut buf = [0u8; MAX_SEND_SIZE]; diff --git a/safekeeper/src/debug_dump.rs b/safekeeper/src/debug_dump.rs index b50f2e1158..062ff4b3db 100644 --- a/safekeeper/src/debug_dump.rs +++ b/safekeeper/src/debug_dump.rs @@ -10,6 +10,7 @@ use std::sync::Arc; use anyhow::bail; use anyhow::Result; use camino::Utf8Path; +use camino::Utf8PathBuf; use chrono::{DateTime, Utc}; use postgres_ffi::XLogSegNo; use postgres_ffi::MAX_SEND_SIZE; @@ -26,7 +27,8 @@ use crate::safekeeper::TermHistory; use crate::send_wal::WalSenderState; use crate::state::TimelineMemState; use crate::state::TimelinePersistentState; -use crate::wal_storage::WalReader; +use crate::timeline::get_timeline_dir; +use crate::timeline::FullAccessTimeline; use crate::GlobalTimelines; use crate::SafeKeeperConf; @@ -68,6 +70,7 @@ pub struct Response { pub struct TimelineDumpSer { pub tli: Arc, pub args: Args, + pub timeline_dir: Utf8PathBuf, pub runtime: Arc, } @@ -85,14 +88,20 @@ impl Serialize for TimelineDumpSer { where S: serde::Serializer, { - let dump = self - .runtime - .block_on(build_from_tli_dump(self.tli.clone(), self.args.clone())); + let dump = self.runtime.block_on(build_from_tli_dump( + &self.tli, + &self.args, + &self.timeline_dir, + )); dump.serialize(serializer) } } -async fn build_from_tli_dump(timeline: Arc, args: Args) -> Timeline { +async fn build_from_tli_dump( + timeline: &Arc, + args: &Args, + timeline_dir: &Utf8Path, +) -> Timeline { let control_file = if args.dump_control_file { let mut state = timeline.get_state().await.1; if !args.dump_term_history { @@ -112,7 +121,8 @@ async fn build_from_tli_dump(timeline: Arc, args: Arg let disk_content = if args.dump_disk_content { // build_disk_content can fail, but we don't want to fail the whole // request because of that. - build_disk_content(&timeline.timeline_dir).ok() + // Note: timeline can be in offloaded state, this is not a problem. 
+ build_disk_content(timeline_dir).ok() } else { None }; @@ -186,6 +196,7 @@ pub struct FileInfo { pub async fn build(args: Args) -> Result { let start_time = Utc::now(); let timelines_count = GlobalTimelines::timelines_count(); + let config = GlobalTimelines::get_global_config(); let ptrs_snapshot = if args.tenant_id.is_some() && args.timeline_id.is_some() { // If both tenant_id and timeline_id are specified, we can just get the @@ -223,12 +234,11 @@ pub async fn build(args: Args) -> Result { timelines.push(TimelineDumpSer { tli, args: args.clone(), + timeline_dir: get_timeline_dir(&config, &ttid), runtime: runtime.clone(), }); } - let config = GlobalTimelines::get_global_config(); - Ok(Response { start_time, finish_time: Utc::now(), @@ -316,27 +326,19 @@ pub struct TimelineDigest { } pub async fn calculate_digest( - tli: &Arc, + tli: &FullAccessTimeline, request: TimelineDigestRequest, ) -> Result { if request.from_lsn > request.until_lsn { bail!("from_lsn is greater than until_lsn"); } - let conf = GlobalTimelines::get_global_config(); let (_, persisted_state) = tli.get_state().await; - if persisted_state.timeline_start_lsn > request.from_lsn { bail!("requested LSN is before the start of the timeline"); } - let mut wal_reader = WalReader::new( - conf.workdir.clone(), - tli.timeline_dir.clone(), - &persisted_state, - request.from_lsn, - true, - )?; + let mut wal_reader = tli.get_walreader(request.from_lsn).await?; let mut hasher = Sha256::new(); let mut buf = [0u8; MAX_SEND_SIZE]; diff --git a/safekeeper/src/http/routes.rs b/safekeeper/src/http/routes.rs index 593e102e35..1e29b21fac 100644 --- a/safekeeper/src/http/routes.rs +++ b/safekeeper/src/http/routes.rs @@ -249,6 +249,10 @@ async fn timeline_digest_handler(request: Request) -> Result) -> Result let filename: String = parse_request_param(&request, "filename")?; let tli = GlobalTimelines::get(ttid).map_err(ApiError::from)?; + let tli = tli + .full_access_guard() + .await + .map_err(ApiError::InternalServerError)?; - let filepath = tli.timeline_dir.join(filename); + let filepath = tli.get_timeline_dir().join(filename); let mut file = File::open(&filepath) .await .map_err(|e| ApiError::InternalServerError(e.into()))?; @@ -287,7 +295,7 @@ async fn timeline_files_handler(request: Request) -> Result .map_err(|e| ApiError::InternalServerError(e.into())) } -/// Force persist control file and remove old WAL. +/// Force persist control file. async fn timeline_checkpoint_handler(request: Request) -> Result, ApiError> { check_permission(&request, None)?; @@ -297,13 +305,13 @@ async fn timeline_checkpoint_handler(request: Request) -> Result( async fn prepare_safekeeper( ttid: TenantTimelineId, pg_version: u32, -) -> anyhow::Result> { - GlobalTimelines::create( +) -> anyhow::Result { + let tli = GlobalTimelines::create( ttid, ServerInfo { pg_version, @@ -115,10 +113,16 @@ async fn prepare_safekeeper( Lsn::INVALID, Lsn::INVALID, ) - .await + .await?; + + tli.full_access_guard().await } -async fn send_proposer_elected(tli: &Arc, term: Term, lsn: Lsn) -> anyhow::Result<()> { +async fn send_proposer_elected( + tli: &FullAccessTimeline, + term: Term, + lsn: Lsn, +) -> anyhow::Result<()> { // add new term to existing history let history = tli.get_state().await.1.acceptor_state.term_history; let history = history.up_to(lsn.checked_sub(1u64).unwrap()); @@ -147,7 +151,7 @@ pub struct InsertedWAL { /// Extend local WAL with new LogicalMessage record. To do that, /// create AppendRequest with new WAL and pass it to safekeeper. 
pub async fn append_logical_message( - tli: &Arc, + tli: &FullAccessTimeline, msg: &AppendLogicalMessage, ) -> anyhow::Result { let wal_data = encode_logical_message(&msg.lm_prefix, &msg.lm_message); diff --git a/safekeeper/src/lib.rs b/safekeeper/src/lib.rs index 8d8d2cf23e..1a56ff736c 100644 --- a/safekeeper/src/lib.rs +++ b/safekeeper/src/lib.rs @@ -7,10 +7,7 @@ use tokio::runtime::Runtime; use std::time::Duration; use storage_broker::Uri; -use utils::{ - auth::SwappableJwtAuth, - id::{NodeId, TenantId, TenantTimelineId}, -}; +use utils::{auth::SwappableJwtAuth, id::NodeId}; mod auth; pub mod broker; @@ -89,15 +86,6 @@ pub struct SafeKeeperConf { } impl SafeKeeperConf { - pub fn tenant_dir(&self, tenant_id: &TenantId) -> Utf8PathBuf { - self.workdir.join(tenant_id.to_string()) - } - - pub fn timeline_dir(&self, ttid: &TenantTimelineId) -> Utf8PathBuf { - self.tenant_dir(&ttid.tenant_id) - .join(ttid.timeline_id.to_string()) - } - pub fn is_wal_backup_enabled(&self) -> bool { self.remote_storage.is_some() && self.wal_backup_enabled } diff --git a/safekeeper/src/pull_timeline.rs b/safekeeper/src/pull_timeline.rs index f7cc40f58a..7b41c98cb8 100644 --- a/safekeeper/src/pull_timeline.rs +++ b/safekeeper/src/pull_timeline.rs @@ -17,7 +17,7 @@ use utils::{ use crate::{ control_file, debug_dump, http::routes::TimelineStatus, - timeline::{Timeline, TimelineError}, + timeline::{get_tenant_dir, get_timeline_dir, Timeline, TimelineError}, wal_storage::{self, Storage}, GlobalTimelines, SafeKeeperConf, }; @@ -283,13 +283,13 @@ pub async fn load_temp_timeline( } // Move timeline dir to the correct location - let timeline_path = conf.timeline_dir(&ttid); + let timeline_path = get_timeline_dir(conf, &ttid); info!( "moving timeline {} from {} to {}", ttid, tmp_path, timeline_path ); - tokio::fs::create_dir_all(conf.tenant_dir(&ttid.tenant_id)).await?; + tokio::fs::create_dir_all(get_tenant_dir(conf, &ttid.tenant_id)).await?; tokio::fs::rename(tmp_path, &timeline_path).await?; let tli = GlobalTimelines::load_timeline(&guard, ttid) diff --git a/safekeeper/src/receive_wal.rs b/safekeeper/src/receive_wal.rs index 03cfa882c4..7943a2fd86 100644 --- a/safekeeper/src/receive_wal.rs +++ b/safekeeper/src/receive_wal.rs @@ -6,7 +6,7 @@ use crate::handler::SafekeeperPostgresHandler; use crate::safekeeper::AcceptorProposerMessage; use crate::safekeeper::ProposerAcceptorMessage; use crate::safekeeper::ServerInfo; -use crate::timeline::Timeline; +use crate::timeline::FullAccessTimeline; use crate::wal_service::ConnectionId; use crate::GlobalTimelines; use anyhow::{anyhow, Context}; @@ -213,7 +213,7 @@ impl SafekeeperPostgresHandler { &mut self, pgb: &mut PostgresBackend, ) -> Result<(), QueryError> { - let mut tli: Option> = None; + let mut tli: Option = None; if let Err(end) = self.handle_start_wal_push_guts(pgb, &mut tli).await { // Log the result and probably send it to the client, closing the stream. 
let handle_end_fut = pgb.handle_copy_stream_end(end); @@ -233,7 +233,7 @@ impl SafekeeperPostgresHandler { pub async fn handle_start_wal_push_guts( &mut self, pgb: &mut PostgresBackend, - tli: &mut Option>, + tli: &mut Option, ) -> Result<(), CopyStreamHandlerEnd> { // Notify the libpq client that it's allowed to send `CopyData` messages pgb.write_message(&BeMessage::CopyBothResponse).await?; @@ -323,7 +323,7 @@ struct NetworkReader<'a, IO> { impl<'a, IO: AsyncRead + AsyncWrite + Unpin> NetworkReader<'a, IO> { async fn read_first_message( &mut self, - ) -> Result<(Arc, ProposerAcceptorMessage), CopyStreamHandlerEnd> { + ) -> Result<(FullAccessTimeline, ProposerAcceptorMessage), CopyStreamHandlerEnd> { // Receive information about server to create timeline, if not yet. let next_msg = read_message(self.pgb_reader).await?; let tli = match next_msg { @@ -337,7 +337,10 @@ impl<'a, IO: AsyncRead + AsyncWrite + Unpin> NetworkReader<'a, IO> { system_id: greeting.system_id, wal_seg_size: greeting.wal_seg_size, }; - GlobalTimelines::create(self.ttid, server_info, Lsn::INVALID, Lsn::INVALID).await? + let tli = + GlobalTimelines::create(self.ttid, server_info, Lsn::INVALID, Lsn::INVALID) + .await?; + tli.full_access_guard().await? } _ => { return Err(CopyStreamHandlerEnd::Other(anyhow::anyhow!( @@ -353,7 +356,7 @@ impl<'a, IO: AsyncRead + AsyncWrite + Unpin> NetworkReader<'a, IO> { msg_tx: Sender, msg_rx: Receiver, reply_tx: Sender, - tli: Arc, + tli: FullAccessTimeline, next_msg: ProposerAcceptorMessage, ) -> Result<(), CopyStreamHandlerEnd> { *self.acceptor_handle = Some(WalAcceptor::spawn( @@ -448,7 +451,7 @@ const KEEPALIVE_INTERVAL: Duration = Duration::from_secs(1); /// replies to reply_tx; reading from socket and writing to disk in parallel is /// beneficial for performance, this struct provides writing to disk part. pub struct WalAcceptor { - tli: Arc, + tli: FullAccessTimeline, msg_rx: Receiver, reply_tx: Sender, conn_id: Option, @@ -461,7 +464,7 @@ impl WalAcceptor { /// /// conn_id None means WalAcceptor is used by recovery initiated at this safekeeper. pub fn spawn( - tli: Arc, + tli: FullAccessTimeline, msg_rx: Receiver, reply_tx: Sender, conn_id: Option, diff --git a/safekeeper/src/recovery.rs b/safekeeper/src/recovery.rs index 568a512c4a..80a630b1e1 100644 --- a/safekeeper/src/recovery.rs +++ b/safekeeper/src/recovery.rs @@ -2,7 +2,7 @@ //! provide it, i.e. safekeeper lags too much. use std::time::SystemTime; -use std::{fmt, pin::pin, sync::Arc}; +use std::{fmt, pin::pin}; use anyhow::{bail, Context}; use futures::StreamExt; @@ -21,6 +21,7 @@ use utils::{id::NodeId, lsn::Lsn, postgres_client::wal_stream_connection_config} use crate::receive_wal::{WalAcceptor, REPLY_QUEUE_SIZE}; use crate::safekeeper::{AppendRequest, AppendRequestHeader}; +use crate::timeline::FullAccessTimeline; use crate::{ http::routes::TimelineStatus, receive_wal::MSG_QUEUE_SIZE, @@ -28,14 +29,14 @@ use crate::{ AcceptorProposerMessage, ProposerAcceptorMessage, ProposerElected, Term, TermHistory, TermLsn, VoteRequest, }, - timeline::{PeerInfo, Timeline}, + timeline::PeerInfo, SafeKeeperConf, }; /// Entrypoint for per timeline task which always runs, checking whether /// recovery for this safekeeper is needed and starting it if so. 
#[instrument(name = "recovery task", skip_all, fields(ttid = %tli.ttid))] -pub async fn recovery_main(tli: Arc, conf: SafeKeeperConf) { +pub async fn recovery_main(tli: FullAccessTimeline, conf: SafeKeeperConf) { info!("started"); let cancel = tli.cancel.clone(); @@ -47,6 +48,87 @@ pub async fn recovery_main(tli: Arc, conf: SafeKeeperConf) { } } +/// Should we start fetching WAL from a peer safekeeper, and if yes, from +/// which? Answer is yes, i.e. .donors is not empty if 1) there is something +/// to fetch, and we can do that without running elections; 2) there is no +/// actively streaming compute, as we don't want to compete with it. +/// +/// If donor(s) are choosen, theirs last_log_term is guaranteed to be equal +/// to its last_log_term so we are sure such a leader ever had been elected. +/// +/// All possible donors are returned so that we could keep connection to the +/// current one if it is good even if it slightly lags behind. +/// +/// Note that term conditions above might be not met, but safekeepers are +/// still not aligned on last flush_lsn. Generally in this case until +/// elections are run it is not possible to say which safekeeper should +/// recover from which one -- history which would be committed is different +/// depending on assembled quorum (e.g. classic picture 8 from Raft paper). +/// Thus we don't try to predict it here. +async fn recovery_needed( + tli: &FullAccessTimeline, + heartbeat_timeout: Duration, +) -> RecoveryNeededInfo { + let ss = tli.read_shared_state().await; + let term = ss.sk.state.acceptor_state.term; + let last_log_term = ss.sk.get_last_log_term(); + let flush_lsn = ss.sk.flush_lsn(); + // note that peers contain myself, but that's ok -- we are interested only in peers which are strictly ahead of us. + let mut peers = ss.get_peers(heartbeat_timeout); + // Sort by pairs. + peers.sort_by(|p1, p2| { + let tl1 = TermLsn { + term: p1.last_log_term, + lsn: p1.flush_lsn, + }; + let tl2 = TermLsn { + term: p2.last_log_term, + lsn: p2.flush_lsn, + }; + tl2.cmp(&tl1) // desc + }); + let num_streaming_computes = tli.get_walreceivers().get_num_streaming(); + let donors = if num_streaming_computes > 0 { + vec![] // If there is a streaming compute, don't try to recover to not intervene. + } else { + peers + .iter() + .filter_map(|candidate| { + // Are we interested in this candidate? + let candidate_tl = TermLsn { + term: candidate.last_log_term, + lsn: candidate.flush_lsn, + }; + let my_tl = TermLsn { + term: last_log_term, + lsn: flush_lsn, + }; + if my_tl < candidate_tl { + // Yes, we are interested. Can we pull from it without + // (re)running elections? It is possible if 1) his term + // is equal to his last_log_term so we could act on + // behalf of leader of this term (we must be sure he was + // ever elected) and 2) our term is not higher, or we'll refuse data. + if candidate.term == candidate.last_log_term && candidate.term >= term { + Some(Donor::from(candidate)) + } else { + None + } + } else { + None + } + }) + .collect() + }; + RecoveryNeededInfo { + term, + last_log_term, + flush_lsn, + peers, + num_streaming_computes, + donors, + } +} /// Result of Timeline::recovery_needed, contains donor(s) if recovery needed and /// fields to explain the choice. #[derive(Debug)] @@ -113,10 +195,10 @@ impl From<&PeerInfo> for Donor { const CHECK_INTERVAL_MS: u64 = 2000; /// Check regularly whether we need to start recovery. 
-async fn recovery_main_loop(tli: Arc, conf: SafeKeeperConf) { +async fn recovery_main_loop(tli: FullAccessTimeline, conf: SafeKeeperConf) { let check_duration = Duration::from_millis(CHECK_INTERVAL_MS); loop { - let recovery_needed_info = tli.recovery_needed(conf.heartbeat_timeout).await; + let recovery_needed_info = recovery_needed(&tli, conf.heartbeat_timeout).await; match recovery_needed_info.donors.first() { Some(donor) => { info!( @@ -146,7 +228,7 @@ async fn recovery_main_loop(tli: Arc, conf: SafeKeeperConf) { /// Recover from the specified donor. Returns message explaining normal finish /// reason or error. async fn recover( - tli: Arc, + tli: FullAccessTimeline, donor: &Donor, conf: &SafeKeeperConf, ) -> anyhow::Result { @@ -232,7 +314,7 @@ async fn recover( // Pull WAL from donor, assuming handshake is already done. async fn recovery_stream( - tli: Arc, + tli: FullAccessTimeline, donor: &Donor, start_streaming_at: Lsn, conf: &SafeKeeperConf, @@ -316,7 +398,7 @@ async fn network_io( physical_stream: ReplicationStream, msg_tx: Sender, donor: Donor, - tli: Arc, + tli: FullAccessTimeline, conf: SafeKeeperConf, ) -> anyhow::Result> { let mut physical_stream = pin!(physical_stream); @@ -365,7 +447,7 @@ async fn network_io( } ReplicationMessage::PrimaryKeepAlive(_) => { // keepalive means nothing is being streamed for a while. Check whether we need to stop. - let recovery_needed_info = tli.recovery_needed(conf.heartbeat_timeout).await; + let recovery_needed_info = recovery_needed(&tli, conf.heartbeat_timeout).await; // do current donors still contain one we currently connected to? if !recovery_needed_info .donors diff --git a/safekeeper/src/remove_wal.rs b/safekeeper/src/remove_wal.rs index 3400eee9b7..b661e48cb5 100644 --- a/safekeeper/src/remove_wal.rs +++ b/safekeeper/src/remove_wal.rs @@ -1,41 +1,25 @@ -//! Thread removing old WAL. +use utils::lsn::Lsn; -use std::time::Duration; +use crate::timeline_manager::StateSnapshot; -use tokio::time::sleep; -use tracing::*; +/// Get oldest LSN we still need to keep. We hold WAL till it is consumed +/// by all of 1) pageserver (remote_consistent_lsn) 2) peers 3) s3 +/// offloading. +/// While it is safe to use inmem values for determining horizon, +/// we use persistent to make possible normal states less surprising. +/// All segments covering LSNs before horizon_lsn can be removed. 
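+/// For example, with persisted remote_consistent_lsn = 0/5000000,
+/// peer_horizon_lsn = 0/4000000 and backup_lsn = 0/3000000 and no extra
+/// horizon, the result is 0/3000000 (the minimum of the three); passing
+/// Some(0/2000000) as extra_horizon_lsn would lower it further to 0/2000000.
+/// (Illustrative values only.)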
+pub fn calc_horizon_lsn(state: &StateSnapshot, extra_horizon_lsn: Option) -> Lsn { + use std::cmp::min; -use crate::{GlobalTimelines, SafeKeeperConf}; - -pub async fn task_main(_conf: SafeKeeperConf) -> anyhow::Result<()> { - let wal_removal_interval = Duration::from_millis(5000); - loop { - let now = tokio::time::Instant::now(); - let tlis = GlobalTimelines::get_all(); - for tli in &tlis { - let ttid = tli.ttid; - async { - if let Err(e) = tli.maybe_persist_control_file(false).await { - warn!("failed to persist control file: {e}"); - } - if let Err(e) = tli.remove_old_wal().await { - error!("failed to remove WAL: {}", e); - } - } - .instrument(info_span!("WAL removal", ttid = %ttid)) - .await; - } - - let elapsed = now.elapsed(); - let total_timelines = tlis.len(); - - if elapsed > wal_removal_interval { - info!( - "WAL removal is too long, processed {} timelines in {:?}", - total_timelines, elapsed - ); - } - - sleep(wal_removal_interval).await; + let mut horizon_lsn = min( + state.cfile_remote_consistent_lsn, + state.cfile_peer_horizon_lsn, + ); + // we don't want to remove WAL that is not yet offloaded to s3 + horizon_lsn = min(horizon_lsn, state.cfile_backup_lsn); + if let Some(extra_horizon_lsn) = extra_horizon_lsn { + horizon_lsn = min(horizon_lsn, extra_horizon_lsn); } + + horizon_lsn } diff --git a/safekeeper/src/safekeeper.rs b/safekeeper/src/safekeeper.rs index 4686c9aa8e..563dbbe315 100644 --- a/safekeeper/src/safekeeper.rs +++ b/safekeeper/src/safekeeper.rs @@ -10,7 +10,6 @@ use std::cmp::max; use std::cmp::min; use std::fmt; use std::io::Read; -use std::time::Duration; use storage_broker::proto::SafekeeperTimelineInfo; use tracing::*; @@ -828,24 +827,6 @@ where Ok(()) } - /// Persist control file if there is something to save and enough time - /// passed after the last save. - pub async fn maybe_persist_inmem_control_file(&mut self, force: bool) -> Result { - const CF_SAVE_INTERVAL: Duration = Duration::from_secs(300); - if !force && self.state.pers.last_persist_at().elapsed() < CF_SAVE_INTERVAL { - return Ok(false); - } - let need_persist = self.state.inmem.commit_lsn > self.state.commit_lsn - || self.state.inmem.backup_lsn > self.state.backup_lsn - || self.state.inmem.peer_horizon_lsn > self.state.peer_horizon_lsn - || self.state.inmem.remote_consistent_lsn > self.state.remote_consistent_lsn; - if need_persist { - self.state.flush().await?; - trace!("saved control file: {CF_SAVE_INTERVAL:?} passed"); - } - Ok(need_persist) - } - /// Handle request to append WAL. 
#[allow(clippy::comparison_chain)] async fn handle_append_request( diff --git a/safekeeper/src/send_wal.rs b/safekeeper/src/send_wal.rs index 5a9745e1c9..df75893838 100644 --- a/safekeeper/src/send_wal.rs +++ b/safekeeper/src/send_wal.rs @@ -5,7 +5,7 @@ use crate::handler::SafekeeperPostgresHandler; use crate::metrics::RECEIVED_PS_FEEDBACKS; use crate::receive_wal::WalReceivers; use crate::safekeeper::{Term, TermLsn}; -use crate::timeline::Timeline; +use crate::timeline::FullAccessTimeline; use crate::wal_service::ConnectionId; use crate::wal_storage::WalReader; use crate::GlobalTimelines; @@ -387,8 +387,10 @@ impl SafekeeperPostgresHandler { term: Option, ) -> Result<(), QueryError> { let tli = GlobalTimelines::get(self.ttid).map_err(|e| QueryError::Other(e.into()))?; + let full_access = tli.full_access_guard().await?; + if let Err(end) = self - .handle_start_replication_guts(pgb, start_pos, term, tli.clone()) + .handle_start_replication_guts(pgb, start_pos, term, full_access) .await { let info = tli.get_safekeeper_info(&self.conf).await; @@ -405,7 +407,7 @@ impl SafekeeperPostgresHandler { pgb: &mut PostgresBackend, start_pos: Lsn, term: Option, - tli: Arc, + tli: FullAccessTimeline, ) -> Result<(), CopyStreamHandlerEnd> { let appname = self.appname.clone(); @@ -448,14 +450,7 @@ impl SafekeeperPostgresHandler { // switch to copy pgb.write_message(&BeMessage::CopyBothResponse).await?; - let (_, persisted_state) = tli.get_state().await; - let wal_reader = WalReader::new( - self.conf.workdir.clone(), - self.conf.timeline_dir(&tli.ttid), - &persisted_state, - start_pos, - self.conf.is_wal_backup_enabled(), - )?; + let wal_reader = tli.get_walreader(start_pos).await?; // Split to concurrently receive and send data; replies are generally // not synchronized with sends, so this avoids deadlocks. @@ -532,7 +527,7 @@ impl EndWatch { /// A half driving sending WAL. struct WalSender<'a, IO> { pgb: &'a mut PostgresBackend, - tli: Arc, + tli: FullAccessTimeline, appname: Option, // Position since which we are sending next chunk. 
start_pos: Lsn, @@ -741,7 +736,7 @@ impl WalSender<'_, IO> { struct ReplyReader { reader: PostgresBackendReader, ws_guard: Arc, - tli: Arc, + tli: FullAccessTimeline, } impl ReplyReader { diff --git a/safekeeper/src/timeline.rs b/safekeeper/src/timeline.rs index aa9ccfc21e..148a7e90bd 100644 --- a/safekeeper/src/timeline.rs +++ b/safekeeper/src/timeline.rs @@ -3,14 +3,14 @@ use anyhow::{anyhow, bail, Result}; use camino::Utf8PathBuf; -use postgres_ffi::XLogSegNo; use serde::{Deserialize, Serialize}; use tokio::fs; use tokio_util::sync::CancellationToken; +use utils::id::TenantId; use std::cmp::max; use std::ops::{Deref, DerefMut}; -use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; use std::sync::Arc; use std::time::Duration; use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}; @@ -26,7 +26,6 @@ use storage_broker::proto::SafekeeperTimelineInfo; use storage_broker::proto::TenantTimelineId as ProtoTenantTimelineId; use crate::receive_wal::WalReceivers; -use crate::recovery::{recovery_main, Donor, RecoveryNeededInfo}; use crate::safekeeper::{ AcceptorProposerMessage, ProposerAcceptorMessage, SafeKeeper, ServerInfo, Term, TermLsn, INVALID_TERM, @@ -38,8 +37,8 @@ use crate::wal_backup::{self}; use crate::{control_file, safekeeper::UNKNOWN_SERVER_VERSION}; use crate::metrics::FullTimelineInfo; -use crate::wal_storage::Storage as wal_storage_iface; -use crate::{debug_dump, timeline_manager, wal_backup_partial, wal_storage}; +use crate::wal_storage::{Storage as wal_storage_iface, WalReader}; +use crate::{debug_dump, timeline_manager, wal_storage}; use crate::{GlobalTimelines, SafeKeeperConf}; /// Things safekeeper should know about timeline state on peers. @@ -169,7 +168,6 @@ pub struct SharedState { pub(crate) sk: SafeKeeper, /// In memory list containing state of peers sent in latest messages from them. pub(crate) peers_info: PeersInfo, - pub(crate) last_removed_segno: XLogSegNo, } impl SharedState { @@ -197,33 +195,33 @@ impl SharedState { // We don't want to write anything to disk, because we may have existing timeline there. // These functions should not change anything on disk. - let timeline_dir = conf.timeline_dir(ttid); - let control_store = control_file::FileStorage::create_new(timeline_dir, conf, state)?; + let timeline_dir = get_timeline_dir(conf, ttid); + let control_store = + control_file::FileStorage::create_new(timeline_dir.clone(), conf, state)?; let wal_store = - wal_storage::PhysicalStorage::new(ttid, conf.timeline_dir(ttid), conf, &control_store)?; + wal_storage::PhysicalStorage::new(ttid, timeline_dir, conf, &control_store)?; let sk = SafeKeeper::new(control_store, wal_store, conf.my_id)?; Ok(Self { sk, peers_info: PeersInfo(vec![]), - last_removed_segno: 0, }) } /// Restore SharedState from control file. If file doesn't exist, bails out. 
fn restore(conf: &SafeKeeperConf, ttid: &TenantTimelineId) -> Result { + let timeline_dir = get_timeline_dir(conf, ttid); let control_store = control_file::FileStorage::restore_new(ttid, conf)?; if control_store.server.wal_seg_size == 0 { bail!(TimelineError::UninitializedWalSegSize(*ttid)); } let wal_store = - wal_storage::PhysicalStorage::new(ttid, conf.timeline_dir(ttid), conf, &control_store)?; + wal_storage::PhysicalStorage::new(ttid, timeline_dir, conf, &control_store)?; Ok(Self { sk: SafeKeeper::new(control_store, wal_store, conf.my_id)?, peers_info: PeersInfo(vec![]), - last_removed_segno: 0, }) } @@ -275,24 +273,6 @@ impl SharedState { .cloned() .collect() } - - /// Get oldest segno we still need to keep. We hold WAL till it is consumed - /// by all of 1) pageserver (remote_consistent_lsn) 2) peers 3) s3 - /// offloading. - /// While it is safe to use inmem values for determining horizon, - /// we use persistent to make possible normal states less surprising. - fn get_horizon_segno(&self, extra_horizon_lsn: Option) -> XLogSegNo { - let state = &self.sk.state; - - use std::cmp::min; - let mut horizon_lsn = min(state.remote_consistent_lsn, state.peer_horizon_lsn); - // we don't want to remove WAL that is not yet offloaded to s3 - horizon_lsn = min(horizon_lsn, state.backup_lsn); - if let Some(extra_horizon_lsn) = extra_horizon_lsn { - horizon_lsn = min(horizon_lsn, extra_horizon_lsn); - } - horizon_lsn.segment_number(state.server.wal_seg_size as usize) - } } #[derive(Debug, thiserror::Error)] @@ -349,22 +329,15 @@ pub struct Timeline { mutex: RwLock, walsenders: Arc, walreceivers: Arc, + timeline_dir: Utf8PathBuf, /// Delete/cancel will trigger this, background tasks should drop out as soon as it fires pub(crate) cancel: CancellationToken, - /// Directory where timeline state is stored. - pub timeline_dir: Utf8PathBuf, - - /// Should we keep WAL on disk for active replication connections. - /// Especially useful for sharding, when different shards process WAL - /// with different speed. - // TODO: add `Arc` here instead of adding each field separately. - walsenders_keep_horizon: bool, - // timeline_manager controlled state pub(crate) broker_active: AtomicBool, pub(crate) wal_backup_active: AtomicBool, + pub(crate) last_removed_segno: AtomicU64, } impl Timeline { @@ -394,10 +367,10 @@ impl Timeline { walsenders: WalSenders::new(walreceivers.clone()), walreceivers, cancel: CancellationToken::default(), - timeline_dir: conf.timeline_dir(&ttid), - walsenders_keep_horizon: conf.walsenders_keep_horizon, + timeline_dir: get_timeline_dir(conf, &ttid), broker_active: AtomicBool::new(false), wal_backup_active: AtomicBool::new(false), + last_removed_segno: AtomicU64::new(0), }) } @@ -430,10 +403,10 @@ impl Timeline { walsenders: WalSenders::new(walreceivers.clone()), walreceivers, cancel: CancellationToken::default(), - timeline_dir: conf.timeline_dir(&ttid), - walsenders_keep_horizon: conf.walsenders_keep_horizon, + timeline_dir: get_timeline_dir(conf, &ttid), broker_active: AtomicBool::new(false), wal_backup_active: AtomicBool::new(false), + last_removed_segno: AtomicU64::new(0), }) } @@ -494,15 +467,6 @@ impl Timeline { conf.clone(), broker_active_set, )); - - // Start recovery task which always runs on the timeline. 
- if conf.peer_recovery_enabled { - tokio::spawn(recovery_main(self.clone(), conf.clone())); - } - // TODO: migrate to timeline_manager - if conf.is_wal_backup_enabled() && conf.partial_backup_enabled { - tokio::spawn(wal_backup_partial::main_task(self.clone(), conf.clone())); - } } /// Delete timeline from disk completely, by removing timeline directory. @@ -555,36 +519,6 @@ impl Timeline { self.mutex.read().await } - /// Returns true if walsender should stop sending WAL to pageserver. We - /// terminate it if remote_consistent_lsn reached commit_lsn and there is no - /// computes. While there might be nothing to stream already, we learn about - /// remote_consistent_lsn update through replication feedback, and we want - /// to stop pushing to the broker if pageserver is fully caughtup. - pub async fn should_walsender_stop(&self, reported_remote_consistent_lsn: Lsn) -> bool { - if self.is_cancelled() { - return true; - } - let shared_state = self.read_shared_state().await; - if self.walreceivers.get_num() == 0 { - return shared_state.sk.state.inmem.commit_lsn == Lsn(0) || // no data at all yet - reported_remote_consistent_lsn >= shared_state.sk.state.inmem.commit_lsn; - } - false - } - - /// Ensure that current term is t, erroring otherwise, and lock the state. - pub async fn acquire_term(&self, t: Term) -> Result { - let ss = self.read_shared_state().await; - if ss.sk.state.acceptor_state.term != t { - bail!( - "failed to acquire term {}, current term {}", - t, - ss.sk.state.acceptor_state.term - ); - } - Ok(ss) - } - /// Returns commit_lsn watch channel. pub fn get_commit_lsn_watch_rx(&self) -> watch::Receiver { self.commit_lsn_watch_rx.clone() @@ -600,28 +534,6 @@ impl Timeline { self.shared_state_version_rx.clone() } - /// Pass arrived message to the safekeeper. - pub async fn process_msg( - self: &Arc, - msg: &ProposerAcceptorMessage, - ) -> Result> { - if self.is_cancelled() { - bail!(TimelineError::Cancelled(self.ttid)); - } - - let mut rmsg: Option; - { - let mut shared_state = self.write_shared_state().await; - rmsg = shared_state.sk.process_msg(msg).await?; - - // if this is AppendResponse, fill in proper hot standby feedback. - if let Some(AcceptorProposerMessage::AppendResponse(ref mut resp)) = rmsg { - resp.hs_feedback = self.walsenders.get_hotstandby().hs_feedback; - } - } - Ok(rmsg) - } - /// Returns wal_seg_size. pub async fn get_wal_seg_size(&self) -> usize { self.read_shared_state().await.get_wal_seg_size() @@ -672,97 +584,11 @@ impl Timeline { Ok(()) } - /// Update in memory remote consistent lsn. - pub async fn update_remote_consistent_lsn(self: &Arc, candidate: Lsn) { - let mut shared_state = self.write_shared_state().await; - shared_state.sk.state.inmem.remote_consistent_lsn = - max(shared_state.sk.state.inmem.remote_consistent_lsn, candidate); - } - pub async fn get_peers(&self, conf: &SafeKeeperConf) -> Vec { let shared_state = self.read_shared_state().await; shared_state.get_peers(conf.heartbeat_timeout) } - /// Should we start fetching WAL from a peer safekeeper, and if yes, from - /// which? Answer is yes, i.e. .donors is not empty if 1) there is something - /// to fetch, and we can do that without running elections; 2) there is no - /// actively streaming compute, as we don't want to compete with it. - /// - /// If donor(s) are choosen, theirs last_log_term is guaranteed to be equal - /// to its last_log_term so we are sure such a leader ever had been elected. 
- /// - /// All possible donors are returned so that we could keep connection to the - /// current one if it is good even if it slightly lags behind. - /// - /// Note that term conditions above might be not met, but safekeepers are - /// still not aligned on last flush_lsn. Generally in this case until - /// elections are run it is not possible to say which safekeeper should - /// recover from which one -- history which would be committed is different - /// depending on assembled quorum (e.g. classic picture 8 from Raft paper). - /// Thus we don't try to predict it here. - pub async fn recovery_needed(&self, heartbeat_timeout: Duration) -> RecoveryNeededInfo { - let ss = self.read_shared_state().await; - let term = ss.sk.state.acceptor_state.term; - let last_log_term = ss.sk.get_last_log_term(); - let flush_lsn = ss.sk.flush_lsn(); - // note that peers contain myself, but that's ok -- we are interested only in peers which are strictly ahead of us. - let mut peers = ss.get_peers(heartbeat_timeout); - // Sort by pairs. - peers.sort_by(|p1, p2| { - let tl1 = TermLsn { - term: p1.last_log_term, - lsn: p1.flush_lsn, - }; - let tl2 = TermLsn { - term: p2.last_log_term, - lsn: p2.flush_lsn, - }; - tl2.cmp(&tl1) // desc - }); - let num_streaming_computes = self.walreceivers.get_num_streaming(); - let donors = if num_streaming_computes > 0 { - vec![] // If there is a streaming compute, don't try to recover to not intervene. - } else { - peers - .iter() - .filter_map(|candidate| { - // Are we interested in this candidate? - let candidate_tl = TermLsn { - term: candidate.last_log_term, - lsn: candidate.flush_lsn, - }; - let my_tl = TermLsn { - term: last_log_term, - lsn: flush_lsn, - }; - if my_tl < candidate_tl { - // Yes, we are interested. Can we pull from it without - // (re)running elections? It is possible if 1) his term - // is equal to his last_log_term so we could act on - // behalf of leader of this term (we must be sure he was - // ever elected) and 2) our term is not higher, or we'll refuse data. - if candidate.term == candidate.last_log_term && candidate.term >= term { - Some(Donor::from(candidate)) - } else { - None - } - } else { - None - } - }) - .collect() - }; - RecoveryNeededInfo { - term, - last_log_term, - flush_lsn, - peers, - num_streaming_computes, - donors, - } - } - pub fn get_walsenders(&self) -> &Arc { &self.walsenders } @@ -776,58 +602,6 @@ impl Timeline { self.read_shared_state().await.sk.wal_store.flush_lsn() } - /// Delete WAL segments from disk that are no longer needed. This is determined - /// based on pageserver's remote_consistent_lsn and local backup_lsn/peer_lsn. - pub async fn remove_old_wal(self: &Arc) -> Result<()> { - if self.is_cancelled() { - bail!(TimelineError::Cancelled(self.ttid)); - } - - // If enabled, we use LSN of the most lagging walsender as a WAL removal horizon. - // This allows to get better read speed for pageservers that are lagging behind, - // at the cost of keeping more WAL on disk. 
- let replication_horizon_lsn = if self.walsenders_keep_horizon { - self.walsenders.laggard_lsn() - } else { - None - }; - - let horizon_segno: XLogSegNo; - let remover = { - let shared_state = self.read_shared_state().await; - horizon_segno = shared_state.get_horizon_segno(replication_horizon_lsn); - if horizon_segno <= 1 || horizon_segno <= shared_state.last_removed_segno { - return Ok(()); // nothing to do - } - - // release the lock before removing - shared_state.sk.wal_store.remove_up_to(horizon_segno - 1) - }; - - // delete old WAL files - remover.await?; - - // update last_removed_segno - let mut shared_state = self.write_shared_state().await; - if shared_state.last_removed_segno != horizon_segno { - shared_state.last_removed_segno = horizon_segno; - } else { - shared_state.skip_update = true; - } - Ok(()) - } - - /// Persist control file if there is something to save and enough time - /// passed after the last save. This helps to keep remote_consistent_lsn up - /// to date so that storage nodes restart doesn't cause many pageserver -> - /// safekeeper reconnections. - pub async fn maybe_persist_control_file(self: &Arc, force: bool) -> Result<()> { - let mut guard = self.write_shared_state().await; - let changed = guard.sk.maybe_persist_inmem_control_file(force).await?; - guard.skip_update = !changed; - Ok(()) - } - /// Gather timeline data for metrics. pub async fn info_for_metrics(&self) -> Option { if self.is_cancelled() { @@ -843,7 +617,7 @@ impl Timeline { wal_backup_active: self.wal_backup_active.load(Ordering::Relaxed), timeline_is_active: self.broker_active.load(Ordering::Relaxed), num_computes: self.walreceivers.get_num() as u32, - last_removed_segno: state.last_removed_segno, + last_removed_segno: self.last_removed_segno.load(Ordering::Relaxed), epoch_start_lsn: state.sk.term_start_lsn, mem_state: state.sk.state.inmem.clone(), persisted_state: state.sk.state.clone(), @@ -866,7 +640,7 @@ impl Timeline { wal_backup_active: self.wal_backup_active.load(Ordering::Relaxed), active: self.broker_active.load(Ordering::Relaxed), num_computes: self.walreceivers.get_num() as u32, - last_removed_segno: state.last_removed_segno, + last_removed_segno: self.last_removed_segno.load(Ordering::Relaxed), epoch_start_lsn: state.sk.term_start_lsn, mem_state: state.sk.state.inmem.clone(), write_lsn, @@ -889,6 +663,110 @@ impl Timeline { state.sk.state.finish_change(&persistent_state).await?; Ok(res) } + + /// Get the timeline guard for reading/writing WAL files. + /// TODO: if WAL files are not present on disk (evicted), they will be + /// downloaded from S3. Also there will logic for preventing eviction + /// while someone is holding FullAccessTimeline guard. + pub async fn full_access_guard(self: &Arc) -> Result { + if self.is_cancelled() { + bail!(TimelineError::Cancelled(self.ttid)); + } + Ok(FullAccessTimeline { tli: self.clone() }) + } +} + +/// This is a guard that allows to read/write disk timeline state. +/// All tasks that are using the disk should use this guard. +#[derive(Clone)] +pub struct FullAccessTimeline { + pub tli: Arc, +} + +impl Deref for FullAccessTimeline { + type Target = Arc; + + fn deref(&self) -> &Self::Target { + &self.tli + } +} + +impl FullAccessTimeline { + /// Returns true if walsender should stop sending WAL to pageserver. We + /// terminate it if remote_consistent_lsn reached commit_lsn and there is no + /// computes. 
While there might be nothing to stream already, we learn about + /// remote_consistent_lsn update through replication feedback, and we want + /// to stop pushing to the broker if pageserver is fully caughtup. + pub async fn should_walsender_stop(&self, reported_remote_consistent_lsn: Lsn) -> bool { + if self.is_cancelled() { + return true; + } + let shared_state = self.read_shared_state().await; + if self.walreceivers.get_num() == 0 { + return shared_state.sk.state.inmem.commit_lsn == Lsn(0) || // no data at all yet + reported_remote_consistent_lsn >= shared_state.sk.state.inmem.commit_lsn; + } + false + } + + /// Ensure that current term is t, erroring otherwise, and lock the state. + pub async fn acquire_term(&self, t: Term) -> Result { + let ss = self.read_shared_state().await; + if ss.sk.state.acceptor_state.term != t { + bail!( + "failed to acquire term {}, current term {}", + t, + ss.sk.state.acceptor_state.term + ); + } + Ok(ss) + } + + /// Pass arrived message to the safekeeper. + pub async fn process_msg( + &self, + msg: &ProposerAcceptorMessage, + ) -> Result> { + if self.is_cancelled() { + bail!(TimelineError::Cancelled(self.ttid)); + } + + let mut rmsg: Option; + { + let mut shared_state = self.write_shared_state().await; + rmsg = shared_state.sk.process_msg(msg).await?; + + // if this is AppendResponse, fill in proper hot standby feedback. + if let Some(AcceptorProposerMessage::AppendResponse(ref mut resp)) = rmsg { + resp.hs_feedback = self.walsenders.get_hotstandby().hs_feedback; + } + } + Ok(rmsg) + } + + pub async fn get_walreader(&self, start_lsn: Lsn) -> Result { + let (_, persisted_state) = self.get_state().await; + let enable_remote_read = GlobalTimelines::get_global_config().is_wal_backup_enabled(); + + WalReader::new( + &self.ttid, + self.timeline_dir.clone(), + &persisted_state, + start_lsn, + enable_remote_read, + ) + } + + pub fn get_timeline_dir(&self) -> Utf8PathBuf { + self.timeline_dir.clone() + } + + /// Update in memory remote consistent lsn. + pub async fn update_remote_consistent_lsn(&self, candidate: Lsn) { + let mut shared_state = self.write_shared_state().await; + shared_state.sk.state.inmem.remote_consistent_lsn = + max(shared_state.sk.state.inmem.remote_consistent_lsn, candidate); + } } /// Deletes directory and it's contents. Returns false if directory does not exist. @@ -899,3 +777,16 @@ async fn delete_dir(path: &Utf8PathBuf) -> Result { Err(e) => Err(e.into()), } } + +/// Get a path to the tenant directory. If you just need to get a timeline directory, +/// use FullAccessTimeline::get_timeline_dir instead. +pub(crate) fn get_tenant_dir(conf: &SafeKeeperConf, tenant_id: &TenantId) -> Utf8PathBuf { + conf.workdir.join(tenant_id.to_string()) +} + +/// Get a path to the timeline directory. If you need to read WAL files from disk, +/// use FullAccessTimeline::get_timeline_dir instead. This function does not check +/// timeline eviction status and WAL files might not be present on disk. +pub(crate) fn get_timeline_dir(conf: &SafeKeeperConf, ttid: &TenantTimelineId) -> Utf8PathBuf { + get_tenant_dir(conf, &ttid.tenant_id).join(ttid.timeline_id.to_string()) +} diff --git a/safekeeper/src/timeline_manager.rs b/safekeeper/src/timeline_manager.rs index ed544352f9..84862207d5 100644 --- a/safekeeper/src/timeline_manager.rs +++ b/safekeeper/src/timeline_manager.rs @@ -3,23 +3,42 @@ //! It watches for changes in the timeline state and decides when to spawn or kill background tasks. //! 
It also can manage some reactive state, like should the timeline be active for broker pushes or not. -use std::{sync::Arc, time::Duration}; +use std::{ + sync::Arc, + time::{Duration, Instant}, +}; +use postgres_ffi::XLogSegNo; +use tokio::task::{JoinError, JoinHandle}; use tracing::{info, instrument, warn}; use utils::lsn::Lsn; use crate::{ + control_file::Storage, metrics::{MANAGER_ACTIVE_CHANGES, MANAGER_ITERATIONS_TOTAL}, + recovery::recovery_main, + remove_wal::calc_horizon_lsn, + send_wal::WalSenders, timeline::{PeerInfo, ReadGuardSharedState, Timeline}, - timelines_set::TimelinesSet, + timelines_set::{TimelineSetGuard, TimelinesSet}, wal_backup::{self, WalBackupTaskHandle}, - SafeKeeperConf, + wal_backup_partial, SafeKeeperConf, }; pub struct StateSnapshot { + // inmem values pub commit_lsn: Lsn, pub backup_lsn: Lsn, pub remote_consistent_lsn: Lsn, + + // persistent control file values + pub cfile_peer_horizon_lsn: Lsn, + pub cfile_remote_consistent_lsn: Lsn, + pub cfile_backup_lsn: Lsn, + + // misc + pub cfile_last_persist_at: Instant, + pub inmem_flush_pending: bool, pub peers: Vec, } @@ -30,17 +49,34 @@ impl StateSnapshot { commit_lsn: read_guard.sk.state.inmem.commit_lsn, backup_lsn: read_guard.sk.state.inmem.backup_lsn, remote_consistent_lsn: read_guard.sk.state.inmem.remote_consistent_lsn, + cfile_peer_horizon_lsn: read_guard.sk.state.peer_horizon_lsn, + cfile_remote_consistent_lsn: read_guard.sk.state.remote_consistent_lsn, + cfile_backup_lsn: read_guard.sk.state.backup_lsn, + cfile_last_persist_at: read_guard.sk.state.pers.last_persist_at(), + inmem_flush_pending: Self::has_unflushed_inmem_state(&read_guard), peers: read_guard.get_peers(heartbeat_timeout), } } + + fn has_unflushed_inmem_state(read_guard: &ReadGuardSharedState) -> bool { + let state = &read_guard.sk.state; + state.inmem.commit_lsn > state.commit_lsn + || state.inmem.backup_lsn > state.backup_lsn + || state.inmem.peer_horizon_lsn > state.peer_horizon_lsn + || state.inmem.remote_consistent_lsn > state.remote_consistent_lsn + } } /// Control how often the manager task should wake up to check updates. /// There is no need to check for updates more often than this. const REFRESH_INTERVAL: Duration = Duration::from_millis(300); +/// How often to save the control file if the is no other activity. +const CF_SAVE_INTERVAL: Duration = Duration::from_secs(300); + /// This task gets spawned alongside each timeline and is responsible for managing the timeline's /// background tasks. +/// Be careful, this task is not respawned on panic, so it should not panic. 
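+/// The manager owns the per-timeline background work: it spawns the peer
+/// recovery and partial backup tasks on startup (when the corresponding
+/// options are enabled), starts/stops the WAL backup task, schedules periodic
+/// control file saves (see CF_SAVE_INTERVAL), and launches WAL removal up to
+/// the horizon from calc_horizon_lsn, recording progress in last_removed_segno.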
#[instrument(name = "manager", skip_all, fields(ttid = %tli.ttid))] pub async fn main_task( tli: Arc, @@ -55,20 +91,50 @@ pub async fn main_task( } }; - // sets whether timeline is active for broker pushes or not - let mut tli_broker_active = broker_active_set.guard(tli.clone()); - - let ttid = tli.ttid; + // configuration & dependencies let wal_seg_size = tli.get_wal_seg_size().await; let heartbeat_timeout = conf.heartbeat_timeout; - - let mut state_version_rx = tli.get_state_version_rx(); - + let walsenders = tli.get_walsenders(); let walreceivers = tli.get_walreceivers(); + + // current state + let mut state_version_rx = tli.get_state_version_rx(); let mut num_computes_rx = walreceivers.get_num_rx(); + let mut tli_broker_active = broker_active_set.guard(tli.clone()); + let mut last_removed_segno = 0 as XLogSegNo; // list of background tasks let mut backup_task: Option = None; + let mut recovery_task: Option> = None; + let mut partial_backup_task: Option> = None; + let mut wal_removal_task: Option>> = None; + + // Start recovery task which always runs on the timeline. + if conf.peer_recovery_enabled { + match tli.full_access_guard().await { + Ok(tli) => { + recovery_task = Some(tokio::spawn(recovery_main(tli, conf.clone()))); + } + Err(e) => { + warn!("failed to start recovery task: {:?}", e); + } + } + } + + // Start partial backup task which always runs on the timeline. + if conf.is_wal_backup_enabled() && conf.partial_backup_enabled { + match tli.full_access_guard().await { + Ok(tli) => { + partial_backup_task = Some(tokio::spawn(wal_backup_partial::main_task( + tli, + conf.clone(), + ))); + } + Err(e) => { + warn!("failed to start partial backup task: {:?}", e); + } + } + } let last_state = 'outer: loop { MANAGER_ITERATIONS_TOTAL.inc(); @@ -76,47 +142,36 @@ pub async fn main_task( let state_snapshot = StateSnapshot::new(tli.read_shared_state().await, heartbeat_timeout); let num_computes = *num_computes_rx.borrow(); - let is_wal_backup_required = - wal_backup::is_wal_backup_required(wal_seg_size, num_computes, &state_snapshot); + let is_wal_backup_required = update_backup( + &conf, + &tli, + wal_seg_size, + num_computes, + &state_snapshot, + &mut backup_task, + ) + .await; - if conf.is_wal_backup_enabled() { - wal_backup::update_task( - &conf, - ttid, - is_wal_backup_required, - &state_snapshot, - &mut backup_task, - ) - .await; - } + let _is_active = update_is_active( + is_wal_backup_required, + num_computes, + &state_snapshot, + &mut tli_broker_active, + &tli, + ); - let is_active = is_wal_backup_required - || num_computes > 0 - || state_snapshot.remote_consistent_lsn < state_snapshot.commit_lsn; + let next_cfile_save = update_control_file_save(&state_snapshot, &tli).await; - // update the broker timeline set - if tli_broker_active.set(is_active) { - // write log if state has changed - info!( - "timeline active={} now, remote_consistent_lsn={}, commit_lsn={}", - is_active, state_snapshot.remote_consistent_lsn, state_snapshot.commit_lsn, - ); - - MANAGER_ACTIVE_CHANGES.inc(); - - if !is_active { - // TODO: maybe use tokio::spawn? 
- if let Err(e) = tli.maybe_persist_control_file(false).await { - warn!("control file save in update_status failed: {:?}", e); - } - } - } - - // update the state in Arc - tli.wal_backup_active - .store(backup_task.is_some(), std::sync::atomic::Ordering::Relaxed); - tli.broker_active - .store(is_active, std::sync::atomic::Ordering::Relaxed); + update_wal_removal( + &conf, + walsenders, + &tli, + wal_seg_size, + &state_snapshot, + last_removed_segno, + &mut wal_removal_task, + ) + .await; // wait until something changes. tx channels are stored under Arc, so they will not be // dropped until the manager task is finished. @@ -135,11 +190,189 @@ pub async fn main_task( _ = num_computes_rx.changed() => { // number of connected computes was updated } + _ = async { + if let Some(timeout) = next_cfile_save { + tokio::time::sleep_until(timeout).await + } else { + futures::future::pending().await + } + } => { + // it's time to save the control file + } + res = async { + if let Some(task) = &mut wal_removal_task { + task.await + } else { + futures::future::pending().await + } + } => { + // WAL removal task finished + wal_removal_task = None; + update_wal_removal_end(res, &tli, &mut last_removed_segno); + } } }; // shutdown background tasks if conf.is_wal_backup_enabled() { - wal_backup::update_task(&conf, ttid, false, &last_state, &mut backup_task).await; + wal_backup::update_task(&conf, &tli, false, &last_state, &mut backup_task).await; + } + + if let Some(recovery_task) = recovery_task { + if let Err(e) = recovery_task.await { + warn!("recovery task failed: {:?}", e); + } + } + + if let Some(partial_backup_task) = partial_backup_task { + if let Err(e) = partial_backup_task.await { + warn!("partial backup task failed: {:?}", e); + } + } + + if let Some(wal_removal_task) = wal_removal_task { + let res = wal_removal_task.await; + update_wal_removal_end(res, &tli, &mut last_removed_segno); } } + +/// Spawns/kills backup task and returns true if backup is required. +async fn update_backup( + conf: &SafeKeeperConf, + tli: &Arc, + wal_seg_size: usize, + num_computes: usize, + state: &StateSnapshot, + backup_task: &mut Option, +) -> bool { + let is_wal_backup_required = + wal_backup::is_wal_backup_required(wal_seg_size, num_computes, state); + + if conf.is_wal_backup_enabled() { + wal_backup::update_task(conf, tli, is_wal_backup_required, state, backup_task).await; + } + + // update the state in Arc + tli.wal_backup_active + .store(backup_task.is_some(), std::sync::atomic::Ordering::Relaxed); + is_wal_backup_required +} + +/// Update is_active flag and returns its value. +fn update_is_active( + is_wal_backup_required: bool, + num_computes: usize, + state: &StateSnapshot, + tli_broker_active: &mut TimelineSetGuard, + tli: &Arc, +) -> bool { + let is_active = is_wal_backup_required + || num_computes > 0 + || state.remote_consistent_lsn < state.commit_lsn; + + // update the broker timeline set + if tli_broker_active.set(is_active) { + // write log if state has changed + info!( + "timeline active={} now, remote_consistent_lsn={}, commit_lsn={}", + is_active, state.remote_consistent_lsn, state.commit_lsn, + ); + + MANAGER_ACTIVE_CHANGES.inc(); + } + + // update the state in Arc + tli.broker_active + .store(is_active, std::sync::atomic::Ordering::Relaxed); + is_active +} + +/// Save control file if needed. Returns Instant if we should persist the control file in the future. 
+async fn update_control_file_save( + state: &StateSnapshot, + tli: &Arc, +) -> Option { + if !state.inmem_flush_pending { + return None; + } + + if state.cfile_last_persist_at.elapsed() > CF_SAVE_INTERVAL { + let mut write_guard = tli.write_shared_state().await; + // this can be done in the background because it blocks manager task, but flush() should + // be fast enough not to be a problem now + if let Err(e) = write_guard.sk.state.flush().await { + warn!("failed to save control file: {:?}", e); + } + + None + } else { + // we should wait until next CF_SAVE_INTERVAL + Some((state.cfile_last_persist_at + CF_SAVE_INTERVAL).into()) + } +} + +/// Spawns WAL removal task if needed. +async fn update_wal_removal( + conf: &SafeKeeperConf, + walsenders: &Arc, + tli: &Arc, + wal_seg_size: usize, + state: &StateSnapshot, + last_removed_segno: u64, + wal_removal_task: &mut Option>>, +) { + if wal_removal_task.is_some() { + // WAL removal is already in progress + return; + } + + // If enabled, we use LSN of the most lagging walsender as a WAL removal horizon. + // This allows to get better read speed for pageservers that are lagging behind, + // at the cost of keeping more WAL on disk. + let replication_horizon_lsn = if conf.walsenders_keep_horizon { + walsenders.laggard_lsn() + } else { + None + }; + + let removal_horizon_lsn = calc_horizon_lsn(state, replication_horizon_lsn); + let removal_horizon_segno = removal_horizon_lsn + .segment_number(wal_seg_size) + .saturating_sub(1); + + if removal_horizon_segno > last_removed_segno { + // we need to remove WAL + let remover = crate::wal_storage::Storage::remove_up_to( + &tli.read_shared_state().await.sk.wal_store, + removal_horizon_segno, + ); + *wal_removal_task = Some(tokio::spawn(async move { + remover.await?; + Ok(removal_horizon_segno) + })); + } +} + +/// Update the state after WAL removal task finished. +fn update_wal_removal_end( + res: Result, JoinError>, + tli: &Arc, + last_removed_segno: &mut u64, +) { + let new_last_removed_segno = match res { + Ok(Ok(segno)) => segno, + Err(e) => { + warn!("WAL removal task failed: {:?}", e); + return; + } + Ok(Err(e)) => { + warn!("WAL removal task failed: {:?}", e); + return; + } + }; + + *last_removed_segno = new_last_removed_segno; + // update the state in Arc + tli.last_removed_segno + .store(new_last_removed_segno, std::sync::atomic::Ordering::Relaxed); +} diff --git a/safekeeper/src/timelines_global_map.rs b/safekeeper/src/timelines_global_map.rs index 8d37bd6371..45e08ede3c 100644 --- a/safekeeper/src/timelines_global_map.rs +++ b/safekeeper/src/timelines_global_map.rs @@ -3,7 +3,7 @@ //! all from the disk on startup and keeping them in memory. use crate::safekeeper::ServerInfo; -use crate::timeline::{Timeline, TimelineError}; +use crate::timeline::{get_tenant_dir, get_timeline_dir, Timeline, TimelineError}; use crate::timelines_set::TimelinesSet; use crate::SafeKeeperConf; use anyhow::{bail, Context, Result}; @@ -127,7 +127,7 @@ impl GlobalTimelines { state.get_dependencies() }; - let timelines_dir = conf.tenant_dir(&tenant_id); + let timelines_dir = get_tenant_dir(&conf, &tenant_id); for timelines_dir_entry in std::fs::read_dir(&timelines_dir) .with_context(|| format!("failed to list timelines dir {}", timelines_dir))? { @@ -348,11 +348,7 @@ impl GlobalTimelines { } Err(_) => { // Timeline is not memory, but it may still exist on disk in broken state. 
- let dir_path = TIMELINES_STATE - .lock() - .unwrap() - .get_conf() - .timeline_dir(ttid); + let dir_path = get_timeline_dir(TIMELINES_STATE.lock().unwrap().get_conf(), ttid); let dir_existed = delete_dir(dir_path)?; Ok(TimelineDeleteForceResult { @@ -401,13 +397,10 @@ impl GlobalTimelines { // Note that we could concurrently create new timelines while we were deleting them, // so the directory may be not empty. In this case timelines will have bad state // and timeline background jobs can panic. - delete_dir( - TIMELINES_STATE - .lock() - .unwrap() - .get_conf() - .tenant_dir(tenant_id), - )?; + delete_dir(get_tenant_dir( + TIMELINES_STATE.lock().unwrap().get_conf(), + tenant_id, + ))?; // FIXME: we temporarily disabled removing timelines from the map, see `delete_force` // let tlis_after_delete = Self::get_all_for_tenant(*tenant_id); diff --git a/safekeeper/src/wal_backup.rs b/safekeeper/src/wal_backup.rs index 84680557f9..58591aecfa 100644 --- a/safekeeper/src/wal_backup.rs +++ b/safekeeper/src/wal_backup.rs @@ -30,9 +30,9 @@ use tracing::*; use utils::{id::TenantTimelineId, lsn::Lsn}; use crate::metrics::{BACKED_UP_SEGMENTS, BACKUP_ERRORS, WAL_BACKUP_TASKS}; -use crate::timeline::{PeerInfo, Timeline}; +use crate::timeline::{FullAccessTimeline, PeerInfo, Timeline}; use crate::timeline_manager::StateSnapshot; -use crate::{GlobalTimelines, SafeKeeperConf, WAL_BACKUP_RUNTIME}; +use crate::{SafeKeeperConf, WAL_BACKUP_RUNTIME}; use once_cell::sync::OnceCell; @@ -63,13 +63,13 @@ pub fn is_wal_backup_required( /// is running, kill it. pub async fn update_task( conf: &SafeKeeperConf, - ttid: TenantTimelineId, + tli: &Arc, need_backup: bool, state: &StateSnapshot, entry: &mut Option, ) { let (offloader, election_dbg_str) = - determine_offloader(&state.peers, state.backup_lsn, ttid, conf); + determine_offloader(&state.peers, state.backup_lsn, tli.ttid, conf); let elected_me = Some(conf.my_id) == offloader; let should_task_run = need_backup && elected_me; @@ -80,15 +80,8 @@ pub async fn update_task( info!("elected for backup: {}", election_dbg_str); let (shutdown_tx, shutdown_rx) = mpsc::channel(1); - let timeline_dir = conf.timeline_dir(&ttid); - let async_task = backup_task_main( - ttid, - timeline_dir, - conf.workdir.clone(), - conf.backup_parallel_jobs, - shutdown_rx, - ); + let async_task = backup_task_main(tli.clone(), conf.backup_parallel_jobs, shutdown_rx); let handle = if conf.current_thread_runtime { tokio::spawn(async_task) @@ -198,39 +191,32 @@ pub fn init_remote_storage(conf: &SafeKeeperConf) { } struct WalBackupTask { - timeline: Arc, + timeline: FullAccessTimeline, timeline_dir: Utf8PathBuf, - workspace_dir: Utf8PathBuf, wal_seg_size: usize, parallel_jobs: usize, commit_lsn_watch_rx: watch::Receiver, } /// Offload single timeline. 
-#[instrument(name = "WAL backup", skip_all, fields(ttid = %ttid))] -async fn backup_task_main( - ttid: TenantTimelineId, - timeline_dir: Utf8PathBuf, - workspace_dir: Utf8PathBuf, - parallel_jobs: usize, - mut shutdown_rx: Receiver<()>, -) { +#[instrument(name = "WAL backup", skip_all, fields(ttid = %tli.ttid))] +async fn backup_task_main(tli: Arc, parallel_jobs: usize, mut shutdown_rx: Receiver<()>) { let _guard = WAL_BACKUP_TASKS.guard(); + let tli = match tli.full_access_guard().await { + Ok(tli) => tli, + Err(e) => { + error!("backup error: {}", e); + return; + } + }; info!("started"); - let res = GlobalTimelines::get(ttid); - if let Err(e) = res { - error!("backup error: {}", e); - return; - } - let tli = res.unwrap(); let mut wb = WalBackupTask { wal_seg_size: tli.get_wal_seg_size().await, commit_lsn_watch_rx: tli.get_commit_lsn_watch_rx(), + timeline_dir: tli.get_timeline_dir(), timeline: tli, - timeline_dir, - workspace_dir, parallel_jobs, }; @@ -297,7 +283,6 @@ impl WalBackupTask { commit_lsn, self.wal_seg_size, &self.timeline_dir, - &self.workspace_dir, self.parallel_jobs, ) .await @@ -319,18 +304,18 @@ impl WalBackupTask { } async fn backup_lsn_range( - timeline: &Arc, + timeline: &FullAccessTimeline, backup_lsn: &mut Lsn, end_lsn: Lsn, wal_seg_size: usize, timeline_dir: &Utf8Path, - workspace_dir: &Utf8Path, parallel_jobs: usize, ) -> Result<()> { if parallel_jobs < 1 { anyhow::bail!("parallel_jobs must be >= 1"); } + let remote_timeline_path = remote_timeline_path(&timeline.ttid)?; let start_lsn = *backup_lsn; let segments = get_segments(start_lsn, end_lsn, wal_seg_size); @@ -343,7 +328,11 @@ async fn backup_lsn_range( loop { let added_task = match iter.next() { Some(s) => { - uploads.push_back(backup_single_segment(s, timeline_dir, workspace_dir)); + uploads.push_back(backup_single_segment( + s, + timeline_dir, + &remote_timeline_path, + )); true } None => false, @@ -381,18 +370,10 @@ async fn backup_lsn_range( async fn backup_single_segment( seg: &Segment, timeline_dir: &Utf8Path, - workspace_dir: &Utf8Path, + remote_timeline_path: &RemotePath, ) -> Result { let segment_file_path = seg.file_path(timeline_dir)?; - let remote_segment_path = segment_file_path - .strip_prefix(workspace_dir) - .context("Failed to strip workspace dir prefix") - .and_then(RemotePath::new) - .with_context(|| { - format!( - "Failed to resolve remote part of path {segment_file_path:?} for base {workspace_dir:?}", - ) - })?; + let remote_segment_path = seg.remote_path(remote_timeline_path); let res = backup_object(&segment_file_path, &remote_segment_path, seg.size()).await; if res.is_ok() { @@ -430,6 +411,10 @@ impl Segment { Ok(timeline_dir.join(self.object_name())) } + pub fn remote_path(self, remote_timeline_path: &RemotePath) -> RemotePath { + remote_timeline_path.join(self.object_name()) + } + pub fn size(self) -> usize { (u64::from(self.end_lsn) - u64::from(self.start_lsn)) as usize } @@ -530,8 +515,7 @@ pub async fn read_object( /// when called. pub async fn delete_timeline(ttid: &TenantTimelineId) -> Result<()> { let storage = get_configured_remote_storage(); - let ttid_path = Utf8Path::new(&ttid.tenant_id.to_string()).join(ttid.timeline_id.to_string()); - let remote_path = RemotePath::new(&ttid_path)?; + let remote_path = remote_timeline_path(ttid)?; // see DEFAULT_MAX_KEYS_PER_LIST_RESPONSE // const Option unwrap is not stable, otherwise it would be const. 
@@ -613,15 +597,17 @@ pub async fn copy_s3_segments( .as_ref() .unwrap(); - let relative_dst_path = - Utf8Path::new(&dst_ttid.tenant_id.to_string()).join(dst_ttid.timeline_id.to_string()); - - let remote_path = RemotePath::new(&relative_dst_path)?; + let remote_dst_path = remote_timeline_path(dst_ttid)?; let cancel = CancellationToken::new(); let files = storage - .list(Some(&remote_path), ListingMode::NoDelimiter, None, &cancel) + .list( + Some(&remote_dst_path), + ListingMode::NoDelimiter, + None, + &cancel, + ) .await? .keys; @@ -635,9 +621,6 @@ pub async fn copy_s3_segments( uploaded_segments ); - let relative_src_path = - Utf8Path::new(&src_ttid.tenant_id.to_string()).join(src_ttid.timeline_id.to_string()); - for segno in from_segment..to_segment { if segno % SEGMENTS_PROGRESS_REPORT_INTERVAL == 0 { info!("copied all segments from {} until {}", from_segment, segno); @@ -649,8 +632,8 @@ pub async fn copy_s3_segments( } debug!("copying segment {}", segment_name); - let from = RemotePath::new(&relative_src_path.join(&segment_name))?; - let to = RemotePath::new(&relative_dst_path.join(&segment_name))?; + let from = remote_timeline_path(src_ttid)?.join(&segment_name); + let to = remote_dst_path.join(&segment_name); storage.copy_object(&from, &to, &cancel).await?; } @@ -661,3 +644,8 @@ pub async fn copy_s3_segments( ); Ok(()) } + +/// Get S3 (remote_storage) prefix path used for timeline files. +pub fn remote_timeline_path(ttid: &TenantTimelineId) -> Result { + RemotePath::new(&Utf8Path::new(&ttid.tenant_id.to_string()).join(ttid.timeline_id.to_string())) +} diff --git a/safekeeper/src/wal_backup_partial.rs b/safekeeper/src/wal_backup_partial.rs index 29e944bff3..a320be3bad 100644 --- a/safekeeper/src/wal_backup_partial.rs +++ b/safekeeper/src/wal_backup_partial.rs @@ -18,8 +18,6 @@ //! This way control file stores information about all potentially existing //! remote partial segments and can clean them up after uploading a newer version. -use std::sync::Arc; - use camino::Utf8PathBuf; use postgres_ffi::{XLogFileName, XLogSegNo, PG_TLI}; use rand::Rng; @@ -32,8 +30,9 @@ use utils::lsn::Lsn; use crate::{ metrics::{PARTIAL_BACKUP_UPLOADED_BYTES, PARTIAL_BACKUP_UPLOADS}, safekeeper::Term, - timeline::Timeline, - wal_backup, SafeKeeperConf, + timeline::FullAccessTimeline, + wal_backup::{self, remote_timeline_path}, + SafeKeeperConf, }; #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] @@ -83,10 +82,10 @@ impl State { struct PartialBackup { wal_seg_size: usize, - tli: Arc, + tli: FullAccessTimeline, conf: SafeKeeperConf, local_prefix: Utf8PathBuf, - remote_prefix: Utf8PathBuf, + remote_timeline_path: RemotePath, state: State, } @@ -153,7 +152,7 @@ impl PartialBackup { let backup_bytes = flush_lsn.segment_offset(self.wal_seg_size); let local_path = self.local_prefix.join(self.local_segment_name(segno)); - let remote_path = RemotePath::new(self.remote_prefix.join(&prepared.name).as_ref())?; + let remote_path = self.remote_timeline_path.join(&prepared.name); // Upload first `backup_bytes` bytes of the segment to the remote storage. 
wal_backup::backup_partial_segment(&local_path, &remote_path, backup_bytes).await?; @@ -253,7 +252,7 @@ impl PartialBackup { info!("deleting objects: {:?}", segments_to_delete); let mut objects_to_delete = vec![]; for seg in segments_to_delete.iter() { - let remote_path = RemotePath::new(self.remote_prefix.join(seg).as_ref())?; + let remote_path = self.remote_timeline_path.join(seg); objects_to_delete.push(remote_path); } @@ -273,7 +272,7 @@ impl PartialBackup { } #[instrument(name = "Partial backup", skip_all, fields(ttid = %tli.ttid))] -pub async fn main_task(tli: Arc, conf: SafeKeeperConf) { +pub async fn main_task(tli: FullAccessTimeline, conf: SafeKeeperConf) { debug!("started"); let await_duration = conf.partial_backup_timeout; @@ -289,11 +288,11 @@ pub async fn main_task(tli: Arc, conf: SafeKeeperConf) { let mut flush_lsn_rx = tli.get_term_flush_lsn_watch_rx(); let wal_seg_size = tli.get_wal_seg_size().await; - let local_prefix = tli.timeline_dir.clone(); - let remote_prefix = match tli.timeline_dir.strip_prefix(&conf.workdir) { - Ok(path) => path.to_owned(), + let local_prefix = tli.get_timeline_dir(); + let remote_timeline_path = match remote_timeline_path(&tli.ttid) { + Ok(path) => path, Err(e) => { - error!("failed to strip workspace dir prefix: {:?}", e); + error!("failed to create remote path: {:?}", e); return; } }; @@ -304,7 +303,7 @@ pub async fn main_task(tli: Arc, conf: SafeKeeperConf) { state: persistent_state.partial_backup, conf, local_prefix, - remote_prefix, + remote_timeline_path, }; debug!("state: {:?}", backup.state); diff --git a/safekeeper/src/wal_storage.rs b/safekeeper/src/wal_storage.rs index 6bc8c7c3f9..45e27e1951 100644 --- a/safekeeper/src/wal_storage.rs +++ b/safekeeper/src/wal_storage.rs @@ -25,7 +25,7 @@ use utils::crashsafe::durable_rename; use crate::metrics::{time_io_closure, WalStorageMetrics, REMOVED_WAL_SEGMENTS}; use crate::state::TimelinePersistentState; -use crate::wal_backup::read_object; +use crate::wal_backup::{read_object, remote_timeline_path}; use crate::SafeKeeperConf; use postgres_ffi::waldecoder::WalStreamDecoder; use postgres_ffi::XLogFileName; @@ -536,7 +536,7 @@ async fn remove_segments_from_disk( } pub struct WalReader { - workdir: Utf8PathBuf, + remote_path: RemotePath, timeline_dir: Utf8PathBuf, wal_seg_size: usize, pos: Lsn, @@ -558,7 +558,7 @@ pub struct WalReader { impl WalReader { pub fn new( - workdir: Utf8PathBuf, + ttid: &TenantTimelineId, timeline_dir: Utf8PathBuf, state: &TimelinePersistentState, start_pos: Lsn, @@ -586,7 +586,7 @@ impl WalReader { } Ok(Self { - workdir, + remote_path: remote_timeline_path(ttid)?, timeline_dir, wal_seg_size: state.server.wal_seg_size as usize, pos: start_pos, @@ -684,7 +684,7 @@ impl WalReader { let xlogoff = self.pos.segment_offset(self.wal_seg_size); let segno = self.pos.segment_number(self.wal_seg_size); let wal_file_name = XLogFileName(PG_TLI, segno, self.wal_seg_size); - let wal_file_path = self.timeline_dir.join(wal_file_name); + let wal_file_path = self.timeline_dir.join(&wal_file_name); // Try to open local file, if we may have WAL locally if self.pos >= self.local_start_lsn { @@ -712,16 +712,7 @@ impl WalReader { // Try to open remote file, if remote reads are enabled if self.enable_remote_read { - let remote_wal_file_path = wal_file_path - .strip_prefix(&self.workdir) - .context("Failed to strip workdir prefix") - .and_then(RemotePath::new) - .with_context(|| { - format!( - "Failed to resolve remote part of path {:?} for base {:?}", - wal_file_path, self.workdir, - ) - })?; + 
let remote_wal_file_path = self.remote_path.join(&wal_file_name); return read_object(&remote_wal_file_path, xlogoff as u64).await; } diff --git a/test_runner/fixtures/common_types.py b/test_runner/fixtures/common_types.py index e9be765669..147264762c 100644 --- a/test_runner/fixtures/common_types.py +++ b/test_runner/fixtures/common_types.py @@ -72,6 +72,18 @@ class Lsn: def segment_lsn(self, seg_sz: int = DEFAULT_WAL_SEG_SIZE) -> "Lsn": return Lsn(self.lsn_int - (self.lsn_int % seg_sz)) + def segno(self, seg_sz: int = DEFAULT_WAL_SEG_SIZE) -> int: + return self.lsn_int // seg_sz + + def segment_name(self, seg_sz: int = DEFAULT_WAL_SEG_SIZE) -> str: + segno = self.segno(seg_sz) + # The filename format is 00000001XXXXXXXX000000YY, where XXXXXXXXYY is segno in hex. + # XXXXXXXX is the higher 8 hex digits of segno + high_bits = segno >> 8 + # YY is the lower 2 hex digits of segno + low_bits = segno & 0xFF + return f"00000001{high_bits:08X}000000{low_bits:02X}" + @dataclass(frozen=True) class Key: diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index b8ef63faa9..0004745bf0 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -973,6 +973,9 @@ class NeonEnvBuilder: for pageserver in self.env.pageservers: pageserver.assert_no_errors() + for safekeeper in self.env.safekeepers: + safekeeper.assert_no_errors() + self.env.storage_controller.assert_no_errors() try: @@ -3813,6 +3816,9 @@ class Safekeeper(LogUtils): self.running = False return self + def assert_no_errors(self): + assert not self.log_contains("manager task finished prematurely") + def append_logical_message( self, tenant_id: TenantId, timeline_id: TimelineId, request: Dict[str, Any] ) -> Dict[str, Any]: @@ -3898,6 +3904,15 @@ class Safekeeper(LogUtils): """ cli = self.http_client() + target_segment_file = lsn.segment_name() + + def are_segments_removed(): + segments = self.list_segments(tenant_id, timeline_id) + log.info( + f"waiting for all segments before {target_segment_file} to be removed from sk {self.id}, current segments: {segments}" + ) + assert all(target_segment_file <= s for s in segments) + def are_lsns_advanced(): stat = cli.timeline_status(tenant_id, timeline_id) log.info( @@ -3909,6 +3924,7 @@ class Safekeeper(LogUtils): # pageserver to this safekeeper wait_until(30, 1, are_lsns_advanced) cli.checkpoint(tenant_id, timeline_id) + wait_until(30, 1, are_segments_removed) def wait_until_paused(self, failpoint: str): msg = f"at failpoint {failpoint}" From 87afbf6b24313cbfa28809ec7ada2e72911263be Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Fri, 31 May 2024 12:00:40 -0400 Subject: [PATCH 113/220] test(pageserver): add test interface to create artificial layers (#7899) This pull request adds necessary interfaces to deterministically create scenarios we want to test. Simplify some test cases to use this interface to make it stable + reproducible. Compaction test will be able to use this interface. Also the upcoming delete tombstone tests will use this interface to make test reproducible. 
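For illustration, a test can now seed a timeline and a branch of it with layers in one call each, along the lines of the updated vectored-read tests below (a fragment, not a complete test; the harness name is made up):

```rust
let harness = TenantHarness::create("example_artificial_layers")?;
let (tenant, ctx) = harness.load().await;
let base_key = Key::from_hex("000000000033333333444444445500000000").unwrap();
let base_key_child = Key::from_hex("000000000033333333444444445500000001").unwrap();

// Create a timeline whose layer map already contains one image layer at 0x20
// and whose last_record_lsn has been advanced to 0x20.
let tline = tenant
    .create_test_timeline_with_layers(
        TIMELINE_ID,
        Lsn(0x10), // initdb LSN
        DEFAULT_PG_VERSION,
        &ctx,
        Vec::new(), // no delta layers
        vec![(Lsn(0x20), vec![(base_key, test_img("data key 1"))])], // one image layer
        Lsn(0x20), // end LSN
    )
    .await?;

// Branch it and seed the child the same way.
let child = tenant
    .branch_timeline_test_with_layers(
        &tline,
        NEW_TIMELINE_ID,
        Some(Lsn(0x20)), // ancestor LSN
        &ctx,
        Vec::new(), // no delta layers
        vec![(Lsn(0x30), vec![(base_key_child, test_img("data key 2"))])], // one image layer
        Lsn(0x30), // end LSN
    )
    .await?;
```
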
## Summary of changes * `force_create_image_layer` * `force_create_delta_layer` * `force_advance_lsn` * `create_test_timeline_with_states` * `branch_timeline_test_with_states` --------- Signed-off-by: Alex Chi Z --- pageserver/src/tenant.rs | 261 ++++++++---------- pageserver/src/tenant/timeline.rs | 96 +++++++ .../src/tenant/timeline/layer_manager.rs | 7 + 3 files changed, 223 insertions(+), 141 deletions(-) diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 0a9637884f..cfa683beb8 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -1393,6 +1393,36 @@ impl Tenant { Ok(tl) } + /// Helper for unit tests to create a timeline with some pre-loaded states. + #[cfg(test)] + #[allow(clippy::too_many_arguments)] + pub async fn create_test_timeline_with_layers( + &self, + new_timeline_id: TimelineId, + initdb_lsn: Lsn, + pg_version: u32, + ctx: &RequestContext, + delta_layer_desc: Vec>, + image_layer_desc: Vec<(Lsn, Vec<(pageserver_api::key::Key, bytes::Bytes)>)>, + end_lsn: Lsn, + ) -> anyhow::Result> { + let tline = self + .create_test_timeline(new_timeline_id, initdb_lsn, pg_version, ctx) + .await?; + tline.force_advance_lsn(end_lsn); + for deltas in delta_layer_desc { + tline + .force_create_delta_layer(deltas, Some(initdb_lsn), ctx) + .await?; + } + for (lsn, images) in image_layer_desc { + tline + .force_create_image_layer(lsn, images, Some(initdb_lsn), ctx) + .await?; + } + Ok(tline) + } + /// Create a new timeline. /// /// Returns the new timeline ID and reference to its Timeline object. @@ -2992,17 +3022,53 @@ impl Tenant { &self, src_timeline: &Arc, dst_id: TimelineId, - start_lsn: Option, + ancestor_lsn: Option, ctx: &RequestContext, ) -> Result, CreateTimelineError> { let create_guard = self.create_timeline_create_guard(dst_id).unwrap(); let tl = self - .branch_timeline_impl(src_timeline, dst_id, start_lsn, create_guard, ctx) + .branch_timeline_impl(src_timeline, dst_id, ancestor_lsn, create_guard, ctx) .await?; tl.set_state(TimelineState::Active); Ok(tl) } + /// Helper for unit tests to branch a timeline with some pre-loaded states. + #[cfg(test)] + #[allow(clippy::too_many_arguments)] + pub async fn branch_timeline_test_with_layers( + &self, + src_timeline: &Arc, + dst_id: TimelineId, + ancestor_lsn: Option, + ctx: &RequestContext, + delta_layer_desc: Vec>, + image_layer_desc: Vec<(Lsn, Vec<(pageserver_api::key::Key, bytes::Bytes)>)>, + end_lsn: Lsn, + ) -> anyhow::Result> { + let tline = self + .branch_timeline_test(src_timeline, dst_id, ancestor_lsn, ctx) + .await?; + let ancestor_lsn = if let Some(ancestor_lsn) = ancestor_lsn { + ancestor_lsn + } else { + tline.get_last_record_lsn() + }; + assert!(end_lsn >= ancestor_lsn); + tline.force_advance_lsn(end_lsn); + for deltas in delta_layer_desc { + tline + .force_create_delta_layer(deltas, Some(ancestor_lsn), ctx) + .await?; + } + for (lsn, images) in image_layer_desc { + tline + .force_create_image_layer(lsn, images, Some(ancestor_lsn), ctx) + .await?; + } + Ok(tline) + } + /// Branch an existing timeline. /// /// The caller is responsible for activating the returned timeline. 
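    // For reference, the layer-descriptor parameters of the two test helpers above have the
    // following shapes (as can be read from the helper bodies):
    //
    //     delta_layer_desc: Vec<Vec<(Key, Lsn, Value)>>    // one inner Vec per delta layer
    //     image_layer_desc: Vec<(Lsn, Vec<(Key, Bytes)>)>  // one (lsn, images) pair per image layer
    //
    // and each helper force-advances last_record_lsn to `end_lsn` before inserting the layers.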
@@ -6206,75 +6272,36 @@ mod tests { async fn test_vectored_missing_data_key_reads() -> anyhow::Result<()> { let harness = TenantHarness::create("test_vectored_missing_data_key_reads")?; let (tenant, ctx) = harness.load().await; - let tline = tenant - .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx) - .await?; - - let cancel = CancellationToken::new(); let base_key = Key::from_hex("000000000033333333444444445500000000").unwrap(); let base_key_child = Key::from_hex("000000000033333333444444445500000001").unwrap(); let base_key_nonexist = Key::from_hex("000000000033333333444444445500000002").unwrap(); - let mut lsn = Lsn(0x20); - - { - let mut writer = tline.writer().await; - writer - .put(base_key, lsn, &Value::Image(test_img("data key 1")), &ctx) - .await?; - writer.finish_write(lsn); - drop(writer); - - tline.freeze_and_flush().await?; // this will create a image layer - } + let tline = tenant + .create_test_timeline_with_layers( + TIMELINE_ID, + Lsn(0x10), + DEFAULT_PG_VERSION, + &ctx, + Vec::new(), // delta layers + vec![(Lsn(0x20), vec![(base_key, test_img("data key 1"))])], // image layers + Lsn(0x20), // it's fine to not advance LSN to 0x30 while using 0x30 to get below because `get_vectored_impl` does not wait for LSN + ) + .await?; let child = tenant - .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(lsn), &ctx) + .branch_timeline_test_with_layers( + &tline, + NEW_TIMELINE_ID, + Some(Lsn(0x20)), + &ctx, + Vec::new(), // delta layers + vec![(Lsn(0x30), vec![(base_key_child, test_img("data key 2"))])], // image layers + Lsn(0x30), + ) .await .unwrap(); - lsn.0 += 0x10; - - { - let mut writer = child.writer().await; - writer - .put( - base_key_child, - lsn, - &Value::Image(test_img("data key 2")), - &ctx, - ) - .await?; - writer.finish_write(lsn); - drop(writer); - - child.freeze_and_flush().await?; // this will create a delta - - { - // update the partitioning to include the test key space, otherwise they - // will be dropped by image layer creation - let mut guard = child.partitioning.lock().await; - let ((partitioning, _), partition_lsn) = &mut *guard; - partitioning - .parts - .push(KeySpace::single(base_key..base_key_nonexist)); // exclude the nonexist key - *partition_lsn = lsn; - } - - child - .compact( - &cancel, - { - let mut set = EnumSet::empty(); - set.insert(CompactFlags::ForceImageLayerCreation); - set - }, - &ctx, - ) - .await?; // force create an image layer for the keys, TODO: check if the image layer is created - } - async fn get_vectored_impl_wrapper( tline: &Arc, key: Key, @@ -6296,6 +6323,8 @@ mod tests { })) } + let lsn = Lsn(0x30); + // test vectored get on parent timeline assert_eq!( get_vectored_impl_wrapper(&tline, base_key, lsn, &ctx).await?, @@ -6333,94 +6362,42 @@ mod tests { #[tokio::test] async fn test_vectored_missing_metadata_key_reads() -> anyhow::Result<()> { - let harness = TenantHarness::create("test_vectored_missing_metadata_key_reads")?; + let harness = TenantHarness::create("test_vectored_missing_data_key_reads")?; let (tenant, ctx) = harness.load().await; + + let base_key = Key::from_hex("620000000033333333444444445500000000").unwrap(); + let base_key_child = Key::from_hex("620000000033333333444444445500000001").unwrap(); + let base_key_nonexist = Key::from_hex("620000000033333333444444445500000002").unwrap(); + assert_eq!(base_key.field1, AUX_KEY_PREFIX); // in case someone accidentally changed the prefix... 
+ let tline = tenant - .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx) + .create_test_timeline_with_layers( + TIMELINE_ID, + Lsn(0x10), + DEFAULT_PG_VERSION, + &ctx, + Vec::new(), // delta layers + vec![(Lsn(0x20), vec![(base_key, test_img("metadata key 1"))])], // image layers + Lsn(0x20), // it's fine to not advance LSN to 0x30 while using 0x30 to get below because `get_vectored_impl` does not wait for LSN + ) .await?; - let cancel = CancellationToken::new(); - - let mut base_key = Key::from_hex("000000000033333333444444445500000000").unwrap(); - let mut base_key_child = Key::from_hex("000000000033333333444444445500000001").unwrap(); - let mut base_key_nonexist = Key::from_hex("000000000033333333444444445500000002").unwrap(); - base_key.field1 = AUX_KEY_PREFIX; - base_key_child.field1 = AUX_KEY_PREFIX; - base_key_nonexist.field1 = AUX_KEY_PREFIX; - - let mut lsn = Lsn(0x20); - - { - let mut writer = tline.writer().await; - writer - .put( - base_key, - lsn, - &Value::Image(test_img("metadata key 1")), - &ctx, - ) - .await?; - writer.finish_write(lsn); - drop(writer); - - tline.freeze_and_flush().await?; // this will create an image layer - - tline - .compact( - &cancel, - { - let mut set = EnumSet::empty(); - set.insert(CompactFlags::ForceImageLayerCreation); - set.insert(CompactFlags::ForceRepartition); - set - }, - &ctx, - ) - .await?; // force create an image layer for metadata keys - tenant - .gc_iteration(Some(tline.timeline_id), 0, Duration::ZERO, &cancel, &ctx) - .await?; - } - let child = tenant - .branch_timeline_test(&tline, NEW_TIMELINE_ID, Some(lsn), &ctx) + .branch_timeline_test_with_layers( + &tline, + NEW_TIMELINE_ID, + Some(Lsn(0x20)), + &ctx, + Vec::new(), // delta layers + vec![( + Lsn(0x30), + vec![(base_key_child, test_img("metadata key 2"))], + )], // image layers + Lsn(0x30), + ) .await .unwrap(); - lsn.0 += 0x10; - - { - let mut writer = child.writer().await; - writer - .put( - base_key_child, - lsn, - &Value::Image(test_img("metadata key 2")), - &ctx, - ) - .await?; - writer.finish_write(lsn); - drop(writer); - - child.freeze_and_flush().await?; - - child - .compact( - &cancel, - { - let mut set = EnumSet::empty(); - set.insert(CompactFlags::ForceImageLayerCreation); - set.insert(CompactFlags::ForceRepartition); - set - }, - &ctx, - ) - .await?; // force create an image layer for metadata keys - tenant - .gc_iteration(Some(child.timeline_id), 0, Duration::ZERO, &cancel, &ctx) - .await?; - } - async fn get_vectored_impl_wrapper( tline: &Arc, key: Key, @@ -6442,6 +6419,8 @@ mod tests { })) } + let lsn = Lsn(0x30); + // test vectored get on parent timeline assert_eq!( get_vectored_impl_wrapper(&tline, base_key, lsn, &ctx).await?, diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index b498876465..8033edaa12 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -5371,6 +5371,102 @@ impl Timeline { shard_count: self.tenant_shard_id.shard_count, } } + + #[cfg(test)] + pub(super) fn force_advance_lsn(self: &Arc, new_lsn: Lsn) { + self.last_record_lsn.advance(new_lsn); + } + + /// Force create an image layer and place it into the layer map. + /// + /// DO NOT use this function directly. Use [`Tenant::branch_timeline_test_with_layers`] + /// or [`Tenant::create_test_timeline_with_layers`] to ensure all these layers are placed into the layer map in one run. 
+ #[cfg(test)] + pub(super) async fn force_create_image_layer( + self: &Arc, + lsn: Lsn, + mut images: Vec<(Key, Bytes)>, + check_start_lsn: Option, + ctx: &RequestContext, + ) -> anyhow::Result<()> { + let last_record_lsn = self.get_last_record_lsn(); + assert!( + lsn <= last_record_lsn, + "advance last record lsn before inserting a layer, lsn={lsn}, last_record_lsn={last_record_lsn}" + ); + if let Some(check_start_lsn) = check_start_lsn { + assert!(lsn >= check_start_lsn); + } + images.sort_unstable_by(|(ka, _), (kb, _)| ka.cmp(kb)); + let min_key = *images.first().map(|(k, _)| k).unwrap(); + let max_key = images.last().map(|(k, _)| k).unwrap().next(); + let mut image_layer_writer = ImageLayerWriter::new( + self.conf, + self.timeline_id, + self.tenant_shard_id, + &(min_key..max_key), + lsn, + ctx, + ) + .await?; + for (key, img) in images { + image_layer_writer.put_image(key, img, ctx).await?; + } + let image_layer = image_layer_writer.finish(self, ctx).await?; + + { + let mut guard = self.layers.write().await; + guard.force_insert_layer(image_layer); + } + + Ok(()) + } + + /// Force create a delta layer and place it into the layer map. + /// + /// DO NOT use this function directly. Use [`Tenant::branch_timeline_test_with_layers`] + /// or [`Tenant::create_test_timeline_with_layers`] to ensure all these layers are placed into the layer map in one run. + #[cfg(test)] + pub(super) async fn force_create_delta_layer( + self: &Arc, + mut deltas: Vec<(Key, Lsn, Value)>, + check_start_lsn: Option, + ctx: &RequestContext, + ) -> anyhow::Result<()> { + let last_record_lsn = self.get_last_record_lsn(); + deltas.sort_unstable_by(|(ka, la, _), (kb, lb, _)| (ka, la).cmp(&(kb, lb))); + let min_key = *deltas.first().map(|(k, _, _)| k).unwrap(); + let max_key = deltas.last().map(|(k, _, _)| k).unwrap().next(); + let min_lsn = *deltas.iter().map(|(_, lsn, _)| lsn).min().unwrap(); + let max_lsn = Lsn(deltas.iter().map(|(_, lsn, _)| lsn).max().unwrap().0 + 1); + assert!( + max_lsn <= last_record_lsn, + "advance last record lsn before inserting a layer, max_lsn={max_lsn}, last_record_lsn={last_record_lsn}" + ); + if let Some(check_start_lsn) = check_start_lsn { + assert!(min_lsn >= check_start_lsn); + } + let mut delta_layer_writer = DeltaLayerWriter::new( + self.conf, + self.timeline_id, + self.tenant_shard_id, + min_key, + min_lsn..max_lsn, + ctx, + ) + .await?; + for (key, lsn, val) in deltas { + delta_layer_writer.put_value(key, lsn, val, ctx).await?; + } + let delta_layer = delta_layer_writer.finish(max_key, self, ctx).await?; + + { + let mut guard = self.layers.write().await; + guard.force_insert_layer(delta_layer); + } + + Ok(()) + } } type TraversalPathItem = (ValueReconstructResult, Lsn, TraversalId); diff --git a/pageserver/src/tenant/timeline/layer_manager.rs b/pageserver/src/tenant/timeline/layer_manager.rs index 884b71df75..b78c98a506 100644 --- a/pageserver/src/tenant/timeline/layer_manager.rs +++ b/pageserver/src/tenant/timeline/layer_manager.rs @@ -255,6 +255,13 @@ impl LayerManager { updates.flush() } + #[cfg(test)] + pub(crate) fn force_insert_layer(&mut self, layer: ResidentLayer) { + let mut updates = self.layer_map.batch_update(); + Self::insert_historic_layer(layer.as_ref().clone(), &mut updates, &mut self.layer_fmgr); + updates.flush() + } + /// Helper function to insert a layer into the layer map and file manager. 
fn insert_historic_layer( layer: Layer, From 9fda85b4862bccf7e57c1f2fadfd03f1c3c7288b Mon Sep 17 00:00:00 2001 From: John Spray Date: Fri, 31 May 2024 17:02:10 +0100 Subject: [PATCH 114/220] pageserver: remove AncestorStopping error variants (#7916) ## Problem In all cases, AncestorStopping is equivalent to Cancelled. This became more obvious in https://github.com/neondatabase/neon/pull/7912#discussion_r1620582309 when updating these error types. ## Summary of changes - Remove AncestorStopping, always use Cancelled instead --- pageserver/src/consumption_metrics.rs | 2 +- pageserver/src/http/routes.rs | 3 --- pageserver/src/pgdatadir_mapping.rs | 7 ++----- pageserver/src/tenant/timeline.rs | 19 ++++--------------- .../fixtures/pageserver/allowed_errors.py | 2 +- 5 files changed, 8 insertions(+), 25 deletions(-) diff --git a/pageserver/src/consumption_metrics.rs b/pageserver/src/consumption_metrics.rs index 62bbde42f4..540d0d2e8c 100644 --- a/pageserver/src/consumption_metrics.rs +++ b/pageserver/src/consumption_metrics.rs @@ -358,7 +358,7 @@ async fn calculate_and_log(tenant: &Tenant, cancel: &CancellationToken, ctx: &Re // mean the synthetic size worker should terminate. let shutting_down = matches!( e.downcast_ref::(), - Some(PageReconstructError::Cancelled | PageReconstructError::AncestorStopping(_)) + Some(PageReconstructError::Cancelled) ); if !shutting_down { diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index 913d45d63c..bd6fa028ac 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -184,9 +184,6 @@ impl From for ApiError { PageReconstructError::Cancelled => { ApiError::InternalServerError(anyhow::anyhow!("request was cancelled")) } - PageReconstructError::AncestorStopping(_) => { - ApiError::ResourceUnavailable(format!("{pre}").into()) - } PageReconstructError::AncestorLsnTimeout(e) => ApiError::Timeout(format!("{e}").into()), PageReconstructError::WalRedo(pre) => ApiError::InternalServerError(pre), } diff --git a/pageserver/src/pgdatadir_mapping.rs b/pageserver/src/pgdatadir_mapping.rs index 0fc846e5f3..c78c358855 100644 --- a/pageserver/src/pgdatadir_mapping.rs +++ b/pageserver/src/pgdatadir_mapping.rs @@ -115,9 +115,7 @@ impl From for CollectKeySpaceError { impl From for CalculateLogicalSizeError { fn from(pre: PageReconstructError) -> Self { match pre { - PageReconstructError::AncestorStopping(_) | PageReconstructError::Cancelled => { - Self::Cancelled - } + PageReconstructError::Cancelled => Self::Cancelled, _ => Self::PageRead(pre), } } @@ -1614,8 +1612,7 @@ impl<'a> DatadirModification<'a> { aux_files.dir = Some(dir); } Err( - e @ (PageReconstructError::AncestorStopping(_) - | PageReconstructError::Cancelled + e @ (PageReconstructError::Cancelled | PageReconstructError::AncestorLsnTimeout(_)), ) => { // Important that we do not interpret a shutdown error as "not found" and thereby diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 8033edaa12..4a9d981ad8 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -501,10 +501,6 @@ pub(crate) enum PageReconstructError { #[error("timeline shutting down")] Cancelled, - /// The ancestor of this is being stopped - #[error("ancestor timeline {0} is being stopped")] - AncestorStopping(TimelineId), - /// An error happened replaying WAL records #[error(transparent)] WalRedo(anyhow::Error), @@ -569,7 +565,7 @@ impl PageReconstructError { match self { Other(_) => false, AncestorLsnTimeout(_) => false, - Cancelled 
| AncestorStopping(_) => true, + Cancelled => true, WalRedo(_) => false, MissingKey { .. } => false, } @@ -645,9 +641,6 @@ pub(crate) enum GetVectoredError { #[derive(thiserror::Error, Debug)] pub(crate) enum GetReadyAncestorError { - #[error("ancestor timeline {0} is being stopped")] - AncestorStopping(TimelineId), - #[error("Ancestor LSN wait error: {0}")] AncestorLsnTimeout(#[from] WaitLsnError), @@ -757,7 +750,6 @@ impl From for PageReconstructError { fn from(e: GetReadyAncestorError) -> Self { use GetReadyAncestorError::*; match e { - AncestorStopping(tid) => PageReconstructError::AncestorStopping(tid), AncestorLsnTimeout(wait_err) => PageReconstructError::AncestorLsnTimeout(wait_err), bad_state @ BadState { .. } => PageReconstructError::Other(anyhow::anyhow!(bad_state)), Cancelled => PageReconstructError::Cancelled, @@ -1192,9 +1184,7 @@ impl Timeline { use PageReconstructError::*; match block { - Err(Cancelled | AncestorStopping(_)) => { - return Err(GetVectoredError::Cancelled) - } + Err(Cancelled) => return Err(GetVectoredError::Cancelled), Err(MissingKey(_)) if NON_INHERITED_RANGE.contains(&key) || NON_INHERITED_SPARSE_RANGE.contains(&key) => @@ -3585,9 +3575,8 @@ impl Timeline { match ancestor.wait_to_become_active(ctx).await { Ok(()) => {} Err(TimelineState::Stopping) => { - return Err(GetReadyAncestorError::AncestorStopping( - ancestor.timeline_id, - )); + // If an ancestor is stopping, it means the tenant is stopping: handle this the same as if this timeline was stopping. + return Err(GetReadyAncestorError::Cancelled); } Err(state) => { return Err(GetReadyAncestorError::BadState { diff --git a/test_runner/fixtures/pageserver/allowed_errors.py b/test_runner/fixtures/pageserver/allowed_errors.py index ad8bbe2021..ef412cade7 100755 --- a/test_runner/fixtures/pageserver/allowed_errors.py +++ b/test_runner/fixtures/pageserver/allowed_errors.py @@ -66,7 +66,7 @@ DEFAULT_PAGESERVER_ALLOWED_ERRORS = ( ".*query handler for 'pagestream.*failed: Timeline .* is not active", # timeline delete in progress ".*task iteration took longer than the configured period.*", # these can happen anytime we do compactions from background task and shutdown pageserver - r".*ERROR.*ancestor timeline \S+ is being stopped", + ".*could not compact.*cancelled.*", # this is expected given our collaborative shutdown approach for the UploadQueue ".*Compaction failed.*, retrying in .*: Other\\(queue is in state Stopped.*", ".*Compaction failed.*, retrying in .*: ShuttingDown", From ef83f31e77abf7cf55387635eb3e8ad2191d97a1 Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Fri, 31 May 2024 21:19:41 +0300 Subject: [PATCH 115/220] pagectl: key command for dumping what we know about the key (#7890) What we know about the key via added `pagectl key $key` command: - debug formatting - shard placement when `--shard-count` is specified - different boolean queries in `key.rs` - aux files v2 Example: ``` $ cargo run -qp pagectl -- key 000000063F00004005000060270000100E2C parsed from hex: 000000063F00004005000060270000100E2C: Key { field1: 0, field2: 1599, field3: 16389, field4: 24615, field5: 0, field6: 1052204 } rel_block: true rel_vm_block: false rel_fsm_block: false slru_block: false inherited: true rel_size: false slru_segment_size: false recognized kind: None ``` --- Cargo.lock | 1 + libs/pageserver_api/src/key.rs | 36 ++- libs/pageserver_api/src/reltag.rs | 53 +++- libs/pageserver_api/src/shard.rs | 25 ++ libs/utils/src/hex.rs | 19 +- pageserver/ctl/Cargo.toml | 1 + pageserver/ctl/src/key.rs | 477 
++++++++++++++++++++++++++++++ pageserver/ctl/src/main.rs | 4 + 8 files changed, 608 insertions(+), 8 deletions(-) create mode 100644 pageserver/ctl/src/key.rs diff --git a/Cargo.lock b/Cargo.lock index 96ba5c8ec3..6a60104472 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3570,6 +3570,7 @@ dependencies = [ "serde", "serde_json", "svg_fmt", + "thiserror", "tokio", "tokio-util", "toml_edit", diff --git a/libs/pageserver_api/src/key.rs b/libs/pageserver_api/src/key.rs index b00d48498c..e52d4ef986 100644 --- a/libs/pageserver_api/src/key.rs +++ b/libs/pageserver_api/src/key.rs @@ -381,10 +381,15 @@ pub fn rel_size_to_key(rel: RelTag) -> Key { field3: rel.dbnode, field4: rel.relnode, field5: rel.forknum, - field6: 0xffffffff, + field6: 0xffff_ffff, } } +#[inline(always)] +pub fn is_rel_size_key(key: &Key) -> bool { + key.field1 == 0 && key.field6 == u32::MAX +} + #[inline(always)] pub fn rel_key_range(rel: RelTag) -> Range { Key { @@ -422,6 +427,25 @@ pub fn slru_dir_to_key(kind: SlruKind) -> Key { } } +#[inline(always)] +pub fn slru_dir_kind(key: &Key) -> Option> { + if key.field1 == 0x01 + && key.field3 == 0 + && key.field4 == 0 + && key.field5 == 0 + && key.field6 == 0 + { + match key.field2 { + 0 => Some(Ok(SlruKind::Clog)), + 1 => Some(Ok(SlruKind::MultiXactMembers)), + 2 => Some(Ok(SlruKind::MultiXactOffsets)), + x => Some(Err(x)), + } + } else { + None + } +} + #[inline(always)] pub fn slru_block_to_key(kind: SlruKind, segno: u32, blknum: BlockNumber) -> Key { Key { @@ -450,10 +474,18 @@ pub fn slru_segment_size_to_key(kind: SlruKind, segno: u32) -> Key { field3: 1, field4: segno, field5: 0, - field6: 0xffffffff, + field6: 0xffff_ffff, } } +pub fn is_slru_segment_size_key(key: &Key) -> bool { + key.field1 == 0x01 + && key.field2 < 0x03 + && key.field3 == 0x01 + && key.field5 == 0 + && key.field6 == u32::MAX +} + #[inline(always)] pub fn slru_segment_key_range(kind: SlruKind, segno: u32) -> Range { let field2 = match kind { diff --git a/libs/pageserver_api/src/reltag.rs b/libs/pageserver_api/src/reltag.rs index 38693ab847..010a9c2932 100644 --- a/libs/pageserver_api/src/reltag.rs +++ b/libs/pageserver_api/src/reltag.rs @@ -3,7 +3,7 @@ use std::cmp::Ordering; use std::fmt; use postgres_ffi::pg_constants::GLOBALTABLESPACE_OID; -use postgres_ffi::relfile_utils::forknumber_to_name; +use postgres_ffi::relfile_utils::{forkname_to_number, forknumber_to_name, MAIN_FORKNUM}; use postgres_ffi::Oid; /// @@ -68,6 +68,57 @@ impl fmt::Display for RelTag { } } +#[derive(Debug, thiserror::Error)] +pub enum ParseRelTagError { + #[error("invalid forknum")] + InvalidForknum(#[source] std::num::ParseIntError), + #[error("missing triplet member {}", .0)] + MissingTripletMember(usize), + #[error("invalid triplet member {}", .0)] + InvalidTripletMember(usize, #[source] std::num::ParseIntError), +} + +impl std::str::FromStr for RelTag { + type Err = ParseRelTagError; + + fn from_str(s: &str) -> Result { + use ParseRelTagError::*; + + // FIXME: in postgres logs this separator is dot + // Example: + // could not read block 2 in rel 1663/208101/2620.1 from page server at lsn 0/2431E6F0 + // with a regex we could get this more painlessly + let (triplet, forknum) = match s.split_once('_').or_else(|| s.split_once('.')) { + Some((t, f)) => { + let forknum = forkname_to_number(Some(f)); + let forknum = if let Ok(f) = forknum { + f + } else { + f.parse::().map_err(InvalidForknum)? 
+ }; + + (t, Some(forknum)) + } + None => (s, None), + }; + + let mut split = triplet + .splitn(3, '/') + .enumerate() + .map(|(i, s)| s.parse::().map_err(|e| InvalidTripletMember(i, e))); + let spcnode = split.next().ok_or(MissingTripletMember(0))??; + let dbnode = split.next().ok_or(MissingTripletMember(1))??; + let relnode = split.next().ok_or(MissingTripletMember(2))??; + + Ok(RelTag { + spcnode, + forknum: forknum.unwrap_or(MAIN_FORKNUM), + dbnode, + relnode, + }) + } +} + impl RelTag { pub fn to_segfile_name(&self, segno: u32) -> String { let mut name = if self.spcnode == GLOBALTABLESPACE_OID { diff --git a/libs/pageserver_api/src/shard.rs b/libs/pageserver_api/src/shard.rs index 1c05a01926..8ace426f88 100644 --- a/libs/pageserver_api/src/shard.rs +++ b/libs/pageserver_api/src/shard.rs @@ -428,6 +428,12 @@ impl<'de> Deserialize<'de> for TenantShardId { #[derive(Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Debug)] pub struct ShardStripeSize(pub u32); +impl Default for ShardStripeSize { + fn default() -> Self { + DEFAULT_STRIPE_SIZE + } +} + /// Layout version: for future upgrades where we might change how the key->shard mapping works #[derive(Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Debug)] pub struct ShardLayout(u8); @@ -713,6 +719,25 @@ fn key_to_shard_number(count: ShardCount, stripe_size: ShardStripeSize, key: &Ke ShardNumber((hash % count.0 as u32) as u8) } +/// For debugging, while not exposing the internals. +#[derive(Debug)] +#[allow(unused)] // used by debug formatting by pagectl +struct KeyShardingInfo { + shard0: bool, + shard_number: ShardNumber, +} + +pub fn describe( + key: &Key, + shard_count: ShardCount, + stripe_size: ShardStripeSize, +) -> impl std::fmt::Debug { + KeyShardingInfo { + shard0: key_is_shard0(key), + shard_number: key_to_shard_number(shard_count, stripe_size, key), + } +} + #[cfg(test)] mod tests { use utils::Hex; diff --git a/libs/utils/src/hex.rs b/libs/utils/src/hex.rs index fc0bb7e4a2..382f805a96 100644 --- a/libs/utils/src/hex.rs +++ b/libs/utils/src/hex.rs @@ -19,13 +19,13 @@ /// // right: [0x68; 1] /// # fn serialize_something() -> Vec { "hello world".as_bytes().to_vec() } /// ``` -#[derive(PartialEq)] -pub struct Hex<'a>(pub &'a [u8]); +pub struct Hex(pub S); -impl std::fmt::Debug for Hex<'_> { +impl> std::fmt::Debug for Hex { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "[")?; - for (i, c) in self.0.chunks(16).enumerate() { + let chunks = self.0.as_ref().chunks(16); + for (i, c) in chunks.enumerate() { if i > 0 && !c.is_empty() { writeln!(f, ", ")?; } @@ -36,6 +36,15 @@ impl std::fmt::Debug for Hex<'_> { write!(f, "0x{b:02x}")?; } } - write!(f, "; {}]", self.0.len()) + write!(f, "; {}]", self.0.as_ref().len()) + } +} + +impl, L: AsRef<[u8]>> PartialEq> for Hex { + fn eq(&self, other: &Hex) -> bool { + let left = self.0.as_ref(); + let right = other.0.as_ref(); + + left == right } } diff --git a/pageserver/ctl/Cargo.toml b/pageserver/ctl/Cargo.toml index 843f5dd862..be5626040b 100644 --- a/pageserver/ctl/Cargo.toml +++ b/pageserver/ctl/Cargo.toml @@ -17,6 +17,7 @@ pageserver = { path = ".." 
} pageserver_api.workspace = true remote_storage = { path = "../../libs/remote_storage" } postgres_ffi.workspace = true +thiserror.workspace = true tokio.workspace = true tokio-util.workspace = true toml_edit.workspace = true diff --git a/pageserver/ctl/src/key.rs b/pageserver/ctl/src/key.rs new file mode 100644 index 0000000000..28448811f8 --- /dev/null +++ b/pageserver/ctl/src/key.rs @@ -0,0 +1,477 @@ +use anyhow::Context; +use clap::Parser; +use pageserver_api::{ + key::Key, + reltag::{BlockNumber, RelTag, SlruKind}, + shard::{ShardCount, ShardStripeSize}, +}; +use std::str::FromStr; + +#[derive(Parser)] +pub(super) struct DescribeKeyCommand { + /// Key material in one of the forms: hex, span attributes captured from log, reltag blocknum + input: Vec, + + /// The number of shards to calculate what Keys placement would be. + #[arg(long)] + shard_count: Option, + + /// The sharding stripe size. + /// + /// The default is hardcoded. It makes no sense to provide this without providing + /// `--shard-count`. + #[arg(long, requires = "shard_count")] + stripe_size: Option, +} + +/// Sharded shard count without unsharded count, which the actual ShardCount supports. +#[derive(Clone, Copy)] +pub(super) struct CustomShardCount(std::num::NonZeroU8); + +#[derive(Debug, thiserror::Error)] +pub(super) enum InvalidShardCount { + #[error(transparent)] + ParsingFailed(#[from] std::num::ParseIntError), + #[error("too few shards")] + TooFewShards, +} + +impl FromStr for CustomShardCount { + type Err = InvalidShardCount; + + fn from_str(s: &str) -> Result { + let inner: std::num::NonZeroU8 = s.parse()?; + if inner.get() < 2 { + Err(InvalidShardCount::TooFewShards) + } else { + Ok(CustomShardCount(inner)) + } + } +} + +impl From for ShardCount { + fn from(value: CustomShardCount) -> Self { + ShardCount::new(value.0.get()) + } +} + +impl DescribeKeyCommand { + pub(super) fn execute(self) { + let DescribeKeyCommand { + input, + shard_count, + stripe_size, + } = self; + + let material = KeyMaterial::try_from(input.as_slice()).unwrap(); + let kind = material.kind(); + let key = Key::from(material); + + println!("parsed from {kind}: {key}:"); + println!(); + println!("{key:?}"); + + macro_rules! kind_query { + ($name:ident) => {{ + let s: &'static str = stringify!($name); + let s = s.strip_prefix("is_").unwrap_or(s); + let s = s.strip_suffix("_key").unwrap_or(s); + + #[allow(clippy::needless_borrow)] + (s, pageserver_api::key::$name(key)) + }}; + } + + // the current characterization is a mess of these boolean queries and separate + // "recognization". I think it accurately represents how strictly we model the Key + // right now, but could of course be made less confusing. 
+ + let queries = [ + ("rel_block", pageserver_api::key::is_rel_block_key(&key)), + kind_query!(is_rel_vm_block_key), + kind_query!(is_rel_fsm_block_key), + kind_query!(is_slru_block_key), + kind_query!(is_inherited_key), + ("rel_size", pageserver_api::key::is_rel_size_key(&key)), + ( + "slru_segment_size", + pageserver_api::key::is_slru_segment_size_key(&key), + ), + ]; + + let recognized_kind = "recognized kind"; + let metadata_key = "metadata key"; + let shard_placement = "shard placement"; + + let longest = queries + .iter() + .map(|t| t.0) + .chain([recognized_kind, metadata_key, shard_placement]) + .map(|s| s.len()) + .max() + .unwrap(); + + let colon = 1; + let padding = 1; + + for (name, is) in queries { + let width = longest - name.len() + colon + padding; + println!("{}{:width$}{}", name, ":", is); + } + + let width = longest - recognized_kind.len() + colon + padding; + println!( + "{}{:width$}{:?}", + recognized_kind, + ":", + RecognizedKeyKind::new(key), + ); + + if let Some(shard_count) = shard_count { + // seeing the sharding placement might be confusing, so leave it out unless shard + // count was given. + + let stripe_size = stripe_size.map(ShardStripeSize).unwrap_or_default(); + println!( + "# placement with shard_count: {} and stripe_size: {}:", + shard_count.0, stripe_size.0 + ); + let width = longest - shard_placement.len() + colon + padding; + println!( + "{}{:width$}{:?}", + shard_placement, + ":", + pageserver_api::shard::describe(&key, shard_count.into(), stripe_size) + ); + } + } +} + +/// Hand-wavy "inputs we accept" for a key. +#[derive(Debug)] +pub(super) enum KeyMaterial { + Hex(Key), + String(SpanAttributesFromLogs), + Split(RelTag, BlockNumber), +} + +impl KeyMaterial { + fn kind(&self) -> &'static str { + match self { + KeyMaterial::Hex(_) => "hex", + KeyMaterial::String(_) | KeyMaterial::Split(_, _) => "split", + } + } +} + +impl From for Key { + fn from(value: KeyMaterial) -> Self { + match value { + KeyMaterial::Hex(key) => key, + KeyMaterial::String(SpanAttributesFromLogs(rt, blocknum)) + | KeyMaterial::Split(rt, blocknum) => { + pageserver_api::key::rel_block_to_key(rt, blocknum) + } + } + } +} + +impl> TryFrom<&[S]> for KeyMaterial { + type Error = anyhow::Error; + + fn try_from(value: &[S]) -> Result { + match value { + [] => anyhow::bail!( + "need 1..N positional arguments describing the key, try hex or a log line" + ), + [one] => { + let one = one.as_ref(); + + let key = Key::from_hex(one).map(KeyMaterial::Hex); + + let attrs = SpanAttributesFromLogs::from_str(one).map(KeyMaterial::String); + + match (key, attrs) { + (Ok(key), _) => Ok(key), + (_, Ok(s)) => Ok(s), + (Err(e1), Err(e2)) => anyhow::bail!( + "failed to parse {one:?} as hex or span attributes:\n- {e1:#}\n- {e2:#}" + ), + } + } + more => { + // assume going left to right one of these is a reltag and then we find a blocknum + // this works, because we don't have plain numbers at least right after reltag in + // logs. for some definition of "works". 
+ + let Some((reltag_at, reltag)) = more + .iter() + .map(AsRef::as_ref) + .enumerate() + .find_map(|(i, s)| { + s.split_once("rel=") + .map(|(_garbage, actual)| actual) + .unwrap_or(s) + .parse::() + .ok() + .map(|rt| (i, rt)) + }) + else { + anyhow::bail!("found no RelTag in arguments"); + }; + + let Some(blocknum) = more + .iter() + .map(AsRef::as_ref) + .skip(reltag_at) + .find_map(|s| { + s.split_once("blkno=") + .map(|(_garbage, actual)| actual) + .unwrap_or(s) + .parse::() + .ok() + }) + else { + anyhow::bail!("found no blocknum in arguments"); + }; + + Ok(KeyMaterial::Split(reltag, blocknum)) + } + } + } +} + +#[derive(Debug)] +pub(super) struct SpanAttributesFromLogs(RelTag, BlockNumber); + +impl std::str::FromStr for SpanAttributesFromLogs { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + // accept the span separator but do not require or fail if either is missing + // "whatever{rel=1663/16389/24615 blkno=1052204 req_lsn=FFFFFFFF/FFFFFFFF}" + let (_, reltag) = s + .split_once("rel=") + .ok_or_else(|| anyhow::anyhow!("cannot find 'rel='"))?; + let reltag = reltag.split_whitespace().next().unwrap(); + + let (_, blocknum) = s + .split_once("blkno=") + .ok_or_else(|| anyhow::anyhow!("cannot find 'blkno='"))?; + let blocknum = blocknum.split_whitespace().next().unwrap(); + + let reltag = reltag + .parse() + .with_context(|| format!("parse reltag from {reltag:?}"))?; + let blocknum = blocknum + .parse() + .with_context(|| format!("parse blocknum from {blocknum:?}"))?; + + Ok(Self(reltag, blocknum)) + } +} + +#[derive(Debug)] +#[allow(dead_code)] // debug print is used +enum RecognizedKeyKind { + DbDir, + ControlFile, + Checkpoint, + AuxFilesV1, + SlruDir(Result), + RelMap(RelTagish<2>), + RelDir(RelTagish<2>), + AuxFileV2(Result>), +} + +#[derive(Debug, PartialEq)] +#[allow(unused)] +enum AuxFileV2 { + Recognized(&'static str, utils::Hex<[u8; 13]>), + OtherWithPrefix(&'static str, utils::Hex<[u8; 13]>), + Other(utils::Hex<[u8; 13]>), +} + +impl RecognizedKeyKind { + fn new(key: Key) -> Option { + use RecognizedKeyKind::{ + AuxFilesV1, Checkpoint, ControlFile, DbDir, RelDir, RelMap, SlruDir, + }; + + let slru_dir_kind = pageserver_api::key::slru_dir_kind(&key); + + Some(match key { + pageserver_api::key::DBDIR_KEY => DbDir, + pageserver_api::key::CONTROLFILE_KEY => ControlFile, + pageserver_api::key::CHECKPOINT_KEY => Checkpoint, + pageserver_api::key::AUX_FILES_KEY => AuxFilesV1, + _ if slru_dir_kind.is_some() => SlruDir(slru_dir_kind.unwrap()), + _ if key.field1 == 0 && key.field4 == 0 && key.field5 == 0 && key.field6 == 0 => { + RelMap([key.field2, key.field3].into()) + } + _ if key.field1 == 0 && key.field4 == 0 && key.field5 == 0 && key.field6 == 1 => { + RelDir([key.field2, key.field3].into()) + } + _ if key.is_metadata_key() => RecognizedKeyKind::AuxFileV2( + AuxFileV2::new(key).ok_or_else(|| utils::Hex(key.to_i128().to_be_bytes())), + ), + _ => return None, + }) + } +} + +impl AuxFileV2 { + fn new(key: Key) -> Option { + const EMPTY_HASH: [u8; 13] = { + let mut out = [0u8; 13]; + let hash = pageserver::aux_file::fnv_hash(b"").to_be_bytes(); + let mut i = 3; + while i < 16 { + out[i - 3] = hash[i]; + i += 1; + } + out + }; + + let bytes = key.to_i128().to_be_bytes(); + let hash = utils::Hex(<[u8; 13]>::try_from(&bytes[3..]).unwrap()); + + assert_eq!(EMPTY_HASH.len(), hash.0.len()); + + // TODO: we could probably find the preimages for the hashes + + Some(match (bytes[1], bytes[2]) { + (1, 1) => AuxFileV2::Recognized("pg_logical/mappings/", hash), + (1, 2) => 
AuxFileV2::Recognized("pg_logical/snapshots/", hash), + (1, 3) if hash.0 == EMPTY_HASH => { + AuxFileV2::Recognized("pg_logical/replorigin_checkpoint", hash) + } + (2, 1) => AuxFileV2::Recognized("pg_replslot/", hash), + (1, 0xff) => AuxFileV2::OtherWithPrefix("pg_logical/", hash), + (0xff, 0xff) => AuxFileV2::Other(hash), + _ => return None, + }) + } +} + +/// Prefix of RelTag, currently only known use cases are the two item versions. +/// +/// Renders like a reltag with `/`, nothing else. +struct RelTagish([u32; N]); + +impl From<[u32; N]> for RelTagish { + fn from(val: [u32; N]) -> Self { + RelTagish(val) + } +} + +impl std::fmt::Debug for RelTagish { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + use std::fmt::Write as _; + let mut first = true; + self.0.iter().try_for_each(|x| { + if !first { + f.write_char('/')?; + } + first = false; + write!(f, "{}", x) + }) + } +} + +#[cfg(test)] +mod tests { + use pageserver::aux_file::encode_aux_file_key; + + use super::*; + + #[test] + fn hex_is_key_material() { + let m = KeyMaterial::try_from(&["000000067F0000400200DF927900FFFFFFFF"][..]).unwrap(); + assert!(matches!(m, KeyMaterial::Hex(_)), "{m:?}"); + } + + #[test] + fn single_positional_spanalike_is_key_material() { + // why is this needed? if you are checking many, then copypaste starts to appeal + let strings = [ + (line!(), "2024-05-15T15:33:49.873906Z ERROR page_service_conn_main{peer_addr=A:B}:process_query{tenant_id=C timeline_id=D}:handle_pagerequests:handle_get_page_at_lsn_request{rel=1663/208101/2620_fsm blkno=2 req_lsn=0/238D98C8}: error reading relation or page version: Read error: could not find data for key 000000067F00032CE5000000000000000001 (shard ShardNumber(0)) at LSN 0/1D0A16C1, request LSN 0/238D98C8, ancestor 0/0"), + (line!(), "rel=1663/208101/2620_fsm blkno=2"), + (line!(), "rel=1663/208101/2620.1 blkno=2"), + ]; + + let mut first: Option = None; + + for (line, example) in strings { + let m = KeyMaterial::try_from(&[example][..]) + .unwrap_or_else(|e| panic!("failed to parse example from line {line}: {e:?}")); + let key = Key::from(m); + if let Some(first) = first { + assert_eq!(first, key); + } else { + first = Some(key); + } + } + + // not supporting this is rather accidential, but I think the input parsing is lenient + // enough already + KeyMaterial::try_from(&["1663/208101/2620_fsm 2"][..]).unwrap_err(); + } + + #[test] + fn multiple_spanlike_args() { + let strings = [ + (line!(), &["process_query{tenant_id=C", "timeline_id=D}:handle_pagerequests:handle_get_page_at_lsn_request{rel=1663/208101/2620_fsm", "blkno=2", "req_lsn=0/238D98C8}"][..]), + (line!(), &["rel=1663/208101/2620_fsm", "blkno=2"][..]), + (line!(), &["1663/208101/2620_fsm", "2"][..]), + ]; + + let mut first: Option = None; + + for (line, example) in strings { + let m = KeyMaterial::try_from(example) + .unwrap_or_else(|e| panic!("failed to parse example from line {line}: {e:?}")); + let key = Key::from(m); + if let Some(first) = first { + assert_eq!(first, key); + } else { + first = Some(key); + } + } + } + #[test] + fn recognized_auxfiles() { + use AuxFileV2::*; + + let empty = [ + 0x2e, 0x07, 0xbb, 0x01, 0x42, 0x62, 0xb8, 0x21, 0x75, 0x62, 0x95, 0xc5, 0x8d, + ]; + let foobar = [ + 0x62, 0x79, 0x3c, 0x64, 0xbf, 0x6f, 0x0d, 0x35, 0x97, 0xba, 0x44, 0x6f, 0x18, + ]; + + #[rustfmt::skip] + let examples = [ + (line!(), "pg_logical/mappings/foobar", Recognized("pg_logical/mappings/", utils::Hex(foobar))), + (line!(), "pg_logical/snapshots/foobar", 
Recognized("pg_logical/snapshots/", utils::Hex(foobar))), + (line!(), "pg_logical/replorigin_checkpoint", Recognized("pg_logical/replorigin_checkpoint", utils::Hex(empty))), + (line!(), "pg_logical/foobar", OtherWithPrefix("pg_logical/", utils::Hex(foobar))), + (line!(), "pg_replslot/foobar", Recognized("pg_replslot/", utils::Hex(foobar))), + (line!(), "foobar", Other(utils::Hex(foobar))), + ]; + + for (line, path, expected) in examples { + let key = encode_aux_file_key(path); + let recognized = + AuxFileV2::new(key).unwrap_or_else(|| panic!("line {line} example failed")); + + assert_eq!(recognized, expected); + } + + assert_eq!( + AuxFileV2::new(Key::from_hex("600000102000000000000000000000000000").unwrap()), + None, + "example key has one too few 0 after 6 before 1" + ); + } +} diff --git a/pageserver/ctl/src/main.rs b/pageserver/ctl/src/main.rs index e92c352dab..50c3ac4c61 100644 --- a/pageserver/ctl/src/main.rs +++ b/pageserver/ctl/src/main.rs @@ -6,6 +6,7 @@ mod draw_timeline_dir; mod index_part; +mod key; mod layer_map_analyzer; mod layers; @@ -61,6 +62,8 @@ enum Commands { AnalyzeLayerMap(AnalyzeLayerMapCmd), #[command(subcommand)] Layer(LayerCmd), + /// Debug print a hex key found from logs + Key(key::DescribeKeyCommand), } /// Read and update pageserver metadata file @@ -183,6 +186,7 @@ async fn main() -> anyhow::Result<()> { .time_travel_recover(Some(&prefix), timestamp, done_if_after, &cancel) .await?; } + Commands::Key(dkc) => dkc.execute(), }; Ok(()) } From 7e60563910936cf6643edb686a8163b0b03c7108 Mon Sep 17 00:00:00 2001 From: John Spray Date: Fri, 31 May 2024 22:20:06 +0100 Subject: [PATCH 116/220] pageserver: add GcError type (#7917) ## Problem - Because GC exposes all errors as an anyhow::Error, we have intermittent issues with spurious log errors during shutdown, e.g. in this failure of a performance test https://neon-github-public-dev.s3.amazonaws.com/reports/main/9300804302/index.html#suites/07874de07c4a1c9effe0d92da7755ebf/214a2154f6f0217a/ ``` Gc failed 1 times, retrying in 2s: shutting down ``` GC really doesn't do a lot of complicated IO: it doesn't benefit from the backtrace capabilities of anyhow::Error, and can be expressed more robustly as an enum. ## Summary of changes - Add GcError type and use it instead of anyhow::Error in GC functions - In `gc_iteration_internal`, return GcError::Cancelled on shutdown rather than Ok(()) (we only used Ok before because we didn't have a clear cancellation error variant to use). - In `gc_iteration_internal`, skip past timelines that are shutting down, to avoid having to go through another GC iteration if we happen to see a deleting timeline during a GC run. - In `refresh_gc_info_internal`, avoid an error case where a timeline might not be found after being looked up, by carrying an Arc instead of a TimelineId between the first loop and second loop in the function. - In HTTP request handler, handle Cancelled variants as 503 instead of turning all GC errors into 500s. 
--- pageserver/src/tenant.rs | 112 +++++++++++++--------- pageserver/src/tenant/mgr.rs | 10 +- pageserver/src/tenant/tasks.rs | 33 ++++--- pageserver/src/tenant/timeline.rs | 47 ++++++--- test_runner/regress/test_tenant_detach.py | 4 +- 5 files changed, 129 insertions(+), 77 deletions(-) diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index cfa683beb8..eff9c742c1 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -487,6 +487,33 @@ enum CreateTimelineCause { Delete, } +#[derive(thiserror::Error, Debug)] +pub(crate) enum GcError { + // The tenant is shutting down + #[error("tenant shutting down")] + TenantCancelled, + + // The tenant is shutting down + #[error("timeline shutting down")] + TimelineCancelled, + + // The tenant is in a state inelegible to run GC + #[error("not active")] + NotActive, + + // A requested GC cutoff LSN was invalid, for example it tried to move backwards + #[error("not active")] + BadLsn { why: String }, + + // A remote storage error while scheduling updates after compaction + #[error(transparent)] + Remote(anyhow::Error), + + // If GC was invoked for a particular timeline, this error means it didn't exist + #[error("timeline not found")] + TimelineNotFound, +} + impl Tenant { /// Yet another helper for timeline initialization. /// @@ -1605,24 +1632,23 @@ impl Tenant { /// GC cutoff point is determined conservatively by either `horizon` and `pitr`, whichever /// requires more history to be retained. // - pub async fn gc_iteration( + pub(crate) async fn gc_iteration( &self, target_timeline_id: Option, horizon: u64, pitr: Duration, cancel: &CancellationToken, ctx: &RequestContext, - ) -> anyhow::Result { + ) -> Result { // Don't start doing work during shutdown if let TenantState::Stopping { .. } = self.current_state() { return Ok(GcResult::default()); } // there is a global allowed_error for this - anyhow::ensure!( - self.is_active(), - "Cannot run GC iteration on inactive tenant" - ); + if !self.is_active() { + return Err(GcError::NotActive); + } { let conf = self.tenant_conf.load(); @@ -2790,28 +2816,13 @@ impl Tenant { pitr: Duration, cancel: &CancellationToken, ctx: &RequestContext, - ) -> anyhow::Result { + ) -> Result { let mut totals: GcResult = Default::default(); let now = Instant::now(); - let gc_timelines = match self + let gc_timelines = self .refresh_gc_info_internal(target_timeline_id, horizon, pitr, cancel, ctx) - .await - { - Ok(result) => result, - Err(e) => { - if let Some(PageReconstructError::Cancelled) = - e.downcast_ref::() - { - // Handle cancellation - totals.elapsed = now.elapsed(); - return Ok(totals); - } else { - // Propagate other errors - return Err(e); - } - } - }; + .await?; failpoint_support::sleep_millis_async!("gc_iteration_internal_after_getting_gc_timelines"); @@ -2836,7 +2847,19 @@ impl Tenant { // made. break; } - let result = timeline.gc().await?; + let result = match timeline.gc().await { + Err(GcError::TimelineCancelled) => { + if target_timeline_id.is_some() { + // If we were targetting this specific timeline, surface cancellation to caller + return Err(GcError::TimelineCancelled); + } else { + // A timeline may be shutting down independently of the tenant's lifecycle: we should + // skip past this and proceed to try GC on other timelines. + continue; + } + } + r => r?, + }; totals += result; } @@ -2849,11 +2872,11 @@ impl Tenant { /// [`Tenant::get_gc_horizon`]. /// /// This is usually executed as part of periodic gc, but can now be triggered more often. 
- pub async fn refresh_gc_info( + pub(crate) async fn refresh_gc_info( &self, cancel: &CancellationToken, ctx: &RequestContext, - ) -> anyhow::Result>> { + ) -> Result>, GcError> { // since this method can now be called at different rates than the configured gc loop, it // might be that these configuration values get applied faster than what it was previously, // since these were only read from the gc task. @@ -2874,7 +2897,7 @@ impl Tenant { pitr: Duration, cancel: &CancellationToken, ctx: &RequestContext, - ) -> anyhow::Result>> { + ) -> Result>, GcError> { // before taking the gc_cs lock, do the heavier weight finding of gc_cutoff points for // currently visible timelines. let timelines = self @@ -2911,8 +2934,8 @@ impl Tenant { } } - if !self.is_active() { - anyhow::bail!("shutting down"); + if !self.is_active() || self.cancel.is_cancelled() { + return Err(GcError::TenantCancelled); } // grab mutex to prevent new timelines from being created here; avoid doing long operations @@ -2921,19 +2944,19 @@ impl Tenant { // Scan all timelines. For each timeline, remember the timeline ID and // the branch point where it was created. - let (all_branchpoints, timeline_ids): (BTreeSet<(TimelineId, Lsn)>, _) = { + let (all_branchpoints, timelines): (BTreeSet<(TimelineId, Lsn)>, _) = { let timelines = self.timelines.lock().unwrap(); let mut all_branchpoints = BTreeSet::new(); - let timeline_ids = { + let timelines = { if let Some(target_timeline_id) = target_timeline_id.as_ref() { if timelines.get(target_timeline_id).is_none() { - bail!("gc target timeline does not exist") + return Err(GcError::TimelineNotFound); } }; timelines .iter() - .map(|(timeline_id, timeline_entry)| { + .map(|(_timeline_id, timeline_entry)| { if let Some(ancestor_timeline_id) = &timeline_entry.get_ancestor_timeline_id() { @@ -2955,33 +2978,28 @@ impl Tenant { } } - *timeline_id + timeline_entry.clone() }) .collect::>() }; - (all_branchpoints, timeline_ids) + (all_branchpoints, timelines) }; // Ok, we now know all the branch points. // Update the GC information for each timeline. - let mut gc_timelines = Vec::with_capacity(timeline_ids.len()); - for timeline_id in timeline_ids { - // Timeline is known to be local and loaded. 
- let timeline = self - .get_timeline(timeline_id, false) - .with_context(|| format!("Timeline {timeline_id} was not found"))?; - + let mut gc_timelines = Vec::with_capacity(timelines.len()); + for timeline in timelines { // If target_timeline is specified, ignore all other timelines if let Some(target_timeline_id) = target_timeline_id { - if timeline_id != target_timeline_id { + if timeline.timeline_id != target_timeline_id { continue; } } let branchpoints: Vec = all_branchpoints .range(( - Included((timeline_id, Lsn(0))), - Included((timeline_id, Lsn(u64::MAX))), + Included((timeline.timeline_id, Lsn(0))), + Included((timeline.timeline_id, Lsn(u64::MAX))), )) .map(|&x| x.1) .collect(); @@ -2989,7 +3007,7 @@ impl Tenant { { let mut target = timeline.gc_info.write().unwrap(); - match gc_cutoffs.remove(&timeline_id) { + match gc_cutoffs.remove(&timeline.timeline_id) { Some(cutoffs) => { *target = GcInfo { retain_lsns: branchpoints, diff --git a/pageserver/src/tenant/mgr.rs b/pageserver/src/tenant/mgr.rs index 89fdf31849..0bb1d750aa 100644 --- a/pageserver/src/tenant/mgr.rs +++ b/pageserver/src/tenant/mgr.rs @@ -45,7 +45,7 @@ use crate::tenant::delete::DeleteTenantFlow; use crate::tenant::span::debug_assert_current_span_has_tenant_id; use crate::tenant::storage_layer::inmemory_layer; use crate::tenant::timeline::ShutdownMode; -use crate::tenant::{AttachedTenantConf, SpawnMode, Tenant, TenantState}; +use crate::tenant::{AttachedTenantConf, GcError, SpawnMode, Tenant, TenantState}; use crate::{InitializationOrder, IGNORED_TENANT_FILE_NAME, TEMP_FILE_SUFFIX}; use utils::crashsafe::path_with_suffix_extension; @@ -2833,7 +2833,13 @@ pub(crate) async fn immediate_gc( } } - result.map_err(ApiError::InternalServerError) + result.map_err(|e| match e { + GcError::TenantCancelled | GcError::TimelineCancelled => ApiError::ShuttingDown, + GcError::TimelineNotFound => { + ApiError::NotFound(anyhow::anyhow!("Timeline not found").into()) + } + other => ApiError::InternalServerError(anyhow::anyhow!(other)), + }) } #[cfg(test)] diff --git a/pageserver/src/tenant/tasks.rs b/pageserver/src/tenant/tasks.rs index bf2d8a47b4..a6dfa84f35 100644 --- a/pageserver/src/tenant/tasks.rs +++ b/pageserver/src/tenant/tasks.rs @@ -380,21 +380,28 @@ async fn gc_loop(tenant: Arc, cancel: CancellationToken) { let res = tenant .gc_iteration(None, gc_horizon, tenant.get_pitr_interval(), &cancel, &ctx) .await; - if let Err(e) = res { - let wait_duration = backoff::exponential_backoff_duration_seconds( - error_run_count + 1, - 1.0, - MAX_BACKOFF_SECS, - ); - error_run_count += 1; - let wait_duration = Duration::from_secs_f64(wait_duration); - error!( + match res { + Ok(_) => { + error_run_count = 0; + period + } + Err(crate::tenant::GcError::TenantCancelled) => { + return; + } + Err(e) => { + let wait_duration = backoff::exponential_backoff_duration_seconds( + error_run_count + 1, + 1.0, + MAX_BACKOFF_SECS, + ); + error_run_count += 1; + let wait_duration = Duration::from_secs_f64(wait_duration); + + error!( "Gc failed {error_run_count} times, retrying in {wait_duration:?}: {e:?}", ); - wait_duration - } else { - error_run_count = 0; - period + wait_duration + } } }; diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 4a9d981ad8..9bf429972d 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -131,11 +131,14 @@ use self::layer_manager::LayerManager; use self::logical_size::LogicalSize; use self::walreceiver::{WalReceiver, WalReceiverConf}; -use 
super::secondary::heatmap::{HeatMapLayer, HeatMapTimeline}; use super::{config::TenantConf, storage_layer::VectoredValueReconstructState}; use super::{debug_assert_current_span_has_tenant_and_timeline_id, AttachedTenantConf}; use super::{remote_timeline_client::index::IndexPart, storage_layer::LayerFringe}; use super::{remote_timeline_client::RemoteTimelineClient, storage_layer::ReadableLayer}; +use super::{ + secondary::heatmap::{HeatMapLayer, HeatMapTimeline}, + GcError, +}; #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub(crate) enum FlushLoopState { @@ -4837,7 +4840,7 @@ impl Timeline { /// Currently, we don't make any attempt at removing unneeded page versions /// within a layer file. We can only remove the whole file if it's fully /// obsolete. - pub(super) async fn gc(&self) -> anyhow::Result { + pub(super) async fn gc(&self) -> Result { // this is most likely the background tasks, but it might be the spawned task from // immediate_gc let _g = tokio::select! { @@ -4850,7 +4853,7 @@ impl Timeline { // Is the timeline being deleted? if self.is_stopping() { - anyhow::bail!("timeline is Stopping"); + return Err(GcError::TimelineCancelled); } let (horizon_cutoff, pitr_cutoff, retain_lsns) = { @@ -4908,7 +4911,7 @@ impl Timeline { pitr_cutoff: Lsn, retain_lsns: Vec, new_gc_cutoff: Lsn, - ) -> anyhow::Result { + ) -> Result { // FIXME: if there is an ongoing detach_from_ancestor, we should just skip gc let now = SystemTime::now(); @@ -4930,12 +4933,15 @@ impl Timeline { // The GC cutoff should only ever move forwards. let waitlist = { let write_guard = self.latest_gc_cutoff_lsn.lock_for_write(); - ensure!( - *write_guard <= new_gc_cutoff, - "Cannot move GC cutoff LSN backwards (was {}, new {})", - *write_guard, - new_gc_cutoff - ); + if *write_guard > new_gc_cutoff { + return Err(GcError::BadLsn { + why: format!( + "Cannot move GC cutoff LSN backwards (was {}, new {})", + *write_guard, new_gc_cutoff + ), + }); + } + write_guard.store_and_unlock(new_gc_cutoff) }; waitlist.wait().await; @@ -5044,7 +5050,14 @@ impl Timeline { // This unconditionally schedules also an index_part.json update, even though, we will // be doing one a bit later with the unlinked gc'd layers. 
let disk_consistent_lsn = self.disk_consistent_lsn.load(); - self.schedule_uploads(disk_consistent_lsn, None)?; + self.schedule_uploads(disk_consistent_lsn, None) + .map_err(|e| { + if self.cancel.is_cancelled() { + GcError::TimelineCancelled + } else { + GcError::Remote(e) + } + })?; let gc_layers = layers_to_remove .iter() @@ -5053,7 +5066,15 @@ impl Timeline { result.layers_removed = gc_layers.len() as u64; - self.remote_client.schedule_gc_update(&gc_layers)?; + self.remote_client + .schedule_gc_update(&gc_layers) + .map_err(|e| { + if self.cancel.is_cancelled() { + GcError::TimelineCancelled + } else { + GcError::Remote(e) + } + })?; guard.finish_gc_timeline(&gc_layers); @@ -5068,7 +5089,7 @@ impl Timeline { result.layers_removed, new_gc_cutoff ); - result.elapsed = now.elapsed()?; + result.elapsed = now.elapsed().unwrap_or(Duration::ZERO); Ok(result) } diff --git a/test_runner/regress/test_tenant_detach.py b/test_runner/regress/test_tenant_detach.py index 12a4730e69..871351b2d5 100644 --- a/test_runner/regress/test_tenant_detach.py +++ b/test_runner/regress/test_tenant_detach.py @@ -302,7 +302,7 @@ def test_tenant_detach_smoke(neon_env_builder: NeonEnvBuilder): # gc should not try to even start on a timeline that doesn't exist with pytest.raises( - expected_exception=PageserverApiException, match="gc target timeline does not exist" + expected_exception=PageserverApiException, match="NotFound: Timeline not found" ): bogus_timeline_id = TimelineId.generate() pageserver_http.timeline_gc(tenant_id, bogus_timeline_id, 0) @@ -310,7 +310,7 @@ def test_tenant_detach_smoke(neon_env_builder: NeonEnvBuilder): env.pageserver.allowed_errors.extend( [ # the error will be printed to the log too - ".*gc target timeline does not exist.*", + ".*NotFound: Timeline not found.*", # Timelines get stopped during detach, ignore the gc calls that error, witnessing that ".*InternalServerError\\(timeline is Stopping.*", ] From e98bc4fd2ba3cb4a3fa6d00f98406fb0fdb916a8 Mon Sep 17 00:00:00 2001 From: Arthur Petukhovsky Date: Sat, 1 Jun 2024 00:18:56 +0100 Subject: [PATCH 117/220] Run gc on too many partial backup segments (#7700) The general partial backup idea is that each safekeeper keeps only one partial segment in remote storage at a time. Sometimes this is not true, for example if we uploaded object to S3 but got an error when tried to remove the previous upload. In this case we still keep a list of all potentially uploaded objects in safekeeper state. This commit prints a warning to logs if there is too many objects in safekeeper state. This is not expected and we should try to fix this state, we can do this by running gc. I haven't seen this being an issue anywhere, but printing a warning is something that I wanted to do and forgot in initial PR. 
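For illustration only, a minimal sketch of the guard described above, using hypothetical names rather than the real safekeeper API: when the number of tracked partial segments exceeds a small constant, log a warning and run gc to prune the state.

```rust
// Hypothetical stand-in for the safekeeper's partial-backup state.
const MAX_SIMULTANEOUS_SEGMENTS: usize = 10;

struct PartialBackup {
    segments: Vec<String>,
}

impl PartialBackup {
    fn gc(&mut self) {
        // The real code deletes stale remote objects; here we just prune state.
        self.segments.truncate(1);
    }

    fn maybe_gc(&mut self) {
        if self.segments.len() > MAX_SIMULTANEOUS_SEGMENTS {
            eprintln!(
                "too many segments in control_file state, running gc: {}",
                self.segments.len()
            );
            self.gc();
        }
    }
}

fn main() {
    let mut backup = PartialBackup {
        segments: (0..12).map(|i| format!("seg-{i}")).collect(),
    };
    backup.maybe_gc();
    assert_eq!(backup.segments.len(), 1);
}
```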
--- safekeeper/src/wal_backup_partial.rs | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/safekeeper/src/wal_backup_partial.rs b/safekeeper/src/wal_backup_partial.rs index a320be3bad..6c0f35095b 100644 --- a/safekeeper/src/wal_backup_partial.rs +++ b/safekeeper/src/wal_backup_partial.rs @@ -24,7 +24,7 @@ use rand::Rng; use remote_storage::RemotePath; use serde::{Deserialize, Serialize}; -use tracing::{debug, error, info, instrument}; +use tracing::{debug, error, info, instrument, warn}; use utils::lsn::Lsn; use crate::{ @@ -308,7 +308,23 @@ pub async fn main_task(tli: FullAccessTimeline, conf: SafeKeeperConf) { debug!("state: {:?}", backup.state); + // The general idea is that each safekeeper keeps only one partial segment + // both in remote storage and in local state. If this is not true, something + // went wrong. + const MAX_SIMULTANEOUS_SEGMENTS: usize = 10; + 'outer: loop { + if backup.state.segments.len() > MAX_SIMULTANEOUS_SEGMENTS { + warn!( + "too many segments in control_file state, running gc: {}", + backup.state.segments.len() + ); + + backup.gc().await.unwrap_or_else(|e| { + error!("failed to run gc: {:#}", e); + }); + } + // wait until we have something to upload let uploaded_segment = backup.state.uploaded_segment(); if let Some(seg) = &uploaded_segment { From a345cf3fc695282823a7cc2a8711213adb64ec3c Mon Sep 17 00:00:00 2001 From: Arthur Petukhovsky Date: Sat, 1 Jun 2024 12:23:59 +0100 Subject: [PATCH 118/220] Fix span for WAL removal task (#7930) During refactoring in https://github.com/neondatabase/neon/pull/7887 I forgot to add "WAL removal" span with ttid. This commit fixes it. --- safekeeper/src/timeline_manager.rs | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/safekeeper/src/timeline_manager.rs b/safekeeper/src/timeline_manager.rs index 84862207d5..7174d843fc 100644 --- a/safekeeper/src/timeline_manager.rs +++ b/safekeeper/src/timeline_manager.rs @@ -10,7 +10,7 @@ use std::{ use postgres_ffi::XLogSegNo; use tokio::task::{JoinError, JoinHandle}; -use tracing::{info, instrument, warn}; +use tracing::{info, info_span, instrument, warn, Instrument}; use utils::lsn::Lsn; use crate::{ @@ -346,10 +346,13 @@ async fn update_wal_removal( &tli.read_shared_state().await.sk.wal_store, removal_horizon_segno, ); - *wal_removal_task = Some(tokio::spawn(async move { - remover.await?; - Ok(removal_horizon_segno) - })); + *wal_removal_task = Some(tokio::spawn( + async move { + remover.await?; + Ok(removal_horizon_segno) + } + .instrument(info_span!("WAL removal", ttid=%tli.ttid)), + )); } } From db477c0b8c59081207c1ba7fc6e599b741a0b717 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arpad=20M=C3=BCller?= Date: Sun, 2 Jun 2024 16:10:56 +0200 Subject: [PATCH 119/220] Add metrics for Azure blob storage (#7933) In issue #5590 it was proposed to implement metrics for Azure blob storage. This PR implements them except for the part that performs the rename, which is left for a followup. 
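The instrumentation below follows a guard-based pattern: a timer starts before each request, is defused on completion with the observed outcome, and, if the request future is dropped mid-flight, the elapsed time is still recorded under a cancelled outcome. A standard-library-only sketch of that idea follows; the real code uses the `scopeguard` crate and prometheus histograms, and `RequestTimer`/`record` here are made-up stand-ins.

```rust
use std::time::Instant;

// Minimal stand-in for the guard pattern: if the timer is dropped without being
// defused (e.g. the request future was cancelled), it still records the elapsed
// time, attributed to a "cancelled" outcome.
struct RequestTimer {
    started_at: Instant,
    armed: bool,
}

impl RequestTimer {
    fn start() -> Self {
        RequestTimer { started_at: Instant::now(), armed: true }
    }

    // On normal completion: defuse the guard and record the real outcome.
    fn finish(mut self, outcome: &str) {
        self.armed = false;
        record(outcome, self.started_at.elapsed().as_secs_f64());
    }
}

impl Drop for RequestTimer {
    fn drop(&mut self) {
        if self.armed {
            record("cancelled", self.started_at.elapsed().as_secs_f64());
        }
    }
}

// Stand-in for the histogram the real code observes into.
fn record(outcome: &str, seconds: f64) {
    println!("req_seconds{{outcome={outcome}}} += {seconds:.6}");
}

fn main() {
    let timer = RequestTimer::start();
    // ... issue the storage request here ...
    timer.finish("ok");

    // A guard that is simply dropped counts as a cancelled attempt.
    let _dropped = RequestTimer::start();
}
```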
Closes #5590 --- libs/remote_storage/src/azure_blob.rs | 85 ++++++++++++++----- libs/remote_storage/src/lib.rs | 1 + .../src/{s3_bucket => }/metrics.rs | 52 +++++++++--- libs/remote_storage/src/s3_bucket.rs | 65 +++++--------- 4 files changed, 125 insertions(+), 78 deletions(-) rename libs/remote_storage/src/{s3_bucket => }/metrics.rs (76%) diff --git a/libs/remote_storage/src/azure_blob.rs b/libs/remote_storage/src/azure_blob.rs index 24c1248304..aca22c6b3e 100644 --- a/libs/remote_storage/src/azure_blob.rs +++ b/libs/remote_storage/src/azure_blob.rs @@ -26,13 +26,14 @@ use futures::stream::Stream; use futures_util::StreamExt; use futures_util::TryStreamExt; use http_types::{StatusCode, Url}; +use scopeguard::ScopeGuard; use tokio_util::sync::CancellationToken; use tracing::debug; +use crate::metrics::{start_measuring_requests, AttemptOutcome, RequestKind}; use crate::{ - error::Cancelled, s3_bucket::RequestKind, AzureConfig, ConcurrencyLimiter, Download, - DownloadError, Listing, ListingMode, RemotePath, RemoteStorage, StorageMetadata, - TimeTravelError, TimeoutOrCancel, + error::Cancelled, AzureConfig, ConcurrencyLimiter, Download, DownloadError, Listing, + ListingMode, RemotePath, RemoteStorage, StorageMetadata, TimeTravelError, TimeoutOrCancel, }; pub struct AzureBlobStorage { @@ -137,6 +138,8 @@ impl AzureBlobStorage { let mut last_modified = None; let mut metadata = HashMap::new(); + let started_at = start_measuring_requests(kind); + let download = async { let response = builder // convert to concrete Pageable @@ -200,13 +203,22 @@ impl AzureBlobStorage { }) }; - tokio::select! { + let download = tokio::select! { bufs = download => bufs, cancel_or_timeout = cancel_or_timeout => match cancel_or_timeout { - TimeoutOrCancel::Timeout => Err(DownloadError::Timeout), - TimeoutOrCancel::Cancel => Err(DownloadError::Cancelled), + TimeoutOrCancel::Timeout => return Err(DownloadError::Timeout), + TimeoutOrCancel::Cancel => return Err(DownloadError::Cancelled), }, - } + }; + let started_at = ScopeGuard::into_inner(started_at); + let outcome = match &download { + Ok(_) => AttemptOutcome::Ok, + Err(_) => AttemptOutcome::Err, + }; + crate::metrics::BUCKET_METRICS + .req_seconds + .observe_elapsed(kind, outcome, started_at); + download } async fn permit( @@ -340,7 +352,10 @@ impl RemoteStorage for AzureBlobStorage { metadata: Option, cancel: &CancellationToken, ) -> anyhow::Result<()> { - let _permit = self.permit(RequestKind::Put, cancel).await?; + let kind = RequestKind::Put; + let _permit = self.permit(kind, cancel).await?; + + let started_at = start_measuring_requests(kind); let op = async { let blob_client = self.client.blob_client(self.relative_path_to_name(to)); @@ -364,14 +379,25 @@ impl RemoteStorage for AzureBlobStorage { match fut.await { Ok(Ok(_response)) => Ok(()), Ok(Err(azure)) => Err(azure.into()), - Err(_timeout) => Err(TimeoutOrCancel::Cancel.into()), + Err(_timeout) => Err(TimeoutOrCancel::Timeout.into()), } }; - tokio::select! { + let res = tokio::select! 
{ res = op => res, - _ = cancel.cancelled() => Err(TimeoutOrCancel::Cancel.into()), - } + _ = cancel.cancelled() => return Err(TimeoutOrCancel::Cancel.into()), + }; + + let outcome = match res { + Ok(_) => AttemptOutcome::Ok, + Err(_) => AttemptOutcome::Err, + }; + let started_at = ScopeGuard::into_inner(started_at); + crate::metrics::BUCKET_METRICS + .req_seconds + .observe_elapsed(kind, outcome, started_at); + + res } async fn download( @@ -417,12 +443,13 @@ impl RemoteStorage for AzureBlobStorage { paths: &'a [RemotePath], cancel: &CancellationToken, ) -> anyhow::Result<()> { - let _permit = self.permit(RequestKind::Delete, cancel).await?; + let kind = RequestKind::Delete; + let _permit = self.permit(kind, cancel).await?; + let started_at = start_measuring_requests(kind); let op = async { - // TODO batch requests are also not supported by the SDK + // TODO batch requests are not supported by the SDK // https://github.com/Azure/azure-sdk-for-rust/issues/1068 - // https://github.com/Azure/azure-sdk-for-rust/issues/1249 for path in paths { let blob_client = self.client.blob_client(self.relative_path_to_name(path)); @@ -447,10 +474,16 @@ impl RemoteStorage for AzureBlobStorage { Ok(()) }; - tokio::select! { + let res = tokio::select! { res = op => res, - _ = cancel.cancelled() => Err(TimeoutOrCancel::Cancel.into()), - } + _ = cancel.cancelled() => return Err(TimeoutOrCancel::Cancel.into()), + }; + + let started_at = ScopeGuard::into_inner(started_at); + crate::metrics::BUCKET_METRICS + .req_seconds + .observe_elapsed(kind, &res, started_at); + res } async fn copy( @@ -459,7 +492,9 @@ impl RemoteStorage for AzureBlobStorage { to: &RemotePath, cancel: &CancellationToken, ) -> anyhow::Result<()> { - let _permit = self.permit(RequestKind::Copy, cancel).await?; + let kind = RequestKind::Copy; + let _permit = self.permit(kind, cancel).await?; + let started_at = start_measuring_requests(kind); let timeout = tokio::time::sleep(self.timeout); @@ -503,15 +538,21 @@ impl RemoteStorage for AzureBlobStorage { } }; - tokio::select! { + let res = tokio::select! 
{ res = op => res, - _ = cancel.cancelled() => Err(anyhow::Error::new(TimeoutOrCancel::Cancel)), + _ = cancel.cancelled() => return Err(anyhow::Error::new(TimeoutOrCancel::Cancel)), _ = timeout => { let e = anyhow::Error::new(TimeoutOrCancel::Timeout); let e = e.context(format!("Timeout, last status: {copy_status:?}")); Err(e) }, - } + }; + + let started_at = ScopeGuard::into_inner(started_at); + crate::metrics::BUCKET_METRICS + .req_seconds + .observe_elapsed(kind, &res, started_at); + res } async fn time_travel_recover( diff --git a/libs/remote_storage/src/lib.rs b/libs/remote_storage/src/lib.rs index cb3df0985d..8c984abed2 100644 --- a/libs/remote_storage/src/lib.rs +++ b/libs/remote_storage/src/lib.rs @@ -12,6 +12,7 @@ mod azure_blob; mod error; mod local_fs; +mod metrics; mod s3_bucket; mod simulate_failures; mod support; diff --git a/libs/remote_storage/src/s3_bucket/metrics.rs b/libs/remote_storage/src/metrics.rs similarity index 76% rename from libs/remote_storage/src/s3_bucket/metrics.rs rename to libs/remote_storage/src/metrics.rs index beca755920..bbb51590f3 100644 --- a/libs/remote_storage/src/s3_bucket/metrics.rs +++ b/libs/remote_storage/src/metrics.rs @@ -15,6 +15,7 @@ pub(crate) enum RequestKind { TimeTravel = 5, } +use scopeguard::ScopeGuard; use RequestKind::*; impl RequestKind { @@ -33,10 +34,10 @@ impl RequestKind { } } -pub(super) struct RequestTyped([C; 6]); +pub(crate) struct RequestTyped([C; 6]); impl RequestTyped { - pub(super) fn get(&self, kind: RequestKind) -> &C { + pub(crate) fn get(&self, kind: RequestKind) -> &C { &self.0[kind.as_index()] } @@ -58,19 +59,19 @@ impl RequestTyped { } impl RequestTyped { - pub(super) fn observe_elapsed(&self, kind: RequestKind, started_at: std::time::Instant) { + pub(crate) fn observe_elapsed(&self, kind: RequestKind, started_at: std::time::Instant) { self.get(kind).observe(started_at.elapsed().as_secs_f64()) } } -pub(super) struct PassFailCancelledRequestTyped { +pub(crate) struct PassFailCancelledRequestTyped { success: RequestTyped, fail: RequestTyped, cancelled: RequestTyped, } #[derive(Debug, Clone, Copy)] -pub(super) enum AttemptOutcome { +pub(crate) enum AttemptOutcome { Ok, Err, Cancelled, @@ -86,7 +87,7 @@ impl From<&Result> for AttemptOutcome { } impl AttemptOutcome { - pub(super) fn as_str(&self) -> &'static str { + pub(crate) fn as_str(&self) -> &'static str { match self { AttemptOutcome::Ok => "ok", AttemptOutcome::Err => "err", @@ -96,7 +97,7 @@ impl AttemptOutcome { } impl PassFailCancelledRequestTyped { - pub(super) fn get(&self, kind: RequestKind, outcome: AttemptOutcome) -> &C { + pub(crate) fn get(&self, kind: RequestKind, outcome: AttemptOutcome) -> &C { let target = match outcome { AttemptOutcome::Ok => &self.success, AttemptOutcome::Err => &self.fail, @@ -119,7 +120,7 @@ impl PassFailCancelledRequestTyped { } impl PassFailCancelledRequestTyped { - pub(super) fn observe_elapsed( + pub(crate) fn observe_elapsed( &self, kind: RequestKind, outcome: impl Into, @@ -130,19 +131,44 @@ impl PassFailCancelledRequestTyped { } } -pub(super) struct BucketMetrics { +/// On drop (cancellation) count towards [`BucketMetrics::cancelled_waits`]. +pub(crate) fn start_counting_cancelled_wait( + kind: RequestKind, +) -> ScopeGuard { + scopeguard::guard_on_success(std::time::Instant::now(), move |_| { + crate::metrics::BUCKET_METRICS + .cancelled_waits + .get(kind) + .inc() + }) +} + +/// On drop (cancellation) add time to [`BucketMetrics::req_seconds`]. 
+pub(crate) fn start_measuring_requests( + kind: RequestKind, +) -> ScopeGuard { + scopeguard::guard_on_success(std::time::Instant::now(), move |started_at| { + crate::metrics::BUCKET_METRICS.req_seconds.observe_elapsed( + kind, + AttemptOutcome::Cancelled, + started_at, + ) + }) +} + +pub(crate) struct BucketMetrics { /// Full request duration until successful completion, error or cancellation. - pub(super) req_seconds: PassFailCancelledRequestTyped, + pub(crate) req_seconds: PassFailCancelledRequestTyped, /// Total amount of seconds waited on queue. - pub(super) wait_seconds: RequestTyped, + pub(crate) wait_seconds: RequestTyped, /// Track how many semaphore awaits were cancelled per request type. /// /// This is in case cancellations are happening more than expected. - pub(super) cancelled_waits: RequestTyped, + pub(crate) cancelled_waits: RequestTyped, /// Total amount of deleted objects in batches or single requests. - pub(super) deleted_objects_total: IntCounter, + pub(crate) deleted_objects_total: IntCounter, } impl Default for BucketMetrics { diff --git a/libs/remote_storage/src/s3_bucket.rs b/libs/remote_storage/src/s3_bucket.rs index c3d6c75e20..76cf3eac80 100644 --- a/libs/remote_storage/src/s3_bucket.rs +++ b/libs/remote_storage/src/s3_bucket.rs @@ -46,15 +46,16 @@ use utils::backoff; use super::StorageMetadata; use crate::{ - error::Cancelled, support::PermitCarrying, ConcurrencyLimiter, Download, DownloadError, - Listing, ListingMode, RemotePath, RemoteStorage, S3Config, TimeTravelError, TimeoutOrCancel, - MAX_KEYS_PER_DELETE, REMOTE_STORAGE_PREFIX_SEPARATOR, + error::Cancelled, + metrics::{start_counting_cancelled_wait, start_measuring_requests}, + support::PermitCarrying, + ConcurrencyLimiter, Download, DownloadError, Listing, ListingMode, RemotePath, RemoteStorage, + S3Config, TimeTravelError, TimeoutOrCancel, MAX_KEYS_PER_DELETE, + REMOTE_STORAGE_PREFIX_SEPARATOR, }; -pub(super) mod metrics; - -use self::metrics::AttemptOutcome; -pub(super) use self::metrics::RequestKind; +use crate::metrics::AttemptOutcome; +pub(super) use crate::metrics::RequestKind; /// AWS S3 storage. pub struct S3Bucket { @@ -227,7 +228,7 @@ impl S3Bucket { }; let started_at = ScopeGuard::into_inner(started_at); - metrics::BUCKET_METRICS + crate::metrics::BUCKET_METRICS .wait_seconds .observe_elapsed(kind, started_at); @@ -248,7 +249,7 @@ impl S3Bucket { }; let started_at = ScopeGuard::into_inner(started_at); - metrics::BUCKET_METRICS + crate::metrics::BUCKET_METRICS .wait_seconds .observe_elapsed(kind, started_at); Ok(permit) @@ -287,7 +288,7 @@ impl S3Bucket { // Count this in the AttemptOutcome::Ok bucket, because 404 is not // an error: we expect to sometimes fetch an object and find it missing, // e.g. when probing for timeline indices. 
- metrics::BUCKET_METRICS.req_seconds.observe_elapsed( + crate::metrics::BUCKET_METRICS.req_seconds.observe_elapsed( kind, AttemptOutcome::Ok, started_at, @@ -295,7 +296,7 @@ impl S3Bucket { return Err(DownloadError::NotFound); } Err(e) => { - metrics::BUCKET_METRICS.req_seconds.observe_elapsed( + crate::metrics::BUCKET_METRICS.req_seconds.observe_elapsed( kind, AttemptOutcome::Err, started_at, @@ -371,12 +372,12 @@ impl S3Bucket { }; let started_at = ScopeGuard::into_inner(started_at); - metrics::BUCKET_METRICS + crate::metrics::BUCKET_METRICS .req_seconds .observe_elapsed(kind, &resp, started_at); let resp = resp.context("request deletion")?; - metrics::BUCKET_METRICS + crate::metrics::BUCKET_METRICS .deleted_objects_total .inc_by(chunk.len() as u64); @@ -435,14 +436,14 @@ pin_project_lite::pin_project! { /// Times and tracks the outcome of the request. struct TimedDownload { started_at: std::time::Instant, - outcome: metrics::AttemptOutcome, + outcome: AttemptOutcome, #[pin] inner: S } impl PinnedDrop for TimedDownload { fn drop(mut this: Pin<&mut Self>) { - metrics::BUCKET_METRICS.req_seconds.observe_elapsed(RequestKind::Get, this.outcome, this.started_at); + crate::metrics::BUCKET_METRICS.req_seconds.observe_elapsed(RequestKind::Get, this.outcome, this.started_at); } } } @@ -451,7 +452,7 @@ impl TimedDownload { fn new(started_at: std::time::Instant, inner: S) -> Self { TimedDownload { started_at, - outcome: metrics::AttemptOutcome::Cancelled, + outcome: AttemptOutcome::Cancelled, inner, } } @@ -468,8 +469,8 @@ impl>> Stream for TimedDownload { let res = ready!(this.inner.poll_next(cx)); match &res { Some(Ok(_)) => {} - Some(Err(_)) => *this.outcome = metrics::AttemptOutcome::Err, - None => *this.outcome = metrics::AttemptOutcome::Ok, + Some(Err(_)) => *this.outcome = AttemptOutcome::Err, + None => *this.outcome = AttemptOutcome::Ok, } Poll::Ready(res) @@ -543,7 +544,7 @@ impl RemoteStorage for S3Bucket { let started_at = ScopeGuard::into_inner(started_at); - metrics::BUCKET_METRICS + crate::metrics::BUCKET_METRICS .req_seconds .observe_elapsed(kind, &response, started_at); @@ -625,7 +626,7 @@ impl RemoteStorage for S3Bucket { if let Ok(inner) = &res { // do not incl. timeouts as errors in metrics but cancellations let started_at = ScopeGuard::into_inner(started_at); - metrics::BUCKET_METRICS + crate::metrics::BUCKET_METRICS .req_seconds .observe_elapsed(kind, inner, started_at); } @@ -673,7 +674,7 @@ impl RemoteStorage for S3Bucket { }; let started_at = ScopeGuard::into_inner(started_at); - metrics::BUCKET_METRICS + crate::metrics::BUCKET_METRICS .req_seconds .observe_elapsed(kind, &res, started_at); @@ -977,28 +978,6 @@ impl RemoteStorage for S3Bucket { } } -/// On drop (cancellation) count towards [`metrics::BucketMetrics::cancelled_waits`]. -fn start_counting_cancelled_wait( - kind: RequestKind, -) -> ScopeGuard { - scopeguard::guard_on_success(std::time::Instant::now(), move |_| { - metrics::BUCKET_METRICS.cancelled_waits.get(kind).inc() - }) -} - -/// On drop (cancellation) add time to [`metrics::BucketMetrics::req_seconds`]. 
-fn start_measuring_requests( - kind: RequestKind, -) -> ScopeGuard { - scopeguard::guard_on_success(std::time::Instant::now(), move |started_at| { - metrics::BUCKET_METRICS.req_seconds.observe_elapsed( - kind, - AttemptOutcome::Cancelled, - started_at, - ) - }) -} - // Save RAM and only store the needed data instead of the entire ObjectVersion/DeleteMarkerEntry struct VerOrDelete { kind: VerOrDeleteKind, From 34f450c05ae92d3bbc840310838ee70f57000b38 Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Mon, 3 Jun 2024 16:37:11 +0300 Subject: [PATCH 120/220] test: allow no vectored gets happening (#7939) when running the regress tests locally without any environment variables we use on CI, `test_pageserver_compaction_smoke` fails with division by zero. fix it temporarily by allowing no vectored read happening. to be cleaned when vectored get validation gets removed and the default value can be changed. Cc: https://github.com/neondatabase/neon/issues/7381 --- test_runner/regress/test_compaction.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/test_runner/regress/test_compaction.py b/test_runner/regress/test_compaction.py index 4850a5c688..9772e2d106 100644 --- a/test_runner/regress/test_compaction.py +++ b/test_runner/regress/test_compaction.py @@ -85,7 +85,13 @@ page_cache_size=10 vectored_sum = metrics.query_one("pageserver_layers_visited_per_vectored_read_global_sum") vectored_count = metrics.query_one("pageserver_layers_visited_per_vectored_read_global_count") - vectored_average = vectored_sum.value / vectored_count.value + if vectored_count.value > 0: + assert vectored_sum.value > 0 + vectored_average = vectored_sum.value / vectored_count.value + else: + # special case: running local tests with default legacy configuration + assert vectored_sum.value == 0 + vectored_average = 0 log.info(f"{non_vectored_average=} {vectored_average=}") From c1f55c1525e64a3260635fd63e1abb0efcfd2147 Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Mon, 3 Jun 2024 09:56:36 -0400 Subject: [PATCH 121/220] feat(pageserver): collect aux file tombstones (#7900) close https://github.com/neondatabase/neon/issues/7800 This is a small change to enable the tombstone -> exclude from image layer path. Most of the pull request is unit tests. 
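A hedged sketch of that path, with illustrative types rather than the real pageserver structures: in the metadata keyspace an empty value acts as a tombstone, so image-layer creation simply filters those entries out instead of materializing them.

```rust
use std::collections::BTreeMap;

// Illustrative only: keep the live values and drop tombstones when building an
// image layer for the metadata keyspace (an empty image == tombstone).
fn keys_for_image_layer(data: BTreeMap<String, Vec<u8>>) -> Vec<(String, Vec<u8>)> {
    data.into_iter()
        .filter(|(_key, value)| !value.is_empty()) // skip deleted keys
        .collect()
}

fn main() {
    let mut data = BTreeMap::new();
    data.insert("key1".to_string(), Vec::new());          // deleted (tombstone)
    data.insert("key2".to_string(), b"payload".to_vec()); // live value
    let image = keys_for_image_layer(data);
    assert_eq!(image.len(), 1);
    assert_eq!(image[0].0, "key2");
}
```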
--------- Signed-off-by: Alex Chi Z --- pageserver/src/tenant.rs | 204 ++++++++++++++++++++++++++++++ pageserver/src/tenant/timeline.rs | 101 +++++++++++---- 2 files changed, 278 insertions(+), 27 deletions(-) diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index eff9c742c1..7ca829535b 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -6469,4 +6469,208 @@ mod tests { Ok(()) } + + async fn get_vectored_impl_wrapper( + tline: &Arc, + key: Key, + lsn: Lsn, + ctx: &RequestContext, + ) -> Result, GetVectoredError> { + let mut reconstruct_state = ValuesReconstructState::new(); + let mut res = tline + .get_vectored_impl( + KeySpace::single(key..key.next()), + lsn, + &mut reconstruct_state, + ctx, + ) + .await?; + Ok(res.pop_last().map(|(k, v)| { + assert_eq!(k, key); + v.unwrap() + })) + } + + #[tokio::test] + async fn test_metadata_tombstone_reads() -> anyhow::Result<()> { + let harness = TenantHarness::create("test_metadata_tombstone_reads")?; + let (tenant, ctx) = harness.load().await; + let key0 = Key::from_hex("620000000033333333444444445500000000").unwrap(); + let key1 = Key::from_hex("620000000033333333444444445500000001").unwrap(); + let key2 = Key::from_hex("620000000033333333444444445500000002").unwrap(); + let key3 = Key::from_hex("620000000033333333444444445500000003").unwrap(); + + // We emulate the situation that the compaction algorithm creates an image layer that removes the tombstones + // Lsn 0x30 key0, key3, no key1+key2 + // Lsn 0x20 key1+key2 tomestones + // Lsn 0x10 key1 in image, key2 in delta + let tline = tenant + .create_test_timeline_with_layers( + TIMELINE_ID, + Lsn(0x10), + DEFAULT_PG_VERSION, + &ctx, + // delta layers + vec![ + vec![(key2, Lsn(0x10), Value::Image(test_img("metadata key 2")))], + vec![(key1, Lsn(0x20), Value::Image(Bytes::new()))], + vec![(key2, Lsn(0x20), Value::Image(Bytes::new()))], + ], + // image layers + vec![ + (Lsn(0x10), vec![(key1, test_img("metadata key 1"))]), + ( + Lsn(0x30), + vec![ + (key0, test_img("metadata key 0")), + (key3, test_img("metadata key 3")), + ], + ), + ], + Lsn(0x30), + ) + .await?; + + let lsn = Lsn(0x30); + let old_lsn = Lsn(0x20); + + assert_eq!( + get_vectored_impl_wrapper(&tline, key0, lsn, &ctx).await?, + Some(test_img("metadata key 0")) + ); + assert_eq!( + get_vectored_impl_wrapper(&tline, key1, lsn, &ctx).await?, + None, + ); + assert_eq!( + get_vectored_impl_wrapper(&tline, key2, lsn, &ctx).await?, + None, + ); + assert_eq!( + get_vectored_impl_wrapper(&tline, key1, old_lsn, &ctx).await?, + Some(Bytes::new()), + ); + assert_eq!( + get_vectored_impl_wrapper(&tline, key2, old_lsn, &ctx).await?, + Some(Bytes::new()), + ); + assert_eq!( + get_vectored_impl_wrapper(&tline, key3, lsn, &ctx).await?, + Some(test_img("metadata key 3")) + ); + + Ok(()) + } + + #[tokio::test] + async fn test_metadata_tombstone_image_creation() -> anyhow::Result<()> { + let harness = TenantHarness::create("test_metadata_tombstone_image_creation")?; + let (tenant, ctx) = harness.load().await; + + let key0 = Key::from_hex("620000000033333333444444445500000000").unwrap(); + let key1 = Key::from_hex("620000000033333333444444445500000001").unwrap(); + let key2 = Key::from_hex("620000000033333333444444445500000002").unwrap(); + let key3 = Key::from_hex("620000000033333333444444445500000003").unwrap(); + + let tline = tenant + .create_test_timeline_with_layers( + TIMELINE_ID, + Lsn(0x10), + DEFAULT_PG_VERSION, + &ctx, + // delta layers + vec![ + vec![(key2, Lsn(0x10), Value::Image(test_img("metadata key 
2")))], + vec![(key1, Lsn(0x20), Value::Image(Bytes::new()))], + vec![(key2, Lsn(0x20), Value::Image(Bytes::new()))], + vec![ + (key0, Lsn(0x30), Value::Image(test_img("metadata key 0"))), + (key3, Lsn(0x30), Value::Image(test_img("metadata key 3"))), + ], + ], + // image layers + vec![(Lsn(0x10), vec![(key1, test_img("metadata key 1"))])], + Lsn(0x30), + ) + .await?; + + let cancel = CancellationToken::new(); + + tline + .compact( + &cancel, + { + let mut flags = EnumSet::new(); + flags.insert(CompactFlags::ForceImageLayerCreation); + flags.insert(CompactFlags::ForceRepartition); + flags + }, + &ctx, + ) + .await?; + + // Image layers are created at last_record_lsn + let images = tline + .inspect_image_layers(Lsn(0x30), &ctx) + .await? + .into_iter() + .filter(|(k, _)| k.is_metadata_key()) + .collect::>(); + assert_eq!(images.len(), 2); // the image layer should only contain two existing keys, tombstones should be removed. + + Ok(()) + } + + #[tokio::test] + async fn test_metadata_tombstone_empty_image_creation() -> anyhow::Result<()> { + let harness = TenantHarness::create("test_metadata_tombstone_image_creation")?; + let (tenant, ctx) = harness.load().await; + + let key1 = Key::from_hex("620000000033333333444444445500000001").unwrap(); + let key2 = Key::from_hex("620000000033333333444444445500000002").unwrap(); + + let tline = tenant + .create_test_timeline_with_layers( + TIMELINE_ID, + Lsn(0x10), + DEFAULT_PG_VERSION, + &ctx, + // delta layers + vec![ + vec![(key2, Lsn(0x10), Value::Image(test_img("metadata key 2")))], + vec![(key1, Lsn(0x20), Value::Image(Bytes::new()))], + vec![(key2, Lsn(0x20), Value::Image(Bytes::new()))], + ], + // image layers + vec![(Lsn(0x10), vec![(key1, test_img("metadata key 1"))])], + Lsn(0x30), + ) + .await?; + + let cancel = CancellationToken::new(); + + tline + .compact( + &cancel, + { + let mut flags = EnumSet::new(); + flags.insert(CompactFlags::ForceImageLayerCreation); + flags.insert(CompactFlags::ForceRepartition); + flags + }, + &ctx, + ) + .await?; + + // Image layers are created at last_record_lsn + let images = tline + .inspect_image_layers(Lsn(0x30), &ctx) + .await? 
+ .into_iter() + .filter(|(k, _)| k.is_metadata_key()) + .collect::>(); + assert_eq!(images.len(), 0); // the image layer should not contain tombstones, or it is not created + + Ok(()) + } } diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 9bf429972d..fb1f55f5e3 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -4312,6 +4312,7 @@ impl Timeline { ctx: &RequestContext, img_range: Range, mode: ImageLayerCreationMode, + start: Key, ) -> Result { assert!(!matches!(mode, ImageLayerCreationMode::Initial)); @@ -4320,39 +4321,43 @@ impl Timeline { let data = self .get_vectored_impl(partition.clone(), lsn, &mut reconstruct_state, ctx) .await?; - let (data, total_kb_retrieved, total_key_retrieved) = { + let (data, total_kb_retrieved, total_keys_retrieved) = { let mut new_data = BTreeMap::new(); let mut total_kb_retrieved = 0; - let mut total_key_retrieved = 0; + let mut total_keys_retrieved = 0; for (k, v) in data { let v = v.map_err(CreateImageLayersError::PageReconstructError)?; total_kb_retrieved += KEY_SIZE + v.len(); - total_key_retrieved += 1; + total_keys_retrieved += 1; new_data.insert(k, v); } - (new_data, total_kb_retrieved / 1024, total_key_retrieved) + (new_data, total_kb_retrieved / 1024, total_keys_retrieved) }; - let delta_file_accessed = reconstruct_state.get_delta_layers_visited(); + let delta_files_accessed = reconstruct_state.get_delta_layers_visited(); - let trigger_generation = delta_file_accessed as usize >= MAX_AUX_FILE_V2_DELTAS; + let trigger_generation = delta_files_accessed as usize >= MAX_AUX_FILE_V2_DELTAS; debug!( - "generate image layers for metadata keys: trigger_generation={trigger_generation}, \ - delta_file_accessed={delta_file_accessed}, total_kb_retrieved={total_kb_retrieved}, \ - total_key_retrieved={total_key_retrieved}" + trigger_generation, + delta_files_accessed, + total_kb_retrieved, + total_keys_retrieved, + "generate metadata images" ); + if !trigger_generation && mode == ImageLayerCreationMode::Try { return Ok(ImageLayerCreationOutcome { image: None, next_start_key: img_range.end, }); } - let has_keys = !data.is_empty(); + let mut wrote_any_image = false; for (k, v) in data { - // Even if the value is empty (deleted), we do not delete it for now until we can ensure vectored get - // considers this situation properly. - // if v.is_empty() { - // continue; - // } + if v.is_empty() { + // the key has been deleted, it does not need an image + // in metadata keyspace, an empty image == tombstone + continue; + } + wrote_any_image = true; // No need to handle sharding b/c metadata keys are always on the 0-th shard. @@ -4360,16 +4365,26 @@ impl Timeline { // on the normal data path either. image_layer_writer.put_image(k, v, ctx).await?; } - Ok(ImageLayerCreationOutcome { - image: if has_keys { - let image_layer = image_layer_writer.finish(self, ctx).await?; - Some(image_layer) - } else { - tracing::debug!("no data in range {}-{}", img_range.start, img_range.end); - None - }, - next_start_key: img_range.end, - }) + + if wrote_any_image { + // Normal path: we have written some data into the new image layer for this + // partition, so flush it to disk. + let image_layer = image_layer_writer.finish(self, ctx).await?; + Ok(ImageLayerCreationOutcome { + image: Some(image_layer), + next_start_key: img_range.end, + }) + } else { + // Special case: the image layer may be empty if this is a sharded tenant and the + // partition does not cover any keys owned by this shard. 
In this case, to ensure + // we don't leave gaps between image layers, leave `start` where it is, so that the next + // layer we write will cover the key range that we just scanned. + tracing::debug!("no data in range {}-{}", img_range.start, img_range.end); + Ok(ImageLayerCreationOutcome { + image: None, + next_start_key: start, + }) + } } #[tracing::instrument(skip_all, fields(%lsn, %mode))] @@ -4479,6 +4494,7 @@ impl Timeline { ctx, img_range, mode, + start, ) .await?; start = next_start_key; @@ -5448,11 +5464,12 @@ impl Timeline { let min_key = *deltas.first().map(|(k, _, _)| k).unwrap(); let max_key = deltas.last().map(|(k, _, _)| k).unwrap().next(); let min_lsn = *deltas.iter().map(|(_, lsn, _)| lsn).min().unwrap(); - let max_lsn = Lsn(deltas.iter().map(|(_, lsn, _)| lsn).max().unwrap().0 + 1); + let max_lsn = *deltas.iter().map(|(_, lsn, _)| lsn).max().unwrap(); assert!( max_lsn <= last_record_lsn, "advance last record lsn before inserting a layer, max_lsn={max_lsn}, last_record_lsn={last_record_lsn}" ); + let end_lsn = Lsn(max_lsn.0 + 1); if let Some(check_start_lsn) = check_start_lsn { assert!(min_lsn >= check_start_lsn); } @@ -5461,7 +5478,7 @@ impl Timeline { self.timeline_id, self.tenant_shard_id, min_key, - min_lsn..max_lsn, + min_lsn..end_lsn, ctx, ) .await?; @@ -5477,6 +5494,36 @@ impl Timeline { Ok(()) } + + /// Return all keys at the LSN in the image layers + #[cfg(test)] + pub(crate) async fn inspect_image_layers( + self: &Arc, + lsn: Lsn, + ctx: &RequestContext, + ) -> anyhow::Result> { + let mut all_data = Vec::new(); + let guard = self.layers.read().await; + for layer in guard.layer_map().iter_historic_layers() { + if !layer.is_delta() && layer.image_layer_lsn() == lsn { + let layer = guard.get_from_desc(&layer); + let mut reconstruct_data = ValuesReconstructState::default(); + layer + .get_values_reconstruct_data( + KeySpace::single(Key::MIN..Key::MAX), + lsn..Lsn(lsn.0 + 1), + &mut reconstruct_data, + ctx, + ) + .await?; + for (k, v) in reconstruct_data.keys { + all_data.push((k, v?.img.unwrap().1)); + } + } + } + all_data.sort(); + Ok(all_data) + } } type TraversalPathItem = (ValueReconstructResult, Lsn, TraversalId); From acf0a11feafb579c3c5f892ddb74ec9477571ceb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arpad=20M=C3=BCller?= Date: Mon, 3 Jun 2024 16:18:07 +0200 Subject: [PATCH 122/220] Move keyspace utils to inherent impls (#7929) The keyspace utils like `is_rel_size_key` or `is_rel_fsm_block_key` and many others are free functions and have to be either imported separately or specified with the full path starting in `pageserver_api::key::`. This is less convenient than if these functions were just inherent impls. 
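For example, the change follows the usual free-function-to-inherent-method pattern; a small standalone sketch (with a stripped-down `Key`, not the real one) of what changes at the call site:

```rust
// Stripped-down stand-in for pageserver_api::key::Key.
struct Key {
    field1: u8,
    field6: u32,
}

// Before: a free function that has to be imported separately.
fn is_rel_size_key(key: &Key) -> bool {
    key.field1 == 0 && key.field6 == u32::MAX
}

impl Key {
    // After: the same predicate as an inherent method on Key.
    fn is_rel_size_key(&self) -> bool {
        self.field1 == 0 && self.field6 == u32::MAX
    }
}

fn main() {
    let key = Key { field1: 0, field6: u32::MAX };
    assert!(is_rel_size_key(&key)); // old call site
    assert!(key.is_rel_size_key()); // new call site
}
```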
Follow-up of #7890 Fixes #6438 --- libs/pageserver_api/src/key.rs | 147 ++++++++++-------- libs/pageserver_api/src/shard.rs | 7 +- pageserver/ctl/src/key.rs | 24 ++- .../pagebench/src/cmd/getpage_latest_lsn.rs | 11 +- pageserver/src/basebackup.rs | 4 +- pageserver/src/pgdatadir_mapping.rs | 10 +- pageserver/src/tenant/timeline.rs | 5 +- pageserver/src/walredo.rs | 3 +- pageserver/src/walredo/apply_neon.rs | 12 +- 9 files changed, 113 insertions(+), 110 deletions(-) diff --git a/libs/pageserver_api/src/key.rs b/libs/pageserver_api/src/key.rs index e52d4ef986..27fab5e7a0 100644 --- a/libs/pageserver_api/src/key.rs +++ b/libs/pageserver_api/src/key.rs @@ -385,9 +385,11 @@ pub fn rel_size_to_key(rel: RelTag) -> Key { } } -#[inline(always)] -pub fn is_rel_size_key(key: &Key) -> bool { - key.field1 == 0 && key.field6 == u32::MAX +impl Key { + #[inline(always)] + pub fn is_rel_size_key(&self) -> bool { + self.field1 == 0 && self.field6 == u32::MAX + } } #[inline(always)] @@ -478,12 +480,14 @@ pub fn slru_segment_size_to_key(kind: SlruKind, segno: u32) -> Key { } } -pub fn is_slru_segment_size_key(key: &Key) -> bool { - key.field1 == 0x01 - && key.field2 < 0x03 - && key.field3 == 0x01 - && key.field5 == 0 - && key.field6 == u32::MAX +impl Key { + pub fn is_slru_segment_size_key(&self) -> bool { + self.field1 == 0x01 + && self.field2 < 0x03 + && self.field3 == 0x01 + && self.field5 == 0 + && self.field6 == u32::MAX + } } #[inline(always)] @@ -591,73 +595,78 @@ pub const NON_INHERITED_RANGE: Range = AUX_FILES_KEY..AUX_FILES_KEY.next(); /// Sparse keyspace range for vectored get. Missing key error will be ignored for this range. pub const NON_INHERITED_SPARSE_RANGE: Range = Key::metadata_key_range(); -// AUX_FILES currently stores only data for logical replication (slots etc), and -// we don't preserve these on a branch because safekeepers can't follow timeline -// switch (and generally it likely should be optional), so ignore these. -#[inline(always)] -pub fn is_inherited_key(key: Key) -> bool { - !NON_INHERITED_RANGE.contains(&key) && !NON_INHERITED_SPARSE_RANGE.contains(&key) -} +impl Key { + // AUX_FILES currently stores only data for logical replication (slots etc), and + // we don't preserve these on a branch because safekeepers can't follow timeline + // switch (and generally it likely should be optional), so ignore these. 
+ #[inline(always)] + pub fn is_inherited_key(self) -> bool { + !NON_INHERITED_RANGE.contains(&self) && !NON_INHERITED_SPARSE_RANGE.contains(&self) + } -#[inline(always)] -pub fn is_rel_fsm_block_key(key: Key) -> bool { - key.field1 == 0x00 && key.field4 != 0 && key.field5 == FSM_FORKNUM && key.field6 != 0xffffffff -} + #[inline(always)] + pub fn is_rel_fsm_block_key(self) -> bool { + self.field1 == 0x00 + && self.field4 != 0 + && self.field5 == FSM_FORKNUM + && self.field6 != 0xffffffff + } -#[inline(always)] -pub fn is_rel_vm_block_key(key: Key) -> bool { - key.field1 == 0x00 - && key.field4 != 0 - && key.field5 == VISIBILITYMAP_FORKNUM - && key.field6 != 0xffffffff -} + #[inline(always)] + pub fn is_rel_vm_block_key(self) -> bool { + self.field1 == 0x00 + && self.field4 != 0 + && self.field5 == VISIBILITYMAP_FORKNUM + && self.field6 != 0xffffffff + } -#[inline(always)] -pub fn key_to_slru_block(key: Key) -> anyhow::Result<(SlruKind, u32, BlockNumber)> { - Ok(match key.field1 { - 0x01 => { - let kind = match key.field2 { - 0x00 => SlruKind::Clog, - 0x01 => SlruKind::MultiXactMembers, - 0x02 => SlruKind::MultiXactOffsets, - _ => anyhow::bail!("unrecognized slru kind 0x{:02x}", key.field2), - }; - let segno = key.field4; - let blknum = key.field6; + #[inline(always)] + pub fn to_slru_block(self) -> anyhow::Result<(SlruKind, u32, BlockNumber)> { + Ok(match self.field1 { + 0x01 => { + let kind = match self.field2 { + 0x00 => SlruKind::Clog, + 0x01 => SlruKind::MultiXactMembers, + 0x02 => SlruKind::MultiXactOffsets, + _ => anyhow::bail!("unrecognized slru kind 0x{:02x}", self.field2), + }; + let segno = self.field4; + let blknum = self.field6; - (kind, segno, blknum) - } - _ => anyhow::bail!("unexpected value kind 0x{:02x}", key.field1), - }) -} + (kind, segno, blknum) + } + _ => anyhow::bail!("unexpected value kind 0x{:02x}", self.field1), + }) + } -#[inline(always)] -pub fn is_slru_block_key(key: Key) -> bool { - key.field1 == 0x01 // SLRU-related - && key.field3 == 0x00000001 // but not SlruDir - && key.field6 != 0xffffffff // and not SlruSegSize -} + #[inline(always)] + pub fn is_slru_block_key(self) -> bool { + self.field1 == 0x01 // SLRU-related + && self.field3 == 0x00000001 // but not SlruDir + && self.field6 != 0xffffffff // and not SlruSegSize + } -#[inline(always)] -pub fn is_rel_block_key(key: &Key) -> bool { - key.field1 == 0x00 && key.field4 != 0 && key.field6 != 0xffffffff -} + #[inline(always)] + pub fn is_rel_block_key(&self) -> bool { + self.field1 == 0x00 && self.field4 != 0 && self.field6 != 0xffffffff + } -/// Guaranteed to return `Ok()` if [[is_rel_block_key]] returns `true` for `key`. -#[inline(always)] -pub fn key_to_rel_block(key: Key) -> anyhow::Result<(RelTag, BlockNumber)> { - Ok(match key.field1 { - 0x00 => ( - RelTag { - spcnode: key.field2, - dbnode: key.field3, - relnode: key.field4, - forknum: key.field5, - }, - key.field6, - ), - _ => anyhow::bail!("unexpected value kind 0x{:02x}", key.field1), - }) + /// Guaranteed to return `Ok()` if [`Self::is_rel_block_key`] returns `true` for `key`. 
+ #[inline(always)] + pub fn to_rel_block(self) -> anyhow::Result<(RelTag, BlockNumber)> { + Ok(match self.field1 { + 0x00 => ( + RelTag { + spcnode: self.field2, + dbnode: self.field3, + relnode: self.field4, + forknum: self.field5, + }, + self.field6, + ), + _ => anyhow::bail!("unexpected value kind 0x{:02x}", self.field1), + }) + } } impl std::str::FromStr for Key { diff --git a/libs/pageserver_api/src/shard.rs b/libs/pageserver_api/src/shard.rs index 8ace426f88..8c5a4e6168 100644 --- a/libs/pageserver_api/src/shard.rs +++ b/libs/pageserver_api/src/shard.rs @@ -1,9 +1,6 @@ use std::{ops::RangeInclusive, str::FromStr}; -use crate::{ - key::{is_rel_block_key, Key}, - models::ShardParameters, -}; +use crate::{key::Key, models::ShardParameters}; use hex::FromHex; use postgres_ffi::relfile_utils::INIT_FORKNUM; use serde::{Deserialize, Serialize}; @@ -672,7 +669,7 @@ fn key_is_shard0(key: &Key) -> bool { // because they must be included in basebackups. let is_initfork = key.field5 == INIT_FORKNUM; - !is_rel_block_key(key) || is_initfork + !key.is_rel_block_key() || is_initfork } /// Provide the same result as the function in postgres `hashfn.h` with the same name diff --git a/pageserver/ctl/src/key.rs b/pageserver/ctl/src/key.rs index 28448811f8..af4b5a21ab 100644 --- a/pageserver/ctl/src/key.rs +++ b/pageserver/ctl/src/key.rs @@ -72,13 +72,14 @@ impl DescribeKeyCommand { println!("{key:?}"); macro_rules! kind_query { + ([$($name:ident),*$(,)?]) => {{[$(kind_query!($name)),*]}}; ($name:ident) => {{ let s: &'static str = stringify!($name); let s = s.strip_prefix("is_").unwrap_or(s); let s = s.strip_suffix("_key").unwrap_or(s); #[allow(clippy::needless_borrow)] - (s, pageserver_api::key::$name(key)) + (s, key.$name()) }}; } @@ -86,18 +87,15 @@ impl DescribeKeyCommand { // "recognization". I think it accurately represents how strictly we model the Key // right now, but could of course be made less confusing. 
- let queries = [ - ("rel_block", pageserver_api::key::is_rel_block_key(&key)), - kind_query!(is_rel_vm_block_key), - kind_query!(is_rel_fsm_block_key), - kind_query!(is_slru_block_key), - kind_query!(is_inherited_key), - ("rel_size", pageserver_api::key::is_rel_size_key(&key)), - ( - "slru_segment_size", - pageserver_api::key::is_slru_segment_size_key(&key), - ), - ]; + let queries = kind_query!([ + is_rel_block_key, + is_rel_vm_block_key, + is_rel_fsm_block_key, + is_slru_block_key, + is_inherited_key, + is_rel_size_key, + is_slru_segment_size_key, + ]); let recognized_kind = "recognized kind"; let metadata_key = "metadata key"; diff --git a/pageserver/pagebench/src/cmd/getpage_latest_lsn.rs b/pageserver/pagebench/src/cmd/getpage_latest_lsn.rs index 5043a207fc..4992f37465 100644 --- a/pageserver/pagebench/src/cmd/getpage_latest_lsn.rs +++ b/pageserver/pagebench/src/cmd/getpage_latest_lsn.rs @@ -1,6 +1,6 @@ use anyhow::Context; use camino::Utf8PathBuf; -use pageserver_api::key::{is_rel_block_key, key_to_rel_block, Key}; +use pageserver_api::key::Key; use pageserver_api::keyspace::KeySpaceAccum; use pageserver_api::models::PagestreamGetPageRequest; @@ -187,7 +187,7 @@ async fn main_impl( for r in partitioning.keys.ranges.iter() { let mut i = r.start; while i != r.end { - if is_rel_block_key(&i) { + if i.is_rel_block_key() { filtered.add_key(i); } i = i.next(); @@ -308,9 +308,10 @@ async fn main_impl( let r = &ranges[weights.sample(&mut rng)]; let key: i128 = rng.gen_range(r.start..r.end); let key = Key::from_i128(key); - assert!(is_rel_block_key(&key)); - let (rel_tag, block_no) = - key_to_rel_block(key).expect("we filter non-rel-block keys out above"); + assert!(key.is_rel_block_key()); + let (rel_tag, block_no) = key + .to_rel_block() + .expect("we filter non-rel-block keys out above"); PagestreamGetPageRequest { request_lsn: if rng.gen_bool(args.req_latest_probability) { Lsn::MAX diff --git a/pageserver/src/basebackup.rs b/pageserver/src/basebackup.rs index dca1510810..31518f5632 100644 --- a/pageserver/src/basebackup.rs +++ b/pageserver/src/basebackup.rs @@ -13,7 +13,7 @@ use anyhow::{anyhow, Context}; use bytes::{BufMut, Bytes, BytesMut}; use fail::fail_point; -use pageserver_api::key::{key_to_slru_block, Key}; +use pageserver_api::key::Key; use postgres_ffi::pg_constants; use std::fmt::Write as FmtWrite; use std::time::SystemTime; @@ -170,7 +170,7 @@ where } async fn add_block(&mut self, key: &Key, block: Bytes) -> Result<(), BasebackupError> { - let (kind, segno, _) = key_to_slru_block(*key)?; + let (kind, segno, _) = key.to_slru_block()?; match kind { SlruKind::Clog => { diff --git a/pageserver/src/pgdatadir_mapping.rs b/pageserver/src/pgdatadir_mapping.rs index c78c358855..764c528a9e 100644 --- a/pageserver/src/pgdatadir_mapping.rs +++ b/pageserver/src/pgdatadir_mapping.rs @@ -17,10 +17,10 @@ use bytes::{Buf, Bytes, BytesMut}; use enum_map::Enum; use itertools::Itertools; use pageserver_api::key::{ - dbdir_key_range, is_rel_block_key, is_slru_block_key, rel_block_to_key, rel_dir_to_key, - rel_key_range, rel_size_to_key, relmap_file_key, slru_block_to_key, slru_dir_to_key, - slru_segment_key_range, slru_segment_size_to_key, twophase_file_key, twophase_key_range, - AUX_FILES_KEY, CHECKPOINT_KEY, CONTROLFILE_KEY, DBDIR_KEY, TWOPHASEDIR_KEY, + dbdir_key_range, rel_block_to_key, rel_dir_to_key, rel_key_range, rel_size_to_key, + relmap_file_key, slru_block_to_key, slru_dir_to_key, slru_segment_key_range, + slru_segment_size_to_key, twophase_file_key, twophase_key_range, AUX_FILES_KEY, 
CHECKPOINT_KEY, + CONTROLFILE_KEY, DBDIR_KEY, TWOPHASEDIR_KEY, }; use pageserver_api::keyspace::SparseKeySpace; use pageserver_api::models::AuxFilePolicy; @@ -1684,7 +1684,7 @@ impl<'a> DatadirModification<'a> { let mut retained_pending_updates = HashMap::<_, Vec<_>>::new(); for (key, values) in self.pending_updates.drain() { for (lsn, value) in values { - if is_rel_block_key(&key) || is_slru_block_key(key) { + if key.is_rel_block_key() || key.is_slru_block_key() { // This bails out on first error without modifying pending_updates. // That's Ok, cf this function's doc comment. writer.put(key, lsn, &value, ctx).await?; diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index fb1f55f5e3..5402c776e3 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -102,7 +102,6 @@ use crate::metrics::{ }; use crate::pgdatadir_mapping::CalculateLogicalSizeError; use crate::tenant::config::TenantConfOpt; -use pageserver_api::key::{is_inherited_key, is_rel_fsm_block_key, is_rel_vm_block_key}; use pageserver_api::reltag::RelTag; use pageserver_api::shard::ShardIndex; @@ -3191,7 +3190,7 @@ impl Timeline { // Recurse into ancestor if needed if let Some(ancestor_timeline) = timeline.ancestor_timeline.as_ref() { - if is_inherited_key(key) && Lsn(cont_lsn.0 - 1) <= timeline.ancestor_lsn { + if key.is_inherited_key() && Lsn(cont_lsn.0 - 1) <= timeline.ancestor_lsn { trace!( "going into ancestor {}, cont_lsn is {}", timeline.ancestor_lsn, @@ -4262,7 +4261,7 @@ impl Timeline { // Unfortunately we cannot do this for the main fork, or for // any metadata keys, keys, as that would lead to actual data // loss. - if is_rel_fsm_block_key(img_key) || is_rel_vm_block_key(img_key) { + if img_key.is_rel_fsm_block_key() || img_key.is_rel_vm_block_key() { warn!("could not reconstruct FSM or VM key {img_key}, filling with zeros: {err:?}"); ZERO_PAGE.clone() } else { diff --git a/pageserver/src/walredo.rs b/pageserver/src/walredo.rs index 3decea0c6d..1d72a97688 100644 --- a/pageserver/src/walredo.rs +++ b/pageserver/src/walredo.rs @@ -34,7 +34,6 @@ use crate::repository::Key; use crate::walrecord::NeonWalRecord; use anyhow::Context; use bytes::{Bytes, BytesMut}; -use pageserver_api::key::key_to_rel_block; use pageserver_api::models::{WalRedoManagerProcessStatus, WalRedoManagerStatus}; use pageserver_api::shard::TenantShardId; use std::sync::Arc; @@ -208,7 +207,7 @@ impl PostgresRedoManager { ) -> anyhow::Result { *(self.last_redo_at.lock().unwrap()) = Some(Instant::now()); - let (rel, blknum) = key_to_rel_block(key).context("invalid record")?; + let (rel, blknum) = key.to_rel_block().context("invalid record")?; const MAX_RETRY_ATTEMPTS: u32 = 1; let mut n_attempts = 0u32; loop { diff --git a/pageserver/src/walredo/apply_neon.rs b/pageserver/src/walredo/apply_neon.rs index 247704e2a5..695894a924 100644 --- a/pageserver/src/walredo/apply_neon.rs +++ b/pageserver/src/walredo/apply_neon.rs @@ -3,7 +3,7 @@ use crate::walrecord::NeonWalRecord; use anyhow::Context; use byteorder::{ByteOrder, LittleEndian}; use bytes::{BufMut, BytesMut}; -use pageserver_api::key::{key_to_rel_block, key_to_slru_block, Key}; +use pageserver_api::key::Key; use pageserver_api::reltag::SlruKind; use postgres_ffi::pg_constants; use postgres_ffi::relfile_utils::VISIBILITYMAP_FORKNUM; @@ -48,7 +48,7 @@ pub(crate) fn apply_in_neon( flags, } => { // sanity check that this is modifying the correct relation - let (rel, blknum) = key_to_rel_block(key).context("invalid record")?; + let (rel, blknum) = 
key.to_rel_block().context("invalid record")?; assert!( rel.forknum == VISIBILITYMAP_FORKNUM, "ClearVisibilityMapFlags record on unexpected rel {}", @@ -85,7 +85,7 @@ pub(crate) fn apply_in_neon( // Non-relational WAL records are handled here, with custom code that has the // same effects as the corresponding Postgres WAL redo function. NeonWalRecord::ClogSetCommitted { xids, timestamp } => { - let (slru_kind, segno, blknum) = key_to_slru_block(key).context("invalid record")?; + let (slru_kind, segno, blknum) = key.to_slru_block().context("invalid record")?; assert_eq!( slru_kind, SlruKind::Clog, @@ -130,7 +130,7 @@ pub(crate) fn apply_in_neon( } } NeonWalRecord::ClogSetAborted { xids } => { - let (slru_kind, segno, blknum) = key_to_slru_block(key).context("invalid record")?; + let (slru_kind, segno, blknum) = key.to_slru_block().context("invalid record")?; assert_eq!( slru_kind, SlruKind::Clog, @@ -160,7 +160,7 @@ pub(crate) fn apply_in_neon( } } NeonWalRecord::MultixactOffsetCreate { mid, moff } => { - let (slru_kind, segno, blknum) = key_to_slru_block(key).context("invalid record")?; + let (slru_kind, segno, blknum) = key.to_slru_block().context("invalid record")?; assert_eq!( slru_kind, SlruKind::MultiXactOffsets, @@ -192,7 +192,7 @@ pub(crate) fn apply_in_neon( LittleEndian::write_u32(&mut page[offset..offset + 4], *moff); } NeonWalRecord::MultixactMembersCreate { moff, members } => { - let (slru_kind, segno, blknum) = key_to_slru_block(key).context("invalid record")?; + let (slru_kind, segno, blknum) = key.to_slru_block().context("invalid record")?; assert_eq!( slru_kind, SlruKind::MultiXactMembers, From 69d18d642996f80dbe351f0b8456da75a5861710 Mon Sep 17 00:00:00 2001 From: John Spray Date: Mon, 3 Jun 2024 17:16:23 +0100 Subject: [PATCH 123/220] s3_scrubber: add `pageserver-physical-gc` (#7925) ## Problem Currently, we leave `index_part.json` objects from old generations behind each time a pageserver restarts or a tenant is migrated. This doesn't break anything, but it's annoying when a tenant has been around for a long time and starts to accumulate 10s-100s of these. Partially implements: #7043 ## Summary of changes - Add a new `pageserver-physical-gc` command to `s3_scrubber` The name is a bit of a mouthful, but I think it makes sense: - GC is the accurate term for what we are doing here: removing data that takes up storage but can never be accessed. - "physical" is a necessary distinction from the "normal" GC that we do online in the pageserver, which operates at a higher level in terms of LSNs+layers, whereas this type of GC is purely about S3 objects. - "pageserver" makes clear that this command deals exclusively with pageserver data, not safekeeper. 
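For illustration, assuming the scrubber binary is invoked as `s3_scrubber` and the usual bucket/credentials configuration is supplied via the environment, a cautious first pass against a single tenant might be `s3_scrubber pageserver-physical-gc --tenant-id <TENANT_ID> --min-age 7days --mode dry-run`; dropping `--mode` falls back to the default `indices-only`, which deletes old-generation `index_part.json` objects and prints a JSON summary containing `indices_deleted` and `remote_storage_errors`.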
--- Cargo.lock | 1 + s3_scrubber/Cargo.toml | 1 + s3_scrubber/src/checks.rs | 64 ++--- s3_scrubber/src/lib.rs | 3 +- s3_scrubber/src/main.rs | 25 +- s3_scrubber/src/pageserver_physical_gc.rs | 239 ++++++++++++++++++ test_runner/fixtures/neon_fixtures.py | 24 ++ .../regress/test_pageserver_secondary.py | 16 +- test_runner/regress/test_s3_scrubber.py | 51 +++- 9 files changed, 387 insertions(+), 37 deletions(-) create mode 100644 s3_scrubber/src/pageserver_physical_gc.rs diff --git a/Cargo.lock b/Cargo.lock index 6a60104472..84d919b817 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5129,6 +5129,7 @@ dependencies = [ "futures-util", "hex", "histogram", + "humantime", "itertools", "once_cell", "pageserver", diff --git a/s3_scrubber/Cargo.toml b/s3_scrubber/Cargo.toml index e56bd43fb8..48b50ca21c 100644 --- a/s3_scrubber/Cargo.toml +++ b/s3_scrubber/Cargo.toml @@ -11,6 +11,7 @@ either.workspace = true tokio-rustls.workspace = true anyhow.workspace = true hex.workspace = true +humantime.workspace = true thiserror.workspace = true rand.workspace = true bytes.workspace = true diff --git a/s3_scrubber/src/checks.rs b/s3_scrubber/src/checks.rs index 134afa53da..2c14fef0af 100644 --- a/s3_scrubber/src/checks.rs +++ b/s3_scrubber/src/checks.rs @@ -1,7 +1,7 @@ use std::collections::{HashMap, HashSet}; use anyhow::Context; -use aws_sdk_s3::{types::ObjectIdentifier, Client}; +use aws_sdk_s3::Client; use pageserver::tenant::remote_timeline_client::index::LayerFileMetadata; use pageserver_api::shard::ShardIndex; use tracing::{error, info, warn}; @@ -70,7 +70,7 @@ pub(crate) fn branch_cleanup_and_check_errors( match s3_data { Some(s3_data) => { - result.garbage_keys.extend(s3_data.keys_to_remove); + result.garbage_keys.extend(s3_data.unknown_keys); match s3_data.blob_data { BlobDataParseResult::Parsed { @@ -240,7 +240,12 @@ impl TenantObjectListing { #[derive(Debug)] pub(crate) struct S3TimelineBlobData { pub(crate) blob_data: BlobDataParseResult, - pub(crate) keys_to_remove: Vec, + + // Index objects that were not used when loading `blob_data`, e.g. those from old generations + pub(crate) unused_index_keys: Vec, + + // Objects whose keys were not recognized at all, i.e. 
not layer files, not indices + pub(crate) unknown_keys: Vec, } #[derive(Debug)] @@ -276,12 +281,12 @@ pub(crate) async fn list_timeline_blobs( let mut s3_layers = HashSet::new(); let mut errors = Vec::new(); - let mut keys_to_remove = Vec::new(); + let mut unknown_keys = Vec::new(); let mut timeline_dir_target = s3_root.timeline_root(&id); timeline_dir_target.delimiter = String::new(); - let mut index_parts: Vec = Vec::new(); + let mut index_part_keys: Vec = Vec::new(); let mut initdb_archive: bool = false; let mut stream = std::pin::pin!(stream_listing(s3_client, &timeline_dir_target)); @@ -292,16 +297,16 @@ pub(crate) async fn list_timeline_blobs( let blob_name = key.strip_prefix(&timeline_dir_target.prefix_in_bucket); match blob_name { Some(name) if name.starts_with("index_part.json") => { - tracing::info!("Index key {key}"); - index_parts.push(obj) + tracing::debug!("Index key {key}"); + index_part_keys.push(key.to_owned()) } Some("initdb.tar.zst") => { - tracing::info!("initdb archive {key}"); + tracing::debug!("initdb archive {key}"); initdb_archive = true; } Some(maybe_layer_name) => match parse_layer_object_name(maybe_layer_name) { Ok((new_layer, gen)) => { - tracing::info!("Parsed layer key: {} {:?}", new_layer, gen); + tracing::debug!("Parsed layer key: {} {:?}", new_layer, gen); s3_layers.insert((new_layer, gen)); } Err(e) => { @@ -309,37 +314,37 @@ pub(crate) async fn list_timeline_blobs( errors.push( format!("S3 list response got an object with key {key} that is not a layer name: {e}"), ); - keys_to_remove.push(key.to_string()); + unknown_keys.push(key.to_string()); } }, None => { - tracing::info!("Peculiar key {}", key); + tracing::warn!("Unknown key {}", key); errors.push(format!("S3 list response got an object with odd key {key}")); - keys_to_remove.push(key.to_string()); + unknown_keys.push(key.to_string()); } } } - if index_parts.is_empty() && s3_layers.is_empty() && initdb_archive { - tracing::info!( + if index_part_keys.is_empty() && s3_layers.is_empty() && initdb_archive { + tracing::debug!( "Timeline is empty apart from initdb archive: expected post-deletion state." ); return Ok(S3TimelineBlobData { blob_data: BlobDataParseResult::Relic, - keys_to_remove: Vec::new(), + unused_index_keys: index_part_keys, + unknown_keys: Vec::new(), }); } // Choose the index_part with the highest generation - let (index_part_object, index_part_generation) = match index_parts + let (index_part_object, index_part_generation) = match index_part_keys .iter() - .filter_map(|k| { - let key = k.key(); + .filter_map(|key| { // Stripping the index key to the last part, because RemotePath doesn't // like absolute paths, and depending on prefix_in_bucket it's possible // for the keys we read back to start with a slash. 
let basename = key.rsplit_once('/').unwrap().1; - parse_remote_index_path(RemotePath::from_string(basename).unwrap()).map(|g| (k, g)) + parse_remote_index_path(RemotePath::from_string(basename).unwrap()).map(|g| (key, g)) }) .max_by_key(|i| i.1) .map(|(k, g)| (k.clone(), g)) @@ -347,15 +352,18 @@ pub(crate) async fn list_timeline_blobs( Some((key, gen)) => (Some(key), gen), None => { // Legacy/missing case: one or zero index parts, which did not have a generation - (index_parts.pop(), Generation::none()) + (index_part_keys.pop(), Generation::none()) } }; - if index_part_object.is_none() { - errors.push("S3 list response got no index_part.json file".to_string()); + match index_part_object.as_ref() { + Some(selected) => index_part_keys.retain(|k| k != selected), + None => { + errors.push("S3 list response got no index_part.json file".to_string()); + } } - if let Some(index_part_object_key) = index_part_object.as_ref().map(|object| object.key()) { + if let Some(index_part_object_key) = index_part_object.as_ref() { let index_part_bytes = download_object_with_retries( s3_client, &timeline_dir_target.bucket_name, @@ -372,17 +380,14 @@ pub(crate) async fn list_timeline_blobs( index_part_generation, s3_layers, }, - keys_to_remove, + unused_index_keys: index_part_keys, + unknown_keys, }) } Err(index_parse_error) => errors.push(format!( "index_part.json body parsing error: {index_parse_error}" )), } - } else { - errors.push(format!( - "Index part object {index_part_object:?} has no key" - )); } if errors.is_empty() { @@ -393,6 +398,7 @@ pub(crate) async fn list_timeline_blobs( Ok(S3TimelineBlobData { blob_data: BlobDataParseResult::Incorrect(errors), - keys_to_remove, + unused_index_keys: index_part_keys, + unknown_keys, }) } diff --git a/s3_scrubber/src/lib.rs b/s3_scrubber/src/lib.rs index e0f99ecd9c..64273432fc 100644 --- a/s3_scrubber/src/lib.rs +++ b/s3_scrubber/src/lib.rs @@ -4,6 +4,7 @@ pub mod checks; pub mod cloud_admin_api; pub mod garbage; pub mod metadata_stream; +pub mod pageserver_physical_gc; pub mod scan_pageserver_metadata; pub mod scan_safekeeper_metadata; pub mod tenant_snapshot; @@ -396,7 +397,7 @@ async fn download_object_with_retries( .await { Ok(bytes_read) => { - tracing::info!("Downloaded {bytes_read} bytes for object object with key {key}"); + tracing::debug!("Downloaded {bytes_read} bytes for object {key}"); return Ok(body_buf); } Err(e) => { diff --git a/s3_scrubber/src/main.rs b/s3_scrubber/src/main.rs index e49c280b99..ade8ef7d7a 100644 --- a/s3_scrubber/src/main.rs +++ b/s3_scrubber/src/main.rs @@ -2,11 +2,13 @@ use anyhow::bail; use camino::Utf8PathBuf; use pageserver_api::shard::TenantShardId; use s3_scrubber::garbage::{find_garbage, purge_garbage, PurgeMode}; +use s3_scrubber::pageserver_physical_gc::GcMode; use s3_scrubber::scan_pageserver_metadata::scan_metadata; use s3_scrubber::tenant_snapshot::SnapshotDownloader; use s3_scrubber::{ - init_logging, scan_safekeeper_metadata::scan_safekeeper_metadata, BucketConfig, ConsoleConfig, - NodeKind, TraversingDepth, + init_logging, pageserver_physical_gc::pageserver_physical_gc, + scan_safekeeper_metadata::scan_safekeeper_metadata, BucketConfig, ConsoleConfig, NodeKind, + TraversingDepth, }; use clap::{Parser, Subcommand}; @@ -62,6 +64,14 @@ enum Command { #[arg(short, long)] output_path: Utf8PathBuf, }, + PageserverPhysicalGc { + #[arg(long = "tenant-id", num_args = 0..)] + tenant_ids: Vec, + #[arg(long = "min-age")] + min_age: humantime::Duration, + #[arg(short, long, default_value_t = GcMode::IndicesOnly)] + mode: 
GcMode, + }, } #[tokio::main] @@ -75,6 +85,7 @@ async fn main() -> anyhow::Result<()> { Command::FindGarbage { .. } => "find-garbage", Command::PurgeGarbage { .. } => "purge-garbage", Command::TenantSnapshot { .. } => "tenant-snapshot", + Command::PageserverPhysicalGc { .. } => "pageserver-physical-gc", }; let _guard = init_logging(&format!( "{}_{}_{}_{}.log", @@ -178,5 +189,15 @@ async fn main() -> anyhow::Result<()> { SnapshotDownloader::new(bucket_config, tenant_id, output_path, concurrency)?; downloader.download().await } + Command::PageserverPhysicalGc { + tenant_ids, + min_age, + mode, + } => { + let summary = + pageserver_physical_gc(bucket_config, tenant_ids, min_age.into(), mode).await?; + println!("{}", serde_json::to_string(&summary).unwrap()); + Ok(()) + } } } diff --git a/s3_scrubber/src/pageserver_physical_gc.rs b/s3_scrubber/src/pageserver_physical_gc.rs new file mode 100644 index 0000000000..0146433128 --- /dev/null +++ b/s3_scrubber/src/pageserver_physical_gc.rs @@ -0,0 +1,239 @@ +use std::time::{Duration, UNIX_EPOCH}; + +use crate::checks::{list_timeline_blobs, BlobDataParseResult}; +use crate::metadata_stream::{stream_tenant_timelines, stream_tenants}; +use crate::{init_remote, BucketConfig, NodeKind, RootTarget, TenantShardTimelineId}; +use aws_sdk_s3::Client; +use futures_util::{StreamExt, TryStreamExt}; +use pageserver::tenant::remote_timeline_client::parse_remote_index_path; +use pageserver::tenant::IndexPart; +use pageserver_api::shard::TenantShardId; +use remote_storage::RemotePath; +use serde::Serialize; +use tracing::{info_span, Instrument}; +use utils::generation::Generation; + +#[derive(Serialize, Default)] +pub struct GcSummary { + indices_deleted: usize, + remote_storage_errors: usize, +} + +#[derive(clap::ValueEnum, Debug, Clone, Copy)] +pub enum GcMode { + // Delete nothing + DryRun, + + // Enable only removing old-generation indices + IndicesOnly, + // Enable all forms of GC + // TODO: this will be used when shard split ancestor layer deletion is added + // All, +} + +impl std::fmt::Display for GcMode { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + GcMode::DryRun => write!(f, "dry-run"), + GcMode::IndicesOnly => write!(f, "indices-only"), + } + } +} + +async fn maybe_delete_index( + s3_client: &Client, + bucket_config: &BucketConfig, + min_age: &Duration, + latest_gen: Generation, + key: &str, + mode: GcMode, + summary: &mut GcSummary, +) { + // Validation: we will only delete things that parse cleanly + let basename = key.rsplit_once('/').unwrap().1; + let candidate_generation = + match parse_remote_index_path(RemotePath::from_string(basename).unwrap()) { + Some(g) => g, + None => { + if basename == IndexPart::FILE_NAME { + // A legacy pre-generation index + Generation::none() + } else { + // A strange key: we will not delete this because we don't understand it. + tracing::warn!("Bad index key"); + return; + } + } + }; + + // Validation: we will only delete indices more than one generation old, to avoid interfering + // in typical migrations, even if they are very long running. + if candidate_generation >= latest_gen { + // This shouldn't happen: when we loaded metadata, it should have selected the latest + // generation already, and only populated [`S3TimelineBlobData::unused_index_keys`] + // with older generations. + tracing::warn!("Deletion candidate is >= latest generation, this is a bug!"); + return; + } else if candidate_generation.next() == latest_gen { + // Skip deleting the latest-1th generation's index. 
+ return; + } + + // Validation: we will only delete indices after one week, so that during incidents we will have + // easy access to recent indices. + let age: Duration = match s3_client + .head_object() + .bucket(&bucket_config.bucket) + .key(key) + .send() + .await + { + Ok(response) => match response.last_modified { + None => { + tracing::warn!("Missing last_modified"); + summary.remote_storage_errors += 1; + return; + } + Some(last_modified) => { + let last_modified = + UNIX_EPOCH + Duration::from_secs_f64(last_modified.as_secs_f64()); + match last_modified.elapsed() { + Ok(e) => e, + Err(_) => { + tracing::warn!("Bad last_modified time: {last_modified:?}"); + return; + } + } + } + }, + Err(e) => { + tracing::warn!("Failed to HEAD {key}: {e}"); + summary.remote_storage_errors += 1; + return; + } + }; + if &age < min_age { + tracing::info!( + "Skipping young object {} < {}", + age.as_secs_f64(), + min_age.as_secs_f64() + ); + return; + } + + if matches!(mode, GcMode::DryRun) { + tracing::info!("Dry run: would delete this key"); + return; + } + + // All validations passed: erase the object + match s3_client + .delete_object() + .bucket(&bucket_config.bucket) + .key(key) + .send() + .await + { + Ok(_) => { + tracing::info!("Successfully deleted index"); + summary.indices_deleted += 1; + } + Err(e) => { + tracing::warn!("Failed to delete index: {e}"); + summary.remote_storage_errors += 1; + } + } +} + +/// Physical garbage collection: removing unused S3 objects. This is distinct from the garbage collection +/// done inside the pageserver, which operates at a higher level (keys, layers). This type of garbage collection +/// is about removing: +/// - Objects that were uploaded but never referenced in the remote index (e.g. because of a shutdown between +/// uploading a layer and uploading an index) +/// - Index objects from historic generations +/// +/// This type of GC is not necessary for correctness: rather it serves to reduce wasted storage capacity, and +/// make sure that object listings don't get slowed down by large numbers of garbage objects. +pub async fn pageserver_physical_gc( + bucket_config: BucketConfig, + tenant_ids: Vec, + min_age: Duration, + mode: GcMode, +) -> anyhow::Result { + let (s3_client, target) = init_remote(bucket_config.clone(), NodeKind::Pageserver)?; + + let tenants = if tenant_ids.is_empty() { + futures::future::Either::Left(stream_tenants(&s3_client, &target)) + } else { + futures::future::Either::Right(futures::stream::iter(tenant_ids.into_iter().map(Ok))) + }; + + // How many tenants to process in parallel. We need to be mindful of pageservers + // accessing the same per tenant prefixes, so use a lower setting than pageservers. 
+ const CONCURRENCY: usize = 32; + + // Generate a stream of TenantTimelineId + let timelines = tenants.map_ok(|t| stream_tenant_timelines(&s3_client, &target, t)); + let timelines = timelines.try_buffered(CONCURRENCY); + let timelines = timelines.try_flatten(); + + // Generate a stream of S3TimelineBlobData + async fn gc_timeline( + s3_client: &Client, + bucket_config: &BucketConfig, + min_age: &Duration, + target: &RootTarget, + mode: GcMode, + ttid: TenantShardTimelineId, + ) -> anyhow::Result { + let mut summary = GcSummary::default(); + let data = list_timeline_blobs(s3_client, ttid, target).await?; + + let (latest_gen, candidates) = match &data.blob_data { + BlobDataParseResult::Parsed { + index_part: _index_part, + index_part_generation, + s3_layers: _s3_layers, + } => (*index_part_generation, data.unused_index_keys), + BlobDataParseResult::Relic => { + // Post-deletion tenant location: don't try and GC it. + return Ok(summary); + } + BlobDataParseResult::Incorrect(reasons) => { + // Our primary purpose isn't to report on bad data, but log this rather than skipping silently + tracing::warn!("Skipping timeline {ttid}, bad metadata: {reasons:?}"); + return Ok(summary); + } + }; + + for key in candidates { + maybe_delete_index( + s3_client, + bucket_config, + min_age, + latest_gen, + &key, + mode, + &mut summary, + ) + .instrument(info_span!("maybe_delete_index", %ttid, ?latest_gen, key)) + .await; + } + + Ok(summary) + } + let timelines = timelines + .map_ok(|ttid| gc_timeline(&s3_client, &bucket_config, &min_age, &target, mode, ttid)); + let mut timelines = std::pin::pin!(timelines.try_buffered(CONCURRENCY)); + + let mut summary = GcSummary::default(); + + while let Some(i) = timelines.next().await { + let tl_summary = i?; + + summary.indices_deleted += tl_summary.indices_deleted; + summary.remote_storage_errors += tl_summary.remote_storage_errors; + } + + Ok(summary) +} diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index 0004745bf0..a25b8bfca1 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -3998,6 +3998,30 @@ class S3Scrubber: ) log.info(f"tenant-snapshot output: {stdout}") + def pageserver_physical_gc( + self, min_age_secs: int, tenant_ids: Optional[list[TenantId]] = None + ): + args = ["pageserver-physical-gc", "--min-age", f"{min_age_secs}s"] + + if tenant_ids is None: + tenant_ids = [] + + for tenant_id in tenant_ids: + args.extend(["--tenant-id", str(tenant_id)]) + + stdout = self.scrubber_cli( + args, + timeout=30, + ) + try: + return json.loads(stdout) + except: + log.error( + "Failed to decode JSON output from `pageserver-physical_gc`. 
Dumping stdout:" + ) + log.error(stdout) + raise + def _get_test_dir(request: FixtureRequest, top_output_dir: Path, prefix: str) -> Path: """Compute the path to a working directory for an individual test.""" diff --git a/test_runner/regress/test_pageserver_secondary.py b/test_runner/regress/test_pageserver_secondary.py index 25a3f8521c..9b9bdb2b08 100644 --- a/test_runner/regress/test_pageserver_secondary.py +++ b/test_runner/regress/test_pageserver_secondary.py @@ -15,7 +15,7 @@ from fixtures.pageserver.utils import ( tenant_delete_wait_completed, wait_for_upload_queue_empty, ) -from fixtures.remote_storage import LocalFsStorage, RemoteStorageKind, S3Storage +from fixtures.remote_storage import LocalFsStorage, RemoteStorageKind, S3Storage, s3_storage from fixtures.utils import wait_until from fixtures.workload import Workload @@ -73,7 +73,7 @@ def test_location_conf_churn(neon_env_builder: NeonEnvBuilder, seed: int): """ neon_env_builder.num_pageservers = 3 neon_env_builder.enable_pageserver_remote_storage( - remote_storage_kind=RemoteStorageKind.MOCK_S3, + remote_storage_kind=s3_storage(), ) env = neon_env_builder.init_start(initial_tenant_conf=TENANT_CONF) @@ -215,6 +215,13 @@ def test_location_conf_churn(neon_env_builder: NeonEnvBuilder, seed: int): ) workload.validate(pageserver.id) + # Having done a bunch of attach/detach cycles, we will have generated some index garbage: check + # that the scrubber sees it and cleans it up. We do this before the final attach+validate pass, + # to also validate that the scrubber isn't breaking anything. + gc_summary = S3Scrubber(neon_env_builder).pageserver_physical_gc(min_age_secs=1) + assert gc_summary["remote_storage_errors"] == 0 + assert gc_summary["indices_deleted"] > 0 + # Attach all pageservers for ps in env.pageservers: location_conf = {"mode": "AttachedMulti", "secondary_conf": None, "tenant_conf": {}} @@ -227,10 +234,11 @@ def test_location_conf_churn(neon_env_builder: NeonEnvBuilder, seed: int): # Detach all pageservers for ps in env.pageservers: location_conf = {"mode": "Detached", "secondary_conf": None, "tenant_conf": {}} + assert ps.list_layers(tenant_id, timeline_id) != [] ps.tenant_location_configure(tenant_id, location_conf) - # Confirm that all local disk state was removed on detach - # TODO + # Confirm that all local disk state was removed on detach + assert ps.list_layers(tenant_id, timeline_id) == [] def test_live_migration(neon_env_builder: NeonEnvBuilder): diff --git a/test_runner/regress/test_s3_scrubber.py b/test_runner/regress/test_s3_scrubber.py index 8981000c24..6baba190f3 100644 --- a/test_runner/regress/test_s3_scrubber.py +++ b/test_runner/regress/test_s3_scrubber.py @@ -3,7 +3,7 @@ import shutil from typing import Optional import pytest -from fixtures.common_types import TenantShardId +from fixtures.common_types import TenantId, TenantShardId, TimelineId from fixtures.neon_fixtures import ( NeonEnvBuilder, S3Scrubber, @@ -109,3 +109,52 @@ def test_scrubber_tenant_snapshot(neon_env_builder: NeonEnvBuilder, shard_count: # Check we can read everything workload.validate() + + +@pytest.mark.parametrize("shard_count", [None, 4]) +def test_scrubber_physical_gc(neon_env_builder: NeonEnvBuilder, shard_count: Optional[int]): + neon_env_builder.enable_pageserver_remote_storage(s3_storage()) + neon_env_builder.num_pageservers = 2 + + env = neon_env_builder.init_configs() + env.start() + + tenant_id = TenantId.generate() + timeline_id = TimelineId.generate() + env.neon_cli.create_tenant(tenant_id, timeline_id, 
shard_count=shard_count) + + workload = Workload(env, tenant_id, timeline_id) + workload.init() + + # We will end up with an index per shard, per cycle, plus one for the initial startup + n_cycles = 4 + expect_indices_per_shard = n_cycles + 1 + shard_count = 1 if shard_count is None else shard_count + + # For each cycle, detach and attach the tenant to bump the generation, and do some writes to generate uploads + for _i in range(0, n_cycles): + env.storage_controller.tenant_policy_update(tenant_id, {"placement": "Detached"}) + env.storage_controller.reconcile_until_idle() + + env.storage_controller.tenant_policy_update(tenant_id, {"placement": {"Attached": 0}}) + env.storage_controller.reconcile_until_idle() + + # This write includes remote upload, will generate an index in this generation + workload.write_rows(1) + + # With a high min_age, the scrubber should decline to delete anything + gc_summary = S3Scrubber(neon_env_builder).pageserver_physical_gc(min_age_secs=3600) + assert gc_summary["remote_storage_errors"] == 0 + assert gc_summary["indices_deleted"] == 0 + + # If targeting a different tenant, the scrubber shouldn't do anything + gc_summary = S3Scrubber(neon_env_builder).pageserver_physical_gc( + min_age_secs=1, tenant_ids=[TenantId.generate()] + ) + assert gc_summary["remote_storage_errors"] == 0 + assert gc_summary["indices_deleted"] == 0 + + # With a low min_age, the scrubber should go ahead and clean up all but the latest 2 generations + gc_summary = S3Scrubber(neon_env_builder).pageserver_physical_gc(min_age_secs=1) + assert gc_summary["remote_storage_errors"] == 0 + assert gc_summary["indices_deleted"] == (expect_indices_per_shard - 2) * shard_count From 7006caf3a1480567e911169b4f9488ac2a81d699 Mon Sep 17 00:00:00 2001 From: Konstantin Knizhnik Date: Mon, 3 Jun 2024 19:37:33 +0300 Subject: [PATCH 124/220] Store logical replication origin in KV storage (#7099) Store logical replication origin in KV storage ## Problem See #6977 ## Summary of changes * Extract origin_lsn from commit WAl record * Add ReplOrigin key to KV storage and store origin_lsn * In basebackup replace snapshot origin_lsn with last committed origin_lsn at basebackup LSN ## Checklist before requesting a review - [ ] I have performed a self-review of my code. - [ ] If it is a core feature, I have added thorough tests. - [ ] Do we need to implement analytics? if so did you add the relevant metrics to the dashboard? - [ ] If this PR requires public announcement, mark it with /release-notes label and add several sentences in this section. 
## Checklist before merging - [ ] Do not forget to reformat commit message to not include the above checklist --------- Signed-off-by: Alex Chi Z Co-authored-by: Konstantin Knizhnik Co-authored-by: Alex Chi Z --- libs/pageserver_api/src/key.rs | 35 ++++++++++++ libs/postgres_ffi/build.rs | 1 + libs/postgres_ffi/src/lib.rs | 1 + libs/postgres_ffi/src/pg_constants.rs | 10 +++- pageserver/src/basebackup.rs | 33 +++++++++++ pageserver/src/pgdatadir_mapping.rs | 47 +++++++++++++-- pageserver/src/tenant/timeline.rs | 17 +++--- pageserver/src/walingest.rs | 20 +++++++ pageserver/src/walrecord.rs | 46 ++++++++++++++- test_runner/regress/test_compaction.py | 6 +- .../regress/test_subscriber_restart.py | 57 +++++++++++++++++++ 11 files changed, 255 insertions(+), 18 deletions(-) create mode 100644 test_runner/regress/test_subscriber_restart.py diff --git a/libs/pageserver_api/src/key.rs b/libs/pageserver_api/src/key.rs index 27fab5e7a0..997c1cc43a 100644 --- a/libs/pageserver_api/src/key.rs +++ b/libs/pageserver_api/src/key.rs @@ -1,6 +1,7 @@ use anyhow::{bail, Result}; use byteorder::{ByteOrder, BE}; use postgres_ffi::relfile_utils::{FSM_FORKNUM, VISIBILITYMAP_FORKNUM}; +use postgres_ffi::RepOriginId; use postgres_ffi::{Oid, TransactionId}; use serde::{Deserialize, Serialize}; use std::{fmt, ops::Range}; @@ -38,6 +39,9 @@ pub const RELATION_SIZE_PREFIX: u8 = 0x61; /// The key prefix of AUX file keys. pub const AUX_KEY_PREFIX: u8 = 0x62; +/// The key prefix of ReplOrigin keys. +pub const REPL_ORIGIN_KEY_PREFIX: u8 = 0x63; + /// Check if the key falls in the range of metadata keys. pub const fn is_metadata_key_slice(key: &[u8]) -> bool { key[0] >= METADATA_KEY_BEGIN_PREFIX && key[0] < METADATA_KEY_END_PREFIX @@ -587,6 +591,37 @@ pub const AUX_FILES_KEY: Key = Key { field6: 2, }; +#[inline(always)] +pub fn repl_origin_key(origin_id: RepOriginId) -> Key { + Key { + field1: REPL_ORIGIN_KEY_PREFIX, + field2: 0, + field3: 0, + field4: 0, + field5: 0, + field6: origin_id as u32, + } +} + +/// Get the range of replorigin keys. +pub fn repl_origin_key_range() -> Range { + Key { + field1: REPL_ORIGIN_KEY_PREFIX, + field2: 0, + field3: 0, + field4: 0, + field5: 0, + field6: 0, + }..Key { + field1: REPL_ORIGIN_KEY_PREFIX, + field2: 0, + field3: 0, + field4: 0, + field5: 0, + field6: 0x10000, + } +} + // Reverse mappings for a few Keys. // These are needed by WAL redo manager. diff --git a/libs/postgres_ffi/build.rs b/libs/postgres_ffi/build.rs index 8e6761d6d3..370d9e9a6f 100644 --- a/libs/postgres_ffi/build.rs +++ b/libs/postgres_ffi/build.rs @@ -126,6 +126,7 @@ fn main() -> anyhow::Result<()> { .allowlist_type("PageHeaderData") .allowlist_type("DBState") .allowlist_type("RelMapFile") + .allowlist_type("RepOriginId") // Because structs are used for serialization, tell bindgen to emit // explicit padding fields. 
.explicit_padding(true) diff --git a/libs/postgres_ffi/src/lib.rs b/libs/postgres_ffi/src/lib.rs index 0d6986778a..729f57f829 100644 --- a/libs/postgres_ffi/src/lib.rs +++ b/libs/postgres_ffi/src/lib.rs @@ -110,6 +110,7 @@ pub mod pg_constants; pub mod relfile_utils; // Export some widely used datatypes that are unlikely to change across Postgres versions +pub use v14::bindings::RepOriginId; pub use v14::bindings::{uint32, uint64, Oid}; pub use v14::bindings::{BlockNumber, OffsetNumber}; pub use v14::bindings::{MultiXactId, TransactionId}; diff --git a/libs/postgres_ffi/src/pg_constants.rs b/libs/postgres_ffi/src/pg_constants.rs index 2701ddf5e0..54b032d138 100644 --- a/libs/postgres_ffi/src/pg_constants.rs +++ b/libs/postgres_ffi/src/pg_constants.rs @@ -102,7 +102,7 @@ pub const XACT_XINFO_HAS_SUBXACTS: u32 = 1u32 << 1; pub const XACT_XINFO_HAS_RELFILENODES: u32 = 1u32 << 2; pub const XACT_XINFO_HAS_INVALS: u32 = 1u32 << 3; pub const XACT_XINFO_HAS_TWOPHASE: u32 = 1u32 << 4; -// pub const XACT_XINFO_HAS_ORIGIN: u32 = 1u32 << 5; +pub const XACT_XINFO_HAS_ORIGIN: u32 = 1u32 << 5; // pub const XACT_XINFO_HAS_AE_LOCKS: u32 = 1u32 << 6; // pub const XACT_XINFO_HAS_GID: u32 = 1u32 << 7; @@ -167,6 +167,7 @@ pub const RM_RELMAP_ID: u8 = 7; pub const RM_STANDBY_ID: u8 = 8; pub const RM_HEAP2_ID: u8 = 9; pub const RM_HEAP_ID: u8 = 10; +pub const RM_REPLORIGIN_ID: u8 = 19; pub const RM_LOGICALMSG_ID: u8 = 21; // from neon_rmgr.h @@ -223,6 +224,10 @@ pub const XLOG_CHECKPOINT_ONLINE: u8 = 0x10; pub const XLP_FIRST_IS_CONTRECORD: u16 = 0x0001; pub const XLP_LONG_HEADER: u16 = 0x0002; +/* From xlog.h */ +pub const XLOG_REPLORIGIN_SET: u8 = 0x00; +pub const XLOG_REPLORIGIN_DROP: u8 = 0x10; + /* From replication/slot.h */ pub const REPL_SLOT_ON_DISK_OFFSETOF_RESTART_LSN: usize = 4*4 /* offset of `slotdata` in ReplicationSlotOnDisk */ + 64 /* NameData */ + 4*4; @@ -237,6 +242,9 @@ pub const SLOTS_PER_FSM_PAGE: u32 = FSM_LEAF_NODES_PER_PAGE as u32; pub const VM_HEAPBLOCKS_PER_PAGE: u32 = (BLCKSZ as usize - SIZEOF_PAGE_HEADER_DATA) as u32 * (8 / 2); // MAPSIZE * (BITS_PER_BYTE / BITS_PER_HEAPBLOCK) +/* From origin.c */ +pub const REPLICATION_STATE_MAGIC: u32 = 0x1257DADE; + // List of subdirectories inside pgdata. // Copied from src/bin/initdb/initdb.c pub const PGDATA_SUBDIRS: [&str; 22] = [ diff --git a/pageserver/src/basebackup.rs b/pageserver/src/basebackup.rs index 31518f5632..0f057a4368 100644 --- a/pageserver/src/basebackup.rs +++ b/pageserver/src/basebackup.rs @@ -362,6 +362,13 @@ where )); info!("Replication slot {} restart LSN={}", path, restart_lsn); min_restart_lsn = Lsn::min(min_restart_lsn, restart_lsn); + } else if path == "pg_logical/replorigin_checkpoint" { + // replorigin_checkoint is written only on compute shutdown, so it contains + // deteriorated values. So we generate our own version of this file for the particular LSN + // based on information about replorigins extracted from transaction commit records. + // In future we will not generate AUX record for "pg_logical/replorigin_checkpoint" at all, + // but now we should handle (skip) it for backward compatibility. 
+ continue; } let header = new_tar_header(&path, content.len() as u64)?; self.ar @@ -390,6 +397,32 @@ where { self.add_twophase_file(xid).await?; } + let repl_origins = self + .timeline + .get_replorigins(self.lsn, self.ctx) + .await + .map_err(|e| BasebackupError::Server(e.into()))?; + let n_origins = repl_origins.len(); + if n_origins != 0 { + // + // Construct "pg_logical/replorigin_checkpoint" file based on information about replication origins + // extracted from transaction commit record. We are using this file to pass information about replication + // origins to compute to allow logical replication to restart from proper point. + // + let mut content = Vec::with_capacity(n_origins * 16 + 8); + content.extend_from_slice(&pg_constants::REPLICATION_STATE_MAGIC.to_le_bytes()); + for (origin_id, origin_lsn) in repl_origins { + content.extend_from_slice(&origin_id.to_le_bytes()); + content.extend_from_slice(&[0u8; 6]); // align to 8 bytes + content.extend_from_slice(&origin_lsn.0.to_le_bytes()); + } + let crc32 = crc32c::crc32c(&content); + content.extend_from_slice(&crc32.to_le_bytes()); + let header = new_tar_header("pg_logical/replorigin_checkpoint", content.len() as u64)?; + self.ar.append(&header, &*content).await.context( + "could not add pg_logical/replorigin_checkpoint file to basebackup tarball", + )?; + } fail_point!("basebackup-before-control-file", |_| { Err(BasebackupError::Server(anyhow!( diff --git a/pageserver/src/pgdatadir_mapping.rs b/pageserver/src/pgdatadir_mapping.rs index 764c528a9e..5eaf80bdaf 100644 --- a/pageserver/src/pgdatadir_mapping.rs +++ b/pageserver/src/pgdatadir_mapping.rs @@ -18,16 +18,16 @@ use enum_map::Enum; use itertools::Itertools; use pageserver_api::key::{ dbdir_key_range, rel_block_to_key, rel_dir_to_key, rel_key_range, rel_size_to_key, - relmap_file_key, slru_block_to_key, slru_dir_to_key, slru_segment_key_range, - slru_segment_size_to_key, twophase_file_key, twophase_key_range, AUX_FILES_KEY, CHECKPOINT_KEY, - CONTROLFILE_KEY, DBDIR_KEY, TWOPHASEDIR_KEY, + relmap_file_key, repl_origin_key, repl_origin_key_range, slru_block_to_key, slru_dir_to_key, + slru_segment_key_range, slru_segment_size_to_key, twophase_file_key, twophase_key_range, + AUX_FILES_KEY, CHECKPOINT_KEY, CONTROLFILE_KEY, DBDIR_KEY, TWOPHASEDIR_KEY, }; use pageserver_api::keyspace::SparseKeySpace; use pageserver_api::models::AuxFilePolicy; use pageserver_api::reltag::{BlockNumber, RelTag, SlruKind}; use postgres_ffi::relfile_utils::{FSM_FORKNUM, VISIBILITYMAP_FORKNUM}; use postgres_ffi::BLCKSZ; -use postgres_ffi::{Oid, TimestampTz, TransactionId}; +use postgres_ffi::{Oid, RepOriginId, TimestampTz, TransactionId}; use serde::{Deserialize, Serialize}; use std::collections::{hash_map, HashMap, HashSet}; use std::ops::ControlFlow; @@ -760,6 +760,27 @@ impl Timeline { } } + pub(crate) async fn get_replorigins( + &self, + lsn: Lsn, + ctx: &RequestContext, + ) -> Result, PageReconstructError> { + let kv = self + .scan(KeySpace::single(repl_origin_key_range()), lsn, ctx) + .await + .context("scan")?; + let mut result = HashMap::new(); + for (k, v) in kv { + let v = v.context("get value")?; + let origin_id = k.field6 as RepOriginId; + let origin_lsn = Lsn::des(&v).unwrap(); + if origin_lsn != Lsn::INVALID { + result.insert(origin_id, origin_lsn); + } + } + Ok(result) + } + /// Does the same as get_current_logical_size but counted on demand. /// Used to initialize the logical size tracking on startup. 
/// @@ -885,7 +906,9 @@ impl Timeline { Ok(( result.to_keyspace(), /* AUX sparse key space */ - SparseKeySpace(KeySpace::single(Key::metadata_aux_key_range())), + SparseKeySpace(KeySpace { + ranges: vec![repl_origin_key_range(), Key::metadata_aux_key_range()], + }), )) } @@ -1154,6 +1177,20 @@ impl<'a> DatadirModification<'a> { Ok(()) } + pub async fn set_replorigin( + &mut self, + origin_id: RepOriginId, + origin_lsn: Lsn, + ) -> anyhow::Result<()> { + let key = repl_origin_key(origin_id); + self.put(key, Value::Image(origin_lsn.ser().unwrap().into())); + Ok(()) + } + + pub async fn drop_replorigin(&mut self, origin_id: RepOriginId) -> anyhow::Result<()> { + self.set_replorigin(origin_id, Lsn::INVALID).await + } + pub fn put_control_file(&mut self, img: Bytes) -> anyhow::Result<()> { self.put(CONTROLFILE_KEY, Value::Image(img)); Ok(()) diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 5402c776e3..35e6d1f92f 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -3879,22 +3879,25 @@ impl Timeline { return Err(FlushLayerError::Cancelled); } + // FIXME(auxfilesv2): support multiple metadata key partitions might need initdb support as well? + // This code path will not be hit during regression tests. After #7099 we have a single partition + // with two key ranges. If someone wants to fix initdb optimization in the future, this might need + // to be fixed. + // For metadata, always create delta layers. let delta_layer = if !metadata_partition.parts.is_empty() { assert_eq!( metadata_partition.parts.len(), 1, - "currently sparse keyspace should only contain a single aux file keyspace" + "currently sparse keyspace should only contain a single metadata keyspace" ); let metadata_keyspace = &metadata_partition.parts[0]; - assert_eq!( - metadata_keyspace.0.ranges.len(), - 1, - "aux file keyspace should be a single range" - ); self.create_delta_layer( &frozen_layer, - Some(metadata_keyspace.0.ranges[0].clone()), + Some( + metadata_keyspace.0.ranges.first().unwrap().start + ..metadata_keyspace.0.ranges.last().unwrap().end, + ), ctx, ) .await diff --git a/pageserver/src/walingest.rs b/pageserver/src/walingest.rs index 79f075b877..4f26f2f6d1 100644 --- a/pageserver/src/walingest.rs +++ b/pageserver/src/walingest.rs @@ -234,6 +234,7 @@ impl WalIngest { modification, &parsed_xact, info == pg_constants::XLOG_XACT_COMMIT, + decoded.origin_id, ctx, ) .await?; @@ -246,6 +247,7 @@ impl WalIngest { modification, &parsed_xact, info == pg_constants::XLOG_XACT_COMMIT_PREPARED, + decoded.origin_id, ctx, ) .await?; @@ -375,6 +377,18 @@ impl WalIngest { self.checkpoint.oldestActiveXid = xlrec.oldest_running_xid; } } + pg_constants::RM_REPLORIGIN_ID => { + let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK; + if info == pg_constants::XLOG_REPLORIGIN_SET { + let xlrec = crate::walrecord::XlReploriginSet::decode(&mut buf); + modification + .set_replorigin(xlrec.node_id, xlrec.remote_lsn) + .await? + } else if info == pg_constants::XLOG_REPLORIGIN_DROP { + let xlrec = crate::walrecord::XlReploriginDrop::decode(&mut buf); + modification.drop_replorigin(xlrec.node_id).await? 
+ } + } _x => { // TODO: should probably log & fail here instead of blindly // doing something without understanding the protocol @@ -1178,6 +1192,7 @@ impl WalIngest { modification: &mut DatadirModification<'_>, parsed: &XlXactParsedRecord, is_commit: bool, + origin_id: u16, ctx: &RequestContext, ) -> anyhow::Result<()> { // Record update of CLOG pages @@ -1243,6 +1258,11 @@ impl WalIngest { } } } + if origin_id != 0 { + modification + .set_replorigin(origin_id, parsed.origin_lsn) + .await?; + } Ok(()) } diff --git a/pageserver/src/walrecord.rs b/pageserver/src/walrecord.rs index 02f6f49694..205f8dee4d 100644 --- a/pageserver/src/walrecord.rs +++ b/pageserver/src/walrecord.rs @@ -9,10 +9,10 @@ use postgres_ffi::pg_constants; use postgres_ffi::BLCKSZ; use postgres_ffi::{BlockNumber, TimestampTz}; use postgres_ffi::{MultiXactId, MultiXactOffset, MultiXactStatus, Oid, TransactionId}; -use postgres_ffi::{XLogRecord, XLOG_SIZE_OF_XLOG_RECORD}; +use postgres_ffi::{RepOriginId, XLogRecord, XLOG_SIZE_OF_XLOG_RECORD}; use serde::{Deserialize, Serialize}; use tracing::*; -use utils::bin_ser::DeserializeError; +use utils::{bin_ser::DeserializeError, lsn::Lsn}; /// Each update to a page is represented by a NeonWalRecord. It can be a wrapper /// around a PostgreSQL WAL record, or a custom neon-specific "record". @@ -116,6 +116,7 @@ pub struct DecodedWALRecord { pub blocks: Vec, pub main_data_offset: usize, + pub origin_id: u16, } #[repr(C)] @@ -573,6 +574,7 @@ pub struct XlXactParsedRecord { pub subxacts: Vec, pub xnodes: Vec, + pub origin_lsn: Lsn, } impl XlXactParsedRecord { @@ -651,6 +653,11 @@ impl XlXactParsedRecord { debug!("XLOG_XACT_COMMIT-XACT_XINFO_HAS_TWOPHASE xid {}", xid); } + let origin_lsn = if xinfo & pg_constants::XACT_XINFO_HAS_ORIGIN != 0 { + Lsn(buf.get_u64_le()) + } else { + Lsn::INVALID + }; XlXactParsedRecord { xid, info, @@ -660,6 +667,7 @@ impl XlXactParsedRecord { ts_id, subxacts, xnodes, + origin_lsn, } } } @@ -810,6 +818,36 @@ impl XlRunningXacts { } } +#[repr(C)] +#[derive(Debug)] +pub struct XlReploriginDrop { + pub node_id: RepOriginId, +} + +impl XlReploriginDrop { + pub fn decode(buf: &mut Bytes) -> XlReploriginDrop { + XlReploriginDrop { + node_id: buf.get_u16_le(), + } + } +} + +#[repr(C)] +#[derive(Debug)] +pub struct XlReploriginSet { + pub remote_lsn: Lsn, + pub node_id: RepOriginId, +} + +impl XlReploriginSet { + pub fn decode(buf: &mut Bytes) -> XlReploriginSet { + XlReploriginSet { + remote_lsn: Lsn(buf.get_u64_le()), + node_id: buf.get_u16_le(), + } + } +} + /// Main routine to decode a WAL record and figure out which blocks are modified // // See xlogrecord.h for details @@ -844,6 +882,7 @@ pub fn decode_wal_record( let mut rnode_dbnode: u32 = 0; let mut rnode_relnode: u32 = 0; let mut got_rnode = false; + let mut origin_id: u16 = 0; let mut buf = record.clone(); @@ -891,7 +930,7 @@ pub fn decode_wal_record( pg_constants::XLR_BLOCK_ID_ORIGIN => { // RepOriginId is uint16 - buf.advance(2); + origin_id = buf.get_u16_le(); } pg_constants::XLR_BLOCK_ID_TOPLEVEL_XID => { @@ -1088,6 +1127,7 @@ pub fn decode_wal_record( decoded.xl_info = xlogrec.xl_info; decoded.xl_rmid = xlogrec.xl_rmid; decoded.record = record; + decoded.origin_id = origin_id; decoded.main_data_offset = main_data_offset; Ok(()) diff --git a/test_runner/regress/test_compaction.py b/test_runner/regress/test_compaction.py index 9772e2d106..b2e4d35cb8 100644 --- a/test_runner/regress/test_compaction.py +++ b/test_runner/regress/test_compaction.py @@ -81,8 +81,10 @@ page_cache_size=10 non_vectored_sum 
= metrics.query_one("pageserver_layers_visited_per_read_global_sum") non_vectored_count = metrics.query_one("pageserver_layers_visited_per_read_global_count") - non_vectored_average = non_vectored_sum.value / non_vectored_count.value - + if non_vectored_count.value != 0: + non_vectored_average = non_vectored_sum.value / non_vectored_count.value + else: + non_vectored_average = 0 vectored_sum = metrics.query_one("pageserver_layers_visited_per_vectored_read_global_sum") vectored_count = metrics.query_one("pageserver_layers_visited_per_vectored_read_global_count") if vectored_count.value > 0: diff --git a/test_runner/regress/test_subscriber_restart.py b/test_runner/regress/test_subscriber_restart.py new file mode 100644 index 0000000000..d7f3962620 --- /dev/null +++ b/test_runner/regress/test_subscriber_restart.py @@ -0,0 +1,57 @@ +import threading +import time + +from fixtures.neon_fixtures import NeonEnv +from fixtures.utils import wait_until + + +# This test checks of logical replication subscriber is able to correctly restart replication without receiving duplicates. +# It requires tracking information about replication origins at page server side +def test_subscriber_restart(neon_simple_env: NeonEnv): + env = neon_simple_env + env.neon_cli.create_branch("publisher") + pub = env.endpoints.create("publisher") + pub.start() + + env.neon_cli.create_branch("subscriber") + sub = env.endpoints.create("subscriber") + sub.start() + + n_records = 100000 + n_restarts = 100 + + def check_that_changes_propagated(): + scur.execute("SELECT count(*) FROM t") + res = scur.fetchall() + assert res[0][0] == n_records + + def insert_data(pub): + with pub.cursor() as pcur: + for i in range(0, n_records): + pcur.execute("INSERT into t values (%s,random()*100000)", (i,)) + + with pub.cursor() as pcur: + with sub.cursor() as scur: + pcur.execute("CREATE TABLE t (pk integer primary key, sk integer)") + pcur.execute("CREATE PUBLICATION pub FOR TABLE t") + scur.execute("CREATE TABLE t (pk integer primary key, sk integer)") + # scur.execute("CREATE INDEX on t(sk)") # slowdown applying WAL at replica + pub_conn = f"host=localhost port={pub.pg_port} dbname=postgres user=cloud_admin" + query = f"CREATE SUBSCRIPTION sub CONNECTION '{pub_conn}' PUBLICATION pub" + scur.execute(query) + time.sleep(2) # let initial table sync complete + + thread = threading.Thread(target=insert_data, args=(pub,), daemon=True) + thread.start() + + for _ in range(n_restarts): + # restart subscriber + # time.sleep(2) + sub.stop("immediate") + sub.start() + + thread.join() + pcur.execute(f"INSERT into t values ({n_records}, 0)") + n_records += 1 + with sub.cursor() as scur: + wait_until(10, 0.5, check_that_changes_propagated) From 69026a9a364bfe5d944c8443fea5f3cdc2d1f7e2 Mon Sep 17 00:00:00 2001 From: John Spray Date: Mon, 3 Jun 2024 19:13:01 +0100 Subject: [PATCH 125/220] storcon_cli: add 'drop' and eviction interval utilities (#7938) The storage controller has 'drop' APIs for tenants and nodes, for use in situations where something weird has happened: - node-drop is useful until we implement proper node decom, or if we have a partially provisioned node that somehow gets registered with the storage controller but is then dead. - tenant-drop is useful if we accidentally add a tenant that shouldn't be there at all, or if we want to make the controller forget about a tenant without deleting its data. For example, if one uses the tenant-warmup command with a bad tenant ID and needs to clean that up. 
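As a sketch (assuming the binary is invoked as `storcon_cli` and pointed at a storage controller), cleaning up such a mistake might look like `storcon_cli tenant-drop --tenant-id <TENANT_ID> --unclean`, or `storcon_cli node-drop --node-id <NODE_ID> --unclean` for a dead, partially provisioned node; note that the safety flag is spelled `--unclean` in the code added below.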
The drop commands require an `--unsafe` parameter, to reduce the chance that someone incorrectly assumes these are the normal/clean ways to delete things. This PR also adds a convenience command for setting the time based eviction parameters on a tenant. This is useful when onboarding an existing tenant that has high resident size due to storage amplification in compaction: setting a lower time based eviction threshold brings down the resident size ahead of doing a shard split. --- Cargo.lock | 1 + control_plane/storcon_cli/Cargo.toml | 1 + control_plane/storcon_cli/src/main.rs | 67 ++++++++++++++++++++++++++- 3 files changed, 67 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 84d919b817..dbbf330cf9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5820,6 +5820,7 @@ dependencies = [ "anyhow", "clap", "comfy-table", + "humantime", "hyper 0.14.26", "pageserver_api", "pageserver_client", diff --git a/control_plane/storcon_cli/Cargo.toml b/control_plane/storcon_cli/Cargo.toml index 61eb7fa4e4..ed3462961f 100644 --- a/control_plane/storcon_cli/Cargo.toml +++ b/control_plane/storcon_cli/Cargo.toml @@ -9,6 +9,7 @@ license.workspace = true anyhow.workspace = true clap.workspace = true comfy-table.workspace = true +humantime.workspace = true hyper.workspace = true pageserver_api.workspace = true pageserver_client.workspace = true diff --git a/control_plane/storcon_cli/src/main.rs b/control_plane/storcon_cli/src/main.rs index c19bc96cdb..05c4acdf90 100644 --- a/control_plane/storcon_cli/src/main.rs +++ b/control_plane/storcon_cli/src/main.rs @@ -7,8 +7,9 @@ use pageserver_api::{ TenantDescribeResponse, TenantPolicyRequest, }, models::{ - LocationConfigSecondary, ShardParameters, TenantConfig, TenantConfigRequest, - TenantCreateRequest, TenantShardSplitRequest, TenantShardSplitResponse, + EvictionPolicy, EvictionPolicyLayerAccessThreshold, LocationConfigSecondary, + ShardParameters, TenantConfig, TenantConfigRequest, TenantCreateRequest, + TenantShardSplitRequest, TenantShardSplitResponse, }, shard::{ShardStripeSize, TenantShardId}, }; @@ -125,6 +126,28 @@ enum Command { #[arg(long)] tenant_id: TenantId, }, + /// Uncleanly drop a tenant from the storage controller: this doesn't delete anything from pageservers. Appropriate + /// if you e.g. used `tenant-warmup` by mistake on a tenant ID that doesn't really exist, or is in some other region. + TenantDrop { + #[arg(long)] + tenant_id: TenantId, + #[arg(long)] + unclean: bool, + }, + NodeDrop { + #[arg(long)] + node_id: NodeId, + #[arg(long)] + unclean: bool, + }, + TenantSetTimeBasedEviction { + #[arg(long)] + tenant_id: TenantId, + #[arg(long)] + period: humantime::Duration, + #[arg(long)] + threshold: humantime::Duration, + }, } #[derive(Parser)] @@ -674,6 +697,46 @@ async fn main() -> anyhow::Result<()> { } } } + Command::TenantDrop { tenant_id, unclean } => { + if !unclean { + anyhow::bail!("This command is not a tenant deletion, and uncleanly drops all controller state for the tenant. If you know what you're doing, add `--unclean` to proceed.") + } + storcon_client + .dispatch::<(), ()>( + Method::POST, + format!("debug/v1/tenant/{tenant_id}/drop"), + None, + ) + .await?; + } + Command::NodeDrop { node_id, unclean } => { + if !unclean { + anyhow::bail!("This command is not a clean node decommission, and uncleanly drops all controller state for the node, without checking if any tenants still refer to it. 
If you know what you're doing, add `--unclean` to proceed.") + } + storcon_client + .dispatch::<(), ()>(Method::POST, format!("debug/v1/node/{node_id}/drop"), None) + .await?; + } + Command::TenantSetTimeBasedEviction { + tenant_id, + period, + threshold, + } => { + vps_client + .tenant_config(&TenantConfigRequest { + tenant_id, + config: TenantConfig { + eviction_policy: Some(EvictionPolicy::LayerAccessThreshold( + EvictionPolicyLayerAccessThreshold { + period: period.into(), + threshold: threshold.into(), + }, + )), + ..Default::default() + }, + }) + .await?; + } } Ok(()) From 11bb265de1aff794d0945c7e9c888d87f7d13824 Mon Sep 17 00:00:00 2001 From: John Spray Date: Mon, 3 Jun 2024 21:10:13 +0100 Subject: [PATCH 126/220] pageserver: don't squash all image layer generation errors into anyhow::Error (#7943) ## Problem CreateImageLayersError and CompactionError had proper From implementations, but compact_legacy was explicitly squashing all image layer errors into an anyhow::Error anyway. This led to errors like: ``` Error processing HTTP request: InternalServerError(timeline shutting down Stack backtrace: 0: <>::from as core::ops::function::FnOnce<(pageserver::tenant::timeline::CreateImageLayersError,)>>::call_once at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/core/src/ops/function.rs:250:5 1: , pageserver::tenant::timeline::CreateImageLayersError>>::map_err::>::from> at /rustc/9b00956e56009bab2aa15d7bff10916599e3d6d6/library/core/src/result.rs:829:27 2: ::compact_legacy::{closure#0} at pageserver/src/tenant/timeline/compaction.rs:125:36 3: ::compact::{closure#0} at pageserver/src/tenant/timeline.rs:1719:84 4: pageserver::http::routes::timeline_checkpoint_handler::{closure#0}::{closure#0} ``` Closes: https://github.com/neondatabase/neon/issues/7861 --- pageserver/src/tenant/timeline/compaction.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pageserver/src/tenant/timeline/compaction.rs b/pageserver/src/tenant/timeline/compaction.rs index 07a12f535a..15c77d0316 100644 --- a/pageserver/src/tenant/timeline/compaction.rs +++ b/pageserver/src/tenant/timeline/compaction.rs @@ -133,8 +133,7 @@ impl Timeline { }, &image_ctx, ) - .await - .map_err(anyhow::Error::from)?; + .await?; self.upload_new_image_layers(image_layers)?; partitioning.parts.len() From 00032c9d9fff0dab5c69e612166c10e5245b43a4 Mon Sep 17 00:00:00 2001 From: Anna Khanova <32508607+khanova@users.noreply.github.com> Date: Tue, 4 Jun 2024 06:07:54 +0200 Subject: [PATCH 127/220] [proxy] Fix dynamic rate limiter (#7950) ## Problem There was a bug in dynamic rate limiter, which exhausted CPU in proxy and proxy wasn't able to accept any connections. ## Summary of changes 1. `if self.available > 1` -> `if self.available >= 1` 2. remove `timeout_at` to use just timeout 3. remove potential infinite loops which can exhaust CPUs. 
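For context on point 1, a minimal standalone sketch (illustrative names only, not the proxy's `LimiterInner`) of why the comparison matters: with the old `> 1` check, a limiter whose limit is 1 can never hand out its only permit, so callers keep looping.

```
struct Permits {
    available: usize,
    in_flight: usize,
}

impl Permits {
    fn take(&mut self) -> Option<()> {
        // The old check was `self.available > 1`: with exactly one permit left
        // it never fires, so a limiter configured with limit 1 hands out
        // nothing and callers keep waiting/retrying.
        if self.available >= 1 {
            self.available -= 1;
            self.in_flight += 1;
            Some(())
        } else {
            None
        }
    }
}

fn main() {
    let mut permits = Permits { available: 1, in_flight: 0 };
    assert!(permits.take().is_some()); // the single permit is now usable
    assert!(permits.take().is_none()); // and nothing more until a release
    println!("in flight: {}", permits.in_flight);
}
```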
--- proxy/src/console/provider.rs | 4 +- proxy/src/rate_limiter/limit_algorithm.rs | 38 +++----- .../src/rate_limiter/limit_algorithm/aimd.rs | 88 ++++++++++++++++++- 3 files changed, 102 insertions(+), 28 deletions(-) diff --git a/proxy/src/console/provider.rs b/proxy/src/console/provider.rs index 4d074f98a5..634ec9042c 100644 --- a/proxy/src/console/provider.rs +++ b/proxy/src/console/provider.rs @@ -452,7 +452,7 @@ pub struct ApiLocks { #[derive(Debug, thiserror::Error)] pub enum ApiLockError { - #[error("permit could not be acquired")] + #[error("timeout acquiring resource permit")] TimeoutError(#[from] tokio::time::error::Elapsed), } @@ -504,7 +504,7 @@ impl ApiLocks { .clone() } }; - let permit = semaphore.acquire_deadline(now + self.timeout).await; + let permit = semaphore.acquire_timeout(self.timeout).await; self.metrics .semaphore_acquire_seconds diff --git a/proxy/src/rate_limiter/limit_algorithm.rs b/proxy/src/rate_limiter/limit_algorithm.rs index 072fdb80b0..3842ce269e 100644 --- a/proxy/src/rate_limiter/limit_algorithm.rs +++ b/proxy/src/rate_limiter/limit_algorithm.rs @@ -3,7 +3,7 @@ use parking_lot::Mutex; use std::{pin::pin, sync::Arc, time::Duration}; use tokio::{ sync::Notify, - time::{error::Elapsed, timeout_at, Instant}, + time::{error::Elapsed, Instant}, }; use self::aimd::Aimd; @@ -80,7 +80,7 @@ pub struct LimiterInner { } impl LimiterInner { - fn update(&mut self, latency: Duration, outcome: Option) { + fn update_limit(&mut self, latency: Duration, outcome: Option) { if let Some(outcome) = outcome { let sample = Sample { latency, @@ -92,12 +92,12 @@ impl LimiterInner { } fn take(&mut self, ready: &Notify) -> Option<()> { - if self.available > 1 { + if self.available >= 1 { self.available -= 1; self.in_flight += 1; // tell the next in the queue that there is a permit ready - if self.available > 1 { + if self.available >= 1 { ready.notify_one(); } Some(()) @@ -157,16 +157,12 @@ impl DynamicLimiter { } /// Try to acquire a concurrency [Token], waiting for `duration` if there are none available. - /// - /// Returns `None` if there are none available after `duration`. pub async fn acquire_timeout(self: &Arc, duration: Duration) -> Result { - self.acquire_deadline(Instant::now() + duration).await + tokio::time::timeout(duration, self.acquire()).await? } - /// Try to acquire a concurrency [Token], waiting until `deadline` if there are none available. - /// - /// Returns `None` if there are none available after `deadline`. - pub async fn acquire_deadline(self: &Arc, deadline: Instant) -> Result { + /// Try to acquire a concurrency [Token]. + async fn acquire(self: &Arc) -> Result { if self.config.initial_limit == 0 { // If the rate limiter is disabled, we can always acquire a token. 
Ok(Token::disabled()) @@ -174,22 +170,16 @@ impl DynamicLimiter { let mut notified = pin!(self.ready.notified()); let mut ready = notified.as_mut().enable(); loop { - let mut limit = None; if ready { let mut inner = self.inner.lock(); if inner.take(&self.ready).is_some() { break Ok(Token::new(self.clone())); - } - limit = Some(inner.limit); - } - match timeout_at(deadline, notified.as_mut()).await { - Ok(()) => ready = true, - Err(e) => { - let limit = limit.unwrap_or_else(|| self.inner.lock().limit); - tracing::info!(limit, "could not acquire token in time"); - break Err(e); + } else { + notified.set(self.ready.notified()); } } + notified.as_mut().await; + ready = true; } } } @@ -208,14 +198,14 @@ impl DynamicLimiter { let mut inner = self.inner.lock(); - inner.update(start.elapsed(), outcome); + inner.update_limit(start.elapsed(), outcome); + + inner.in_flight -= 1; if inner.in_flight < inner.limit { inner.available = inner.limit - inner.in_flight; // At least 1 permit is now available self.ready.notify_one(); } - - inner.in_flight -= 1; } /// The current state of the limiter. diff --git a/proxy/src/rate_limiter/limit_algorithm/aimd.rs b/proxy/src/rate_limiter/limit_algorithm/aimd.rs index 370d4be802..ccc9c42420 100644 --- a/proxy/src/rate_limiter/limit_algorithm/aimd.rs +++ b/proxy/src/rate_limiter/limit_algorithm/aimd.rs @@ -51,7 +51,9 @@ impl LimitAlgorithm for Aimd { // E.g. round(2 * 0.9) = 2, but floor(2 * 0.9) = 1 let limit = limit.floor() as usize; - limit.clamp(self.min, self.max) + let limit = limit.clamp(self.min, self.max); + tracing::info!(limit, "limit decreased"); + limit } } } @@ -67,6 +69,53 @@ mod tests { use super::*; + #[tokio::test(start_paused = true)] + async fn increase_decrease() { + let config = RateLimiterConfig { + initial_limit: 1, + algorithm: RateLimitAlgorithm::Aimd { + conf: Aimd { + min: 1, + max: 2, + inc: 10, + dec: 0.5, + utilisation: 0.8, + }, + }, + }; + + let limiter = DynamicLimiter::new(config); + + let token = limiter + .acquire_timeout(Duration::from_millis(1)) + .await + .unwrap(); + token.release(Outcome::Success); + + assert_eq!(limiter.state().limit(), 2); + + let token = limiter + .acquire_timeout(Duration::from_millis(1)) + .await + .unwrap(); + token.release(Outcome::Success); + assert_eq!(limiter.state().limit(), 2); + + let token = limiter + .acquire_timeout(Duration::from_millis(1)) + .await + .unwrap(); + token.release(Outcome::Overload); + assert_eq!(limiter.state().limit(), 1); + + let token = limiter + .acquire_timeout(Duration::from_millis(1)) + .await + .unwrap(); + token.release(Outcome::Overload); + assert_eq!(limiter.state().limit(), 1); + } + #[tokio::test(start_paused = true)] async fn should_decrease_limit_on_overload() { let config = RateLimiterConfig { @@ -85,7 +134,7 @@ mod tests { let limiter = DynamicLimiter::new(config); let token = limiter - .acquire_timeout(Duration::from_millis(1)) + .acquire_timeout(Duration::from_millis(100)) .await .unwrap(); token.release(Outcome::Overload); @@ -93,6 +142,41 @@ mod tests { assert_eq!(limiter.state().limit(), 5, "overload: decrease"); } + #[tokio::test(start_paused = true)] + async fn acquire_timeout_times_out() { + let config = RateLimiterConfig { + initial_limit: 1, + algorithm: RateLimitAlgorithm::Aimd { + conf: Aimd { + min: 1, + max: 2, + inc: 10, + dec: 0.5, + utilisation: 0.8, + }, + }, + }; + + let limiter = DynamicLimiter::new(config); + + let token = limiter + .acquire_timeout(Duration::from_millis(1)) + .await + .unwrap(); + let now = tokio::time::Instant::now(); + 
limiter + .acquire_timeout(Duration::from_secs(1)) + .await + .err() + .unwrap(); + + assert!(now.elapsed() >= Duration::from_secs(1)); + + token.release(Outcome::Success); + + assert_eq!(limiter.state().limit(), 2); + } + #[tokio::test(start_paused = true)] async fn should_increase_limit_on_success_when_using_gt_util_threshold() { let config = RateLimiterConfig { From 387a36874c9bd145982c5ee3c5a3d47af16344d7 Mon Sep 17 00:00:00 2001 From: Konstantin Knizhnik Date: Tue, 4 Jun 2024 11:56:03 +0300 Subject: [PATCH 128/220] Set page LSN when reconstructing VM in page server (#7935) ## Problem Page LSN is not set while VM update. May be reason of test_vm_bits flukyness. Buit more serious issues can be also caused by wrong LSN. Related: https://github.com/neondatabase/neon/pull/7935 ## Summary of changes - In `apply_in_neon`, set the LSN bytes when applying records of type `ClearVisibilityMapFlags` --- pageserver/src/tenant.rs | 4 ++-- pageserver/src/walredo.rs | 4 ++-- pageserver/src/walredo/apply_neon.rs | 6 +++++- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 7ca829535b..19a0f59b2a 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -3992,8 +3992,8 @@ pub(crate) mod harness { let base_img = base_img.expect("Neon WAL redo requires base image").1; let mut page = BytesMut::new(); page.extend_from_slice(&base_img); - for (_record_lsn, record) in records { - apply_neon::apply_in_neon(&record, key, &mut page)?; + for (record_lsn, record) in records { + apply_neon::apply_in_neon(&record, record_lsn, key, &mut page)?; } Ok(page.freeze()) } else { diff --git a/pageserver/src/walredo.rs b/pageserver/src/walredo.rs index 1d72a97688..d660b68a34 100644 --- a/pageserver/src/walredo.rs +++ b/pageserver/src/walredo.rs @@ -361,10 +361,10 @@ impl PostgresRedoManager { &self, key: Key, page: &mut BytesMut, - _record_lsn: Lsn, + record_lsn: Lsn, record: &NeonWalRecord, ) -> anyhow::Result<()> { - apply_neon::apply_in_neon(record, key, page)?; + apply_neon::apply_in_neon(record, record_lsn, key, page)?; Ok(()) } diff --git a/pageserver/src/walredo/apply_neon.rs b/pageserver/src/walredo/apply_neon.rs index 695894a924..24e8d8b01c 100644 --- a/pageserver/src/walredo/apply_neon.rs +++ b/pageserver/src/walredo/apply_neon.rs @@ -14,6 +14,7 @@ use postgres_ffi::v14::nonrelfile_utils::{ use postgres_ffi::BLCKSZ; use tracing::*; use utils::bin_ser::BeSer; +use utils::lsn::Lsn; /// Can this request be served by neon redo functions /// or we need to pass it to wal-redo postgres process? 
@@ -32,6 +33,7 @@ pub(crate) fn can_apply_in_neon(rec: &NeonWalRecord) -> bool { pub(crate) fn apply_in_neon( record: &NeonWalRecord, + lsn: Lsn, key: Key, page: &mut BytesMut, ) -> Result<(), anyhow::Error> { @@ -67,6 +69,7 @@ pub(crate) fn apply_in_neon( let map = &mut page[pg_constants::MAXALIGN_SIZE_OF_PAGE_HEADER_DATA..]; map[map_byte as usize] &= !(flags << map_offset); + postgres_ffi::page_set_lsn(page, lsn); } // Repeat for 'old_heap_blkno', if any @@ -80,6 +83,7 @@ pub(crate) fn apply_in_neon( let map = &mut page[pg_constants::MAXALIGN_SIZE_OF_PAGE_HEADER_DATA..]; map[map_byte as usize] &= !(flags << map_offset); + postgres_ffi::page_set_lsn(page, lsn); } } // Non-relational WAL records are handled here, with custom code that has the @@ -285,7 +289,7 @@ mod test { let mut page = BytesMut::from_iter(base_image); for record in deltas { - apply_in_neon(&record, file_path, &mut page)?; + apply_in_neon(&record, Lsn(8), file_path, &mut page)?; } let reconstructed = AuxFilesDirectory::des(&page)?; From 0acb604fa3793a0ae99238c026bf3c40391ae461 Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Tue, 4 Jun 2024 14:19:36 +0300 Subject: [PATCH 129/220] test: no missed wakeups, cancellation and timeout flow to downloads (#7863) I suspected a wakeup could be lost with `remote_storage::support::DownloadStream` if the cancellation and inner stream wakeups happen simultaneously. The next poll would only return the cancellation error without setting the wakeup. There is no lost wakeup because the single future for getting the cancellation error is consumed when the value is ready, and a new future is created for the *next* value. The new future is always polled. Similarly, if only the `Stream::poll_next` is being used after a `Some(_)` value has been yielded, it makes no sense to have an expectation of a wakeup for the *(N+1)th* stream value already set because when a value is wanted, `Stream::poll_next` will be called. A test is added to show that the above is true. Additionally, there was a question of these cancellations and timeouts flowing to attached or secondary tenant downloads. A test is added to show that this, in fact, happens. Lastly, a warning message is logged when a download stream is polled after a timeout or cancellation error (currently unexpected) so we can rule it out while troubleshooting. --- libs/remote_storage/src/support.rs | 50 ++++- .../tenant/remote_timeline_client/download.rs | 5 + pageserver/src/tenant/secondary/downloader.rs | 7 +- pageserver/src/tenant/secondary/scheduler.rs | 7 +- test_runner/fixtures/remote_storage.py | 5 + test_runner/regress/test_ondemand_download.py | 201 +++++++++++++++++- 6 files changed, 263 insertions(+), 12 deletions(-) diff --git a/libs/remote_storage/src/support.rs b/libs/remote_storage/src/support.rs index d146b5445b..1ed9ed9305 100644 --- a/libs/remote_storage/src/support.rs +++ b/libs/remote_storage/src/support.rs @@ -78,6 +78,10 @@ where let e = Err(std::io::Error::from(e)); return Poll::Ready(Some(e)); } + } else { + // this would be perfectly valid behaviour for doing a graceful completion on the + // download for example, but not one we expect to do right now. + tracing::warn!("continuing polling after having cancelled or timeouted"); } this.inner.poll_next(cx) @@ -89,13 +93,22 @@ where } /// Fires only on the first cancel or timeout, not on both. -pub(crate) async fn cancel_or_timeout( +pub(crate) fn cancel_or_timeout( timeout: Duration, cancel: CancellationToken, -) -> TimeoutOrCancel { - tokio::select! 
{ - _ = tokio::time::sleep(timeout) => TimeoutOrCancel::Timeout, - _ = cancel.cancelled() => TimeoutOrCancel::Cancel, +) -> impl std::future::Future + 'static { + // futures are lazy, they don't do anything before being polled. + // + // "precalculate" the wanted deadline before returning the future, so that we can use pause + // failpoint to trigger a timeout in test. + let deadline = tokio::time::Instant::now() + timeout; + async move { + tokio::select! { + _ = tokio::time::sleep_until(deadline) => TimeoutOrCancel::Timeout, + _ = cancel.cancelled() => { + TimeoutOrCancel::Cancel + }, + } } } @@ -172,4 +185,31 @@ mod tests { _ = tokio::time::sleep(Duration::from_secs(121)) => {}, } } + + #[tokio::test] + async fn notified_but_pollable_after() { + let inner = futures::stream::once(futures::future::ready(Ok(bytes::Bytes::from_static( + b"hello world", + )))); + let timeout = Duration::from_secs(120); + let cancel = CancellationToken::new(); + + cancel.cancel(); + let stream = DownloadStream::new(cancel_or_timeout(timeout, cancel.clone()), inner); + let mut stream = std::pin::pin!(stream); + + let next = stream.next().await; + let ioe = next.unwrap().unwrap_err(); + assert!( + matches!( + ioe.get_ref().unwrap().downcast_ref::(), + Some(&DownloadError::Cancelled) + ), + "{ioe:?}" + ); + + let next = stream.next().await; + let bytes = next.unwrap().unwrap(); + assert_eq!(&b"hello world"[..], bytes); + } } diff --git a/pageserver/src/tenant/remote_timeline_client/download.rs b/pageserver/src/tenant/remote_timeline_client/download.rs index bd75f980e8..d0385e4aee 100644 --- a/pageserver/src/tenant/remote_timeline_client/download.rs +++ b/pageserver/src/tenant/remote_timeline_client/download.rs @@ -28,6 +28,7 @@ use crate::TEMP_FILE_SUFFIX; use remote_storage::{DownloadError, GenericRemoteStorage, ListingMode, RemotePath}; use utils::crashsafe::path_with_suffix_extension; use utils::id::{TenantId, TimelineId}; +use utils::pausable_failpoint; use super::index::{IndexPart, LayerFileMetadata}; use super::{ @@ -152,6 +153,8 @@ async fn download_object<'a>( let download = storage.download(src_path, cancel).await?; + pausable_failpoint!("before-downloading-layer-stream-pausable"); + let mut buf_writer = tokio::io::BufWriter::with_capacity(super::BUFFER_SIZE, destination_file); @@ -199,6 +202,8 @@ async fn download_object<'a>( let mut download = storage.download(src_path, cancel).await?; + pausable_failpoint!("before-downloading-layer-stream-pausable"); + // TODO: use vectored write (writev) once supported by tokio-epoll-uring. // There's chunks_vectored() on the stream. let (bytes_amount, destination_file) = async { diff --git a/pageserver/src/tenant/secondary/downloader.rs b/pageserver/src/tenant/secondary/downloader.rs index 5c915d6b53..62803c7838 100644 --- a/pageserver/src/tenant/secondary/downloader.rs +++ b/pageserver/src/tenant/secondary/downloader.rs @@ -1000,7 +1000,7 @@ impl<'a> TenantDownloader<'a> { layer.name, layer.metadata.file_size ); - let downloaded_bytes = match download_layer_file( + let downloaded_bytes = download_layer_file( self.conf, self.remote_storage, *tenant_shard_id, @@ -1011,8 +1011,9 @@ impl<'a> TenantDownloader<'a> { &self.secondary_state.cancel, ctx, ) - .await - { + .await; + + let downloaded_bytes = match downloaded_bytes { Ok(bytes) => bytes, Err(DownloadError::NotFound) => { // A heatmap might be out of date and refer to a layer that doesn't exist any more. 
diff --git a/pageserver/src/tenant/secondary/scheduler.rs b/pageserver/src/tenant/secondary/scheduler.rs index 0ec1c7872a..28cf2125df 100644 --- a/pageserver/src/tenant/secondary/scheduler.rs +++ b/pageserver/src/tenant/secondary/scheduler.rs @@ -334,8 +334,11 @@ where let tenant_shard_id = job.get_tenant_shard_id(); let barrier = if let Some(barrier) = self.get_running(tenant_shard_id) { - tracing::info!(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), - "Command already running, waiting for it"); + tracing::info!( + tenant_id=%tenant_shard_id.tenant_id, + shard_id=%tenant_shard_id.shard_slug(), + "Command already running, waiting for it" + ); barrier } else { let running = self.spawn_now(job); diff --git a/test_runner/fixtures/remote_storage.py b/test_runner/fixtures/remote_storage.py index ee18c53b52..6f6526d3fc 100644 --- a/test_runner/fixtures/remote_storage.py +++ b/test_runner/fixtures/remote_storage.py @@ -171,6 +171,8 @@ class S3Storage: """Is this MOCK_S3 (false) or REAL_S3 (true)""" real: bool endpoint: Optional[str] = None + """formatting deserialized with humantime crate, for example "1s".""" + custom_timeout: Optional[str] = None def access_env_vars(self) -> Dict[str, str]: if self.aws_profile is not None: @@ -208,6 +210,9 @@ class S3Storage: if self.endpoint is not None: rv["endpoint"] = self.endpoint + if self.custom_timeout is not None: + rv["timeout"] = self.custom_timeout + return rv def to_toml_inline_table(self) -> str: diff --git a/test_runner/regress/test_ondemand_download.py b/test_runner/regress/test_ondemand_download.py index 6fe23846c7..4a25dfd874 100644 --- a/test_runner/regress/test_ondemand_download.py +++ b/test_runner/regress/test_ondemand_download.py @@ -3,8 +3,10 @@ import time from collections import defaultdict +from concurrent.futures import ThreadPoolExecutor from typing import Any, DefaultDict, Dict, Tuple +import pytest from fixtures.common_types import Lsn from fixtures.log_helper import log from fixtures.neon_fixtures import ( @@ -13,7 +15,7 @@ from fixtures.neon_fixtures import ( last_flush_lsn_upload, wait_for_last_flush_lsn, ) -from fixtures.pageserver.http import PageserverHttpClient +from fixtures.pageserver.http import PageserverApiException, PageserverHttpClient from fixtures.pageserver.utils import ( assert_tenant_state, wait_for_last_record_lsn, @@ -21,7 +23,7 @@ from fixtures.pageserver.utils import ( wait_for_upload_queue_empty, wait_until_tenant_active, ) -from fixtures.remote_storage import RemoteStorageKind +from fixtures.remote_storage import RemoteStorageKind, S3Storage, s3_storage from fixtures.utils import query_scalar, wait_until @@ -656,5 +658,200 @@ def test_compaction_downloads_on_demand_with_image_creation(neon_env_builder: Ne assert dict(kinds_after) == {"Delta": 4, "Image": 1} +def test_layer_download_cancelled_by_config_location(neon_env_builder: NeonEnvBuilder): + """ + Demonstrates that tenant shutdown will cancel on-demand download and secondary doing warmup. 
+ """ + neon_env_builder.enable_pageserver_remote_storage(s3_storage()) + + # turn off background tasks so that they don't interfere with the downloads + env = neon_env_builder.init_start( + initial_tenant_conf={ + "gc_period": "0s", + "compaction_period": "0s", + } + ) + client = env.pageserver.http_client() + failpoint = "before-downloading-layer-stream-pausable" + client.configure_failpoints((failpoint, "pause")) + + env.pageserver.allowed_errors.extend( + [ + ".*downloading failed, possibly for shutdown.*", + ] + ) + + info = client.layer_map_info(env.initial_tenant, env.initial_timeline) + assert len(info.delta_layers()) == 1 + + layer = info.delta_layers()[0] + + client.tenant_heatmap_upload(env.initial_tenant) + + # evict the initdb layer so we can download it + client.evict_layer(env.initial_tenant, env.initial_timeline, layer.layer_file_name) + + with ThreadPoolExecutor(max_workers=2) as exec: + download = exec.submit( + client.download_layer, + env.initial_tenant, + env.initial_timeline, + layer.layer_file_name, + ) + + _, offset = wait_until( + 20, 0.5, lambda: env.pageserver.assert_log_contains(f"at failpoint {failpoint}") + ) + + location_conf = {"mode": "Detached", "tenant_conf": {}} + # assume detach removes the layers + detach = exec.submit(client.tenant_location_conf, env.initial_tenant, location_conf) + + _, offset = wait_until( + 20, + 0.5, + lambda: env.pageserver.assert_log_contains( + "closing is taking longer than expected", offset + ), + ) + + client.configure_failpoints((failpoint, "off")) + + with pytest.raises( + PageserverApiException, match="downloading failed, possibly for shutdown" + ): + download.result() + + env.pageserver.assert_log_contains(".*downloading failed, possibly for shutdown.*") + + detach.result() + + client.configure_failpoints((failpoint, "pause")) + + _, offset = wait_until( + 20, + 0.5, + lambda: env.pageserver.assert_log_contains(f"cfg failpoint: {failpoint} pause", offset), + ) + + location_conf = { + "mode": "Secondary", + "secondary_conf": {"warm": True}, + "tenant_conf": {}, + } + + client.tenant_location_conf(env.initial_tenant, location_conf) + + warmup = exec.submit(client.tenant_secondary_download, env.initial_tenant, wait_ms=30000) + + _, offset = wait_until( + 20, + 0.5, + lambda: env.pageserver.assert_log_contains(f"at failpoint {failpoint}", offset), + ) + + client.configure_failpoints((failpoint, "off")) + location_conf = {"mode": "Detached", "tenant_conf": {}} + client.tenant_location_conf(env.initial_tenant, location_conf) + + client.configure_failpoints((failpoint, "off")) + + # here we have nothing in the log, but we see that the warmup and conf location update worked + warmup.result() + + +def test_layer_download_timeouted(neon_env_builder: NeonEnvBuilder): + """ + Pause using a pausable_failpoint longer than the client timeout to simulate the timeout happening. 
+ """ + neon_env_builder.enable_pageserver_remote_storage(s3_storage()) + assert isinstance(neon_env_builder.pageserver_remote_storage, S3Storage) + neon_env_builder.pageserver_remote_storage.custom_timeout = "1s" + + # turn off background tasks so that they don't interfere with the downloads + env = neon_env_builder.init_start( + initial_tenant_conf={ + "gc_period": "0s", + "compaction_period": "0s", + } + ) + client = env.pageserver.http_client() + failpoint = "before-downloading-layer-stream-pausable" + client.configure_failpoints((failpoint, "pause")) + + info = client.layer_map_info(env.initial_tenant, env.initial_timeline) + assert len(info.delta_layers()) == 1 + + layer = info.delta_layers()[0] + + client.tenant_heatmap_upload(env.initial_tenant) + + # evict so we can download it + client.evict_layer(env.initial_tenant, env.initial_timeline, layer.layer_file_name) + + with ThreadPoolExecutor(max_workers=2) as exec: + download = exec.submit( + client.download_layer, + env.initial_tenant, + env.initial_timeline, + layer.layer_file_name, + ) + + _, offset = wait_until( + 20, 0.5, lambda: env.pageserver.assert_log_contains(f"at failpoint {failpoint}") + ) + # ensure enough time while paused to trip the timeout + time.sleep(2) + + client.configure_failpoints((failpoint, "off")) + download.result() + + _, offset = env.pageserver.assert_log_contains( + ".*failed, will retry \\(attempt 0\\): timeout.*" + ) + _, offset = env.pageserver.assert_log_contains(".*succeeded after [0-9]+ retries.*", offset) + + client.evict_layer(env.initial_tenant, env.initial_timeline, layer.layer_file_name) + + client.configure_failpoints((failpoint, "pause")) + + # capture the next offset for a new synchronization with the failpoint + _, offset = wait_until( + 20, + 0.5, + lambda: env.pageserver.assert_log_contains(f"cfg failpoint: {failpoint} pause", offset), + ) + + location_conf = { + "mode": "Secondary", + "secondary_conf": {"warm": True}, + "tenant_conf": {}, + } + + client.tenant_location_conf( + env.initial_tenant, + location_conf, + ) + + started = time.time() + + warmup = exec.submit(client.tenant_secondary_download, env.initial_tenant, wait_ms=30000) + # ensure enough time while paused to trip the timeout + time.sleep(2) + + client.configure_failpoints((failpoint, "off")) + + warmup.result() + + elapsed = time.time() - started + + _, offset = env.pageserver.assert_log_contains( + ".*failed, will retry \\(attempt 0\\): timeout.*", offset + ) + _, offset = env.pageserver.assert_log_contains(".*succeeded after [0-9]+ retries.*", offset) + + assert elapsed < 30, "too long passed: {elapsed=}" + + def stringify(conf: Dict[str, Any]) -> Dict[str, str]: return dict(map(lambda x: (x[0], str(x[1])), conf.items())) From 9d4c113f9ba9aad94e01999d0e4e0c4c366960cf Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Tue, 4 Jun 2024 14:42:57 +0300 Subject: [PATCH 130/220] build(Dockerfile.compute-node): do not log tar contents (#7953) in build logs we get a lot of lines for building the compute node images because of verbose tar unpack. we know the sha256 so we don't need to log the contents. my hope is that this will allow us more reliably use the github live updating log view. 
--- Dockerfile.compute-node | 70 ++++++++++++++++++++--------------------- 1 file changed, 35 insertions(+), 35 deletions(-) diff --git a/Dockerfile.compute-node b/Dockerfile.compute-node index 87fb218245..db3734047e 100644 --- a/Dockerfile.compute-node +++ b/Dockerfile.compute-node @@ -89,7 +89,7 @@ RUN apt update && \ # SFCGAL > 1.3 requires CGAL > 5.2, Bullseye's libcgal-dev is 5.2 RUN wget https://gitlab.com/Oslandia/SFCGAL/-/archive/v1.3.10/SFCGAL-v1.3.10.tar.gz -O SFCGAL.tar.gz && \ echo "4e39b3b2adada6254a7bdba6d297bb28e1a9835a9f879b74f37e2dab70203232 SFCGAL.tar.gz" | sha256sum --check && \ - mkdir sfcgal-src && cd sfcgal-src && tar xvzf ../SFCGAL.tar.gz --strip-components=1 -C . && \ + mkdir sfcgal-src && cd sfcgal-src && tar xzf ../SFCGAL.tar.gz --strip-components=1 -C . && \ cmake -DCMAKE_BUILD_TYPE=Release . && make -j $(getconf _NPROCESSORS_ONLN) && \ DESTDIR=/sfcgal make install -j $(getconf _NPROCESSORS_ONLN) && \ make clean && cp -R /sfcgal/* / @@ -98,7 +98,7 @@ ENV PATH "/usr/local/pgsql/bin:$PATH" RUN wget https://download.osgeo.org/postgis/source/postgis-3.3.3.tar.gz -O postgis.tar.gz && \ echo "74eb356e3f85f14233791013360881b6748f78081cc688ff9d6f0f673a762d13 postgis.tar.gz" | sha256sum --check && \ - mkdir postgis-src && cd postgis-src && tar xvzf ../postgis.tar.gz --strip-components=1 -C . && \ + mkdir postgis-src && cd postgis-src && tar xzf ../postgis.tar.gz --strip-components=1 -C . && \ find /usr/local/pgsql -type f | sed 's|^/usr/local/pgsql/||' > /before.txt &&\ ./autogen.sh && \ ./configure --with-sfcgal=/usr/local/bin/sfcgal-config && \ @@ -124,7 +124,7 @@ RUN wget https://download.osgeo.org/postgis/source/postgis-3.3.3.tar.gz -O postg RUN wget https://github.com/pgRouting/pgrouting/archive/v3.4.2.tar.gz -O pgrouting.tar.gz && \ echo "cac297c07d34460887c4f3b522b35c470138760fe358e351ad1db4edb6ee306e pgrouting.tar.gz" | sha256sum --check && \ - mkdir pgrouting-src && cd pgrouting-src && tar xvzf ../pgrouting.tar.gz --strip-components=1 -C . && \ + mkdir pgrouting-src && cd pgrouting-src && tar xzf ../pgrouting.tar.gz --strip-components=1 -C . && \ mkdir build && cd build && \ cmake -DCMAKE_BUILD_TYPE=Release .. && \ make -j $(getconf _NPROCESSORS_ONLN) && \ @@ -149,7 +149,7 @@ RUN apt update && \ RUN wget https://github.com/plv8/plv8/archive/refs/tags/v3.1.10.tar.gz -O plv8.tar.gz && \ echo "7096c3290928561f0d4901b7a52794295dc47f6303102fae3f8e42dd575ad97d plv8.tar.gz" | sha256sum --check && \ - mkdir plv8-src && cd plv8-src && tar xvzf ../plv8.tar.gz --strip-components=1 -C . && \ + mkdir plv8-src && cd plv8-src && tar xzf ../plv8.tar.gz --strip-components=1 -C . && \ # generate and copy upgrade scripts mkdir -p upgrade && ./generate_upgrade.sh 3.1.10 && \ cp upgrade/* /usr/local/pgsql/share/extension/ && \ @@ -194,7 +194,7 @@ RUN case "$(uname -m)" in \ RUN wget https://github.com/uber/h3/archive/refs/tags/v4.1.0.tar.gz -O h3.tar.gz && \ echo "ec99f1f5974846bde64f4513cf8d2ea1b8d172d2218ab41803bf6a63532272bc h3.tar.gz" | sha256sum --check && \ - mkdir h3-src && cd h3-src && tar xvzf ../h3.tar.gz --strip-components=1 -C . && \ + mkdir h3-src && cd h3-src && tar xzf ../h3.tar.gz --strip-components=1 -C . && \ mkdir build && cd build && \ cmake .. 
-DCMAKE_BUILD_TYPE=Release && \ make -j $(getconf _NPROCESSORS_ONLN) && \ @@ -204,7 +204,7 @@ RUN wget https://github.com/uber/h3/archive/refs/tags/v4.1.0.tar.gz -O h3.tar.gz RUN wget https://github.com/zachasme/h3-pg/archive/refs/tags/v4.1.3.tar.gz -O h3-pg.tar.gz && \ echo "5c17f09a820859ffe949f847bebf1be98511fb8f1bd86f94932512c00479e324 h3-pg.tar.gz" | sha256sum --check && \ - mkdir h3-pg-src && cd h3-pg-src && tar xvzf ../h3-pg.tar.gz --strip-components=1 -C . && \ + mkdir h3-pg-src && cd h3-pg-src && tar xzf ../h3-pg.tar.gz --strip-components=1 -C . && \ export PATH="/usr/local/pgsql/bin:$PATH" && \ make -j $(getconf _NPROCESSORS_ONLN) && \ make -j $(getconf _NPROCESSORS_ONLN) install && \ @@ -222,7 +222,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/ RUN wget https://github.com/df7cb/postgresql-unit/archive/refs/tags/7.7.tar.gz -O postgresql-unit.tar.gz && \ echo "411d05beeb97e5a4abf17572bfcfbb5a68d98d1018918feff995f6ee3bb03e79 postgresql-unit.tar.gz" | sha256sum --check && \ - mkdir postgresql-unit-src && cd postgresql-unit-src && tar xvzf ../postgresql-unit.tar.gz --strip-components=1 -C . && \ + mkdir postgresql-unit-src && cd postgresql-unit-src && tar xzf ../postgresql-unit.tar.gz --strip-components=1 -C . && \ make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \ make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \ # unit extension's "create extension" script relies on absolute install path to fill some reference tables. @@ -243,12 +243,12 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/ COPY patches/pgvector.patch /pgvector.patch -# By default, pgvector Makefile uses `-march=native`. We don't want that, +# By default, pgvector Makefile uses `-march=native`. We don't want that, # because we build the images on different machines than where we run them. # Pass OPTFLAGS="" to remove it. RUN wget https://github.com/pgvector/pgvector/archive/refs/tags/v0.7.0.tar.gz -O pgvector.tar.gz && \ echo "1b5503a35c265408b6eb282621c5e1e75f7801afc04eecb950796cfee2e3d1d8 pgvector.tar.gz" | sha256sum --check && \ - mkdir pgvector-src && cd pgvector-src && tar xvzf ../pgvector.tar.gz --strip-components=1 -C . && \ + mkdir pgvector-src && cd pgvector-src && tar xzf ../pgvector.tar.gz --strip-components=1 -C . && \ patch -p1 < /pgvector.patch && \ make -j $(getconf _NPROCESSORS_ONLN) OPTFLAGS="" PG_CONFIG=/usr/local/pgsql/bin/pg_config && \ make -j $(getconf _NPROCESSORS_ONLN) OPTFLAGS="" install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \ @@ -266,7 +266,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/ # 9742dab1b2f297ad3811120db7b21451bca2d3c9 made on 13/11/2021 RUN wget https://github.com/michelp/pgjwt/archive/9742dab1b2f297ad3811120db7b21451bca2d3c9.tar.gz -O pgjwt.tar.gz && \ echo "cfdefb15007286f67d3d45510f04a6a7a495004be5b3aecb12cda667e774203f pgjwt.tar.gz" | sha256sum --check && \ - mkdir pgjwt-src && cd pgjwt-src && tar xvzf ../pgjwt.tar.gz --strip-components=1 -C . && \ + mkdir pgjwt-src && cd pgjwt-src && tar xzf ../pgjwt.tar.gz --strip-components=1 -C . 
&& \ make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \ echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgjwt.control @@ -281,7 +281,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/ RUN wget https://github.com/HypoPG/hypopg/archive/refs/tags/1.4.0.tar.gz -O hypopg.tar.gz && \ echo "0821011743083226fc9b813c1f2ef5897a91901b57b6bea85a78e466187c6819 hypopg.tar.gz" | sha256sum --check && \ - mkdir hypopg-src && cd hypopg-src && tar xvzf ../hypopg.tar.gz --strip-components=1 -C . && \ + mkdir hypopg-src && cd hypopg-src && tar xzf ../hypopg.tar.gz --strip-components=1 -C . && \ make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \ make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \ echo 'trusted = true' >> /usr/local/pgsql/share/extension/hypopg.control @@ -297,7 +297,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/ RUN wget https://github.com/iCyberon/pg_hashids/archive/refs/tags/v1.2.1.tar.gz -O pg_hashids.tar.gz && \ echo "74576b992d9277c92196dd8d816baa2cc2d8046fe102f3dcd7f3c3febed6822a pg_hashids.tar.gz" | sha256sum --check && \ - mkdir pg_hashids-src && cd pg_hashids-src && tar xvzf ../pg_hashids.tar.gz --strip-components=1 -C . && \ + mkdir pg_hashids-src && cd pg_hashids-src && tar xzf ../pg_hashids.tar.gz --strip-components=1 -C . && \ make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \ make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \ echo 'trusted = true' >> /usr/local/pgsql/share/extension/pg_hashids.control @@ -313,7 +313,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/ RUN wget https://github.com/postgrespro/rum/archive/refs/tags/1.3.13.tar.gz -O rum.tar.gz && \ echo "6ab370532c965568df6210bd844ac6ba649f53055e48243525b0b7e5c4d69a7d rum.tar.gz" | sha256sum --check && \ - mkdir rum-src && cd rum-src && tar xvzf ../rum.tar.gz --strip-components=1 -C . && \ + mkdir rum-src && cd rum-src && tar xzf ../rum.tar.gz --strip-components=1 -C . && \ make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \ make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \ echo 'trusted = true' >> /usr/local/pgsql/share/extension/rum.control @@ -329,7 +329,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/ RUN wget https://github.com/theory/pgtap/archive/refs/tags/v1.2.0.tar.gz -O pgtap.tar.gz && \ echo "9c7c3de67ea41638e14f06da5da57bac6f5bd03fea05c165a0ec862205a5c052 pgtap.tar.gz" | sha256sum --check && \ - mkdir pgtap-src && cd pgtap-src && tar xvzf ../pgtap.tar.gz --strip-components=1 -C . && \ + mkdir pgtap-src && cd pgtap-src && tar xzf ../pgtap.tar.gz --strip-components=1 -C . && \ make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \ make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \ echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgtap.control @@ -345,7 +345,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/ RUN wget https://github.com/RhodiumToad/ip4r/archive/refs/tags/2.4.2.tar.gz -O ip4r.tar.gz && \ echo "0f7b1f159974f49a47842a8ab6751aecca1ed1142b6d5e38d81b064b2ead1b4b ip4r.tar.gz" | sha256sum --check && \ - mkdir ip4r-src && cd ip4r-src && tar xvzf ../ip4r.tar.gz --strip-components=1 -C . 
&& \ + mkdir ip4r-src && cd ip4r-src && tar xzf ../ip4r.tar.gz --strip-components=1 -C . && \ make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \ make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \ echo 'trusted = true' >> /usr/local/pgsql/share/extension/ip4r.control @@ -361,7 +361,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/ RUN wget https://github.com/dimitri/prefix/archive/refs/tags/v1.2.10.tar.gz -O prefix.tar.gz && \ echo "4342f251432a5f6fb05b8597139d3ccde8dcf87e8ca1498e7ee931ca057a8575 prefix.tar.gz" | sha256sum --check && \ - mkdir prefix-src && cd prefix-src && tar xvzf ../prefix.tar.gz --strip-components=1 -C . && \ + mkdir prefix-src && cd prefix-src && tar xzf ../prefix.tar.gz --strip-components=1 -C . && \ make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \ make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \ echo 'trusted = true' >> /usr/local/pgsql/share/extension/prefix.control @@ -377,7 +377,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/ RUN wget https://github.com/citusdata/postgresql-hll/archive/refs/tags/v2.18.tar.gz -O hll.tar.gz && \ echo "e2f55a6f4c4ab95ee4f1b4a2b73280258c5136b161fe9d059559556079694f0e hll.tar.gz" | sha256sum --check && \ - mkdir hll-src && cd hll-src && tar xvzf ../hll.tar.gz --strip-components=1 -C . && \ + mkdir hll-src && cd hll-src && tar xzf ../hll.tar.gz --strip-components=1 -C . && \ make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \ make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \ echo 'trusted = true' >> /usr/local/pgsql/share/extension/hll.control @@ -393,7 +393,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/ RUN wget https://github.com/okbob/plpgsql_check/archive/refs/tags/v2.5.3.tar.gz -O plpgsql_check.tar.gz && \ echo "6631ec3e7fb3769eaaf56e3dfedb829aa761abf163d13dba354b4c218508e1c0 plpgsql_check.tar.gz" | sha256sum --check && \ - mkdir plpgsql_check-src && cd plpgsql_check-src && tar xvzf ../plpgsql_check.tar.gz --strip-components=1 -C . && \ + mkdir plpgsql_check-src && cd plpgsql_check-src && tar xzf ../plpgsql_check.tar.gz --strip-components=1 -C . && \ make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \ make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \ echo 'trusted = true' >> /usr/local/pgsql/share/extension/plpgsql_check.control @@ -424,7 +424,7 @@ RUN case "${PG_VERSION}" in \ apt-get install -y cmake && \ wget https://github.com/timescale/timescaledb/archive/refs/tags/${TIMESCALEDB_VERSION}.tar.gz -O timescaledb.tar.gz && \ echo "${TIMESCALEDB_CHECKSUM} timescaledb.tar.gz" | sha256sum --check && \ - mkdir timescaledb-src && cd timescaledb-src && tar xvzf ../timescaledb.tar.gz --strip-components=1 -C . && \ + mkdir timescaledb-src && cd timescaledb-src && tar xzf ../timescaledb.tar.gz --strip-components=1 -C . 
&& \ ./bootstrap -DSEND_TELEMETRY_DEFAULT:BOOL=OFF -DUSE_TELEMETRY:BOOL=OFF -DAPACHE_ONLY:BOOL=ON -DCMAKE_BUILD_TYPE=Release && \ cd build && \ make -j $(getconf _NPROCESSORS_ONLN) && \ @@ -462,7 +462,7 @@ RUN case "${PG_VERSION}" in \ esac && \ wget https://github.com/ossc-db/pg_hint_plan/archive/refs/tags/REL${PG_HINT_PLAN_VERSION}.tar.gz -O pg_hint_plan.tar.gz && \ echo "${PG_HINT_PLAN_CHECKSUM} pg_hint_plan.tar.gz" | sha256sum --check && \ - mkdir pg_hint_plan-src && cd pg_hint_plan-src && tar xvzf ../pg_hint_plan.tar.gz --strip-components=1 -C . && \ + mkdir pg_hint_plan-src && cd pg_hint_plan-src && tar xzf ../pg_hint_plan.tar.gz --strip-components=1 -C . && \ make -j $(getconf _NPROCESSORS_ONLN) && \ make install -j $(getconf _NPROCESSORS_ONLN) && \ echo "trusted = true" >> /usr/local/pgsql/share/extension/pg_hint_plan.control @@ -481,7 +481,7 @@ RUN apt-get update && \ apt-get install -y git libgtk2.0-dev libpq-dev libpam-dev libxslt-dev libkrb5-dev cmake && \ wget https://github.com/ketteq-neon/postgres-exts/archive/e0bd1a9d9313d7120c1b9c7bb15c48c0dede4c4e.tar.gz -O kq_imcx.tar.gz && \ echo "dc93a97ff32d152d32737ba7e196d9687041cda15e58ab31344c2f2de8855336 kq_imcx.tar.gz" | sha256sum --check && \ - mkdir kq_imcx-src && cd kq_imcx-src && tar xvzf ../kq_imcx.tar.gz --strip-components=1 -C . && \ + mkdir kq_imcx-src && cd kq_imcx-src && tar xzf ../kq_imcx.tar.gz --strip-components=1 -C . && \ find /usr/local/pgsql -type f | sed 's|^/usr/local/pgsql/||' > /before.txt &&\ mkdir build && cd build && \ cmake -DCMAKE_BUILD_TYPE=Release .. && \ @@ -505,7 +505,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/ ENV PATH "/usr/local/pgsql/bin/:$PATH" RUN wget https://github.com/citusdata/pg_cron/archive/refs/tags/v1.6.0.tar.gz -O pg_cron.tar.gz && \ echo "383a627867d730222c272bfd25cd5e151c578d73f696d32910c7db8c665cc7db pg_cron.tar.gz" | sha256sum --check && \ - mkdir pg_cron-src && cd pg_cron-src && tar xvzf ../pg_cron.tar.gz --strip-components=1 -C . && \ + mkdir pg_cron-src && cd pg_cron-src && tar xzf ../pg_cron.tar.gz --strip-components=1 -C . && \ make -j $(getconf _NPROCESSORS_ONLN) && \ make -j $(getconf _NPROCESSORS_ONLN) install && \ echo 'trusted = true' >> /usr/local/pgsql/share/extension/pg_cron.control @@ -531,7 +531,7 @@ RUN apt-get update && \ ENV PATH "/usr/local/pgsql/bin/:/usr/local/pgsql/:$PATH" RUN wget https://github.com/rdkit/rdkit/archive/refs/tags/Release_2023_03_3.tar.gz -O rdkit.tar.gz && \ echo "bdbf9a2e6988526bfeb8c56ce3cdfe2998d60ac289078e2215374288185e8c8d rdkit.tar.gz" | sha256sum --check && \ - mkdir rdkit-src && cd rdkit-src && tar xvzf ../rdkit.tar.gz --strip-components=1 -C . && \ + mkdir rdkit-src && cd rdkit-src && tar xzf ../rdkit.tar.gz --strip-components=1 -C . && \ cmake \ -D RDK_BUILD_CAIRO_SUPPORT=OFF \ -D RDK_BUILD_INCHI_SUPPORT=ON \ @@ -571,7 +571,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/ ENV PATH "/usr/local/pgsql/bin/:$PATH" RUN wget https://github.com/fboulnois/pg_uuidv7/archive/refs/tags/v1.0.1.tar.gz -O pg_uuidv7.tar.gz && \ echo "0d0759ab01b7fb23851ecffb0bce27822e1868a4a5819bfd276101c716637a7a pg_uuidv7.tar.gz" | sha256sum --check && \ - mkdir pg_uuidv7-src && cd pg_uuidv7-src && tar xvzf ../pg_uuidv7.tar.gz --strip-components=1 -C . && \ + mkdir pg_uuidv7-src && cd pg_uuidv7-src && tar xzf ../pg_uuidv7.tar.gz --strip-components=1 -C . 
&& \ make -j $(getconf _NPROCESSORS_ONLN) && \ make -j $(getconf _NPROCESSORS_ONLN) install && \ echo 'trusted = true' >> /usr/local/pgsql/share/extension/pg_uuidv7.control @@ -588,7 +588,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/ ENV PATH "/usr/local/pgsql/bin/:$PATH" RUN wget https://github.com/ChenHuajun/pg_roaringbitmap/archive/refs/tags/v0.5.4.tar.gz -O pg_roaringbitmap.tar.gz && \ echo "b75201efcb1c2d1b014ec4ae6a22769cc7a224e6e406a587f5784a37b6b5a2aa pg_roaringbitmap.tar.gz" | sha256sum --check && \ - mkdir pg_roaringbitmap-src && cd pg_roaringbitmap-src && tar xvzf ../pg_roaringbitmap.tar.gz --strip-components=1 -C . && \ + mkdir pg_roaringbitmap-src && cd pg_roaringbitmap-src && tar xzf ../pg_roaringbitmap.tar.gz --strip-components=1 -C . && \ make -j $(getconf _NPROCESSORS_ONLN) && \ make -j $(getconf _NPROCESSORS_ONLN) install && \ echo 'trusted = true' >> /usr/local/pgsql/share/extension/roaringbitmap.control @@ -605,7 +605,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/ ENV PATH "/usr/local/pgsql/bin/:$PATH" RUN wget https://github.com/theory/pg-semver/archive/refs/tags/v0.32.1.tar.gz -O pg_semver.tar.gz && \ echo "fbdaf7512026d62eec03fad8687c15ed509b6ba395bff140acd63d2e4fbe25d7 pg_semver.tar.gz" | sha256sum --check && \ - mkdir pg_semver-src && cd pg_semver-src && tar xvzf ../pg_semver.tar.gz --strip-components=1 -C . && \ + mkdir pg_semver-src && cd pg_semver-src && tar xzf ../pg_semver.tar.gz --strip-components=1 -C . && \ make -j $(getconf _NPROCESSORS_ONLN) && \ make -j $(getconf _NPROCESSORS_ONLN) install && \ echo 'trusted = true' >> /usr/local/pgsql/share/extension/semver.control @@ -631,7 +631,7 @@ RUN case "${PG_VERSION}" in \ esac && \ wget https://github.com/neondatabase/pg_embedding/archive/refs/tags/${PG_EMBEDDING_VERSION}.tar.gz -O pg_embedding.tar.gz && \ echo "${PG_EMBEDDING_CHECKSUM} pg_embedding.tar.gz" | sha256sum --check && \ - mkdir pg_embedding-src && cd pg_embedding-src && tar xvzf ../pg_embedding.tar.gz --strip-components=1 -C . && \ + mkdir pg_embedding-src && cd pg_embedding-src && tar xzf ../pg_embedding.tar.gz --strip-components=1 -C . && \ make -j $(getconf _NPROCESSORS_ONLN) && \ make -j $(getconf _NPROCESSORS_ONLN) install @@ -647,7 +647,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/ ENV PATH "/usr/local/pgsql/bin/:$PATH" RUN wget https://github.com/neondatabase/postgresql_anonymizer/archive/refs/tags/neon_1.1.1.tar.gz -O pg_anon.tar.gz && \ echo "321ea8d5c1648880aafde850a2c576e4a9e7b9933a34ce272efc839328999fa9 pg_anon.tar.gz" | sha256sum --check && \ - mkdir pg_anon-src && cd pg_anon-src && tar xvzf ../pg_anon.tar.gz --strip-components=1 -C . && \ + mkdir pg_anon-src && cd pg_anon-src && tar xzf ../pg_anon.tar.gz --strip-components=1 -C . && \ find /usr/local/pgsql -type f | sed 's|^/usr/local/pgsql/||' > /before.txt &&\ make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \ echo 'trusted = true' >> /usr/local/pgsql/share/extension/anon.control && \ @@ -696,7 +696,7 @@ ARG PG_VERSION RUN wget https://github.com/supabase/pg_jsonschema/archive/refs/tags/v0.2.0.tar.gz -O pg_jsonschema.tar.gz && \ echo "9118fc508a6e231e7a39acaa6f066fcd79af17a5db757b47d2eefbe14f7794f0 pg_jsonschema.tar.gz" | sha256sum --check && \ - mkdir pg_jsonschema-src && cd pg_jsonschema-src && tar xvzf ../pg_jsonschema.tar.gz --strip-components=1 -C . && \ + mkdir pg_jsonschema-src && cd pg_jsonschema-src && tar xzf ../pg_jsonschema.tar.gz --strip-components=1 -C . 
&& \ sed -i 's/pgrx = "0.10.2"/pgrx = { version = "0.10.2", features = [ "unsafe-postgres" ] }/g' Cargo.toml && \ cargo pgrx install --release && \ echo "trusted = true" >> /usr/local/pgsql/share/extension/pg_jsonschema.control @@ -713,7 +713,7 @@ ARG PG_VERSION RUN wget https://github.com/supabase/pg_graphql/archive/refs/tags/v1.4.0.tar.gz -O pg_graphql.tar.gz && \ echo "bd8dc7230282b3efa9ae5baf053a54151ed0e66881c7c53750e2d0c765776edc pg_graphql.tar.gz" | sha256sum --check && \ - mkdir pg_graphql-src && cd pg_graphql-src && tar xvzf ../pg_graphql.tar.gz --strip-components=1 -C . && \ + mkdir pg_graphql-src && cd pg_graphql-src && tar xzf ../pg_graphql.tar.gz --strip-components=1 -C . && \ sed -i 's/pgrx = "=0.10.2"/pgrx = { version = "0.10.2", features = [ "unsafe-postgres" ] }/g' Cargo.toml && \ cargo pgrx install --release && \ # it's needed to enable extension because it uses untrusted C language @@ -733,7 +733,7 @@ ARG PG_VERSION # 26806147b17b60763039c6a6878884c41a262318 made on 26/09/2023 RUN wget https://github.com/kelvich/pg_tiktoken/archive/26806147b17b60763039c6a6878884c41a262318.tar.gz -O pg_tiktoken.tar.gz && \ echo "e64e55aaa38c259512d3e27c572da22c4637418cf124caba904cd50944e5004e pg_tiktoken.tar.gz" | sha256sum --check && \ - mkdir pg_tiktoken-src && cd pg_tiktoken-src && tar xvzf ../pg_tiktoken.tar.gz --strip-components=1 -C . && \ + mkdir pg_tiktoken-src && cd pg_tiktoken-src && tar xzf ../pg_tiktoken.tar.gz --strip-components=1 -C . && \ cargo pgrx install --release && \ echo "trusted = true" >> /usr/local/pgsql/share/extension/pg_tiktoken.control @@ -749,7 +749,7 @@ ARG PG_VERSION RUN wget https://github.com/pksunkara/pgx_ulid/archive/refs/tags/v0.1.3.tar.gz -O pgx_ulid.tar.gz && \ echo "ee5db82945d2d9f2d15597a80cf32de9dca67b897f605beb830561705f12683c pgx_ulid.tar.gz" | sha256sum --check && \ - mkdir pgx_ulid-src && cd pgx_ulid-src && tar xvzf ../pgx_ulid.tar.gz --strip-components=1 -C . && \ + mkdir pgx_ulid-src && cd pgx_ulid-src && tar xzf ../pgx_ulid.tar.gz --strip-components=1 -C . && \ echo "******************* Apply a patch for Postgres 16 support; delete in the next release ******************" && \ wget https://github.com/pksunkara/pgx_ulid/commit/f84954cf63fc8c80d964ac970d9eceed3c791196.patch && \ patch -p1 < f84954cf63fc8c80d964ac970d9eceed3c791196.patch && \ @@ -771,7 +771,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/ ENV PATH "/usr/local/pgsql/bin/:$PATH" RUN wget https://github.com/eulerto/wal2json/archive/refs/tags/wal2json_2_5.tar.gz && \ echo "b516653575541cf221b99cf3f8be9b6821f6dbcfc125675c85f35090f824f00e wal2json_2_5.tar.gz" | sha256sum --check && \ - mkdir wal2json-src && cd wal2json-src && tar xvzf ../wal2json_2_5.tar.gz --strip-components=1 -C . && \ + mkdir wal2json-src && cd wal2json-src && tar xzf ../wal2json_2_5.tar.gz --strip-components=1 -C . && \ make -j $(getconf _NPROCESSORS_ONLN) && \ make -j $(getconf _NPROCESSORS_ONLN) install @@ -787,7 +787,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/ ENV PATH "/usr/local/pgsql/bin/:$PATH" RUN wget https://github.com/sraoss/pg_ivm/archive/refs/tags/v1.7.tar.gz -O pg_ivm.tar.gz && \ echo "ebfde04f99203c7be4b0e873f91104090e2e83e5429c32ac242d00f334224d5e pg_ivm.tar.gz" | sha256sum --check && \ - mkdir pg_ivm-src && cd pg_ivm-src && tar xvzf ../pg_ivm.tar.gz --strip-components=1 -C . && \ + mkdir pg_ivm-src && cd pg_ivm-src && tar xzf ../pg_ivm.tar.gz --strip-components=1 -C . 
&& \ make -j $(getconf _NPROCESSORS_ONLN) && \ make -j $(getconf _NPROCESSORS_ONLN) install && \ echo 'trusted = true' >> /usr/local/pgsql/share/extension/pg_ivm.control @@ -804,7 +804,7 @@ COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/ ENV PATH "/usr/local/pgsql/bin/:$PATH" RUN wget https://github.com/pgpartman/pg_partman/archive/refs/tags/v5.0.1.tar.gz -O pg_partman.tar.gz && \ echo "75b541733a9659a6c90dbd40fccb904a630a32880a6e3044d0c4c5f4c8a65525 pg_partman.tar.gz" | sha256sum --check && \ - mkdir pg_partman-src && cd pg_partman-src && tar xvzf ../pg_partman.tar.gz --strip-components=1 -C . && \ + mkdir pg_partman-src && cd pg_partman-src && tar xzf ../pg_partman.tar.gz --strip-components=1 -C . && \ make -j $(getconf _NPROCESSORS_ONLN) && \ make -j $(getconf _NPROCESSORS_ONLN) install && \ echo 'trusted = true' >> /usr/local/pgsql/share/extension/pg_partman.control From 0112097e1321d16c4ff51dc4e69818aed395ec32 Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Tue, 4 Jun 2024 17:27:08 +0300 Subject: [PATCH 131/220] feat(rtc): maintain dirty and uploaded IndexPart (#7833) RemoteTimelineClient maintains a copy of "next IndexPart" as a number of fields which are like an IndexPart but this is not immediately obvious. Instead of multiple fields, maintain a `dirty` ("next IndexPart") and `clean` ("uploaded IndexPart") fields. Additional cleanup: - rename `IndexPart::disk_consistent_lsn` accessor `duplicated_disk_consistent_lsn` - no one except scrubber should be looking at it, even scrubber is a stretch - remove usage elsewhere (pagectl used by tests, metadata scan endpoint) - serialize index part *before* the index upload operation - avoid upload operation being retried because of serialization error - serialization error is fatal anyway for timeline -- it can only make transient local progress after that, at least the error is bubbled up now - gather exploded IndexPart fields into single actual `UploadQueueInitialized::dirty` of which the uploaded snapshot is serialized - implement the long wished monotonicity check with the `clean` IndexPart with an assertion which is not expected to fire Continued work from #7860 towards next step of #6994. 
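To make the dirty/clean split easier to picture, a heavily simplified standalone sketch (illustrative types only, not the real `RemoteTimelineClient` or `IndexPart`): schedule_* calls mutate `dirty`, a scheduled index upload freezes a copy of it, and only a completed upload promotes that copy to `clean`.

```
#[derive(Clone, Debug)]
struct IndexSnapshot {
    disk_consistent_lsn: u64,
    layer_count: usize,
}

struct UploadQueue {
    /// Desired remote state: every schedule_* call mutates this copy.
    dirty: IndexSnapshot,
    /// The snapshot the last completed index upload actually put in remote storage.
    clean: IndexSnapshot,
}

impl UploadQueue {
    fn schedule_index_upload(&mut self) -> IndexSnapshot {
        // The scheduled upload carries a frozen copy of `dirty`, so later
        // mutations cannot leak into an upload that is already in flight.
        self.dirty.clone()
    }

    fn on_index_upload_completed(&mut self, uploaded: IndexSnapshot) {
        // Monotonicity check: the uploaded remote state should never move backwards.
        assert!(uploaded.disk_consistent_lsn >= self.clean.disk_consistent_lsn);
        self.clean = uploaded;
    }
}

fn main() {
    let start = IndexSnapshot { disk_consistent_lsn: 0, layer_count: 0 };
    let mut queue = UploadQueue { dirty: start.clone(), clean: start };

    queue.dirty.layer_count += 1;
    queue.dirty.disk_consistent_lsn = 10;
    let uploaded = queue.schedule_index_upload();

    // Changes made while that upload is in flight stay in `dirty` ...
    queue.dirty.disk_consistent_lsn = 20;

    // ... and only the snapshot that was actually uploaded becomes `clean`.
    queue.on_index_upload_completed(uploaded);
    assert_eq!(queue.clean.disk_consistent_lsn, 10);
    assert_eq!(queue.dirty.disk_consistent_lsn, 20);
    println!(
        "clean lsn = {}, layers = {}",
        queue.clean.disk_consistent_lsn, queue.clean.layer_count
    );
}
```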
--- pageserver/ctl/src/index_part.rs | 2 +- pageserver/src/http/routes.rs | 2 +- .../src/tenant/remote_timeline_client.rs | 150 ++++++++++-------- .../tenant/remote_timeline_client/index.rs | 55 ++----- .../tenant/remote_timeline_client/upload.rs | 19 +-- pageserver/src/tenant/upload_queue.rs | 77 ++++----- s3_scrubber/src/checks.rs | 4 +- 7 files changed, 144 insertions(+), 165 deletions(-) diff --git a/pageserver/ctl/src/index_part.rs b/pageserver/ctl/src/index_part.rs index 2998b5c732..a33cae6769 100644 --- a/pageserver/ctl/src/index_part.rs +++ b/pageserver/ctl/src/index_part.rs @@ -26,7 +26,7 @@ pub(crate) async fn main(cmd: &IndexPartCmd) -> anyhow::Result<()> { let output = Output { layer_metadata: &des.layer_metadata, - disk_consistent_lsn: des.get_disk_consistent_lsn(), + disk_consistent_lsn: des.metadata.disk_consistent_lsn(), timeline_metadata: &des.metadata, }; diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index bd6fa028ac..6b6a131c88 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -2182,7 +2182,7 @@ async fn tenant_scan_remote_handler( { Ok((index_part, index_generation)) => { tracing::info!("Found timeline {tenant_shard_id}/{timeline_id} metadata (gen {index_generation:?}, {} layers, {} consistent LSN)", - index_part.layer_metadata.len(), index_part.get_disk_consistent_lsn()); + index_part.layer_metadata.len(), index_part.metadata.disk_consistent_lsn()); generation = std::cmp::max(generation, index_generation); } Err(DownloadError::NotFound) => { diff --git a/pageserver/src/tenant/remote_timeline_client.rs b/pageserver/src/tenant/remote_timeline_client.rs index 73438a790f..e33e4b84aa 100644 --- a/pageserver/src/tenant/remote_timeline_client.rs +++ b/pageserver/src/tenant/remote_timeline_client.rs @@ -91,8 +91,7 @@ //! //! The *actual* remote state lags behind the *desired* remote state while //! there are in-flight operations. -//! We keep track of the desired remote state in -//! [`UploadQueueInitialized::latest_files`] and [`UploadQueueInitialized::latest_metadata`]. +//! We keep track of the desired remote state in [`UploadQueueInitialized::dirty`]. //! It is initialized based on the [`IndexPart`] that was passed during init //! and updated with every `schedule_*` function call. //! All this is necessary necessary to compute the future [`IndexPart`]s @@ -115,8 +114,7 @@ //! //! # Completion //! -//! Once an operation has completed, we update -//! [`UploadQueueInitialized::projected_remote_consistent_lsn`] immediately, +//! Once an operation has completed, we update [`UploadQueueInitialized::clean`] immediately, //! and submit a request through the DeletionQueue to update //! [`UploadQueueInitialized::visible_remote_consistent_lsn`] after it has //! validated that our generation is not stale. It is this visible value @@ -416,6 +414,7 @@ impl RemoteTimelineClient { Ok(()) } + /// Returns `None` if nothing is yet uplodaded, `Some(disk_consistent_lsn)` otherwise. pub fn remote_consistent_lsn_projected(&self) -> Option { match &mut *self.upload_queue.lock().unwrap() { UploadQueue::Uninitialized => None, @@ -442,13 +441,11 @@ impl RemoteTimelineClient { /// Returns true if this timeline was previously detached at this Lsn and the remote timeline /// client is currently initialized. pub(crate) fn is_previous_ancestor_lsn(&self, lsn: Lsn) -> bool { - // technically this is a dirty read, but given how timeline detach ancestor is implemented - // via tenant restart, the lineage has always been uploaded. 
self.upload_queue .lock() .unwrap() .initialized_mut() - .map(|uq| uq.latest_lineage.is_previous_ancestor_lsn(lsn)) + .map(|uq| uq.clean.0.lineage.is_previous_ancestor_lsn(lsn)) .unwrap_or(false) } @@ -457,7 +454,6 @@ impl RemoteTimelineClient { current_remote_index_part .layer_metadata .values() - // If we don't have the file size for the layer, don't account for it in the metric. .map(|ilmd| ilmd.file_size) .sum() } else { @@ -585,9 +581,9 @@ impl RemoteTimelineClient { // As documented in the struct definition, it's ok for latest_metadata to be // ahead of what's _actually_ on the remote during index upload. - upload_queue.latest_metadata = metadata.clone(); + upload_queue.dirty.metadata = metadata.clone(); - self.schedule_index_upload(upload_queue); + self.schedule_index_upload(upload_queue)?; Ok(()) } @@ -606,9 +602,9 @@ impl RemoteTimelineClient { let mut guard = self.upload_queue.lock().unwrap(); let upload_queue = guard.initialized_mut()?; - upload_queue.latest_metadata.apply(update); + upload_queue.dirty.metadata.apply(update); - self.schedule_index_upload(upload_queue); + self.schedule_index_upload(upload_queue)?; Ok(()) } @@ -620,8 +616,8 @@ impl RemoteTimelineClient { ) -> anyhow::Result<()> { let mut guard = self.upload_queue.lock().unwrap(); let upload_queue = guard.initialized_mut()?; - upload_queue.last_aux_file_policy = last_aux_file_policy; - self.schedule_index_upload(upload_queue); + upload_queue.dirty.last_aux_file_policy = last_aux_file_policy; + self.schedule_index_upload(upload_queue)?; Ok(()) } /// @@ -639,30 +635,44 @@ impl RemoteTimelineClient { let upload_queue = guard.initialized_mut()?; if upload_queue.latest_files_changes_since_metadata_upload_scheduled > 0 { - self.schedule_index_upload(upload_queue); + self.schedule_index_upload(upload_queue)?; } Ok(()) } /// Launch an index-file upload operation in the background (internal function) - fn schedule_index_upload(self: &Arc, upload_queue: &mut UploadQueueInitialized) { - let disk_consistent_lsn = upload_queue.latest_metadata.disk_consistent_lsn(); + fn schedule_index_upload( + self: &Arc, + upload_queue: &mut UploadQueueInitialized, + ) -> anyhow::Result<()> { + let disk_consistent_lsn = upload_queue.dirty.metadata.disk_consistent_lsn(); + // fix up the duplicated field + upload_queue.dirty.disk_consistent_lsn = disk_consistent_lsn; + + // make sure it serializes before doing it in perform_upload_task so that it doesn't + // look like a retryable error + let void = std::io::sink(); + serde_json::to_writer(void, &upload_queue.dirty).context("serialize index_part.json")?; + + let index_part = &upload_queue.dirty; info!( "scheduling metadata upload up to consistent LSN {disk_consistent_lsn} with {} files ({} changed)", - upload_queue.latest_files.len(), + index_part.layer_metadata.len(), upload_queue.latest_files_changes_since_metadata_upload_scheduled, ); - let index_part = IndexPart::from(&*upload_queue); - let op = UploadOp::UploadMetadata(Box::new(index_part), disk_consistent_lsn); + let op = UploadOp::UploadMetadata { + uploaded: Box::new(index_part.clone()), + }; self.metric_begin(&op); upload_queue.queued_operations.push_back(op); upload_queue.latest_files_changes_since_metadata_upload_scheduled = 0; // Launch the task immediately, if possible self.launch_queued_tasks(upload_queue); + Ok(()) } pub(crate) async fn schedule_reparenting_and_wait( @@ -675,16 +685,16 @@ impl RemoteTimelineClient { let mut guard = self.upload_queue.lock().unwrap(); let upload_queue = guard.initialized_mut()?; - let Some(prev) = 
upload_queue.latest_metadata.ancestor_timeline() else { + let Some(prev) = upload_queue.dirty.metadata.ancestor_timeline() else { return Err(anyhow::anyhow!( "cannot reparent without a current ancestor" )); }; - upload_queue.latest_metadata.reparent(new_parent); - upload_queue.latest_lineage.record_previous_ancestor(&prev); + upload_queue.dirty.metadata.reparent(new_parent); + upload_queue.dirty.lineage.record_previous_ancestor(&prev); - self.schedule_index_upload(upload_queue); + self.schedule_index_upload(upload_queue)?; self.schedule_barrier0(upload_queue) }; @@ -705,16 +715,17 @@ impl RemoteTimelineClient { let mut guard = self.upload_queue.lock().unwrap(); let upload_queue = guard.initialized_mut()?; - upload_queue.latest_metadata.detach_from_ancestor(&adopted); - upload_queue.latest_lineage.record_detaching(&adopted); + upload_queue.dirty.metadata.detach_from_ancestor(&adopted); + upload_queue.dirty.lineage.record_detaching(&adopted); for layer in layers { upload_queue - .latest_files + .dirty + .layer_metadata .insert(layer.layer_desc().layer_name(), layer.metadata()); } - self.schedule_index_upload(upload_queue); + self.schedule_index_upload(upload_queue)?; let barrier = self.schedule_barrier0(upload_queue); self.launch_queued_tasks(upload_queue); @@ -746,7 +757,8 @@ impl RemoteTimelineClient { let metadata = layer.metadata(); upload_queue - .latest_files + .dirty + .layer_metadata .insert(layer.layer_desc().layer_name(), metadata.clone()); upload_queue.latest_files_changes_since_metadata_upload_scheduled += 1; @@ -776,8 +788,8 @@ impl RemoteTimelineClient { let mut guard = self.upload_queue.lock().unwrap(); let upload_queue = guard.initialized_mut()?; - let with_metadata = - self.schedule_unlinking_of_layers_from_index_part0(upload_queue, names.iter().cloned()); + let with_metadata = self + .schedule_unlinking_of_layers_from_index_part0(upload_queue, names.iter().cloned())?; self.schedule_deletion_of_unlinked0(upload_queue, with_metadata); @@ -801,7 +813,7 @@ impl RemoteTimelineClient { let names = gc_layers.iter().map(|x| x.layer_desc().layer_name()); - self.schedule_unlinking_of_layers_from_index_part0(upload_queue, names); + self.schedule_unlinking_of_layers_from_index_part0(upload_queue, names)?; self.launch_queued_tasks(upload_queue); @@ -814,7 +826,7 @@ impl RemoteTimelineClient { self: &Arc, upload_queue: &mut UploadQueueInitialized, names: I, - ) -> Vec<(LayerName, LayerFileMetadata)> + ) -> anyhow::Result> where I: IntoIterator, { @@ -824,7 +836,7 @@ impl RemoteTimelineClient { let with_metadata: Vec<_> = names .into_iter() .filter_map(|name| { - let meta = upload_queue.latest_files.remove(&name); + let meta = upload_queue.dirty.layer_metadata.remove(&name); if let Some(meta) = meta { upload_queue.latest_files_changes_since_metadata_upload_scheduled += 1; @@ -856,10 +868,10 @@ impl RemoteTimelineClient { // index_part update, because that needs to be uploaded before we can actually delete the // files. 
if upload_queue.latest_files_changes_since_metadata_upload_scheduled > 0 { - self.schedule_index_upload(upload_queue); + self.schedule_index_upload(upload_queue)?; } - with_metadata + Ok(with_metadata) } /// Schedules deletion for layer files which have previously been unlinked from the @@ -950,7 +962,7 @@ impl RemoteTimelineClient { let names = compacted_from.iter().map(|x| x.layer_desc().layer_name()); - self.schedule_unlinking_of_layers_from_index_part0(upload_queue, names); + self.schedule_unlinking_of_layers_from_index_part0(upload_queue, names)?; self.launch_queued_tasks(upload_queue); Ok(()) @@ -1085,7 +1097,7 @@ impl RemoteTimelineClient { let deleted_at = Utc::now().naive_utc(); stopped.deleted_at = SetDeletedFlagProgress::InProgress(deleted_at); - let mut index_part = IndexPart::from(&stopped.upload_queue_for_deletion); + let mut index_part = stopped.upload_queue_for_deletion.dirty.clone(); index_part.deleted_at = Some(deleted_at); index_part }; @@ -1296,7 +1308,8 @@ impl RemoteTimelineClient { stopped .upload_queue_for_deletion - .latest_files + .dirty + .layer_metadata .drain() .map(|(file_name, meta)| { remote_layer_path( @@ -1433,7 +1446,7 @@ impl RemoteTimelineClient { // Can always be scheduled. true } - UploadOp::UploadMetadata(_, _) => { + UploadOp::UploadMetadata { .. } => { // These can only be performed after all the preceding operations // have finished. upload_queue.inprogress_tasks.is_empty() @@ -1475,7 +1488,7 @@ impl RemoteTimelineClient { UploadOp::UploadLayer(_, _) => { upload_queue.num_inprogress_layer_uploads += 1; } - UploadOp::UploadMetadata(_, _) => { + UploadOp::UploadMetadata { .. } => { upload_queue.num_inprogress_metadata_uploads += 1; } UploadOp::Delete(_) => { @@ -1584,22 +1597,13 @@ impl RemoteTimelineClient { ) .await } - UploadOp::UploadMetadata(ref index_part, _lsn) => { - let mention_having_future_layers = if cfg!(feature = "testing") { - index_part - .layer_metadata - .keys() - .any(|x| x.is_in_future(*_lsn)) - } else { - false - }; - + UploadOp::UploadMetadata { ref uploaded } => { let res = upload::upload_index_part( &self.storage_impl, &self.tenant_shard_id, &self.timeline_id, self.generation, - index_part, + uploaded, &self.cancel, ) .measure_remote_op( @@ -1609,10 +1613,21 @@ impl RemoteTimelineClient { ) .await; if res.is_ok() { - self.update_remote_physical_size_gauge(Some(index_part)); + self.update_remote_physical_size_gauge(Some(uploaded)); + let mention_having_future_layers = if cfg!(feature = "testing") { + uploaded + .layer_metadata + .keys() + .any(|x| x.is_in_future(uploaded.metadata.disk_consistent_lsn())) + } else { + false + }; if mention_having_future_layers { // find rationale near crate::tenant::timeline::init::cleanup_future_layer - tracing::info!(disk_consistent_lsn=%_lsn, "uploaded an index_part.json with future layers -- this is ok! if shutdown now, expect future layer cleanup"); + tracing::info!( + disk_consistent_lsn = %uploaded.metadata.disk_consistent_lsn(), + "uploaded an index_part.json with future layers -- this is ok! if shutdown now, expect future layer cleanup" + ); } } res @@ -1713,11 +1728,23 @@ impl RemoteTimelineClient { upload_queue.num_inprogress_layer_uploads -= 1; None } - UploadOp::UploadMetadata(_, lsn) => { + UploadOp::UploadMetadata { ref uploaded } => { upload_queue.num_inprogress_metadata_uploads -= 1; - // XXX monotonicity check? - upload_queue.projected_remote_consistent_lsn = Some(lsn); + // the task id is reused as a monotonicity check for storing the "clean" + // IndexPart. 
+ let last_updater = upload_queue.clean.1; + let is_later = last_updater.is_some_and(|task_id| task_id < task.task_id); + let monotone = is_later || last_updater.is_none(); + + assert!(monotone, "no two index uploads should be completing at the same time, prev={last_updater:?}, task.task_id={}", task.task_id); + + // not taking ownership is wasteful + upload_queue.clean.0.clone_from(uploaded); + upload_queue.clean.1 = Some(task.task_id); + + let lsn = upload_queue.clean.0.metadata.disk_consistent_lsn(); + if self.generation.is_none() { // Legacy mode: skip validating generation upload_queue.visible_remote_consistent_lsn.store(lsn); @@ -1771,7 +1798,7 @@ impl RemoteTimelineClient { RemoteOpKind::Upload, RemoteTimelineClientMetricsCallTrackSize::Bytes(m.file_size), ), - UploadOp::UploadMetadata(_, _) => ( + UploadOp::UploadMetadata { .. } => ( RemoteOpFileKind::Index, RemoteOpKind::Upload, DontTrackSize { @@ -1847,11 +1874,9 @@ impl RemoteTimelineClient { // Deletion is not really perf sensitive so there shouldnt be any problems with cloning a fraction of it. let upload_queue_for_deletion = UploadQueueInitialized { task_counter: 0, - latest_files: initialized.latest_files.clone(), + dirty: initialized.dirty.clone(), + clean: initialized.clean.clone(), latest_files_changes_since_metadata_upload_scheduled: 0, - latest_metadata: initialized.latest_metadata.clone(), - latest_lineage: initialized.latest_lineage.clone(), - projected_remote_consistent_lsn: None, visible_remote_consistent_lsn: initialized .visible_remote_consistent_lsn .clone(), @@ -1864,7 +1889,6 @@ impl RemoteTimelineClient { dangling_files: HashMap::default(), shutting_down: false, shutdown_ready: Arc::new(tokio::sync::Semaphore::new(0)), - last_aux_file_policy: initialized.last_aux_file_policy, }; let upload_queue = std::mem::replace( diff --git a/pageserver/src/tenant/remote_timeline_client/index.rs b/pageserver/src/tenant/remote_timeline_client/index.rs index f5d939c747..6494261312 100644 --- a/pageserver/src/tenant/remote_timeline_client/index.rs +++ b/pageserver/src/tenant/remote_timeline_client/index.rs @@ -11,7 +11,6 @@ use utils::id::TimelineId; use crate::tenant::metadata::TimelineMetadata; use crate::tenant::storage_layer::LayerName; -use crate::tenant::upload_queue::UploadQueueInitialized; use crate::tenant::Generation; use pageserver_api::shard::ShardIndex; @@ -42,7 +41,7 @@ pub struct IndexPart { // 'disk_consistent_lsn' is a copy of the 'disk_consistent_lsn' in the metadata. // It's duplicated for convenience when reading the serialized structure, but is // private because internally we would read from metadata instead. 
- disk_consistent_lsn: Lsn, + pub(super) disk_consistent_lsn: Lsn, #[serde(rename = "metadata_bytes")] pub metadata: TimelineMetadata, @@ -80,23 +79,15 @@ impl IndexPart { pub const FILE_NAME: &'static str = "index_part.json"; - fn new( - layers_and_metadata: &HashMap, - disk_consistent_lsn: Lsn, - metadata: TimelineMetadata, - lineage: Lineage, - last_aux_file_policy: Option, - ) -> Self { - let layer_metadata = layers_and_metadata.clone(); - - Self { + pub(crate) fn empty(metadata: TimelineMetadata) -> Self { + IndexPart { version: Self::LATEST_VERSION, - layer_metadata, - disk_consistent_lsn, + layer_metadata: Default::default(), + disk_consistent_lsn: metadata.disk_consistent_lsn(), metadata, deleted_at: None, - lineage, - last_aux_file_policy, + lineage: Default::default(), + last_aux_file_policy: None, } } @@ -106,7 +97,7 @@ impl IndexPart { /// If you want this under normal operations, read it from self.metadata: /// this method is just for the scrubber to use when validating an index. - pub fn get_disk_consistent_lsn(&self) -> Lsn { + pub fn duplicated_disk_consistent_lsn(&self) -> Lsn { self.disk_consistent_lsn } @@ -120,14 +111,7 @@ impl IndexPart { #[cfg(test)] pub(crate) fn example() -> Self { - let example_metadata = TimelineMetadata::example(); - Self::new( - &HashMap::new(), - example_metadata.disk_consistent_lsn(), - example_metadata, - Default::default(), - Some(AuxFilePolicy::V1), - ) + Self::empty(TimelineMetadata::example()) } pub(crate) fn last_aux_file_policy(&self) -> Option { @@ -135,22 +119,6 @@ impl IndexPart { } } -impl From<&UploadQueueInitialized> for IndexPart { - fn from(uq: &UploadQueueInitialized) -> Self { - let disk_consistent_lsn = uq.latest_metadata.disk_consistent_lsn(); - let metadata = uq.latest_metadata.clone(); - let lineage = uq.latest_lineage.clone(); - - Self::new( - &uq.latest_files, - disk_consistent_lsn, - metadata, - lineage, - uq.last_aux_file_policy, - ) - } -} - /// Metadata gathered for each of the layer files. /// /// Fields have to be `Option`s because remote [`IndexPart`]'s can be from different version, which @@ -236,11 +204,10 @@ impl Lineage { /// The queried lsn is most likely the basebackup lsn, and this answers question "is it allowed /// to start a read/write primary at this lsn". /// - /// Returns true if the Lsn was previously a branch point. + /// Returns true if the Lsn was previously our branch point. pub(crate) fn is_previous_ancestor_lsn(&self, lsn: Lsn) -> bool { self.original_ancestor - .as_ref() - .is_some_and(|(_, ancestor_lsn, _)| lsn == *ancestor_lsn) + .is_some_and(|(_, ancestor_lsn, _)| ancestor_lsn == lsn) } } diff --git a/pageserver/src/tenant/remote_timeline_client/upload.rs b/pageserver/src/tenant/remote_timeline_client/upload.rs index e8e824f415..c4dd184610 100644 --- a/pageserver/src/tenant/remote_timeline_client/upload.rs +++ b/pageserver/src/tenant/remote_timeline_client/upload.rs @@ -1,6 +1,7 @@ //! 
Helper functions to upload files to remote storage with a RemoteStorage use anyhow::{bail, Context}; +use bytes::Bytes; use camino::Utf8Path; use fail::fail_point; use pageserver_api::shard::TenantShardId; @@ -11,10 +12,10 @@ use tokio::io::AsyncSeekExt; use tokio_util::sync::CancellationToken; use utils::{backoff, pausable_failpoint}; +use super::index::IndexPart; use super::Generation; use crate::tenant::remote_timeline_client::{ - index::IndexPart, remote_index_path, remote_initdb_archive_path, - remote_initdb_preserved_archive_path, + remote_index_path, remote_initdb_archive_path, remote_initdb_preserved_archive_path, }; use remote_storage::{GenericRemoteStorage, RemotePath, TimeTravelError}; use utils::id::{TenantId, TimelineId}; @@ -27,7 +28,7 @@ pub(crate) async fn upload_index_part<'a>( tenant_shard_id: &TenantShardId, timeline_id: &TimelineId, generation: Generation, - index_part: &'a IndexPart, + index_part: &IndexPart, cancel: &CancellationToken, ) -> anyhow::Result<()> { tracing::trace!("uploading new index part"); @@ -37,16 +38,16 @@ pub(crate) async fn upload_index_part<'a>( }); pausable_failpoint!("before-upload-index-pausable"); - let index_part_bytes = index_part - .to_s3_bytes() - .context("serialize index part file into bytes")?; - let index_part_size = index_part_bytes.len(); - let index_part_bytes = bytes::Bytes::from(index_part_bytes); + // FIXME: this error comes too late + let serialized = index_part.to_s3_bytes()?; + let serialized = Bytes::from(serialized); + + let index_part_size = serialized.len(); let remote_path = remote_index_path(tenant_shard_id, timeline_id, generation); storage .upload_storage_object( - futures::stream::once(futures::future::ready(Ok(index_part_bytes))), + futures::stream::once(futures::future::ready(Ok(serialized))), index_part_size, &remote_path, cancel, diff --git a/pageserver/src/tenant/upload_queue.rs b/pageserver/src/tenant/upload_queue.rs index 02f87303d1..50c977a950 100644 --- a/pageserver/src/tenant/upload_queue.rs +++ b/pageserver/src/tenant/upload_queue.rs @@ -3,12 +3,10 @@ use super::storage_layer::ResidentLayer; use crate::tenant::metadata::TimelineMetadata; use crate::tenant::remote_timeline_client::index::IndexPart; use crate::tenant::remote_timeline_client::index::LayerFileMetadata; -use crate::tenant::remote_timeline_client::index::Lineage; use std::collections::{HashMap, VecDeque}; use std::fmt::Debug; use chrono::NaiveDateTime; -use pageserver_api::models::AuxFilePolicy; use std::sync::Arc; use tracing::info; use utils::lsn::AtomicLsn; @@ -45,34 +43,25 @@ pub(crate) struct UploadQueueInitialized { /// Counter to assign task IDs pub(crate) task_counter: u64, - /// All layer files stored in the remote storage, taking into account all - /// in-progress and queued operations - pub(crate) latest_files: HashMap, + /// The next uploaded index_part.json; assumed to be dirty. + /// + /// Should not be read, directly except for layer file updates. Instead you should add a + /// projected field. + pub(crate) dirty: IndexPart, + + /// The latest remote persisted IndexPart. + /// + /// Each completed metadata upload will update this. The second item is the task_id which last + /// updated the value, used to ensure we never store an older value over a newer one. + pub(crate) clean: (IndexPart, Option), /// How many file uploads or deletions been scheduled, since the /// last (scheduling of) metadata index upload? 
pub(crate) latest_files_changes_since_metadata_upload_scheduled: u64, - /// Metadata stored in the remote storage, taking into account all - /// in-progress and queued operations. - /// DANGER: do not return to outside world, e.g., safekeepers. - pub(crate) latest_metadata: TimelineMetadata, - - /// Part of the flattened "next" `index_part.json`. - pub(crate) latest_lineage: Lineage, - - /// The last aux file policy used on this timeline. - pub(crate) last_aux_file_policy: Option, - - /// `disk_consistent_lsn` from the last metadata file that was successfully - /// uploaded. `Lsn(0)` if nothing was uploaded yet. - /// Unlike `latest_files` or `latest_metadata`, this value is never ahead. - /// Safekeeper can rely on it to make decisions for WAL storage. - /// - /// visible_remote_consistent_lsn is only updated after our generation has been validated with + /// The Lsn is only updated after our generation has been validated with /// the control plane (unlesss a timeline's generation is None, in which case /// we skip validation) - pub(crate) projected_remote_consistent_lsn: Option, pub(crate) visible_remote_consistent_lsn: Arc, // Breakdown of different kinds of tasks currently in-progress @@ -118,7 +107,8 @@ impl UploadQueueInitialized { } pub(super) fn get_last_remote_consistent_lsn_projected(&self) -> Option { - self.projected_remote_consistent_lsn + let lsn = self.clean.0.metadata.disk_consistent_lsn(); + self.clean.1.map(|_| lsn) } } @@ -174,13 +164,12 @@ impl UploadQueue { info!("initializing upload queue for empty remote"); + let index_part = IndexPart::empty(metadata.clone()); + let state = UploadQueueInitialized { - // As described in the doc comment, it's ok for `latest_files` and `latest_metadata` to be ahead. - latest_files: HashMap::new(), + dirty: index_part.clone(), + clean: (index_part, None), latest_files_changes_since_metadata_upload_scheduled: 0, - latest_metadata: metadata.clone(), - latest_lineage: Lineage::default(), - projected_remote_consistent_lsn: None, visible_remote_consistent_lsn: Arc::new(AtomicLsn::new(0)), // what follows are boring default initializations task_counter: 0, @@ -193,7 +182,6 @@ impl UploadQueue { dangling_files: HashMap::new(), shutting_down: false, shutdown_ready: Arc::new(tokio::sync::Semaphore::new(0)), - last_aux_file_policy: Default::default(), }; *self = UploadQueue::Initialized(state); @@ -211,22 +199,15 @@ impl UploadQueue { } } - let mut files = HashMap::with_capacity(index_part.layer_metadata.len()); - for (layer_name, layer_metadata) in &index_part.layer_metadata { - files.insert(layer_name.to_owned(), layer_metadata.clone()); - } - info!( "initializing upload queue with remote index_part.disk_consistent_lsn: {}", index_part.metadata.disk_consistent_lsn() ); let state = UploadQueueInitialized { - latest_files: files, + dirty: index_part.clone(), + clean: (index_part.clone(), None), latest_files_changes_since_metadata_upload_scheduled: 0, - latest_metadata: index_part.metadata.clone(), - latest_lineage: index_part.lineage.clone(), - projected_remote_consistent_lsn: Some(index_part.metadata.disk_consistent_lsn()), visible_remote_consistent_lsn: Arc::new( index_part.metadata.disk_consistent_lsn().into(), ), @@ -241,7 +222,6 @@ impl UploadQueue { dangling_files: HashMap::new(), shutting_down: false, shutdown_ready: Arc::new(tokio::sync::Semaphore::new(0)), - last_aux_file_policy: index_part.last_aux_file_policy(), }; *self = UploadQueue::Initialized(state); @@ -298,13 +278,16 @@ pub(crate) enum UploadOp { /// Upload a layer file 
UploadLayer(ResidentLayer, LayerFileMetadata), - /// Upload the metadata file - UploadMetadata(Box, Lsn), + /// Upload a index_part.json file + UploadMetadata { + /// The next [`UploadQueueInitialized::clean`] after this upload succeeds. + uploaded: Box, + }, /// Delete layer files Delete(Delete), - /// Barrier. When the barrier operation is reached, + /// Barrier. When the barrier operation is reached, the channel is closed. Barrier(tokio::sync::watch::Sender<()>), /// Shutdown; upon encountering this operation no new operations will be spawned, otherwise @@ -322,8 +305,12 @@ impl std::fmt::Display for UploadOp { layer, metadata.file_size, metadata.generation ) } - UploadOp::UploadMetadata(_, lsn) => { - write!(f, "UploadMetadata(lsn: {})", lsn) + UploadOp::UploadMetadata { uploaded, .. } => { + write!( + f, + "UploadMetadata(lsn: {})", + uploaded.metadata.disk_consistent_lsn() + ) } UploadOp::Delete(delete) => { write!(f, "Delete({} layers)", delete.layers.len()) diff --git a/s3_scrubber/src/checks.rs b/s3_scrubber/src/checks.rs index 2c14fef0af..44fb53696c 100644 --- a/s3_scrubber/src/checks.rs +++ b/s3_scrubber/src/checks.rs @@ -93,12 +93,12 @@ pub(crate) fn branch_cleanup_and_check_errors( } if index_part.metadata.disk_consistent_lsn() - != index_part.get_disk_consistent_lsn() + != index_part.duplicated_disk_consistent_lsn() { result.errors.push(format!( "Mismatching disk_consistent_lsn in TimelineMetadata ({}) and in the index_part ({})", index_part.metadata.disk_consistent_lsn(), - index_part.get_disk_consistent_lsn(), + index_part.duplicated_disk_consistent_lsn(), )) } From fd22fc5b7d29214e8544c65062494c2ad03d744d Mon Sep 17 00:00:00 2001 From: John Spray Date: Tue, 4 Jun 2024 16:16:50 +0100 Subject: [PATCH 132/220] pageserver: include heatmap in tenant deletion (#7928) ## Problem This was an oversight when adding heatmaps: because they are at the top level of the tenant, they aren't included in the catch-all list & delete that happens for timeline paths. This doesn't break anything, but it leaves behind a few kilobytes of garbage in the S3 bucket after a tenant is deleted, generating work for the scrubber. ## Summary of changes - During deletion, explicitly remove the heatmap file - In test_tenant_delete_smoke, upload a heatmap so that the test would fail its "remote storage empty after delete" check if we didn't delete it. 
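The new deletion step is essentially "delete one more object, best effort, with retries". Below is a rough sketch of that shape using a hypothetical `Storage` trait, retry helper, and path layout; the real code goes through `remote_heatmap_path` and `backoff::retry` with the task manager's shutdown token.

```rust
use std::{cell::RefCell, collections::HashSet, thread, time::Duration};

trait Storage {
    fn delete(&self, path: &str) -> Result<(), String>;
}

/// Toy in-memory stand-in for remote storage, just to make the sketch runnable.
struct InMemoryStorage {
    objects: RefCell<HashSet<String>>,
}

impl Storage for InMemoryStorage {
    fn delete(&self, path: &str) -> Result<(), String> {
        // Deleting a missing object is treated as success, like most object stores.
        self.objects.borrow_mut().remove(path);
        Ok(())
    }
}

/// Retry a fallible operation a few times with a fixed backoff.
/// (The real code uses backoff::retry with jitter and a cancellation token.)
fn retry<T, E: std::fmt::Display>(
    mut op: impl FnMut() -> Result<T, E>,
    attempts: u32,
    backoff: Duration,
) -> Result<T, E> {
    let mut last_err = None;
    for attempt in 1..=attempts {
        match op() {
            Ok(v) => return Ok(v),
            Err(e) => {
                eprintln!("attempt {attempt}/{attempts} failed: {e}");
                last_err = Some(e);
                thread::sleep(backoff);
            }
        }
    }
    Err(last_err.expect("attempts must be > 0"))
}

fn delete_tenant_heatmap(storage: &dyn Storage, tenant_shard_id: &str) {
    // Illustrative path layout; the real path comes from remote_heatmap_path().
    let heatmap_path = format!("tenants/{tenant_shard_id}/heatmap-v1.json");
    // Best effort: log a warning on failure instead of failing tenant deletion.
    if let Err(e) = retry(|| storage.delete(&heatmap_path), 3, Duration::from_millis(100)) {
        eprintln!("warning: failed to delete heatmap at {heatmap_path}: {e}");
    }
}

fn main() {
    let storage = InMemoryStorage {
        objects: RefCell::new(HashSet::from([
            "tenants/1234/heatmap-v1.json".to_string(),
        ])),
    };
    delete_tenant_heatmap(&storage, "1234");
    assert!(storage.objects.borrow().is_empty());
}
```

As in the real change, failures are only logged as a warning, so a flaky delete can leave a little garbage behind but cannot fail the overall tenant deletion.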
--- pageserver/src/tenant/delete.rs | 20 ++++++++++++++++++++ test_runner/regress/test_tenant_delete.py | 3 +++ 2 files changed, 23 insertions(+) diff --git a/pageserver/src/tenant/delete.rs b/pageserver/src/tenant/delete.rs index 7c6640eaac..8b36aa15e5 100644 --- a/pageserver/src/tenant/delete.rs +++ b/pageserver/src/tenant/delete.rs @@ -16,6 +16,7 @@ use crate::{ task_mgr::{self, TaskKind}, tenant::{ mgr::{TenantSlot, TenantsMapRemoveResult}, + remote_timeline_client::remote_heatmap_path, timeline::ShutdownMode, }, }; @@ -531,6 +532,25 @@ impl DeleteTenantFlow { } } + // Remove top-level tenant objects that don't belong to a timeline, such as heatmap + let heatmap_path = remote_heatmap_path(&tenant.tenant_shard_id()); + if let Some(Err(e)) = backoff::retry( + || async { + remote_storage + .delete(&heatmap_path, &task_mgr::shutdown_token()) + .await + }, + TimeoutOrCancel::caused_by_cancel, + FAILED_UPLOAD_WARN_THRESHOLD, + FAILED_REMOTE_OP_RETRIES, + "remove_remote_tenant_heatmap", + &task_mgr::shutdown_token(), + ) + .await + { + tracing::warn!("Failed to delete heatmap at {heatmap_path}: {e}"); + } + let timelines_path = conf.timelines_path(&tenant.tenant_shard_id); // May not exist if we fail in cleanup_remaining_fs_traces after removing it if timelines_path.exists() { diff --git a/test_runner/regress/test_tenant_delete.py b/test_runner/regress/test_tenant_delete.py index 3fc44de6fa..e120aa1a7c 100644 --- a/test_runner/regress/test_tenant_delete.py +++ b/test_runner/regress/test_tenant_delete.py @@ -88,6 +88,9 @@ def test_tenant_delete_smoke( parent = timeline + # Upload a heatmap so that we exercise deletion of that too + ps_http.tenant_heatmap_upload(tenant_id) + iterations = poll_for_remote_storage_iterations(remote_storage_kind) assert ps_http.get_metric_value("pageserver_tenant_manager_slots", {"mode": "attached"}) == 2 From 17116f2ea941d6d5e821e81dd5f17c88edc968f3 Mon Sep 17 00:00:00 2001 From: Christian Schwarz Date: Tue, 4 Jun 2024 18:16:23 +0200 Subject: [PATCH 133/220] fix(pageserver): abort on duplicate layers, before doing damage (#7799) fixes https://github.com/neondatabase/neon/issues/7790 (duplicating most of the issue description here for posterity) # Background From the time before always-authoritative `index_part.json`, we had to handle duplicate layers. See the RFC for an illustration of how duplicate layers could happen: https://github.com/neondatabase/neon/blob/a8e6d259cb49d1bf156dfc2215b92c04d1e8a08f/docs/rfcs/027-crash-consistent-layer-map-through-index-part.md?plain=1#L41-L50 As of #5198 , we should not be exposed to that problem anymore. # Problem 1 We still have 1. [code in Pageserver](https://github.com/neondatabase/neon/blob/82960b2175211c0f666b91b5258c5e2253a245c7/pageserver/src/tenant/timeline.rs#L4502-L4521) than handles duplicate layers 2. [tests in the test suite](https://github.com/neondatabase/neon/blob/d9dcbffac37ccd3331ec9adcd12fd20ce0ea31aa/test_runner/regress/test_duplicate_layers.py#L15) that demonstrates the problem using a failpoint However, the test in the test suite doesn't use the failpoint to induce a crash that could legitimately happen in production. What is does instead is to return early with an `Ok()`, so that the code in Pageserver that handles duplicate layers (item 1) actually gets exercised. That "return early" would be a bug in the routine if it happened in production. So, the tests in the test suite are tests for their own sake, but don't serve to actually regress-test any production behavior. 
# Problem 2 Further, if production code _did_ (it nowawdays doesn't!) create a duplicate layer, the code in Pageserver that handles the condition (item 1 above) is too little and too late: * the code handles it by discarding the newer `struct Layer`; that's good. * however, on disk, we have already overwritten the old with the new layer file * the fact that we do it atomically doesn't matter because ... * if the new layer file is not bit-identical, then we have a cache coherency problem * PS PageCache block cache: caches old bit battern * blob_io offsets stored in variables, based on pre-overwrite bit pattern / offsets * => reading based on these offsets from the new file might yield different data than before # Solution - Remove the test suite code pertaining to Problem 1 - Move & rename test suite code that actually tests RFC-27 crash-consistent layer map. - Remove the Pageserver code that handles duplicate layers too late (Problem 1) - Use `RENAME_NOREPLACE` to prevent over-rename the file during `.finish()`, bail with an error if it happens (Problem 2) - This bailing prevents the caller from even trying to insert into the layer map, as they don't even get a `struct Layer` at hand. - Add `abort`s in the place where we have the layer map lock and check for duplicates (Problem 2) - Note again, we can't reach there because we bail from `.finish()` much earlier in the code. - Share the logic to clean up after failed `.finish()` between image layers and delta layers (drive-by cleanup) - This exposed that test `image_layer_rewrite` was overwriting layer files in place. Fix the test. # Future Work This PR adds a new failure scenario that was previously "papered over" by the overwriting of layers: 1. Start a compaction that will produce 3 layers: A, B, C 2. Layer A is `finish()`ed successfully. 3. Layer B fails mid-way at some `put_value()`. 4. Compaction bails out, sleeps 20s. 5. Some disk space gets freed in the meantime. 6. Compaction wakes from sleep, another iteration starts, it attempts to write Layer A again. But the `.finish()` **fails because A already exists on disk**. The failure in step 5 is new with this PR, and it **causes the compaction to get stuck**. Before, it would silently overwrite the file and "successfully" complete the second iteration. The mitigation for this is to `/reset` the tenant. --- libs/utils/src/fs_ext.rs | 3 + libs/utils/src/fs_ext/rename_noreplace.rs | 109 ++++++++++++++++++ pageserver/src/tenant.rs | 35 ++++-- .../src/tenant/storage_layer/delta_layer.rs | 35 +++--- .../src/tenant/storage_layer/image_layer.rs | 92 ++++++++++++--- pageserver/src/tenant/storage_layer/layer.rs | 7 +- pageserver/src/tenant/timeline/compaction.rs | 42 ------- test_runner/regress/test_compaction.py | 2 +- ...y => test_pageserver_crash_consistency.py} | 50 ++------ .../regress/test_pageserver_restart.py | 5 - .../regress/test_pageserver_secondary.py | 4 - 11 files changed, 252 insertions(+), 132 deletions(-) create mode 100644 libs/utils/src/fs_ext/rename_noreplace.rs rename test_runner/regress/{test_duplicate_layers.py => test_pageserver_crash_consistency.py} (66%) diff --git a/libs/utils/src/fs_ext.rs b/libs/utils/src/fs_ext.rs index 90ba348a02..8e53d2c79b 100644 --- a/libs/utils/src/fs_ext.rs +++ b/libs/utils/src/fs_ext.rs @@ -3,6 +3,9 @@ use std::{fs, io, path::Path}; use anyhow::Context; +mod rename_noreplace; +pub use rename_noreplace::rename_noreplace; + pub trait PathExt { /// Returns an error if `self` is not a directory. 
fn is_empty_dir(&self) -> io::Result; diff --git a/libs/utils/src/fs_ext/rename_noreplace.rs b/libs/utils/src/fs_ext/rename_noreplace.rs new file mode 100644 index 0000000000..897e30d7f1 --- /dev/null +++ b/libs/utils/src/fs_ext/rename_noreplace.rs @@ -0,0 +1,109 @@ +use nix::NixPath; + +/// Rename a file without replacing an existing file. +/// +/// This is a wrapper around platform-specific APIs. +pub fn rename_noreplace( + src: &P1, + dst: &P2, +) -> nix::Result<()> { + { + #[cfg(target_os = "linux")] + { + nix::fcntl::renameat2( + None, + src, + None, + dst, + nix::fcntl::RenameFlags::RENAME_NOREPLACE, + ) + } + #[cfg(target_os = "macos")] + { + let res = src.with_nix_path(|src| { + dst.with_nix_path(|dst| + // SAFETY: `src` and `dst` are valid C strings as per the NixPath trait and they outlive the call to renamex_np. + unsafe { + nix::libc::renamex_np(src.as_ptr(), dst.as_ptr(), nix::libc::RENAME_EXCL) + }) + })??; + nix::errno::Errno::result(res).map(drop) + } + #[cfg(not(any(target_os = "linux", target_os = "macos")))] + { + std::compile_error!("OS does not support no-replace renames"); + } + } +} + +#[cfg(test)] +mod test { + use std::{fs, path::PathBuf}; + + use super::*; + + fn testdir() -> camino_tempfile::Utf8TempDir { + match crate::env::var("NEON_UTILS_RENAME_NOREPLACE_TESTDIR") { + Some(path) => { + let path: camino::Utf8PathBuf = path; + camino_tempfile::tempdir_in(path).unwrap() + } + None => camino_tempfile::tempdir().unwrap(), + } + } + + #[test] + fn test_absolute_paths() { + let testdir = testdir(); + println!("testdir: {}", testdir.path()); + + let src = testdir.path().join("src"); + let dst = testdir.path().join("dst"); + + fs::write(&src, b"").unwrap(); + fs::write(&dst, b"").unwrap(); + + let src = src.canonicalize().unwrap(); + assert!(src.is_absolute()); + let dst = dst.canonicalize().unwrap(); + assert!(dst.is_absolute()); + + let result = rename_noreplace(&src, &dst); + assert_eq!(result.unwrap_err(), nix::Error::EEXIST); + } + + #[test] + fn test_relative_paths() { + let testdir = testdir(); + println!("testdir: {}", testdir.path()); + + // this is fine because we run in nextest => process per test + std::env::set_current_dir(testdir.path()).unwrap(); + + let src = PathBuf::from("src"); + let dst = PathBuf::from("dst"); + + fs::write(&src, b"").unwrap(); + fs::write(&dst, b"").unwrap(); + + let result = rename_noreplace(&src, &dst); + assert_eq!(result.unwrap_err(), nix::Error::EEXIST); + } + + #[test] + fn test_works_when_not_exists() { + let testdir = testdir(); + println!("testdir: {}", testdir.path()); + + let src = testdir.path().join("src"); + let dst = testdir.path().join("dst"); + + fs::write(&src, b"content").unwrap(); + + rename_noreplace(src.as_std_path(), dst.as_std_path()).unwrap(); + assert_eq!( + "content", + String::from_utf8(std::fs::read(&dst).unwrap()).unwrap() + ); + } +} diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 19a0f59b2a..60cd5c9695 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -3865,6 +3865,9 @@ pub(crate) mod harness { pub fn create_custom( test_name: &'static str, tenant_conf: TenantConf, + tenant_id: TenantId, + shard_identity: ShardIdentity, + generation: Generation, ) -> anyhow::Result { setup_logging(); @@ -3877,8 +3880,12 @@ pub(crate) mod harness { // OK in a test. 
let conf: &'static PageServerConf = Box::leak(Box::new(conf)); - let tenant_id = TenantId::generate(); - let tenant_shard_id = TenantShardId::unsharded(tenant_id); + let shard = shard_identity.shard_index(); + let tenant_shard_id = TenantShardId { + tenant_id, + shard_number: shard.shard_number, + shard_count: shard.shard_count, + }; fs::create_dir_all(conf.tenant_path(&tenant_shard_id))?; fs::create_dir_all(conf.timelines_path(&tenant_shard_id))?; @@ -3896,8 +3903,8 @@ pub(crate) mod harness { conf, tenant_conf, tenant_shard_id, - generation: Generation::new(0xdeadbeef), - shard: ShardIndex::unsharded(), + generation, + shard, remote_storage, remote_fs_dir, deletion_queue, @@ -3912,8 +3919,15 @@ pub(crate) mod harness { compaction_period: Duration::ZERO, ..TenantConf::default() }; - - Self::create_custom(test_name, tenant_conf) + let tenant_id = TenantId::generate(); + let shard = ShardIdentity::unsharded(); + Self::create_custom( + test_name, + tenant_conf, + tenant_id, + shard, + Generation::new(0xdeadbeef), + ) } pub fn span(&self) -> tracing::Span { @@ -4037,6 +4051,7 @@ mod tests { use tests::storage_layer::ValuesReconstructState; use tests::timeline::{GetVectoredError, ShutdownMode}; use utils::bin_ser::BeSer; + use utils::id::TenantId; static TEST_KEY: Lazy = Lazy::new(|| Key::from_slice(&hex!("010000000033333333444444445500000001"))); @@ -4936,7 +4951,13 @@ mod tests { ..TenantConf::default() }; - let harness = TenantHarness::create_custom("test_get_vectored_key_gap", tenant_conf)?; + let harness = TenantHarness::create_custom( + "test_get_vectored_key_gap", + tenant_conf, + TenantId::generate(), + ShardIdentity::unsharded(), + Generation::new(0xdeadbeef), + )?; let (tenant, ctx) = harness.load().await; let mut current_key = Key::from_hex("010000000033333333444444445500000000").unwrap(); diff --git a/pageserver/src/tenant/storage_layer/delta_layer.rs b/pageserver/src/tenant/storage_layer/delta_layer.rs index 1b3802840f..999e2e8679 100644 --- a/pageserver/src/tenant/storage_layer/delta_layer.rs +++ b/pageserver/src/tenant/storage_layer/delta_layer.rs @@ -478,6 +478,23 @@ impl DeltaLayerWriterInner { key_end: Key, timeline: &Arc, ctx: &RequestContext, + ) -> anyhow::Result { + let temp_path = self.path.clone(); + let result = self.finish0(key_end, timeline, ctx).await; + if result.is_err() { + tracing::info!(%temp_path, "cleaning up temporary file after error during writing"); + if let Err(e) = std::fs::remove_file(&temp_path) { + tracing::warn!(error=%e, %temp_path, "error cleaning up temporary layer file after error during writing"); + } + } + result + } + + async fn finish0( + self, + key_end: Key, + timeline: &Arc, + ctx: &RequestContext, ) -> anyhow::Result { let index_start_blk = ((self.blob_writer.size() + PAGE_SZ as u64 - 1) / PAGE_SZ as u64) as u32; @@ -651,19 +668,11 @@ impl DeltaLayerWriter { timeline: &Arc, ctx: &RequestContext, ) -> anyhow::Result { - let inner = self.inner.take().unwrap(); - let temp_path = inner.path.clone(); - let result = inner.finish(key_end, timeline, ctx).await; - // The delta layer files can sometimes be really large. Clean them up. 
- if result.is_err() { - tracing::warn!( - "Cleaning up temporary delta file {temp_path} after error during writing" - ); - if let Err(e) = std::fs::remove_file(&temp_path) { - tracing::warn!("Error cleaning up temporary delta layer file {temp_path}: {e:?}") - } - } - result + self.inner + .take() + .unwrap() + .finish(key_end, timeline, ctx) + .await } } diff --git a/pageserver/src/tenant/storage_layer/image_layer.rs b/pageserver/src/tenant/storage_layer/image_layer.rs index 8394b33f19..285618b146 100644 --- a/pageserver/src/tenant/storage_layer/image_layer.rs +++ b/pageserver/src/tenant/storage_layer/image_layer.rs @@ -917,26 +917,57 @@ impl Drop for ImageLayerWriter { #[cfg(test)] mod test { + use std::time::Duration; + use bytes::Bytes; use pageserver_api::{ key::Key, shard::{ShardCount, ShardIdentity, ShardNumber, ShardStripeSize}, }; - use utils::{id::TimelineId, lsn::Lsn}; + use utils::{ + generation::Generation, + id::{TenantId, TimelineId}, + lsn::Lsn, + }; - use crate::{tenant::harness::TenantHarness, DEFAULT_PG_VERSION}; + use crate::{ + tenant::{config::TenantConf, harness::TenantHarness}, + DEFAULT_PG_VERSION, + }; use super::ImageLayerWriter; #[tokio::test] async fn image_layer_rewrite() { - let harness = TenantHarness::create("test_image_layer_rewrite").unwrap(); - let (tenant, ctx) = harness.load().await; - + let tenant_conf = TenantConf { + gc_period: Duration::ZERO, + compaction_period: Duration::ZERO, + ..TenantConf::default() + }; + let tenant_id = TenantId::generate(); + let mut gen = Generation::new(0xdead0001); + let mut get_next_gen = || { + let ret = gen; + gen = gen.next(); + ret + }; // The LSN at which we will create an image layer to filter let lsn = Lsn(0xdeadbeef0000); - let timeline_id = TimelineId::generate(); + + // + // Create an unsharded parent with a layer. + // + + let harness = TenantHarness::create_custom( + "test_image_layer_rewrite--parent", + tenant_conf.clone(), + tenant_id, + ShardIdentity::unsharded(), + get_next_gen(), + ) + .unwrap(); + let (tenant, ctx) = harness.load().await; let timeline = tenant .create_test_timeline(timeline_id, lsn, DEFAULT_PG_VERSION, &ctx) .await @@ -971,9 +1002,47 @@ mod test { }; let original_size = resident.metadata().file_size; + // + // Create child shards and do the rewrite, exercising filter(). + // TODO: abstraction in TenantHarness for splits. + // + // Filter for various shards: this exercises cases like values at start of key range, end of key // range, middle of key range. - for shard_number in 0..4 { + let shard_count = ShardCount::new(4); + for shard_number in 0..shard_count.count() { + // + // mimic the shard split + // + let shard_identity = ShardIdentity::new( + ShardNumber(shard_number), + shard_count, + ShardStripeSize(0x8000), + ) + .unwrap(); + let harness = TenantHarness::create_custom( + Box::leak(Box::new(format!( + "test_image_layer_rewrite--child{}", + shard_identity.shard_slug() + ))), + tenant_conf.clone(), + tenant_id, + shard_identity, + // NB: in reality, the shards would each fork off their own gen number sequence from the parent. + // But here, all we care about is that the gen number is unique. 
+ get_next_gen(), + ) + .unwrap(); + let (tenant, ctx) = harness.load().await; + let timeline = tenant + .create_test_timeline(timeline_id, lsn, DEFAULT_PG_VERSION, &ctx) + .await + .unwrap(); + + // + // use filter() and make assertions + // + let mut filtered_writer = ImageLayerWriter::new( harness.conf, timeline_id, @@ -985,15 +1054,6 @@ mod test { .await .unwrap(); - // TenantHarness gave us an unsharded tenant, but we'll use a sharded ShardIdentity - // to exercise filter() - let shard_identity = ShardIdentity::new( - ShardNumber(shard_number), - ShardCount::new(4), - ShardStripeSize(0x8000), - ) - .unwrap(); - let wrote_keys = resident .filter(&shard_identity, &mut filtered_writer, &ctx) .await diff --git a/pageserver/src/tenant/storage_layer/layer.rs b/pageserver/src/tenant/storage_layer/layer.rs index 1ec13882da..18f9ba4ef8 100644 --- a/pageserver/src/tenant/storage_layer/layer.rs +++ b/pageserver/src/tenant/storage_layer/layer.rs @@ -277,9 +277,10 @@ impl Layer { let downloaded = resident.expect("just initialized"); - // if the rename works, the path is as expected - // TODO: sync system call - std::fs::rename(temp_path, owner.local_path()) + // We never want to overwrite an existing file, so we use `RENAME_NOREPLACE`. + // TODO: this leaves the temp file in place if the rename fails, risking us running + // out of space. Should we clean it up here or does the calling context deal with this? + utils::fs_ext::rename_noreplace(temp_path.as_std_path(), owner.local_path().as_std_path()) .with_context(|| format!("rename temporary file as correct path for {owner}"))?; Ok(ResidentLayer { downloaded, owner }) diff --git a/pageserver/src/tenant/timeline/compaction.rs b/pageserver/src/tenant/timeline/compaction.rs index 15c77d0316..d8de6aee7c 100644 --- a/pageserver/src/tenant/timeline/compaction.rs +++ b/pageserver/src/tenant/timeline/compaction.rs @@ -421,48 +421,6 @@ impl Timeline { return Ok(CompactLevel0Phase1Result::default()); } - // This failpoint is used together with `test_duplicate_layers` integration test. - // It returns the compaction result exactly the same layers as input to compaction. - // We want to ensure that this will not cause any problem when updating the layer map - // after the compaction is finished. - // - // Currently, there are two rare edge cases that will cause duplicated layers being - // inserted. - // 1. The compaction job is inturrupted / did not finish successfully. Assume we have file 1, 2, 3, 4, which - // is compacted to 5, but the page server is shut down, next time we start page server we will get a layer - // map containing 1, 2, 3, 4, and 5, whereas 5 has the same content as 4. If we trigger L0 compation at this - // point again, it is likely that we will get a file 6 which has the same content and the key range as 5, - // and this causes an overwrite. This is acceptable because the content is the same, and we should do a - // layer replace instead of the normal remove / upload process. - // 2. The input workload pattern creates exactly n files that are sorted, non-overlapping and is of target file - // size length. Compaction will likely create the same set of n files afterwards. - // - // This failpoint is a superset of both of the cases. 
- if cfg!(feature = "testing") { - let active = (|| { - ::fail::fail_point!("compact-level0-phase1-return-same", |_| true); - false - })(); - - if active { - let mut new_layers = Vec::with_capacity(level0_deltas.len()); - for delta in &level0_deltas { - // we are just faking these layers as being produced again for this failpoint - new_layers.push( - delta - .download_and_keep_resident() - .await - .context("download layer for failpoint")?, - ); - } - tracing::info!("compact-level0-phase1-return-same"); // so that we can check if we hit the failpoint - return Ok(CompactLevel0Phase1Result { - new_layers, - deltas_to_compact: level0_deltas, - }); - } - } - // Gather the files to compact in this iteration. // // Start with the oldest Level 0 delta file, and collect any other diff --git a/test_runner/regress/test_compaction.py b/test_runner/regress/test_compaction.py index b2e4d35cb8..49dcb9b86a 100644 --- a/test_runner/regress/test_compaction.py +++ b/test_runner/regress/test_compaction.py @@ -238,7 +238,7 @@ def test_uploads_and_deletions( # https://github.com/neondatabase/neon/issues/7707 # https://github.com/neondatabase/neon/issues/7759 allowed_errors = [ - ".*duplicated L1 layer.*", + ".*/checkpoint.*rename temporary file as correct path for.*", # EEXIST ".*delta layer created with.*duplicate values.*", ".*assertion failed: self.lsn_range.start <= lsn.*", ".*HTTP request handler task panicked: task.*panicked.*", diff --git a/test_runner/regress/test_duplicate_layers.py b/test_runner/regress/test_pageserver_crash_consistency.py similarity index 66% rename from test_runner/regress/test_duplicate_layers.py rename to test_runner/regress/test_pageserver_crash_consistency.py index 0ebb99c712..3831d2f917 100644 --- a/test_runner/regress/test_duplicate_layers.py +++ b/test_runner/regress/test_pageserver_crash_consistency.py @@ -12,42 +12,14 @@ from fixtures.remote_storage import LocalFsStorage, RemoteStorageKind from requests.exceptions import ConnectionError -def test_duplicate_layers(neon_env_builder: NeonEnvBuilder, pg_bin: PgBin): - env = neon_env_builder.init_start() - pageserver_http = env.pageserver.http_client() - - # use a failpoint to return all L0s as L1s - message = ".*duplicated L1 layer layer=.*" - env.pageserver.allowed_errors.append(message) - - # Use aggressive compaction and checkpoint settings - tenant_id, _ = env.neon_cli.create_tenant( - conf={ - "checkpoint_distance": f"{1024 ** 2}", - "compaction_target_size": f"{1024 ** 2}", - "compaction_period": "5 s", - "compaction_threshold": "3", - } - ) - - pageserver_http.configure_failpoints(("compact-level0-phase1-return-same", "return")) - - endpoint = env.endpoints.create_start("main", tenant_id=tenant_id) - connstr = endpoint.connstr(options="-csynchronous_commit=off") - pg_bin.run_capture(["pgbench", "-i", "-s1", connstr]) - - time.sleep(10) # let compaction to be performed - env.pageserver.assert_log_contains("compact-level0-phase1-return-same") - - -def test_actually_duplicated_l1(neon_env_builder: NeonEnvBuilder, pg_bin: PgBin): +def test_local_only_layers_after_crash(neon_env_builder: NeonEnvBuilder, pg_bin: PgBin): """ - Test sets fail point at the end of first compaction phase: after - flushing new L1 layer but before deletion of L0 layers. + Test case for docs/rfcs/027-crash-consistent-layer-map-through-index-part.md. - The L1 used to be overwritten, but with crash-consistency via remote - index_part.json, we end up deleting the not yet uploaded L1 layer on - startup. 
+ Simulate crash after compaction has written layers to disk + but before they have been uploaded/linked into remote index_part.json. + + Startup handles this situation by deleting the not yet uploaded L1 layer files. """ neon_env_builder.enable_pageserver_remote_storage(RemoteStorageKind.LOCAL_FS) @@ -126,13 +98,6 @@ def test_actually_duplicated_l1(neon_env_builder: NeonEnvBuilder, pg_bin: PgBin) # give time for log flush time.sleep(1) - message = f".*duplicated L1 layer layer={l1_found}" - found_msg = env.pageserver.log_contains(message) - # resident or evicted, it should not be overwritten, however it should had been non-existing at startup - assert ( - found_msg is None - ), "layer should had been removed during startup, did it live on as evicted?" - assert env.pageserver.layer_exists(tenant_id, timeline_id, l1_found), "the L1 reappears" wait_for_upload_queue_empty(pageserver_http, tenant_id, timeline_id) @@ -141,3 +106,6 @@ def test_actually_duplicated_l1(neon_env_builder: NeonEnvBuilder, pg_bin: PgBin) tenant_id, timeline_id, l1_found.to_str() ) assert uploaded.exists(), "the L1 is uploaded" + + +# TODO: same test for L0s produced by ingest. diff --git a/test_runner/regress/test_pageserver_restart.py b/test_runner/regress/test_pageserver_restart.py index 759e845927..4ce53df214 100644 --- a/test_runner/regress/test_pageserver_restart.py +++ b/test_runner/regress/test_pageserver_restart.py @@ -163,11 +163,6 @@ def test_pageserver_chaos( env = neon_env_builder.init_start(initial_tenant_shard_count=shard_count) - # these can happen, if we shutdown at a good time. to be fixed as part of #5172. - message = ".*duplicated L1 layer layer=.*" - for ps in env.pageservers: - ps.allowed_errors.append(message) - # Use a tiny checkpoint distance, to create a lot of layers quickly. # That allows us to stress the compaction and layer flushing logic more. tenant, _ = env.neon_cli.create_tenant( diff --git a/test_runner/regress/test_pageserver_secondary.py b/test_runner/regress/test_pageserver_secondary.py index 9b9bdb2b08..5bfa9cce8c 100644 --- a/test_runner/regress/test_pageserver_secondary.py +++ b/test_runner/regress/test_pageserver_secondary.py @@ -100,10 +100,6 @@ def test_location_conf_churn(neon_env_builder: NeonEnvBuilder, seed: int): ] ) - # these can happen, if we shutdown at a good time. to be fixed as part of #5172. - message = ".*duplicated L1 layer layer=.*" - ps.allowed_errors.append(message) - workload = Workload(env, tenant_id, timeline_id) workload.init(env.pageservers[0].id) workload.write_rows(256, env.pageservers[0].id) From 3d6e389aa2d04baf6adc60584951811df77da8c7 Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Tue, 4 Jun 2024 19:36:22 +0300 Subject: [PATCH 134/220] feat: support changing IndexPart::metadata_bytes to json in future release (#7693) ## Problem Currently we serialize the `TimelineMetadata` into bytes to put it into `index_part.json`. This `Vec` (hopefully `[u8; 512]`) representation was chosen because of problems serializing TimelineId and Lsn between different serializers (bincode, json). After #5335, the serialization of those types became serialization format aware or format agnostic. We've removed the pageserver local `metadata` file writing in #6769. ## Summary of changes Allow switching from the current serialization format to plain JSON for the legacy TimelineMetadata format in the future by adding a competitive serialization method to the current one (`crate::tenant::metadata::modern_serde`), which accepts both old bytes and new plain JSON. 
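To make the dual-format behaviour concrete, here is a minimal standalone sketch of the same trick with hypothetical `Metadata`/`IndexDoc` types (not the real `TimelineMetadata`/`IndexPart`), assuming the `serde` (with derive) and `serde_json` crates: a custom deserializer looks at what is actually in the JSON and accepts either the legacy byte-array encoding or the new structured object.

```rust
use serde::de::{self, Deserializer, MapAccess, SeqAccess, Visitor};
use serde::Deserialize;
use std::fmt;

#[derive(Debug, PartialEq, Deserialize)]
struct Metadata {
    disk_consistent_lsn: String,
    pg_version: u32,
}

impl Metadata {
    /// Stand-in for the legacy fixed-size binary encoding.
    fn from_legacy_bytes(bytes: &[u8]) -> Result<Self, String> {
        let text = std::str::from_utf8(bytes).map_err(|e| e.to_string())?;
        let (lsn, version) = text.split_once(';').ok_or("malformed legacy blob")?;
        Ok(Metadata {
            disk_consistent_lsn: lsn.to_string(),
            pg_version: version.parse::<u32>().map_err(|e| e.to_string())?,
        })
    }
}

/// Accept either the old byte-array representation or the new JSON object.
fn dual_format<'de, D: Deserializer<'de>>(deserializer: D) -> Result<Metadata, D::Error> {
    struct DualVisitor;

    impl<'de> Visitor<'de> for DualVisitor {
        type Value = Metadata;

        fn expecting(&self, f: &mut fmt::Formatter) -> fmt::Result {
            f.write_str("legacy byte array or structured metadata object")
        }

        // Old files: the field is a JSON array of numbers (the serialized bytes).
        fn visit_seq<A: SeqAccess<'de>>(self, seq: A) -> Result<Metadata, A::Error> {
            let bytes = Vec::<u8>::deserialize(de::value::SeqAccessDeserializer::new(seq))?;
            Metadata::from_legacy_bytes(&bytes).map_err(de::Error::custom)
        }

        // New files: the field is a plain JSON object.
        fn visit_map<A: MapAccess<'de>>(self, map: A) -> Result<Metadata, A::Error> {
            Metadata::deserialize(de::value::MapAccessDeserializer::new(map))
        }
    }

    deserializer.deserialize_any(DualVisitor)
}

#[derive(Debug, Deserialize)]
struct IndexDoc {
    #[serde(rename = "metadata_bytes", deserialize_with = "dual_format")]
    metadata: Metadata,
}

fn main() {
    // Legacy form: the bytes of "0/15FD5D8;15".
    let legacy = r#"{"metadata_bytes":[48,47,49,53,70,68,53,68,56,59,49,53]}"#;
    // New form: structured JSON.
    let modern = r#"{"metadata_bytes":{"disk_consistent_lsn":"0/15FD5D8","pg_version":15}}"#;

    let a: IndexDoc = serde_json::from_str(legacy).unwrap();
    let b: IndexDoc = serde_json::from_str(modern).unwrap();
    assert_eq!(a.metadata, b.metadata);
    println!("both forms decode to {:?}", a.metadata);
}
```

The real `modern_serde` module does the equivalent for `TimelineMetadata`, and additionally recomputes the crc32 header so equality checks keep working across the two representations.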
The benefits of this are that dumping the index_part.json with pretty printing no longer produces more than 500 lines of output, but after enabling it produces lines only proportional to the layer count, like: ```json { "version": ???, "layer_metadata": { ... }, "disk_consistent_lsn": "0/15FD5D8", "legacy_metadata": { "disk_consistent_lsn": "0/15FD5D8", "prev_record_lsn": "0/15FD5A0", "ancestor_timeline": null, "ancestor_lsn": "0/0", "latest_gc_cutoff_lsn": "0/149FD18", "initdb_lsn": "0/149FD18", "pg_version": 15 } } ``` In the future, I propose we completely stop using this legacy metadata type and wasting time trying to come up with another version numbering scheme in addition to the informative-only one already found in `index_part.json`, and go ahead with storing metadata or feature flags on the `index_part.json` itself. #7699 is the "one release after" changes which starts to produce metadata in the index_part.json as json. --- pageserver/src/tenant/metadata.rs | 158 +++++++++++++++++- .../tenant/remote_timeline_client/index.rs | 6 +- 2 files changed, 159 insertions(+), 5 deletions(-) diff --git a/pageserver/src/tenant/metadata.rs b/pageserver/src/tenant/metadata.rs index fc71ea7642..c00672895a 100644 --- a/pageserver/src/tenant/metadata.rs +++ b/pageserver/src/tenant/metadata.rs @@ -267,7 +267,7 @@ impl<'de> Deserialize<'de> for TimelineMetadata { D: serde::Deserializer<'de>, { let bytes = Vec::::deserialize(deserializer)?; - Self::from_bytes(bytes.as_slice()).map_err(|e| D::Error::custom(format!("{e}"))) + Self::from_bytes(bytes.as_slice()).map_err(D::Error::custom) } } @@ -276,13 +276,163 @@ impl Serialize for TimelineMetadata { where S: Serializer, { - let bytes = self - .to_bytes() - .map_err(|e| serde::ser::Error::custom(format!("{e}")))?; + let bytes = self.to_bytes().map_err(serde::ser::Error::custom)?; bytes.serialize(serializer) } } +pub(crate) mod modern_serde { + use crate::tenant::metadata::METADATA_FORMAT_VERSION; + + use super::{ + TimelineMetadata, TimelineMetadataBodyV2, TimelineMetadataHeader, METADATA_HDR_SIZE, + }; + use serde::{Deserialize, Serialize}; + + pub(crate) fn deserialize<'de, D>(deserializer: D) -> Result + where + D: serde::de::Deserializer<'de>, + { + // for legacy reasons versions 1-5 had TimelineMetadata serialized as a Vec field with + // BeSer. + struct Visitor; + + impl<'d> serde::de::Visitor<'d> for Visitor { + type Value = TimelineMetadata; + + fn expecting(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + f.write_str("BeSer bytes or json structure") + } + + fn visit_seq(self, seq: A) -> Result + where + A: serde::de::SeqAccess<'d>, + { + use serde::de::Error; + let de = serde::de::value::SeqAccessDeserializer::new(seq); + Vec::::deserialize(de) + .map(|v| TimelineMetadata::from_bytes(&v).map_err(A::Error::custom))? 
+ } + + fn visit_map(self, map: A) -> Result + where + A: serde::de::MapAccess<'d>, + { + use serde::de::Error; + + let de = serde::de::value::MapAccessDeserializer::new(map); + let body = TimelineMetadataBodyV2::deserialize(de)?; + + // jump through hoops to calculate the crc32 so that TimelineMetadata::ne works + // across serialization versions + let mut sink = Crc32Sink::default(); + ::ser_into(&body, &mut sink) + .map_err(|e| A::Error::custom(Crc32CalculationFailed(e)))?; + + let size = METADATA_HDR_SIZE + sink.count; + + Ok(TimelineMetadata { + hdr: TimelineMetadataHeader { + checksum: sink.crc, + size: size as u16, + format_version: METADATA_FORMAT_VERSION, + }, + body, + }) + } + } + + deserializer.deserialize_any(Visitor) + } + + #[derive(Default)] + struct Crc32Sink { + crc: u32, + count: usize, + } + + impl std::io::Write for Crc32Sink { + fn write(&mut self, buf: &[u8]) -> std::io::Result { + self.crc = crc32c::crc32c_append(self.crc, buf); + self.count += buf.len(); + Ok(buf.len()) + } + + fn flush(&mut self) -> std::io::Result<()> { + Ok(()) + } + } + + #[derive(thiserror::Error)] + #[error("re-serializing for crc32 failed")] + struct Crc32CalculationFailed(#[source] E); + + // this should be true for one release, after that we can change it to false + // remember to check the IndexPart::metadata field TODO comment as well + const LEGACY_BINCODED_BYTES: bool = true; + + #[derive(serde::Serialize)] + #[serde(transparent)] + struct LegacyPaddedBytes<'a>(&'a TimelineMetadata); + + struct JustTheBodyV2<'a>(&'a TimelineMetadata); + + impl serde::Serialize for JustTheBodyV2<'_> { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + // header is not needed, upon reading we've upgraded all v1 to v2 + self.0.body.serialize(serializer) + } + } + + pub(crate) fn serialize( + metadata: &TimelineMetadata, + serializer: S, + ) -> Result + where + S: serde::Serializer, + { + // we cannot use TimelineMetadata::serialize for now because it'll do + // TimelineMetadata::to_bytes + if LEGACY_BINCODED_BYTES { + LegacyPaddedBytes(metadata).serialize(serializer) + } else { + JustTheBodyV2(metadata).serialize(serializer) + } + } + + #[test] + fn deserializes_bytes_as_well_as_equivalent_body_v2() { + #[derive(serde::Deserialize, serde::Serialize)] + struct Wrapper(#[serde(deserialize_with = "deserialize")] TimelineMetadata); + + let too_many_bytes = "[216,111,252,208,0,54,0,4,0,0,0,0,1,73,253,144,1,0,0,0,0,1,73,253,24,0,0,0,0,0,0,0,0,0,0,0,0,0,1,73,253,24,0,0,0,0,1,73,253,24,0,0,0,15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]"; + + let wrapper_from_bytes = serde_json::from_str::(too_many_bytes).unwrap(); + + let 
serialized = serde_json::to_value(JustTheBodyV2(&wrapper_from_bytes.0)).unwrap(); + + assert_eq!( + serialized, + serde_json::json! {{ + "disk_consistent_lsn": "0/149FD90", + "prev_record_lsn": "0/149FD18", + "ancestor_timeline": null, + "ancestor_lsn": "0/0", + "latest_gc_cutoff_lsn": "0/149FD18", + "initdb_lsn": "0/149FD18", + "pg_version": 15 + }} + ); + + let wrapper_from_json = serde_json::value::from_value::(serialized).unwrap(); + + assert_eq!(wrapper_from_bytes.0, wrapper_from_json.0); + } +} + /// Parts of the metadata which are regularly modified. pub(crate) struct MetadataUpdate { disk_consistent_lsn: Lsn, diff --git a/pageserver/src/tenant/remote_timeline_client/index.rs b/pageserver/src/tenant/remote_timeline_client/index.rs index 6494261312..7d2e9b9a91 100644 --- a/pageserver/src/tenant/remote_timeline_client/index.rs +++ b/pageserver/src/tenant/remote_timeline_client/index.rs @@ -43,7 +43,11 @@ pub struct IndexPart { // private because internally we would read from metadata instead. pub(super) disk_consistent_lsn: Lsn, - #[serde(rename = "metadata_bytes")] + // TODO: later make this "rename" to "alias", rename field as "legacy_metadata" + #[serde( + rename = "metadata_bytes", + with = "crate::tenant::metadata::modern_serde" + )] pub metadata: TimelineMetadata, #[serde(default)] From 1a8d53ab9d8e3a3e0c4ea147ea4992e91a4ef9b6 Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Tue, 4 Jun 2024 13:47:48 -0400 Subject: [PATCH 135/220] feat(pageserver): compute aux file size on initial logical size calculation (#7958) close https://github.com/neondatabase/neon/issues/7822 close https://github.com/neondatabase/neon/issues/7443 Aux file metrics is computed incrementally. If the size is not initialized, the metrics will never show up. This pull request adds the functionality to compute the aux file size on initial logical size calculation. 
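To make the incremental bookkeeping concrete, here is a minimal sketch (not the pageserver's actual types) of the pattern this change relies on: incremental deltas only become meaningful once an absolute size has been seeded, which is what the new `on_initial` call provides during initial logical size calculation. The `on_delta` and `report` names below are illustrative placeholders.

```rust
use std::sync::Mutex;

/// Simplified stand-in for the aux file size estimator.
struct AuxFileSizeEstimator {
    /// None until an absolute size has been computed at least once.
    size: Mutex<Option<isize>>,
}

impl AuxFileSizeEstimator {
    fn new() -> Self {
        Self { size: Mutex::new(None) }
    }

    /// Seed the absolute size (base backup or initial logical size calculation).
    fn on_initial(&self, new_size: usize) {
        let mut guard = self.size.lock().unwrap();
        *guard = Some(new_size as isize);
        self.report(new_size as isize);
    }

    /// Apply an incremental delta; a no-op until the size has been seeded,
    /// which is why the metric never showed up without the initial computation.
    fn on_delta(&self, delta: isize) {
        let mut guard = self.size.lock().unwrap();
        if let Some(size) = guard.as_mut() {
            *size += delta;
            self.report(*size);
        }
    }

    fn report(&self, size: isize) {
        // In the pageserver this would update a metrics gauge.
        println!("aux_file_size = {size}");
    }
}

fn main() {
    let estimator = AuxFileSizeEstimator::new();
    estimator.on_delta(128);    // ignored: size not yet initialized
    estimator.on_initial(4096); // seeded during initial logical size calculation
    estimator.on_delta(-512);   // now tracked incrementally
}
```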
Signed-off-by: Alex Chi Z --- pageserver/src/aux_file.rs | 3 ++- pageserver/src/pgdatadir_mapping.rs | 14 +++++++++++++- pageserver/src/tenant/timeline.rs | 16 ++++++++++------ 3 files changed, 25 insertions(+), 8 deletions(-) diff --git a/pageserver/src/aux_file.rs b/pageserver/src/aux_file.rs index 38e1875db1..5e527b7d61 100644 --- a/pageserver/src/aux_file.rs +++ b/pageserver/src/aux_file.rs @@ -178,7 +178,8 @@ impl AuxFileSizeEstimator { } } - pub fn on_base_backup(&self, new_size: usize) { + /// When generating base backup or doing initial logical size calculation + pub fn on_initial(&self, new_size: usize) { let mut guard = self.size.lock().unwrap(); *guard = Some(new_size as isize); self.report(new_size as isize); diff --git a/pageserver/src/pgdatadir_mapping.rs b/pageserver/src/pgdatadir_mapping.rs index 5eaf80bdaf..0bff4be150 100644 --- a/pageserver/src/pgdatadir_mapping.rs +++ b/pageserver/src/pgdatadir_mapping.rs @@ -718,10 +718,22 @@ impl Timeline { result.insert(fname, content); } } - self.aux_file_size_estimator.on_base_backup(sz); + self.aux_file_size_estimator.on_initial(sz); Ok(result) } + pub(crate) async fn trigger_aux_file_size_computation( + &self, + lsn: Lsn, + ctx: &RequestContext, + ) -> Result<(), PageReconstructError> { + let current_policy = self.last_aux_file_policy.load(); + if let Some(AuxFilePolicy::V2) | Some(AuxFilePolicy::CrossValidation) = current_policy { + self.list_aux_files_v2(lsn, ctx).await?; + } + Ok(()) + } + pub(crate) async fn list_aux_files( &self, lsn: Lsn, diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 35e6d1f92f..4c46c4e635 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -2787,17 +2787,21 @@ impl Timeline { crate::metrics::initial_logical_size::START_CALCULATION.retry(circumstances) }; - match self_ref + let calculated_size = self_ref .logical_size_calculation_task( initial_part_end, LogicalSizeCalculationCause::Initial, background_ctx, ) - .await - { - Ok(calculated_size) => Ok((calculated_size, metrics_guard)), - Err(e) => Err(e), - } + .await?; + + self_ref + .trigger_aux_file_size_computation(initial_part_end, background_ctx) + .await?; + + // TODO: add aux file size to logical size + + Ok((calculated_size, metrics_guard)) } }; From 85ef6b16459bc344756f25ef62dd90591231242d Mon Sep 17 00:00:00 2001 From: Peter Bendel Date: Wed, 5 Jun 2024 10:32:03 +0200 Subject: [PATCH 136/220] upgrade pgvector from 0.7.0 to 0.7.1 (#7954) ## Problem ## Summary of changes performance improvements in pgvector 0.7.1 for hnsw index builds, see https://github.com/pgvector/pgvector/issues/570 --- Dockerfile.compute-node | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile.compute-node b/Dockerfile.compute-node index db3734047e..90b8868b43 100644 --- a/Dockerfile.compute-node +++ b/Dockerfile.compute-node @@ -246,8 +246,8 @@ COPY patches/pgvector.patch /pgvector.patch # By default, pgvector Makefile uses `-march=native`. We don't want that, # because we build the images on different machines than where we run them. # Pass OPTFLAGS="" to remove it. 
-RUN wget https://github.com/pgvector/pgvector/archive/refs/tags/v0.7.0.tar.gz -O pgvector.tar.gz && \ - echo "1b5503a35c265408b6eb282621c5e1e75f7801afc04eecb950796cfee2e3d1d8 pgvector.tar.gz" | sha256sum --check && \ +RUN wget https://github.com/pgvector/pgvector/archive/refs/tags/v0.7.1.tar.gz -O pgvector.tar.gz && \ + echo "fe6c8cb4e0cd1a8cb60f5badf9e1701e0fcabcfc260931c26d01e155c4dd21d1 pgvector.tar.gz" | sha256sum --check && \ mkdir pgvector-src && cd pgvector-src && tar xzf ../pgvector.tar.gz --strip-components=1 -C . && \ patch -p1 < /pgvector.patch && \ make -j $(getconf _NPROCESSORS_ONLN) OPTFLAGS="" PG_CONFIG=/usr/local/pgsql/bin/pg_config && \ From 83ab14e27119ffdfef6ce0f5cd883b847de8c24a Mon Sep 17 00:00:00 2001 From: Christian Schwarz Date: Wed, 5 Jun 2024 14:21:10 +0200 Subject: [PATCH 137/220] chore!: remove walredo_process_kind config option & kind type (#7756) refs https://github.com/neondatabase/neon/issues/7753 Preceding PR https://github.com/neondatabase/neon/pull/7754 laid out the plan, this one wraps it up. --- pageserver/src/config.rs | 21 - pageserver/src/walredo.rs | 48 ++- pageserver/src/walredo/process.rs | 397 +++++++++++++++--- .../process/process_impl/process_async.rs | 374 ----------------- 4 files changed, 374 insertions(+), 466 deletions(-) delete mode 100644 pageserver/src/walredo/process/process_impl/process_async.rs diff --git a/pageserver/src/config.rs b/pageserver/src/config.rs index b0afb6414b..b4a0d1ac02 100644 --- a/pageserver/src/config.rs +++ b/pageserver/src/config.rs @@ -99,8 +99,6 @@ pub mod defaults { pub const DEFAULT_EPHEMERAL_BYTES_PER_MEMORY_KB: usize = 0; - pub const DEFAULT_WALREDO_PROCESS_KIND: &str = "async"; - /// /// Default built-in configuration file. /// @@ -146,8 +144,6 @@ pub mod defaults { #validate_vectored_get = '{DEFAULT_VALIDATE_VECTORED_GET}' -#walredo_process_kind = '{DEFAULT_WALREDO_PROCESS_KIND}' - [tenant_config] #checkpoint_distance = {DEFAULT_CHECKPOINT_DISTANCE} # in bytes #checkpoint_timeout = {DEFAULT_CHECKPOINT_TIMEOUT} @@ -300,8 +296,6 @@ pub struct PageServerConf { /// /// Setting this to zero disables limits on total ephemeral layer size. pub ephemeral_bytes_per_memory_kb: usize, - - pub walredo_process_kind: crate::walredo::ProcessKind, } /// We do not want to store this in a PageServerConf because the latter may be logged @@ -407,8 +401,6 @@ struct PageServerConfigBuilder { validate_vectored_get: BuilderValue, ephemeral_bytes_per_memory_kb: BuilderValue, - - walredo_process_kind: BuilderValue, } impl PageServerConfigBuilder { @@ -497,8 +489,6 @@ impl PageServerConfigBuilder { )), validate_vectored_get: Set(DEFAULT_VALIDATE_VECTORED_GET), ephemeral_bytes_per_memory_kb: Set(DEFAULT_EPHEMERAL_BYTES_PER_MEMORY_KB), - - walredo_process_kind: Set(DEFAULT_WALREDO_PROCESS_KIND.parse().unwrap()), } } } @@ -686,10 +676,6 @@ impl PageServerConfigBuilder { self.ephemeral_bytes_per_memory_kb = BuilderValue::Set(value); } - pub fn get_walredo_process_kind(&mut self, value: crate::walredo::ProcessKind) { - self.walredo_process_kind = BuilderValue::Set(value); - } - pub fn build(self) -> anyhow::Result { let default = Self::default_values(); @@ -747,7 +733,6 @@ impl PageServerConfigBuilder { max_vectored_read_bytes, validate_vectored_get, ephemeral_bytes_per_memory_kb, - walredo_process_kind, } CUSTOM LOGIC { @@ -1044,9 +1029,6 @@ impl PageServerConf { "ephemeral_bytes_per_memory_kb" => { builder.get_ephemeral_bytes_per_memory_kb(parse_toml_u64("ephemeral_bytes_per_memory_kb", item)? 
as usize) } - "walredo_process_kind" => { - builder.get_walredo_process_kind(parse_toml_from_str("walredo_process_kind", item)?) - } _ => bail!("unrecognized pageserver option '{key}'"), } } @@ -1130,7 +1112,6 @@ impl PageServerConf { ), validate_vectored_get: defaults::DEFAULT_VALIDATE_VECTORED_GET, ephemeral_bytes_per_memory_kb: defaults::DEFAULT_EPHEMERAL_BYTES_PER_MEMORY_KB, - walredo_process_kind: defaults::DEFAULT_WALREDO_PROCESS_KIND.parse().unwrap(), } } } @@ -1370,7 +1351,6 @@ background_task_maximum_delay = '334 s' ), validate_vectored_get: defaults::DEFAULT_VALIDATE_VECTORED_GET, ephemeral_bytes_per_memory_kb: defaults::DEFAULT_EPHEMERAL_BYTES_PER_MEMORY_KB, - walredo_process_kind: defaults::DEFAULT_WALREDO_PROCESS_KIND.parse().unwrap(), }, "Correct defaults should be used when no config values are provided" ); @@ -1444,7 +1424,6 @@ background_task_maximum_delay = '334 s' ), validate_vectored_get: defaults::DEFAULT_VALIDATE_VECTORED_GET, ephemeral_bytes_per_memory_kb: defaults::DEFAULT_EPHEMERAL_BYTES_PER_MEMORY_KB, - walredo_process_kind: defaults::DEFAULT_WALREDO_PROCESS_KIND.parse().unwrap(), }, "Should be able to parse all basic config values correctly" ); diff --git a/pageserver/src/walredo.rs b/pageserver/src/walredo.rs index d660b68a34..d562540bde 100644 --- a/pageserver/src/walredo.rs +++ b/pageserver/src/walredo.rs @@ -20,7 +20,6 @@ /// Process lifecycle and abstracction for the IPC protocol. mod process; -pub use process::Kind as ProcessKind; /// Code to apply [`NeonWalRecord`]s. pub(crate) mod apply_neon; @@ -54,7 +53,7 @@ pub struct PostgresRedoManager { tenant_shard_id: TenantShardId, conf: &'static PageServerConf, last_redo_at: std::sync::Mutex>, - /// The current [`process::Process`] that is used by new redo requests. + /// The current [`process::WalRedoProcess`] that is used by new redo requests. /// We use [`heavier_once_cell`] for coalescing the spawning, but the redo /// requests don't use the [`heavier_once_cell::Guard`] to keep ahold of the /// their process object; we use [`Arc::clone`] for that. @@ -66,7 +65,7 @@ pub struct PostgresRedoManager { /// still be using the old redo process. But, those other tasks will most likely /// encounter an error as well, and errors are an unexpected condition anyway. /// So, probably we could get rid of the `Arc` in the future. 
- redo_process: heavier_once_cell::OnceCell>, + redo_process: heavier_once_cell::OnceCell>, } /// @@ -211,26 +210,31 @@ impl PostgresRedoManager { const MAX_RETRY_ATTEMPTS: u32 = 1; let mut n_attempts = 0u32; loop { - let proc: Arc = match self.redo_process.get_or_init_detached().await { - Ok(guard) => Arc::clone(&guard), - Err(permit) => { - // don't hold poison_guard, the launch code can bail - let start = Instant::now(); - let proc = Arc::new( - process::Process::launch(self.conf, self.tenant_shard_id, pg_version) + let proc: Arc = + match self.redo_process.get_or_init_detached().await { + Ok(guard) => Arc::clone(&guard), + Err(permit) => { + // don't hold poison_guard, the launch code can bail + let start = Instant::now(); + let proc = Arc::new( + process::WalRedoProcess::launch( + self.conf, + self.tenant_shard_id, + pg_version, + ) .context("launch walredo process")?, - ); - let duration = start.elapsed(); - WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM.observe(duration.as_secs_f64()); - info!( - duration_ms = duration.as_millis(), - pid = proc.id(), - "launched walredo process" - ); - self.redo_process.set(Arc::clone(&proc), permit); - proc - } - }; + ); + let duration = start.elapsed(); + WAL_REDO_PROCESS_LAUNCH_DURATION_HISTOGRAM.observe(duration.as_secs_f64()); + info!( + duration_ms = duration.as_millis(), + pid = proc.id(), + "launched walredo process" + ); + self.redo_process.set(Arc::clone(&proc), permit); + proc + } + }; let started_at = std::time::Instant::now(); diff --git a/pageserver/src/walredo/process.rs b/pageserver/src/walredo/process.rs index 02c9c04bf1..5b0af334ee 100644 --- a/pageserver/src/walredo/process.rs +++ b/pageserver/src/walredo/process.rs @@ -1,64 +1,184 @@ -/// Layer of indirection previously used to support multiple implementations. -/// Subject to removal: -use std::time::Duration; - -use bytes::Bytes; -use pageserver_api::{reltag::RelTag, shard::TenantShardId}; -use tracing::warn; -use utils::lsn::Lsn; - -use crate::{config::PageServerConf, walrecord::NeonWalRecord}; - mod no_leak_child; /// The IPC protocol that pageserver and walredo process speak over their shared pipe. mod protocol; -mod process_impl { - pub(super) mod process_async; +use self::no_leak_child::NoLeakChild; +use crate::{ + config::PageServerConf, + metrics::{WalRedoKillCause, WAL_REDO_PROCESS_COUNTERS, WAL_REDO_RECORD_COUNTER}, + walrecord::NeonWalRecord, +}; +use anyhow::Context; +use bytes::Bytes; +use pageserver_api::{reltag::RelTag, shard::TenantShardId}; +use postgres_ffi::BLCKSZ; +#[cfg(feature = "testing")] +use std::sync::atomic::AtomicUsize; +use std::{ + collections::VecDeque, + process::{Command, Stdio}, + time::Duration, +}; +use tokio::io::{AsyncReadExt, AsyncWriteExt}; +use tracing::{debug, error, instrument, Instrument}; +use utils::{lsn::Lsn, poison::Poison}; + +pub struct WalRedoProcess { + #[allow(dead_code)] + conf: &'static PageServerConf, + tenant_shard_id: TenantShardId, + // Some() on construction, only becomes None on Drop. + child: Option, + stdout: tokio::sync::Mutex>, + stdin: tokio::sync::Mutex>, + /// Counter to separate same sized walredo inputs failing at the same millisecond. 
+ #[cfg(feature = "testing")] + dump_sequence: AtomicUsize, } -#[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - strum_macros::EnumString, - strum_macros::Display, - strum_macros::IntoStaticStr, - serde_with::DeserializeFromStr, - serde_with::SerializeDisplay, -)] -#[strum(serialize_all = "kebab-case")] -#[repr(u8)] -pub enum Kind { - Sync, - Async, +struct ProcessInput { + stdin: tokio::process::ChildStdin, + n_requests: usize, } -pub(crate) struct Process(process_impl::process_async::WalRedoProcess); +struct ProcessOutput { + stdout: tokio::process::ChildStdout, + pending_responses: VecDeque>, + n_processed_responses: usize, +} -impl Process { - #[inline(always)] - pub fn launch( +impl WalRedoProcess { + // + // Start postgres binary in special WAL redo mode. + // + #[instrument(skip_all,fields(pg_version=pg_version))] + pub(crate) fn launch( conf: &'static PageServerConf, tenant_shard_id: TenantShardId, pg_version: u32, ) -> anyhow::Result { - if conf.walredo_process_kind != Kind::Async { - warn!( - configured = %conf.walredo_process_kind, - "the walredo_process_kind setting has been turned into a no-op, using async implementation" - ); - } - Ok(Self(process_impl::process_async::WalRedoProcess::launch( + crate::span::debug_assert_current_span_has_tenant_id(); + + let pg_bin_dir_path = conf.pg_bin_dir(pg_version).context("pg_bin_dir")?; // TODO these should be infallible. + let pg_lib_dir_path = conf.pg_lib_dir(pg_version).context("pg_lib_dir")?; + + use no_leak_child::NoLeakChildCommandExt; + // Start postgres itself + let child = Command::new(pg_bin_dir_path.join("postgres")) + // the first arg must be --wal-redo so the child process enters into walredo mode + .arg("--wal-redo") + // the child doesn't process this arg, but, having it in the argv helps indentify the + // walredo process for a particular tenant when debugging a pagserver + .args(["--tenant-shard-id", &format!("{tenant_shard_id}")]) + .stdin(Stdio::piped()) + .stderr(Stdio::piped()) + .stdout(Stdio::piped()) + .env_clear() + .env("LD_LIBRARY_PATH", &pg_lib_dir_path) + .env("DYLD_LIBRARY_PATH", &pg_lib_dir_path) + // NB: The redo process is not trusted after we sent it the first + // walredo work. Before that, it is trusted. Specifically, we trust + // it to + // 1. close all file descriptors except stdin, stdout, stderr because + // pageserver might not be 100% diligent in setting FD_CLOEXEC on all + // the files it opens, and + // 2. to use seccomp to sandbox itself before processing the first + // walredo request. + .spawn_no_leak_child(tenant_shard_id) + .context("spawn process")?; + WAL_REDO_PROCESS_COUNTERS.started.inc(); + let mut child = scopeguard::guard(child, |child| { + error!("killing wal-redo-postgres process due to a problem during launch"); + child.kill_and_wait(WalRedoKillCause::Startup); + }); + + let stdin = child.stdin.take().unwrap(); + let stdout = child.stdout.take().unwrap(); + let stderr = child.stderr.take().unwrap(); + let stderr = tokio::process::ChildStderr::from_std(stderr) + .context("convert to tokio::ChildStderr")?; + let stdin = + tokio::process::ChildStdin::from_std(stdin).context("convert to tokio::ChildStdin")?; + let stdout = tokio::process::ChildStdout::from_std(stdout) + .context("convert to tokio::ChildStdout")?; + + // all fallible operations post-spawn are complete, so get rid of the guard + let child = scopeguard::ScopeGuard::into_inner(child); + + tokio::spawn( + async move { + scopeguard::defer! 
{ + debug!("wal-redo-postgres stderr_logger_task finished"); + crate::metrics::WAL_REDO_PROCESS_COUNTERS.active_stderr_logger_tasks_finished.inc(); + } + debug!("wal-redo-postgres stderr_logger_task started"); + crate::metrics::WAL_REDO_PROCESS_COUNTERS.active_stderr_logger_tasks_started.inc(); + + use tokio::io::AsyncBufReadExt; + let mut stderr_lines = tokio::io::BufReader::new(stderr); + let mut buf = Vec::new(); + let res = loop { + buf.clear(); + // TODO we don't trust the process to cap its stderr length. + // Currently it can do unbounded Vec allocation. + match stderr_lines.read_until(b'\n', &mut buf).await { + Ok(0) => break Ok(()), // eof + Ok(num_bytes) => { + let output = String::from_utf8_lossy(&buf[..num_bytes]); + error!(%output, "received output"); + } + Err(e) => { + break Err(e); + } + } + }; + match res { + Ok(()) => (), + Err(e) => { + error!(error=?e, "failed to read from walredo stderr"); + } + } + }.instrument(tracing::info_span!(parent: None, "wal-redo-postgres-stderr", pid = child.id(), tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %pg_version)) + ); + + Ok(Self { conf, tenant_shard_id, - pg_version, - )?)) + child: Some(child), + stdin: tokio::sync::Mutex::new(Poison::new( + "stdin", + ProcessInput { + stdin, + n_requests: 0, + }, + )), + stdout: tokio::sync::Mutex::new(Poison::new( + "stdout", + ProcessOutput { + stdout, + pending_responses: VecDeque::new(), + n_processed_responses: 0, + }, + )), + #[cfg(feature = "testing")] + dump_sequence: AtomicUsize::default(), + }) } - #[inline(always)] + pub(crate) fn id(&self) -> u32 { + self.child + .as_ref() + .expect("must not call this during Drop") + .id() + } + + /// Apply given WAL records ('records') over an old page image. Returns + /// new page image. + /// + /// # Cancel-Safety + /// + /// Cancellation safe. + #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), pid=%self.id()))] pub(crate) async fn apply_wal_records( &self, rel: RelTag, @@ -67,12 +187,191 @@ impl Process { records: &[(Lsn, NeonWalRecord)], wal_redo_timeout: Duration, ) -> anyhow::Result { - self.0 - .apply_wal_records(rel, blknum, base_img, records, wal_redo_timeout) - .await + let tag = protocol::BufferTag { rel, blknum }; + + // Serialize all the messages to send the WAL redo process first. + // + // This could be problematic if there are millions of records to replay, + // but in practice the number of records is usually so small that it doesn't + // matter, and it's better to keep this code simple. + // + // Most requests start with a before-image with BLCKSZ bytes, followed by + // by some other WAL records. Start with a buffer that can hold that + // comfortably. 
+ let mut writebuf: Vec = Vec::with_capacity((BLCKSZ as usize) * 3); + protocol::build_begin_redo_for_block_msg(tag, &mut writebuf); + if let Some(img) = base_img { + protocol::build_push_page_msg(tag, img, &mut writebuf); + } + for (lsn, rec) in records.iter() { + if let NeonWalRecord::Postgres { + will_init: _, + rec: postgres_rec, + } = rec + { + protocol::build_apply_record_msg(*lsn, postgres_rec, &mut writebuf); + } else { + anyhow::bail!("tried to pass neon wal record to postgres WAL redo"); + } + } + protocol::build_get_page_msg(tag, &mut writebuf); + WAL_REDO_RECORD_COUNTER.inc_by(records.len() as u64); + + let Ok(res) = + tokio::time::timeout(wal_redo_timeout, self.apply_wal_records0(&writebuf)).await + else { + anyhow::bail!("WAL redo timed out"); + }; + + if res.is_err() { + // not all of these can be caused by this particular input, however these are so rare + // in tests so capture all. + self.record_and_log(&writebuf); + } + + res } - pub(crate) fn id(&self) -> u32 { - self.0.id() + /// # Cancel-Safety + /// + /// When not polled to completion (e.g. because in `tokio::select!` another + /// branch becomes ready before this future), concurrent and subsequent + /// calls may fail due to [`utils::poison::Poison::check_and_arm`] calls. + /// Dispose of this process instance and create a new one. + async fn apply_wal_records0(&self, writebuf: &[u8]) -> anyhow::Result { + let request_no = { + let mut lock_guard = self.stdin.lock().await; + let mut poison_guard = lock_guard.check_and_arm()?; + let input = poison_guard.data_mut(); + input + .stdin + .write_all(writebuf) + .await + .context("write to walredo stdin")?; + let request_no = input.n_requests; + input.n_requests += 1; + poison_guard.disarm(); + request_no + }; + + // To improve walredo performance we separate sending requests and receiving + // responses. Them are protected by different mutexes (output and input). + // If thread T1, T2, T3 send requests D1, D2, D3 to walredo process + // then there is not warranty that T1 will first granted output mutex lock. + // To address this issue we maintain number of sent requests, number of processed + // responses and ring buffer with pending responses. After sending response + // (under input mutex), threads remembers request number. Then it releases + // input mutex, locks output mutex and fetch in ring buffer all responses until + // its stored request number. The it takes correspondent element from + // pending responses ring buffer and truncate all empty elements from the front, + // advancing processed responses number. + + let mut lock_guard = self.stdout.lock().await; + let mut poison_guard = lock_guard.check_and_arm()?; + let output = poison_guard.data_mut(); + let n_processed_responses = output.n_processed_responses; + while n_processed_responses + output.pending_responses.len() <= request_no { + // We expect the WAL redo process to respond with an 8k page image. We read it + // into this buffer. + let mut resultbuf = vec![0; BLCKSZ.into()]; + output + .stdout + .read_exact(&mut resultbuf) + .await + .context("read walredo stdout")?; + output + .pending_responses + .push_back(Some(Bytes::from(resultbuf))); + } + // Replace our request's response with None in `pending_responses`. + // Then make space in the ring buffer by clearing out any seqence of contiguous + // `None`'s from the front of `pending_responses`. 
+ // NB: We can't pop_front() because other requests' responses because another + // requester might have grabbed the output mutex before us: + // T1: grab input mutex + // T1: send request_no 23 + // T1: release input mutex + // T2: grab input mutex + // T2: send request_no 24 + // T2: release input mutex + // T2: grab output mutex + // T2: n_processed_responses + output.pending_responses.len() <= request_no + // 23 0 24 + // T2: enters poll loop that reads stdout + // T2: put response for 23 into pending_responses + // T2: put response for 24 into pending_resposnes + // pending_responses now looks like this: Front Some(response_23) Some(response_24) Back + // T2: takes its response_24 + // pending_responses now looks like this: Front Some(response_23) None Back + // T2: does the while loop below + // pending_responses now looks like this: Front Some(response_23) None Back + // T2: releases output mutex + // T1: grabs output mutex + // T1: n_processed_responses + output.pending_responses.len() > request_no + // 23 2 23 + // T1: skips poll loop that reads stdout + // T1: takes its response_23 + // pending_responses now looks like this: Front None None Back + // T2: does the while loop below + // pending_responses now looks like this: Front Back + // n_processed_responses now has value 25 + let res = output.pending_responses[request_no - n_processed_responses] + .take() + .expect("we own this request_no, nobody else is supposed to take it"); + while let Some(front) = output.pending_responses.front() { + if front.is_none() { + output.pending_responses.pop_front(); + output.n_processed_responses += 1; + } else { + break; + } + } + poison_guard.disarm(); + Ok(res) + } + + #[cfg(feature = "testing")] + fn record_and_log(&self, writebuf: &[u8]) { + use std::sync::atomic::Ordering; + + let millis = std::time::SystemTime::now() + .duration_since(std::time::SystemTime::UNIX_EPOCH) + .unwrap() + .as_millis(); + + let seq = self.dump_sequence.fetch_add(1, Ordering::Relaxed); + + // these files will be collected to an allure report + let filename = format!("walredo-{millis}-{}-{seq}.walredo", writebuf.len()); + + let path = self.conf.tenant_path(&self.tenant_shard_id).join(&filename); + + use std::io::Write; + let res = std::fs::OpenOptions::new() + .write(true) + .create_new(true) + .read(true) + .open(path) + .and_then(|mut f| f.write_all(writebuf)); + + // trip up allowed_errors + if let Err(e) = res { + tracing::error!(target=%filename, length=writebuf.len(), "failed to write out the walredo errored input: {e}"); + } else { + tracing::error!(filename, "erroring walredo input saved"); + } + } + + #[cfg(not(feature = "testing"))] + fn record_and_log(&self, _: &[u8]) {} +} + +impl Drop for WalRedoProcess { + fn drop(&mut self) { + self.child + .take() + .expect("we only do this once") + .kill_and_wait(WalRedoKillCause::WalRedoProcessDrop); + // no way to wait for stderr_logger_task from Drop because that is async only } } diff --git a/pageserver/src/walredo/process/process_impl/process_async.rs b/pageserver/src/walredo/process/process_impl/process_async.rs deleted file mode 100644 index 262858b033..0000000000 --- a/pageserver/src/walredo/process/process_impl/process_async.rs +++ /dev/null @@ -1,374 +0,0 @@ -use self::no_leak_child::NoLeakChild; -use crate::{ - config::PageServerConf, - metrics::{WalRedoKillCause, WAL_REDO_PROCESS_COUNTERS, WAL_REDO_RECORD_COUNTER}, - walrecord::NeonWalRecord, - walredo::process::{no_leak_child, protocol}, -}; -use anyhow::Context; -use bytes::Bytes; -use 
pageserver_api::{reltag::RelTag, shard::TenantShardId}; -use postgres_ffi::BLCKSZ; -#[cfg(feature = "testing")] -use std::sync::atomic::AtomicUsize; -use std::{ - collections::VecDeque, - process::{Command, Stdio}, - time::Duration, -}; -use tokio::io::{AsyncReadExt, AsyncWriteExt}; -use tracing::{debug, error, instrument, Instrument}; -use utils::{lsn::Lsn, poison::Poison}; - -pub struct WalRedoProcess { - #[allow(dead_code)] - conf: &'static PageServerConf, - tenant_shard_id: TenantShardId, - // Some() on construction, only becomes None on Drop. - child: Option, - stdout: tokio::sync::Mutex>, - stdin: tokio::sync::Mutex>, - /// Counter to separate same sized walredo inputs failing at the same millisecond. - #[cfg(feature = "testing")] - dump_sequence: AtomicUsize, -} - -struct ProcessInput { - stdin: tokio::process::ChildStdin, - n_requests: usize, -} - -struct ProcessOutput { - stdout: tokio::process::ChildStdout, - pending_responses: VecDeque>, - n_processed_responses: usize, -} - -impl WalRedoProcess { - // - // Start postgres binary in special WAL redo mode. - // - #[instrument(skip_all,fields(pg_version=pg_version))] - pub(crate) fn launch( - conf: &'static PageServerConf, - tenant_shard_id: TenantShardId, - pg_version: u32, - ) -> anyhow::Result { - crate::span::debug_assert_current_span_has_tenant_id(); - - let pg_bin_dir_path = conf.pg_bin_dir(pg_version).context("pg_bin_dir")?; // TODO these should be infallible. - let pg_lib_dir_path = conf.pg_lib_dir(pg_version).context("pg_lib_dir")?; - - use no_leak_child::NoLeakChildCommandExt; - // Start postgres itself - let child = Command::new(pg_bin_dir_path.join("postgres")) - // the first arg must be --wal-redo so the child process enters into walredo mode - .arg("--wal-redo") - // the child doesn't process this arg, but, having it in the argv helps indentify the - // walredo process for a particular tenant when debugging a pagserver - .args(["--tenant-shard-id", &format!("{tenant_shard_id}")]) - .stdin(Stdio::piped()) - .stderr(Stdio::piped()) - .stdout(Stdio::piped()) - .env_clear() - .env("LD_LIBRARY_PATH", &pg_lib_dir_path) - .env("DYLD_LIBRARY_PATH", &pg_lib_dir_path) - // NB: The redo process is not trusted after we sent it the first - // walredo work. Before that, it is trusted. Specifically, we trust - // it to - // 1. close all file descriptors except stdin, stdout, stderr because - // pageserver might not be 100% diligent in setting FD_CLOEXEC on all - // the files it opens, and - // 2. to use seccomp to sandbox itself before processing the first - // walredo request. - .spawn_no_leak_child(tenant_shard_id) - .context("spawn process")?; - WAL_REDO_PROCESS_COUNTERS.started.inc(); - let mut child = scopeguard::guard(child, |child| { - error!("killing wal-redo-postgres process due to a problem during launch"); - child.kill_and_wait(WalRedoKillCause::Startup); - }); - - let stdin = child.stdin.take().unwrap(); - let stdout = child.stdout.take().unwrap(); - let stderr = child.stderr.take().unwrap(); - let stderr = tokio::process::ChildStderr::from_std(stderr) - .context("convert to tokio::ChildStderr")?; - let stdin = - tokio::process::ChildStdin::from_std(stdin).context("convert to tokio::ChildStdin")?; - let stdout = tokio::process::ChildStdout::from_std(stdout) - .context("convert to tokio::ChildStdout")?; - - // all fallible operations post-spawn are complete, so get rid of the guard - let child = scopeguard::ScopeGuard::into_inner(child); - - tokio::spawn( - async move { - scopeguard::defer! 
{ - debug!("wal-redo-postgres stderr_logger_task finished"); - crate::metrics::WAL_REDO_PROCESS_COUNTERS.active_stderr_logger_tasks_finished.inc(); - } - debug!("wal-redo-postgres stderr_logger_task started"); - crate::metrics::WAL_REDO_PROCESS_COUNTERS.active_stderr_logger_tasks_started.inc(); - - use tokio::io::AsyncBufReadExt; - let mut stderr_lines = tokio::io::BufReader::new(stderr); - let mut buf = Vec::new(); - let res = loop { - buf.clear(); - // TODO we don't trust the process to cap its stderr length. - // Currently it can do unbounded Vec allocation. - match stderr_lines.read_until(b'\n', &mut buf).await { - Ok(0) => break Ok(()), // eof - Ok(num_bytes) => { - let output = String::from_utf8_lossy(&buf[..num_bytes]); - error!(%output, "received output"); - } - Err(e) => { - break Err(e); - } - } - }; - match res { - Ok(()) => (), - Err(e) => { - error!(error=?e, "failed to read from walredo stderr"); - } - } - }.instrument(tracing::info_span!(parent: None, "wal-redo-postgres-stderr", pid = child.id(), tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %pg_version)) - ); - - Ok(Self { - conf, - tenant_shard_id, - child: Some(child), - stdin: tokio::sync::Mutex::new(Poison::new( - "stdin", - ProcessInput { - stdin, - n_requests: 0, - }, - )), - stdout: tokio::sync::Mutex::new(Poison::new( - "stdout", - ProcessOutput { - stdout, - pending_responses: VecDeque::new(), - n_processed_responses: 0, - }, - )), - #[cfg(feature = "testing")] - dump_sequence: AtomicUsize::default(), - }) - } - - pub(crate) fn id(&self) -> u32 { - self.child - .as_ref() - .expect("must not call this during Drop") - .id() - } - - /// Apply given WAL records ('records') over an old page image. Returns - /// new page image. - /// - /// # Cancel-Safety - /// - /// Cancellation safe. - #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), pid=%self.id()))] - pub(crate) async fn apply_wal_records( - &self, - rel: RelTag, - blknum: u32, - base_img: &Option, - records: &[(Lsn, NeonWalRecord)], - wal_redo_timeout: Duration, - ) -> anyhow::Result { - let tag = protocol::BufferTag { rel, blknum }; - - // Serialize all the messages to send the WAL redo process first. - // - // This could be problematic if there are millions of records to replay, - // but in practice the number of records is usually so small that it doesn't - // matter, and it's better to keep this code simple. - // - // Most requests start with a before-image with BLCKSZ bytes, followed by - // by some other WAL records. Start with a buffer that can hold that - // comfortably. 
- let mut writebuf: Vec = Vec::with_capacity((BLCKSZ as usize) * 3); - protocol::build_begin_redo_for_block_msg(tag, &mut writebuf); - if let Some(img) = base_img { - protocol::build_push_page_msg(tag, img, &mut writebuf); - } - for (lsn, rec) in records.iter() { - if let NeonWalRecord::Postgres { - will_init: _, - rec: postgres_rec, - } = rec - { - protocol::build_apply_record_msg(*lsn, postgres_rec, &mut writebuf); - } else { - anyhow::bail!("tried to pass neon wal record to postgres WAL redo"); - } - } - protocol::build_get_page_msg(tag, &mut writebuf); - WAL_REDO_RECORD_COUNTER.inc_by(records.len() as u64); - - let Ok(res) = - tokio::time::timeout(wal_redo_timeout, self.apply_wal_records0(&writebuf)).await - else { - anyhow::bail!("WAL redo timed out"); - }; - - if res.is_err() { - // not all of these can be caused by this particular input, however these are so rare - // in tests so capture all. - self.record_and_log(&writebuf); - } - - res - } - - /// # Cancel-Safety - /// - /// When not polled to completion (e.g. because in `tokio::select!` another - /// branch becomes ready before this future), concurrent and subsequent - /// calls may fail due to [`utils::poison::Poison::check_and_arm`] calls. - /// Dispose of this process instance and create a new one. - async fn apply_wal_records0(&self, writebuf: &[u8]) -> anyhow::Result { - let request_no = { - let mut lock_guard = self.stdin.lock().await; - let mut poison_guard = lock_guard.check_and_arm()?; - let input = poison_guard.data_mut(); - input - .stdin - .write_all(writebuf) - .await - .context("write to walredo stdin")?; - let request_no = input.n_requests; - input.n_requests += 1; - poison_guard.disarm(); - request_no - }; - - // To improve walredo performance we separate sending requests and receiving - // responses. Them are protected by different mutexes (output and input). - // If thread T1, T2, T3 send requests D1, D2, D3 to walredo process - // then there is not warranty that T1 will first granted output mutex lock. - // To address this issue we maintain number of sent requests, number of processed - // responses and ring buffer with pending responses. After sending response - // (under input mutex), threads remembers request number. Then it releases - // input mutex, locks output mutex and fetch in ring buffer all responses until - // its stored request number. The it takes correspondent element from - // pending responses ring buffer and truncate all empty elements from the front, - // advancing processed responses number. - - let mut lock_guard = self.stdout.lock().await; - let mut poison_guard = lock_guard.check_and_arm()?; - let output = poison_guard.data_mut(); - let n_processed_responses = output.n_processed_responses; - while n_processed_responses + output.pending_responses.len() <= request_no { - // We expect the WAL redo process to respond with an 8k page image. We read it - // into this buffer. - let mut resultbuf = vec![0; BLCKSZ.into()]; - output - .stdout - .read_exact(&mut resultbuf) - .await - .context("read walredo stdout")?; - output - .pending_responses - .push_back(Some(Bytes::from(resultbuf))); - } - // Replace our request's response with None in `pending_responses`. - // Then make space in the ring buffer by clearing out any seqence of contiguous - // `None`'s from the front of `pending_responses`. 
- // NB: We can't pop_front() because other requests' responses because another - // requester might have grabbed the output mutex before us: - // T1: grab input mutex - // T1: send request_no 23 - // T1: release input mutex - // T2: grab input mutex - // T2: send request_no 24 - // T2: release input mutex - // T2: grab output mutex - // T2: n_processed_responses + output.pending_responses.len() <= request_no - // 23 0 24 - // T2: enters poll loop that reads stdout - // T2: put response for 23 into pending_responses - // T2: put response for 24 into pending_resposnes - // pending_responses now looks like this: Front Some(response_23) Some(response_24) Back - // T2: takes its response_24 - // pending_responses now looks like this: Front Some(response_23) None Back - // T2: does the while loop below - // pending_responses now looks like this: Front Some(response_23) None Back - // T2: releases output mutex - // T1: grabs output mutex - // T1: n_processed_responses + output.pending_responses.len() > request_no - // 23 2 23 - // T1: skips poll loop that reads stdout - // T1: takes its response_23 - // pending_responses now looks like this: Front None None Back - // T2: does the while loop below - // pending_responses now looks like this: Front Back - // n_processed_responses now has value 25 - let res = output.pending_responses[request_no - n_processed_responses] - .take() - .expect("we own this request_no, nobody else is supposed to take it"); - while let Some(front) = output.pending_responses.front() { - if front.is_none() { - output.pending_responses.pop_front(); - output.n_processed_responses += 1; - } else { - break; - } - } - poison_guard.disarm(); - Ok(res) - } - - #[cfg(feature = "testing")] - fn record_and_log(&self, writebuf: &[u8]) { - use std::sync::atomic::Ordering; - - let millis = std::time::SystemTime::now() - .duration_since(std::time::SystemTime::UNIX_EPOCH) - .unwrap() - .as_millis(); - - let seq = self.dump_sequence.fetch_add(1, Ordering::Relaxed); - - // these files will be collected to an allure report - let filename = format!("walredo-{millis}-{}-{seq}.walredo", writebuf.len()); - - let path = self.conf.tenant_path(&self.tenant_shard_id).join(&filename); - - use std::io::Write; - let res = std::fs::OpenOptions::new() - .write(true) - .create_new(true) - .read(true) - .open(path) - .and_then(|mut f| f.write_all(writebuf)); - - // trip up allowed_errors - if let Err(e) = res { - tracing::error!(target=%filename, length=writebuf.len(), "failed to write out the walredo errored input: {e}"); - } else { - tracing::error!(filename, "erroring walredo input saved"); - } - } - - #[cfg(not(feature = "testing"))] - fn record_and_log(&self, _: &[u8]) {} -} - -impl Drop for WalRedoProcess { - fn drop(&mut self) { - self.child - .take() - .expect("we only do this once") - .kill_and_wait(WalRedoKillCause::WalRedoProcessDrop); - // no way to wait for stderr_logger_task from Drop because that is async only - } -} From 91dd99038e8e85a29d028fe59e9abd1cc978e415 Mon Sep 17 00:00:00 2001 From: John Spray Date: Wed, 5 Jun 2024 21:22:54 +0100 Subject: [PATCH 138/220] pageserver/controller: enable tenant deletion without attachment (#7957) ## Problem As described in #7952, the controller's attempt to reconcile a tenant before finally deleting it can get hung up waiting for the compute notification hook to accept updates. 
The fact that we try and reconcile a tenant at all during deletion is part of a more general design issue (#5080), where deletion was implemented as an operation on attached tenant, requiring the tenant to be attached in order to delete it, which is not in principle necessary. Closes: #7952 ## Summary of changes - In the pageserver deletion API, only do the traditional deletion path if the tenant is attached. If it's secondary, then tear down the secondary location, and then do a remote delete. If it's not attached at all, just do the remote delete. - In the storage controller, instead of ensuring a tenant is attached before deletion, do a best-effort detach of the tenant, and then call into some arbitrary pageserver to issue a deletion of remote content. The pageserver retains its existing delete behavior when invoked on attached locations. We can remove this later when all users of the API are updated to either do a detach-before-delete. This will enable removing the "weird" code paths during startup that sometimes load a tenant and then immediately delete it, and removing the deletion markers on tenants. --- pageserver/src/http/openapi_spec.yml | 4 +- pageserver/src/http/routes.rs | 11 +- pageserver/src/tenant/mgr.rs | 86 +++++++++++-- storage_controller/src/http.rs | 109 +++++++++-------- storage_controller/src/service.rs | 113 ++++++++++-------- .../regress/test_storage_controller.py | 83 +++++++++++++ test_runner/regress/test_tenant_delete.py | 23 +++- 7 files changed, 312 insertions(+), 117 deletions(-) diff --git a/pageserver/src/http/openapi_spec.yml b/pageserver/src/http/openapi_spec.yml index e5eafc51f4..71b486a4d3 100644 --- a/pageserver/src/http/openapi_spec.yml +++ b/pageserver/src/http/openapi_spec.yml @@ -81,8 +81,10 @@ paths: Attempts to delete specified tenant. 500, 503 and 409 errors should be retried until 404 is retrieved. 404 means that deletion successfully finished" responses: + "200": + description: Tenant was successfully deleted, or was already not found. "404": - description: Tenant not found. This is the success path. + description: Tenant not found. This is a success result, equivalent to 200. content: application/json: schema: diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index 6b6a131c88..7fa6c35ad6 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -1073,7 +1073,7 @@ async fn tenant_delete_handler( let state = get_state(&request); - state + let status = state .tenant_manager .delete_tenant(tenant_shard_id, ACTIVE_TENANT_TIMEOUT) .instrument(info_span!("tenant_delete_handler", @@ -1082,7 +1082,14 @@ async fn tenant_delete_handler( )) .await?; - json_response(StatusCode::ACCEPTED, ()) + // Callers use 404 as success for deletions, for historical reasons. + if status == StatusCode::NOT_FOUND { + return Err(ApiError::NotFound( + anyhow::anyhow!("Deletion complete").into(), + )); + } + + json_response(status, ()) } /// HTTP endpoint to query the current tenant_size of a tenant. 
diff --git a/pageserver/src/tenant/mgr.rs b/pageserver/src/tenant/mgr.rs index 0bb1d750aa..4520bb9295 100644 --- a/pageserver/src/tenant/mgr.rs +++ b/pageserver/src/tenant/mgr.rs @@ -3,6 +3,7 @@ use camino::{Utf8DirEntry, Utf8Path, Utf8PathBuf}; use futures::StreamExt; +use hyper::StatusCode; use itertools::Itertools; use pageserver_api::key::Key; use pageserver_api::models::LocationConfigMode; @@ -54,6 +55,7 @@ use utils::generation::Generation; use utils::id::{TenantId, TimelineId}; use super::delete::DeleteTenantError; +use super::remote_timeline_client::remote_tenant_path; use super::secondary::SecondaryTenant; use super::timeline::detach_ancestor::PreparedTimelineDetach; use super::TenantSharedResources; @@ -1369,7 +1371,7 @@ impl TenantManager { &self, tenant_shard_id: TenantShardId, activation_timeout: Duration, - ) -> Result<(), DeleteTenantError> { + ) -> Result { super::span::debug_assert_current_span_has_tenant_id(); // We acquire a SlotGuard during this function to protect against concurrent // changes while the ::prepare phase of DeleteTenantFlow executes, but then @@ -1382,18 +1384,79 @@ impl TenantManager { // // See https://github.com/neondatabase/neon/issues/5080 - let slot_guard = - tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::MustExist)?; + // Tenant deletion can happen two ways: + // - Legacy: called on an attached location. The attached Tenant object stays alive in Stopping + // state until deletion is complete. + // - New: called on a pageserver without an attached location. We proceed with deletion from + // remote storage. + // + // See https://github.com/neondatabase/neon/issues/5080 for more context on this transition. - // unwrap is safe because we used MustExist mode when acquiring - let tenant = match slot_guard.get_old_value().as_ref().unwrap() { - TenantSlot::Attached(tenant) => tenant.clone(), - _ => { - // Express "not attached" as equivalent to "not found" - return Err(DeleteTenantError::NotAttached); + let slot_guard = tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::Any)?; + match &slot_guard.old_value { + Some(TenantSlot::Attached(tenant)) => { + // Legacy deletion flow: the tenant remains attached, goes to Stopping state, and + // deletion will be resumed across restarts. 
+ let tenant = tenant.clone(); + return self + .delete_tenant_attached(slot_guard, tenant, activation_timeout) + .await; } + Some(TenantSlot::Secondary(secondary_tenant)) => { + secondary_tenant.shutdown().await; + let local_tenant_directory = self.conf.tenant_path(&tenant_shard_id); + let tmp_dir = safe_rename_tenant_dir(&local_tenant_directory) + .await + .with_context(|| { + format!("local tenant directory {local_tenant_directory:?} rename") + })?; + spawn_background_purge(tmp_dir); + } + Some(TenantSlot::InProgress(_)) => unreachable!(), + None => {} }; + // Fall through: local state for this tenant is no longer present, proceed with remote delete + let remote_path = remote_tenant_path(&tenant_shard_id); + let keys = match self + .resources + .remote_storage + .list( + Some(&remote_path), + remote_storage::ListingMode::NoDelimiter, + None, + &self.cancel, + ) + .await + { + Ok(listing) => listing.keys, + Err(remote_storage::DownloadError::Cancelled) => { + return Err(DeleteTenantError::Cancelled) + } + Err(remote_storage::DownloadError::NotFound) => return Ok(StatusCode::NOT_FOUND), + Err(other) => return Err(DeleteTenantError::Other(anyhow::anyhow!(other))), + }; + + if keys.is_empty() { + tracing::info!("Remote storage already deleted"); + } else { + tracing::info!("Deleting {} keys from remote storage", keys.len()); + self.resources + .remote_storage + .delete_objects(&keys, &self.cancel) + .await?; + } + + // Callers use 404 as success for deletions, for historical reasons. + Ok(StatusCode::NOT_FOUND) + } + + async fn delete_tenant_attached( + &self, + slot_guard: SlotGuard, + tenant: Arc, + activation_timeout: Duration, + ) -> Result { match tenant.current_state() { TenantState::Broken { .. } | TenantState::Stopping { .. } => { // If deletion is already in progress, return success (the semantics of this @@ -1403,7 +1466,7 @@ impl TenantManager { // The `delete_progress` lock is held: deletion is already happening // in the bacckground slot_guard.revert(); - return Ok(()); + return Ok(StatusCode::ACCEPTED); } } _ => { @@ -1436,7 +1499,8 @@ impl TenantManager { // The Tenant goes back into the map in Stopping state, it will eventually be removed by DeleteTenantFLow slot_guard.revert(); - result + let () = result?; + Ok(StatusCode::ACCEPTED) } #[instrument(skip_all, fields(tenant_id=%tenant.get_tenant_shard_id().tenant_id, shard_id=%tenant.get_tenant_shard_id().shard_slug(), new_shard_count=%new_shard_count.literal()))] diff --git a/storage_controller/src/http.rs b/storage_controller/src/http.rs index 604ad6fbaa..bbb6d2cb32 100644 --- a/storage_controller/src/http.rs +++ b/storage_controller/src/http.rs @@ -142,52 +142,6 @@ async fn handle_tenant_create( ) } -// For tenant and timeline deletions, which both implement an "initially return 202, then 404 once -// we're done" semantic, we wrap with a retry loop to expose a simpler API upstream. This avoids -// needing to track a "deleting" state for tenants. -async fn deletion_wrapper(service: Arc, f: F) -> Result, ApiError> -where - R: std::future::Future> + Send + 'static, - F: Fn(Arc) -> R + Send + Sync + 'static, -{ - let started_at = Instant::now(); - // To keep deletion reasonably snappy for small tenants, initially check after 1 second if deletion - // completed. - let mut retry_period = Duration::from_secs(1); - // On subsequent retries, wait longer. 
- let max_retry_period = Duration::from_secs(5); - // Enable callers with a 30 second request timeout to reliably get a response - let max_wait = Duration::from_secs(25); - - loop { - let status = f(service.clone()).await?; - match status { - StatusCode::ACCEPTED => { - tracing::info!("Deletion accepted, waiting to try again..."); - tokio::time::sleep(retry_period).await; - retry_period = max_retry_period; - } - StatusCode::NOT_FOUND => { - tracing::info!("Deletion complete"); - return json_response(StatusCode::OK, ()); - } - _ => { - tracing::warn!("Unexpected status {status}"); - return json_response(status, ()); - } - } - - let now = Instant::now(); - if now + retry_period > started_at + max_wait { - tracing::info!("Deletion timed out waiting for 404"); - // REQUEST_TIMEOUT would be more appropriate, but CONFLICT is already part of - // the pageserver's swagger definition for this endpoint, and has the same desired - // effect of causing the control plane to retry later. - return json_response(StatusCode::CONFLICT, ()); - } - } -} - async fn handle_tenant_location_config( service: Arc, mut req: Request, @@ -283,13 +237,17 @@ async fn handle_tenant_delete( let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?; check_permissions(&req, Scope::PageServerApi)?; - deletion_wrapper(service, move |service| async move { - service - .tenant_delete(tenant_id) - .await - .and_then(map_reqwest_hyper_status) - }) - .await + let status_code = service + .tenant_delete(tenant_id) + .await + .and_then(map_reqwest_hyper_status)?; + + if status_code == StatusCode::NOT_FOUND { + // The pageserver uses 404 for successful deletion, but we use 200 + json_response(StatusCode::OK, ()) + } else { + json_response(status_code, ()) + } } async fn handle_tenant_timeline_create( @@ -317,6 +275,51 @@ async fn handle_tenant_timeline_delete( let timeline_id: TimelineId = parse_request_param(&req, "timeline_id")?; + // For timeline deletions, which both implement an "initially return 202, then 404 once + // we're done" semantic, we wrap with a retry loop to expose a simpler API upstream. + async fn deletion_wrapper(service: Arc, f: F) -> Result, ApiError> + where + R: std::future::Future> + Send + 'static, + F: Fn(Arc) -> R + Send + Sync + 'static, + { + let started_at = Instant::now(); + // To keep deletion reasonably snappy for small tenants, initially check after 1 second if deletion + // completed. + let mut retry_period = Duration::from_secs(1); + // On subsequent retries, wait longer. + let max_retry_period = Duration::from_secs(5); + // Enable callers with a 30 second request timeout to reliably get a response + let max_wait = Duration::from_secs(25); + + loop { + let status = f(service.clone()).await?; + match status { + StatusCode::ACCEPTED => { + tracing::info!("Deletion accepted, waiting to try again..."); + tokio::time::sleep(retry_period).await; + retry_period = max_retry_period; + } + StatusCode::NOT_FOUND => { + tracing::info!("Deletion complete"); + return json_response(StatusCode::OK, ()); + } + _ => { + tracing::warn!("Unexpected status {status}"); + return json_response(status, ()); + } + } + + let now = Instant::now(); + if now + retry_period > started_at + max_wait { + tracing::info!("Deletion timed out waiting for 404"); + // REQUEST_TIMEOUT would be more appropriate, but CONFLICT is already part of + // the pageserver's swagger definition for this endpoint, and has the same desired + // effect of causing the control plane to retry later. 
+ return json_response(StatusCode::CONFLICT, ()); + } + } + } + deletion_wrapper(service, move |service| async move { service .tenant_timeline_delete(tenant_id, timeline_id) diff --git a/storage_controller/src/service.rs b/storage_controller/src/service.rs index f914f4e0bb..756dc10a2a 100644 --- a/storage_controller/src/service.rs +++ b/storage_controller/src/service.rs @@ -2376,61 +2376,80 @@ impl Service { let _tenant_lock = trace_exclusive_lock(&self.tenant_op_locks, tenant_id, TenantOperations::Delete).await; - self.ensure_attached_wait(tenant_id).await?; - - // TODO: refactor into helper - let targets = { - let locked = self.inner.read().unwrap(); - let mut targets = Vec::new(); - + // Detach all shards + let (detach_waiters, shard_ids, node) = { + let mut shard_ids = Vec::new(); + let mut detach_waiters = Vec::new(); + let mut locked = self.inner.write().unwrap(); + let (nodes, tenants, scheduler) = locked.parts_mut(); for (tenant_shard_id, shard) in - locked.tenants.range(TenantShardId::tenant_range(tenant_id)) + tenants.range_mut(TenantShardId::tenant_range(tenant_id)) { - let node_id = shard.intent.get_attached().ok_or_else(|| { - ApiError::InternalServerError(anyhow::anyhow!("Shard not scheduled")) - })?; - let node = locked - .nodes - .get(&node_id) - .expect("Pageservers may not be deleted while referenced"); + shard_ids.push(*tenant_shard_id); - targets.push((*tenant_shard_id, node.clone())); + // Update the tenant's intent to remove all attachments + shard.policy = PlacementPolicy::Detached; + shard + .schedule(scheduler, &mut ScheduleContext::default()) + .expect("De-scheduling is infallible"); + debug_assert!(shard.intent.get_attached().is_none()); + debug_assert!(shard.intent.get_secondary().is_empty()); + + if let Some(waiter) = self.maybe_reconcile_shard(shard, nodes) { + detach_waiters.push(waiter); + } } - targets + + // Pick an arbitrary node to use for remote deletions (does not have to be where the tenant + // was attached, just has to be able to see the S3 content) + let node_id = scheduler.schedule_shard(&[], &ScheduleContext::default())?; + let node = nodes + .get(&node_id) + .expect("Pageservers may not be deleted while lock is active"); + (detach_waiters, shard_ids, node.clone()) }; - // Phase 1: delete on the pageservers - let mut any_pending = false; - for (tenant_shard_id, node) in targets { - let client = PageserverClient::new( - node.get_id(), - node.base_url(), - self.config.jwt_token.as_deref(), - ); - // TODO: this, like many other places, requires proper retry handling for 503, timeout: those should not - // surface immediately as an error to our caller. - let status = client.tenant_delete(tenant_shard_id).await.map_err(|e| { - ApiError::InternalServerError(anyhow::anyhow!( - "Error deleting shard {tenant_shard_id} on node {node}: {e}", - )) - })?; - tracing::info!( - "Shard {tenant_shard_id} on node {node}, delete returned {}", - status - ); - if status == StatusCode::ACCEPTED { - any_pending = true; - } + if let Err(e) = self.await_waiters(detach_waiters, RECONCILE_TIMEOUT).await { + // Failing to detach shouldn't hold up deletion, e.g. if a node is offline we should be able + // to use some other node to run the remote deletion. + tracing::warn!("Failed to detach some locations: {e}"); } - if any_pending { - // Caller should call us again later. When we eventually see 404s from - // all the shards, we may proceed to delete our records of the tenant. 
- tracing::info!( - "Tenant {} has some shards pending deletion, returning 202", - tenant_id - ); - return Ok(StatusCode::ACCEPTED); + let locations = shard_ids + .into_iter() + .map(|s| (s, node.clone())) + .collect::>(); + let results = self.tenant_for_shards_api( + locations, + |tenant_shard_id, client| async move { client.tenant_delete(tenant_shard_id).await }, + 1, + 3, + RECONCILE_TIMEOUT, + &self.cancel, + ) + .await; + for result in results { + match result { + Ok(StatusCode::ACCEPTED) => { + // This could happen if we failed detach above, and hit a pageserver where the tenant + // is still attached: it will accept the deletion in the background + tracing::warn!( + "Unexpectedly still attached on {}, client should retry", + node + ); + return Ok(StatusCode::ACCEPTED); + } + Ok(_) => {} + Err(mgmt_api::Error::Cancelled) => { + return Err(ApiError::ShuttingDown); + } + Err(e) => { + // This is unexpected: remote deletion should be infallible, unless the object store + // at large is unavailable. + tracing::error!("Error deleting via node {}: {e}", node); + return Err(ApiError::InternalServerError(anyhow::anyhow!(e))); + } + } } // Fall through: deletion of the tenant on pageservers is complete, we may proceed to drop diff --git a/test_runner/regress/test_storage_controller.py b/test_runner/regress/test_storage_controller.py index 3a9a522f3f..2031feaa83 100644 --- a/test_runner/regress/test_storage_controller.py +++ b/test_runner/regress/test_storage_controller.py @@ -7,6 +7,7 @@ from typing import Any, Dict, List, Union import pytest from fixtures.common_types import TenantId, TenantShardId, TimelineId +from fixtures.compute_reconfigure import ComputeReconfigure from fixtures.log_helper import log from fixtures.neon_fixtures import ( NeonEnv, @@ -18,6 +19,8 @@ from fixtures.neon_fixtures import ( from fixtures.pageserver.http import PageserverHttpClient from fixtures.pageserver.utils import ( MANY_SMALL_LAYERS_TENANT_CONFIG, + assert_prefix_empty, + assert_prefix_not_empty, enable_remote_storage_versioning, list_prefix, remote_storage_delete_key, @@ -839,6 +842,86 @@ def test_storage_controller_tenant_conf(neon_env_builder: NeonEnvBuilder): env.storage_controller.consistency_check() +def test_storage_controller_tenant_deletion( + neon_env_builder: NeonEnvBuilder, + compute_reconfigure_listener: ComputeReconfigure, +): + """ + Validate that: + - Deleting a tenant deletes all its shards + - Deletion does not require the compute notification hook to be responsive + - Deleting a tenant also removes all secondary locations + """ + neon_env_builder.num_pageservers = 4 + neon_env_builder.enable_pageserver_remote_storage(s3_storage()) + neon_env_builder.control_plane_compute_hook_api = ( + compute_reconfigure_listener.control_plane_compute_hook_api + ) + + env = neon_env_builder.init_configs() + env.start() + + tenant_id = TenantId.generate() + timeline_id = TimelineId.generate() + env.neon_cli.create_tenant( + tenant_id, timeline_id, shard_count=2, placement_policy='{"Attached":1}' + ) + + # Ensure all the locations are configured, including secondaries + env.storage_controller.reconcile_until_idle() + + shard_ids = [ + TenantShardId.parse(shard["shard_id"]) for shard in env.storage_controller.locate(tenant_id) + ] + + # Assert attachments all have local content + for shard_id in shard_ids: + pageserver = env.get_tenant_pageserver(shard_id) + assert pageserver.tenant_dir(shard_id).exists() + + # Assert all shards have some content in remote storage + for shard_id in shard_ids: + 
assert_prefix_not_empty( + neon_env_builder.pageserver_remote_storage, + prefix="/".join( + ( + "tenants", + str(shard_id), + ) + ), + ) + + # Break the compute hook: we are checking that deletion does not depend on the compute hook being available + def break_hook(): + raise RuntimeError("Unexpected call to compute hook") + + compute_reconfigure_listener.register_on_notify(break_hook) + + # No retry loop: deletion should complete in one shot without polling for 202 responses, because + # it cleanly detaches all the shards first, and then deletes them in remote storage + env.storage_controller.pageserver_api().tenant_delete(tenant_id) + + # Assert no pageservers have any local content + for pageserver in env.pageservers: + for shard_id in shard_ids: + assert not pageserver.tenant_dir(shard_id).exists() + + for shard_id in shard_ids: + assert_prefix_empty( + neon_env_builder.pageserver_remote_storage, + prefix="/".join( + ( + "tenants", + str(shard_id), + ) + ), + ) + + # Assert the tenant is not visible in storage controller API + with pytest.raises(StorageControllerApiException): + env.storage_controller.tenant_describe(tenant_id) + + class Failure: pageserver_id: int diff --git a/test_runner/regress/test_tenant_delete.py b/test_runner/regress/test_tenant_delete.py index e120aa1a7c..fa7cead1bd 100644 --- a/test_runner/regress/test_tenant_delete.py +++ b/test_runner/regress/test_tenant_delete.py @@ -54,9 +54,26 @@ def test_tenant_delete_smoke( # first try to delete non existing tenant tenant_id = TenantId.generate() - env.pageserver.allowed_errors.append(f".*NotFound: tenant {tenant_id}.*") - with pytest.raises(PageserverApiException, match=f"NotFound: tenant {tenant_id}"): - ps_http.tenant_delete(tenant_id=tenant_id) + env.pageserver.allowed_errors.append(".*NotFound.*") + env.pageserver.allowed_errors.append(".*simulated failure.*") + + # Check that deleting a non-existent tenant gives the expected result: this is a loop because we + # may need to retry on some remote storage errors injected by the test harness + while True: + try: + ps_http.tenant_delete(tenant_id=tenant_id) + except PageserverApiException as e: + if e.status_code == 500: + # This test uses failure injection, which can produce 500s as the pageserver expects + # the object store to always be available, and the ListObjects during deletion is generally + # an infallible operation + assert "simulated failure of remote operation" in e.message + elif e.status_code == 404: + # This is our expected result: trying to erase a non-existent tenant gives us 404 + assert "NotFound" in e.message + break + else: + raise env.neon_cli.create_tenant( tenant_id=tenant_id, From 0a65333fff72a6781b005432737261b54a6756be Mon Sep 17 00:00:00 2001 From: Christian Schwarz Date: Thu, 6 Jun 2024 15:10:16 +0200 Subject: [PATCH 139/220] chore(walredo): avoid duplicate tenant_id and shard_slug fields (#7977) spotted during reviews of async walredo work in #6628 --- pageserver/src/walredo/process.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/pageserver/src/walredo/process.rs b/pageserver/src/walredo/process.rs index 5b0af334ee..9140d4f6aa 100644 --- a/pageserver/src/walredo/process.rs +++ b/pageserver/src/walredo/process.rs @@ -6,6 +6,7 @@ use self::no_leak_child::NoLeakChild; use crate::{ config::PageServerConf, metrics::{WalRedoKillCause, WAL_REDO_PROCESS_COUNTERS, WAL_REDO_RECORD_COUNTER}, + span::debug_assert_current_span_has_tenant_id, walrecord::NeonWalRecord, }; use anyhow::Context; @@ -26,6 +27,7 @@ use 
utils::{lsn::Lsn, poison::Poison}; pub struct WalRedoProcess { #[allow(dead_code)] conf: &'static PageServerConf, + #[cfg(feature = "testing")] tenant_shard_id: TenantShardId, // Some() on construction, only becomes None on Drop. child: Option, @@ -143,6 +145,7 @@ impl WalRedoProcess { Ok(Self { conf, + #[cfg(feature = "testing")] tenant_shard_id, child: Some(child), stdin: tokio::sync::Mutex::new(Poison::new( @@ -178,7 +181,7 @@ impl WalRedoProcess { /// # Cancel-Safety /// /// Cancellation safe. - #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(), pid=%self.id()))] + #[instrument(skip_all, fields(pid=%self.id()))] pub(crate) async fn apply_wal_records( &self, rel: RelTag, @@ -187,6 +190,8 @@ impl WalRedoProcess { records: &[(Lsn, NeonWalRecord)], wal_redo_timeout: Duration, ) -> anyhow::Result { + debug_assert_current_span_has_tenant_id(); + let tag = protocol::BufferTag { rel, blknum }; // Serialize all the messages to send the WAL redo process first. From 630cfbe4206b1e3f5439cde96b048f345dce3266 Mon Sep 17 00:00:00 2001 From: Yuchen Liang <70461588+yliang412@users.noreply.github.com> Date: Thu, 6 Jun 2024 10:00:14 -0400 Subject: [PATCH 140/220] refactor(pageserver): designated api error type for cancelled request (#7949) Closes #7406. ## Problem When a `get_lsn_by_timestamp` request is cancelled, an anyhow error is exposed to handle that case, which verbosely logs the error. However, we don't benefit from having the full backtrace provided by anyhow in this case. ## Summary of changes This PR introduces a new `ApiError` type to handle errors caused by cancelled request more robustly. - A new enum variant `ApiError::Cancelled` - Currently the cancelled request is mapped to status code 500. - Need to handle this error in proxy's `http_util` as well. - Added a failpoint test to simulate cancelled `get_lsn_by_timestamp` request. 
Signed-off-by: Yuchen Liang --- libs/utils/src/http/error.rs | 8 ++++ pageserver/src/http/routes.rs | 4 +- pageserver/src/pgdatadir_mapping.rs | 4 ++ proxy/src/serverless/http_util.rs | 4 ++ test_runner/fixtures/pageserver/http.py | 2 + test_runner/regress/test_lsn_mapping.py | 51 ++++++++++++++++++++++++- 6 files changed, 69 insertions(+), 4 deletions(-) diff --git a/libs/utils/src/http/error.rs b/libs/utils/src/http/error.rs index d55823b0b7..3d863a6518 100644 --- a/libs/utils/src/http/error.rs +++ b/libs/utils/src/http/error.rs @@ -34,6 +34,9 @@ pub enum ApiError { #[error("Timeout")] Timeout(Cow<'static, str>), + #[error("Request cancelled")] + Cancelled, + #[error(transparent)] InternalServerError(anyhow::Error), } @@ -74,6 +77,10 @@ impl ApiError { err.to_string(), StatusCode::REQUEST_TIMEOUT, ), + ApiError::Cancelled => HttpErrorBody::response_from_msg_and_status( + self.to_string(), + StatusCode::INTERNAL_SERVER_ERROR, + ), ApiError::InternalServerError(err) => HttpErrorBody::response_from_msg_and_status( err.to_string(), StatusCode::INTERNAL_SERVER_ERROR, @@ -133,6 +140,7 @@ pub fn api_error_handler(api_error: ApiError) -> Response { ApiError::InternalServerError(_) => error!("Error processing HTTP request: {api_error:?}"), ApiError::ShuttingDown => info!("Shut down while processing HTTP request"), ApiError::Timeout(_) => info!("Timeout while processing HTTP request: {api_error:#}"), + ApiError::Cancelled => info!("Request cancelled while processing HTTP request"), _ => info!("Error processing HTTP request: {api_error:#}"), } diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index 7fa6c35ad6..19bc88fbc7 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -181,9 +181,7 @@ impl From for ApiError { PageReconstructError::MissingKey(e) => { ApiError::InternalServerError(anyhow::anyhow!("{e}")) } - PageReconstructError::Cancelled => { - ApiError::InternalServerError(anyhow::anyhow!("request was cancelled")) - } + PageReconstructError::Cancelled => ApiError::Cancelled, PageReconstructError::AncestorLsnTimeout(e) => ApiError::Timeout(format!("{e}").into()), PageReconstructError::WalRedo(pre) => ApiError::InternalServerError(pre), } diff --git a/pageserver/src/pgdatadir_mapping.rs b/pageserver/src/pgdatadir_mapping.rs index 0bff4be150..336d1c3fb8 100644 --- a/pageserver/src/pgdatadir_mapping.rs +++ b/pageserver/src/pgdatadir_mapping.rs @@ -36,6 +36,7 @@ use strum::IntoEnumIterator; use tokio_util::sync::CancellationToken; use tracing::{debug, info, trace, warn}; use utils::bin_ser::DeserializeError; +use utils::pausable_failpoint; use utils::vec_map::{VecMap, VecMapOrdering}; use utils::{bin_ser::BeSer, lsn::Lsn}; @@ -409,6 +410,8 @@ impl Timeline { cancel: &CancellationToken, ctx: &RequestContext, ) -> Result { + pausable_failpoint!("find-lsn-for-timestamp-pausable"); + let gc_cutoff_lsn_guard = self.get_latest_gc_cutoff_lsn(); // We use this method to figure out the branching LSN for the new branch, but the // GC cutoff could be before the branching point and we cannot create a new branch @@ -424,6 +427,7 @@ impl Timeline { let mut found_smaller = false; let mut found_larger = false; + while low < high { if cancel.is_cancelled() { return Err(PageReconstructError::Cancelled); diff --git a/proxy/src/serverless/http_util.rs b/proxy/src/serverless/http_util.rs index ab9127b13e..701ab58f63 100644 --- a/proxy/src/serverless/http_util.rs +++ b/proxy/src/serverless/http_util.rs @@ -45,6 +45,10 @@ pub fn api_error_into_response(this: 
ApiError) -> Response> { err.to_string(), StatusCode::REQUEST_TIMEOUT, ), + ApiError::Cancelled => HttpErrorBody::response_from_msg_and_status( + this.to_string(), + StatusCode::INTERNAL_SERVER_ERROR, + ), ApiError::InternalServerError(err) => HttpErrorBody::response_from_msg_and_status( err.to_string(), StatusCode::INTERNAL_SERVER_ERROR, diff --git a/test_runner/fixtures/pageserver/http.py b/test_runner/fixtures/pageserver/http.py index f1f96f6d5f..08bf66058a 100644 --- a/test_runner/fixtures/pageserver/http.py +++ b/test_runner/fixtures/pageserver/http.py @@ -630,12 +630,14 @@ class PageserverHttpClient(requests.Session, MetricsGetter): tenant_id: Union[TenantId, TenantShardId], timeline_id: TimelineId, timestamp: datetime, + **kwargs, ): log.info( f"Requesting lsn by timestamp {timestamp}, tenant {tenant_id}, timeline {timeline_id}" ) res = self.get( f"http://localhost:{self.port}/v1/tenant/{tenant_id}/timeline/{timeline_id}/get_lsn_by_timestamp?timestamp={timestamp.isoformat()}Z", + **kwargs, ) self.verbose_error(res) res_json = res.json() diff --git a/test_runner/regress/test_lsn_mapping.py b/test_runner/regress/test_lsn_mapping.py index 83d52d4c4c..263730a823 100644 --- a/test_runner/regress/test_lsn_mapping.py +++ b/test_runner/regress/test_lsn_mapping.py @@ -1,12 +1,15 @@ import re import time +from concurrent.futures import ThreadPoolExecutor from datetime import datetime, timedelta, timezone +import pytest from fixtures.common_types import Lsn from fixtures.log_helper import log from fixtures.neon_fixtures import NeonEnvBuilder, wait_for_last_flush_lsn from fixtures.pageserver.http import PageserverApiException -from fixtures.utils import query_scalar +from fixtures.utils import query_scalar, wait_until +from requests.exceptions import ReadTimeout # @@ -108,6 +111,52 @@ def test_lsn_mapping(neon_env_builder: NeonEnvBuilder): assert Lsn(result["lsn"]) >= last_flush_lsn +def test_get_lsn_by_timestamp_cancelled(neon_env_builder: NeonEnvBuilder): + """ + Test if cancelled pageserver get_lsn_by_timestamp request is correctly handled. + Added as an effort to improve error handling and avoid full anyhow backtrace. 
+ """ + + env = neon_env_builder.init_start() + env.pageserver.allowed_errors.extend( + [ + ".*request was dropped before completing.*", + ".*Cancelled request finished with an error: Cancelled", + ] + ) + + client = env.pageserver.http_client() + failpoint = "find-lsn-for-timestamp-pausable" + client.configure_failpoints((failpoint, "pause")) + + with ThreadPoolExecutor(max_workers=1) as exec: + # Request get_lsn_by_timestamp, hit the pausable failpoint + failing = exec.submit( + client.timeline_get_lsn_by_timestamp, + env.initial_tenant, + env.initial_timeline, + datetime.now(), + timeout=2, + ) + + _, offset = wait_until( + 20, 0.5, lambda: env.pageserver.assert_log_contains(f"at failpoint {failpoint}") + ) + + with pytest.raises(ReadTimeout): + failing.result() + + client.configure_failpoints((failpoint, "off")) + + _, offset = wait_until( + 20, + 0.5, + lambda: env.pageserver.assert_log_contains( + "Cancelled request finished with an error: Cancelled$", offset + ), + ) + + # Test pageserver get_timestamp_of_lsn API def test_ts_of_lsn_api(neon_env_builder: NeonEnvBuilder): key_not_found_error = r".*could not find data for key.*" From a8be07785ebf388ba51a2084e6add16f1f269056 Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Thu, 6 Jun 2024 17:20:54 +0300 Subject: [PATCH 141/220] fix: do TimelineMetrics::shutdown only once (#7983) Related to #7341 tenant deletion will end up shutting down timelines twice, once before actually starting and the second time when per timeline deletion is requested. Shutting down TimelineMetrics causes underflows. Add an atomic boolean and only do the shutdown once. --- pageserver/src/metrics.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/pageserver/src/metrics.rs b/pageserver/src/metrics.rs index 4f2c75d308..e8a1e063c5 100644 --- a/pageserver/src/metrics.rs +++ b/pageserver/src/metrics.rs @@ -2108,6 +2108,7 @@ pub(crate) struct TimelineMetrics { pub directory_entries_count_gauge: Lazy UIntGauge>>, pub evictions: IntCounter, pub evictions_with_low_residence_duration: std::sync::RwLock, + shutdown: std::sync::atomic::AtomicBool, } impl TimelineMetrics { @@ -2227,6 +2228,7 @@ impl TimelineMetrics { evictions_with_low_residence_duration: std::sync::RwLock::new( evictions_with_low_residence_duration, ), + shutdown: std::sync::atomic::AtomicBool::default(), } } @@ -2249,6 +2251,17 @@ impl TimelineMetrics { } pub(crate) fn shutdown(&self) { + let was_shutdown = self + .shutdown + .swap(true, std::sync::atomic::Ordering::Relaxed); + + if was_shutdown { + // this happens on tenant deletion because tenant first shuts down timelines, then + // invokes timeline deletion which first shuts down the timeline again. + // TODO: this can be removed once https://github.com/neondatabase/neon/issues/5080 + return; + } + let tenant_id = &self.tenant_id; let timeline_id = &self.timeline_id; let shard_id = &self.shard_id; From 75bca9bb19b9943db7358c06db5523337eb9e239 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arpad=20M=C3=BCller?= Date: Thu, 6 Jun 2024 16:21:27 +0200 Subject: [PATCH 142/220] Perform retries on azure bulk deletion (#7964) This adds retries to the bulk deletion, because if there is a certain chance n that a request fails, the chance that at least one of the requests in a chain of requests fails increases exponentially. We've had similar issues with the S3 DR tests, which in the end yielded in adding retries at the remote_storage level. 
Retries at the top level are not sufficient when one remote_storage "operation" is multiple network requests in a trench coat, especially when there is no notion of saving the progress: even if prior deletions had been successful, we'd still need to get a 404 in order to continue the loop and get to the point where we failed in the last iteration. Maybe we'll fail again but before we've even reached it. Retries at the bottom level avoid this issue because they have the notion of progress and also when one network operation fails, only that operation is retried. First part of #7931. --- libs/remote_storage/src/azure_blob.rs | 70 ++++++++++++++++++++------- 1 file changed, 52 insertions(+), 18 deletions(-) diff --git a/libs/remote_storage/src/azure_blob.rs b/libs/remote_storage/src/azure_blob.rs index aca22c6b3e..2aa05a9d30 100644 --- a/libs/remote_storage/src/azure_blob.rs +++ b/libs/remote_storage/src/azure_blob.rs @@ -3,6 +3,7 @@ use std::borrow::Cow; use std::collections::HashMap; use std::env; +use std::fmt::Display; use std::io; use std::num::NonZeroU32; use std::pin::Pin; @@ -29,6 +30,7 @@ use http_types::{StatusCode, Url}; use scopeguard::ScopeGuard; use tokio_util::sync::CancellationToken; use tracing::debug; +use utils::backoff; use crate::metrics::{start_measuring_requests, AttemptOutcome, RequestKind}; use crate::{ @@ -451,26 +453,58 @@ impl RemoteStorage for AzureBlobStorage { // TODO batch requests are not supported by the SDK // https://github.com/Azure/azure-sdk-for-rust/issues/1068 for path in paths { - let blob_client = self.client.blob_client(self.relative_path_to_name(path)); - - let request = blob_client.delete().into_future(); - - let res = tokio::time::timeout(self.timeout, request).await; - - match res { - Ok(Ok(_response)) => continue, - Ok(Err(e)) => { - if let Some(http_err) = e.as_http_error() { - if http_err.status() == StatusCode::NotFound { - continue; - } - } - return Err(e.into()); - } - Err(_elapsed) => return Err(TimeoutOrCancel::Timeout.into()), + #[derive(Debug)] + enum AzureOrTimeout { + AzureError(azure_core::Error), + Timeout, + Cancel, } - } + impl Display for AzureOrTimeout { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{self:?}") + } + } + let warn_threshold = 3; + let max_retries = 5; + backoff::retry( + || async { + let blob_client = self.client.blob_client(self.relative_path_to_name(path)); + let request = blob_client.delete().into_future(); + + let res = tokio::time::timeout(self.timeout, request).await; + + match res { + Ok(Ok(_v)) => Ok(()), + Ok(Err(azure_err)) => { + if let Some(http_err) = azure_err.as_http_error() { + if http_err.status() == StatusCode::NotFound { + return Ok(()); + } + } + Err(AzureOrTimeout::AzureError(azure_err)) + } + Err(_elapsed) => Err(AzureOrTimeout::Timeout), + } + }, + |err| match err { + AzureOrTimeout::AzureError(_) | AzureOrTimeout::Timeout => false, + AzureOrTimeout::Cancel => true, + }, + warn_threshold, + max_retries, + "deleting remote object", + cancel, + ) + .await + .ok_or_else(|| AzureOrTimeout::Cancel) + .and_then(|x| x) + .map_err(|e| match e { + AzureOrTimeout::AzureError(err) => anyhow::Error::from(err), + AzureOrTimeout::Timeout => TimeoutOrCancel::Timeout.into(), + AzureOrTimeout::Cancel => TimeoutOrCancel::Cancel.into(), + })?; + } Ok(()) }; From 014509987dbc714f3a80459c5c30fd70a0a1f517 Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Thu, 6 Jun 2024 10:40:58 -0400 Subject: [PATCH 143/220] fix(pageserver): more flexible layer size test (#7945) M-series macOS 
has different alignments/size for some fields (which I did not investigate in
detail) and therefore this test cannot pass on macOS. Fixed by using `<=` for
the comparison so that we do not test for an exact match.

Observed by @yliang412.

Signed-off-by: Alex Chi Z
---
 pageserver/src/tenant/storage_layer/layer/tests.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/pageserver/src/tenant/storage_layer/layer/tests.rs b/pageserver/src/tenant/storage_layer/layer/tests.rs
index fa9142d5e9..3a7aca7a6c 100644
--- a/pageserver/src/tenant/storage_layer/layer/tests.rs
+++ b/pageserver/src/tenant/storage_layer/layer/tests.rs
@@ -815,6 +815,7 @@ async fn eviction_cancellation_on_drop() {
 /// A test case to remind you the cost of these structures. You can bump the size limit
 /// below if it is really necessary to add more fields to the structures.
 #[test]
+#[cfg(target_arch = "x86_64")]
 fn layer_size() {
     assert_eq!(std::mem::size_of::(), 2040);
     assert_eq!(std::mem::size_of::(), 104);

From 5d05013857fd8a4b868b796ff9514dd773bca2bb Mon Sep 17 00:00:00 2001
From: Alex Chi Z
Date: Thu, 6 Jun 2024 11:34:44 -0400
Subject: [PATCH 144/220] fix(pageserver): skip metadata compaction if LSN is not accumulated enough (#7962)

Closes https://github.com/neondatabase/neon/issues/7937

Only trigger metadata image layer creation if enough delta layers are
accumulated.

Signed-off-by: Alex Chi Z
---
 pageserver/src/tenant/timeline.rs | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs
index 4c46c4e635..59480ba141 100644
--- a/pageserver/src/tenant/timeline.rs
+++ b/pageserver/src/tenant/timeline.rs
@@ -4446,6 +4446,12 @@ impl Timeline {
                 if mode == ImageLayerCreationMode::Initial {
                     return Err(CreateImageLayersError::Other(anyhow::anyhow!("no image layer should be created for metadata keys when flushing frozen layers")));
                 }
+                if mode == ImageLayerCreationMode::Try && !check_for_image_layers {
+                    // Skip compaction if there are not enough updates. Metadata compaction will do a scan and
+                    // might mess up with evictions.
+                    start = img_range.end;
+                    continue;
+                }
             } else if let ImageLayerCreationMode::Try = mode {
                 // check_for_image_layers = false -> skip
                 // check_for_image_layers = true -> check time_for_new_image_layer -> skip/generate

From d46d19456d4a089dad16552996d211ccd818dab1 Mon Sep 17 00:00:00 2001
From: Joonas Koivunen
Date: Thu, 6 Jun 2024 20:18:39 +0300
Subject: [PATCH 145/220] raise the warning for oversized L0 to 2*target (#7985)

Currently we warn even when going over by a single byte. Even that will be hit
much more rarely once #7927 lands, but let's get this in earlier.

Rationale for 2*checkpoint_distance: anything smaller is not really worth a
warning. We have a global allowed_error for this warning, which cannot be
removed now, nor can it be removed with #7927, because of the many tests that
use a very small `checkpoint_distance`.
--- pageserver/src/tenant/timeline.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 59480ba141..32cf7be0f7 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -5678,7 +5678,7 @@ impl<'a> TimelineWriter<'a> { self.tl.flush_frozen_layers(); let current_size = self.write_guard.as_ref().unwrap().current_size; - if current_size > self.get_checkpoint_distance() { + if current_size >= self.get_checkpoint_distance() * 2 { warn!("Flushed oversized open layer with size {}", current_size) } From e4e444f59fc1ba2c5241acaabd143d2d3cabfb31 Mon Sep 17 00:00:00 2001 From: Arthur Petukhovsky Date: Thu, 6 Jun 2024 18:54:44 +0100 Subject: [PATCH 146/220] Remove random sleep in partial backup (#7982) We had a random sleep in the beginning of partial backup task, which was needed for the first partial backup deploy. It helped with gradual upload of segments without causing network overload. Now partial backup is deployed everywhere, so we don't need this random sleep anymore. We also had an issue related to this, in which manager task was not shut down for a long time. The cause of the issue is this random sleep that didn't take timeline cancellation into account, meanwhile manager task waited for partial backup to complete. Fixes https://github.com/neondatabase/neon/issues/7967 --- safekeeper/src/timeline_manager.rs | 3 +++ safekeeper/src/wal_backup_partial.rs | 8 -------- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/safekeeper/src/timeline_manager.rs b/safekeeper/src/timeline_manager.rs index 7174d843fc..087b988c69 100644 --- a/safekeeper/src/timeline_manager.rs +++ b/safekeeper/src/timeline_manager.rs @@ -213,6 +213,9 @@ pub async fn main_task( } }; + // remove timeline from the broker active set sooner, before waiting for background tasks + tli_broker_active.set(false); + // shutdown background tasks if conf.is_wal_backup_enabled() { wal_backup::update_task(&conf, &tli, false, &last_state, &mut backup_task).await; diff --git a/safekeeper/src/wal_backup_partial.rs b/safekeeper/src/wal_backup_partial.rs index 6c0f35095b..ed5ddb71f5 100644 --- a/safekeeper/src/wal_backup_partial.rs +++ b/safekeeper/src/wal_backup_partial.rs @@ -20,7 +20,6 @@ use camino::Utf8PathBuf; use postgres_ffi::{XLogFileName, XLogSegNo, PG_TLI}; -use rand::Rng; use remote_storage::RemotePath; use serde::{Deserialize, Serialize}; @@ -276,13 +275,6 @@ pub async fn main_task(tli: FullAccessTimeline, conf: SafeKeeperConf) { debug!("started"); let await_duration = conf.partial_backup_timeout; - // sleep for random time to avoid thundering herd - { - let randf64 = rand::thread_rng().gen_range(0.0..1.0); - let sleep_duration = await_duration.mul_f64(randf64); - tokio::time::sleep(sleep_duration).await; - } - let (_, persistent_state) = tli.get_state().await; let mut commit_lsn_rx = tli.get_commit_lsn_watch_rx(); let mut flush_lsn_rx = tli.get_term_flush_lsn_watch_rx(); From 66c6b270f1a2a0bca51349db35e876ce7e135de4 Mon Sep 17 00:00:00 2001 From: Anastasia Lubennikova Date: Thu, 6 Jun 2024 20:11:38 +0100 Subject: [PATCH 147/220] Downgrade No response from reading prefetch entry WARNING to LOG --- pgxn/neon/pagestore_smgr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pgxn/neon/pagestore_smgr.c b/pgxn/neon/pagestore_smgr.c index 0e4d210be8..6305f2ec92 100644 --- a/pgxn/neon/pagestore_smgr.c +++ b/pgxn/neon/pagestore_smgr.c @@ -605,7 +605,7 @@ prefetch_read(PrefetchRequest *slot) 
} else { - neon_shard_log(slot->shard_no, WARNING, + neon_shard_log(slot->shard_no, LOG, "No response from reading prefetch entry %lu: %u/%u/%u.%u block %u. This can be caused by a concurrent disconnect", (long)slot->my_ring_index, RelFileInfoFmt(BufTagGetNRelFileInfo(slot->buftag)), From 8ee191c2714ca404f51fb13f3c86c7d50e8daa27 Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Fri, 7 Jun 2024 10:18:05 +0300 Subject: [PATCH 148/220] test_local_only_layers_after_crash: various fixes (#7986) In #7927 I needed to fix this test case, but the fixes should be possible to land irrespective of the layer ingestion code change. The most important fix is the behavior if an image layer is found: the assertion message formatting raises a runtime error, which obscures the fact that we found an image layer. --- .../test_pageserver_crash_consistency.py | 29 +++++++++---------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/test_runner/regress/test_pageserver_crash_consistency.py b/test_runner/regress/test_pageserver_crash_consistency.py index 3831d2f917..2d6b50490e 100644 --- a/test_runner/regress/test_pageserver_crash_consistency.py +++ b/test_runner/regress/test_pageserver_crash_consistency.py @@ -1,11 +1,8 @@ -import time - import pytest from fixtures.neon_fixtures import NeonEnvBuilder, PgBin, wait_for_last_flush_lsn -from fixtures.pageserver.common_types import parse_layer_file_name +from fixtures.pageserver.common_types import ImageLayerName, parse_layer_file_name from fixtures.pageserver.utils import ( wait_for_last_record_lsn, - wait_for_upload_queue_empty, wait_until_tenant_active, ) from fixtures.remote_storage import LocalFsStorage, RemoteStorageKind @@ -25,10 +22,9 @@ def test_local_only_layers_after_crash(neon_env_builder: NeonEnvBuilder, pg_bin: env = neon_env_builder.init_start( initial_tenant_conf={ - "checkpoint_distance": f"{1024 ** 2}", - "compaction_target_size": f"{1024 ** 2}", + "checkpoint_distance": f"{10 * 1024**2}", "compaction_period": "0 s", - "compaction_threshold": "3", + "compaction_threshold": "999999", } ) pageserver_http = env.pageserver.http_client() @@ -42,13 +38,13 @@ def test_local_only_layers_after_crash(neon_env_builder: NeonEnvBuilder, pg_bin: pg_bin.run_capture(["pgbench", "-i", "-s1", connstr]) lsn = wait_for_last_flush_lsn(env, endpoint, tenant_id, timeline_id) - endpoint.stop() # make sure we receive no new wal after this, so that we'll write over the same L1 file. 
endpoint.stop() for sk in env.safekeepers: sk.stop() + pageserver_http.patch_tenant_config_client_side(tenant_id, {"compaction_threshold": 3}) # hit the exit failpoint with pytest.raises(ConnectionError, match="Remote end closed connection without response"): pageserver_http.timeline_checkpoint(tenant_id, timeline_id) @@ -72,9 +68,15 @@ def test_local_only_layers_after_crash(neon_env_builder: NeonEnvBuilder, pg_bin: # L0 continue + candidate = parse_layer_file_name(path.name) + + if isinstance(candidate, ImageLayerName): + continue + if l1_found is not None: - raise RuntimeError(f"found multiple L1: {l1_found.name} and {path.name}") - l1_found = parse_layer_file_name(path.name) + raise RuntimeError(f"found multiple L1: {l1_found.to_str()} and {path.name}") + + l1_found = candidate assert l1_found is not None, "failed to find L1 locally" @@ -93,15 +95,10 @@ def test_local_only_layers_after_crash(neon_env_builder: NeonEnvBuilder, pg_bin: # wait for us to catch up again wait_for_last_record_lsn(pageserver_http, tenant_id, timeline_id, lsn) - pageserver_http.timeline_compact(tenant_id, timeline_id) - - # give time for log flush - time.sleep(1) + pageserver_http.timeline_compact(tenant_id, timeline_id, wait_until_uploaded=True) assert env.pageserver.layer_exists(tenant_id, timeline_id, l1_found), "the L1 reappears" - wait_for_upload_queue_empty(pageserver_http, tenant_id, timeline_id) - uploaded = env.pageserver_remote_storage.remote_layer_path( tenant_id, timeline_id, l1_found.to_str() ) From 2078dc827b0495703f9ddcb8ea944df3a4fe9c41 Mon Sep 17 00:00:00 2001 From: a-masterov <72613290+a-masterov@users.noreply.github.com> Date: Fri, 7 Jun 2024 10:04:59 +0200 Subject: [PATCH 149/220] CI: copy run-* labels from external contributors' PRs (#7915) ## Problem We don't carry run-* labels from external contributors' PRs to ci-run/pr-* PRs. This is not really convenient. Need to sync labels in approved-for-ci-run workflow. ## Summary of changes Added the procedure of transition of labels from the original PR ## Checklist before requesting a review - [x] I have performed a self-review of my code. - [ ] If it is a core feature, I have added thorough tests. - [ ] Do we need to implement analytics? if so did you add the relevant metrics to the dashboard? - [ ] If this PR requires public announcement, mark it with /release-notes label and add several sentences in this section. 
## Checklist before merging - [ ] Do not forget to reformat commit message to not include the above checklist --------- Co-authored-by: Alexander Bayandin --- .github/workflows/approved-for-ci-run.yml | 53 ++++++++++++++++++++--- 1 file changed, 48 insertions(+), 5 deletions(-) diff --git a/.github/workflows/approved-for-ci-run.yml b/.github/workflows/approved-for-ci-run.yml index b14b66a439..0a0898d30c 100644 --- a/.github/workflows/approved-for-ci-run.yml +++ b/.github/workflows/approved-for-ci-run.yml @@ -69,15 +69,41 @@ jobs: with: ref: main token: ${{ secrets.CI_ACCESS_TOKEN }} + + - name: Look for existing PR + id: get-pr + env: + GH_TOKEN: ${{ secrets.CI_ACCESS_TOKEN }} + run: | + ALREADY_CREATED="$(gh pr --repo ${GITHUB_REPOSITORY} list --head ${BRANCH} --base main --json number --jq '.[].number')" + echo "ALREADY_CREATED=${ALREADY_CREATED}" >> ${GITHUB_OUTPUT} + + - name: Get changed labels + id: get-labels + if: steps.get-pr.outputs.ALREADY_CREATED != '' + env: + ALREADY_CREATED: ${{ steps.get-pr.outputs.ALREADY_CREATED }} + GH_TOKEN: ${{ secrets.CI_ACCESS_TOKEN }} + run: | + LABELS_TO_REMOVE=$(comm -23 <(gh pr --repo ${GITHUB_REPOSITORY} view ${ALREADY_CREATED} --json labels --jq '.labels.[].name'| ( grep -E '^run' || true ) | sort) \ + <(gh pr --repo ${GITHUB_REPOSITORY} view ${PR_NUMBER} --json labels --jq '.labels.[].name' | ( grep -E '^run' || true ) | sort ) |\ + ( grep -v run-e2e-tests-in-draft || true ) | paste -sd , -) + LABELS_TO_ADD=$(comm -13 <(gh pr --repo ${GITHUB_REPOSITORY} view ${ALREADY_CREATED} --json labels --jq '.labels.[].name'| ( grep -E '^run' || true ) |sort) \ + <(gh pr --repo ${GITHUB_REPOSITORY} view ${PR_NUMBER} --json labels --jq '.labels.[].name' | ( grep -E '^run' || true ) | sort ) |\ + paste -sd , -) + echo "LABELS_TO_ADD=${LABELS_TO_ADD}" >> ${GITHUB_OUTPUT} + echo "LABELS_TO_REMOVE=${LABELS_TO_REMOVE}" >> ${GITHUB_OUTPUT} - run: gh pr checkout "${PR_NUMBER}" - run: git checkout -b "${BRANCH}" - run: git push --force origin "${BRANCH}" + if: steps.get-pr.outputs.ALREADY_CREATED == '' - name: Create a Pull Request for CI run (if required) - env: + if: steps.get-pr.outputs.ALREADY_CREATED == '' + env: GH_TOKEN: ${{ secrets.CI_ACCESS_TOKEN }} run: | cat << EOF > body.md @@ -88,16 +114,33 @@ jobs: Feel free to review/comment/discuss the original PR #${PR_NUMBER}. 
EOF - ALREADY_CREATED="$(gh pr --repo ${GITHUB_REPOSITORY} list --head ${BRANCH} --base main --json number --jq '.[].number')" - if [ -z "${ALREADY_CREATED}" ]; then - gh pr --repo "${GITHUB_REPOSITORY}" create --title "CI run for PR #${PR_NUMBER}" \ + LABELS=$( (gh pr --repo "${GITHUB_REPOSITORY}" view ${PR_NUMBER} --json labels --jq '.labels.[].name'; echo run-e2e-tests-in-draft )| \ + grep -E '^run' | paste -sd , -) + gh pr --repo "${GITHUB_REPOSITORY}" create --title "CI run for PR #${PR_NUMBER}" \ --body-file "body.md" \ --head "${BRANCH}" \ --base "main" \ - --label "run-e2e-tests-in-draft" \ + --label ${LABELS} \ --draft + - name: Modify the existing pull request (if required) + if: steps.get-pr.outputs.ALREADY_CREATED != '' + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + LABELS_TO_ADD: ${{ steps.get-labels.outputs.LABELS_TO_ADD }} + LABELS_TO_REMOVE: ${{ steps.get-labels.outputs.LABELS_TO_REMOVE }} + ALREADY_CREATED: ${{ steps.get-pr.outputs.ALREADY_CREATED }} + run: | + ADD_CMD= + REMOVE_CMD= + [ -z "${LABELS_TO_ADD}" ] || ADD_CMD="--add-label ${LABELS_TO_ADD}" + [ -z "${LABELS_TO_REMOVE}" ] || REMOVE_CMD="--remove-label ${LABELS_TO_REMOVE}" + if [ -n "${ADD_CMD}" ] || [ -n "${REMOVE_CMD}" ]; then + gh pr --repo "${GITHUB_REPOSITORY}" edit ${ALREADY_CREATED} ${ADD_CMD} ${REMOVE_CMD} fi + - run: git push --force origin "${BRANCH}" + if: steps.get-pr.outputs.ALREADY_CREATED != '' + cleanup: # Close PRs and delete branchs if the original PR is closed. From 26c68f91f3ca2c0d5f06004bb0fa3b7f8cb98bcd Mon Sep 17 00:00:00 2001 From: Tristan Partin Date: Thu, 6 Jun 2024 16:43:29 -0500 Subject: [PATCH 150/220] Move SQL migrations out of line It makes them much easier to reason about, and allows other SQL tooling to operate on them like language servers, formatters, etc. I also brought back the removed migrations such that we can more easily understand what they were. I included a "-- SKIP" comment describing why those migrations are now skipped. We no longer skip migrations by checking if it is empty, but instead check to see if the migration starts with "-- SKIP". 
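As a minimal sketch of the convention described above (illustrative only; the real logic lives in `handle_migrations` in the diff below), the runner only needs to look at the first characters of each migration file:

```rust
/// Sketch only: a migration opts out of execution by starting with the
/// "-- SKIP" marker; everything else is executed as before.
fn is_skipped_migration(migration: &str) -> bool {
    migration.starts_with("-- SKIP")
}
```

Adding a migration is then just creating the next numbered SQL file (a hypothetical `0009-*.sql`, for example) and appending one more `include_str!` entry to the `migrations` array.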
--- .../0000-neon_superuser_bypass_rls.sql | 1 + .../src/migrations/0001-alter_roles.sql | 18 ++++++ ..._create_subscription_to_neon_superuser.sql | 6 ++ ...003-grant_pg_monitor_to_neon_superuser.sql | 1 + ...-grant_all_on_tables_to_neon_superuser.sql | 4 ++ ...ant_all_on_sequences_to_neon_superuser.sql | 4 ++ ...es_to_neon_superuser_with_grant_option.sql | 3 + ...es_to_neon_superuser_with_grant_option.sql | 3 + ...plication_for_previously_allowed_roles.sql | 13 ++++ compute_tools/src/spec.rs | 60 +++++++------------ 10 files changed, 73 insertions(+), 40 deletions(-) create mode 100644 compute_tools/src/migrations/0000-neon_superuser_bypass_rls.sql create mode 100644 compute_tools/src/migrations/0001-alter_roles.sql create mode 100644 compute_tools/src/migrations/0002-grant_pg_create_subscription_to_neon_superuser.sql create mode 100644 compute_tools/src/migrations/0003-grant_pg_monitor_to_neon_superuser.sql create mode 100644 compute_tools/src/migrations/0004-grant_all_on_tables_to_neon_superuser.sql create mode 100644 compute_tools/src/migrations/0005-grant_all_on_sequences_to_neon_superuser.sql create mode 100644 compute_tools/src/migrations/0006-grant_all_on_tables_to_neon_superuser_with_grant_option.sql create mode 100644 compute_tools/src/migrations/0007-grant_all_on_sequences_to_neon_superuser_with_grant_option.sql create mode 100644 compute_tools/src/migrations/0008-revoke_replication_for_previously_allowed_roles.sql diff --git a/compute_tools/src/migrations/0000-neon_superuser_bypass_rls.sql b/compute_tools/src/migrations/0000-neon_superuser_bypass_rls.sql new file mode 100644 index 0000000000..73b36a37f6 --- /dev/null +++ b/compute_tools/src/migrations/0000-neon_superuser_bypass_rls.sql @@ -0,0 +1 @@ +ALTER ROLE neon_superuser BYPASSRLS; diff --git a/compute_tools/src/migrations/0001-alter_roles.sql b/compute_tools/src/migrations/0001-alter_roles.sql new file mode 100644 index 0000000000..6cb49f873f --- /dev/null +++ b/compute_tools/src/migrations/0001-alter_roles.sql @@ -0,0 +1,18 @@ +DO $$ +DECLARE + role_name text; +BEGIN + FOR role_name IN SELECT rolname FROM pg_roles WHERE pg_has_role(rolname, 'neon_superuser', 'member') + LOOP + RAISE NOTICE 'EXECUTING ALTER ROLE % INHERIT', quote_ident(role_name); + EXECUTE 'ALTER ROLE ' || quote_ident(role_name) || ' INHERIT'; + END LOOP; + + FOR role_name IN SELECT rolname FROM pg_roles + WHERE + NOT pg_has_role(rolname, 'neon_superuser', 'member') AND NOT starts_with(rolname, 'pg_') + LOOP + RAISE NOTICE 'EXECUTING ALTER ROLE % NOBYPASSRLS', quote_ident(role_name); + EXECUTE 'ALTER ROLE ' || quote_ident(role_name) || ' NOBYPASSRLS'; + END LOOP; +END $$; diff --git a/compute_tools/src/migrations/0002-grant_pg_create_subscription_to_neon_superuser.sql b/compute_tools/src/migrations/0002-grant_pg_create_subscription_to_neon_superuser.sql new file mode 100644 index 0000000000..37f0ce211f --- /dev/null +++ b/compute_tools/src/migrations/0002-grant_pg_create_subscription_to_neon_superuser.sql @@ -0,0 +1,6 @@ +DO $$ +BEGIN + IF (SELECT setting::numeric >= 160000 FROM pg_settings WHERE name = 'server_version_num') THEN + EXECUTE 'GRANT pg_create_subscription TO neon_superuser'; + END IF; +END $$; diff --git a/compute_tools/src/migrations/0003-grant_pg_monitor_to_neon_superuser.sql b/compute_tools/src/migrations/0003-grant_pg_monitor_to_neon_superuser.sql new file mode 100644 index 0000000000..11afd3b635 --- /dev/null +++ b/compute_tools/src/migrations/0003-grant_pg_monitor_to_neon_superuser.sql @@ -0,0 +1 @@ +GRANT pg_monitor TO neon_superuser 
WITH ADMIN OPTION; diff --git a/compute_tools/src/migrations/0004-grant_all_on_tables_to_neon_superuser.sql b/compute_tools/src/migrations/0004-grant_all_on_tables_to_neon_superuser.sql new file mode 100644 index 0000000000..8abe052494 --- /dev/null +++ b/compute_tools/src/migrations/0004-grant_all_on_tables_to_neon_superuser.sql @@ -0,0 +1,4 @@ +-- SKIP: Deemed insufficient for allowing relations created by extensions to be +-- interacted with by neon_superuser without permission issues. + +ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO neon_superuser; diff --git a/compute_tools/src/migrations/0005-grant_all_on_sequences_to_neon_superuser.sql b/compute_tools/src/migrations/0005-grant_all_on_sequences_to_neon_superuser.sql new file mode 100644 index 0000000000..5bcb026e0c --- /dev/null +++ b/compute_tools/src/migrations/0005-grant_all_on_sequences_to_neon_superuser.sql @@ -0,0 +1,4 @@ +-- SKIP: Deemed insufficient for allowing relations created by extensions to be +-- interacted with by neon_superuser without permission issues. + +ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO neon_superuser; diff --git a/compute_tools/src/migrations/0006-grant_all_on_tables_to_neon_superuser_with_grant_option.sql b/compute_tools/src/migrations/0006-grant_all_on_tables_to_neon_superuser_with_grant_option.sql new file mode 100644 index 0000000000..ce7c96753e --- /dev/null +++ b/compute_tools/src/migrations/0006-grant_all_on_tables_to_neon_superuser_with_grant_option.sql @@ -0,0 +1,3 @@ +-- SKIP: Moved inline to the handle_grants() functions. + +ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO neon_superuser WITH GRANT OPTION; diff --git a/compute_tools/src/migrations/0007-grant_all_on_sequences_to_neon_superuser_with_grant_option.sql b/compute_tools/src/migrations/0007-grant_all_on_sequences_to_neon_superuser_with_grant_option.sql new file mode 100644 index 0000000000..72baf920cd --- /dev/null +++ b/compute_tools/src/migrations/0007-grant_all_on_sequences_to_neon_superuser_with_grant_option.sql @@ -0,0 +1,3 @@ +-- SKIP: Moved inline to the handle_grants() functions. + +ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO neon_superuser WITH GRANT OPTION; diff --git a/compute_tools/src/migrations/0008-revoke_replication_for_previously_allowed_roles.sql b/compute_tools/src/migrations/0008-revoke_replication_for_previously_allowed_roles.sql new file mode 100644 index 0000000000..47129d65b8 --- /dev/null +++ b/compute_tools/src/migrations/0008-revoke_replication_for_previously_allowed_roles.sql @@ -0,0 +1,13 @@ +-- SKIP: The original goal of this migration was to prevent creating +-- subscriptions, but this migration was insufficient. + +DO $$ +DECLARE + role_name TEXT; +BEGIN + FOR role_name IN SELECT rolname FROM pg_roles WHERE rolreplication IS TRUE + LOOP + RAISE NOTICE 'EXECUTING ALTER ROLE % NOREPLICATION', quote_ident(role_name); + EXECUTE 'ALTER ROLE ' || quote_ident(role_name) || ' NOREPLICATION'; + END LOOP; +END $$; diff --git a/compute_tools/src/spec.rs b/compute_tools/src/spec.rs index 3a6e18b638..143f6c1e5f 100644 --- a/compute_tools/src/spec.rs +++ b/compute_tools/src/spec.rs @@ -774,44 +774,21 @@ pub fn handle_migrations(client: &mut Client) -> Result<()> { // !BE SURE TO ONLY ADD MIGRATIONS TO THE END OF THIS ARRAY. IF YOU DO NOT, VERY VERY BAD THINGS MAY HAPPEN! // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + // Add new migrations in numerical order. 
let migrations = [ - "ALTER ROLE neon_superuser BYPASSRLS", - r#" -DO $$ -DECLARE - role_name text; -BEGIN - FOR role_name IN SELECT rolname FROM pg_roles WHERE pg_has_role(rolname, 'neon_superuser', 'member') - LOOP - RAISE NOTICE 'EXECUTING ALTER ROLE % INHERIT', quote_ident(role_name); - EXECUTE 'ALTER ROLE ' || quote_ident(role_name) || ' INHERIT'; - END LOOP; - - FOR role_name IN SELECT rolname FROM pg_roles - WHERE - NOT pg_has_role(rolname, 'neon_superuser', 'member') AND NOT starts_with(rolname, 'pg_') - LOOP - RAISE NOTICE 'EXECUTING ALTER ROLE % NOBYPASSRLS', quote_ident(role_name); - EXECUTE 'ALTER ROLE ' || quote_ident(role_name) || ' NOBYPASSRLS'; - END LOOP; -END $$; -"#, - r#" -DO $$ -BEGIN - IF (SELECT setting::numeric >= 160000 FROM pg_settings WHERE name = 'server_version_num') THEN - EXECUTE 'GRANT pg_create_subscription TO neon_superuser'; - END IF; -END -$$;"#, - "GRANT pg_monitor TO neon_superuser WITH ADMIN OPTION", - // Don't remove: these are some SQLs that we originally applied in migrations but turned out to execute somewhere else. - "", - "", - "", - "", - "", - // Add new migrations below. + include_str!("./migrations/0000-neon_superuser_bypass_rls.sql"), + include_str!("./migrations/0001-alter_roles.sql"), + include_str!("./migrations/0002-grant_pg_create_subscription_to_neon_superuser.sql"), + include_str!("./migrations/0003-grant_pg_monitor_to_neon_superuser.sql"), + include_str!("./migrations/0004-grant_all_on_tables_to_neon_superuser.sql"), + include_str!("./migrations/0005-grant_all_on_sequences_to_neon_superuser.sql"), + include_str!( + "./migrations/0006-grant_all_on_tables_to_neon_superuser_with_grant_option.sql" + ), + include_str!( + "./migrations/0007-grant_all_on_sequences_to_neon_superuser_with_grant_option.sql" + ), + include_str!("./migrations/0008-revoke_replication_for_previously_allowed_roles.sql"), ]; let mut func = || { @@ -847,10 +824,13 @@ $$;"#, while current_migration < migrations.len() { let migration = &migrations[current_migration]; - if migration.is_empty() { - info!("Skip migration id={}", current_migration); + if migration.starts_with("-- SKIP") { + info!("Skipping migration id={}", current_migration); } else { - info!("Running migration:\n{}\n", migration); + info!( + "Running migration id={}:\n{}\n", + current_migration, migration + ); client.simple_query(migration).with_context(|| { format!("handle_migrations current_migration={}", current_migration) })?; From 3b647cd55d7254945718227f7849a67813192fc6 Mon Sep 17 00:00:00 2001 From: Rahul Patil Date: Fri, 7 Jun 2024 19:28:10 +0200 Subject: [PATCH 151/220] Include openssl and ICU statically linked (#7956) ## Problem Due to the upcoming End of Life (EOL) for Debian 11, we need to upgrade the base OS for Pageservers from Debian 11 to Debian 12 for security reasons. 
When deploying a new Pageserver on Debian 12 with the same binary built on Debian 11, we encountered the following errors: ``` could not execute operation: pageserver error, status: 500, msg: Command failed with status ExitStatus(unix_wait_status(32512)): /usr/local/neon/v16/bin/initdb: error while loading shared libraries: libicuuc.so.67: cannot open shared object file: No such file or directory ``` and ``` could not execute operation: pageserver error, status: 500, msg: Command failed with status ExitStatus(unix_wait_status(32512)): /usr/local/neon/v14/bin/initdb: error while loading shared libraries: libssl.so.1.1: cannot open shared object file: No such file or directory ``` These issues occur when creating new projects. ## Summary of changes - To address these issues, we configured PostgreSQL build to use statically linked OpenSSL and ICU libraries. - This resolves the missing shared library errors when running the binaries on Debian 12. Closes: https://github.com/neondatabase/cloud/issues/12648 ## Checklist before requesting a review - [x] I have performed a self-review of my code. - [ ] If it is a core feature, I have added thorough tests. - [ ] Do we need to implement analytics? if so did you add the relevant metrics to the dashboard? - [ ] If this PR requires public announcement, mark it with /release-notes label and add several sentences in this section. ## Checklist before merging - [x] Do not forget to reformat commit message to not include the above checklist --- .github/workflows/build_and_test.yml | 6 +++--- Dockerfile | 2 -- Dockerfile.build-tools | 32 ++++++++++++++++++++++++++++ Makefile | 15 ++++++++++++- 4 files changed, 49 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index b9caf76060..1fc0fbb0b6 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -299,21 +299,21 @@ jobs: uses: actions/cache@v4 with: path: pg_install/v14 - key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v14_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }} + key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v14_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}-${{ hashFiles('Dockerfile.build-tools') }} - name: Cache postgres v15 build id: cache_pg_15 uses: actions/cache@v4 with: path: pg_install/v15 - key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v15_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }} + key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v15_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}-${{ hashFiles('Dockerfile.build-tools') }} - name: Cache postgres v16 build id: cache_pg_16 uses: actions/cache@v4 with: path: pg_install/v16 - key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v16_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }} + key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v16_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}-${{ hashFiles('Dockerfile.build-tools') }} - name: Build postgres v14 if: steps.cache_pg_14.outputs.cache-hit != 'true' diff --git a/Dockerfile b/Dockerfile index 5f82df3e18..b4900d4a94 100644 --- a/Dockerfile +++ b/Dockerfile @@ -69,8 +69,6 @@ RUN set -e \ && apt install -y \ libreadline-dev \ libseccomp-dev \ - libicu67 \ - openssl \ ca-certificates \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \ && useradd -d /data neon \ diff --git a/Dockerfile.build-tools b/Dockerfile.build-tools index 460b8c996d..91194eda1a 100644 --- 
a/Dockerfile.build-tools +++ b/Dockerfile.build-tools @@ -112,6 +112,35 @@ RUN for package in Capture::Tiny DateTime Devel::Cover Digest::MD5 File::Spec JS && make install \ && rm -rf ../lcov.tar.gz +# Compile and install the static OpenSSL library +ENV OPENSSL_VERSION=3.2.2 +ENV OPENSSL_PREFIX=/usr/local/openssl +RUN wget -O /tmp/openssl-${OPENSSL_VERSION}.tar.gz https://www.openssl.org/source/openssl-${OPENSSL_VERSION}.tar.gz && \ + echo "197149c18d9e9f292c43f0400acaba12e5f52cacfe050f3d199277ea738ec2e7 /tmp/openssl-${OPENSSL_VERSION}.tar.gz" | sha256sum --check && \ + cd /tmp && \ + tar xzvf /tmp/openssl-${OPENSSL_VERSION}.tar.gz && \ + rm /tmp/openssl-${OPENSSL_VERSION}.tar.gz && \ + cd /tmp/openssl-${OPENSSL_VERSION} && \ + ./config --prefix=${OPENSSL_PREFIX} -static --static no-shared -fPIC && \ + make ${MAKE_ARGS} && \ + make install && \ + cd /tmp && \ + rm -rf /tmp/openssl-${OPENSSL_VERSION} + +# Set the ICU version +ENV ICU_VERSION=72.1 +ENV ICU_PREFIX=/usr/local/icu + +# Download and build static ICU +RUN wget https://github.com/unicode-org/icu/releases/download/release-${ICU_VERSION//./-}/icu4c-${ICU_VERSION//./_}-src.tgz && \ + tar -xzf icu4c-${ICU_VERSION//./_}-src.tgz && \ + cd icu/source && \ + ./configure --prefix=${ICU_PREFIX} --enable-static --enable-shared=no CXXFLAGS="-fPIC" CFLAGS="-fPIC" && \ + make && \ + make install && \ + cd ../.. && \ + rm -rf icu icu4c-${ICU_VERSION//./_}-src.tgz + # Switch to nonroot user USER nonroot:nonroot WORKDIR /home/nonroot @@ -170,3 +199,6 @@ RUN whoami \ && rustup --version --verbose \ && rustc --version --verbose \ && clang --version + +# Set following flag to check in Makefile if its running in Docker +RUN touch /home/nonroot/.docker_build diff --git a/Makefile b/Makefile index dcbfdbcbc1..b5f426344e 100644 --- a/Makefile +++ b/Makefile @@ -3,6 +3,9 @@ ROOT_PROJECT_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) # Where to install Postgres, default is ./pg_install, maybe useful for package managers POSTGRES_INSTALL_DIR ?= $(ROOT_PROJECT_DIR)/pg_install/ +OPENSSL_PREFIX_DIR := /usr/local/openssl +ICU_PREFIX_DIR := /usr/local/icu + # # We differentiate between release / debug build types using the BUILD_TYPE # environment variable. 
@@ -20,6 +23,16 @@ else
 	$(error Bad build type '$(BUILD_TYPE)', see Makefile for options)
 endif
+ifeq ($(shell test -e /home/nonroot/.docker_build && echo -n yes),yes)
+	# Exclude static build openssl, icu for local build (MacOS, Linux)
+	# Only keep for build type release and debug
+	PG_CFLAGS += -I$(OPENSSL_PREFIX_DIR)/include
+	PG_CONFIGURE_OPTS += --with-icu
+	PG_CONFIGURE_OPTS += ICU_CFLAGS='-I/$(ICU_PREFIX_DIR)/include -DU_STATIC_IMPLEMENTATION'
+	PG_CONFIGURE_OPTS += ICU_LIBS='-L$(ICU_PREFIX_DIR)/lib -L$(ICU_PREFIX_DIR)/lib64 -licui18n -licuuc -licudata -lstdc++ -Wl,-Bdynamic -lm'
+	PG_CONFIGURE_OPTS += LDFLAGS='-L$(OPENSSL_PREFIX_DIR)/lib -L$(OPENSSL_PREFIX_DIR)/lib64 -L$(ICU_PREFIX_DIR)/lib -L$(ICU_PREFIX_DIR)/lib64 -Wl,-Bstatic -lssl -lcrypto -Wl,-Bdynamic -lrt -lm -ldl -lpthread'
+endif
+
 UNAME_S := $(shell uname -s)
 ifeq ($(UNAME_S),Linux)
 	# Seccomp BPF is only available for Linux
@@ -28,7 +41,7 @@ else ifeq ($(UNAME_S),Darwin)
 	ifndef DISABLE_HOMEBREW
 		# macOS with brew-installed openssl requires explicit paths
 		# It can be configured with OPENSSL_PREFIX variable
-		OPENSSL_PREFIX ?= $(shell brew --prefix openssl@3)
+		OPENSSL_PREFIX := $(shell brew --prefix openssl@3)
 		PG_CONFIGURE_OPTS += --with-includes=$(OPENSSL_PREFIX)/include --with-libraries=$(OPENSSL_PREFIX)/lib
 		PG_CONFIGURE_OPTS += PKG_CONFIG_PATH=$(shell brew --prefix icu4c)/lib/pkgconfig
 		# macOS already has bison and flex in the system, but they are old and result in postgres-v14 target failure

From 3e63d0f9e0532d0e49668bf41b2534ffcee0ca2a Mon Sep 17 00:00:00 2001
From: Alex Chi Z
Date: Mon, 10 Jun 2024 04:42:13 -0400
Subject: [PATCH 152/220] test(pageserver): quantify compaction outcome (#7867)

A simple API to collect some statistics after compaction to easily understand
the result. The tool reads the layer map and analyzes it range by range,
instead of doing single-key operations, which is more efficient than running a
benchmark to collect the result.

It currently computes two key metrics:

* Latest data access efficiency, which finds how many delta layers / image
  layers the system needs to iterate before returning any key in a key range.
* (Approximate) PiTR efficiency, as in
  https://github.com/neondatabase/neon/issues/7770, which is simply the number
  of delta files in the range. The reason behind that is that, assuming no
  image layer is created, PiTR efficiency is simply the cost of collecting
  records from the delta layers plus the replay time. The number of delta
  files (or, in the future, the estimated size of reads) is a simple yet
  effective way of estimating how much effort the page server needs to
  reconstruct a page.
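(As a concrete, purely illustrative reading of these metrics: assuming a key range covered by one image layer with three newer delta layers on top of it, and seven delta layers overlapping the range in total, the endpoint would report has_image = true, num_of_deltas_above_image = 3, roughly the work needed to serve the latest pages, and total_num_of_deltas = 7 as the approximate PiTR cost for that range.)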
Signed-off-by: Alex Chi Z --- pageserver/src/http/routes.rs | 23 +++++ pageserver/src/tenant/timeline.rs | 1 + pageserver/src/tenant/timeline/analysis.rs | 90 +++++++++++++++++++ .../src/tenant/timeline/layer_manager.rs | 5 ++ test_runner/fixtures/pageserver/http.py | 15 ++++ test_runner/performance/test_gc_feedback.py | 17 ++++ 6 files changed, 151 insertions(+) create mode 100644 pageserver/src/tenant/timeline/analysis.rs diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index 19bc88fbc7..12d02c52fe 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -2429,6 +2429,25 @@ async fn list_aux_files( json_response(StatusCode::OK, files) } +async fn perf_info( + request: Request, + _cancel: CancellationToken, +) -> Result, ApiError> { + let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?; + let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?; + check_permission(&request, Some(tenant_shard_id.tenant_id))?; + + let state = get_state(&request); + + let timeline = + active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id) + .await?; + + let result = timeline.perf_info().await; + + json_response(StatusCode::OK, result) +} + async fn ingest_aux_files( mut request: Request, _cancel: CancellationToken, @@ -2856,5 +2875,9 @@ pub fn make_router( |r| testing_api_handler("list_aux_files", r, list_aux_files), ) .post("/v1/top_tenants", |r| api_handler(r, post_top_tenants)) + .post( + "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/perf_info", + |r| testing_api_handler("perf_info", r, perf_info), + ) .any(handler_404)) } diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 32cf7be0f7..388d5b9d54 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -1,3 +1,4 @@ +pub(crate) mod analysis; mod compaction; pub mod delete; pub(crate) mod detach_ancestor; diff --git a/pageserver/src/tenant/timeline/analysis.rs b/pageserver/src/tenant/timeline/analysis.rs new file mode 100644 index 0000000000..cd61418f3d --- /dev/null +++ b/pageserver/src/tenant/timeline/analysis.rs @@ -0,0 +1,90 @@ +use std::{collections::BTreeSet, ops::Range}; + +use utils::lsn::Lsn; + +use super::Timeline; + +#[derive(serde::Serialize)] +pub(crate) struct RangeAnalysis { + start: String, + end: String, + has_image: bool, + num_of_deltas_above_image: usize, + total_num_of_deltas: usize, +} + +impl Timeline { + pub(crate) async fn perf_info(&self) -> Vec { + // First, collect all split points of the layers. + let mut split_points = BTreeSet::new(); + let mut delta_ranges = Vec::new(); + let mut image_ranges = Vec::new(); + + let all_layer_files = { + let guard = self.layers.read().await; + guard.all_persistent_layers() + }; + let lsn = self.get_last_record_lsn(); + + for key in all_layer_files { + split_points.insert(key.key_range.start); + split_points.insert(key.key_range.end); + if key.is_delta { + delta_ranges.push((key.key_range.clone(), key.lsn_range.clone())); + } else { + image_ranges.push((key.key_range.clone(), key.lsn_range.start)); + } + } + + // For each split range, compute the estimated read amplification. + let split_points = split_points.into_iter().collect::>(); + + let mut result = Vec::new(); + + for i in 0..(split_points.len() - 1) { + let start = split_points[i]; + let end = split_points[i + 1]; + // Find the latest image layer that contains the information. 
+ let mut maybe_image_layers = image_ranges + .iter() + // We insert split points for all image layers, and therefore a `contains` check for the start point should be enough. + .filter(|(key_range, img_lsn)| key_range.contains(&start) && img_lsn <= &lsn) + .cloned() + .collect::>(); + maybe_image_layers.sort_by(|a, b| a.1.cmp(&b.1)); + let image_layer = maybe_image_layers.last().cloned(); + let lsn_filter_start = image_layer + .as_ref() + .map(|(_, lsn)| *lsn) + .unwrap_or(Lsn::INVALID); + + fn overlaps_with(lsn_range_a: &Range, lsn_range_b: &Range) -> bool { + !(lsn_range_a.end <= lsn_range_b.start || lsn_range_a.start >= lsn_range_b.end) + } + + let maybe_delta_layers = delta_ranges + .iter() + .filter(|(key_range, lsn_range)| { + key_range.contains(&start) && overlaps_with(&(lsn_filter_start..lsn), lsn_range) + }) + .cloned() + .collect::>(); + + let pitr_delta_layers = delta_ranges + .iter() + .filter(|(key_range, _)| key_range.contains(&start)) + .cloned() + .collect::>(); + + result.push(RangeAnalysis { + start: start.to_string(), + end: end.to_string(), + has_image: image_layer.is_some(), + num_of_deltas_above_image: maybe_delta_layers.len(), + total_num_of_deltas: pitr_delta_layers.len(), + }); + } + + result + } +} diff --git a/pageserver/src/tenant/timeline/layer_manager.rs b/pageserver/src/tenant/timeline/layer_manager.rs index b78c98a506..0e82dedecb 100644 --- a/pageserver/src/tenant/timeline/layer_manager.rs +++ b/pageserver/src/tenant/timeline/layer_manager.rs @@ -1,4 +1,5 @@ use anyhow::{bail, ensure, Context, Result}; +use itertools::Itertools; use pageserver_api::shard::TenantShardId; use std::{collections::HashMap, sync::Arc}; use tracing::trace; @@ -308,6 +309,10 @@ impl LayerManager { pub(crate) fn contains(&self, layer: &Layer) -> bool { self.layer_fmgr.contains(layer) } + + pub(crate) fn all_persistent_layers(&self) -> Vec { + self.layer_fmgr.0.keys().cloned().collect_vec() + } } pub(crate) struct LayerFileManager(HashMap); diff --git a/test_runner/fixtures/pageserver/http.py b/test_runner/fixtures/pageserver/http.py index 08bf66058a..d5441bd694 100644 --- a/test_runner/fixtures/pageserver/http.py +++ b/test_runner/fixtures/pageserver/http.py @@ -923,3 +923,18 @@ class PageserverHttpClient(requests.Session, MetricsGetter): ) self.verbose_error(res) return res.json() # type: ignore + + def perf_info( + self, + tenant_id: Union[TenantId, TenantShardId], + timeline_id: TimelineId, + ): + self.is_testing_enabled_or_skip() + + log.info(f"Requesting perf info: tenant {tenant_id}, timeline {timeline_id}") + res = self.post( + f"http://localhost:{self.port}/v1/tenant/{tenant_id}/timeline/{timeline_id}/perf_info", + ) + log.info(f"Got perf info response code: {res.status_code}") + self.verbose_error(res) + return res.json() diff --git a/test_runner/performance/test_gc_feedback.py b/test_runner/performance/test_gc_feedback.py index be56203b26..9a03994b29 100644 --- a/test_runner/performance/test_gc_feedback.py +++ b/test_runner/performance/test_gc_feedback.py @@ -75,12 +75,29 @@ def test_gc_feedback(neon_env_builder: NeonEnvBuilder, zenbenchmark: NeonBenchma physical_size = client.timeline_detail(tenant_id, timeline_id)["current_physical_size"] log.info(f"Physical storage size {physical_size}") + max_num_of_deltas_above_image = 0 + max_total_num_of_deltas = 0 + for key_range in client.perf_info(tenant_id, timeline_id): + max_total_num_of_deltas = max(max_total_num_of_deltas, key_range["total_num_of_deltas"]) + max_num_of_deltas_above_image = max( + 
max_num_of_deltas_above_image, key_range["num_of_deltas_above_image"] + ) + MB = 1024 * 1024 zenbenchmark.record("logical_size", logical_size // MB, "Mb", MetricReport.LOWER_IS_BETTER) zenbenchmark.record("physical_size", physical_size // MB, "Mb", MetricReport.LOWER_IS_BETTER) zenbenchmark.record( "physical/logical ratio", physical_size / logical_size, "", MetricReport.LOWER_IS_BETTER ) + zenbenchmark.record( + "max_total_num_of_deltas", max_total_num_of_deltas, "", MetricReport.LOWER_IS_BETTER + ) + zenbenchmark.record( + "max_num_of_deltas_above_image", + max_num_of_deltas_above_image, + "", + MetricReport.LOWER_IS_BETTER, + ) layer_map_path = env.repo_dir / "layer-map.json" log.info(f"Writing layer map to {layer_map_path}") From ae5badd375e284ed6098503c3e4ead09995b902f Mon Sep 17 00:00:00 2001 From: Christian Schwarz Date: Mon, 10 Jun 2024 13:20:20 +0200 Subject: [PATCH 153/220] Revert "Include openssl and ICU statically linked" (#8003) Reverts neondatabase/neon#7956 Rationale: compute incompatibilties Slack thread: https://neondb.slack.com/archives/C033RQ5SPDH/p1718011276665839?thread_ts=1718008160.431869&cid=C033RQ5SPDH Relevant quotes from @hlinnaka > If we go through with the current release candidate, but the compute is pinned, people who create new projects will get that warning, which is silly. To them, it looks like the ICU version was downgraded, because initdb was run with newer version. > We should upgrade the ICU version eventually. And when we do that, users with old projects that use ICU will start to see that warning. I think that's acceptable, as long as we do homework, notify users, and communicate that properly. > When do that, we should to try to upgrade the storage and compute versions at roughly the same time. --- .github/workflows/build_and_test.yml | 6 +++--- Dockerfile | 2 ++ Dockerfile.build-tools | 32 ---------------------------- Makefile | 15 +------------ 4 files changed, 6 insertions(+), 49 deletions(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 1fc0fbb0b6..b9caf76060 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -299,21 +299,21 @@ jobs: uses: actions/cache@v4 with: path: pg_install/v14 - key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v14_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}-${{ hashFiles('Dockerfile.build-tools') }} + key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v14_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }} - name: Cache postgres v15 build id: cache_pg_15 uses: actions/cache@v4 with: path: pg_install/v15 - key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v15_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}-${{ hashFiles('Dockerfile.build-tools') }} + key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v15_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }} - name: Cache postgres v16 build id: cache_pg_16 uses: actions/cache@v4 with: path: pg_install/v16 - key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v16_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }}-${{ hashFiles('Dockerfile.build-tools') }} + key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v16_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }} - name: Build postgres v14 if: steps.cache_pg_14.outputs.cache-hit != 'true' diff --git a/Dockerfile b/Dockerfile index b4900d4a94..5f82df3e18 100644 --- a/Dockerfile +++ b/Dockerfile @@ -69,6 +69,8 @@ RUN set -e \ && apt install 
-y \ libreadline-dev \ libseccomp-dev \ + libicu67 \ + openssl \ ca-certificates \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \ && useradd -d /data neon \ diff --git a/Dockerfile.build-tools b/Dockerfile.build-tools index 91194eda1a..460b8c996d 100644 --- a/Dockerfile.build-tools +++ b/Dockerfile.build-tools @@ -112,35 +112,6 @@ RUN for package in Capture::Tiny DateTime Devel::Cover Digest::MD5 File::Spec JS && make install \ && rm -rf ../lcov.tar.gz -# Compile and install the static OpenSSL library -ENV OPENSSL_VERSION=3.2.2 -ENV OPENSSL_PREFIX=/usr/local/openssl -RUN wget -O /tmp/openssl-${OPENSSL_VERSION}.tar.gz https://www.openssl.org/source/openssl-${OPENSSL_VERSION}.tar.gz && \ - echo "197149c18d9e9f292c43f0400acaba12e5f52cacfe050f3d199277ea738ec2e7 /tmp/openssl-${OPENSSL_VERSION}.tar.gz" | sha256sum --check && \ - cd /tmp && \ - tar xzvf /tmp/openssl-${OPENSSL_VERSION}.tar.gz && \ - rm /tmp/openssl-${OPENSSL_VERSION}.tar.gz && \ - cd /tmp/openssl-${OPENSSL_VERSION} && \ - ./config --prefix=${OPENSSL_PREFIX} -static --static no-shared -fPIC && \ - make ${MAKE_ARGS} && \ - make install && \ - cd /tmp && \ - rm -rf /tmp/openssl-${OPENSSL_VERSION} - -# Set the ICU version -ENV ICU_VERSION=72.1 -ENV ICU_PREFIX=/usr/local/icu - -# Download and build static ICU -RUN wget https://github.com/unicode-org/icu/releases/download/release-${ICU_VERSION//./-}/icu4c-${ICU_VERSION//./_}-src.tgz && \ - tar -xzf icu4c-${ICU_VERSION//./_}-src.tgz && \ - cd icu/source && \ - ./configure --prefix=${ICU_PREFIX} --enable-static --enable-shared=no CXXFLAGS="-fPIC" CFLAGS="-fPIC" && \ - make && \ - make install && \ - cd ../.. && \ - rm -rf icu icu4c-${ICU_VERSION//./_}-src.tgz - # Switch to nonroot user USER nonroot:nonroot WORKDIR /home/nonroot @@ -199,6 +170,3 @@ RUN whoami \ && rustup --version --verbose \ && rustc --version --verbose \ && clang --version - -# Set following flag to check in Makefile if its running in Docker -RUN touch /home/nonroot/.docker_build diff --git a/Makefile b/Makefile index b5f426344e..dcbfdbcbc1 100644 --- a/Makefile +++ b/Makefile @@ -3,9 +3,6 @@ ROOT_PROJECT_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) # Where to install Postgres, default is ./pg_install, maybe useful for package managers POSTGRES_INSTALL_DIR ?= $(ROOT_PROJECT_DIR)/pg_install/ -OPENSSL_PREFIX_DIR := /usr/local/openssl -ICU_PREFIX_DIR := /usr/local/icu - # # We differentiate between release / debug build types using the BUILD_TYPE # environment variable. 
@@ -23,16 +20,6 @@ else $(error Bad build type '$(BUILD_TYPE)', see Makefile for options) endif -ifeq ($(shell test -e /home/nonroot/.docker_build && echo -n yes),yes) - # Exclude static build openssl, icu for local build (MacOS, Linux) - # Only keep for build type release and debug - PG_CFLAGS += -I$(OPENSSL_PREFIX_DIR)/include - PG_CONFIGURE_OPTS += --with-icu - PG_CONFIGURE_OPTS += ICU_CFLAGS='-I/$(ICU_PREFIX_DIR)/include -DU_STATIC_IMPLEMENTATION' - PG_CONFIGURE_OPTS += ICU_LIBS='-L$(ICU_PREFIX_DIR)/lib -L$(ICU_PREFIX_DIR)/lib64 -licui18n -licuuc -licudata -lstdc++ -Wl,-Bdynamic -lm' - PG_CONFIGURE_OPTS += LDFLAGS='-L$(OPENSSL_PREFIX_DIR)/lib -L$(OPENSSL_PREFIX_DIR)/lib64 -L$(ICU_PREFIX_DIR)/lib -L$(ICU_PREFIX_DIR)/lib64 -Wl,-Bstatic -lssl -lcrypto -Wl,-Bdynamic -lrt -lm -ldl -lpthread' -endif - UNAME_S := $(shell uname -s) ifeq ($(UNAME_S),Linux) # Seccomp BPF is only available for Linux @@ -41,7 +28,7 @@ else ifeq ($(UNAME_S),Darwin) ifndef DISABLE_HOMEBREW # macOS with brew-installed openssl requires explicit paths # It can be configured with OPENSSL_PREFIX variable - OPENSSL_PREFIX := $(shell brew --prefix openssl@3) + OPENSSL_PREFIX ?= $(shell brew --prefix openssl@3) PG_CONFIGURE_OPTS += --with-includes=$(OPENSSL_PREFIX)/include --with-libraries=$(OPENSSL_PREFIX)/lib PG_CONFIGURE_OPTS += PKG_CONFIG_PATH=$(shell brew --prefix icu4c)/lib/pkgconfig # macOS already has bison and flex in the system, but they are old and result in postgres-v14 target failure From 5a7e285c2c98d0ae15c6e2d7059881bf52a23027 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Mon, 10 Jun 2024 15:52:49 +0300 Subject: [PATCH 154/220] Simplify scanning compute logs in tests (#7997) Implement LogUtils in the Endpoint fixture class, so that the "log_contains" function can be used on compute logs too. Per discussion at: https://github.com/neondatabase/neon/pull/7288#discussion_r1623633803 --- test_runner/fixtures/neon_fixtures.py | 3 ++- test_runner/regress/test_hot_standby.py | 21 +++++---------------- test_runner/regress/test_migrations.py | 10 ++-------- 3 files changed, 9 insertions(+), 25 deletions(-) diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index a25b8bfca1..6fdad2188c 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -3386,7 +3386,7 @@ def static_proxy( yield proxy -class Endpoint(PgProtocol): +class Endpoint(PgProtocol, LogUtils): """An object representing a Postgres compute endpoint managed by the control plane.""" def __init__( @@ -3452,6 +3452,7 @@ class Endpoint(PgProtocol): ) path = Path("endpoints") / self.endpoint_id / "pgdata" self.pgdata_dir = os.path.join(self.env.repo_dir, path) + self.logfile = self.endpoint_path() / "compute.log" config_lines = config_lines or [] diff --git a/test_runner/regress/test_hot_standby.py b/test_runner/regress/test_hot_standby.py index cf7a1c56ee..1d1b2fb485 100644 --- a/test_runner/regress/test_hot_standby.py +++ b/test_runner/regress/test_hot_standby.py @@ -1,6 +1,5 @@ import asyncio import os -import re import threading import time from functools import partial @@ -18,20 +17,6 @@ from fixtures.neon_fixtures import ( from fixtures.utils import wait_until -# Check for corrupted WAL messages which might otherwise go unnoticed if -# reconnection fixes this. 
-def scan_standby_log_for_errors(secondary): - log_path = secondary.endpoint_path() / "compute.log" - with log_path.open("r") as f: - markers = re.compile( - r"incorrect resource manager data|record with incorrect|invalid magic number|unexpected pageaddr" - ) - for line in f: - if markers.search(line): - log.info(f"bad error in standby log: {line}") - raise AssertionError() - - def test_hot_standby(neon_simple_env: NeonEnv): env = neon_simple_env @@ -91,7 +76,11 @@ def test_hot_standby(neon_simple_env: NeonEnv): assert response is not None assert response == responses[query] - scan_standby_log_for_errors(secondary) + # Check for corrupted WAL messages which might otherwise go unnoticed if + # reconnection fixes this. + assert not secondary.log_contains( + "incorrect resource manager data|record with incorrect|invalid magic number|unexpected pageaddr" + ) # clean up if slow_down_send: diff --git a/test_runner/regress/test_migrations.py b/test_runner/regress/test_migrations.py index 526ae14b87..5637f160cf 100644 --- a/test_runner/regress/test_migrations.py +++ b/test_runner/regress/test_migrations.py @@ -8,8 +8,6 @@ def test_migrations(neon_simple_env: NeonEnv): env.neon_cli.create_branch("test_migrations", "empty") endpoint = env.endpoints.create("test_migrations") - log_path = endpoint.endpoint_path() / "compute.log" - endpoint.respec(skip_pg_catalog_updates=False) endpoint.start() @@ -22,9 +20,7 @@ def test_migrations(neon_simple_env: NeonEnv): migration_id = cur.fetchall() assert migration_id[0][0] == num_migrations - with open(log_path, "r") as log_file: - logs = log_file.read() - assert f"INFO handle_migrations: Ran {num_migrations} migrations" in logs + endpoint.assert_log_contains(f"INFO handle_migrations: Ran {num_migrations} migrations") endpoint.stop() endpoint.start() @@ -36,6 +32,4 @@ def test_migrations(neon_simple_env: NeonEnv): migration_id = cur.fetchall() assert migration_id[0][0] == num_migrations - with open(log_path, "r") as log_file: - logs = log_file.read() - assert "INFO handle_migrations: Ran 0 migrations" in logs + endpoint.assert_log_contains("INFO handle_migrations: Ran 0 migrations") From b52e31c1a42e186d578975cce632bf244c5f2957 Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Mon, 10 Jun 2024 16:50:17 +0300 Subject: [PATCH 155/220] fix: allow layer flushes more often (#7927) As seen with the pgvector 0.7.0 index builds, we can receive large batches of images, leading to very large L0 layers in the range of 1GB. These large layers are produced because we are only able to roll the layer after we have witnessed two different Lsns in a single `DataDirModification::commit`. As the single Lsn batches of images can span over multiple `DataDirModification` lifespans, we will rarely get to write two different Lsns in a single `put_batch` currently. The solution is to remember the TimelineWriterState instead of eagerly forgetting it until we really open the next layer or someone else flushes (while holding the write_guard). Additional changes are test fixes to avoid "initdb image layer optimization" or ignoring initdb layers for assertion. 
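To make the rolling rule concrete, here is a minimal, self-contained sketch (deliberately not the real pageserver types; `WriterState`, `put_batch` and the constants are illustrative assumptions). It models the behaviour described above: writer state survives across batches, and a roll only fires at an LSN boundary once the projected layer size reaches `checkpoint_distance`, or when the open layer has been open for too long.

```rust
use std::time::{Duration, Instant};

// Illustrative values, not the real defaults.
const CHECKPOINT_DISTANCE: u64 = 256 * 1024 * 1024;
const CHECKPOINT_TIMEOUT: Duration = Duration::from_secs(600);

/// Writer state that is remembered across batches instead of being dropped
/// after every commit, so single-LSN batches can still add up to a roll.
struct WriterState {
    current_size: u64,
    prev_lsn: Option<u64>,
    opened_at: Instant,
}

impl WriterState {
    fn new() -> Self {
        Self { current_size: 0, prev_lsn: None, opened_at: Instant::now() }
    }

    /// Record a batch written at `lsn`; return true if the open layer should
    /// be rolled (frozen and flushed) now.
    fn put_batch(&mut self, lsn: u64, batch_bytes: u64) -> bool {
        let projected = self.current_size + batch_bytes;
        // Rolling mid-LSN is not supported downstream, so only roll when this
        // batch starts a new LSN compared to the previous one we saw.
        let at_lsn_boundary = self.prev_lsn.map_or(false, |prev| prev != lsn);
        let should_roll = (projected >= CHECKPOINT_DISTANCE && at_lsn_boundary)
            || (self.current_size > 0 && self.opened_at.elapsed() >= CHECKPOINT_TIMEOUT);
        self.current_size = projected;
        self.prev_lsn = Some(lsn);
        should_roll
    }
}

fn main() {
    let mut state = WriterState::new();
    // Many large single-LSN image batches, as produced by an index build.
    for lsn in 0..8u64 {
        let roll = state.put_batch(lsn, 64 * 1024 * 1024);
        println!("lsn={lsn} projected={} roll={roll}", state.current_size);
        if roll {
            // Freeze and flush the open layer, then start a new one.
            state = WriterState::new();
        }
    }
}
```

With 64 MiB single-LSN batches and a 256 MiB distance, the sketch rolls at the fourth distinct LSN even though no individual batch ever spans two LSNs, which is the behaviour this change restores for pgvector-style ingestion.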
Cc: #7197 because small `checkpoint_distance` will now trigger the "initdb image layer optimization" --- .../tenant/storage_layer/inmemory_layer.rs | 2 +- pageserver/src/tenant/timeline.rs | 40 +++-- test_runner/fixtures/pageserver/utils.py | 2 +- test_runner/fixtures/utils.py | 17 ++ .../regress/test_disk_usage_eviction.py | 22 +-- .../regress/test_ingestion_layer_size.py | 151 ++++++++++++++++++ .../regress/test_pageserver_secondary.py | 1 + test_runner/regress/test_s3_restore.py | 18 ++- test_runner/regress/test_sharding.py | 29 +++- 9 files changed, 245 insertions(+), 37 deletions(-) create mode 100644 test_runner/regress/test_ingestion_layer_size.py diff --git a/pageserver/src/tenant/storage_layer/inmemory_layer.rs b/pageserver/src/tenant/storage_layer/inmemory_layer.rs index 9553f83026..1ecc56ce99 100644 --- a/pageserver/src/tenant/storage_layer/inmemory_layer.rs +++ b/pageserver/src/tenant/storage_layer/inmemory_layer.rs @@ -52,7 +52,7 @@ pub struct InMemoryLayer { /// Frozen layers have an exclusive end LSN. /// Writes are only allowed when this is `None`. - end_lsn: OnceLock, + pub(crate) end_lsn: OnceLock, /// Used for traversal path. Cached representation of the in-memory layer before frozen. local_path_str: Arc, diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 388d5b9d54..58bdd84906 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -322,6 +322,8 @@ pub struct Timeline { /// Locked automatically by [`TimelineWriter`] and checkpointer. /// Must always be acquired before the layer map/individual layer lock /// to avoid deadlock. + /// + /// The state is cleared upon freezing. write_lock: tokio::sync::Mutex>, /// Used to avoid multiple `flush_loop` tasks running @@ -1578,7 +1580,7 @@ impl Timeline { // an ephemeral layer open forever when idle. It also freezes layers if the global limit on // ephemeral layer bytes has been breached. pub(super) async fn maybe_freeze_ephemeral_layer(&self) { - let Ok(_write_guard) = self.write_lock.try_lock() else { + let Ok(mut write_guard) = self.write_lock.try_lock() else { // If the write lock is held, there is an active wal receiver: rolling open layers // is their responsibility while they hold this lock. return; @@ -1672,6 +1674,7 @@ impl Timeline { .await; } } + write_guard.take(); self.flush_frozen_layers(); } } @@ -2036,11 +2039,11 @@ impl Timeline { true } else if distance > 0 && opened_at.elapsed() >= self.get_checkpoint_timeout() { info!( - "Will roll layer at {} with layer size {} due to time since first write to the layer ({:?})", - projected_lsn, - layer_size, - opened_at.elapsed() - ); + "Will roll layer at {} with layer size {} due to time since first write to the layer ({:?})", + projected_lsn, + layer_size, + opened_at.elapsed() + ); true } else { @@ -3653,7 +3656,10 @@ impl Timeline { let _write_guard = if write_lock_held { None } else { - Some(self.write_lock.lock().await) + let mut g = self.write_lock.lock().await; + // remove the reference to an open layer + g.take(); + Some(g) }; let to_lsn = self.get_last_record_lsn(); @@ -5541,6 +5547,9 @@ impl Timeline { type TraversalPathItem = (ValueReconstructResult, Lsn, TraversalId); +/// Tracking writes ingestion does to a particular in-memory layer. +/// +/// Cleared upon freezing a layer. 
struct TimelineWriterState { open_layer: Arc, current_size: u64, @@ -5581,12 +5590,6 @@ impl Deref for TimelineWriter<'_> { } } -impl Drop for TimelineWriter<'_> { - fn drop(&mut self) { - self.write_guard.take(); - } -} - #[derive(PartialEq)] enum OpenLayerAction { Roll, @@ -5692,6 +5695,17 @@ impl<'a> TimelineWriter<'a> { return OpenLayerAction::Open; }; + if state.cached_last_freeze_at < self.tl.last_freeze_at.load() { + // TODO(#7993): branch is needed before refactoring the many places of freezing for the + // possibility `state` having a "dangling" reference to an already frozen in-memory + // layer. + assert!( + state.open_layer.end_lsn.get().is_some(), + "our open_layer must be outdated" + ); + return OpenLayerAction::Open; + } + if state.prev_lsn == Some(lsn) { // Rolling mid LSN is not supported by downstream code. // Hence, only roll at LSN boundaries. diff --git a/test_runner/fixtures/pageserver/utils.py b/test_runner/fixtures/pageserver/utils.py index 91435e8a1f..72384c138b 100644 --- a/test_runner/fixtures/pageserver/utils.py +++ b/test_runner/fixtures/pageserver/utils.py @@ -313,7 +313,7 @@ def assert_prefix_empty( # https://neon-github-public-dev.s3.amazonaws.com/reports/pr-5322/6207777020/index.html#suites/3556ed71f2d69272a7014df6dcb02317/53b5c368b5a68865 # this seems like a mock_s3 issue log.warning( - f"contrading ListObjectsV2 response with KeyCount={keys} and Contents={objects}, CommonPrefixes={common_prefixes}, assuming this means KeyCount=0" + f"contradicting ListObjectsV2 response with KeyCount={keys} and Contents={objects}, CommonPrefixes={common_prefixes}, assuming this means KeyCount=0" ) keys = 0 elif keys != 0 and len(objects) == 0: diff --git a/test_runner/fixtures/utils.py b/test_runner/fixtures/utils.py index b55329e054..0989dc1893 100644 --- a/test_runner/fixtures/utils.py +++ b/test_runner/fixtures/utils.py @@ -582,3 +582,20 @@ class PropagatingThread(threading.Thread): if self.exc: raise self.exc return self.ret + + +def human_bytes(amt: float) -> str: + """ + Render a bytes amount into nice IEC bytes string. 
+ """ + + suffixes = ["", "Ki", "Mi", "Gi"] + + last = suffixes[-1] + + for name in suffixes: + if amt < 1024 or name == last: + return f"{int(round(amt))} {name}B" + amt = amt / 1024 + + raise RuntimeError("unreachable") diff --git a/test_runner/regress/test_disk_usage_eviction.py b/test_runner/regress/test_disk_usage_eviction.py index 7ae2352c06..7722828c79 100644 --- a/test_runner/regress/test_disk_usage_eviction.py +++ b/test_runner/regress/test_disk_usage_eviction.py @@ -17,7 +17,7 @@ from fixtures.neon_fixtures import ( from fixtures.pageserver.http import PageserverHttpClient from fixtures.pageserver.utils import wait_for_upload_queue_empty from fixtures.remote_storage import RemoteStorageKind -from fixtures.utils import wait_until +from fixtures.utils import human_bytes, wait_until GLOBAL_LRU_LOG_LINE = "tenant_min_resident_size-respecting LRU would not relieve pressure, evicting more following global LRU policy" @@ -218,19 +218,6 @@ def count_layers_per_tenant( return dict(ret) -def human_bytes(amt: float) -> str: - suffixes = ["", "Ki", "Mi", "Gi"] - - last = suffixes[-1] - - for name in suffixes: - if amt < 1024 or name == last: - return f"{int(round(amt))} {name}B" - amt = amt / 1024 - - raise RuntimeError("unreachable") - - def _eviction_env( request, neon_env_builder: NeonEnvBuilder, pg_bin: PgBin, num_pageservers: int ) -> EvictionEnv: @@ -294,7 +281,7 @@ def pgbench_init_tenant( "gc_period": "0s", "compaction_period": "0s", "checkpoint_distance": f"{layer_size}", - "image_creation_threshold": "100", + "image_creation_threshold": "999999", "compaction_target_size": f"{layer_size}", } ) @@ -668,11 +655,10 @@ def test_fast_growing_tenant(neon_env_builder: NeonEnvBuilder, pg_bin: PgBin, or finish_tenant_creation(env, tenant_id, timeline_id, min_expected_layers) tenant_layers = count_layers_per_tenant(env.pageserver, map(lambda x: x[0], timelines)) - (total_on_disk, _, _) = poor_mans_du(env, map(lambda x: x[0], timelines), env.pageserver, False) + (total_on_disk, _, _) = poor_mans_du(env, map(lambda x: x[0], timelines), env.pageserver, True) - # cut 10 percent response = env.pageserver.http_client().disk_usage_eviction_run( - {"evict_bytes": total_on_disk // 10, "eviction_order": order.config()} + {"evict_bytes": total_on_disk // 5, "eviction_order": order.config()} ) log.info(f"{response}") diff --git a/test_runner/regress/test_ingestion_layer_size.py b/test_runner/regress/test_ingestion_layer_size.py new file mode 100644 index 0000000000..44c77b3410 --- /dev/null +++ b/test_runner/regress/test_ingestion_layer_size.py @@ -0,0 +1,151 @@ +from dataclasses import dataclass +from typing import Iterable, List, Union + +import pytest +from fixtures.log_helper import log +from fixtures.neon_fixtures import NeonEnvBuilder, wait_for_last_flush_lsn +from fixtures.pageserver.http import HistoricLayerInfo, LayerMapInfo +from fixtures.utils import human_bytes + + +def test_ingesting_large_batches_of_images(neon_env_builder: NeonEnvBuilder, build_type: str): + """ + Build a non-small GIN index which includes similarly batched up images in WAL stream as does pgvector + to show that we no longer create oversized layers. 
+ """ + + if build_type == "debug": + pytest.skip("debug run is unnecessarily slow") + + minimum_initdb_size = 20 * 1024**2 + checkpoint_distance = 32 * 1024**2 + minimum_good_layer_size = checkpoint_distance * 0.9 + minimum_too_large_layer_size = 2 * checkpoint_distance + + # index size: 99MiB + rows = 2_500_000 + + # bucket lower limits + buckets = [0, minimum_initdb_size, minimum_good_layer_size, minimum_too_large_layer_size] + + assert ( + minimum_initdb_size < minimum_good_layer_size + ), "keep checkpoint_distance higher than the initdb size (find it by experimenting)" + + env = neon_env_builder.init_start( + initial_tenant_conf={ + "checkpoint_distance": f"{checkpoint_distance}", + "compaction_target_size": f"{checkpoint_distance}", + # this test is primarly interested in L0 sizes but we'll compact after ingestion to ensure sizes are good even then + "compaction_period": "0s", + "gc_period": "0s", + "compaction_threshold": "255", + "image_creation_threshold": "99999", + } + ) + + # build a larger than 3*checkpoint_distance sized gin index. + # gin index building exhibits the same behaviour as the pgvector with the two phase build + with env.endpoints.create_start("main") as ep, ep.cursor() as cur: + cur.execute( + f"create table int_array_test as select array_agg(g) as int_array from generate_series(1, {rows}) g group by g / 10;" + ) + cur.execute( + "create index int_array_test_gin_index on int_array_test using gin (int_array);" + ) + cur.execute("select pg_table_size('int_array_test_gin_index')") + size = cur.fetchone() + assert size is not None + assert isinstance(size[0], int) + log.info(f"gin index size: {human_bytes(size[0])}") + assert ( + size[0] > checkpoint_distance * 3 + ), f"gin index is not large enough: {human_bytes(size[0])}" + wait_for_last_flush_lsn(env, ep, env.initial_tenant, env.initial_timeline) + + ps_http = env.pageserver.http_client() + ps_http.timeline_checkpoint(env.initial_tenant, env.initial_timeline) + + infos = ps_http.layer_map_info(env.initial_tenant, env.initial_timeline) + assert len(infos.in_memory_layers) == 0, "should had flushed open layers" + post_ingest = histogram_historic_layers(infos, buckets) + + # describe first, assert later for easier debugging + log.info("non-cumulative layer size distribution after ingestion:") + print_layer_size_histogram(post_ingest) + + # since all we have are L0s, we should be getting nice L1s and images out of them now + ps_http.patch_tenant_config_client_side( + env.initial_tenant, + { + "compaction_threshold": 1, + "image_creation_threshold": 1, + }, + ) + + ps_http.timeline_compact(env.initial_tenant, env.initial_timeline, True, True) + + infos = ps_http.layer_map_info(env.initial_tenant, env.initial_timeline) + assert len(infos.in_memory_layers) == 0, "no new inmem layers expected" + post_compact = histogram_historic_layers(infos, buckets) + + log.info("non-cumulative layer size distribution after compaction:") + print_layer_size_histogram(post_compact) + + assert ( + post_ingest.counts[3] == 0 + ), f"there should be no layers larger than 2*checkpoint_distance ({human_bytes(2*checkpoint_distance)})" + assert post_ingest.counts[1] == 1, "expect one smaller layer for initdb" + assert ( + post_ingest.counts[0] <= 1 + ), "expect at most one tiny layer from shutting down the endpoint" + + # just make sure we don't have trouble splitting the layers apart + assert post_compact.counts[3] == 0 + + +@dataclass +class Histogram: + buckets: List[Union[int, float]] + counts: List[int] + sums: List[int] + + +def 
histogram_historic_layers( + infos: LayerMapInfo, minimum_sizes: List[Union[int, float]] +) -> Histogram: + def log_layer(layer: HistoricLayerInfo) -> HistoricLayerInfo: + log.info( + f"{layer.layer_file_name} {human_bytes(layer.layer_file_size)} ({layer.layer_file_size} bytes)" + ) + return layer + + layers = map(log_layer, infos.historic_layers) + sizes = (x.layer_file_size for x in layers) + return histogram(sizes, minimum_sizes) + + +def histogram(sizes: Iterable[int], minimum_sizes: List[Union[int, float]]) -> Histogram: + assert all(minimum_sizes[i] < minimum_sizes[i + 1] for i in range(len(minimum_sizes) - 1)) + buckets = list(enumerate(minimum_sizes)) + counts = [0 for _ in buckets] + sums = [0 for _ in buckets] + + for size in sizes: + found = False + for index, min_size in reversed(buckets): + if size >= min_size: + counts[index] += 1 + sums[index] += size + found = True + break + assert found + + return Histogram(minimum_sizes, counts, sums) + + +def print_layer_size_histogram(h: Histogram): + for index, min_size in enumerate(h.buckets): + log.info( + f">= {human_bytes(min_size)}: {h.counts[index]} layers total {human_bytes(h.sums[index])}" + ) diff --git a/test_runner/regress/test_pageserver_secondary.py b/test_runner/regress/test_pageserver_secondary.py index 5bfa9cce8c..757ea60882 100644 --- a/test_runner/regress/test_pageserver_secondary.py +++ b/test_runner/regress/test_pageserver_secondary.py @@ -563,6 +563,7 @@ def test_secondary_downloads(neon_env_builder: NeonEnvBuilder): ) ), ) + workload.stop() def test_secondary_background_downloads(neon_env_builder: NeonEnvBuilder): diff --git a/test_runner/regress/test_s3_restore.py b/test_runner/regress/test_s3_restore.py index 7fdabaaec7..6383d24c57 100644 --- a/test_runner/regress/test_s3_restore.py +++ b/test_runner/regress/test_s3_restore.py @@ -2,6 +2,7 @@ import time from datetime import datetime, timezone from fixtures.common_types import Lsn +from fixtures.log_helper import log from fixtures.neon_fixtures import ( NeonEnvBuilder, PgBin, @@ -32,7 +33,12 @@ def test_tenant_s3_restore( assert remote_storage, "remote storage not configured" enable_remote_storage_versioning(remote_storage) - env = neon_env_builder.init_start(initial_tenant_conf=MANY_SMALL_LAYERS_TENANT_CONFIG) + # change it back after initdb, recovery doesn't work if the two + # index_part.json uploads happen at same second or too close to each other. 
+ initial_tenant_conf = MANY_SMALL_LAYERS_TENANT_CONFIG + del initial_tenant_conf["checkpoint_distance"] + + env = neon_env_builder.init_start(initial_tenant_conf) env.pageserver.allowed_errors.extend( [ # The deletion queue will complain when it encounters simulated S3 errors @@ -43,14 +49,16 @@ def test_tenant_s3_restore( ) ps_http = env.pageserver.http_client() - tenant_id = env.initial_tenant + # now lets create the small layers + ps_http.set_tenant_config(tenant_id, MANY_SMALL_LAYERS_TENANT_CONFIG) + # Default tenant and the one we created assert ps_http.get_metric_value("pageserver_tenant_manager_slots", {"mode": "attached"}) == 1 # create two timelines one being the parent of another, both with non-trivial data - parent = None + parent = "main" last_flush_lsns = [] for timeline in ["first", "second"]: @@ -64,6 +72,7 @@ def test_tenant_s3_restore( last_flush_lsns.append(last_flush_lsn) ps_http.timeline_checkpoint(tenant_id, timeline_id) wait_for_upload(ps_http, tenant_id, timeline_id, last_flush_lsn) + log.info(f"{timeline} timeline {timeline_id} {last_flush_lsn=}") parent = timeline # These sleeps are important because they fend off differences in clocks between us and S3 @@ -108,6 +117,9 @@ def test_tenant_s3_restore( ps_http.tenant_attach(tenant_id, generation=generation) env.pageserver.quiesce_tenants() + for tline in ps_http.timeline_list(env.initial_tenant): + log.info(f"timeline detail: {tline}") + for i, timeline in enumerate(["first", "second"]): with env.endpoints.create_start(timeline, tenant_id=tenant_id) as endpoint: endpoint.safe_psql(f"SELECT * FROM created_{timeline};") diff --git a/test_runner/regress/test_sharding.py b/test_runner/regress/test_sharding.py index 545ba05b17..1996e99557 100644 --- a/test_runner/regress/test_sharding.py +++ b/test_runner/regress/test_sharding.py @@ -697,6 +697,9 @@ def test_sharding_ingest_layer_sizes( # small checkpointing and compaction targets to ensure we generate many upload operations "checkpoint_distance": f"{expect_layer_size}", "compaction_target_size": f"{expect_layer_size}", + # aim to reduce flakyness, we are not doing explicit checkpointing + "compaction_period": "0s", + "gc_period": "0s", } shard_count = 4 neon_env_builder.num_pageservers = shard_count @@ -712,6 +715,23 @@ def test_sharding_ingest_layer_sizes( tenant_id = env.initial_tenant timeline_id = env.initial_timeline + # ignore the initdb layer(s) for the purposes of the size comparison as a initdb image layer optimization + # will produce a lot more smaller layers. 
+ initial_layers_per_shard = {} + log.info("initdb distribution (not asserted on):") + for shard in env.storage_controller.locate(tenant_id): + pageserver = env.get_pageserver(shard["node_id"]) + shard_id = shard["shard_id"] + layers = ( + env.get_pageserver(shard["node_id"]).http_client().layer_map_info(shard_id, timeline_id) + ) + for layer in layers.historic_layers: + log.info( + f"layer[{pageserver.id}]: {layer.layer_file_name} (size {layer.layer_file_size})" + ) + + initial_layers_per_shard[shard_id] = set(layers.historic_layers) + workload = Workload(env, tenant_id, timeline_id) workload.init() workload.write_rows(4096, upload=False) @@ -733,7 +753,13 @@ def test_sharding_ingest_layer_sizes( historic_layers = sorted(layer_map.historic_layers, key=lambda layer: layer.lsn_start) + initial_layers = initial_layers_per_shard[shard_id] + for layer in historic_layers: + if layer in initial_layers: + # ignore the initdb image layers for the size histogram + continue + if layer.layer_file_size < expect_layer_size // 2: classification = "Small" small_layer_count += 1 @@ -763,7 +789,8 @@ def test_sharding_ingest_layer_sizes( pass else: # General case: - assert float(small_layer_count) / float(ok_layer_count) < 0.25 + # old limit was 0.25 but pg14 is right at the limit with 7/28 + assert float(small_layer_count) / float(ok_layer_count) < 0.3 # Each shard may emit up to one huge layer, because initdb ingest doesn't respect checkpoint_distance. assert huge_layer_count <= shard_count From a8ca7a1a1d88c6cff476eaf55c9f38c46dbfc645 Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Mon, 10 Jun 2024 12:08:16 -0400 Subject: [PATCH 156/220] docs: highlight neon env comes with an initial timeline (#7995) Quite a few existing test cases create their own timelines instead of using the default one. This pull request highlights that and hopefully people can write simpler tests in the future. Signed-off-by: Alex Chi Z Co-authored-by: Yuchen Liang <70461588+yliang412@users.noreply.github.com> --- test_runner/README.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/test_runner/README.md b/test_runner/README.md index fd68cfff79..7d95634ea8 100644 --- a/test_runner/README.md +++ b/test_runner/README.md @@ -285,6 +285,21 @@ def test_foobar(neon_env_builder: NeonEnvBuilder): ... ``` +The env includes a default tenant and timeline. Therefore, you do not need to create your own +tenant/timeline for testing. + +```python +def test_foobar2(neon_env_builder: NeonEnvBuilder): + env = neon_env_builder.init_start() # Start the environment + with env.endpoints.create_start("main") as endpoint: + # Start the compute endpoint + client = env.pageserver.http_client() # Get the pageserver client + + tenant_id = env.initial_tenant + timeline_id = env.initial_timeline + client.timeline_detail(tenant_id=tenant_id, timeline_id=timeline_id) +``` + For more information about pytest fixtures, see https://docs.pytest.org/en/stable/fixture.html At the end of a test, all the nodes in the environment are automatically stopped, so you From e46692788e9d3e2b010b156c034af0c95a13a2a8 Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Mon, 10 Jun 2024 19:34:34 +0300 Subject: [PATCH 157/220] refactor: Timeline layer flushing (#7993) The new features have deteriorated layer flushing, most recently with #7927. 
Changes: - inline `Timeline::freeze_inmem_layer` to the only caller - carry the TimelineWriterState guard to the actual point of freezing the layer - this allows us to `#[cfg(feature = "testing")]` the assertion added in #7927 - remove duplicate `flush_frozen_layer` in favor of splitting the `flush_frozen_layers_and_wait` - this requires starting the flush loop earlier for `checkpoint_distance < initdb size` tests --- pageserver/src/tenant.rs | 12 +- pageserver/src/tenant/timeline.rs | 140 ++++++++++-------- .../src/tenant/timeline/layer_manager.rs | 30 +++- 3 files changed, 106 insertions(+), 76 deletions(-) diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 60cd5c9695..2e3ce45c2b 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -3395,6 +3395,12 @@ impl Tenant { let tenant_shard_id = raw_timeline.owning_tenant.tenant_shard_id; let unfinished_timeline = raw_timeline.raw_timeline()?; + // Flush the new layer files to disk, before we make the timeline as available to + // the outside world. + // + // Flush loop needs to be spawned in order to be able to flush. + unfinished_timeline.maybe_spawn_flush_loop(); + import_datadir::import_timeline_from_postgres_datadir( unfinished_timeline, &pgdata_path, @@ -3406,12 +3412,6 @@ impl Tenant { format!("Failed to import pgdatadir for timeline {tenant_shard_id}/{timeline_id}") })?; - // Flush the new layer files to disk, before we make the timeline as available to - // the outside world. - // - // Flush loop needs to be spawned in order to be able to flush. - unfinished_timeline.maybe_spawn_flush_loop(); - fail::fail_point!("before-checkpoint-new-timeline", |_| { anyhow::bail!("failpoint before-checkpoint-new-timeline"); }); diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 58bdd84906..6da0f9d91c 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -1571,7 +1571,15 @@ impl Timeline { // This exists to provide a non-span creating version of `freeze_and_flush` we can call without // polluting the span hierarchy. pub(crate) async fn freeze_and_flush0(&self) -> Result<(), FlushLayerError> { - let to_lsn = self.freeze_inmem_layer(false).await; + let to_lsn = { + // Freeze the current open in-memory layer. It will be written to disk on next + // iteration. + let mut g = self.write_lock.lock().await; + + let to_lsn = self.get_last_record_lsn(); + self.freeze_inmem_layer_at(to_lsn, &mut g).await; + to_lsn + }; self.flush_frozen_layers_and_wait(to_lsn).await } @@ -1657,25 +1665,35 @@ impl Timeline { self.last_freeze_at.load(), open_layer.get_opened_at(), ) { - match open_layer.info() { + let at_lsn = match open_layer.info() { InMemoryLayerInfo::Frozen { lsn_start, lsn_end } => { // We may reach this point if the layer was already frozen by not yet flushed: flushing // happens asynchronously in the background. tracing::debug!( "Not freezing open layer, it's already frozen ({lsn_start}..{lsn_end})" ); + None } InMemoryLayerInfo::Open { .. 
} => { // Upgrade to a write lock and freeze the layer drop(layers_guard); let mut layers_guard = self.layers.write().await; - layers_guard - .try_freeze_in_memory_layer(current_lsn, &self.last_freeze_at) + let froze = layers_guard + .try_freeze_in_memory_layer( + current_lsn, + &self.last_freeze_at, + &mut write_guard, + ) .await; + Some(current_lsn).filter(|_| froze) + } + }; + if let Some(lsn) = at_lsn { + let res: Result = self.flush_frozen_layers(lsn); + if let Err(e) = res { + tracing::info!("failed to flush frozen layer after background freeze: {e:#}"); } } - write_guard.take(); - self.flush_frozen_layers(); } } @@ -2384,7 +2402,7 @@ impl Timeline { let background_ctx = RequestContext::todo_child(TaskKind::LayerFlushTask, DownloadBehavior::Error); self_clone.flush_loop(layer_flush_start_rx, &background_ctx).await; let mut flush_loop_state = self_clone.flush_loop_state.lock().unwrap(); - assert!(matches!(*flush_loop_state, FlushLoopState::Running{ ..})); + assert!(matches!(*flush_loop_state, FlushLoopState::Running{..})); *flush_loop_state = FlushLoopState::Exited; Ok(()) } @@ -3647,31 +3665,21 @@ impl Timeline { self.last_record_lsn.advance(new_lsn); } - /// Whether there was a layer to freeze or not, return the value of get_last_record_lsn - /// before we attempted the freeze: this guarantees that ingested data is frozen up to this lsn (inclusive). - async fn freeze_inmem_layer(&self, write_lock_held: bool) -> Lsn { - // Freeze the current open in-memory layer. It will be written to disk on next - // iteration. - - let _write_guard = if write_lock_held { - None - } else { - let mut g = self.write_lock.lock().await; - // remove the reference to an open layer - g.take(); - Some(g) + async fn freeze_inmem_layer_at( + &self, + at: Lsn, + write_lock: &mut tokio::sync::MutexGuard<'_, Option>, + ) { + let frozen = { + let mut guard = self.layers.write().await; + guard + .try_freeze_in_memory_layer(at, &self.last_freeze_at, write_lock) + .await }; - - let to_lsn = self.get_last_record_lsn(); - self.freeze_inmem_layer_at(to_lsn).await; - to_lsn - } - - async fn freeze_inmem_layer_at(&self, at: Lsn) { - let mut guard = self.layers.write().await; - guard - .try_freeze_in_memory_layer(at, &self.last_freeze_at) - .await; + if frozen { + let now = Instant::now(); + *(self.last_freeze_ts.write().unwrap()) = now; + } } /// Layer flusher task's main loop. @@ -3765,18 +3773,14 @@ impl Timeline { } } - /// Request the flush loop to write out all frozen layers up to `to_lsn` as Delta L0 files to disk. - /// The caller is responsible for the freezing, e.g., [`Self::freeze_inmem_layer`]. + /// Request the flush loop to write out all frozen layers up to `at_lsn` as Delta L0 files to disk. + /// The caller is responsible for the freezing, e.g., [`Self::freeze_inmem_layer_at`]. /// - /// `last_record_lsn` may be higher than the highest LSN of a frozen layer: if this is the case, - /// it means no data will be written between the top of the highest frozen layer and to_lsn, - /// e.g. because this tenant shard has ingested up to to_lsn and not written any data locally for that part of the WAL. - async fn flush_frozen_layers_and_wait( - &self, - last_record_lsn: Lsn, - ) -> Result<(), FlushLayerError> { - let mut rx = self.layer_flush_done_tx.subscribe(); - + /// `at_lsn` may be higher than the highest LSN of a frozen layer: if this is the + /// case, it means no data will be written between the top of the highest frozen layer and + /// to_lsn, e.g. 
because this tenant shard has ingested up to to_lsn and not written any data + /// locally for that part of the WAL. + fn flush_frozen_layers(&self, at_lsn: Lsn) -> Result { // Increment the flush cycle counter and wake up the flush task. // Remember the new value, so that when we listen for the flush // to finish, we know when the flush that we initiated has @@ -3791,13 +3795,18 @@ impl Timeline { self.layer_flush_start_tx.send_modify(|(counter, lsn)| { my_flush_request = *counter + 1; *counter = my_flush_request; - *lsn = std::cmp::max(last_record_lsn, *lsn); + *lsn = std::cmp::max(at_lsn, *lsn); }); + Ok(my_flush_request) + } + + async fn wait_flush_completion(&self, request: u64) -> Result<(), FlushLayerError> { + let mut rx = self.layer_flush_done_tx.subscribe(); loop { { let (last_result_counter, last_result) = &*rx.borrow(); - if *last_result_counter >= my_flush_request { + if *last_result_counter >= request { if let Err(err) = last_result { // We already logged the original error in // flush_loop. We cannot propagate it to the caller @@ -3824,12 +3833,9 @@ impl Timeline { } } - fn flush_frozen_layers(&self) { - self.layer_flush_start_tx.send_modify(|(counter, lsn)| { - *counter += 1; - - *lsn = std::cmp::max(*lsn, Lsn(self.last_freeze_at.load().0 - 1)); - }); + async fn flush_frozen_layers_and_wait(&self, at_lsn: Lsn) -> Result<(), FlushLayerError> { + let token = self.flush_frozen_layers(at_lsn)?; + self.wait_flush_completion(token).await } /// Flush one frozen in-memory layer to disk, as a new delta layer. @@ -5672,16 +5678,15 @@ impl<'a> TimelineWriter<'a> { } async fn roll_layer(&mut self, freeze_at: Lsn) -> anyhow::Result<()> { - assert!(self.write_guard.is_some()); - - self.tl.freeze_inmem_layer_at(freeze_at).await; - - let now = Instant::now(); - *(self.last_freeze_ts.write().unwrap()) = now; - - self.tl.flush_frozen_layers(); - let current_size = self.write_guard.as_ref().unwrap().current_size; + + // self.write_guard will be taken by the freezing + self.tl + .freeze_inmem_layer_at(freeze_at, &mut self.write_guard) + .await; + + self.tl.flush_frozen_layers(freeze_at)?; + if current_size >= self.get_checkpoint_distance() * 2 { warn!("Flushed oversized open layer with size {}", current_size) } @@ -5695,20 +5700,27 @@ impl<'a> TimelineWriter<'a> { return OpenLayerAction::Open; }; + #[cfg(feature = "testing")] if state.cached_last_freeze_at < self.tl.last_freeze_at.load() { - // TODO(#7993): branch is needed before refactoring the many places of freezing for the - // possibility `state` having a "dangling" reference to an already frozen in-memory - // layer. + // this check and assertion are not really needed because + // LayerManager::try_freeze_in_memory_layer will always clear out the + // TimelineWriterState if something is frozen. however, we can advance last_freeze_at when there + // is no TimelineWriterState. assert!( state.open_layer.end_lsn.get().is_some(), "our open_layer must be outdated" ); - return OpenLayerAction::Open; + + // this would be a memory leak waiting to happen because the in-memory layer always has + // an index + panic!("BUG: TimelineWriterState held on to frozen in-memory layer."); } if state.prev_lsn == Some(lsn) { - // Rolling mid LSN is not supported by downstream code. + // Rolling mid LSN is not supported by [downstream code]. // Hence, only roll at LSN boundaries. 
+ // + // [downstream code]: https://github.com/neondatabase/neon/pull/7993#discussion_r1633345422 return OpenLayerAction::None; } diff --git a/pageserver/src/tenant/timeline/layer_manager.rs b/pageserver/src/tenant/timeline/layer_manager.rs index 0e82dedecb..21e64d562a 100644 --- a/pageserver/src/tenant/timeline/layer_manager.rs +++ b/pageserver/src/tenant/timeline/layer_manager.rs @@ -21,6 +21,8 @@ use crate::{ }, }; +use super::TimelineWriterState; + /// Provides semantic APIs to manipulate the layer map. #[derive(Default)] pub(crate) struct LayerManager { @@ -120,18 +122,20 @@ impl LayerManager { Ok(layer) } - /// Called from `freeze_inmem_layer`, returns true if successfully frozen. - pub(crate) async fn try_freeze_in_memory_layer( + /// Tries to freeze an open layer and also manages clearing the TimelineWriterState. + /// + /// Returns true if anything was frozen. + pub(super) async fn try_freeze_in_memory_layer( &mut self, lsn: Lsn, last_freeze_at: &AtomicLsn, - ) { + write_lock: &mut tokio::sync::MutexGuard<'_, Option>, + ) -> bool { let Lsn(last_record_lsn) = lsn; let end_lsn = Lsn(last_record_lsn + 1); - if let Some(open_layer) = &self.layer_map.open_layer { + let froze = if let Some(open_layer) = &self.layer_map.open_layer { let open_layer_rc = Arc::clone(open_layer); - // Does this layer need freezing? open_layer.freeze(end_lsn).await; // The layer is no longer open, update the layer map to reflect this. @@ -139,11 +143,25 @@ impl LayerManager { self.layer_map.frozen_layers.push_back(open_layer_rc); self.layer_map.open_layer = None; self.layer_map.next_open_layer_at = Some(end_lsn); - } + + true + } else { + false + }; // Even if there was no layer to freeze, advance last_freeze_at to last_record_lsn+1: this // accounts for regions in the LSN range where we might have ingested no data due to sharding. last_freeze_at.store(end_lsn); + + // the writer state must no longer have a reference to the frozen layer + let taken = write_lock.take(); + assert_eq!( + froze, + taken.is_some(), + "should only had frozen a layer when TimelineWriterState existed" + ); + + froze } /// Add image layers to the layer map, called from `create_image_layers`. From e27ce3861914e89ad43aafb36e5fb96f13cd2bc2 Mon Sep 17 00:00:00 2001 From: a-masterov <72613290+a-masterov@users.noreply.github.com> Date: Tue, 11 Jun 2024 13:07:51 +0200 Subject: [PATCH 158/220] Add testing for extensions (#7818) ## Problem We need automated tests of extensions shipped with Neon to detect possible problems. ## Summary of changes A new image neon-test-extensions is added. Workflow changes to test the shipped extensions are added as well. Currently, the regression tests, shipped with extensions are in use. Some extensions, i.e. rum, timescaledb, rdkit, postgis, pgx_ulid, pgtap, pg_tiktoken, pg_jsonschema, pg_graphql, kq_imcx, wal2json_2_5 are excluded due to problems or absence of internal tests. 
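As a rough usage sketch (the image tag is a placeholder and the container name is taken from the compose script added below, so treat both as assumptions), the same flow the CI step drives can be run locally:

```bash
# Assumed local invocation: TAG must name a published build of the compute-node
# and neon-test-extensions images.
TAG=<build-tag> ./docker-compose/docker_compose_test.sh

# Inside that script, the shipped regression suites run in the test container,
# skipping the extensions listed above via the SKIP variable:
docker exec \
  -e SKIP=rum-src,timescaledb-src,rdkit-src,postgis-src,pgx_ulid-src,pgtap-src,pg_tiktoken-src,pg_jsonschema-src,pg_graphql-src,kq_imcx-src,wal2json_2_5-src \
  docker-compose-neon-test-extensions-1 /run-tests.sh
```

The `SKIP` list mirrors the exclusions above; removing an entry re-enables that extension's `installcheck` run.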
--------- Co-authored-by: Alexander Bayandin Co-authored-by: Heikki Linnakangas --- .dockerignore | 1 + .github/workflows/build_and_test.yml | 29 ++- Dockerfile.compute-node | 63 +++++ docker-compose/compute_wrapper/Dockerfile | 7 +- .../var/db/postgres/specs/spec.json | 12 +- docker-compose/docker-compose.yml | 12 +- docker-compose/docker_compose_test.sh | 83 +++++-- docker-compose/run-tests.sh | 15 ++ patches/pg_anon.patch | 223 ++++++++++++++++++ patches/pg_cron.patch | 19 ++ patches/pg_hintplan.patch | 39 +++ 11 files changed, 478 insertions(+), 25 deletions(-) create mode 100644 docker-compose/run-tests.sh create mode 100644 patches/pg_anon.patch create mode 100644 patches/pg_cron.patch create mode 100644 patches/pg_hintplan.patch diff --git a/.dockerignore b/.dockerignore index 1258532db8..eead727994 100644 --- a/.dockerignore +++ b/.dockerignore @@ -8,6 +8,7 @@ !scripts/combine_control_files.py !scripts/ninstall.sh !vm-cgconfig.conf +!docker-compose/run-tests.sh # Directories !.cargo/ diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index b9caf76060..79a0a77638 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -858,6 +858,26 @@ jobs: cache-to: type=registry,ref=neondatabase/compute-node-${{ matrix.version }}:cache-${{ matrix.arch }},mode=max tags: | neondatabase/compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }}-${{ matrix.arch }} + + - name: Build neon extensions test image + if: matrix.version == 'v16' + uses: docker/build-push-action@v5 + with: + context: . + build-args: | + GIT_VERSION=${{ github.event.pull_request.head.sha || github.sha }} + PG_VERSION=${{ matrix.version }} + BUILD_TAG=${{ needs.tag.outputs.build-tag }} + TAG=${{ needs.build-build-tools-image.outputs.image-tag }} + provenance: false + push: true + pull: true + file: Dockerfile.compute-node + target: neon-pg-ext-test + cache-from: type=registry,ref=neondatabase/neon-test-extensions-${{ matrix.version }}:cache-${{ matrix.arch }} + cache-to: type=registry,ref=neondatabase/neon-test-extensions-${{ matrix.version }}:cache-${{ matrix.arch }},mode=max + tags: | + neondatabase/neon-test-extensions-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}-${{ matrix.arch }} - name: Build compute-tools image # compute-tools are Postgres independent, so build it only once @@ -902,6 +922,13 @@ jobs: neondatabase/compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }}-x64 \ neondatabase/compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }}-arm64 + - name: Create multi-arch neon-test-extensions image + if: matrix.version == 'v16' + run: | + docker buildx imagetools create -t neondatabase/neon-test-extensions-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }} \ + neondatabase/neon-test-extensions-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }}-x64 \ + neondatabase/neon-test-extensions-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }}-arm64 + - name: Create multi-arch compute-tools image if: matrix.version == 'v16' run: | @@ -1020,7 +1047,7 @@ jobs: exit 1 fi - - name: Verify docker-compose example + - name: Verify docker-compose example and test extensions timeout-minutes: 20 run: env TAG=${{needs.tag.outputs.build-tag}} ./docker-compose/docker_compose_test.sh diff --git a/Dockerfile.compute-node b/Dockerfile.compute-node index 90b8868b43..a86fdd0bc3 100644 --- a/Dockerfile.compute-node +++ b/Dockerfile.compute-node @@ -928,6 +928,69 @@ RUN rm -r /usr/local/pgsql/include # 
if they were to be used by other libraries. RUN rm /usr/local/pgsql/lib/lib*.a + +######################################################################################### +# +# Layer neon-pg-ext-test +# +######################################################################################### + +FROM neon-pg-ext-build AS neon-pg-ext-test +ARG PG_VERSION +RUN mkdir /ext-src + +#COPY --from=postgis-build /postgis.tar.gz /ext-src/ +#COPY --from=postgis-build /sfcgal/* /usr +COPY --from=plv8-build /plv8.tar.gz /ext-src/ +COPY --from=h3-pg-build /h3-pg.tar.gz /ext-src/ +COPY --from=unit-pg-build /postgresql-unit.tar.gz /ext-src/ +COPY --from=vector-pg-build /pgvector.tar.gz /ext-src/ +COPY --from=vector-pg-build /pgvector.patch /ext-src/ +COPY --from=pgjwt-pg-build /pgjwt.tar.gz /ext-src +#COPY --from=pg-jsonschema-pg-build /home/nonroot/pg_jsonschema.tar.gz /ext-src +#COPY --from=pg-graphql-pg-build /home/nonroot/pg_graphql.tar.gz /ext-src +#COPY --from=pg-tiktoken-pg-build /home/nonroot/pg_tiktoken.tar.gz /ext-src +COPY --from=hypopg-pg-build /hypopg.tar.gz /ext-src +COPY --from=pg-hashids-pg-build /pg_hashids.tar.gz /ext-src +#COPY --from=rum-pg-build /rum.tar.gz /ext-src +#COPY --from=pgtap-pg-build /pgtap.tar.gz /ext-src +COPY --from=ip4r-pg-build /ip4r.tar.gz /ext-src +COPY --from=prefix-pg-build /prefix.tar.gz /ext-src +COPY --from=hll-pg-build /hll.tar.gz /ext-src +COPY --from=plpgsql-check-pg-build /plpgsql_check.tar.gz /ext-src +#COPY --from=timescaledb-pg-build /timescaledb.tar.gz /ext-src +COPY --from=pg-hint-plan-pg-build /pg_hint_plan.tar.gz /ext-src +COPY patches/pg_hintplan.patch /ext-src +#COPY --from=kq-imcx-pg-build /kq_imcx.tar.gz /ext-src +COPY --from=pg-cron-pg-build /pg_cron.tar.gz /ext-src +COPY patches/pg_cron.patch /ext-src +#COPY --from=pg-pgx-ulid-build /home/nonroot/pgx_ulid.tar.gz /ext-src +COPY --from=rdkit-pg-build /rdkit.tar.gz /ext-src +COPY --from=pg-uuidv7-pg-build /pg_uuidv7.tar.gz /ext-src +COPY --from=pg-roaringbitmap-pg-build /pg_roaringbitmap.tar.gz /ext-src +COPY --from=pg-semver-pg-build /pg_semver.tar.gz /ext-src +#COPY --from=pg-embedding-pg-build /home/nonroot/pg_embedding-src/ /ext-src +#COPY --from=wal2json-pg-build /wal2json_2_5.tar.gz /ext-src +COPY --from=pg-anon-pg-build /pg_anon.tar.gz /ext-src +COPY patches/pg_anon.patch /ext-src +COPY --from=pg-ivm-build /pg_ivm.tar.gz /ext-src +COPY --from=pg-partman-build /pg_partman.tar.gz /ext-src +RUN cd /ext-src/ && for f in *.tar.gz; \ + do echo $f; dname=$(echo $f | sed 's/\.tar.*//')-src; \ + rm -rf $dname; mkdir $dname; tar xzf $f --strip-components=1 -C $dname \ + || exit 1; rm -f $f; done +RUN cd /ext-src/pgvector-src && patch -p1 <../pgvector.patch +# cmake is required for the h3 test +RUN apt-get update && apt-get install -y cmake +RUN patch -p1 < /ext-src/pg_hintplan.patch +COPY --chmod=755 docker-compose/run-tests.sh /run-tests.sh +RUN patch -p1 /dev/null && pwd )" -COMPOSE_FILE=$SCRIPT_DIR/docker-compose.yml - +COMPOSE_FILE='docker-compose.yml' +cd $(dirname $0) +docker compose -f $COMPOSE_FILE COMPUTE_CONTAINER_NAME=docker-compose-compute-1 -SQL="CREATE TABLE t(key int primary key, value text); insert into t values(1,1); select * from t;" -PSQL_OPTION="-h localhost -U cloud_admin -p 55433 -c '$SQL' postgres" +TEST_CONTAINER_NAME=docker-compose-neon-test-extensions-1 +PSQL_OPTION="-h localhost -U cloud_admin -p 55433 -d postgres" +: ${http_proxy:=} +: ${https_proxy:=} +export http_proxy https_proxy cleanup() { echo "show container information" @@ -25,34 +31,71 @@ cleanup() { docker compose 
-f $COMPOSE_FILE down } -echo "clean up containers if exists" -cleanup - for pg_version in 14 15 16; do - echo "start containers (pg_version=$pg_version)." - PG_VERSION=$pg_version docker compose -f $COMPOSE_FILE up --build -d + echo "clean up containers if exists" + cleanup + PG_TEST_VERSION=$(($pg_version < 16 ? 16 : $pg_version)) + PG_VERSION=$pg_version PG_TEST_VERSION=$PG_TEST_VERSION docker compose -f $COMPOSE_FILE up --build -d echo "wait until the compute is ready. timeout after 60s. " cnt=0 - while sleep 1; do + while sleep 3; do # check timeout - cnt=`expr $cnt + 1` + cnt=`expr $cnt + 3` if [ $cnt -gt 60 ]; then echo "timeout before the compute is ready." cleanup exit 1 fi - - # check if the compute is ready - set +o pipefail - result=`docker compose -f $COMPOSE_FILE logs "compute_is_ready" | grep "accepting connections" | wc -l` - set -o pipefail - if [ $result -eq 1 ]; then + if docker compose -f $COMPOSE_FILE logs "compute_is_ready" | grep -q "accepting connections"; then echo "OK. The compute is ready to connect." echo "execute simple queries." docker exec $COMPUTE_CONTAINER_NAME /bin/bash -c "psql $PSQL_OPTION" - cleanup break fi done + + if [ $pg_version -ge 16 ] + then + echo Enabling trust connection + docker exec $COMPUTE_CONTAINER_NAME bash -c "sed -i '\$d' /var/db/postgres/compute/pg_hba.conf && echo -e 'host\t all\t all\t all\t trust' >> /var/db/postgres/compute/pg_hba.conf && psql $PSQL_OPTION -c 'select pg_reload_conf()' " + echo Adding postgres role + docker exec $COMPUTE_CONTAINER_NAME psql $PSQL_OPTION -c "CREATE ROLE postgres SUPERUSER LOGIN" + # This is required for the pg_hint_plan test, to prevent flaky log message causing the test to fail + # It cannot be moved to Dockerfile now because the database directory is created after the start of the container + echo Adding dummy config + docker exec $COMPUTE_CONTAINER_NAME touch /var/db/postgres/compute/compute_ctl_temp_override.conf + # This block is required for the pg_anon extension test. + # The test assumes that it is running on the same host with the postgres engine. 
+ # In our case it's not true, that's why we are copying files to the compute node + TMPDIR=$(mktemp -d) + docker cp $TEST_CONTAINER_NAME:/ext-src/pg_anon-src/data $TMPDIR/data + echo -e '1\t too \t many \t tabs' > $TMPDIR/data/bad.csv + docker cp $TMPDIR/data $COMPUTE_CONTAINER_NAME:/tmp/tmp_anon_alternate_data + rm -rf $TMPDIR + TMPDIR=$(mktemp -d) + # The following block does the same for the pg_hintplan test + docker cp $TEST_CONTAINER_NAME:/ext-src/pg_hint_plan-src/data $TMPDIR/data + docker cp $TMPDIR/data $COMPUTE_CONTAINER_NAME:/ext-src/pg_hint_plan-src/ + rm -rf $TMPDIR + # We are running tests now + if docker exec -e SKIP=rum-src,timescaledb-src,rdkit-src,postgis-src,pgx_ulid-src,pgtap-src,pg_tiktoken-src,pg_jsonschema-src,pg_graphql-src,kq_imcx-src,wal2json_2_5-src \ + $TEST_CONTAINER_NAME /run-tests.sh | tee testout.txt + then + cleanup + else + FAILED=$(tail -1 testout.txt) + for d in $FAILED + do + mkdir $d + docker cp $TEST_CONTAINER_NAME:/ext-src/$d/regression.diffs $d || true + docker cp $TEST_CONTAINER_NAME:/ext-src/$d/regression.out $d || true + cat $d/regression.out $d/regression.diffs || true + done + rm -rf $FAILED + cleanup + exit 1 + fi + fi + cleanup done diff --git a/docker-compose/run-tests.sh b/docker-compose/run-tests.sh new file mode 100644 index 0000000000..c05fc159aa --- /dev/null +++ b/docker-compose/run-tests.sh @@ -0,0 +1,15 @@ +#!/bin/bash +set -x + +cd /ext-src +FAILED= +LIST=$((echo ${SKIP} | sed 's/,/\n/g'; ls -d *-src) | sort | uniq -u) +for d in ${LIST} +do + [ -d ${d} ] || continue + psql -c "select 1" >/dev/null || break + make -C ${d} installcheck || FAILED="${d} ${FAILED}" +done +[ -z "${FAILED}" ] && exit 0 +echo ${FAILED} +exit 1 \ No newline at end of file diff --git a/patches/pg_anon.patch b/patches/pg_anon.patch new file mode 100644 index 0000000000..15dfd3c5a0 --- /dev/null +++ b/patches/pg_anon.patch @@ -0,0 +1,223 @@ +commit 7dd414ee75f2875cffb1d6ba474df1f135a6fc6f +Author: Alexey Masterov +Date: Fri May 31 06:34:26 2024 +0000 + + These alternative expected files were added to consider the neon features + +diff --git a/ext-src/pg_anon-src/tests/expected/permissions_masked_role_1.out b/ext-src/pg_anon-src/tests/expected/permissions_masked_role_1.out +new file mode 100644 +index 0000000..2539cfd +--- /dev/null ++++ b/ext-src/pg_anon-src/tests/expected/permissions_masked_role_1.out +@@ -0,0 +1,101 @@ ++BEGIN; ++CREATE EXTENSION anon CASCADE; ++NOTICE: installing required extension "pgcrypto" ++SELECT anon.init(); ++ init ++------ ++ t ++(1 row) ++ ++CREATE ROLE mallory_the_masked_user; ++SECURITY LABEL FOR anon ON ROLE mallory_the_masked_user IS 'MASKED'; ++CREATE TABLE t1(i INT); ++ALTER TABLE t1 ADD COLUMN t TEXT; ++SECURITY LABEL FOR anon ON COLUMN t1.t ++IS 'MASKED WITH VALUE NULL'; ++INSERT INTO t1 VALUES (1,'test'); ++-- ++-- We're checking the owner's permissions ++-- ++-- see ++-- https://postgresql-anonymizer.readthedocs.io/en/latest/SECURITY/#permissions ++-- ++SET ROLE mallory_the_masked_user; ++SELECT anon.pseudo_first_name(0) IS NOT NULL; ++ ?column? 
++---------- ++ t ++(1 row) ++ ++-- SHOULD FAIL ++DO $$ ++BEGIN ++ PERFORM anon.init(); ++ EXCEPTION WHEN insufficient_privilege ++ THEN RAISE NOTICE 'insufficient_privilege'; ++END$$; ++NOTICE: insufficient_privilege ++-- SHOULD FAIL ++DO $$ ++BEGIN ++ PERFORM anon.anonymize_table('t1'); ++ EXCEPTION WHEN insufficient_privilege ++ THEN RAISE NOTICE 'insufficient_privilege'; ++END$$; ++NOTICE: insufficient_privilege ++-- SHOULD FAIL ++SAVEPOINT fail_start_engine; ++SELECT anon.start_dynamic_masking(); ++ERROR: Only supersusers can start the dynamic masking engine. ++CONTEXT: PL/pgSQL function anon.start_dynamic_masking(boolean) line 18 at RAISE ++ROLLBACK TO fail_start_engine; ++RESET ROLE; ++SELECT anon.start_dynamic_masking(); ++ start_dynamic_masking ++----------------------- ++ t ++(1 row) ++ ++SET ROLE mallory_the_masked_user; ++SELECT * FROM mask.t1; ++ i | t ++---+--- ++ 1 | ++(1 row) ++ ++-- SHOULD FAIL ++DO $$ ++BEGIN ++ SELECT * FROM public.t1; ++ EXCEPTION WHEN insufficient_privilege ++ THEN RAISE NOTICE 'insufficient_privilege'; ++END$$; ++NOTICE: insufficient_privilege ++-- SHOULD FAIL ++SAVEPOINT fail_stop_engine; ++SELECT anon.stop_dynamic_masking(); ++ERROR: Only supersusers can stop the dynamic masking engine. ++CONTEXT: PL/pgSQL function anon.stop_dynamic_masking() line 18 at RAISE ++ROLLBACK TO fail_stop_engine; ++RESET ROLE; ++SELECT anon.stop_dynamic_masking(); ++NOTICE: The previous priviledges of 'mallory_the_masked_user' are not restored. You need to grant them manually. ++ stop_dynamic_masking ++---------------------- ++ t ++(1 row) ++ ++SET ROLE mallory_the_masked_user; ++SELECT COUNT(*)=1 FROM anon.pg_masking_rules; ++ ?column? ++---------- ++ t ++(1 row) ++ ++-- SHOULD FAIL ++SAVEPOINT fail_seclabel_on_role; ++SECURITY LABEL FOR anon ON ROLE mallory_the_masked_user IS NULL; ++ERROR: permission denied ++DETAIL: The current user must have the CREATEROLE attribute. ++ROLLBACK TO fail_seclabel_on_role; ++ROLLBACK; +diff --git a/ext-src/pg_anon-src/tests/expected/permissions_owner_1.out b/ext-src/pg_anon-src/tests/expected/permissions_owner_1.out +new file mode 100644 +index 0000000..8b090fe +--- /dev/null ++++ b/ext-src/pg_anon-src/tests/expected/permissions_owner_1.out +@@ -0,0 +1,104 @@ ++BEGIN; ++CREATE EXTENSION anon CASCADE; ++NOTICE: installing required extension "pgcrypto" ++SELECT anon.init(); ++ init ++------ ++ t ++(1 row) ++ ++CREATE ROLE oscar_the_owner; ++ALTER DATABASE :DBNAME OWNER TO oscar_the_owner; ++CREATE ROLE mallory_the_masked_user; ++SECURITY LABEL FOR anon ON ROLE mallory_the_masked_user IS 'MASKED'; ++-- ++-- We're checking the owner's permissions ++-- ++-- see ++-- https://postgresql-anonymizer.readthedocs.io/en/latest/SECURITY/#permissions ++-- ++SET ROLE oscar_the_owner; ++SELECT anon.pseudo_first_name(0) IS NOT NULL; ++ ?column? 
++---------- ++ t ++(1 row) ++ ++-- SHOULD FAIL ++DO $$ ++BEGIN ++ PERFORM anon.init(); ++ EXCEPTION WHEN insufficient_privilege ++ THEN RAISE NOTICE 'insufficient_privilege'; ++END$$; ++NOTICE: insufficient_privilege ++CREATE TABLE t1(i INT); ++ALTER TABLE t1 ADD COLUMN t TEXT; ++SECURITY LABEL FOR anon ON COLUMN t1.t ++IS 'MASKED WITH VALUE NULL'; ++INSERT INTO t1 VALUES (1,'test'); ++SELECT anon.anonymize_table('t1'); ++ anonymize_table ++----------------- ++ t ++(1 row) ++ ++SELECT * FROM t1; ++ i | t ++---+--- ++ 1 | ++(1 row) ++ ++UPDATE t1 SET t='test' WHERE i=1; ++-- SHOULD FAIL ++SAVEPOINT fail_start_engine; ++SELECT anon.start_dynamic_masking(); ++ start_dynamic_masking ++----------------------- ++ t ++(1 row) ++ ++ROLLBACK TO fail_start_engine; ++RESET ROLE; ++SELECT anon.start_dynamic_masking(); ++ start_dynamic_masking ++----------------------- ++ t ++(1 row) ++ ++SET ROLE oscar_the_owner; ++SELECT * FROM t1; ++ i | t ++---+------ ++ 1 | test ++(1 row) ++ ++--SELECT * FROM mask.t1; ++-- SHOULD FAIL ++SAVEPOINT fail_stop_engine; ++SELECT anon.stop_dynamic_masking(); ++ERROR: permission denied for schema mask ++CONTEXT: SQL statement "DROP VIEW mask.t1;" ++PL/pgSQL function anon.mask_drop_view(oid) line 3 at EXECUTE ++SQL statement "SELECT anon.mask_drop_view(oid) ++ FROM pg_catalog.pg_class ++ WHERE relnamespace=quote_ident(pg_catalog.current_setting('anon.sourceschema'))::REGNAMESPACE ++ AND relkind IN ('r','p','f')" ++PL/pgSQL function anon.stop_dynamic_masking() line 22 at PERFORM ++ROLLBACK TO fail_stop_engine; ++RESET ROLE; ++SELECT anon.stop_dynamic_masking(); ++NOTICE: The previous priviledges of 'mallory_the_masked_user' are not restored. You need to grant them manually. ++ stop_dynamic_masking ++---------------------- ++ t ++(1 row) ++ ++SET ROLE oscar_the_owner; ++-- SHOULD FAIL ++SAVEPOINT fail_seclabel_on_role; ++SECURITY LABEL FOR anon ON ROLE mallory_the_masked_user IS NULL; ++ERROR: permission denied ++DETAIL: The current user must have the CREATEROLE attribute. 
++ROLLBACK TO fail_seclabel_on_role; ++ROLLBACK; diff --git a/patches/pg_cron.patch b/patches/pg_cron.patch new file mode 100644 index 0000000000..c2b648c20c --- /dev/null +++ b/patches/pg_cron.patch @@ -0,0 +1,19 @@ +commit b3ea51ee158f113f2f82d0b97c12c54343c9a695 (HEAD -> master) +Author: Alexey Masterov +Date: Fri Jun 7 19:23:42 2024 +0000 + + Disable REGRESS_OPTIONS causing initdb + +diff --git a/ext-src/pg_cron-src/Makefile b/ext-src/pg_cron-src/Makefile +index 053314c..fbd5fb5 100644 +--- a/ext-src/pg_cron-src/Makefile ++++ b/ext-src/pg_cron-src/Makefile +@@ -5,7 +5,7 @@ EXTENSION = pg_cron + DATA_built = $(EXTENSION)--1.0.sql + DATA = $(wildcard $(EXTENSION)--*--*.sql) + +-REGRESS_OPTS =--temp-config=./pg_cron.conf --temp-instance=./tmp_check ++#REGRESS_OPTS =--temp-config=./pg_cron.conf --temp-instance=./tmp_check + REGRESS = pg_cron-test + + # compilation configuration diff --git a/patches/pg_hintplan.patch b/patches/pg_hintplan.patch new file mode 100644 index 0000000000..61a5ecbb90 --- /dev/null +++ b/patches/pg_hintplan.patch @@ -0,0 +1,39 @@ +commit f7925d4d1406c0f0229e3c691c94b69e381899b1 (HEAD -> master) +Author: Alexey Masterov +Date: Thu Jun 6 08:02:42 2024 +0000 + + Patch expected files to consider Neon's log messages + +diff --git a/ext-src/pg_hint_plan-src/expected/ut-A.out b/ext-src/pg_hint_plan-src/expected/ut-A.out +index da723b8..f8d0102 100644 +--- a/ext-src/pg_hint_plan-src/expected/ut-A.out ++++ b/ext-src/pg_hint_plan-src/expected/ut-A.out +@@ -9,13 +9,16 @@ SET search_path TO public; + ---- + -- No.A-1-1-3 + CREATE EXTENSION pg_hint_plan; ++LOG: Sending request to compute_ctl: http://localhost:3080/extension_server/pg_hint_plan + -- No.A-1-2-3 + DROP EXTENSION pg_hint_plan; + -- No.A-1-1-4 + CREATE SCHEMA other_schema; + CREATE EXTENSION pg_hint_plan SCHEMA other_schema; ++LOG: Sending request to compute_ctl: http://localhost:3080/extension_server/pg_hint_plan + ERROR: extension "pg_hint_plan" must be installed in schema "hint_plan" + CREATE EXTENSION pg_hint_plan; ++LOG: Sending request to compute_ctl: http://localhost:3080/extension_server/pg_hint_plan + DROP SCHEMA other_schema; + ---- + ---- No. A-5-1 comment pattern +diff --git a/ext-src/pg_hint_plan-src/expected/ut-fdw.out b/ext-src/pg_hint_plan-src/expected/ut-fdw.out +index d372459..6282afe 100644 +--- a/ext-src/pg_hint_plan-src/expected/ut-fdw.out ++++ b/ext-src/pg_hint_plan-src/expected/ut-fdw.out +@@ -7,6 +7,7 @@ SET pg_hint_plan.debug_print TO on; + SET client_min_messages TO LOG; + SET pg_hint_plan.enable_hint TO on; + CREATE EXTENSION file_fdw; ++LOG: Sending request to compute_ctl: http://localhost:3080/extension_server/file_fdw + CREATE SERVER file_server FOREIGN DATA WRAPPER file_fdw; + CREATE USER MAPPING FOR PUBLIC SERVER file_server; + CREATE FOREIGN TABLE ft1 (id int, val int) SERVER file_server OPTIONS (format 'csv', filename :'filename'); From 7515d0f368e14dfb82520c8a493a49e5671e479e Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Tue, 11 Jun 2024 15:38:54 +0300 Subject: [PATCH 159/220] fix: stop storing TimelineMetadata in index_part.json as bytes (#7699) We've stored metadata as bytes within the `index_part.json` for long fixed reasons. #7693 added support for reading out normal json serialization of the `TimelineMetadata`. Change the serialization to only write `TimelineMetadata` as json for going forward, keeping the backward compatibility to reading the metadata as bytes. 
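For readers who only need the shape of the compatibility trick, here is a minimal, self-contained sketch of the pattern (the `MetadataBody`/`IndexPartLike` types and the `serde_json` fallback are illustrative stand-ins, not the pageserver's actual code; the real legacy path decodes the bytes with the bincode-based `TimelineMetadata::from_bytes`):

```rust
use serde::{Deserialize, Deserializer, Serialize};

#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct MetadataBody {
    disk_consistent_lsn: u64,
    pg_version: u32,
}

// Accept either the legacy byte-array encoding or the modern JSON object.
#[derive(Deserialize)]
#[serde(untagged)]
enum LegacyOrModern {
    LegacyBytes(Vec<u8>),
    Modern(MetadataBody),
}

fn read_either<'de, D: Deserializer<'de>>(de: D) -> Result<MetadataBody, D::Error> {
    match LegacyOrModern::deserialize(de)? {
        // Stand-in for the real bytes decoder (`TimelineMetadata::from_bytes`).
        LegacyOrModern::LegacyBytes(bytes) => {
            serde_json::from_slice(&bytes).map_err(serde::de::Error::custom)
        }
        LegacyOrModern::Modern(body) => Ok(body),
    }
}

#[derive(Serialize, Deserialize)]
struct IndexPartLike {
    // Writes always emit the plain JSON object; reads accept both shapes.
    #[serde(deserialize_with = "read_either")]
    metadata: MetadataBody,
}
```

Only the deserialize side has to know about the old representation; the serialize side can switch to the new format unconditionally.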
Because of failure to include `alias = "metadata"` in #7693, one more follow-up is required to make the switch from the old name to `"metadata": `, but that affects only the field name in serialized format. In documentation and naming, an effort is made to add enough warning signs around TimelineMetadata so that it will receive no changes in the future. We can add those fields to `IndexPart` directly instead. Additionally, the path to cleaning up `metadata.rs` is documented in the `metadata.rs` module comment. If we must extend `TimelineMetadata` before that, the duplication suggested in [review comment] is the way to go. [review comment]: https://github.com/neondatabase/neon/pull/7699#pullrequestreview-2107081558 --- pageserver/ctl/src/index_part.rs | 22 +- pageserver/src/tenant/metadata.rs | 256 +++++++----------- .../tenant/remote_timeline_client/index.rs | 82 +++++- s3_scrubber/src/checks.rs | 13 +- s3_scrubber/src/scan_pageserver_metadata.rs | 2 +- .../regress/test_layers_from_future.py | 3 +- 6 files changed, 178 insertions(+), 200 deletions(-) diff --git a/pageserver/ctl/src/index_part.rs b/pageserver/ctl/src/index_part.rs index a33cae6769..20018846f8 100644 --- a/pageserver/ctl/src/index_part.rs +++ b/pageserver/ctl/src/index_part.rs @@ -1,11 +1,6 @@ -use std::collections::HashMap; - use anyhow::Context; use camino::Utf8PathBuf; -use pageserver::tenant::remote_timeline_client::index::LayerFileMetadata; -use pageserver::tenant::storage_layer::LayerName; -use pageserver::tenant::{metadata::TimelineMetadata, IndexPart}; -use utils::lsn::Lsn; +use pageserver::tenant::IndexPart; #[derive(clap::Subcommand)] pub(crate) enum IndexPartCmd { @@ -17,20 +12,7 @@ pub(crate) async fn main(cmd: &IndexPartCmd) -> anyhow::Result<()> { IndexPartCmd::Dump { path } => { let bytes = tokio::fs::read(path).await.context("read file")?; let des: IndexPart = IndexPart::from_s3_bytes(&bytes).context("deserialize")?; - #[derive(serde::Serialize)] - struct Output<'a> { - layer_metadata: &'a HashMap, - disk_consistent_lsn: Lsn, - timeline_metadata: &'a TimelineMetadata, - } - - let output = Output { - layer_metadata: &des.layer_metadata, - disk_consistent_lsn: des.metadata.disk_consistent_lsn(), - timeline_metadata: &des.metadata, - }; - - let output = serde_json::to_string_pretty(&output).context("serialize output")?; + let output = serde_json::to_string_pretty(&des).context("serialize output")?; println!("{output}"); Ok(()) } diff --git a/pageserver/src/tenant/metadata.rs b/pageserver/src/tenant/metadata.rs index c00672895a..6ba1bdef9b 100644 --- a/pageserver/src/tenant/metadata.rs +++ b/pageserver/src/tenant/metadata.rs @@ -1,15 +1,23 @@ -//! Every image of a certain timeline from [`crate::tenant::Tenant`] -//! has a metadata that needs to be stored persistently. +//! Describes the legacy now hopefully no longer modified per-timeline metadata stored in +//! `index_part.json` managed by [`remote_timeline_client`]. For many tenants and their timelines, +//! this struct and it's original serialization format is still needed because they were written a +//! long time ago. //! -//! Later, the file gets used in [`remote_timeline_client`] as a part of -//! external storage import and export operations. +//! Instead of changing and adding versioning to this, just change [`IndexPart`] with soft json +//! versioning. //! -//! The module contains all structs and related helper methods related to timeline metadata. +//! To clean up this module we need to migrate all index_part.json files to a later version. +//! 
While doing this, we need to be mindful about s3 based recovery as well, so it might take +//! however long we keep the old versions to be able to delete the old code. After that, we can +//! remove everything else than [`TimelineMetadataBodyV2`], rename it as `TimelineMetadata` and +//! move it to `index.rs`. Before doing all of this, we need to keep the structures for backwards +//! compatibility. //! //! [`remote_timeline_client`]: super::remote_timeline_client +//! [`IndexPart`]: super::remote_timeline_client::index::IndexPart use anyhow::ensure; -use serde::{de::Error, Deserialize, Serialize, Serializer}; +use serde::{Deserialize, Serialize}; use utils::bin_ser::SerializeError; use utils::{bin_ser::BeSer, id::TimelineId, lsn::Lsn}; @@ -17,17 +25,37 @@ use utils::{bin_ser::BeSer, id::TimelineId, lsn::Lsn}; const METADATA_FORMAT_VERSION: u16 = 4; /// Previous supported format versions. +/// +/// In practice, none of these should remain, all are [`METADATA_FORMAT_VERSION`], but confirming +/// that requires a scrubber run which is yet to be done. const METADATA_OLD_FORMAT_VERSION: u16 = 3; -/// We assume that a write of up to METADATA_MAX_SIZE bytes is atomic. +/// When the file existed on disk we assumed that a write of up to METADATA_MAX_SIZE bytes is atomic. /// /// This is the same assumption that PostgreSQL makes with the control file, +/// /// see PG_CONTROL_MAX_SAFE_SIZE const METADATA_MAX_SIZE: usize = 512; -/// Metadata stored on disk for each timeline +/// Legacy metadata stored as a component of `index_part.json` per timeline. /// -/// The fields correspond to the values we hold in memory, in Timeline. +/// Do not make new changes to this type or the module. In production, we have two different kinds +/// of serializations of this type: bincode and json. Bincode version reflects what used to be +/// stored on disk in earlier versions and does internal crc32 checksumming. +/// +/// This type should not implement `serde::Serialize` or `serde::Deserialize` because there would +/// be a confusion whether you want the old version ([`TimelineMetadata::from_bytes`]) or the modern +/// as-exists in `index_part.json` ([`self::modern_serde`]). 
+/// +/// ```compile_fail +/// #[derive(serde::Serialize)] +/// struct DoNotDoThis(pageserver::tenant::metadata::TimelineMetadata); +/// ``` +/// +/// ```compile_fail +/// #[derive(serde::Deserialize)] +/// struct NeitherDoThis(pageserver::tenant::metadata::TimelineMetadata); +/// ``` #[derive(Debug, Clone, PartialEq, Eq)] pub struct TimelineMetadata { hdr: TimelineMetadataHeader, @@ -40,6 +68,49 @@ struct TimelineMetadataHeader { size: u16, // size of serialized metadata format_version: u16, // metadata format version (used for compatibility checks) } + +impl TryFrom<&TimelineMetadataBodyV2> for TimelineMetadataHeader { + type Error = Crc32CalculationFailed; + + fn try_from(value: &TimelineMetadataBodyV2) -> Result { + #[derive(Default)] + struct Crc32Sink { + crc: u32, + count: usize, + } + + impl std::io::Write for Crc32Sink { + fn write(&mut self, buf: &[u8]) -> std::io::Result { + self.crc = crc32c::crc32c_append(self.crc, buf); + self.count += buf.len(); + Ok(buf.len()) + } + + fn flush(&mut self) -> std::io::Result<()> { + Ok(()) + } + } + + // jump through hoops to calculate the crc32 so that TimelineMetadata::ne works + // across serialization versions + let mut sink = Crc32Sink::default(); + ::ser_into(value, &mut sink) + .map_err(Crc32CalculationFailed)?; + + let size = METADATA_HDR_SIZE + sink.count; + + Ok(TimelineMetadataHeader { + checksum: sink.crc, + size: size as u16, + format_version: METADATA_FORMAT_VERSION, + }) + } +} + +#[derive(thiserror::Error, Debug)] +#[error("re-serializing for crc32 failed")] +struct Crc32CalculationFailed(#[source] utils::bin_ser::SerializeError); + const METADATA_HDR_SIZE: usize = std::mem::size_of::(); #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] @@ -111,6 +182,12 @@ impl TimelineMetadata { } } + #[cfg(test)] + pub(crate) fn with_recalculated_checksum(mut self) -> anyhow::Result { + self.hdr = TimelineMetadataHeader::try_from(&self.body)?; + Ok(self) + } + fn upgrade_timeline_metadata(metadata_bytes: &[u8]) -> anyhow::Result { let mut hdr = TimelineMetadataHeader::des(&metadata_bytes[0..METADATA_HDR_SIZE])?; @@ -261,32 +338,8 @@ impl TimelineMetadata { } } -impl<'de> Deserialize<'de> for TimelineMetadata { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - let bytes = Vec::::deserialize(deserializer)?; - Self::from_bytes(bytes.as_slice()).map_err(D::Error::custom) - } -} - -impl Serialize for TimelineMetadata { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - let bytes = self.to_bytes().map_err(serde::ser::Error::custom)?; - bytes.serialize(serializer) - } -} - pub(crate) mod modern_serde { - use crate::tenant::metadata::METADATA_FORMAT_VERSION; - - use super::{ - TimelineMetadata, TimelineMetadataBodyV2, TimelineMetadataHeader, METADATA_HDR_SIZE, - }; + use super::{TimelineMetadata, TimelineMetadataBodyV2, TimelineMetadataHeader}; use serde::{Deserialize, Serialize}; pub(crate) fn deserialize<'de, D>(deserializer: D) -> Result @@ -322,71 +375,15 @@ pub(crate) mod modern_serde { let de = serde::de::value::MapAccessDeserializer::new(map); let body = TimelineMetadataBodyV2::deserialize(de)?; + let hdr = TimelineMetadataHeader::try_from(&body).map_err(A::Error::custom)?; - // jump through hoops to calculate the crc32 so that TimelineMetadata::ne works - // across serialization versions - let mut sink = Crc32Sink::default(); - ::ser_into(&body, &mut sink) - .map_err(|e| A::Error::custom(Crc32CalculationFailed(e)))?; - - let size = METADATA_HDR_SIZE + sink.count; 
- - Ok(TimelineMetadata { - hdr: TimelineMetadataHeader { - checksum: sink.crc, - size: size as u16, - format_version: METADATA_FORMAT_VERSION, - }, - body, - }) + Ok(TimelineMetadata { hdr, body }) } } deserializer.deserialize_any(Visitor) } - #[derive(Default)] - struct Crc32Sink { - crc: u32, - count: usize, - } - - impl std::io::Write for Crc32Sink { - fn write(&mut self, buf: &[u8]) -> std::io::Result { - self.crc = crc32c::crc32c_append(self.crc, buf); - self.count += buf.len(); - Ok(buf.len()) - } - - fn flush(&mut self) -> std::io::Result<()> { - Ok(()) - } - } - - #[derive(thiserror::Error)] - #[error("re-serializing for crc32 failed")] - struct Crc32CalculationFailed(#[source] E); - - // this should be true for one release, after that we can change it to false - // remember to check the IndexPart::metadata field TODO comment as well - const LEGACY_BINCODED_BYTES: bool = true; - - #[derive(serde::Serialize)] - #[serde(transparent)] - struct LegacyPaddedBytes<'a>(&'a TimelineMetadata); - - struct JustTheBodyV2<'a>(&'a TimelineMetadata); - - impl serde::Serialize for JustTheBodyV2<'_> { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - // header is not needed, upon reading we've upgraded all v1 to v2 - self.0.body.serialize(serializer) - } - } - pub(crate) fn serialize( metadata: &TimelineMetadata, serializer: S, @@ -394,25 +391,23 @@ pub(crate) mod modern_serde { where S: serde::Serializer, { - // we cannot use TimelineMetadata::serialize for now because it'll do - // TimelineMetadata::to_bytes - if LEGACY_BINCODED_BYTES { - LegacyPaddedBytes(metadata).serialize(serializer) - } else { - JustTheBodyV2(metadata).serialize(serializer) - } + // header is not needed, upon reading we've upgraded all v1 to v2 + metadata.body.serialize(serializer) } #[test] fn deserializes_bytes_as_well_as_equivalent_body_v2() { #[derive(serde::Deserialize, serde::Serialize)] - struct Wrapper(#[serde(deserialize_with = "deserialize")] TimelineMetadata); + struct Wrapper( + #[serde(deserialize_with = "deserialize", serialize_with = "serialize")] + TimelineMetadata, + ); let too_many_bytes = "[216,111,252,208,0,54,0,4,0,0,0,0,1,73,253,144,1,0,0,0,0,1,73,253,24,0,0,0,0,0,0,0,0,0,0,0,0,0,1,73,253,24,0,0,0,0,1,73,253,24,0,0,0,15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]"; let wrapper_from_bytes = serde_json::from_str::(too_many_bytes).unwrap(); - let serialized = serde_json::to_value(JustTheBodyV2(&wrapper_from_bytes.0)).unwrap(); + let serialized = serde_json::to_value(&wrapper_from_bytes).unwrap(); assert_eq!( serialized, @@ -553,59 +548,6 @@ mod tests { ); } - #[test] - fn test_metadata_bincode_serde() { - let 
original_metadata = TimelineMetadata::new( - Lsn(0x200), - Some(Lsn(0x100)), - Some(TIMELINE_ID), - Lsn(0), - Lsn(0), - Lsn(0), - // Any version will do here, so use the default - crate::DEFAULT_PG_VERSION, - ); - let metadata_bytes = original_metadata - .to_bytes() - .expect("Cannot create bytes array from metadata"); - - let metadata_bincode_be_bytes = original_metadata - .ser() - .expect("Cannot serialize the metadata"); - - // 8 bytes for the length of the vector - assert_eq!(metadata_bincode_be_bytes.len(), 8 + metadata_bytes.len()); - - let expected_bincode_bytes = { - let mut temp = vec![]; - let len_bytes = metadata_bytes.len().to_be_bytes(); - temp.extend_from_slice(&len_bytes); - temp.extend_from_slice(&metadata_bytes); - temp - }; - assert_eq!(metadata_bincode_be_bytes, expected_bincode_bytes); - - let deserialized_metadata = TimelineMetadata::des(&metadata_bincode_be_bytes).unwrap(); - // Deserialized metadata has the metadata header, which is different from the serialized one. - // Reference: TimelineMetaData::to_bytes() - let expected_metadata = { - let mut temp_metadata = original_metadata; - let body_bytes = temp_metadata - .body - .ser() - .expect("Cannot serialize the metadata body"); - let metadata_size = METADATA_HDR_SIZE + body_bytes.len(); - let hdr = TimelineMetadataHeader { - size: metadata_size as u16, - format_version: METADATA_FORMAT_VERSION, - checksum: crc32c::crc32c(&body_bytes), - }; - temp_metadata.hdr = hdr; - temp_metadata - }; - assert_eq!(deserialized_metadata, expected_metadata); - } - #[test] fn test_metadata_bincode_serde_ensure_roundtrip() { let original_metadata = TimelineMetadata::new( @@ -619,8 +561,6 @@ mod tests { crate::DEFAULT_PG_VERSION, ); let expected_bytes = vec![ - /* bincode length encoding bytes */ - 0, 0, 0, 0, 0, 0, 2, 0, // 8 bytes for the length of the serialized vector /* TimelineMetadataHeader */ 4, 37, 101, 34, 0, 70, 0, 4, // checksum, size, format_version (4 + 2 + 2) /* TimelineMetadataBodyV2 */ @@ -650,7 +590,7 @@ mod tests { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]; - let metadata_ser_bytes = original_metadata.ser().unwrap(); + let metadata_ser_bytes = original_metadata.to_bytes().unwrap(); assert_eq!(metadata_ser_bytes, expected_bytes); let expected_metadata = { @@ -668,7 +608,7 @@ mod tests { temp_metadata.hdr = hdr; temp_metadata }; - let des_metadata = TimelineMetadata::des(&metadata_ser_bytes).unwrap(); + let des_metadata = TimelineMetadata::from_bytes(&metadata_ser_bytes).unwrap(); assert_eq!(des_metadata, expected_metadata); } } diff --git a/pageserver/src/tenant/remote_timeline_client/index.rs b/pageserver/src/tenant/remote_timeline_client/index.rs index 7d2e9b9a91..6233a3477e 100644 --- a/pageserver/src/tenant/remote_timeline_client/index.rs +++ b/pageserver/src/tenant/remote_timeline_client/index.rs @@ -38,14 +38,17 @@ pub struct IndexPart { /// that latest version stores. pub layer_metadata: HashMap, - // 'disk_consistent_lsn' is a copy of the 'disk_consistent_lsn' in the metadata. - // It's duplicated for convenience when reading the serialized structure, but is - // private because internally we would read from metadata instead. + /// Because of the trouble of eyeballing the legacy "metadata" field, we copied the + /// "disk_consistent_lsn" out. After version 7 this is no longer needed, but the name cannot be + /// reused. 
pub(super) disk_consistent_lsn: Lsn, - // TODO: later make this "rename" to "alias", rename field as "legacy_metadata" + // TODO: rename as "metadata" next week, keep the alias = "metadata_bytes", bump version Adding + // the "alias = metadata" was forgotten in #7693, so we have to use "rewrite = metadata_bytes" + // for backwards compatibility. #[serde( rename = "metadata_bytes", + alias = "metadata", with = "crate::tenant::metadata::modern_serde" )] pub metadata: TimelineMetadata, @@ -76,10 +79,11 @@ impl IndexPart { /// - 4: timeline_layers is fully removed. /// - 5: lineage was added /// - 6: last_aux_file_policy is added. - const LATEST_VERSION: usize = 6; + /// - 7: metadata_bytes is no longer written, but still read + const LATEST_VERSION: usize = 7; // Versions we may see when reading from a bucket. - pub const KNOWN_VERSIONS: &'static [usize] = &[1, 2, 3, 4, 5, 6]; + pub const KNOWN_VERSIONS: &'static [usize] = &[1, 2, 3, 4, 5, 6, 7]; pub const FILE_NAME: &'static str = "index_part.json"; @@ -95,7 +99,7 @@ impl IndexPart { } } - pub fn get_version(&self) -> usize { + pub fn version(&self) -> usize { self.version } @@ -217,9 +221,9 @@ impl Lineage { #[cfg(test)] mod tests { - use std::str::FromStr; - use super::*; + use std::str::FromStr; + use utils::id::TimelineId; #[test] fn v1_indexpart_is_parsed() { @@ -338,8 +342,7 @@ mod tests { ]), disk_consistent_lsn: "0/16960E8".parse::().unwrap(), metadata: TimelineMetadata::from_bytes(&[113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]).unwrap(), - deleted_at: Some(chrono::NaiveDateTime::parse_from_str( - "2023-07-31T09:00:00.123000000", "%Y-%m-%dT%H:%M:%S.%f").unwrap()), + deleted_at: Some(parse_naive_datetime("2023-07-31T09:00:00.123000000")), lineage: Lineage::default(), last_aux_file_policy: None, }; @@ -515,8 +518,7 @@ mod tests { ]), disk_consistent_lsn: "0/16960E8".parse::().unwrap(), metadata: 
TimelineMetadata::from_bytes(&[113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]).unwrap(), - deleted_at: Some(chrono::NaiveDateTime::parse_from_str( - "2023-07-31T09:00:00.123000000", "%Y-%m-%dT%H:%M:%S.%f").unwrap()), + deleted_at: Some(parse_naive_datetime("2023-07-31T09:00:00.123000000")), lineage: Lineage { reparenting_history_truncated: false, reparenting_history: vec![TimelineId::from_str("e1bfd8c633d713d279e6fcd2bcc15b6d").unwrap()], @@ -529,6 +531,60 @@ mod tests { assert_eq!(part, expected); } + #[test] + fn v7_indexpart_is_parsed() { + let example = r#"{ + "version": 7, + "layer_metadata":{ + "000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9": { "file_size": 25600000 }, + "000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51": { "file_size": 9007199254741001 } + }, + "disk_consistent_lsn":"0/16960E8", + "metadata": { + "disk_consistent_lsn": "0/16960E8", + "prev_record_lsn": "0/1696070", + "ancestor_timeline": "e45a7f37d3ee2ff17dc14bf4f4e3f52e", + "ancestor_lsn": "0/0", + "latest_gc_cutoff_lsn": "0/1696070", + "initdb_lsn": "0/1696070", + "pg_version": 14 + }, + "deleted_at": "2023-07-31T09:00:00.123" + }"#; + + let expected = IndexPart { + version: 7, + layer_metadata: HashMap::from([ + ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".parse().unwrap(), LayerFileMetadata { + file_size: 25600000, + generation: Generation::none(), + shard: ShardIndex::unsharded() + }), + ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap(), LayerFileMetadata { + file_size: 9007199254741001, + generation: Generation::none(), + shard: ShardIndex::unsharded() + }) + ]), + disk_consistent_lsn: "0/16960E8".parse::().unwrap(), + metadata: TimelineMetadata::new( + Lsn::from_str("0/16960E8").unwrap(), + Some(Lsn::from_str("0/1696070").unwrap()), + Some(TimelineId::from_str("e45a7f37d3ee2ff17dc14bf4f4e3f52e").unwrap()), + Lsn::INVALID, + Lsn::from_str("0/1696070").unwrap(), + Lsn::from_str("0/1696070").unwrap(), + 14, + ).with_recalculated_checksum().unwrap(), + deleted_at: Some(parse_naive_datetime("2023-07-31T09:00:00.123000000")), + lineage: Default::default(), + last_aux_file_policy: Default::default(), + }; + + let part = IndexPart::from_s3_bytes(example.as_bytes()).unwrap(); + assert_eq!(part, expected); + } + fn parse_naive_datetime(s: &str) -> NaiveDateTime { 
chrono::NaiveDateTime::parse_from_str(s, "%Y-%m-%dT%H:%M:%S.%f").unwrap() } diff --git a/s3_scrubber/src/checks.rs b/s3_scrubber/src/checks.rs index 44fb53696c..4eb8580e32 100644 --- a/s3_scrubber/src/checks.rs +++ b/s3_scrubber/src/checks.rs @@ -78,17 +78,16 @@ pub(crate) fn branch_cleanup_and_check_errors( index_part_generation: _index_part_generation, s3_layers: _s3_layers, } => { - if !IndexPart::KNOWN_VERSIONS.contains(&index_part.get_version()) { - result.errors.push(format!( - "index_part.json version: {}", - index_part.get_version() - )) + if !IndexPart::KNOWN_VERSIONS.contains(&index_part.version()) { + result + .errors + .push(format!("index_part.json version: {}", index_part.version())) } - if &index_part.get_version() != IndexPart::KNOWN_VERSIONS.last().unwrap() { + if &index_part.version() != IndexPart::KNOWN_VERSIONS.last().unwrap() { result.warnings.push(format!( "index_part.json version is not latest: {}", - index_part.get_version() + index_part.version() )) } diff --git a/s3_scrubber/src/scan_pageserver_metadata.rs b/s3_scrubber/src/scan_pageserver_metadata.rs index 6ff9783875..af74ffa4cd 100644 --- a/s3_scrubber/src/scan_pageserver_metadata.rs +++ b/s3_scrubber/src/scan_pageserver_metadata.rs @@ -125,7 +125,7 @@ impl MetadataSummary { { *self .indices_by_version - .entry(index_part.get_version()) + .entry(index_part.version()) .or_insert(0) += 1; if let Err(e) = self.update_histograms(index_part) { diff --git a/test_runner/regress/test_layers_from_future.py b/test_runner/regress/test_layers_from_future.py index 18e5111786..54d3b2d515 100644 --- a/test_runner/regress/test_layers_from_future.py +++ b/test_runner/regress/test_layers_from_future.py @@ -37,7 +37,8 @@ def test_issue_5878(neon_env_builder: NeonEnvBuilder): """ neon_env_builder.enable_pageserver_remote_storage(RemoteStorageKind.LOCAL_FS) - env = neon_env_builder.init_start() + env = neon_env_builder.init_configs() + env.start() env.pageserver.allowed_errors.extend( [".*Dropped remote consistent LSN updates.*", ".*Dropping stale deletions.*"] ) From d3b892e9ad39c50e869ada51b7f892666d4bf476 Mon Sep 17 00:00:00 2001 From: Joonas Koivunen Date: Tue, 11 Jun 2024 17:10:05 +0300 Subject: [PATCH 160/220] test: fix duplicated harness name (#8010) We need unique tenant harness names in case you want to inspect the results of the last failing run. We are not using any proc macros to get the test name as there is no stable way of doing that, and there will not be one in the future, so we need to fix these duplicates. Also, clean up the duplicated tests to not mix `?` and `unwrap/assert`. 
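To make the convention concrete, a test that follows it looks like the sketch below. It assumes the surrounding `pageserver::tenant` test module and its imports (it reuses `TenantHarness` exactly as it appears in the diff); the note about locating artifacts is inferred from the motivation above rather than from harness documentation:

```rust
#[tokio::test]
async fn test_metadata_tombstone_empty_image_creation() {
    // The harness name repeats the test function name by hand (no proc macro),
    // so the output of the last failing run is unambiguous to locate.
    let harness = TenantHarness::create("test_metadata_tombstone_empty_image_creation").unwrap();
    let (_tenant, _ctx) = harness.load().await;
    // From here on, use .unwrap()/assert! consistently rather than mixing them
    // with `?`, which would force the test to return a Result just for plumbing.
}
```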
--- pageserver/src/tenant.rs | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 2e3ce45c2b..10842c1504 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -6584,8 +6584,8 @@ mod tests { } #[tokio::test] - async fn test_metadata_tombstone_image_creation() -> anyhow::Result<()> { - let harness = TenantHarness::create("test_metadata_tombstone_image_creation")?; + async fn test_metadata_tombstone_image_creation() { + let harness = TenantHarness::create("test_metadata_tombstone_image_creation").unwrap(); let (tenant, ctx) = harness.load().await; let key0 = Key::from_hex("620000000033333333444444445500000000").unwrap(); @@ -6613,7 +6613,8 @@ mod tests { vec![(Lsn(0x10), vec![(key1, test_img("metadata key 1"))])], Lsn(0x30), ) - .await?; + .await + .unwrap(); let cancel = CancellationToken::new(); @@ -6628,23 +6629,24 @@ mod tests { }, &ctx, ) - .await?; + .await + .unwrap(); // Image layers are created at last_record_lsn let images = tline .inspect_image_layers(Lsn(0x30), &ctx) - .await? + .await + .unwrap() .into_iter() .filter(|(k, _)| k.is_metadata_key()) .collect::>(); assert_eq!(images.len(), 2); // the image layer should only contain two existing keys, tombstones should be removed. - - Ok(()) } #[tokio::test] - async fn test_metadata_tombstone_empty_image_creation() -> anyhow::Result<()> { - let harness = TenantHarness::create("test_metadata_tombstone_image_creation")?; + async fn test_metadata_tombstone_empty_image_creation() { + let harness = + TenantHarness::create("test_metadata_tombstone_empty_image_creation").unwrap(); let (tenant, ctx) = harness.load().await; let key1 = Key::from_hex("620000000033333333444444445500000001").unwrap(); @@ -6666,7 +6668,8 @@ mod tests { vec![(Lsn(0x10), vec![(key1, test_img("metadata key 1"))])], Lsn(0x30), ) - .await?; + .await + .unwrap(); let cancel = CancellationToken::new(); @@ -6681,17 +6684,17 @@ mod tests { }, &ctx, ) - .await?; + .await + .unwrap(); // Image layers are created at last_record_lsn let images = tline .inspect_image_layers(Lsn(0x30), &ctx) - .await? + .await + .unwrap() .into_iter() .filter(|(k, _)| k.is_metadata_key()) .collect::>(); assert_eq!(images.len(), 0); // the image layer should not contain tombstones, or it is not created - - Ok(()) } } From 4c2100794b97c1f635bf9dcc9013d2b2c8733de6 Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Tue, 11 Jun 2024 10:14:51 -0400 Subject: [PATCH 161/220] feat(pageserver): initial code sketch & test case for combined gc+compaction at gc_horizon (#7948) A demo for a building block for compaction. The GC-compaction operation iterates all layers below/intersect with the GC horizon, and do a full layer rewrite of all of them. The end result will be image layer covering the full keyspace at GC-horizon, and a bunch of delta layers above the GC-horizon. This helps us collect the garbages of the test_gc_feedback test case to reduce space amplification. This operation can be manually triggered using an HTTP API or be triggered based on some metrics. Actual method TBD. The test is very basic and it's very likely that most part of the algorithm will be rewritten. I would like to get this merged so that I can have a basic skeleton for the algorithm and then make incremental changes. 
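For orientation, the sketch below is a deliberately simplified, self-contained model of that pass: `u64` stands in for `Key`/`Lsn`, `String` for page images and WAL records, and a caller-supplied `apply` closure for WAL redo. The real implementation (`compact_with_gc` in the diff) instead streams through `DeltaLayerWriter`/`ImageLayerWriter` and reconstructs images with `reconstruct_value`:

```rust
use std::collections::BTreeMap;

type Key = u64;
type Lsn = u64;

enum Value {
    Image(String),
    Delta(String), // stands in for a NeonWalRecord
}

/// Collapse everything at or below `horizon` into one image per key and keep
/// everything above `horizon` as deltas. `apply` replays one delta on an image.
fn gc_compact(
    mut entries: Vec<(Key, Lsn, Value)>,
    horizon: Lsn,
    apply: impl Fn(String, &str) -> String,
) -> (BTreeMap<Key, String>, Vec<(Key, Lsn, Value)>) {
    // Key ascending, LSN ascending: the order a k-merge over the selected
    // layers would naturally yield.
    entries.sort_by_key(|e| (e.0, e.1));

    let mut images_at_horizon = BTreeMap::<Key, String>::new();
    let mut deltas_above = Vec::new();

    for (key, lsn, value) in entries {
        if lsn > horizon {
            deltas_above.push((key, lsn, value)); // retained verbatim
            continue;
        }
        match value {
            // A newer image below the horizon supersedes whatever we had.
            Value::Image(img) => {
                images_at_horizon.insert(key, img);
            }
            // A delta below the horizon is folded into the image and then
            // dropped: this is where the garbage actually disappears.
            Value::Delta(rec) => {
                // Toy fallback to an empty base; the real code needs a base
                // image or an init record for the key.
                let base = images_at_horizon.remove(&key).unwrap_or_default();
                images_at_horizon.insert(key, apply(base, &rec));
            }
        }
    }
    (images_at_horizon, deltas_above)
}
```

Like the current implementation, this ignores `retain_lsns`: it only distinguishes "at or below the horizon" from "above it".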
image --------- Signed-off-by: Alex Chi Z --- pageserver/src/tenant.rs | 160 ++++++++++++++++ .../src/tenant/storage_layer/delta_layer.rs | 39 ++++ .../src/tenant/storage_layer/image_layer.rs | 28 +++ pageserver/src/tenant/storage_layer/layer.rs | 31 ++++ pageserver/src/tenant/timeline.rs | 13 ++ pageserver/src/tenant/timeline/compaction.rs | 172 ++++++++++++++++++ .../src/tenant/timeline/layer_manager.rs | 12 ++ 7 files changed, 455 insertions(+) diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 10842c1504..f9ed6d3071 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -4044,10 +4044,12 @@ mod tests { use crate::DEFAULT_PG_VERSION; use bytes::{Bytes, BytesMut}; use hex_literal::hex; + use itertools::Itertools; use pageserver_api::key::{AUX_FILES_KEY, AUX_KEY_PREFIX, NON_INHERITED_RANGE}; use pageserver_api::keyspace::KeySpace; use pageserver_api::models::{CompactionAlgorithm, CompactionAlgorithmSettings}; use rand::{thread_rng, Rng}; + use storage_layer::PersistentLayerKey; use tests::storage_layer::ValuesReconstructState; use tests::timeline::{GetVectoredError, ShutdownMode}; use utils::bin_ser::BeSer; @@ -6697,4 +6699,162 @@ mod tests { .collect::>(); assert_eq!(images.len(), 0); // the image layer should not contain tombstones, or it is not created } + + #[tokio::test] + async fn test_simple_bottom_most_compaction() -> anyhow::Result<()> { + let harness = TenantHarness::create("test_simple_bottom_most_compaction")?; + let (tenant, ctx) = harness.load().await; + + fn get_key(id: u32) -> Key { + // using aux key here b/c they are guaranteed to be inside `collect_keyspace`. + let mut key = Key::from_hex("620000000033333333444444445500000000").unwrap(); + key.field6 = id; + key + } + + // We create one bottom-most image layer, a delta layer D1 crossing the GC horizon, D2 below the horizon, and D3 above the horizon. + // + // | D1 | | D3 | + // -| |-- gc horizon ----------------- + // | | | D2 | + // --------- img layer ------------------ + // + // What we should expact from this compaction is: + // | Part of D1 | | D3 | + // --------- img layer with D1+D2 at GC horizon------------------ + + // img layer at 0x10 + let img_layer = (0..10) + .map(|id| (get_key(id), test_img(&format!("value {id}@0x10")))) + .collect_vec(); + + let delta1 = vec![ + // TODO: we should test a real delta record here, which requires us to add a variant of NeonWalRecord for testing purpose. 
+ ( + get_key(1), + Lsn(0x20), + Value::Image(test_img("value 1@0x20")), + ), + ( + get_key(2), + Lsn(0x30), + Value::Image(test_img("value 2@0x30")), + ), + ( + get_key(3), + Lsn(0x40), + Value::Image(test_img("value 3@0x40")), + ), + ]; + let delta2 = vec![ + ( + get_key(5), + Lsn(0x20), + Value::Image(test_img("value 5@0x20")), + ), + ( + get_key(6), + Lsn(0x20), + Value::Image(test_img("value 6@0x20")), + ), + ]; + let delta3 = vec![ + ( + get_key(8), + Lsn(0x40), + Value::Image(test_img("value 8@0x40")), + ), + ( + get_key(9), + Lsn(0x40), + Value::Image(test_img("value 9@0x40")), + ), + ]; + + let tline = tenant + .create_test_timeline_with_layers( + TIMELINE_ID, + Lsn(0x10), + DEFAULT_PG_VERSION, + &ctx, + vec![delta1, delta2, delta3], // delta layers + vec![(Lsn(0x10), img_layer)], // image layers + Lsn(0x50), + ) + .await?; + { + // Update GC info + let mut guard = tline.gc_info.write().unwrap(); + guard.cutoffs.pitr = Lsn(0x30); + guard.cutoffs.horizon = Lsn(0x30); + } + + let cancel = CancellationToken::new(); + tline.compact_with_gc(&cancel, &ctx).await.unwrap(); + + // Check if the image layer at the GC horizon contains exactly what we want + let image_at_gc_horizon = tline + .inspect_image_layers(Lsn(0x30), &ctx) + .await + .unwrap() + .into_iter() + .filter(|(k, _)| k.is_metadata_key()) + .collect::>(); + + assert_eq!(image_at_gc_horizon.len(), 10); + let expected_lsn = [0x10, 0x20, 0x30, 0x10, 0x10, 0x20, 0x20, 0x10, 0x10, 0x10]; + for idx in 0..10 { + assert_eq!( + image_at_gc_horizon[idx], + ( + get_key(idx as u32), + test_img(&format!("value {idx}@{:#x}", expected_lsn[idx])) + ) + ); + } + + // Check if old layers are removed / new layers have the expected LSN + let mut all_layers = tline.inspect_historic_layers().await.unwrap(); + all_layers.sort_by(|k1, k2| { + ( + k1.is_delta, + k1.key_range.start, + k1.key_range.end, + k1.lsn_range.start, + k1.lsn_range.end, + ) + .cmp(&( + k2.is_delta, + k2.key_range.start, + k2.key_range.end, + k2.lsn_range.start, + k2.lsn_range.end, + )) + }); + assert_eq!( + all_layers, + vec![ + // Image layer at GC horizon + PersistentLayerKey { + key_range: Key::MIN..get_key(10), + lsn_range: Lsn(0x30)..Lsn(0x31), + is_delta: false + }, + // The delta layer that is cut in the middle + PersistentLayerKey { + key_range: Key::MIN..get_key(9), + lsn_range: Lsn(0x30)..Lsn(0x41), + is_delta: true + }, + // The delta layer we created and should not be picked for the compaction + PersistentLayerKey { + key_range: get_key(8)..get_key(10), + lsn_range: Lsn(0x40)..Lsn(0x41), + is_delta: true + } + ] + ); + + Ok(()) + } } diff --git a/pageserver/src/tenant/storage_layer/delta_layer.rs b/pageserver/src/tenant/storage_layer/delta_layer.rs index 999e2e8679..eb7cf81643 100644 --- a/pageserver/src/tenant/storage_layer/delta_layer.rs +++ b/pageserver/src/tenant/storage_layer/delta_layer.rs @@ -929,6 +929,45 @@ impl DeltaLayerInner { Ok(()) } + /// Load all key-values in the delta layer, should be replaced by an iterator-based interface in the future. 
+ #[cfg(test)] + pub(super) async fn load_key_values( + &self, + ctx: &RequestContext, + ) -> anyhow::Result> { + let block_reader = FileBlockReader::new(&self.file, self.file_id); + let index_reader = DiskBtreeReader::<_, DELTA_KEY_SIZE>::new( + self.index_start_blk, + self.index_root_blk, + block_reader, + ); + let mut result = Vec::new(); + let mut stream = + Box::pin(self.stream_index_forwards(&index_reader, &[0; DELTA_KEY_SIZE], ctx)); + let block_reader = FileBlockReader::new(&self.file, self.file_id); + let cursor = block_reader.block_cursor(); + let mut buf = Vec::new(); + while let Some(item) = stream.next().await { + let (key, lsn, pos) = item?; + // TODO: dedup code with get_reconstruct_value + // TODO: ctx handling and sharding + cursor + .read_blob_into_buf(pos.pos(), &mut buf, ctx) + .await + .with_context(|| { + format!("Failed to read blob from virtual file {}", self.file.path) + })?; + let val = Value::des(&buf).with_context(|| { + format!( + "Failed to deserialize file blob from virtual file {}", + self.file.path + ) + })?; + result.push((key, lsn, val)); + } + Ok(result) + } + async fn plan_reads( keyspace: &KeySpace, lsn_range: Range, diff --git a/pageserver/src/tenant/storage_layer/image_layer.rs b/pageserver/src/tenant/storage_layer/image_layer.rs index 285618b146..06e2f09384 100644 --- a/pageserver/src/tenant/storage_layer/image_layer.rs +++ b/pageserver/src/tenant/storage_layer/image_layer.rs @@ -485,6 +485,34 @@ impl ImageLayerInner { Ok(()) } + /// Load all key-values in the delta layer, should be replaced by an iterator-based interface in the future. + #[cfg(test)] + pub(super) async fn load_key_values( + &self, + ctx: &RequestContext, + ) -> anyhow::Result> { + let block_reader = FileBlockReader::new(&self.file, self.file_id); + let tree_reader = + DiskBtreeReader::new(self.index_start_blk, self.index_root_blk, &block_reader); + let mut result = Vec::new(); + let mut stream = Box::pin(tree_reader.get_stream_from(&[0; KEY_SIZE], ctx)); + let block_reader = FileBlockReader::new(&self.file, self.file_id); + let cursor = block_reader.block_cursor(); + while let Some(item) = stream.next().await { + // TODO: dedup code with get_reconstruct_value + let (raw_key, offset) = item?; + let key = Key::from_slice(&raw_key[..KEY_SIZE]); + // TODO: ctx handling and sharding + let blob = cursor + .read_blob(offset, ctx) + .await + .with_context(|| format!("failed to read value from offset {}", offset))?; + let value = Bytes::from(blob); + result.push((key, self.lsn, Value::Image(value))); + } + Ok(result) + } + /// Traverse the layer's index to build read operations on the overlap of the input keyspace /// and the keys in this layer. /// diff --git a/pageserver/src/tenant/storage_layer/layer.rs b/pageserver/src/tenant/storage_layer/layer.rs index 18f9ba4ef8..32acb3f0cd 100644 --- a/pageserver/src/tenant/storage_layer/layer.rs +++ b/pageserver/src/tenant/storage_layer/layer.rs @@ -388,6 +388,23 @@ impl Layer { }) } + /// Get all key/values in the layer. Should be replaced with an iterator-based API in the future. + #[cfg(test)] + pub(crate) async fn load_key_values( + &self, + ctx: &RequestContext, + ) -> anyhow::Result> { + let layer = self + .0 + .get_or_maybe_download(true, Some(ctx)) + .await + .map_err(|err| match err { + DownloadError::DownloadCancelled => GetVectoredError::Cancelled, + other => GetVectoredError::Other(anyhow::anyhow!(other)), + })?; + layer.load_key_values(&self.0, ctx).await + } + /// Download the layer if evicted. 
/// /// Will not error when the layer is already downloaded. @@ -1757,6 +1774,20 @@ impl DownloadedLayer { } } + #[cfg(test)] + async fn load_key_values( + &self, + owner: &Arc, + ctx: &RequestContext, + ) -> anyhow::Result> { + use LayerKind::*; + + match self.get(owner, ctx).await? { + Delta(d) => d.load_key_values(ctx).await, + Image(i) => i.load_key_values(ctx).await, + } + } + async fn dump(&self, owner: &Arc, ctx: &RequestContext) -> anyhow::Result<()> { use LayerKind::*; match self.get(owner, ctx).await? { diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 6da0f9d91c..54a4ceeaf3 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -5549,6 +5549,19 @@ impl Timeline { all_data.sort(); Ok(all_data) } + + /// Get all historic layer descriptors in the layer map + #[cfg(test)] + pub(crate) async fn inspect_historic_layers( + self: &Arc, + ) -> anyhow::Result> { + let mut layers = Vec::new(); + let guard = self.layers.read().await; + for layer in guard.layer_map().iter_historic_layers() { + layers.push(layer.key()); + } + Ok(layers) + } } type TraversalPathItem = (ValueReconstructResult, Lsn, TraversalId); diff --git a/pageserver/src/tenant/timeline/compaction.rs b/pageserver/src/tenant/timeline/compaction.rs index d8de6aee7c..8a95029f33 100644 --- a/pageserver/src/tenant/timeline/compaction.rs +++ b/pageserver/src/tenant/timeline/compaction.rs @@ -952,6 +952,178 @@ impl Timeline { adaptor.flush_updates().await?; Ok(()) } + + /// An experimental compaction building block that combines compaction with garbage collection. + /// + /// The current implementation picks all delta + image layers that are below or intersecting with + /// the GC horizon without considering retain_lsns. Then, it does a full compaction over all these delta + /// layers and image layers, which generates image layers on the gc horizon, drop deltas below gc horizon, + /// and create delta layers with all deltas >= gc horizon. + #[cfg(test)] + pub(crate) async fn compact_with_gc( + self: &Arc, + _cancel: &CancellationToken, + ctx: &RequestContext, + ) -> Result<(), CompactionError> { + use crate::tenant::storage_layer::ValueReconstructState; + // Step 0: pick all delta layers + image layers below/intersect with the GC horizon. + // The layer selection has the following properties: + // 1. If a layer is in the selection, all layers below it are in the selection. + // 2. Inferred from (1), for each key in the layer selection, the value can be reconstructed only with the layers in the layer selection. + let (layer_selection, gc_cutoff) = { + let guard = self.layers.read().await; + let layers = guard.layer_map(); + let gc_info = self.gc_info.read().unwrap(); + let gc_cutoff = Lsn::min(gc_info.cutoffs.horizon, gc_info.cutoffs.pitr); + let mut selected_layers = Vec::new(); + // TODO: consider retain_lsns + drop(gc_info); + for desc in layers.iter_historic_layers() { + if desc.get_lsn_range().start <= gc_cutoff { + selected_layers.push(guard.get_from_desc(&desc)); + } + } + (selected_layers, gc_cutoff) + }; + // Step 1: (In the future) construct a k-merge iterator over all layers. For now, simply collect all keys + LSNs. + let mut all_key_values = Vec::new(); + for layer in &layer_selection { + all_key_values.extend(layer.load_key_values(ctx).await?); + } + // Key small to large, LSN low to high, if the same LSN has both image and delta due to the merge of delta layers and + // image layers, make image appear later than delta. 
+ struct ValueWrapper<'a>(&'a crate::repository::Value); + impl Ord for ValueWrapper<'_> { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + use crate::repository::Value; + use std::cmp::Ordering; + match (self.0, other.0) { + (Value::Image(_), Value::WalRecord(_)) => Ordering::Greater, + (Value::WalRecord(_), Value::Image(_)) => Ordering::Less, + _ => Ordering::Equal, + } + } + } + impl PartialOrd for ValueWrapper<'_> { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } + } + impl PartialEq for ValueWrapper<'_> { + fn eq(&self, other: &Self) -> bool { + self.cmp(other) == std::cmp::Ordering::Equal + } + } + impl Eq for ValueWrapper<'_> {} + all_key_values.sort_by(|(k1, l1, v1), (k2, l2, v2)| { + (k1, l1, ValueWrapper(v1)).cmp(&(k2, l2, ValueWrapper(v2))) + }); + let max_lsn = all_key_values + .iter() + .map(|(_, lsn, _)| lsn) + .max() + .copied() + .unwrap() + + 1; + // Step 2: Produce images+deltas. TODO: ensure newly-produced delta does not overlap with other deltas. + // Data of the same key. + let mut accumulated_values = Vec::new(); + let mut last_key = all_key_values.first().unwrap().0; // TODO: assert all_key_values not empty + + /// Take a list of images and deltas, produce an image at the GC horizon, and a list of deltas above the GC horizon. + async fn flush_accumulated_states( + tline: &Arc, + key: Key, + accumulated_values: &[&(Key, Lsn, crate::repository::Value)], + horizon: Lsn, + ) -> anyhow::Result<(Vec<(Key, Lsn, crate::repository::Value)>, bytes::Bytes)> { + let mut base_image = None; + let mut keys_above_horizon = Vec::new(); + let mut delta_above_base_image = Vec::new(); + // We have a list of deltas/images. We want to create image layers while collect garbages. + for (key, lsn, val) in accumulated_values.iter().rev() { + if *lsn > horizon { + keys_above_horizon.push((*key, *lsn, val.clone())); // TODO: ensure one LSN corresponds to either delta or image instead of both + } else if *lsn <= horizon { + match val { + crate::repository::Value::Image(image) => { + if lsn <= &horizon { + base_image = Some((*lsn, image.clone())); + break; + } + } + crate::repository::Value::WalRecord(wal) => { + delta_above_base_image.push((*lsn, wal.clone())); + } + } + } + } + delta_above_base_image.reverse(); + keys_above_horizon.reverse(); + let state = ValueReconstructState { + img: base_image, + records: delta_above_base_image, + }; + let img = tline.reconstruct_value(key, horizon, state).await?; + Ok((keys_above_horizon, img)) + } + + let mut delta_layer_writer = DeltaLayerWriter::new( + self.conf, + self.timeline_id, + self.tenant_shard_id, + all_key_values.first().unwrap().0, + gc_cutoff..max_lsn, // TODO: off by one? 
+ ctx, + ) + .await?; + let mut image_layer_writer = ImageLayerWriter::new( + self.conf, + self.timeline_id, + self.tenant_shard_id, + &(all_key_values.first().unwrap().0..all_key_values.last().unwrap().0.next()), + gc_cutoff, + ctx, + ) + .await?; + + for item @ (key, _, _) in &all_key_values { + if &last_key == key { + accumulated_values.push(item); + } else { + let (deltas, image) = + flush_accumulated_states(self, last_key, &accumulated_values, gc_cutoff) + .await?; + image_layer_writer.put_image(last_key, image, ctx).await?; + for (key, lsn, val) in deltas { + delta_layer_writer.put_value(key, lsn, val, ctx).await?; + } + accumulated_values.clear(); + accumulated_values.push(item); + last_key = *key; + } + } + let (deltas, image) = + flush_accumulated_states(self, last_key, &accumulated_values, gc_cutoff).await?; + image_layer_writer.put_image(last_key, image, ctx).await?; + for (key, lsn, val) in deltas { + delta_layer_writer.put_value(key, lsn, val, ctx).await?; + } + accumulated_values.clear(); + // TODO: split layers + let delta_layer = delta_layer_writer.finish(last_key, self, ctx).await?; + let image_layer = image_layer_writer.finish(self, ctx).await?; + // Step 3: Place back to the layer map. + { + let mut guard = self.layers.write().await; + guard.finish_gc_compaction( + &layer_selection, + &[delta_layer.clone(), image_layer.clone()], + &self.metrics, + ) + }; + Ok(()) + } } struct TimelineAdaptor { diff --git a/pageserver/src/tenant/timeline/layer_manager.rs b/pageserver/src/tenant/timeline/layer_manager.rs index 21e64d562a..550a9a567a 100644 --- a/pageserver/src/tenant/timeline/layer_manager.rs +++ b/pageserver/src/tenant/timeline/layer_manager.rs @@ -226,6 +226,18 @@ impl LayerManager { updates.flush(); } + /// Called when a GC-compaction is completed. + #[cfg(test)] + pub(crate) fn finish_gc_compaction( + &mut self, + compact_from: &[Layer], + compact_to: &[ResidentLayer], + metrics: &TimelineMetrics, + ) { + // We can simply reuse compact l0 logic. Use a different function name to indicate a different type of layer map modification. + self.finish_compact_l0(compact_from, compact_to, metrics) + } + /// Called when compaction is completed. pub(crate) fn rewrite_layers( &mut self, From 126bcc3794a41e3b776108f826c68c6871044876 Mon Sep 17 00:00:00 2001 From: Vlad Lazar Date: Tue, 11 Jun 2024 16:03:25 +0100 Subject: [PATCH 162/220] storcon: track number of attached shards for each node (#8011) ## Problem The storage controller does not track the number of shards attached to a given pageserver. This is a requirement for various scheduling operations (e.g. draining and filling will use this to figure out if the cluster is balanced) ## Summary of Changes Track the number of shards attached to each node. Related https://github.com/neondatabase/neon/issues/7387 --- storage_controller/src/scheduler.rs | 101 ++++++++++++++++++------- storage_controller/src/service.rs | 2 +- storage_controller/src/tenant_shard.rs | 56 +++++++++----- 3 files changed, 114 insertions(+), 45 deletions(-) diff --git a/storage_controller/src/scheduler.rs b/storage_controller/src/scheduler.rs index 3ff0d87988..4ab85509dc 100644 --- a/storage_controller/src/scheduler.rs +++ b/storage_controller/src/scheduler.rs @@ -29,6 +29,8 @@ pub enum MaySchedule { struct SchedulerNode { /// How many shards are currently scheduled on this node, via their [`crate::tenant_shard::IntentState`]. shard_count: usize, + /// How many shards are currently attached on this node, via their [`crate::tenant_shard::IntentState`]. 
+ attached_shard_count: usize, /// Whether this node is currently elegible to have new shards scheduled (this is derived /// from a node's availability state and scheduling policy). @@ -42,7 +44,9 @@ impl PartialEq for SchedulerNode { (MaySchedule::Yes(_), MaySchedule::Yes(_)) | (MaySchedule::No, MaySchedule::No) ); - may_schedule_matches && self.shard_count == other.shard_count + may_schedule_matches + && self.shard_count == other.shard_count + && self.attached_shard_count == other.attached_shard_count } } @@ -138,6 +142,15 @@ impl ScheduleContext { } } +pub(crate) enum RefCountUpdate { + PromoteSecondary, + Attach, + Detach, + DemoteAttached, + AddSecondary, + RemoveSecondary, +} + impl Scheduler { pub(crate) fn new<'a>(nodes: impl Iterator) -> Self { let mut scheduler_nodes = HashMap::new(); @@ -146,6 +159,7 @@ impl Scheduler { node.get_id(), SchedulerNode { shard_count: 0, + attached_shard_count: 0, may_schedule: node.may_schedule(), }, ); @@ -171,6 +185,7 @@ impl Scheduler { node.get_id(), SchedulerNode { shard_count: 0, + attached_shard_count: 0, may_schedule: node.may_schedule(), }, ); @@ -179,7 +194,10 @@ impl Scheduler { for shard in shards { if let Some(node_id) = shard.intent.get_attached() { match expect_nodes.get_mut(node_id) { - Some(node) => node.shard_count += 1, + Some(node) => { + node.shard_count += 1; + node.attached_shard_count += 1; + } None => anyhow::bail!( "Tenant {} references nonexistent node {}", shard.tenant_shard_id, @@ -227,31 +245,42 @@ impl Scheduler { Ok(()) } - /// Increment the reference count of a node. This reference count is used to guide scheduling - /// decisions, not for memory management: it represents one tenant shard whose IntentState targets - /// this node. + /// Update the reference counts of a node. These reference counts are used to guide scheduling + /// decisions, not for memory management: they represent the number of tenant shard whose IntentState + /// targets this node and the number of tenants shars whose IntentState is attached to this + /// node. /// /// It is an error to call this for a node that is not known to the scheduler (i.e. passed into /// [`Self::new`] or [`Self::node_upsert`]) - pub(crate) fn node_inc_ref(&mut self, node_id: NodeId) { - let Some(node) = self.nodes.get_mut(&node_id) else { - tracing::error!("Scheduler missing node {node_id}"); - debug_assert!(false); - return; - }; - - node.shard_count += 1; - } - - /// Decrement a node's reference count. Inverse of [`Self::node_inc_ref`]. 
- pub(crate) fn node_dec_ref(&mut self, node_id: NodeId) { + pub(crate) fn update_node_ref_counts(&mut self, node_id: NodeId, update: RefCountUpdate) { let Some(node) = self.nodes.get_mut(&node_id) else { debug_assert!(false); tracing::error!("Scheduler missing node {node_id}"); return; }; - node.shard_count -= 1; + match update { + RefCountUpdate::PromoteSecondary => { + node.attached_shard_count += 1; + } + RefCountUpdate::Attach => { + node.shard_count += 1; + node.attached_shard_count += 1; + } + RefCountUpdate::Detach => { + node.shard_count -= 1; + node.attached_shard_count -= 1; + } + RefCountUpdate::DemoteAttached => { + node.attached_shard_count -= 1; + } + RefCountUpdate::AddSecondary => { + node.shard_count += 1; + } + RefCountUpdate::RemoveSecondary => { + node.shard_count -= 1; + } + } } pub(crate) fn node_upsert(&mut self, node: &Node) { @@ -263,6 +292,7 @@ impl Scheduler { Vacant(entry) => { entry.insert(SchedulerNode { shard_count: 0, + attached_shard_count: 0, may_schedule: node.may_schedule(), }); } @@ -385,6 +415,11 @@ impl Scheduler { pub(crate) fn get_node_shard_count(&self, node_id: NodeId) -> usize { self.nodes.get(&node_id).unwrap().shard_count } + + #[cfg(test)] + pub(crate) fn get_node_attached_shard_count(&self, node_id: NodeId) -> usize { + self.nodes.get(&node_id).unwrap().attached_shard_count + } } #[cfg(test)] @@ -437,18 +472,28 @@ mod tests { let scheduled = scheduler.schedule_shard(&[], &context)?; t2_intent.set_attached(&mut scheduler, Some(scheduled)); - assert_eq!(scheduler.nodes.get(&NodeId(1)).unwrap().shard_count, 1); - assert_eq!(scheduler.nodes.get(&NodeId(2)).unwrap().shard_count, 1); + assert_eq!(scheduler.get_node_shard_count(NodeId(1)), 1); + assert_eq!(scheduler.get_node_attached_shard_count(NodeId(1)), 1); + + assert_eq!(scheduler.get_node_shard_count(NodeId(2)), 1); + assert_eq!(scheduler.get_node_attached_shard_count(NodeId(2)), 1); let scheduled = scheduler.schedule_shard(&t1_intent.all_pageservers(), &context)?; t1_intent.push_secondary(&mut scheduler, scheduled); - assert_eq!(scheduler.nodes.get(&NodeId(1)).unwrap().shard_count, 1); - assert_eq!(scheduler.nodes.get(&NodeId(2)).unwrap().shard_count, 2); + assert_eq!(scheduler.get_node_shard_count(NodeId(1)), 1); + assert_eq!(scheduler.get_node_attached_shard_count(NodeId(1)), 1); + + assert_eq!(scheduler.get_node_shard_count(NodeId(2)), 2); + assert_eq!(scheduler.get_node_attached_shard_count(NodeId(2)), 1); t1_intent.clear(&mut scheduler); - assert_eq!(scheduler.nodes.get(&NodeId(1)).unwrap().shard_count, 0); - assert_eq!(scheduler.nodes.get(&NodeId(2)).unwrap().shard_count, 1); + assert_eq!(scheduler.get_node_shard_count(NodeId(1)), 0); + assert_eq!(scheduler.get_node_shard_count(NodeId(2)), 1); + + let total_attached = scheduler.get_node_attached_shard_count(NodeId(1)) + + scheduler.get_node_attached_shard_count(NodeId(2)); + assert_eq!(total_attached, 1); if cfg!(debug_assertions) { // Dropping an IntentState without clearing it causes a panic in debug mode, @@ -459,8 +504,12 @@ mod tests { assert!(result.is_err()); } else { t2_intent.clear(&mut scheduler); - assert_eq!(scheduler.nodes.get(&NodeId(1)).unwrap().shard_count, 0); - assert_eq!(scheduler.nodes.get(&NodeId(2)).unwrap().shard_count, 0); + + assert_eq!(scheduler.get_node_shard_count(NodeId(1)), 0); + assert_eq!(scheduler.get_node_attached_shard_count(NodeId(1)), 0); + + assert_eq!(scheduler.get_node_shard_count(NodeId(2)), 0); + assert_eq!(scheduler.get_node_attached_shard_count(NodeId(2)), 0); } Ok(()) diff --git 
a/storage_controller/src/service.rs b/storage_controller/src/service.rs index 756dc10a2a..1e81b5c5a2 100644 --- a/storage_controller/src/service.rs +++ b/storage_controller/src/service.rs @@ -4312,7 +4312,7 @@ impl Service { continue; } - if tenant_shard.intent.demote_attached(node_id) { + if tenant_shard.intent.demote_attached(scheduler, node_id) { tenant_shard.sequence = tenant_shard.sequence.next(); // TODO: populate a ScheduleContext including all shards in the same tenant_id (only matters diff --git a/storage_controller/src/tenant_shard.rs b/storage_controller/src/tenant_shard.rs index dda17f9887..77bbf4c604 100644 --- a/storage_controller/src/tenant_shard.rs +++ b/storage_controller/src/tenant_shard.rs @@ -8,7 +8,7 @@ use crate::{ metrics::{self, ReconcileCompleteLabelGroup, ReconcileOutcome}, persistence::TenantShardPersistence, reconciler::ReconcileUnits, - scheduler::{AffinityScore, MaySchedule, ScheduleContext}, + scheduler::{AffinityScore, MaySchedule, RefCountUpdate, ScheduleContext}, }; use pageserver_api::controller_api::{PlacementPolicy, ShardSchedulingPolicy}; use pageserver_api::{ @@ -153,7 +153,7 @@ impl IntentState { } pub(crate) fn single(scheduler: &mut Scheduler, node_id: Option) -> Self { if let Some(node_id) = node_id { - scheduler.node_inc_ref(node_id); + scheduler.update_node_ref_counts(node_id, RefCountUpdate::Attach); } Self { attached: node_id, @@ -164,10 +164,10 @@ impl IntentState { pub(crate) fn set_attached(&mut self, scheduler: &mut Scheduler, new_attached: Option) { if self.attached != new_attached { if let Some(old_attached) = self.attached.take() { - scheduler.node_dec_ref(old_attached); + scheduler.update_node_ref_counts(old_attached, RefCountUpdate::Detach); } if let Some(new_attached) = &new_attached { - scheduler.node_inc_ref(*new_attached); + scheduler.update_node_ref_counts(*new_attached, RefCountUpdate::Attach); } self.attached = new_attached; } @@ -177,22 +177,27 @@ impl IntentState { /// secondary to attached while maintaining the scheduler's reference counts. pub(crate) fn promote_attached( &mut self, - _scheduler: &mut Scheduler, + scheduler: &mut Scheduler, promote_secondary: NodeId, ) { // If we call this with a node that isn't in secondary, it would cause incorrect // scheduler reference counting, since we assume the node is already referenced as a secondary. debug_assert!(self.secondary.contains(&promote_secondary)); - // TODO: when scheduler starts tracking attached + secondary counts separately, we will - // need to call into it here. 
self.secondary.retain(|n| n != &promote_secondary); + + let demoted = self.attached; self.attached = Some(promote_secondary); + + scheduler.update_node_ref_counts(promote_secondary, RefCountUpdate::PromoteSecondary); + if let Some(demoted) = demoted { + scheduler.update_node_ref_counts(demoted, RefCountUpdate::DemoteAttached); + } } pub(crate) fn push_secondary(&mut self, scheduler: &mut Scheduler, new_secondary: NodeId) { debug_assert!(!self.secondary.contains(&new_secondary)); - scheduler.node_inc_ref(new_secondary); + scheduler.update_node_ref_counts(new_secondary, RefCountUpdate::AddSecondary); self.secondary.push(new_secondary); } @@ -200,27 +205,27 @@ impl IntentState { pub(crate) fn remove_secondary(&mut self, scheduler: &mut Scheduler, node_id: NodeId) { let index = self.secondary.iter().position(|n| *n == node_id); if let Some(index) = index { - scheduler.node_dec_ref(node_id); + scheduler.update_node_ref_counts(node_id, RefCountUpdate::RemoveSecondary); self.secondary.remove(index); } } pub(crate) fn clear_secondary(&mut self, scheduler: &mut Scheduler) { for secondary in self.secondary.drain(..) { - scheduler.node_dec_ref(secondary); + scheduler.update_node_ref_counts(secondary, RefCountUpdate::RemoveSecondary); } } /// Remove the last secondary node from the list of secondaries pub(crate) fn pop_secondary(&mut self, scheduler: &mut Scheduler) { if let Some(node_id) = self.secondary.pop() { - scheduler.node_dec_ref(node_id); + scheduler.update_node_ref_counts(node_id, RefCountUpdate::RemoveSecondary); } } pub(crate) fn clear(&mut self, scheduler: &mut Scheduler) { if let Some(old_attached) = self.attached.take() { - scheduler.node_dec_ref(old_attached); + scheduler.update_node_ref_counts(old_attached, RefCountUpdate::Detach); } self.clear_secondary(scheduler); @@ -251,12 +256,11 @@ impl IntentState { /// forget the location on the offline node. /// /// Returns true if a change was made - pub(crate) fn demote_attached(&mut self, node_id: NodeId) -> bool { + pub(crate) fn demote_attached(&mut self, scheduler: &mut Scheduler, node_id: NodeId) -> bool { if self.attached == Some(node_id) { - // TODO: when scheduler starts tracking attached + secondary counts separately, we will - // need to call into it here. 
self.attached = None; self.secondary.push(node_id); + scheduler.update_node_ref_counts(node_id, RefCountUpdate::DemoteAttached); true } else { false @@ -593,7 +597,7 @@ impl TenantShard { Secondary => { if let Some(node_id) = self.intent.get_attached() { // Populate secondary by demoting the attached node - self.intent.demote_attached(*node_id); + self.intent.demote_attached(scheduler, *node_id); modified = true; } else if self.intent.secondary.is_empty() { // Populate secondary by scheduling a fresh node @@ -783,7 +787,7 @@ impl TenantShard { old_attached_node_id, new_attached_node_id, }) => { - self.intent.demote_attached(old_attached_node_id); + self.intent.demote_attached(scheduler, old_attached_node_id); self.intent .promote_attached(scheduler, new_attached_node_id); } @@ -1321,7 +1325,9 @@ pub(crate) mod tests { assert_ne!(attached_node_id, secondary_node_id); // Notifying the attached node is offline should demote it to a secondary - let changed = tenant_shard.intent.demote_attached(attached_node_id); + let changed = tenant_shard + .intent + .demote_attached(&mut scheduler, attached_node_id); assert!(changed); assert!(tenant_shard.intent.attached.is_none()); assert_eq!(tenant_shard.intent.secondary.len(), 2); @@ -1604,7 +1610,14 @@ pub(crate) mod tests { // We should see equal number of locations on the two nodes. assert_eq!(scheduler.get_node_shard_count(NodeId(1)), 4); + // Scheduling does not consider the number of attachments picking the initial + // pageserver to attach to (hence the assertion that all primaries are on the + // same node) + // TODO: Tweak the scheduling to evenly distribute attachments for new shards. + assert_eq!(scheduler.get_node_attached_shard_count(NodeId(1)), 4); + assert_eq!(scheduler.get_node_shard_count(NodeId(2)), 4); + assert_eq!(scheduler.get_node_attached_shard_count(NodeId(2)), 0); // Add another two nodes: we should see the shards spread out when their optimize // methods are called @@ -1613,9 +1626,16 @@ pub(crate) mod tests { optimize_til_idle(&nodes, &mut scheduler, &mut shards); assert_eq!(scheduler.get_node_shard_count(NodeId(1)), 2); + assert_eq!(scheduler.get_node_attached_shard_count(NodeId(1)), 1); + assert_eq!(scheduler.get_node_shard_count(NodeId(2)), 2); + assert_eq!(scheduler.get_node_attached_shard_count(NodeId(2)), 1); + assert_eq!(scheduler.get_node_shard_count(NodeId(3)), 2); + assert_eq!(scheduler.get_node_attached_shard_count(NodeId(3)), 1); + assert_eq!(scheduler.get_node_shard_count(NodeId(4)), 2); + assert_eq!(scheduler.get_node_attached_shard_count(NodeId(4)), 1); for shard in shards.iter_mut() { shard.intent.clear(&mut scheduler); From 7121db3669349ad8be323f55d84906fe1f62af4f Mon Sep 17 00:00:00 2001 From: Vlad Lazar Date: Tue, 11 Jun 2024 17:39:38 +0100 Subject: [PATCH 163/220] storcon_cli: add 'drain' command (#8007) ## Problem We need the ability to prepare a subset of storage controller managed pageservers for decommisioning. The storage controller cannot currently express this in terms of scheduling constraints (it's a pretty special case, so I'm not sure it even should). ## Summary of Changes A new `drain` command is added to `storcon_cli`. It takes a set of nodes to drain and migrates primary attachments outside of said set. Simple round robing assignment is used under the assumption that nodes outside of the draining set are evenly balanced. Note that secondary locations are not migrated. 
This is fine for staging, but the migration API will have to be extended for prod in order to allow migration of secondaries as well. I've tested this out against a neon local cluster. The immediate use for this command will be to migrate staging to ARM(Arch64) pageservers. Related https://github.com/neondatabase/cloud/issues/14029 --- Cargo.lock | 1 + control_plane/storcon_cli/Cargo.toml | 1 + control_plane/storcon_cli/src/main.rs | 208 ++++++++++++++++++++++++++ 3 files changed, 210 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index dbbf330cf9..66879fd743 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5820,6 +5820,7 @@ dependencies = [ "anyhow", "clap", "comfy-table", + "futures", "humantime", "hyper 0.14.26", "pageserver_api", diff --git a/control_plane/storcon_cli/Cargo.toml b/control_plane/storcon_cli/Cargo.toml index ed3462961f..f96f0084b2 100644 --- a/control_plane/storcon_cli/Cargo.toml +++ b/control_plane/storcon_cli/Cargo.toml @@ -9,6 +9,7 @@ license.workspace = true anyhow.workspace = true clap.workspace = true comfy-table.workspace = true +futures.workspace = true humantime.workspace = true hyper.workspace = true pageserver_api.workspace = true diff --git a/control_plane/storcon_cli/src/main.rs b/control_plane/storcon_cli/src/main.rs index 05c4acdf90..8c84911d33 100644 --- a/control_plane/storcon_cli/src/main.rs +++ b/control_plane/storcon_cli/src/main.rs @@ -1,3 +1,4 @@ +use futures::StreamExt; use std::{collections::HashMap, str::FromStr, time::Duration}; use clap::{Parser, Subcommand}; @@ -148,6 +149,22 @@ enum Command { #[arg(long)] threshold: humantime::Duration, }, + // Drain a set of specified pageservers by moving the primary attachments to pageservers + // outside of the specified set. + Drain { + // Set of pageserver node ids to drain. + #[arg(long)] + nodes: Vec, + // Optional: migration concurrency (default is 8) + #[arg(long)] + concurrency: Option, + // Optional: maximum number of shards to migrate + #[arg(long)] + max_shards: Option, + // Optional: when set to true, nothing is migrated, but the plan is printed to stdout + #[arg(long)] + dry_run: Option, + }, } #[derive(Parser)] @@ -737,6 +754,197 @@ async fn main() -> anyhow::Result<()> { }) .await?; } + Command::Drain { + nodes, + concurrency, + max_shards, + dry_run, + } => { + // Load the list of nodes, split them up into the drained and filled sets, + // and validate that draining is possible. + let node_descs = storcon_client + .dispatch::<(), Vec>( + Method::GET, + "control/v1/node".to_string(), + None, + ) + .await?; + + let mut node_to_drain_descs = Vec::new(); + let mut node_to_fill_descs = Vec::new(); + + for desc in node_descs { + let to_drain = nodes.iter().any(|id| *id == desc.id); + if to_drain { + node_to_drain_descs.push(desc); + } else { + node_to_fill_descs.push(desc); + } + } + + if nodes.len() != node_to_drain_descs.len() { + anyhow::bail!("Drain requested for node which doesn't exist.") + } + + let can_fill = node_to_fill_descs + .iter() + .filter(|desc| { + matches!(desc.availability, NodeAvailabilityWrapper::Active) + && matches!( + desc.scheduling, + NodeSchedulingPolicy::Active | NodeSchedulingPolicy::Filling + ) + }) + .any(|_| true); + + if !can_fill { + anyhow::bail!("There are no nodes to drain to") + } + + // Set the node scheduling policy to draining for the nodes which + // we plan to drain. 
+ for node_desc in node_to_drain_descs.iter() { + let req = NodeConfigureRequest { + node_id: node_desc.id, + availability: None, + scheduling: Some(NodeSchedulingPolicy::Draining), + }; + + storcon_client + .dispatch::<_, ()>( + Method::PUT, + format!("control/v1/node/{}/config", node_desc.id), + Some(req), + ) + .await?; + } + + // Perform the drain: move each tenant shard scheduled on a node to + // be drained to a node which is being filled. A simple round robin + // strategy is used to pick the new node. + let tenants = storcon_client + .dispatch::<(), Vec>( + Method::GET, + "control/v1/tenant".to_string(), + None, + ) + .await?; + + let mut selected_node_idx = 0; + + struct DrainMove { + tenant_shard_id: TenantShardId, + from: NodeId, + to: NodeId, + } + + let mut moves: Vec = Vec::new(); + + let shards = tenants + .into_iter() + .flat_map(|tenant| tenant.shards.into_iter()); + for shard in shards { + if let Some(max_shards) = max_shards { + if moves.len() >= max_shards { + println!( + "Stop planning shard moves since the requested maximum was reached" + ); + break; + } + } + + let should_migrate = { + if let Some(attached_to) = shard.node_attached { + node_to_drain_descs + .iter() + .map(|desc| desc.id) + .any(|id| id == attached_to) + } else { + false + } + }; + + if !should_migrate { + continue; + } + + moves.push(DrainMove { + tenant_shard_id: shard.tenant_shard_id, + from: shard + .node_attached + .expect("We only migrate attached tenant shards"), + to: node_to_fill_descs[selected_node_idx].id, + }); + selected_node_idx = (selected_node_idx + 1) % node_to_fill_descs.len(); + } + + let total_moves = moves.len(); + + if dry_run == Some(true) { + println!("Dryrun requested. Planned {total_moves} moves:"); + for mv in &moves { + println!("{}: {} -> {}", mv.tenant_shard_id, mv.from, mv.to) + } + + return Ok(()); + } + + const DEFAULT_MIGRATE_CONCURRENCY: usize = 8; + let mut stream = futures::stream::iter(moves) + .map(|mv| { + let client = Client::new(cli.api.clone(), cli.jwt.clone()); + async move { + client + .dispatch::( + Method::PUT, + format!("control/v1/tenant/{}/migrate", mv.tenant_shard_id), + Some(TenantShardMigrateRequest { + tenant_shard_id: mv.tenant_shard_id, + node_id: mv.to, + }), + ) + .await + .map_err(|e| (mv.tenant_shard_id, mv.from, mv.to, e)) + } + }) + .buffered(concurrency.unwrap_or(DEFAULT_MIGRATE_CONCURRENCY)); + + let mut success = 0; + let mut failure = 0; + + while let Some(res) = stream.next().await { + match res { + Ok(_) => { + success += 1; + } + Err((tenant_shard_id, from, to, error)) => { + failure += 1; + println!( + "Failed to migrate {} from node {} to node {}: {}", + tenant_shard_id, from, to, error + ); + } + } + + if (success + failure) % 20 == 0 { + println!( + "Processed {}/{} shards: {} succeeded, {} failed", + success + failure, + total_moves, + success, + failure + ); + } + } + + println!( + "Processed {}/{} shards: {} succeeded, {} failed", + success + failure, + total_moves, + success, + failure + ); + } } Ok(()) From 78a59b94f59a9679a6c8f3759d43b05de238ecbd Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Tue, 11 Jun 2024 23:19:18 +0300 Subject: [PATCH 164/220] Copy editor config for the neon extension from PostgreSQL (#8009) This makes IDEs and github diff format the code the same way as PostgreSQL sources, which is the style we try to maintain. 
--- pgxn/.dir-locals.el | 19 +++++++++++++++++++ pgxn/.editorconfig | 14 ++++++++++++++ 2 files changed, 33 insertions(+) create mode 100644 pgxn/.dir-locals.el create mode 100644 pgxn/.editorconfig diff --git a/pgxn/.dir-locals.el b/pgxn/.dir-locals.el new file mode 100644 index 0000000000..ab6208b698 --- /dev/null +++ b/pgxn/.dir-locals.el @@ -0,0 +1,19 @@ +;; see also src/tools/editors/emacs.samples for more complete settings + +((c-mode . ((c-basic-offset . 4) + (c-file-style . "bsd") + (fill-column . 78) + (indent-tabs-mode . t) + (tab-width . 4))) + (nxml-mode . ((fill-column . 78) + (indent-tabs-mode . nil))) + (perl-mode . ((perl-indent-level . 4) + (perl-continued-statement-offset . 2) + (perl-continued-brace-offset . -2) + (perl-brace-offset . 0) + (perl-brace-imaginary-offset . 0) + (perl-label-offset . -2) + (indent-tabs-mode . t) + (tab-width . 4))) + (sgml-mode . ((fill-column . 78) + (indent-tabs-mode . nil)))) diff --git a/pgxn/.editorconfig b/pgxn/.editorconfig new file mode 100644 index 0000000000..d69a3d1dc4 --- /dev/null +++ b/pgxn/.editorconfig @@ -0,0 +1,14 @@ +root = true + +[*.{c,h,l,y,pl,pm}] +indent_style = tab +indent_size = tab +tab_width = 4 + +[*.{sgml,xml}] +indent_style = space +indent_size = 1 + +[*.xsl] +indent_style = space +indent_size = 2 From 27518676d7aebb26ad81bf0a926749c5ed9e75d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arpad=20M=C3=BCller?= Date: Wed, 12 Jun 2024 00:45:22 +0200 Subject: [PATCH 165/220] Rename S3 scrubber to storage scrubber (#8013) The S3 scrubber contains "S3" in its name, but we want to make it generic in terms of which storage is used (#7547). Therefore, rename it to "storage scrubber", following the naming scheme of already existing components "storage broker" and "storage controller". Part of #7547 --- .dockerignore | 2 +- Cargo.lock | 96 +++++++++---------- Cargo.toml | 2 +- {s3_scrubber => storage_scrubber}/Cargo.toml | 2 +- {s3_scrubber => storage_scrubber}/README.md | 2 +- .../src/checks.rs | 0 .../src/cloud_admin_api.rs | 0 .../src/garbage.rs | 0 {s3_scrubber => storage_scrubber}/src/lib.rs | 0 {s3_scrubber => storage_scrubber}/src/main.rs | 10 +- .../src/metadata_stream.rs | 0 .../src/pageserver_physical_gc.rs | 0 .../src/scan_pageserver_metadata.rs | 0 .../src/scan_safekeeper_metadata.rs | 0 .../src/tenant_snapshot.rs | 0 test_runner/fixtures/neon_fixtures.py | 8 +- .../regress/test_pageserver_generations.py | 4 +- .../regress/test_pageserver_secondary.py | 6 +- test_runner/regress/test_sharding.py | 4 +- ...3_scrubber.py => test_storage_scrubber.py} | 10 +- test_runner/regress/test_tenant_delete.py | 4 +- 21 files changed, 75 insertions(+), 75 deletions(-) rename {s3_scrubber => storage_scrubber}/Cargo.toml (98%) rename {s3_scrubber => storage_scrubber}/README.md (99%) rename {s3_scrubber => storage_scrubber}/src/checks.rs (100%) rename {s3_scrubber => storage_scrubber}/src/cloud_admin_api.rs (100%) rename {s3_scrubber => storage_scrubber}/src/garbage.rs (100%) rename {s3_scrubber => storage_scrubber}/src/lib.rs (100%) rename {s3_scrubber => storage_scrubber}/src/main.rs (96%) rename {s3_scrubber => storage_scrubber}/src/metadata_stream.rs (100%) rename {s3_scrubber => storage_scrubber}/src/pageserver_physical_gc.rs (100%) rename {s3_scrubber => storage_scrubber}/src/scan_pageserver_metadata.rs (100%) rename {s3_scrubber => storage_scrubber}/src/scan_safekeeper_metadata.rs (100%) rename {s3_scrubber => storage_scrubber}/src/tenant_snapshot.rs (100%) rename test_runner/regress/{test_s3_scrubber.py => 
test_storage_scrubber.py} (94%) diff --git a/.dockerignore b/.dockerignore index eead727994..c7a2f78e32 100644 --- a/.dockerignore +++ b/.dockerignore @@ -21,7 +21,7 @@ !patches/ !pgxn/ !proxy/ -!s3_scrubber/ +!storage_scrubber/ !safekeeper/ !storage_broker/ !storage_controller/ diff --git a/Cargo.lock b/Cargo.lock index 66879fd743..1c8a8b0c0f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5109,54 +5109,6 @@ version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" -[[package]] -name = "s3_scrubber" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-stream", - "aws-config", - "aws-sdk-s3", - "aws-smithy-async", - "bincode", - "bytes", - "camino", - "chrono", - "clap", - "crc32c", - "either", - "futures", - "futures-util", - "hex", - "histogram", - "humantime", - "itertools", - "once_cell", - "pageserver", - "pageserver_api", - "postgres_ffi", - "rand 0.8.5", - "remote_storage", - "reqwest 0.12.4", - "rustls 0.22.4", - "rustls-native-certs 0.7.0", - "serde", - "serde_json", - "serde_with", - "thiserror", - "tokio", - "tokio-postgres", - "tokio-postgres-rustls", - "tokio-rustls 0.25.0", - "tokio-stream", - "tokio-util", - "tracing", - "tracing-appender", - "tracing-subscriber", - "utils", - "workspace_hack", -] - [[package]] name = "safekeeper" version = "0.1.0" @@ -5813,6 +5765,54 @@ dependencies = [ "workspace_hack", ] +[[package]] +name = "storage_scrubber" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-stream", + "aws-config", + "aws-sdk-s3", + "aws-smithy-async", + "bincode", + "bytes", + "camino", + "chrono", + "clap", + "crc32c", + "either", + "futures", + "futures-util", + "hex", + "histogram", + "humantime", + "itertools", + "once_cell", + "pageserver", + "pageserver_api", + "postgres_ffi", + "rand 0.8.5", + "remote_storage", + "reqwest 0.12.4", + "rustls 0.22.4", + "rustls-native-certs 0.7.0", + "serde", + "serde_json", + "serde_with", + "thiserror", + "tokio", + "tokio-postgres", + "tokio-postgres-rustls", + "tokio-rustls 0.25.0", + "tokio-stream", + "tokio-util", + "tracing", + "tracing-appender", + "tracing-subscriber", + "utils", + "workspace_hack", +] + [[package]] name = "storcon_cli" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index 58715db32b..dc89c2341b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,7 +13,7 @@ members = [ "safekeeper", "storage_broker", "storage_controller", - "s3_scrubber", + "storage_scrubber", "workspace_hack", "trace", "libs/compute_api", diff --git a/s3_scrubber/Cargo.toml b/storage_scrubber/Cargo.toml similarity index 98% rename from s3_scrubber/Cargo.toml rename to storage_scrubber/Cargo.toml index 48b50ca21c..050be66483 100644 --- a/s3_scrubber/Cargo.toml +++ b/storage_scrubber/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "s3_scrubber" +name = "storage_scrubber" version = "0.1.0" edition.workspace = true license.workspace = true diff --git a/s3_scrubber/README.md b/storage_scrubber/README.md similarity index 99% rename from s3_scrubber/README.md rename to storage_scrubber/README.md index 8a96542ada..0930f343ec 100644 --- a/s3_scrubber/README.md +++ b/storage_scrubber/README.md @@ -1,4 +1,4 @@ -# Neon S3 scrubber +# Neon Storage Scrubber This tool directly accesses the S3 buckets used by the Neon `pageserver` and `safekeeper`, and does housekeeping such as cleaning up objects for tenants & timelines that no longer exist. 
diff --git a/s3_scrubber/src/checks.rs b/storage_scrubber/src/checks.rs similarity index 100% rename from s3_scrubber/src/checks.rs rename to storage_scrubber/src/checks.rs diff --git a/s3_scrubber/src/cloud_admin_api.rs b/storage_scrubber/src/cloud_admin_api.rs similarity index 100% rename from s3_scrubber/src/cloud_admin_api.rs rename to storage_scrubber/src/cloud_admin_api.rs diff --git a/s3_scrubber/src/garbage.rs b/storage_scrubber/src/garbage.rs similarity index 100% rename from s3_scrubber/src/garbage.rs rename to storage_scrubber/src/garbage.rs diff --git a/s3_scrubber/src/lib.rs b/storage_scrubber/src/lib.rs similarity index 100% rename from s3_scrubber/src/lib.rs rename to storage_scrubber/src/lib.rs diff --git a/s3_scrubber/src/main.rs b/storage_scrubber/src/main.rs similarity index 96% rename from s3_scrubber/src/main.rs rename to storage_scrubber/src/main.rs index ade8ef7d7a..222bd10ed2 100644 --- a/s3_scrubber/src/main.rs +++ b/storage_scrubber/src/main.rs @@ -1,11 +1,11 @@ use anyhow::bail; use camino::Utf8PathBuf; use pageserver_api::shard::TenantShardId; -use s3_scrubber::garbage::{find_garbage, purge_garbage, PurgeMode}; -use s3_scrubber::pageserver_physical_gc::GcMode; -use s3_scrubber::scan_pageserver_metadata::scan_metadata; -use s3_scrubber::tenant_snapshot::SnapshotDownloader; -use s3_scrubber::{ +use storage_scrubber::garbage::{find_garbage, purge_garbage, PurgeMode}; +use storage_scrubber::pageserver_physical_gc::GcMode; +use storage_scrubber::scan_pageserver_metadata::scan_metadata; +use storage_scrubber::tenant_snapshot::SnapshotDownloader; +use storage_scrubber::{ init_logging, pageserver_physical_gc::pageserver_physical_gc, scan_safekeeper_metadata::scan_safekeeper_metadata, BucketConfig, ConsoleConfig, NodeKind, TraversingDepth, diff --git a/s3_scrubber/src/metadata_stream.rs b/storage_scrubber/src/metadata_stream.rs similarity index 100% rename from s3_scrubber/src/metadata_stream.rs rename to storage_scrubber/src/metadata_stream.rs diff --git a/s3_scrubber/src/pageserver_physical_gc.rs b/storage_scrubber/src/pageserver_physical_gc.rs similarity index 100% rename from s3_scrubber/src/pageserver_physical_gc.rs rename to storage_scrubber/src/pageserver_physical_gc.rs diff --git a/s3_scrubber/src/scan_pageserver_metadata.rs b/storage_scrubber/src/scan_pageserver_metadata.rs similarity index 100% rename from s3_scrubber/src/scan_pageserver_metadata.rs rename to storage_scrubber/src/scan_pageserver_metadata.rs diff --git a/s3_scrubber/src/scan_safekeeper_metadata.rs b/storage_scrubber/src/scan_safekeeper_metadata.rs similarity index 100% rename from s3_scrubber/src/scan_safekeeper_metadata.rs rename to storage_scrubber/src/scan_safekeeper_metadata.rs diff --git a/s3_scrubber/src/tenant_snapshot.rs b/storage_scrubber/src/tenant_snapshot.rs similarity index 100% rename from s3_scrubber/src/tenant_snapshot.rs rename to storage_scrubber/src/tenant_snapshot.rs diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index 6fdad2188c..394f5283f3 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -833,7 +833,7 @@ class NeonEnvBuilder: def enable_scrub_on_exit(self): """ Call this if you would like the fixture to automatically run - s3_scrubber at the end of the test, as a bidirectional test + storage_scrubber at the end of the test, as a bidirectional test that the scrubber is working properly, and that the code within the test didn't produce any invalid remote state. 
""" @@ -948,7 +948,7 @@ class NeonEnvBuilder: if self.scrub_on_exit: try: - S3Scrubber(self).scan_metadata() + StorageScrubber(self).scan_metadata() except Exception as e: log.error(f"Error during remote storage scrub: {e}") cleanup_error = e @@ -3937,7 +3937,7 @@ class Safekeeper(LogUtils): wait_until(20, 0.5, paused) -class S3Scrubber: +class StorageScrubber: def __init__(self, env: NeonEnvBuilder, log_dir: Optional[Path] = None): self.env = env self.log_dir = log_dir or env.test_output_dir @@ -3957,7 +3957,7 @@ class S3Scrubber: if s3_storage.endpoint is not None: env.update({"AWS_ENDPOINT_URL": s3_storage.endpoint}) - base_args = [str(self.env.neon_binpath / "s3_scrubber")] + base_args = [str(self.env.neon_binpath / "storage_scrubber")] args = base_args + args (output_path, stdout, status_code) = subprocess_capture( diff --git a/test_runner/regress/test_pageserver_generations.py b/test_runner/regress/test_pageserver_generations.py index 0235cf6d20..696af24e5c 100644 --- a/test_runner/regress/test_pageserver_generations.py +++ b/test_runner/regress/test_pageserver_generations.py @@ -22,7 +22,7 @@ from fixtures.neon_fixtures import ( NeonEnv, NeonEnvBuilder, PgBin, - S3Scrubber, + StorageScrubber, generate_uploads_and_deletions, ) from fixtures.pageserver.common_types import parse_layer_file_name @@ -215,7 +215,7 @@ def test_generations_upgrade(neon_env_builder: NeonEnvBuilder): # Having written a mixture of generation-aware and legacy index_part.json, # ensure the scrubber handles the situation as expected. - metadata_summary = S3Scrubber(neon_env_builder).scan_metadata() + metadata_summary = StorageScrubber(neon_env_builder).scan_metadata() assert metadata_summary["tenant_count"] == 1 # Scrubber should have seen our timeline assert metadata_summary["timeline_count"] == 1 assert metadata_summary["timeline_shard_count"] == 1 diff --git a/test_runner/regress/test_pageserver_secondary.py b/test_runner/regress/test_pageserver_secondary.py index 757ea60882..2782d33e15 100644 --- a/test_runner/regress/test_pageserver_secondary.py +++ b/test_runner/regress/test_pageserver_secondary.py @@ -7,7 +7,7 @@ from typing import Any, Dict, Optional import pytest from fixtures.common_types import TenantId, TimelineId from fixtures.log_helper import log -from fixtures.neon_fixtures import NeonEnvBuilder, NeonPageserver, S3Scrubber +from fixtures.neon_fixtures import NeonEnvBuilder, NeonPageserver, StorageScrubber from fixtures.pageserver.common_types import parse_layer_file_name from fixtures.pageserver.utils import ( assert_prefix_empty, @@ -214,7 +214,7 @@ def test_location_conf_churn(neon_env_builder: NeonEnvBuilder, seed: int): # Having done a bunch of attach/detach cycles, we will have generated some index garbage: check # that the scrubber sees it and cleans it up. We do this before the final attach+validate pass, # to also validate that the scrubber isn't breaking anything. 
- gc_summary = S3Scrubber(neon_env_builder).pageserver_physical_gc(min_age_secs=1) + gc_summary = StorageScrubber(neon_env_builder).pageserver_physical_gc(min_age_secs=1) assert gc_summary["remote_storage_errors"] == 0 assert gc_summary["indices_deleted"] > 0 @@ -536,7 +536,7 @@ def test_secondary_downloads(neon_env_builder: NeonEnvBuilder): # Scrub the remote storage # ======================== # This confirms that the scrubber isn't upset by the presence of the heatmap - S3Scrubber(neon_env_builder).scan_metadata() + StorageScrubber(neon_env_builder).scan_metadata() # Detach secondary and delete tenant # =================================== diff --git a/test_runner/regress/test_sharding.py b/test_runner/regress/test_sharding.py index 1996e99557..56075c5975 100644 --- a/test_runner/regress/test_sharding.py +++ b/test_runner/regress/test_sharding.py @@ -11,8 +11,8 @@ from fixtures.log_helper import log from fixtures.neon_fixtures import ( NeonEnv, NeonEnvBuilder, - S3Scrubber, StorageControllerApiException, + StorageScrubber, last_flush_lsn_upload, tenant_get_shards, wait_for_last_flush_lsn, @@ -128,7 +128,7 @@ def test_sharding_smoke( # Check the scrubber isn't confused by sharded content, then disable # it during teardown because we'll have deleted by then - S3Scrubber(neon_env_builder).scan_metadata() + StorageScrubber(neon_env_builder).scan_metadata() neon_env_builder.scrub_on_exit = False env.storage_controller.pageserver_api().tenant_delete(tenant_id) diff --git a/test_runner/regress/test_s3_scrubber.py b/test_runner/regress/test_storage_scrubber.py similarity index 94% rename from test_runner/regress/test_s3_scrubber.py rename to test_runner/regress/test_storage_scrubber.py index 6baba190f3..35ae61c380 100644 --- a/test_runner/regress/test_s3_scrubber.py +++ b/test_runner/regress/test_storage_scrubber.py @@ -6,7 +6,7 @@ import pytest from fixtures.common_types import TenantId, TenantShardId, TimelineId from fixtures.neon_fixtures import ( NeonEnvBuilder, - S3Scrubber, + StorageScrubber, ) from fixtures.remote_storage import S3Storage, s3_storage from fixtures.workload import Workload @@ -60,7 +60,7 @@ def test_scrubber_tenant_snapshot(neon_env_builder: NeonEnvBuilder, shard_count: output_path = neon_env_builder.test_output_dir / "snapshot" os.makedirs(output_path) - scrubber = S3Scrubber(neon_env_builder) + scrubber = StorageScrubber(neon_env_builder) scrubber.tenant_snapshot(tenant_id, output_path) assert len(os.listdir(output_path)) > 0 @@ -143,18 +143,18 @@ def test_scrubber_physical_gc(neon_env_builder: NeonEnvBuilder, shard_count: Opt workload.write_rows(1) # With a high min_age, the scrubber should decline to delete anything - gc_summary = S3Scrubber(neon_env_builder).pageserver_physical_gc(min_age_secs=3600) + gc_summary = StorageScrubber(neon_env_builder).pageserver_physical_gc(min_age_secs=3600) assert gc_summary["remote_storage_errors"] == 0 assert gc_summary["indices_deleted"] == 0 # If targeting a different tenant, the scrubber shouldn't do anything - gc_summary = S3Scrubber(neon_env_builder).pageserver_physical_gc( + gc_summary = StorageScrubber(neon_env_builder).pageserver_physical_gc( min_age_secs=1, tenant_ids=[TenantId.generate()] ) assert gc_summary["remote_storage_errors"] == 0 assert gc_summary["indices_deleted"] == 0 # With a low min_age, the scrubber should go ahead and clean up all but the latest 2 generations - gc_summary = S3Scrubber(neon_env_builder).pageserver_physical_gc(min_age_secs=1) + gc_summary = 
StorageScrubber(neon_env_builder).pageserver_physical_gc(min_age_secs=1) assert gc_summary["remote_storage_errors"] == 0 assert gc_summary["indices_deleted"] == (expect_indices_per_shard - 2) * shard_count diff --git a/test_runner/regress/test_tenant_delete.py b/test_runner/regress/test_tenant_delete.py index fa7cead1bd..fd3cc45c3f 100644 --- a/test_runner/regress/test_tenant_delete.py +++ b/test_runner/regress/test_tenant_delete.py @@ -10,7 +10,7 @@ from fixtures.log_helper import log from fixtures.neon_fixtures import ( NeonEnvBuilder, PgBin, - S3Scrubber, + StorageScrubber, last_flush_lsn_upload, wait_for_last_flush_lsn, ) @@ -707,7 +707,7 @@ def test_tenant_delete_scrubber(pg_bin: PgBin, neon_env_builder: NeonEnvBuilder) remote_storage_kind = RemoteStorageKind.MOCK_S3 neon_env_builder.enable_pageserver_remote_storage(remote_storage_kind) - scrubber = S3Scrubber(neon_env_builder) + scrubber = StorageScrubber(neon_env_builder) env = neon_env_builder.init_start(initial_tenant_conf=MANY_SMALL_LAYERS_TENANT_CONFIG) ps_http = env.pageserver.http_client() From b7a0c2b61430eb5f88200b679acdcbee3503f15b Mon Sep 17 00:00:00 2001 From: Sasha Krassovsky Date: Tue, 11 Jun 2024 17:59:32 -0700 Subject: [PATCH 166/220] Add On-demand WAL Download to logicalfuncs (#7960) We implemented on-demand WAL download for walsender, but other things that may want to read the WAL from safekeepers don't do that yet. This PR makes it do that by adding the same set of hooks to logicalfuncs. Addresses https://github.com/neondatabase/neon/issues/7959 Also relies on: https://github.com/neondatabase/postgres/pull/438 https://github.com/neondatabase/postgres/pull/437 https://github.com/neondatabase/postgres/pull/436 --- Makefile | 2 ++ pgxn/neon/neon.c | 2 ++ pgxn/neon/walsender_hooks.c | 27 ++++++++++++++++- .../regress/test_logical_replication.py | 30 +++++++++++++++++++ vendor/postgres-v14 | 2 +- vendor/postgres-v15 | 2 +- vendor/postgres-v16 | 2 +- vendor/revisions.json | 6 ++-- 8 files changed, 66 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index dcbfdbcbc1..37bd19ba44 100644 --- a/Makefile +++ b/Makefile @@ -124,6 +124,8 @@ postgres-%: postgres-configure-% \ $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pageinspect install +@echo "Compiling amcheck $*" $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/amcheck install + +@echo "Compiling test_decoding $*" + $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/test_decoding install .PHONY: postgres-clean-% postgres-clean-%: diff --git a/pgxn/neon/neon.c b/pgxn/neon/neon.c index b69a3819c9..276d1542fe 100644 --- a/pgxn/neon/neon.c +++ b/pgxn/neon/neon.c @@ -19,6 +19,7 @@ #include "catalog/pg_type.h" #include "postmaster/bgworker.h" #include "postmaster/interrupt.h" +#include "replication/logical.h" #include "replication/slot.h" #include "replication/walsender.h" #include "storage/procsignal.h" @@ -280,6 +281,7 @@ _PG_init(void) pg_init_libpagestore(); pg_init_walproposer(); WalSender_Custom_XLogReaderRoutines = NeonOnDemandXLogReaderRoutines; + LogicalFuncs_Custom_XLogReaderRoutines = NeonOnDemandXLogReaderRoutines; InitLogicalReplicationMonitor(); diff --git a/pgxn/neon/walsender_hooks.c b/pgxn/neon/walsender_hooks.c index 93dce9de84..8f8d1dfc01 100644 --- a/pgxn/neon/walsender_hooks.c +++ b/pgxn/neon/walsender_hooks.c @@ -24,8 +24,12 @@ #include "walproposer.h" static NeonWALReader *wal_reader = NULL; + +struct WalSnd; +extern struct WalSnd *MyWalSnd; extern XLogRecPtr WalSndWaitForWal(XLogRecPtr loc); extern bool GetDonorShmem(XLogRecPtr 
*donor_lsn); +extern XLogRecPtr GetXLogReplayRecPtr(TimeLineID *replayTLI); static XLogRecPtr NeonWALReadWaitForWAL(XLogRecPtr loc) @@ -36,7 +40,28 @@ NeonWALReadWaitForWAL(XLogRecPtr loc) CHECK_FOR_INTERRUPTS(); } - return WalSndWaitForWal(loc); + // Walsender sends keepalives and stuff, so better use its normal wait + if (MyWalSnd != NULL) + return WalSndWaitForWal(loc); + + for (;;) + { + XLogRecPtr flush_ptr; + if (!RecoveryInProgress()) +#if PG_VERSION_NUM >= 150000 + flush_ptr = GetFlushRecPtr(NULL); +#else + flush_ptr = GetFlushRecPtr(); +#endif + else + flush_ptr = GetXLogReplayRecPtr(NULL); + + if (loc <= flush_ptr) + return flush_ptr; + + CHECK_FOR_INTERRUPTS(); + pg_usleep(1000); + } } static int diff --git a/test_runner/regress/test_logical_replication.py b/test_runner/regress/test_logical_replication.py index a657d5a035..ca3c81d6e5 100644 --- a/test_runner/regress/test_logical_replication.py +++ b/test_runner/regress/test_logical_replication.py @@ -221,6 +221,35 @@ def test_obsolete_slot_drop(neon_simple_env: NeonEnv, vanilla_pg): wait_until(number_of_iterations=10, interval=2, func=partial(slot_removed, endpoint)) +def test_ondemand_wal_download_in_replication_slot_funcs(neon_env_builder: NeonEnvBuilder): + neon_env_builder.num_safekeepers = 3 + env = neon_env_builder.init_start() + + env.neon_cli.create_branch("init") + endpoint = env.endpoints.create_start("init") + + with endpoint.connect().cursor() as cur: + cur.execute("create table wal_generator (id serial primary key, data text)") + cur.execute( + "SELECT * FROM pg_create_logical_replication_slot('slotty_mcslotface', 'test_decoding')" + ) + cur.execute( + """ +INSERT INTO wal_generator (data) +SELECT repeat('A', 1024) -- Generates a kilobyte of data per row +FROM generate_series(1, 16384) AS seq; -- Inserts enough rows to exceed 16MB of data +""" + ) + + endpoint.stop_and_destroy() + endpoint = env.endpoints.create_start("init") + + with endpoint.connect().cursor() as cur: + cur.execute( + "SELECT * FROM pg_logical_slot_peek_binary_changes('slotty_mcslotface', NULL, NULL, 'include-xids', '0')" + ) + + # Tests that walsender correctly blocks until WAL is downloaded from safekeepers def test_lr_with_slow_safekeeper(neon_env_builder: NeonEnvBuilder, vanilla_pg): neon_env_builder.num_safekeepers = 3 @@ -247,6 +276,7 @@ FROM generate_series(1, 16384) AS seq; -- Inserts enough rows to exceed 16MB of connstr = endpoint.connstr().replace("'", "''") vanilla_pg.safe_psql(f"create subscription sub1 connection '{connstr}' publication pub") logical_replication_sync(vanilla_pg, endpoint) + vanilla_pg.stop() # Pause the safekeepers so that they can't send WAL (except to pageserver) diff --git a/vendor/postgres-v14 b/vendor/postgres-v14 index 17e0f5ff4e..4c51945a61 160000 --- a/vendor/postgres-v14 +++ b/vendor/postgres-v14 @@ -1 +1 @@ -Subproject commit 17e0f5ff4e1905691aa40e1e08f9b79b14c99652 +Subproject commit 4c51945a6167ca06c0169e7a4ca5a8e7ffa3faba diff --git a/vendor/postgres-v15 b/vendor/postgres-v15 index c2c3d40534..e22098d86d 160000 --- a/vendor/postgres-v15 +++ b/vendor/postgres-v15 @@ -1 +1 @@ -Subproject commit c2c3d40534db97d83dd7e185d1971e707fa2f445 +Subproject commit e22098d86d6c40276b6bd75c29133a33fb283ab6 diff --git a/vendor/postgres-v16 b/vendor/postgres-v16 index b228f20372..9837db1578 160000 --- a/vendor/postgres-v16 +++ b/vendor/postgres-v16 @@ -1 +1 @@ -Subproject commit b228f20372ebcabfd7946647cb7adbd38bacb14a +Subproject commit 9837db157837fcf43ef7348be0017d3a2238cd27 diff --git a/vendor/revisions.json 
b/vendor/revisions.json index 5bf4e289ef..f945ea6d73 100644 --- a/vendor/revisions.json +++ b/vendor/revisions.json @@ -1,5 +1,5 @@ { - "v16": ["16.3", "b228f20372ebcabfd7946647cb7adbd38bacb14a"], - "v15": ["15.7", "c2c3d40534db97d83dd7e185d1971e707fa2f445"], - "v14": ["14.12", "17e0f5ff4e1905691aa40e1e08f9b79b14c99652"] + "v16": ["16.3", "9837db157837fcf43ef7348be0017d3a2238cd27"], + "v15": ["15.7", "e22098d86d6c40276b6bd75c29133a33fb283ab6"], + "v14": ["14.12", "4c51945a6167ca06c0169e7a4ca5a8e7ffa3faba"] } From 9983ae291bf97fcd4a80fc3be6b00da39aca2663 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Wed, 12 Jun 2024 09:18:52 +0300 Subject: [PATCH 167/220] Another attempt at making test_vm_bits less flaky (#7989) - Split the first and second parts of the test to two separate tests - In the first test, disable the aggressive GC, compaction, and autovacuum. They are only needed by the second test. I'd like to get the first test to a point that the VM page is never all-zeros. Disabling autovacuum in the first test is hopefully enough to accomplish that. - Compare the full page images, don't skip page header. After fixing the previous point, there should be no discrepancy. LSN still won't match, though, because of commit 387a36874c. Fixes issue https://github.com/neondatabase/neon/issues/7984 --- test_runner/regress/test_vm_bits.py | 116 +++++++++++++++++++++------- 1 file changed, 86 insertions(+), 30 deletions(-) diff --git a/test_runner/regress/test_vm_bits.py b/test_runner/regress/test_vm_bits.py index b549db1af6..225b952e73 100644 --- a/test_runner/regress/test_vm_bits.py +++ b/test_runner/regress/test_vm_bits.py @@ -1,7 +1,9 @@ import time +from contextlib import closing from fixtures.log_helper import log from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder, fork_at_current_lsn +from fixtures.utils import query_scalar # @@ -113,11 +115,88 @@ def test_vm_bit_clear(neon_simple_env: NeonEnv): assert cur_new.fetchall() == [] -# -# Test that the ALL_FROZEN VM bit is cleared correctly at a HEAP_LOCK -# record. -# -def test_vm_bit_clear_on_heap_lock(neon_env_builder: NeonEnvBuilder): +def test_vm_bit_clear_on_heap_lock_whitebox(neon_env_builder: NeonEnvBuilder): + """ + Test that the ALL_FROZEN VM bit is cleared correctly at a HEAP_LOCK record. + + This is a repro for the bug fixed in commit 66fa176cc8. + """ + env = neon_env_builder.init_start() + endpoint = env.endpoints.create_start( + "main", + config_lines=[ + # If auto-analyze runs at the same time that we run VACUUM FREEZE, it + # can hold a snasphot that prevent the tuples from being frozen. + "autovacuum=off", + "log_checkpoints=on", + ], + ) + + # Run the tests in a dedicated database, because the activity monitor + # periodically runs some queries on to the 'postgres' database. If that + # happens at the same time that we're trying to freeze, the activity + # monitor's queries can hold back the xmin horizon and prevent freezing. + with closing(endpoint.connect()) as pg_conn: + pg_conn.cursor().execute("CREATE DATABASE vmbitsdb") + pg_conn = endpoint.connect(dbname="vmbitsdb") + cur = pg_conn.cursor() + + # Install extension containing function needed for test + cur.execute("CREATE EXTENSION neon_test_utils") + cur.execute("CREATE EXTENSION pageinspect") + + # Create a test table and freeze it to set the all-frozen VM bit on all pages. 
+ cur.execute("CREATE TABLE vmtest_lock (id integer PRIMARY KEY)") + cur.execute("BEGIN") + cur.execute("INSERT INTO vmtest_lock SELECT g FROM generate_series(1, 50000) g") + xid = int(query_scalar(cur, "SELECT txid_current()")) + cur.execute("COMMIT") + cur.execute("VACUUM (FREEZE, DISABLE_PAGE_SKIPPING true, VERBOSE) vmtest_lock") + for notice in pg_conn.notices: + log.info(f"{notice}") + + # This test has been flaky in the past, because background activity like + # auto-analyze and compute_ctl's activity monitor queries have prevented the + # tuples from being frozen. Check that they were frozen. + relfrozenxid = int( + query_scalar(cur, "SELECT relfrozenxid FROM pg_class WHERE relname='vmtest_lock'") + ) + assert ( + relfrozenxid > xid + ), f"Inserted rows were not frozen. This can be caused by concurrent activity in the database. (XID {xid}, relfrozenxid {relfrozenxid}" + + # Lock a row. This clears the all-frozen VM bit for that page. + cur.execute("BEGIN") + cur.execute("SELECT * FROM vmtest_lock WHERE id = 40000 FOR UPDATE") + cur.execute("COMMIT") + + # The VM page in shared buffer cache, and the same page as reconstructed by + # the pageserver, should be equal. Except for the LSN: Clearing a bit in the + # VM doesn't bump the LSN in PostgreSQL, but the pageserver updates the LSN + # when it replays the VM-bit clearing record (since commit 387a36874c) + # + # This is a bit fragile, we've had lot of flakiness in this test before. For + # example, because all the VM bits were not set because concurrent + # autoanalyze prevented the VACUUM FREEZE from freezing the tuples. Or + # because autoavacuum kicked in and re-froze the page between the + # get_raw_page() and get_raw_page_at_lsn() calls. We disable autovacuum now, + # which should make this deterministic. + cur.execute("select get_raw_page( 'vmtest_lock', 'vm', 0 )") + vm_page_in_cache = (cur.fetchall()[0][0])[8:100].hex() + cur.execute( + "select get_raw_page_at_lsn( 'vmtest_lock', 'vm', 0, pg_current_wal_insert_lsn(), NULL )" + ) + vm_page_at_pageserver = (cur.fetchall()[0][0])[8:100].hex() + + assert vm_page_at_pageserver == vm_page_in_cache + + +def test_vm_bit_clear_on_heap_lock_blackbox(neon_env_builder: NeonEnvBuilder): + """ + The previous test is enough to verify the bug that was fixed in + commit 66fa176cc8. But for good measure, we also reproduce the + original problem that the missing VM page update caused. + """ tenant_conf = { "checkpoint_distance": f"{128 * 1024}", "compaction_target_size": f"{128 * 1024}", @@ -130,9 +209,9 @@ def test_vm_bit_clear_on_heap_lock(neon_env_builder: NeonEnvBuilder): env = neon_env_builder.init_start(initial_tenant_conf=tenant_conf) tenant_id = env.initial_tenant - timeline_id = env.neon_cli.create_branch("test_vm_bit_clear_on_heap_lock") + timeline_id = env.initial_timeline endpoint = env.endpoints.create_start( - "test_vm_bit_clear_on_heap_lock", + "main", config_lines=[ "log_autovacuum_min_duration = 0", # Perform anti-wraparound vacuuming aggressively @@ -146,12 +225,10 @@ def test_vm_bit_clear_on_heap_lock(neon_env_builder: NeonEnvBuilder): # Install extension containing function needed for test cur.execute("CREATE EXTENSION neon_test_utils") - cur.execute("CREATE EXTENSION pageinspect") # Create a test table and freeze it to set the all-frozen VM bit on all pages. 
cur.execute("CREATE TABLE vmtest_lock (id integer PRIMARY KEY)") cur.execute("INSERT INTO vmtest_lock SELECT g FROM generate_series(1, 50000) g") - cur.execute("VACUUM (FREEZE, DISABLE_PAGE_SKIPPING true) vmtest_lock") # Lock a row. This clears the all-frozen VM bit for that page. @@ -165,27 +242,6 @@ def test_vm_bit_clear_on_heap_lock(neon_env_builder: NeonEnvBuilder): cur.execute("COMMIT") - # The VM page in shared buffer cache, and the same page as reconstructed - # by the pageserver, should be equal. - # - # Ignore page header (24 bytes) of visibility map. - # If the dirty VM page is flushed from the cache for some reason, - # it gets WAL-logged, which changes the LSN on the page. - # Also in neon SMGR we can replace empty heap page with zero (uninitialized) heap page. - cur.execute("select get_raw_page( 'vmtest_lock', 'vm', 0 )") - vm_page_in_cache = (cur.fetchall()[0][0])[24:100].hex() - cur.execute( - "select get_raw_page_at_lsn( 'vmtest_lock', 'vm', 0, pg_current_wal_insert_lsn(), NULL )" - ) - vm_page_at_pageserver = (cur.fetchall()[0][0])[24:100].hex() - - assert vm_page_at_pageserver == vm_page_in_cache - - # The above assert is enough to verify the bug that was fixed in - # commit 66fa176cc8. But for good measure, we also reproduce the - # original problem that the missing VM page update caused. The - # rest of the test does that. - # Kill and restart postgres, to clear the buffer cache. # # NOTE: clear_buffer_cache() will not do, because it evicts the dirty pages From 69aa1aca356b1893a48b06627b31de4933a172f9 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Wed, 12 Jun 2024 09:19:24 +0300 Subject: [PATCH 168/220] Update default Postgres version in docker-compose.yml (#8019) Let's be modern. --- docker-compose/docker-compose.yml | 4 ++-- docs/docker.md | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docker-compose/docker-compose.yml b/docker-compose/docker-compose.yml index a395f0331b..3f097f2700 100644 --- a/docker-compose/docker-compose.yml +++ b/docker-compose/docker-compose.yml @@ -159,12 +159,12 @@ services: context: ./compute_wrapper/ args: - REPOSITORY=${REPOSITORY:-neondatabase} - - COMPUTE_IMAGE=compute-node-v${PG_VERSION:-14} + - COMPUTE_IMAGE=compute-node-v${PG_VERSION:-16} - TAG=${TAG:-latest} - http_proxy=$http_proxy - https_proxy=$https_proxy environment: - - PG_VERSION=${PG_VERSION:-14} + - PG_VERSION=${PG_VERSION:-16} #- RUST_BACKTRACE=1 # Mount the test files directly, for faster editing cycle. volumes: diff --git a/docs/docker.md b/docs/docker.md index cbf68be3a7..ccd2afc27a 100644 --- a/docs/docker.md +++ b/docs/docker.md @@ -34,12 +34,12 @@ You can see a [docker compose](https://docs.docker.com/compose/) example to crea 1. create containers You can specify version of neon cluster using following environment values. -- PG_VERSION: postgres version for compute (default is 14) +- PG_VERSION: postgres version for compute (default is 16) - TAG: the tag version of [docker image](https://registry.hub.docker.com/r/neondatabase/neon/tags) (default is latest), which is tagged in [CI test](/.github/workflows/build_and_test.yml) ``` $ cd docker-compose/ $ docker-compose down # remove the containers if exists -$ PG_VERSION=15 TAG=2937 docker-compose up --build -d # You can specify the postgres and image version +$ PG_VERSION=16 TAG=2937 docker-compose up --build -d # You can specify the postgres and image version Creating network "dockercompose_default" with the default driver Creating docker-compose_storage_broker_1 ... done (...omit...) 
From 0a256148b0345d4a04e63347947bc93bcacdfb24 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Wed, 12 Jun 2024 10:06:00 +0300 Subject: [PATCH 169/220] Update documentation on running locally with Docker (#8020) - Fix the dockerhub URLs - `neondatabase/compute-node` image has been replaced with Postgres version specific images like `neondatabase/compute-node-v16` - Use TAG=latest in the example, rather than some old tag. That's a sensible default for people to copy-past - For convenience, use a Postgres connection URL in the `psql` example that also includes the password. That way, there's no need to set up .pgpass - Update the image names in `docker ps` example to match what you get when you follow the example --- docs/docker.md | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/docs/docker.md b/docs/docker.md index ccd2afc27a..ce806c4e6c 100644 --- a/docs/docker.md +++ b/docs/docker.md @@ -4,18 +4,18 @@ Currently we build two main images: -- [neondatabase/neon](https://hub.docker.com/repository/docker/zenithdb/zenith) — image with pre-built `pageserver`, `safekeeper` and `proxy` binaries and all the required runtime dependencies. Built from [/Dockerfile](/Dockerfile). -- [neondatabase/compute-node](https://hub.docker.com/repository/docker/zenithdb/compute-node) — compute node image with pre-built Postgres binaries from [neondatabase/postgres](https://github.com/neondatabase/postgres). +- [neondatabase/neon](https://hub.docker.com/repository/docker/neondatabase/neon) — image with pre-built `pageserver`, `safekeeper` and `proxy` binaries and all the required runtime dependencies. Built from [/Dockerfile](/Dockerfile). +- [neondatabase/compute-node-v16](https://hub.docker.com/repository/docker/neondatabase/compute-node-v16) — compute node image with pre-built Postgres binaries from [neondatabase/postgres](https://github.com/neondatabase/postgres). Similar images exist for v15 and v14. And additional intermediate image: - [neondatabase/compute-tools](https://hub.docker.com/repository/docker/neondatabase/compute-tools) — compute node configuration management tools. -## Building pipeline +## Build pipeline We build all images after a successful `release` tests run and push automatically to Docker Hub with two parallel CI jobs -1. `neondatabase/compute-tools` and `neondatabase/compute-node` +1. `neondatabase/compute-tools` and `neondatabase/compute-node-v16` (and -v15 and -v14) 2. `neondatabase/neon` @@ -34,12 +34,12 @@ You can see a [docker compose](https://docs.docker.com/compose/) example to crea 1. create containers You can specify version of neon cluster using following environment values. -- PG_VERSION: postgres version for compute (default is 16) -- TAG: the tag version of [docker image](https://registry.hub.docker.com/r/neondatabase/neon/tags) (default is latest), which is tagged in [CI test](/.github/workflows/build_and_test.yml) +- PG_VERSION: postgres version for compute (default is 16 as of this writing) +- TAG: the tag version of [docker image](https://registry.hub.docker.com/r/neondatabase/neon/tags), which is tagged in [CI test](/.github/workflows/build_and_test.yml). 
Default is 'latest' ``` $ cd docker-compose/ $ docker-compose down # remove the containers if exists -$ PG_VERSION=16 TAG=2937 docker-compose up --build -d # You can specify the postgres and image version +$ PG_VERSION=16 TAG=latest docker-compose up --build -d # You can specify the postgres and image version Creating network "dockercompose_default" with the default driver Creating docker-compose_storage_broker_1 ... done (...omit...) @@ -47,29 +47,31 @@ Creating docker-compose_storage_broker_1 ... done 2. connect compute node ``` -$ echo "localhost:55433:postgres:cloud_admin:cloud_admin" >> ~/.pgpass -$ chmod 600 ~/.pgpass -$ psql -h localhost -p 55433 -U cloud_admin +$ psql postgresql://cloud_admin:cloud_admin@localhost:55433/postgres +psql (16.3) +Type "help" for help. + postgres=# CREATE TABLE t(key int primary key, value text); CREATE TABLE -postgres=# insert into t values(1,1); +postgres=# insert into t values(1, 1); INSERT 0 1 postgres=# select * from t; - key | value + key | value -----+------- 1 | 1 (1 row) + ``` 3. If you want to see the log, you can use `docker-compose logs` command. ``` # check the container name you want to see $ docker ps -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -d6968a5ae912 dockercompose_compute "/shell/compute.sh" 5 minutes ago Up 5 minutes 0.0.0.0:3080->3080/tcp, 0.0.0.0:55433->55433/tcp dockercompose_compute_1 +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +3582f6d76227 docker-compose_compute "/shell/compute.sh" 2 minutes ago Up 2 minutes 0.0.0.0:3080->3080/tcp, :::3080->3080/tcp, 0.0.0.0:55433->55433/tcp, :::55433->55433/tcp docker-compose_compute_1 (...omit...) -$ docker logs -f dockercompose_compute_1 +$ docker logs -f docker-compose_compute_1 2022-10-21 06:15:48.757 GMT [56] LOG: connection authorized: user=cloud_admin database=postgres application_name=psql 2022-10-21 06:17:00.307 GMT [56] LOG: [NEON_SMGR] libpagestore: connected to 'host=pageserver port=6400' (...omit...) From f749437cec056bd4387dc8c17a5591003ef1fff6 Mon Sep 17 00:00:00 2001 From: a-masterov <72613290+a-masterov@users.noreply.github.com> Date: Wed, 12 Jun 2024 12:25:13 +0200 Subject: [PATCH 170/220] Resolve the problem the docker compose caused by the extensions tests (#8024) ## Problem The merging of #7818 caused the problem with the docker-compose file. Running docker compose is now impossible due to the unavailability of the neon-test-extensions:latest image ## Summary of changes Fix the problem: Add the latest tag to the neon-test-extensions image and use the profiles feature of the docker-compose file to avoid loading the neon-test-extensions container if it is not needed. 
--- .github/workflows/build_and_test.yml | 2 ++ docker-compose/docker-compose.yml | 1 + docker-compose/docker_compose_test.sh | 9 ++++----- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 79a0a77638..57635f4920 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -1101,6 +1101,8 @@ jobs: $repo/vm-compute-node-${version}:${{ needs.tag.outputs.build-tag }} done done + docker buildx imagetools create -t neondatabase/neon-test-extensions:latest \ + neondatabase/neon-test-extensions:${{ needs.tag.outputs.build-tag }} trigger-custom-extensions-build-and-wait: needs: [ check-permissions, tag ] diff --git a/docker-compose/docker-compose.yml b/docker-compose/docker-compose.yml index 3f097f2700..5503b6611a 100644 --- a/docker-compose/docker-compose.yml +++ b/docker-compose/docker-compose.yml @@ -194,6 +194,7 @@ services: - compute neon-test-extensions: + profiles: ["test-extensions"] image: ${REPOSITORY:-neondatabase}/neon-test-extensions-v${PG_TEST_VERSION:-16}:${TAG:-latest} entrypoint: - "/bin/bash" diff --git a/docker-compose/docker_compose_test.sh b/docker-compose/docker_compose_test.sh index 71e73c2d0a..a00591afd0 100755 --- a/docker-compose/docker_compose_test.sh +++ b/docker-compose/docker_compose_test.sh @@ -15,7 +15,6 @@ set -eux -o pipefail COMPOSE_FILE='docker-compose.yml' cd $(dirname $0) -docker compose -f $COMPOSE_FILE COMPUTE_CONTAINER_NAME=docker-compose-compute-1 TEST_CONTAINER_NAME=docker-compose-neon-test-extensions-1 PSQL_OPTION="-h localhost -U cloud_admin -p 55433 -d postgres" @@ -26,16 +25,16 @@ export http_proxy https_proxy cleanup() { echo "show container information" docker ps - docker compose -f $COMPOSE_FILE logs + docker compose --profile test-extensions -f $COMPOSE_FILE logs echo "stop containers..." - docker compose -f $COMPOSE_FILE down + docker compose --profile test-extensions -f $COMPOSE_FILE down } for pg_version in 14 15 16; do echo "clean up containers if exists" cleanup PG_TEST_VERSION=$(($pg_version < 16 ? 16 : $pg_version)) - PG_VERSION=$pg_version PG_TEST_VERSION=$PG_TEST_VERSION docker compose -f $COMPOSE_FILE up --build -d + PG_VERSION=$pg_version PG_TEST_VERSION=$PG_TEST_VERSION docker compose --profile test-extensions -f $COMPOSE_FILE up --build -d echo "wait until the compute is ready. timeout after 60s. " cnt=0 @@ -47,7 +46,7 @@ for pg_version in 14 15 16; do cleanup exit 1 fi - if docker compose -f $COMPOSE_FILE logs "compute_is_ready" | grep -q "accepting connections"; then + if docker compose --profile test-extensions -f $COMPOSE_FILE logs "compute_is_ready" | grep -q "accepting connections"; then echo "OK. The compute is ready to connect." echo "execute simple queries." docker exec $COMPUTE_CONTAINER_NAME /bin/bash -c "psql $PSQL_OPTION" From 3099e1a787693c7a7b2c694648462fad7eb2bcf5 Mon Sep 17 00:00:00 2001 From: Vlad Lazar Date: Wed, 12 Jun 2024 12:33:54 +0100 Subject: [PATCH 171/220] storcon_cli: do not drain to undesirable nodes (#8027) ## Problem The previous code would attempt to drain to unavailable or unschedulable nodes. ## Summary of Changes Remove such nodes from the list of nodes to fill. 
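For reference, a minimal standalone sketch of the filtering approach, using hypothetical `NodeDesc`/enum stand-ins rather than the storage controller's real types: retain only nodes that are both available and schedulable, then treat an empty result as "nothing to drain to".

```
// Hypothetical stand-ins for the controller's node descriptor types (sketch only).
#[derive(Debug)]
enum Availability {
    Active,
    Offline,
}

#[derive(Debug)]
enum Scheduling {
    Active,
    Filling,
    Draining,
}

#[derive(Debug)]
struct NodeDesc {
    id: u64,
    availability: Availability,
    scheduling: Scheduling,
}

fn main() {
    let mut fill_candidates = vec![
        NodeDesc { id: 1, availability: Availability::Active, scheduling: Scheduling::Active },
        NodeDesc { id: 2, availability: Availability::Offline, scheduling: Scheduling::Active },
        NodeDesc { id: 3, availability: Availability::Active, scheduling: Scheduling::Draining },
    ];

    // Keep only valid fill targets: available *and* in a schedulable state.
    fill_candidates.retain(|n| {
        matches!(n.availability, Availability::Active)
            && matches!(n.scheduling, Scheduling::Active | Scheduling::Filling)
    });

    if fill_candidates.is_empty() {
        eprintln!("There are no nodes to drain to");
        return;
    }
    println!("fill targets: {:?}", fill_candidates);
}
```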
--- control_plane/storcon_cli/src/main.rs | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/control_plane/storcon_cli/src/main.rs b/control_plane/storcon_cli/src/main.rs index 8c84911d33..7b48b75c21 100644 --- a/control_plane/storcon_cli/src/main.rs +++ b/control_plane/storcon_cli/src/main.rs @@ -786,18 +786,15 @@ async fn main() -> anyhow::Result<()> { anyhow::bail!("Drain requested for node which doesn't exist.") } - let can_fill = node_to_fill_descs - .iter() - .filter(|desc| { - matches!(desc.availability, NodeAvailabilityWrapper::Active) - && matches!( - desc.scheduling, - NodeSchedulingPolicy::Active | NodeSchedulingPolicy::Filling - ) - }) - .any(|_| true); + node_to_fill_descs.retain(|desc| { + matches!(desc.availability, NodeAvailabilityWrapper::Active) + && matches!( + desc.scheduling, + NodeSchedulingPolicy::Active | NodeSchedulingPolicy::Filling + ) + }); - if !can_fill { + if node_to_fill_descs.is_empty() { anyhow::bail!("There are no nodes to drain to") } From 9ba9f32dfe35ad99335497f7d22c14ba02ebea9f Mon Sep 17 00:00:00 2001 From: Peter Bendel Date: Wed, 12 Jun 2024 16:10:57 +0200 Subject: [PATCH 172/220] Reactivate page bench test in CI after ignoring CopyFail error in pageserver (#8023) ## Problem Testcase page bench test_pageserver_max_throughput_getpage_at_latest_lsn had been deactivated because it was flaky. We now ignore copy fail error messages like in https://github.com/neondatabase/neon/blob/270d3be507643f068120b52838c497f6c1b45b61/test_runner/regress/test_pageserver_getpage_throttle.py#L17-L20 and want to reactivate it to see it it is still flaky ## Summary of changes - reactivate the test in CI - ignore CopyFail error message during page bench test cases ## Checklist before requesting a review - [ ] I have performed a self-review of my code. - [ ] If it is a core feature, I have added thorough tests. - [ ] Do we need to implement analytics? if so did you add the relevant metrics to the dashboard? - [ ] If this PR requires public announcement, mark it with /release-notes label and add several sentences in this section. 
## Checklist before merging - [ ] Do not forget to reformat commit message to not include the above checklist --- ...geserver_max_throughput_getpage_at_latest_lsn.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/test_runner/performance/pageserver/pagebench/test_pageserver_max_throughput_getpage_at_latest_lsn.py b/test_runner/performance/pageserver/pagebench/test_pageserver_max_throughput_getpage_at_latest_lsn.py index 1a0012397c..772a39fe35 100644 --- a/test_runner/performance/pageserver/pagebench/test_pageserver_max_throughput_getpage_at_latest_lsn.py +++ b/test_runner/performance/pageserver/pagebench/test_pageserver_max_throughput_getpage_at_latest_lsn.py @@ -1,5 +1,4 @@ import json -import os from pathlib import Path from typing import Any, Dict, Tuple @@ -35,10 +34,6 @@ from performance.pageserver.util import ( @pytest.mark.timeout( 10000 ) # TODO: this value is just "a really high number"; have this per instance type -@pytest.mark.skipif( - os.getenv("CI", "false") == "true", - reason="The test if flaky on CI: https://github.com/neondatabase/neon/issues/6724", -) def test_pageserver_max_throughput_getpage_at_latest_lsn( neon_env_builder: NeonEnvBuilder, zenbenchmark: NeonBenchmarker, @@ -91,6 +86,14 @@ def test_pageserver_max_throughput_getpage_at_latest_lsn( n_tenants, setup_wrapper, ) + + env.pageserver.allowed_errors.append( + # https://github.com/neondatabase/neon/issues/6925 + # https://github.com/neondatabase/neon/issues/6390 + # https://github.com/neondatabase/neon/issues/6724 + r".*query handler for.*pagestream.*failed: unexpected message: CopyFail during COPY.*" + ) + run_benchmark_max_throughput_latest_lsn(env, pg_bin, record, duration) From 9dda13eccec8e2043566be1069709709dfec425a Mon Sep 17 00:00:00 2001 From: a-masterov <72613290+a-masterov@users.noreply.github.com> Date: Wed, 12 Jun 2024 18:15:20 +0200 Subject: [PATCH 173/220] Add the image version to the neon-test-extensions image (#8032) ## Problem The version was missing in the image name causing the error during the workflow ## Summary of changes Added the version to the image name --- .github/workflows/build_and_test.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 57635f4920..71ca7329ee 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -1101,8 +1101,8 @@ jobs: $repo/vm-compute-node-${version}:${{ needs.tag.outputs.build-tag }} done done - docker buildx imagetools create -t neondatabase/neon-test-extensions:latest \ - neondatabase/neon-test-extensions:${{ needs.tag.outputs.build-tag }} + docker buildx imagetools create -t neondatabase/neon-test-extensions-v16:latest \ + neondatabase/neon-test-extensions-v16:${{ needs.tag.outputs.build-tag }} trigger-custom-extensions-build-and-wait: needs: [ check-permissions, tag ] From 836d1f4af79cb74b69178213c76ef66993903307 Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Wed, 12 Jun 2024 13:42:43 -0400 Subject: [PATCH 174/220] test(pageserver): add test keyspace into collect_keyspace (#8016) Some test cases add random keys into the timeline, but it is not part of the `collect_keyspace`, this will cause compaction remove the keys. The pull request adds a field to supply extra keyspaces during unit tests. 
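Conceptually, the mechanism is just a test-only set of extra ranges merged into whatever `collect_keyspace` reports, so keys written directly by a test count as live data for GC/compaction. A simplified, self-contained sketch (hypothetical `KeySpace`/`Timeline` types, with a `Mutex` standing in for the real `ArcSwap`):

```
use std::ops::Range;
use std::sync::Mutex;

// Hypothetical, simplified stand-in for the pageserver's KeySpace (sketch only).
#[derive(Default, Debug)]
struct KeySpace {
    ranges: Vec<Range<u64>>,
}

struct Timeline {
    // In the real code this is #[cfg(test)]-only; a Mutex keeps the sketch self-contained.
    extra_test_dense_keyspace: Mutex<KeySpace>,
}

impl Timeline {
    // Test helper: register extra ranges that must survive compaction/GC.
    fn add_extra_test_dense_keyspace(&self, ks: KeySpace) {
        self.extra_test_dense_keyspace
            .lock()
            .unwrap()
            .ranges
            .extend(ks.ranges);
    }

    fn collect_keyspace(&self) -> KeySpace {
        let mut result = KeySpace::default();
        result.ranges.push(0..100); // ranges discovered from real directory metadata
        // Test-only extension point: ranges registered via the helper are reported as live too.
        result
            .ranges
            .extend(self.extra_test_dense_keyspace.lock().unwrap().ranges.clone());
        result
    }
}

fn main() {
    let tline = Timeline { extra_test_dense_keyspace: Mutex::new(KeySpace::default()) };
    tline.add_extra_test_dense_keyspace(KeySpace { ranges: vec![1000..1010] });
    println!("{:?}", tline.collect_keyspace().ranges); // [0..100, 1000..1010]
}
```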
--------- Signed-off-by: Alex Chi Z --- pageserver/src/pgdatadir_mapping.rs | 8 ++++++++ pageserver/src/tenant.rs | 8 ++++++-- pageserver/src/tenant/timeline.rs | 18 ++++++++++++++++++ 3 files changed, 32 insertions(+), 2 deletions(-) diff --git a/pageserver/src/pgdatadir_mapping.rs b/pageserver/src/pgdatadir_mapping.rs index 336d1c3fb8..25d00d6dfd 100644 --- a/pageserver/src/pgdatadir_mapping.rs +++ b/pageserver/src/pgdatadir_mapping.rs @@ -919,6 +919,14 @@ impl Timeline { result.add_key(AUX_FILES_KEY); } + #[cfg(test)] + { + let guard = self.extra_test_dense_keyspace.load(); + for kr in &guard.ranges { + result.add_range(kr.clone()); + } + } + Ok(( result.to_keyspace(), /* AUX sparse key space */ diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index f9ed6d3071..d556f72335 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -5264,6 +5264,9 @@ mod tests { let cancel = CancellationToken::new(); let mut test_key = Key::from_hex("010000000033333333444444445500000000").unwrap(); + let mut test_key_end = test_key; + test_key_end.field6 = NUM_KEYS as u32; + tline.add_extra_test_dense_keyspace(KeySpace::single(test_key..test_key_end)); let mut keyspace = KeySpaceAccum::new(); @@ -6223,8 +6226,8 @@ mod tests { let cancel = CancellationToken::new(); - let mut base_key = Key::from_hex("000000000033333333444444445500000000").unwrap(); - base_key.field1 = AUX_KEY_PREFIX; + let base_key = Key::from_hex("620000000033333333444444445500000000").unwrap(); + assert_eq!(base_key.field1, AUX_KEY_PREFIX); // in case someone accidentally changed the prefix... let mut test_key = base_key; let mut lsn = Lsn(0x10); @@ -6329,6 +6332,7 @@ mod tests { Lsn(0x20), // it's fine to not advance LSN to 0x30 while using 0x30 to get below because `get_vectored_impl` does not wait for LSN ) .await?; + tline.add_extra_test_dense_keyspace(KeySpace::single(base_key..(base_key_nonexist.next()))); let child = tenant .branch_timeline_test_with_layers( diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 54a4ceeaf3..28627e7911 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -426,6 +426,14 @@ pub struct Timeline { /// Indicate whether aux file v2 storage is enabled. pub(crate) last_aux_file_policy: AtomicAuxFilePolicy, + + /// Some test cases directly place keys into the timeline without actually modifying the directory + /// keys (i.e., DB_DIR). The test cases creating such keys will put the keyspaces here, so that + /// these keys won't get garbage-collected during compaction/GC. This field only modifies the dense + /// keyspace return value of `collect_keyspace`. For sparse keyspaces, use AUX keys for testing, and + /// in the future, add `extra_test_sparse_keyspace` if necessary. 
+ #[cfg(test)] + pub(crate) extra_test_dense_keyspace: ArcSwap, } pub struct WalReceiverInfo { @@ -2344,6 +2352,9 @@ impl Timeline { aux_file_size_estimator: AuxFileSizeEstimator::new(aux_file_metrics), last_aux_file_policy: AtomicAuxFilePolicy::new(aux_file_policy), + + #[cfg(test)] + extra_test_dense_keyspace: ArcSwap::new(Arc::new(KeySpace::default())), }; result.repartition_threshold = result.get_checkpoint_distance() / REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE; @@ -5562,6 +5573,13 @@ impl Timeline { } Ok(layers) } + + #[cfg(test)] + pub(crate) fn add_extra_test_dense_keyspace(&self, ks: KeySpace) { + let mut keyspace = self.extra_test_dense_keyspace.load().as_ref().clone(); + keyspace.merge(&ks); + self.extra_test_dense_keyspace.store(Arc::new(keyspace)); + } } type TraversalPathItem = (ValueReconstructResult, Lsn, TraversalId); From ad0ab3b81bacc35e73a6902b6ce53db18f5e6293 Mon Sep 17 00:00:00 2001 From: MMeent Date: Wed, 12 Jun 2024 20:25:04 +0200 Subject: [PATCH 175/220] Fix query error in vm-image-spec.yaml (#8028) This query causes metrics exporter to complain about missing data because it can't find the correct column. Issue was introduced with https://github.com/neondatabase/neon/pull/7761 --- vm-image-spec.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/vm-image-spec.yaml b/vm-image-spec.yaml index 73a24c42d6..15f820bebd 100644 --- a/vm-image-spec.yaml +++ b/vm-image-spec.yaml @@ -304,7 +304,9 @@ files: - slot_name values: [restart_lsn] query: | - select slot_name, (restart_lsn - '0/0')::FLOAT8 from pg_replication_slots where slot_type = 'logical'; + select slot_name, (restart_lsn - '0/0')::FLOAT8 as restart_lsn + from pg_replication_slots + where slot_type = 'logical'; - metric_name: retained_wal type: gauge From dc2ab4407f8b9636a6e570818154f21fde14b9ce Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Thu, 13 Jun 2024 00:31:31 +0300 Subject: [PATCH 176/220] Fix on-demand SLRU download on standby starting at WAL segment boundary (#8031) If a standby is started right after switching to a new WAL segment, the request in the SLRU download request would point to the beginning of the segment (e.g. 0/5000000), while the not-modified-since LSN would point to just after the page header (e.g. 0/5000028). It's effectively the same position, as there cannot be any WAL records in between, but the pageserver rightly errors out on any request where the request LSN < not-modified since LSN. To fix, round down the not-modified since LSN to the beginning of the page like the request LSN. Fixes issue #8030 --- pgxn/neon/pagestore_smgr.c | 4 +-- .../regress/test_ondemand_slru_download.py | 30 +++++++++++++++++++ 2 files changed, 32 insertions(+), 2 deletions(-) diff --git a/pgxn/neon/pagestore_smgr.c b/pgxn/neon/pagestore_smgr.c index 6305f2ec92..8edaf65639 100644 --- a/pgxn/neon/pagestore_smgr.c +++ b/pgxn/neon/pagestore_smgr.c @@ -3112,12 +3112,12 @@ neon_read_slru_segment(SMgrRelation reln, const char* path, int segno, void* buf request_lsn = UINT64_MAX; /* - * GetRedoStartLsn() returns LSN of basebackup. We know that the SLRU + * GetRedoStartLsn() returns LSN of the basebackup. We know that the SLRU * segment has not changed since the basebackup, because in order to * modify it, we would have had to download it already. And once * downloaded, we never evict SLRU segments from local disk. 
*/ - not_modified_since = GetRedoStartLsn(); + not_modified_since = nm_adjust_lsn(GetRedoStartLsn()); SlruKind kind; diff --git a/test_runner/regress/test_ondemand_slru_download.py b/test_runner/regress/test_ondemand_slru_download.py index 4af7dcdfc3..d6babe4393 100644 --- a/test_runner/regress/test_ondemand_slru_download.py +++ b/test_runner/regress/test_ondemand_slru_download.py @@ -129,3 +129,33 @@ def test_ondemand_download_replica(neon_env_builder: NeonEnvBuilder, shard_count cur_replica = conn_replica.cursor() cur_replica.execute("SELECT * FROM clogtest") assert cur_replica.fetchall() == [(1,), (3,)] + + +def test_ondemand_download_after_wal_switch(neon_env_builder: NeonEnvBuilder): + """ + Test on-demand SLRU download on standby, when starting right after + WAL segment switch. + + This is a repro for a bug in how the LSN at WAL page/segment + boundary was handled (https://github.com/neondatabase/neon/issues/8030) + """ + + tenant_conf = { + "lazy_slru_download": "true", + } + env = neon_env_builder.init_start(initial_tenant_conf=tenant_conf) + + endpoint = env.endpoints.create_start("main") + pg_conn = endpoint.connect() + cur = pg_conn.cursor() + + # Create a test table + cur.execute("CREATE TABLE clogtest (id integer)") + cur.execute("INSERT INTO clogtest VALUES (1)") + + # Start standby at WAL segment boundary + cur.execute("SELECT pg_switch_wal()") + lsn = Lsn(query_scalar(cur, "SELECT pg_current_wal_insert_lsn()")) + _endpoint_at_lsn = env.endpoints.create_start( + branch_name="main", endpoint_id="ep-at-lsn", lsn=lsn + ) From fbccd1e6762f61686a810ba6b4654da7da1247d9 Mon Sep 17 00:00:00 2001 From: Anna Khanova <32508607+khanova@users.noreply.github.com> Date: Thu, 13 Jun 2024 14:42:26 +0200 Subject: [PATCH 177/220] Proxy process updated errors (#8026) ## Problem Respect errors classification from cplane --- proxy/src/console/messages.rs | 169 ++++++++++++++++++++++++++++- proxy/src/console/provider.rs | 133 ++++++++++------------- proxy/src/console/provider/neon.rs | 23 ++-- proxy/src/proxy/tests.rs | 20 ++-- proxy/src/proxy/wake_compute.rs | 101 ++++++++++++----- 5 files changed, 324 insertions(+), 122 deletions(-) diff --git a/proxy/src/console/messages.rs b/proxy/src/console/messages.rs index 9869b95768..3b7d681a41 100644 --- a/proxy/src/console/messages.rs +++ b/proxy/src/console/messages.rs @@ -1,16 +1,183 @@ use measured::FixedCardinalityLabel; use serde::{Deserialize, Serialize}; -use std::fmt; +use std::fmt::{self, Display}; use crate::auth::IpPattern; use crate::intern::{BranchIdInt, EndpointIdInt, ProjectIdInt}; +use crate::proxy::retry::ShouldRetry; /// Generic error response with human-readable description. /// Note that we can't always present it to user as is. #[derive(Debug, Deserialize)] pub struct ConsoleError { pub error: Box, + #[serde(skip)] + pub http_status_code: http::StatusCode, + pub status: Option, +} + +impl ConsoleError { + pub fn get_reason(&self) -> Reason { + self.status + .as_ref() + .and_then(|s| s.details.error_info.as_ref()) + .map(|e| e.reason) + .unwrap_or(Reason::Unknown) + } + pub fn get_user_facing_message(&self) -> String { + use super::provider::errors::REQUEST_FAILED; + self.status + .as_ref() + .and_then(|s| s.details.user_facing_message.as_ref()) + .map(|m| m.message.clone().into()) + .unwrap_or_else(|| { + // Ask @neondatabase/control-plane for review before adding more. + match self.http_status_code { + http::StatusCode::NOT_FOUND => { + // Status 404: failed to get a project-related resource. 
+ format!("{REQUEST_FAILED}: endpoint cannot be found") + } + http::StatusCode::NOT_ACCEPTABLE => { + // Status 406: endpoint is disabled (we don't allow connections). + format!("{REQUEST_FAILED}: endpoint is disabled") + } + http::StatusCode::LOCKED | http::StatusCode::UNPROCESSABLE_ENTITY => { + // Status 423: project might be in maintenance mode (or bad state), or quotas exceeded. + format!("{REQUEST_FAILED}: endpoint is temporarily unavailable. Check your quotas and/or contact our support.") + } + _ => REQUEST_FAILED.to_owned(), + } + }) + } +} + +impl Display for ConsoleError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let msg = self + .status + .as_ref() + .and_then(|s| s.details.user_facing_message.as_ref()) + .map(|m| m.message.as_ref()) + .unwrap_or_else(|| &self.error); + write!(f, "{}", msg) + } +} + +impl ShouldRetry for ConsoleError { + fn could_retry(&self) -> bool { + if self.status.is_none() || self.status.as_ref().unwrap().details.retry_info.is_none() { + // retry some temporary failures because the compute was in a bad state + // (bad request can be returned when the endpoint was in transition) + return match &self { + ConsoleError { + http_status_code: http::StatusCode::BAD_REQUEST, + .. + } => true, + // don't retry when quotas are exceeded + ConsoleError { + http_status_code: http::StatusCode::UNPROCESSABLE_ENTITY, + ref error, + .. + } => !error.contains("compute time quota of non-primary branches is exceeded"), + // locked can be returned when the endpoint was in transition + // or when quotas are exceeded. don't retry when quotas are exceeded + ConsoleError { + http_status_code: http::StatusCode::LOCKED, + ref error, + .. + } => { + !error.contains("quota exceeded") + && !error.contains("the limit for current plan reached") + } + _ => false, + }; + } + + // retry if the response has a retry delay + if let Some(retry_info) = self + .status + .as_ref() + .and_then(|s| s.details.retry_info.as_ref()) + { + retry_info.retry_delay_ms > 0 + } else { + false + } + } +} + +#[derive(Debug, Deserialize)] +pub struct Status { + pub code: Box, + pub message: Box, + pub details: Details, +} + +#[derive(Debug, Deserialize)] +pub struct Details { + pub error_info: Option, + pub retry_info: Option, + pub user_facing_message: Option, +} + +#[derive(Debug, Deserialize)] +pub struct ErrorInfo { + pub reason: Reason, + // Schema could also have `metadata` field, but it's not structured. Skip it for now. 
+} + +#[derive(Clone, Copy, Debug, Deserialize, Default)] +pub enum Reason { + #[serde(rename = "ROLE_PROTECTED")] + RoleProtected, + #[serde(rename = "RESOURCE_NOT_FOUND")] + ResourceNotFound, + #[serde(rename = "PROJECT_NOT_FOUND")] + ProjectNotFound, + #[serde(rename = "ENDPOINT_NOT_FOUND")] + EndpointNotFound, + #[serde(rename = "BRANCH_NOT_FOUND")] + BranchNotFound, + #[serde(rename = "RATE_LIMIT_EXCEEDED")] + RateLimitExceeded, + #[serde(rename = "NON_PRIMARY_BRANCH_COMPUTE_TIME_EXCEEDED")] + NonPrimaryBranchComputeTimeExceeded, + #[serde(rename = "ACTIVE_TIME_QUOTA_EXCEEDED")] + ActiveTimeQuotaExceeded, + #[serde(rename = "COMPUTE_TIME_QUOTA_EXCEEDED")] + ComputeTimeQuotaExceeded, + #[serde(rename = "WRITTEN_DATA_QUOTA_EXCEEDED")] + WrittenDataQuotaExceeded, + #[serde(rename = "DATA_TRANSFER_QUOTA_EXCEEDED")] + DataTransferQuotaExceeded, + #[serde(rename = "LOGICAL_SIZE_QUOTA_EXCEEDED")] + LogicalSizeQuotaExceeded, + #[default] + #[serde(other)] + Unknown, +} + +impl Reason { + pub fn is_not_found(&self) -> bool { + matches!( + self, + Reason::ResourceNotFound + | Reason::ProjectNotFound + | Reason::EndpointNotFound + | Reason::BranchNotFound + ) + } +} + +#[derive(Debug, Deserialize)] +pub struct RetryInfo { + pub retry_delay_ms: u64, +} + +#[derive(Debug, Deserialize)] +pub struct UserFacingMessage { + pub message: Box, } /// Response which holds client's auth secret, e.g. [`crate::scram::ServerSecret`]. diff --git a/proxy/src/console/provider.rs b/proxy/src/console/provider.rs index 634ec9042c..915c2ee7a6 100644 --- a/proxy/src/console/provider.rs +++ b/proxy/src/console/provider.rs @@ -25,8 +25,8 @@ use tracing::info; pub mod errors { use crate::{ + console::messages::{self, ConsoleError}, error::{io_error, ReportableError, UserFacingError}, - http, proxy::retry::ShouldRetry, }; use thiserror::Error; @@ -34,17 +34,14 @@ pub mod errors { use super::ApiLockError; /// A go-to error message which doesn't leak any detail. - const REQUEST_FAILED: &str = "Console request failed"; + pub const REQUEST_FAILED: &str = "Console request failed"; /// Common console API error. #[derive(Debug, Error)] pub enum ApiError { /// Error returned by the console itself. - #[error("{REQUEST_FAILED} with {}: {}", .status, .text)] - Console { - status: http::StatusCode, - text: Box, - }, + #[error("{REQUEST_FAILED} with {0}")] + Console(ConsoleError), /// Various IO errors like broken pipe or malformed payload. #[error("{REQUEST_FAILED}: {0}")] @@ -53,11 +50,11 @@ pub mod errors { impl ApiError { /// Returns HTTP status code if it's the reason for failure. - pub fn http_status_code(&self) -> Option { + pub fn get_reason(&self) -> messages::Reason { use ApiError::*; match self { - Console { status, .. } => Some(*status), - _ => None, + Console(e) => e.get_reason(), + _ => messages::Reason::Unknown, } } } @@ -67,22 +64,7 @@ pub mod errors { use ApiError::*; match self { // To minimize risks, only select errors are forwarded to users. - // Ask @neondatabase/control-plane for review before adding more. - Console { status, .. } => match *status { - http::StatusCode::NOT_FOUND => { - // Status 404: failed to get a project-related resource. - format!("{REQUEST_FAILED}: endpoint cannot be found") - } - http::StatusCode::NOT_ACCEPTABLE => { - // Status 406: endpoint is disabled (we don't allow connections). 
- format!("{REQUEST_FAILED}: endpoint is disabled") - } - http::StatusCode::LOCKED | http::StatusCode::UNPROCESSABLE_ENTITY => { - // Status 423: project might be in maintenance mode (or bad state), or quotas exceeded. - format!("{REQUEST_FAILED}: endpoint is temporarily unavailable. Check your quotas and/or contact our support.") - } - _ => REQUEST_FAILED.to_owned(), - }, + Console(c) => c.get_user_facing_message(), _ => REQUEST_FAILED.to_owned(), } } @@ -91,29 +73,56 @@ pub mod errors { impl ReportableError for ApiError { fn get_error_kind(&self) -> crate::error::ErrorKind { match self { - ApiError::Console { - status: http::StatusCode::NOT_FOUND | http::StatusCode::NOT_ACCEPTABLE, - .. - } => crate::error::ErrorKind::User, - ApiError::Console { - status: http::StatusCode::UNPROCESSABLE_ENTITY, - text, - } if text.contains("compute time quota of non-primary branches is exceeded") => { - crate::error::ErrorKind::User + ApiError::Console(e) => { + use crate::error::ErrorKind::*; + match e.get_reason() { + crate::console::messages::Reason::RoleProtected => User, + crate::console::messages::Reason::ResourceNotFound => User, + crate::console::messages::Reason::ProjectNotFound => User, + crate::console::messages::Reason::EndpointNotFound => User, + crate::console::messages::Reason::BranchNotFound => User, + crate::console::messages::Reason::RateLimitExceeded => ServiceRateLimit, + crate::console::messages::Reason::NonPrimaryBranchComputeTimeExceeded => { + User + } + crate::console::messages::Reason::ActiveTimeQuotaExceeded => User, + crate::console::messages::Reason::ComputeTimeQuotaExceeded => User, + crate::console::messages::Reason::WrittenDataQuotaExceeded => User, + crate::console::messages::Reason::DataTransferQuotaExceeded => User, + crate::console::messages::Reason::LogicalSizeQuotaExceeded => User, + crate::console::messages::Reason::Unknown => match &e { + ConsoleError { + http_status_code: + http::StatusCode::NOT_FOUND | http::StatusCode::NOT_ACCEPTABLE, + .. + } => crate::error::ErrorKind::User, + ConsoleError { + http_status_code: http::StatusCode::UNPROCESSABLE_ENTITY, + error, + .. + } if error.contains( + "compute time quota of non-primary branches is exceeded", + ) => + { + crate::error::ErrorKind::User + } + ConsoleError { + http_status_code: http::StatusCode::LOCKED, + error, + .. + } if error.contains("quota exceeded") + || error.contains("the limit for current plan reached") => + { + crate::error::ErrorKind::User + } + ConsoleError { + http_status_code: http::StatusCode::TOO_MANY_REQUESTS, + .. + } => crate::error::ErrorKind::ServiceRateLimit, + ConsoleError { .. } => crate::error::ErrorKind::ControlPlane, + }, + } } - ApiError::Console { - status: http::StatusCode::LOCKED, - text, - } if text.contains("quota exceeded") - || text.contains("the limit for current plan reached") => - { - crate::error::ErrorKind::User - } - ApiError::Console { - status: http::StatusCode::TOO_MANY_REQUESTS, - .. - } => crate::error::ErrorKind::ServiceRateLimit, - ApiError::Console { .. } => crate::error::ErrorKind::ControlPlane, ApiError::Transport(_) => crate::error::ErrorKind::ControlPlane, } } @@ -124,31 +133,7 @@ pub mod errors { match self { // retry some transport errors Self::Transport(io) => io.could_retry(), - // retry some temporary failures because the compute was in a bad state - // (bad request can be returned when the endpoint was in transition) - Self::Console { - status: http::StatusCode::BAD_REQUEST, - .. 
- } => true, - // don't retry when quotas are exceeded - Self::Console { - status: http::StatusCode::UNPROCESSABLE_ENTITY, - ref text, - } => !text.contains("compute time quota of non-primary branches is exceeded"), - // locked can be returned when the endpoint was in transition - // or when quotas are exceeded. don't retry when quotas are exceeded - Self::Console { - status: http::StatusCode::LOCKED, - ref text, - } => { - // written data quota exceeded - // data transfer quota exceeded - // compute time quota exceeded - // logical size quota exceeded - !text.contains("quota exceeded") - && !text.contains("the limit for current plan reached") - } - _ => false, + Self::Console(e) => e.could_retry(), } } } @@ -509,7 +494,7 @@ impl ApiLocks { self.metrics .semaphore_acquire_seconds .observe(now.elapsed().as_secs_f64()); - + info!("acquired permit {:?}", now.elapsed().as_secs_f64()); Ok(WakeComputePermit { permit: permit? }) } diff --git a/proxy/src/console/provider/neon.rs b/proxy/src/console/provider/neon.rs index d72229b029..41bd2f4956 100644 --- a/proxy/src/console/provider/neon.rs +++ b/proxy/src/console/provider/neon.rs @@ -94,12 +94,14 @@ impl Api { let body = match parse_body::(response).await { Ok(body) => body, // Error 404 is special: it's ok not to have a secret. - Err(e) => match e.http_status_code() { - Some(http::StatusCode::NOT_FOUND) => { + // TODO(anna): retry + Err(e) => { + if e.get_reason().is_not_found() { return Ok(AuthInfo::default()); + } else { + return Err(e.into()); } - _otherwise => return Err(e.into()), - }, + } }; let secret = if body.role_secret.is_empty() { @@ -328,19 +330,24 @@ async fn parse_body serde::Deserialize<'a>>( info!("request succeeded, processing the body"); return Ok(response.json().await?); } + let s = response.bytes().await?; + // Log plaintext to be able to detect, whether there are some cases not covered by the error struct. + info!("response_error plaintext: {:?}", s); // Don't throw an error here because it's not as important // as the fact that the request itself has failed. 
- let body = response.json().await.unwrap_or_else(|e| { + let mut body = serde_json::from_slice(&s).unwrap_or_else(|e| { warn!("failed to parse error body: {e}"); ConsoleError { error: "reason unclear (malformed error message)".into(), + http_status_code: status, + status: None, } }); + body.http_status_code = status; - let text = body.error; - error!("console responded with an error ({status}): {text}"); - Err(ApiError::Console { status, text }) + error!("console responded with an error ({status}): {body:?}"); + Err(ApiError::Console(body)) } fn parse_host_port(input: &str) -> Option<(&str, u16)> { diff --git a/proxy/src/proxy/tests.rs b/proxy/src/proxy/tests.rs index ad48af0093..96683511fe 100644 --- a/proxy/src/proxy/tests.rs +++ b/proxy/src/proxy/tests.rs @@ -12,7 +12,7 @@ use crate::auth::backend::{ }; use crate::config::{CertResolver, RetryConfig}; use crate::console::caches::NodeInfoCache; -use crate::console::messages::MetricsAuxInfo; +use crate::console::messages::{ConsoleError, MetricsAuxInfo}; use crate::console::provider::{CachedAllowedIps, CachedRoleSecret, ConsoleBackend}; use crate::console::{self, CachedNodeInfo, NodeInfo}; use crate::error::ErrorKind; @@ -484,18 +484,20 @@ impl TestBackend for TestConnectMechanism { match action { ConnectAction::Wake => Ok(helper_create_cached_node_info(self.cache)), ConnectAction::WakeFail => { - let err = console::errors::ApiError::Console { - status: http::StatusCode::FORBIDDEN, - text: "TEST".into(), - }; + let err = console::errors::ApiError::Console(ConsoleError { + http_status_code: http::StatusCode::FORBIDDEN, + error: "TEST".into(), + status: None, + }); assert!(!err.could_retry()); Err(console::errors::WakeComputeError::ApiError(err)) } ConnectAction::WakeRetry => { - let err = console::errors::ApiError::Console { - status: http::StatusCode::BAD_REQUEST, - text: "TEST".into(), - }; + let err = console::errors::ApiError::Console(ConsoleError { + http_status_code: http::StatusCode::BAD_REQUEST, + error: "TEST".into(), + status: None, + }); assert!(err.could_retry()); Err(console::errors::WakeComputeError::ApiError(err)) } diff --git a/proxy/src/proxy/wake_compute.rs b/proxy/src/proxy/wake_compute.rs index 94b03e1ccc..c166cf4389 100644 --- a/proxy/src/proxy/wake_compute.rs +++ b/proxy/src/proxy/wake_compute.rs @@ -1,4 +1,5 @@ use crate::config::RetryConfig; +use crate::console::messages::ConsoleError; use crate::console::{errors::WakeComputeError, provider::CachedNodeInfo}; use crate::context::RequestMonitoring; use crate::metrics::{ @@ -88,36 +89,76 @@ fn report_error(e: &WakeComputeError, retry: bool) { let kind = match e { WakeComputeError::BadComputeAddress(_) => WakeupFailureKind::BadComputeAddress, WakeComputeError::ApiError(ApiError::Transport(_)) => WakeupFailureKind::ApiTransportError, - WakeComputeError::ApiError(ApiError::Console { - status: StatusCode::LOCKED, - ref text, - }) if text.contains("written data quota exceeded") - || text.contains("the limit for current plan reached") => - { - WakeupFailureKind::QuotaExceeded - } - WakeComputeError::ApiError(ApiError::Console { - status: StatusCode::UNPROCESSABLE_ENTITY, - ref text, - }) if text.contains("compute time quota of non-primary branches is exceeded") => { - WakeupFailureKind::QuotaExceeded - } - WakeComputeError::ApiError(ApiError::Console { - status: StatusCode::LOCKED, - .. - }) => WakeupFailureKind::ApiConsoleLocked, - WakeComputeError::ApiError(ApiError::Console { - status: StatusCode::BAD_REQUEST, - .. 
- }) => WakeupFailureKind::ApiConsoleBadRequest, - WakeComputeError::ApiError(ApiError::Console { status, .. }) - if status.is_server_error() => - { - WakeupFailureKind::ApiConsoleOtherServerError - } - WakeComputeError::ApiError(ApiError::Console { .. }) => { - WakeupFailureKind::ApiConsoleOtherError - } + WakeComputeError::ApiError(ApiError::Console(e)) => match e.get_reason() { + crate::console::messages::Reason::RoleProtected => { + WakeupFailureKind::ApiConsoleBadRequest + } + crate::console::messages::Reason::ResourceNotFound => { + WakeupFailureKind::ApiConsoleBadRequest + } + crate::console::messages::Reason::ProjectNotFound => { + WakeupFailureKind::ApiConsoleBadRequest + } + crate::console::messages::Reason::EndpointNotFound => { + WakeupFailureKind::ApiConsoleBadRequest + } + crate::console::messages::Reason::BranchNotFound => { + WakeupFailureKind::ApiConsoleBadRequest + } + crate::console::messages::Reason::RateLimitExceeded => { + WakeupFailureKind::ApiConsoleLocked + } + crate::console::messages::Reason::NonPrimaryBranchComputeTimeExceeded => { + WakeupFailureKind::QuotaExceeded + } + crate::console::messages::Reason::ActiveTimeQuotaExceeded => { + WakeupFailureKind::QuotaExceeded + } + crate::console::messages::Reason::ComputeTimeQuotaExceeded => { + WakeupFailureKind::QuotaExceeded + } + crate::console::messages::Reason::WrittenDataQuotaExceeded => { + WakeupFailureKind::QuotaExceeded + } + crate::console::messages::Reason::DataTransferQuotaExceeded => { + WakeupFailureKind::QuotaExceeded + } + crate::console::messages::Reason::LogicalSizeQuotaExceeded => { + WakeupFailureKind::QuotaExceeded + } + crate::console::messages::Reason::Unknown => match e { + ConsoleError { + http_status_code: StatusCode::LOCKED, + ref error, + .. + } if error.contains("written data quota exceeded") + || error.contains("the limit for current plan reached") => + { + WakeupFailureKind::QuotaExceeded + } + ConsoleError { + http_status_code: StatusCode::UNPROCESSABLE_ENTITY, + ref error, + .. + } if error.contains("compute time quota of non-primary branches is exceeded") => { + WakeupFailureKind::QuotaExceeded + } + ConsoleError { + http_status_code: StatusCode::LOCKED, + .. + } => WakeupFailureKind::ApiConsoleLocked, + ConsoleError { + http_status_code: StatusCode::BAD_REQUEST, + .. + } => WakeupFailureKind::ApiConsoleBadRequest, + ConsoleError { + http_status_code, .. + } if http_status_code.is_server_error() => { + WakeupFailureKind::ApiConsoleOtherServerError + } + ConsoleError { .. } => WakeupFailureKind::ApiConsoleOtherError, + }, + }, WakeComputeError::TooManyConnections => WakeupFailureKind::ApiConsoleLocked, WakeComputeError::TooManyConnectionAttempts(_) => WakeupFailureKind::TimeoutError, }; From d25f7e3dd575878df49925bead4c797a61757751 Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Thu, 13 Jun 2024 09:44:37 -0400 Subject: [PATCH 178/220] test(pageserver): add test wal record for unit testing (#8015) https://github.com/neondatabase/neon/issues/8002 We need mock WAL record to make it easier to write unit tests. This pull request adds such a record. It has `clear` flag and `append` field. The tests for legacy-enhanced compaction are not modified yet and will be part of the next pull request. 
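To make the record's semantics concrete, here is a tiny self-contained model (simplified types, not the pageserver's actual `NeonWalRecord`/walredo path): `clear` wipes the current page image, `append` adds bytes, and `will_init` marks a record that can reconstruct the page without any earlier history.

```
// Simplified model of the test-only record's apply semantics (sketch, not the real types).
struct TestRecord {
    append: String,
    clear: bool,
    will_init: bool,
}

fn apply(page: &mut Vec<u8>, rec: &TestRecord) {
    if rec.will_init {
        // An init record must also clear, otherwise it could not stand on its own.
        assert!(rec.clear, "init record must be clear to ensure correctness");
    }
    if rec.clear {
        page.clear();
    }
    page.extend_from_slice(rec.append.as_bytes());
}

fn main() {
    let mut page = b"0x10".to_vec();
    apply(&mut page, &TestRecord { append: ",0x20".into(), clear: false, will_init: false });
    apply(&mut page, &TestRecord { append: ",0x30".into(), clear: false, will_init: false });
    assert_eq!(page, b"0x10,0x20,0x30".to_vec());

    // A "wal_init"-style record: history before it is not needed to reconstruct the page.
    apply(&mut page, &TestRecord { append: "base".into(), clear: true, will_init: true });
    assert_eq!(page, b"base".to_vec());
    println!("ok");
}
```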
--------- Signed-off-by: Alex Chi Z --- pageserver/src/tenant.rs | 80 +++++++++++++++++++++++++++- pageserver/src/walrecord.rs | 43 ++++++++++++++- pageserver/src/walredo/apply_neon.rs | 14 +++++ 3 files changed, 134 insertions(+), 3 deletions(-) diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index d556f72335..0bd3ece2e3 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -4041,6 +4041,7 @@ mod tests { use crate::repository::{Key, Value}; use crate::tenant::harness::*; use crate::tenant::timeline::CompactFlags; + use crate::walrecord::NeonWalRecord; use crate::DEFAULT_PG_VERSION; use bytes::{Bytes, BytesMut}; use hex_literal::hex; @@ -6705,8 +6706,8 @@ mod tests { } #[tokio::test] - async fn test_simple_bottom_most_compaction() -> anyhow::Result<()> { - let harness = TenantHarness::create("test_simple_bottom_most_compaction")?; + async fn test_simple_bottom_most_compaction_images() -> anyhow::Result<()> { + let harness = TenantHarness::create("test_simple_bottom_most_compaction_images")?; let (tenant, ctx) = harness.load().await; fn get_key(id: u32) -> Key { @@ -6861,4 +6862,79 @@ mod tests { Ok(()) } + + #[tokio::test] + async fn test_neon_test_record() -> anyhow::Result<()> { + let harness = TenantHarness::create("test_neon_test_record")?; + let (tenant, ctx) = harness.load().await; + + fn get_key(id: u32) -> Key { + // using aux key here b/c they are guaranteed to be inside `collect_keyspace`. + let mut key = Key::from_hex("620000000033333333444444445500000000").unwrap(); + key.field6 = id; + key + } + + let delta1 = vec![ + ( + get_key(1), + Lsn(0x20), + Value::WalRecord(NeonWalRecord::wal_append(",0x20")), + ), + ( + get_key(1), + Lsn(0x30), + Value::WalRecord(NeonWalRecord::wal_append(",0x30")), + ), + (get_key(2), Lsn(0x10), Value::Image("0x10".into())), + ( + get_key(2), + Lsn(0x20), + Value::WalRecord(NeonWalRecord::wal_append(",0x20")), + ), + ( + get_key(2), + Lsn(0x30), + Value::WalRecord(NeonWalRecord::wal_append(",0x30")), + ), + (get_key(3), Lsn(0x10), Value::Image("0x10".into())), + ( + get_key(3), + Lsn(0x20), + Value::WalRecord(NeonWalRecord::wal_clear()), + ), + (get_key(4), Lsn(0x10), Value::Image("0x10".into())), + ( + get_key(4), + Lsn(0x20), + Value::WalRecord(NeonWalRecord::wal_init()), + ), + ]; + let image1 = vec![(get_key(1), "0x10".into())]; + + let tline = tenant + .create_test_timeline_with_layers( + TIMELINE_ID, + Lsn(0x10), + DEFAULT_PG_VERSION, + &ctx, + vec![delta1], // delta layers + vec![(Lsn(0x10), image1)], // image layers + Lsn(0x50), + ) + .await?; + + assert_eq!( + tline.get(get_key(1), Lsn(0x50), &ctx).await?, + Bytes::from_static(b"0x10,0x20,0x30") + ); + assert_eq!( + tline.get(get_key(2), Lsn(0x50), &ctx).await?, + Bytes::from_static(b"0x10,0x20,0x30") + ); + // assert_eq!(tline.get(get_key(3), Lsn(0x50), &ctx).await?, Bytes::new()); + // assert_eq!(tline.get(get_key(4), Lsn(0x50), &ctx).await?, Bytes::new()); + + Ok(()) + } } diff --git a/pageserver/src/walrecord.rs b/pageserver/src/walrecord.rs index 205f8dee4d..62a3a91b0b 100644 --- a/pageserver/src/walrecord.rs +++ b/pageserver/src/walrecord.rs @@ -49,6 +49,19 @@ pub enum NeonWalRecord { file_path: String, content: Option, }, + + /// A testing record for unit testing purposes. It supports append data to an existing image, or clear it. + #[cfg(test)] + Test { + /// Append a string to the image. + append: String, + /// Clear the image before appending. + clear: bool, + /// Treat this record as an init record. 
`clear` should be set to true if this field is set + /// to true. This record does not need the history WALs to reconstruct. See [`NeonWalRecord::will_init`] and + /// its references in `timeline.rs`. + will_init: bool, + }, } impl NeonWalRecord { @@ -58,11 +71,39 @@ impl NeonWalRecord { // If you change this function, you'll also need to change ValueBytes::will_init match self { NeonWalRecord::Postgres { will_init, rec: _ } => *will_init, - + #[cfg(test)] + NeonWalRecord::Test { will_init, .. } => *will_init, // None of the special neon record types currently initialize the page _ => false, } } + + #[cfg(test)] + pub(crate) fn wal_append(s: impl AsRef) -> Self { + Self::Test { + append: s.as_ref().to_string(), + clear: false, + will_init: false, + } + } + + #[cfg(test)] + pub(crate) fn wal_clear() -> Self { + Self::Test { + append: "".to_string(), + clear: true, + will_init: false, + } + } + + #[cfg(test)] + pub(crate) fn wal_init() -> Self { + Self::Test { + append: "".to_string(), + clear: true, + will_init: true, + } + } } /// DecodedBkpBlock represents per-page data contained in a WAL record. diff --git a/pageserver/src/walredo/apply_neon.rs b/pageserver/src/walredo/apply_neon.rs index 24e8d8b01c..facf01004c 100644 --- a/pageserver/src/walredo/apply_neon.rs +++ b/pageserver/src/walredo/apply_neon.rs @@ -244,6 +244,20 @@ pub(crate) fn apply_in_neon( let mut writer = page.writer(); dir.ser_into(&mut writer)?; } + #[cfg(test)] + NeonWalRecord::Test { + append, + clear, + will_init, + } => { + if *will_init { + assert!(*clear, "init record must be clear to ensure correctness"); + } + if *clear { + page.clear(); + } + page.put_slice(append.as_bytes()); + } } Ok(()) } From 82719542c617a74850e078c99c01a3c7f9e32beb Mon Sep 17 00:00:00 2001 From: Christian Schwarz Date: Thu, 13 Jun 2024 20:20:47 +0200 Subject: [PATCH 179/220] fix: vectored get returns incorrect result on inexact materialized page cache hit (#8050) # Problem Suppose our vectored get starts with an inexact materialized page cache hit ("cached lsn") that is shadowed by a newer image layer image layer. Like so: ``` +-+ < delta layer | | -|-|----- < image layer | | | | -|-|----- < cached lsn for requested key +_+ ``` The correct visitation order is 1. inmemory layers 2. delta layer records in LSN range `[image_layer.lsn, oldest_inmemory_layer.lsn_range.start)` 3. image layer However, the vectored get code, when it visits the delta layer, it (incorrectly!) returns with state `Complete`. The reason why it returns is that it calls `on_lsn_advanced` with `self.lsn_range.start`, i.e., the layer's LSN range. Instead, it should use `lsn_range.start`, i.e., the LSN range from the correct visitation order listed above. # Solution Use `lsn_range.start` instead of `self.lsn_range.start`. 
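As a sanity check on the reasoning, a tiny standalone illustration (hypothetical `Lsn` type and example numbers, not the real read path) of why the floor passed to the "LSN advanced" bookkeeping must come from the planned read range rather than the layer's own range:

```
// Sketch only: models the stopping condition with made-up LSN values.
#[derive(Clone, Copy, Debug, PartialEq, PartialOrd)]
struct Lsn(u64);

fn main() {
    let cached_lsn = Lsn(0x20); // inexact materialized page cache hit for the key
    let delta_layer_lsn_range = Lsn(0x10)..Lsn(0x50); // the delta layer's own LSN range
    let planned_visit = Lsn(0x30)..Lsn(0x50); // planner clamps the delta read at the image layer

    // A key's read is complete once the search has advanced to an LSN at or below its cached
    // LSN: from there down, the cached image fills in the rest.
    let complete_at = |floor: Lsn| floor <= cached_lsn;

    // Buggy: floor = self.lsn_range.start (0x10) <= 0x20, so the key is marked complete and
    // the image layer at 0x30 is never visited.
    assert!(complete_at(delta_layer_lsn_range.start));

    // Fixed: floor = lsn_range.start of the planned visit (0x30) > 0x20, so the search
    // continues down to the image layer before falling back to the cached page.
    assert!(!complete_at(planned_visit.start));

    println!("visitation order preserved: delta layer, then image layer");
}
```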
# Refs discovered by & fixes https://github.com/neondatabase/neon/issues/6967 Co-authored-by: Vlad Lazar --- pageserver/src/tenant/storage_layer/delta_layer.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/pageserver/src/tenant/storage_layer/delta_layer.rs b/pageserver/src/tenant/storage_layer/delta_layer.rs index eb7cf81643..5e01ecd71d 100644 --- a/pageserver/src/tenant/storage_layer/delta_layer.rs +++ b/pageserver/src/tenant/storage_layer/delta_layer.rs @@ -219,7 +219,6 @@ pub struct DeltaLayerInner { // values copied from summary index_start_blk: u32, index_root_blk: u32, - lsn_range: Range, file: VirtualFile, file_id: FileId, @@ -785,7 +784,6 @@ impl DeltaLayerInner { file_id, index_start_blk: actual_summary.index_start_blk, index_root_blk: actual_summary.index_root_blk, - lsn_range: actual_summary.lsn_range, max_vectored_read_bytes, })) } @@ -911,7 +909,7 @@ impl DeltaLayerInner { let reads = Self::plan_reads( &keyspace, - lsn_range, + lsn_range.clone(), data_end_offset, index_reader, planner, @@ -924,7 +922,7 @@ impl DeltaLayerInner { self.do_reads_and_update_state(reads, reconstruct_state, ctx) .await; - reconstruct_state.on_lsn_advanced(&keyspace, self.lsn_range.start); + reconstruct_state.on_lsn_advanced(&keyspace, lsn_range.start); Ok(()) } From 0c3e3a8667294a3dc345b0f03364aa359a5154de Mon Sep 17 00:00:00 2001 From: Tristan Partin Date: Thu, 13 Jun 2024 10:31:58 -0500 Subject: [PATCH 180/220] Set application_name for internal connections to computes This will help when analyzing the origins of connections to a compute like in [0]. [0]: https://github.com/neondatabase/cloud/issues/14247 --- compute_tools/src/bin/compute_ctl.rs | 2 +- vm-image-spec.yaml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/compute_tools/src/bin/compute_ctl.rs b/compute_tools/src/bin/compute_ctl.rs index 9295f091d5..7bf5db5a57 100644 --- a/compute_tools/src/bin/compute_ctl.rs +++ b/compute_tools/src/bin/compute_ctl.rs @@ -735,7 +735,7 @@ fn cli() -> clap::Command { Arg::new("filecache-connstr") .long("filecache-connstr") .default_value( - "host=localhost port=5432 dbname=postgres user=cloud_admin sslmode=disable", + "host=localhost port=5432 dbname=postgres user=cloud_admin sslmode=disable application_name=vm-monitor", ) .value_name("FILECACHE_CONNSTR"), ) diff --git a/vm-image-spec.yaml b/vm-image-spec.yaml index 15f820bebd..99164645a7 100644 --- a/vm-image-spec.yaml +++ b/vm-image-spec.yaml @@ -18,7 +18,7 @@ commands: - name: postgres-exporter user: nobody sysvInitAction: respawn - shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres" /bin/postgres_exporter' + shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres application_name=postgres-exporter" /bin/postgres_exporter' - name: sql-exporter user: nobody sysvInitAction: respawn @@ -93,7 +93,7 @@ files: target: # Data source name always has a URI schema that matches the driver name. In some cases (e.g. MySQL) # the schema gets dropped or replaced to match the driver expected DSN format. - data_source_name: 'postgresql://cloud_admin@127.0.0.1:5432/postgres?sslmode=disable' + data_source_name: 'postgresql://cloud_admin@127.0.0.1:5432/postgres?sslmode=disable&application_name=sql_exporter' # Collectors (referenced by name) to execute on the target. # Glob patterns are supported (see for syntax). @@ -128,7 +128,7 @@ files: target: # Data source name always has a URI schema that matches the driver name. In some cases (e.g. 
MySQL) # the schema gets dropped or replaced to match the driver expected DSN format. - data_source_name: 'postgresql://cloud_admin@127.0.0.1:5432/postgres?sslmode=disable' + data_source_name: 'postgresql://cloud_admin@127.0.0.1:5432/postgres?sslmode=disable&application_name=sql_exporter_autoscaling' # Collectors (referenced by name) to execute on the target. # Glob patterns are supported (see for syntax). From f67010109f488cd20e7aae9b8feaf0c2016b63e9 Mon Sep 17 00:00:00 2001 From: James Broadhead Date: Fri, 14 Jun 2024 09:17:43 +0100 Subject: [PATCH 181/220] extensions: pgvector-0.7.2 (#8037) Update pgvector to 0.7.2 Purely mechanical update to pgvector.patch, just as a place to start from --- Dockerfile.compute-node | 6 +++--- patches/pgvector.patch | 32 ++++++++------------------------ 2 files changed, 11 insertions(+), 27 deletions(-) diff --git a/Dockerfile.compute-node b/Dockerfile.compute-node index a86fdd0bc3..3a73ac71b0 100644 --- a/Dockerfile.compute-node +++ b/Dockerfile.compute-node @@ -246,8 +246,8 @@ COPY patches/pgvector.patch /pgvector.patch # By default, pgvector Makefile uses `-march=native`. We don't want that, # because we build the images on different machines than where we run them. # Pass OPTFLAGS="" to remove it. -RUN wget https://github.com/pgvector/pgvector/archive/refs/tags/v0.7.1.tar.gz -O pgvector.tar.gz && \ - echo "fe6c8cb4e0cd1a8cb60f5badf9e1701e0fcabcfc260931c26d01e155c4dd21d1 pgvector.tar.gz" | sha256sum --check && \ +RUN wget https://github.com/pgvector/pgvector/archive/refs/tags/v0.7.2.tar.gz -O pgvector.tar.gz && \ + echo "617fba855c9bcb41a2a9bc78a78567fd2e147c72afd5bf9d37b31b9591632b30 pgvector.tar.gz" | sha256sum --check && \ mkdir pgvector-src && cd pgvector-src && tar xzf ../pgvector.tar.gz --strip-components=1 -C . && \ patch -p1 < /pgvector.patch && \ make -j $(getconf _NPROCESSORS_ONLN) OPTFLAGS="" PG_CONFIG=/usr/local/pgsql/bin/pg_config && \ @@ -979,7 +979,7 @@ RUN cd /ext-src/ && for f in *.tar.gz; \ do echo $f; dname=$(echo $f | sed 's/\.tar.*//')-src; \ rm -rf $dname; mkdir $dname; tar xzf $f --strip-components=1 -C $dname \ || exit 1; rm -f $f; done -RUN cd /ext-src/pgvector-src && patch -p1 <../pgvector.patch +RUN cd /ext-src/pgvector-src && patch -p1 <../pgvector.patch # cmake is required for the h3 test RUN apt-get update && apt-get install -y cmake RUN patch -p1 < /ext-src/pg_hintplan.patch diff --git a/patches/pgvector.patch b/patches/pgvector.patch index 84ac6644c5..3e1ffcaaaf 100644 --- a/patches/pgvector.patch +++ b/patches/pgvector.patch @@ -1,19 +1,8 @@ -From 0b0194a57bd0f3598bd57dbedd0df3932330169d Mon Sep 17 00:00:00 2001 -From: Heikki Linnakangas -Date: Fri, 2 Feb 2024 22:26:45 +0200 -Subject: [PATCH 1/1] Make v0.6.0 work with Neon - -Now that the WAL-logging happens as a separate step at the end of the -build, we need a few neon-specific hints to make it work. 
---- - src/hnswbuild.c | 36 ++++++++++++++++++++++++++++++++++++ - 1 file changed, 36 insertions(+) - diff --git a/src/hnswbuild.c b/src/hnswbuild.c -index 680789b..ec54dea 100644 +index dcfb2bd..d5189ee 100644 --- a/src/hnswbuild.c +++ b/src/hnswbuild.c -@@ -840,9 +840,17 @@ HnswParallelBuildMain(dsm_segment *seg, shm_toc *toc) +@@ -860,9 +860,17 @@ HnswParallelBuildMain(dsm_segment *seg, shm_toc *toc) hnswarea = shm_toc_lookup(toc, PARALLEL_KEY_HNSW_AREA, false); @@ -31,7 +20,7 @@ index 680789b..ec54dea 100644 /* Close relations within worker */ index_close(indexRel, indexLockmode); table_close(heapRel, heapLockmode); -@@ -1089,13 +1097,41 @@ BuildIndex(Relation heap, Relation index, IndexInfo *indexInfo, +@@ -1117,12 +1125,38 @@ BuildIndex(Relation heap, Relation index, IndexInfo *indexInfo, SeedRandom(42); #endif @@ -43,14 +32,13 @@ index 680789b..ec54dea 100644 BuildGraph(buildstate, forkNum); +- if (RelationNeedsWAL(index) || forkNum == INIT_FORKNUM) +#ifdef NEON_SMGR + smgr_finish_unlogged_build_phase_1(RelationGetSmgr(index)); +#endif + - if (RelationNeedsWAL(index)) -+ { - log_newpage_range(index, forkNum, 0, RelationGetNumberOfBlocks(index), true); - ++ if (RelationNeedsWAL(index) || forkNum == INIT_FORKNUM) { + log_newpage_range(index, forkNum, 0, RelationGetNumberOfBlocksInFork(index, forkNum), true); +#ifdef NEON_SMGR + { +#if PG_VERSION_NUM >= 160000 @@ -60,7 +48,7 @@ index 680789b..ec54dea 100644 +#endif + + SetLastWrittenLSNForBlockRange(XactLastRecEnd, rlocator, -+ MAIN_FORKNUM, 0, RelationGetNumberOfBlocks(index)); ++ MAIN_FORKNUM, 0, RelationGetNumberOfBlocks(index)); + SetLastWrittenLSNForRelation(XactLastRecEnd, rlocator, MAIN_FORKNUM); + } +#endif @@ -69,10 +57,6 @@ index 680789b..ec54dea 100644 +#ifdef NEON_SMGR + smgr_end_unlogged_build(RelationGetSmgr(index)); +#endif -+ + FreeBuildState(buildstate); } - --- -2.39.2 - From 425eed24e896a99d0ed03d118ee07fb0ae339bde Mon Sep 17 00:00:00 2001 From: John Spray Date: Fri, 14 Jun 2024 09:39:31 +0100 Subject: [PATCH 182/220] pageserver: refine shutdown handling in secondary download (#8052) ## Problem Some code paths during secondary mode download are returning Ok() rather than UpdateError::Cancelled. This is functionally okay, but it means that the end of TenantDownloader::download has a sanity check that the progress is 100% on success, and prints a "Correcting drift..." warning if not. This warning can be emitted in a test, e.g. https://neon-github-public-dev.s3.amazonaws.com/reports/pr-8049/9503642976/index.html#/testresult/fff1624ba6adae9e. ## Summary of changes - In secondary download cancellation paths, use Err(UpdateError::Cancelled) rather than Ok(), so that we drop out of the download function and do not reach the progress sanity check. --- pageserver/src/tenant/secondary/downloader.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pageserver/src/tenant/secondary/downloader.rs b/pageserver/src/tenant/secondary/downloader.rs index 62803c7838..24176ecf19 100644 --- a/pageserver/src/tenant/secondary/downloader.rs +++ b/pageserver/src/tenant/secondary/downloader.rs @@ -513,7 +513,7 @@ impl<'a> TenantDownloader<'a> { // cover our access to local storage. 
let Ok(_guard) = self.secondary_state.gate.enter() else { // Shutting down - return Ok(()); + return Err(UpdateError::Cancelled); }; let tenant_shard_id = self.secondary_state.get_tenant_shard_id(); @@ -846,7 +846,7 @@ impl<'a> TenantDownloader<'a> { for layer in timeline.layers { if self.secondary_state.cancel.is_cancelled() { tracing::debug!("Cancelled -- dropping out of layer loop"); - return Ok(()); + return Err(UpdateError::Cancelled); } // Existing on-disk layers: just update their access time. From 789196572e5e2921371a053ae8cd70dd31b27c5b Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Fri, 14 Jun 2024 11:51:12 +0300 Subject: [PATCH 183/220] Fix test_replica_query_race flakiness (#8038) This failed once with `relation "test" does not exist` when trying to run the query on the standby. It's possible that the standby is started before the CREATE TABLE is processed in the pageserver, and the standby opens up for queries before it has received the CREATE TABLE transaction from the primary. To fix, wait for the standby to catch up to the primary before starting to run the queries. https://neon-github-public-dev.s3.amazonaws.com/reports/pr-8025/9483658488/index.html --- test_runner/regress/test_hot_standby.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_runner/regress/test_hot_standby.py b/test_runner/regress/test_hot_standby.py index 1d1b2fb485..8edc8c554c 100644 --- a/test_runner/regress/test_hot_standby.py +++ b/test_runner/regress/test_hot_standby.py @@ -300,7 +300,7 @@ def test_replica_query_race(neon_simple_env: NeonEnv): p_cur.execute("CREATE TABLE test AS SELECT 0 AS counter") standby_ep = env.endpoints.new_replica_start(origin=primary_ep, endpoint_id="standby") - time.sleep(1) + wait_replica_caughtup(primary_ep, standby_ep) # In primary, run a lot of UPDATEs on a single page finished = False From edc900028e5440bb500d16c1a05cef554d92f692 Mon Sep 17 00:00:00 2001 From: Alexander Bayandin Date: Fri, 14 Jun 2024 10:24:13 +0100 Subject: [PATCH 184/220] CI: Update outdated GitHub Actions (#8042) ## Problem We have some amount of outdated action in the CI pipeline, GitHub complains about some of them. 
## Summary of changes - Update `actions/checkout@1` (a really old one) in `vm-compute-node-image` - Update `actions/checkout@3` in `build-build-tools-image` - Update `docker/setup-buildx-action` in all workflows / jobs, it was downgraded in https://github.com/neondatabase/neon/pull/7445, but it it seems it works fine now --- .github/workflows/build-build-tools-image.yml | 4 ++-- .github/workflows/build_and_test.yml | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build-build-tools-image.yml b/.github/workflows/build-build-tools-image.yml index 9aacb09d10..da1efe9571 100644 --- a/.github/workflows/build-build-tools-image.yml +++ b/.github/workflows/build-build-tools-image.yml @@ -55,7 +55,7 @@ jobs: exit 1 fi - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 # Use custom DOCKER_CONFIG directory to avoid conflicts with default settings # The default value is ~/.docker @@ -64,7 +64,7 @@ jobs: mkdir -p /tmp/.docker-custom echo DOCKER_CONFIG=/tmp/.docker-custom >> $GITHUB_ENV - - uses: docker/setup-buildx-action@v2 + - uses: docker/setup-buildx-action@v3 - uses: docker/login-action@v2 with: diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 71ca7329ee..1b433a7033 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -744,7 +744,7 @@ jobs: run: | mkdir -p .docker-custom echo DOCKER_CONFIG=$(pwd)/.docker-custom >> $GITHUB_ENV - - uses: docker/setup-buildx-action@v2 + - uses: docker/setup-buildx-action@v3 - uses: docker/login-action@v3 with: @@ -822,11 +822,11 @@ jobs: run: | mkdir -p .docker-custom echo DOCKER_CONFIG=$(pwd)/.docker-custom >> $GITHUB_ENV - - uses: docker/setup-buildx-action@v2 + - uses: docker/setup-buildx-action@v3 with: # Disable parallelism for docker buildkit. # As we already build everything with `make -j$(nproc)`, running it in additional level of parallelisam blows up the Runner. - config-inline: | + buildkitd-config-inline: | [worker.oci] max-parallelism = 1 @@ -858,7 +858,7 @@ jobs: cache-to: type=registry,ref=neondatabase/compute-node-${{ matrix.version }}:cache-${{ matrix.arch }},mode=max tags: | neondatabase/compute-node-${{ matrix.version }}:${{ needs.tag.outputs.build-tag }}-${{ matrix.arch }} - + - name: Build neon extensions test image if: matrix.version == 'v16' uses: docker/build-push-action@v5 @@ -965,7 +965,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v1 + uses: actions/checkout@v4 with: fetch-depth: 0 From 6843fd8f89a24aa08ad71bcabbb320a3211c979e Mon Sep 17 00:00:00 2001 From: John Spray Date: Fri, 14 Jun 2024 10:37:30 +0100 Subject: [PATCH 185/220] storage controller: always wait for tenant detach before delete (#8049) ## Problem This test could fail with a timeout waiting for tenant deletions. Tenant deletions could get tripped up on nodes transitioning from offline to online at the moment of the deletion. In a previous reconciliation, the reconciler would skip detaching a particular location because the node was offline, but then when we do the delete the node is marked as online and can be picked as the node to use for issuing a deletion request. This hits the "Unexpectedly still attached path", which would still work if the caller kept calling DELETE, but if a caller does a Delete,get,get,get poll, then it doesn't work because the GET calls fail after we've marked the tenant as detached. 
## Summary of changes Fix the undesirable storage controller behavior highlighted by this test failure: - Change tenant deletion flow to _always_ wait for reconciliation to succeed: it was unsound to proceed and return 202 if something was still attached, because after the 202 callers can no longer GET the tenant. Stabilize the test: - Add a reconcile_until_idle to the test, so that it will not have reconciliations running in the background while we mark a node online. This test is not meant to be a chaos test: we should test that kind of complexity elsewhere. - This reconcile_until_idle also fixes another failure mode where the test might see a None for a tenant location because a reconcile was mutating it (https://neon-github-public-dev.s3.amazonaws.com/reports/pr-7288/9500177581/index.html#suites/8fc5d1648d2225380766afde7c428d81/4acece42ae00c442/) It remains the case that a motivated tester could produce a situation where a DELETE gives a 500, when precisely the wrong node transitions from offline to available at the precise moment of a deletion (but the 500 is better than returning 202 and then failing all subsequent GETs). Note that nodes don't go through the offline state during normal restarts, so this is super rare. We should eventually fix this by making DELETE to the pageserver implicitly detach the tenant if it's attached, but that should wait until nobody is using the legacy-style deletes (the ones that use 202 + polling) --- storage_controller/src/service.rs | 26 +++++++++++-------- .../regress/test_storage_controller.py | 3 +++ 2 files changed, 18 insertions(+), 11 deletions(-) diff --git a/storage_controller/src/service.rs b/storage_controller/src/service.rs index 1e81b5c5a2..cf6a95bf0b 100644 --- a/storage_controller/src/service.rs +++ b/storage_controller/src/service.rs @@ -2409,11 +2409,17 @@ impl Service { (detach_waiters, shard_ids, node.clone()) }; - if let Err(e) = self.await_waiters(detach_waiters, RECONCILE_TIMEOUT).await { - // Failing to detach shouldn't hold up deletion, e.g. if a node is offline we should be able - // to use some other node to run the remote deletion. - tracing::warn!("Failed to detach some locations: {e}"); - } + // This reconcile wait can fail in a few ways: + // A there is a very long queue for the reconciler semaphore + // B some pageserver is failing to handle a detach promptly + // C some pageserver goes offline right at the moment we send it a request. + // + // A and C are transient: the semaphore will eventually become available, and once a node is marked offline + // the next attempt to reconcile will silently skip detaches for an offline node and succeed. If B happens, + // it's a bug, and needs resolving at the pageserver level (we shouldn't just leave attachments behind while + // deleting the underlying data). 
+ self.await_waiters(detach_waiters, RECONCILE_TIMEOUT) + .await?; let locations = shard_ids .into_iter() @@ -2431,13 +2437,11 @@ impl Service { for result in results { match result { Ok(StatusCode::ACCEPTED) => { - // This could happen if we failed detach above, and hit a pageserver where the tenant - // is still attached: it will accept the deletion in the background - tracing::warn!( - "Unexpectedly still attached on {}, client should retry", + // This should never happen: we waited for detaches to finish above + return Err(ApiError::InternalServerError(anyhow::anyhow!( + "Unexpectedly still attached on {}", node - ); - return Ok(StatusCode::ACCEPTED); + ))); } Ok(_) => {} Err(mgmt_api::Error::Cancelled) => { diff --git a/test_runner/regress/test_storage_controller.py b/test_runner/regress/test_storage_controller.py index 2031feaa83..f41468210c 100644 --- a/test_runner/regress/test_storage_controller.py +++ b/test_runner/regress/test_storage_controller.py @@ -133,6 +133,9 @@ def test_storage_controller_smoke( wait_until(10, 1, lambda: node_evacuated(env.pageservers[0].id)) + # Let all the reconciliations after marking the node offline complete + env.storage_controller.reconcile_until_idle() + # Marking pageserver active should not migrate anything to it # immediately env.storage_controller.node_configure(env.pageservers[0].id, {"availability": "Active"}) From eb0ca9b648b745142969913a262b2aa4fccbf55a Mon Sep 17 00:00:00 2001 From: John Spray Date: Fri, 14 Jun 2024 11:08:11 +0100 Subject: [PATCH 186/220] pageserver: improved synthetic size & find_gc_cutoff error handling (#8051) ## Problem This PR refactors some error handling to avoid log spam on tenant/timeline shutdown. - "ignoring failure to find gc cutoffs: timeline shutting down." logs (https://github.com/neondatabase/neon/issues/8012) - "synthetic_size_worker: failed to calculate synthetic size for tenant ...: Failed to refresh gc_info before gathering inputs: tenant shutting down", for example here: https://neon-github-public-dev.s3.amazonaws.com/reports/pr-8049/9502988669/index.html#suites/3fc871d9ee8127d8501d607e03205abb/1a074a66548bbcea Closes: https://github.com/neondatabase/neon/issues/8012 ## Summary of changes - Refactor: Add a PageReconstructError variant to GcError: this is the only kind of error that find_gc_cutoffs can emit. - Functional change: only ignore shutdown PageReconstructError variant: for other variants, treat it as a real error - Refactor: add a structured CalculateSyntheticSizeError type and use it instead of anyhow::Error in synthetic size calculations - Functional change: while iterating through timelines gathering logical sizes, only drop out if the whole tenant is cancelled: individual timeline cancellations indicate deletion in progress and we can just ignore those. --- pageserver/src/consumption_metrics.rs | 26 ++--- pageserver/src/http/routes.rs | 9 +- pageserver/src/tenant.rs | 37 +++--- pageserver/src/tenant/size.rs | 106 +++++++++++++----- pageserver/src/tenant/timeline.rs | 2 +- .../fixtures/pageserver/allowed_errors.py | 2 - test_runner/regress/test_tenant_size.py | 4 - 7 files changed, 115 insertions(+), 71 deletions(-) diff --git a/pageserver/src/consumption_metrics.rs b/pageserver/src/consumption_metrics.rs index 540d0d2e8c..18c1a6cd9b 100644 --- a/pageserver/src/consumption_metrics.rs +++ b/pageserver/src/consumption_metrics.rs @@ -2,10 +2,9 @@ //! and push them to a HTTP endpoint. 
use crate::context::{DownloadBehavior, RequestContext}; use crate::task_mgr::{self, TaskKind, BACKGROUND_RUNTIME}; +use crate::tenant::size::CalculateSyntheticSizeError; use crate::tenant::tasks::BackgroundLoopKind; -use crate::tenant::{ - mgr::TenantManager, LogicalSizeCalculationCause, PageReconstructError, Tenant, -}; +use crate::tenant::{mgr::TenantManager, LogicalSizeCalculationCause, Tenant}; use camino::Utf8PathBuf; use consumption_metrics::EventType; use pageserver_api::models::TenantState; @@ -350,19 +349,12 @@ async fn calculate_and_log(tenant: &Tenant, cancel: &CancellationToken, ctx: &Re // Same for the loop that fetches computed metrics. // By using the same limiter, we centralize metrics collection for "start" and "finished" counters, // which turns out is really handy to understand the system. - let Err(e) = tenant.calculate_synthetic_size(CAUSE, cancel, ctx).await else { - return; - }; - - // this error can be returned if timeline is shutting down, but it does not - // mean the synthetic size worker should terminate. - let shutting_down = matches!( - e.downcast_ref::(), - Some(PageReconstructError::Cancelled) - ); - - if !shutting_down { - let tenant_shard_id = tenant.tenant_shard_id(); - error!("failed to calculate synthetic size for tenant {tenant_shard_id}: {e:#}"); + match tenant.calculate_synthetic_size(CAUSE, cancel, ctx).await { + Ok(_) => {} + Err(CalculateSyntheticSizeError::Cancelled) => {} + Err(e) => { + let tenant_shard_id = tenant.tenant_shard_id(); + error!("failed to calculate synthetic size for tenant {tenant_shard_id}: {e:#}"); + } } } diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index 12d02c52fe..657708c0d6 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -1135,7 +1135,10 @@ async fn tenant_size_handler( &ctx, ) .await - .map_err(ApiError::InternalServerError)?; + .map_err(|e| match e { + crate::tenant::size::CalculateSyntheticSizeError::Cancelled => ApiError::ShuttingDown, + other => ApiError::InternalServerError(anyhow::anyhow!(other)), + })?; let mut sizes = None; let accepts_html = headers @@ -1143,9 +1146,7 @@ async fn tenant_size_handler( .map(|v| v == "text/html") .unwrap_or_default(); if !inputs_only.unwrap_or(false) { - let storage_model = inputs - .calculate_model() - .map_err(ApiError::InternalServerError)?; + let storage_model = inputs.calculate_model(); let size = storage_model.calculate(); // If request header expects html, return html diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 0bd3ece2e3..a31fea1e58 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -509,11 +509,24 @@ pub(crate) enum GcError { #[error(transparent)] Remote(anyhow::Error), + // An error reading while calculating GC cutoffs + #[error(transparent)] + GcCutoffs(PageReconstructError), + // If GC was invoked for a particular timeline, this error means it didn't exist #[error("timeline not found")] TimelineNotFound, } +impl From for GcError { + fn from(value: PageReconstructError) -> Self { + match value { + PageReconstructError::Cancelled => Self::TimelineCancelled, + other => Self::GcCutoffs(other), + } + } +} + impl Tenant { /// Yet another helper for timeline initialization. 
/// @@ -2921,17 +2934,9 @@ impl Tenant { .checked_sub(horizon) .unwrap_or(Lsn(0)); - let res = timeline.find_gc_cutoffs(cutoff, pitr, cancel, ctx).await; - - match res { - Ok(cutoffs) => { - let old = gc_cutoffs.insert(timeline.timeline_id, cutoffs); - assert!(old.is_none()); - } - Err(e) => { - tracing::warn!(timeline_id = %timeline.timeline_id, "ignoring failure to find gc cutoffs: {e:#}"); - } - } + let cutoffs = timeline.find_gc_cutoffs(cutoff, pitr, cancel, ctx).await?; + let old = gc_cutoffs.insert(timeline.timeline_id, cutoffs); + assert!(old.is_none()); } if !self.is_active() || self.cancel.is_cancelled() { @@ -3553,7 +3558,7 @@ impl Tenant { cause: LogicalSizeCalculationCause, cancel: &CancellationToken, ctx: &RequestContext, - ) -> anyhow::Result { + ) -> Result { let logical_sizes_at_once = self .conf .concurrent_tenant_size_logical_size_queries @@ -3568,8 +3573,8 @@ impl Tenant { // See more for on the issue #2748 condenced out of the initial PR review. let mut shared_cache = tokio::select! { locked = self.cached_logical_sizes.lock() => locked, - _ = cancel.cancelled() => anyhow::bail!("cancelled"), - _ = self.cancel.cancelled() => anyhow::bail!("tenant is shutting down"), + _ = cancel.cancelled() => return Err(size::CalculateSyntheticSizeError::Cancelled), + _ = self.cancel.cancelled() => return Err(size::CalculateSyntheticSizeError::Cancelled), }; size::gather_inputs( @@ -3593,10 +3598,10 @@ impl Tenant { cause: LogicalSizeCalculationCause, cancel: &CancellationToken, ctx: &RequestContext, - ) -> anyhow::Result { + ) -> Result { let inputs = self.gather_size_inputs(None, cause, cancel, ctx).await?; - let size = inputs.calculate()?; + let size = inputs.calculate(); self.set_cached_synthetic_size(size); diff --git a/pageserver/src/tenant/size.rs b/pageserver/src/tenant/size.rs index 64fff5536c..cdd5b0cbe7 100644 --- a/pageserver/src/tenant/size.rs +++ b/pageserver/src/tenant/size.rs @@ -3,7 +3,6 @@ use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; use std::sync::Arc; -use anyhow::{bail, Context}; use tokio::sync::oneshot::error::RecvError; use tokio::sync::Semaphore; use tokio_util::sync::CancellationToken; @@ -11,7 +10,7 @@ use tokio_util::sync::CancellationToken; use crate::context::RequestContext; use crate::pgdatadir_mapping::CalculateLogicalSizeError; -use super::{LogicalSizeCalculationCause, Tenant}; +use super::{GcError, LogicalSizeCalculationCause, Tenant}; use crate::tenant::Timeline; use utils::id::TimelineId; use utils::lsn::Lsn; @@ -43,6 +42,44 @@ pub struct SegmentMeta { pub kind: LsnKind, } +#[derive(thiserror::Error, Debug)] +pub(crate) enum CalculateSyntheticSizeError { + /// Something went wrong internally to the calculation of logical size at a particular branch point + #[error("Failed to calculated logical size on timeline {timeline_id} at {lsn}: {error}")] + LogicalSize { + timeline_id: TimelineId, + lsn: Lsn, + error: CalculateLogicalSizeError, + }, + + /// Something went wrong internally when calculating GC parameters at start of size calculation + #[error(transparent)] + GcInfo(GcError), + + /// Totally unexpected errors, like panics joining a task + #[error(transparent)] + Fatal(anyhow::Error), + + /// The LSN we are trying to calculate a size at no longer exists at the point we query it + #[error("Could not find size at {lsn} in timeline {timeline_id}")] + LsnNotFound { timeline_id: TimelineId, lsn: Lsn }, + + /// Tenant shut down while calculating size + #[error("Cancelled")] + Cancelled, +} + +impl From for 
CalculateSyntheticSizeError { + fn from(value: GcError) -> Self { + match value { + GcError::TenantCancelled | GcError::TimelineCancelled => { + CalculateSyntheticSizeError::Cancelled + } + other => CalculateSyntheticSizeError::GcInfo(other), + } + } +} + impl SegmentMeta { fn size_needed(&self) -> bool { match self.kind { @@ -116,12 +153,9 @@ pub(super) async fn gather_inputs( cause: LogicalSizeCalculationCause, cancel: &CancellationToken, ctx: &RequestContext, -) -> anyhow::Result { +) -> Result { // refresh is needed to update gc related pitr_cutoff and horizon_cutoff - tenant - .refresh_gc_info(cancel, ctx) - .await - .context("Failed to refresh gc_info before gathering inputs")?; + tenant.refresh_gc_info(cancel, ctx).await?; // Collect information about all the timelines let mut timelines = tenant.list_timelines(); @@ -327,6 +361,12 @@ pub(super) async fn gather_inputs( ) .await?; + if tenant.cancel.is_cancelled() { + // If we're shutting down, return an error rather than a sparse result that might include some + // timelines from before we started shutting down + return Err(CalculateSyntheticSizeError::Cancelled); + } + Ok(ModelInputs { segments, timeline_inputs, @@ -345,7 +385,7 @@ async fn fill_logical_sizes( logical_size_cache: &mut HashMap<(TimelineId, Lsn), u64>, cause: LogicalSizeCalculationCause, ctx: &RequestContext, -) -> anyhow::Result<()> { +) -> Result<(), CalculateSyntheticSizeError> { let timeline_hash: HashMap> = HashMap::from_iter( timelines .iter() @@ -387,7 +427,7 @@ async fn fill_logical_sizes( } // Perform the size lookups - let mut have_any_error = false; + let mut have_any_error = None; while let Some(res) = joinset.join_next().await { // each of these come with Result, JoinError> // because of spawn + spawn_blocking @@ -398,21 +438,36 @@ async fn fill_logical_sizes( Err(join_error) => { // cannot really do anything, as this panic is likely a bug error!("task that calls spawn_ondemand_logical_size_calculation panicked: {join_error:#}"); - have_any_error = true; + + have_any_error = Some(CalculateSyntheticSizeError::Fatal( + anyhow::anyhow!(join_error) + .context("task that calls spawn_ondemand_logical_size_calculation"), + )); } Ok(Err(recv_result_error)) => { // cannot really do anything, as this panic is likely a bug error!("failed to receive logical size query result: {recv_result_error:#}"); - have_any_error = true; + have_any_error = Some(CalculateSyntheticSizeError::Fatal( + anyhow::anyhow!(recv_result_error) + .context("Receiving logical size query result"), + )); } Ok(Ok(TimelineAtLsnSizeResult(timeline, lsn, Err(error)))) => { - if !matches!(error, CalculateLogicalSizeError::Cancelled) { + if matches!(error, CalculateLogicalSizeError::Cancelled) { + // Skip this: it's okay if one timeline among many is shutting down while we + // calculate inputs for the overall tenant. + continue; + } else { warn!( timeline_id=%timeline.timeline_id, "failed to calculate logical size at {lsn}: {error:#}" ); + have_any_error = Some(CalculateSyntheticSizeError::LogicalSize { + timeline_id: timeline.timeline_id, + lsn, + error, + }); } - have_any_error = true; } Ok(Ok(TimelineAtLsnSizeResult(timeline, lsn, Ok(size)))) => { debug!(timeline_id=%timeline.timeline_id, %lsn, size, "size calculated"); @@ -426,10 +481,10 @@ async fn fill_logical_sizes( // prune any keys not needed anymore; we record every used key and added key. 
logical_size_cache.retain(|key, _| sizes_needed.contains_key(key)); - if have_any_error { + if let Some(error) = have_any_error { // we cannot complete this round, because we are missing data. // we have however cached all we were able to request calculation on. - anyhow::bail!("failed to calculate some logical_sizes"); + return Err(error); } // Insert the looked up sizes to the Segments @@ -444,32 +499,29 @@ async fn fill_logical_sizes( if let Some(Some(size)) = sizes_needed.get(&(timeline_id, lsn)) { seg.segment.size = Some(*size); } else { - bail!("could not find size at {} in timeline {}", lsn, timeline_id); + return Err(CalculateSyntheticSizeError::LsnNotFound { timeline_id, lsn }); } } Ok(()) } impl ModelInputs { - pub fn calculate_model(&self) -> anyhow::Result { + pub fn calculate_model(&self) -> tenant_size_model::StorageModel { // Convert SegmentMetas into plain Segments - let storage = StorageModel { + StorageModel { segments: self .segments .iter() .map(|seg| seg.segment.clone()) .collect(), - }; - - Ok(storage) + } } // calculate total project size - pub fn calculate(&self) -> anyhow::Result { - let storage = self.calculate_model()?; + pub fn calculate(&self) -> u64 { + let storage = self.calculate_model(); let sizes = storage.calculate(); - - Ok(sizes.total_size) + sizes.total_size } } @@ -656,7 +708,7 @@ fn verify_size_for_multiple_branches() { "#; let inputs: ModelInputs = serde_json::from_str(doc).unwrap(); - assert_eq!(inputs.calculate().unwrap(), 37_851_408); + assert_eq!(inputs.calculate(), 37_851_408); } #[test] @@ -711,7 +763,7 @@ fn verify_size_for_one_branch() { let model: ModelInputs = serde_json::from_str(doc).unwrap(); - let res = model.calculate_model().unwrap().calculate(); + let res = model.calculate_model().calculate(); println!("calculated synthetic size: {}", res.total_size); println!("result: {:?}", serde_json::to_string(&res.segments)); diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 28627e7911..324d909dac 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -4823,7 +4823,7 @@ impl Timeline { pitr: Duration, cancel: &CancellationToken, ctx: &RequestContext, - ) -> anyhow::Result { + ) -> Result { let _timer = self .metrics .find_gc_cutoffs_histo diff --git a/test_runner/fixtures/pageserver/allowed_errors.py b/test_runner/fixtures/pageserver/allowed_errors.py index ef412cade7..147d5705d3 100755 --- a/test_runner/fixtures/pageserver/allowed_errors.py +++ b/test_runner/fixtures/pageserver/allowed_errors.py @@ -94,8 +94,6 @@ DEFAULT_PAGESERVER_ALLOWED_ERRORS = ( ".*WARN.*path=/v1/utilization .*request was dropped before completing", # Can happen during shutdown ".*scheduling deletion on drop failed: queue is in state Stopped.*", - # Can happen during shutdown - ".*ignoring failure to find gc cutoffs: timeline shutting down.*", ) diff --git a/test_runner/regress/test_tenant_size.py b/test_runner/regress/test_tenant_size.py index d3a228dbeb..a3dd422903 100644 --- a/test_runner/regress/test_tenant_size.py +++ b/test_runner/regress/test_tenant_size.py @@ -678,10 +678,6 @@ def test_synthetic_size_while_deleting(neon_env_builder: NeonEnvBuilder): with pytest.raises(PageserverApiException, match=matcher): completion.result() - # this happens on both cases - env.pageserver.allowed_errors.append( - ".*ignoring failure to find gc cutoffs: timeline shutting down.*" - ) # this happens only in the case of deletion (http response logging) env.pageserver.allowed_errors.append(".*Failed to refresh 
gc_info before gathering inputs.*") From e6eb0020a10163475115960398f3c206b601d0b8 Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Fri, 14 Jun 2024 12:23:52 +0100 Subject: [PATCH 187/220] update rust to 1.79.0 (#8048) ## Problem rust 1.79 new enabled by default lints ## Summary of changes * update to rust 1.79 * `s/default_features/default-features/` * fix proxy dead code. * fix pageserver dead code. --- Cargo.toml | 6 ++-- Dockerfile.build-tools | 2 +- libs/tracing-utils/Cargo.toml | 2 +- pageserver/src/tenant.rs | 3 -- pageserver/src/tenant/timeline.rs | 3 +- pageserver/src/tenant/timeline/delete.rs | 3 -- .../src/rate_limiter/limit_algorithm/aimd.rs | 2 -- proxy/src/scram/messages.rs | 33 ++++++++++--------- rust-toolchain.toml | 2 +- 9 files changed, 25 insertions(+), 31 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index dc89c2341b..8fddaaef12 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -120,7 +120,7 @@ num_cpus = "1.15" num-traits = "0.2.15" once_cell = "1.13" opentelemetry = "0.20.0" -opentelemetry-otlp = { version = "0.13.0", default_features=false, features = ["http-proto", "trace", "http", "reqwest-client"] } +opentelemetry-otlp = { version = "0.13.0", default-features=false, features = ["http-proto", "trace", "http", "reqwest-client"] } opentelemetry-semantic-conventions = "0.12.0" parking_lot = "0.12" parquet = { version = "51.0.0", default-features = false, features = ["zstd"] } @@ -128,7 +128,7 @@ parquet_derive = "51.0.0" pbkdf2 = { version = "0.12.1", features = ["simple", "std"] } pin-project-lite = "0.2" procfs = "0.14" -prometheus = {version = "0.13", default_features=false, features = ["process"]} # removes protobuf dependency +prometheus = {version = "0.13", default-features=false, features = ["process"]} # removes protobuf dependency prost = "0.11" rand = "0.8" redis = { version = "0.25.2", features = ["tokio-rustls-comp", "keep-alive"] } @@ -184,7 +184,7 @@ tower-service = "0.3.2" tracing = "0.1" tracing-error = "0.2.0" tracing-opentelemetry = "0.21.0" -tracing-subscriber = { version = "0.3", default_features = false, features = ["smallvec", "fmt", "tracing-log", "std", "env-filter", "json", "ansi"] } +tracing-subscriber = { version = "0.3", default-features = false, features = ["smallvec", "fmt", "tracing-log", "std", "env-filter", "json", "ansi"] } twox-hash = { version = "1.6.3", default-features = false } url = "2.2" urlencoding = "2.1" diff --git a/Dockerfile.build-tools b/Dockerfile.build-tools index 460b8c996d..e7c61ace0e 100644 --- a/Dockerfile.build-tools +++ b/Dockerfile.build-tools @@ -141,7 +141,7 @@ WORKDIR /home/nonroot # Rust # Please keep the version of llvm (installed above) in sync with rust llvm (`rustc --version --verbose | grep LLVM`) -ENV RUSTC_VERSION=1.78.0 +ENV RUSTC_VERSION=1.79.0 ENV RUSTUP_HOME="/home/nonroot/.rustup" ENV PATH="/home/nonroot/.cargo/bin:${PATH}" RUN curl -sSO https://static.rust-lang.org/rustup/dist/$(uname -m)-unknown-linux-gnu/rustup-init && whoami && \ diff --git a/libs/tracing-utils/Cargo.toml b/libs/tracing-utils/Cargo.toml index b285c9b5b0..512a748124 100644 --- a/libs/tracing-utils/Cargo.toml +++ b/libs/tracing-utils/Cargo.toml @@ -7,7 +7,7 @@ license.workspace = true [dependencies] hyper.workspace = true opentelemetry = { workspace = true, features=["rt-tokio"] } -opentelemetry-otlp = { workspace = true, default_features=false, features = ["http-proto", "trace", "http", "reqwest-client"] } +opentelemetry-otlp = { workspace = true, default-features=false, features = ["http-proto", "trace", "http", 
"reqwest-client"] } opentelemetry-semantic-conventions.workspace = true reqwest = { workspace = true, default-features = false, features = ["rustls-tls"] } tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index a31fea1e58..801321e36d 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -1046,7 +1046,6 @@ impl Tenant { remote_metadata, TimelineResources { remote_client, - deletion_queue_client: self.deletion_queue_client.clone(), timeline_get_throttle: self.timeline_get_throttle.clone(), }, ctx, @@ -1072,7 +1071,6 @@ impl Tenant { timeline_id, &index_part.metadata, remote_timeline_client, - self.deletion_queue_client.clone(), ) .instrument(tracing::info_span!("timeline_delete", %timeline_id)) .await @@ -3448,7 +3446,6 @@ impl Tenant { ); TimelineResources { remote_client, - deletion_queue_client: self.deletion_queue_client.clone(), timeline_get_throttle: self.timeline_get_throttle.clone(), } } diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 324d909dac..08bec329e1 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -62,6 +62,7 @@ use std::{ ops::ControlFlow, }; +use crate::metrics::GetKind; use crate::pgdatadir_mapping::MAX_AUX_FILE_V2_DELTAS; use crate::{ aux_file::AuxFileSizeEstimator, @@ -75,7 +76,6 @@ use crate::{ disk_usage_eviction_task::DiskUsageEvictionInfo, pgdatadir_mapping::CollectKeySpaceError, }; -use crate::{deletion_queue::DeletionQueueClient, metrics::GetKind}; use crate::{ disk_usage_eviction_task::finite_f32, tenant::storage_layer::{ @@ -205,7 +205,6 @@ fn drop_wlock(rlock: tokio::sync::RwLockWriteGuard<'_, T>) { /// The outward-facing resources required to build a Timeline pub struct TimelineResources { pub remote_client: RemoteTimelineClient, - pub deletion_queue_client: DeletionQueueClient, pub timeline_get_throttle: Arc< crate::tenant::throttle::Throttle<&'static crate::metrics::tenant_throttling::TimelineGet>, >, diff --git a/pageserver/src/tenant/timeline/delete.rs b/pageserver/src/tenant/timeline/delete.rs index 5ca8544d49..441298f3e9 100644 --- a/pageserver/src/tenant/timeline/delete.rs +++ b/pageserver/src/tenant/timeline/delete.rs @@ -11,7 +11,6 @@ use utils::{crashsafe, fs_ext, id::TimelineId, pausable_failpoint}; use crate::{ config::PageServerConf, - deletion_queue::DeletionQueueClient, task_mgr::{self, TaskKind}, tenant::{ metadata::TimelineMetadata, @@ -263,7 +262,6 @@ impl DeleteTimelineFlow { timeline_id: TimelineId, local_metadata: &TimelineMetadata, remote_client: RemoteTimelineClient, - deletion_queue_client: DeletionQueueClient, ) -> anyhow::Result<()> { // Note: here we even skip populating layer map. Timeline is essentially uninitialized. // RemoteTimelineClient is the only functioning part. @@ -274,7 +272,6 @@ impl DeleteTimelineFlow { None, // Ancestor is not needed for deletion. TimelineResources { remote_client, - deletion_queue_client, timeline_get_throttle: tenant.timeline_get_throttle.clone(), }, // Important. We dont pass ancestor above because it can be missing. diff --git a/proxy/src/rate_limiter/limit_algorithm/aimd.rs b/proxy/src/rate_limiter/limit_algorithm/aimd.rs index ccc9c42420..b39740bb21 100644 --- a/proxy/src/rate_limiter/limit_algorithm/aimd.rs +++ b/proxy/src/rate_limiter/limit_algorithm/aimd.rs @@ -1,5 +1,3 @@ -use std::usize; - use super::{LimitAlgorithm, Outcome, Sample}; /// Loss-based congestion avoidance. 
diff --git a/proxy/src/scram/messages.rs b/proxy/src/scram/messages.rs index f9372540ca..cf677a3334 100644 --- a/proxy/src/scram/messages.rs +++ b/proxy/src/scram/messages.rs @@ -32,8 +32,6 @@ pub struct ClientFirstMessage<'a> { pub bare: &'a str, /// Channel binding mode. pub cbind_flag: ChannelBinding<&'a str>, - /// (Client username)[]. - pub username: &'a str, /// Client nonce. pub nonce: &'a str, } @@ -58,6 +56,14 @@ impl<'a> ClientFirstMessage<'a> { // In theory, these might be preceded by "reserved-mext" (i.e. "m=") let username = parts.next()?.strip_prefix("n=")?; + + // https://github.com/postgres/postgres/blob/f83908798f78c4cafda217ca875602c88ea2ae28/src/backend/libpq/auth-scram.c#L13-L14 + if !username.is_empty() { + tracing::warn!(username, "scram username provided, but is not expected") + // TODO(conrad): + // return None; + } + let nonce = parts.next()?.strip_prefix("r=")?; // Validate but ignore auth extensions @@ -66,7 +72,6 @@ impl<'a> ClientFirstMessage<'a> { Some(Self { bare, cbind_flag, - username, nonce, }) } @@ -188,19 +193,18 @@ mod tests { // (Almost) real strings captured during debug sessions let cases = [ - (NotSupportedClient, "n,,n=pepe,r=t8JwklwKecDLwSsA72rHmVju"), - (NotSupportedServer, "y,,n=pepe,r=t8JwklwKecDLwSsA72rHmVju"), + (NotSupportedClient, "n,,n=,r=t8JwklwKecDLwSsA72rHmVju"), + (NotSupportedServer, "y,,n=,r=t8JwklwKecDLwSsA72rHmVju"), ( Required("tls-server-end-point"), - "p=tls-server-end-point,,n=pepe,r=t8JwklwKecDLwSsA72rHmVju", + "p=tls-server-end-point,,n=,r=t8JwklwKecDLwSsA72rHmVju", ), ]; for (cb, input) in cases { let msg = ClientFirstMessage::parse(input).unwrap(); - assert_eq!(msg.bare, "n=pepe,r=t8JwklwKecDLwSsA72rHmVju"); - assert_eq!(msg.username, "pepe"); + assert_eq!(msg.bare, "n=,r=t8JwklwKecDLwSsA72rHmVju"); assert_eq!(msg.nonce, "t8JwklwKecDLwSsA72rHmVju"); assert_eq!(msg.cbind_flag, cb); } @@ -208,14 +212,13 @@ mod tests { #[test] fn parse_client_first_message_with_invalid_gs2_authz() { - assert!(ClientFirstMessage::parse("n,authzid,n=user,r=nonce").is_none()) + assert!(ClientFirstMessage::parse("n,authzid,n=,r=nonce").is_none()) } #[test] fn parse_client_first_message_with_extra_params() { - let msg = ClientFirstMessage::parse("n,,n=user,r=nonce,a=foo,b=bar,c=baz").unwrap(); - assert_eq!(msg.bare, "n=user,r=nonce,a=foo,b=bar,c=baz"); - assert_eq!(msg.username, "user"); + let msg = ClientFirstMessage::parse("n,,n=,r=nonce,a=foo,b=bar,c=baz").unwrap(); + assert_eq!(msg.bare, "n=,r=nonce,a=foo,b=bar,c=baz"); assert_eq!(msg.nonce, "nonce"); assert_eq!(msg.cbind_flag, ChannelBinding::NotSupportedClient); } @@ -223,9 +226,9 @@ mod tests { #[test] fn parse_client_first_message_with_extra_params_invalid() { // must be of the form `=<...>` - assert!(ClientFirstMessage::parse("n,,n=user,r=nonce,abc=foo").is_none()); - assert!(ClientFirstMessage::parse("n,,n=user,r=nonce,1=foo").is_none()); - assert!(ClientFirstMessage::parse("n,,n=user,r=nonce,a").is_none()); + assert!(ClientFirstMessage::parse("n,,n=,r=nonce,abc=foo").is_none()); + assert!(ClientFirstMessage::parse("n,,n=,r=nonce,1=foo").is_none()); + assert!(ClientFirstMessage::parse("n,,n=,r=nonce,a").is_none()); } #[test] diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 214de0a77d..dcae25a287 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,5 +1,5 @@ [toolchain] -channel = "1.78.0" +channel = "1.79.0" profile = "default" # The default profile includes rustc, rust-std, cargo, rust-docs, rustfmt and clippy. 
# https://rust-lang.github.io/rustup/concepts/profiles.html From a71f58e69c762e55f0fe9055a088f6232facbf28 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Fri, 14 Jun 2024 05:47:09 +0300 Subject: [PATCH 188/220] Fix test_segment_init_failure. Graceful shutdown broke it. --- test_runner/regress/test_wal_acceptor_async.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/test_runner/regress/test_wal_acceptor_async.py b/test_runner/regress/test_wal_acceptor_async.py index 715d22eed8..971fad787a 100644 --- a/test_runner/regress/test_wal_acceptor_async.py +++ b/test_runner/regress/test_wal_acceptor_async.py @@ -601,13 +601,16 @@ async def run_segment_init_failure(env: NeonEnv): conn = await ep.connect_async() ep.safe_psql("select pg_switch_wal()") # jump to the segment boundary # next insertion should hang until failpoint is disabled. - asyncio.create_task(conn.execute("insert into t select generate_series(1,1), 'payload'")) + bg_query = asyncio.create_task( + conn.execute("insert into t select generate_series(1,1), 'payload'") + ) sleep_sec = 2 await asyncio.sleep(sleep_sec) - # also restart ep at segment boundary to make test more interesting - ep.stop() # it must still be not finished - # assert not bg_query.done() + assert not bg_query.done() + # Also restart ep at segment boundary to make test more interesting. Do it in immediate mode; + # fast will hang because it will try to gracefully finish sending WAL. + ep.stop(mode="immediate") # Without segment rename during init (#6402) previous statement created # partially initialized 16MB segment, so sk restart also triggers #6401. sk.stop().start() From 83eb02b07af20c27842231c995fe883f6d9a6299 Mon Sep 17 00:00:00 2001 From: Alexander Bayandin Date: Fri, 14 Jun 2024 12:43:51 +0100 Subject: [PATCH 189/220] CI: downgrade docker/setup-buildx-action (#8062) ## Problem I've bumped `docker/setup-buildx-action` in #8042 because I wasn't able to reproduce the issue from #7445. 
But now the issue appears again in https://github.com/neondatabase/neon/actions/runs/9514373620/job/26226626923?pr=8059 The steps to reproduce aren't clear, it required `docker/setup-buildx-action@v3` and rebuilding the image without cache, probably ## Summary of changes - Downgrade `docker/setup-buildx-action@v3` to `docker/setup-buildx-action@v2` --- .github/workflows/build-build-tools-image.yml | 2 +- .github/workflows/build_and_test.yml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build-build-tools-image.yml b/.github/workflows/build-build-tools-image.yml index da1efe9571..2c994b08ae 100644 --- a/.github/workflows/build-build-tools-image.yml +++ b/.github/workflows/build-build-tools-image.yml @@ -64,7 +64,7 @@ jobs: mkdir -p /tmp/.docker-custom echo DOCKER_CONFIG=/tmp/.docker-custom >> $GITHUB_ENV - - uses: docker/setup-buildx-action@v3 + - uses: docker/setup-buildx-action@v2 - uses: docker/login-action@v2 with: diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 1b433a7033..703fc8d145 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -744,7 +744,7 @@ jobs: run: | mkdir -p .docker-custom echo DOCKER_CONFIG=$(pwd)/.docker-custom >> $GITHUB_ENV - - uses: docker/setup-buildx-action@v3 + - uses: docker/setup-buildx-action@v2 - uses: docker/login-action@v3 with: @@ -822,11 +822,11 @@ jobs: run: | mkdir -p .docker-custom echo DOCKER_CONFIG=$(pwd)/.docker-custom >> $GITHUB_ENV - - uses: docker/setup-buildx-action@v3 + - uses: docker/setup-buildx-action@v2 with: # Disable parallelism for docker buildkit. # As we already build everything with `make -j$(nproc)`, running it in additional level of parallelisam blows up the Runner. 
- buildkitd-config-inline: | + config-inline: | [worker.oci] max-parallelism = 1 From 81892199f627a1021b3f4f5f8043d35281501c1a Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Fri, 14 Jun 2024 11:57:58 -0400 Subject: [PATCH 190/220] chore(pageserver): vectored get target_keyspace directly accums (#8055) follow up on https://github.com/neondatabase/neon/pull/7904 avoid a layer of indirection introduced by `Vec>` Signed-off-by: Alex Chi Z --- libs/pageserver_api/src/keyspace.rs | 6 ++++++ pageserver/src/tenant/storage_layer.rs | 24 +++++++++++------------- 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/libs/pageserver_api/src/keyspace.rs b/libs/pageserver_api/src/keyspace.rs index 12c6dc3a6d..9a61f2ad81 100644 --- a/libs/pageserver_api/src/keyspace.rs +++ b/libs/pageserver_api/src/keyspace.rs @@ -558,6 +558,12 @@ impl KeySpaceRandomAccum { self.ranges.push(range); } + pub fn add_keyspace(&mut self, keyspace: KeySpace) { + for range in keyspace.ranges { + self.add_range(range); + } + } + pub fn to_keyspace(mut self) -> KeySpace { let mut ranges = Vec::new(); if !self.ranges.is_empty() { diff --git a/pageserver/src/tenant/storage_layer.rs b/pageserver/src/tenant/storage_layer.rs index 0b3f841ccf..9607546ce0 100644 --- a/pageserver/src/tenant/storage_layer.rs +++ b/pageserver/src/tenant/storage_layer.rs @@ -318,7 +318,7 @@ pub(crate) struct LayerFringe { #[derive(Debug)] struct LayerKeyspace { layer: ReadableLayer, - target_keyspace: Vec, + target_keyspace: KeySpaceRandomAccum, } impl LayerFringe { @@ -342,17 +342,13 @@ impl LayerFringe { _, LayerKeyspace { layer, - target_keyspace, + mut target_keyspace, }, - )) => { - let mut keyspace = KeySpaceRandomAccum::new(); - for ks in target_keyspace { - for part in ks.ranges { - keyspace.add_range(part); - } - } - Some((layer, keyspace.consume_keyspace(), read_desc.lsn_range)) - } + )) => Some(( + layer, + target_keyspace.consume_keyspace(), + read_desc.lsn_range, + )), None => unreachable!("fringe internals are always consistent"), } } @@ -367,16 +363,18 @@ impl LayerFringe { let entry = self.layers.entry(layer_id.clone()); match entry { Entry::Occupied(mut entry) => { - entry.get_mut().target_keyspace.push(keyspace); + entry.get_mut().target_keyspace.add_keyspace(keyspace); } Entry::Vacant(entry) => { self.planned_reads_by_lsn.push(ReadDesc { lsn_range, layer_id: layer_id.clone(), }); + let mut accum = KeySpaceRandomAccum::new(); + accum.add_keyspace(keyspace); entry.insert(LayerKeyspace { layer, - target_keyspace: vec![keyspace], + target_keyspace: accum, }); } } From 46210035c551212a1b9383fe5249d547f284c39a Mon Sep 17 00:00:00 2001 From: Peter Bendel Date: Fri, 14 Jun 2024 18:36:50 +0200 Subject: [PATCH 191/220] add halfvec indexing and queries to periodic pgvector performance tests (#8057) ## Problem halfvec data type was introduced in pgvector 0.7.0 and is popular because it allows smaller vectors, smaller indexes and potentially better performance. So far we have not tested halfvec in our periodic performance tests. This PR adds halfvec indexing and halfvec queries to the test. 
--- .github/workflows/benchmarking.yml | 8 ++--- .../performance/pgvector/halfvec_build.sql | 15 +++++++++ ...custom_script_pgvector_halfvec_queries.sql | 13 ++++++++ .../pgvector/pgbench_hnsw_queries.sql | 13 -------- test_runner/performance/test_perf_olap.py | 5 +++ test_runner/performance/test_perf_pgbench.py | 31 +++++++++++++------ .../performance/test_perf_pgvector_queries.py | 24 ++++++++++++++ 7 files changed, 82 insertions(+), 27 deletions(-) create mode 100644 test_runner/performance/pgvector/halfvec_build.sql create mode 100644 test_runner/performance/pgvector/pgbench_custom_script_pgvector_halfvec_queries.sql delete mode 100644 test_runner/performance/pgvector/pgbench_hnsw_queries.sql create mode 100644 test_runner/performance/test_perf_pgvector_queries.py diff --git a/.github/workflows/benchmarking.yml b/.github/workflows/benchmarking.yml index 57d24063bf..9eff483680 100644 --- a/.github/workflows/benchmarking.yml +++ b/.github/workflows/benchmarking.yml @@ -99,7 +99,7 @@ jobs: # Set --sparse-ordering option of pytest-order plugin # to ensure tests are running in order of appears in the file. # It's important for test_perf_pgbench.py::test_pgbench_remote_* tests - extra_params: -m remote_cluster --sparse-ordering --timeout 5400 --ignore test_runner/performance/test_perf_olap.py + extra_params: -m remote_cluster --sparse-ordering --timeout 5400 --ignore test_runner/performance/test_perf_olap.py --ignore test_runner/performance/test_perf_pgvector_queries.py env: BENCHMARK_CONNSTR: ${{ steps.create-neon-project.outputs.dsn }} VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}" @@ -410,14 +410,14 @@ jobs: PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}" BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }} - - name: Benchmark pgvector hnsw queries + - name: Benchmark pgvector queries uses: ./.github/actions/run-python-test-set with: build_type: ${{ env.BUILD_TYPE }} - test_selection: performance + test_selection: performance/test_perf_pgvector_queries.py run_in_parallel: false save_perf_report: ${{ env.SAVE_PERF_REPORT }} - extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_pgvector + extra_params: -m remote_cluster --timeout 21600 env: BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }} VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}" diff --git a/test_runner/performance/pgvector/halfvec_build.sql b/test_runner/performance/pgvector/halfvec_build.sql new file mode 100644 index 0000000000..7e923e4bde --- /dev/null +++ b/test_runner/performance/pgvector/halfvec_build.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS halfvec_test_table; + +CREATE TABLE halfvec_test_table ( + _id text NOT NULL, + title text, + text text, + embeddings halfvec(1536), + PRIMARY KEY (_id) +); + +INSERT INTO halfvec_test_table (_id, title, text, embeddings) +SELECT _id, title, text, embeddings::halfvec +FROM documents; + +CREATE INDEX documents_half_precision_hnsw_idx ON halfvec_test_table USING hnsw (embeddings halfvec_cosine_ops) WITH (m = 64, ef_construction = 128); \ No newline at end of file diff --git a/test_runner/performance/pgvector/pgbench_custom_script_pgvector_halfvec_queries.sql b/test_runner/performance/pgvector/pgbench_custom_script_pgvector_halfvec_queries.sql new file mode 100644 index 0000000000..70d0c18149 --- /dev/null +++ b/test_runner/performance/pgvector/pgbench_custom_script_pgvector_halfvec_queries.sql @@ -0,0 +1,13 @@ +-- run with pooled connection +-- pgbench -T 300 -c 100 -j20 -f pgbench_halfvec_queries.sql 
-postgresql://neondb_owner:@ep-floral-thunder-w1gzhaxi-pooler.eu-west-1.aws.neon.build/neondb?sslmode=require" + +with x (x) as ( + select "embeddings" as x + from halfvec_test_table + TABLESAMPLE SYSTEM (1) + LIMIT 1 +) +SELECT title, "embeddings" <=> (select x from x) as distance +FROM halfvec_test_table +ORDER BY 2 +LIMIT 30; \ No newline at end of file diff --git a/test_runner/performance/pgvector/pgbench_hnsw_queries.sql b/test_runner/performance/pgvector/pgbench_hnsw_queries.sql deleted file mode 100644 index 5034063c1b..0000000000 --- a/test_runner/performance/pgvector/pgbench_hnsw_queries.sql +++ /dev/null @@ -1,13 +0,0 @@ --- run with pooled connection --- pgbench -T 300 -c 100 -j20 -f pgbench_hnsw_queries.sql -postgresql://neondb_owner:@ep-floral-thunder-w1gzhaxi-pooler.eu-west-1.aws.neon.build/neondb?sslmode=require" - -with x (x) as ( - select "embeddings" as x - from hnsw_test_table - TABLESAMPLE SYSTEM (1) - LIMIT 1 -) -SELECT title, "embeddings" <=> (select x from x) as distance -FROM hnsw_test_table -ORDER BY 2 -LIMIT 30; diff --git a/test_runner/performance/test_perf_olap.py b/test_runner/performance/test_perf_olap.py index 2367676e67..aaa2f8fec2 100644 --- a/test_runner/performance/test_perf_olap.py +++ b/test_runner/performance/test_perf_olap.py @@ -106,6 +106,7 @@ QUERIES: Tuple[LabelledQuery, ...] = ( # Disable auto formatting for the list of queries so that it's easier to read # fmt: off PGVECTOR_QUERIES: Tuple[LabelledQuery, ...] = ( + LabelledQuery("PGVPREP", r"ALTER EXTENSION VECTOR UPDATE;"), LabelledQuery("PGV0", r"DROP TABLE IF EXISTS hnsw_test_table;"), LabelledQuery("PGV1", r"CREATE TABLE hnsw_test_table AS TABLE documents WITH NO DATA;"), LabelledQuery("PGV2", r"INSERT INTO hnsw_test_table SELECT * FROM documents;"), @@ -115,6 +116,10 @@ PGVECTOR_QUERIES: Tuple[LabelledQuery, ...] 
= ( LabelledQuery("PGV6", r"CREATE INDEX ON hnsw_test_table USING hnsw (embeddings vector_l1_ops);"), LabelledQuery("PGV7", r"CREATE INDEX ON hnsw_test_table USING hnsw ((binary_quantize(embeddings)::bit(1536)) bit_hamming_ops);"), LabelledQuery("PGV8", r"CREATE INDEX ON hnsw_test_table USING hnsw ((binary_quantize(embeddings)::bit(1536)) bit_jaccard_ops);"), + LabelledQuery("PGV9", r"DROP TABLE IF EXISTS halfvec_test_table;"), + LabelledQuery("PGV10", r"CREATE TABLE halfvec_test_table (_id text NOT NULL, title text, text text, embeddings halfvec(1536), PRIMARY KEY (_id));"), + LabelledQuery("PGV11", r"INSERT INTO halfvec_test_table (_id, title, text, embeddings) SELECT _id, title, text, embeddings::halfvec FROM documents;"), + LabelledQuery("PGV12", r"CREATE INDEX documents_half_precision_hnsw_idx ON halfvec_test_table USING hnsw (embeddings halfvec_cosine_ops) WITH (m = 64, ef_construction = 128);"), ) # fmt: on diff --git a/test_runner/performance/test_perf_pgbench.py b/test_runner/performance/test_perf_pgbench.py index d756d6eeca..6eaa29e4f8 100644 --- a/test_runner/performance/test_perf_pgbench.py +++ b/test_runner/performance/test_perf_pgbench.py @@ -18,6 +18,7 @@ class PgBenchLoadType(enum.Enum): SIMPLE_UPDATE = "simple-update" SELECT_ONLY = "select-only" PGVECTOR_HNSW = "pgvector-hnsw" + PGVECTOR_HALFVEC = "pgvector-halfvec" def utc_now_timestamp() -> int: @@ -153,6 +154,26 @@ def run_test_pgbench(env: PgCompare, scale: int, duration: int, workload_type: P password=password, ) + if workload_type == PgBenchLoadType.PGVECTOR_HALFVEC: + # Run simple-update workload + run_pgbench( + env, + "pgvector-halfvec", + [ + "pgbench", + "-f", + "test_runner/performance/pgvector/pgbench_custom_script_pgvector_halfvec_queries.sql", + "-c100", + "-j20", + f"-T{duration}", + "-P2", + "--protocol=prepared", + "--progress-timestamp", + connstr, + ], + password=password, + ) + env.report_size() @@ -222,13 +243,3 @@ def test_pgbench_remote_simple_update(remote_compare: PgCompare, scale: int, dur @pytest.mark.remote_cluster def test_pgbench_remote_select_only(remote_compare: PgCompare, scale: int, duration: int): run_test_pgbench(remote_compare, scale, duration, PgBenchLoadType.SELECT_ONLY) - - -# The following test runs on an existing database that has pgvector extension installed -# and a table with 1 million embedding vectors loaded and indexed with HNSW. -# -# Run this pgbench tests against an existing remote Postgres cluster with the necessary setup. -@pytest.mark.parametrize("duration", get_durations_matrix()) -@pytest.mark.remote_cluster -def test_pgbench_remote_pgvector(remote_compare: PgCompare, duration: int): - run_test_pgbench(remote_compare, 1, duration, PgBenchLoadType.PGVECTOR_HNSW) diff --git a/test_runner/performance/test_perf_pgvector_queries.py b/test_runner/performance/test_perf_pgvector_queries.py new file mode 100644 index 0000000000..bb3db16305 --- /dev/null +++ b/test_runner/performance/test_perf_pgvector_queries.py @@ -0,0 +1,24 @@ +import pytest +from fixtures.compare_fixtures import PgCompare + +from performance.test_perf_pgbench import PgBenchLoadType, get_durations_matrix, run_test_pgbench + + +# The following test runs on an existing database that has pgvector extension installed +# and a table with 1 million embedding vectors loaded and indexed with HNSW. +# +# Run this pgbench tests against an existing remote Postgres cluster with the necessary setup. 
+@pytest.mark.parametrize("duration", get_durations_matrix()) +@pytest.mark.remote_cluster +def test_pgbench_remote_pgvector_hnsw(remote_compare: PgCompare, duration: int): + run_test_pgbench(remote_compare, 1, duration, PgBenchLoadType.PGVECTOR_HNSW) + + +# The following test runs on an existing database that has pgvector extension installed +# and a table with 1 million embedding vectors loaded and indexed with halfvec. +# +# Run this pgbench tests against an existing remote Postgres cluster with the necessary setup. +@pytest.mark.parametrize("duration", get_durations_matrix()) +@pytest.mark.remote_cluster +def test_pgbench_remote_pgvector_halfvec(remote_compare: PgCompare, duration: int): + run_test_pgbench(remote_compare, 1, duration, PgBenchLoadType.PGVECTOR_HALFVEC) From 2ba414525e7605c8570eadaccef576321d601571 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Thu, 13 Jun 2024 22:56:01 +0300 Subject: [PATCH 192/220] Install rust binaries before running rust tests. cargo test (or nextest) might rebuild the binaries with different features/flags, so do install immediately after the build. Triggered by the particular case of nextest invocations missing $CARGO_FEATURES, which recompiled safekeeper without 'testing' feature which made python tests needing it (failpoints) not run in the CI. Also add CARGO_FEATURES to the nextest runs anyway because there doesn't seem to be an important reason not to. --- .github/workflows/build_and_test.yml | 56 ++++++++++++++-------------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 703fc8d145..bd2996ec4c 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -337,34 +337,8 @@ jobs: run: | ${cov_prefix} mold -run cargo build $CARGO_FLAGS $CARGO_FEATURES --bins --tests - - name: Run rust tests - env: - NEXTEST_RETRIES: 3 - run: | - #nextest does not yet support running doctests - cargo test --doc $CARGO_FLAGS $CARGO_FEATURES - - for io_engine in std-fs tokio-epoll-uring ; do - NEON_PAGESERVER_UNIT_TEST_VIRTUAL_FILE_IOENGINE=$io_engine ${cov_prefix} cargo nextest run $CARGO_FLAGS $CARGO_FEATURES - done - - # Run separate tests for real S3 - export ENABLE_REAL_S3_REMOTE_STORAGE=nonempty - export REMOTE_STORAGE_S3_BUCKET=neon-github-ci-tests - export REMOTE_STORAGE_S3_REGION=eu-central-1 - # Avoid `$CARGO_FEATURES` since there's no `testing` feature in the e2e tests now - ${cov_prefix} cargo nextest run $CARGO_FLAGS -E 'package(remote_storage)' -E 'test(test_real_s3)' - - # Run separate tests for real Azure Blob Storage - # XXX: replace region with `eu-central-1`-like region - export ENABLE_REAL_AZURE_REMOTE_STORAGE=y - export AZURE_STORAGE_ACCOUNT="${{ secrets.AZURE_STORAGE_ACCOUNT_DEV }}" - export AZURE_STORAGE_ACCESS_KEY="${{ secrets.AZURE_STORAGE_ACCESS_KEY_DEV }}" - export REMOTE_STORAGE_AZURE_CONTAINER="${{ vars.REMOTE_STORAGE_AZURE_CONTAINER }}" - export REMOTE_STORAGE_AZURE_REGION="${{ vars.REMOTE_STORAGE_AZURE_REGION }}" - # Avoid `$CARGO_FEATURES` since there's no `testing` feature in the e2e tests now - ${cov_prefix} cargo nextest run $CARGO_FLAGS -E 'package(remote_storage)' -E 'test(test_real_azure)' - + # Do install *before* running rust tests because they might recompile the + # binaries with different features/flags. 
- name: Install rust binaries run: | # Install target binaries @@ -405,6 +379,32 @@ jobs: done fi + - name: Run rust tests + env: + NEXTEST_RETRIES: 3 + run: | + #nextest does not yet support running doctests + cargo test --doc $CARGO_FLAGS $CARGO_FEATURES + + for io_engine in std-fs tokio-epoll-uring ; do + NEON_PAGESERVER_UNIT_TEST_VIRTUAL_FILE_IOENGINE=$io_engine ${cov_prefix} cargo nextest run $CARGO_FLAGS $CARGO_FEATURES + done + + # Run separate tests for real S3 + export ENABLE_REAL_S3_REMOTE_STORAGE=nonempty + export REMOTE_STORAGE_S3_BUCKET=neon-github-ci-tests + export REMOTE_STORAGE_S3_REGION=eu-central-1 + ${cov_prefix} cargo nextest run $CARGO_FLAGS $CARGO_FEATURES -E 'package(remote_storage)' -E 'test(test_real_s3)' + + # Run separate tests for real Azure Blob Storage + # XXX: replace region with `eu-central-1`-like region + export ENABLE_REAL_AZURE_REMOTE_STORAGE=y + export AZURE_STORAGE_ACCOUNT="${{ secrets.AZURE_STORAGE_ACCOUNT_DEV }}" + export AZURE_STORAGE_ACCESS_KEY="${{ secrets.AZURE_STORAGE_ACCESS_KEY_DEV }}" + export REMOTE_STORAGE_AZURE_CONTAINER="${{ vars.REMOTE_STORAGE_AZURE_CONTAINER }}" + export REMOTE_STORAGE_AZURE_REGION="${{ vars.REMOTE_STORAGE_AZURE_REGION }}" + ${cov_prefix} cargo nextest run $CARGO_FLAGS $CARGO_FEATURES -E 'package(remote_storage)' -E 'test(test_real_azure)' + - name: Install postgres binaries run: cp -a pg_install /tmp/neon/pg_install From 16d80128eea32b0f2fd1051c90e93e8e0d537381 Mon Sep 17 00:00:00 2001 From: Vlad Lazar Date: Mon, 17 Jun 2024 11:40:35 +0100 Subject: [PATCH 193/220] storcon: handle entire cluster going unavailable correctly (#8060) ## Problem A period of unavailability for all pageservers in a cluster produced the following fallout in staging: all tenants became detached and required manual operation to re-attach. Manually restarting the storage controller re-attached all tenants due to a consistency bug. Turns out there are two related bugs which caused the issue: 1. Pageserver re-attach can be processed before the first heartbeat. Hence, when handling the availability delta produced by the heartbeater, `Node::get_availability_transition` claims that there's no need to reconfigure the node. 2. We would still attempt to reschedule tenant shards when handling offline transitions even if the entire cluster is down. This puts tenant shards into a state where the reconciler believes they have to be detached (no pageserver shows up in their intent state). This is doubly wrong because we don't mark the tenant shards as detached in the database, thus causing memory vs database consistency issues. Luckily, this bug allowed all tenant shards to re-attach after restart. ## Summary of changes * For (1), abuse the fact that re-attach requests do not contain an utilisation score and use that to differentiate from a node that replied to heartbeats. * For (2), introduce a special case that skips any rescheduling if the entire cluster is unavailable. * Update the storage controller heartbeat test with an extra scenario where the entire cluster goes for lunch. 
Fixes https://github.com/neondatabase/neon/issues/8044 --- storage_controller/src/heartbeater.rs | 4 + storage_controller/src/node.rs | 12 ++- storage_controller/src/service.rs | 88 +++++++++++++---- .../regress/test_storage_controller.py | 94 ++++++++++++------- 4 files changed, 141 insertions(+), 57 deletions(-) diff --git a/storage_controller/src/heartbeater.rs b/storage_controller/src/heartbeater.rs index 1ef97e78eb..14cda0a289 100644 --- a/storage_controller/src/heartbeater.rs +++ b/storage_controller/src/heartbeater.rs @@ -31,6 +31,7 @@ pub(crate) enum PageserverState { Available { last_seen_at: Instant, utilization: PageserverUtilization, + new: bool, }, Offline, } @@ -127,6 +128,7 @@ impl HeartbeaterTask { heartbeat_futs.push({ let jwt_token = self.jwt_token.clone(); let cancel = self.cancel.clone(); + let new_node = !self.state.contains_key(node_id); // Clone the node and mark it as available such that the request // goes through to the pageserver even when the node is marked offline. @@ -159,6 +161,7 @@ impl HeartbeaterTask { PageserverState::Available { last_seen_at: Instant::now(), utilization, + new: new_node, } } else { PageserverState::Offline @@ -220,6 +223,7 @@ impl HeartbeaterTask { } }, Vacant(_) => { + // This is a new node. Don't generate a delta for it. deltas.push((node_id, ps_state.clone())); } } diff --git a/storage_controller/src/node.rs b/storage_controller/src/node.rs index 7b5513c908..34dcf0c642 100644 --- a/storage_controller/src/node.rs +++ b/storage_controller/src/node.rs @@ -3,7 +3,7 @@ use std::{str::FromStr, time::Duration}; use pageserver_api::{ controller_api::{ NodeAvailability, NodeDescribeResponse, NodeRegisterRequest, NodeSchedulingPolicy, - TenantLocateResponseShard, + TenantLocateResponseShard, UtilizationScore, }, shard::TenantShardId, }; @@ -116,6 +116,16 @@ impl Node { match (self.availability, availability) { (Offline, Active(_)) => ToActive, (Active(_), Offline) => ToOffline, + // Consider the case when the storage controller handles the re-attach of a node + // before the heartbeats detect that the node is back online. We still need + // [`Service::node_configure`] to attempt reconciliations for shards with an + // unknown observed location. + // The unsavoury match arm below handles this situation. + (Active(lhs), Active(rhs)) + if lhs == UtilizationScore::worst() && rhs < UtilizationScore::worst() => + { + ToActive + } _ => Unchanged, } } diff --git a/storage_controller/src/service.rs b/storage_controller/src/service.rs index cf6a95bf0b..926332f946 100644 --- a/storage_controller/src/service.rs +++ b/storage_controller/src/service.rs @@ -12,7 +12,7 @@ use crate::{ id_lock_map::{trace_exclusive_lock, trace_shared_lock, IdLockMap, WrappedWriteGuard}, persistence::{AbortShardSplitStatus, TenantFilter}, reconciler::{ReconcileError, ReconcileUnits}, - scheduler::{ScheduleContext, ScheduleMode}, + scheduler::{MaySchedule, ScheduleContext, ScheduleMode}, tenant_shard::{ MigrateAttachment, ReconcileNeeded, ScheduleOptimization, ScheduleOptimizationAction, }, @@ -747,29 +747,61 @@ impl Service { let res = self.heartbeater.heartbeat(nodes).await; if let Ok(deltas) = res { for (node_id, state) in deltas.0 { - let new_availability = match state { - PageserverState::Available { utilization, .. } => NodeAvailability::Active( - UtilizationScore(utilization.utilization_score), + let (new_node, new_availability) = match state { + PageserverState::Available { + utilization, new, .. 
+ } => ( + new, + NodeAvailability::Active(UtilizationScore( + utilization.utilization_score, + )), ), - PageserverState::Offline => NodeAvailability::Offline, + PageserverState::Offline => (false, NodeAvailability::Offline), }; - let res = self - .node_configure(node_id, Some(new_availability), None) - .await; - match res { - Ok(()) => {} - Err(ApiError::NotFound(_)) => { - // This should be rare, but legitimate since the heartbeats are done - // on a snapshot of the nodes. - tracing::info!("Node {} was not found after heartbeat round", node_id); + if new_node { + // When the heartbeats detect a newly added node, we don't wish + // to attempt to reconcile the shards assigned to it. The node + // is likely handling it's re-attach response, so reconciling now + // would be counterproductive. + // + // Instead, update the in-memory state with the details learned about the + // node. + let mut locked = self.inner.write().unwrap(); + let (nodes, _tenants, scheduler) = locked.parts_mut(); + + let mut new_nodes = (**nodes).clone(); + + if let Some(node) = new_nodes.get_mut(&node_id) { + node.set_availability(new_availability); + scheduler.node_upsert(node); } - Err(err) => { - tracing::error!( - "Failed to update node {} after heartbeat round: {}", - node_id, - err - ); + + locked.nodes = Arc::new(new_nodes); + } else { + // This is the code path for geniune availability transitions (i.e node + // goes unavailable and/or comes back online). + let res = self + .node_configure(node_id, Some(new_availability), None) + .await; + + match res { + Ok(()) => {} + Err(ApiError::NotFound(_)) => { + // This should be rare, but legitimate since the heartbeats are done + // on a snapshot of the nodes. + tracing::info!( + "Node {} was not found after heartbeat round", + node_id + ); + } + Err(err) => { + tracing::error!( + "Failed to update node {} after heartbeat round: {}", + node_id, + err + ); + } } } } @@ -4316,6 +4348,16 @@ impl Service { continue; } + if !new_nodes + .values() + .any(|n| matches!(n.may_schedule(), MaySchedule::Yes(_))) + { + // Special case for when all nodes are unavailable and/or unschedulable: there is no point + // trying to reschedule since there's nowhere else to go. Without this + // branch we incorrectly detach tenants in response to node unavailability. + continue; + } + if tenant_shard.intent.demote_attached(scheduler, node_id) { tenant_shard.sequence = tenant_shard.sequence.next(); @@ -4353,6 +4395,12 @@ impl Service { // When a node comes back online, we must reconcile any tenant that has a None observed // location on the node. for tenant_shard in locked.tenants.values_mut() { + // If a reconciliation is already in progress, rely on the previous scheduling + // decision and skip triggering a new reconciliation. 
+ if tenant_shard.reconciler.is_some() { + continue; + } + if let Some(observed_loc) = tenant_shard.observed.locations.get_mut(&node_id) { if observed_loc.conf.is_none() { self.maybe_reconcile_shard(tenant_shard, &new_nodes); diff --git a/test_runner/regress/test_storage_controller.py b/test_runner/regress/test_storage_controller.py index f41468210c..8624a45f45 100644 --- a/test_runner/regress/test_storage_controller.py +++ b/test_runner/regress/test_storage_controller.py @@ -934,19 +934,27 @@ class Failure: def clear(self, env: NeonEnv): raise NotImplementedError() + def nodes(self): + raise NotImplementedError() + class NodeStop(Failure): - def __init__(self, pageserver_id, immediate): - self.pageserver_id = pageserver_id + def __init__(self, pageserver_ids, immediate): + self.pageserver_ids = pageserver_ids self.immediate = immediate def apply(self, env: NeonEnv): - pageserver = env.get_pageserver(self.pageserver_id) - pageserver.stop(immediate=self.immediate) + for ps_id in self.pageserver_ids: + pageserver = env.get_pageserver(ps_id) + pageserver.stop(immediate=self.immediate) def clear(self, env: NeonEnv): - pageserver = env.get_pageserver(self.pageserver_id) - pageserver.start() + for ps_id in self.pageserver_ids: + pageserver = env.get_pageserver(ps_id) + pageserver.start() + + def nodes(self): + return self.pageserver_ids class PageserverFailpoint(Failure): @@ -962,6 +970,9 @@ class PageserverFailpoint(Failure): pageserver = env.get_pageserver(self.pageserver_id) pageserver.http_client().configure_failpoints((self.failpoint, "off")) + def nodes(self): + return [self.pageserver_id] + def build_node_to_tenants_map(env: NeonEnv) -> dict[int, list[TenantId]]: tenants = env.storage_controller.tenant_list() @@ -985,8 +996,9 @@ def build_node_to_tenants_map(env: NeonEnv) -> dict[int, list[TenantId]]: @pytest.mark.parametrize( "failure", [ - NodeStop(pageserver_id=1, immediate=False), - NodeStop(pageserver_id=1, immediate=True), + NodeStop(pageserver_ids=[1], immediate=False), + NodeStop(pageserver_ids=[1], immediate=True), + NodeStop(pageserver_ids=[1, 2], immediate=True), PageserverFailpoint(pageserver_id=1, failpoint="get-utilization-http-handler"), ], ) @@ -1039,33 +1051,50 @@ def test_storage_controller_heartbeats( wait_until(10, 1, tenants_placed) # ... then we apply the failure - offline_node_id = failure.pageserver_id - online_node_id = (set(range(1, len(env.pageservers) + 1)) - {offline_node_id}).pop() - env.get_pageserver(offline_node_id).allowed_errors.append( - # In the case of the failpoint failure, the impacted pageserver - # still believes it has the tenant attached since location - # config calls into it will fail due to being marked offline. - ".*Dropped remote consistent LSN updates.*", - ) + offline_node_ids = set(failure.nodes()) + online_node_ids = set(range(1, len(env.pageservers) + 1)) - offline_node_ids + + for node_id in offline_node_ids: + env.get_pageserver(node_id).allowed_errors.append( + # In the case of the failpoint failure, the impacted pageserver + # still believes it has the tenant attached since location + # config calls into it will fail due to being marked offline. + ".*Dropped remote consistent LSN updates.*", + ) + + if len(offline_node_ids) > 1: + env.get_pageserver(node_id).allowed_errors.append( + ".*Scheduling error when marking pageserver.*offline.*", + ) failure.apply(env) # ... 
expecting the heartbeats to mark it offline - def node_offline(): + def nodes_offline(): nodes = env.storage_controller.node_list() log.info(f"{nodes=}") - target = next(n for n in nodes if n["id"] == offline_node_id) - assert target["availability"] == "Offline" + for node in nodes: + if node["id"] in offline_node_ids: + assert node["availability"] == "Offline" # A node is considered offline if the last successful heartbeat # was more than 10 seconds ago (hardcoded in the storage controller). - wait_until(20, 1, node_offline) + wait_until(20, 1, nodes_offline) # .. expecting the tenant on the offline node to be migrated def tenant_migrated(): + if len(online_node_ids) == 0: + time.sleep(5) + return + node_to_tenants = build_node_to_tenants_map(env) log.info(f"{node_to_tenants=}") - assert set(node_to_tenants[online_node_id]) == set(tenant_ids) + + observed_tenants = set() + for node_id in online_node_ids: + observed_tenants |= set(node_to_tenants[node_id]) + + assert observed_tenants == set(tenant_ids) wait_until(10, 1, tenant_migrated) @@ -1073,31 +1102,24 @@ def test_storage_controller_heartbeats( failure.clear(env) # ... expecting the offline node to become active again - def node_online(): + def nodes_online(): nodes = env.storage_controller.node_list() - target = next(n for n in nodes if n["id"] == offline_node_id) - assert target["availability"] == "Active" + for node in nodes: + if node["id"] in online_node_ids: + assert node["availability"] == "Active" - wait_until(10, 1, node_online) + wait_until(10, 1, nodes_online) time.sleep(5) - # ... then we create a new tenant - tid = TenantId.generate() - env.storage_controller.tenant_create(tid) - - # ... expecting it to be placed on the node that just came back online - tenants = env.storage_controller.tenant_list() - newest_tenant = next(t for t in tenants if t["tenant_shard_id"] == str(tid)) - locations = list(newest_tenant["observed"]["locations"].keys()) - locations = [int(node_id) for node_id in locations] - assert locations == [offline_node_id] + node_to_tenants = build_node_to_tenants_map(env) + log.info(f"Back online: {node_to_tenants=}") # ... expecting the storage controller to reach a consistent state def storage_controller_consistent(): env.storage_controller.consistency_check() - wait_until(10, 1, storage_controller_consistent) + wait_until(30, 1, storage_controller_consistent) def test_storage_controller_re_attach(neon_env_builder: NeonEnvBuilder): From b6e1c09c733a699cbaf76ff8508f9552eec1db7f Mon Sep 17 00:00:00 2001 From: Alexander Bayandin Date: Mon, 17 Jun 2024 12:47:20 +0100 Subject: [PATCH 194/220] CI(check-build-tools-image): change build-tools image persistent tag (#8059) ## Problem We don't rebuild `build-tools` image for changes in a workflow that builds this image itself (`.github/workflows/build-build-tools-image.yml`) or in a workflow that determines which tag to use (`.github/workflows/check-build-tools-image.yml`) ## Summary of changes - Use a hash of `Dockerfile.build-tools` and workflow files as a persistent tag instead of using a commit sha. 
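
The tagging idea can be pictured with a small sketch. This is an illustration only, not the CI code: the workflow itself relies on GitHub's built-in `hashFiles()` expression, and the file list below simply mirrors the files named in this change. The point is that the tag is a pure content hash of the inputs that define the image, so editing any of them yields a new tag (and a rebuild), while unrelated commits keep resolving to the already-published image.

```python
# Sketch: derive a persistent build-tools image tag from the files that define it.
import hashlib
from pathlib import Path

# Files assumed to define the build-tools image (mirrors the hashFiles() call).
INPUTS = [
    "Dockerfile.build-tools",
    ".github/workflows/check-build-tools-image.yml",
    ".github/workflows/build-build-tools-image.yml",
]

def build_tools_tag(repo_root: str = ".") -> str:
    h = hashlib.sha256()
    for rel in INPUTS:
        # Any stable content hash works for this sketch; GitHub's hashFiles()
        # uses its own SHA-256 based digest over the matched files.
        h.update(Path(repo_root, rel).read_bytes())
    return h.hexdigest()

if __name__ == "__main__":
    print(f"image-tag={build_tools_tag()}")
```
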
--- .github/workflows/build-build-tools-image.yml | 1 - .github/workflows/check-build-tools-image.yml | 23 ++++++------------- 2 files changed, 7 insertions(+), 17 deletions(-) diff --git a/.github/workflows/build-build-tools-image.yml b/.github/workflows/build-build-tools-image.yml index 2c994b08ae..6e90a80ab7 100644 --- a/.github/workflows/build-build-tools-image.yml +++ b/.github/workflows/build-build-tools-image.yml @@ -30,7 +30,6 @@ jobs: check-image: uses: ./.github/workflows/check-build-tools-image.yml - # This job uses older version of GitHub Actions because it's run on gen2 runners, which don't support node 20 (for newer versions) build-image: needs: [ check-image ] if: needs.check-image.outputs.found == 'false' diff --git a/.github/workflows/check-build-tools-image.yml b/.github/workflows/check-build-tools-image.yml index 97116940a0..807a9ef3bd 100644 --- a/.github/workflows/check-build-tools-image.yml +++ b/.github/workflows/check-build-tools-image.yml @@ -25,26 +25,17 @@ jobs: found: ${{ steps.check-image.outputs.found }} steps: + - uses: actions/checkout@v4 + - name: Get build-tools image tag for the current commit id: get-build-tools-tag env: - # Usually, for COMMIT_SHA, we use `github.event.pull_request.head.sha || github.sha`, but here, even for PRs, - # we want to use `github.sha` i.e. point to a phantom merge commit to determine the image tag correctly. - COMMIT_SHA: ${{ github.sha }} - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + IMAGE_TAG: | + ${{ hashFiles('Dockerfile.build-tools', + '.github/workflows/check-build-tools-image.yml', + '.github/workflows/build-build-tools-image.yml') }} run: | - LAST_BUILD_TOOLS_SHA=$( - gh api \ - -H "Accept: application/vnd.github+json" \ - -H "X-GitHub-Api-Version: 2022-11-28" \ - --method GET \ - --field path=Dockerfile.build-tools \ - --field sha=${COMMIT_SHA} \ - --field per_page=1 \ - --jq ".[0].sha" \ - "/repos/${GITHUB_REPOSITORY}/commits" - ) - echo "image-tag=${LAST_BUILD_TOOLS_SHA}" | tee -a $GITHUB_OUTPUT + echo "image-tag=${IMAGE_TAG}" | tee -a $GITHUB_OUTPUT - name: Check if such tag found in the registry id: check-image From e729f282051a990e77edfe613c45749f9d3d5fbe Mon Sep 17 00:00:00 2001 From: MMeent Date: Mon, 17 Jun 2024 20:57:49 +0200 Subject: [PATCH 195/220] Fix log rates (#8035) ## Summary of changes - Stop logging HealthCheck message passing at INFO level (moved to DEBUG) - Stop logging /status accesses at INFO (moved to DEBUG) - Stop logging most occurances of `missing config file "compute_ctl_temp_override.conf"` - Log memory usage only when the data has changed significantly, or if we've not recently logged the data, rather than always every 2 seconds. --- compute_tools/src/compute.rs | 70 +++++++++++---------- compute_tools/src/config.rs | 19 +++--- compute_tools/src/http/api.rs | 4 +- libs/vm_monitor/src/cgroup.rs | 101 +++++++++++++++++++++++++++++- libs/vm_monitor/src/dispatcher.rs | 13 ++-- libs/vm_monitor/src/runner.rs | 23 ++++--- vm-image-spec.yaml | 5 +- 7 files changed, 176 insertions(+), 59 deletions(-) diff --git a/compute_tools/src/compute.rs b/compute_tools/src/compute.rs index 40060f4117..a79b666409 100644 --- a/compute_tools/src/compute.rs +++ b/compute_tools/src/compute.rs @@ -918,38 +918,39 @@ impl ComputeNode { // temporarily reset max_cluster_size in config // to avoid the possibility of hitting the limit, while we are reconfiguring: // creating new extensions, roles, etc... 
- config::compute_ctl_temp_override_create(pgdata_path, "neon.max_cluster_size=-1")?; - self.pg_reload_conf()?; + config::with_compute_ctl_tmp_override(pgdata_path, "neon.max_cluster_size=-1", || { + self.pg_reload_conf()?; - let mut client = Client::connect(self.connstr.as_str(), NoTls)?; + let mut client = Client::connect(self.connstr.as_str(), NoTls)?; - // Proceed with post-startup configuration. Note, that order of operations is important. - // Disable DDL forwarding because control plane already knows about these roles/databases. - if spec.mode == ComputeMode::Primary { - client.simple_query("SET neon.forward_ddl = false")?; - cleanup_instance(&mut client)?; - handle_roles(&spec, &mut client)?; - handle_databases(&spec, &mut client)?; - handle_role_deletions(&spec, self.connstr.as_str(), &mut client)?; - handle_grants( - &spec, - &mut client, - self.connstr.as_str(), - self.has_feature(ComputeFeature::AnonExtension), - )?; - handle_extensions(&spec, &mut client)?; - handle_extension_neon(&mut client)?; - // We can skip handle_migrations here because a new migration can only appear - // if we have a new version of the compute_ctl binary, which can only happen - // if compute got restarted, in which case we'll end up inside of apply_config - // instead of reconfigure. - } + // Proceed with post-startup configuration. Note, that order of operations is important. + // Disable DDL forwarding because control plane already knows about these roles/databases. + if spec.mode == ComputeMode::Primary { + client.simple_query("SET neon.forward_ddl = false")?; + cleanup_instance(&mut client)?; + handle_roles(&spec, &mut client)?; + handle_databases(&spec, &mut client)?; + handle_role_deletions(&spec, self.connstr.as_str(), &mut client)?; + handle_grants( + &spec, + &mut client, + self.connstr.as_str(), + self.has_feature(ComputeFeature::AnonExtension), + )?; + handle_extensions(&spec, &mut client)?; + handle_extension_neon(&mut client)?; + // We can skip handle_migrations here because a new migration can only appear + // if we have a new version of the compute_ctl binary, which can only happen + // if compute got restarted, in which case we'll end up inside of apply_config + // instead of reconfigure. + } - // 'Close' connection - drop(client); + // 'Close' connection + drop(client); + + Ok(()) + })?; - // reset max_cluster_size in config back to original value and reload config - config::compute_ctl_temp_override_remove(pgdata_path)?; self.pg_reload_conf()?; let unknown_op = "unknown".to_string(); @@ -1040,12 +1041,17 @@ impl ComputeNode { // temporarily reset max_cluster_size in config // to avoid the possibility of hitting the limit, while we are applying config: // creating new extensions, roles, etc... 
- config::compute_ctl_temp_override_create(pgdata_path, "neon.max_cluster_size=-1")?; - self.pg_reload_conf()?; + config::with_compute_ctl_tmp_override( + pgdata_path, + "neon.max_cluster_size=-1", + || { + self.pg_reload_conf()?; - self.apply_config(&compute_state)?; + self.apply_config(&compute_state)?; - config::compute_ctl_temp_override_remove(pgdata_path)?; + Ok(()) + }, + )?; self.pg_reload_conf()?; } self.post_apply_config()?; diff --git a/compute_tools/src/config.rs b/compute_tools/src/config.rs index 89c866b20c..2c4aec4116 100644 --- a/compute_tools/src/config.rs +++ b/compute_tools/src/config.rs @@ -131,18 +131,17 @@ pub fn write_postgres_conf( Ok(()) } -/// create file compute_ctl_temp_override.conf in pgdata_dir -/// add provided options to this file -pub fn compute_ctl_temp_override_create(pgdata_path: &Path, options: &str) -> Result<()> { +pub fn with_compute_ctl_tmp_override(pgdata_path: &Path, options: &str, exec: F) -> Result<()> +where + F: FnOnce() -> Result<()>, +{ let path = pgdata_path.join("compute_ctl_temp_override.conf"); let mut file = File::create(path)?; write!(file, "{}", options)?; - Ok(()) -} -/// remove file compute_ctl_temp_override.conf in pgdata_dir -pub fn compute_ctl_temp_override_remove(pgdata_path: &Path) -> Result<()> { - let path = pgdata_path.join("compute_ctl_temp_override.conf"); - std::fs::remove_file(path)?; - Ok(()) + let res = exec(); + + file.set_len(0)?; + + res } diff --git a/compute_tools/src/http/api.rs b/compute_tools/src/http/api.rs index 0286429cf2..43d29402bc 100644 --- a/compute_tools/src/http/api.rs +++ b/compute_tools/src/http/api.rs @@ -17,7 +17,7 @@ use hyper::header::CONTENT_TYPE; use hyper::service::{make_service_fn, service_fn}; use hyper::{Body, Method, Request, Response, Server, StatusCode}; use tokio::task; -use tracing::{error, info, warn}; +use tracing::{debug, error, info, warn}; use tracing_utils::http::OtelName; use utils::http::request::must_get_query_param; @@ -48,7 +48,7 @@ async fn routes(req: Request, compute: &Arc) -> Response { - info!("serving /status GET request"); + debug!("serving /status GET request"); let state = compute.state.lock().unwrap(); let status_response = status_response_from_state(&state); Response::new(Body::from(serde_json::to_string(&status_response).unwrap())) diff --git a/libs/vm_monitor/src/cgroup.rs b/libs/vm_monitor/src/cgroup.rs index 7160a42df2..3223765016 100644 --- a/libs/vm_monitor/src/cgroup.rs +++ b/libs/vm_monitor/src/cgroup.rs @@ -25,6 +25,8 @@ pub struct Config { /// /// For simplicity, this value must be greater than or equal to `memory_history_len`. memory_history_log_interval: usize, + /// The max number of iterations to skip before logging the next iteration + memory_history_log_noskip_interval: Duration, } impl Default for Config { @@ -33,6 +35,7 @@ impl Default for Config { memory_poll_interval: Duration::from_millis(100), memory_history_len: 5, // use 500ms of history for decision-making memory_history_log_interval: 20, // but only log every ~2s (otherwise it's spammy) + memory_history_log_noskip_interval: Duration::from_secs(15), // but only if it's changed, or 60 seconds have passed } } } @@ -85,7 +88,12 @@ impl CgroupWatcher { // buffer for samples that will be logged. once full, it remains so. 
let history_log_len = self.config.memory_history_log_interval; + let max_skip = self.config.memory_history_log_noskip_interval; let mut history_log_buf = vec![MemoryStatus::zeroed(); history_log_len]; + let mut last_logged_memusage = MemoryStatus::zeroed(); + + // Ensure that we're tracking a value that's definitely in the past, as Instant::now is only guaranteed to be non-decreasing on Rust's T1-supported systems. + let mut can_skip_logs_until = Instant::now() - max_skip; for t in 0_u64.. { ticker.tick().await; @@ -115,12 +123,24 @@ impl CgroupWatcher { // equal to the logging interval, we can just log the entire buffer every time we set // the last entry, which also means that for this log line, we can ignore that it's a // ring buffer (because all the entries are in order of increasing time). - if i == history_log_len - 1 { + // + // We skip logging the data if data hasn't meaningfully changed in a while, unless + // we've already ignored previous iterations for the last max_skip period. + if i == history_log_len - 1 + && (now > can_skip_logs_until + || !history_log_buf + .iter() + .all(|usage| last_logged_memusage.status_is_close_or_similar(usage))) + { info!( history = ?MemoryStatus::debug_slice(&history_log_buf), summary = ?summary, "Recent cgroup memory statistics history" ); + + can_skip_logs_until = now + max_skip; + + last_logged_memusage = *history_log_buf.last().unwrap(); } updates @@ -232,6 +252,24 @@ impl MemoryStatus { DS(slice) } + + /// Check if the other memory status is a close or similar result. + /// Returns true if the larger value is not larger than the smaller value + /// by 1/8 of the smaller value, and within 128MiB. + /// See tests::check_similarity_behaviour for examples of behaviour + fn status_is_close_or_similar(&self, other: &MemoryStatus) -> bool { + let margin; + let diff; + if self.non_reclaimable >= other.non_reclaimable { + margin = other.non_reclaimable / 8; + diff = self.non_reclaimable - other.non_reclaimable; + } else { + margin = self.non_reclaimable / 8; + diff = other.non_reclaimable - self.non_reclaimable; + } + + diff < margin && diff < 128 * 1024 * 1024 + } } #[cfg(test)] @@ -261,4 +299,65 @@ mod tests { assert_eq!(values(2, 4), [9, 0, 1, 2]); assert_eq!(values(2, 10), [3, 4, 5, 6, 7, 8, 9, 0, 1, 2]); } + + #[test] + fn check_similarity_behaviour() { + // This all accesses private methods, so we can't actually run this + // as doctests, because doctests run as an external crate. + let mut small = super::MemoryStatus { + non_reclaimable: 1024, + }; + let mut large = super::MemoryStatus { + non_reclaimable: 1024 * 1024 * 1024 * 1024, + }; + + // objects are self-similar, no matter the size + assert!(small.status_is_close_or_similar(&small)); + assert!(large.status_is_close_or_similar(&large)); + + // inequality is symmetric + assert!(!small.status_is_close_or_similar(&large)); + assert!(!large.status_is_close_or_similar(&small)); + + small.non_reclaimable = 64; + large.non_reclaimable = (small.non_reclaimable / 8) * 9; + + // objects are self-similar, no matter the size + assert!(small.status_is_close_or_similar(&small)); + assert!(large.status_is_close_or_similar(&large)); + + // values are similar if the larger value is larger by less than + // 12.5%, i.e. 1/8 of the smaller value. + // In the example above, large is exactly 12.5% larger, so this doesn't + // match. 
+ assert!(!small.status_is_close_or_similar(&large)); + assert!(!large.status_is_close_or_similar(&small)); + + large.non_reclaimable -= 1; + assert!(large.status_is_close_or_similar(&large)); + + assert!(small.status_is_close_or_similar(&large)); + assert!(large.status_is_close_or_similar(&small)); + + // The 1/8 rule only applies up to 128MiB of difference + small.non_reclaimable = 1024 * 1024 * 1024 * 1024; + large.non_reclaimable = small.non_reclaimable / 8 * 9; + assert!(small.status_is_close_or_similar(&small)); + assert!(large.status_is_close_or_similar(&large)); + + assert!(!small.status_is_close_or_similar(&large)); + assert!(!large.status_is_close_or_similar(&small)); + // the large value is put just above the threshold + large.non_reclaimable = small.non_reclaimable + 128 * 1024 * 1024; + assert!(large.status_is_close_or_similar(&large)); + + assert!(!small.status_is_close_or_similar(&large)); + assert!(!large.status_is_close_or_similar(&small)); + // now below + large.non_reclaimable -= 1; + assert!(large.status_is_close_or_similar(&large)); + + assert!(small.status_is_close_or_similar(&large)); + assert!(large.status_is_close_or_similar(&small)); + } } diff --git a/libs/vm_monitor/src/dispatcher.rs b/libs/vm_monitor/src/dispatcher.rs index c76baf04e7..6a965ace9b 100644 --- a/libs/vm_monitor/src/dispatcher.rs +++ b/libs/vm_monitor/src/dispatcher.rs @@ -12,11 +12,11 @@ use futures::{ stream::{SplitSink, SplitStream}, SinkExt, StreamExt, }; -use tracing::info; +use tracing::{debug, info}; use crate::protocol::{ - OutboundMsg, ProtocolRange, ProtocolResponse, ProtocolVersion, PROTOCOL_MAX_VERSION, - PROTOCOL_MIN_VERSION, + OutboundMsg, OutboundMsgKind, ProtocolRange, ProtocolResponse, ProtocolVersion, + PROTOCOL_MAX_VERSION, PROTOCOL_MIN_VERSION, }; /// The central handler for all communications in the monitor. @@ -118,7 +118,12 @@ impl Dispatcher { /// serialize the wrong thing and send it, since `self.sink.send` will take /// any string. pub async fn send(&mut self, message: OutboundMsg) -> anyhow::Result<()> { - info!(?message, "sending message"); + if matches!(&message.inner, OutboundMsgKind::HealthCheck { .. }) { + debug!(?message, "sending message"); + } else { + info!(?message, "sending message"); + } + let json = serde_json::to_string(&message).context("failed to serialize message")?; self.sink .send(Message::Text(json)) diff --git a/libs/vm_monitor/src/runner.rs b/libs/vm_monitor/src/runner.rs index ca02637ecf..36f8573a38 100644 --- a/libs/vm_monitor/src/runner.rs +++ b/libs/vm_monitor/src/runner.rs @@ -12,7 +12,7 @@ use axum::extract::ws::{Message, WebSocket}; use futures::StreamExt; use tokio::sync::{broadcast, watch}; use tokio_util::sync::CancellationToken; -use tracing::{error, info, warn}; +use tracing::{debug, error, info, warn}; use crate::cgroup::{self, CgroupWatcher}; use crate::dispatcher::Dispatcher; @@ -474,26 +474,29 @@ impl Runner { // there is a message from the agent msg = self.dispatcher.source.next() => { if let Some(msg) = msg { - // Don't use 'message' as a key as the string also uses - // that for its key - info!(?msg, "received message"); - match msg { + match &msg { Ok(msg) => { let message: InboundMsg = match msg { Message::Text(text) => { - serde_json::from_str(&text).context("failed to deserialize text message")? + serde_json::from_str(text).context("failed to deserialize text message")? 
} other => { warn!( // Don't use 'message' as a key as the // string also uses that for its key msg = ?other, - "agent should only send text messages but received different type" + "problem processing incoming message: agent should only send text messages but received different type" ); continue }, }; + if matches!(&message.inner, InboundMsgKind::HealthCheck { .. }) { + debug!(?msg, "received message"); + } else { + info!(?msg, "received message"); + } + let out = match self.process_message(message.clone()).await { Ok(Some(out)) => out, Ok(None) => continue, @@ -517,7 +520,11 @@ impl Runner { .await .context("failed to send message")?; } - Err(e) => warn!("{e}"), + Err(e) => warn!( + error = format!("{e}"), + msg = ?msg, + "received error message" + ), } } else { anyhow::bail!("dispatcher connection closed") diff --git a/vm-image-spec.yaml b/vm-image-spec.yaml index 99164645a7..3c446ecdea 100644 --- a/vm-image-spec.yaml +++ b/vm-image-spec.yaml @@ -324,14 +324,15 @@ files: help: 'Whether or not the replication slot wal_status is lost' key_labels: - slot_name - values: [wal_status_is_lost] + values: [wal_is_lost] query: | SELECT slot_name, CASE WHEN wal_status = 'lost' THEN 1 ELSE 0 - END AS wal_status_is_lost + END AS wal_is_lost FROM pg_replication_slots; + - filename: neon_collector_autoscaling.yml content: | collector_name: neon_collector_autoscaling From 6c6a7f9acee6ee785c9993837bd6be349c294a92 Mon Sep 17 00:00:00 2001 From: Christian Schwarz Date: Tue, 18 Jun 2024 09:42:22 +0200 Subject: [PATCH 196/220] [v2] Include openssl and ICU statically linked (#8074) We had to revert the earlier static linking change due to libicu version incompatibilities: - original PR: https://github.com/neondatabase/neon/pull/7956 - revert PR: https://github.com/neondatabase/neon/pull/8003 Specifically, the problem manifests for existing projects as error ``` DETAIL: The collation in the database was created using version 153.120.42, but the operating system provides version 153.14.37. ``` So, this PR reintroduces the original change but with the exact same libicu version as in Debian `bullseye`, i.e., the libicu version that we're using today. This avoids the version incompatibility. Additional changes made by Christian ==================================== - `hashFiles` can take multiple arguments, use that feature - validation of the libicu tarball checksum - parallel build (`-j $(nproc)`) for openssl and libicu Follow-ups ========== Debian bullseye has a few patches on top of libicu: https://sources.debian.org/patches/icu/67.1-7/ We still decide whether we need to include these patches or not. => https://github.com/neondatabase/cloud/issues/14527 Eventually, we'll have to figure out an upgrade story for libicu. That work is tracked in epic https://github.com/neondatabase/cloud/issues/14525. The OpenSSL version in this PR is arbitrary. We should use `1.1.1w` + Debian patches if applicable. See https://github.com/neondatabase/cloud/issues/14526. 
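
For reference, the version-mismatch condition quoted in the problem statement can be observed from SQL before it bites. The following is a minimal sketch, not part of this patch: it assumes psycopg2 and a reachable database, and uses the `pg_collation.collversion` / `pg_collation_actual_version()` catalog facilities; a non-empty result is exactly the situation that pinning libicu 67.1 is meant to avoid.

```python
# Sketch: list ICU collations whose recorded version differs from what the
# currently linked libicu reports.
import psycopg2

QUERY = """
SELECT collname,
       collversion                      AS recorded_version,
       pg_collation_actual_version(oid) AS provided_version
FROM pg_collation
WHERE collprovider = 'i'   -- ICU collations only
  AND collversion IS DISTINCT FROM pg_collation_actual_version(oid);
"""

def report_collation_mismatches(dsn: str) -> None:
    with psycopg2.connect(dsn) as conn, conn.cursor() as cur:
        cur.execute(QUERY)
        for name, recorded, provided in cur.fetchall():
            print(f"{name}: created with {recorded}, libicu now provides {provided}")

if __name__ == "__main__":
    report_collation_mismatches("dbname=postgres")  # connection string is a placeholder
```
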
Longer-term: * https://github.com/neondatabase/cloud/issues/14519 * https://github.com/neondatabase/cloud/issues/14525 Refs ==== Co-authored-by: Christian Schwarz refs https://github.com/neondatabase/cloud/issues/12648 --------- Co-authored-by: Rahul Patil --- .github/workflows/build_and_test.yml | 6 ++-- Dockerfile | 2 -- Dockerfile.build-tools | 42 ++++++++++++++++++++++++++++ Makefile | 15 +++++++++- 4 files changed, 59 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index bd2996ec4c..742716776e 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -299,21 +299,21 @@ jobs: uses: actions/cache@v4 with: path: pg_install/v14 - key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v14_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }} + key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v14_rev.outputs.pg_rev }}-${{ hashFiles('Makefile', 'Dockerfile.build-tools') }} - name: Cache postgres v15 build id: cache_pg_15 uses: actions/cache@v4 with: path: pg_install/v15 - key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v15_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }} + key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v15_rev.outputs.pg_rev }}-${{ hashFiles('Makefile', 'Dockerfile.build-tools') }} - name: Cache postgres v16 build id: cache_pg_16 uses: actions/cache@v4 with: path: pg_install/v16 - key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v16_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }} + key: v1-${{ runner.os }}-${{ matrix.build_type }}-pg-${{ steps.pg_v16_rev.outputs.pg_rev }}-${{ hashFiles('Makefile', 'Dockerfile.build-tools') }} - name: Build postgres v14 if: steps.cache_pg_14.outputs.cache-hit != 'true' diff --git a/Dockerfile b/Dockerfile index 5f82df3e18..b4900d4a94 100644 --- a/Dockerfile +++ b/Dockerfile @@ -69,8 +69,6 @@ RUN set -e \ && apt install -y \ libreadline-dev \ libseccomp-dev \ - libicu67 \ - openssl \ ca-certificates \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \ && useradd -d /data neon \ diff --git a/Dockerfile.build-tools b/Dockerfile.build-tools index e7c61ace0e..5dd2c13c0e 100644 --- a/Dockerfile.build-tools +++ b/Dockerfile.build-tools @@ -112,6 +112,45 @@ RUN for package in Capture::Tiny DateTime Devel::Cover Digest::MD5 File::Spec JS && make install \ && rm -rf ../lcov.tar.gz +# Compile and install the static OpenSSL library +ENV OPENSSL_VERSION=3.2.2 +ENV OPENSSL_PREFIX=/usr/local/openssl +RUN wget -O /tmp/openssl-${OPENSSL_VERSION}.tar.gz https://www.openssl.org/source/openssl-${OPENSSL_VERSION}.tar.gz && \ + echo "197149c18d9e9f292c43f0400acaba12e5f52cacfe050f3d199277ea738ec2e7 /tmp/openssl-${OPENSSL_VERSION}.tar.gz" | sha256sum --check && \ + cd /tmp && \ + tar xzvf /tmp/openssl-${OPENSSL_VERSION}.tar.gz && \ + rm /tmp/openssl-${OPENSSL_VERSION}.tar.gz && \ + cd /tmp/openssl-${OPENSSL_VERSION} && \ + ./config --prefix=${OPENSSL_PREFIX} -static --static no-shared -fPIC && \ + make -j "$(nproc)" && \ + make install && \ + cd /tmp && \ + rm -rf /tmp/openssl-${OPENSSL_VERSION} + +# Use the same version of libicu as the compute nodes so that +# clusters created using inidb on pageserver can be used by computes. +# +# TODO: at this time, Dockerfile.compute-node uses the debian bullseye libicu +# package, which is 67.1. We're duplicating that knowledge here, and also, technically, +# Debian has a few patches on top of 67.1 that we're not adding here. 
+ENV ICU_VERSION=67.1 +ENV ICU_PREFIX=/usr/local/icu + +# Download and build static ICU +RUN wget -O /tmp/libicu-${ICU_VERSION}.tgz https://github.com/unicode-org/icu/releases/download/release-${ICU_VERSION//./-}/icu4c-${ICU_VERSION//./_}-src.tgz && \ + echo "94a80cd6f251a53bd2a997f6f1b5ac6653fe791dfab66e1eb0227740fb86d5dc /tmp/libicu-${ICU_VERSION}.tgz" | sha256sum --check && \ + mkdir /tmp/icu && \ + pushd /tmp/icu && \ + tar -xzf /tmp/libicu-${ICU_VERSION}.tgz && \ + pushd icu/source && \ + ./configure --prefix=${ICU_PREFIX} --enable-static --enable-shared=no CXXFLAGS="-fPIC" CFLAGS="-fPIC" && \ + make -j "$(nproc)" && \ + make install && \ + popd && \ + rm -rf icu && \ + rm -f /tmp/libicu-${ICU_VERSION}.tgz && \ + popd + # Switch to nonroot user USER nonroot:nonroot WORKDIR /home/nonroot @@ -170,3 +209,6 @@ RUN whoami \ && rustup --version --verbose \ && rustc --version --verbose \ && clang --version + +# Set following flag to check in Makefile if its running in Docker +RUN touch /home/nonroot/.docker_build diff --git a/Makefile b/Makefile index 37bd19ba44..942867d81a 100644 --- a/Makefile +++ b/Makefile @@ -3,6 +3,9 @@ ROOT_PROJECT_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) # Where to install Postgres, default is ./pg_install, maybe useful for package managers POSTGRES_INSTALL_DIR ?= $(ROOT_PROJECT_DIR)/pg_install/ +OPENSSL_PREFIX_DIR := /usr/local/openssl +ICU_PREFIX_DIR := /usr/local/icu + # # We differentiate between release / debug build types using the BUILD_TYPE # environment variable. @@ -20,6 +23,16 @@ else $(error Bad build type '$(BUILD_TYPE)', see Makefile for options) endif +ifeq ($(shell test -e /home/nonroot/.docker_build && echo -n yes),yes) + # Exclude static build openssl, icu for local build (MacOS, Linux) + # Only keep for build type release and debug + PG_CFLAGS += -I$(OPENSSL_PREFIX_DIR)/include + PG_CONFIGURE_OPTS += --with-icu + PG_CONFIGURE_OPTS += ICU_CFLAGS='-I/$(ICU_PREFIX_DIR)/include -DU_STATIC_IMPLEMENTATION' + PG_CONFIGURE_OPTS += ICU_LIBS='-L$(ICU_PREFIX_DIR)/lib -L$(ICU_PREFIX_DIR)/lib64 -licui18n -licuuc -licudata -lstdc++ -Wl,-Bdynamic -lm' + PG_CONFIGURE_OPTS += LDFLAGS='-L$(OPENSSL_PREFIX_DIR)/lib -L$(OPENSSL_PREFIX_DIR)/lib64 -L$(ICU_PREFIX_DIR)/lib -L$(ICU_PREFIX_DIR)/lib64 -Wl,-Bstatic -lssl -lcrypto -Wl,-Bdynamic -lrt -lm -ldl -lpthread' +endif + UNAME_S := $(shell uname -s) ifeq ($(UNAME_S),Linux) # Seccomp BPF is only available for Linux @@ -28,7 +41,7 @@ else ifeq ($(UNAME_S),Darwin) ifndef DISABLE_HOMEBREW # macOS with brew-installed openssl requires explicit paths # It can be configured with OPENSSL_PREFIX variable - OPENSSL_PREFIX ?= $(shell brew --prefix openssl@3) + OPENSSL_PREFIX := $(shell brew --prefix openssl@3) PG_CONFIGURE_OPTS += --with-includes=$(OPENSSL_PREFIX)/include --with-libraries=$(OPENSSL_PREFIX)/lib PG_CONFIGURE_OPTS += PKG_CONFIG_PATH=$(shell brew --prefix icu4c)/lib/pkgconfig # macOS already has bison and flex in the system, but they are old and result in postgres-v14 target failure From ed9ffb9af2ce30eff88e9c6fcfe0c315d69e025b Mon Sep 17 00:00:00 2001 From: John Spray Date: Tue, 18 Jun 2024 13:44:30 +0100 Subject: [PATCH 197/220] pageserver: eliminate CalculateSyntheticSizeError::LsnNotFound (`test_metric_collection` flake) (#8065) ## Problem ``` ERROR synthetic_size_worker: failed to calculate synthetic size for tenant ae449af30216ac56d2c1173f894b1122: Could not find size at 0/218CA70 in timeline d8da32b5e3e0bf18cfdb560f9de29638\n') ``` e.g. 
https://neon-github-public-dev.s3.amazonaws.com/reports/main/9518948590/index.html#/testresult/30a6d1e2471d2775 This test had allow lists but was disrupted by https://github.com/neondatabase/neon/pull/8051. In that PR, I had kept an error path in fill_logical_sizes that covered the case where we couldn't find sizes for some of the segments, but that path could only be hit in the case that some Timeline was shut down concurrently with a synthetic size calculation, so it makes sense to just leave the segment's size None in this case: the subsequent size calculations do not assume it is Some. ## Summary of changes - Remove `CalculateSyntheticSizeError::LsnNotFound` and just proceed in the case where we used to return it - Remove defunct allow list entries in `test_metric_collection` --- pageserver/src/tenant/size.rs | 11 ++--------- .../regress/test_pageserver_metric_collection.py | 6 ------ 2 files changed, 2 insertions(+), 15 deletions(-) diff --git a/pageserver/src/tenant/size.rs b/pageserver/src/tenant/size.rs index cdd5b0cbe7..b2338b620e 100644 --- a/pageserver/src/tenant/size.rs +++ b/pageserver/src/tenant/size.rs @@ -60,10 +60,6 @@ pub(crate) enum CalculateSyntheticSizeError { #[error(transparent)] Fatal(anyhow::Error), - /// The LSN we are trying to calculate a size at no longer exists at the point we query it - #[error("Could not find size at {lsn} in timeline {timeline_id}")] - LsnNotFound { timeline_id: TimelineId, lsn: Lsn }, - /// Tenant shut down while calculating size #[error("Cancelled")] Cancelled, @@ -375,9 +371,8 @@ pub(super) async fn gather_inputs( /// Augment 'segments' with logical sizes /// -/// this will probably conflict with on-demand downloaded layers, or at least force them all -/// to be downloaded -/// +/// This will leave segments' sizes as None if the Timeline associated with the segment is deleted concurrently +/// (i.e. we cannot read its logical size at a particular LSN). 
async fn fill_logical_sizes( timelines: &[Arc], segments: &mut [SegmentMeta], @@ -498,8 +493,6 @@ async fn fill_logical_sizes( if let Some(Some(size)) = sizes_needed.get(&(timeline_id, lsn)) { seg.segment.size = Some(*size); - } else { - return Err(CalculateSyntheticSizeError::LsnNotFound { timeline_id, lsn }); } } Ok(()) diff --git a/test_runner/regress/test_pageserver_metric_collection.py b/test_runner/regress/test_pageserver_metric_collection.py index b0465f2a96..cea35a6acb 100644 --- a/test_runner/regress/test_pageserver_metric_collection.py +++ b/test_runner/regress/test_pageserver_metric_collection.py @@ -75,9 +75,6 @@ def test_metric_collection( env.pageserver.allowed_errors.extend( [ ".*metrics endpoint refused the sent metrics*", - # we have a fast rate of calculation, these can happen at shutdown - ".*synthetic_size_worker:calculate_synthetic_size.*:gather_size_inputs.*: failed to calculate logical size at .*: cancelled.*", - ".*synthetic_size_worker: failed to calculate synthetic size for tenant .*: failed to calculate some logical_sizes", ".*metrics_collection: failed to upload to S3: Failed to upload data of length .* to storage path.*", ] ) @@ -238,9 +235,6 @@ def test_metric_collection_cleans_up_tempfile( env.pageserver.allowed_errors.extend( [ ".*metrics endpoint refused the sent metrics*", - # we have a fast rate of calculation, these can happen at shutdown - ".*synthetic_size_worker:calculate_synthetic_size.*:gather_size_inputs.*: failed to calculate logical size at .*: cancelled.*", - ".*synthetic_size_worker: failed to calculate synthetic size for tenant .*: failed to calculate some logical_sizes", ] ) From d8b2a49c5574873eebeaf97cdc9d6d6531ff6f51 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Mon, 3 Jun 2024 15:47:21 +0300 Subject: [PATCH 198/220] safekeeper: streaming pull_timeline - Add /snapshot http endpoing streaming tar archive timeline contents up to flush_lsn. - Add check that term doesn't change, corresponding test passes now. - Also prepares infra to hold off WAL removal during the basebackup. - Sprinkle fsyncs to persist the pull_timeline result. 
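
To make the data flow of the new snapshot endpoint concrete, here is a minimal consumer sketch. It is an illustration only: the endpoint path and the fsync-every-file behaviour come from this patch, while the HTTP client, helper name and buffer size are assumptions (the real consumer is the Rust pull_timeline code below).

```python
# Sketch: stream the tar snapshot of a timeline and persist each extracted file.
import os
import tarfile
import requests  # assumption: any streaming HTTP client would do

def pull_snapshot(sk_http: str, tenant_id: str, timeline_id: str, dest_dir: str) -> None:
    url = f"{sk_http}/v1/tenant/{tenant_id}/timeline/{timeline_id}/snapshot"
    with requests.get(url, stream=True) as resp:
        resp.raise_for_status()
        # "r|*" reads the archive strictly as a forward-only stream.
        with tarfile.open(fileobj=resp.raw, mode="r|*") as archive:
            for entry in archive:
                if not entry.isreg():
                    raise RuntimeError(f"unexpected entry type for {entry.name}")
                src = archive.extractfile(entry)
                with open(os.path.join(dest_dir, entry.name), "wb") as out:
                    while chunk := src.read(1024 * 1024):
                        out.write(chunk)
                    out.flush()
                    os.fsync(out.fileno())  # persist each file before moving on
    # The destination directory and the final rename into place still need to be
    # fsynced by the caller, as the Rust code does with durable_rename.
```

Note that the server re-checks term and last_log_term in finish_snapshot before terminating the archive, so a client that sees an abruptly ended or unparsable archive should treat the pull as failed and retry rather than install a mix of WAL from different histories.
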
ref https://github.com/neondatabase/neon/issues/6340 --- Cargo.lock | 1 + safekeeper/Cargo.toml | 1 + safekeeper/src/control_file.rs | 2 +- safekeeper/src/http/routes.rs | 121 ++++---- safekeeper/src/pull_timeline.rs | 351 +++++++++++++++++------ safekeeper/src/safekeeper.rs | 3 + safekeeper/src/timeline.rs | 4 +- safekeeper/src/wal_storage.rs | 52 ++-- test_runner/regress/test_wal_acceptor.py | 27 +- 9 files changed, 367 insertions(+), 195 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1c8a8b0c0f..5eac648fd9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5158,6 +5158,7 @@ dependencies = [ "tokio-io-timeout", "tokio-postgres", "tokio-stream", + "tokio-tar", "tokio-util", "toml_edit", "tracing", diff --git a/safekeeper/Cargo.toml b/safekeeper/Cargo.toml index c8b732fee1..a650d5e207 100644 --- a/safekeeper/Cargo.toml +++ b/safekeeper/Cargo.toml @@ -46,6 +46,7 @@ tokio = { workspace = true, features = ["fs"] } tokio-util = { workspace = true } tokio-io-timeout.workspace = true tokio-postgres.workspace = true +tokio-tar.workspace = true toml_edit.workspace = true tracing.workspace = true url.workspace = true diff --git a/safekeeper/src/control_file.rs b/safekeeper/src/control_file.rs index e9bb5202da..9d65187350 100644 --- a/safekeeper/src/control_file.rs +++ b/safekeeper/src/control_file.rs @@ -23,7 +23,7 @@ pub const SK_MAGIC: u32 = 0xcafeceefu32; pub const SK_FORMAT_VERSION: u32 = 8; // contains persistent metadata for safekeeper -const CONTROL_FILE_NAME: &str = "safekeeper.control"; +pub const CONTROL_FILE_NAME: &str = "safekeeper.control"; // needed to atomically update the state using `rename` const CONTROL_FILE_NAME_PARTIAL: &str = "safekeeper.control.partial"; pub const CHECKSUM_SIZE: usize = std::mem::size_of::(); diff --git a/safekeeper/src/http/routes.rs b/safekeeper/src/http/routes.rs index 1e29b21fac..40ac2c105d 100644 --- a/safekeeper/src/http/routes.rs +++ b/safekeeper/src/http/routes.rs @@ -1,38 +1,25 @@ use hyper::{Body, Request, Response, StatusCode, Uri}; - use once_cell::sync::Lazy; -use postgres_ffi::WAL_SEGMENT_SIZE; -use safekeeper_api::models::{SkTimelineInfo, TimelineCopyRequest}; use serde::{Deserialize, Serialize}; use std::collections::{HashMap, HashSet}; use std::fmt; +use std::io::Write as _; use std::str::FromStr; use std::sync::Arc; use storage_broker::proto::SafekeeperTimelineInfo; use storage_broker::proto::TenantTimelineId as ProtoTenantTimelineId; -use tokio::fs::File; -use tokio::io::AsyncReadExt; +use tokio::sync::mpsc; +use tokio::task; +use tokio_stream::wrappers::ReceiverStream; use tokio_util::sync::CancellationToken; +use tracing::{info_span, Instrument}; use utils::failpoint_support::failpoints_handler; +use utils::http::endpoint::{prometheus_metrics_handler, request_span, ChannelWriter}; use utils::http::request::parse_query_param; -use std::io::Write as _; -use tokio::sync::mpsc; -use tokio_stream::wrappers::ReceiverStream; -use tracing::{info_span, Instrument}; -use utils::http::endpoint::{prometheus_metrics_handler, request_span, ChannelWriter}; - -use crate::debug_dump::TimelineDigestRequest; -use crate::receive_wal::WalReceiverState; -use crate::safekeeper::Term; -use crate::safekeeper::{ServerInfo, TermLsn}; -use crate::send_wal::WalSenderState; -use crate::timeline::PeerInfo; -use crate::{copy_timeline, debug_dump, patch_control_file, pull_timeline}; - -use crate::timelines_global_map::TimelineDeleteForceResult; -use crate::GlobalTimelines; -use crate::SafeKeeperConf; +use postgres_ffi::WAL_SEGMENT_SIZE; +use 
safekeeper_api::models::TimelineCreateRequest; +use safekeeper_api::models::{SkTimelineInfo, TimelineCopyRequest}; use utils::{ auth::SwappableJwtAuth, http::{ @@ -46,7 +33,16 @@ use utils::{ lsn::Lsn, }; -use super::models::TimelineCreateRequest; +use crate::debug_dump::TimelineDigestRequest; +use crate::receive_wal::WalReceiverState; +use crate::safekeeper::Term; +use crate::safekeeper::{ServerInfo, TermLsn}; +use crate::send_wal::WalSenderState; +use crate::timeline::PeerInfo; +use crate::timelines_global_map::TimelineDeleteForceResult; +use crate::GlobalTimelines; +use crate::SafeKeeperConf; +use crate::{copy_timeline, debug_dump, patch_control_file, pull_timeline}; #[derive(Debug, Serialize)] struct SafekeeperStatus { @@ -206,6 +202,42 @@ async fn timeline_pull_handler(mut request: Request) -> Result) -> Result, ApiError> { + let ttid = TenantTimelineId::new( + parse_request_param(&request, "tenant_id")?, + parse_request_param(&request, "timeline_id")?, + ); + check_permission(&request, Some(ttid.tenant_id))?; + + let tli = GlobalTimelines::get(ttid).map_err(ApiError::from)?; + // Note: with evicted timelines it should work better then de-evict them and + // stream; probably start_snapshot would copy partial s3 file to dest path + // and stream control file, or return FullAccessTimeline if timeline is not + // evicted. + let tli = tli + .full_access_guard() + .await + .map_err(ApiError::InternalServerError)?; + + // To stream the body use wrap_stream which wants Stream of Result, + // so create the chan and write to it in another task. + let (tx, rx) = mpsc::channel(1); + + task::spawn(pull_timeline::stream_snapshot(tli, tx)); + + let rx_stream = ReceiverStream::new(rx); + let body = Body::wrap_stream(rx_stream); + + let response = Response::builder() + .status(200) + .header(hyper::header::CONTENT_TYPE, "application/octet-stream") + .body(body) + .unwrap(); + + Ok(response) +} + async fn timeline_copy_handler(mut request: Request) -> Result, ApiError> { check_permission(&request, None)?; @@ -260,41 +292,6 @@ async fn timeline_digest_handler(request: Request) -> Result) -> Result, ApiError> { - let ttid = TenantTimelineId::new( - parse_request_param(&request, "tenant_id")?, - parse_request_param(&request, "timeline_id")?, - ); - check_permission(&request, Some(ttid.tenant_id))?; - - let filename: String = parse_request_param(&request, "filename")?; - - let tli = GlobalTimelines::get(ttid).map_err(ApiError::from)?; - let tli = tli - .full_access_guard() - .await - .map_err(ApiError::InternalServerError)?; - - let filepath = tli.get_timeline_dir().join(filename); - let mut file = File::open(&filepath) - .await - .map_err(|e| ApiError::InternalServerError(e.into()))?; - - let mut content = Vec::new(); - // TODO: don't store files in memory - file.read_to_end(&mut content) - .await - .map_err(|e| ApiError::InternalServerError(e.into()))?; - - Response::builder() - .status(StatusCode::OK) - .header("Content-Type", "application/octet-stream") - .body(Body::from(content)) - .map_err(|e| ApiError::InternalServerError(e.into())) -} - /// Force persist control file. 
async fn timeline_checkpoint_handler(request: Request) -> Result, ApiError> { check_permission(&request, None)?; @@ -566,13 +563,13 @@ pub fn make_router(conf: SafeKeeperConf) -> RouterBuilder .delete("/v1/tenant/:tenant_id", |r| { request_span(r, tenant_delete_handler) }) + .get( + "/v1/tenant/:tenant_id/timeline/:timeline_id/snapshot", + |r| request_span(r, timeline_snapshot_handler), + ) .post("/v1/pull_timeline", |r| { request_span(r, timeline_pull_handler) }) - .get( - "/v1/tenant/:tenant_id/timeline/:timeline_id/file/:filename", - |r| request_span(r, timeline_files_handler), - ) .post( "/v1/tenant/:tenant_id/timeline/:source_timeline_id/copy", |r| request_span(r, timeline_copy_handler), diff --git a/safekeeper/src/pull_timeline.rs b/safekeeper/src/pull_timeline.rs index 7b41c98cb8..4099a324f9 100644 --- a/safekeeper/src/pull_timeline.rs +++ b/safekeeper/src/pull_timeline.rs @@ -1,28 +1,223 @@ -use std::sync::Arc; - +use anyhow::{anyhow, bail, Context, Result}; +use bytes::Bytes; use camino::Utf8PathBuf; use camino_tempfile::Utf8TempDir; use chrono::{DateTime, Utc}; +use futures::{SinkExt, StreamExt, TryStreamExt}; +use postgres_ffi::{XLogFileName, XLogSegNo, PG_TLI}; use serde::{Deserialize, Serialize}; +use std::{ + cmp::min, + io::{self, ErrorKind}, + sync::Arc, +}; +use tokio::{ + fs::{File, OpenOptions}, + io::AsyncWrite, + sync::mpsc, +}; +use tokio_tar::{Archive, Builder}; +use tokio_util::{ + io::{CopyToBytes, SinkWriter}, + sync::PollSender, +}; +use tracing::{error, info, instrument}; -use anyhow::{bail, Context, Result}; -use tokio::io::AsyncWriteExt; -use tracing::info; +use crate::{ + control_file::{self, CONTROL_FILE_NAME}, + debug_dump, + http::routes::TimelineStatus, + safekeeper::Term, + timeline::{get_tenant_dir, get_timeline_dir, FullAccessTimeline, Timeline, TimelineError}, + wal_storage::{self, open_wal_file, Storage}, + GlobalTimelines, SafeKeeperConf, +}; use utils::{ + crashsafe::{durable_rename, fsync_async_opt}, id::{TenantId, TenantTimelineId, TimelineId}, lsn::Lsn, pausable_failpoint, }; -use crate::{ - control_file, debug_dump, - http::routes::TimelineStatus, - timeline::{get_tenant_dir, get_timeline_dir, Timeline, TimelineError}, - wal_storage::{self, Storage}, - GlobalTimelines, SafeKeeperConf, -}; +/// Stream tar archive of timeline to tx. +#[instrument(name = "snapshot", skip_all, fields(ttid = %tli.ttid))] +pub async fn stream_snapshot(tli: FullAccessTimeline, tx: mpsc::Sender>) { + if let Err(e) = stream_snapshot_guts(tli, tx.clone()).await { + // Error type/contents don't matter as they won't can't reach the client + // (hyper likely doesn't do anything with it), but http stream will be + // prematurely terminated. It would be nice to try to send the error in + // trailers though. + tx.send(Err(anyhow!("snapshot failed"))).await.ok(); + error!("snapshot failed: {:#}", e); + } +} -/// Info about timeline on safekeeper ready for reporting. +/// State needed while streaming the snapshot. +pub struct SnapshotContext { + pub from_segno: XLogSegNo, // including + pub upto_segno: XLogSegNo, // including + pub term: Term, + pub last_log_term: Term, + pub flush_lsn: Lsn, + pub wal_seg_size: usize, + // used to remove WAL hold off in Drop. 
+ pub tli: FullAccessTimeline, +} + +impl Drop for SnapshotContext { + fn drop(&mut self) { + // todo: spawn task removing WAL gc hold off + } +} + +pub async fn stream_snapshot_guts( + tli: FullAccessTimeline, + tx: mpsc::Sender>, +) -> Result<()> { + // tokio-tar wants Write implementor, but we have mpsc tx >; + // use SinkWriter as a Write impl. That is, + // - create Sink from the tx. It returns PollSendError if chan is closed. + let sink = PollSender::new(tx); + // - SinkWriter needs sink error to be io one, map it. + let sink_io_err = sink.sink_map_err(|_| io::Error::from(ErrorKind::BrokenPipe)); + // - SinkWriter wants sink type to be just Bytes, not Result, so map + // it with with(). Note that with() accepts async function which we don't + // need and allows the map to fail, which we don't need either, but hence + // two Oks. + let oksink = sink_io_err.with(|b: Bytes| async { io::Result::Ok(Result::Ok(b)) }); + // - SinkWriter (not surprisingly) wants sink of &[u8], not bytes, so wrap + // into CopyToBytes. This is a data copy. + let copy_to_bytes = CopyToBytes::new(oksink); + let mut writer = SinkWriter::new(copy_to_bytes); + let pinned_writer = std::pin::pin!(writer); + + // Note that tokio_tar append_* funcs use tokio::io::copy with 8KB buffer + // which is also likely suboptimal. + let mut ar = Builder::new_non_terminated(pinned_writer); + + let bctx = tli.start_snapshot(&mut ar).await?; + pausable_failpoint!("sk-snapshot-after-list-pausable"); + + let tli_dir = tli.get_timeline_dir(); + info!( + "sending {} segments [{:#X}-{:#X}], term={}, last_log_term={}, flush_lsn={}", + bctx.upto_segno - bctx.from_segno + 1, + bctx.from_segno, + bctx.upto_segno, + bctx.term, + bctx.last_log_term, + bctx.flush_lsn, + ); + for segno in bctx.from_segno..=bctx.upto_segno { + let (mut sf, is_partial) = open_wal_file(&tli_dir, segno, bctx.wal_seg_size).await?; + let mut wal_file_name = XLogFileName(PG_TLI, segno, bctx.wal_seg_size); + if is_partial { + wal_file_name.push_str(".partial"); + } + ar.append_file(&wal_file_name, &mut sf).await?; + } + + // Do the term check before ar.finish to make archive corrupted in case of + // term change. Client shouldn't ignore abrupt stream end, but to be sure. + tli.finish_snapshot(&bctx).await?; + + ar.finish().await?; + + Ok(()) +} + +impl FullAccessTimeline { + /// Start streaming tar archive with timeline: + /// 1) stream control file under lock; + /// 2) hold off WAL removal; + /// 3) collect SnapshotContext to understand which WAL segments should be + /// streamed. + /// + /// Snapshot streams data up to flush_lsn. To make this safe, we must check + /// that term doesn't change during the procedure, or we risk sending mix of + /// WAL from different histories. Term is remembered in the SnapshotContext + /// and checked in finish_snapshot. Note that in the last segment some WAL + /// higher than flush_lsn set here might be streamed; that's fine as long as + /// terms doesn't change. + /// + /// Alternatively we could send only up to commit_lsn to get some valid + /// state which later will be recovered by compute, in this case term check + /// is not needed, but we likely don't want that as there might be no + /// compute which could perform the recovery. + /// + /// When returned SnapshotContext is dropped WAL hold is removed. 
+ async fn start_snapshot( + &self, + ar: &mut tokio_tar::Builder, + ) -> Result { + let shared_state = self.read_shared_state().await; + + let cf_path = self.get_timeline_dir().join(CONTROL_FILE_NAME); + let mut cf = File::open(cf_path).await?; + ar.append_file(CONTROL_FILE_NAME, &mut cf).await?; + + // We need to stream since the oldest segment someone (s3 or pageserver) + // still needs. This duplicates calc_horizon_lsn logic. + let from_lsn = min( + shared_state.sk.state.remote_consistent_lsn, + shared_state.sk.state.backup_lsn, + ); + if from_lsn == Lsn::INVALID { + // this is possible if snapshot is called before handling first + // elected message + bail!("snapshot is called on uninitialized timeline"); + } + let from_segno = from_lsn.segment_number(shared_state.get_wal_seg_size()); + let term = shared_state.sk.get_term(); + let last_log_term = shared_state.sk.get_last_log_term(); + let flush_lsn = shared_state.sk.flush_lsn(); + let upto_segno = flush_lsn.segment_number(shared_state.get_wal_seg_size()); + // have some limit on max number of segments as a sanity check + const MAX_ALLOWED_SEGS: u64 = 1000; + let num_segs = upto_segno - from_segno + 1; + if num_segs > MAX_ALLOWED_SEGS { + bail!( + "snapshot is called on timeline with {} segments, but the limit is {}", + num_segs, + MAX_ALLOWED_SEGS + ); + } + + // TODO: set WAL hold off. + + let bctx = SnapshotContext { + from_segno, + upto_segno, + term, + last_log_term, + flush_lsn, + wal_seg_size: shared_state.get_wal_seg_size(), + tli: self.clone(), + }; + + Ok(bctx) + } + + /// Finish snapshotting: check that term(s) hasn't changed. + /// + /// Note that WAL gc hold off is removed in Drop of SnapshotContext to not + /// forget this if snapshotting fails mid the way. + pub async fn finish_snapshot(&self, bctx: &SnapshotContext) -> Result<()> { + let shared_state = self.read_shared_state().await; + let term = shared_state.sk.get_term(); + let last_log_term = shared_state.sk.get_last_log_term(); + // There are some cases to relax this check (e.g. last_log_term might + // change, but as long as older history is strictly part of new that's + // fine), but there is no need to do it. + if bctx.term != term || bctx.last_log_term != last_log_term { + bail!("term(s) changed during snapshot: were term={}, last_log_term={}, now term={}, last_log_term={}", + bctx.term, bctx.last_log_term, term, last_log_term); + } + Ok(()) + } +} + +/// pull_timeline request body. #[derive(Debug, Serialize, Deserialize)] pub struct Request { pub tenant_id: TenantId, @@ -72,13 +267,15 @@ pub async fn handle_request(request: Request) -> Result { let mut statuses = Vec::new(); for (i, response) in responses.into_iter().enumerate() { - let response = response.context(format!("Failed to get status from {}", http_hosts[i]))?; + let response = response.context(format!("fetching status from {}", http_hosts[i]))?; + response + .error_for_status_ref() + .context(format!("checking status from {}", http_hosts[i]))?; let status: crate::http::routes::TimelineStatus = response.json().await?; statuses.push((status, i)); } // Find the most advanced safekeeper - // TODO: current logic may be wrong, fix it later let (status, i) = statuses .into_iter() .max_by_key(|(status, _)| { @@ -111,95 +308,59 @@ async fn pull_timeline(status: TimelineStatus, host: String) -> Result let conf = &GlobalTimelines::get_global_config(); - let client = reqwest::Client::new(); - // TODO: don't use debug dump, it should be used only in tests. 
- // This is a proof of concept, we should figure out a way - // to use scp without implementing it manually. + let (_tmp_dir, tli_dir_path) = create_temp_timeline_dir(conf, ttid).await?; - // Implementing our own scp over HTTP. - // At first, we need to fetch list of files from safekeeper. - let dump: DebugDumpResponse = client + let client = reqwest::Client::new(); + + // Request stream with basebackup archive. + let bb_resp = client .get(format!( - "{}/v1/debug_dump?dump_all=true&tenant_id={}&timeline_id={}", + "{}/v1/tenant/{}/timeline/{}/snapshot", host, status.tenant_id, status.timeline_id )) .send() - .await? - .json() .await?; + bb_resp.error_for_status_ref()?; - if dump.timelines.len() != 1 { - bail!( - "expected to fetch single timeline, got {} timelines", - dump.timelines.len() - ); - } + // Make Stream of Bytes from it... + let bb_stream = bb_resp.bytes_stream().map_err(std::io::Error::other); + // and turn it into StreamReader implementing AsyncRead. + let bb_reader = tokio_util::io::StreamReader::new(bb_stream); - let timeline = dump.timelines.into_iter().next().unwrap(); - let disk_content = timeline.disk_content.ok_or(anyhow::anyhow!( - "timeline {} doesn't have disk content", - ttid - ))?; - - let mut filenames = disk_content - .files - .iter() - .map(|file| file.name.clone()) - .collect::>(); - - // Sort filenames to make sure we pull files in correct order - // After sorting, we should have: - // - 000000010000000000000001 - // - ... - // - 000000010000000000000002.partial - // - safekeeper.control - filenames.sort(); - - // safekeeper.control should be the first file, so we need to move it to the beginning - let control_file_index = filenames - .iter() - .position(|name| name == "safekeeper.control") - .ok_or(anyhow::anyhow!("safekeeper.control not found"))?; - filenames.remove(control_file_index); - filenames.insert(0, "safekeeper.control".to_string()); - - pausable_failpoint!("sk-pull-timeline-after-list-pausable"); - - info!( - "downloading {} files from safekeeper {}", - filenames.len(), - host - ); - - let (_tmp_dir, tli_dir_path) = create_temp_timeline_dir(conf, ttid).await?; - - // Note: some time happens between fetching list of files and fetching files themselves. - // It's possible that some files will be removed from safekeeper and we will fail to fetch them. - // This function will fail in this case, should be retried by the caller. - for filename in filenames { - let file_path = tli_dir_path.join(&filename); - // /v1/tenant/:tenant_id/timeline/:timeline_id/file/:filename - let http_url = format!( - "{}/v1/tenant/{}/timeline/{}/file/{}", - host, status.tenant_id, status.timeline_id, filename - ); - - let mut file = tokio::fs::File::create(&file_path).await?; - let mut response = client.get(&http_url).send().await?; - if response.status() != reqwest::StatusCode::OK { - bail!( - "pulling file {} failed: status is {}", - filename, - response.status() - ); - } - while let Some(chunk) = response.chunk().await? { - file.write_all(&chunk).await?; - file.flush().await?; + // Extract it on the fly to the disk. We don't use simple unpack() to fsync + // files. 
+ let mut entries = Archive::new(bb_reader).entries()?; + while let Some(base_tar_entry) = entries.next().await { + let mut entry = base_tar_entry?; + let header = entry.header(); + let file_path = header.path()?.into_owned(); + match header.entry_type() { + tokio_tar::EntryType::Regular => { + let utf8_file_path = + Utf8PathBuf::from_path_buf(file_path).expect("non-Unicode path"); + let dst_path = tli_dir_path.join(utf8_file_path); + let mut f = OpenOptions::new() + .create(true) + .truncate(true) + .write(true) + .open(&dst_path) + .await?; + tokio::io::copy(&mut entry, &mut f).await?; + // fsync the file + f.sync_all().await?; + } + _ => { + bail!( + "entry {} in backup tar archive is of unexpected type: {:?}", + file_path.display(), + header.entry_type() + ); + } } } - // TODO: fsync? + // fsync temp timeline directory to remember its contents. + fsync_async_opt(&tli_dir_path, !conf.no_sync).await?; // Let's create timeline from temp directory and verify that it's correct let (commit_lsn, flush_lsn) = validate_temp_timeline(conf, ttid, &tli_dir_path).await?; @@ -290,7 +451,9 @@ pub async fn load_temp_timeline( ttid, tmp_path, timeline_path ); tokio::fs::create_dir_all(get_tenant_dir(conf, &ttid.tenant_id)).await?; - tokio::fs::rename(tmp_path, &timeline_path).await?; + // fsync tenant dir creation + fsync_async_opt(&conf.workdir, !conf.no_sync).await?; + durable_rename(tmp_path, &timeline_path, !conf.no_sync).await?; let tli = GlobalTimelines::load_timeline(&guard, ttid) .await diff --git a/safekeeper/src/safekeeper.rs b/safekeeper/src/safekeeper.rs index 563dbbe315..ae230960ae 100644 --- a/safekeeper/src/safekeeper.rs +++ b/safekeeper/src/safekeeper.rs @@ -780,6 +780,9 @@ where // Initializing backup_lsn is useful to avoid making backup think it should upload 0 segment. 
state.backup_lsn = max(state.backup_lsn, state.timeline_start_lsn); + // similar for remote_consistent_lsn + state.remote_consistent_lsn = + max(state.remote_consistent_lsn, state.timeline_start_lsn); state.acceptor_state.term_history = msg.term_history.clone(); self.state.finish_change(&state).await?; diff --git a/safekeeper/src/timeline.rs b/safekeeper/src/timeline.rs index 148a7e90bd..e510a05a32 100644 --- a/safekeeper/src/timeline.rs +++ b/safekeeper/src/timeline.rs @@ -4,7 +4,7 @@ use anyhow::{anyhow, bail, Result}; use camino::Utf8PathBuf; use serde::{Deserialize, Serialize}; -use tokio::fs; +use tokio::fs::{self}; use tokio_util::sync::CancellationToken; use utils::id::TenantId; @@ -225,7 +225,7 @@ impl SharedState { }) } - fn get_wal_seg_size(&self) -> usize { + pub(crate) fn get_wal_seg_size(&self) -> usize { self.sk.state.server.wal_seg_size as usize } diff --git a/safekeeper/src/wal_storage.rs b/safekeeper/src/wal_storage.rs index 45e27e1951..0c1731937c 100644 --- a/safekeeper/src/wal_storage.rs +++ b/safekeeper/src/wal_storage.rs @@ -684,13 +684,12 @@ impl WalReader { let xlogoff = self.pos.segment_offset(self.wal_seg_size); let segno = self.pos.segment_number(self.wal_seg_size); let wal_file_name = XLogFileName(PG_TLI, segno, self.wal_seg_size); - let wal_file_path = self.timeline_dir.join(&wal_file_name); // Try to open local file, if we may have WAL locally if self.pos >= self.local_start_lsn { - let res = Self::open_wal_file(&wal_file_path).await; + let res = open_wal_file(&self.timeline_dir, segno, self.wal_seg_size).await; match res { - Ok(mut file) => { + Ok((mut file, _)) => { file.seek(SeekFrom::Start(xlogoff as u64)).await?; return Ok(Box::pin(file)); } @@ -718,25 +717,6 @@ impl WalReader { bail!("WAL segment is not found") } - - /// Helper function for opening a wal file. - async fn open_wal_file(wal_file_path: &Utf8Path) -> Result { - // First try to open the .partial file. - let mut partial_path = wal_file_path.to_owned(); - partial_path.set_extension("partial"); - if let Ok(opened_file) = tokio::fs::File::open(&partial_path).await { - return Ok(opened_file); - } - - // If that failed, try it without the .partial extension. - tokio::fs::File::open(&wal_file_path) - .await - .with_context(|| format!("Failed to open WAL file {:?}", wal_file_path)) - .map_err(|e| { - warn!("{}", e); - e - }) - } } /// Zero block for filling created WAL segments. @@ -758,6 +738,34 @@ async fn write_zeroes(file: &mut File, mut count: usize) -> Result<()> { Ok(()) } +/// Helper function for opening WAL segment `segno` in `dir`. Returns file and +/// whether it is .partial. +pub(crate) async fn open_wal_file( + timeline_dir: &Utf8Path, + segno: XLogSegNo, + wal_seg_size: usize, +) -> Result<(tokio::fs::File, bool)> { + let (wal_file_path, wal_file_partial_path) = wal_file_paths(timeline_dir, segno, wal_seg_size)?; + + // First try to open the .partial file. + let mut partial_path = wal_file_path.to_owned(); + partial_path.set_extension("partial"); + if let Ok(opened_file) = tokio::fs::File::open(&wal_file_partial_path).await { + return Ok((opened_file, true)); + } + + // If that failed, try it without the .partial extension. + let pf = tokio::fs::File::open(&wal_file_path) + .await + .with_context(|| format!("failed to open WAL file {:#}", wal_file_path)) + .map_err(|e| { + warn!("{}", e); + e + })?; + + Ok((pf, false)) +} + /// Helper returning full path to WAL segment file and its .partial brother. 
pub fn wal_file_paths( timeline_dir: &Utf8Path, diff --git a/test_runner/regress/test_wal_acceptor.py b/test_runner/regress/test_wal_acceptor.py index dce30f5388..11aeb8f182 100644 --- a/test_runner/regress/test_wal_acceptor.py +++ b/test_runner/regress/test_wal_acceptor.py @@ -317,9 +317,9 @@ def test_broker(neon_env_builder: NeonEnvBuilder): time.sleep(1) # Ensure that safekeepers don't lose remote_consistent_lsn on restart. - # Control file is persisted each 5s. TODO: do that on shutdown and remove sleep. - time.sleep(6) for sk in env.safekeepers: + # force persist cfile + sk.http_client().checkpoint(tenant_id, timeline_id) sk.stop() sk.start() stat_after_restart = [cli.timeline_status(tenant_id, timeline_id) for cli in clients] @@ -1749,11 +1749,11 @@ def test_pull_timeline(neon_env_builder: NeonEnvBuilder): neon_env_builder.num_safekeepers = 4 env = neon_env_builder.init_start() tenant_id = env.initial_tenant - timeline_id = env.neon_cli.create_branch("test_pull_timeline") + timeline_id = env.initial_timeline log.info("Use only first 3 safekeepers") env.safekeepers[3].stop() - endpoint = env.endpoints.create("test_pull_timeline") + endpoint = env.endpoints.create("main") endpoint.active_safekeepers = [1, 2, 3] endpoint.start() @@ -1787,7 +1787,7 @@ def test_pull_timeline(neon_env_builder: NeonEnvBuilder): show_statuses(env.safekeepers, tenant_id, timeline_id) log.info("Restarting compute with new config to verify that it works") - endpoint.stop_and_destroy().create("test_pull_timeline") + endpoint.stop_and_destroy().create("main") endpoint.active_safekeepers = [1, 3, 4] endpoint.start() @@ -1836,14 +1836,14 @@ def test_pull_timeline_gc(neon_env_builder: NeonEnvBuilder): src_flush_lsn = src_sk.get_flush_lsn(tenant_id, timeline_id) log.info(f"flush_lsn on src before pull_timeline: {src_flush_lsn}") - dst_http = dst_sk.http_client() + src_http = src_sk.http_client() # run pull_timeline which will halt before downloading files - dst_http.configure_failpoints(("sk-pull-timeline-after-list-pausable", "pause")) + src_http.configure_failpoints(("sk-snapshot-after-list-pausable", "pause")) pt_handle = PropagatingThread( target=dst_sk.pull_timeline, args=([src_sk], tenant_id, timeline_id) ) pt_handle.start() - dst_sk.wait_until_paused("sk-pull-timeline-after-list-pausable") + src_sk.wait_until_paused("sk-snapshot-after-list-pausable") # ensure segment exists endpoint.safe_psql("insert into t select generate_series(1, 180000), 'papaya'") @@ -1854,7 +1854,7 @@ def test_pull_timeline_gc(neon_env_builder: NeonEnvBuilder): first_segment_p = src_sk.timeline_dir(tenant_id, timeline_id) / "000000010000000000000001" log.info(f"first segment exist={os.path.exists(first_segment_p)}") - dst_http.configure_failpoints(("sk-pull-timeline-after-list-pausable", "off")) + src_http.configure_failpoints(("sk-snapshot-after-list-pausable", "off")) pt_handle.join() timeline_start_lsn = src_sk.get_timeline_start_lsn(tenant_id, timeline_id) @@ -1883,7 +1883,6 @@ def test_pull_timeline_gc(neon_env_builder: NeonEnvBuilder): # enough, so it won't be affected by term change anymore. # # Expected to fail while term check is not implemented. 
-@pytest.mark.xfail def test_pull_timeline_term_change(neon_env_builder: NeonEnvBuilder): neon_env_builder.num_safekeepers = 3 neon_env_builder.enable_safekeeper_remote_storage(default_remote_storage()) @@ -1900,14 +1899,14 @@ def test_pull_timeline_term_change(neon_env_builder: NeonEnvBuilder): ep.safe_psql("create table t(key int, value text)") ep.safe_psql("insert into t select generate_series(1, 1000), 'pear'") - dst_http = dst_sk.http_client() + src_http = src_sk.http_client() # run pull_timeline which will halt before downloading files - dst_http.configure_failpoints(("sk-pull-timeline-after-list-pausable", "pause")) + src_http.configure_failpoints(("sk-snapshot-after-list-pausable", "pause")) pt_handle = PropagatingThread( target=dst_sk.pull_timeline, args=([src_sk], tenant_id, timeline_id) ) pt_handle.start() - dst_sk.wait_until_paused("sk-pull-timeline-after-list-pausable") + src_sk.wait_until_paused("sk-snapshot-after-list-pausable") src_http = src_sk.http_client() term_before = src_http.timeline_status(tenant_id, timeline_id).term @@ -1922,7 +1921,7 @@ def test_pull_timeline_term_change(neon_env_builder: NeonEnvBuilder): term_after = src_http.timeline_status(tenant_id, timeline_id).term assert term_after > term_before, f"term_after={term_after}, term_before={term_before}" - dst_http.configure_failpoints(("sk-pull-timeline-after-list-pausable", "off")) + src_http.configure_failpoints(("sk-snapshot-after-list-pausable", "off")) with pytest.raises(requests.exceptions.HTTPError): pt_handle.join() From 29a41fc7b913e76666702344b1a751bc4d1aab69 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Thu, 13 Jun 2024 16:10:05 +0300 Subject: [PATCH 199/220] Implement holding off WAL removal for pull_timeline. --- safekeeper/src/pull_timeline.rs | 18 +++++++++++++++--- safekeeper/src/timeline.rs | 5 +++++ safekeeper/src/timeline_manager.rs | 6 ++++-- test_runner/fixtures/neon_fixtures.py | 9 ++++++--- test_runner/regress/test_wal_acceptor.py | 6 ++++-- 5 files changed, 34 insertions(+), 10 deletions(-) diff --git a/safekeeper/src/pull_timeline.rs b/safekeeper/src/pull_timeline.rs index 4099a324f9..2c4cc836f7 100644 --- a/safekeeper/src/pull_timeline.rs +++ b/safekeeper/src/pull_timeline.rs @@ -15,6 +15,7 @@ use tokio::{ fs::{File, OpenOptions}, io::AsyncWrite, sync::mpsc, + task, }; use tokio_tar::{Archive, Builder}; use tokio_util::{ @@ -66,7 +67,11 @@ pub struct SnapshotContext { impl Drop for SnapshotContext { fn drop(&mut self) { - // todo: spawn task removing WAL gc hold off + let tli = self.tli.clone(); + task::spawn(async move { + let mut shared_state = tli.write_shared_state().await; + shared_state.wal_removal_on_hold = false; + }); } } @@ -150,7 +155,7 @@ impl FullAccessTimeline { &self, ar: &mut tokio_tar::Builder, ) -> Result { - let shared_state = self.read_shared_state().await; + let mut shared_state = self.write_shared_state().await; let cf_path = self.get_timeline_dir().join(CONTROL_FILE_NAME); let mut cf = File::open(cf_path).await?; @@ -183,7 +188,14 @@ impl FullAccessTimeline { ); } - // TODO: set WAL hold off. + // Prevent WAL removal while we're streaming data. + // + // Since this a flag, not a counter just bail out if already set; we + // shouldn't need concurrent snapshotting. 
+ if shared_state.wal_removal_on_hold { + bail!("wal_removal_on_hold is already true"); + } + shared_state.wal_removal_on_hold = true; let bctx = SnapshotContext { from_segno, diff --git a/safekeeper/src/timeline.rs b/safekeeper/src/timeline.rs index e510a05a32..544ffdbb36 100644 --- a/safekeeper/src/timeline.rs +++ b/safekeeper/src/timeline.rs @@ -168,6 +168,9 @@ pub struct SharedState { pub(crate) sk: SafeKeeper, /// In memory list containing state of peers sent in latest messages from them. pub(crate) peers_info: PeersInfo, + // True value hinders old WAL removal; this is used by snapshotting. We + // could make it a counter, but there is no need to. + pub(crate) wal_removal_on_hold: bool, } impl SharedState { @@ -205,6 +208,7 @@ impl SharedState { Ok(Self { sk, peers_info: PeersInfo(vec![]), + wal_removal_on_hold: false, }) } @@ -222,6 +226,7 @@ impl SharedState { Ok(Self { sk: SafeKeeper::new(control_store, wal_store, conf.my_id)?, peers_info: PeersInfo(vec![]), + wal_removal_on_hold: false, }) } diff --git a/safekeeper/src/timeline_manager.rs b/safekeeper/src/timeline_manager.rs index 087b988c69..592426bba3 100644 --- a/safekeeper/src/timeline_manager.rs +++ b/safekeeper/src/timeline_manager.rs @@ -39,6 +39,7 @@ pub struct StateSnapshot { // misc pub cfile_last_persist_at: Instant, pub inmem_flush_pending: bool, + pub wal_removal_on_hold: bool, pub peers: Vec, } @@ -54,6 +55,7 @@ impl StateSnapshot { cfile_backup_lsn: read_guard.sk.state.backup_lsn, cfile_last_persist_at: read_guard.sk.state.pers.last_persist_at(), inmem_flush_pending: Self::has_unflushed_inmem_state(&read_guard), + wal_removal_on_hold: read_guard.wal_removal_on_hold, peers: read_guard.get_peers(heartbeat_timeout), } } @@ -324,8 +326,8 @@ async fn update_wal_removal( last_removed_segno: u64, wal_removal_task: &mut Option>>, ) { - if wal_removal_task.is_some() { - // WAL removal is already in progress + if wal_removal_task.is_some() || state.wal_removal_on_hold { + // WAL removal is already in progress or hold off return; } diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index 394f5283f3..12fda5468f 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -3897,11 +3897,13 @@ class Safekeeper(LogUtils): segments.sort() return segments - def checkpoint_up_to(self, tenant_id: TenantId, timeline_id: TimelineId, lsn: Lsn): + def checkpoint_up_to( + self, tenant_id: TenantId, timeline_id: TimelineId, lsn: Lsn, wait_wal_removal=True + ): """ Assuming pageserver(s) uploaded to s3 up to `lsn`, 1) wait for remote_consistent_lsn and wal_backup_lsn on safekeeper to reach it. - 2) checkpoint timeline on safekeeper, which should remove WAL before this LSN. + 2) checkpoint timeline on safekeeper, which should remove WAL before this LSN; optionally wait for that. 
""" cli = self.http_client() @@ -3925,7 +3927,8 @@ class Safekeeper(LogUtils): # pageserver to this safekeeper wait_until(30, 1, are_lsns_advanced) cli.checkpoint(tenant_id, timeline_id) - wait_until(30, 1, are_segments_removed) + if wait_wal_removal: + wait_until(30, 1, are_segments_removed) def wait_until_paused(self, failpoint: str): msg = f"at failpoint {failpoint}" diff --git a/test_runner/regress/test_wal_acceptor.py b/test_runner/regress/test_wal_acceptor.py index 11aeb8f182..300a6b7115 100644 --- a/test_runner/regress/test_wal_acceptor.py +++ b/test_runner/regress/test_wal_acceptor.py @@ -1816,7 +1816,6 @@ def test_pull_timeline(neon_env_builder: NeonEnvBuilder): # 4) Do some write, verify integrity with timeline_digest. # Expected to fail while holding off WAL gc plus fetching commit_lsn WAL # segment is not implemented. -@pytest.mark.xfail def test_pull_timeline_gc(neon_env_builder: NeonEnvBuilder): neon_env_builder.num_safekeepers = 3 neon_env_builder.enable_safekeeper_remote_storage(default_remote_storage()) @@ -1850,13 +1849,16 @@ def test_pull_timeline_gc(neon_env_builder: NeonEnvBuilder): lsn = last_flush_lsn_upload(env, endpoint, tenant_id, timeline_id) assert lsn > Lsn("0/2000000") # Checkpoint timeline beyond lsn. - src_sk.checkpoint_up_to(tenant_id, timeline_id, lsn) + src_sk.checkpoint_up_to(tenant_id, timeline_id, lsn, wait_wal_removal=False) first_segment_p = src_sk.timeline_dir(tenant_id, timeline_id) / "000000010000000000000001" log.info(f"first segment exist={os.path.exists(first_segment_p)}") src_http.configure_failpoints(("sk-snapshot-after-list-pausable", "off")) pt_handle.join() + # after pull_timeline is finished WAL should be removed on donor + src_sk.checkpoint_up_to(tenant_id, timeline_id, lsn, wait_wal_removal=True) + timeline_start_lsn = src_sk.get_timeline_start_lsn(tenant_id, timeline_id) dst_flush_lsn = dst_sk.get_flush_lsn(tenant_id, timeline_id) log.info(f"flush_lsn on dst after pull_timeline: {dst_flush_lsn}") From 4feb6ba29c11f7e5a945bdb7add681c5e4a04b9b Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Thu, 13 Jun 2024 22:47:58 +0300 Subject: [PATCH 200/220] Make pull_timeline work with auth enabled. - Make safekeeper read SAFEKEEPER_AUTH_TOKEN env variable with JWT token to connect to other safekeepers. - Set it in neon_local when auth is enabled. - Create simple rust http client supporting it, and use it in pull_timeline implementation. - Enable auth in all pull_timeline tests. - Make sk http_client() by default generate safekeeper wide token, it makes easier enabling auth in all tests by default. 
--- control_plane/src/safekeeper.rs | 15 +- safekeeper/src/bin/safekeeper.rs | 19 +++ safekeeper/src/http/client.rs | 139 ++++++++++++++++++ safekeeper/src/http/mod.rs | 1 + safekeeper/src/http/routes.rs | 3 +- safekeeper/src/lib.rs | 5 +- safekeeper/src/pull_timeline.rs | 60 ++++---- .../tests/walproposer_sim/safekeeper.rs | 1 + test_runner/fixtures/neon_fixtures.py | 18 ++- test_runner/regress/test_wal_acceptor.py | 25 +++- 10 files changed, 245 insertions(+), 41 deletions(-) create mode 100644 safekeeper/src/http/client.rs diff --git a/control_plane/src/safekeeper.rs b/control_plane/src/safekeeper.rs index d62a2e80b5..4a320ce53d 100644 --- a/control_plane/src/safekeeper.rs +++ b/control_plane/src/safekeeper.rs @@ -14,6 +14,7 @@ use camino::Utf8PathBuf; use postgres_connection::PgConnectionConfig; use reqwest::{IntoUrl, Method}; use thiserror::Error; +use utils::auth::{Claims, Scope}; use utils::{http::error::HttpErrorBody, id::NodeId}; use crate::{ @@ -197,7 +198,7 @@ impl SafekeeperNode { &datadir, &self.env.safekeeper_bin(), &args, - [], + self.safekeeper_env_variables()?, background_process::InitialPidFile::Expect(self.pid_file()), || async { match self.check_status().await { @@ -210,6 +211,18 @@ impl SafekeeperNode { .await } + fn safekeeper_env_variables(&self) -> anyhow::Result> { + // Generate a token to connect from safekeeper to peers + if self.conf.auth_enabled { + let token = self + .env + .generate_auth_token(&Claims::new(None, Scope::SafekeeperData))?; + Ok(vec![("SAFEKEEPER_AUTH_TOKEN".to_owned(), token)]) + } else { + Ok(Vec::new()) + } + } + /// /// Stop the server. /// diff --git a/safekeeper/src/bin/safekeeper.rs b/safekeeper/src/bin/safekeeper.rs index 7476654426..86238c7292 100644 --- a/safekeeper/src/bin/safekeeper.rs +++ b/safekeeper/src/bin/safekeeper.rs @@ -13,7 +13,9 @@ use tokio::runtime::Handle; use tokio::signal::unix::{signal, SignalKind}; use tokio::task::JoinError; use toml_edit::Document; +use utils::logging::SecretString; +use std::env::{var, VarError}; use std::fs::{self, File}; use std::io::{ErrorKind, Write}; use std::str::FromStr; @@ -287,6 +289,22 @@ async fn main() -> anyhow::Result<()> { } }; + // Load JWT auth token to connect to other safekeepers for pull_timeline. + let sk_auth_token = match var("SAFEKEEPER_AUTH_TOKEN") { + Ok(v) => { + info!("loaded JWT token for authentication with safekeepers"); + Some(SecretString::from(v)) + } + Err(VarError::NotPresent) => { + info!("no JWT token for authentication with safekeepers detected"); + None + } + Err(_) => { + warn!("JWT token for authentication with safekeepers is not unicode"); + None + } + }; + let conf = SafeKeeperConf { workdir, my_id: id, @@ -307,6 +325,7 @@ async fn main() -> anyhow::Result<()> { pg_auth, pg_tenant_only_auth, http_auth, + sk_auth_token, current_thread_runtime: args.current_thread_runtime, walsenders_keep_horizon: args.walsenders_keep_horizon, partial_backup_enabled: args.partial_backup_enabled, diff --git a/safekeeper/src/http/client.rs b/safekeeper/src/http/client.rs new file mode 100644 index 0000000000..0bb31c200d --- /dev/null +++ b/safekeeper/src/http/client.rs @@ -0,0 +1,139 @@ +//! Safekeeper http client. +//! +//! Partially copied from pageserver client; some parts might be better to be +//! united. +//! +//! It would be also good to move it out to separate crate, but this needs +//! duplication of internal-but-reported structs like WalSenderState, ServerInfo +//! etc. 
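
For orientation, the end-to-end flow this client is part of: `neon_local` generates a JWT with `SafekeeperData` scope and hands it to the safekeeper via the `SAFEKEEPER_AUTH_TOKEN` environment variable; the safekeeper loads it at startup, and this client attaches it as a `Bearer` Authorization header on requests to peer safekeepers. A condensed, standalone sketch of that flow (simplified names, not the code this patch adds):

```
use std::env::{self, VarError};

// Read the peer-auth token the way the safekeeper does at startup: absent means
// auth towards peers is disabled; a non-unicode value is tolerated with a warning
// in the real code, here it is simply treated as absent.
fn load_peer_auth_token() -> Option<String> {
    match env::var("SAFEKEEPER_AUTH_TOKEN") {
        Ok(token) => Some(token),
        Err(VarError::NotPresent) => None,
        Err(VarError::NotUnicode(_)) => None,
    }
}

// The HTTP client then sends the token on every request to a peer safekeeper.
fn authorization_header(token: &str) -> (String, String) {
    ("Authorization".to_string(), format!("Bearer {token}"))
}

fn main() {
    match load_peer_auth_token() {
        Some(token) => {
            let (name, value) = authorization_header(&token);
            println!("would send '{name}: {value}' to peer safekeepers");
        }
        None => println!("no peer auth token configured; peer requests go out unauthenticated"),
    }
}
```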
+ +use reqwest::{IntoUrl, Method, StatusCode}; +use utils::{ + http::error::HttpErrorBody, + id::{TenantId, TimelineId}, + logging::SecretString, +}; + +use super::routes::TimelineStatus; + +#[derive(Debug, Clone)] +pub struct Client { + mgmt_api_endpoint: String, + authorization_header: Option, + client: reqwest::Client, +} + +#[derive(thiserror::Error, Debug)] +pub enum Error { + /// Failed to receive body (reqwest error). + #[error("receive body: {0}")] + ReceiveBody(reqwest::Error), + + /// Status is not ok, but failed to parse body as `HttpErrorBody`. + #[error("receive error body: {0}")] + ReceiveErrorBody(String), + + /// Status is not ok; parsed error in body as `HttpErrorBody`. + #[error("safekeeper API: {1}")] + ApiError(StatusCode, String), +} + +pub type Result = std::result::Result; + +pub trait ResponseErrorMessageExt: Sized { + fn error_from_body(self) -> impl std::future::Future> + Send; +} + +/// If status is not ok, try to extract error message from the body. +impl ResponseErrorMessageExt for reqwest::Response { + async fn error_from_body(self) -> Result { + let status = self.status(); + if !(status.is_client_error() || status.is_server_error()) { + return Ok(self); + } + + let url = self.url().to_owned(); + Err(match self.json::().await { + Ok(HttpErrorBody { msg }) => Error::ApiError(status, msg), + Err(_) => { + Error::ReceiveErrorBody(format!("http error ({}) at {}.", status.as_u16(), url)) + } + }) + } +} + +impl Client { + pub fn new(mgmt_api_endpoint: String, jwt: Option) -> Self { + Self::from_client(reqwest::Client::new(), mgmt_api_endpoint, jwt) + } + + pub fn from_client( + client: reqwest::Client, + mgmt_api_endpoint: String, + jwt: Option, + ) -> Self { + Self { + mgmt_api_endpoint, + authorization_header: jwt + .map(|jwt| SecretString::from(format!("Bearer {}", jwt.get_contents()))), + client, + } + } + + pub async fn timeline_status( + &self, + tenant_id: TenantId, + timeline_id: TimelineId, + ) -> Result { + let uri = format!( + "{}/v1/tenant/{}/timeline/{}", + self.mgmt_api_endpoint, tenant_id, timeline_id + ); + let resp = self.get(&uri).await?; + resp.json().await.map_err(Error::ReceiveBody) + } + + pub async fn snapshot( + &self, + tenant_id: TenantId, + timeline_id: TimelineId, + ) -> Result { + let uri = format!( + "{}/v1/tenant/{}/timeline/{}/snapshot", + self.mgmt_api_endpoint, tenant_id, timeline_id + ); + self.get(&uri).await + } + + async fn get(&self, uri: U) -> Result { + self.request(Method::GET, uri, ()).await + } + + /// Send the request and check that the status code is good. + async fn request( + &self, + method: Method, + uri: U, + body: B, + ) -> Result { + let res = self.request_noerror(method, uri, body).await?; + let response = res.error_from_body().await?; + Ok(response) + } + + /// Just send the request. 
+ async fn request_noerror( + &self, + method: Method, + uri: U, + body: B, + ) -> Result { + let req = self.client.request(method, uri); + let req = if let Some(value) = &self.authorization_header { + req.header(reqwest::header::AUTHORIZATION, value.get_contents()) + } else { + req + }; + req.json(&body).send().await.map_err(Error::ReceiveBody) + } +} diff --git a/safekeeper/src/http/mod.rs b/safekeeper/src/http/mod.rs index 2a9570595f..52fb13ff5b 100644 --- a/safekeeper/src/http/mod.rs +++ b/safekeeper/src/http/mod.rs @@ -1,3 +1,4 @@ +pub mod client; pub mod routes; pub use routes::make_router; diff --git a/safekeeper/src/http/routes.rs b/safekeeper/src/http/routes.rs index 40ac2c105d..3f2cd97ccd 100644 --- a/safekeeper/src/http/routes.rs +++ b/safekeeper/src/http/routes.rs @@ -195,8 +195,9 @@ async fn timeline_pull_handler(mut request: Request) -> Result>, pub pg_tenant_only_auth: Option>, pub http_auth: Option>, + /// JWT token to connect to other safekeepers with. + pub sk_auth_token: Option, pub current_thread_runtime: bool, pub walsenders_keep_horizon: bool, pub partial_backup_enabled: bool, @@ -114,6 +116,7 @@ impl SafeKeeperConf { pg_auth: None, pg_tenant_only_auth: None, http_auth: None, + sk_auth_token: None, heartbeat_timeout: Duration::new(5, 0), max_offloader_lag_bytes: defaults::DEFAULT_MAX_OFFLOADER_LAG_BYTES, current_thread_runtime: false, diff --git a/safekeeper/src/pull_timeline.rs b/safekeeper/src/pull_timeline.rs index 2c4cc836f7..66c41f65ff 100644 --- a/safekeeper/src/pull_timeline.rs +++ b/safekeeper/src/pull_timeline.rs @@ -27,7 +27,10 @@ use tracing::{error, info, instrument}; use crate::{ control_file::{self, CONTROL_FILE_NAME}, debug_dump, - http::routes::TimelineStatus, + http::{ + client::{self, Client}, + routes::TimelineStatus, + }, safekeeper::Term, timeline::{get_tenant_dir, get_timeline_dir, FullAccessTimeline, Timeline, TimelineError}, wal_storage::{self, open_wal_file, Storage}, @@ -36,6 +39,7 @@ use crate::{ use utils::{ crashsafe::{durable_rename, fsync_async_opt}, id::{TenantId, TenantTimelineId, TimelineId}, + logging::SecretString, lsn::Lsn, pausable_failpoint, }; @@ -163,6 +167,11 @@ impl FullAccessTimeline { // We need to stream since the oldest segment someone (s3 or pageserver) // still needs. This duplicates calc_horizon_lsn logic. + // + // We know that WAL wasn't removed up to this point because it cannot be + // removed further than `backup_lsn`. Since we're holding shared_state + // lock and setting `wal_removal_on_hold` later, it guarantees that WAL + // won't be removed until we're done. let from_lsn = min( shared_state.sk.state.remote_consistent_lsn, shared_state.sk.state.backup_lsn, @@ -255,7 +264,10 @@ pub struct DebugDumpResponse { } /// Find the most advanced safekeeper and pull timeline from it. 
-pub async fn handle_request(request: Request) -> Result { +pub async fn handle_request( + request: Request, + sk_auth_token: Option, +) -> Result { let existing_tli = GlobalTimelines::get(TenantTimelineId::new( request.tenant_id, request.timeline_id, @@ -264,26 +276,22 @@ pub async fn handle_request(request: Request) -> Result { bail!("Timeline {} already exists", request.timeline_id); } - let client = reqwest::Client::new(); let http_hosts = request.http_hosts.clone(); - // Send request to /v1/tenant/:tenant_id/timeline/:timeline_id - let responses = futures::future::join_all(http_hosts.iter().map(|url| { - let url = format!( - "{}/v1/tenant/{}/timeline/{}", - url, request.tenant_id, request.timeline_id - ); - client.get(url).send() - })) - .await; + // Figure out statuses of potential donors. + let responses: Vec> = + futures::future::join_all(http_hosts.iter().map(|url| async { + let cclient = Client::new(url.clone(), sk_auth_token.clone()); + let info = cclient + .timeline_status(request.tenant_id, request.timeline_id) + .await?; + Ok(info) + })) + .await; let mut statuses = Vec::new(); for (i, response) in responses.into_iter().enumerate() { - let response = response.context(format!("fetching status from {}", http_hosts[i]))?; - response - .error_for_status_ref() - .context(format!("checking status from {}", http_hosts[i]))?; - let status: crate::http::routes::TimelineStatus = response.json().await?; + let status = response.context(format!("fetching status from {}", http_hosts[i]))?; statuses.push((status, i)); } @@ -303,10 +311,14 @@ pub async fn handle_request(request: Request) -> Result { assert!(status.tenant_id == request.tenant_id); assert!(status.timeline_id == request.timeline_id); - pull_timeline(status, safekeeper_host).await + pull_timeline(status, safekeeper_host, sk_auth_token).await } -async fn pull_timeline(status: TimelineStatus, host: String) -> Result { +async fn pull_timeline( + status: TimelineStatus, + host: String, + sk_auth_token: Option, +) -> Result { let ttid = TenantTimelineId::new(status.tenant_id, status.timeline_id); info!( "pulling timeline {} from safekeeper {}, commit_lsn={}, flush_lsn={}, term={}, epoch={}", @@ -322,17 +334,11 @@ async fn pull_timeline(status: TimelineStatus, host: String) -> Result let (_tmp_dir, tli_dir_path) = create_temp_timeline_dir(conf, ttid).await?; - let client = reqwest::Client::new(); - + let client = Client::new(host.clone(), sk_auth_token.clone()); // Request stream with basebackup archive. let bb_resp = client - .get(format!( - "{}/v1/tenant/{}/timeline/{}/snapshot", - host, status.tenant_id, status.timeline_id - )) - .send() + .snapshot(status.tenant_id, status.timeline_id) .await?; - bb_resp.error_for_status_ref()?; // Make Stream of Bytes from it... 
let bb_stream = bb_resp.bytes_stream().map_err(std::io::Error::other); diff --git a/safekeeper/tests/walproposer_sim/safekeeper.rs b/safekeeper/tests/walproposer_sim/safekeeper.rs index 27e2a4453b..47539872a6 100644 --- a/safekeeper/tests/walproposer_sim/safekeeper.rs +++ b/safekeeper/tests/walproposer_sim/safekeeper.rs @@ -174,6 +174,7 @@ pub fn run_server(os: NodeOs, disk: Arc) -> Result<()> { pg_auth: None, pg_tenant_only_auth: None, http_auth: None, + sk_auth_token: None, current_thread_runtime: false, walsenders_keep_horizon: false, partial_backup_enabled: false, diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index 12fda5468f..aa55b6e4cb 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -3847,7 +3847,15 @@ class Safekeeper(LogUtils): assert isinstance(res, dict) return res - def http_client(self, auth_token: Optional[str] = None) -> SafekeeperHttpClient: + def http_client( + self, auth_token: Optional[str] = None, gen_sk_wide_token: bool = True + ) -> SafekeeperHttpClient: + """ + When auth_token is None but gen_sk_wide is True creates safekeeper wide + token, which is a reasonable default. + """ + if auth_token is None and gen_sk_wide_token: + auth_token = self.env.auth_keys.generate_safekeeper_token() is_testing_enabled = '"testing"' in self.env.get_binary_version("safekeeper") return SafekeeperHttpClient( port=self.port.http, auth_token=auth_token, is_testing_enabled=is_testing_enabled @@ -4450,6 +4458,7 @@ def wait_for_last_flush_lsn( tenant: TenantId, timeline: TimelineId, pageserver_id: Optional[int] = None, + auth_token: Optional[str] = None, ) -> Lsn: """Wait for pageserver to catch up the latest flush LSN, returns the last observed lsn.""" @@ -4463,7 +4472,7 @@ def wait_for_last_flush_lsn( f"wait_for_last_flush_lsn: waiting for {last_flush_lsn} on shard {tenant_shard_id} on pageserver {pageserver.id})" ) waited = wait_for_last_record_lsn( - pageserver.http_client(), tenant_shard_id, timeline, last_flush_lsn + pageserver.http_client(auth_token=auth_token), tenant_shard_id, timeline, last_flush_lsn ) assert waited >= last_flush_lsn @@ -4559,6 +4568,7 @@ def last_flush_lsn_upload( tenant_id: TenantId, timeline_id: TimelineId, pageserver_id: Optional[int] = None, + auth_token: Optional[str] = None, ) -> Lsn: """ Wait for pageserver to catch to the latest flush LSN of given endpoint, @@ -4566,11 +4576,11 @@ def last_flush_lsn_upload( reaching flush LSN). 
""" last_flush_lsn = wait_for_last_flush_lsn( - env, endpoint, tenant_id, timeline_id, pageserver_id=pageserver_id + env, endpoint, tenant_id, timeline_id, pageserver_id=pageserver_id, auth_token=auth_token ) shards = tenant_get_shards(env, tenant_id, pageserver_id) for tenant_shard_id, pageserver in shards: - ps_http = pageserver.http_client() + ps_http = pageserver.http_client(auth_token=auth_token) wait_for_last_record_lsn(ps_http, tenant_shard_id, timeline_id, last_flush_lsn) # force a checkpoint to trigger upload ps_http.timeline_checkpoint(tenant_shard_id, timeline_id) diff --git a/test_runner/regress/test_wal_acceptor.py b/test_runner/regress/test_wal_acceptor.py index 300a6b7115..7bf208db54 100644 --- a/test_runner/regress/test_wal_acceptor.py +++ b/test_runner/regress/test_wal_acceptor.py @@ -374,7 +374,7 @@ def test_wal_removal(neon_env_builder: NeonEnvBuilder, auth_enabled: bool): http_cli_other = env.safekeepers[0].http_client( auth_token=env.auth_keys.generate_tenant_token(TenantId.generate()) ) - http_cli_noauth = env.safekeepers[0].http_client() + http_cli_noauth = env.safekeepers[0].http_client(gen_sk_wide_token=False) # Pretend WAL is offloaded to s3. if auth_enabled: @@ -830,7 +830,7 @@ def test_timeline_status(neon_env_builder: NeonEnvBuilder, auth_enabled: bool): auth_token=env.auth_keys.generate_tenant_token(TenantId.generate()) ) wa_http_cli_bad.check_status() - wa_http_cli_noauth = wa.http_client() + wa_http_cli_noauth = wa.http_client(gen_sk_wide_token=False) wa_http_cli_noauth.check_status() # debug endpoint requires safekeeper scope @@ -964,7 +964,7 @@ def test_sk_auth(neon_env_builder: NeonEnvBuilder): # By default, neon_local enables auth on all services if auth is configured, # so http must require the token. - sk_http_cli_noauth = sk.http_client() + sk_http_cli_noauth = sk.http_client(gen_sk_wide_token=False) sk_http_cli_auth = sk.http_client(auth_token=env.auth_keys.generate_tenant_token(tenant_id)) with pytest.raises(sk_http_cli_noauth.HTTPError, match="Forbidden|Unauthorized"): sk_http_cli_noauth.timeline_status(tenant_id, timeline_id) @@ -1640,7 +1640,7 @@ def test_delete_force(neon_env_builder: NeonEnvBuilder, auth_enabled: bool): sk_http_other = sk.http_client( auth_token=env.auth_keys.generate_tenant_token(tenant_id_other) ) - sk_http_noauth = sk.http_client() + sk_http_noauth = sk.http_client(gen_sk_wide_token=False) assert (sk_data_dir / str(tenant_id) / str(timeline_id_1)).is_dir() assert (sk_data_dir / str(tenant_id) / str(timeline_id_2)).is_dir() assert (sk_data_dir / str(tenant_id) / str(timeline_id_3)).is_dir() @@ -1723,7 +1723,10 @@ def test_delete_force(neon_env_builder: NeonEnvBuilder, auth_enabled: bool): cur.execute("INSERT INTO t (key) VALUES (123)") +# Basic pull_timeline test. 
def test_pull_timeline(neon_env_builder: NeonEnvBuilder): + neon_env_builder.auth_enabled = True + def execute_payload(endpoint: Endpoint): with closing(endpoint.connect()) as conn: with conn.cursor() as cur: @@ -1739,7 +1742,7 @@ def test_pull_timeline(neon_env_builder: NeonEnvBuilder): def show_statuses(safekeepers: List[Safekeeper], tenant_id: TenantId, timeline_id: TimelineId): for sk in safekeepers: - http_cli = sk.http_client() + http_cli = sk.http_client(auth_token=env.auth_keys.generate_tenant_token(tenant_id)) try: status = http_cli.timeline_status(tenant_id, timeline_id) log.info(f"Safekeeper {sk.id} status: {status}") @@ -1769,7 +1772,7 @@ def test_pull_timeline(neon_env_builder: NeonEnvBuilder): res = ( env.safekeepers[3] - .http_client() + .http_client(auth_token=env.auth_keys.generate_safekeeper_token()) .pull_timeline( { "tenant_id": str(tenant_id), @@ -1817,6 +1820,7 @@ def test_pull_timeline(neon_env_builder: NeonEnvBuilder): # Expected to fail while holding off WAL gc plus fetching commit_lsn WAL # segment is not implemented. def test_pull_timeline_gc(neon_env_builder: NeonEnvBuilder): + neon_env_builder.auth_enabled = True neon_env_builder.num_safekeepers = 3 neon_env_builder.enable_safekeeper_remote_storage(default_remote_storage()) env = neon_env_builder.init_start() @@ -1846,7 +1850,13 @@ def test_pull_timeline_gc(neon_env_builder: NeonEnvBuilder): # ensure segment exists endpoint.safe_psql("insert into t select generate_series(1, 180000), 'papaya'") - lsn = last_flush_lsn_upload(env, endpoint, tenant_id, timeline_id) + lsn = last_flush_lsn_upload( + env, + endpoint, + tenant_id, + timeline_id, + auth_token=env.auth_keys.generate_tenant_token(tenant_id), + ) assert lsn > Lsn("0/2000000") # Checkpoint timeline beyond lsn. src_sk.checkpoint_up_to(tenant_id, timeline_id, lsn, wait_wal_removal=False) @@ -1886,6 +1896,7 @@ def test_pull_timeline_gc(neon_env_builder: NeonEnvBuilder): # # Expected to fail while term check is not implemented. def test_pull_timeline_term_change(neon_env_builder: NeonEnvBuilder): + neon_env_builder.auth_enabled = True neon_env_builder.num_safekeepers = 3 neon_env_builder.enable_safekeeper_remote_storage(default_remote_storage()) env = neon_env_builder.init_start() From 68a2298973603df5b21f230d813f59b73ae2ebb9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arpad=20M=C3=BCller?= Date: Tue, 18 Jun 2024 16:03:23 +0200 Subject: [PATCH 201/220] Add support to specifying storage account in AzureConfig (#8090) We want to be able to specify the storage account via the toml configuration, so that we can connect to multiple storage accounts in the same process. https://neondb.slack.com/archives/C06SJG60FRB/p1718702144270139 --- docs/pageserver-services.md | 3 ++- libs/remote_storage/src/azure_blob.rs | 5 ++++- libs/remote_storage/src/lib.rs | 17 +++++++++++++++-- libs/remote_storage/tests/test_real_azure.rs | 1 + 4 files changed, 22 insertions(+), 4 deletions(-) diff --git a/docs/pageserver-services.md b/docs/pageserver-services.md index ba5d3c423e..11d984eb08 100644 --- a/docs/pageserver-services.md +++ b/docs/pageserver-services.md @@ -101,11 +101,12 @@ or ```toml [remote_storage] container_name = 'some-container-name' +storage_account = 'somestorageaccnt' container_region = 'us-east' prefix_in_container = '/test-prefix/' ``` -`AZURE_STORAGE_ACCOUNT` and `AZURE_STORAGE_ACCESS_KEY` env variables can be used to specify the azure credentials if needed. +The `AZURE_STORAGE_ACCESS_KEY` env variable can be used to specify the azure credentials if needed. 
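
In short, the lookup order for the account name is: take `storage_account` from the `[remote_storage]` TOML section when it is set, otherwise fall back to the `AZURE_STORAGE_ACCOUNT` environment variable. A minimal standalone sketch of that precedence (illustrative only, not the code this patch adds to `azure_blob.rs`):

```
use std::env;

// Prefer the value from the TOML config; fall back to the environment variable.
fn resolve_storage_account(configured: Option<String>) -> String {
    configured.unwrap_or_else(|| {
        env::var("AZURE_STORAGE_ACCOUNT").expect("missing AZURE_STORAGE_ACCOUNT")
    })
}

fn main() {
    // With `storage_account = 'somestorageaccnt'` configured, the env var is not consulted.
    let account = resolve_storage_account(Some("somestorageaccnt".to_string()));
    println!("using storage account {account}");
}
```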
## Repository background tasks diff --git a/libs/remote_storage/src/azure_blob.rs b/libs/remote_storage/src/azure_blob.rs index 2aa05a9d30..dbd64fb5a6 100644 --- a/libs/remote_storage/src/azure_blob.rs +++ b/libs/remote_storage/src/azure_blob.rs @@ -54,7 +54,10 @@ impl AzureBlobStorage { azure_config.container_name ); - let account = env::var("AZURE_STORAGE_ACCOUNT").expect("missing AZURE_STORAGE_ACCOUNT"); + // Use the storage account from the config by default, fall back to env var if not present. + let account = azure_config.storage_account.clone().unwrap_or_else(|| { + env::var("AZURE_STORAGE_ACCOUNT").expect("missing AZURE_STORAGE_ACCOUNT") + }); // If the `AZURE_STORAGE_ACCESS_KEY` env var has an access key, use that, // otherwise try the token based credentials. diff --git a/libs/remote_storage/src/lib.rs b/libs/remote_storage/src/lib.rs index 8c984abed2..72748e156c 100644 --- a/libs/remote_storage/src/lib.rs +++ b/libs/remote_storage/src/lib.rs @@ -466,7 +466,11 @@ impl GenericRemoteStorage { Self::AwsS3(Arc::new(S3Bucket::new(s3_config, timeout)?)) } RemoteStorageKind::AzureContainer(azure_config) => { - info!("Using azure container '{}' in region '{}' as a remote storage, prefix in container: '{:?}'", + let storage_account = azure_config + .storage_account + .as_deref() + .unwrap_or(""); + info!("Using azure container '{}' in account '{storage_account}' in region '{}' as a remote storage, prefix in container: '{:?}'", azure_config.container_name, azure_config.container_region, azure_config.prefix_in_container); Self::AzureBlob(Arc::new(AzureBlobStorage::new(azure_config, timeout)?)) } @@ -589,6 +593,8 @@ impl Debug for S3Config { pub struct AzureConfig { /// Name of the container to connect to. pub container_name: String, + /// Name of the storage account the container is inside of + pub storage_account: Option, /// The region where the bucket is located at. pub container_region: String, /// A "subfolder" in the container, to use the same container separately by multiple remote storage users at once. 
@@ -603,8 +609,9 @@ impl Debug for AzureConfig { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("AzureConfig") .field("bucket_name", &self.container_name) + .field("storage_account", &self.storage_account) .field("bucket_region", &self.container_region) - .field("prefix_in_bucket", &self.prefix_in_container) + .field("prefix_in_container", &self.prefix_in_container) .field("concurrency_limit", &self.concurrency_limit) .field( "max_keys_per_list_response", @@ -718,6 +725,12 @@ impl RemoteStorageConfig { (None, None, None, Some(container_name), Some(container_region)) => { RemoteStorageKind::AzureContainer(AzureConfig { container_name: parse_toml_string("container_name", container_name)?, + storage_account: toml + .get("storage_account") + .map(|storage_account| { + parse_toml_string("storage_account", storage_account) + }) + .transpose()?, container_region: parse_toml_string("container_region", container_region)?, prefix_in_container: toml .get("prefix_in_container") diff --git a/libs/remote_storage/tests/test_real_azure.rs b/libs/remote_storage/tests/test_real_azure.rs index cd0b2be4b5..23628dfebe 100644 --- a/libs/remote_storage/tests/test_real_azure.rs +++ b/libs/remote_storage/tests/test_real_azure.rs @@ -212,6 +212,7 @@ fn create_azure_client( let remote_storage_config = RemoteStorageConfig { storage: RemoteStorageKind::AzureContainer(AzureConfig { container_name: remote_storage_azure_container, + storage_account: None, container_region: remote_storage_azure_region, prefix_in_container: Some(format!("test_{millis}_{random:08x}/")), concurrency_limit: NonZeroUsize::new(100).unwrap(), From cf60e4c0c5bdd7b719b999b3b6603b3d8ec6c04d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Jun 2024 16:40:27 +0100 Subject: [PATCH 202/220] build(deps): bump ws from 8.16.0 to 8.17.1 in /test_runner/pg_clients/typescript/serverless-driver (#8087) --- .../typescript/serverless-driver/package-lock.json | 8 ++++---- .../pg_clients/typescript/serverless-driver/package.json | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/test_runner/pg_clients/typescript/serverless-driver/package-lock.json b/test_runner/pg_clients/typescript/serverless-driver/package-lock.json index 5a3ad3c238..f3b456f1ed 100644 --- a/test_runner/pg_clients/typescript/serverless-driver/package-lock.json +++ b/test_runner/pg_clients/typescript/serverless-driver/package-lock.json @@ -6,7 +6,7 @@ "": { "dependencies": { "@neondatabase/serverless": "0.9.0", - "ws": "8.16.0" + "ws": "8.17.1" } }, "node_modules/@neondatabase/serverless": { @@ -96,9 +96,9 @@ } }, "node_modules/ws": { - "version": "8.16.0", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.16.0.tgz", - "integrity": "sha512-HS0c//TP7Ina87TfiPUz1rQzMhHrl/SG2guqRcTOIUYD2q8uhUdNHZYJUaQ8aTGPzCh+c6oawMKW35nFl1dxyQ==", + "version": "8.17.1", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.17.1.tgz", + "integrity": "sha512-6XQFvXTkbfUOZOKKILFG1PDK2NDQs4azKQl26T0YS5CxqWLgXajbPZ+h4gZekJyRqFU8pvnbAbbs/3TgRPy+GQ==", "engines": { "node": ">=10.0.0" }, diff --git a/test_runner/pg_clients/typescript/serverless-driver/package.json b/test_runner/pg_clients/typescript/serverless-driver/package.json index 9d9da0f42c..3ae7a8a6cf 100644 --- a/test_runner/pg_clients/typescript/serverless-driver/package.json +++ b/test_runner/pg_clients/typescript/serverless-driver/package.json @@ -2,6 +2,6 @@ "type": "module", "dependencies": { "@neondatabase/serverless": "0.9.0", - "ws": 
"8.16.0" + "ws": "8.17.1" } } From 8a9fa0a4e41937c6b20474d89534699806c7c706 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Jun 2024 16:40:46 +0100 Subject: [PATCH 203/220] build(deps): bump urllib3 from 1.26.18 to 1.26.19 (#8086) --- poetry.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/poetry.lock b/poetry.lock index 25c0c7398d..7740388fb8 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2806,13 +2806,13 @@ files = [ [[package]] name = "urllib3" -version = "1.26.18" +version = "1.26.19" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ - {file = "urllib3-1.26.18-py2.py3-none-any.whl", hash = "sha256:34b97092d7e0a3a8cf7cd10e386f401b3737364026c45e622aa02903dffe0f07"}, - {file = "urllib3-1.26.18.tar.gz", hash = "sha256:f8ecc1bba5667413457c529ab955bf8c67b45db799d159066261719e328580a0"}, + {file = "urllib3-1.26.19-py2.py3-none-any.whl", hash = "sha256:37a0344459b199fce0e80b0d3569837ec6b6937435c5244e7fd73fa6006830f3"}, + {file = "urllib3-1.26.19.tar.gz", hash = "sha256:3e3d753a8618b86d7de333b4223005f68720bcd6a7d2bcb9fbd2229ec7c1e429"}, ] [package.extras] From 8ee67241678150f9bb8413161481376150a3a849 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Tue, 18 Jun 2024 13:30:53 +0300 Subject: [PATCH 204/220] Update overview section to reflect current code organization --- docs/core_changes.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/docs/core_changes.md b/docs/core_changes.md index ea219adae9..f86d5133e8 100644 --- a/docs/core_changes.md +++ b/docs/core_changes.md @@ -11,9 +11,10 @@ page server. We currently use the same binary for both, with --wal-redo runtime the WAL redo mode. Some PostgreSQL changes are needed in the compute node, while others are just for the WAL redo process. -In addition to core PostgreSQL changes, there is a Neon extension in contrib/neon, to hook into the -smgr interface. Once all the core changes have been submitted to upstream or eliminated some other -way, the extension could live outside the postgres repository and build against vanilla PostgreSQL. +In addition to core PostgreSQL changes, there is a Neon extension in the pgxn/neon directory that +hooks into the smgr interface, and rmgr extension in pgxn/neon_rmgr. The extensions are loaded into +the Postgres processes with shared_preload_libraries. Most of the Neon-specific code is in the +extensions, and for any new features, that is preferred over modifying core PostgreSQL code. Below is a list of all the PostgreSQL source code changes, categorized into changes needed for compute, and changes needed for the WAL redo process: From 0396ed67f710739c3b3bd8f1dafbcd517432a467 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Tue, 18 Jun 2024 13:30:56 +0300 Subject: [PATCH 205/220] Update comments on various items To update things that have changed since this was written, and to reflect discussions at offsite meeting. 
--- docs/core_changes.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docs/core_changes.md b/docs/core_changes.md index f86d5133e8..9dd4ea806b 100644 --- a/docs/core_changes.md +++ b/docs/core_changes.md @@ -38,6 +38,7 @@ The problem is that the XLOG_HEAP_INSERT record does not include the command id Bite the bullet and submit the patch to PostgreSQL, to add the t_cid to the WAL records. It makes the WAL records larger, which could make this unpopular in the PostgreSQL community. However, it might simplify some logical decoding code; Andres Freund briefly mentioned in PGCon 2022 discussion on Heikki's Neon presentation that logical decoding currently needs to jump through some hoops to reconstruct the same information. +Update from Heikki (2024-04-17): I tried to write an upstream patch for that, to use the t_cid field for logical decoding, but it was not as straightforward as it first sounded. ### Alternatives Perhaps we could write an extra WAL record with the t_cid information, when a page is evicted that contains rows that were touched a transaction that's still running. However, that seems very complicated. @@ -96,6 +97,8 @@ Maybe some bigger rewrite of FSM and VM would help to avoid WAL-logging FSM and also some changes in src/backend/storage/smgr/smgr.c ``` +pgvector 0.6.0 also needs a similar change, which would be very nice to get rid of too. + When a GIN index is built, for example, it is built by inserting the entries into the index more or less normally, but without WAL-logging anything. After the index has been built, we iterate through all pages and write them to the WAL. That doesn't work for Neon, because if a page is not WAL-logged @@ -110,6 +113,10 @@ an operation: `smgr_start_unlogged_build`, `smgr_finish_unlogged_build_phase_1` I think it would make sense to be more explicit about that in PostgreSQL too. So extract these changes to a patch and post to pgsql-hackers. +Perhaps we could deduce that an unlogged index build has started when we see a page being evicted +with zero LSN. How to be sure it's an unlogged index build rather than a bug? Currently we have a +check for that and PANIC if we see page with zero LSN being evicted. And how do we detect when the +index build has finished? See https://github.com/neondatabase/neon/pull/7440 for an attempt at that. ## Track last-written page LSN @@ -322,6 +329,8 @@ and finally WAL-log that the extension succeeded. Submit to upstream. This could be useful for the Disk Encryption patches too, or for compression. +We have submitted this to upstream, but it's moving at glacial a speed. +https://commitfest.postgresql.org/47/4428/ ## Added relpersistence argument to smgropen() From 33a09946fc5d0b5dec92ab84ce25803879e99358 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Tue, 18 Jun 2024 13:30:58 +0300 Subject: [PATCH 206/220] Prefetching has been implemented --- docs/core_changes.md | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/docs/core_changes.md b/docs/core_changes.md index 9dd4ea806b..57d1fdfe59 100644 --- a/docs/core_changes.md +++ b/docs/core_changes.md @@ -21,6 +21,18 @@ compute, and changes needed for the WAL redo process: # Changes for Compute node +## Prefetching + +There are changes in many places to perform prefetching, for example for sequential scans. Neon +doesn't benefit from OS readahead, and the latency to pageservers is quite high compared to local +disk, so prefetching is critical for performance, also for sequential scans. 
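
To make the point concrete: the win comes from keeping a window of page requests in flight instead of paying one pageserver round trip per block. A toy, standalone sketch of the idea (not Neon's actual prefetch code, which lives in the compute/extension sources):

```
// With a prefetch window, a sequential scan asks for the next few blocks while it
// is still processing the current one, hiding pageserver round-trip latency.
fn blocks_to_prefetch(current_block: u32, nblocks: u32, window: u32) -> Vec<u32> {
    let start = current_block + 1;
    let end = nblocks.min(start + window);
    (start..end).collect()
}

fn main() {
    // Reading block 100 of a 1000-block relation with a window of 8 keeps
    // blocks 101 through 108 requested ahead of time.
    println!("{:?}", blocks_to_prefetch(100, 1000, 8));
}
```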
+ +### How to get rid of the patch + +Upcoming "streaming read" work in v17 might simplify this. And async I/O work in v18 will hopefully +do more. + + ## Add t_cid to heap WAL records ``` @@ -482,19 +494,6 @@ hint bits are set. Wal logging hint bits updates requires FPI which significantl Add special WAL record for setting page hints. -## Prefetching - -### Why? - -As far as pages in Neon are loaded on demand, to reduce node startup time -and also speedup some massive queries we need some mechanism for bulk loading to -reduce page request round-trip overhead. - -Currently Postgres is supporting prefetching only for bitmap scan. -In Neon we should also use prefetch for sequential and index scans, because the OS is not doing it for us. -For sequential scan we could prefetch some number of following pages. For index scan we could prefetch pages -of heap relation addressed by TIDs. - ## Prewarming ### Why? From b774ab54d428a82bfad1772917e237dfd3939eb3 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Tue, 18 Jun 2024 13:31:01 +0300 Subject: [PATCH 207/220] Remove obsolete ones - Relation size cache was moved to extension - the changes in visibilitymap.c and freespace.c became unnecessary with v16, thanks to changes in upstream code - WALProposer was moved to extension - The hack in ReadBuffer_common to not throw an error on unexpected data beyond EOF was removed in v16 rebase. We haven't seen such errors, so I guess that was some early issue that was fixed long time ago. - The ginfast.c diff was made unnecessary by upstream commit 56b662523f --- docs/core_changes.md | 155 ------------------------------------------- 1 file changed, 155 deletions(-) diff --git a/docs/core_changes.md b/docs/core_changes.md index 57d1fdfe59..60b54825c4 100644 --- a/docs/core_changes.md +++ b/docs/core_changes.md @@ -55,50 +55,6 @@ Update from Heikki (2024-04-17): I tried to write an upstream patch for that, to ### Alternatives Perhaps we could write an extra WAL record with the t_cid information, when a page is evicted that contains rows that were touched a transaction that's still running. However, that seems very complicated. -## ginfast.c - -``` -diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c -index e0d9940946..2d964c02e9 100644 ---- a/src/backend/access/gin/ginfast.c -+++ b/src/backend/access/gin/ginfast.c -@@ -285,6 +285,17 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector) - memset(&sublist, 0, sizeof(GinMetaPageData)); - makeSublist(index, collector->tuples, collector->ntuples, &sublist); - -+ if (metadata->head != InvalidBlockNumber) -+ { -+ /* -+ * ZENITH: Get buffer before XLogBeginInsert() to avoid recursive call -+ * of XLogBeginInsert(). Reading a new buffer might evict a dirty page from -+ * the buffer cache, and if that page happens to be an FSM or VM page, zenith_write() -+ * will try to WAL-log an image of the page. -+ */ -+ buffer = ReadBuffer(index, metadata->tail); -+ } -+ - if (needWal) - XLogBeginInsert(); - -@@ -316,7 +327,6 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector) - data.prevTail = metadata->tail; - data.newRightlink = sublist.head; - -- buffer = ReadBuffer(index, metadata->tail); - LockBuffer(buffer, GIN_EXCLUSIVE); - page = BufferGetPage(buffer); -``` - -The problem is explained in the comment above - -### How to get rid of the patch - -Can we stop WAL-logging FSM or VM pages? Or delay the WAL logging until we're out of the critical -section or something. 
- -Maybe some bigger rewrite of FSM and VM would help to avoid WAL-logging FSM and VM page images? - - ## Mark index builds that use buffer manager without logging explicitly ``` @@ -160,57 +116,6 @@ The old method is still available, though. Wait until v15? -## Cache relation sizes - -The Neon extension contains a little cache for smgrnblocks() and smgrexists() calls, to avoid going -to the page server every time. It might be useful to cache those in PostgreSQL, maybe in the -relcache? (I think we do cache nblocks in relcache already, check why that's not good enough for -Neon) - - -## Use buffer manager when extending VM or FSM - -``` - src/backend/storage/freespace/freespace.c | 14 +- - src/backend/access/heap/visibilitymap.c | 15 +- - -diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c -index e198df65d8..addfe93eac 100644 ---- a/src/backend/access/heap/visibilitymap.c -+++ b/src/backend/access/heap/visibilitymap.c -@@ -652,10 +652,19 @@ vm_extend(Relation rel, BlockNumber vm_nblocks) - /* Now extend the file */ - while (vm_nblocks_now < vm_nblocks) - { -- PageSetChecksumInplace((Page) pg.data, vm_nblocks_now); -+ /* -+ * ZENITH: Initialize VM pages through buffer cache to prevent loading -+ * them from pageserver. -+ */ -+ Buffer buffer = ReadBufferExtended(rel, VISIBILITYMAP_FORKNUM, P_NEW, -+ RBM_ZERO_AND_LOCK, NULL); -+ Page page = BufferGetPage(buffer); -+ -+ PageInit((Page) page, BLCKSZ, 0); -+ PageSetChecksumInplace(page, vm_nblocks_now); -+ MarkBufferDirty(buffer); -+ UnlockReleaseBuffer(buffer); - -- smgrextend(rel->rd_smgr, VISIBILITYMAP_FORKNUM, vm_nblocks_now, -- pg.data, false); - vm_nblocks_now++; - } -``` - -### Problem we're trying to solve - -??? - -### How to get rid of the patch - -Maybe this would be a reasonable change in PostgreSQL too? - - ## Allow startup without reading checkpoint record In Neon, the compute node is stateless. So when we are launching compute node, we need to provide @@ -270,66 +175,6 @@ would be weird if the sequence moved backwards though, think of PITR. Or add a GUC for the amount to prefix to PostgreSQL, and force it to 1 in Neon. -## Walproposer - -``` - src/Makefile | 1 + - src/backend/replication/libpqwalproposer/Makefile | 37 + - src/backend/replication/libpqwalproposer/libpqwalproposer.c | 416 ++++++++++++ - src/backend/postmaster/bgworker.c | 4 + - src/backend/postmaster/postmaster.c | 6 + - src/backend/replication/Makefile | 4 +- - src/backend/replication/walproposer.c | 2350 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - src/backend/replication/walproposer_utils.c | 402 +++++++++++ - src/backend/replication/walreceiver.c | 7 + - src/backend/replication/walsender.c | 320 ++++++--- - src/backend/storage/ipc/ipci.c | 6 + - src/include/replication/walproposer.h | 565 ++++++++++++++++ -``` - -WAL proposer is communicating with safekeeper and ensures WAL durability by quorum writes. It is -currently implemented as patch to standard WAL sender. - -### How to get rid of the patch - -Refactor into an extension. Submit hooks or APIs into upstream if necessary. - -@MMeent did some work on this already: https://github.com/neondatabase/postgres/pull/96 - -## Ignore unexpected data beyond EOF in bufmgr.c - -``` -@@ -922,11 +928,14 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, - */ - bufBlock = isLocalBuf ? 
LocalBufHdrGetBlock(bufHdr) : BufHdrGetBlock(bufHdr); - if (!PageIsNew((Page) bufBlock)) -- ereport(ERROR, -+ { -+ // XXX-ZENITH -+ MemSet((char *) bufBlock, 0, BLCKSZ); -+ ereport(DEBUG1, - (errmsg("unexpected data beyond EOF in block %u of relation %s", - blockNum, relpath(smgr->smgr_rnode, forkNum)), - errhint("This has been seen to occur with buggy kernels; consider updating your system."))); -- -+ } - /* - * We *must* do smgrextend before succeeding, else the page will not - * be reserved by the kernel, and the next P_NEW call will decide to -``` - -PostgreSQL is a bit sloppy with extending relations. Usually, the relation is extended with zeros -first, then the page is filled, and finally the new page WAL-logged. But if multiple backends extend -a relation at the same time, the pages can be WAL-logged in different order. - -I'm not sure what scenario exactly required this change in Neon, though. - -### How to get rid of the patch - -Submit patches to pgsql-hackers, to tighten up the WAL-logging around relation extension. It's a bit -confusing even in PostgreSQL. Maybe WAL log the intention to extend first, then extend the relation, -and finally WAL-log that the extension succeeded. - ## Make smgr interface available to extensions ``` From 1c1b4b0c040b07a7bad2d873ad4189d84096c9e5 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Tue, 18 Jun 2024 13:31:03 +0300 Subject: [PATCH 208/220] Add a bunch of items for new changes that we've made --- docs/core_changes.md | 142 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 142 insertions(+) diff --git a/docs/core_changes.md b/docs/core_changes.md index 60b54825c4..6cc19d8b62 100644 --- a/docs/core_changes.md +++ b/docs/core_changes.md @@ -311,6 +311,148 @@ Ignore it. This is only needed for disaster recovery, so once we've eliminated a patches, we can just keep it around as a patch or as separate branch in a repo. +## pg_waldump flags to ignore errors + +After creating a new project or branch in Neon, the first timeline can begin in the middle of a WAL segment. pg_waldump chokes on that, so we added some flags to make it possible to ignore errors. + +### How to get rid of the patch + +Like previous one, ignore it. + + + +## Backpressure if pageserver doesn't ingest WAL fast enough + +``` +@@ -3200,6 +3202,7 @@ ProcessInterrupts(void) + return; + InterruptPending = false; + ++retry: + if (ProcDiePending) + { + ProcDiePending = false; +@@ -3447,6 +3450,13 @@ ProcessInterrupts(void) + + if (ParallelApplyMessagePending) + HandleParallelApplyMessages(); ++ ++ /* Call registered callback if any */ ++ if (ProcessInterruptsCallback) ++ { ++ if (ProcessInterruptsCallback()) ++ goto retry; ++ } + } +``` + + +### How to get rid of the patch + +Submit a patch to upstream, for a hook in ProcessInterrupts. Could be useful for other extensions +too. + + +## SLRU on-demand download + +``` + src/backend/access/transam/slru.c | 105 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++------------- + 1 file changed, 92 insertions(+), 13 deletions(-) +``` + +### Problem we're trying to solve + +Previously, SLRU files were included in the basebackup, but the total size of them can be large, +several GB, and downloading them all made the startup time too long. + +### Alternatives + +FUSE hook or LD_PRELOAD trick to intercept the reads on SLRU files + + +## WAL-log an all-zeros page as one large hole + +- In XLogRecordAssemble() + +### Problem we're trying to solve + +This change was made in v16. 
Starting with v16, when PostgreSQL extends a relation, it first extends
+it with zeros, and it can extend the relation more than one block at a time. The all-zeros page is WAL-logged, but it's very wasteful to include 8 kB of zeros in the WAL for that. This hack was made so that we WAL-log a compact record with a whole-page "hole". However, PostgreSQL has assertions that prevent such WAL records from being replayed, so this breaks compatibility such that unmodified PostgreSQL cannot process Neon-generated WAL.
+
+### How to get rid of the patch
+
+Find another compact representation for a full-page image of an all-zeros page. A compressed image perhaps.
+
+
+## Shut down walproposer after checkpointer
+
+```
++ /* Neon: Also allow walproposer background worker to be treated like a WAL sender, so that it's shut down last */
++ if ((bp->bkend_type == BACKEND_TYPE_NORMAL || bp->bkend_type == BACKEND_TYPE_BGWORKER) &&
+```
+
+This change was needed so that the postmaster shuts down the walproposer process only after the shutdown checkpoint record is written. Otherwise, the shutdown record will never make it to the safekeepers.
+
+### How to get rid of the patch
+
+Do a bigger refactoring of the postmaster state machine, such that a background worker can specify
+the shutdown ordering by itself. The postmaster state machine has grown pretty complicated, and
+would benefit from a refactoring for the sake of readability anyway.
+
+
+## EXPLAIN changes for prefetch and LFC
+
+### How to get rid of the patch
+
+Konstantin submitted a patch to -hackers already: https://commitfest.postgresql.org/47/4643/. Get that into a committable state.
+
+
+## On-demand download of extensions
+
+### How to get rid of the patch
+
+FUSE or LD_PRELOAD trickery to intercept reads?
+
+
+## Publication superuser checks
+
+We have hacked CreatePublication so that neon_superuser can also create them.
+
+### How to get rid of the patch
+
+Create an upstream patch with more fine-grained privileges for publications CREATE/DROP that can be GRANTed to users.
+
+
+## WAL log replication slots
+
+### How to get rid of the patch
+
+Utilize the upcoming v17 "slot sync worker", or a similar neon-specific background worker process, to periodically WAL-log the slots, or to export them somewhere else.
+
+
+## WAL-log replication snapshots
+
+### How to get rid of the patch
+
+WAL-log them periodically, from a background worker.
+
+
+## WAL-log relmapper files
+
+Similarly to replication snapshot files, the CID mapping files generated during VACUUM FULL of a catalog table are WAL-logged.
+
+### How to get rid of the patch
+
+WAL-log them periodically, from a background worker.
+
+
+## XLogWaitForReplayOf()
+
+??
+
+
+

 # Not currently committed but proposed

 ## Disable ring buffer buffer manager strategies

From 560627b525d7abc0b25410fafc4f9fa523c3a078 Mon Sep 17 00:00:00 2001
From: Heikki Linnakangas
Date: Tue, 18 Jun 2024 13:38:15 +0300
Subject: [PATCH 209/220] Replace a few references to Zenith with neon

---
 docs/core_changes.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/core_changes.md b/docs/core_changes.md
index 6cc19d8b62..1388317728 100644
--- a/docs/core_changes.md
+++ b/docs/core_changes.md
@@ -156,7 +156,7 @@ index 0415df9ccb..9f9db3c8bc 100644
 * crash we can lose (skip over) as many values as we pre-logged.
 */
 -#define SEQ_LOG_VALS 32
-+/* Zenith XXX: to ensure sequence order of sequence in Zenith we need to WAL log each sequence update.
*/ ++/* Neon XXX: to ensure sequence order of sequence in Zenith we need to WAL log each sequence update. */ +/* #define SEQ_LOG_VALS 32 */ +#define SEQ_LOG_VALS 0 ``` @@ -485,6 +485,6 @@ Add special WAL record for setting page hints. ### Why? -Short downtime (or, in other words, fast compute node restart time) is one of the key feature of Zenith. +Short downtime (or, in other words, fast compute node restart time) is one of the key feature of Neon. But overhead of request-response round-trip for loading pages on demand can make started node warm-up quite slow. We can capture state of compute node buffer cache and send bulk request for this pages at startup. From 30b890e378771c95a4ef08d7724d39ff1fad39a9 Mon Sep 17 00:00:00 2001 From: Yuchen Liang <70461588+yliang412@users.noreply.github.com> Date: Tue, 18 Jun 2024 13:37:06 -0400 Subject: [PATCH 210/220] feat(pageserver): use leases to temporarily block gc (#8084) Part of #7497, extracts from #7996, closes #8063. ## Problem With the LSN lease API introduced in https://github.com/neondatabase/neon/issues/7808, we want to implement the real lease logic so that GC will keep all the layers needed to reconstruct all pages at all the leased LSNs with valid leases at a given time. Signed-off-by: Yuchen Liang --- control_plane/src/pageserver.rs | 8 ++ libs/pageserver_api/src/models.rs | 16 +++ pageserver/src/http/routes.rs | 2 +- pageserver/src/page_service.rs | 2 +- pageserver/src/repository.rs | 2 + pageserver/src/tenant.rs | 110 ++++++++++++++++- pageserver/src/tenant/config.rs | 31 +++++ pageserver/src/tenant/tasks.rs | 24 ++++ pageserver/src/tenant/timeline.rs | 116 ++++++++++++++++-- .../regress/test_attach_tenant_config.py | 2 + 10 files changed, 294 insertions(+), 19 deletions(-) diff --git a/control_plane/src/pageserver.rs b/control_plane/src/pageserver.rs index 5a84763697..13e684da24 100644 --- a/control_plane/src/pageserver.rs +++ b/control_plane/src/pageserver.rs @@ -383,6 +383,10 @@ impl PageServerNode { .map(|x| x.parse::()) .transpose() .context("Failed to parse 'switch_aux_file_policy'")?, + lsn_lease_length: settings.remove("lsn_lease_length").map(|x| x.to_string()), + lsn_lease_length_for_ts: settings + .remove("lsn_lease_length_for_ts") + .map(|x| x.to_string()), }; if !settings.is_empty() { bail!("Unrecognized tenant settings: {settings:?}") @@ -506,6 +510,10 @@ impl PageServerNode { .map(|x| x.parse::()) .transpose() .context("Failed to parse 'switch_aux_file_policy'")?, + lsn_lease_length: settings.remove("lsn_lease_length").map(|x| x.to_string()), + lsn_lease_length_for_ts: settings + .remove("lsn_lease_length_for_ts") + .map(|x| x.to_string()), } }; diff --git a/libs/pageserver_api/src/models.rs b/libs/pageserver_api/src/models.rs index 9311dab33c..70db0b7344 100644 --- a/libs/pageserver_api/src/models.rs +++ b/libs/pageserver_api/src/models.rs @@ -177,6 +177,20 @@ serde_with::serde_conv!( |value: String| -> Result<_, humantime::TimestampError> { humantime::parse_rfc3339(&value) } ); +impl LsnLease { + /// The default length for an explicit LSN lease request (10 minutes). + pub const DEFAULT_LENGTH: Duration = Duration::from_secs(10 * 60); + + /// The default length for an implicit LSN lease granted during + /// `get_lsn_by_timestamp` request (1 minutes). + pub const DEFAULT_LENGTH_FOR_TS: Duration = Duration::from_secs(60); + + /// Checks whether the lease is expired. 
+ pub fn is_expired(&self, now: &SystemTime) -> bool { + now > &self.valid_until + } +} + /// The only [`TenantState`] variants we could be `TenantState::Activating` from. #[derive(Clone, Copy, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)] pub enum ActivatingFrom { @@ -322,6 +336,8 @@ pub struct TenantConfig { pub timeline_get_throttle: Option, pub image_layer_creation_check_threshold: Option, pub switch_aux_file_policy: Option, + pub lsn_lease_length: Option, + pub lsn_lease_length_for_ts: Option, } /// The policy for the aux file storage. It can be switched through `switch_aux_file_policy` diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index 657708c0d6..482879630a 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -1730,7 +1730,7 @@ async fn lsn_lease_handler( active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id) .await?; let result = timeline - .make_lsn_lease(lsn, &ctx) + .make_lsn_lease(lsn, timeline.get_lsn_lease_length(), &ctx) .map_err(|e| ApiError::InternalServerError(e.context("lsn lease http handler")))?; json_response(StatusCode::OK, result) diff --git a/pageserver/src/page_service.rs b/pageserver/src/page_service.rs index ae389826d5..ebc23e8945 100644 --- a/pageserver/src/page_service.rs +++ b/pageserver/src/page_service.rs @@ -935,7 +935,7 @@ impl PageServerHandler { let timeline = self .get_active_tenant_timeline(tenant_shard_id.tenant_id, timeline_id, shard_selector) .await?; - let lease = timeline.make_lsn_lease(lsn, ctx)?; + let lease = timeline.make_lsn_lease(lsn, timeline.get_lsn_lease_length(), ctx)?; let valid_until = lease .valid_until .duration_since(SystemTime::UNIX_EPOCH) diff --git a/pageserver/src/repository.rs b/pageserver/src/repository.rs index 7b30c3ecf7..5a334d0290 100644 --- a/pageserver/src/repository.rs +++ b/pageserver/src/repository.rs @@ -240,6 +240,7 @@ pub struct GcResult { pub layers_needed_by_cutoff: u64, pub layers_needed_by_pitr: u64, pub layers_needed_by_branches: u64, + pub layers_needed_by_leases: u64, pub layers_not_updated: u64, pub layers_removed: u64, // # of layer files removed because they have been made obsolete by newer ondisk files. 
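The lease bookkeeping behind the new `layers_needed_by_leases` counter is small enough to show in isolation. A minimal, self-contained sketch of the idea (with `Lsn` and `LsnLease` reduced to stand-in definitions rather than the pageserver's real types): expired leases are pruned first, and GC then only has to respect the largest remaining leased LSN.

```rust
use std::collections::BTreeMap;
use std::time::{Duration, SystemTime};

// Stand-ins for the pageserver types, for illustration only.
type Lsn = u64;

#[derive(Clone)]
struct LsnLease {
    valid_until: SystemTime,
}

impl LsnLease {
    fn is_expired(&self, now: &SystemTime) -> bool {
        now > &self.valid_until
    }
}

fn main() {
    let now = SystemTime::now();
    let mut leases: BTreeMap<Lsn, LsnLease> = BTreeMap::new();
    leases.insert(0x30, LsnLease { valid_until: now + Duration::from_secs(600) });
    leases.insert(0x50, LsnLease { valid_until: now - Duration::from_secs(1) });

    // Drop leases that have already expired before computing GC cutoffs.
    leases.retain(|_, lease| !lease.is_expired(&now));

    // GC only needs the largest leased LSN: any layer whose LSN range starts
    // at or below it has to be kept (and counted in layers_needed_by_leases).
    let max_lsn_with_valid_lease = leases.last_key_value().map(|(lsn, _)| *lsn);
    assert_eq!(max_lsn_with_valid_lease, Some(0x30));
}
```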
@@ -269,6 +270,7 @@ impl AddAssign for GcResult { self.layers_needed_by_pitr += other.layers_needed_by_pitr; self.layers_needed_by_cutoff += other.layers_needed_by_cutoff; self.layers_needed_by_branches += other.layers_needed_by_branches; + self.layers_needed_by_leases += other.layers_needed_by_leases; self.layers_not_updated += other.layers_not_updated; self.layers_removed += other.layers_removed; diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 801321e36d..ca5765c99b 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -31,6 +31,7 @@ use remote_storage::DownloadError; use remote_storage::GenericRemoteStorage; use remote_storage::TimeoutOrCancel; use std::fmt; +use std::time::SystemTime; use storage_broker::BrokerClientChannel; use tokio::io::BufReader; use tokio::sync::watch; @@ -65,9 +66,9 @@ use self::timeline::uninit::TimelineCreateGuard; use self::timeline::uninit::TimelineExclusionError; use self::timeline::uninit::UninitializedTimeline; use self::timeline::EvictionTaskTenantState; +use self::timeline::GcCutoffs; use self::timeline::TimelineResources; use self::timeline::WaitLsnError; -use self::timeline::{GcCutoffs, GcInfo}; use crate::config::PageServerConf; use crate::context::{DownloadBehavior, RequestContext}; use crate::deletion_queue::DeletionQueueClient; @@ -2428,6 +2429,13 @@ impl Tenant { } } + pub fn get_lsn_lease_length(&self) -> Duration { + let tenant_conf = self.tenant_conf.load().tenant_conf.clone(); + tenant_conf + .lsn_lease_length + .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length) + } + pub fn set_new_tenant_config(&self, new_tenant_conf: TenantConfOpt) { // Use read-copy-update in order to avoid overwriting the location config // state if this races with [`Tenant::set_new_location_config`]. Note that @@ -3010,12 +3018,13 @@ impl Tenant { { let mut target = timeline.gc_info.write().unwrap(); + let now = SystemTime::now(); + target.leases.retain(|_, lease| !lease.is_expired(&now)); + match gc_cutoffs.remove(&timeline.timeline_id) { Some(cutoffs) => { - *target = GcInfo { - retain_lsns: branchpoints, - cutoffs, - }; + target.retain_lsns = branchpoints; + target.cutoffs = cutoffs; } None => { // reasons for this being unavailable: @@ -3833,6 +3842,8 @@ pub(crate) mod harness { tenant_conf.image_layer_creation_check_threshold, ), switch_aux_file_policy: Some(tenant_conf.switch_aux_file_policy), + lsn_lease_length: Some(tenant_conf.lsn_lease_length), + lsn_lease_length_for_ts: Some(tenant_conf.lsn_lease_length_for_ts), } } } @@ -6939,4 +6950,93 @@ mod tests { Ok(()) } + + #[tokio::test] + async fn test_lsn_lease() -> anyhow::Result<()> { + let (tenant, ctx) = TenantHarness::create("test_lsn_lease")?.load().await; + let key = Key::from_hex("010000000033333333444444445500000000").unwrap(); + + let end_lsn = Lsn(0x100); + let image_layers = (0x20..=0x90) + .step_by(0x10) + .map(|n| { + ( + Lsn(n), + vec![(key, test_img(&format!("data key at {:x}", n)))], + ) + }) + .collect(); + + let timeline = tenant + .create_test_timeline_with_layers( + TIMELINE_ID, + Lsn(0x10), + DEFAULT_PG_VERSION, + &ctx, + Vec::new(), + image_layers, + end_lsn, + ) + .await?; + + let leased_lsns = [0x30, 0x50, 0x70]; + let mut leases = Vec::new(); + let _: anyhow::Result<_> = leased_lsns.iter().try_for_each(|n| { + leases.push(timeline.make_lsn_lease(Lsn(*n), timeline.get_lsn_lease_length(), &ctx)?); + Ok(()) + }); + + // Renewing with shorter lease should not change the lease. 
+ let updated_lease_0 = + timeline.make_lsn_lease(Lsn(leased_lsns[0]), Duration::from_secs(0), &ctx)?; + assert_eq!(updated_lease_0.valid_until, leases[0].valid_until); + + // Renewing with a long lease should renew lease with later expiration time. + let updated_lease_1 = timeline.make_lsn_lease( + Lsn(leased_lsns[1]), + timeline.get_lsn_lease_length() * 2, + &ctx, + )?; + + assert!(updated_lease_1.valid_until > leases[1].valid_until); + + // Force set disk consistent lsn so we can get the cutoff at `end_lsn`. + info!( + "latest_gc_cutoff_lsn: {}", + *timeline.get_latest_gc_cutoff_lsn() + ); + timeline.force_set_disk_consistent_lsn(end_lsn); + + let res = tenant + .gc_iteration( + Some(TIMELINE_ID), + 0, + Duration::ZERO, + &CancellationToken::new(), + &ctx, + ) + .await?; + + // Keeping everything <= Lsn(0x80) b/c leases: + // 0/10: initdb layer + // (0/20..=0/70).step_by(0x10): image layers added when creating the timeline. + assert_eq!(res.layers_needed_by_leases, 7); + // Keeping 0/90 b/c it is the latest layer. + assert_eq!(res.layers_not_updated, 1); + // Removed 0/80. + assert_eq!(res.layers_removed, 1); + + // Make lease on a already GC-ed LSN. + // 0/80 does not have a valid lease + is below latest_gc_cutoff + assert!(Lsn(0x80) < *timeline.get_latest_gc_cutoff_lsn()); + let res = timeline.make_lsn_lease(Lsn(0x80), timeline.get_lsn_lease_length(), &ctx); + assert!(res.is_err()); + + // Should still be able to renew a currently valid lease + // Assumption: original lease to is still valid for 0/50. + let _ = + timeline.make_lsn_lease(Lsn(leased_lsns[1]), timeline.get_lsn_lease_length(), &ctx)?; + + Ok(()) + } } diff --git a/pageserver/src/tenant/config.rs b/pageserver/src/tenant/config.rs index 342d705954..1b9be12642 100644 --- a/pageserver/src/tenant/config.rs +++ b/pageserver/src/tenant/config.rs @@ -13,6 +13,7 @@ use pageserver_api::models::AuxFilePolicy; use pageserver_api::models::CompactionAlgorithm; use pageserver_api::models::CompactionAlgorithmSettings; use pageserver_api::models::EvictionPolicy; +use pageserver_api::models::LsnLease; use pageserver_api::models::{self, ThrottleConfig}; use pageserver_api::shard::{ShardCount, ShardIdentity, ShardNumber, ShardStripeSize}; use serde::de::IntoDeserializer; @@ -377,6 +378,16 @@ pub struct TenantConf { /// There is a `last_aux_file_policy` flag which gets persisted in `index_part.json` once the first aux /// file is written. pub switch_aux_file_policy: AuxFilePolicy, + + /// The length for an explicit LSN lease request. + /// Layers needed to reconstruct pages at LSN will not be GC-ed during this interval. + #[serde(with = "humantime_serde")] + pub lsn_lease_length: Duration, + + /// The length for an implicit LSN lease granted as part of `get_lsn_by_timestamp` request. + /// Layers needed to reconstruct pages at LSN will not be GC-ed during this interval. 
+ #[serde(with = "humantime_serde")] + pub lsn_lease_length_for_ts: Duration, } /// Same as TenantConf, but this struct preserves the information about @@ -476,6 +487,16 @@ pub struct TenantConfOpt { #[serde(skip_serializing_if = "Option::is_none")] #[serde(default)] pub switch_aux_file_policy: Option, + + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(with = "humantime_serde")] + #[serde(default)] + pub lsn_lease_length: Option, + + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(with = "humantime_serde")] + #[serde(default)] + pub lsn_lease_length_for_ts: Option, } impl TenantConfOpt { @@ -538,6 +559,12 @@ impl TenantConfOpt { switch_aux_file_policy: self .switch_aux_file_policy .unwrap_or(global_conf.switch_aux_file_policy), + lsn_lease_length: self + .lsn_lease_length + .unwrap_or(global_conf.lsn_lease_length), + lsn_lease_length_for_ts: self + .lsn_lease_length_for_ts + .unwrap_or(global_conf.lsn_lease_length_for_ts), } } } @@ -582,6 +609,8 @@ impl Default for TenantConf { timeline_get_throttle: crate::tenant::throttle::Config::disabled(), image_layer_creation_check_threshold: DEFAULT_IMAGE_LAYER_CREATION_CHECK_THRESHOLD, switch_aux_file_policy: AuxFilePolicy::default_tenant_config(), + lsn_lease_length: LsnLease::DEFAULT_LENGTH, + lsn_lease_length_for_ts: LsnLease::DEFAULT_LENGTH_FOR_TS, } } } @@ -657,6 +686,8 @@ impl From for models::TenantConfig { timeline_get_throttle: value.timeline_get_throttle.map(ThrottleConfig::from), image_layer_creation_check_threshold: value.image_layer_creation_check_threshold, switch_aux_file_policy: value.switch_aux_file_policy, + lsn_lease_length: value.lsn_lease_length.map(humantime), + lsn_lease_length_for_ts: value.lsn_lease_length_for_ts.map(humantime), } } } diff --git a/pageserver/src/tenant/tasks.rs b/pageserver/src/tenant/tasks.rs index a6dfa84f35..d679b78f32 100644 --- a/pageserver/src/tenant/tasks.rs +++ b/pageserver/src/tenant/tasks.rs @@ -346,6 +346,7 @@ async fn gc_loop(tenant: Arc, cancel: CancellationToken) { // cutoff specified as time. let ctx = RequestContext::todo_child(TaskKind::GarbageCollector, DownloadBehavior::Download); + let mut first = true; loop { tokio::select! { @@ -362,6 +363,14 @@ async fn gc_loop(tenant: Arc, cancel: CancellationToken) { if first { first = false; + + if delay_by_lease_length(tenant.get_lsn_lease_length(), &cancel) + .await + .is_err() + { + break; + } + if random_init_delay(period, &cancel).await.is_err() { break; } @@ -531,6 +540,21 @@ pub(crate) async fn random_init_delay( } } +/// Delays GC by defaul lease length at restart. +/// +/// We do this as the leases mapping are not persisted to disk. By delaying GC by default +/// length, we gurantees that all the leases we granted before the restart will expire +/// when we run GC for the first time after the restart. +pub(crate) async fn delay_by_lease_length( + length: Duration, + cancel: &CancellationToken, +) -> Result<(), Cancelled> { + match tokio::time::timeout(length, cancel.cancelled()).await { + Ok(_) => Err(Cancelled), + Err(_) => Ok(()), + } +} + /// Attention: the `task` and `period` beocme labels of a pageserver-wide prometheus metric. 
pub(crate) fn warn_when_period_overrun( elapsed: Duration, diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 08bec329e1..a4f1108635 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -47,7 +47,6 @@ use utils::{ vec_map::VecMap, }; -use std::ops::{Deref, Range}; use std::pin::pin; use std::sync::atomic::Ordering as AtomicOrdering; use std::sync::{Arc, Mutex, RwLock, Weak}; @@ -61,6 +60,10 @@ use std::{ cmp::{max, min, Ordering}, ops::ControlFlow, }; +use std::{ + collections::btree_map::Entry, + ops::{Deref, Range}, +}; use crate::metrics::GetKind; use crate::pgdatadir_mapping::MAX_AUX_FILE_V2_DELTAS; @@ -454,6 +457,9 @@ pub(crate) struct GcInfo { /// The cutoff coordinates, which are combined by selecting the minimum. pub(crate) cutoffs: GcCutoffs, + + /// Leases granted to particular LSNs. + pub(crate) leases: BTreeMap, } impl GcInfo { @@ -1555,17 +1561,46 @@ impl Timeline { Ok(()) } - /// Obtains a temporary lease blocking garbage collection for the given LSN + /// Obtains a temporary lease blocking garbage collection for the given LSN. + /// + /// This function will error if the requesting LSN is less than the `latest_gc_cutoff_lsn` and there is also + /// no existing lease to renew. If there is an existing lease in the map, the lease will be renewed only if + /// the request extends the lease. The returned lease is therefore the maximum between the existing lease and + /// the requesting lease. pub(crate) fn make_lsn_lease( &self, - _lsn: Lsn, + lsn: Lsn, + length: Duration, _ctx: &RequestContext, ) -> anyhow::Result { - const LEASE_LENGTH: Duration = Duration::from_secs(5 * 60); - let lease = LsnLease { - valid_until: SystemTime::now() + LEASE_LENGTH, + let lease = { + let mut gc_info = self.gc_info.write().unwrap(); + + let valid_until = SystemTime::now() + length; + + let entry = gc_info.leases.entry(lsn); + + let lease = { + if let Entry::Occupied(mut occupied) = entry { + let existing_lease = occupied.get_mut(); + if valid_until > existing_lease.valid_until { + existing_lease.valid_until = valid_until; + } + existing_lease.clone() + } else { + // Reject already GC-ed LSN (lsn < latest_gc_cutoff) + let latest_gc_cutoff_lsn = self.get_latest_gc_cutoff_lsn(); + if lsn < *latest_gc_cutoff_lsn { + bail!("tried to request a page version that was garbage collected. 
requested at {} gc cutoff {}", lsn, *latest_gc_cutoff_lsn); + } + + entry.or_insert(LsnLease { valid_until }).clone() + } + }; + + lease }; - // TODO: dummy implementation + Ok(lease) } @@ -2082,6 +2117,24 @@ const REPARTITION_FREQ_IN_CHECKPOINT_DISTANCE: u64 = 10; // Private functions impl Timeline { + pub(crate) fn get_lsn_lease_length(&self) -> Duration { + let tenant_conf = self.tenant_conf.load(); + tenant_conf + .tenant_conf + .lsn_lease_length + .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length) + } + + // TODO(yuchen): remove unused flag after implementing https://github.com/neondatabase/neon/issues/8072 + #[allow(unused)] + pub(crate) fn get_lsn_lease_length_for_ts(&self) -> Duration { + let tenant_conf = self.tenant_conf.load(); + tenant_conf + .tenant_conf + .lsn_lease_length_for_ts + .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length_for_ts) + } + pub(crate) fn get_switch_aux_file_policy(&self) -> AuxFilePolicy { let tenant_conf = self.tenant_conf.load(); tenant_conf @@ -4907,13 +4960,25 @@ impl Timeline { return Err(GcError::TimelineCancelled); } - let (horizon_cutoff, pitr_cutoff, retain_lsns) = { + let (horizon_cutoff, pitr_cutoff, retain_lsns, max_lsn_with_valid_lease) = { let gc_info = self.gc_info.read().unwrap(); let horizon_cutoff = min(gc_info.cutoffs.horizon, self.get_disk_consistent_lsn()); let pitr_cutoff = gc_info.cutoffs.pitr; let retain_lsns = gc_info.retain_lsns.clone(); - (horizon_cutoff, pitr_cutoff, retain_lsns) + + // Gets the maximum LSN that holds the valid lease. + // + // Caveat: `refresh_gc_info` is in charged of updating the lease map. + // Here, we do not check for stale leases again. + let max_lsn_with_valid_lease = gc_info.leases.last_key_value().map(|(lsn, _)| *lsn); + + ( + horizon_cutoff, + pitr_cutoff, + retain_lsns, + max_lsn_with_valid_lease, + ) }; let mut new_gc_cutoff = Lsn::min(horizon_cutoff, pitr_cutoff); @@ -4944,7 +5009,13 @@ impl Timeline { .set(Lsn::INVALID.0 as i64); let res = self - .gc_timeline(horizon_cutoff, pitr_cutoff, retain_lsns, new_gc_cutoff) + .gc_timeline( + horizon_cutoff, + pitr_cutoff, + retain_lsns, + max_lsn_with_valid_lease, + new_gc_cutoff, + ) .instrument( info_span!("gc_timeline", timeline_id = %self.timeline_id, cutoff = %new_gc_cutoff), ) @@ -4961,6 +5032,7 @@ impl Timeline { horizon_cutoff: Lsn, pitr_cutoff: Lsn, retain_lsns: Vec, + max_lsn_with_valid_lease: Option, new_gc_cutoff: Lsn, ) -> Result { // FIXME: if there is an ongoing detach_from_ancestor, we should just skip gc @@ -5009,7 +5081,8 @@ impl Timeline { // 1. it is older than cutoff LSN; // 2. it is older than PITR interval; // 3. it doesn't need to be retained for 'retain_lsns'; - // 4. newer on-disk image layers cover the layer's whole key range + // 4. it does not need to be kept for LSNs holding valid leases. + // 5. newer on-disk image layers cover the layer's whole key range // // TODO holding a write lock is too agressive and avoidable let mut guard = self.layers.write().await; @@ -5060,7 +5133,21 @@ impl Timeline { } } - // 4. Is there a later on-disk layer for this relation? + // 4. Is there a valid lease that requires us to keep this layer? + if let Some(lsn) = &max_lsn_with_valid_lease { + // keep if layer start <= any of the lease + if &l.get_lsn_range().start <= lsn { + debug!( + "keeping {} because there is a valid lease preventing GC at {}", + l.layer_name(), + lsn, + ); + result.layers_needed_by_leases += 1; + continue 'outer; + } + } + + // 5. Is there a later on-disk layer for this relation? 
// // The end-LSN is exclusive, while disk_consistent_lsn is // inclusive. For example, if disk_consistent_lsn is 100, it is @@ -5438,6 +5525,11 @@ impl Timeline { self.last_record_lsn.advance(new_lsn); } + #[cfg(test)] + pub(super) fn force_set_disk_consistent_lsn(&self, new_value: Lsn) { + self.disk_consistent_lsn.store(new_value); + } + /// Force create an image layer and place it into the layer map. /// /// DO NOT use this function directly. Use [`Tenant::branch_timeline_test_with_layers`] diff --git a/test_runner/regress/test_attach_tenant_config.py b/test_runner/regress/test_attach_tenant_config.py index 1d193b8999..f4667a82dc 100644 --- a/test_runner/regress/test_attach_tenant_config.py +++ b/test_runner/regress/test_attach_tenant_config.py @@ -195,6 +195,8 @@ def test_fully_custom_config(positive_env: NeonEnv): "walreceiver_connect_timeout": "13m", "image_layer_creation_check_threshold": 1, "switch_aux_file_policy": "cross-validation", + "lsn_lease_length": "1m", + "lsn_lease_length_for_ts": "5s", } ps_http = env.pageserver.http_client() From 6bb8b1d7c29a11d23b44a8f9a656223f0a410005 Mon Sep 17 00:00:00 2001 From: Arseny Sher Date: Tue, 18 Jun 2024 16:13:47 +0300 Subject: [PATCH 211/220] Remove dead code from walproposer_pg.c Now that logical walsenders fetch WAL from safekeepers recovery in walproposer is not needed. Fixes warnings. --- pgxn/neon/walproposer_pg.c | 194 +------------------------------------ 1 file changed, 3 insertions(+), 191 deletions(-) diff --git a/pgxn/neon/walproposer_pg.c b/pgxn/neon/walproposer_pg.c index 316e23a72e..da1a6f76f0 100644 --- a/pgxn/neon/walproposer_pg.c +++ b/pgxn/neon/walproposer_pg.c @@ -100,17 +100,12 @@ static void StartProposerReplication(WalProposer *wp, StartReplicationCmd *cmd); static void WalSndLoop(WalProposer *wp); static void XLogBroadcastWalProposer(WalProposer *wp); -static void XLogWalPropWrite(WalProposer *wp, char *buf, Size nbytes, XLogRecPtr recptr); -static void XLogWalPropClose(XLogRecPtr recptr); - static void add_nwr_event_set(Safekeeper *sk, uint32 events); static void update_nwr_event_set(Safekeeper *sk, uint32 events); static void rm_safekeeper_event_set(Safekeeper *to_remove, bool is_sk); static void CheckGracefulShutdown(WalProposer *wp); -static XLogRecPtr GetLogRepRestartLSN(WalProposer *wp); - static void init_walprop_config(bool syncSafekeepers) { @@ -1236,8 +1231,6 @@ StartProposerReplication(WalProposer *wp, StartReplicationCmd *cmd) static void WalSndLoop(WalProposer *wp) { - XLogRecPtr flushPtr; - /* Clear any already-pending wakeups */ ResetLatch(MyLatch); @@ -1333,8 +1326,9 @@ XLogBroadcastWalProposer(WalProposer *wp) } /* - Used to download WAL before basebackup for logical walsenders from sk, no longer - needed because walsender always uses neon_walreader. + Used to download WAL before basebackup for walproposer/logical walsenders. No + longer used, replaced by neon_walreader; but callback still exists because + simulation tests use it. */ static bool WalProposerRecovery(WalProposer *wp, Safekeeper *sk) @@ -1342,136 +1336,6 @@ WalProposerRecovery(WalProposer *wp, Safekeeper *sk) return true; } -/* - * These variables are used similarly to openLogFile/SegNo, - * but for walproposer to write the XLOG during recovery. walpropFileTLI is the TimeLineID - * corresponding the filename of walpropFile. - */ -static int walpropFile = -1; -static TimeLineID walpropFileTLI = 0; -static XLogSegNo walpropSegNo = 0; - -/* - * Write XLOG data to disk. 
- */ -static void -XLogWalPropWrite(WalProposer *wp, char *buf, Size nbytes, XLogRecPtr recptr) -{ - int startoff; - int byteswritten; - - /* - * Apart from walproposer, basebackup LSN page is also written out by - * postgres itself which writes WAL only in pages, and in basebackup it is - * inherently dummy (only safekeepers have historic WAL). Update WAL - * buffers here to avoid dummy page overwriting correct one we download - * here. Ugly, but alternatives are about the same ugly. We won't need - * that if we switch to on-demand WAL download from safekeepers, without - * writing to disk. - * - * https://github.com/neondatabase/neon/issues/5749 - */ - if (!wp->config->syncSafekeepers) - XLogUpdateWalBuffers(buf, recptr, nbytes); - - while (nbytes > 0) - { - int segbytes; - - /* Close the current segment if it's completed */ - if (walpropFile >= 0 && !XLByteInSeg(recptr, walpropSegNo, wal_segment_size)) - XLogWalPropClose(recptr); - - if (walpropFile < 0) - { -#if PG_VERSION_NUM >= 150000 - /* FIXME Is it ok to use hardcoded value here? */ - TimeLineID tli = 1; -#else - bool use_existent = true; -#endif - /* Create/use new log file */ - XLByteToSeg(recptr, walpropSegNo, wal_segment_size); -#if PG_VERSION_NUM >= 150000 - walpropFile = XLogFileInit(walpropSegNo, tli); - walpropFileTLI = tli; -#else - walpropFile = XLogFileInit(walpropSegNo, &use_existent, false); - walpropFileTLI = ThisTimeLineID; -#endif - } - - /* Calculate the start offset of the received logs */ - startoff = XLogSegmentOffset(recptr, wal_segment_size); - - if (startoff + nbytes > wal_segment_size) - segbytes = wal_segment_size - startoff; - else - segbytes = nbytes; - - /* OK to write the logs */ - errno = 0; - - byteswritten = pg_pwrite(walpropFile, buf, segbytes, (off_t) startoff); - if (byteswritten <= 0) - { - char xlogfname[MAXFNAMELEN]; - int save_errno; - - /* if write didn't set errno, assume no disk space */ - if (errno == 0) - errno = ENOSPC; - - save_errno = errno; - XLogFileName(xlogfname, walpropFileTLI, walpropSegNo, wal_segment_size); - errno = save_errno; - ereport(PANIC, - (errcode_for_file_access(), - errmsg("could not write to log segment %s " - "at offset %u, length %lu: %m", - xlogfname, startoff, (unsigned long) segbytes))); - } - - /* Update state for write */ - recptr += byteswritten; - - nbytes -= byteswritten; - buf += byteswritten; - } - - /* - * Close the current segment if it's fully written up in the last cycle of - * the loop. - */ - if (walpropFile >= 0 && !XLByteInSeg(recptr, walpropSegNo, wal_segment_size)) - { - XLogWalPropClose(recptr); - } -} - -/* - * Close the current segment. - */ -static void -XLogWalPropClose(XLogRecPtr recptr) -{ - Assert(walpropFile >= 0 && !XLByteInSeg(recptr, walpropSegNo, wal_segment_size)); - - if (close(walpropFile) != 0) - { - char xlogfname[MAXFNAMELEN]; - - XLogFileName(xlogfname, walpropFileTLI, walpropSegNo, wal_segment_size); - - ereport(PANIC, - (errcode_for_file_access(), - errmsg("could not close log segment %s: %m", - xlogfname))); - } - - walpropFile = -1; -} - static void walprop_pg_wal_reader_allocate(Safekeeper *sk) { @@ -1987,58 +1851,6 @@ walprop_pg_log_internal(WalProposer *wp, int level, const char *line) elog(FATAL, "unexpected log_internal message at level %d: %s", level, line); } -static XLogRecPtr -GetLogRepRestartLSN(WalProposer *wp) -{ - FILE *f; - XLogRecPtr lrRestartLsn = InvalidXLogRecPtr; - - /* We don't need to do anything in syncSafekeepers mode. 
*/ - if (wp->config->syncSafekeepers) - return InvalidXLogRecPtr; - - /* - * If there are active logical replication subscription we need to provide - * enough WAL for their WAL senders based on th position of their - * replication slots. - */ - f = fopen("restart.lsn", "rb"); - if (f != NULL) - { - size_t rc = fread(&lrRestartLsn, sizeof(lrRestartLsn), 1, f); - - fclose(f); - if (rc == 1 && lrRestartLsn != InvalidXLogRecPtr) - { - uint64 download_range_mb; - - wpg_log(LOG, "logical replication restart LSN %X/%X", LSN_FORMAT_ARGS(lrRestartLsn)); - - /* - * If we need to download more than a max_slot_wal_keep_size, - * don't do it to avoid risk of exploding pg_wal. Logical - * replication won't work until recreated, but at least compute - * would start; this also follows max_slot_wal_keep_size - * semantics. - */ - download_range_mb = (wp->propEpochStartLsn - lrRestartLsn) / MB; - if (max_slot_wal_keep_size_mb > 0 && download_range_mb >= max_slot_wal_keep_size_mb) - { - wpg_log(WARNING, "not downloading WAL for logical replication since %X/%X as max_slot_wal_keep_size=%dMB", - LSN_FORMAT_ARGS(lrRestartLsn), max_slot_wal_keep_size_mb); - return InvalidXLogRecPtr; - } - - /* - * start from the beginning of the segment to fetch page headers - * verifed by XLogReader - */ - lrRestartLsn = lrRestartLsn - XLogSegmentOffset(lrRestartLsn, wal_segment_size); - } - } - return lrRestartLsn; -} - void SetNeonCurrentClusterSize(uint64 size) { From 68476bb4ba0565f68a01504a66a8ddb8fd2ac19b Mon Sep 17 00:00:00 2001 From: Alex Chi Z Date: Tue, 18 Jun 2024 16:02:57 -0400 Subject: [PATCH 212/220] feat(pageserver): add iterator API for btree reader (#8083) The new image iterator and delta iterator uses an iterator-based API. https://github.com/neondatabase/neon/pull/8006 / part of https://github.com/neondatabase/neon/issues/8002 This requires the underlying thing (the btree) to have an iterator API, and the iterator should have a type name so that it can be stored somewhere. ```rust pub struct DeltaLayerIterator { index_iterator: BTreeIterator } ``` versus ```rust pub struct DeltaLayerIterator { index_iterator: impl Stream<....> } ``` (this requires nightly flag and still buggy in the Rust compiler) There are multiple ways to achieve this: 1. Either write a BTreeIterator from scratch that provides `async next`. This is the most efficient way to do that. 2. Or wrap the current `get_stream` API, which is the current approach in the pull request. In the future, we should do (1), and the `get_stream` API should be refactored to use the iterator API. With (2), we have to wrap the `get_stream` API with `Pin>`, where we have the overhead of dynamic dispatch. However, (2) needs a rewrite of the `visit` function, which would take some time to write and review. I'd like to define this iterator API first and work on a real iterator API later. ## Summary of changes Add `DiskBtreeIterator` and related tests. 
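To make the calling convention concrete, the intended usage shape is roughly the following (a sketch only, with the reader/context setup elided, so it is not a compilable excerpt of the pageserver):

```rust
// Iterate all (key, value) pairs of the on-disk btree, starting from `start_key`.
// `next()` is an inherent async method on DiskBtreeIterator, not Stream::next.
let mut iter = reader.iter(&start_key, &ctx);
while let Some(res) = iter.next().await {
    let (key, value) = res?; // key: Vec<u8>, value: u64
    // ... use key/value ...
}
```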
Signed-off-by: Alex Chi Z --- pageserver/src/tenant/disk_btree.rs | 36 ++++++++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/pageserver/src/tenant/disk_btree.rs b/pageserver/src/tenant/disk_btree.rs index 6d85d1e60e..119df3e6c4 100644 --- a/pageserver/src/tenant/disk_btree.rs +++ b/pageserver/src/tenant/disk_btree.rs @@ -22,7 +22,7 @@ use async_stream::try_stream; use byteorder::{ReadBytesExt, BE}; use bytes::{BufMut, Bytes, BytesMut}; use either::Either; -use futures::Stream; +use futures::{Stream, StreamExt}; use hex; use std::{ cmp::Ordering, @@ -259,6 +259,16 @@ where Ok(result) } + pub fn iter<'a>( + &'a self, + start_key: &'a [u8; L], + ctx: &'a RequestContext, + ) -> DiskBtreeIterator<'a> { + DiskBtreeIterator { + stream: Box::pin(self.get_stream_from(start_key, ctx)), + } + } + /// Return a stream which yields all key, value pairs from the index /// starting from the first key greater or equal to `start_key`. /// @@ -496,6 +506,19 @@ where } } +pub struct DiskBtreeIterator<'a> { + #[allow(clippy::type_complexity)] + stream: std::pin::Pin< + Box, u64), DiskBtreeError>> + 'a>, + >, +} + +impl<'a> DiskBtreeIterator<'a> { + pub async fn next(&mut self) -> Option, u64), DiskBtreeError>> { + self.stream.next().await + } +} + /// /// Public builder object, for creating a new tree. /// @@ -1088,6 +1111,17 @@ pub(crate) mod tests { == all_data.get(&u128::MAX).cloned() ); + // Test iterator and get_stream API + let mut iter = reader.iter(&[0; 16], &ctx); + let mut cnt = 0; + while let Some(res) = iter.next().await { + let (key, val) = res?; + let key = u128::from_be_bytes(key.as_slice().try_into().unwrap()); + assert_eq!(val, *all_data.get(&key).unwrap()); + cnt += 1; + } + assert_eq!(cnt, all_data.len()); + Ok(()) } From 4753b8f3902751bf0617be89ae1daccdfb60a7a9 Mon Sep 17 00:00:00 2001 From: Sergey Melnikov Date: Wed, 19 Jun 2024 11:33:21 +0200 Subject: [PATCH 213/220] Copy release images to prod ECR (#8101) ## Problem We want to have all released images in production ECR repository ## Summary of changes Copy all docker images to production ECR repository cc: https://github.com/neondatabase/cloud/issues/10177 --- .github/workflows/build_and_test.yml | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 742716776e..8c8500260c 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -1070,7 +1070,8 @@ jobs: username: ${{ secrets.NEON_DOCKERHUB_USERNAME }} password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }} - - uses: docker/login-action@v3 + - name: Login to dev ECR + uses: docker/login-action@v3 with: registry: 369495373322.dkr.ecr.eu-central-1.amazonaws.com username: ${{ secrets.AWS_ACCESS_KEY_DEV }} @@ -1104,6 +1105,22 @@ jobs: docker buildx imagetools create -t neondatabase/neon-test-extensions-v16:latest \ neondatabase/neon-test-extensions-v16:${{ needs.tag.outputs.build-tag }} + - name: Login to prod ECR + uses: docker/login-action@v3 + if: github.ref_name == 'release'|| github.ref_name == 'release-proxy' + with: + registry: 093970136003.dkr.ecr.eu-central-1.amazonaws.com + username: ${{ secrets.PROD_GHA_RUNNER_LIMITED_AWS_ACCESS_KEY_ID }} + password: ${{ secrets.PROD_GHA_RUNNER_LIMITED_AWS_SECRET_ACCESS_KEY }} + + - name: Copy all images to prod ECR + if: github.ref_name == 'release'|| github.ref_name == 'release-proxy' + run: | + for image in neon compute-tools {vm-,}compute-node-{v14,v15,v16}; do + 
docker buildx imagetools create -t 093970136003.dkr.ecr.eu-central-1.amazonaws.com/${image}:${{ needs.tag.outputs.build-tag }} \ + 369495373322.dkr.ecr.eu-central-1.amazonaws.com/${image}:${{ needs.tag.outputs.build-tag }} + done + trigger-custom-extensions-build-and-wait: needs: [ check-permissions, tag ] runs-on: ubuntu-22.04 From 5778d714f0a4a6f009d9b88bd30a6e35127a6410 Mon Sep 17 00:00:00 2001 From: Vlad Lazar Date: Wed, 19 Jun 2024 11:55:30 +0100 Subject: [PATCH 214/220] storcon: add drain and fill background operations for graceful cluster restarts (#8014) ## Problem Pageserver restarts cause read availablity downtime for tenants. See `Motivation` section in the [RFC](https://github.com/neondatabase/neon/pull/7704). ## Summary of changes * Introduce a new `NodeSchedulingPolicy`: `PauseForRestart` * Implement the first take of drain and fill algorithms * Add a node status endpoint which can be polled to figure out when an operation is done The implementation follows the RFC, so it might be useful to peek at it as you're reviewing. Since the PR is rather chunky, I've made sure all commits build (with warnings), so you can review by commit if you prefer that. RFC: https://github.com/neondatabase/neon/pull/7704 Related https://github.com/neondatabase/neon/issues/7387 --- Cargo.lock | 1 + libs/pageserver_api/src/controller_api.rs | 3 + storage_controller/Cargo.toml | 1 + .../src/background_node_operations.rs | 59 ++ storage_controller/src/http.rs | 43 ++ storage_controller/src/lib.rs | 1 + storage_controller/src/node.rs | 7 +- storage_controller/src/persistence.rs | 22 +- storage_controller/src/scheduler.rs | 39 ++ storage_controller/src/service.rs | 571 +++++++++++++++++- storage_controller/src/tenant_shard.rs | 36 +- test_runner/fixtures/neon_fixtures.py | 24 + .../regress/test_storage_controller.py | 119 +++- 13 files changed, 905 insertions(+), 21 deletions(-) create mode 100644 storage_controller/src/background_node_operations.rs diff --git a/Cargo.lock b/Cargo.lock index 5eac648fd9..cf8a0b3286 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5754,6 +5754,7 @@ dependencies = [ "r2d2", "reqwest 0.12.4", "routerify", + "scopeguard", "serde", "serde_json", "strum", diff --git a/libs/pageserver_api/src/controller_api.rs b/libs/pageserver_api/src/controller_api.rs index 1278f17ad2..a0d10dc665 100644 --- a/libs/pageserver_api/src/controller_api.rs +++ b/libs/pageserver_api/src/controller_api.rs @@ -209,6 +209,7 @@ pub enum NodeSchedulingPolicy { Active, Filling, Pause, + PauseForRestart, Draining, } @@ -220,6 +221,7 @@ impl FromStr for NodeSchedulingPolicy { "active" => Ok(Self::Active), "filling" => Ok(Self::Filling), "pause" => Ok(Self::Pause), + "pause_for_restart" => Ok(Self::PauseForRestart), "draining" => Ok(Self::Draining), _ => Err(anyhow::anyhow!("Unknown scheduling state '{s}'")), } @@ -233,6 +235,7 @@ impl From for String { Active => "active", Filling => "filling", Pause => "pause", + PauseForRestart => "pause_for_restart", Draining => "draining", } .to_string() diff --git a/storage_controller/Cargo.toml b/storage_controller/Cargo.toml index 194619a496..b54dea5d47 100644 --- a/storage_controller/Cargo.toml +++ b/storage_controller/Cargo.toml @@ -40,6 +40,7 @@ tokio.workspace = true tokio-util.workspace = true tracing.workspace = true measured.workspace = true +scopeguard.workspace = true strum.workspace = true strum_macros.workspace = true diff --git a/storage_controller/src/background_node_operations.rs b/storage_controller/src/background_node_operations.rs new file mode 100644 
index 0000000000..74b7e7c849 --- /dev/null +++ b/storage_controller/src/background_node_operations.rs @@ -0,0 +1,59 @@ +use std::{borrow::Cow, fmt::Debug, fmt::Display}; + +use tokio_util::sync::CancellationToken; +use utils::id::NodeId; + +pub(crate) const MAX_RECONCILES_PER_OPERATION: usize = 10; + +#[derive(Copy, Clone)] +pub(crate) struct Drain { + pub(crate) node_id: NodeId, +} + +#[derive(Copy, Clone)] +pub(crate) struct Fill { + pub(crate) node_id: NodeId, +} + +#[derive(Copy, Clone)] +pub(crate) enum Operation { + Drain(Drain), + Fill(Fill), +} + +#[derive(Debug, thiserror::Error)] +pub(crate) enum OperationError { + #[error("Node state changed during operation: {0}")] + NodeStateChanged(Cow<'static, str>), + #[error("Operation finalize error: {0}")] + FinalizeError(Cow<'static, str>), + #[error("Operation cancelled")] + Cancelled, +} + +pub(crate) struct OperationHandler { + pub(crate) operation: Operation, + #[allow(unused)] + pub(crate) cancel: CancellationToken, +} + +impl Display for Drain { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "drain {}", self.node_id) + } +} + +impl Display for Fill { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "fill {}", self.node_id) + } +} + +impl Display for Operation { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + Operation::Drain(op) => write!(f, "{op}"), + Operation::Fill(op) => write!(f, "{op}"), + } + } +} diff --git a/storage_controller/src/http.rs b/storage_controller/src/http.rs index bbb6d2cb32..3e9951fb9e 100644 --- a/storage_controller/src/http.rs +++ b/storage_controller/src/http.rs @@ -480,6 +480,39 @@ async fn handle_node_configure(mut req: Request) -> Result, ) } +async fn handle_node_status(req: Request) -> Result, ApiError> { + check_permissions(&req, Scope::Admin)?; + + let state = get_state(&req); + let node_id: NodeId = parse_request_param(&req, "node_id")?; + + let node_status = state.service.get_node(node_id).await?; + + json_response(StatusCode::OK, node_status) +} + +async fn handle_node_drain(req: Request) -> Result, ApiError> { + check_permissions(&req, Scope::Admin)?; + + let state = get_state(&req); + let node_id: NodeId = parse_request_param(&req, "node_id")?; + + state.service.start_node_drain(node_id).await?; + + json_response(StatusCode::ACCEPTED, ()) +} + +async fn handle_node_fill(req: Request) -> Result, ApiError> { + check_permissions(&req, Scope::Admin)?; + + let state = get_state(&req); + let node_id: NodeId = parse_request_param(&req, "node_id")?; + + state.service.start_node_fill(node_id).await?; + + json_response(StatusCode::ACCEPTED, ()) +} + async fn handle_tenant_shard_split( service: Arc, mut req: Request, @@ -832,6 +865,16 @@ pub fn make_router( RequestName("control_v1_node_config"), ) }) + .get("/control/v1/node/:node_id", |r| { + named_request_span(r, handle_node_status, RequestName("control_v1_node_status")) + }) + .put("/control/v1/node/:node_id/drain", |r| { + named_request_span(r, handle_node_drain, RequestName("control_v1_node_drain")) + }) + .put("/control/v1/node/:node_id/fill", |r| { + named_request_span(r, handle_node_fill, RequestName("control_v1_node_fill")) + }) + // TODO(vlad): endpoint for cancelling drain and fill // Tenant Shard operations .put("/control/v1/tenant/:tenant_shard_id/migrate", |r| { tenant_service_handler( diff --git a/storage_controller/src/lib.rs b/storage_controller/src/lib.rs index 2ea490a14b..8caf638904 100644 --- a/storage_controller/src/lib.rs +++ 
b/storage_controller/src/lib.rs @@ -2,6 +2,7 @@ use serde::Serialize; use utils::seqwait::MonotonicCounter; mod auth; +mod background_node_operations; mod compute_hook; mod heartbeater; pub mod http; diff --git a/storage_controller/src/node.rs b/storage_controller/src/node.rs index 34dcf0c642..4d17dff9fe 100644 --- a/storage_controller/src/node.rs +++ b/storage_controller/src/node.rs @@ -59,6 +59,10 @@ impl Node { self.id } + pub(crate) fn get_scheduling(&self) -> NodeSchedulingPolicy { + self.scheduling + } + pub(crate) fn set_scheduling(&mut self, scheduling: NodeSchedulingPolicy) { self.scheduling = scheduling } @@ -151,6 +155,7 @@ impl Node { NodeSchedulingPolicy::Draining => MaySchedule::No, NodeSchedulingPolicy::Filling => MaySchedule::Yes(score), NodeSchedulingPolicy::Pause => MaySchedule::No, + NodeSchedulingPolicy::PauseForRestart => MaySchedule::No, } } @@ -167,7 +172,7 @@ impl Node { listen_http_port, listen_pg_addr, listen_pg_port, - scheduling: NodeSchedulingPolicy::Filling, + scheduling: NodeSchedulingPolicy::Active, availability: NodeAvailability::Offline, cancel: CancellationToken::new(), } diff --git a/storage_controller/src/persistence.rs b/storage_controller/src/persistence.rs index 67c05296d5..47caf7ae81 100644 --- a/storage_controller/src/persistence.rs +++ b/storage_controller/src/persistence.rs @@ -442,13 +442,15 @@ impl Persistence { #[tracing::instrument(skip_all, fields(node_id))] pub(crate) async fn re_attach( &self, - node_id: NodeId, + input_node_id: NodeId, ) -> DatabaseResult> { + use crate::schema::nodes::dsl::scheduling_policy; + use crate::schema::nodes::dsl::*; use crate::schema::tenant_shards::dsl::*; let updated = self .with_measured_conn(DatabaseOperation::ReAttach, move |conn| { let rows_updated = diesel::update(tenant_shards) - .filter(generation_pageserver.eq(node_id.0 as i64)) + .filter(generation_pageserver.eq(input_node_id.0 as i64)) .set(generation.eq(generation + 1)) .execute(conn)?; @@ -457,9 +459,23 @@ impl Persistence { // TODO: UPDATE+SELECT in one query let updated = tenant_shards - .filter(generation_pageserver.eq(node_id.0 as i64)) + .filter(generation_pageserver.eq(input_node_id.0 as i64)) .select(TenantShardPersistence::as_select()) .load(conn)?; + + // If the node went through a drain and restart phase before re-attaching, + // then reset it's node scheduling policy to active. + diesel::update(nodes) + .filter(node_id.eq(input_node_id.0 as i64)) + .filter( + scheduling_policy + .eq(String::from(NodeSchedulingPolicy::PauseForRestart)) + .or(scheduling_policy.eq(String::from(NodeSchedulingPolicy::Draining))) + .or(scheduling_policy.eq(String::from(NodeSchedulingPolicy::Filling))), + ) + .set(scheduling_policy.eq(String::from(NodeSchedulingPolicy::Active))) + .execute(conn)?; + Ok(updated) }) .await?; diff --git a/storage_controller/src/scheduler.rs b/storage_controller/src/scheduler.rs index 4ab85509dc..0bd2eeac35 100644 --- a/storage_controller/src/scheduler.rs +++ b/storage_controller/src/scheduler.rs @@ -1,4 +1,5 @@ use crate::{node::Node, tenant_shard::TenantShard}; +use itertools::Itertools; use pageserver_api::controller_api::UtilizationScore; use serde::Serialize; use std::collections::HashMap; @@ -283,6 +284,44 @@ impl Scheduler { } } + // Check if the number of shards attached to a given node is lagging below + // the cluster average. If that's the case, the node should be filled. 
+ pub(crate) fn compute_fill_requirement(&self, node_id: NodeId) -> usize { + let Some(node) = self.nodes.get(&node_id) else { + debug_assert!(false); + tracing::error!("Scheduler missing node {node_id}"); + return 0; + }; + assert!(!self.nodes.is_empty()); + let expected_attached_shards_per_node = self.expected_attached_shard_count(); + + for (node_id, node) in self.nodes.iter() { + tracing::trace!(%node_id, "attached_shard_count={} shard_count={} expected={}", node.attached_shard_count, node.shard_count, expected_attached_shards_per_node); + } + + if node.attached_shard_count < expected_attached_shards_per_node { + expected_attached_shards_per_node - node.attached_shard_count + } else { + 0 + } + } + + pub(crate) fn expected_attached_shard_count(&self) -> usize { + let total_attached_shards: usize = + self.nodes.values().map(|n| n.attached_shard_count).sum(); + + assert!(!self.nodes.is_empty()); + total_attached_shards / self.nodes.len() + } + + pub(crate) fn nodes_by_attached_shard_count(&self) -> Vec<(NodeId, usize)> { + self.nodes + .iter() + .map(|(node_id, stats)| (*node_id, stats.attached_shard_count)) + .sorted_by(|lhs, rhs| Ord::cmp(&lhs.1, &rhs.1).reverse()) + .collect() + } + pub(crate) fn node_upsert(&mut self, node: &Node) { use std::collections::hash_map::Entry::*; match self.nodes.entry(node.get_id()) { diff --git a/storage_controller/src/service.rs b/storage_controller/src/service.rs index 926332f946..c94af113db 100644 --- a/storage_controller/src/service.rs +++ b/storage_controller/src/service.rs @@ -8,13 +8,17 @@ use std::{ }; use crate::{ + background_node_operations::{ + Drain, Fill, Operation, OperationError, OperationHandler, MAX_RECONCILES_PER_OPERATION, + }, compute_hook::NotifyError, id_lock_map::{trace_exclusive_lock, trace_shared_lock, IdLockMap, WrappedWriteGuard}, persistence::{AbortShardSplitStatus, TenantFilter}, reconciler::{ReconcileError, ReconcileUnits}, scheduler::{MaySchedule, ScheduleContext, ScheduleMode}, tenant_shard::{ - MigrateAttachment, ReconcileNeeded, ScheduleOptimization, ScheduleOptimizationAction, + MigrateAttachment, ReconcileNeeded, ReconcilerStatus, ScheduleOptimization, + ScheduleOptimizationAction, }, }; use anyhow::Context; @@ -134,6 +138,11 @@ struct ServiceState { scheduler: Scheduler, + /// Ongoing background operation on the cluster if any is running. + /// Note that only one such operation may run at any given time, + /// hence the type choice. + ongoing_operation: Option, + /// Queue of tenants who are waiting for concurrency limits to permit them to reconcile delayed_reconcile_rx: tokio::sync::mpsc::Receiver, } @@ -185,6 +194,7 @@ impl ServiceState { tenants, nodes: Arc::new(nodes), scheduler, + ongoing_operation: None, delayed_reconcile_rx, } } @@ -296,6 +306,17 @@ impl From for ApiError { } } +impl From for ApiError { + fn from(value: OperationError) -> Self { + match value { + OperationError::NodeStateChanged(err) | OperationError::FinalizeError(err) => { + ApiError::InternalServerError(anyhow::anyhow!(err)) + } + OperationError::Cancelled => ApiError::Conflict("Operation was cancelled".into()), + } + } +} + #[allow(clippy::large_enum_variant)] enum TenantCreateOrUpdate { Create(TenantCreateRequest), @@ -1594,15 +1615,32 @@ impl Service { // Setting a node active unblocks any Reconcilers that might write to the location config API, // but those requests will not be accepted by the node until it has finished processing // the re-attach response. 
+ // + // Additionally, reset the nodes scheduling policy to match the conditional update done + // in [`Persistence::re_attach`]. if let Some(node) = nodes.get(&reattach_req.node_id) { - if !node.is_available() { + let reset_scheduling = matches!( + node.get_scheduling(), + NodeSchedulingPolicy::PauseForRestart + | NodeSchedulingPolicy::Draining + | NodeSchedulingPolicy::Filling + ); + + if !node.is_available() || reset_scheduling { let mut new_nodes = (**nodes).clone(); if let Some(node) = new_nodes.get_mut(&reattach_req.node_id) { - node.set_availability(NodeAvailability::Active(UtilizationScore::worst())); + if !node.is_available() { + node.set_availability(NodeAvailability::Active(UtilizationScore::worst())); + } + + if reset_scheduling { + node.set_scheduling(NodeSchedulingPolicy::Active); + } + scheduler.node_upsert(node); + let new_nodes = Arc::new(new_nodes); + *nodes = new_nodes; } - let new_nodes = Arc::new(new_nodes); - *nodes = new_nodes; } } @@ -1883,6 +1921,25 @@ impl Service { Ok(()) } + /// Same as [`Service::await_waiters`], but returns the waiters which are still + /// in progress + async fn await_waiters_remainder( + &self, + waiters: Vec, + timeout: Duration, + ) -> Vec { + let deadline = Instant::now().checked_add(timeout).unwrap(); + for waiter in waiters.iter() { + let timeout = deadline.duration_since(Instant::now()); + let _ = waiter.wait_timeout(timeout).await; + } + + waiters + .into_iter() + .filter(|waiter| matches!(waiter.get_status(), ReconcilerStatus::InProgress)) + .collect::>() + } + /// Part of [`Self::tenant_location_config`]: dissect an incoming location config request, /// and transform it into either a tenant creation of a series of shard updates. /// @@ -4164,6 +4221,18 @@ impl Service { Ok(nodes) } + pub(crate) async fn get_node(&self, node_id: NodeId) -> Result { + self.inner + .read() + .unwrap() + .nodes + .get(&node_id) + .cloned() + .ok_or(ApiError::NotFound( + format!("Node {node_id} not registered").into(), + )) + } + pub(crate) async fn node_register( &self, register_req: NodeRegisterRequest, @@ -4318,9 +4387,6 @@ impl Service { if let Some(scheduling) = scheduling { node.set_scheduling(scheduling); - - // TODO: once we have a background scheduling ticker for fill/drain, kick it - // to wake up and start working. 
} // Update the scheduler, in case the elegibility of the node for new shards has changed @@ -4411,7 +4477,7 @@ impl Service { // TODO: in the background, we should balance work back onto this pageserver } AvailabilityTransition::Unchanged => { - tracing::debug!("Node {} no change during config", node_id); + tracing::debug!("Node {} no availability change during config", node_id); } } @@ -4420,6 +4486,201 @@ impl Service { Ok(()) } + pub(crate) async fn start_node_drain( + self: &Arc, + node_id: NodeId, + ) -> Result<(), ApiError> { + let (ongoing_op, node_available, node_policy, schedulable_nodes_count) = { + let locked = self.inner.read().unwrap(); + let nodes = &locked.nodes; + let node = nodes.get(&node_id).ok_or(ApiError::NotFound( + anyhow::anyhow!("Node {} not registered", node_id).into(), + ))?; + let schedulable_nodes_count = nodes + .iter() + .filter(|(_, n)| matches!(n.may_schedule(), MaySchedule::Yes(_))) + .count(); + + ( + locked + .ongoing_operation + .as_ref() + .map(|ongoing| ongoing.operation), + node.is_available(), + node.get_scheduling(), + schedulable_nodes_count, + ) + }; + + if let Some(ongoing) = ongoing_op { + return Err(ApiError::PreconditionFailed( + format!("Background operation already ongoing for node: {}", ongoing).into(), + )); + } + + if !node_available { + return Err(ApiError::ResourceUnavailable( + format!("Node {node_id} is currently unavailable").into(), + )); + } + + if schedulable_nodes_count == 0 { + return Err(ApiError::PreconditionFailed( + "No other schedulable nodes to drain to".into(), + )); + } + + match node_policy { + NodeSchedulingPolicy::Active | NodeSchedulingPolicy::Pause => { + self.node_configure(node_id, None, Some(NodeSchedulingPolicy::Draining)) + .await?; + + let cancel = CancellationToken::new(); + + self.inner.write().unwrap().ongoing_operation = Some(OperationHandler { + operation: Operation::Drain(Drain { node_id }), + cancel: cancel.clone(), + }); + + tokio::task::spawn({ + let service = self.clone(); + let cancel = cancel.clone(); + async move { + scopeguard::defer! 
{ + let prev = service.inner.write().unwrap().ongoing_operation.take(); + + if let Some(Operation::Drain(removed_drain)) = prev.map(|h| h.operation) { + assert_eq!(removed_drain.node_id, node_id, "We always take the same operation"); + } else { + panic!("We always remove the same operation") + } + } + + tracing::info!(%node_id, "Drain background operation starting"); + let res = service.drain_node(node_id, cancel).await; + match res { + Ok(()) => { + tracing::info!(%node_id, "Drain background operation completed successfully"); + } + Err(OperationError::Cancelled) => { + tracing::info!(%node_id, "Drain background operation was cancelled"); + } + Err(err) => { + tracing::error!(%node_id, "Drain background operation encountered: {err}") + } + } + } + }); + } + NodeSchedulingPolicy::Draining => { + return Err(ApiError::Conflict(format!( + "Node {node_id} has drain in progress" + ))); + } + policy => { + return Err(ApiError::PreconditionFailed( + format!("Node {node_id} cannot be drained due to {policy:?} policy").into(), + )); + } + } + + Ok(()) + } + + pub(crate) async fn start_node_fill(self: &Arc, node_id: NodeId) -> Result<(), ApiError> { + let (ongoing_op, node_available, node_policy, total_nodes_count) = { + let locked = self.inner.read().unwrap(); + let nodes = &locked.nodes; + let node = nodes.get(&node_id).ok_or(ApiError::NotFound( + anyhow::anyhow!("Node {} not registered", node_id).into(), + ))?; + + ( + locked + .ongoing_operation + .as_ref() + .map(|ongoing| ongoing.operation), + node.is_available(), + node.get_scheduling(), + nodes.len(), + ) + }; + + if let Some(ongoing) = ongoing_op { + return Err(ApiError::PreconditionFailed( + format!("Background operation already ongoing for node: {}", ongoing).into(), + )); + } + + if !node_available { + return Err(ApiError::ResourceUnavailable( + format!("Node {node_id} is currently unavailable").into(), + )); + } + + if total_nodes_count <= 1 { + return Err(ApiError::PreconditionFailed( + "No other nodes to fill from".into(), + )); + } + + match node_policy { + NodeSchedulingPolicy::Active => { + self.node_configure(node_id, None, Some(NodeSchedulingPolicy::Filling)) + .await?; + + let cancel = CancellationToken::new(); + + self.inner.write().unwrap().ongoing_operation = Some(OperationHandler { + operation: Operation::Fill(Fill { node_id }), + cancel: cancel.clone(), + }); + + tokio::task::spawn({ + let service = self.clone(); + let cancel = cancel.clone(); + async move { + scopeguard::defer! 
{ + let prev = service.inner.write().unwrap().ongoing_operation.take(); + + if let Some(Operation::Fill(removed_fill)) = prev.map(|h| h.operation) { + assert_eq!(removed_fill.node_id, node_id, "We always take the same operation"); + } else { + panic!("We always remove the same operation") + } + } + + tracing::info!(%node_id, "Fill background operation starting"); + let res = service.fill_node(node_id, cancel).await; + match res { + Ok(()) => { + tracing::info!(%node_id, "Fill background operation completed successfully"); + } + Err(OperationError::Cancelled) => { + tracing::info!(%node_id, "Fill background operation was cancelled"); + } + Err(err) => { + tracing::error!(%node_id, "Fill background operation encountered: {err}") + } + } + } + }); + } + NodeSchedulingPolicy::Filling => { + return Err(ApiError::Conflict(format!( + "Node {node_id} has fill in progress" + ))); + } + policy => { + return Err(ApiError::PreconditionFailed( + format!("Node {node_id} cannot be filled due to {policy:?} policy").into(), + )); + } + } + + Ok(()) + } + /// Helper for methods that will try and call pageserver APIs for /// a tenant, such as timeline CRUD: they cannot proceed unless the tenant /// is attached somewhere. @@ -5004,4 +5265,296 @@ impl Service { // to complete. self.gate.close().await; } + + /// Drain a node by moving the shards attached to it as primaries. + /// This is a long running operation and it should run as a separate Tokio task. + pub(crate) async fn drain_node( + &self, + node_id: NodeId, + cancel: CancellationToken, + ) -> Result<(), OperationError> { + let mut last_inspected_shard: Option = None; + let mut inspected_all_shards = false; + let mut waiters = Vec::new(); + let mut schedule_context = ScheduleContext::default(); + + while !inspected_all_shards { + if cancel.is_cancelled() { + return Err(OperationError::Cancelled); + } + + { + let mut locked = self.inner.write().unwrap(); + let (nodes, tenants, scheduler) = locked.parts_mut(); + + let node = nodes.get(&node_id).ok_or(OperationError::NodeStateChanged( + format!("node {node_id} was removed").into(), + ))?; + + let current_policy = node.get_scheduling(); + if !matches!(current_policy, NodeSchedulingPolicy::Draining) { + // TODO(vlad): maybe cancel pending reconciles before erroring out. 
need to think + // about it + return Err(OperationError::NodeStateChanged( + format!("node {node_id} changed state to {current_policy:?}").into(), + )); + } + + let mut cursor = tenants.iter_mut().skip_while({ + let skip_past = last_inspected_shard; + move |(tid, _)| match skip_past { + Some(last) => **tid != last, + None => false, + } + }); + + while waiters.len() < MAX_RECONCILES_PER_OPERATION { + let (tid, tenant_shard) = match cursor.next() { + Some(some) => some, + None => { + inspected_all_shards = true; + break; + } + }; + + if tenant_shard.intent.demote_attached(scheduler, node_id) { + match tenant_shard.schedule(scheduler, &mut schedule_context) { + Err(e) => { + tracing::warn!( + tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(), + "Scheduling error when draining pageserver {} : {e}", node_id + ); + } + Ok(()) => { + let scheduled_to = tenant_shard.intent.get_attached(); + tracing::info!( + tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(), + "Rescheduled shard while draining node {}: {} -> {:?}", + node_id, + node_id, + scheduled_to + ); + + let waiter = self.maybe_reconcile_shard(tenant_shard, nodes); + if let Some(some) = waiter { + waiters.push(some); + } + } + } + } + + last_inspected_shard = Some(*tid); + } + } + + waiters = self + .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT) + .await; + } + + while !waiters.is_empty() { + tracing::info!("Awaiting {} pending drain reconciliations", waiters.len()); + + waiters = self + .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT) + .await; + } + + // At this point we have done the best we could to drain shards from this node. + // Set the node scheduling policy to `[NodeSchedulingPolicy::PauseForRestart]` + // to complete the drain. + if let Err(err) = self + .node_configure(node_id, None, Some(NodeSchedulingPolicy::PauseForRestart)) + .await + { + // This is not fatal. Anything that is polling the node scheduling policy to detect + // the end of the drain operations will hang, but all such places should enforce an + // overall timeout. The scheduling policy will be updated upon node re-attach and/or + // by the counterpart fill operation. + return Err(OperationError::FinalizeError( + format!( + "Failed to finalise drain of {node_id} by setting scheduling policy to PauseForRestart: {err}" + ) + .into(), + )); + } + + Ok(()) + } + + /// Create a node fill plan (pick secondaries to promote) that meets the following requirements: + /// 1. The node should be filled until it reaches the expected cluster average of + /// attached shards. If there are not enough secondaries on the node, the plan stops early. + /// 2. Select tenant shards to promote such that the number of attached shards is balanced + /// throughout the cluster. We achieve this by picking tenant shards from each node, + /// starting from the ones with the largest number of attached shards, until the node + /// reaches the expected cluster average. 
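// A simplified, self-contained sketch of the selection described in the doc
// comment above. The plain u64 ids and the pre-sorted donor list are
// illustrative stand-ins; the real implementation below works against the
// Scheduler's own bookkeeping.
fn sketch_fill_plan(
    fill_requirement: usize,
    expected_attached: usize,
    mut candidates_by_donor: std::collections::HashMap<u64, Vec<u64>>, // donor node -> promotable shards
    donors_by_load: Vec<(u64, usize)>, // (donor node, attached count), sorted descending
) -> Vec<u64> {
    let mut plan = Vec::new();
    for (donor, attached) in donors_by_load {
        if plan.len() >= fill_requirement || attached <= expected_attached {
            break;
        }
        // Never take more from a donor than would drop it below the cluster average.
        let can_take = attached - expected_attached;
        if let Some(shards) = candidates_by_donor.get_mut(&donor) {
            for _ in 0..can_take {
                match shards.pop() {
                    Some(shard) if plan.len() < fill_requirement => plan.push(shard),
                    _ => break,
                }
            }
        }
    }
    plan
}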
+ fn fill_node_plan(&self, node_id: NodeId) -> Vec { + let mut locked = self.inner.write().unwrap(); + let fill_requirement = locked.scheduler.compute_fill_requirement(node_id); + + let mut tids_by_node = locked + .tenants + .iter_mut() + .filter_map(|(tid, tenant_shard)| { + if tenant_shard.intent.get_secondary().contains(&node_id) { + if let Some(primary) = tenant_shard.intent.get_attached() { + return Some((*primary, *tid)); + } + } + + None + }) + .into_group_map(); + + let expected_attached = locked.scheduler.expected_attached_shard_count(); + let nodes_by_load = locked.scheduler.nodes_by_attached_shard_count(); + + let mut plan = Vec::new(); + for (node_id, attached) in nodes_by_load { + if plan.len() >= fill_requirement + || tids_by_node.is_empty() + || attached <= expected_attached + { + break; + } + + let can_take = attached - expected_attached; + let mut remove_node = false; + for _ in 0..can_take { + match tids_by_node.get_mut(&node_id) { + Some(tids) => match tids.pop() { + Some(tid) => { + plan.push(tid); + } + None => { + remove_node = true; + break; + } + }, + None => { + break; + } + } + } + + if remove_node { + tids_by_node.remove(&node_id); + } + } + + plan + } + + /// Fill a node by promoting its secondaries until the cluster is balanced + /// with regards to attached shard counts. Note that this operation only + /// makes sense as a counterpart to the drain implemented in [`Service::drain_node`]. + /// This is a long running operation and it should run as a separate Tokio task. + pub(crate) async fn fill_node( + &self, + node_id: NodeId, + cancel: CancellationToken, + ) -> Result<(), OperationError> { + // TODO(vlad): Currently this operates on the assumption that all + // secondaries are warm. This is not always true (e.g. we just migrated the + // tenant). Take that into consideration by checking the secondary status. + let mut tids_to_promote = self.fill_node_plan(node_id); + + let mut waiters = Vec::new(); + let mut schedule_context = ScheduleContext::default(); + + // Execute the plan we've composed above. Before aplying each move from the plan, + // we validate to ensure that it has not gone stale in the meantime. + while !tids_to_promote.is_empty() { + if cancel.is_cancelled() { + return Err(OperationError::Cancelled); + } + + { + let mut locked = self.inner.write().unwrap(); + let (nodes, tenants, scheduler) = locked.parts_mut(); + + let node = nodes.get(&node_id).ok_or(OperationError::NodeStateChanged( + format!("node {node_id} was removed").into(), + ))?; + + let current_policy = node.get_scheduling(); + if !matches!(current_policy, NodeSchedulingPolicy::Filling) { + // TODO(vlad): maybe cancel pending reconciles before erroring out. need to think + // about it + return Err(OperationError::NodeStateChanged( + format!("node {node_id} changed state to {current_policy:?}").into(), + )); + } + + while waiters.len() < MAX_RECONCILES_PER_OPERATION { + if let Some(tid) = tids_to_promote.pop() { + if let Some(tenant_shard) = tenants.get_mut(&tid) { + // If the node being filled is not a secondary anymore, + // skip the promotion. 
+ if !tenant_shard.intent.get_secondary().contains(&node_id) { + continue; + } + + let previously_attached_to = *tenant_shard.intent.get_attached(); + + tenant_shard.intent.promote_attached(scheduler, node_id); + match tenant_shard.schedule(scheduler, &mut schedule_context) { + Err(e) => { + tracing::warn!( + tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(), + "Scheduling error when filling pageserver {} : {e}", node_id + ); + } + Ok(()) => { + tracing::info!( + tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(), + "Rescheduled shard while filling node {}: {:?} -> {}", + node_id, + previously_attached_to, + node_id + ); + + if let Some(waiter) = + self.maybe_reconcile_shard(tenant_shard, nodes) + { + waiters.push(waiter); + } + } + } + } + } else { + break; + } + } + } + + waiters = self + .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT) + .await; + } + + while !waiters.is_empty() { + tracing::info!("Awaiting {} pending fill reconciliations", waiters.len()); + + waiters = self + .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT) + .await; + } + + if let Err(err) = self + .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active)) + .await + { + // This isn't a huge issue since the filling process starts upon request. However, it + // will prevent the next drain from starting. The only case in which this can fail + // is database unavailability. Such a case will require manual intervention. + return Err(OperationError::FinalizeError( + format!("Failed to finalise fill of {node_id} by setting scheduling policy to Active: {err}") + .into(), + )); + } + + Ok(()) + } } diff --git a/storage_controller/src/tenant_shard.rs b/storage_controller/src/tenant_shard.rs index 77bbf4c604..d1b632755f 100644 --- a/storage_controller/src/tenant_shard.rs +++ b/storage_controller/src/tenant_shard.rs @@ -10,7 +10,9 @@ use crate::{ reconciler::ReconcileUnits, scheduler::{AffinityScore, MaySchedule, RefCountUpdate, ScheduleContext}, }; -use pageserver_api::controller_api::{PlacementPolicy, ShardSchedulingPolicy}; +use pageserver_api::controller_api::{ + NodeSchedulingPolicy, PlacementPolicy, ShardSchedulingPolicy, +}; use pageserver_api::{ models::{LocationConfig, LocationConfigMode, TenantConfig}, shard::{ShardIdentity, TenantShardId}, @@ -311,6 +313,12 @@ pub(crate) struct ReconcilerWaiter { seq: Sequence, } +pub(crate) enum ReconcilerStatus { + Done, + Failed, + InProgress, +} + #[derive(thiserror::Error, Debug)] pub(crate) enum ReconcileWaitError { #[error("Timeout waiting for shard {0}")] @@ -373,6 +381,16 @@ impl ReconcilerWaiter { Ok(()) } + + pub(crate) fn get_status(&self) -> ReconcilerStatus { + if self.seq_wait.would_wait_for(self.seq).is_err() { + ReconcilerStatus::Done + } else if self.error_seq_wait.would_wait_for(self.seq).is_err() { + ReconcilerStatus::Failed + } else { + ReconcilerStatus::InProgress + } + } } /// Having spawned a reconciler task, the tenant shard's state will carry enough @@ -652,13 +670,17 @@ impl TenantShard { let mut scores = all_pageservers .iter() .flat_map(|node_id| { - if matches!( - nodes - .get(node_id) - .map(|n| n.may_schedule()) - .unwrap_or(MaySchedule::No), - MaySchedule::No + let node = nodes.get(node_id); + if node.is_none() { + None + } else if matches!( + node.unwrap().get_scheduling(), + NodeSchedulingPolicy::Filling ) { + // If the node is currently filling, don't count it as a candidate to avoid, + // racing with the background fill. 
+ None + } else if matches!(node.unwrap().may_schedule(), MaySchedule::No) { None } else { let affinity_score = schedule_context.get_node_affinity(*node_id); diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index aa55b6e4cb..bad93ff39a 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -2213,6 +2213,30 @@ class NeonStorageController(MetricsGetter, LogUtils): headers=self.headers(TokenScope.ADMIN), ) + def node_drain(self, node_id): + log.info(f"node_drain({node_id})") + self.request( + "PUT", + f"{self.env.storage_controller_api}/control/v1/node/{node_id}/drain", + headers=self.headers(TokenScope.ADMIN), + ) + + def node_fill(self, node_id): + log.info(f"node_fill({node_id})") + self.request( + "PUT", + f"{self.env.storage_controller_api}/control/v1/node/{node_id}/fill", + headers=self.headers(TokenScope.ADMIN), + ) + + def node_status(self, node_id): + response = self.request( + "GET", + f"{self.env.storage_controller_api}/control/v1/node/{node_id}", + headers=self.headers(TokenScope.ADMIN), + ) + return response.json() + def node_list(self): response = self.request( "GET", diff --git a/test_runner/regress/test_storage_controller.py b/test_runner/regress/test_storage_controller.py index 8624a45f45..30f96ceee8 100644 --- a/test_runner/regress/test_storage_controller.py +++ b/test_runner/regress/test_storage_controller.py @@ -40,7 +40,7 @@ from werkzeug.wrappers.response import Response def get_node_shard_counts(env: NeonEnv, tenant_ids): - counts: defaultdict[str, int] = defaultdict(int) + counts: defaultdict[int, int] = defaultdict(int) for tid in tenant_ids: for shard in env.storage_controller.locate(tid): counts[shard["node_id"]] += 1 @@ -1502,3 +1502,120 @@ def test_tenant_import(neon_env_builder: NeonEnvBuilder, shard_count, remote_sto workload = Workload(env, tenant_id, timeline, branch_name=branch) workload.expect_rows = expect_rows workload.validate() + + +def test_graceful_cluster_restart(neon_env_builder: NeonEnvBuilder): + """ + Graceful reststart of storage controller clusters use the drain and + fill hooks in order to migrate attachments away from pageservers before + restarting. In practice, Ansible will drive this process. + """ + neon_env_builder.num_pageservers = 2 + env = neon_env_builder.init_configs() + env.start() + + tenant_count = 5 + shard_count_per_tenant = 8 + total_shards = tenant_count * shard_count_per_tenant + tenant_ids = [] + + for _ in range(0, tenant_count): + tid = TenantId.generate() + tenant_ids.append(tid) + env.neon_cli.create_tenant( + tid, placement_policy='{"Attached":1}', shard_count=shard_count_per_tenant + ) + + # Give things a chance to settle. + # A call to `reconcile_until_idle` could be used here instead, + # however since all attachments are placed on the same node, + # we'd have to wait for a long time (2 minutes-ish) for optimizations + # to quiesce. + # TODO: once the initial attachment selection is fixed, update this + # to use `reconcile_until_idle`. 
+ time.sleep(2) + + nodes = env.storage_controller.node_list() + assert len(nodes) == 2 + + def retryable_node_operation(op, ps_id, max_attempts, backoff): + while max_attempts > 0: + try: + op(ps_id) + return + except StorageControllerApiException as e: + max_attempts -= 1 + log.info(f"Operation failed ({max_attempts} attempts left): {e}") + + if max_attempts == 0: + raise e + + time.sleep(backoff) + + def poll_node_status(node_id, desired_scheduling_policy, max_attempts, backoff): + log.info(f"Polling {node_id} for {desired_scheduling_policy} scheduling policy") + while max_attempts > 0: + try: + status = env.storage_controller.node_status(node_id) + policy = status["scheduling"] + if policy == desired_scheduling_policy: + return + else: + max_attempts -= 1 + log.info(f"Status call returned {policy=} ({max_attempts} attempts left)") + + if max_attempts == 0: + raise AssertionError( + f"Status for {node_id=} did not reach {desired_scheduling_policy=}" + ) + + time.sleep(backoff) + except StorageControllerApiException as e: + max_attempts -= 1 + log.info(f"Status call failed ({max_attempts} retries left): {e}") + + if max_attempts == 0: + raise e + + time.sleep(backoff) + + def assert_shard_counts_balanced(env: NeonEnv, shard_counts, total_shards): + # Assert that all nodes have some attached shards + assert len(shard_counts) == len(env.pageservers) + + min_shard_count = min(shard_counts.values()) + max_shard_count = max(shard_counts.values()) + + flake_factor = 5 / 100 + assert max_shard_count - min_shard_count <= int(total_shards * flake_factor) + + # Perform a graceful rolling restart + for ps in env.pageservers: + retryable_node_operation( + lambda ps_id: env.storage_controller.node_drain(ps_id), ps.id, max_attempts=3, backoff=2 + ) + poll_node_status(ps.id, "PauseForRestart", max_attempts=6, backoff=5) + + shard_counts = get_node_shard_counts(env, tenant_ids) + log.info(f"Shard counts after draining node {ps.id}: {shard_counts}") + # Assert that we've drained the node + assert shard_counts[ps.id] == 0 + # Assert that those shards actually went somewhere + assert sum(shard_counts.values()) == total_shards + + ps.restart() + poll_node_status(ps.id, "Active", max_attempts=10, backoff=1) + + retryable_node_operation( + lambda ps_id: env.storage_controller.node_fill(ps_id), ps.id, max_attempts=3, backoff=2 + ) + poll_node_status(ps.id, "Active", max_attempts=6, backoff=5) + + shard_counts = get_node_shard_counts(env, tenant_ids) + log.info(f"Shard counts after filling node {ps.id}: {shard_counts}") + assert_shard_counts_balanced(env, shard_counts, total_shards) + + # Now check that shards are reasonably balanced + shard_counts = get_node_shard_counts(env, tenant_ids) + log.info(f"Shard counts after rolling restart: {shard_counts}") + assert_shard_counts_balanced(env, shard_counts, total_shards) From e7d62a257d8c56e2289733a9890557f9dbff93cb Mon Sep 17 00:00:00 2001 From: Vlad Lazar Date: Wed, 19 Jun 2024 11:55:59 +0100 Subject: [PATCH 215/220] test: fix tenant duplication utility generation numbers (#8096) ## Problem We have this set of test utilities which duplicate a tenant by copying everything that's in remote storage and then attaching a tenant to the pageserver and storage controller. When the "copied tenants" are created on the storage controller, they start off from generation number 0. This means that they can't see anything past that generation. 
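Roughly speaking, the visibility rule at play can be sketched as follows (a deliberate simplification for illustration; the pageserver's actual index and layer handling is more involved):

```rust
// An attachment in generation `my_gen` only trusts remote state written by
// generations up to and including `my_gen`, so a tenant pinned at generation 0
// sees nothing written later.
fn newest_visible_generation(written_generations: &[u32], my_gen: u32) -> Option<u32> {
    written_generations.iter().copied().filter(|g| *g <= my_gen).max()
}

fn main() {
    let written = [0, 3, 7]; // generations that uploaded state while building the template
    assert_eq!(newest_visible_generation(&written, 0), Some(0)); // stuck at the start
    assert_eq!(newest_visible_generation(&written, 100), Some(7)); // a high override sees everything
}
```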
This issues has existed ever since generation numbers have been introduced, but we've largely been lucky for the generation to stay stable during the template tenant creation. ## Summary of Changes Extend the storage controller debug attach hook to accept a generation override. Use that in the tenant duplication logic to set the generation number to something greater than the naturally reached generation. This allows the tenants to see all layer files. --- control_plane/src/storage_controller.rs | 2 ++ storage_controller/src/service.rs | 3 ++- test_runner/fixtures/neon_fixtures.py | 16 ++++++++++++++-- test_runner/fixtures/pageserver/many_tenants.py | 2 ++ 4 files changed, 20 insertions(+), 3 deletions(-) diff --git a/control_plane/src/storage_controller.rs b/control_plane/src/storage_controller.rs index b6b7ea7762..72948e203f 100644 --- a/control_plane/src/storage_controller.rs +++ b/control_plane/src/storage_controller.rs @@ -46,6 +46,7 @@ const STORAGE_CONTROLLER_POSTGRES_VERSION: u32 = 16; pub struct AttachHookRequest { pub tenant_shard_id: TenantShardId, pub node_id: Option, + pub generation_override: Option, } #[derive(Serialize, Deserialize)] @@ -440,6 +441,7 @@ impl StorageController { let request = AttachHookRequest { tenant_shard_id, node_id: Some(pageserver_id), + generation_override: None, }; let response = self diff --git a/storage_controller/src/service.rs b/storage_controller/src/service.rs index c94af113db..181e262638 100644 --- a/storage_controller/src/service.rs +++ b/storage_controller/src/service.rs @@ -1234,13 +1234,14 @@ impl Service { let locked = self.inner.write().unwrap(); !locked.tenants.contains_key(&attach_req.tenant_shard_id) }; + if insert { let tsp = TenantShardPersistence { tenant_id: attach_req.tenant_shard_id.tenant_id.to_string(), shard_number: attach_req.tenant_shard_id.shard_number.0 as i32, shard_count: attach_req.tenant_shard_id.shard_count.literal() as i32, shard_stripe_size: 0, - generation: Some(0), + generation: attach_req.generation_override.or(Some(0)), generation_pageserver: None, placement_policy: serde_json::to_string(&PlacementPolicy::Attached(0)).unwrap(), config: serde_json::to_string(&TenantConfig::default()).unwrap(), diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index bad93ff39a..8994db8cf2 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -2159,12 +2159,19 @@ class NeonStorageController(MetricsGetter, LogUtils): return time.time() - t1 def attach_hook_issue( - self, tenant_shard_id: Union[TenantId, TenantShardId], pageserver_id: int + self, + tenant_shard_id: Union[TenantId, TenantShardId], + pageserver_id: int, + generation_override: Optional[int] = None, ) -> int: + body = {"tenant_shard_id": str(tenant_shard_id), "node_id": pageserver_id} + if generation_override is not None: + body["generation_override"] = generation_override + response = self.request( "POST", f"{self.env.storage_controller_api}/debug/v1/attach-hook", - json={"tenant_shard_id": str(tenant_shard_id), "node_id": pageserver_id}, + json=body, headers=self.headers(TokenScope.ADMIN), ) gen = response.json()["gen"] @@ -2635,6 +2642,7 @@ class NeonPageserver(PgProtocol, LogUtils): config: None | Dict[str, Any] = None, config_null: bool = False, generation: Optional[int] = None, + override_storage_controller_generation: bool = False, ): """ Tenant attachment passes through here to acquire a generation number before proceeding @@ -2643,6 +2651,10 @@ class NeonPageserver(PgProtocol, 
LogUtils): client = self.http_client() if generation is None: generation = self.env.storage_controller.attach_hook_issue(tenant_id, self.id) + elif override_storage_controller_generation: + generation = self.env.storage_controller.attach_hook_issue( + tenant_id, self.id, generation + ) return client.tenant_attach( tenant_id, config, diff --git a/test_runner/fixtures/pageserver/many_tenants.py b/test_runner/fixtures/pageserver/many_tenants.py index def80a1c3e..8730d8ef75 100644 --- a/test_runner/fixtures/pageserver/many_tenants.py +++ b/test_runner/fixtures/pageserver/many_tenants.py @@ -66,6 +66,8 @@ def single_timeline( env.pageserver.tenant_attach( tenant, config=template_config.copy(), + generation=100, + override_storage_controller_generation=True, ) time.sleep(0.1) wait_until_tenant_state(ps_http, tenant, "Broken", 10) From 438fd2aaf32a682b4cd1175f6e992190321490e1 Mon Sep 17 00:00:00 2001 From: Christian Schwarz Date: Wed, 19 Jun 2024 13:59:36 +0200 Subject: [PATCH 216/220] neon_local: `background_process`: launch all processes in repo dir (or `datadir`) (#8058) Before this PR, storage controller and broker would run in the PWD of neon_local, i.e., most likely the checkout of neon.git. With this PR, the shared infrastructure for background processes sets the PWD. Benefits: * easy listing of processes in a repo dir using `lsof`, see added comment in the code * coredumps go in the right directory (next to the process) * generally matching common expectations, I think Changes: * set the working directory in `background_process` module * drive-by: fix reliance of storage_controller on NEON_REPO_DIR being set by neon_local for the local compute hook to work correctly --- control_plane/src/background_process.rs | 11 +++++++- control_plane/src/bin/neon_local.rs | 6 +++-- control_plane/src/local_env.rs | 35 +++++++++++++++++-------- control_plane/src/storage_controller.rs | 10 ++++--- storage_controller/src/compute_hook.rs | 8 +++++- storage_controller/src/main.rs | 8 ++++++ storage_controller/src/service.rs | 4 +++ 7 files changed, 63 insertions(+), 19 deletions(-) diff --git a/control_plane/src/background_process.rs b/control_plane/src/background_process.rs index 94666f2870..3f4ddbdb2b 100644 --- a/control_plane/src/background_process.rs +++ b/control_plane/src/background_process.rs @@ -69,6 +69,9 @@ where // Not generic AsRef, otherwise empty `envs` prevents type inference EI: IntoIterator, { + if !datadir.metadata().context("stat datadir")?.is_dir() { + anyhow::bail!("`datadir` must be a directory when calling this function: {datadir:?}"); + } let log_path = datadir.join(format!("{process_name}.log")); let process_log_file = fs::OpenOptions::new() .create(true) @@ -85,7 +88,13 @@ where let background_command = command .stdout(process_log_file) .stderr(same_file_for_stderr) - .args(args); + .args(args) + // spawn all child processes in their datadir, useful for all kinds of things, + // not least cleaning up child processes e.g. 
after an unclean exit from the test suite: + // ``` + // lsof -d cwd -a +D Users/cs/src/neon/test_output + // ``` + .current_dir(datadir); let filled_cmd = fill_env_vars_prefixed_neon(fill_remote_storage_secrets_vars( fill_rust_env_vars(background_command), diff --git a/control_plane/src/bin/neon_local.rs b/control_plane/src/bin/neon_local.rs index 18e395e2b5..8fe959792b 100644 --- a/control_plane/src/bin/neon_local.rs +++ b/control_plane/src/bin/neon_local.rs @@ -87,7 +87,8 @@ fn main() -> Result<()> { handle_init(sub_args).map(Some) } else { // all other commands need an existing config - let mut env = LocalEnv::load_config().context("Error loading config")?; + let mut env = + LocalEnv::load_config(&local_env::base_path()).context("Error loading config")?; let original_env = env.clone(); let rt = tokio::runtime::Builder::new_current_thread() @@ -364,7 +365,8 @@ fn handle_init(init_match: &ArgMatches) -> anyhow::Result { LocalEnv::init(init_conf, force) .context("materialize initial neon_local environment on disk")?; - Ok(LocalEnv::load_config().expect("freshly written config should be loadable")) + Ok(LocalEnv::load_config(&local_env::base_path()) + .expect("freshly written config should be loadable")) } /// The default pageserver is the one where CLI tenant/timeline operations are sent by default. diff --git a/control_plane/src/local_env.rs b/control_plane/src/local_env.rs index 0edcf1be4e..6634274d2a 100644 --- a/control_plane/src/local_env.rs +++ b/control_plane/src/local_env.rs @@ -42,8 +42,8 @@ pub struct LocalEnv { // compute endpoints). // // This is not stored in the config file. Rather, this is the path where the - // config file itself is. It is read from the NEON_REPO_DIR env variable or - // '.neon' if not given. + // config file itself is. It is read from the NEON_REPO_DIR env variable which + // must be an absolute path. If the env var is not set, $PWD/.neon is used. pub base_data_dir: PathBuf, // Path to postgres distribution. It's expected that "bin", "include", @@ -431,9 +431,7 @@ impl LocalEnv { } /// Construct `Self` from on-disk state. - pub fn load_config() -> anyhow::Result { - let repopath = base_path(); - + pub fn load_config(repopath: &Path) -> anyhow::Result { if !repopath.exists() { bail!( "Neon config is not found in {}. 
You need to run 'neon_local init' first", @@ -461,7 +459,7 @@ impl LocalEnv { branch_name_mappings, } = on_disk_config; LocalEnv { - base_data_dir: repopath.clone(), + base_data_dir: repopath.to_owned(), pg_distrib_dir, neon_distrib_dir, default_tenant_id, @@ -482,7 +480,7 @@ impl LocalEnv { "we ensure this during deserialization" ); env.pageservers = { - let iter = std::fs::read_dir(&repopath).context("open dir")?; + let iter = std::fs::read_dir(repopath).context("open dir")?; let mut pageservers = Vec::new(); for res in iter { let dentry = res?; @@ -719,10 +717,25 @@ impl LocalEnv { } pub fn base_path() -> PathBuf { - match std::env::var_os("NEON_REPO_DIR") { - Some(val) => PathBuf::from(val), - None => PathBuf::from(".neon"), - } + let path = match std::env::var_os("NEON_REPO_DIR") { + Some(val) => { + let path = PathBuf::from(val); + if !path.is_absolute() { + // repeat the env var in the error because our default is always absolute + panic!("NEON_REPO_DIR must be an absolute path, got {path:?}"); + } + path + } + None => { + let pwd = std::env::current_dir() + // technically this can fail but it's quite unlikeley + .expect("determine current directory"); + let pwd_abs = pwd.canonicalize().expect("canonicalize current directory"); + pwd_abs.join(".neon") + } + }; + assert!(path.is_absolute()); + path } /// Generate a public/private key pair for JWT authentication diff --git a/control_plane/src/storage_controller.rs b/control_plane/src/storage_controller.rs index 72948e203f..4f9f0ba794 100644 --- a/control_plane/src/storage_controller.rs +++ b/control_plane/src/storage_controller.rs @@ -314,15 +314,17 @@ impl StorageController { args.push(format!("--split-threshold={split_threshold}")) } + args.push(format!( + "--neon-local-repo-dir={}", + self.env.base_data_dir.display() + )); + background_process::start_process( COMMAND, &self.env.base_data_dir, &self.env.storage_controller_bin(), args, - [( - "NEON_REPO_DIR".to_string(), - self.env.base_data_dir.to_string_lossy().to_string(), - )], + [], background_process::InitialPidFile::Create(self.pid_file()), || async { match self.ready().await { diff --git a/storage_controller/src/compute_hook.rs b/storage_controller/src/compute_hook.rs index 9d326ef82d..a1d051f150 100644 --- a/storage_controller/src/compute_hook.rs +++ b/storage_controller/src/compute_hook.rs @@ -283,7 +283,13 @@ impl ComputeHook { // all calls to this function let _locked = self.neon_local_lock.lock().await; - let env = match LocalEnv::load_config() { + let Some(repo_dir) = self.config.neon_local_repo_dir.as_deref() else { + tracing::warn!( + "neon_local_repo_dir not set, likely a bug in neon_local; skipping compute update" + ); + return Ok(()); + }; + let env = match LocalEnv::load_config(repo_dir) { Ok(e) => e, Err(e) => { tracing::warn!("Couldn't load neon_local config, skipping compute update ({e})"); diff --git a/storage_controller/src/main.rs b/storage_controller/src/main.rs index ce8f8d0cdd..f1eb0b30fc 100644 --- a/storage_controller/src/main.rs +++ b/storage_controller/src/main.rs @@ -4,6 +4,7 @@ use clap::Parser; use diesel::Connection; use metrics::launch_timestamp::LaunchTimestamp; use metrics::BuildInfo; +use std::path::PathBuf; use std::sync::Arc; use storage_controller::http::make_router; use storage_controller::metrics::preinitialize_metrics; @@ -77,6 +78,12 @@ struct Cli { /// How long to wait for the initial database connection to be available. 
#[arg(long, default_value = "5s")] db_connect_timeout: humantime::Duration, + + /// `neon_local` sets this to the path of the neon_local repo dir. + /// Only relevant for testing. + // TODO: make `cfg(feature = "testing")` + #[arg(long)] + neon_local_repo_dir: Option, } enum StrictMode { @@ -260,6 +267,7 @@ async fn async_main() -> anyhow::Result<()> { .reconciler_concurrency .unwrap_or(RECONCILER_CONCURRENCY_DEFAULT), split_threshold: args.split_threshold, + neon_local_repo_dir: args.neon_local_repo_dir, }; // After loading secrets & config, but before starting anything else, apply database migrations diff --git a/storage_controller/src/service.rs b/storage_controller/src/service.rs index 181e262638..8475bf46d2 100644 --- a/storage_controller/src/service.rs +++ b/storage_controller/src/service.rs @@ -2,6 +2,7 @@ use std::{ borrow::Cow, cmp::Ordering, collections::{BTreeMap, HashMap, HashSet}, + path::PathBuf, str::FromStr, sync::Arc, time::{Duration, Instant}, @@ -236,6 +237,9 @@ pub struct Config { /// How large must a shard grow in bytes before we split it? /// None disables auto-splitting. pub split_threshold: Option, + + // TODO: make this cfg(feature = "testing") + pub neon_local_repo_dir: Option, } impl From for ApiError { From 76aa6936e8e4303fff7809c6c2e9b9467086a0dd Mon Sep 17 00:00:00 2001 From: John Spray Date: Wed, 19 Jun 2024 13:14:50 +0100 Subject: [PATCH 217/220] tests: make Endpoint.stop() thread safe (occasional flakes in `test_multi_attach`) (#8110) ## Problem Tests using the `Workload` helper would occasionally fail in a strange way, where the endpoint appears to try and stop twice concurrently, and the second stop fails because the pidfile is already gone. `test_multi_attach` suffered from this. Workload has a `__del__` that stops the endpoint, and python is destroying this object in a different thread than NeonEnv.stop is called, resulting in racing stop() calls. Endpoint has a `running` attribute that avoids calling neon_local's stop twice, but that doesn't help in the concurrent case. ## Summary of changes - Make `Endpoint.stop` thread safe with a simple lock held across the updates to `running` and the actual act of stopping it. One could also work around this by letting Workload.endpoint outlive the Workload, or making Workload a context manager, but this change feels most robust, as it avoids all test code having to know that it must not try and stop an endpoint from a destructor. --- test_runner/fixtures/neon_fixtures.py | 53 +++++++++++++++++---------- 1 file changed, 33 insertions(+), 20 deletions(-) diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index 8994db8cf2..49857d5151 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -3446,6 +3446,12 @@ class Endpoint(PgProtocol, LogUtils): self.active_safekeepers: List[int] = list(map(lambda sk: sk.id, env.safekeepers)) # path to conf is /endpoints//pgdata/postgresql.conf + # This lock prevents concurrent start & stop operations, keeping `self.running` consistent + # with whether we're really running. Tests generally wouldn't try and do these concurrently, + # but endpoints are also stopped during test teardown, which might happen concurrently with + # destruction of objects in tests. 
+ self.lock = threading.Lock() + def http_client( self, auth_token: Optional[str] = None, retries: Optional[Retry] = None ) -> EndpointHttpClient: @@ -3516,14 +3522,15 @@ class Endpoint(PgProtocol, LogUtils): log.info(f"Starting postgres endpoint {self.endpoint_id}") - self.env.neon_cli.endpoint_start( - self.endpoint_id, - safekeepers=self.active_safekeepers, - remote_ext_config=remote_ext_config, - pageserver_id=pageserver_id, - allow_multiple=allow_multiple, - ) - self.running = True + with self.lock: + self.env.neon_cli.endpoint_start( + self.endpoint_id, + safekeepers=self.active_safekeepers, + remote_ext_config=remote_ext_config, + pageserver_id=pageserver_id, + allow_multiple=allow_multiple, + ) + self.running = True return self @@ -3615,15 +3622,20 @@ class Endpoint(PgProtocol, LogUtils): def stop(self, mode: str = "fast") -> "Endpoint": """ Stop the Postgres instance if it's running. + + Because test teardown might try and stop an endpoint concurrently with test code + stopping the endpoint, this method is thread safe + Returns self. """ - if self.running: - assert self.endpoint_id is not None - self.env.neon_cli.endpoint_stop( - self.endpoint_id, check_return_code=self.check_stop_result, mode=mode - ) - self.running = False + with self.lock: + if self.running: + assert self.endpoint_id is not None + self.env.neon_cli.endpoint_stop( + self.endpoint_id, check_return_code=self.check_stop_result, mode=mode + ) + self.running = False return self @@ -3633,12 +3645,13 @@ class Endpoint(PgProtocol, LogUtils): Returns self. """ - assert self.endpoint_id is not None - self.env.neon_cli.endpoint_stop( - self.endpoint_id, True, check_return_code=self.check_stop_result, mode=mode - ) - self.endpoint_id = None - self.running = False + with self.lock: + assert self.endpoint_id is not None + self.env.neon_cli.endpoint_stop( + self.endpoint_id, True, check_return_code=self.check_stop_result, mode=mode + ) + self.endpoint_id = None + self.running = False return self From b998b703158923c493a9ce359e8c81ec89706653 Mon Sep 17 00:00:00 2001 From: Conrad Ludgate Date: Wed, 19 Jun 2024 13:34:15 +0100 Subject: [PATCH 218/220] proxy: reduce some per-task memory usage (#8095) ## Problem Some tasks are using around upwards of 10KB of memory at all times, sometimes having buffers that swing them up to 30MB. ## Summary of changes Split some of the async tasks in selective places and box them as appropriate to try and reduce the constant memory usage. Especially in the locations where the large future is only a small part of the total runtime of the task. Also, reduces the size of the CopyBuffer buffer size from 8KB to 1KB. In my local testing and in staging this had a minor improvement. 
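The mechanism can be seen in a tiny self-contained sketch (illustrative only, not the proxy's actual code): an async block embeds its child futures inline, so one large child inflates every task containing it, while boxing the child moves that state to the heap.

```rust
async fn heavy_startup() -> usize {
    let scratch = [0u8; 8 * 1024]; // stands in for large connection-setup state
    std::future::ready(()).await; // an await point, so `scratch` is kept in the future's state
    scratch.len()
}

fn main() {
    let inline = async { heavy_startup().await };
    let boxed = async { Box::pin(heavy_startup()).await };
    // Typically prints something over 8 KiB for the first and a few dozen bytes for the second.
    println!("inline: {} bytes", std::mem::size_of_val(&inline));
    println!("boxed:  {} bytes", std::mem::size_of_val(&boxed));
}
```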
sadly not the improvement I was hoping for :/ Might have more impact in production --- proxy/src/proxy.rs | 42 ++++----- proxy/src/proxy/copy_bidirectional.rs | 2 +- proxy/src/serverless.rs | 120 ++++++++++++++++---------- proxy/src/serverless/backend.rs | 2 +- proxy/src/serverless/conn_pool.rs | 2 +- proxy/src/serverless/sql_over_http.rs | 44 +++++----- proxy/src/serverless/websocket.rs | 4 +- 7 files changed, 124 insertions(+), 92 deletions(-) diff --git a/proxy/src/proxy.rs b/proxy/src/proxy.rs index 95b46ae002..072f51958f 100644 --- a/proxy/src/proxy.rs +++ b/proxy/src/proxy.rs @@ -91,7 +91,7 @@ pub async fn task_main( let endpoint_rate_limiter2 = endpoint_rate_limiter.clone(); connections.spawn(async move { - let (socket, peer_addr) = match read_proxy_protocol(socket).await{ + let (socket, peer_addr) = match read_proxy_protocol(socket).await { Ok((socket, Some(addr))) => (socket, addr.ip()), Err(e) => { error!("per-client task finished with an error: {e:#}"); @@ -101,36 +101,38 @@ pub async fn task_main( error!("missing required client IP"); return; } - Ok((socket, None)) => (socket, peer_addr.ip()) + Ok((socket, None)) => (socket, peer_addr.ip()), }; match socket.inner.set_nodelay(true) { - Ok(()) => {}, + Ok(()) => {} Err(e) => { error!("per-client task finished with an error: failed to set socket option: {e:#}"); return; - }, + } }; let mut ctx = RequestMonitoring::new( - session_id, - peer_addr, - crate::metrics::Protocol::Tcp, - &config.region, - ); + session_id, + peer_addr, + crate::metrics::Protocol::Tcp, + &config.region, + ); let span = ctx.span.clone(); - let res = handle_client( - config, - &mut ctx, - cancellation_handler, - socket, - ClientMode::Tcp, - endpoint_rate_limiter2, - conn_gauge, - ) - .instrument(span.clone()) - .await; + let startup = Box::pin( + handle_client( + config, + &mut ctx, + cancellation_handler, + socket, + ClientMode::Tcp, + endpoint_rate_limiter2, + conn_gauge, + ) + .instrument(span.clone()), + ); + let res = startup.await; match res { Err(e) => { diff --git a/proxy/src/proxy/copy_bidirectional.rs b/proxy/src/proxy/copy_bidirectional.rs index 4b09ebd8dc..aaf3688f21 100644 --- a/proxy/src/proxy/copy_bidirectional.rs +++ b/proxy/src/proxy/copy_bidirectional.rs @@ -98,7 +98,7 @@ pub(super) struct CopyBuffer { amt: u64, buf: Box<[u8]>, } -const DEFAULT_BUF_SIZE: usize = 8 * 1024; +const DEFAULT_BUF_SIZE: usize = 1024; impl CopyBuffer { pub(super) fn new() -> Self { diff --git a/proxy/src/serverless.rs b/proxy/src/serverless.rs index 24ee749e6e..efa999ed7d 100644 --- a/proxy/src/serverless.rs +++ b/proxy/src/serverless.rs @@ -27,14 +27,14 @@ use rand::SeedableRng; pub use reqwest_middleware::{ClientWithMiddleware, Error}; pub use reqwest_retry::{policies::ExponentialBackoff, RetryTransientMiddleware}; use tokio::time::timeout; -use tokio_rustls::TlsAcceptor; +use tokio_rustls::{server::TlsStream, TlsAcceptor}; use tokio_util::task::TaskTracker; use crate::cancellation::CancellationHandlerMain; use crate::config::ProxyConfig; use crate::context::RequestMonitoring; use crate::metrics::Metrics; -use crate::protocol2::read_proxy_protocol; +use crate::protocol2::{read_proxy_protocol, ChainRW}; use crate::proxy::run_until_cancelled; use crate::rate_limiter::EndpointRateLimiter; use crate::serverless::backend::PoolingBackend; @@ -102,8 +102,6 @@ pub async fn task_main( let connections = tokio_util::task::task_tracker::TaskTracker::new(); connections.close(); // allows `connections.wait to complete` - let server = Builder::new(TokioExecutor::new()); - while 
let Some(res) = run_until_cancelled(ws_listener.accept(), &cancellation_token).await { let (conn, peer_addr) = res.context("could not accept TCP stream")?; if let Err(e) = conn.set_nodelay(true) { @@ -127,24 +125,50 @@ pub async fn task_main( } let conn_token = cancellation_token.child_token(); - let conn = connection_handler( - config, - backend.clone(), - connections.clone(), - cancellation_handler.clone(), - endpoint_rate_limiter.clone(), - conn_token.clone(), - server.clone(), - tls_acceptor.clone(), - conn, - peer_addr, - ) - .instrument(http_conn_span); + let tls_acceptor = tls_acceptor.clone(); + let backend = backend.clone(); + let connections2 = connections.clone(); + let cancellation_handler = cancellation_handler.clone(); + let endpoint_rate_limiter = endpoint_rate_limiter.clone(); + connections.spawn( + async move { + let conn_token2 = conn_token.clone(); + let _cancel_guard = config.http_config.cancel_set.insert(conn_id, conn_token2); - connections.spawn(async move { - let _cancel_guard = config.http_config.cancel_set.insert(conn_id, conn_token); - conn.await - }); + let session_id = uuid::Uuid::new_v4(); + + let _gauge = Metrics::get() + .proxy + .client_connections + .guard(crate::metrics::Protocol::Http); + + let startup_result = Box::pin(connection_startup( + config, + tls_acceptor, + session_id, + conn, + peer_addr, + )) + .await; + let Some((conn, peer_addr)) = startup_result else { + return; + }; + + Box::pin(connection_handler( + config, + backend, + connections2, + cancellation_handler, + endpoint_rate_limiter, + conn_token, + conn, + peer_addr, + session_id, + )) + .await; + } + .instrument(http_conn_span), + ); } connections.wait().await; @@ -152,40 +176,22 @@ pub async fn task_main( Ok(()) } -/// Handles the TCP lifecycle. -/// +/// Handles the TCP startup lifecycle. /// 1. Parses PROXY protocol V2 /// 2. Handles TLS handshake -/// 3. Handles HTTP connection -/// 1. With graceful shutdowns -/// 2. With graceful request cancellation with connection failure -/// 3. With websocket upgrade support. -#[allow(clippy::too_many_arguments)] -async fn connection_handler( - config: &'static ProxyConfig, - backend: Arc, - connections: TaskTracker, - cancellation_handler: Arc, - endpoint_rate_limiter: Arc, - cancellation_token: CancellationToken, - server: Builder, +async fn connection_startup( + config: &ProxyConfig, tls_acceptor: TlsAcceptor, + session_id: uuid::Uuid, conn: TcpStream, peer_addr: SocketAddr, -) { - let session_id = uuid::Uuid::new_v4(); - - let _gauge = Metrics::get() - .proxy - .client_connections - .guard(crate::metrics::Protocol::Http); - +) -> Option<(TlsStream>, IpAddr)> { // handle PROXY protocol let (conn, peer) = match read_proxy_protocol(conn).await { Ok(c) => c, Err(e) => { tracing::error!(?session_id, %peer_addr, "failed to accept TCP connection: invalid PROXY protocol V2 header: {e:#}"); - return; + return None; } }; @@ -208,7 +214,7 @@ async fn connection_handler( Metrics::get().proxy.tls_handshake_failures.inc(); } warn!(?session_id, %peer_addr, "failed to accept TLS connection: {e:?}"); - return; + return None; } // The handshake timed out Err(e) => { @@ -216,16 +222,36 @@ async fn connection_handler( Metrics::get().proxy.tls_handshake_failures.inc(); } warn!(?session_id, %peer_addr, "failed to accept TLS connection: {e:?}"); - return; + return None; } }; + Some((conn, peer_addr)) +} + +/// Handles HTTP connection +/// 1. With graceful shutdowns +/// 2. With graceful request cancellation with connection failure +/// 3. 
With websocket upgrade support. +#[allow(clippy::too_many_arguments)] +async fn connection_handler( + config: &'static ProxyConfig, + backend: Arc, + connections: TaskTracker, + cancellation_handler: Arc, + endpoint_rate_limiter: Arc, + cancellation_token: CancellationToken, + conn: TlsStream>, + peer_addr: IpAddr, + session_id: uuid::Uuid, +) { let session_id = AtomicTake::new(session_id); // Cancel all current inflight HTTP requests if the HTTP connection is closed. let http_cancellation_token = CancellationToken::new(); let _cancel_connection = http_cancellation_token.clone().drop_guard(); + let server = Builder::new(TokioExecutor::new()); let conn = server.serve_connection_with_upgrades( hyper_util::rt::TokioIo::new(conn), hyper1::service::service_fn(move |req: hyper1::Request| { diff --git a/proxy/src/serverless/backend.rs b/proxy/src/serverless/backend.rs index a40c66a80d..86e64c0a38 100644 --- a/proxy/src/serverless/backend.rs +++ b/proxy/src/serverless/backend.rs @@ -104,7 +104,7 @@ impl PoolingBackend { ) -> Result, HttpConnError> { let maybe_client = if !force_new { info!("pool: looking for an existing connection"); - self.pool.get(ctx, &conn_info).await? + self.pool.get(ctx, &conn_info)? } else { info!("pool: pool is disabled"); None diff --git a/proxy/src/serverless/conn_pool.rs b/proxy/src/serverless/conn_pool.rs index 5fa253acf8..170bda062e 100644 --- a/proxy/src/serverless/conn_pool.rs +++ b/proxy/src/serverless/conn_pool.rs @@ -375,7 +375,7 @@ impl GlobalConnPool { } } - pub async fn get( + pub fn get( self: &Arc, ctx: &mut RequestMonitoring, conn_info: &ConnInfo, diff --git a/proxy/src/serverless/sql_over_http.rs b/proxy/src/serverless/sql_over_http.rs index 9d6a475aeb..7a99aeb759 100644 --- a/proxy/src/serverless/sql_over_http.rs +++ b/proxy/src/serverless/sql_over_http.rs @@ -533,27 +533,31 @@ async fn handle_inner( return Err(SqlOverHttpError::RequestTooLarge); } - let fetch_and_process_request = async { - let body = request.into_body().collect().await?.to_bytes(); - info!(length = body.len(), "request payload read"); - let payload: Payload = serde_json::from_slice(&body)?; - Ok::(payload) // Adjust error type accordingly - } - .map_err(SqlOverHttpError::from); + let fetch_and_process_request = Box::pin( + async { + let body = request.into_body().collect().await?.to_bytes(); + info!(length = body.len(), "request payload read"); + let payload: Payload = serde_json::from_slice(&body)?; + Ok::(payload) // Adjust error type accordingly + } + .map_err(SqlOverHttpError::from), + ); - let authenticate_and_connect = async { - let keys = backend - .authenticate(ctx, &config.authentication_config, &conn_info) - .await?; - let client = backend - .connect_to_compute(ctx, conn_info, keys, !allow_pool) - .await?; - // not strictly necessary to mark success here, - // but it's just insurance for if we forget it somewhere else - ctx.latency_timer.success(); - Ok::<_, HttpConnError>(client) - } - .map_err(SqlOverHttpError::from); + let authenticate_and_connect = Box::pin( + async { + let keys = backend + .authenticate(ctx, &config.authentication_config, &conn_info) + .await?; + let client = backend + .connect_to_compute(ctx, conn_info, keys, !allow_pool) + .await?; + // not strictly necessary to mark success here, + // but it's just insurance for if we forget it somewhere else + ctx.latency_timer.success(); + Ok::<_, HttpConnError>(client) + } + .map_err(SqlOverHttpError::from), + ); let (payload, mut client) = match run_until_cancelled( // Run both operations in parallel diff --git 
a/proxy/src/serverless/websocket.rs b/proxy/src/serverless/websocket.rs index 7d3153a3c1..0e9772733d 100644 --- a/proxy/src/serverless/websocket.rs +++ b/proxy/src/serverless/websocket.rs @@ -141,7 +141,7 @@ pub async fn serve_websocket( .client_connections .guard(crate::metrics::Protocol::Ws); - let res = handle_client( + let res = Box::pin(handle_client( config, &mut ctx, cancellation_handler, @@ -149,7 +149,7 @@ pub async fn serve_websocket( ClientMode::Websockets { hostname }, endpoint_rate_limiter, conn_gauge, - ) + )) .await; match res { From 56da62487015f78c2cfbb48132bc85cd6f1f93d3 Mon Sep 17 00:00:00 2001 From: Peter Bendel Date: Wed, 19 Jun 2024 15:04:29 +0200 Subject: [PATCH 219/220] allow storage_controller error during pagebench (#8109) ## Problem `test_pageserver_max_throughput_getpage_at_latest_lsn` is a pagebench testcase which creates several tenants/timelines to verify pageserver performance. The test swaps environments around in the tenant duplication stage, so the storage controller uses two separate db instances (one in the duplication stage and another one in the benchmarking stage). In the benchmarking stage, the storage controller starts without any knowledge of nodes, but with knowledge of tenants (via attachments.json). When we re-attach and attempt to update the scheduler stats, the scheduler rightfully complains about the node not being known. The setup should preserve the storage controller across the two envs, but i think it's fine to just allow list the error in this case. ## Summary of changes add the error message `2024-06-19T09:38:27.866085Z ERROR Scheduler missing node 1`` to the list of allowed errors for storage_controller --- ...est_pageserver_max_throughput_getpage_at_latest_lsn.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/test_runner/performance/pageserver/pagebench/test_pageserver_max_throughput_getpage_at_latest_lsn.py b/test_runner/performance/pageserver/pagebench/test_pageserver_max_throughput_getpage_at_latest_lsn.py index 772a39fe35..68f3d9dcbe 100644 --- a/test_runner/performance/pageserver/pagebench/test_pageserver_max_throughput_getpage_at_latest_lsn.py +++ b/test_runner/performance/pageserver/pagebench/test_pageserver_max_throughput_getpage_at_latest_lsn.py @@ -209,3 +209,11 @@ def run_benchmark_max_throughput_latest_lsn( unit="ms", report=MetricReport.LOWER_IS_BETTER, ) + + env.storage_controller.allowed_errors.append( + # The test setup swaps NeonEnv instances, hence different + # pg instances are used for the storage controller db. This means + # the storage controller doesn't know about the nodes mentioned + # in attachments.json at start-up. + ".* Scheduler missing node 1", + ) From fd0b22f5cd11d5df2013bd0d9c79cb70086b3fa8 Mon Sep 17 00:00:00 2001 From: MMeent Date: Wed, 19 Jun 2024 15:05:31 +0200 Subject: [PATCH 220/220] Make sure we can handle temporarily offline PS when we first connect (#8094) Fixes https://github.com/neondatabase/neon/issues/7897 ## Problem `shard->delay_us` was potentially uninitialized when we connect to PS, as it wasn't set to a non-0 value until we've first connected to the shard's pageserver. That caused the exponential backoff to use an initial value (multiplier) of 0 for the first connection attempt to that pageserver, thus causing a hot retry loop with connection attempts to the pageserver without significant delay. That in turn caused attemmpts to reconnect to quickly fail, rather than showing the expected 'wait until pageserver is available' behaviour. 
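In other words, the stored delay acts as the multiplier for the exponential backoff, and a multiplier of zero makes every computed wait zero. A sketch of the intended behaviour (in Rust for brevity; the actual fix is in the C extension below, and the doubling and constant values here are illustrative):

```rust
const MIN_RECONNECT_INTERVAL_USEC: u64 = 1_000;
const MAX_RECONNECT_INTERVAL_USEC: u64 = 1_000_000;

/// Doubling an uninitialized (zero) delay keeps it at zero forever, so reconnect
/// attempts run back-to-back; seeding it to the minimum first restores the
/// intended exponential backoff.
fn next_delay_us(current_us: u64) -> u64 {
    if current_us == 0 {
        MIN_RECONNECT_INTERVAL_USEC
    } else {
        (current_us * 2).min(MAX_RECONNECT_INTERVAL_USEC)
    }
}

fn main() {
    let mut delay_us: u64 = 0; // the uninitialized state the patch guards against
    for attempt in 1..=5 {
        delay_us = next_delay_us(delay_us);
        println!("attempt {attempt}: wait {delay_us} us before reconnecting");
    }
}
```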
## Summary of changes We initialize shard->delay_us before connection initialization if we notice it is not initialized yet. --- pgxn/neon/libpagestore.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/pgxn/neon/libpagestore.c b/pgxn/neon/libpagestore.c index 5eae2d8204..a665cafafe 100644 --- a/pgxn/neon/libpagestore.c +++ b/pgxn/neon/libpagestore.c @@ -381,6 +381,15 @@ pageserver_connect(shardno_t shard_no, int elevel) us_since_last_attempt = (int64) (now - shard->last_reconnect_time); shard->last_reconnect_time = now; + /* + * Make sure we don't do exponential backoff with a constant multiplier + * of 0 us, as that doesn't really do much for timeouts... + * + * cf. https://github.com/neondatabase/neon/issues/7897 + */ + if (shard->delay_us == 0) + shard->delay_us = MIN_RECONNECT_INTERVAL_USEC; + /* * If we did other tasks between reconnect attempts, then we won't * need to wait as long as a full delay.