diff --git a/control_plane/src/bin/neon_local.rs b/control_plane/src/bin/neon_local.rs
index 3bceef8fa7..ef6985d697 100644
--- a/control_plane/src/bin/neon_local.rs
+++ b/control_plane/src/bin/neon_local.rs
@@ -1279,6 +1279,7 @@ async fn handle_timeline(cmd: &TimelineCmd, env: &mut local_env::LocalEnv) -> Re
                 mode: pageserver_api::models::TimelineCreateRequestMode::Branch {
                     ancestor_timeline_id,
                     ancestor_start_lsn: start_lsn,
+                    read_only: false,
                     pg_version: None,
                 },
             };
diff --git a/libs/pageserver_api/src/models.rs b/libs/pageserver_api/src/models.rs
index 383939a13f..9f3736d57a 100644
--- a/libs/pageserver_api/src/models.rs
+++ b/libs/pageserver_api/src/models.rs
@@ -402,6 +402,8 @@ pub enum TimelineCreateRequestMode {
         // using a flattened enum, so, it was an accepted field, and
         // we continue to accept it by having it here.
         pg_version: Option<u32>,
+        #[serde(default, skip_serializing_if = "std::ops::Not::not")]
+        read_only: bool,
     },
     ImportPgdata {
         import_pgdata: TimelineCreateRequestModeImportPgdata,
diff --git a/pageserver/src/http/openapi_spec.yml b/pageserver/src/http/openapi_spec.yml
index 7ea148971f..cf99cb110c 100644
--- a/pageserver/src/http/openapi_spec.yml
+++ b/pageserver/src/http/openapi_spec.yml
@@ -626,6 +626,8 @@ paths:
                   format: hex
                 pg_version:
                   type: integer
+                read_only:
+                  type: boolean
                 existing_initdb_timeline_id:
                   type: string
                   format: hex
diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs
index 0d6791cddd..65e24ff3e9 100644
--- a/pageserver/src/http/routes.rs
+++ b/pageserver/src/http/routes.rs
@@ -572,6 +572,7 @@ async fn timeline_create_handler(
         TimelineCreateRequestMode::Branch {
             ancestor_timeline_id,
             ancestor_start_lsn,
+            read_only: _,
             pg_version: _,
         } => tenant::CreateTimelineParams::Branch(tenant::CreateTimelineParamsBranch {
             new_timeline_id,
diff --git a/storage_controller/src/service.rs b/storage_controller/src/service.rs
index 7e4bb627af..d8167e9d94 100644
--- a/storage_controller/src/service.rs
+++ b/storage_controller/src/service.rs
@@ -3823,6 +3823,13 @@ impl Service {
         .await;
         failpoint_support::sleep_millis_async!("tenant-create-timeline-shared-lock");
         let is_import = create_req.is_import();
+        let read_only = matches!(
+            create_req.mode,
+            models::TimelineCreateRequestMode::Branch {
+                read_only: true,
+                ..
+            }
+        );
 
         if is_import {
             // Ensure that there is no split on-going.
@@ -3895,13 +3902,13 @@ impl Service {
             }
 
             None
-        } else if safekeepers {
+        } else if safekeepers || read_only {
            // Note that for imported timelines, we do not create the timeline on the safekeepers
            // straight away. Instead, we do it once the import finalized such that we know what
            // start LSN to provide for the safekeepers. This is done in
            // [`Self::finalize_timeline_import`].
            let res = self
-                .tenant_timeline_create_safekeepers(tenant_id, &timeline_info)
+                .tenant_timeline_create_safekeepers(tenant_id, &timeline_info, read_only)
                .instrument(tracing::info_span!("timeline_create_safekeepers", %tenant_id, timeline_id=%timeline_info.timeline_id))
                .await?;
            Some(res)
diff --git a/storage_controller/src/service/safekeeper_service.rs b/storage_controller/src/service/safekeeper_service.rs
index cd5ace449d..1f673fe445 100644
--- a/storage_controller/src/service/safekeeper_service.rs
+++ b/storage_controller/src/service/safekeeper_service.rs
@@ -208,6 +208,7 @@ impl Service {
         self: &Arc<Self>,
         tenant_id: TenantId,
         timeline_info: &TimelineInfo,
+        read_only: bool,
     ) -> Result<SafekeepersInfo, ApiError> {
         let timeline_id = timeline_info.timeline_id;
         let pg_version = timeline_info.pg_version * 10000;
@@ -220,7 +221,11 @@ impl Service {
         let start_lsn = timeline_info.last_record_lsn;
 
         // Choose initial set of safekeepers respecting affinity
-        let sks = self.safekeepers_for_new_timeline().await?;
+        let sks = if !read_only {
+            self.safekeepers_for_new_timeline().await?
+        } else {
+            Vec::new()
+        };
         let sks_persistence = sks.iter().map(|sk| sk.id.0 as i64).collect::<Vec<_>>();
         // Add timeline to db
         let mut timeline_persist = TimelinePersistence {
@@ -253,6 +258,16 @@ impl Service {
                 )));
             }
         }
+        let ret = SafekeepersInfo {
+            generation: timeline_persist.generation as u32,
+            safekeepers: sks.clone(),
+            tenant_id,
+            timeline_id,
+        };
+        if read_only {
+            return Ok(ret);
+        }
+
         // Create the timeline on a quorum of safekeepers
         let remaining = self
             .tenant_timeline_create_safekeepers_quorum(
@@ -316,12 +331,7 @@ impl Service {
             }
         }
 
-        Ok(SafekeepersInfo {
-            generation: timeline_persist.generation as u32,
-            safekeepers: sks,
-            tenant_id,
-            timeline_id,
-        })
+        Ok(ret)
     }
 
     pub(crate) async fn tenant_timeline_create_safekeepers_until_success(
@@ -336,8 +346,10 @@ impl Service {
                 return Err(TimelineImportFinalizeError::ShuttingDown);
             }
 
+            // This function is only used in non-read-only scenarios
+            let read_only = false;
             let res = self
-                .tenant_timeline_create_safekeepers(tenant_id, &timeline_info)
+                .tenant_timeline_create_safekeepers(tenant_id, &timeline_info, read_only)
                 .await;
 
             match res {
@@ -410,6 +422,18 @@ impl Service {
             .chain(tl.sk_set.iter())
             .collect::<HashSet<_>>();
 
+        // The timeline has no safekeepers: we need to delete it from the db manually,
+        // as no safekeeper reconciler will get to it
+        if all_sks.is_empty() {
+            if let Err(err) = self
+                .persistence
+                .delete_timeline(tenant_id, timeline_id)
+                .await
+            {
+                tracing::warn!(%tenant_id, %timeline_id, "couldn't delete timeline from db: {err}");
+            }
+        }
+
         // Schedule reconciliations
         for &sk_id in all_sks.iter() {
             let pending_op = TimelinePendingOpPersistence {
diff --git a/test_runner/regress/test_timeline_detach_ancestor.py b/test_runner/regress/test_timeline_detach_ancestor.py
index d42c5d403e..f0810270b1 100644
--- a/test_runner/regress/test_timeline_detach_ancestor.py
+++ b/test_runner/regress/test_timeline_detach_ancestor.py
@@ -10,6 +10,7 @@ from queue import Empty, Queue
 from threading import Barrier
 
 import pytest
+import requests
 from fixtures.common_types import Lsn, TimelineArchivalState, TimelineId
 from fixtures.log_helper import log
 from fixtures.neon_fixtures import (
@@ -401,8 +402,25 @@ def test_ancestor_detach_behavior_v2(neon_env_builder: NeonEnvBuilder, snapshots
         "earlier", ancestor_branch_name="main", ancestor_start_lsn=branchpoint_pipe
     )
 
-    snapshot_branchpoint_old = env.create_branch(
-        "snapshot_branchpoint_old", ancestor_branch_name="main", ancestor_start_lsn=branchpoint_y
+    snapshot_branchpoint_old = TimelineId.generate()
+
+    env.storage_controller.timeline_create(
+        env.initial_tenant,
+        {
+            "new_timeline_id": str(snapshot_branchpoint_old),
+            "ancestor_start_lsn": str(branchpoint_y),
+            "ancestor_timeline_id": str(env.initial_timeline),
+            "read_only": True,
+        },
+    )
+    sk = env.safekeepers[0]
+    assert sk
+    with pytest.raises(requests.exceptions.HTTPError, match="Not Found"):
+        sk.http_client().timeline_status(
+            tenant_id=env.initial_tenant, timeline_id=snapshot_branchpoint_old
+        )
+    env.neon_cli.mappings_map_branch(
+        "snapshot_branchpoint_old", env.initial_tenant, snapshot_branchpoint_old
     )
 
     snapshot_branchpoint = env.create_branch(
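
Supplementary note on the API shape: the `#[serde(default, skip_serializing_if = "std::ops::Not::not")]` attributes on the new `read_only` field are what keep the branch-create request backward compatible. Below is a minimal standalone sketch of how those two attributes behave with serde/serde_json; the `BranchRequest` struct is a hypothetical stand-in, not the real `TimelineCreateRequestMode` type.

use serde::{Deserialize, Serialize};

// Simplified stand-in that only illustrates the serde attributes used on `read_only`.
#[derive(Serialize, Deserialize, Debug)]
struct BranchRequest {
    ancestor_timeline_id: String,
    // Defaults to false when the field is absent; omitted from output when false.
    #[serde(default, skip_serializing_if = "std::ops::Not::not")]
    read_only: bool,
}

fn main() {
    // A request that never mentions `read_only` still deserializes, defaulting to false.
    let legacy: BranchRequest =
        serde_json::from_str(r#"{"ancestor_timeline_id":"abc"}"#).unwrap();
    assert!(!legacy.read_only);

    // Serializing read_only = false drops the field, so the wire format is unchanged.
    assert_eq!(
        serde_json::to_string(&legacy).unwrap(),
        r#"{"ancestor_timeline_id":"abc"}"#
    );

    // Only read_only = true actually shows up in the serialized request.
    let ro = BranchRequest {
        ancestor_timeline_id: "abc".to_string(),
        read_only: true,
    };
    assert!(serde_json::to_string(&ro).unwrap().contains(r#""read_only":true"#));
}

In other words, existing callers that never send the field keep working, and an ordinary (non-read-only) branch create serializes exactly as before the change.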