feat(pageserver): support detaching from multiple ancestors
Signed-off-by: Alex Chi Z <chi@neon.tech>
@@ -300,9 +300,8 @@ impl TimelineMetadata {
-    /// Returns true if anything was changed
     pub fn detach_from_ancestor(&mut self, branchpoint: &(TimelineId, Lsn)) {
-        if let Some(ancestor) = self.body.ancestor_timeline {
-            assert_eq!(ancestor, branchpoint.0);
-        }
+        // Detaching no longer always detaches from the direct ancestor, but we
+        // ensure the LSN is the same, so we don't check the timeline ID.
         if self.body.ancestor_lsn != Lsn(0) {
            assert_eq!(self.body.ancestor_lsn, branchpoint.1);
        }
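Note on the relaxed assertion: with multi-level detach, the branchpoint passed by the caller may name an ancestor further up the chain, so only the LSN can still be asserted. A minimal self-contained sketch of the new invariant, using stand-in `TimelineId`/`Lsn` types rather than the pageserver's (the clearing of the ancestor link is assumed here; it is outside this hunk):

// Sketch only: stand-in types so the example compiles by itself.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct TimelineId(u64);
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Lsn(u64);

struct Metadata {
    ancestor_timeline: Option<TimelineId>,
    ancestor_lsn: Lsn,
}

impl Metadata {
    fn detach_from_ancestor(&mut self, branchpoint: &(TimelineId, Lsn)) {
        // The branchpoint's timeline id may now be any ancestor in the chain,
        // so only the LSN is validated.
        if self.ancestor_lsn != Lsn(0) {
            assert_eq!(self.ancestor_lsn, branchpoint.1);
        }
        // Assumed for completeness; the real clearing code is not shown above.
        self.ancestor_timeline = None;
        self.ancestor_lsn = Lsn(0);
    }
}

fn main() {
    let mut m = Metadata {
        ancestor_timeline: Some(TimelineId(1)),
        ancestor_lsn: Lsn(0x40),
    };
    // Timeline id 99 is not the direct parent; the call still succeeds
    // because the LSN matches.
    m.detach_from_ancestor(&(TimelineId(99), Lsn(0x40)));
    assert!(m.ancestor_timeline.is_none());
}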
@@ -1957,7 +1957,13 @@ impl TenantManager {
             .map_err(Error::NotFound)?;

         let resp = timeline
-            .detach_from_ancestor_and_reparent(&tenant, prepared, ctx)
+            .detach_from_ancestor_and_reparent(
+                &tenant,
+                prepared,
+                attempt.ancestor_timeline_id,
+                attempt.ancestor_lsn,
+                ctx,
+            )
             .await?;

         let mut slot_guard = slot_guard;
@@ -5406,9 +5406,19 @@ impl Timeline {
         self: &Arc<Timeline>,
         tenant: &crate::tenant::Tenant,
         prepared: detach_ancestor::PreparedTimelineDetach,
+        ancestor_timeline_id: TimelineId,
+        ancestor_lsn: Lsn,
         ctx: &RequestContext,
     ) -> Result<detach_ancestor::DetachingAndReparenting, detach_ancestor::Error> {
-        detach_ancestor::detach_and_reparent(self, tenant, prepared, ctx).await
+        detach_ancestor::detach_and_reparent(
+            self,
+            tenant,
+            prepared,
+            ancestor_timeline_id,
+            ancestor_lsn,
+            ctx,
+        )
+        .await
     }

     /// Final step which unblocks the GC.
@@ -29,7 +29,7 @@ pub(crate) enum Error {
     #[error("no ancestors")]
     NoAncestor,

-    #[error("too many ancestors")]
+    #[error("the branch has more than 1 ancestor and cannot be fast-path detached")]
     TooManyAncestors,

     #[error("shutting down, please retry later")]
@@ -147,7 +147,8 @@ impl Default for Options {
 #[derive(Debug)]
 pub(crate) struct Attempt {
     pub(crate) timeline_id: TimelineId,
+    pub(crate) ancestor_timeline_id: TimelineId,
+    pub(crate) ancestor_lsn: Lsn,
     _guard: completion::Completion,
     gate_entered: Option<utils::sync::gate::GateGuard>,
 }
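Storing the resolved target on `Attempt` lets the later stages reuse exactly the branchpoint chosen during `prepare()` instead of re-reading `detached.ancestor_timeline`, which still points at the direct parent. A hedged sketch of that hand-off with simplified stand-in types (the real struct's guard fields are omitted):

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct TimelineId(u32);
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Lsn(u64);

// Mirrors the two fields added in this commit; `_guard` and `gate_entered`
// are left out of the sketch.
struct Attempt {
    timeline_id: TimelineId,
    ancestor_timeline_id: TimelineId,
    ancestor_lsn: Lsn,
}

// Stand-in for the second phase: it takes the target from the attempt, the
// way `TenantManager` now forwards `attempt.ancestor_timeline_id` and
// `attempt.ancestor_lsn` into `detach_from_ancestor_and_reparent`.
fn detach_target(attempt: &Attempt) -> (TimelineId, Lsn) {
    (attempt.ancestor_timeline_id, attempt.ancestor_lsn)
}

fn main() {
    let attempt = Attempt {
        timeline_id: TimelineId(3),
        // May be the grandparent rather than the direct parent.
        ancestor_timeline_id: TimelineId(1),
        ancestor_lsn: Lsn(0x40),
    };
    assert_eq!(detach_target(&attempt), (TimelineId(1), Lsn(0x40)));
}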
@@ -172,20 +173,24 @@ pub(super) async fn prepare(
 ) -> Result<Progress, Error> {
     use Error::*;

-    let Some((ancestor, ancestor_lsn)) = detached
+    let Some((mut ancestor, mut ancestor_lsn)) = detached
         .ancestor_timeline
         .as_ref()
         .map(|tl| (tl.clone(), detached.ancestor_lsn))
     else {
+        let ancestor_id;
+        let ancestor_lsn;
         let still_in_progress = {
             let accessor = detached.remote_client.initialized_upload_queue()?;

             // we are safe to inspect the latest uploaded, because we can only witness this after
             // restart is complete and ancestor is no more.
             let latest = accessor.latest_uploaded_index_part();
-            if latest.lineage.detached_previous_ancestor().is_none() {
+            let Some((id, lsn)) = latest.lineage.detached_previous_ancestor() else {
                 return Err(NoAncestor);
-            }
+            };
+            ancestor_id = id;
+            ancestor_lsn = lsn;

             latest
                 .gc_blocking
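The `let .. else` rewrite above does more than restyle the `is_none()` check: it captures the previously detached ancestor's id and LSN so the retry path can pass them along. A generic, self-contained illustration of the pattern (the names here are hypothetical, not the pageserver's):

// Hypothetical stand-in for `lineage.detached_previous_ancestor()`.
fn detached_previous_ancestor(lineage: &[(u64, u64)]) -> Option<(u64, u64)> {
    lineage.first().copied()
}

fn resolve(lineage: &[(u64, u64)]) -> Result<(u64, u64), &'static str> {
    // Bind the payload or bail, instead of testing `is_none()` and
    // discarding the value.
    let Some((id, lsn)) = detached_previous_ancestor(lineage) else {
        return Err("no ancestor");
    };
    Ok((id, lsn))
}

fn main() {
    assert_eq!(resolve(&[(1, 64)]), Ok((1, 64)));
    assert_eq!(resolve(&[]), Err("no ancestor"));
}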
@@ -196,7 +201,8 @@ pub(super) async fn prepare(
         if still_in_progress {
             // gc is still blocked, we can still reparent and complete.
             // we are safe to reparent the remaining timelines, because they were locked in at the beginning.
-            let attempt = continue_with_blocked_gc(detached, tenant).await?;
+            let attempt =
+                continue_with_blocked_gc(detached, tenant, ancestor_id, ancestor_lsn).await?;

             // because the ancestor of detached is already set to none, we have published all
             // of the layers, so we are still "prepared."
@@ -224,13 +230,26 @@ pub(super) async fn prepare(

     check_no_archived_children_of_ancestor(tenant, detached, &ancestor, ancestor_lsn)?;

-    if ancestor.ancestor_timeline.is_some() {
-        // non-technical requirement; we could flatten N ancestors just as easily but we chose
-        // not to, at least initially
-        return Err(TooManyAncestors);
+    // If the ancestor has an ancestor, we might be able to fast-path detach it if the current
+    // ancestor does not have any data written/used by the detaching timeline.
+    while let Some(ancestor_of_ancestor) = ancestor.ancestor_timeline.clone() {
+        if ancestor_lsn != ancestor.ancestor_lsn {
+            // non-technical requirement; we could flatten N ancestors just as easily but we chose
+            // not to, at least initially
+            return Err(TooManyAncestors);
+        }
+        // Use the ancestor of the ancestor as the new ancestor (only when the ancestor LSNs are the same)
+        ancestor_lsn = ancestor.ancestor_lsn; // Get the LSN first before resetting the `ancestor` variable
+        ancestor = ancestor_of_ancestor;
+        check_no_archived_children_of_ancestor(tenant, detached, &ancestor, ancestor_lsn)?;
     }

-    let attempt = start_new_attempt(detached, tenant).await?;
+    tracing::info!(
+        "attempt to detach the timeline from the ancestor: {}@{}",
+        ancestor.timeline_id,
+        ancestor_lsn
+    );
+    let attempt = start_new_attempt(detached, tenant, ancestor.timeline_id, ancestor_lsn).await?;

     utils::pausable_failpoint!("timeline-detach-ancestor::before_starting_after_locking-pausable");

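This loop is the core of the feature: when the detaching timeline's branchpoint LSN equals the ancestor's own branchpoint LSN, the intermediate ancestor holds no data visible to the detaching timeline, so the detach target can be hoisted one level up; otherwise the fast path is refused with `TooManyAncestors`. A self-contained sketch of the same walk over a simplified timeline tree (the types and arena layout are stand-ins, not the pageserver's):

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Lsn(u64);

// Simplified timeline node: an id plus an optional (parent index, branchpoint LSN).
struct Tl {
    id: u32,
    ancestor: Option<(usize, Lsn)>,
}

#[derive(Debug, PartialEq)]
enum DetachError {
    TooManyAncestors,
}

// Hoist the detach target while the branchpoint LSNs coincide, mirroring the
// `while let` loop in `prepare()`.
fn resolve_detach_target(
    timelines: &[Tl],
    mut ancestor: usize,
    mut ancestor_lsn: Lsn,
) -> Result<(u32, Lsn), DetachError> {
    while let Some((parent, parent_branch_lsn)) = timelines[ancestor].ancestor {
        if ancestor_lsn != parent_branch_lsn {
            // The intermediate ancestor has data below our branchpoint;
            // flattening would need layer copies, so refuse the fast path.
            return Err(DetachError::TooManyAncestors);
        }
        ancestor_lsn = parent_branch_lsn;
        ancestor = parent;
    }
    Ok((timelines[ancestor].id, ancestor_lsn))
}

fn main() {
    // Timeline 11 branched off timeline 10 at 0x40 and is empty.
    let timelines = vec![
        Tl { id: 10, ancestor: None },
        Tl { id: 11, ancestor: Some((0, Lsn(0x40))) },
    ];
    // A child branched off 11 at the same LSN detaches from 10 directly.
    assert_eq!(
        resolve_detach_target(&timelines, 1, Lsn(0x40)),
        Ok((10, Lsn(0x40)))
    );
    // A child branched later cannot skip over 11.
    assert_eq!(
        resolve_detach_target(&timelines, 1, Lsn(0x50)),
        Err(DetachError::TooManyAncestors)
    );
}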
@@ -450,8 +469,13 @@ pub(super) async fn prepare(
     Ok(Progress::Prepared(attempt, prepared))
 }

-async fn start_new_attempt(detached: &Timeline, tenant: &Tenant) -> Result<Attempt, Error> {
-    let attempt = obtain_exclusive_attempt(detached, tenant)?;
+async fn start_new_attempt(
+    detached: &Timeline,
+    tenant: &Tenant,
+    ancestor_timeline_id: TimelineId,
+    ancestor_lsn: Lsn,
+) -> Result<Attempt, Error> {
+    let attempt = obtain_exclusive_attempt(detached, tenant, ancestor_timeline_id, ancestor_lsn)?;

     // insert the block in the index_part.json, if not already there.
     let _dont_care = tenant
@@ -466,13 +490,23 @@ async fn start_new_attempt(detached: &Timeline, tenant: &Tenant) -> Result<Attem
     Ok(attempt)
 }

-async fn continue_with_blocked_gc(detached: &Timeline, tenant: &Tenant) -> Result<Attempt, Error> {
+async fn continue_with_blocked_gc(
+    detached: &Timeline,
+    tenant: &Tenant,
+    ancestor_timeline_id: TimelineId,
+    ancestor_lsn: Lsn,
+) -> Result<Attempt, Error> {
     // FIXME: it would be nice to confirm that there is an in-memory version, since we've just
     // verified there is a persistent one?
-    obtain_exclusive_attempt(detached, tenant)
+    obtain_exclusive_attempt(detached, tenant, ancestor_timeline_id, ancestor_lsn)
 }

-fn obtain_exclusive_attempt(detached: &Timeline, tenant: &Tenant) -> Result<Attempt, Error> {
+fn obtain_exclusive_attempt(
+    detached: &Timeline,
+    tenant: &Tenant,
+    ancestor_timeline_id: TimelineId,
+    ancestor_lsn: Lsn,
+) -> Result<Attempt, Error> {
     use Error::{OtherTimelineDetachOngoing, ShuttingDown};

     // ensure we are the only active attempt for this tenant
@@ -493,6 +527,8 @@ fn obtain_exclusive_attempt(detached: &Timeline, tenant: &Tenant) -> Result<Atte

     Ok(Attempt {
         timeline_id: detached.timeline_id,
+        ancestor_timeline_id,
+        ancestor_lsn,
         _guard: guard,
         gate_entered: Some(_gate_entered),
     })
@@ -795,6 +831,8 @@ pub(super) async fn detach_and_reparent(
     detached: &Arc<Timeline>,
     tenant: &Tenant,
     prepared: PreparedTimelineDetach,
+    ancestor_timeline_id: TimelineId,
+    ancestor_lsn: Lsn,
     _ctx: &RequestContext,
 ) -> Result<DetachingAndReparenting, Error> {
     let PreparedTimelineDetach { layers } = prepared;
@@ -822,7 +860,30 @@ pub(super) async fn detach_and_reparent(
         "cannot (detach? reparent)? complete if the operation is not still ongoing"
     );

-    let ancestor = match (detached.ancestor_timeline.as_ref(), recorded_branchpoint) {
+    let ancestor_to_detach = match detached.ancestor_timeline.as_ref() {
+        Some(mut ancestor) => {
+            while ancestor.timeline_id != ancestor_timeline_id {
+                match ancestor.ancestor_timeline.as_ref() {
+                    Some(found) => {
+                        if ancestor_lsn != ancestor.ancestor_lsn {
+                            return Err(Error::DetachReparent(anyhow::anyhow!(
+                                "cannot find the ancestor timeline to detach from: wrong ancestor lsn"
+                            )));
+                        }
+                        ancestor = found;
+                    }
+                    None => {
+                        return Err(Error::DetachReparent(anyhow::anyhow!(
+                            "cannot find the ancestor timeline to detach from"
+                        )));
+                    }
+                }
+            }
+            Some(ancestor)
+        }
+        None => None,
+    };
+    let ancestor = match (ancestor_to_detach, recorded_branchpoint) {
         (Some(ancestor), None) => {
             assert!(
                 !layers.is_empty(),
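At apply time, `detach_and_reparent` must re-walk the live ancestor chain, because `detached.ancestor_timeline` still points at the direct parent while the attempt may name a higher target; an LSN mismatch along the way means the tree no longer matches what `prepare()` saw. A sketch of that search with simplified stand-in types (not the pageserver's):

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct TimelineId(u32);
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Lsn(u64);

// Stand-in node: owned parent link plus this timeline's branchpoint LSN.
struct Tl {
    timeline_id: TimelineId,
    ancestor_timeline: Option<Box<Tl>>,
    ancestor_lsn: Lsn,
}

fn find_ancestor_to_detach(
    mut ancestor: &Tl,
    target: TimelineId,
    ancestor_lsn: Lsn,
) -> Result<&Tl, &'static str> {
    while ancestor.timeline_id != target {
        match ancestor.ancestor_timeline.as_deref() {
            Some(found) => {
                if ancestor_lsn != ancestor.ancestor_lsn {
                    return Err("cannot find the ancestor timeline to detach from: wrong ancestor lsn");
                }
                ancestor = found;
            }
            None => return Err("cannot find the ancestor timeline to detach from"),
        }
    }
    Ok(ancestor)
}

fn main() {
    let root = Tl {
        timeline_id: TimelineId(1),
        ancestor_timeline: None,
        ancestor_lsn: Lsn(0),
    };
    let parent = Tl {
        timeline_id: TimelineId(2),
        ancestor_timeline: Some(Box::new(root)),
        ancestor_lsn: Lsn(0x40), // branched off timeline 1 at 0x40
    };
    // Walk from the direct parent (2) up to the recorded target (1).
    let found = find_ancestor_to_detach(&parent, TimelineId(1), Lsn(0x40)).unwrap();
    assert_eq!(found.timeline_id, TimelineId(1));
}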
@@ -342,6 +342,128 @@ def test_ancestor_detach_reparents_earlier(neon_env_builder: NeonEnvBuilder):
     wait_timeline_detail_404(client, env.initial_tenant, env.initial_timeline)


+def test_ancestor_detach_two_level_ancestors(neon_env_builder: NeonEnvBuilder):
+    """
+    old main -------|---------X--------->
+                    |         |        |
+                    |         |        +-> after
+                    |         +--X empty snapshot branch
+                    |            |
+                    |            +-> branch-to-detach
+                    |
+                    +-> reparented
+
+    Ends up as:
+
+    old main -------|---------X--------->
+                              |        |
+                              |        +-> after
+                              +--> empty snapshot branch
+
+    new main -------|--------------> branch-to-detach
+                    |
+                    +-> reparented
+    """
+
+    env = neon_env_builder.init_start()
+
+    env.pageserver.allowed_errors.extend(SHUTDOWN_ALLOWED_ERRORS)
+
+    client = env.pageserver.http_client()
+
+    with env.endpoints.create_start("main", tenant_id=env.initial_tenant) as ep:
+        ep.safe_psql("CREATE TABLE foo (i BIGINT);")
+        ep.safe_psql("CREATE TABLE audit AS SELECT 1 as starts;")
+
+        branchpoint_pipe = wait_for_last_flush_lsn(
+            env, ep, env.initial_tenant, env.initial_timeline
+        )
+
+        ep.safe_psql("INSERT INTO foo SELECT i::bigint FROM generate_series(0, 8191) g(i);")
+
+        branchpoint_x = wait_for_last_flush_lsn(env, ep, env.initial_tenant, env.initial_timeline)
+        client.timeline_checkpoint(env.initial_tenant, env.initial_timeline)
+
+        ep.safe_psql("INSERT INTO foo SELECT i::bigint FROM generate_series(8192, 16383) g(i);")
+        wait_for_last_flush_lsn(env, ep, env.initial_tenant, env.initial_timeline)
+
+    reparented = env.create_branch(
+        "reparented", ancestor_branch_name="main", ancestor_start_lsn=branchpoint_pipe
+    )
+
+    snapshot_branchpoint = env.create_branch(
+        "snapshot_branchpoint", ancestor_branch_name="main", ancestor_start_lsn=branchpoint_x
+    )
+
+    branch_to_detach = env.create_branch(
+        "branch_to_detach",
+        ancestor_branch_name="snapshot_branchpoint",
+        ancestor_start_lsn=branchpoint_x,
+    )
+
+    after = env.create_branch("after", ancestor_branch_name="main", ancestor_start_lsn=None)
+
+    all_reparented = client.detach_ancestor(env.initial_tenant, branch_to_detach)
+    assert set(all_reparented) == {reparented, snapshot_branchpoint}
+
+    env.pageserver.quiesce_tenants()
+
+    # checking the ancestors afterwards is much faster than waiting for each endpoint to start
+    expected_result = [
+        ("main", env.initial_timeline, None, 16384, 1),
+        ("after", after, env.initial_timeline, 16384, 1),
+        ("snapshot_branchpoint", snapshot_branchpoint, branch_to_detach, 16384, 1),  # not correct
+        ("branch_to_detach", branch_to_detach, None, 8192, 1),
+        ("reparented", reparented, env.initial_timeline, 0, 1),
+    ]
+
+    assert isinstance(env.pageserver_remote_storage, LocalFsStorage)
+
+    for branch_name, queried_timeline, expected_ancestor, _, _ in expected_result:
+        details = client.timeline_detail(env.initial_tenant, queried_timeline)
+        ancestor_timeline_id = details["ancestor_timeline_id"]
+        if expected_ancestor is None:
+            assert ancestor_timeline_id is None
+        else:
+            assert (
+                TimelineId(ancestor_timeline_id) == expected_ancestor
+            ), f"when checking branch {branch_name}, mapping={expected_result}"
+
+        index_part = env.pageserver_remote_storage.index_content(
+            env.initial_tenant, queried_timeline
+        )
+        lineage = index_part["lineage"]
+        assert lineage is not None
+
+        assert lineage.get("reparenting_history_overflown", "false") == "false"
+
+        if queried_timeline == branch_to_detach:
+            original_ancestor = lineage["original_ancestor"]
+            assert original_ancestor is not None
+            assert original_ancestor[0] == str(env.initial_timeline)
+            assert original_ancestor[1] == str(branchpoint_x)
+
+            # this does not contain Z at the end, so fromisoformat accepts it;
+            # it is meant to line up with the deletion timestamp... well, almost.
+            when = original_ancestor[2][:26]
+            when_ts = datetime.datetime.fromisoformat(when)
+            assert when_ts < datetime.datetime.now()
+            assert len(lineage.get("reparenting_history", [])) == 0
+        elif expected_ancestor == branch_to_detach:
+            assert len(lineage.get("original_ancestor", [])) == 0
+            assert lineage["reparenting_history"] == [str(env.initial_timeline)]
+        else:
+            assert len(lineage.get("original_ancestor", [])) == 0
+            assert len(lineage.get("reparenting_history", [])) == 0
+
+    for name, _, _, rows, starts in expected_result:
+        with env.endpoints.create_start(name, tenant_id=env.initial_tenant) as ep:
+            assert ep.safe_psql("SELECT count(*) FROM foo;")[0][0] == rows
+            assert ep.safe_psql(f"SELECT count(*) FROM audit WHERE starts = {starts}")[0][0] == 1
+
+    # delete the timelines to confirm the detach actually worked
+    client.timeline_delete(env.initial_tenant, after)
+    wait_timeline_detail_404(client, env.initial_tenant, after)
+
+    client.timeline_delete(env.initial_tenant, env.initial_timeline)
+    wait_timeline_detail_404(client, env.initial_tenant, env.initial_timeline)
+
+
 def test_detached_receives_flushes_while_being_detached(neon_env_builder: NeonEnvBuilder):
     """
     Makes sure that the timeline is able to receive writes throughout the detach process.