Store read_only flag in pageserver timelines

Arpad Müller
2025-07-09 19:32:11 +02:00
parent fc10bb9438
commit f0c63fabdb
6 changed files with 165 additions and 8 deletions

View File

@@ -1602,6 +1602,10 @@ pub struct TimelineInfo {
/// The status of the rel_size migration.
pub rel_size_migration: Option<RelSizeMigration>,
/// Whether the timeline is read-only (doesn't support WAL advancing).
/// The property is not recursive; child timelines can be non-read-only.
pub read_only: bool,
/// Whether the timeline is invisible in synthetic size calculations.
pub is_invisible: Option<bool>,
}
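
The new field lands in the public timeline detail response. A minimal consumer-side sketch, assuming serde_json and a trimmed-down stand-in for TimelineInfo (only read_only is taken from the struct above; everything else is illustrative):

#[derive(serde::Deserialize)]
struct TimelineInfoSlim {
    // Mirrors the new field above; false means the timeline accepts WAL.
    read_only: bool,
}

fn is_timeline_read_only(body: &str) -> Result<bool, serde_json::Error> {
    let info: TimelineInfoSlim = serde_json::from_str(body)?;
    Ok(info.read_only)
}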

View File

@@ -452,6 +452,7 @@ async fn build_timeline_info_common(
let state = timeline.current_state();
// Report is_archived = false if the timeline is still loading
let is_archived = timeline.is_archived().unwrap_or(false);
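// Likewise, report read_only = false while the remote index_part is still unknown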
let read_only = timeline.is_read_only().unwrap_or(false);
let remote_consistent_lsn_projected = timeline
.get_remote_consistent_lsn_projected()
.unwrap_or(Lsn(0));
@@ -503,6 +504,7 @@ async fn build_timeline_info_common(
state,
is_archived: Some(is_archived),
read_only,
rel_size_migration: Some(timeline.get_rel_size_v2_status()),
is_invisible: Some(is_invisible),
@@ -591,12 +593,13 @@ async fn timeline_create_handler(
TimelineCreateRequestMode::Branch {
ancestor_timeline_id,
ancestor_start_lsn,
read_only: _,
read_only,
pg_version: _,
} => tenant::CreateTimelineParams::Branch(tenant::CreateTimelineParamsBranch {
new_timeline_id,
ancestor_timeline_id,
ancestor_start_lsn,
read_only,
}),
TimelineCreateRequestMode::ImportPgdata {
import_pgdata:
@@ -3698,7 +3701,7 @@ async fn tenant_evaluate_feature_flag(
let tenant = state
.tenant_manager
.get_attached_tenant_shard(tenant_shard_id)?;
// TODO: the properties we get here might be stale right after it is collected. But such races are rare (updated every 10s)
// and we don't need to worry about it for now.
let properties = tenant.feature_resolver.collect_properties();
if as_type.as_deref() == Some("boolean") {
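
In the create handler above, the Branch arm now threads the client-supplied flag through instead of discarding it (read_only: _ became read_only). A hedged caller-side sketch of building that mode; only the field names shown in the destructured arm are grounded in the source, the rest is assumed:

// Hypothetical construction; the pg_version type is an assumption.
fn branch_mode(ancestor: TimelineId) -> TimelineCreateRequestMode {
    TimelineCreateRequestMode::Branch {
        ancestor_timeline_id: ancestor,
        ancestor_start_lsn: None, // branch at the ancestor's latest record LSN
        read_only: true,          // request a branch that never advances WAL
        pg_version: None,         // assumed to be optional for branches
    }
}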

View File

@@ -943,6 +943,7 @@ pub(crate) struct CreateTimelineParamsBranch {
pub(crate) new_timeline_id: TimelineId,
pub(crate) ancestor_timeline_id: TimelineId,
pub(crate) ancestor_start_lsn: Option<Lsn>,
pub(crate) read_only: bool,
}
#[derive(Debug)]
@@ -2572,6 +2573,7 @@ impl TenantShard {
initdb_lsn,
pg_version,
);
let read_only = false;
self.prepare_new_timeline(
new_timeline_id,
&new_metadata,
@@ -2579,6 +2581,7 @@ impl TenantShard {
initdb_lsn,
None,
None,
read_only,
ctx,
)
.await
@@ -2723,6 +2726,7 @@ impl TenantShard {
new_timeline_id,
ancestor_timeline_id,
mut ancestor_start_lsn,
read_only,
}) => {
let ancestor_timeline = self
.get_timeline(ancestor_timeline_id, false)
@@ -2775,8 +2779,14 @@ impl TenantShard {
})?;
}
self.branch_timeline(&ancestor_timeline, new_timeline_id, ancestor_start_lsn, ctx)
.await?
self.branch_timeline(
&ancestor_timeline,
new_timeline_id,
ancestor_start_lsn,
read_only,
ctx,
)
.await?
}
CreateTimelineParams::ImportPgdata(params) => {
self.create_timeline_import_pgdata(params, ctx).await?
@@ -2901,6 +2911,7 @@ impl TenantShard {
initdb_lsn,
PgMajorVersion::PG15,
);
let read_only = false;
this.prepare_new_timeline(
new_timeline_id,
&new_metadata,
@@ -2908,6 +2919,7 @@ impl TenantShard {
initdb_lsn,
None,
None,
read_only,
ctx,
)
.await
@@ -4957,9 +4969,10 @@ impl TenantShard {
src_timeline: &Arc<Timeline>,
dst_id: TimelineId,
start_lsn: Option<Lsn>,
read_only: bool,
ctx: &RequestContext,
) -> Result<CreateTimelineResult, CreateTimelineError> {
self.branch_timeline_impl(src_timeline, dst_id, start_lsn, ctx)
self.branch_timeline_impl(src_timeline, dst_id, start_lsn, read_only, ctx)
.await
}
@@ -4968,6 +4981,7 @@ impl TenantShard {
src_timeline: &Arc<Timeline>,
dst_id: TimelineId,
start_lsn: Option<Lsn>,
read_only: bool,
ctx: &RequestContext,
) -> Result<CreateTimelineResult, CreateTimelineError> {
let src_id = src_timeline.timeline_id;
@@ -5079,6 +5093,7 @@ impl TenantShard {
start_lsn + 1,
Some(Arc::clone(src_timeline)),
Some(src_timeline.get_rel_size_v2_status()),
read_only,
ctx,
)
.await?;
@@ -5343,6 +5358,7 @@ impl TenantShard {
}
}
let pgdata_lsn = import_datadir::get_lsn_from_controlfile(&pgdata_path)?.align();
let read_only = false;
// Import the contents of the data directory at the initial checkpoint
// LSN, and any WAL after that.
@@ -5365,6 +5381,7 @@ impl TenantShard {
pgdata_lsn,
None,
None,
read_only,
ctx,
)
.await?;
@@ -5448,14 +5465,17 @@ impl TenantShard {
start_lsn: Lsn,
ancestor: Option<Arc<Timeline>>,
rel_size_v2_status: Option<RelSizeMigration>,
read_only: bool,
ctx: &RequestContext,
) -> anyhow::Result<(UninitializedTimeline<'a>, RequestContext)> {
let tenant_shard_id = self.tenant_shard_id;
let resources = self.build_timeline_resources(new_timeline_id);
resources
.remote_client
.init_upload_queue_for_empty_remote(new_metadata, rel_size_v2_status.clone())?;
resources.remote_client.init_upload_queue_for_empty_remote(
new_metadata,
rel_size_v2_status.clone(),
read_only,
)?;
let (timeline_struct, timeline_ctx) = self
.create_timeline_struct(
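
Every creation path in this file except branching pins the flag with an explicit let read_only = false (bootstrap, the unsharded-import path, and pgdata import), so only a branch request can currently yield a read-only timeline. A toy sketch of that plumbing pattern, names assumed:

// Toy illustration only: call sites bind the flag explicitly so each
// creation path documents whether it can ever be read-only.
fn prepare(read_only: bool) {
    println!("init empty upload queue, read_only = {read_only}");
}

fn bootstrap_timeline() {
    let read_only = false; // bootstrapped timelines always ingest WAL
    prepare(read_only);
}

fn branch_timeline(requested: bool) {
    prepare(requested); // only branching forwards a caller's choice
}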

View File

@@ -444,6 +444,7 @@ impl RemoteTimelineClient {
&self,
local_metadata: &TimelineMetadata,
rel_size_v2_status: Option<RelSizeMigration>,
read_only: bool,
) -> anyhow::Result<()> {
// Set the maximum number of inprogress tasks to the remote storage concurrency. There's
// certainly no point in starting more upload tasks than this.
@@ -456,6 +457,9 @@ impl RemoteTimelineClient {
let initialized_queue =
upload_queue.initialize_empty_remote(local_metadata, inprogress_limit)?;
initialized_queue.dirty.rel_size_migration = rel_size_v2_status;
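// Persist the flag only when set; writable timelines leave the field absent in index_part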
if read_only {
initialized_queue.dirty.read_only = Some(read_only);
}
self.update_remote_physical_size_gauge(None);
info!("initialized upload queue as empty");
Ok(())
@@ -583,6 +587,17 @@ impl RemoteTimelineClient {
.ok()
}
/// Returns whether the timeline is read-only.
/// Return None if the remote index_part hasn't been downloaded yet.
pub(crate) fn is_read_only(&self) -> Option<bool> {
self.upload_queue
.lock()
.unwrap()
.initialized_mut()
.map(|q| q.clean.0.read_only.unwrap_or_default())
.ok()
}
/// Returns `Ok(Some(timestamp))` if the timeline has been archived, `Ok(None)` if the timeline hasn't been archived.
///
/// Return Err(_) if the remote index_part hasn't been downloaded yet, or the timeline hasn't been stopped yet.
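
A hedged usage sketch for the new accessor; the three-way handling shown is illustrative, not taken from this commit:

// Illustrative only: callers must distinguish "unknown" from a real answer.
fn describe(remote_client: &RemoteTimelineClient) -> &'static str {
    match remote_client.is_read_only() {
        None => "unknown: remote index_part not downloaded yet",
        Some(true) => "read-only: WAL advancement is not supported",
        Some(false) => "writable",
    }
}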

View File

@@ -114,6 +114,12 @@ pub struct IndexPart {
/// The timestamp when the timeline was marked invisible in synthetic size calculations.
#[serde(skip_serializing_if = "Option::is_none", default)]
pub(crate) marked_invisible_at: Option<NaiveDateTime>,
/// Whether the timeline is read-only.
///
/// The property is non-recursive, so child timelines can be non-read-only,
/// but it can't be changed for an individual timeline once the timeline has been created.
#[serde(skip_serializing_if = "Option::is_none", default)]
pub(crate) read_only: Option<bool>,
}
#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
@@ -142,6 +148,7 @@ impl IndexPart {
/// - 12: +l2_lsn
/// - 13: +gc_compaction
/// - 14: +marked_invisible_at
/// - 15: +read_only
const LATEST_VERSION: usize = 14;
const LATEST_VERSION: usize = 15;
// Versions we may see when reading from a bucket.
@@ -165,6 +172,7 @@ impl IndexPart {
l2_lsn: None,
gc_compaction: None,
marked_invisible_at: None,
read_only: None,
}
}
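
Because the field is an Option with serde defaulting, index_part documents written before this change (all the v1–v14 fixtures below) deserialize with read_only = None rather than failing. A minimal round-trip sketch of that behavior, using a stand-in struct with the assumed attributes:

// Stand-in with the assumed serde attributes; older JSON lacking the key
// yields None, newer JSON round-trips Some(true).
#[derive(serde::Serialize, serde::Deserialize, Debug, PartialEq)]
struct IndexPartSlim {
    version: usize,
    #[serde(skip_serializing_if = "Option::is_none", default)]
    read_only: Option<bool>,
}

fn main() {
    let old: IndexPartSlim = serde_json::from_str(r#"{"version": 14}"#).unwrap();
    assert_eq!(old.read_only, None);

    let new: IndexPartSlim =
        serde_json::from_str(r#"{"version": 15, "read_only": true}"#).unwrap();
    assert_eq!(new.read_only, Some(true));
}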
@@ -475,6 +483,7 @@ mod tests {
l2_lsn: None,
gc_compaction: None,
marked_invisible_at: None,
read_only: None,
};
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
@@ -524,6 +533,7 @@ mod tests {
l2_lsn: None,
gc_compaction: None,
marked_invisible_at: None,
read_only: None,
};
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
@@ -574,6 +584,7 @@ mod tests {
l2_lsn: None,
gc_compaction: None,
marked_invisible_at: None,
read_only: None,
};
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
@@ -675,6 +686,7 @@ mod tests {
l2_lsn: None,
gc_compaction: None,
marked_invisible_at: None,
read_only: None,
};
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
@@ -726,6 +738,7 @@ mod tests {
l2_lsn: None,
gc_compaction: None,
marked_invisible_at: None,
read_only: None,
};
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
@@ -782,6 +795,7 @@ mod tests {
l2_lsn: None,
gc_compaction: None,
marked_invisible_at: None,
read_only: None,
};
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
@@ -843,6 +857,7 @@ mod tests {
l2_lsn: None,
gc_compaction: None,
marked_invisible_at: None,
read_only: None,
};
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
@@ -905,6 +920,7 @@ mod tests {
l2_lsn: None,
gc_compaction: None,
marked_invisible_at: None,
read_only: None,
};
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
@@ -972,6 +988,7 @@ mod tests {
l2_lsn: None,
gc_compaction: None,
marked_invisible_at: None,
read_only: None,
};
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
@@ -1052,6 +1069,7 @@ mod tests {
l2_lsn: None,
gc_compaction: None,
marked_invisible_at: None,
read_only: None,
};
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
@@ -1133,6 +1151,7 @@ mod tests {
l2_lsn: None,
gc_compaction: None,
marked_invisible_at: None,
read_only: None,
};
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
@@ -1220,6 +1239,7 @@ mod tests {
last_completed_lsn: "0/16960E8".parse::<Lsn>().unwrap(),
}),
marked_invisible_at: None,
read_only: None,
};
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
@@ -1308,6 +1328,97 @@ mod tests {
last_completed_lsn: "0/16960E8".parse::<Lsn>().unwrap(),
}),
marked_invisible_at: Some(parse_naive_datetime("2023-07-31T09:00:00.123000000")),
read_only: None,
};
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();
assert_eq!(part, expected);
}
#[test]
fn v15_read_only_is_parsed() {
let example = r#"{
"version": 14,
"layer_metadata":{
"000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9": { "file_size": 25600000 },
"000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51": { "file_size": 9007199254741001 }
},
"disk_consistent_lsn":"0/16960E8",
"metadata": {
"disk_consistent_lsn": "0/16960E8",
"prev_record_lsn": "0/1696070",
"ancestor_timeline": "e45a7f37d3ee2ff17dc14bf4f4e3f52e",
"ancestor_lsn": "0/0",
"latest_gc_cutoff_lsn": "0/1696070",
"initdb_lsn": "0/1696070",
"pg_version": 14
},
"gc_blocking": {
"started_at": "2024-07-19T09:00:00.123",
"reasons": ["DetachAncestor"]
},
"import_pgdata": {
"V1": {
"Done": {
"idempotency_key": "specified-by-client-218a5213-5044-4562-a28d-d024c5f057f5",
"started_at": "2024-11-13T09:23:42.123",
"finished_at": "2024-11-13T09:42:23.123"
}
}
},
"rel_size_migration": "legacy",
"l2_lsn": "0/16960E8",
"gc_compaction": {
"last_completed_lsn": "0/16960E8"
},
"marked_invisible_at": "2023-07-31T09:00:00.123",
"read_only": true,
}"#;
let expected = IndexPart {
version: 15,
layer_metadata: HashMap::from([
("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".parse().unwrap(), LayerFileMetadata {
file_size: 25600000,
generation: Generation::none(),
shard: ShardIndex::unsharded()
}),
("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap(), LayerFileMetadata {
file_size: 9007199254741001,
generation: Generation::none(),
shard: ShardIndex::unsharded()
})
]),
disk_consistent_lsn: "0/16960E8".parse::<Lsn>().unwrap(),
metadata: TimelineMetadata::new(
Lsn::from_str("0/16960E8").unwrap(),
Some(Lsn::from_str("0/1696070").unwrap()),
Some(TimelineId::from_str("e45a7f37d3ee2ff17dc14bf4f4e3f52e").unwrap()),
Lsn::INVALID,
Lsn::from_str("0/1696070").unwrap(),
Lsn::from_str("0/1696070").unwrap(),
PgMajorVersion::PG14,
).with_recalculated_checksum().unwrap(),
deleted_at: None,
lineage: Default::default(),
gc_blocking: Some(GcBlocking {
started_at: parse_naive_datetime("2024-07-19T09:00:00.123000000"),
reasons: enumset::EnumSet::from_iter([GcBlockingReason::DetachAncestor]),
}),
last_aux_file_policy: Default::default(),
archived_at: None,
import_pgdata: Some(import_pgdata::index_part_format::Root::V1(import_pgdata::index_part_format::V1::Done(import_pgdata::index_part_format::Done{
started_at: parse_naive_datetime("2024-11-13T09:23:42.123000000"),
finished_at: parse_naive_datetime("2024-11-13T09:42:23.123000000"),
idempotency_key: import_pgdata::index_part_format::IdempotencyKey::new("specified-by-client-218a5213-5044-4562-a28d-d024c5f057f5".to_string()),
}))),
rel_size_migration: Some(RelSizeMigration::Legacy),
l2_lsn: Some("0/16960E8".parse::<Lsn>().unwrap()),
gc_compaction: Some(GcCompactionState {
last_completed_lsn: "0/16960E8".parse::<Lsn>().unwrap(),
}),
marked_invisible_at: Some(parse_naive_datetime("2023-07-31T09:00:00.123000000")),
read_only: Some(true),
};
let part = IndexPart::from_json_bytes(example.as_bytes()).unwrap();

View File

@@ -2349,6 +2349,10 @@ impl Timeline {
self.remote_client.is_invisible()
}
pub(crate) fn is_read_only(&self) -> Option<bool> {
self.remote_client.is_read_only()
}
pub(crate) fn is_stopping(&self) -> bool {
self.current_state() == TimelineState::Stopping
}
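
Per the TimelineInfo doc comment, a read-only timeline does not support WAL advancing, so ingest paths can gate on this accessor. A hedged sketch of such a guard; the function and its error handling are assumptions, not part of this commit:

// Hypothetical guard at the top of a WAL-ingest path. Unknown state (None,
// index_part not yet downloaded) is conservatively treated as writable here,
// matching the unwrap_or(false) reporting convention used above.
fn ensure_writable(timeline: &Timeline) -> anyhow::Result<()> {
    if timeline.is_read_only().unwrap_or(false) {
        anyhow::bail!("timeline is read-only and does not accept new WAL");
    }
    Ok(())
}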