Mirror of https://github.com/neondatabase/neon.git, synced 2026-01-06 21:12:55 +00:00
pageserver: improve tenant manifest lifecycle (#11328)
## Problem

Currently, the tenant manifest is only uploaded if there are offloaded timelines. The change checks are also a bit loose (e.g. they only compare the number of offloaded timelines). We want to start using the manifest for other things too (e.g. stripe size).

Resolves #11271.

## Summary of changes

This patch ensures that a tenant manifest always exists. The lifecycle is:

* During preload, fetch the existing manifest, if any.
* During attach, upload a tenant manifest if it differs from the preloaded one (or does not exist).
* Upload a new manifest as needed, if it differs from the last-known manifest (ignoring the version number).
* On splits, pre-populate the manifest from the parent.
* During Pageserver physical GC, remove old manifests but keep the latest 2 generations.

A sketch of the compare-and-upload rule is shown below. This will cause nearly all existing tenants to upload a new tenant manifest on their first attach after this change. Attaches are concurrency-limited in the storage controller, so we expect this to be fine.

Also updates `make_broken` to automatically log at `INFO` level when the tenant has been cancelled, to avoid spurious error logs during shutdown.
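To make the "upload only if changed, ignoring the version number" rule concrete, here is a minimal, self-contained sketch of the idea rather than the pageserver implementation itself: `Manifest`, `LATEST_VERSION`, `maybe_upload`, and `upload_to_remote` are illustrative stand-ins for the real types and functions (`TenantManifest`, `maybe_upload_tenant_manifest`, and the remote-storage upload path).

```rust
// Sketch of the compare-and-upload rule; names are stand-ins, not pageserver APIs.

#[derive(Clone, Debug, PartialEq, Eq)]
struct Manifest {
    version: usize,
    offloaded_timelines: Vec<String>,
}

const LATEST_VERSION: usize = 1;

impl Manifest {
    /// Equality that ignores the version number, so a version bump alone does not
    /// force a re-upload of every manifest.
    fn eq_ignoring_version(&self, other: &Self) -> bool {
        self.offloaded_timelines == other.offloaded_timelines
    }
}

/// Upload the freshly built manifest only if it differs from the last-known remote
/// manifest, then remember it as the new last-known manifest.
fn maybe_upload(last_known: &mut Option<Manifest>, built: Manifest) {
    if let Some(old) = last_known.as_ref() {
        if built.eq_ignoring_version(old) {
            return; // nothing changed, skip the upload
        }
    }
    upload_to_remote(&built);
    *last_known = Some(built);
}

fn upload_to_remote(manifest: &Manifest) {
    // Stand-in for the real remote-storage upload (which also retries).
    println!("uploading manifest: {manifest:?}");
}

fn main() {
    // Preload: no manifest found in remote storage yet.
    let mut last_known: Option<Manifest> = None;

    // Attach: always ends up with a manifest in remote storage.
    maybe_upload(
        &mut last_known,
        Manifest { version: LATEST_VERSION, offloaded_timelines: vec![] },
    );

    // Same contents, different version: no upload.
    maybe_upload(
        &mut last_known,
        Manifest { version: LATEST_VERSION + 1, offloaded_timelines: vec![] },
    );

    // Contents changed (a timeline was offloaded): upload.
    maybe_upload(
        &mut last_known,
        Manifest {
            version: LATEST_VERSION,
            offloaded_timelines: vec!["5c4df612fd159e63c1b7853fe94d97da".to_string()],
        },
    );
}
```

The real code additionally serializes concurrent callers behind an async mutex and retries the upload; see the `maybe_upload_tenant_manifest` and `eq_ignoring_version` hunks in the diff below.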
@@ -45,6 +45,7 @@ use remote_timeline_client::manifest::{
 };
 use remote_timeline_client::{
     FAILED_REMOTE_OP_RETRIES, FAILED_UPLOAD_WARN_THRESHOLD, UploadQueueNotReadyError,
+    download_tenant_manifest,
 };
 use secondary::heatmap::{HeatMapTenant, HeatMapTimeline};
 use storage_broker::BrokerClientChannel;
@@ -226,7 +227,8 @@ struct TimelinePreload {
 }
 
 pub(crate) struct TenantPreload {
-    tenant_manifest: TenantManifest,
+    /// The tenant manifest from remote storage, or None if no manifest was found.
+    tenant_manifest: Option<TenantManifest>,
     /// Map from timeline ID to a possible timeline preload. It is None iff the timeline is offloaded according to the manifest.
     timelines: HashMap<TimelineId, Option<TimelinePreload>>,
 }
@@ -282,12 +284,15 @@ pub struct Tenant {
     /// **Lock order**: if acquiring all (or a subset), acquire them in order `timelines`, `timelines_offloaded`, `timelines_creating`
     timelines_offloaded: Mutex<HashMap<TimelineId, Arc<OffloadedTimeline>>>,
 
-    /// Serialize writes of the tenant manifest to remote storage. If there are concurrent operations
-    /// affecting the manifest, such as timeline deletion and timeline offload, they must wait for
-    /// each other (this could be optimized to coalesce writes if necessary).
-    ///
-    /// The contents of the Mutex are the last manifest we successfully uploaded
-    tenant_manifest_upload: tokio::sync::Mutex<Option<TenantManifest>>,
+    /// The last tenant manifest known to be in remote storage. None if the manifest has not yet
+    /// been either downloaded or uploaded. Always Some after tenant attach.
+    ///
+    /// Initially populated during tenant attach, updated via `maybe_upload_tenant_manifest`.
+    ///
+    /// Do not modify this directly. It is used to check whether a new manifest needs to be
+    /// uploaded. The manifest is constructed in `build_tenant_manifest`, and uploaded via
+    /// `maybe_upload_tenant_manifest`.
+    remote_tenant_manifest: tokio::sync::Mutex<Option<TenantManifest>>,
 
     // This mutex prevents creation of new timelines during GC.
     // Adding yet another mutex (in addition to `timelines`) is needed because holding
@@ -1526,28 +1531,27 @@ impl Tenant {
             cancel.clone(),
         )
         .await?;
-        let (offloaded_add, tenant_manifest) =
-            match remote_timeline_client::download_tenant_manifest(
-                remote_storage,
-                &self.tenant_shard_id,
-                self.generation,
-                &cancel,
-            )
-            .await
-            {
-                Ok((tenant_manifest, _generation, _manifest_mtime)) => (
-                    format!("{} offloaded", tenant_manifest.offloaded_timelines.len()),
-                    tenant_manifest,
-                ),
-                Err(DownloadError::NotFound) => {
-                    ("no manifest".to_string(), TenantManifest::empty())
-                }
-                Err(e) => Err(e)?,
-            };
+
+        let tenant_manifest = match download_tenant_manifest(
+            remote_storage,
+            &self.tenant_shard_id,
+            self.generation,
+            &cancel,
+        )
+        .await
+        {
+            Ok((tenant_manifest, _, _)) => Some(tenant_manifest),
+            Err(DownloadError::NotFound) => None,
+            Err(err) => return Err(err.into()),
+        };
 
         info!(
-            "found {} timelines, and {offloaded_add}",
-            remote_timeline_ids.len()
+            "found {} timelines ({} offloaded timelines)",
+            remote_timeline_ids.len(),
+            tenant_manifest
+                .as_ref()
+                .map(|m| m.offloaded_timelines.len())
+                .unwrap_or(0)
         );
 
         for k in other_keys {
@@ -1556,11 +1560,13 @@ impl Tenant {
 
         // Avoid downloading IndexPart of offloaded timelines.
         let mut offloaded_with_prefix = HashSet::new();
-        for offloaded in tenant_manifest.offloaded_timelines.iter() {
-            if remote_timeline_ids.remove(&offloaded.timeline_id) {
-                offloaded_with_prefix.insert(offloaded.timeline_id);
-            } else {
-                // We'll take care later of timelines in the manifest without a prefix
+        if let Some(tenant_manifest) = &tenant_manifest {
+            for offloaded in tenant_manifest.offloaded_timelines.iter() {
+                if remote_timeline_ids.remove(&offloaded.timeline_id) {
+                    offloaded_with_prefix.insert(offloaded.timeline_id);
+                } else {
+                    // We'll take care later of timelines in the manifest without a prefix
+                }
             }
         }
 
@@ -1634,12 +1640,14 @@ impl Tenant {
 
         let mut offloaded_timeline_ids = HashSet::new();
         let mut offloaded_timelines_list = Vec::new();
-        for timeline_manifest in preload.tenant_manifest.offloaded_timelines.iter() {
-            let timeline_id = timeline_manifest.timeline_id;
-            let offloaded_timeline =
-                OffloadedTimeline::from_manifest(self.tenant_shard_id, timeline_manifest);
-            offloaded_timelines_list.push((timeline_id, Arc::new(offloaded_timeline)));
-            offloaded_timeline_ids.insert(timeline_id);
+        if let Some(tenant_manifest) = &preload.tenant_manifest {
+            for timeline_manifest in tenant_manifest.offloaded_timelines.iter() {
+                let timeline_id = timeline_manifest.timeline_id;
+                let offloaded_timeline =
+                    OffloadedTimeline::from_manifest(self.tenant_shard_id, timeline_manifest);
+                offloaded_timelines_list.push((timeline_id, Arc::new(offloaded_timeline)));
+                offloaded_timeline_ids.insert(timeline_id);
+            }
         }
         // Complete deletions for offloaded timeline id's from manifest.
         // The manifest will be uploaded later in this function.
@@ -1797,15 +1805,21 @@ impl Tenant {
                 .context("resume_deletion")
                 .map_err(LoadLocalTimelineError::ResumeDeletion)?;
         }
-        let needs_manifest_upload =
-            offloaded_timelines_list.len() != preload.tenant_manifest.offloaded_timelines.len();
         {
             let mut offloaded_timelines_accessor = self.timelines_offloaded.lock().unwrap();
             offloaded_timelines_accessor.extend(offloaded_timelines_list.into_iter());
         }
-        if needs_manifest_upload {
-            self.store_tenant_manifest().await?;
-        }
+
+        // Stash the preloaded tenant manifest, and upload a new manifest if changed.
+        //
+        // NB: this must happen after the tenant is fully populated above. In particular the
+        // offloaded timelines, which are included in the manifest.
+        {
+            let mut guard = self.remote_tenant_manifest.lock().await;
+            assert!(guard.is_none(), "tenant manifest set before preload"); // first populated here
+            *guard = preload.tenant_manifest;
+        }
+        self.maybe_upload_tenant_manifest().await?;
 
         // The local filesystem contents are a cache of what's in the remote IndexPart;
         // IndexPart is the source of truth.
@@ -2219,7 +2233,7 @@ impl Tenant {
         };
 
         // Upload new list of offloaded timelines to S3
-        self.store_tenant_manifest().await?;
+        self.maybe_upload_tenant_manifest().await?;
 
         // Activate the timeline (if it makes sense)
         if !(timeline.is_broken() || timeline.is_stopping()) {
@@ -4053,18 +4067,19 @@ impl Tenant {
 
     /// Generate an up-to-date TenantManifest based on the state of this Tenant.
     fn build_tenant_manifest(&self) -> TenantManifest {
-        let timelines_offloaded = self.timelines_offloaded.lock().unwrap();
-
-        let mut timeline_manifests = timelines_offloaded
-            .iter()
-            .map(|(_timeline_id, offloaded)| offloaded.manifest())
-            .collect::<Vec<_>>();
-        // Sort the manifests so that our output is deterministic
-        timeline_manifests.sort_by_key(|timeline_manifest| timeline_manifest.timeline_id);
+        // Collect the offloaded timelines, and sort them for deterministic output.
+        let offloaded_timelines = self
+            .timelines_offloaded
+            .lock()
+            .unwrap()
+            .values()
+            .map(|tli| tli.manifest())
+            .sorted_by_key(|m| m.timeline_id)
+            .collect_vec();
 
         TenantManifest {
             version: LATEST_TENANT_MANIFEST_VERSION,
-            offloaded_timelines: timeline_manifests,
+            offloaded_timelines,
         }
     }
 
@@ -4287,7 +4302,7 @@ impl Tenant {
             timelines: Mutex::new(HashMap::new()),
             timelines_creating: Mutex::new(HashSet::new()),
             timelines_offloaded: Mutex::new(HashMap::new()),
-            tenant_manifest_upload: Default::default(),
+            remote_tenant_manifest: Default::default(),
             gc_cs: tokio::sync::Mutex::new(()),
             walredo_mgr,
             remote_storage,
@@ -5520,27 +5535,35 @@ impl Tenant {
             .unwrap_or(0)
     }
 
-    /// Serialize and write the latest TenantManifest to remote storage.
-    pub(crate) async fn store_tenant_manifest(&self) -> Result<(), TenantManifestError> {
-        // Only one manifest write may be done at at time, and the contents of the manifest
-        // must be loaded while holding this lock. This makes it safe to call this function
-        // from anywhere without worrying about colliding updates.
+    /// Builds a new tenant manifest, and uploads it if it differs from the last-known tenant
+    /// manifest in `Self::remote_tenant_manifest`.
+    ///
+    /// TODO: instead of requiring callers to remember to call `maybe_upload_tenant_manifest` after
+    /// changing any `Tenant` state that's included in the manifest, consider making the manifest
+    /// the authoritative source of data with an API that automatically uploads on changes. Revisit
+    /// this when the manifest is more widely used and we have a better idea of the data model.
+    pub(crate) async fn maybe_upload_tenant_manifest(&self) -> Result<(), TenantManifestError> {
+        // Multiple tasks may call this function concurrently after mutating the Tenant runtime
+        // state, affecting the manifest generated by `build_tenant_manifest`. We use an async mutex
+        // to serialize these callers. `eq_ignoring_version` acts as a slightly inefficient but
+        // simple coalescing mechanism.
        let mut guard = tokio::select! {
-            g = self.tenant_manifest_upload.lock() => {
-                g
-            },
-            _ = self.cancel.cancelled() => {
-                return Err(TenantManifestError::Cancelled);
-            }
+            guard = self.remote_tenant_manifest.lock() => guard,
+            _ = self.cancel.cancelled() => return Err(TenantManifestError::Cancelled),
        };
 
+        // Build a new manifest.
         let manifest = self.build_tenant_manifest();
-        if Some(&manifest) == (*guard).as_ref() {
-            // Optimisation: skip uploads that don't change anything.
-            return Ok(());
+
+        // Check if the manifest has changed. We ignore the version number here, to avoid
+        // uploading every manifest on version number bumps.
+        if let Some(old) = guard.as_ref() {
+            if manifest.eq_ignoring_version(old) {
+                return Ok(());
+            }
         }
 
-        // Remote storage does no retries internally, so wrap it
+        // Upload the manifest. Remote storage does no retries internally, so retry here.
         match backoff::retry(
             || async {
                 upload_tenant_manifest(
@@ -5552,7 +5575,7 @@ impl Tenant {
                 )
                 .await
             },
-            |_e| self.cancel.is_cancelled(),
+            |_| self.cancel.is_cancelled(),
             FAILED_UPLOAD_WARN_THRESHOLD,
             FAILED_REMOTE_OP_RETRIES,
             "uploading tenant manifest",
@@ -3,11 +3,15 @@ use serde::{Deserialize, Serialize};
 use utils::id::TimelineId;
 use utils::lsn::Lsn;
 
-/// Tenant-shard scoped manifest
-#[derive(Clone, Serialize, Deserialize, PartialEq, Eq)]
+/// Tenant shard manifest, stored in remote storage. Contains offloaded timelines and other tenant
+/// shard-wide information that must be persisted in remote storage.
+///
+/// The manifest is always updated on tenant attach, and as needed.
+#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
 pub struct TenantManifest {
-    /// Debugging aid describing the version of this manifest.
-    /// Can also be used for distinguishing breaking changes later on.
+    /// The manifest version. Incremented on manifest format changes, even non-breaking ones.
+    /// Manifests must generally always be backwards and forwards compatible for one release, to
+    /// allow release rollbacks.
     pub version: usize,
 
     /// The list of offloaded timelines together with enough information
@@ -16,6 +20,7 @@ pub struct TenantManifest {
     /// Note: the timelines mentioned in this list might be deleted, i.e.
     /// we don't hold an invariant that the references aren't dangling.
     /// Existence of index-part.json is the actual indicator of timeline existence.
+    #[serde(default)]
     pub offloaded_timelines: Vec<OffloadedTimelineManifest>,
 }
 
@@ -24,7 +29,7 @@ pub struct TenantManifest {
 /// Very similar to [`pageserver_api::models::OffloadedTimelineInfo`],
 /// but the two datastructures serve different needs, this is for a persistent disk format
 /// that must be backwards compatible, while the other is only for informative purposes.
-#[derive(Clone, Serialize, Deserialize, Copy, PartialEq, Eq)]
+#[derive(Clone, Debug, Serialize, Deserialize, Copy, PartialEq, Eq)]
 pub struct OffloadedTimelineManifest {
     pub timeline_id: TimelineId,
     /// Whether the timeline has a parent it has been branched off from or not
@@ -35,20 +40,114 @@ pub struct OffloadedTimelineManifest {
     pub archived_at: NaiveDateTime,
 }
 
+/// The newest manifest version. This should be incremented on changes, even non-breaking ones. We
+/// do not use deny_unknown_fields, so new fields are not breaking.
 pub const LATEST_TENANT_MANIFEST_VERSION: usize = 1;
 
 impl TenantManifest {
-    pub(crate) fn empty() -> Self {
-        Self {
-            version: LATEST_TENANT_MANIFEST_VERSION,
-            offloaded_timelines: vec![],
+    /// Returns true if the manifests are equal, ignoring the version number. This avoids
+    /// re-uploading all manifests just because the version number is bumped.
+    pub fn eq_ignoring_version(&self, other: &Self) -> bool {
+        // Fast path: if the version is equal, just compare directly.
+        if self.version == other.version {
+            return self == other;
         }
-    }
-    pub fn from_json_bytes(bytes: &[u8]) -> Result<Self, serde_json::Error> {
-        serde_json::from_slice::<Self>(bytes)
+
+        // We could alternatively just clone and modify the version here.
+        let Self {
+            version: _, // ignore version
+            offloaded_timelines,
+        } = self;
+
+        offloaded_timelines == &other.offloaded_timelines
     }
 
-    pub(crate) fn to_json_bytes(&self) -> serde_json::Result<Vec<u8>> {
+    /// Decodes a manifest from JSON.
+    pub fn from_json_bytes(bytes: &[u8]) -> Result<Self, serde_json::Error> {
+        serde_json::from_slice(bytes)
+    }
+
+    /// Encodes a manifest as JSON.
+    pub fn to_json_bytes(&self) -> serde_json::Result<Vec<u8>> {
         serde_json::to_vec(self)
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use std::str::FromStr;
+
+    use utils::id::TimelineId;
+
+    use super::*;
+
+    /// Empty manifests should be parsed. Version is required.
+    #[test]
+    fn parse_empty() -> anyhow::Result<()> {
+        let json = r#"{
+            "version": 0
+        }"#;
+        let expected = TenantManifest {
+            version: 0,
+            offloaded_timelines: Vec::new(),
+        };
+        assert_eq!(expected, TenantManifest::from_json_bytes(json.as_bytes())?);
+        Ok(())
+    }
+
+    /// Unknown fields should be ignored, for forwards compatibility.
+    #[test]
+    fn parse_unknown_fields() -> anyhow::Result<()> {
+        let json = r#"{
+            "version": 1,
+            "foo": "bar"
+        }"#;
+        let expected = TenantManifest {
+            version: 1,
+            offloaded_timelines: Vec::new(),
+        };
+        assert_eq!(expected, TenantManifest::from_json_bytes(json.as_bytes())?);
+        Ok(())
+    }
+
+    /// v1 manifests should be parsed, for backwards compatibility.
+    #[test]
+    fn parse_v1() -> anyhow::Result<()> {
+        let json = r#"{
+            "version": 1,
+            "offloaded_timelines": [
+                {
+                    "timeline_id": "5c4df612fd159e63c1b7853fe94d97da",
+                    "archived_at": "2025-03-07T11:07:11.373105434"
+                },
+                {
+                    "timeline_id": "f3def5823ad7080d2ea538d8e12163fa",
+                    "ancestor_timeline_id": "5c4df612fd159e63c1b7853fe94d97da",
+                    "ancestor_retain_lsn": "0/1F79038",
+                    "archived_at": "2025-03-05T11:10:22.257901390"
+                }
+            ]
+        }"#;
+        let expected = TenantManifest {
+            version: 1,
+            offloaded_timelines: vec![
+                OffloadedTimelineManifest {
+                    timeline_id: TimelineId::from_str("5c4df612fd159e63c1b7853fe94d97da")?,
+                    ancestor_timeline_id: None,
+                    ancestor_retain_lsn: None,
+                    archived_at: NaiveDateTime::from_str("2025-03-07T11:07:11.373105434")?,
+                },
+                OffloadedTimelineManifest {
+                    timeline_id: TimelineId::from_str("f3def5823ad7080d2ea538d8e12163fa")?,
+                    ancestor_timeline_id: Some(TimelineId::from_str(
+                        "5c4df612fd159e63c1b7853fe94d97da",
+                    )?),
+                    ancestor_retain_lsn: Some(Lsn::from_str("0/1F79038")?),
+                    archived_at: NaiveDateTime::from_str("2025-03-05T11:10:22.257901390")?,
+                },
+            ],
+        };
+        assert_eq!(expected, TenantManifest::from_json_bytes(json.as_bytes())?);
+        Ok(())
+    }
+}
@@ -61,6 +61,7 @@ pub(crate) async fn upload_index_part(
     .await
     .with_context(|| format!("upload index part for '{tenant_shard_id} / {timeline_id}'"))
 }
 
+/// Serializes and uploads the given tenant manifest data to the remote storage.
 pub(crate) async fn upload_tenant_manifest(
     storage: &GenericRemoteStorage,
@@ -76,16 +77,14 @@ pub(crate) async fn upload_tenant_manifest(
     });
     pausable_failpoint!("before-upload-manifest-pausable");
 
-    let serialized = tenant_manifest.to_json_bytes()?;
-    let serialized = Bytes::from(serialized);
-
-    let tenant_manifest_site = serialized.len();
-
+    let serialized = Bytes::from(tenant_manifest.to_json_bytes()?);
+    let tenant_manifest_size = serialized.len();
     let remote_path = remote_tenant_manifest_path(tenant_shard_id, generation);
 
     storage
         .upload_storage_object(
             futures::stream::once(futures::future::ready(Ok(serialized))),
-            tenant_manifest_site,
+            tenant_manifest_size,
             &remote_path,
             cancel,
         )
@@ -410,10 +410,13 @@ impl DeleteTimelineFlow {
         // So indeed, the tenant manifest might refer to an offloaded timeline which has already been deleted.
         // However, we handle this case in tenant loading code so the next time we attach, the issue is
         // resolved.
-        tenant.store_tenant_manifest().await.map_err(|e| match e {
-            TenantManifestError::Cancelled => DeleteTimelineError::Cancelled,
-            _ => DeleteTimelineError::Other(e.into()),
-        })?;
+        tenant
+            .maybe_upload_tenant_manifest()
+            .await
+            .map_err(|err| match err {
+                TenantManifestError::Cancelled => DeleteTimelineError::Cancelled,
+                err => DeleteTimelineError::Other(err.into()),
+            })?;
 
         *guard = Self::Finished;
 
@@ -111,7 +111,7 @@ pub(crate) async fn offload_timeline(
     // at the next restart attach it again.
     // For that to happen, we'd need to make the manifest reflect our *intended* state,
     // not our actual state of offloaded timelines.
-    tenant.store_tenant_manifest().await?;
+    tenant.maybe_upload_tenant_manifest().await?;
 
     tracing::info!("Timeline offload complete (remaining arc refcount: {remaining_refcount})");
 
@@ -318,7 +318,7 @@ def test_timeline_offload_persist(neon_env_builder: NeonEnvBuilder, delete_timel
         neon_env_builder.pageserver_remote_storage,
         prefix=f"tenants/{str(tenant_id)}/",
     )
-    assert_prefix_empty(
+    assert_prefix_not_empty(
         neon_env_builder.pageserver_remote_storage,
         prefix=f"tenants/{str(tenant_id)}/tenant-manifest",
     )
@@ -387,7 +387,7 @@ def test_timeline_offload_persist(neon_env_builder: NeonEnvBuilder, delete_timel
     sum_again = endpoint.safe_psql("SELECT sum(key) from foo where key < 500")
     assert sum == sum_again
 
-    assert_prefix_empty(
+    assert_prefix_not_empty(
         neon_env_builder.pageserver_remote_storage,
         prefix=f"tenants/{str(env.initial_tenant)}/tenant-manifest",
     )
@@ -924,7 +924,7 @@ def test_timeline_offload_generations(neon_env_builder: NeonEnvBuilder):
         neon_env_builder.pageserver_remote_storage,
         prefix=f"tenants/{str(tenant_id)}/",
     )
-    assert_prefix_empty(
+    assert_prefix_not_empty(
        neon_env_builder.pageserver_remote_storage,
        prefix=f"tenants/{str(tenant_id)}/tenant-manifest",
    )