Merge remote-tracking branch 'origin/dkr/on-demand-split/per-tenant-remote-sync' into problame/for-dkr/on-demand-split/per-tenant-remote-sync

Christian Schwarz
2022-11-25 13:22:31 -05:00
3 changed files with 23 additions and 10 deletions

View File

@@ -544,11 +544,13 @@ impl RemoteTimelineClient {
 /// Download a (layer) file from `path`, into local filesystem.
 ///
 /// 'layer_metadata' is the metadata from the remote index file.
+///
+/// On success, returns the size of the downloaded file.
 pub async fn download_layer_file(
     &self,
     path: &RelativePath,
     layer_metadata: &LayerFileMetadata,
-) -> anyhow::Result<()> {
+) -> anyhow::Result<u64> {
 let downloaded_size = download::download_layer_file(
     self.conf,
     &self.storage_impl,
@@ -576,7 +578,7 @@ impl RemoteTimelineClient {
 );
 }
 }
-Ok(())
+Ok(downloaded_size)
 }
 //

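The hunks above change `download_layer_file` to report how many bytes were written to local disk instead of returning `()`. A minimal, hypothetical sketch of that contract follows; the body and crate choices (`anyhow`, `tokio`) are stand-ins, not the pageserver's actual implementation:

```rust
use std::path::Path;

// Stand-in for RemoteTimelineClient::download_layer_file: fetch a layer file
// into `local_path`, then report its on-disk size so the caller can account
// for the bytes that were downloaded.
async fn download_layer_file(local_path: &Path) -> anyhow::Result<u64> {
    // ... transfer the file from remote storage into `local_path` ...
    let downloaded_size = tokio::fs::metadata(local_path).await?.len();
    Ok(downloaded_size)
}
```

Callers that previously ignored the unit return value now receive the byte count; the `Timeline` changes in the next file use it to keep `current_physical_size_gauge` in sync.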
View File

@@ -1027,6 +1027,7 @@ impl Timeline {
 error!("could not rename file \"{}\": {:?}",
     local_path.display(), err);
 }
+self.metrics.current_physical_size_gauge.sub(local_size);
 false
 } else {
 true
@@ -1070,7 +1071,7 @@
 }
 trace!("downloading image file: {}", file = path.display());
-remote_client
+let sz = remote_client
     .download_layer_file(&RelativePath::from_filename(path), &layer_metadata)
     .await
     .context("download image layer")?;
@@ -1079,11 +1080,11 @@
 let image_layer =
     ImageLayer::new(self.conf, self.timeline_id, self.tenant_id, &imgfilename);
-// FIXME: when to update physical size?
 self.layers
     .write()
     .unwrap()
     .insert_historic(Arc::new(image_layer));
+self.metrics.current_physical_size_gauge.add(sz);
 } else if let Some(deltafilename) = DeltaFileName::parse_str(fname) {
 // Create a DeltaLayer struct for each delta file.
 // The end-LSN is exclusive, while disk_consistent_lsn is
@@ -1100,7 +1101,7 @@
 }
 trace!("downloading image file: {}", file = path.display());
-remote_client
+let sz = remote_client
     .download_layer_file(&RelativePath::from_filename(path), &layer_metadata)
     .await
     .context("download delta layer")?;
@@ -1109,11 +1110,11 @@
 let delta_layer =
     DeltaLayer::new(self.conf, self.timeline_id, self.tenant_id, &deltafilename);
-// FIXME: when to update physical size?
 self.layers
     .write()
     .unwrap()
     .insert_historic(Arc::new(delta_layer));
+self.metrics.current_physical_size_gauge.add(sz);
 } else {
 bail!("unexpected layer filename in remote storage: {}", fname);
 }
@@ -1187,8 +1188,6 @@ impl Timeline {
 }
 };
-// TODO what to do with physical size?
 // Are there local files that don't exist remotely? Schedule uploads for them
 let timeline_path = self.conf.timeline_path(&self.timeline_id, &self.tenant_id);
 for fname in &local_only_filenames {

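Across the `Timeline` hunks above, `current_physical_size_gauge` is kept equal to the total size of layer files on local disk: `.sub(local_size)` where a local layer file is moved aside, and `.add(sz)` with the size returned by `download_layer_file` after each successful download. Below is a rough sketch of that bookkeeping, assuming a Prometheus-style `IntGauge`; `TimelineMetrics` and the two helper names are illustrative, not the pageserver's API:

```rust
use prometheus::IntGauge;

// Hypothetical wrapper mirroring the gauge updates made in Timeline.
struct TimelineMetrics {
    current_physical_size_gauge: IntGauge,
}

impl TimelineMetrics {
    // Mirrors the `.sub(local_size)` call when a local layer file goes away.
    fn on_local_layer_removed(&self, local_size: u64) {
        self.current_physical_size_gauge.sub(local_size as i64);
    }

    // Mirrors the `.add(sz)` call after a successful layer download.
    fn on_layer_downloaded(&self, downloaded_size: u64) {
        self.current_physical_size_gauge.add(downloaded_size as i64);
    }
}

fn main() -> prometheus::Result<()> {
    let metrics = TimelineMetrics {
        current_physical_size_gauge: IntGauge::new(
            "current_physical_size",
            "Sum of layer file sizes on local disk",
        )?,
    };
    // Downloading and later dropping the same layer leaves the gauge unchanged.
    metrics.on_layer_downloaded(8192);
    metrics.on_local_layer_removed(8192);
    assert_eq!(metrics.current_physical_size_gauge.get(), 0);
    Ok(())
}
```

The test change in the next file checks the same invariant end to end: the physical size reported before the layer files are erased must match the size reported after they are re-downloaded from remote storage.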
View File

@@ -160,6 +160,12 @@ def test_tenants_attached_after_download(
 ##### Stop the pageserver, erase its layer file to force it being downloaded from S3
 env.postgres.stop_all()
+detail_before = client.timeline_detail(
+    tenant_id, timeline_id, include_non_incremental_physical_size=True
+)
+assert detail_before["current_physical_size_non_incremental"] == detail_before["current_physical_size"]
 env.pageserver.stop()
 timeline_dir = Path(env.repo_dir) / "tenants" / str(tenant_id) / "timelines" / str(timeline_id)
@@ -186,11 +192,17 @@ def test_tenants_attached_after_download(
 assert (
     len(restored_timelines) == 1
 ), f"Tenant {tenant_id} should have its timeline reattached after its layer is downloaded from the remote storage"
-retored_timeline = restored_timelines[0]
-assert retored_timeline["timeline_id"] == str(
+restored_timeline = restored_timelines[0]
+assert restored_timeline["timeline_id"] == str(
     timeline_id
 ), f"Tenant {tenant_id} should have its old timeline {timeline_id} restored from the remote storage"
+# Check that the physical size matches after re-downloading
+detail_after = client.timeline_detail(
+    tenant_id, timeline_id, include_non_incremental_physical_size=True
+)
+assert detail_before["current_physical_size"] == detail_after["current_physical_size"]
 @pytest.mark.parametrize("remote_storage_kind", [RemoteStorageKind.LOCAL_FS])
 def test_tenant_upgrades_index_json_from_v0(