Temporary workaround (band-aid) to avoid concurrent timeline downloading until a proper refactoring/fix lands

This commit is contained in:
Dmitry Rodionov
2022-01-26 17:50:21 +03:00
committed by Dmitry Rodionov
parent f3c73f5797
commit 63dd7bce7e
3 changed files with 15 additions and 3 deletions

View File

@@ -173,7 +173,14 @@ impl Layer for ImageLayer {
.as_ref()
.unwrap()
.chapter_reader(BLOCKY_IMAGES_CHAPTER)?;
chapter.read_exact_at(&mut buf, offset)?;
chapter.read_exact_at(&mut buf, offset).with_context(|| {
format!(
"failed to read page from data file {} at offset {}",
self.filename().display(),
offset
)
})?;
buf
}

View File

@@ -74,8 +74,13 @@ def test_remote_storage_backup_and_restore(zenith_env_builder: ZenithEnvBuilder,
##### Second start, restore the data and ensure it's the same
env.pageserver.start()
log.info("waiting for timeline redownload")
client = env.pageserver.http_client()
client.timeline_attach(UUID(tenant_id), UUID(timeline_id))
# FIXME cannot handle duplicate download requests (which might be caused by repeated timeline detail calls)
# subject to fix in https://github.com/zenithdb/zenith/issues/997
time.sleep(5)
log.info("waiting for timeline redownload")
attempts = 0
while True:
timeline_details = client.timeline_detail(UUID(tenant_id), UUID(timeline_id))

View File

@@ -192,7 +192,7 @@ def test_tenant_relocation(zenith_env_builder: ZenithEnvBuilder,
new_pageserver_pg_port,
new_pageserver_http_port):
# call to attach timeline to new timeline
# call to attach timeline to new pageserver
new_pageserver_http_client.timeline_attach(UUID(tenant), UUID(timeline))
# FIXME cannot handle duplicate download requests, subject to fix in https://github.com/zenithdb/zenith/issues/997
time.sleep(5)