diff --git a/pageserver/src/page_service.rs b/pageserver/src/page_service.rs
index 4b5dd18301..6d13460705 100644
--- a/pageserver/src/page_service.rs
+++ b/pageserver/src/page_service.rs
@@ -621,7 +621,8 @@ impl PageServerHandler {
 
         let reader = CopyInReader::new(pgb);
         import_wal_from_tar(&mut datadir_timeline, reader, start_lsn, end_lsn)?;
-        ensure!(datadir_timeline.tline.get_last_record_lsn() == end_lsn);
+        // TODO Does it make sense to overshoot?
+        ensure!(datadir_timeline.tline.get_last_record_lsn() >= end_lsn);
 
         // Flush data to disk, then upload to s3. No need for a forced checkpoint.
         // We only want to persist the data, and it doesn't matter if it's in the
@@ -954,7 +955,7 @@ impl postgres_backend::Handler for PageServerHandler {
             let tenant = ZTenantId::from_str(params[0])?;
             let timeline = ZTimelineId::from_str(params[1])?;
             let base_lsn = Lsn::from_str(params[2])?;
-            let end_lsn = Lsn::from_str(params[2])?;
+            let end_lsn = Lsn::from_str(params[3])?;
 
             self.check_permission(Some(tenant))?;
 
@@ -968,7 +969,7 @@ impl postgres_backend::Handler for PageServerHandler {
             let tenant = ZTenantId::from_str(params[0])?;
             let timeline = ZTimelineId::from_str(params[1])?;
             let start_lsn = Lsn::from_str(params[2])?;
-            let end_lsn = Lsn::from_str(params[2])?;
+            let end_lsn = Lsn::from_str(params[3])?;
 
             self.check_permission(Some(tenant))?;
 
diff --git a/test_runner/batch_others/test_import.py b/test_runner/batch_others/test_import.py
index 7321763fcf..423b1b5896 100644
--- a/test_runner/batch_others/test_import.py
+++ b/test_runner/batch_others/test_import.py
@@ -61,8 +61,8 @@ def test_import_from_vanilla(test_output_dir, pg_bin, vanilla_pg, neon_env_build
     timeline = uuid4()
 
     # Set up pageserver for import
-    env = neon_env_builder.init_start()
     neon_env_builder.enable_local_fs_remote_storage()
+    env = neon_env_builder.init_start()
     env.pageserver.http_client().tenant_create(tenant)
 
     def import_tar(base, wal):
@@ -100,7 +100,7 @@ def test_import_from_vanilla(test_output_dir, pg_bin, vanilla_pg, neon_env_build
     # Wait for data to land in s3
     env.pageserver.safe_psql(f"checkpoint {tenant.hex} {timeline.hex}")
     wait_for_last_record_lsn(client, tenant, timeline, lsn_from_hex(end_lsn))
-    # wait_for_upload(client, tenant, timeline, lsn_from_hex(end_lsn))
+    wait_for_upload(client, tenant, timeline, lsn_from_hex(end_lsn))
 
     # Check it worked
     pg = env.postgres.create_start(node_name, tenant_id=tenant)
@@ -112,7 +112,7 @@ def test_import_from_pageserver(test_output_dir, pg_bin, vanilla_pg, neon_env_bu
 
     num_rows = 3000
     neon_env_builder.num_safekeepers = 1
-    neon_env_builder
+    neon_env_builder.enable_local_fs_remote_storage()
    env = neon_env_builder.init_start()
 
     env.neon_cli.create_branch('test_import_from_pageserver')
@@ -178,8 +178,9 @@ def test_import_from_pageserver(test_output_dir, pg_bin, vanilla_pg, neon_env_bu
     ])
 
     # Wait for data to land in s3
-    env.pageserver.safe_psql(f"checkpoint {tenant.hex} {timeline.hex}")
-    wait_for_upload(client, tenant, timeline, lsn_from_hex(end_lsn))
+    env.pageserver.safe_psql(f"checkpoint {tenant.hex} {timeline}")
+    wait_for_last_record_lsn(client, tenant, UUID(timeline), lsn_from_hex(lsn))
+    wait_for_upload(client, tenant, UUID(timeline), lsn_from_hex(lsn))
 
     # Check it worked
     pg = env.postgres.create_start(node_name, tenant_id=tenant)