fix(pageserver): ensure all layers are flushed before measuring RSS (#9861)

## Problem

Closes https://github.com/neondatabase/neon/issues/9761

The test assumed that no new L0 layers would be flushed for the duration of the
measured compaction, which is not guaranteed: in-memory layers could still be
flushed mid-run, distorting the RSS reading.

## Summary of changes

Fix the test case `test_compaction_l0_memory` by flushing all in-memory
layers before compaction, so no flush adds new L0 layers (and memory)
while RSS is being measured.
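
In effect, the fix pins the measurement window: everything is flushed to disk first, then the RSS high-water mark is sampled around compaction alone. A minimal sketch of the resulting order, using the names from the diff below:

```python
# Sketch of the fixed measurement order (names as in the diff below).
pageserver_http.timeline_checkpoint(
    tenant_id, timeline_id, compact=False
)  # flush every in-memory layer to an on-disk L0 first...
before = rss_hwm()
pageserver_http.timeline_compact(tenant_id, timeline_id)
after = rss_hwm()  # ...so the RSS delta reflects compaction alone
```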

Signed-off-by: Alex Chi Z <chi@neon.tech>
Author: Alex Chi Z.
Date: 2024-11-25 09:25:18 -05:00
Committed by: GitHub
Parent: 6f6749c4a9
Commit: 4630b70962

```diff
@@ -103,6 +103,9 @@ def test_compaction_l0_memory(neon_compare: NeonCompare):
             cur.execute(f"update tbl{i} set j = {j};")
 
     wait_for_last_flush_lsn(env, endpoint, tenant_id, timeline_id)
+    pageserver_http.timeline_checkpoint(
+        tenant_id, timeline_id, compact=False
+    )  # ^1: flush all in-memory layers
     endpoint.stop()
 
     # Check we have generated the L0 stack we expected
```
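
The `compact=False` checkpoint flushes every in-memory layer into an on-disk L0 without triggering compaction, so the subsequent L0 count is deterministic. As a hedged sketch (assuming the test fixture's `layer_map_info` mirrors the pageserver's `in_mem_layers`/`historic_layers` response fields), one could assert that the flush really emptied the in-memory layers:

```python
# Sketch only: the field names here are assumptions about the fixture's
# LayerMapInfo shape, not confirmed by this diff.
info = pageserver_http.layer_map_info(tenant_id, timeline_id)
assert len(info.in_mem_layers) == 0, "checkpoint should leave no in-memory layers"
l0_deltas = [layer for layer in info.historic_layers if layer.kind == "Delta"]
log.info(f"L0 stack after flush: {len(l0_deltas)} delta layers")
```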
```diff
@@ -118,7 +121,9 @@ def test_compaction_l0_memory(neon_compare: NeonCompare):
         return v * 1024
 
     before = rss_hwm()
-    pageserver_http.timeline_compact(tenant_id, timeline_id)
+    pageserver_http.timeline_compact(
+        tenant_id, timeline_id
+    )  # ^1: we must ensure during this process no new L0 layers are flushed
     after = rss_hwm()
 
     log.info(f"RSS across compaction: {before} -> {after} (grew {after - before})")
```
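
The diff context (`return v * 1024`) suggests `rss_hwm()` reads a kB-denominated counter. A plausible standalone implementation, offered as an assumption rather than the test's actual helper, would parse `VmHWM` from the pageserver's `/proc/<pid>/status`:

```python
import re

def rss_hwm(pid: int) -> int:
    """Hypothetical stand-in for the test's helper: peak RSS in bytes.

    VmHWM in /proc/<pid>/status is reported in kB, which would explain
    the `* 1024` conversion visible in the diff context above.
    """
    with open(f"/proc/{pid}/status") as f:
        for line in f:
            m = re.match(r"VmHWM:\s+(\d+)\s+kB", line)
            if m:
                return int(m.group(1)) * 1024
    raise RuntimeError(f"no VmHWM entry for pid {pid}")
```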
```diff
@@ -137,7 +142,7 @@ def test_compaction_l0_memory(neon_compare: NeonCompare):
     # To be fixed in https://github.com/neondatabase/neon/issues/8184, after which
     # this memory estimate can be revised far downwards to something that doesn't scale
     # linearly with the layer sizes.
-    MEMORY_ESTIMATE = (initial_l0s_size - final_l0s_size) * 1.5
+    MEMORY_ESTIMATE = (initial_l0s_size - final_l0s_size) * 1.25
 
     # If we find that compaction is using more memory, this may indicate a regression
     assert compaction_mapped_rss < MEMORY_ESTIMATE
```
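
Until https://github.com/neondatabase/neon/issues/8184 lands, the bound deliberately scales with the amount of L0 data consumed. A worked example with illustrative figures (not taken from the test):

```python
# Illustrative numbers only: suppose compaction consumes 256 MiB of L0
# deltas and leaves 32 MiB behind. The 1.25 headroom factor then allows
# about 280 MiB of peak RSS growth before the assertion fires.
initial_l0s_size = 256 * 1024**2
final_l0s_size = 32 * 1024**2
MEMORY_ESTIMATE = (initial_l0s_size - final_l0s_size) * 1.25  # 293_601_280 bytes
```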