diff --git a/pageserver/src/tenant/timeline/compaction.rs b/pageserver/src/tenant/timeline/compaction.rs
index 76dcc159ea..589aea18b4 100644
--- a/pageserver/src/tenant/timeline/compaction.rs
+++ b/pageserver/src/tenant/timeline/compaction.rs
@@ -1112,7 +1112,7 @@ impl Timeline {
         // Accumulate the size of layers in `deltas_to_compact`
         let mut deltas_to_compact_bytes = 0;
 
-        // Under normal circumstances, we will accumulate up to compaction_interval L0s of size
+        // Under normal circumstances, we will accumulate up to compaction_upper_limit L0s of size
         // checkpoint_distance each. To avoid edge cases using extra system resources, bound our
         // work in this function to only operate on this much delta data at once.
         let delta_size_limit = self.get_compaction_upper_limit() as u64
diff --git a/test_runner/performance/test_compaction.py b/test_runner/performance/test_compaction.py
index 0cd1080fa7..eaa89ae754 100644
--- a/test_runner/performance/test_compaction.py
+++ b/test_runner/performance/test_compaction.py
@@ -75,6 +75,7 @@ def test_compaction_l0_memory(neon_compare: NeonCompare):
             # Initially disable compaction so that we will build up a stack of L0s
             "compaction_period": "0s",
             "gc_period": "0s",
+            "compaction_upper_limit": 12,
         }
     )
     neon_compare.tenant = tenant_id
@@ -91,6 +92,7 @@ def test_compaction_l0_memory(neon_compare: NeonCompare):
     tenant_conf = pageserver_http.tenant_config(tenant_id)
    assert tenant_conf.effective_config["checkpoint_distance"] == 256 * 1024 * 1024
    assert tenant_conf.effective_config["compaction_threshold"] == 10
+    assert tenant_conf.effective_config["compaction_upper_limit"] == 12
 
     # Aim to write about 20 L0s, so that we will hit the limit on how many
     # to compact at once
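For context on the bound the corrected comment describes: the per-pass delta budget is the product of the two tenant settings it names. Below is a minimal, illustrative Rust sketch of that arithmetic using the values the test asserts (compaction_upper_limit = 12, checkpoint_distance = 256 MiB). The free function and its saturating multiplication are assumptions for illustration only; the pageserver's actual expression is truncated after `as u64` in the hunk above and may differ in detail.

```rust
// Illustrative sketch only (not the pageserver implementation): the budget
// described by the comment is roughly compaction_upper_limit * checkpoint_distance.
fn delta_size_limit(compaction_upper_limit: usize, checkpoint_distance: u64) -> u64 {
    // Up to `compaction_upper_limit` L0 layers of about `checkpoint_distance`
    // bytes each may be pulled into a single L0 compaction pass.
    (compaction_upper_limit as u64).saturating_mul(checkpoint_distance)
}

fn main() {
    // Values matching the test: 12 L0s of 256 MiB each, i.e. a 3 GiB budget.
    let limit = delta_size_limit(12, 256 * 1024 * 1024);
    assert_eq!(limit, 3 * 1024 * 1024 * 1024);
    println!("delta_size_limit = {limit} bytes");
}
```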