From eceda6337901675f74c00ed943805263459b5556 Mon Sep 17 00:00:00 2001 From: Bojan Serafimov Date: Thu, 8 Jun 2023 12:30:47 -0400 Subject: [PATCH 1/2] Do two iterations --- test_runner/performance/test_startup.py | 35 +++++++++++++++---------- 1 file changed, 21 insertions(+), 14 deletions(-) diff --git a/test_runner/performance/test_startup.py b/test_runner/performance/test_startup.py index b16ba86b22..f47b4c580c 100644 --- a/test_runner/performance/test_startup.py +++ b/test_runner/performance/test_startup.py @@ -28,21 +28,28 @@ def test_startup_simple(neon_env_builder: NeonEnvBuilder, zenbenchmark: NeonBenc env = neon_env_builder.init_start() env.neon_cli.create_branch("test_startup") - with zenbenchmark.record_duration("start_and_select"): - endpoint = env.endpoints.create_start("test_startup") - endpoint.safe_psql("select 1;") - metrics = requests.get(f"http://localhost:{endpoint.http_port}/metrics.json").json() - durations = { - "wait_for_spec_ms": "wait_for_spec", - "sync_safekeepers_ms": "sync_safekeepers", - "basebackup_ms": "basebackup", - "config_ms": "config", - "total_startup_ms": "total_startup", - } - for key, name in durations.items(): - value = metrics[key] - zenbenchmark.record(name, value, "ms", report=MetricReport.LOWER_IS_BETTER) + for i in range(2): + # Start + with zenbenchmark.record_duration(f"{i}_start_and_select"): + endpoint = env.endpoints.create_start("test_startup") + endpoint.safe_psql("select 1;") + + # Get metrics + metrics = requests.get(f"http://localhost:{endpoint.http_port}/metrics.json").json() + durations = { + "wait_for_spec_ms": f"{i}_wait_for_spec", + "sync_safekeepers_ms": f"{i}_sync_safekeepers", + "basebackup_ms": f"{i}_basebackup", + "config_ms": f"{i}_config", + "total_startup_ms": f"{i}_total_startup", + } + for key, name in durations.items(): + value = metrics[key] + zenbenchmark.record(name, value, "ms", report=MetricReport.LOWER_IS_BETTER) + + # Stop so we can restart + endpoint.stop() # This test sometimes runs 
for longer than the global 5 minute timeout. From 1baecdc27a2187abfa0361833875ddcd605ec247 Mon Sep 17 00:00:00 2001 From: Bojan Serafimov Date: Thu, 8 Jun 2023 12:33:19 -0400 Subject: [PATCH 2/2] comments --- test_runner/performance/test_startup.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/test_runner/performance/test_startup.py b/test_runner/performance/test_startup.py index f47b4c580c..9c45088d62 100644 --- a/test_runner/performance/test_startup.py +++ b/test_runner/performance/test_startup.py @@ -21,14 +21,20 @@ from fixtures.neon_fixtures import NeonEnvBuilder # `sudo tc qdisc del dev lo root netem` # # NOTE this test might not represent the real startup time because the basebackup -# for a large database might be larger, or safekeepers might need more syncing, -# or there might be more operations to apply during config step. +# for a large database might be larger if there's a lot of transaction metadata, +# or safekeepers might need more syncing, or there might be more operations to +# apply during config step, like more users, databases, or extensions. By default +# we load extensions 'neon,pg_stat_statements,timescaledb,pg_cron', but in this +# test we only load neon. def test_startup_simple(neon_env_builder: NeonEnvBuilder, zenbenchmark: NeonBenchmarker): neon_env_builder.num_safekeepers = 3 env = neon_env_builder.init_start() env.neon_cli.create_branch("test_startup") + # We do two iterations so we can see if the second startup is faster. It should + # be because the compute node should already be configured with roles, databases, + # extensions, etc. from the first run. for i in range(2): # Start with zenbenchmark.record_duration(f"{i}_start_and_select"):