diff --git a/test_runner/performance/test_latency.py b/test_runner/performance/test_latency.py new file mode 100644 index 0000000000..9aa618650d --- /dev/null +++ b/test_runner/performance/test_latency.py @@ -0,0 +1,29 @@ +import threading + +import pytest +from fixtures.compare_fixtures import PgCompare +from fixtures.neon_fixtures import Postgres +from performance.test_perf_pgbench import get_scales_matrix +from performance.test_wal_backpressure import record_read_latency + + +def start_write_workload(pg: Postgres, scale: int = 10): + with pg.connect().cursor() as cur: + cur.execute(f"create table big as select generate_series(1,{scale*100_000})") + + +# Measure latency of reads on one table, while lots of writes are happening on another table. +# The fine-grained tracking of last-written LSNs helps to keep the latency low. Without it, the reads would +# often need to wait for the WAL records of the unrelated writes to be processed by the pageserver. +@pytest.mark.parametrize("scale", get_scales_matrix(1)) +def test_measure_read_latency_heavy_write_workload(neon_with_baseline: PgCompare, scale: int): + env = neon_with_baseline + pg = env.pg + + with pg.connect().cursor() as cur: + cur.execute(f"create table small as select generate_series(1,{scale*100_000})") + + write_thread = threading.Thread(target=start_write_workload, args=(pg, scale * 100)) + write_thread.start() + + record_read_latency(env, lambda: write_thread.is_alive(), "SELECT count(*) from small") diff --git a/test_runner/regress/test_lsn_mapping.py b/test_runner/regress/test_lsn_mapping.py index f6ca7000dd..9d1efec2c1 100644 --- a/test_runner/regress/test_lsn_mapping.py +++ b/test_runner/regress/test_lsn_mapping.py @@ -1,7 +1,7 @@ from datetime import timedelta from fixtures.log_helper import log -from fixtures.neon_fixtures import NeonEnvBuilder +from fixtures.neon_fixtures import NeonEnvBuilder, wait_for_last_flush_lsn from fixtures.utils import query_scalar @@ -34,9 +34,11 @@ def 
test_lsn_mapping(neon_env_builder: NeonEnvBuilder): - # Execute one more transaction with synchronous_commit enabled, to flush - # all the previous transactions + # Execute one more transaction, to flush + # all the previous transactions - cur.execute("SET synchronous_commit=on") cur.execute("INSERT INTO foo VALUES (-1)") + # Wait until WAL is received by pageserver + wait_for_last_flush_lsn(env, pgmain, env.initial_tenant, new_timeline_id) + # Check edge cases: timestamp in the future probe_timestamp = tbl[-1][1] + timedelta(hours=1) result = query_scalar(