Add tests for measuring effect of lsn caching (#2384)

* Add tests for measuring effect of lsn caching

* Fix formatting of test_latency.py

* Fix test_lsn_mapping test
This commit is contained in:
Konstantin Knizhnik
2022-09-03 17:06:19 +03:00
committed by GitHub
parent 71c965b0e1
commit eef7475408
2 changed files with 33 additions and 2 deletions

View File

@@ -0,0 +1,29 @@
import threading
import pytest
from fixtures.compare_fixtures import PgCompare
from fixtures.neon_fixtures import Postgres
from performance.test_perf_pgbench import get_scales_matrix
from performance.test_wal_backpressure import record_read_latency
def start_write_workload(pg: Postgres, scale: int = 10):
    """Generate a heavy write workload by filling one large table.

    Creates table 'big' with scale*100_000 rows via generate_series. Meant to
    run in a background thread while read latency is measured on an unrelated
    table (see test_measure_read_latency_heavy_write_workload).
    """
    # Close the connection explicitly: `with conn.cursor()` closes only the
    # cursor, so `pg.connect().cursor()` would leak the connection.
    conn = pg.connect()
    try:
        with conn.cursor() as cur:
            cur.execute(f"create table big as select generate_series(1,{scale*100_000})")
    finally:
        conn.close()
# Measure latency of reads on one table, while lots of writes are happening on another table.
# The fine-grained tracking of last-written LSNs helps to keep the latency low. Without it, the reads would
# often need to wait for the WAL records of the unrelated writes to be processed by the pageserver.
@pytest.mark.parametrize("scale", get_scales_matrix(1))
def test_measure_read_latency_heavy_write_workload(neon_with_baseline: PgCompare, scale: int):
    env = neon_with_baseline
    pg = env.pg

    # Populate the small table that the latency probes will read. Close the
    # connection explicitly; the cursor context manager closes only the cursor.
    conn = pg.connect()
    try:
        with conn.cursor() as cur:
            cur.execute(f"create table small as select generate_series(1,{scale*100_000})")
    finally:
        conn.close()

    # Run the heavy write workload on an unrelated table in the background.
    write_thread = threading.Thread(target=start_write_workload, args=(pg, scale * 100))
    write_thread.start()
    try:
        # Sample read latency on 'small' until the writer thread finishes.
        record_read_latency(env, lambda: write_thread.is_alive(), "SELECT count(*) from small")
    finally:
        # Always join the writer so the test never leaves a dangling thread,
        # even if the latency recording raises.
        write_thread.join()

View File

@@ -1,7 +1,7 @@
from datetime import timedelta
from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonEnvBuilder
from fixtures.neon_fixtures import NeonEnvBuilder, wait_for_last_flush_lsn
from fixtures.utils import query_scalar
@@ -34,9 +34,11 @@ def test_lsn_mapping(neon_env_builder: NeonEnvBuilder):
# Execute one more transaction with synchronous_commit enabled, to flush
# all the previous transactions
cur.execute("SET synchronous_commit=on")
cur.execute("INSERT INTO foo VALUES (-1)")
# Wait until WAL is received by pageserver
wait_for_last_flush_lsn(env, pgmain, env.initial_tenant, new_timeline_id)
# Check edge cases: timestamp in the future
probe_timestamp = tbl[-1][1] + timedelta(hours=1)
result = query_scalar(