Compare commits

...

2 Commits

Author SHA1 Message Date
Bojan Serafimov
a8fd6266aa wip 2023-10-27 11:42:18 -04:00
Bojan Serafimov
151605d751 wip 2023-10-24 13:11:40 -04:00
3 changed files with 25 additions and 4 deletions

View File

@@ -36,7 +36,7 @@ use utils::pid_file::{self, PidFileRead};
// it's waiting. If the process hasn't started/stopped after 5 seconds,
// it prints a notice that it's taking long, but keeps waiting.
//
// Total time to keep retrying before giving up. The diff paste left both the
// old (10) and new (10000) definitions of this constant in place, which is a
// duplicate-definition error; keep the post-diff value only.
// NOTE(review): 10000 s looks like a temporary local-dev value — confirm before merging.
const RETRY_UNTIL_SECS: u64 = 10000;
// Number of retry attempts derived from the total budget and the per-attempt interval.
const RETRIES: u64 = (RETRY_UNTIL_SECS * 1000) / RETRY_INTERVAL_MILLIS;
// Delay between consecutive retries, in milliseconds.
const RETRY_INTERVAL_MILLIS: u64 = 100;
// Print a progress dot every this many retries while waiting.
const DOT_EVERY_RETRIES: u64 = 10;

View File

@@ -157,7 +157,7 @@ def wait_for_last_record_lsn(
lsn: Lsn,
) -> Lsn:
"""waits for pageserver to catch up to a certain lsn, returns the last observed lsn."""
for i in range(100):
for i in range(1000000):
current_lsn = last_record_lsn(pageserver_http, tenant, timeline)
if current_lsn >= lsn:
return current_lsn

View File

@@ -1,5 +1,8 @@
import pytest
import os
import shutil
from contextlib import closing
from fixtures.log_helper import log
from fixtures.compare_fixtures import NeonCompare, PgCompare
from fixtures.pg_version import PgVersion
@@ -18,6 +21,9 @@ from fixtures.pg_version import PgVersion
def test_bulk_insert(neon_with_baseline: PgCompare):
env = neon_with_baseline
# Number of times to run the write query. One run creates 350MB of wal.
n_writes = 10
with closing(env.pg.connect()) as conn:
with conn.cursor() as cur:
cur.execute("create table huge (i int, j int);")
@@ -25,7 +31,10 @@ def test_bulk_insert(neon_with_baseline: PgCompare):
# Run INSERT, recording the time and I/O it takes
with env.record_pageserver_writes("pageserver_writes"):
with env.record_duration("insert"):
cur.execute("insert into huge values (generate_series(1, 5000000), 0);")
for i in range(n_writes):
if n_writes > 1:
log.info(f"running query {i}/{n_writes}")
cur.execute("insert into huge values (generate_series(1, 5000000), 0);")
env.flush()
env.report_peak_memory_use()
@@ -39,7 +48,9 @@ def test_bulk_insert(neon_with_baseline: PgCompare):
def measure_recovery_time(env: NeonCompare):
client = env.env.pageserver.http_client()
# Hmm why is pageserver less ready to respond to http when the datadir is large?
from urllib3.util.retry import Retry
client = env.env.pageserver.http_client(retries=Retry(1000))
pg_version = PgVersion(client.timeline_detail(env.tenant, env.timeline)["pg_version"])
# Stop pageserver and remove tenant data
@@ -57,3 +68,13 @@ def measure_recovery_time(env: NeonCompare):
# Flush, which will also wait for lsn to catch up
env.flush()
# Convenience entrypoint for local development only: it re-runs just the
# measure_recovery_time stage of test_bulk_insert without repeating the
# data-setup phase, roughly halving the turnaround while iterating on wal
# ingestion performance.
@pytest.mark.skip("this is a convenience test for local dev only")
def test_recovery(neon_env_builder):
    measure_recovery_time(neon_env_builder.init_start())