mirror of https://github.com/neondatabase/neon.git
It seems that some benchmarks are failing because they simply do not stop ingesting WAL on shutdown. That might mean the tests never ran against a pageserver in a stable state, with WAL always left on the safekeepers still waiting to be ingested, but let's see if this silences the failures and "stops the bleeding". Cc: https://github.com/neondatabase/neon/issues/8712
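The change described here amounts to ending the benchmark with an immediate environment shutdown instead of a graceful one, so teardown is not left waiting on WAL ingestion. A minimal sketch of that pattern, mirroring the last lines of the file below (the test name and elided body are placeholders, not part of the actual change):

from fixtures.neon_fixtures import NeonEnvBuilder


def test_some_benchmark(neon_env_builder: NeonEnvBuilder):
    env = neon_env_builder.init_start()
    ...  # run and measure the workload under test
    # Shut down immediately rather than waiting for the remaining WAL to be ingested;
    # see https://github.com/neondatabase/neon/issues/8712
    env.stop(immediate=True)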
42 lines
1.4 KiB
Python
import time

from fixtures.neon_fixtures import NeonEnvBuilder


#
# Benchmark searching the layer map, when there are a lot of small layer files.
#
def test_layer_map(neon_env_builder: NeonEnvBuilder, zenbenchmark):
    env = neon_env_builder.init_start()
    n_iters = 10
    n_records = 100000

    # We want to have a lot of layer files to exercise the layer map. Disable
    # GC, and make checkpoint_distance very small, so that we get a lot of small layer
    # files.
    tenant, _ = env.neon_cli.create_tenant(
        conf={
            "gc_period": "0s",
            "checkpoint_distance": "16384",
            "compaction_period": "1 s",
            "compaction_threshold": "1",
            "compaction_target_size": "16384",
        }
    )

    env.neon_cli.create_timeline("test_layer_map", tenant_id=tenant)
    endpoint = env.endpoints.create_start("test_layer_map", tenant_id=tenant)
    cur = endpoint.connect().cursor()
    cur.execute("create table t(x integer)")
    for _ in range(n_iters):
        cur.execute(f"insert into t values (generate_series(1,{n_records}))")
        time.sleep(1)

    cur.execute("vacuum t")
    with zenbenchmark.record_duration("test_query"):
        cur.execute("SELECT count(*) from t")
    assert cur.fetchone() == (n_iters * n_records,)

    # see https://github.com/neondatabase/neon/issues/8712
    env.stop(immediate=True)
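As a usage note, a benchmark like this is normally selected by name through pytest. A minimal sketch, assuming the module is collected by the repository's test runner (the invocation is illustrative, not the project's documented benchmark command):

import pytest

if __name__ == "__main__":
    # Illustrative only: run just this test, equivalent to `pytest -k test_layer_map`
    # on the command line; the project's CI harness may invoke it differently.
    raise SystemExit(pytest.main(["-k", "test_layer_map"]))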