Files
neon/test_runner/regress/test_pageserver_getpage_throttle.py
Joonas Koivunen d9dcbffac3 python: allow using allowed_errors.py (#7719)
See #7718. Fix it by renaming all `types.py` to `common_types.py`.

Additionally, add an advert for using `allowed_errors.py` to test any
added regex.
2024-05-13 15:16:23 +03:00

119 lines
4.2 KiB
Python

import json
import uuid
from anyio import Path
from fixtures.common_types import TenantId, TimelineId
from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonEnvBuilder, PgBin
from fixtures.pg_version import PgVersion
from fixtures.utils import wait_until
def test_pageserver_getpage_throttle(neon_env_builder: NeonEnvBuilder, pg_bin: PgBin):
    """Verify the pageserver's `timeline_get_throttle` for getpage requests.

    Checks three things against a freshly created tenant/timeline:
    1. a max-speed pagebench client is capped at the configured rps limit
       (within a 5% tolerance);
    2. the pageserver logs that the shard was throttled;
    3. the `pageserver_smgr_query_seconds_sum` metric does NOT include the
       time requests spent waiting on the throttle.
    """
    env = neon_env_builder.init_start()

    # The initial tenant is not needed; detach it so only our tenant serves traffic.
    env.pageserver.tenant_detach(env.initial_tenant)

    env.pageserver.allowed_errors.append(
        # https://github.com/neondatabase/neon/issues/6925
        r".*query handler for.*pagestream.*failed: unexpected message: CopyFail during COPY.*"
    )

    tenant_id = TenantId.generate()
    timeline_id = TimelineId.generate()

    rate_limit_rps = 100
    compaction_period = 5
    env.pageserver.tenant_create(
        tenant_id,
        conf={
            "compaction_period": f"{compaction_period}s",
            "timeline_get_throttle": {
                "task_kinds": ["PageRequestHandler"],
                "initial": 0,
                # Refill 1/10th of the per-second budget every 100ms so the
                # rate is enforced smoothly at sub-second granularity.
                "refill_interval": "100ms",
                "refill_amount": int(rate_limit_rps / 10),
                "max": int(rate_limit_rps / 10),
                "fair": True,
            },
        },
    )

    ps_http = env.pageserver.http_client()

    ps_http.timeline_create(PgVersion.V16, tenant_id, timeline_id)

    def run_pagebench_at_max_speed_and_get_total_requests_completed(duration_secs: int) -> int:
        """Run pagebench at full speed for `duration_secs` seconds and return
        the total number of completed getpage requests (parsed from the
        benchmark's JSON stdout)."""
        cmd = [
            str(env.neon_binpath / "pagebench"),
            "get-page-latest-lsn",
            "--mgmt-api-endpoint",
            ps_http.base_url,
            "--page-service-connstring",
            env.pageserver.connstr(password=None),
            "--runtime",
            f"{duration_secs}s",
            f"{tenant_id}/{timeline_id}",
        ]

        basepath = pg_bin.run_capture(cmd, with_command_header=False)
        results_path = Path(basepath + ".stdout")
        log.info(f"Benchmark results at: {results_path}")

        with open(results_path, "r") as f:
            results = json.load(f)
        log.info(f"Results:\n{json.dumps(results, sort_keys=True, indent=2)}")
        return int(results["total"]["request_count"])

    log.info("warmup / make sure metrics are present")
    run_pagebench_at_max_speed_and_get_total_requests_completed(2)
    metrics_query = {
        "tenant_id": str(tenant_id),
        "timeline_id": str(timeline_id),
        "smgr_query_type": "get_page_at_lsn",
    }
    metric_name = "pageserver_smgr_query_seconds_sum"
    smgr_query_seconds_pre = ps_http.get_metric_value(metric_name, metrics_query)
    assert smgr_query_seconds_pre is not None

    # Drop a unique marker into the pageserver log so the throttling check
    # below only matches log lines produced after this point.
    marker = uuid.uuid4().hex
    ps_http.post_tracing_event("info", marker)
    _, marker_offset = wait_until(
        10, 0.5, lambda: env.pageserver.assert_log_contains(marker, offset=None)
    )

    log.info("run pagebench")
    duration_secs = 10
    actual_ncompleted = run_pagebench_at_max_speed_and_get_total_requests_completed(duration_secs)

    log.info("validate the client is capped at the configured rps limit")
    expect_ncompleted = duration_secs * rate_limit_rps
    delta_abs = abs(expect_ncompleted - actual_ncompleted)
    # Allow 5% deviation from the expected request count.
    threshold = 0.05 * expect_ncompleted
    # Sanity-check our own tolerance: the allowed slack, expressed in seconds
    # of traffic at the limit, must stay well below the run duration.
    assert (
        threshold / rate_limit_rps < 0.1 * duration_secs
    ), "test self-test: unrealistic expectations regarding precision in this test"
    assert (
        delta_abs < threshold
    ), "the throttling deviates more than 5percent from the expectation"

    log.info("validate that we logged the throttling")
    wait_until(
        10,
        compaction_period / 10,
        lambda: env.pageserver.assert_log_contains(
            f".*{tenant_id}.*shard was throttled in the last n_seconds.*",
            offset=marker_offset,
        ),
    )

    log.info("validate that the metric doesn't include throttle wait time")
    smgr_query_seconds_post = ps_http.get_metric_value(metric_name, metrics_query)
    assert smgr_query_seconds_post is not None
    actual_smgr_query_seconds = smgr_query_seconds_post - smgr_query_seconds_pre
    # If throttle wait time leaked into the metric, accumulated smgr seconds
    # would approach the wall-clock run time; require it to be at most 1/10th.
    assert (
        duration_secs >= 10 * actual_smgr_query_seconds
    ), "smgr metrics should not include throttle wait time"