neon/test_runner/performance/test_compute_ctl_api.py
devin-ai-integration[bot] efb1df4362 fix: Change metric_unit from 'microseconds' to 'μs' in test_compute_ctl_api.py (#11209)
# Fix metric_unit length in test_compute_ctl_api.py

## Description
This PR changes the metric_unit from "microseconds" to "μs" in
test_compute_ctl_api.py. Perf test results were not being stored in the
database because that string exceeds the 10-character limit of the
metric_unit column in the perf_test_results table.

## Problem
As reported in Slack, the perf test results were not being uploaded to
the database because the string "microseconds" (12 characters) exceeds
the 10-character limit of the metric_unit column in the
perf_test_results table.
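
For illustration, the constraint boils down to a simple length check. The
sketch below is hypothetical (the constant and helper names are made up);
only the 10-character limit comes from the description above:

```python
# Minimal sketch of the constraint described above. The 10-character limit on
# the metric_unit column comes from the PR description; this helper itself is
# hypothetical and not part of the test suite.
MAX_METRIC_UNIT_LEN = 10


def validate_metric_unit(unit: str) -> None:
    if len(unit) > MAX_METRIC_UNIT_LEN:
        raise ValueError(
            f"metric_unit {unit!r} is {len(unit)} characters, "
            f"limit is {MAX_METRIC_UNIT_LEN}"
        )


validate_metric_unit("μs")  # passes: 2 characters
try:
    validate_metric_unit("microseconds")  # 12 characters
except ValueError as e:
    print(e)  # metric_unit 'microseconds' is 12 characters, limit is 10
```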

## Solution
Replace "microseconds" with "μs" in all metric_unit parameters in the
test_compute_ctl_api.py file.

## Testing
The changes have been committed and pushed. The PR is ready for review.

Link to Devin run:
https://app.devin.ai/sessions/e29edd672bd34114b059915820e8a853
Requested by: Peter Bendel

Co-authored-by: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com>
Co-authored-by: peterbendel@neon.tech <peterbendel@neon.tech>
2025-03-13 10:17:01 +00:00


from __future__ import annotations

import datetime

import pytest
from fixtures.benchmark_fixture import MetricReport, NeonBenchmarker
from fixtures.neon_fixtures import NeonEnv


@pytest.mark.timeout(120)
def test_compute_ctl_api_latencies(
    neon_simple_env: NeonEnv,
    zenbenchmark: NeonBenchmarker,
):
    """
    Test compute_ctl HTTP API performance. Do simple GET requests
    to catch any pathological degradations in the HTTP server.
    """
    env = neon_simple_env
    endpoint = env.endpoints.create_start("main")
    client = endpoint.http_client()

    NUM_REQUESTS = 10000

    status_response_latency_us = []
    metrics_response_latency_us = []

    # timedelta.microseconds is only the sub-second component; that is
    # sufficient here because each request is expected to complete well
    # under one second.
    for _i in range(NUM_REQUESTS):
        # Time the status request.
        start_time = datetime.datetime.now()
        _ = client.status()
        status_response_latency_us.append((datetime.datetime.now() - start_time).microseconds)

        # Time the metrics_json request.
        start_time = datetime.datetime.now()
        _ = client.metrics_json()
        metrics_response_latency_us.append((datetime.datetime.now() - start_time).microseconds)

    # Sort ascending so percentiles can be read off by index.
    status_response_latency_us = sorted(status_response_latency_us)
    metrics_response_latency_us = sorted(metrics_response_latency_us)

    # Record p50 and p99 latencies. The unit string must fit the 10-character
    # metric_unit column in the perf_test_results table, hence "μs".
    zenbenchmark.record(
        "status_response_latency_p50_us",
        status_response_latency_us[len(status_response_latency_us) // 2],
        "μs",
        MetricReport.LOWER_IS_BETTER,
    )
    zenbenchmark.record(
        "metrics_response_latency_p50_us",
        metrics_response_latency_us[len(metrics_response_latency_us) // 2],
        "μs",
        MetricReport.LOWER_IS_BETTER,
    )
    zenbenchmark.record(
        "status_response_latency_p99_us",
        status_response_latency_us[len(status_response_latency_us) * 99 // 100],
        "μs",
        MetricReport.LOWER_IS_BETTER,
    )
    zenbenchmark.record(
        "metrics_response_latency_p99_us",
        metrics_response_latency_us[len(metrics_response_latency_us) * 99 // 100],
        "μs",
        MetricReport.LOWER_IS_BETTER,
    )
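
The p50/p99 values above are read off the sorted latency lists by index
arithmetic (len // 2 and len * 99 // 100). As a standalone illustration of
that pattern, a hypothetical helper (not part of the test file) could look
like this:

```python
def percentile(sorted_samples: list[int], pct: int) -> int:
    """Value at the given percentile of an ascending-sorted list, using the
    same index arithmetic as the test above (len * pct // 100), clamped to
    the last element so pct=100 stays in bounds."""
    assert sorted_samples, "need at least one sample"
    idx = min(len(sorted_samples) * pct // 100, len(sorted_samples) - 1)
    return sorted_samples[idx]


# For the 10000-sample lists in the test, percentile(samples, 50) and
# percentile(samples, 99) select the same elements as the expressions above.
```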