mirror of
https://github.com/neondatabase/neon.git
synced 2026-01-04 20:12:54 +00:00
use a prod-like shared_buffers size for some perf unit tests (#11373)
## Problem
In Neon DBaaS we adjust the shared_buffers to the size of the compute;
more precisely, we adjust the max number of connections to the
compute size, and then adjust the shared_buffers size to that number of max
connections, roughly according to the following sizes:
`2 CU: 225mb; 4 CU: 450mb; 8 CU: 900mb`
[see](877e33b428/goapp/controlplane/internal/pkg/compute/computespec/pg_settings.go (L405))
## Summary of changes
We should run perf unit tests with settings that are realistic for a
paying customer, and we select 8 CU as the reference for those tests.
This commit is contained in:
@@ -2,6 +2,7 @@ from __future__ import annotations
|
||||
|
||||
import pytest
|
||||
from fixtures.neon_fixtures import NeonEnvBuilder, wait_for_last_flush_lsn
|
||||
from fixtures.utils import shared_buffers_for_max_cu
|
||||
|
||||
|
||||
#
|
||||
@@ -20,7 +21,10 @@ def test_bulk_update(neon_env_builder: NeonEnvBuilder, zenbenchmark, fillfactor)
|
||||
|
||||
timeline_id = env.create_branch("test_bulk_update")
|
||||
tenant_id = env.initial_tenant
|
||||
endpoint = env.endpoints.create_start("test_bulk_update")
|
||||
# use shared_buffers size like in production for 8 CU compute
|
||||
endpoint = env.endpoints.create_start(
|
||||
"test_bulk_update", config_lines=[f"shared_buffers={shared_buffers_for_max_cu(8.0)}"]
|
||||
)
|
||||
cur = endpoint.connect().cursor()
|
||||
cur.execute("set statement_timeout=0")
|
||||
|
||||
|
||||
@@ -17,9 +17,10 @@ from fixtures.pageserver.utils import (
|
||||
wait_for_upload_queue_empty,
|
||||
)
|
||||
from fixtures.remote_storage import s3_storage
|
||||
from fixtures.utils import shared_buffers_for_max_cu
|
||||
|
||||
|
||||
@pytest.mark.timeout(900)
|
||||
@pytest.mark.timeout(1800)
|
||||
@pytest.mark.parametrize("size", [8, 1024, 8192])
|
||||
@pytest.mark.parametrize("s3", [True, False], ids=["s3", "local"])
|
||||
@pytest.mark.parametrize("backpressure", [True, False], ids=["backpressure", "nobackpressure"])
|
||||
@@ -60,6 +61,8 @@ def test_ingest_insert_bulk(
|
||||
f"fsync = {fsync}",
|
||||
"max_replication_apply_lag = 0",
|
||||
f"max_replication_flush_lag = {'10GB' if backpressure else '0'}",
|
||||
# use shared_buffers size like in production for 8 CU compute
|
||||
f"shared_buffers={shared_buffers_for_max_cu(8.0)}",
|
||||
# NB: neon_local defaults to 15MB, which is too slow -- production uses 500MB.
|
||||
f"max_replication_write_lag = {'500MB' if backpressure else '0'}",
|
||||
],
|
||||
|
||||
@@ -12,7 +12,7 @@ from fixtures.neon_fixtures import (
|
||||
from fixtures.pageserver.utils import wait_for_last_record_lsn
|
||||
|
||||
|
||||
@pytest.mark.timeout(600)
|
||||
@pytest.mark.timeout(1200)
|
||||
@pytest.mark.parametrize("size", [1024, 8192, 131072])
|
||||
@pytest.mark.parametrize("fsync", [True, False], ids=["fsync", "nofsync"])
|
||||
def test_ingest_logical_message(
|
||||
|
||||
@@ -7,6 +7,8 @@ from typing import TYPE_CHECKING
|
||||
if TYPE_CHECKING:
|
||||
from fixtures.neon_fixtures import Endpoint, NeonEnv
|
||||
|
||||
from fixtures.utils import shared_buffers_for_max_cu
|
||||
|
||||
|
||||
async def repeat_bytes(buf, repetitions: int):
|
||||
for _ in range(repetitions):
|
||||
@@ -45,7 +47,10 @@ async def parallel_load_same_table(endpoint: Endpoint, n_parallel: int):
|
||||
# Load data into one table with COPY TO from 5 parallel connections
|
||||
def test_parallel_copy(neon_simple_env: NeonEnv, n_parallel=5):
|
||||
env = neon_simple_env
|
||||
endpoint = env.endpoints.create_start("main")
|
||||
# use shared_buffers size like in production for 8 CU compute
|
||||
endpoint = env.endpoints.create_start(
|
||||
"main", config_lines=[f"shared_buffers={shared_buffers_for_max_cu(8.0)}"]
|
||||
)
|
||||
|
||||
# Create test table
|
||||
conn = endpoint.connect()
|
||||
|
||||
@@ -6,6 +6,7 @@ from fixtures.benchmark_fixture import NeonBenchmarker
|
||||
from fixtures.compare_fixtures import RemoteCompare
|
||||
from fixtures.log_helper import log
|
||||
from fixtures.neon_fixtures import NeonEnvBuilder
|
||||
from fixtures.utils import shared_buffers_for_max_cu
|
||||
|
||||
|
||||
def get_num_relations(default: int = 1000) -> list[int]:
|
||||
@@ -78,7 +79,8 @@ def test_perf_simple_many_relations_reldir_v2(
|
||||
ep = env.endpoints.create_start(
|
||||
"main",
|
||||
config_lines=[
|
||||
"shared_buffers=1000MB",
|
||||
# use shared_buffers size like in production for 8 CU compute
|
||||
f"shared_buffers={shared_buffers_for_max_cu(8.0)}",
|
||||
"max_locks_per_transaction=16384",
|
||||
],
|
||||
)
|
||||
|
||||
Reference in New Issue
Block a user