neon/test_runner/performance/test_compaction.py
Heikki Linnakangas 53f438a8a8 Rename "Postgres nodes" in control_plane to endpoints.
We use the term "endpoint" for compute Postgres nodes in the web UI
and user-facing documentation now. Adjust the nomenclature in the code.

This changes the name of the "neon_local pg" command to "neon_local
endpoint". Also adjust names of classes, variables etc. in the python
tests accordingly.

This also changes the directory structure so that endpoints are now
stored in:

    .neon/endpoints/<endpoint id>

instead of:

    .neon/pgdatadirs/tenants/<tenant_id>/<endpoint (node) name>

The tenant ID is no longer part of the path. That means that you
cannot have two endpoints with the same name/ID in two different
tenants anymore. That's consistent with how we treat endpoints in the
real control plane and proxy: the endpoint ID must be globally unique.
2023-04-13 14:34:29 +03:00
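
As a rough illustration of the layout change described in the commit message, the sketch below contrasts the old and new on-disk paths. The helper names and the repo_dir parameter are hypothetical and illustrative only; they are not the actual neon_local code.

    from pathlib import Path

    # Hypothetical helpers illustrating the directory layout change; not real neon_local APIs.
    def endpoint_path_new(repo_dir: Path, endpoint_id: str) -> Path:
        # After this commit: .neon/endpoints/<endpoint id>
        return repo_dir / "endpoints" / endpoint_id

    def endpoint_path_old(repo_dir: Path, tenant_id: str, endpoint_name: str) -> Path:
        # Before this commit: .neon/pgdatadirs/tenants/<tenant_id>/<endpoint (node) name>
        return repo_dir / "pgdatadirs" / "tenants" / tenant_id / endpoint_name

Note that the tenant ID appears only in the old path, which is why endpoint IDs must now be globally unique rather than unique per tenant.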

59 lines
2.2 KiB
Python

from contextlib import closing

import pytest
from fixtures.compare_fixtures import NeonCompare
from fixtures.neon_fixtures import wait_for_last_flush_lsn


#
# Test compaction and image layer creation performance.
#
# This creates a few tables and runs some simple INSERTs and UPDATEs on them to generate
# some delta layers. Then it runs manual compaction, measuring how long it takes.
#
@pytest.mark.timeout(1000)
def test_compaction(neon_compare: NeonCompare):
    env = neon_compare.env
    pageserver_http = env.pageserver.http_client()

    tenant_id, timeline_id = env.neon_cli.create_tenant(
        conf={
            # Disable background GC and compaction, we'll run compaction manually.
            "gc_period": "0s",
            "compaction_period": "0s",
            # Make checkpoint distance somewhat smaller than default, to create
            # more delta layers quicker, to trigger compaction.
            "checkpoint_distance": "25000000",  # 25 MB
            # Force image layer creation when we run compaction.
            "image_creation_threshold": "1",
        }
    )
    neon_compare.tenant = tenant_id
    neon_compare.timeline = timeline_id

    # Create some tables, and run a bunch of INSERTs and UPDATEs on them,
    # to generate WAL and layers
    endpoint = env.endpoints.create_start(
        "main", tenant_id=tenant_id, config_lines=["shared_buffers=512MB"]
    )
    with closing(endpoint.connect()) as conn:
        with conn.cursor() as cur:
            for i in range(100):
                cur.execute(f"create table tbl{i} (i int, j int);")
                cur.execute(f"insert into tbl{i} values (generate_series(1, 1000), 0);")
                for j in range(100):
                    cur.execute(f"update tbl{i} set j = {j};")
    wait_for_last_flush_lsn(env, endpoint, tenant_id, timeline_id)

    # First compaction generates L1 layers
    with neon_compare.zenbenchmark.record_duration("compaction"):
        pageserver_http.timeline_compact(tenant_id, timeline_id)

    # And second compaction triggers image layer creation
    with neon_compare.zenbenchmark.record_duration("image_creation"):
        pageserver_http.timeline_compact(tenant_id, timeline_id)

    neon_compare.report_size()