mirror of https://github.com/neondatabase/neon.git
We use the term "endpoint" for compute Postgres nodes in the web UI
and user-facing documentation now. Adjust the nomenclature in the code.

This changes the name of the "neon_local pg" command to "neon_local
endpoint". Also adjust the names of classes, variables, etc. in the
Python tests accordingly.
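To illustrate the fixture rename (the old name below is an assumption based on
the previous "pg"/"postgres" terminology; the new form is the one used in the
test further down):

    # before the rename (assumed form, for illustration only)
    pg = env.postgres.create_start("test_layer_map", tenant_id=tenant)

    # after the rename, as used in the test below
    endpoint = env.endpoints.create_start("test_layer_map", tenant_id=tenant)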
This also changes the directory structure so that endpoints are now
stored in:
.neon/endpoints/<endpoint id>
instead of:
.neon/pgdatadirs/tenants/<tenant_id>/<endpoint (node) name>
The tenant ID is no longer part of the path. That means that you
cannot have two endpoints with the same name/ID in two different
tenants anymore. That's consistent with how we treat endpoints in the
real control plane and proxy: the endpoint ID must be globally unique.
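For example, with two tenants whose IDs are <tenant_a> and <tenant_b>
(placeholders) and a hypothetical endpoint named "main", the old layout
allowed both to coexist:

    .neon/pgdatadirs/tenants/<tenant_a>/main
    .neon/pgdatadirs/tenants/<tenant_b>/main

Under the new layout both would map to the same directory, so the second
endpoint would have to be created with a different ID:

    .neon/endpoints/main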
39 lines
1.3 KiB
Python
import time

from fixtures.neon_fixtures import NeonEnvBuilder


#
# Benchmark searching the layer map, when there are a lot of small layer files.
#
def test_layer_map(neon_env_builder: NeonEnvBuilder, zenbenchmark):
    env = neon_env_builder.init_start()
    n_iters = 10
    n_records = 100000

    # We want to have a lot of layer files to exercise the layer map. Disable
    # GC, and make checkpoint_distance very small, so that we get a lot of small
    # layer files.
    tenant, _ = env.neon_cli.create_tenant(
        conf={
            "gc_period": "0s",
            "checkpoint_distance": "8192",
            "compaction_period": "1 s",
            "compaction_threshold": "1",
            "compaction_target_size": "8192",
        }
    )

    env.neon_cli.create_timeline("test_layer_map", tenant_id=tenant)
    endpoint = env.endpoints.create_start("test_layer_map", tenant_id=tenant)
    cur = endpoint.connect().cursor()
    cur.execute("create table t(x integer)")
    for i in range(n_iters):
        cur.execute(f"insert into t values (generate_series(1,{n_records}))")
        time.sleep(1)

    cur.execute("vacuum t")
    with zenbenchmark.record_duration("test_query"):
        cur.execute("SELECT count(*) from t")
    assert cur.fetchone() == (n_iters * n_records,)