We now use the term "endpoint" for compute Postgres nodes in the web UI
and user-facing documentation. Adjust the nomenclature in the code to match.
This renames the "neon_local pg" command to "neon_local endpoint", and
adjusts the names of classes, variables, etc. in the Python tests
accordingly.
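For illustration, a minimal before/after sketch of how the rename shows up in a test. The old fixture spelling (env.postgres, returning a Postgres object) is recalled from the pre-rename fixtures and should be treated as an assumption, as is the "main" branch name:

    # Before the rename (assumed old fixture API):
    #   pg = env.postgres.create_start("main")
    # After the rename, matching the test below:
    endpoint = env.endpoints.create_start("main")
    endpoint.safe_psql("SELECT 1")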
This also changes the directory structure so that endpoints are now
stored in:
.neon/endpoints/<endpoint id>
instead of:
.neon/pgdatadirs/tenants/<tenant_id>/<endpoint (node) name>
The tenant ID is no longer part of the path. That means that you
cannot have two endpoints with the same name/ID in two different
tenants anymore. That's consistent with how we treat endpoints in the
real control plane and proxy: the endpoint ID must be globally unique.
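As a small sketch of the new layout (the path shape comes from this message; repo_dir and the example endpoint ID are hypothetical):

    from pathlib import Path

    repo_dir = Path(".neon")                       # neon_local repo directory
    datadir = repo_dir / "endpoints" / "ep-main"   # .neon/endpoints/<endpoint id>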
from datetime import timedelta

from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonEnvBuilder, wait_for_last_flush_lsn
from fixtures.utils import query_scalar


#
# Test pageserver get_lsn_by_timestamp API
#
def test_lsn_mapping(neon_env_builder: NeonEnvBuilder):
    env = neon_env_builder.init_start()

    new_timeline_id = env.neon_cli.create_branch("test_lsn_mapping")
    endpoint_main = env.endpoints.create_start("test_lsn_mapping")
    log.info("postgres is running on 'test_lsn_mapping' branch")

    cur = endpoint_main.connect().cursor()
    # Create table, and insert rows, each in a separate transaction.
    # Disable synchronous_commit to make this initialization go faster.
    #
    # Each row contains the current insert LSN and the current timestamp
    # when the row was inserted.
    cur.execute("SET synchronous_commit=off")
    cur.execute("CREATE TABLE foo (x integer)")
    tbl = []
    for i in range(1000):
        cur.execute(f"INSERT INTO foo VALUES({i})")
        # Get the timestamp at UTC
        after_timestamp = query_scalar(cur, "SELECT clock_timestamp()").replace(tzinfo=None)
        tbl.append([i, after_timestamp])

    # Execute one more transaction with synchronous_commit enabled, to flush
    # all the previous transactions
    cur.execute("INSERT INTO foo VALUES (-1)")

    # Wait until WAL is received by pageserver
    wait_for_last_flush_lsn(env, endpoint_main, env.initial_tenant, new_timeline_id)

    with env.pageserver.http_client() as client:
        # Check edge cases: timestamp in the future
        probe_timestamp = tbl[-1][1] + timedelta(hours=1)
        result = client.timeline_get_lsn_by_timestamp(
            env.initial_tenant, new_timeline_id, f"{probe_timestamp.isoformat()}Z"
        )
        assert result == "future"

        # timestamp too far in the past
        probe_timestamp = tbl[0][1] - timedelta(hours=10)
        result = client.timeline_get_lsn_by_timestamp(
            env.initial_tenant, new_timeline_id, f"{probe_timestamp.isoformat()}Z"
        )
        assert result == "past"

        # Probe a bunch of timestamps in the valid range
        for i in range(1, len(tbl), 100):
            probe_timestamp = tbl[i][1]
            # Call get_lsn_by_timestamp to get the LSN
            lsn = client.timeline_get_lsn_by_timestamp(
                env.initial_tenant, new_timeline_id, f"{probe_timestamp.isoformat()}Z"
            )
            # Launch a new read-only node at that LSN, and check that only the rows
            # that were supposed to be committed at that point in time are visible.
            endpoint_here = env.endpoints.create_start(
                branch_name="test_lsn_mapping", endpoint_id="ep-lsn_mapping_read", lsn=lsn
            )
            assert endpoint_here.safe_psql("SELECT max(x) FROM foo")[0][0] == i

            endpoint_here.stop_and_destroy()
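For reference, a minimal sketch of the raw pageserver HTTP call that the timeline_get_lsn_by_timestamp() helper above presumably wraps. The route shape and the plain JSON-string response are assumptions inferred from the test's assertions, not confirmed by this page:

    import requests

    def get_lsn_by_timestamp(pageserver_http: str, tenant_id: str, timeline_id: str, timestamp: str) -> str:
        # Assumed route: GET /v1/tenant/<tenant_id>/timeline/<timeline_id>/get_lsn_by_timestamp
        # where timestamp is an ISO-8601 UTC string such as "2023-03-30T12:34:56.789012Z".
        url = f"{pageserver_http}/v1/tenant/{tenant_id}/timeline/{timeline_id}/get_lsn_by_timestamp"
        res = requests.get(url, params={"timestamp": timestamp})
        res.raise_for_status()
        # Assumed response body: a JSON string holding either an LSN like "0/169C3D8",
        # or the sentinels "future" / "past" asserted in the test above.
        return res.json()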