Use uuid.UUID types for tenants and timelines more

Kirill Bulatov
2022-02-16 11:27:03 +02:00
committed by Kirill Bulatov
parent e5bf520b18
commit ce533835e5
13 changed files with 90 additions and 89 deletions
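
The pattern applied throughout: test fixtures now hold uuid.UUID objects and serialize to the 32-character dash-less .hex form only at external boundaries (CLI argv, page server console commands, config files), parsing inbound strings back with UUID(...). A minimal standalone sketch of that convention (the identifiers below are illustrative, not the fixture API):

    import uuid

    tenant_id = uuid.uuid4()  # a UUID object circulates inside the tests
    assert isinstance(tenant_id, uuid.UUID)

    # serialize only at process boundaries (CLI argv, console commands, TOML)
    cli_args = ['tenant', 'create', tenant_id.hex]
    gc_cmd = f"do_gc {tenant_id.hex} {uuid.uuid4().hex} 0"

    # .hex is the dash-less 32-char form the page server expects; str() keeps dashes
    assert len(tenant_id.hex) == 32 and '-' not in tenant_id.hex
    assert '-' in str(tenant_id)

    # and parsing the hex string round-trips to an equal UUID
    assert uuid.UUID(tenant_id.hex) == tenant_id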

View File

@@ -14,9 +14,8 @@ def test_pageserver_auth(zenith_env_builder: ZenithEnvBuilder):
ps = env.pageserver
-tenant_token = env.auth_keys.generate_tenant_token(env.initial_tenant)
+tenant_token = env.auth_keys.generate_tenant_token(env.initial_tenant.hex)
tenant_http_client = env.pageserver.http_client(tenant_token)
invalid_tenant_token = env.auth_keys.generate_tenant_token(uuid4().hex)
invalid_tenant_http_client = env.pageserver.http_client(invalid_tenant_token)
@@ -29,14 +28,14 @@ def test_pageserver_auth(zenith_env_builder: ZenithEnvBuilder):
ps.safe_psql("set FOO", password=management_token)
# tenant can create branches
-tenant_http_client.branch_create(UUID(env.initial_tenant), 'new1', 'main')
+tenant_http_client.branch_create(env.initial_tenant, 'new1', 'main')
# console can create branches for tenant
-management_http_client.branch_create(UUID(env.initial_tenant), 'new2', 'main')
+management_http_client.branch_create(env.initial_tenant, 'new2', 'main')
# fail to create branch using token with different tenant_id
with pytest.raises(ZenithPageserverApiException,
match='Forbidden: Tenant id mismatch. Permission denied'):
-invalid_tenant_http_client.branch_create(UUID(env.initial_tenant), "new3", "main")
+invalid_tenant_http_client.branch_create(env.initial_tenant, "new3", "main")
# create tenant using management token
management_http_client.tenant_create(uuid4())
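
Since env.initial_tenant is now a uuid.UUID, the UUID(...) wrappers at the call sites above are not just redundant but would fail, because uuid.UUID() expects a string argument. A quick illustration with a made-up value:

    from uuid import UUID, uuid4

    initial_tenant = uuid4()  # what the fixture now stores
    try:
        UUID(initial_tenant)  # wrapping a UUID raises (no .replace on UUID objects)
    except AttributeError:
        pass
    assert UUID(initial_tenant.hex) == initial_tenant  # hex strings still parse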

View File

@@ -119,7 +119,7 @@ def test_branch_behind(zenith_env_builder: ZenithEnvBuilder):
with closing(env.pageserver.connect()) as psconn:
with psconn.cursor(cursor_factory=psycopg2.extras.DictCursor) as pscur:
# call gc to advance latest_gc_cutoff_lsn
-pscur.execute(f"do_gc {env.initial_tenant} {timeline} 0")
+pscur.execute(f"do_gc {env.initial_tenant.hex} {timeline} 0")
row = pscur.fetchone()
print_gc_result(row)

View File

@@ -36,7 +36,7 @@ async def gc(env: ZenithEnv, timeline: str):
psconn = await env.pageserver.connect_async()
while updates_performed < updates_to_perform:
-await psconn.execute(f"do_gc {env.initial_tenant} {timeline} 0")
+await psconn.execute(f"do_gc {env.initial_tenant.hex} {timeline} 0")
# At the same time, run UPDATEs and GC

View File

@@ -56,7 +56,7 @@ def test_old_request_lsn(zenith_simple_env: ZenithEnv):
# Make a lot of updates on a single row, generating a lot of WAL. Trigger
# garbage collections so that the page server will remove old page versions.
for i in range(10):
pscur.execute(f"do_gc {env.initial_tenant} {timeline} 0")
pscur.execute(f"do_gc {env.initial_tenant.hex} {timeline} 0")
for j in range(100):
cur.execute('UPDATE foo SET val = val + 1 WHERE id = 1;')
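
Only the tenant id needs the .hex conversion in these do_gc commands; the timeline variable in these tests is already a plain hex string (read back from Postgres via SHOW zenith.zenith_timeline elsewhere in the suite). A sketch of the resulting command shape, with placeholder values:

    import uuid

    tenant = uuid.uuid4()        # the fixture now stores a UUID object
    timeline = uuid.uuid4().hex  # stands in for the string read from Postgres

    cmd = f"do_gc {tenant.hex} {timeline} 0"
    assert all(len(part) == 32 for part in cmd.split()[1:3])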

View File

@@ -1,19 +1,17 @@
import json
from uuid import uuid4, UUID
-import pytest
-import psycopg2
-import requests
from fixtures.zenith_fixtures import ZenithEnv, ZenithEnvBuilder, ZenithPageserverHttpClient
from typing import cast
+import pytest, psycopg2
pytest_plugins = ("fixtures.zenith_fixtures")
-def check_client(client: ZenithPageserverHttpClient, initial_tenant: str):
+def check_client(client: ZenithPageserverHttpClient, initial_tenant: UUID):
client.check_status()
# check initial tenant is there
-assert initial_tenant in {t['id'] for t in client.tenant_list()}
+assert initial_tenant.hex in {t['id'] for t in client.tenant_list()}
# create new tenant and check it is also there
tenant_id = uuid4()

View File

@@ -50,7 +50,7 @@ def test_layerfiles_gc(zenith_simple_env: ZenithEnv):
cur.execute("DELETE FROM foo")
log.info("Running GC before test")
pscur.execute(f"do_gc {env.initial_tenant} {timeline} 0")
pscur.execute(f"do_gc {env.initial_tenant.hex} {timeline} 0")
row = pscur.fetchone()
print_gc_result(row)
# remember the number of files
@@ -63,7 +63,7 @@ def test_layerfiles_gc(zenith_simple_env: ZenithEnv):
# removing the old image and delta layer.
log.info("Inserting one row and running GC")
cur.execute("INSERT INTO foo VALUES (1)")
pscur.execute(f"do_gc {env.initial_tenant} {timeline} 0")
pscur.execute(f"do_gc {env.initial_tenant.hex} {timeline} 0")
row = pscur.fetchone()
print_gc_result(row)
assert row['layer_relfiles_total'] == layer_relfiles_remain + 2
@@ -77,7 +77,7 @@ def test_layerfiles_gc(zenith_simple_env: ZenithEnv):
cur.execute("INSERT INTO foo VALUES (2)")
cur.execute("INSERT INTO foo VALUES (3)")
pscur.execute(f"do_gc {env.initial_tenant} {timeline} 0")
pscur.execute(f"do_gc {env.initial_tenant.hex} {timeline} 0")
row = pscur.fetchone()
print_gc_result(row)
assert row['layer_relfiles_total'] == layer_relfiles_remain + 2
@@ -89,7 +89,7 @@ def test_layerfiles_gc(zenith_simple_env: ZenithEnv):
cur.execute("INSERT INTO foo VALUES (2)")
cur.execute("INSERT INTO foo VALUES (3)")
pscur.execute(f"do_gc {env.initial_tenant} {timeline} 0")
pscur.execute(f"do_gc {env.initial_tenant.hex} {timeline} 0")
row = pscur.fetchone()
print_gc_result(row)
assert row['layer_relfiles_total'] == layer_relfiles_remain + 2
@@ -98,7 +98,7 @@ def test_layerfiles_gc(zenith_simple_env: ZenithEnv):
# Run GC again, with no changes in the database. Should not remove anything.
log.info("Run GC again, with nothing to do")
pscur.execute(f"do_gc {env.initial_tenant} {timeline} 0")
pscur.execute(f"do_gc {env.initial_tenant.hex} {timeline} 0")
row = pscur.fetchone()
print_gc_result(row)
assert row['layer_relfiles_total'] == layer_relfiles_remain
@@ -111,7 +111,7 @@ def test_layerfiles_gc(zenith_simple_env: ZenithEnv):
log.info("Drop table and run GC again")
cur.execute("DROP TABLE foo")
pscur.execute(f"do_gc {env.initial_tenant} {timeline} 0")
pscur.execute(f"do_gc {env.initial_tenant.hex} {timeline} 0")
row = pscur.fetchone()
print_gc_result(row)

View File

@@ -108,8 +108,8 @@ def load(pg: Postgres, stop_event: threading.Event, load_ok_event: threading.Eve
log.info('load thread stopped')
-def assert_local(pageserver_http_client: ZenithPageserverHttpClient, tenant: str, timeline: str):
-timeline_detail = pageserver_http_client.timeline_detail(UUID(tenant), UUID(timeline))
+def assert_local(pageserver_http_client: ZenithPageserverHttpClient, tenant: UUID, timeline: str):
+timeline_detail = pageserver_http_client.timeline_detail(tenant, UUID(timeline))
assert timeline_detail.get('type') == "Local", timeline_detail
return timeline_detail
@@ -127,7 +127,7 @@ def test_tenant_relocation(zenith_env_builder: ZenithEnvBuilder,
# create folder for remote storage mock
remote_storage_mock_path = env.repo_dir / 'local_fs_remote_storage'
tenant = env.create_tenant("74ee8b079a0e437eb0afea7d26a07209")
tenant = env.create_tenant(UUID("74ee8b079a0e437eb0afea7d26a07209"))
log.info("tenant to relocate %s", tenant)
env.zenith_cli.create_branch("test_tenant_relocation", "main", tenant_id=tenant)
@@ -167,11 +167,11 @@ def test_tenant_relocation(zenith_env_builder: ZenithEnvBuilder,
# run checkpoint manually to be sure that data landed in remote storage
with closing(env.pageserver.connect()) as psconn:
with psconn.cursor() as pscur:
pscur.execute(f"do_gc {tenant} {timeline}")
pscur.execute(f"do_gc {tenant.hex} {timeline}")
# ensure upload is completed
pageserver_http_client = env.pageserver.http_client()
-timeline_detail = pageserver_http_client.timeline_detail(UUID(tenant), UUID(timeline))
+timeline_detail = pageserver_http_client.timeline_detail(tenant, UUID(timeline))
assert timeline_detail['disk_consistent_lsn'] == timeline_detail['timeline_state']['Ready']
log.info("inititalizing new pageserver")
@@ -194,7 +194,7 @@ def test_tenant_relocation(zenith_env_builder: ZenithEnvBuilder,
new_pageserver_http_port):
# call to attach timeline to new pageserver
-new_pageserver_http_client.timeline_attach(UUID(tenant), UUID(timeline))
+new_pageserver_http_client.timeline_attach(tenant, UUID(timeline))
# FIXME cannot handle duplicate download requests, subject to fix in https://github.com/zenithdb/zenith/issues/997
time.sleep(5)
# new pageserver should be in sync (modulo wal tail or vacuum activity) with the old one because there were no new writes since the checkpoint
@@ -241,7 +241,7 @@ def test_tenant_relocation(zenith_env_builder: ZenithEnvBuilder,
# detach tenant from old pageserver before we check
# that all the data is there to be sure that old pageserver
# is no longer involved, and if it is, we will see the errors
-pageserver_http_client.timeline_detach(UUID(tenant), UUID(timeline))
+pageserver_http_client.timeline_detach(tenant, UUID(timeline))
with pg_cur(tenant_pg) as cur:
# check that data is still there

View File

@@ -13,7 +13,7 @@ def test_timeline_size(zenith_simple_env: ZenithEnv):
env.zenith_cli.create_branch("test_timeline_size", "empty")
client = env.pageserver.http_client()
-res = client.branch_detail(UUID(env.initial_tenant), "test_timeline_size")
+res = client.branch_detail(env.initial_tenant, "test_timeline_size")
assert res["current_logical_size"] == res["current_logical_size_non_incremental"]
pgmain = env.postgres.create_start("test_timeline_size")
@@ -31,11 +31,11 @@ def test_timeline_size(zenith_simple_env: ZenithEnv):
FROM generate_series(1, 10) g
""")
-res = client.branch_detail(UUID(env.initial_tenant), "test_timeline_size")
+res = client.branch_detail(env.initial_tenant, "test_timeline_size")
assert res["current_logical_size"] == res["current_logical_size_non_incremental"]
cur.execute("TRUNCATE foo")
-res = client.branch_detail(UUID(env.initial_tenant), "test_timeline_size")
+res = client.branch_detail(env.initial_tenant, "test_timeline_size")
assert res["current_logical_size"] == res["current_logical_size_non_incremental"]
@@ -71,7 +71,7 @@ def test_timeline_size_quota(zenith_env_builder: ZenithEnvBuilder):
env.zenith_cli.create_branch("test_timeline_size_quota", "main")
client = env.pageserver.http_client()
-res = client.branch_detail(UUID(env.initial_tenant), "test_timeline_size_quota")
+res = client.branch_detail(env.initial_tenant, "test_timeline_size_quota")
assert res["current_logical_size"] == res["current_logical_size_non_incremental"]
pgmain = env.postgres.create_start(

View File

@@ -65,7 +65,7 @@ def test_many_timelines(zenith_env_builder: ZenithEnvBuilder):
env.zenith_cli.create_branch(branch, "main")
pgs.append(env.postgres.create_start(branch))
-tenant_id = uuid.UUID(env.initial_tenant)
+tenant_id = env.initial_tenant
def collect_metrics(message: str) -> List[BranchMetrics]:
with env.pageserver.http_client() as pageserver_http:
@@ -321,16 +321,16 @@ class ProposerPostgres(PgProtocol):
def __init__(self,
pgdata_dir: str,
pg_bin,
-timeline_id: str,
-tenant_id: str,
+timeline_id: uuid.UUID,
+tenant_id: uuid.UUID,
listen_addr: str,
port: int):
super().__init__(host=listen_addr, port=port, username='zenith_admin')
self.pgdata_dir: str = pgdata_dir
self.pg_bin: PgBin = pg_bin
-self.timeline_id: str = timeline_id
-self.tenant_id: str = tenant_id
+self.timeline_id: uuid.UUID = timeline_id
+self.tenant_id: uuid.UUID = tenant_id
self.listen_addr: str = listen_addr
self.port: int = port
@@ -350,8 +350,8 @@ class ProposerPostgres(PgProtocol):
cfg = [
"synchronous_standby_names = 'walproposer'\n",
"shared_preload_libraries = 'zenith'\n",
f"zenith.zenith_timeline = '{self.timeline_id}'\n",
f"zenith.zenith_tenant = '{self.tenant_id}'\n",
f"zenith.zenith_timeline = '{self.timeline_id.hex}'\n",
f"zenith.zenith_tenant = '{self.tenant_id.hex}'\n",
f"zenith.page_server_connstring = ''\n",
f"wal_acceptors = '{wal_acceptors}'\n",
f"listen_addresses = '{self.listen_addr}'\n",
@@ -408,8 +408,8 @@ def test_sync_safekeepers(zenith_env_builder: ZenithEnvBuilder,
zenith_env_builder.num_safekeepers = 3
env = zenith_env_builder.init()
-timeline_id = uuid.uuid4().hex
-tenant_id = uuid.uuid4().hex
+timeline_id = uuid.uuid4()
+tenant_id = uuid.uuid4()
# write config for proposer
pgdata_dir = os.path.join(env.repo_dir, "proposer_pgdata")
@@ -495,15 +495,15 @@ class SafekeeperEnv:
self.bin_safekeeper = os.path.join(str(zenith_binpath), 'safekeeper')
self.safekeepers: Optional[List[subprocess.CompletedProcess[Any]]] = None
self.postgres: Optional[ProposerPostgres] = None
-self.tenant_id: Optional[str] = None
-self.timeline_id: Optional[str] = None
+self.tenant_id: Optional[uuid.UUID] = None
+self.timeline_id: Optional[uuid.UUID] = None
def init(self) -> "SafekeeperEnv":
assert self.postgres is None, "postgres is already initialized"
assert self.safekeepers is None, "safekeepers are already initialized"
-self.timeline_id = uuid.uuid4().hex
-self.tenant_id = uuid.uuid4().hex
+self.timeline_id = uuid.uuid4()
+self.tenant_id = uuid.uuid4()
mkdir_if_needed(str(self.repo_dir))
# Create config and a Safekeeper object for each safekeeper

View File

@@ -11,12 +11,12 @@ pytest_plugins = ("fixtures.zenith_fixtures")
def helper_compare_branch_list(pageserver_http_client: ZenithPageserverHttpClient,
env: ZenithEnv,
-initial_tenant: str):
+initial_tenant: uuid.UUID):
"""
Compare branches list returned by CLI and directly via API.
Filters out branches created by other tests.
"""
-branches = pageserver_http_client.branch_list(uuid.UUID(initial_tenant))
+branches = pageserver_http_client.branch_list(initial_tenant)
branches_api = sorted(map(lambda b: cast(str, b['name']), branches))
branches_api = [b for b in branches_api if b.startswith('test_cli_') or b in ('empty', 'main')]
@@ -43,7 +43,6 @@ def test_cli_branch_list(zenith_simple_env: ZenithEnv):
env.zenith_cli.create_branch("test_cli_branch_list_main", "empty")
helper_compare_branch_list(pageserver_http_client, env, env.initial_tenant)
# Create a nested branch
res = env.zenith_cli.create_branch("test_cli_branch_list_nested", "test_cli_branch_list_main")
assert res.stderr == ''
@@ -76,14 +75,14 @@ def test_cli_tenant_list(zenith_simple_env: ZenithEnv):
helper_compare_tenant_list(pageserver_http_client, env)
# Create new tenant
-tenant1 = uuid.uuid4().hex
+tenant1 = uuid.uuid4()
env.zenith_cli.create_tenant(tenant1)
# check tenant1 appeared
helper_compare_tenant_list(pageserver_http_client, env)
# Create new tenant
-tenant2 = uuid.uuid4().hex
+tenant2 = uuid.uuid4()
env.zenith_cli.create_tenant(tenant2)
# check tenant2 appeared
@@ -92,9 +91,9 @@ def test_cli_tenant_list(zenith_simple_env: ZenithEnv):
res = env.zenith_cli.list_tenants()
tenants = sorted(map(lambda t: t.split()[0], res.stdout.splitlines()))
-assert env.initial_tenant in tenants
-assert tenant1 in tenants
-assert tenant2 in tenants
+assert env.initial_tenant.hex in tenants
+assert tenant1.hex in tenants
+assert tenant2.hex in tenants
def test_cli_ipv4_listeners(zenith_env_builder: ZenithEnvBuilder):
@@ -120,15 +119,13 @@ def test_cli_start_stop(zenith_env_builder: ZenithEnvBuilder):
env = zenith_env_builder.init()
# Stop default ps/sk
res = env.zenith_cli(["pageserver", "stop"])
res.check_returncode()
res = env.zenith_cli(["safekeeper", "stop"])
res.check_returncode()
env.zenith_cli.pageserver_stop()
env.zenith_cli.safekeeper_stop()
# Default start
res = env.zenith_cli(["start"])
res = env.zenith_cli.raw_cli(["start"])
res.check_returncode()
# Default stop
res = env.zenith_cli(["stop"])
res = env.zenith_cli.raw_cli(["stop"])
res.check_returncode()

View File

@@ -8,6 +8,7 @@ import timeit
import calendar
import enum
from datetime import datetime
+import uuid
import pytest
from _pytest.config import Config
from _pytest.terminal import TerminalReporter
@@ -276,11 +277,11 @@ class ZenithBenchmarker:
assert matches
return int(round(float(matches.group(1))))
-def get_timeline_size(self, repo_dir: Path, tenantid: str, timelineid: str):
+def get_timeline_size(self, repo_dir: Path, tenantid: uuid.UUID, timelineid: str):
"""
Calculate the on-disk size of a timeline
"""
path = "{}/tenants/{}/timelines/{}".format(repo_dir, tenantid, timelineid)
path = "{}/tenants/{}/timelines/{}".format(repo_dir, tenantid.hex, timelineid)
totalbytes = 0
for root, dirs, files in os.walk(path):

View File

@@ -65,7 +65,7 @@ class ZenithCompare(PgCompare):
# We only use one branch and one timeline
self.branch = branch_name
self.env.zenith_cli(["branch", self.branch, "empty"])
self.env.zenith_cli.create_branch(self.branch, "empty")
self._pg = self.env.postgres.create_start(self.branch)
self.timeline = self.pg.safe_psql("SHOW zenith.zenith_timeline")[0][0]
@@ -86,7 +86,7 @@ class ZenithCompare(PgCompare):
return self._pg_bin
def flush(self):
self.pscur.execute(f"do_gc {self.env.initial_tenant} {self.timeline} 0")
self.pscur.execute(f"do_gc {self.env.initial_tenant.hex} {self.timeline} 0")
def report_peak_memory_use(self) -> None:
self.zenbenchmark.record("peak_mem",

View File

@@ -525,11 +525,11 @@ class ZenithEnv:
# generate initial tenant ID here instead of letting 'zenith init' generate it,
# so that we don't need to dig it out of the config file afterwards.
-self.initial_tenant = uuid.uuid4().hex
+self.initial_tenant = uuid.uuid4()
# Create a config file corresponding to the options
toml = textwrap.dedent(f"""
-default_tenantid = '{self.initial_tenant}'
+default_tenantid = '{self.initial_tenant.hex}'
""")
# Create config for pageserver
@@ -586,9 +586,9 @@ sync = false # Disable fsyncs to make the tests go faster
""" Get list of safekeeper endpoints suitable for wal_acceptors GUC """
return ','.join([f'localhost:{wa.port.pg}' for wa in self.safekeepers])
-def create_tenant(self, tenant_id: Optional[str] = None):
+def create_tenant(self, tenant_id: Optional[uuid.UUID] = None) -> uuid.UUID:
if tenant_id is None:
-tenant_id = uuid.uuid4().hex
+tenant_id = uuid.uuid4()
self.zenith_cli.create_tenant(tenant_id)
return tenant_id
@@ -799,11 +799,11 @@ class ZenithCli:
self.env = env
pass
-def create_tenant(self, tenant_id: Optional[str] = None) -> uuid.UUID:
+def create_tenant(self, tenant_id: Optional[uuid.UUID] = None) -> uuid.UUID:
if tenant_id is None:
-tenant_id = uuid.uuid4().hex
-self.raw_cli(['tenant', 'create', tenant_id])
-return uuid.UUID(tenant_id)
+tenant_id = uuid.uuid4()
+self.raw_cli(['tenant', 'create', tenant_id.hex])
+return tenant_id
def list_tenants(self) -> 'subprocess.CompletedProcess[str]':
return self.raw_cli(['tenant', 'list'])
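
Both create_tenant variants now share one contract: accept an optional uuid.UUID, generate one when absent, pass only the .hex form on the command line, and return the UUID to the caller. A self-contained sketch of that argv construction (the helper name is illustrative):

    import uuid
    from typing import List, Optional, Tuple

    def create_tenant_args(tenant_id: Optional[uuid.UUID] = None) -> Tuple[uuid.UUID, List[str]]:
        # mirrors the method above: default, serialize for argv, hand back the UUID
        if tenant_id is None:
            tenant_id = uuid.uuid4()
        return tenant_id, ['tenant', 'create', tenant_id.hex]

    tid, argv = create_tenant_args()
    assert isinstance(tid, uuid.UUID) and argv[-1] == tid.hex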
@@ -811,18 +811,19 @@ class ZenithCli:
def create_branch(self,
branch_name: str,
starting_point: str,
-tenant_id: Optional[str] = None) -> 'subprocess.CompletedProcess[str]':
+tenant_id: Optional[uuid.UUID] = None) -> 'subprocess.CompletedProcess[str]':
args = ['branch']
if tenant_id is not None:
-args.extend(['--tenantid', tenant_id])
+args.extend(['--tenantid', tenant_id.hex])
args.extend([branch_name, starting_point])
return self.raw_cli(args)
-def list_branches(self, tenant_id: Optional[str] = None) -> 'subprocess.CompletedProcess[str]':
+def list_branches(self,
+tenant_id: Optional[uuid.UUID] = None) -> 'subprocess.CompletedProcess[str]':
args = ['branch']
if tenant_id is not None:
-args.extend(['--tenantid', tenant_id])
+args.extend(['--tenantid', tenant_id.hex])
return self.raw_cli(args)
def init(self, config_toml: str) -> 'subprocess.CompletedProcess[str]':
@@ -851,23 +852,26 @@ class ZenithCli:
def safekeeper_start(self, name: str) -> 'subprocess.CompletedProcess[str]':
return self.raw_cli(['safekeeper', 'start', name])
-def safekeeper_stop(self, name: str, immediate=False) -> 'subprocess.CompletedProcess[str]':
+def safekeeper_stop(self,
+name: Optional[str] = None,
+immediate=False) -> 'subprocess.CompletedProcess[str]':
args = ['safekeeper', 'stop']
if immediate:
args.extend(['-m', 'immediate'])
-args.append(name)
+if name is not None:
+args.append(name)
return self.raw_cli(args)
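
Making name optional is what allows test_cli_start_stop above to call env.zenith_cli.safekeeper_stop() with no arguments. A standalone sketch of the argv the new method builds (the free function here is illustrative):

    from typing import List, Optional

    def safekeeper_stop_args(name: Optional[str] = None, immediate: bool = False) -> List[str]:
        args = ['safekeeper', 'stop']
        if immediate:
            args.extend(['-m', 'immediate'])
        if name is not None:
            args.append(name)
        return args

    assert safekeeper_stop_args() == ['safekeeper', 'stop']
    assert safekeeper_stop_args('sk1', immediate=True) == ['safekeeper', 'stop', '-m', 'immediate', 'sk1']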
def pg_create(
self,
node_name: str,
-tenant_id: Optional[str] = None,
+tenant_id: Optional[uuid.UUID] = None,
timeline_spec: Optional[str] = None,
port: Optional[int] = None,
) -> 'subprocess.CompletedProcess[str]':
args = ['pg', 'create']
if tenant_id is not None:
-args.extend(['--tenantid', tenant_id])
+args.extend(['--tenantid', tenant_id.hex])
if port is not None:
args.append(f'--port={port}')
args.append(node_name)
@@ -878,13 +882,13 @@ class ZenithCli:
def pg_start(
self,
node_name: str,
-tenant_id: Optional[str] = None,
+tenant_id: Optional[uuid.UUID] = None,
timeline_spec: Optional[str] = None,
port: Optional[int] = None,
) -> 'subprocess.CompletedProcess[str]':
args = ['pg', 'start']
if tenant_id is not None:
-args.extend(['--tenantid', tenant_id])
+args.extend(['--tenantid', tenant_id.hex])
if port is not None:
args.append(f'--port={port}')
args.append(node_name)
@@ -896,12 +900,12 @@ class ZenithCli:
def pg_stop(
self,
node_name: str,
-tenant_id: Optional[str] = None,
+tenant_id: Optional[uuid.UUID] = None,
destroy=False,
) -> 'subprocess.CompletedProcess[str]':
args = ['pg', 'stop']
if tenant_id is not None:
-args.extend(['--tenantid', tenant_id])
+args.extend(['--tenantid', tenant_id.hex])
if destroy:
args.append('--destroy')
args.append(node_name)
@@ -1156,7 +1160,7 @@ def vanilla_pg(test_output_dir: str) -> Iterator[VanillaPostgres]:
class Postgres(PgProtocol):
""" An object representing a running postgres daemon. """
-def __init__(self, env: ZenithEnv, tenant_id: str, port: int):
+def __init__(self, env: ZenithEnv, tenant_id: uuid.UUID, port: int):
super().__init__(host='localhost', port=port, username='zenith_admin')
self.env = env
@@ -1188,7 +1192,7 @@ class Postgres(PgProtocol):
port=self.port,
timeline_spec=branch)
self.node_name = node_name
-path = pathlib.Path('pgdatadirs') / 'tenants' / self.tenant_id / self.node_name
+path = pathlib.Path('pgdatadirs') / 'tenants' / self.tenant_id.hex / self.node_name
self.pgdata_dir = os.path.join(self.env.repo_dir, path)
if config_lines is None:
@@ -1219,7 +1223,7 @@ class Postgres(PgProtocol):
def pg_data_dir_path(self) -> str:
""" Path to data directory """
assert self.node_name
-path = pathlib.Path('pgdatadirs') / 'tenants' / self.tenant_id / self.node_name
+path = pathlib.Path('pgdatadirs') / 'tenants' / self.tenant_id.hex / self.node_name
return os.path.join(self.env.repo_dir, path)
def pg_xact_dir_path(self) -> str:
@@ -1333,7 +1337,7 @@ class PostgresFactory:
def create_start(self,
node_name: str = "main",
branch: Optional[str] = None,
-tenant_id: Optional[str] = None,
+tenant_id: Optional[uuid.UUID] = None,
config_lines: Optional[List[str]] = None) -> Postgres:
pg = Postgres(
@@ -1353,7 +1357,7 @@ class PostgresFactory:
def create(self,
node_name: str = "main",
branch: Optional[str] = None,
-tenant_id: Optional[str] = None,
+tenant_id: Optional[uuid.UUID] = None,
config_lines: Optional[List[str]] = None) -> Postgres:
pg = Postgres(
@@ -1421,7 +1425,9 @@ class Safekeeper:
self.env.zenith_cli.safekeeper_stop(self.name, immediate)
return self
-def append_logical_message(self, tenant_id: str, timeline_id: str,
+def append_logical_message(self,
+tenant_id: uuid.UUID,
+timeline_id: uuid.UUID,
request: Dict[str, Any]) -> Dict[str, Any]:
"""
Send JSON_CTRL query to append LogicalMessage to WAL and modify
@@ -1431,7 +1437,7 @@ class Safekeeper:
# "replication=0" hacks psycopg not to send additional queries
# on startup, see https://github.com/psycopg/psycopg2/pull/482
connstr = f"host=localhost port={self.port.pg} replication=0 options='-c ztimelineid={timeline_id} ztenantid={tenant_id}'"
connstr = f"host=localhost port={self.port.pg} replication=0 options='-c ztimelineid={timeline_id.hex} ztenantid={tenant_id.hex}'"
with closing(psycopg2.connect(connstr)) as conn:
# server doesn't support transactions
@@ -1601,7 +1607,7 @@ def check_restored_datadir_content(test_output_dir: str, env: ZenithEnv, pg: Pos
{psql_path} \
--no-psqlrc \
postgres://localhost:{env.pageserver.service_port.pg} \
--c 'basebackup {pg.tenant_id} {timeline}' \
+-c 'basebackup {pg.tenant_id.hex} {timeline}' \
| tar -x -C {restored_dir_path}
"""