Rename custom GUCs:

- neon.tenantid -> neon.tenant_id
- neon.timelineid -> neon.timeline_id
This commit is contained in:
Anastasia Lubennikova
2022-05-26 21:18:52 +03:00
parent 6a867bce6d
commit 67d6ff4100
20 changed files with 41 additions and 41 deletions

View File

@@ -121,12 +121,12 @@ fn main() -> Result<()> {
let tenant = spec
.cluster
.settings
.find("neon.tenantid")
.find("neon.tenant_id")
.expect("tenant id should be provided");
let timeline = spec
.cluster
.settings
.find("neon.timelineid")
.find("neon.timeline_id")
.expect("tenant id should be provided");
let compute_state = ComputeNode {

View File

@@ -150,7 +150,7 @@
"vartype": "integer"
},
{
"name": "neon.tenantid",
"name": "neon.tenant_id",
"value": "b0554b632bd4d547a63b86c3630317e8",
"vartype": "string"
},
@@ -160,7 +160,7 @@
"vartype": "integer"
},
{
"name": "neon.timelineid",
"name": "neon.timeline_id",
"value": "2414a61ffc94e428f14b5758fe308e13",
"vartype": "string"
},

View File

@@ -28,7 +28,7 @@ mod pg_helpers_tests {
assert_eq!(
spec.cluster.settings.as_pg_settings(),
"fsync = off\nwal_level = replica\nhot_standby = on\nsafekeepers = '127.0.0.1:6502,127.0.0.1:6503,127.0.0.1:6501'\nwal_log_hints = on\nlog_connections = on\nshared_buffers = 32768\nport = 55432\nmax_connections = 100\nmax_wal_senders = 10\nlisten_addresses = '0.0.0.0'\nwal_sender_timeout = 0\npassword_encryption = md5\nmaintenance_work_mem = 65536\nmax_parallel_workers = 8\nmax_worker_processes = 8\nneon.tenantid = 'b0554b632bd4d547a63b86c3630317e8'\nmax_replication_slots = 10\nneon.timelineid = '2414a61ffc94e428f14b5758fe308e13'\nshared_preload_libraries = 'neon'\nsynchronous_standby_names = 'walproposer'\nneon.pageserver_connstring = 'host=127.0.0.1 port=6400'"
"fsync = off\nwal_level = replica\nhot_standby = on\nsafekeepers = '127.0.0.1:6502,127.0.0.1:6503,127.0.0.1:6501'\nwal_log_hints = on\nlog_connections = on\nshared_buffers = 32768\nport = 55432\nmax_connections = 100\nmax_wal_senders = 10\nlisten_addresses = '0.0.0.0'\nwal_sender_timeout = 0\npassword_encryption = md5\nmaintenance_work_mem = 65536\nmax_parallel_workers = 8\nmax_worker_processes = 8\nneon.tenant_id = 'b0554b632bd4d547a63b86c3630317e8'\nmax_replication_slots = 10\nneon.timeline_id = '2414a61ffc94e428f14b5758fe308e13'\nshared_preload_libraries = 'neon'\nsynchronous_standby_names = 'walproposer'\nneon.pageserver_connstring = 'host=127.0.0.1 port=6400'"
);
}

View File

@@ -148,8 +148,8 @@ impl PostgresNode {
// Read a few options from the config file
let context = format!("in config file {}", cfg_path_str);
let port: u16 = conf.parse_field("port", &context)?;
let timeline_id: ZTimelineId = conf.parse_field("neon.timelineid", &context)?;
let tenant_id: ZTenantId = conf.parse_field("neon.tenantid", &context)?;
let timeline_id: ZTimelineId = conf.parse_field("neon.timeline_id", &context)?;
let tenant_id: ZTenantId = conf.parse_field("neon.tenant_id", &context)?;
let uses_wal_proposer = conf.get("safekeepers").is_some();
// parse recovery_target_lsn, if any
@@ -306,8 +306,8 @@ impl PostgresNode {
conf.append("shared_preload_libraries", "neon");
conf.append_line("");
conf.append("neon.pageserver_connstring", &pageserver_connstr);
conf.append("neon.tenantid", &self.tenant_id.to_string());
conf.append("neon.timelineid", &self.timeline_id.to_string());
conf.append("neon.tenant_id", &self.tenant_id.to_string());
conf.append("neon.timeline_id", &self.timeline_id.to_string());
if let Some(lsn) = self.lsn {
conf.append("recovery_target_lsn", &lsn.to_string());
}

View File

@@ -30,7 +30,7 @@ def test_ancestor_branch(zenith_env_builder: ZenithEnvBuilder):
pg_branch0 = env.postgres.create_start('main', tenant_id=tenant)
branch0_cur = pg_branch0.connect().cursor()
branch0_cur.execute("SHOW neon.timelineid")
branch0_cur.execute("SHOW neon.timeline_id")
branch0_timeline = branch0_cur.fetchone()[0]
log.info(f"b0 timeline {branch0_timeline}")
@@ -55,7 +55,7 @@ def test_ancestor_branch(zenith_env_builder: ZenithEnvBuilder):
log.info("postgres is running on 'branch1' branch")
branch1_cur = pg_branch1.connect().cursor()
branch1_cur.execute("SHOW neon.timelineid")
branch1_cur.execute("SHOW neon.timeline_id")
branch1_timeline = branch1_cur.fetchone()[0]
log.info(f"b1 timeline {branch1_timeline}")
@@ -79,7 +79,7 @@ def test_ancestor_branch(zenith_env_builder: ZenithEnvBuilder):
log.info("postgres is running on 'branch2' branch")
branch2_cur = pg_branch2.connect().cursor()
branch2_cur.execute("SHOW neon.timelineid")
branch2_cur.execute("SHOW neon.timeline_id")
branch2_timeline = branch2_cur.fetchone()[0]
log.info(f"b2 timeline {branch2_timeline}")

View File

@@ -31,7 +31,7 @@ def test_branch_behind(zenith_env_builder: ZenithEnvBuilder):
main_pg_conn = pgmain.connect()
main_cur = main_pg_conn.cursor()
main_cur.execute("SHOW neon.timelineid")
main_cur.execute("SHOW neon.timeline_id")
timeline = main_cur.fetchone()[0]
# Create table, and insert the first 100 rows

View File

@@ -26,7 +26,7 @@ def test_broken_timeline(zenith_env_builder: ZenithEnvBuilder):
cur.execute("CREATE TABLE t(key int primary key, value text)")
cur.execute("INSERT INTO t SELECT generate_series(1,100), 'payload'")
cur.execute("SHOW neon.timelineid")
cur.execute("SHOW neon.timeline_id")
timeline_id = cur.fetchone()[0]
pg.stop()
tenant_timelines.append((tenant_id, timeline_id, pg))

View File

@@ -62,7 +62,7 @@ def test_gc_aggressive(zenith_env_builder: ZenithEnvBuilder):
conn = pg.connect()
cur = conn.cursor()
cur.execute("SHOW neon.timelineid")
cur.execute("SHOW neon.timeline_id")
timeline = cur.fetchone()[0]
# Create table, and insert the first 100 rows

View File

@@ -26,7 +26,7 @@ def test_old_request_lsn(zenith_env_builder: ZenithEnvBuilder):
cur = pg_conn.cursor()
# Get the timeline ID of our branch. We need it for the 'do_gc' command
cur.execute("SHOW neon.timelineid")
cur.execute("SHOW neon.timeline_id")
timeline = cur.fetchone()[0]
psconn = env.pageserver.connect()

View File

@@ -25,7 +25,7 @@ def test_pitr_gc(zenith_env_builder: ZenithEnvBuilder):
main_pg_conn = pgmain.connect()
main_cur = main_pg_conn.cursor()
main_cur.execute("SHOW neon.timelineid")
main_cur.execute("SHOW neon.timeline_id")
timeline = main_cur.fetchone()[0]
# Create table

View File

@@ -48,8 +48,8 @@ def test_remote_storage_backup_and_restore(zenith_env_builder: ZenithEnvBuilder,
client = env.pageserver.http_client()
tenant_id = pg.safe_psql("show neon.tenantid")[0][0]
timeline_id = pg.safe_psql("show neon.timelineid")[0][0]
tenant_id = pg.safe_psql("show neon.tenant_id")[0][0]
timeline_id = pg.safe_psql("show neon.timeline_id")[0][0]
checkpoint_numbers = range(1, 3)

View File

@@ -130,7 +130,7 @@ def test_tenant_relocation(zenith_env_builder: ZenithEnvBuilder,
with closing(tenant_pg.connect()) as conn:
with conn.cursor() as cur:
# save timeline for later gc call
cur.execute("SHOW neon.timelineid")
cur.execute("SHOW neon.timeline_id")
timeline = UUID(cur.fetchone()[0])
log.info("timeline to relocate %s", timeline.hex)

View File

@@ -21,8 +21,8 @@ async def tenant_workload(env: ZenithEnv, pg: Postgres):
pg_conn = await pg.connect_async()
tenant_id = await pg_conn.fetchval("show neon.tenantid")
timeline_id = await pg_conn.fetchval("show neon.timelineid")
tenant_id = await pg_conn.fetchval("show neon.tenant_id")
timeline_id = await pg_conn.fetchval("show neon.timeline_id")
await pg_conn.execute("CREATE TABLE t(key int primary key, value text)")
for i in range(1, 100):
@@ -82,9 +82,9 @@ def test_tenants_many(zenith_env_builder: ZenithEnvBuilder, storage_type: str):
for tenant, pg in tenants_pgs:
with closing(pg.connect()) as conn:
with conn.cursor() as cur:
cur.execute("show neon.tenantid")
cur.execute("show neon.tenant_id")
tenant_id = cur.fetchone()[0]
cur.execute("show neon.timelineid")
cur.execute("show neon.timeline_id")
timeline_id = cur.fetchone()[0]
cur.execute("SELECT pg_current_wal_flush_lsn()")
current_lsn = lsn_from_hex(cur.fetchone()[0])

View File

@@ -21,7 +21,7 @@ def test_timeline_size(zenith_simple_env: ZenithEnv):
with closing(pgmain.connect()) as conn:
with conn.cursor() as cur:
cur.execute("SHOW neon.timelineid")
cur.execute("SHOW neon.timeline_id")
# Create table, and insert the first 100 rows
cur.execute("CREATE TABLE foo (t text)")

View File

@@ -337,8 +337,8 @@ def test_broker(zenith_env_builder: ZenithEnvBuilder):
pg.safe_psql("CREATE TABLE t(key int primary key, value text)")
# learn zenith timeline from compute
tenant_id = pg.safe_psql("show neon.tenantid")[0][0]
timeline_id = pg.safe_psql("show neon.timelineid")[0][0]
tenant_id = pg.safe_psql("show neon.tenant_id")[0][0]
timeline_id = pg.safe_psql("show neon.timeline_id")[0][0]
# wait until remote_consistent_lsn gets advanced on all safekeepers
clients = [sk.http_client() for sk in env.safekeepers]
@@ -384,8 +384,8 @@ def test_wal_removal(zenith_env_builder: ZenithEnvBuilder):
cur.execute('CREATE TABLE t(key int primary key, value text)')
cur.execute("INSERT INTO t SELECT generate_series(1,100000), 'payload'")
tenant_id = pg.safe_psql("show neon.tenantid")[0][0]
timeline_id = pg.safe_psql("show neon.timelineid")[0][0]
tenant_id = pg.safe_psql("show neon.tenant_id")[0][0]
timeline_id = pg.safe_psql("show neon.timeline_id")[0][0]
# force checkpoint to advance remote_consistent_lsn
with closing(env.pageserver.connect()) as psconn:
@@ -498,8 +498,8 @@ class ProposerPostgres(PgProtocol):
cfg = [
"synchronous_standby_names = 'walproposer'\n",
"shared_preload_libraries = 'neon'\n",
f"neon.timelineid = '{self.timeline_id.hex}'\n",
f"neon.tenantid = '{self.tenant_id.hex}'\n",
f"neon.timeline_id = '{self.timeline_id.hex}'\n",
f"neon.tenant_id = '{self.tenant_id.hex}'\n",
f"neon.pageserver_connstring = ''\n",
f"safekeepers = '{safekeepers}'\n",
f"listen_addresses = '{self.listen_addr}'\n",
@@ -612,8 +612,8 @@ def test_timeline_status(zenith_env_builder: ZenithEnvBuilder):
wa_http_cli.check_status()
# learn zenith timeline from compute
tenant_id = pg.safe_psql("show neon.tenantid")[0][0]
timeline_id = pg.safe_psql("show neon.timelineid")[0][0]
tenant_id = pg.safe_psql("show neon.tenant_id")[0][0]
timeline_id = pg.safe_psql("show neon.timeline_id")[0][0]
# fetch something sensible from status
tli_status = wa_http_cli.timeline_status(tenant_id, timeline_id)
@@ -798,8 +798,8 @@ def test_replace_safekeeper(zenith_env_builder: ZenithEnvBuilder):
pg.start()
# learn zenith timeline from compute
tenant_id = pg.safe_psql("show neon.tenantid")[0][0]
timeline_id = pg.safe_psql("show neon.timelineid")[0][0]
tenant_id = pg.safe_psql("show neon.tenant_id")[0][0]
timeline_id = pg.safe_psql("show neon.timeline_id")[0][0]
execute_payload(pg)
show_statuses(env.safekeepers, tenant_id, timeline_id)

View File

@@ -151,8 +151,8 @@ async def run_restarts_under_load(env: ZenithEnv,
test_timeout_at = time.monotonic() + 5 * 60
pg_conn = await pg.connect_async()
tenant_id = await pg_conn.fetchval("show neon.tenantid")
timeline_id = await pg_conn.fetchval("show neon.timelineid")
tenant_id = await pg_conn.fetchval("show neon.tenant_id")
timeline_id = await pg_conn.fetchval("show neon.timeline_id")
bank = BankClient(pg_conn, n_accounts=n_accounts, init_amount=init_amount)
# create tables and initial balances

View File

@@ -19,7 +19,7 @@ def test_wal_restore(zenith_env_builder: ZenithEnvBuilder,
env.zenith_cli.create_branch("test_wal_restore")
pg = env.postgres.create_start('test_wal_restore')
pg.safe_psql("create table t as select generate_series(1,300000)")
tenant_id = pg.safe_psql("show neon.tenantid")[0][0]
tenant_id = pg.safe_psql("show neon.tenant_id")[0][0]
env.zenith_cli.pageserver_stop()
port = port_distributor.get_port()
data_dir = os.path.join(test_output_dir, 'pgsql.restored')

View File

@@ -66,7 +66,7 @@ class ZenithCompare(PgCompare):
# We only use one branch and one timeline
self.env.zenith_cli.create_branch(branch_name, 'empty')
self._pg = self.env.postgres.create_start(branch_name)
self.timeline = self.pg.safe_psql("SHOW neon.timelineid")[0][0]
self.timeline = self.pg.safe_psql("SHOW neon.timeline_id")[0][0]
# Long-lived cursor, useful for flushing
self.psconn = self.env.pageserver.connect()

View File

@@ -2039,7 +2039,7 @@ def check_restored_datadir_content(test_output_dir: str, env: ZenithEnv, pg: Pos
# Get the timeline ID. We need it for the 'basebackup' command
with closing(pg.connect()) as conn:
with conn.cursor() as cur:
cur.execute("SHOW neon.timelineid")
cur.execute("SHOW neon.timeline_id")
timeline = cur.fetchone()[0]
# stop postgres to ensure that files won't change