Rename contrib/zenith to contrib/neon. Rename custom GUCs:

- zenith.page_server_connstring -> neon.pageserver_connstring
- zenith.zenith_tenant -> neon.tenantid
- zenith.zenith_timeline -> neon.timelineid
- zenith.max_cluster_size -> neon.max_cluster_size
Committed by Anastasia Lubennikova
parent e3b320daab
commit 3accde613d
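Client code that still issues SHOW against the old names only needs the identifiers swapped. A minimal psycopg2 sketch of reading the renamed GUCs (the connection parameters are illustrative placeholders, not part of this commit):

import psycopg2

# Placeholder DSN: any compute node with the renamed 'neon' extension loaded.
conn = psycopg2.connect("host=127.0.0.1 port=55432 dbname=postgres")
with conn.cursor() as cur:
    # Formerly: SHOW zenith.zenith_tenant / SHOW zenith.zenith_timeline
    cur.execute("SHOW neon.tenantid")
    print("tenant:", cur.fetchone()[0])
    cur.execute("SHOW neon.timelineid")
    print("timeline:", cur.fetchone()[0])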
@@ -74,16 +74,16 @@ postgres-headers: postgres-configure
	+@echo "Installing PostgreSQL headers"
	$(MAKE) -C tmp_install/build/src/include MAKELEVEL=0 install

-# Compile and install PostgreSQL and contrib/zenith
+# Compile and install PostgreSQL and contrib/neon
.PHONY: postgres
postgres: postgres-configure \
	  postgres-headers # to prevent `make install` conflicts with zenith's `postgres-headers`
	+@echo "Compiling PostgreSQL"
	$(MAKE) -C tmp_install/build MAKELEVEL=0 install
-	+@echo "Compiling contrib/zenith"
-	$(MAKE) -C tmp_install/build/contrib/zenith install
-	+@echo "Compiling contrib/zenith_test_utils"
-	$(MAKE) -C tmp_install/build/contrib/zenith_test_utils install
+	+@echo "Compiling contrib/neon"
+	$(MAKE) -C tmp_install/build/contrib/neon install
+	+@echo "Compiling contrib/neon_test_utils"
+	$(MAKE) -C tmp_install/build/contrib/neon_test_utils install
	+@echo "Compiling pg_buffercache"
	$(MAKE) -C tmp_install/build/contrib/pg_buffercache install
	+@echo "Compiling pageinspect"
@@ -116,17 +116,17 @@ fn main() -> Result<()> {
    let pageserver_connstr = spec
        .cluster
        .settings
-        .find("zenith.page_server_connstring")
+        .find("neon.pageserver_connstring")
        .expect("pageserver connstr should be provided");
    let tenant = spec
        .cluster
        .settings
-        .find("zenith.zenith_tenant")
+        .find("neon.tenantid")
        .expect("tenant id should be provided");
    let timeline = spec
        .cluster
        .settings
-        .find("zenith.zenith_timeline")
+        .find("neon.timelineid")
        .expect("tenant id should be provided");

    let compute_state = ComputeNode {
@@ -150,7 +150,7 @@
      "vartype": "integer"
    },
    {
-      "name": "zenith.zenith_tenant",
+      "name": "neon.tenantid",
      "value": "b0554b632bd4d547a63b86c3630317e8",
      "vartype": "string"
    },
@@ -160,13 +160,13 @@
      "vartype": "integer"
    },
    {
-      "name": "zenith.zenith_timeline",
+      "name": "neon.timelineid",
      "value": "2414a61ffc94e428f14b5758fe308e13",
      "vartype": "string"
    },
    {
      "name": "shared_preload_libraries",
-      "value": "zenith",
+      "value": "neon",
      "vartype": "string"
    },
    {
@@ -175,7 +175,7 @@
      "vartype": "string"
    },
    {
-      "name": "zenith.page_server_connstring",
+      "name": "neon.pageserver_connstring",
      "value": "host=127.0.0.1 port=6400",
      "vartype": "string"
    }
@@ -28,7 +28,7 @@ mod pg_helpers_tests {

        assert_eq!(
            spec.cluster.settings.as_pg_settings(),
-            "fsync = off\nwal_level = replica\nhot_standby = on\nwal_acceptors = '127.0.0.1:6502,127.0.0.1:6503,127.0.0.1:6501'\nwal_log_hints = on\nlog_connections = on\nshared_buffers = 32768\nport = 55432\nmax_connections = 100\nmax_wal_senders = 10\nlisten_addresses = '0.0.0.0'\nwal_sender_timeout = 0\npassword_encryption = md5\nmaintenance_work_mem = 65536\nmax_parallel_workers = 8\nmax_worker_processes = 8\nzenith.zenith_tenant = 'b0554b632bd4d547a63b86c3630317e8'\nmax_replication_slots = 10\nzenith.zenith_timeline = '2414a61ffc94e428f14b5758fe308e13'\nshared_preload_libraries = 'zenith'\nsynchronous_standby_names = 'walproposer'\nzenith.page_server_connstring = 'host=127.0.0.1 port=6400'"
+            "fsync = off\nwal_level = replica\nhot_standby = on\nwal_acceptors = '127.0.0.1:6502,127.0.0.1:6503,127.0.0.1:6501'\nwal_log_hints = on\nlog_connections = on\nshared_buffers = 32768\nport = 55432\nmax_connections = 100\nmax_wal_senders = 10\nlisten_addresses = '0.0.0.0'\nwal_sender_timeout = 0\npassword_encryption = md5\nmaintenance_work_mem = 65536\nmax_parallel_workers = 8\nmax_worker_processes = 8\nneon.tenantid = 'b0554b632bd4d547a63b86c3630317e8'\nmax_replication_slots = 10\nneon.timelineid = '2414a61ffc94e428f14b5758fe308e13'\nshared_preload_libraries = 'neon'\nsynchronous_standby_names = 'walproposer'\nneon.pageserver_connstring = 'host=127.0.0.1 port=6400'"
        );
    }

@@ -148,8 +148,8 @@ impl PostgresNode {
        // Read a few options from the config file
        let context = format!("in config file {}", cfg_path_str);
        let port: u16 = conf.parse_field("port", &context)?;
-        let timeline_id: ZTimelineId = conf.parse_field("zenith.zenith_timeline", &context)?;
-        let tenant_id: ZTenantId = conf.parse_field("zenith.zenith_tenant", &context)?;
+        let timeline_id: ZTimelineId = conf.parse_field("neon.timelineid", &context)?;
+        let tenant_id: ZTenantId = conf.parse_field("neon.tenantid", &context)?;
        let uses_wal_proposer = conf.get("wal_acceptors").is_some();

        // parse recovery_target_lsn, if any
@@ -303,11 +303,11 @@ impl PostgresNode {
            // uses only needed variables namely host, port, user, password.
            format!("postgresql://no_user:{}@{}:{}", password, host, port)
        };
-        conf.append("shared_preload_libraries", "zenith");
+        conf.append("shared_preload_libraries", "neon");
        conf.append_line("");
-        conf.append("zenith.page_server_connstring", &pageserver_connstr);
-        conf.append("zenith.zenith_tenant", &self.tenant_id.to_string());
-        conf.append("zenith.zenith_timeline", &self.timeline_id.to_string());
+        conf.append("neon.pageserver_connstring", &pageserver_connstr);
+        conf.append("neon.tenantid", &self.tenant_id.to_string());
+        conf.append("neon.timelineid", &self.timeline_id.to_string());
        if let Some(lsn) = self.lsn {
            conf.append("recovery_target_lsn", &lsn.to_string());
        }
@@ -352,7 +352,7 @@ impl PostgresNode {
            // This isn't really a supported configuration, but can be useful for
            // testing.
            conf.append("synchronous_standby_names", "pageserver");
-            conf.append("zenith.callmemaybe_connstring", &self.connstr());
+            conf.append("neon.callmemaybe_connstring", &self.connstr());
        }

        let mut file = File::create(self.pgdata().join("postgresql.conf"))?;
@@ -22,8 +22,8 @@ so we don't want to give users access to the functionality that we don't think i

* pageserver - calculate the size consumed by a timeline and add it to the feedback message.
* safekeeper - pass feedback message from pageserver to compute.
-* compute - receive feedback message, enforce size limit based on GUC `zenith.max_cluster_size`.
-* console - set and update `zenith.max_cluster_size` setting
+* compute - receive feedback message, enforce size limit based on GUC `neon.max_cluster_size`.
+* console - set and update `neon.max_cluster_size` setting

## Proposed implementation

@@ -49,7 +49,7 @@ This message is received by the safekeeper and propagated to compute node as a p

Finally, when compute node receives the `current_timeline_size` from safekeeper (or from pageserver directly), it updates the global variable.

-And then every zenith_extend() operation checks if limit is reached `(current_timeline_size > zenith.max_cluster_size)` and throws `ERRCODE_DISK_FULL` error if so.
+And then every zenith_extend() operation checks if limit is reached `(current_timeline_size > neon.max_cluster_size)` and throws `ERRCODE_DISK_FULL` error if so.
(see Postgres error codes [https://www.postgresql.org/docs/devel/errcodes-appendix.html](https://www.postgresql.org/docs/devel/errcodes-appendix.html))

TODO:
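For illustration, a minimal pytest-style sketch of what this enforcement looks like from the client side, assuming a started `Postgres` handle `pg` configured with the renamed GUC as in `test_timeline_size_quota` later in this commit; `psycopg2.errors.DiskFull` corresponds to `ERRCODE_DISK_FULL`:

from psycopg2.errors import DiskFull

# Sketch only: assumes `pg` was started with
# config_lines=['neon.max_cluster_size=30MB'] (see test_timeline_size_quota).
def fill_until_quota_hit(pg) -> bool:
    cur = pg.connect().cursor()
    cur.execute("CREATE TABLE bloat (t text)")
    try:
        for _ in range(100):
            cur.execute("INSERT INTO bloat SELECT repeat('x', 1000) FROM generate_series(1, 100000)")
    except DiskFull:
        return True  # raised once current_timeline_size > neon.max_cluster_size
    return False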
@@ -75,5 +75,5 @@ We should warn users if the limit is soon to be reached.

### **Security implications**

We treat compute as an untrusted component. That's why we try to isolate it with secure container runtime or a VM.
-Malicious users may change the `zenith.max_cluster_size`, so we need an extra size limit check.
+Malicious users may change the `neon.max_cluster_size`, so we need an extra size limit check.
To cover this case, we also monitor the compute node size in the console.
@@ -42,13 +42,13 @@ Integration tests, written in Python using the `pytest` framework.

`/vendor/postgres`:

-PostgreSQL source tree, with the modifications needed for Zenith.
+PostgreSQL source tree, with the modifications needed for Neon.

-`/vendor/postgres/contrib/zenith`:
+`/vendor/postgres/contrib/neon`:

PostgreSQL extension that implements storage manager API and network communications with remote page server.

-`/vendor/postgres/contrib/zenith_test_utils`:
+`/vendor/postgres/contrib/neon_test_utils`:

PostgreSQL extension that contains functions needed for testing and debugging.
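A session loads the renamed extensions the same way the updated tests below do; a minimal sketch, assuming a psycopg2-style cursor `cur` on a started compute:

# Sketch: load the renamed extensions in a fresh session.
cur.execute("CREATE EXTENSION IF NOT EXISTS neon")             # storage manager / pageserver protocol
cur.execute("CREATE EXTENSION IF NOT EXISTS neon_test_utils")  # testing and debugging helpers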
@@ -80,7 +80,7 @@ impl Conf {
            .arg(self.datadir.as_os_str())
            .args(&["-c", "wal_keep_size=50MB"]) // Ensure old WAL is not removed
            .args(&["-c", "logging_collector=on"]) // stderr will mess up with tests output
-            .args(&["-c", "shared_preload_libraries=zenith"]) // can only be loaded at startup
+            .args(&["-c", "shared_preload_libraries=neon"]) // can only be loaded at startup
            // Disable background processes as much as possible
            .args(&["-c", "wal_writer_delay=10s"])
            .args(&["-c", "autovacuum=off"])
@@ -178,7 +178,7 @@ fn generate_internal<C: postgres::GenericClient>(
    client: &mut C,
    f: impl Fn(&mut C, PgLsn) -> Result<Option<PgLsn>>,
) -> Result<PgLsn> {
-    client.execute("create extension if not exists zenith_test_utils", &[])?;
+    client.execute("create extension if not exists neon_test_utils", &[])?;

    let wal_segment_size = client.query_one(
        "select cast(setting as bigint) as setting, unit \
@@ -607,8 +607,8 @@ impl PostgresRedoProcess {
                .open(PathBuf::from(&datadir).join("postgresql.conf"))?;
            config.write_all(b"shared_buffers=128kB\n")?;
            config.write_all(b"fsync=off\n")?;
-            config.write_all(b"shared_preload_libraries=zenith\n")?;
-            config.write_all(b"zenith.wal_redo=on\n")?;
+            config.write_all(b"shared_preload_libraries=neon\n")?;
+            config.write_all(b"neon.wal_redo=on\n")?;
        }
        // Start postgres itself
        let mut child = Command::new(conf.pg_bin_dir().join("postgres"))
@@ -30,7 +30,7 @@ def test_ancestor_branch(zenith_env_builder: ZenithEnvBuilder):

    pg_branch0 = env.postgres.create_start('main', tenant_id=tenant)
    branch0_cur = pg_branch0.connect().cursor()
-    branch0_cur.execute("SHOW zenith.zenith_timeline")
+    branch0_cur.execute("SHOW neon.timelineid")
    branch0_timeline = branch0_cur.fetchone()[0]
    log.info(f"b0 timeline {branch0_timeline}")

@@ -55,7 +55,7 @@ def test_ancestor_branch(zenith_env_builder: ZenithEnvBuilder):
    log.info("postgres is running on 'branch1' branch")

    branch1_cur = pg_branch1.connect().cursor()
-    branch1_cur.execute("SHOW zenith.zenith_timeline")
+    branch1_cur.execute("SHOW neon.timelineid")
    branch1_timeline = branch1_cur.fetchone()[0]
    log.info(f"b1 timeline {branch1_timeline}")

@@ -79,7 +79,7 @@ def test_ancestor_branch(zenith_env_builder: ZenithEnvBuilder):
    log.info("postgres is running on 'branch2' branch")
    branch2_cur = pg_branch2.connect().cursor()

-    branch2_cur.execute("SHOW zenith.zenith_timeline")
+    branch2_cur.execute("SHOW neon.timelineid")
    branch2_timeline = branch2_cur.fetchone()[0]
    log.info(f"b2 timeline {branch2_timeline}")
@@ -26,7 +26,7 @@ def check_backpressure(pg: Postgres, stop_event: threading.Event, polling_interv
    log.info("checks started")

    with pg_cur(pg) as cur:
-        cur.execute("CREATE EXTENSION zenith")  # TODO move it to zenith_fixtures?
+        cur.execute("CREATE EXTENSION neon")  # TODO move it to zenith_fixtures?

        cur.execute("select pg_size_bytes(current_setting('max_replication_write_lag'))")
        res = cur.fetchone()
@@ -31,7 +31,7 @@ def test_branch_behind(zenith_env_builder: ZenithEnvBuilder):
    main_pg_conn = pgmain.connect()
    main_cur = main_pg_conn.cursor()

-    main_cur.execute("SHOW zenith.zenith_timeline")
+    main_cur.execute("SHOW neon.timelineid")
    timeline = main_cur.fetchone()[0]

    # Create table, and insert the first 100 rows
@@ -26,7 +26,7 @@ def test_broken_timeline(zenith_env_builder: ZenithEnvBuilder):
    cur.execute("CREATE TABLE t(key int primary key, value text)")
    cur.execute("INSERT INTO t SELECT generate_series(1,100), 'payload'")

-    cur.execute("SHOW zenith.zenith_timeline")
+    cur.execute("SHOW neon.timelineid")
    timeline_id = cur.fetchone()[0]
    pg.stop()
    tenant_timelines.append((tenant_id, timeline_id, pg))
@@ -29,7 +29,7 @@ def test_clog_truncate(zenith_simple_env: ZenithEnv):
    log.info('postgres is running on test_clog_truncate branch')

    # Install extension containing function needed for test
-    pg.safe_psql('CREATE EXTENSION zenith_test_utils')
+    pg.safe_psql('CREATE EXTENSION neon_test_utils')

    # Consume many xids to advance clog
    with closing(pg.connect()) as conn:
@@ -62,7 +62,7 @@ def test_gc_aggressive(zenith_env_builder: ZenithEnvBuilder):
    conn = pg.connect()
    cur = conn.cursor()

-    cur.execute("SHOW zenith.zenith_timeline")
+    cur.execute("SHOW neon.timelineid")
    timeline = cur.fetchone()[0]

    # Create table, and insert the first 100 rows
@@ -26,7 +26,7 @@ def test_old_request_lsn(zenith_env_builder: ZenithEnvBuilder):
    cur = pg_conn.cursor()

    # Get the timeline ID of our branch. We need it for the 'do_gc' command
-    cur.execute("SHOW zenith.zenith_timeline")
+    cur.execute("SHOW neon.timelineid")
    timeline = cur.fetchone()[0]

    psconn = env.pageserver.connect()
@@ -25,7 +25,7 @@ def test_pitr_gc(zenith_env_builder: ZenithEnvBuilder):
    main_pg_conn = pgmain.connect()
    main_cur = main_pg_conn.cursor()

-    main_cur.execute("SHOW zenith.zenith_timeline")
+    main_cur.execute("SHOW neon.timelineid")
    timeline = main_cur.fetchone()[0]

    # Create table
@@ -8,7 +8,7 @@ from psycopg2.errors import IoError

pytest_plugins = ("fixtures.zenith_fixtures")

-extensions = ["pageinspect", "zenith_test_utils", "pg_buffercache"]
+extensions = ["pageinspect", "neon_test_utils", "pg_buffercache"]


#
@@ -48,8 +48,8 @@ def test_remote_storage_backup_and_restore(zenith_env_builder: ZenithEnvBuilder,

    client = env.pageserver.http_client()

-    tenant_id = pg.safe_psql("show zenith.zenith_tenant")[0][0]
-    timeline_id = pg.safe_psql("show zenith.zenith_timeline")[0][0]
+    tenant_id = pg.safe_psql("show neon.tenantid")[0][0]
+    timeline_id = pg.safe_psql("show neon.timelineid")[0][0]

    checkpoint_numbers = range(1, 3)
@@ -130,7 +130,7 @@ def test_tenant_relocation(zenith_env_builder: ZenithEnvBuilder,
    with closing(tenant_pg.connect()) as conn:
        with conn.cursor() as cur:
            # save timeline for later gc call
-            cur.execute("SHOW zenith.zenith_timeline")
+            cur.execute("SHOW neon.timelineid")
            timeline = UUID(cur.fetchone()[0])
            log.info("timeline to relocate %s", timeline.hex)
@@ -223,7 +223,7 @@ def test_tenant_relocation(zenith_env_builder: ZenithEnvBuilder,

    tenant_pg_config_file_path = pathlib.Path(tenant_pg.config_file_path())
    tenant_pg_config_file_path.open('a').write(
-        f"\nzenith.page_server_connstring = 'postgresql://no_user:@localhost:{new_pageserver_pg_port}'"
+        f"\nneon.pageserver_connstring = 'postgresql://no_user:@localhost:{new_pageserver_pg_port}'"
    )

    tenant_pg.start()
@@ -21,8 +21,8 @@ async def tenant_workload(env: ZenithEnv, pg: Postgres):

    pg_conn = await pg.connect_async()

-    tenant_id = await pg_conn.fetchval("show zenith.zenith_tenant")
-    timeline_id = await pg_conn.fetchval("show zenith.zenith_timeline")
+    tenant_id = await pg_conn.fetchval("show neon.tenantid")
+    timeline_id = await pg_conn.fetchval("show neon.timelineid")

    await pg_conn.execute("CREATE TABLE t(key int primary key, value text)")
    for i in range(1, 100):
@@ -82,9 +82,9 @@ def test_tenants_many(zenith_env_builder: ZenithEnvBuilder, storage_type: str):
    for tenant, pg in tenants_pgs:
        with closing(pg.connect()) as conn:
            with conn.cursor() as cur:
-                cur.execute("show zenith.zenith_tenant")
+                cur.execute("show neon.tenantid")
                tenant_id = cur.fetchone()[0]
-                cur.execute("show zenith.zenith_timeline")
+                cur.execute("show neon.timelineid")
                timeline_id = cur.fetchone()[0]
                cur.execute("SELECT pg_current_wal_flush_lsn()")
                current_lsn = lsn_from_hex(cur.fetchone()[0])
@@ -21,7 +21,7 @@ def test_timeline_size(zenith_simple_env: ZenithEnv):

    with closing(pgmain.connect()) as conn:
        with conn.cursor() as cur:
-            cur.execute("SHOW zenith.zenith_timeline")
+            cur.execute("SHOW neon.timelineid")

            # Create table, and insert the first 100 rows
            cur.execute("CREATE TABLE foo (t text)")
@@ -81,12 +81,12 @@ def test_timeline_size_quota(zenith_env_builder: ZenithEnvBuilder):
    pgmain = env.postgres.create_start(
        "test_timeline_size_quota",
        # Set small limit for the test
-        config_lines=['zenith.max_cluster_size=30MB'])
+        config_lines=['neon.max_cluster_size=30MB'])
    log.info("postgres is running on 'test_timeline_size_quota' branch")

    with closing(pgmain.connect()) as conn:
        with conn.cursor() as cur:
-            cur.execute("CREATE EXTENSION zenith")  # TODO move it to zenith_fixtures?
+            cur.execute("CREATE EXTENSION neon")  # TODO move it to zenith_fixtures?

            cur.execute("CREATE TABLE foo (t text)")
@@ -17,7 +17,7 @@ def test_vm_bit_clear(zenith_simple_env: ZenithEnv):
    cur = pg_conn.cursor()

    # Install extension containing function needed for test
-    cur.execute('CREATE EXTENSION zenith_test_utils')
+    cur.execute('CREATE EXTENSION neon_test_utils')

    # Create a test table and freeze it to set the VM bit.
    cur.execute('CREATE TABLE vmtest_delete (id integer PRIMARY KEY)')
@@ -337,8 +337,8 @@ def test_broker(zenith_env_builder: ZenithEnvBuilder):
    pg.safe_psql("CREATE TABLE t(key int primary key, value text)")

    # learn zenith timeline from compute
-    tenant_id = pg.safe_psql("show zenith.zenith_tenant")[0][0]
-    timeline_id = pg.safe_psql("show zenith.zenith_timeline")[0][0]
+    tenant_id = pg.safe_psql("show neon.tenantid")[0][0]
+    timeline_id = pg.safe_psql("show neon.timelineid")[0][0]

    # wait until remote_consistent_lsn gets advanced on all safekeepers
    clients = [sk.http_client() for sk in env.safekeepers]
@@ -384,8 +384,8 @@ def test_wal_removal(zenith_env_builder: ZenithEnvBuilder):
    cur.execute('CREATE TABLE t(key int primary key, value text)')
    cur.execute("INSERT INTO t SELECT generate_series(1,100000), 'payload'")

-    tenant_id = pg.safe_psql("show zenith.zenith_tenant")[0][0]
-    timeline_id = pg.safe_psql("show zenith.zenith_timeline")[0][0]
+    tenant_id = pg.safe_psql("show neon.tenantid")[0][0]
+    timeline_id = pg.safe_psql("show neon.timelineid")[0][0]

    # force checkpoint to advance remote_consistent_lsn
    with closing(env.pageserver.connect()) as psconn:
@@ -497,10 +497,10 @@ class ProposerPostgres(PgProtocol):
        with open(self.config_file_path(), "w") as f:
            cfg = [
                "synchronous_standby_names = 'walproposer'\n",
-                "shared_preload_libraries = 'zenith'\n",
-                f"zenith.zenith_timeline = '{self.timeline_id.hex}'\n",
-                f"zenith.zenith_tenant = '{self.tenant_id.hex}'\n",
-                f"zenith.page_server_connstring = ''\n",
+                "shared_preload_libraries = 'neon'\n",
+                f"neon.timelineid = '{self.timeline_id.hex}'\n",
+                f"neon.tenantid = '{self.tenant_id.hex}'\n",
+                f"neon.pageserver_connstring = ''\n",
                f"wal_acceptors = '{safekeepers}'\n",
                f"listen_addresses = '{self.listen_addr}'\n",
                f"port = '{self.port}'\n",
@@ -612,8 +612,8 @@ def test_timeline_status(zenith_env_builder: ZenithEnvBuilder):
    wa_http_cli.check_status()

    # learn zenith timeline from compute
-    tenant_id = pg.safe_psql("show zenith.zenith_tenant")[0][0]
-    timeline_id = pg.safe_psql("show zenith.zenith_timeline")[0][0]
+    tenant_id = pg.safe_psql("show neon.tenantid")[0][0]
+    timeline_id = pg.safe_psql("show neon.timelineid")[0][0]

    # fetch something sensible from status
    tli_status = wa_http_cli.timeline_status(tenant_id, timeline_id)
@@ -798,8 +798,8 @@ def test_replace_safekeeper(zenith_env_builder: ZenithEnvBuilder):
    pg.start()

    # learn zenith timeline from compute
-    tenant_id = pg.safe_psql("show zenith.zenith_tenant")[0][0]
-    timeline_id = pg.safe_psql("show zenith.zenith_timeline")[0][0]
+    tenant_id = pg.safe_psql("show neon.tenantid")[0][0]
+    timeline_id = pg.safe_psql("show neon.timelineid")[0][0]

    execute_payload(pg)
    show_statuses(env.safekeepers, tenant_id, timeline_id)
@@ -151,8 +151,8 @@ async def run_restarts_under_load(env: ZenithEnv,
    test_timeout_at = time.monotonic() + 5 * 60

    pg_conn = await pg.connect_async()
-    tenant_id = await pg_conn.fetchval("show zenith.zenith_tenant")
-    timeline_id = await pg_conn.fetchval("show zenith.zenith_timeline")
+    tenant_id = await pg_conn.fetchval("show neon.tenantid")
+    timeline_id = await pg_conn.fetchval("show neon.timelineid")

    bank = BankClient(pg_conn, n_accounts=n_accounts, init_amount=init_amount)
    # create tables and initial balances
@@ -19,7 +19,7 @@ def test_wal_restore(zenith_env_builder: ZenithEnvBuilder,
    env.zenith_cli.create_branch("test_wal_restore")
    pg = env.postgres.create_start('test_wal_restore')
    pg.safe_psql("create table t as select generate_series(1,300000)")
-    tenant_id = pg.safe_psql("show zenith.zenith_tenant")[0][0]
+    tenant_id = pg.safe_psql("show neon.tenantid")[0][0]
    env.zenith_cli.pageserver_stop()
    port = port_distributor.get_port()
    data_dir = os.path.join(test_output_dir, 'pgsql.restored')
@@ -66,7 +66,7 @@ class ZenithCompare(PgCompare):
        # We only use one branch and one timeline
        self.env.zenith_cli.create_branch(branch_name, 'empty')
        self._pg = self.env.postgres.create_start(branch_name)
-        self.timeline = self.pg.safe_psql("SHOW zenith.zenith_timeline")[0][0]
+        self.timeline = self.pg.safe_psql("SHOW neon.timelineid")[0][0]

        # Long-lived cursor, useful for flushing
        self.psconn = self.env.pageserver.connect()
@@ -2039,7 +2039,7 @@ def check_restored_datadir_content(test_output_dir: str, env: ZenithEnv, pg: Pos
    # Get the timeline ID. We need it for the 'basebackup' command
    with closing(pg.connect()) as conn:
        with conn.cursor() as cur:
-            cur.execute("SHOW zenith.zenith_timeline")
+            cur.execute("SHOW neon.timelineid")
            timeline = cur.fetchone()[0]

    # stop postgres to ensure that files won't change
Submodule vendor/postgres updated: 038b2b98e5...165e61b5e0