Mirror of https://github.com/neondatabase/neon.git (synced 2026-01-07)
Instead of having many separate fixtures for setting up the pageserver, the compute nodes, the safekeepers, and so on, there is now one ZenithEnv object that encapsulates the whole environment. Every test either uses the shared "zenith_simple_env" fixture, which contains the default setup of a pageserver with no authentication and no safekeepers, or, if it needs safekeepers or authentication, sets up a custom test-specific ZenithEnv fixture.

Gathering information about the whole environment into one object makes some things simpler. For example, when a new compute node is created, you no longer need to pass the 'wal_acceptors' connection string as an argument to the 'postgres.create_start' function; 'create_start' fetches that information directly from the ZenithEnv object.
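A minimal sketch of what a test-specific fixture could look like (the fixture name, scope, and the ZenithEnv constructor arguments are illustrative assumptions here, not the actual zenith_fixtures API):

import pytest
from fixtures.zenith_fixtures import ZenithEnv

@pytest.fixture(scope='session')
def zenith_env_with_safekeepers():
    # Hypothetical constructor arguments; the real interface lives in
    # fixtures/zenith_fixtures.py.
    return ZenithEnv(num_safekeepers=3)

def test_something(zenith_env_with_safekeepers: ZenithEnv):
    env = zenith_env_with_safekeepers
    # No 'wal_acceptors' argument: create_start takes the safekeeper
    # connection info from the ZenithEnv object.
    pg = env.postgres.create_start('my_branch')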
71 lines · 2.4 KiB · Python
from fixtures.zenith_fixtures import ZenithEnv, check_restored_datadir_content
from fixtures.log_helper import log

pytest_plugins = ("fixtures.zenith_fixtures")

#
# Test multixact state after branching.
#
# For now, this test is very minimalistic: it only checks the
# next_multixact_id field in the restored pg_control, since we don't
# have functions to check multixact internals.
#
def test_multixact(zenith_simple_env: ZenithEnv, test_output_dir):
    env = zenith_simple_env

    # Create a branch for us
    env.zenith_cli(["branch", "test_multixact", "empty"])
    pg = env.postgres.create_start('test_multixact')

    log.info("postgres is running on 'test_multixact' branch")
    pg_conn = pg.connect()
    cur = pg_conn.cursor()

    cur.execute('''
        CREATE TABLE t1(i int primary key);
        INSERT INTO t1 select * from generate_series(1, 100);
    ''')

    cur.execute('SELECT next_multixact_id FROM pg_control_checkpoint()')
    next_multixact_id_old = cur.fetchone()[0]
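
    # Background: PostgreSQL normally stores a single locker XID in a
    # tuple's xmax. When a second transaction also locks the same tuple
    # (e.g. with FOR KEY SHARE), the lockers are packed into a MultiXactId
    # instead, which advances next_multixact_id.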

    # Lock entries in parallel connections to set multixact
    nclients = 3
    connections = []
    for i in range(nclients):
        # Do not turn on autocommit. We want to hold the key-share locks.
        conn = pg.connect(autocommit=False)
        conn.cursor().execute('select * from t1 for key share')
        connections.append(conn)

    # We should have a multixact now. We can close the connections.
    for c in connections:
        c.close()

    # force wal flush
    cur.execute('checkpoint')
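    # (pg_control_checkpoint() reports values from the most recent
    # checkpoint record, so without this checkpoint the query below would
    # still see the old next_multixact_id.)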

    cur.execute('SELECT next_multixact_id, pg_current_wal_flush_lsn() FROM pg_control_checkpoint()')
    res = cur.fetchone()
    next_multixact_id = res[0]
    lsn = res[1]

    # Ensure that we did lock some tuples
    assert int(next_multixact_id) > int(next_multixact_id_old)

    # Branch at this point
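    # (the 'test_multixact@' + lsn argument uses the branch@LSN form: the
    # new branch is created at that flushed WAL position rather than at
    # the tip of 'test_multixact')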
env.zenith_cli(["branch", "test_multixact_new", "test_multixact@" + lsn])
|
|
pg_new = env.postgres.create_start('test_multixact_new')
|
|
|
|
log.info("postgres is running on 'test_multixact_new' branch")
|
|
pg_new_conn = pg_new.connect()
|
|
cur_new = pg_new_conn.cursor()
|
|
|
|

    cur_new.execute('SELECT next_multixact_id FROM pg_control_checkpoint()')
    next_multixact_id_new = cur_new.fetchone()[0]

    # Check that we restored the pg_control file correctly
    assert next_multixact_id_new == next_multixact_id

    # Check that we restored the content of the datadir correctly
    check_restored_datadir_content(test_output_dir, env, pg_new)