Files
neon/test_runner/batch_others/test_pageserver_catchup.py
Arseny Sher 75002adc14 Make shared_buffers large in test_pageserver_catchup.
We intentionally write while pageserver is down, so we shouldn't query it.

Noticed by @petuhovskiy at
https://github.com/zenithdb/postgres/pull/141#issuecomment-1080261700
2022-03-28 20:34:06 +04:00

65 lines
2.3 KiB
Python

from fixtures.zenith_fixtures import ZenithEnvBuilder
# Test that safekeepers sync and the pageserver catches up while the initial
# compute node is down and the pageserver is lagging behind the safekeepers.
# Verify that a basebackup taken after restarting every component is correct
# and that a freshly started compute node sees all the data.
def test_pageserver_catchup_while_compute_down(zenith_env_builder: ZenithEnvBuilder):
    zenith_env_builder.num_safekeepers = 3
    env = zenith_env_builder.init_start()

    env.zenith_cli.create_branch('test_pageserver_catchup_while_compute_down')

    # Use a large shared_buffers so the compute never needs to request pages
    # from the pageserver while it is down.
    pg = env.postgres.create_start('test_pageserver_catchup_while_compute_down',
                                   config_lines=['shared_buffers=512MB'])

    cur = pg.connect().cursor()

    # Populate a table with an initial batch of rows.
    cur.execute('CREATE TABLE foo (t text)')
    cur.execute('''
        INSERT INTO foo
            SELECT 'long string to consume some space' || g
            FROM generate_series(1, 10000) g
    ''')
    cur.execute("SELECT count(*) FROM foo")
    assert cur.fetchone() == (10000, )

    # Stop the pageserver. This is a more or less graceful shutdown, although
    # the page server doesn't currently have a shutdown routine so there's no
    # difference between stopping and crashing.
    env.pageserver.stop()

    # Insert a second batch of rows. With the pageserver down, this WAL lives
    # only on the safekeepers.
    cur.execute('''
        INSERT INTO foo
            SELECT 'long string to consume some space' || g
            FROM generate_series(1, 10000) g
    ''')

    # Gracefully stop every safekeeper.
    for sk in env.safekeepers:
        sk.stop()

    # Bring everything back: the safekeepers must synchronize with each other
    # and the pageserver must catch up on the WAL it missed.
    env.pageserver.start()
    for sk in env.safekeepers:
        sk.start()

    # Recreate the compute node from scratch.
    pg.stop_and_destroy().create_start('test_pageserver_catchup_while_compute_down')

    # The basebackup must be correct: the pageserver has to serve both batches.
    cur = pg.connect().cursor()
    cur.execute("SELECT count(*) FROM foo")
    assert cur.fetchone() == (20000, )