mirror of
https://github.com/neondatabase/neon.git
synced 2026-05-15 12:10:37 +00:00
* Add --id argument to safekeeper setting its unique u64 id. In preparation for storage node messaging. IDs are supposed to be monotonically assigned by the console. In tests it is issued by ZenithEnv; at the zenith cli level and fixtures, string name is completely replaced by integer id. Example TOML configs are adjusted accordingly. Sequential ids are chosen over Zid mainly because they are compact and easy to type/remember. * add node id to pageserver This adds node id parameter to pageserver configuration. Also I use a simple builder to construct pageserver config struct to avoid setting node id to some temporary invalid value. Some of the changes in test fixtures are needed to split init and start operations for environment. Co-authored-by: Arseny Sher <sher-ars@yandex.ru>
76 lines
2.6 KiB
Python
76 lines
2.6 KiB
Python
import pytest
|
|
|
|
from contextlib import closing
|
|
from fixtures.zenith_fixtures import ZenithEnvBuilder
|
|
from fixtures.log_helper import log
|
|
|
|
|
|
#
# Test restarting and recreating a postgres instance
#
def _assert_rows_visible(pg, expected_rows: int):
    # Reconnect to the (re)started compute and verify the table contents
    # survived: the row count must match what was inserted before the restart.
    with closing(pg.connect()) as conn:
        with conn.cursor() as cur:
            cur.execute('SELECT count(*) FROM t')
            r = cur.fetchone()
            assert r == (expected_rows, )
            log.info(f"res = {r}")


@pytest.mark.parametrize('with_wal_acceptors', [False, True])
def test_restart_compute(zenith_env_builder: ZenithEnvBuilder, with_wal_acceptors: bool):
    """Destroy and recreate the compute node several times, checking that
    previously committed data is still visible after each restart.

    Runs both without WAL acceptors and with a quorum of 3 safekeepers.
    """
    zenith_env_builder.pageserver_auth_enabled = True
    if with_wal_acceptors:
        zenith_env_builder.num_safekeepers = 3
    env = zenith_env_builder.init_start()

    env.zenith_cli.create_branch("test_restart_compute", "main")

    pg = env.postgres.create_start('test_restart_compute')
    log.info("postgres is running on 'test_restart_compute' branch")

    with closing(pg.connect()) as conn:
        with conn.cursor() as cur:
            cur.execute('CREATE TABLE t(key int primary key, value text)')
            cur.execute("INSERT INTO t SELECT generate_series(1,100000), 'payload'")
            cur.execute('SELECT sum(key) FROM t')
            r = cur.fetchone()
            # sum(1..100000) == 5000050000
            assert r == (5000050000, )
            log.info(f"res = {r}")

    # Remove data directory and restart
    pg.stop_and_destroy().create_start('test_restart_compute')

    with closing(pg.connect()) as conn:
        with conn.cursor() as cur:
            # We can still see the row
            cur.execute('SELECT sum(key) FROM t')
            r = cur.fetchone()
            assert r == (5000050000, )
            log.info(f"res = {r}")

            # Insert another row
            cur.execute("INSERT INTO t VALUES (100001, 'payload2')")
            cur.execute('SELECT count(*) FROM t')

            r = cur.fetchone()
            assert r == (100001, )
            log.info(f"res = {r}")

    # Again remove data directory and restart
    pg.stop_and_destroy().create_start('test_restart_compute')

    # That select causes lots of FPI's and increases probability of safekeepers
    # lagging behind after query completion
    _assert_rows_visible(pg, 100001)

    # And again remove data directory and restart
    pg.stop_and_destroy().create_start('test_restart_compute')

    _assert_rows_visible(pg, 100001)
|