From b1b67cc5a055561a3d60c4e0194b0a3103cb8624 Mon Sep 17 00:00:00 2001
From: Dmitry Rodionov
Date: Tue, 31 May 2022 19:13:12 +0300
Subject: [PATCH] improve test normal work to start several computes

---
 .../batch_others/test_ancestor_branch.py      |  4 +-
 test_runner/batch_others/test_normal_work.py  | 47 +++++++++++++++++++
 test_runner/batch_others/test_wal_acceptor.py | 19 --------
 test_runner/fixtures/zenith_fixtures.py       | 22 +++++++--
 4 files changed, 65 insertions(+), 27 deletions(-)
 create mode 100644 test_runner/batch_others/test_normal_work.py

diff --git a/test_runner/batch_others/test_ancestor_branch.py b/test_runner/batch_others/test_ancestor_branch.py
index d87bebcc11..78724c434e 100644
--- a/test_runner/batch_others/test_ancestor_branch.py
+++ b/test_runner/batch_others/test_ancestor_branch.py
@@ -24,9 +24,7 @@ def test_ancestor_branch(zenith_env_builder: ZenithEnvBuilder):
         'compaction_target_size': '4194304',
     })
 
-    with closing(env.pageserver.connect()) as psconn:
-        with psconn.cursor(cursor_factory=psycopg2.extras.DictCursor) as pscur:
-            pscur.execute("failpoints flush-frozen=sleep(10000)")
+    env.pageserver.safe_psql("failpoints flush-frozen=sleep(10000)")
 
     pg_branch0 = env.postgres.create_start('main', tenant_id=tenant)
     branch0_cur = pg_branch0.connect().cursor()
diff --git a/test_runner/batch_others/test_normal_work.py b/test_runner/batch_others/test_normal_work.py
new file mode 100644
index 0000000000..87dd2d5e18
--- /dev/null
+++ b/test_runner/batch_others/test_normal_work.py
@@ -0,0 +1,47 @@
+from fixtures.log_helper import log
+from fixtures.zenith_fixtures import ZenithEnv, ZenithEnvBuilder, ZenithPageserverHttpClient
+
+
+def check_tenant(env: ZenithEnv, pageserver_http: ZenithPageserverHttpClient):
+    tenant_id, timeline_id = env.zenith_cli.create_tenant()
+    pg = env.postgres.create_start('main', tenant_id=tenant_id)
+    # we rely upon autocommit after each statement
+    res_1 = pg.safe_psql_many(queries=[
+        'CREATE TABLE t(key int primary key, value text)',
+        'INSERT INTO t SELECT generate_series(1,100000), \'payload\'',
+        'SELECT sum(key) FROM t',
+    ])
+
+    assert res_1[-1][0] == (5000050000, )
+    # TODO check detach on live instance
+    log.info("stopping compute")
+    pg.stop()
+    log.info("compute stopped")
+
+    pg.start()
+    res_2 = pg.safe_psql('SELECT sum(key) FROM t')
+    assert res_2[0] == (5000050000, )
+
+    pg.stop()
+    pageserver_http.timeline_detach(tenant_id, timeline_id)
+
+
+def test_normal_work(zenith_env_builder: ZenithEnvBuilder):
+    """
+    Basic test:
+    * create new tenant with a timeline
+    * write some data
+    * ensure that it was successfully written
+    * restart compute
+    * check that the data is there
+    * stop compute
+    * detach timeline
+
+    Repeat check for several tenants/timelines.
+    """
+
+    env = zenith_env_builder.init_start()
+    pageserver_http = env.pageserver.http_client()
+
+    for _ in range(3):
+        check_tenant(env, pageserver_http)
diff --git a/test_runner/batch_others/test_wal_acceptor.py b/test_runner/batch_others/test_wal_acceptor.py
index 40a9b48a18..007641417e 100644
--- a/test_runner/batch_others/test_wal_acceptor.py
+++ b/test_runner/batch_others/test_wal_acceptor.py
@@ -18,25 +18,6 @@ from fixtures.log_helper import log
 from typing import List, Optional, Any
 
 
-# basic test, write something in setup with wal acceptors, ensure that commits
-# succeed and data is written
-def test_normal_work(zenith_env_builder: ZenithEnvBuilder):
-    zenith_env_builder.num_safekeepers = 3
-    env = zenith_env_builder.init_start()
-
-    env.zenith_cli.create_branch('test_safekeepers_normal_work')
-    pg = env.postgres.create_start('test_safekeepers_normal_work')
-
-    with closing(pg.connect()) as conn:
-        with conn.cursor() as cur:
-            # we rely upon autocommit after each statement
-            # as waiting for acceptors happens there
-            cur.execute('CREATE TABLE t(key int primary key, value text)')
-            cur.execute("INSERT INTO t SELECT generate_series(1,100000), 'payload'")
-            cur.execute('SELECT sum(key) FROM t')
-            assert cur.fetchone() == (5000050000, )
-
-
 @dataclass
 class TimelineMetrics:
     timeline_id: str
diff --git a/test_runner/fixtures/zenith_fixtures.py b/test_runner/fixtures/zenith_fixtures.py
index 5f3c16c4e6..ff905efa53 100644
--- a/test_runner/fixtures/zenith_fixtures.py
+++ b/test_runner/fixtures/zenith_fixtures.py
@@ -338,18 +338,30 @@ class PgProtocol:
             conn_options['server_settings'] = {key: val}
         return await asyncpg.connect(**conn_options)
 
-    def safe_psql(self, query: str, **kwargs: Any) -> List[Any]:
+    def safe_psql(self, query: str, **kwargs: Any) -> List[Tuple[Any, ...]]:
         """
         Execute query against the node and return all rows.
         This method passes all extra params to connstr.
         """
+        return self.safe_psql_many([query], **kwargs)[0]
 
+    def safe_psql_many(self, queries: List[str], **kwargs: Any) -> List[List[Tuple[Any, ...]]]:
+        """
+        Execute queries against the node and return all rows.
+        This method passes all extra params to connstr.
+        """
+        result: List[List[Any]] = []
         with closing(self.connect(**kwargs)) as conn:
             with conn.cursor() as cur:
-                cur.execute(query)
-                if cur.description is None:
-                    return []  # query didn't return data
-                return cast(List[Any], cur.fetchall())
+                for query in queries:
+                    log.info(f"Executing query: {query}")
+                    cur.execute(query)
+
+                    if cur.description is None:
+                        result.append([])  # query didn't return data
+                    else:
+                        result.append(cast(List[Any], cur.fetchall()))
+        return result
 
 
 @dataclass