Compare commits

...

7 Commits

Author              SHA1        Message                                            Date
Arthur Petukhovsky  8a44796905  Increase parallel workers to trigger more errors   2021-09-29 12:23:49 +03:00
Arthur Petukhovsky  ed521e05e7  Hide debug logs in test_wal_acceptor_async         2021-09-29 11:47:53 +03:00
Arthur Petukhovsky  6a13500da4  Fix print in last test                             2021-09-29 11:47:53 +03:00
Arthur Petukhovsky  9b8168ebde  Don't log test output while running                2021-09-29 11:47:53 +03:00
Arthur Petukhovsky  f9bb4dbf08  Use f-strings for logs                             2021-09-29 11:47:52 +03:00
Arthur Petukhovsky  20ee204c27  Fix string formatting                              2021-09-29 11:47:52 +03:00
Arthur Petukhovsky  3fdd85bcb8  Use logging in python tests                        2021-09-29 11:47:52 +03:00
25 changed files with 209 additions and 98 deletions

View File

@@ -233,7 +233,7 @@ jobs:
             exit 1
           fi
           if << parameters.run_in_parallel >>; then
-            EXTRA_PARAMS="-n4 $EXTRA_PARAMS"
+            EXTRA_PARAMS="-n16 $EXTRA_PARAMS"
           fi;
           # Run the tests.
           #
@@ -245,7 +245,7 @@ jobs:
           # multiple tests in one file)
           # -rA prints summary in the end
           # -n4 uses four processes to run tests via pytest-xdist
-          pipenv run pytest --junitxml=$TEST_OUTPUT/junit.xml --tb=short -s --verbose -rA $TEST_SELECTION $EXTRA_PARAMS
+          pipenv run pytest --junitxml=$TEST_OUTPUT/junit.xml --tb=short --verbose -rA $TEST_SELECTION $EXTRA_PARAMS
      - run:
          # CircleCI artifacts are preserved one file at a time, so skipping
          # this step isn't a good idea. If you want to extract the
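With both hunks applied, the test step effectively runs something like the following (the values of $TEST_OUTPUT and $TEST_SELECTION still come from the CI environment):

    pipenv run pytest --junitxml=$TEST_OUTPUT/junit.xml --tb=short --verbose -rA $TEST_SELECTION -n16

Dropping -s re-enables pytest's default output capture, so stdout/stderr from the new log calls is reported only for failing tests, while -n16 runs sixteen pytest-xdist workers instead of four.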

View File

@@ -1,6 +1,9 @@
 import subprocess
 from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')

 pytest_plugins = ("fixtures.zenith_fixtures")
@@ -13,7 +16,7 @@ def test_branch_behind(zenith_cli, pageserver: ZenithPageserver, postgres: Postg
     zenith_cli.run(["branch", "test_branch_behind", "empty"])
     pgmain = postgres.create_start('test_branch_behind')
-    print("postgres is running on 'test_branch_behind' branch")
+    log.info("postgres is running on 'test_branch_behind' branch")

     main_pg_conn = pgmain.connect()
     main_cur = main_pg_conn.cursor()
@@ -27,7 +30,7 @@ def test_branch_behind(zenith_cli, pageserver: ZenithPageserver, postgres: Postg
     ''')
     main_cur.execute('SELECT pg_current_wal_insert_lsn()')
     lsn_a = main_cur.fetchone()[0]
-    print('LSN after 100 rows: ' + lsn_a)
+    log.info(f'LSN after 100 rows: {lsn_a}')

     # Insert some more rows. (This generates enough WAL to fill a few segments.)
     main_cur.execute('''
@@ -37,7 +40,7 @@ def test_branch_behind(zenith_cli, pageserver: ZenithPageserver, postgres: Postg
     ''')
     main_cur.execute('SELECT pg_current_wal_insert_lsn()')
     lsn_b = main_cur.fetchone()[0]
-    print('LSN after 100100 rows: ' + lsn_b)
+    log.info(f'LSN after 100100 rows: {lsn_b}')

     # Branch at the point where only 100 rows were inserted
     zenith_cli.run(["branch", "test_branch_behind_hundred", "test_branch_behind@" + lsn_a])
@@ -52,7 +55,7 @@ def test_branch_behind(zenith_cli, pageserver: ZenithPageserver, postgres: Postg
     main_cur.execute('SELECT pg_current_wal_insert_lsn()')
     lsn_c = main_cur.fetchone()[0]
-    print('LSN after 200100 rows: ' + lsn_c)
+    log.info(f'LSN after 200100 rows: {lsn_c}')

     # Branch at the point where only 200 rows were inserted
     zenith_cli.run(["branch", "test_branch_behind_more", "test_branch_behind@" + lsn_b])
@@ -89,4 +92,4 @@ def test_branch_behind(zenith_cli, pageserver: ZenithPageserver, postgres: Postg
     try:
         zenith_cli.run(["branch", "test_branch_preinitdb", "test_branch_behind@0/42"])
     except subprocess.CalledProcessError:
-        print("Branch creation with pre-initdb LSN failed (as expected)")
+        log.info("Branch creation with pre-initdb LSN failed (as expected)")

View File

@@ -5,6 +5,10 @@ from contextlib import closing
 from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')
+
 pytest_plugins = ("fixtures.zenith_fixtures")
@@ -24,7 +28,7 @@ def test_clog_truncate(zenith_cli, pageserver: ZenithPageserver, postgres: Postg
     ]
     pg = postgres.create_start('test_clog_truncate', config_lines=config)
-    print('postgres is running on test_clog_truncate branch')
+    log.info('postgres is running on test_clog_truncate branch')

     # Install extension containing function needed for test
     pg.safe_psql('CREATE EXTENSION zenith_test_utils')
@@ -33,22 +37,22 @@ def test_clog_truncate(zenith_cli, pageserver: ZenithPageserver, postgres: Postg
     with closing(pg.connect()) as conn:
         with conn.cursor() as cur:
             cur.execute('select test_consume_xids(1000*1000*10);')
-            print('xids consumed')
+            log.info('xids consumed')

             # call a checkpoint to trigger TruncateSubtrans
             cur.execute('CHECKPOINT;')

             # ensure WAL flush
             cur.execute('select txid_current()')
-            print(cur.fetchone())
+            log.info(cur.fetchone())

     # wait for autovacuum to truncate the pg_xact
     # XXX Is it worth to add a timeout here?
     pg_xact_0000_path = os.path.join(pg.pg_xact_dir_path(), '0000')
-    print("pg_xact_0000_path = " + pg_xact_0000_path)
+    log.info(f"pg_xact_0000_path = {pg_xact_0000_path}")

     while os.path.isfile(pg_xact_0000_path):
-        print("file exists. wait for truncation. " "pg_xact_0000_path = " + pg_xact_0000_path)
+        log.info(f"file exists. wait for truncation. pg_xact_0000_path = {pg_xact_0000_path}")
        time.sleep(5)

     # checkpoint to advance latest lsn
@@ -59,14 +63,14 @@ def test_clog_truncate(zenith_cli, pageserver: ZenithPageserver, postgres: Postg
             lsn_after_truncation = cur.fetchone()[0]

     # create new branch after clog truncation and start a compute node on it
-    print('create branch at lsn_after_truncation ' + lsn_after_truncation)
+    log.info(f'create branch at lsn_after_truncation {lsn_after_truncation}')
     zenith_cli.run(
         ["branch", "test_clog_truncate_new", "test_clog_truncate@" + lsn_after_truncation])
     pg2 = postgres.create_start('test_clog_truncate_new')
-    print('postgres is running on test_clog_truncate_new branch')
+    log.info('postgres is running on test_clog_truncate_new branch')

     # check that new node doesn't contain truncated segment
     pg_xact_0000_path_new = os.path.join(pg2.pg_xact_dir_path(), '0000')
-    print("pg_xact_0000_path_new = " + pg_xact_0000_path_new)
+    log.info("pg_xact_0000_path_new = " + pg_xact_0000_path_new)
     assert os.path.isfile(pg_xact_0000_path_new) is False

View File

@@ -2,6 +2,10 @@ from contextlib import closing
 from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')
+
 pytest_plugins = ("fixtures.zenith_fixtures")
@@ -14,7 +18,7 @@ def test_config(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFact
     # change config
     pg = postgres.create_start('test_config', config_lines=['log_min_messages=debug1'])
-    print('postgres is running on test_config branch')
+    log.info('postgres is running on test_config branch')

     with closing(pg.connect()) as conn:
         with conn.cursor() as cur:

View File

@@ -4,6 +4,10 @@ import pathlib
 from contextlib import closing
 from fixtures.zenith_fixtures import ZenithPageserver, PostgresFactory, ZenithCli, check_restored_datadir_content
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')
+
 pytest_plugins = ("fixtures.zenith_fixtures")
@@ -19,7 +23,7 @@ def test_createdb(
     zenith_cli.run(["branch", "test_createdb", "empty"])
     pg = postgres.create_start('test_createdb')
-    print("postgres is running on 'test_createdb' branch")
+    log.info("postgres is running on 'test_createdb' branch")

     with closing(pg.connect()) as conn:
         with conn.cursor() as cur:
@@ -53,7 +57,7 @@ def test_dropdb(
     zenith_cli.run(["branch", "test_dropdb", "empty"])
     pg = postgres.create_start('test_dropdb')
-    print("postgres is running on 'test_dropdb' branch")
+    log.info("postgres is running on 'test_dropdb' branch")

     with closing(pg.connect()) as conn:
         with conn.cursor() as cur:
@@ -88,13 +92,13 @@ def test_dropdb(
     # Test that database subdir exists on the branch before drop
     dbpath = pathlib.Path(pg_before.pgdata_dir) / 'base' / str(dboid)
-    print(dbpath)
+    log.info(dbpath)

     assert os.path.isdir(dbpath) == True

     # Test that database subdir doesn't exist on the branch after drop
     dbpath = pathlib.Path(pg_after.pgdata_dir) / 'base' / str(dboid)
-    print(dbpath)
+    log.info(dbpath)

     assert os.path.isdir(dbpath) == False

View File

@@ -2,6 +2,10 @@ from contextlib import closing
 from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')
+
 pytest_plugins = ("fixtures.zenith_fixtures")
@@ -12,7 +16,7 @@ def test_createuser(zenith_cli, pageserver: ZenithPageserver, postgres: Postgres
     zenith_cli.run(["branch", "test_createuser", "empty"])
     pg = postgres.create_start('test_createuser')
-    print("postgres is running on 'test_createuser' branch")
+    log.info("postgres is running on 'test_createuser' branch")

     with closing(pg.connect()) as conn:
         with conn.cursor() as cur:

View File

@@ -1,5 +1,9 @@
 from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver, check_restored_datadir_content
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')
+
 pytest_plugins = ("fixtures.zenith_fixtures")
@@ -15,7 +19,7 @@ def test_multixact(pageserver: ZenithPageserver, postgres: PostgresFactory,
     zenith_cli.run(["branch", "test_multixact", "empty"])
     pg = postgres.create_start('test_multixact')
-    print("postgres is running on 'test_multixact' branch")
+    log.info("postgres is running on 'test_multixact' branch")

     pg_conn = pg.connect()
     cur = pg_conn.cursor()
@@ -55,7 +59,7 @@ def test_multixact(pageserver: ZenithPageserver, postgres: PostgresFactory,
     zenith_cli.run(["branch", "test_multixact_new", "test_multixact@" + lsn])
     pg_new = postgres.create_start('test_multixact_new')
-    print("postgres is running on 'test_multixact_new' branch")
+    log.info("postgres is running on 'test_multixact_new' branch")

     pg_new_conn = pg_new.connect()
     cur_new = pg_new_conn.cursor()

View File

@@ -2,6 +2,10 @@ from contextlib import closing
 from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')
+
 pytest_plugins = ("fixtures.zenith_fixtures")

 #
@@ -18,7 +22,7 @@ def test_old_request_lsn(zenith_cli, pageserver: ZenithPageserver, postgres: Pos
     # Create a branch for us
     zenith_cli.run(["branch", "test_old_request_lsn", "empty"])
     pg = postgres.create_start('test_old_request_lsn')
-    print('postgres is running on test_old_request_lsn branch')
+    log.info('postgres is running on test_old_request_lsn branch')

     pg_conn = pg.connect()
     cur = pg_conn.cursor()
@@ -46,7 +50,7 @@ def test_old_request_lsn(zenith_cli, pageserver: ZenithPageserver, postgres: Pos
         from pg_settings where name = 'shared_buffers'
     ''')
     row = cur.fetchone()
-    print(f'shared_buffers is {row[0]}, table size {row[1]}');
+    log.info(f'shared_buffers is {row[0]}, table size {row[1]}');
     assert int(row[0]) < int(row[1])

     cur.execute('VACUUM foo');

View File

@@ -6,6 +6,10 @@ from contextlib import closing
 from multiprocessing import Process, Value
 from fixtures.zenith_fixtures import WalAcceptorFactory, ZenithPageserver, PostgresFactory
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')
+
 pytest_plugins = ("fixtures.zenith_fixtures")

 # Check that dead minority doesn't prevent the commits: execute insert n_inserts
@@ -40,7 +44,7 @@ def test_pageserver_restart(zenith_cli, pageserver: ZenithPageserver, postgres:
         from pg_settings where name = 'shared_buffers'
     ''')
     row = cur.fetchone()
-    print("shared_buffers is {}, table size {}", row[0], row[1]);
+    log.info(f"shared_buffers is {row[0]}, table size {row[1]}");
     assert int(row[0]) < int(row[1])

     # Stop and restart pageserver. This is a more or less graceful shutdown, although

View File

@@ -1,5 +1,9 @@
 from fixtures.zenith_fixtures import PostgresFactory
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')
+
 pytest_plugins = ("fixtures.zenith_fixtures")
@@ -8,7 +12,7 @@ def test_pgbench(postgres: PostgresFactory, pg_bin, zenith_cli):
     zenith_cli.run(["branch", "test_pgbench", "empty"])
     pg = postgres.create_start('test_pgbench')
-    print("postgres is running on 'test_pgbench' branch")
+    log.info("postgres is running on 'test_pgbench' branch")

     connstr = pg.connstr()

View File

@@ -3,6 +3,10 @@ import pytest
 from contextlib import closing
 from fixtures.zenith_fixtures import ZenithPageserver, PostgresFactory
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')
+
 pytest_plugins = ("fixtures.zenith_fixtures")
@@ -30,7 +34,7 @@ def test_restart_compute(
     pg = postgres.create_start('test_restart_compute',
                                wal_acceptors=wal_acceptor_connstrs)
-    print("postgres is running on 'test_restart_compute' branch")
+    log.info("postgres is running on 'test_restart_compute' branch")

     with closing(pg.connect()) as conn:
         with conn.cursor() as cur:
@@ -39,7 +43,7 @@ def test_restart_compute(
             cur.execute('SELECT sum(key) FROM t')
             r = cur.fetchone()
             assert r == (5000050000, )
-            print("res = ", r)
+            log.info(f"res = {r}")

     # Remove data directory and restart
     pg.stop_and_destroy().create_start('test_restart_compute',
@@ -52,7 +56,7 @@ def test_restart_compute(
             cur.execute('SELECT sum(key) FROM t')
             r = cur.fetchone()
             assert r == (5000050000, )
-            print("res = ", r)
+            log.info(f"res = {r}")

             # Insert another row
             cur.execute("INSERT INTO t VALUES (100001, 'payload2')")
@@ -60,7 +64,7 @@ def test_restart_compute(
             r = cur.fetchone()
             assert r == (100001, )
-            print("res = ", r)
+            log.info(f"res = {r}")

     # Again remove data directory and restart
     pg.stop_and_destroy().create_start('test_restart_compute',
@@ -75,7 +79,7 @@ def test_restart_compute(
             r = cur.fetchone()
             assert r == (100001, )
-            print("res = ", r)
+            log.info(f"res = {r}")

     # And again remove data directory and restart
     pg.stop_and_destroy().create_start('test_restart_compute',
@@ -88,4 +92,4 @@ def test_restart_compute(
             r = cur.fetchone()
             assert r == (100001, )
-            print("res = ", r)
+            log.info(f"res = {r}")

View File

@@ -2,12 +2,16 @@ from contextlib import closing
 import psycopg2.extras
 import time;
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')
+
 pytest_plugins = ("fixtures.zenith_fixtures")

 def print_gc_result(row):
-    print("GC duration {elapsed} ms".format_map(row));
-    print(" REL total: {layer_relfiles_total}, needed_by_cutoff {layer_relfiles_needed_by_cutoff}, needed_by_branches: {layer_relfiles_needed_by_branches}, not_updated: {layer_relfiles_not_updated}, needed_as_tombstone {layer_relfiles_needed_as_tombstone}, removed: {layer_relfiles_removed}, dropped: {layer_relfiles_dropped}".format_map(row))
-    print(" NONREL total: {layer_nonrelfiles_total}, needed_by_cutoff {layer_nonrelfiles_needed_by_cutoff}, needed_by_branches: {layer_nonrelfiles_needed_by_branches}, not_updated: {layer_nonrelfiles_not_updated}, needed_as_tombstone {layer_nonrelfiles_needed_as_tombstone}, removed: {layer_nonrelfiles_removed}, dropped: {layer_nonrelfiles_dropped}".format_map(row))
+    log.info("GC duration {elapsed} ms".format_map(row));
+    log.info(" REL total: {layer_relfiles_total}, needed_by_cutoff {layer_relfiles_needed_by_cutoff}, needed_by_branches: {layer_relfiles_needed_by_branches}, not_updated: {layer_relfiles_not_updated}, needed_as_tombstone {layer_relfiles_needed_as_tombstone}, removed: {layer_relfiles_removed}, dropped: {layer_relfiles_dropped}".format_map(row))
+    log.info(" NONREL total: {layer_nonrelfiles_total}, needed_by_cutoff {layer_nonrelfiles_needed_by_cutoff}, needed_by_branches: {layer_nonrelfiles_needed_by_branches}, not_updated: {layer_nonrelfiles_not_updated}, needed_as_tombstone {layer_nonrelfiles_needed_as_tombstone}, removed: {layer_nonrelfiles_removed}, dropped: {layer_nonrelfiles_dropped}".format_map(row))

 #
@@ -35,7 +39,7 @@ def test_layerfiles_gc(zenith_cli, pageserver, postgres, pg_bin):
            cur.execute("select relfilenode from pg_class where oid = 'foo'::regclass");
            row = cur.fetchone();
-           print("relfilenode is {}", row[0]);
+           log.info(f"relfilenode is {row[0]}");

            # Run GC, to clear out any garbage left behind in the catalogs by
            # the CREATE TABLE command. We want to have a clean slate with no garbage
@@ -50,7 +54,7 @@ def test_layerfiles_gc(zenith_cli, pageserver, postgres, pg_bin):
            # update to confuse our numbers either.
            cur.execute("DELETE FROM foo")

-           print("Running GC before test")
+           log.info("Running GC before test")
            pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")
            row = pscur.fetchone()
            print_gc_result(row);
@@ -61,7 +65,7 @@ def test_layerfiles_gc(zenith_cli, pageserver, postgres, pg_bin):
            # Insert a row and run GC. Checkpoint should freeze the layer
            # so that there is only the most recent image layer left for the rel,
            # removing the old image and delta layer.
-           print("Inserting one row and running GC")
+           log.info("Inserting one row and running GC")
            cur.execute("INSERT INTO foo VALUES (1)")
            pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")
            row = pscur.fetchone()
@@ -73,7 +77,7 @@ def test_layerfiles_gc(zenith_cli, pageserver, postgres, pg_bin):
            # Insert two more rows and run GC.
            # This should create new image and delta layer file with the new contents, and
            # then remove the old one image and the just-created delta layer.
-           print("Inserting two more rows and running GC")
+           log.info("Inserting two more rows and running GC")
            cur.execute("INSERT INTO foo VALUES (2)")
            cur.execute("INSERT INTO foo VALUES (3)")
@@ -85,7 +89,7 @@ def test_layerfiles_gc(zenith_cli, pageserver, postgres, pg_bin):
            assert row['layer_relfiles_dropped'] == 0

            # Do it again. Should again create two new layer files and remove old ones.
-           print("Inserting two more rows and running GC")
+           log.info("Inserting two more rows and running GC")
            cur.execute("INSERT INTO foo VALUES (2)")
            cur.execute("INSERT INTO foo VALUES (3)")
@@ -97,7 +101,7 @@ def test_layerfiles_gc(zenith_cli, pageserver, postgres, pg_bin):
            assert row['layer_relfiles_dropped'] == 0

            # Run GC again, with no changes in the database. Should not remove anything.
-           print("Run GC again, with nothing to do")
+           log.info("Run GC again, with nothing to do")
            pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")
            row = pscur.fetchone()
            print_gc_result(row);
@@ -108,7 +112,7 @@ def test_layerfiles_gc(zenith_cli, pageserver, postgres, pg_bin):
            #
            # Test DROP TABLE checks that relation data and metadata was deleted by GC from object storage
            #
-           print("Drop table and run GC again");
+           log.info("Drop table and run GC again");
            cur.execute("DROP TABLE foo")
            pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")

View File

@@ -3,6 +3,9 @@ from uuid import UUID
 import psycopg2.extras
 from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')

 def test_timeline_size(
         zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFactory, pg_bin
@@ -15,7 +18,7 @@ def test_timeline_size(
     assert res["current_logical_size"] == res["current_logical_size_non_incremental"]

     pgmain = postgres.create_start("test_timeline_size")
-    print("postgres is running on 'test_timeline_size' branch")
+    log.info("postgres is running on 'test_timeline_size' branch")

     with closing(pgmain.connect()) as conn:
         with conn.cursor() as cur:

View File

@@ -2,6 +2,9 @@ import os
 from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver, PgBin
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')

 pytest_plugins = ("fixtures.zenith_fixtures")
@@ -13,7 +16,7 @@ def test_twophase(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFa
     zenith_cli.run(["branch", "test_twophase", "empty"])
     pg = postgres.create_start('test_twophase', config_lines=['max_prepared_transactions=5'])
-    print("postgres is running on 'test_twophase' branch")
+    log.info("postgres is running on 'test_twophase' branch")

     conn = pg.connect()
     cur = conn.cursor()
@@ -45,7 +48,7 @@ def test_twophase(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFa
     cur.execute('CHECKPOINT')

     twophase_files = os.listdir(pg.pg_twophase_dir_path())
-    print(twophase_files)
+    log.info(twophase_files)
     assert len(twophase_files) == 4

     cur.execute("COMMIT PREPARED 'insert_three'")
@@ -53,7 +56,7 @@ def test_twophase(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFa
     cur.execute('CHECKPOINT')

     twophase_files = os.listdir(pg.pg_twophase_dir_path())
-    print(twophase_files)
+    log.info(twophase_files)
     assert len(twophase_files) == 2

     # Create a branch with the transaction in prepared state
@@ -67,7 +70,7 @@ def test_twophase(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFa
     # Check that we restored only needed twophase files
     twophase_files2 = os.listdir(pg2.pg_twophase_dir_path())
-    print(twophase_files2)
+    log.info(twophase_files2)
     assert twophase_files2.sort() == twophase_files.sort()

     conn2 = pg2.connect()

View File

@@ -1,5 +1,9 @@
 from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')
+
 pytest_plugins = ("fixtures.zenith_fixtures")

 #
@@ -11,7 +15,7 @@ def test_vm_bit_clear(pageserver: ZenithPageserver, postgres: PostgresFactory, p
     zenith_cli.run(["branch", "test_vm_bit_clear", "empty"])
     pg = postgres.create_start('test_vm_bit_clear')
-    print("postgres is running on 'test_vm_bit_clear' branch")
+    log.info("postgres is running on 'test_vm_bit_clear' branch")

     pg_conn = pg.connect()
     cur = pg_conn.cursor()
@@ -63,7 +67,7 @@ def test_vm_bit_clear(pageserver: ZenithPageserver, postgres: PostgresFactory, p
     # server at the right point-in-time avoids that full-page image.
     pg_new = postgres.create_start('test_vm_bit_clear_new')
-    print("postgres is running on 'test_vm_bit_clear_new' branch")
+    log.info("postgres is running on 'test_vm_bit_clear_new' branch")

     pg_new_conn = pg_new.connect()
     cur_new = pg_new_conn.cursor()

View File

@@ -10,6 +10,10 @@ from multiprocessing import Process, Value
 from fixtures.zenith_fixtures import WalAcceptorFactory, ZenithPageserver, PostgresFactory, PgBin
 from fixtures.utils import lsn_to_hex, mkdir_if_needed
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')
+
 pytest_plugins = ("fixtures.zenith_fixtures")
@@ -284,10 +288,10 @@ def test_sync_safekeepers(repo_dir: str, pg_bin: PgBin, wa_factory: WalAcceptorF
         )
         lsn_hex = lsn_to_hex(res["inserted_wal"]["end_lsn"])
         lsn_after_append.append(lsn_hex)
-        print(f"safekeeper[{i}] lsn after append: {lsn_hex}")
+        log.info(f"safekeeper[{i}] lsn after append: {lsn_hex}")

     # run sync safekeepers
     lsn_after_sync = pg.sync_safekeepers()
-    print(f"lsn after sync = {lsn_after_sync}")
+    log.info(f"lsn after sync = {lsn_after_sync}")

     assert all(lsn_after_sync == lsn for lsn in lsn_after_append)

View File

@@ -4,7 +4,10 @@ import random
 from fixtures.zenith_fixtures import WalAcceptor, WalAcceptorFactory, ZenithPageserver, PostgresFactory, Postgres
 from typing import List
-from fixtures.utils import debug_print
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root.wal_acceptor_async')
+
 pytest_plugins = ("fixtures.zenith_fixtures")
@@ -63,18 +66,18 @@ class WorkerStats(object):
         self.counters[worker_id] += 1

     def check_progress(self):
-        debug_print("Workers progress: {}".format(self.counters))
+        log.debug("Workers progress: {}".format(self.counters))

         # every worker should finish at least one tx
         assert all(cnt > 0 for cnt in self.counters)

         progress = sum(self.counters)
-        print('All workers made {} transactions'.format(progress))
+        log.info('All workers made {} transactions'.format(progress))

 async def run_random_worker(stats: WorkerStats, pg: Postgres, worker_id, n_accounts, max_transfer):
     pg_conn = await pg.connect_async()
-    debug_print('Started worker {}'.format(worker_id))
+    log.debug('Started worker {}'.format(worker_id))

     while stats.running:
         from_uid = random.randint(0, n_accounts - 1)
@@ -84,9 +87,9 @@ async def run_random_worker(stats: WorkerStats, pg: Postgres, worker_id, n_accou
         await bank_transfer(pg_conn, from_uid, to_uid, amount)
         stats.inc_progress(worker_id)
-        debug_print('Executed transfer({}) {} => {}'.format(amount, from_uid, to_uid))
+        log.debug('Executed transfer({}) {} => {}'.format(amount, from_uid, to_uid))

-    debug_print('Finished worker {}'.format(worker_id))
+    log.debug('Finished worker {}'.format(worker_id))

     await pg_conn.close()
@@ -134,7 +137,7 @@ async def run_restarts_under_load(pg: Postgres, acceptors: List[WalAcceptor], n_
         victim.start()

-    print('Iterations are finished, exiting coroutines...')
+    log.info('Iterations are finished, exiting coroutines...')
     stats.running = False
     # await all workers
     await asyncio.gather(*workers)

View File

@@ -3,6 +3,10 @@ import os
 from fixtures.utils import mkdir_if_needed
 from fixtures.zenith_fixtures import PageserverPort, PostgresFactory, check_restored_datadir_content
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')
+
 pytest_plugins = ("fixtures.zenith_fixtures")
@@ -38,7 +42,7 @@ def test_zenith_regress(postgres: PostgresFactory, pg_bin, zenith_cli, test_outp
         '--inputdir={}'.format(src_path),
     ]
-    print(pg_regress_command)
+    log.info(pg_regress_command)

     env = {
         'PGPORT': str(pg.port),
         'PGUSER': pg.username,

View File

@@ -1,5 +1,3 @@
-from pprint import pprint
-
 import os
 import re
 import timeit

View File

@@ -0,0 +1,37 @@
+import logging
+import logging.config
+import time
+
+# timestamp in UTC+-00:00 aka GMT
+class UTCFormatter(logging.Formatter):
+    converter = time.gmtime
+
+LOGGING = {
+    "version": 1,
+    "formatters": {
+        "defaultFormatter": {
+            "()": UTCFormatter,
+            "format": "%(asctime)s.%(msecs)-3d %(levelname)s [%(filename)s:%(lineno)d] %(message)s",
+            "datefmt": "%Y-%m-%d %H:%M:%S"
+        }
+    },
+    "handlers": {
+        "console": {
+            "class": "logging.StreamHandler",
+            "formatter": "defaultFormatter",
+            "level": "DEBUG",
+            "stream": "ext://sys.stderr"
+        }
+    },
+    "loggers": {
+        "root": {
+            "level": "DEBUG",
+            "handlers": ["console"]
+        },
+        "root.wal_acceptor_async": {
+            "level": "INFO" # lot of logs on DEBUG level
+        }
+    }
+}
+
+logging.config.dictConfig(LOGGING)
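Taken together with the test changes above, hooking a test module into this configuration is a three-line pattern. A minimal sketch, using the names from this PR:

import logging
import fixtures.log_helper  # importing the module applies dictConfig as a side effect
log = logging.getLogger('root')  # or 'root.wal_acceptor_async' for the quieter child logger

log.info('shown: goes to stderr with a UTC timestamp')
log.debug('suppressed under root.wal_acceptor_async, which is capped at INFO')

Note that the console handler is attached to the logger literally named 'root', not to Python's actual root logger, so only getLogger('root') and its children ('root.*') pick up this handler and formatter.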

View File

@@ -3,6 +3,9 @@ import subprocess
 from typing import Any, List
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')

 def get_self_dir() -> str:
     """ Get the path to the directory where this script lives. """
@@ -39,7 +42,7 @@ def subprocess_capture(capture_dir: str, cmd: List[str], **kwargs: Any) -> str:
     with open(stdout_filename, 'w') as stdout_f:
         with open(stderr_filename, 'w') as stderr_f:
-            print('(capturing output to "{}.stdout")'.format(base))
+            log.info('(capturing output to "{}.stdout")'.format(base))
             subprocess.run(cmd, **kwargs, stdout=stdout_f, stderr=stderr_f)

     return basepath
@@ -58,14 +61,6 @@ def global_counter() -> int:
     _global_counter += 1
     return _global_counter

-def debug_print(*args, **kwargs) -> None:
-    """ Print to the console if TEST_DEBUG_PRINT is set in env.
-
-    All parameters are passed to print().
-    """
-    if os.environ.get('TEST_DEBUG_PRINT') is not None:
-        print(*args, **kwargs)
-

 def lsn_to_hex(num: int) -> str:
     """ Convert lsn from int to standard hex notation. """
     return "{:X}/{:X}".format(num >> 32, num & 0xffffffff)

View File

@@ -27,6 +27,11 @@ from typing_extensions import Literal
 import requests

 from .utils import (get_self_dir, mkdir_if_needed, subprocess_capture)
+
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')
+
 """
 This file contains pytest fixtures. A fixture is a test resource that can be
 summoned by placing its name in the test's arguments.
@@ -186,13 +191,13 @@ class ZenithCli:
         >>> result = zenith_cli.run(...)
         >>> assert result.stderr == ""
-        >>> print(result.stdout)
+        >>> log.info(result.stdout)
         """

         assert type(arguments) == list

         args = [self.bin_zenith] + arguments
-        print('Running command "{}"'.format(' '.join(args)))
+        log.info('Running command "{}"'.format(' '.join(args)))

         # Intercept CalledProcessError and print more info
         try:
@@ -209,7 +214,7 @@ class ZenithCli:
             stdout: {exc.stdout}
             stderr: {exc.stderr}
             """
-            print(msg)
+            log.info(msg)
             raise Exception(msg) from exc
@@ -417,7 +422,7 @@ class ZenithPageserver(PgProtocol):
 def pageserver_port(port_distributor: PortDistributor) -> PageserverPort:
     pg = port_distributor.get_port()
     http = port_distributor.get_port()
-    print(f"pageserver_port: pg={pg} http={http}")
+    log.info(f"pageserver_port: pg={pg} http={http}")
     return PageserverPort(pg=pg, http=http)
@@ -441,7 +446,7 @@ def pageserver(zenith_cli: ZenithCli, repo_dir: str, pageserver_port: Pageserver
     yield ps

     # After the yield comes any cleanup code we need.
-    print('Starting pageserver cleanup')
+    log.info('Starting pageserver cleanup')
     ps.stop()

 class PgBin:
@@ -479,7 +484,7 @@ class PgBin:
         """
         self._fixpath(command)
-        print('Running command "{}"'.format(' '.join(command)))
+        log.info('Running command "{}"'.format(' '.join(command)))
         env = self._build_env(env)
         subprocess.run(command, env=env, cwd=cwd, check=True)
@@ -496,7 +501,7 @@ class PgBin:
         """
         self._fixpath(command)
-        print('Running command "{}"'.format(' '.join(command)))
+        log.info('Running command "{}"'.format(' '.join(command)))
         env = self._build_env(env)
         return subprocess_capture(self.log_dir, command, env=env, cwd=cwd, check=True, **kwargs)
@@ -564,12 +569,12 @@ class Postgres(PgProtocol):
         assert self.branch is not None

-        print(f"Starting postgres on branch {self.branch}")
+        log.info(f"Starting postgres on branch {self.branch}")

         run_result = self.zenith_cli.run(['pg', 'start', self.branch, f'--tenantid={self.tenant_id}', f'--port={self.port}'])
         self.running = True

-        print(f"stdout: {run_result.stdout}")
+        log.info(f"stdout: {run_result.stdout}")

         return self
@@ -784,7 +789,7 @@ def postgres(zenith_cli: ZenithCli, initial_tenant: str, repo_dir: str, pg_bin:
     yield pgfactory

     # After the yield comes any cleanup code we need.
-    print('Starting postgres cleanup')
+    log.info('Starting postgres cleanup')
     pgfactory.stop_all()

 def read_pid(path: Path):
@@ -815,7 +820,7 @@ class WalAcceptor:
         # Tell page server it can receive WAL from this WAL safekeeper
         cmd.extend(["--pageserver", f"localhost:{self.pageserver_port}"])
         cmd.extend(["--recall", "1 second"])
-        print('Running command "{}"'.format(' '.join(cmd)))
+        log.info('Running command "{}"'.format(' '.join(cmd)))
         env = {'PAGESERVER_AUTH_TOKEN': self.auth_token} if self.auth_token else None
         subprocess.run(cmd, check=True, env=env)
@@ -844,10 +849,10 @@ class WalAcceptor:
         return pid

     def stop(self) -> 'WalAcceptor':
-        print('Stopping wal acceptor {}'.format(self.num))
+        log.info('Stopping wal acceptor {}'.format(self.num))
         pid = self.get_pid()
         if pid is None:
-            print("Wal acceptor {} is not running".format(self.num))
+            log.info("Wal acceptor {} is not running".format(self.num))
             return self

         try:
@@ -873,10 +878,10 @@ class WalAcceptor:
         conn.autocommit = True
         with conn.cursor() as cur:
             request_json = json.dumps(request)
-            print(f"JSON_CTRL request on port {self.port}: {request_json}")
+            log.info(f"JSON_CTRL request on port {self.port}: {request_json}")
             cur.execute("JSON_CTRL " + request_json)
             all = cur.fetchall()
-            print(f"JSON_CTRL response: {all[0][0]}")
+            log.info(f"JSON_CTRL response: {all[0][0]}")
             return json.loads(all[0][0])

 class WalAcceptorFactory:
@@ -934,7 +939,7 @@ def wa_factory(zenith_binpath: str, repo_dir: str, pageserver_port: PageserverPo
     )
     yield wafactory
     # After the yield comes any cleanup code we need.
-    print('Starting wal acceptors cleanup')
+    log.info('Starting wal acceptors cleanup')
     wafactory.stop_all()
@@ -943,7 +948,7 @@ def base_dir() -> str:
     """ find the base directory (currently this is the git root) """
     base_dir = os.path.normpath(os.path.join(get_self_dir(), '../..'))
-    print('\nbase_dir is', base_dir)
+    log.info(f'base_dir is {base_dir}')
     return base_dir
@@ -972,7 +977,7 @@ def test_output_dir(request: Any, top_output_dir: str) -> str:
         test_name = 'shared'
     test_output_dir = os.path.join(top_output_dir, test_name)
-    print('test_output_dir is', test_output_dir)
+    log.info(f'test_output_dir is {test_output_dir}')
     shutil.rmtree(test_output_dir, ignore_errors=True)
     mkdir_if_needed(test_output_dir)
     return test_output_dir
@@ -1014,7 +1019,7 @@ def pg_distrib_dir(base_dir: str) -> str:
         pg_dir = env_postgres_bin
     else:
         pg_dir = os.path.normpath(os.path.join(base_dir, DEFAULT_POSTGRES_DIR))
-    print('postgres dir is', pg_dir)
+    log.info(f'postgres dir is {pg_dir}')
     if not os.path.exists(os.path.join(pg_dir, 'bin/postgres')):
         raise Exception('postgres not found at "{}"'.format(pg_dir))
     return pg_dir
@@ -1053,7 +1058,7 @@ def list_files_to_compare(pgdata_dir: str):
             pgdata_files.append(rel_file)

     pgdata_files.sort()
-    print(pgdata_files)
+    log.info(pgdata_files)
     return pgdata_files

 # pg is the existing and running compute node, that we want to compare with a basebackup
@@ -1099,9 +1104,9 @@ def check_restored_datadir_content(zenith_cli: ZenithCli, test_output_dir: str,
                                                    restored_dir_path,
                                                    pgdata_files,
                                                    shallow=False)
-    print('filecmp result mismatch and error lists:')
-    print(mismatch)
-    print(error)
+    log.info('filecmp result mismatch and error lists:')
+    log.info(mismatch)
+    log.info(error)

     for f in mismatch:

View File

@@ -2,6 +2,10 @@ import os
 from contextlib import closing
 from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')
+
 pytest_plugins = ("fixtures.zenith_fixtures", "fixtures.benchmark_fixture")

 def get_timeline_size(repo_dir: str, tenantid: str, timelineid: str):
@@ -31,7 +35,7 @@ def test_bulk_insert(postgres: PostgresFactory, pageserver: ZenithPageserver, pg
     zenith_cli.run(["branch", "test_bulk_insert", "empty"])
     pg = postgres.create_start('test_bulk_insert')
-    print("postgres is running on 'test_bulk_insert' branch")
+    log.info("postgres is running on 'test_bulk_insert' branch")

     # Open a connection directly to the page server that we'll use to force
     # flushing the layers to disk

View File

@@ -2,6 +2,10 @@ import os
 from contextlib import closing
 from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')
+
 pytest_plugins = ("fixtures.zenith_fixtures", "fixtures.benchmark_fixture")

 def get_timeline_size(repo_dir: str, tenantid: str, timelineid: str):
@@ -31,7 +35,7 @@ def test_pgbench(postgres: PostgresFactory, pageserver: ZenithPageserver, pg_bin
     zenith_cli.run(["branch", "test_pgbench_perf", "empty"])
     pg = postgres.create_start('test_pgbench_perf')
-    print("postgres is running on 'test_pgbench_perf' branch")
+    log.info("postgres is running on 'test_pgbench_perf' branch")

     # Open a connection directly to the page server that we'll use to force
     # flushing the layers to disk

View File

@@ -1,6 +1,10 @@
 import pytest
 import os
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')
+
 pytest_plugins = ("fixtures.zenith_fixtures")

 """
 Use this test to see what happens when tests fail.
@@ -22,7 +26,7 @@ def test_broken(zenith_cli, pageserver, postgres, pg_bin):
     zenith_cli.run(["branch", "test_broken", "empty"])
     postgres.create_start("test_broken")
-    print('postgres is running')
-    print('THIS NEXT COMMAND WILL FAIL:')
+    log.info('postgres is running')
+    log.info('THIS NEXT COMMAND WILL FAIL:')
     pg_bin.run('pgbench -i_am_a_broken_test'.split())