Use logging in Python tests

Arthur Petukhovsky
2021-09-28 13:17:51 +03:00
parent fb05e4cb0b
commit 3fdd85bcb8
23 changed files with 201 additions and 94 deletions
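
The change is mechanical: every test module gains the same three-line preamble, and each print call becomes a call on the shared 'root' logger. In outline:

import logging
import fixtures.log_helper  # configures loggers
log = logging.getLogger('root')

# was: print("postgres is running on 'test_branch_behind' branch")
log.info("postgres is running on 'test_branch_behind' branch")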

View File

@@ -1,6 +1,9 @@
import subprocess
from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver
import logging
import fixtures.log_helper # configures loggers
log = logging.getLogger('root')
pytest_plugins = ("fixtures.zenith_fixtures")
@@ -13,7 +16,7 @@ def test_branch_behind(zenith_cli, pageserver: ZenithPageserver, postgres: Postg
zenith_cli.run(["branch", "test_branch_behind", "empty"])
pgmain = postgres.create_start('test_branch_behind')
print("postgres is running on 'test_branch_behind' branch")
log.info("postgres is running on 'test_branch_behind' branch")
main_pg_conn = pgmain.connect()
main_cur = main_pg_conn.cursor()
@@ -27,7 +30,7 @@ def test_branch_behind(zenith_cli, pageserver: ZenithPageserver, postgres: Postg
''')
main_cur.execute('SELECT pg_current_wal_insert_lsn()')
lsn_a = main_cur.fetchone()[0]
print('LSN after 100 rows: ' + lsn_a)
log.info('LSN after 100 rows: ' + lsn_a)
# Insert some more rows. (This generates enough WAL to fill a few segments.)
main_cur.execute('''
@@ -37,7 +40,7 @@ def test_branch_behind(zenith_cli, pageserver: ZenithPageserver, postgres: Postg
''')
main_cur.execute('SELECT pg_current_wal_insert_lsn()')
lsn_b = main_cur.fetchone()[0]
print('LSN after 100100 rows: ' + lsn_b)
log.info('LSN after 100100 rows: ' + lsn_b)
# Branch at the point where only 100 rows were inserted
zenith_cli.run(["branch", "test_branch_behind_hundred", "test_branch_behind@" + lsn_a])
@@ -52,7 +55,7 @@ def test_branch_behind(zenith_cli, pageserver: ZenithPageserver, postgres: Postg
main_cur.execute('SELECT pg_current_wal_insert_lsn()')
lsn_c = main_cur.fetchone()[0]
print('LSN after 200100 rows: ' + lsn_c)
log.info('LSN after 200100 rows: ' + lsn_c)
# Branch at the point where only 200 rows were inserted
zenith_cli.run(["branch", "test_branch_behind_more", "test_branch_behind@" + lsn_b])
@@ -89,4 +92,4 @@ def test_branch_behind(zenith_cli, pageserver: ZenithPageserver, postgres: Postg
try:
zenith_cli.run(["branch", "test_branch_preinitdb", "test_branch_behind@0/42"])
except subprocess.CalledProcessError:
print("Branch creation with pre-initdb LSN failed (as expected)")
log.info("Branch creation with pre-initdb LSN failed (as expected)")

View File

@@ -5,6 +5,10 @@ from contextlib import closing
from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver
import logging
import fixtures.log_helper # configures loggers
log = logging.getLogger('root')
pytest_plugins = ("fixtures.zenith_fixtures")
@@ -24,7 +28,7 @@ def test_clog_truncate(zenith_cli, pageserver: ZenithPageserver, postgres: Postg
]
pg = postgres.create_start('test_clog_truncate', config_lines=config)
print('postgres is running on test_clog_truncate branch')
log.info('postgres is running on test_clog_truncate branch')
# Install extension containing function needed for test
pg.safe_psql('CREATE EXTENSION zenith_test_utils')
@@ -33,22 +37,22 @@ def test_clog_truncate(zenith_cli, pageserver: ZenithPageserver, postgres: Postg
with closing(pg.connect()) as conn:
with conn.cursor() as cur:
cur.execute('select test_consume_xids(1000*1000*10);')
print('xids consumed')
log.info('xids consumed')
# call a checkpoint to trigger TruncateSubtrans
cur.execute('CHECKPOINT;')
# ensure WAL flush
cur.execute('select txid_current()')
print(cur.fetchone())
log.info(cur.fetchone())
# wait for autovacuum to truncate the pg_xact
# XXX Is it worth adding a timeout here?
pg_xact_0000_path = os.path.join(pg.pg_xact_dir_path(), '0000')
print("pg_xact_0000_path = " + pg_xact_0000_path)
log.info("pg_xact_0000_path = " + pg_xact_0000_path)
while os.path.isfile(pg_xact_0000_path):
print("file exists. wait for truncation. " "pg_xact_0000_path = " + pg_xact_0000_path)
log.info("file exists. wait for truncation. " "pg_xact_0000_path = " + pg_xact_0000_path)
time.sleep(5)
# checkpoint to advance latest lsn
@@ -59,14 +63,14 @@ def test_clog_truncate(zenith_cli, pageserver: ZenithPageserver, postgres: Postg
lsn_after_truncation = cur.fetchone()[0]
# create new branch after clog truncation and start a compute node on it
print('create branch at lsn_after_truncation ' + lsn_after_truncation)
log.info('create branch at lsn_after_truncation ' + lsn_after_truncation)
zenith_cli.run(
["branch", "test_clog_truncate_new", "test_clog_truncate@" + lsn_after_truncation])
pg2 = postgres.create_start('test_clog_truncate_new')
print('postgres is running on test_clog_truncate_new branch')
log.info('postgres is running on test_clog_truncate_new branch')
# check that new node doesn't contain truncated segment
pg_xact_0000_path_new = os.path.join(pg2.pg_xact_dir_path(), '0000')
print("pg_xact_0000_path_new = " + pg_xact_0000_path_new)
log.info("pg_xact_0000_path_new = " + pg_xact_0000_path_new)
assert os.path.isfile(pg_xact_0000_path_new) is False

View File

@@ -2,6 +2,10 @@ from contextlib import closing
from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver
import logging
import fixtures.log_helper # configures loggers
log = logging.getLogger('root')
pytest_plugins = ("fixtures.zenith_fixtures")
@@ -14,7 +18,7 @@ def test_config(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFact
# change config
pg = postgres.create_start('test_config', config_lines=['log_min_messages=debug1'])
print('postgres is running on test_config branch')
log.info('postgres is running on test_config branch')
with closing(pg.connect()) as conn:
with conn.cursor() as cur:

View File

@@ -4,6 +4,10 @@ import pathlib
from contextlib import closing
from fixtures.zenith_fixtures import ZenithPageserver, PostgresFactory, ZenithCli, check_restored_datadir_content
import logging
import fixtures.log_helper # configures loggers
log = logging.getLogger('root')
pytest_plugins = ("fixtures.zenith_fixtures")
@@ -19,7 +23,7 @@ def test_createdb(
zenith_cli.run(["branch", "test_createdb", "empty"])
pg = postgres.create_start('test_createdb')
print("postgres is running on 'test_createdb' branch")
log.info("postgres is running on 'test_createdb' branch")
with closing(pg.connect()) as conn:
with conn.cursor() as cur:
@@ -53,7 +57,7 @@ def test_dropdb(
zenith_cli.run(["branch", "test_dropdb", "empty"])
pg = postgres.create_start('test_dropdb')
print("postgres is running on 'test_dropdb' branch")
log.info("postgres is running on 'test_dropdb' branch")
with closing(pg.connect()) as conn:
with conn.cursor() as cur:
@@ -88,13 +92,13 @@ def test_dropdb(
# Test that database subdir exists on the branch before drop
dbpath = pathlib.Path(pg_before.pgdata_dir) / 'base' / str(dboid)
print(dbpath)
log.info(dbpath)
assert os.path.isdir(dbpath) == True
# Test that database subdir doesn't exist on the branch after drop
dbpath = pathlib.Path(pg_after.pgdata_dir) / 'base' / str(dboid)
print(dbpath)
log.info(dbpath)
assert os.path.isdir(dbpath) == False

View File

@@ -2,6 +2,10 @@ from contextlib import closing
from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver
import logging
import fixtures.log_helper # configures loggers
log = logging.getLogger('root')
pytest_plugins = ("fixtures.zenith_fixtures")
@@ -12,7 +16,7 @@ def test_createuser(zenith_cli, pageserver: ZenithPageserver, postgres: Postgres
zenith_cli.run(["branch", "test_createuser", "empty"])
pg = postgres.create_start('test_createuser')
print("postgres is running on 'test_createuser' branch")
log.info("postgres is running on 'test_createuser' branch")
with closing(pg.connect()) as conn:
with conn.cursor() as cur:

View File

@@ -1,5 +1,9 @@
from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver, check_restored_datadir_content
import logging
import fixtures.log_helper # configures loggers
log = logging.getLogger('root')
pytest_plugins = ("fixtures.zenith_fixtures")
@@ -15,7 +19,7 @@ def test_multixact(pageserver: ZenithPageserver, postgres: PostgresFactory,
zenith_cli.run(["branch", "test_multixact", "empty"])
pg = postgres.create_start('test_multixact')
print("postgres is running on 'test_multixact' branch")
log.info("postgres is running on 'test_multixact' branch")
pg_conn = pg.connect()
cur = pg_conn.cursor()
@@ -55,7 +59,7 @@ def test_multixact(pageserver: ZenithPageserver, postgres: PostgresFactory,
zenith_cli.run(["branch", "test_multixact_new", "test_multixact@" + lsn])
pg_new = postgres.create_start('test_multixact_new')
print("postgres is running on 'test_multixact_new' branch")
log.info("postgres is running on 'test_multixact_new' branch")
pg_new_conn = pg_new.connect()
cur_new = pg_new_conn.cursor()

View File

@@ -2,6 +2,10 @@ from contextlib import closing
from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver
import logging
import fixtures.log_helper # configures loggers
log = logging.getLogger('root')
pytest_plugins = ("fixtures.zenith_fixtures")
#
@@ -18,7 +22,7 @@ def test_old_request_lsn(zenith_cli, pageserver: ZenithPageserver, postgres: Pos
# Create a branch for us
zenith_cli.run(["branch", "test_old_request_lsn", "empty"])
pg = postgres.create_start('test_old_request_lsn')
print('postgres is running on test_old_request_lsn branch')
log.info('postgres is running on test_old_request_lsn branch')
pg_conn = pg.connect()
cur = pg_conn.cursor()
@@ -46,7 +50,7 @@ def test_old_request_lsn(zenith_cli, pageserver: ZenithPageserver, postgres: Pos
from pg_settings where name = 'shared_buffers'
''')
row = cur.fetchone()
print(f'shared_buffers is {row[0]}, table size {row[1]}');
log.info(f'shared_buffers is {row[0]}, table size {row[1]}')
assert int(row[0]) < int(row[1])
cur.execute('VACUUM foo');

View File

@@ -6,6 +6,10 @@ from contextlib import closing
from multiprocessing import Process, Value
from fixtures.zenith_fixtures import WalAcceptorFactory, ZenithPageserver, PostgresFactory
import logging
import fixtures.log_helper # configures loggers
log = logging.getLogger('root')
pytest_plugins = ("fixtures.zenith_fixtures")
# Check that dead minority doesn't prevent the commits: execute insert n_inserts
@@ -40,7 +44,7 @@ def test_pageserver_restart(zenith_cli, pageserver: ZenithPageserver, postgres:
from pg_settings where name = 'shared_buffers'
''')
row = cur.fetchone()
print("shared_buffers is {}, table size {}", row[0], row[1]);
log.info("shared_buffers is {}, table size {}", row[0], row[1]);
assert int(row[0]) < int(row[1])
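# Unlike print, log.info treats extra positional arguments as %-style format
# arguments and interpolates them lazily, only if the INFO level is enabled,
# so values are passed alongside a placeholder rather than concatenated:
#   log.info("shared_buffers is %s, table size %s", row[0], row[1])
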
# Stop and restart pageserver. This is a more or less graceful shutdown, although

View File

@@ -1,5 +1,9 @@
from fixtures.zenith_fixtures import PostgresFactory
import logging
import fixtures.log_helper # configures loggers
log = logging.getLogger('root')
pytest_plugins = ("fixtures.zenith_fixtures")
@@ -8,7 +12,7 @@ def test_pgbench(postgres: PostgresFactory, pg_bin, zenith_cli):
zenith_cli.run(["branch", "test_pgbench", "empty"])
pg = postgres.create_start('test_pgbench')
print("postgres is running on 'test_pgbench' branch")
log.info("postgres is running on 'test_pgbench' branch")
connstr = pg.connstr()

View File

@@ -3,6 +3,10 @@ import pytest
from contextlib import closing
from fixtures.zenith_fixtures import ZenithPageserver, PostgresFactory
import logging
import fixtures.log_helper # configures loggers
log = logging.getLogger('root')
pytest_plugins = ("fixtures.zenith_fixtures")
@@ -30,7 +34,7 @@ def test_restart_compute(
pg = postgres.create_start('test_restart_compute',
wal_acceptors=wal_acceptor_connstrs)
print("postgres is running on 'test_restart_compute' branch")
log.info("postgres is running on 'test_restart_compute' branch")
with closing(pg.connect()) as conn:
with conn.cursor() as cur:
@@ -39,7 +43,7 @@ def test_restart_compute(
cur.execute('SELECT sum(key) FROM t')
r = cur.fetchone()
assert r == (5000050000, )
print("res = ", r)
log.info("res = ", r)
# Remove data directory and restart
pg.stop_and_destroy().create_start('test_restart_compute',
@@ -52,7 +56,7 @@ def test_restart_compute(
cur.execute('SELECT sum(key) FROM t')
r = cur.fetchone()
assert r == (5000050000, )
print("res = ", r)
log.info("res = ", r)
# Insert another row
cur.execute("INSERT INTO t VALUES (100001, 'payload2')")
@@ -60,7 +64,7 @@ def test_restart_compute(
r = cur.fetchone()
assert r == (100001, )
print("res = ", r)
log.info("res = ", r)
# Again remove data directory and restart
pg.stop_and_destroy().create_start('test_restart_compute',
@@ -75,7 +79,7 @@ def test_restart_compute(
r = cur.fetchone()
assert r == (100001, )
print("res = ", r)
log.info("res = ", r)
# And again remove data directory and restart
pg.stop_and_destroy().create_start('test_restart_compute',
@@ -88,4 +92,4 @@ def test_restart_compute(
r = cur.fetchone()
assert r == (100001, )
print("res = ", r)
log.info("res = ", r)

View File

@@ -2,12 +2,16 @@ from contextlib import closing
import psycopg2.extras
import time;
import logging
import fixtures.log_helper # configures loggers
log = logging.getLogger('root')
pytest_plugins = ("fixtures.zenith_fixtures")
def print_gc_result(row):
print("GC duration {elapsed} ms".format_map(row));
print(" REL total: {layer_relfiles_total}, needed_by_cutoff {layer_relfiles_needed_by_cutoff}, needed_by_branches: {layer_relfiles_needed_by_branches}, not_updated: {layer_relfiles_not_updated}, needed_as_tombstone {layer_relfiles_needed_as_tombstone}, removed: {layer_relfiles_removed}, dropped: {layer_relfiles_dropped}".format_map(row))
print(" NONREL total: {layer_nonrelfiles_total}, needed_by_cutoff {layer_nonrelfiles_needed_by_cutoff}, needed_by_branches: {layer_nonrelfiles_needed_by_branches}, not_updated: {layer_nonrelfiles_not_updated}, needed_as_tombstone {layer_nonrelfiles_needed_as_tombstone}, removed: {layer_nonrelfiles_removed}, dropped: {layer_nonrelfiles_dropped}".format_map(row))
log.info("GC duration {elapsed} ms".format_map(row));
log.info(" REL total: {layer_relfiles_total}, needed_by_cutoff {layer_relfiles_needed_by_cutoff}, needed_by_branches: {layer_relfiles_needed_by_branches}, not_updated: {layer_relfiles_not_updated}, needed_as_tombstone {layer_relfiles_needed_as_tombstone}, removed: {layer_relfiles_removed}, dropped: {layer_relfiles_dropped}".format_map(row))
log.info(" NONREL total: {layer_nonrelfiles_total}, needed_by_cutoff {layer_nonrelfiles_needed_by_cutoff}, needed_by_branches: {layer_nonrelfiles_needed_by_branches}, not_updated: {layer_nonrelfiles_not_updated}, needed_as_tombstone {layer_nonrelfiles_needed_as_tombstone}, removed: {layer_nonrelfiles_removed}, dropped: {layer_nonrelfiles_dropped}".format_map(row))
#
@@ -35,7 +39,7 @@ def test_layerfiles_gc(zenith_cli, pageserver, postgres, pg_bin):
cur.execute("select relfilenode from pg_class where oid = 'foo'::regclass");
row = cur.fetchone();
print("relfilenode is {}", row[0]);
log.info("relfilenode is {}", row[0]);
# Run GC, to clear out any garbage left behind in the catalogs by
# the CREATE TABLE command. We want to have a clean slate with no garbage
@@ -50,7 +54,7 @@ def test_layerfiles_gc(zenith_cli, pageserver, postgres, pg_bin):
# update to confuse our numbers either.
cur.execute("DELETE FROM foo")
print("Running GC before test")
log.info("Running GC before test")
pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")
row = pscur.fetchone()
print_gc_result(row);
@@ -61,7 +65,7 @@ def test_layerfiles_gc(zenith_cli, pageserver, postgres, pg_bin):
# Insert a row and run GC. Checkpoint should freeze the layer
# so that there is only the most recent image layer left for the rel,
# removing the old image and delta layer.
print("Inserting one row and running GC")
log.info("Inserting one row and running GC")
cur.execute("INSERT INTO foo VALUES (1)")
pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")
row = pscur.fetchone()
@@ -73,7 +77,7 @@ def test_layerfiles_gc(zenith_cli, pageserver, postgres, pg_bin):
# Insert two more rows and run GC.
# This should create new image and delta layer file with the new contents, and
# then remove the old one image and the just-created delta layer.
print("Inserting two more rows and running GC")
log.info("Inserting two more rows and running GC")
cur.execute("INSERT INTO foo VALUES (2)")
cur.execute("INSERT INTO foo VALUES (3)")
@@ -85,7 +89,7 @@ def test_layerfiles_gc(zenith_cli, pageserver, postgres, pg_bin):
assert row['layer_relfiles_dropped'] == 0
# Do it again. Should again create two new layer files and remove old ones.
print("Inserting two more rows and running GC")
log.info("Inserting two more rows and running GC")
cur.execute("INSERT INTO foo VALUES (2)")
cur.execute("INSERT INTO foo VALUES (3)")
@@ -97,7 +101,7 @@ def test_layerfiles_gc(zenith_cli, pageserver, postgres, pg_bin):
assert row['layer_relfiles_dropped'] == 0
# Run GC again, with no changes in the database. Should not remove anything.
print("Run GC again, with nothing to do")
log.info("Run GC again, with nothing to do")
pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")
row = pscur.fetchone()
print_gc_result(row);
@@ -108,7 +112,7 @@ def test_layerfiles_gc(zenith_cli, pageserver, postgres, pg_bin):
#
# Test DROP TABLE checks that relation data and metadata was deleted by GC from object storage
#
print("Drop table and run GC again");
log.info("Drop table and run GC again");
cur.execute("DROP TABLE foo")
pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")

View File

@@ -3,6 +3,9 @@ from uuid import UUID
import psycopg2.extras
from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver
import logging
import fixtures.log_helper # configures loggers
log = logging.getLogger('root')
def test_timeline_size(
zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFactory, pg_bin
@@ -15,7 +18,7 @@ def test_timeline_size(
assert res["current_logical_size"] == res["current_logical_size_non_incremental"]
pgmain = postgres.create_start("test_timeline_size")
print("postgres is running on 'test_timeline_size' branch")
log.info("postgres is running on 'test_timeline_size' branch")
with closing(pgmain.connect()) as conn:
with conn.cursor() as cur:

View File

@@ -2,6 +2,9 @@ import os
from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver, PgBin
import logging
import fixtures.log_helper # configures loggers
log = logging.getLogger('root')
pytest_plugins = ("fixtures.zenith_fixtures")
@@ -13,7 +16,7 @@ def test_twophase(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFa
zenith_cli.run(["branch", "test_twophase", "empty"])
pg = postgres.create_start('test_twophase', config_lines=['max_prepared_transactions=5'])
print("postgres is running on 'test_twophase' branch")
log.info("postgres is running on 'test_twophase' branch")
conn = pg.connect()
cur = conn.cursor()
@@ -45,7 +48,7 @@ def test_twophase(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFa
cur.execute('CHECKPOINT')
twophase_files = os.listdir(pg.pg_twophase_dir_path())
print(twophase_files)
log.info(twophase_files)
assert len(twophase_files) == 4
cur.execute("COMMIT PREPARED 'insert_three'")
@@ -53,7 +56,7 @@ def test_twophase(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFa
cur.execute('CHECKPOINT')
twophase_files = os.listdir(pg.pg_twophase_dir_path())
print(twophase_files)
log.info(twophase_files)
assert len(twophase_files) == 2
# Create a branch with the transaction in prepared state
@@ -67,7 +70,7 @@ def test_twophase(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFa
# Check that we restored only needed twophase files
twophase_files2 = os.listdir(pg2.pg_twophase_dir_path())
print(twophase_files2)
log.info(twophase_files2)
assert sorted(twophase_files2) == sorted(twophase_files)
conn2 = pg2.connect()

View File

@@ -1,5 +1,9 @@
from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver
import logging
import fixtures.log_helper # configures loggers
log = logging.getLogger('root')
pytest_plugins = ("fixtures.zenith_fixtures")
#
@@ -11,7 +15,7 @@ def test_vm_bit_clear(pageserver: ZenithPageserver, postgres: PostgresFactory, p
zenith_cli.run(["branch", "test_vm_bit_clear", "empty"])
pg = postgres.create_start('test_vm_bit_clear')
print("postgres is running on 'test_vm_bit_clear' branch")
log.info("postgres is running on 'test_vm_bit_clear' branch")
pg_conn = pg.connect()
cur = pg_conn.cursor()
@@ -63,7 +67,7 @@ def test_vm_bit_clear(pageserver: ZenithPageserver, postgres: PostgresFactory, p
# server at the right point-in-time avoids that full-page image.
pg_new = postgres.create_start('test_vm_bit_clear_new')
print("postgres is running on 'test_vm_bit_clear_new' branch")
log.info("postgres is running on 'test_vm_bit_clear_new' branch")
pg_new_conn = pg_new.connect()
cur_new = pg_new_conn.cursor()

View File

@@ -4,7 +4,10 @@ import random
from fixtures.zenith_fixtures import WalAcceptor, WalAcceptorFactory, ZenithPageserver, PostgresFactory, Postgres
from typing import List
from fixtures.utils import debug_print
import logging
import fixtures.log_helper # configures loggers
log = logging.getLogger('root')
pytest_plugins = ("fixtures.zenith_fixtures")
@@ -63,18 +66,18 @@ class WorkerStats(object):
self.counters[worker_id] += 1
def check_progress(self):
debug_print("Workers progress: {}".format(self.counters))
log.debug("Workers progress: {}".format(self.counters))
# every worker should finish at least one tx
assert all(cnt > 0 for cnt in self.counters)
progress = sum(self.counters)
print('All workers made {} transactions'.format(progress))
log.info('All workers made {} transactions'.format(progress))
async def run_random_worker(stats: WorkerStats, pg: Postgres, worker_id, n_accounts, max_transfer):
pg_conn = await pg.connect_async()
debug_print('Started worker {}'.format(worker_id))
log.debug('Started worker {}'.format(worker_id))
while stats.running:
from_uid = random.randint(0, n_accounts - 1)
@@ -84,9 +87,9 @@ async def run_random_worker(stats: WorkerStats, pg: Postgres, worker_id, n_accou
await bank_transfer(pg_conn, from_uid, to_uid, amount)
stats.inc_progress(worker_id)
debug_print('Executed transfer({}) {} => {}'.format(amount, from_uid, to_uid))
log.debug('Executed transfer({}) {} => {}'.format(amount, from_uid, to_uid))
debug_print('Finished worker {}'.format(worker_id))
log.debug('Finished worker {}'.format(worker_id))
await pg_conn.close()
@@ -134,7 +137,7 @@ async def run_restarts_under_load(pg: Postgres, acceptors: List[WalAcceptor], n_
victim.start()
print('Iterations are finished, exiting coroutines...')
log.info('Iterations are finished, exiting coroutines...')
stats.running = False
# await all workers
await asyncio.gather(*workers)

View File

@@ -3,6 +3,10 @@ import os
from fixtures.utils import mkdir_if_needed
from fixtures.zenith_fixtures import PageserverPort, PostgresFactory, check_restored_datadir_content
import logging
import fixtures.log_helper # configures loggers
log = logging.getLogger('root')
pytest_plugins = ("fixtures.zenith_fixtures")
@@ -38,7 +42,7 @@ def test_zenith_regress(postgres: PostgresFactory, pg_bin, zenith_cli, test_outp
'--inputdir={}'.format(src_path),
]
print(pg_regress_command)
log.info(pg_regress_command)
env = {
'PGPORT': str(pg.port),
'PGUSER': pg.username,

View File

@@ -1,5 +1,3 @@
from pprint import pprint
import os
import re
import timeit

View File

@@ -0,0 +1,37 @@
import logging
import logging.config
import time
# timestamps in UTC (aka GMT)
class UTCFormatter(logging.Formatter):
converter = time.gmtime
LOGGING = {
"version": 1,
"formatters": {
"defaultFormatter": {
"()": UTCFormatter,
"format": "%(asctime)s.%(msecs)-3d %(levelname)s [%(filename)s:%(lineno)d] %(message)s",
"datefmt": "%Y-%m-%d %H:%M:%S"
}
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"formatter": "defaultFormatter",
"level": "DEBUG",
"stream": "ext://sys.stderr"
}
},
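# Note: the 'root' entry below defines a logger literally named "root" (the
# one the tests fetch with logging.getLogger('root')); Python's actual root
# logger would be configured via a top-level "root" key in dictConfig instead.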
"loggers": {
"root": {
"level": "DEBUG",
"handlers": ["console"]
},
"root.subroot": {
"level": "INFO"
}
}
}
logging.config.dictConfig(LOGGING)
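
Importing fixtures.log_helper anywhere is enough to apply this configuration, because logging.config.dictConfig runs at import time. A minimal sketch of the pattern the tests follow (test_example is a hypothetical name):

import logging
import fixtures.log_helper  # import side effect runs dictConfig(LOGGING)
log = logging.getLogger('root')

def test_example():
    # with the formatter above this emits roughly:
    # 2021-09-28 10:17:51.123 INFO [test_example.py:8] hello from a test
    log.info('hello from a test')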

View File

@@ -3,6 +3,9 @@ import subprocess
from typing import Any, List
import logging
import fixtures.log_helper # configures loggers
log = logging.getLogger('root')
def get_self_dir() -> str:
""" Get the path to the directory where this script lives. """
@@ -39,7 +42,7 @@ def subprocess_capture(capture_dir: str, cmd: List[str], **kwargs: Any) -> str:
with open(stdout_filename, 'w') as stdout_f:
with open(stderr_filename, 'w') as stderr_f:
print('(capturing output to "{}.stdout")'.format(base))
log.info('(capturing output to "{}.stdout")'.format(base))
subprocess.run(cmd, **kwargs, stdout=stdout_f, stderr=stderr_f)
return basepath
@@ -58,14 +61,6 @@ def global_counter() -> int:
_global_counter += 1
return _global_counter
def debug_print(*args, **kwargs) -> None:
""" Print to the console if TEST_DEBUG_PRINT is set in env.
All parameters are passed to print().
"""
if os.environ.get('TEST_DEBUG_PRINT') is not None:
print(*args, **kwargs)
def lsn_to_hex(num: int) -> str:
""" Convert lsn from int to standard hex notation. """
return "{:X}/{:X}".format(num >> 32, num & 0xffffffff)

View File

@@ -27,6 +27,11 @@ from typing_extensions import Literal
import requests
from .utils import (get_self_dir, mkdir_if_needed, subprocess_capture)
import logging
import fixtures.log_helper # configures loggers
log = logging.getLogger('root')
"""
This file contains pytest fixtures. A fixture is a test resource that can be
summoned by placing its name in the test's arguments.
@@ -186,13 +191,13 @@ class ZenithCli:
>>> result = zenith_cli.run(...)
>>> assert result.stderr == ""
>>> print(result.stdout)
>>> log.info(result.stdout)
"""
assert type(arguments) == list
args = [self.bin_zenith] + arguments
print('Running command "{}"'.format(' '.join(args)))
log.info('Running command "{}"'.format(' '.join(args)))
# Intercept CalledProcessError and log more info
try:
@@ -209,7 +214,7 @@ class ZenithCli:
stdout: {exc.stdout}
stderr: {exc.stderr}
"""
print(msg)
log.info(msg)
raise Exception(msg) from exc
@@ -417,7 +422,7 @@ class ZenithPageserver(PgProtocol):
def pageserver_port(port_distributor: PortDistributor) -> PageserverPort:
pg = port_distributor.get_port()
http = port_distributor.get_port()
print(f"pageserver_port: pg={pg} http={http}")
log.info(f"pageserver_port: pg={pg} http={http}")
return PageserverPort(pg=pg, http=http)
@@ -441,7 +446,7 @@ def pageserver(zenith_cli: ZenithCli, repo_dir: str, pageserver_port: Pageserver
yield ps
# After the yield comes any cleanup code we need.
print('Starting pageserver cleanup')
log.info('Starting pageserver cleanup')
ps.stop()
class PgBin:
@@ -479,7 +484,7 @@ class PgBin:
"""
self._fixpath(command)
print('Running command "{}"'.format(' '.join(command)))
log.info('Running command "{}"'.format(' '.join(command)))
env = self._build_env(env)
subprocess.run(command, env=env, cwd=cwd, check=True)
@@ -496,7 +501,7 @@ class PgBin:
"""
self._fixpath(command)
print('Running command "{}"'.format(' '.join(command)))
log.info('Running command "{}"'.format(' '.join(command)))
env = self._build_env(env)
return subprocess_capture(self.log_dir, command, env=env, cwd=cwd, check=True, **kwargs)
@@ -564,12 +569,12 @@ class Postgres(PgProtocol):
assert self.branch is not None
print(f"Starting postgres on branch {self.branch}")
log.info(f"Starting postgres on branch {self.branch}")
run_result = self.zenith_cli.run(['pg', 'start', self.branch, f'--tenantid={self.tenant_id}', f'--port={self.port}'])
self.running = True
print(f"stdout: {run_result.stdout}")
log.info(f"stdout: {run_result.stdout}")
return self
@@ -784,7 +789,7 @@ def postgres(zenith_cli: ZenithCli, initial_tenant: str, repo_dir: str, pg_bin:
yield pgfactory
# After the yield comes any cleanup code we need.
print('Starting postgres cleanup')
log.info('Starting postgres cleanup')
pgfactory.stop_all()
def read_pid(path: Path):
@@ -815,7 +820,7 @@ class WalAcceptor:
# Tell page server it can receive WAL from this WAL safekeeper
cmd.extend(["--pageserver", f"localhost:{self.pageserver_port}"])
cmd.extend(["--recall", "1 second"])
print('Running command "{}"'.format(' '.join(cmd)))
log.info('Running command "{}"'.format(' '.join(cmd)))
env = {'PAGESERVER_AUTH_TOKEN': self.auth_token} if self.auth_token else None
subprocess.run(cmd, check=True, env=env)
@@ -844,10 +849,10 @@ class WalAcceptor:
return pid
def stop(self) -> 'WalAcceptor':
print('Stopping wal acceptor {}'.format(self.num))
log.info('Stopping wal acceptor {}'.format(self.num))
pid = self.get_pid()
if pid is None:
print("Wal acceptor {} is not running".format(self.num))
log.info("Wal acceptor {} is not running".format(self.num))
return self
try:
@@ -873,10 +878,10 @@ class WalAcceptor:
conn.autocommit = True
with conn.cursor() as cur:
request_json = json.dumps(request)
print(f"JSON_CTRL request on port {self.port}: {request_json}")
log.info(f"JSON_CTRL request on port {self.port}: {request_json}")
cur.execute("JSON_CTRL " + request_json)
all = cur.fetchall()
print(f"JSON_CTRL response: {all[0][0]}")
log.info(f"JSON_CTRL response: {all[0][0]}")
return json.loads(all[0][0])
class WalAcceptorFactory:
@@ -934,7 +939,7 @@ def wa_factory(zenith_binpath: str, repo_dir: str, pageserver_port: PageserverPo
)
yield wafactory
# After the yield comes any cleanup code we need.
print('Starting wal acceptors cleanup')
log.info('Starting wal acceptors cleanup')
wafactory.stop_all()
@@ -943,7 +948,7 @@ def base_dir() -> str:
""" find the base directory (currently this is the git root) """
base_dir = os.path.normpath(os.path.join(get_self_dir(), '../..'))
print('\nbase_dir is', base_dir)
log.info('base_dir is %s', base_dir)
return base_dir
@@ -972,7 +977,7 @@ def test_output_dir(request: Any, top_output_dir: str) -> str:
test_name = 'shared'
test_output_dir = os.path.join(top_output_dir, test_name)
print('test_output_dir is', test_output_dir)
log.info('test_output_dir is %s', test_output_dir)
shutil.rmtree(test_output_dir, ignore_errors=True)
mkdir_if_needed(test_output_dir)
return test_output_dir
@@ -1014,7 +1019,7 @@ def pg_distrib_dir(base_dir: str) -> str:
pg_dir = env_postgres_bin
else:
pg_dir = os.path.normpath(os.path.join(base_dir, DEFAULT_POSTGRES_DIR))
print('postgres dir is', pg_dir)
log.info('postgres dir is %s', pg_dir)
if not os.path.exists(os.path.join(pg_dir, 'bin/postgres')):
raise Exception('postgres not found at "{}"'.format(pg_dir))
return pg_dir
@@ -1053,7 +1058,7 @@ def list_files_to_compare(pgdata_dir: str):
pgdata_files.append(rel_file)
pgdata_files.sort()
print(pgdata_files)
log.info(pgdata_files)
return pgdata_files
# pg is the existing and running compute node, that we want to compare with a basebackup
@@ -1099,9 +1104,9 @@ def check_restored_datadir_content(zenith_cli: ZenithCli, test_output_dir: str,
restored_dir_path,
pgdata_files,
shallow=False)
print('filecmp result mismatch and error lists:')
print(mismatch)
print(error)
log.info('filecmp result mismatch and error lists:')
log.info(mismatch)
log.info(error)
for f in mismatch:

View File

@@ -2,6 +2,10 @@ import os
from contextlib import closing
from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver
import logging
import fixtures.log_helper # configures loggers
log = logging.getLogger('root')
pytest_plugins = ("fixtures.zenith_fixtures", "fixtures.benchmark_fixture")
def get_timeline_size(repo_dir: str, tenantid: str, timelineid: str):
@@ -31,7 +35,7 @@ def test_bulk_insert(postgres: PostgresFactory, pageserver: ZenithPageserver, pg
zenith_cli.run(["branch", "test_bulk_insert", "empty"])
pg = postgres.create_start('test_bulk_insert')
print("postgres is running on 'test_bulk_insert' branch")
log.info("postgres is running on 'test_bulk_insert' branch")
# Open a connection directly to the page server that we'll use to force
# flushing the layers to disk

View File

@@ -2,6 +2,10 @@ import os
from contextlib import closing
from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver
import logging
import fixtures.log_helper # configures loggers
log = logging.getLogger('root')
pytest_plugins = ("fixtures.zenith_fixtures", "fixtures.benchmark_fixture")
def get_timeline_size(repo_dir: str, tenantid: str, timelineid: str):
@@ -31,7 +35,7 @@ def test_pgbench(postgres: PostgresFactory, pageserver: ZenithPageserver, pg_bin
zenith_cli.run(["branch", "test_pgbench_perf", "empty"])
pg = postgres.create_start('test_pgbench_perf')
print("postgres is running on 'test_pgbench_perf' branch")
log.info("postgres is running on 'test_pgbench_perf' branch")
# Open a connection directly to the page server that we'll use to force
# flushing the layers to disk

View File

@@ -1,6 +1,10 @@
import pytest
import os
import logging
import fixtures.log_helper # configures loggers
log = logging.getLogger('root')
pytest_plugins = ("fixtures.zenith_fixtures")
"""
Use this test to see what happens when tests fail.
@@ -22,7 +26,7 @@ def test_broken(zenith_cli, pageserver, postgres, pg_bin):
zenith_cli.run(["branch", "test_broken", "empty"])
postgres.create_start("test_broken")
print('postgres is running')
log.info('postgres is running')
print('THIS NEXT COMMAND WILL FAIL:')
log.info('THIS NEXT COMMAND WILL FAIL:')
pg_bin.run('pgbench -i_am_a_broken_test'.split())