Use logging in python tests

Author: Arthur Petukhovsky
Date: 2021-09-28 13:17:51 +03:00
Parent: fb05e4cb0b
Commit: 3fdd85bcb8

23 changed files with 201 additions and 94 deletions

View File

@@ -1,6 +1,9 @@
import subprocess
from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')
pytest_plugins = ("fixtures.zenith_fixtures")
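Each touched test file gains the same three added lines: logging is imported, fixtures.log_helper is imported for its side effect of configuring handlers, and a shared module-level logger named 'root' is looked up. The log_helper module itself is not shown in this diff, so the sketch below is only an assumption about what "configures loggers" involves; the level and format string are guesses. Note that logging.getLogger('root') returns a logger named 'root', which propagates to, but is distinct from, the actual root logger that basicConfig configures.

# Hypothetical sketch of fixtures/log_helper.py -- not part of this diff.
# Importing it once performs handler setup; records from the logger named
# 'root' then propagate up to the handlers configured here.
import logging

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s',
)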
@@ -13,7 +16,7 @@ def test_branch_behind(zenith_cli, pageserver: ZenithPageserver, postgres: Postg
zenith_cli.run(["branch", "test_branch_behind", "empty"])
pgmain = postgres.create_start('test_branch_behind')
print("postgres is running on 'test_branch_behind' branch")
log.info("postgres is running on 'test_branch_behind' branch")
main_pg_conn = pgmain.connect()
main_cur = main_pg_conn.cursor()
@@ -27,7 +30,7 @@ def test_branch_behind(zenith_cli, pageserver: ZenithPageserver, postgres: Postg
''')
main_cur.execute('SELECT pg_current_wal_insert_lsn()')
lsn_a = main_cur.fetchone()[0]
-print('LSN after 100 rows: ' + lsn_a)
+log.info('LSN after 100 rows: ' + lsn_a)
# Insert some more rows. (This generates enough WAL to fill a few segments.)
main_cur.execute('''
@@ -37,7 +40,7 @@ def test_branch_behind(zenith_cli, pageserver: ZenithPageserver, postgres: Postg
''')
main_cur.execute('SELECT pg_current_wal_insert_lsn()')
lsn_b = main_cur.fetchone()[0]
-print('LSN after 100100 rows: ' + lsn_b)
+log.info('LSN after 100100 rows: ' + lsn_b)
# Branch at the point where only 100 rows were inserted
zenith_cli.run(["branch", "test_branch_behind_hundred", "test_branch_behind@" + lsn_a])
@@ -52,7 +55,7 @@ def test_branch_behind(zenith_cli, pageserver: ZenithPageserver, postgres: Postg
main_cur.execute('SELECT pg_current_wal_insert_lsn()')
lsn_c = main_cur.fetchone()[0]
-print('LSN after 200100 rows: ' + lsn_c)
+log.info('LSN after 200100 rows: ' + lsn_c)
# Branch at the point where only 200 rows were inserted
zenith_cli.run(["branch", "test_branch_behind_more", "test_branch_behind@" + lsn_b])
@@ -89,4 +92,4 @@ def test_branch_behind(zenith_cli, pageserver: ZenithPageserver, postgres: Postg
try:
zenith_cli.run(["branch", "test_branch_preinitdb", "test_branch_behind@0/42"])
except subprocess.CalledProcessError:
print("Branch creation with pre-initdb LSN failed (as expected)")
log.info("Branch creation with pre-initdb LSN failed (as expected)")

View File

@@ -5,6 +5,10 @@ from contextlib import closing
from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')
pytest_plugins = ("fixtures.zenith_fixtures")
@@ -24,7 +28,7 @@ def test_clog_truncate(zenith_cli, pageserver: ZenithPageserver, postgres: Postg
]
pg = postgres.create_start('test_clog_truncate', config_lines=config)
-print('postgres is running on test_clog_truncate branch')
+log.info('postgres is running on test_clog_truncate branch')
# Install extension containing function needed for test
pg.safe_psql('CREATE EXTENSION zenith_test_utils')
@@ -33,22 +37,22 @@ def test_clog_truncate(zenith_cli, pageserver: ZenithPageserver, postgres: Postg
with closing(pg.connect()) as conn:
with conn.cursor() as cur:
cur.execute('select test_consume_xids(1000*1000*10);')
-print('xids consumed')
+log.info('xids consumed')
# call a checkpoint to trigger TruncateSubtrans
cur.execute('CHECKPOINT;')
# ensure WAL flush
cur.execute('select txid_current()')
-print(cur.fetchone())
+log.info(cur.fetchone())
# wait for autovacuum to truncate the pg_xact
# XXX Is it worth to add a timeout here?
pg_xact_0000_path = os.path.join(pg.pg_xact_dir_path(), '0000')
print("pg_xact_0000_path = " + pg_xact_0000_path)
log.info("pg_xact_0000_path = " + pg_xact_0000_path)
while os.path.isfile(pg_xact_0000_path):
print("file exists. wait for truncation. " "pg_xact_0000_path = " + pg_xact_0000_path)
log.info("file exists. wait for truncation. " "pg_xact_0000_path = " + pg_xact_0000_path)
time.sleep(5)
# checkpoint to advance latest lsn
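The wait loop above spins forever if autovacuum never truncates pg_xact, which is exactly what the XXX comment asks about. A minimal sketch of a bounded wait; the helper name and the timeout value are illustrative, not taken from the repo:

import os
import time

def wait_until_gone(path, timeout=300.0, poll=5.0):
    # Fail loudly instead of hanging the test run if truncation never happens.
    deadline = time.monotonic() + timeout
    while os.path.isfile(path):
        if time.monotonic() > deadline:
            raise TimeoutError(f'{path} still exists after {timeout} seconds')
        time.sleep(poll)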
@@ -59,14 +63,14 @@ def test_clog_truncate(zenith_cli, pageserver: ZenithPageserver, postgres: Postg
lsn_after_truncation = cur.fetchone()[0]
# create new branch after clog truncation and start a compute node on it
-print('create branch at lsn_after_truncation ' + lsn_after_truncation)
+log.info('create branch at lsn_after_truncation ' + lsn_after_truncation)
zenith_cli.run(
["branch", "test_clog_truncate_new", "test_clog_truncate@" + lsn_after_truncation])
pg2 = postgres.create_start('test_clog_truncate_new')
-print('postgres is running on test_clog_truncate_new branch')
+log.info('postgres is running on test_clog_truncate_new branch')
# check that new node doesn't contain truncated segment
pg_xact_0000_path_new = os.path.join(pg2.pg_xact_dir_path(), '0000')
print("pg_xact_0000_path_new = " + pg_xact_0000_path_new)
log.info("pg_xact_0000_path_new = " + pg_xact_0000_path_new)
assert os.path.isfile(pg_xact_0000_path_new) is False

View File

@@ -2,6 +2,10 @@ from contextlib import closing
from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')
pytest_plugins = ("fixtures.zenith_fixtures")
@@ -14,7 +18,7 @@ def test_config(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFact
# change config
pg = postgres.create_start('test_config', config_lines=['log_min_messages=debug1'])
-print('postgres is running on test_config branch')
+log.info('postgres is running on test_config branch')
with closing(pg.connect()) as conn:
with conn.cursor() as cur:

View File

@@ -4,6 +4,10 @@ import pathlib
from contextlib import closing
from fixtures.zenith_fixtures import ZenithPageserver, PostgresFactory, ZenithCli, check_restored_datadir_content
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')
pytest_plugins = ("fixtures.zenith_fixtures")
@@ -19,7 +23,7 @@ def test_createdb(
zenith_cli.run(["branch", "test_createdb", "empty"])
pg = postgres.create_start('test_createdb')
print("postgres is running on 'test_createdb' branch")
log.info("postgres is running on 'test_createdb' branch")
with closing(pg.connect()) as conn:
with conn.cursor() as cur:
@@ -53,7 +57,7 @@ def test_dropdb(
zenith_cli.run(["branch", "test_dropdb", "empty"])
pg = postgres.create_start('test_dropdb')
print("postgres is running on 'test_dropdb' branch")
log.info("postgres is running on 'test_dropdb' branch")
with closing(pg.connect()) as conn:
with conn.cursor() as cur:
@@ -88,13 +92,13 @@ def test_dropdb(
# Test that database subdir exists on the branch before drop
dbpath = pathlib.Path(pg_before.pgdata_dir) / 'base' / str(dboid)
-print(dbpath)
+log.info(dbpath)
assert os.path.isdir(dbpath) == True
# Test that database subdir doesn't exist on the branch after drop
dbpath = pathlib.Path(pg_after.pgdata_dir) / 'base' / str(dboid)
-print(dbpath)
+log.info(dbpath)
assert os.path.isdir(dbpath) == False

View File

@@ -2,6 +2,10 @@ from contextlib import closing
from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')
pytest_plugins = ("fixtures.zenith_fixtures")
@@ -12,7 +16,7 @@ def test_createuser(zenith_cli, pageserver: ZenithPageserver, postgres: Postgres
zenith_cli.run(["branch", "test_createuser", "empty"])
pg = postgres.create_start('test_createuser')
print("postgres is running on 'test_createuser' branch")
log.info("postgres is running on 'test_createuser' branch")
with closing(pg.connect()) as conn:
with conn.cursor() as cur:

View File

@@ -1,5 +1,9 @@
from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver, check_restored_datadir_content
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')
pytest_plugins = ("fixtures.zenith_fixtures")
@@ -15,7 +19,7 @@ def test_multixact(pageserver: ZenithPageserver, postgres: PostgresFactory,
zenith_cli.run(["branch", "test_multixact", "empty"])
pg = postgres.create_start('test_multixact')
print("postgres is running on 'test_multixact' branch")
log.info("postgres is running on 'test_multixact' branch")
pg_conn = pg.connect()
cur = pg_conn.cursor()
@@ -55,7 +59,7 @@ def test_multixact(pageserver: ZenithPageserver, postgres: PostgresFactory,
zenith_cli.run(["branch", "test_multixact_new", "test_multixact@" + lsn])
pg_new = postgres.create_start('test_multixact_new')
print("postgres is running on 'test_multixact_new' branch")
log.info("postgres is running on 'test_multixact_new' branch")
pg_new_conn = pg_new.connect()
cur_new = pg_new_conn.cursor()

View File

@@ -2,6 +2,10 @@ from contextlib import closing
from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')
pytest_plugins = ("fixtures.zenith_fixtures")
#
@@ -18,7 +22,7 @@ def test_old_request_lsn(zenith_cli, pageserver: ZenithPageserver, postgres: Pos
# Create a branch for us
zenith_cli.run(["branch", "test_old_request_lsn", "empty"])
pg = postgres.create_start('test_old_request_lsn')
-print('postgres is running on test_old_request_lsn branch')
+log.info('postgres is running on test_old_request_lsn branch')
pg_conn = pg.connect()
cur = pg_conn.cursor()
@@ -46,7 +50,7 @@ def test_old_request_lsn(zenith_cli, pageserver: ZenithPageserver, postgres: Pos
from pg_settings where name = 'shared_buffers'
''')
row = cur.fetchone()
-print(f'shared_buffers is {row[0]}, table size {row[1]}');
+log.info(f'shared_buffers is {row[0]}, table size {row[1]}')
assert int(row[0]) < int(row[1])
cur.execute('VACUUM foo');

View File

@@ -6,6 +6,10 @@ from contextlib import closing
from multiprocessing import Process, Value
from fixtures.zenith_fixtures import WalAcceptorFactory, ZenithPageserver, PostgresFactory
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')
pytest_plugins = ("fixtures.zenith_fixtures")
# Check that dead minority doesn't prevent the commits: execute insert n_inserts
@@ -40,7 +44,7 @@ def test_pageserver_restart(zenith_cli, pageserver: ZenithPageserver, postgres:
from pg_settings where name = 'shared_buffers'
''')
row = cur.fetchone()
print("shared_buffers is {}, table size {}", row[0], row[1]);
log.info("shared_buffers is {}, table size {}", row[0], row[1]);
assert int(row[0]) < int(row[1])
# Stop and restart pageserver. This is a more or less graceful shutdown, although
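A note on the logging call above: the standard logging module interpolates positional arguments with the %-operator, not str.format, so '{}' placeholders are never filled in, and arguments without matching '%s' specifiers cause a formatting error that the handler reports as "--- Logging error ---" instead of the intended message. A minimal demonstration:

import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger('root')

# Broken: '{}' is not a logging placeholder; formatting fails at emit time.
log.info("shared_buffers is {}, table size {}", 128, 4096)

# Correct: %-style arguments, interpolated lazily when the record is emitted.
log.info("shared_buffers is %s, table size %s", 128, 4096)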

View File

@@ -1,5 +1,9 @@
from fixtures.zenith_fixtures import PostgresFactory
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')
pytest_plugins = ("fixtures.zenith_fixtures")
@@ -8,7 +12,7 @@ def test_pgbench(postgres: PostgresFactory, pg_bin, zenith_cli):
zenith_cli.run(["branch", "test_pgbench", "empty"])
pg = postgres.create_start('test_pgbench')
print("postgres is running on 'test_pgbench' branch")
log.info("postgres is running on 'test_pgbench' branch")
connstr = pg.connstr()

View File

@@ -3,6 +3,10 @@ import pytest
from contextlib import closing
from fixtures.zenith_fixtures import ZenithPageserver, PostgresFactory
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')
pytest_plugins = ("fixtures.zenith_fixtures")
@@ -30,7 +34,7 @@ def test_restart_compute(
pg = postgres.create_start('test_restart_compute',
wal_acceptors=wal_acceptor_connstrs)
print("postgres is running on 'test_restart_compute' branch")
log.info("postgres is running on 'test_restart_compute' branch")
with closing(pg.connect()) as conn:
with conn.cursor() as cur:
@@ -39,7 +43,7 @@ def test_restart_compute(
cur.execute('SELECT sum(key) FROM t')
r = cur.fetchone()
assert r == (5000050000, )
print("res = ", r)
log.info("res = ", r)
# Remove data directory and restart
pg.stop_and_destroy().create_start('test_restart_compute',
@@ -52,7 +56,7 @@ def test_restart_compute(
cur.execute('SELECT sum(key) FROM t')
r = cur.fetchone()
assert r == (5000050000, )
print("res = ", r)
log.info("res = ", r)
# Insert another row
cur.execute("INSERT INTO t VALUES (100001, 'payload2')")
@@ -60,7 +64,7 @@ def test_restart_compute(
r = cur.fetchone()
assert r == (100001, )
print("res = ", r)
log.info("res = ", r)
# Again remove data directory and restart
pg.stop_and_destroy().create_start('test_restart_compute',
@@ -75,7 +79,7 @@ def test_restart_compute(
r = cur.fetchone()
assert r == (100001, )
print("res = ", r)
log.info("res = ", r)
# And again remove data directory and restart
pg.stop_and_destroy().create_start('test_restart_compute',
@@ -88,4 +92,4 @@ def test_restart_compute(
r = cur.fetchone()
assert r == (100001, )
print("res = ", r)
log.info("res = ", r)

View File

@@ -2,12 +2,16 @@ from contextlib import closing
import psycopg2.extras
import time;
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')
pytest_plugins = ("fixtures.zenith_fixtures")
def print_gc_result(row):
print("GC duration {elapsed} ms".format_map(row));
print(" REL total: {layer_relfiles_total}, needed_by_cutoff {layer_relfiles_needed_by_cutoff}, needed_by_branches: {layer_relfiles_needed_by_branches}, not_updated: {layer_relfiles_not_updated}, needed_as_tombstone {layer_relfiles_needed_as_tombstone}, removed: {layer_relfiles_removed}, dropped: {layer_relfiles_dropped}".format_map(row))
print(" NONREL total: {layer_nonrelfiles_total}, needed_by_cutoff {layer_nonrelfiles_needed_by_cutoff}, needed_by_branches: {layer_nonrelfiles_needed_by_branches}, not_updated: {layer_nonrelfiles_not_updated}, needed_as_tombstone {layer_nonrelfiles_needed_as_tombstone}, removed: {layer_nonrelfiles_removed}, dropped: {layer_nonrelfiles_dropped}".format_map(row))
log.info("GC duration {elapsed} ms".format_map(row));
log.info(" REL total: {layer_relfiles_total}, needed_by_cutoff {layer_relfiles_needed_by_cutoff}, needed_by_branches: {layer_relfiles_needed_by_branches}, not_updated: {layer_relfiles_not_updated}, needed_as_tombstone {layer_relfiles_needed_as_tombstone}, removed: {layer_relfiles_removed}, dropped: {layer_relfiles_dropped}".format_map(row))
log.info(" NONREL total: {layer_nonrelfiles_total}, needed_by_cutoff {layer_nonrelfiles_needed_by_cutoff}, needed_by_branches: {layer_nonrelfiles_needed_by_branches}, not_updated: {layer_nonrelfiles_not_updated}, needed_as_tombstone {layer_nonrelfiles_needed_as_tombstone}, removed: {layer_nonrelfiles_removed}, dropped: {layer_nonrelfiles_dropped}".format_map(row))
#
@@ -35,7 +39,7 @@ def test_layerfiles_gc(zenith_cli, pageserver, postgres, pg_bin):
cur.execute("select relfilenode from pg_class where oid = 'foo'::regclass");
row = cur.fetchone();
print("relfilenode is {}", row[0]);
log.info("relfilenode is {}", row[0]);
# Run GC, to clear out any garbage left behind in the catalogs by
# the CREATE TABLE command. We want to have a clean slate with no garbage
@@ -50,7 +54,7 @@ def test_layerfiles_gc(zenith_cli, pageserver, postgres, pg_bin):
# update to confuse our numbers either.
cur.execute("DELETE FROM foo")
print("Running GC before test")
log.info("Running GC before test")
pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")
row = pscur.fetchone()
print_gc_result(row);
@@ -61,7 +65,7 @@ def test_layerfiles_gc(zenith_cli, pageserver, postgres, pg_bin):
# Insert a row and run GC. Checkpoint should freeze the layer
# so that there is only the most recent image layer left for the rel,
# removing the old image and delta layer.
print("Inserting one row and running GC")
log.info("Inserting one row and running GC")
cur.execute("INSERT INTO foo VALUES (1)")
pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")
row = pscur.fetchone()
@@ -73,7 +77,7 @@ def test_layerfiles_gc(zenith_cli, pageserver, postgres, pg_bin):
# Insert two more rows and run GC.
# This should create new image and delta layer file with the new contents, and
# then remove the old one image and the just-created delta layer.
print("Inserting two more rows and running GC")
log.info("Inserting two more rows and running GC")
cur.execute("INSERT INTO foo VALUES (2)")
cur.execute("INSERT INTO foo VALUES (3)")
@@ -85,7 +89,7 @@ def test_layerfiles_gc(zenith_cli, pageserver, postgres, pg_bin):
assert row['layer_relfiles_dropped'] == 0
# Do it again. Should again create two new layer files and remove old ones.
print("Inserting two more rows and running GC")
log.info("Inserting two more rows and running GC")
cur.execute("INSERT INTO foo VALUES (2)")
cur.execute("INSERT INTO foo VALUES (3)")
@@ -97,7 +101,7 @@ def test_layerfiles_gc(zenith_cli, pageserver, postgres, pg_bin):
assert row['layer_relfiles_dropped'] == 0
# Run GC again, with no changes in the database. Should not remove anything.
print("Run GC again, with nothing to do")
log.info("Run GC again, with nothing to do")
pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")
row = pscur.fetchone()
print_gc_result(row);
@@ -108,7 +112,7 @@ def test_layerfiles_gc(zenith_cli, pageserver, postgres, pg_bin):
#
# Test DROP TABLE checks that relation data and metadata was deleted by GC from object storage
#
print("Drop table and run GC again");
log.info("Drop table and run GC again");
cur.execute("DROP TABLE foo")
pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")

View File

@@ -3,6 +3,9 @@ from uuid import UUID
import psycopg2.extras
from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')
def test_timeline_size(
zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFactory, pg_bin
@@ -15,7 +18,7 @@ def test_timeline_size(
assert res["current_logical_size"] == res["current_logical_size_non_incremental"]
pgmain = postgres.create_start("test_timeline_size")
print("postgres is running on 'test_timeline_size' branch")
log.info("postgres is running on 'test_timeline_size' branch")
with closing(pgmain.connect()) as conn:
with conn.cursor() as cur:

View File

@@ -2,6 +2,9 @@ import os
from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver, PgBin
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')
pytest_plugins = ("fixtures.zenith_fixtures")
@@ -13,7 +16,7 @@ def test_twophase(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFa
zenith_cli.run(["branch", "test_twophase", "empty"])
pg = postgres.create_start('test_twophase', config_lines=['max_prepared_transactions=5'])
print("postgres is running on 'test_twophase' branch")
log.info("postgres is running on 'test_twophase' branch")
conn = pg.connect()
cur = conn.cursor()
@@ -45,7 +48,7 @@ def test_twophase(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFa
cur.execute('CHECKPOINT')
twophase_files = os.listdir(pg.pg_twophase_dir_path())
-print(twophase_files)
+log.info(twophase_files)
assert len(twophase_files) == 4
cur.execute("COMMIT PREPARED 'insert_three'")
@@ -53,7 +56,7 @@ def test_twophase(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFa
cur.execute('CHECKPOINT')
twophase_files = os.listdir(pg.pg_twophase_dir_path())
-print(twophase_files)
+log.info(twophase_files)
assert len(twophase_files) == 2
# Create a branch with the transaction in prepared state
@@ -67,7 +70,7 @@ def test_twophase(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFa
# Check that we restored only needed twophase files
twophase_files2 = os.listdir(pg2.pg_twophase_dir_path())
-print(twophase_files2)
+log.info(twophase_files2)
assert twophase_files2.sort() == twophase_files.sort()
conn2 = pg2.connect()

View File

@@ -1,5 +1,9 @@
from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')
pytest_plugins = ("fixtures.zenith_fixtures")
#
@@ -11,7 +15,7 @@ def test_vm_bit_clear(pageserver: ZenithPageserver, postgres: PostgresFactory, p
zenith_cli.run(["branch", "test_vm_bit_clear", "empty"])
pg = postgres.create_start('test_vm_bit_clear')
print("postgres is running on 'test_vm_bit_clear' branch")
log.info("postgres is running on 'test_vm_bit_clear' branch")
pg_conn = pg.connect()
cur = pg_conn.cursor()
@@ -63,7 +67,7 @@ def test_vm_bit_clear(pageserver: ZenithPageserver, postgres: PostgresFactory, p
# server at the right point-in-time avoids that full-page image.
pg_new = postgres.create_start('test_vm_bit_clear_new')
print("postgres is running on 'test_vm_bit_clear_new' branch")
log.info("postgres is running on 'test_vm_bit_clear_new' branch")
pg_new_conn = pg_new.connect()
cur_new = pg_new_conn.cursor()

View File

@@ -4,7 +4,10 @@ import random
from fixtures.zenith_fixtures import WalAcceptor, WalAcceptorFactory, ZenithPageserver, PostgresFactory, Postgres
from typing import List
from fixtures.utils import debug_print
+import logging
+import fixtures.log_helper # configures loggers
+log = logging.getLogger('root')
pytest_plugins = ("fixtures.zenith_fixtures")
@@ -63,18 +66,18 @@ class WorkerStats(object):
self.counters[worker_id] += 1
def check_progress(self):
debug_print("Workers progress: {}".format(self.counters))
log.debug("Workers progress: {}".format(self.counters))
# every worker should finish at least one tx
assert all(cnt > 0 for cnt in self.counters)
progress = sum(self.counters)
-print('All workers made {} transactions'.format(progress))
+log.info('All workers made {} transactions'.format(progress))
async def run_random_worker(stats: WorkerStats, pg: Postgres, worker_id, n_accounts, max_transfer):
pg_conn = await pg.connect_async()
-debug_print('Started worker {}'.format(worker_id))
+log.debug('Started worker {}'.format(worker_id))
while stats.running:
from_uid = random.randint(0, n_accounts - 1)
@@ -84,9 +87,9 @@ async def run_random_worker(stats: WorkerStats, pg: Postgres, worker_id, n_accou
await bank_transfer(pg_conn, from_uid, to_uid, amount)
stats.inc_progress(worker_id)
-debug_print('Executed transfer({}) {} => {}'.format(amount, from_uid, to_uid))
+log.debug('Executed transfer({}) {} => {}'.format(amount, from_uid, to_uid))
-debug_print('Finished worker {}'.format(worker_id))
+log.debug('Finished worker {}'.format(worker_id))
await pg_conn.close()
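With debug_print replaced by log.debug, the per-transfer chatter is hidden at the default INFO level while the progress summaries remain visible. For a verbose local run, assuming the same 'root' logger setup, the level can be lowered in code as sketched below, or via pytest's live-log option --log-cli-level=DEBUG:

import logging

# Lower the threshold of the logger named 'root'; its records propagate to
# whatever handlers fixtures.log_helper configured.
logging.getLogger('root').setLevel(logging.DEBUG)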
@@ -134,7 +137,7 @@ async def run_restarts_under_load(pg: Postgres, acceptors: List[WalAcceptor], n_
victim.start()
-print('Iterations are finished, exiting coroutines...')
+log.info('Iterations are finished, exiting coroutines...')
stats.running = False
# await all workers
await asyncio.gather(*workers)