Mirror of https://github.com/neondatabase/neon.git (synced 2026-01-07 13:32:57 +00:00)
test_runner: replace all .format() with f-strings (#7194)
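The change is mechanical: every str.format() call in the test runner becomes the equivalent f-string, and the first hunk below enables ruff's UP032 rule (the f-string upgrade check) so that new .format() calls fail lint. A minimal sketch of the rewrite pattern, with illustrative values rather than code from the patch:

    num = (5 << 32) | 0x1A2B3C4D  # fake LSN: high and low 32 bits
    value = 1234.5678

    # Before: arguments and format specs live in .format()
    old_lsn = "{:X}/{:X}".format(num >> 32, num & 0xFFFFFFFF)
    old_val = "{0:,.3f}".format(value)

    # After: the expressions move inside the braces; specs stay after the colon
    new_lsn = f"{num >> 32:X}/{num & 0xFFFFFFFF:X}"
    new_val = f"{value:,.3f}"

    assert old_lsn == new_lsn == "5/1A2B3C4D"
    assert old_val == new_val == "1,234.568"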
@@ -94,4 +94,5 @@ select = [
     "I", # isort
     "W", # pycodestyle
     "B", # bugbear
+    "UP032", # f-string
 ]
@@ -64,14 +64,14 @@ def subprocess_capture(capture_dir: str, cmd: List[str], **kwargs: Any) -> str:
     Returns basepath for files with captured output.
     """
     assert isinstance(cmd, list)
-    base = os.path.basename(cmd[0]) + "_{}".format(global_counter())
+    base = f"{os.path.basename(cmd[0])}_{global_counter()}"
     basepath = os.path.join(capture_dir, base)
     stdout_filename = basepath + ".stdout"
     stderr_filename = basepath + ".stderr"

     with open(stdout_filename, "w") as stdout_f:
         with open(stderr_filename, "w") as stderr_f:
-            print('(capturing output to "{}.stdout")'.format(base))
+            print(f'(capturing output to "{base}.stdout")')
             subprocess.run(cmd, **kwargs, stdout=stdout_f, stderr=stderr_f)

     return basepath
@@ -82,11 +82,9 @@ class PgBin:

     def __init__(self, log_dir: Path, pg_distrib_dir, pg_version):
         self.log_dir = log_dir
-        self.pg_bin_path = os.path.join(str(pg_distrib_dir), "v{}".format(pg_version), "bin")
+        self.pg_bin_path = os.path.join(str(pg_distrib_dir), f"v{pg_version}", "bin")
         self.env = os.environ.copy()
-        self.env["LD_LIBRARY_PATH"] = os.path.join(
-            str(pg_distrib_dir), "v{}".format(pg_version), "lib"
-        )
+        self.env["LD_LIBRARY_PATH"] = os.path.join(str(pg_distrib_dir), f"v{pg_version}", "lib")

     def _fixpath(self, command: List[str]):
         if "/" not in command[0]:
@@ -110,7 +108,7 @@ class PgBin:
         """

         self._fixpath(command)
-        print('Running command "{}"'.format(" ".join(command)))
+        print(f'Running command "{" ".join(command)}"')
         env = self._build_env(env)
         subprocess.run(command, env=env, cwd=cwd, check=True)

@@ -128,7 +126,7 @@ class PgBin:
         """

         self._fixpath(command)
-        print('Running command "{}"'.format(" ".join(command)))
+        print(f'Running command "{" ".join(command)}"')
         env = self._build_env(env)
         return subprocess_capture(
             str(self.log_dir), command, env=env, cwd=cwd, check=True, **kwargs
@@ -300,7 +298,7 @@ class NeonPageserverHttpClient(requests.Session):

 def lsn_to_hex(num: int) -> str:
     """Convert lsn from int to standard hex notation."""
-    return "{:X}/{:X}".format(num >> 32, num & 0xFFFFFFFF)
+    return f"{num >> 32:X}/{num & 0xFFFFFFFF:X}"


 def lsn_from_hex(lsn_hex: str) -> int:
@@ -331,16 +329,12 @@ def wait_for_upload(
         if current_lsn >= lsn:
             return
         print(
-            "waiting for remote_consistent_lsn to reach {}, now {}, iteration {}".format(
-                lsn_to_hex(lsn), lsn_to_hex(current_lsn), i + 1
-            )
+            f"waiting for remote_consistent_lsn to reach {lsn_to_hex(lsn)}, now {lsn_to_hex(current_lsn)}, iteration {i + 1}"
         )
         time.sleep(1)

     raise Exception(
-        "timed out while waiting for remote_consistent_lsn to reach {}, was {}".format(
-            lsn_to_hex(lsn), lsn_to_hex(current_lsn)
-        )
+        f"timed out while waiting for remote_consistent_lsn to reach {lsn_to_hex(lsn)}, was {lsn_to_hex(current_lsn)}"
     )
@@ -482,20 +482,18 @@ def pytest_terminal_summary(
                     terminalreporter.section("Benchmark results", "-")
                     is_header_printed = True

-                terminalreporter.write(
-                    "{}.{}: ".format(test_report.head_line, recorded_property["name"])
-                )
+                terminalreporter.write(f"{test_report.head_line}.{recorded_property['name']}: ")
                 unit = recorded_property["unit"]
                 value = recorded_property["value"]
                 if unit == "MB":
-                    terminalreporter.write("{0:,.0f}".format(value), green=True)
+                    terminalreporter.write(f"{value:,.0f}", green=True)
                 elif unit in ("s", "ms") and isinstance(value, float):
-                    terminalreporter.write("{0:,.3f}".format(value), green=True)
+                    terminalreporter.write(f"{value:,.3f}", green=True)
                 elif isinstance(value, float):
-                    terminalreporter.write("{0:,.4f}".format(value), green=True)
+                    terminalreporter.write(f"{value:,.4f}", green=True)
                 else:
                     terminalreporter.write(str(value), green=True)
-                terminalreporter.line(" {}".format(unit))
+                terminalreporter.line(f" {unit}")

                 result_entry.append(recorded_property)
@@ -3605,7 +3605,7 @@ class Safekeeper:
         return self

     def stop(self, immediate: bool = False) -> "Safekeeper":
-        log.info("Stopping safekeeper {}".format(self.id))
+        log.info(f"Stopping safekeeper {self.id}")
         self.env.neon_cli.safekeeper_stop(self.id, immediate)
         self.running = False
         return self
@@ -4037,13 +4037,13 @@ def check_restored_datadir_content(test_output_dir: Path, env: NeonEnv, endpoint
     for f in mismatch:
         f1 = os.path.join(endpoint.pgdata_dir, f)
         f2 = os.path.join(restored_dir_path, f)
-        stdout_filename = "{}.filediff".format(f2)
+        stdout_filename = f"{f2}.filediff"

         with open(stdout_filename, "w") as stdout_f:
-            subprocess.run("xxd -b {} > {}.hex ".format(f1, f1), shell=True)
-            subprocess.run("xxd -b {} > {}.hex ".format(f2, f2), shell=True)
+            subprocess.run(f"xxd -b {f1} > {f1}.hex ", shell=True)
+            subprocess.run(f"xxd -b {f2} > {f2}.hex ", shell=True)

-            cmd = "diff {}.hex {}.hex".format(f1, f2)
+            cmd = f"diff {f1}.hex {f2}.hex"
             subprocess.run([cmd], stdout=stdout_f, shell=True)

     assert (mismatch, error) == ([], [])
@@ -204,13 +204,11 @@ def wait_for_last_record_lsn(
             return current_lsn
         if i % 10 == 0:
             log.info(
-                "{}/{} waiting for last_record_lsn to reach {}, now {}, iteration {}".format(
-                    tenant, timeline, lsn, current_lsn, i + 1
-                )
+                f"{tenant}/{timeline} waiting for last_record_lsn to reach {lsn}, now {current_lsn}, iteration {i + 1}"
             )
         time.sleep(0.1)
     raise Exception(
-        "timed out while waiting for last_record_lsn to reach {}, was {}".format(lsn, current_lsn)
+        f"timed out while waiting for last_record_lsn to reach {lsn}, was {current_lsn}"
     )
@@ -125,19 +125,19 @@ async def run_update_loop_worker(ep: Endpoint, n_txns: int, idx: int):
     await conn.execute(f"ALTER TABLE {table} SET (autovacuum_enabled = false)")
     await conn.execute(f"INSERT INTO {table} VALUES (1, 0)")
     await conn.execute(
-        """
-CREATE PROCEDURE updating{0}() as
-$$
-DECLARE
-    i integer;
-BEGIN
-    FOR i IN 1..{1} LOOP
-        UPDATE {0} SET x = x + 1 WHERE pk=1;
-        COMMIT;
-    END LOOP;
-END
-$$ LANGUAGE plpgsql
-""".format(table, n_txns)
+        f"""
+CREATE PROCEDURE updating{table}() as
+$$
+DECLARE
+    i integer;
+BEGIN
+    FOR i IN 1..{n_txns} LOOP
+        UPDATE {table} SET x = x + 1 WHERE pk=1;
+        COMMIT;
+    END LOOP;
+END
+$$ LANGUAGE plpgsql
+"""
     )
     await conn.execute("SET statement_timeout=0")
     await conn.execute(f"call updating{table}()")
@@ -78,7 +78,7 @@ def test_branch_creation_heavy_write(neon_compare: NeonCompare, n_branches: int)
         p = random.randint(0, i)

         timer = timeit.default_timer()
-        env.neon_cli.create_branch("b{}".format(i + 1), "b{}".format(p), tenant_id=tenant)
+        env.neon_cli.create_branch(f"b{i + 1}", f"b{p}", tenant_id=tenant)
         dur = timeit.default_timer() - timer

         log.info(f"Creating branch b{i+1} took {dur}s")
@@ -84,11 +84,11 @@ def test_branching_with_pgbench(
         threads = []

         if ty == "cascade":
-            env.neon_cli.create_branch("b{}".format(i + 1), "b{}".format(i), tenant_id=tenant)
+            env.neon_cli.create_branch(f"b{i + 1}", f"b{i}", tenant_id=tenant)
         else:
-            env.neon_cli.create_branch("b{}".format(i + 1), "b0", tenant_id=tenant)
+            env.neon_cli.create_branch(f"b{i + 1}", "b0", tenant_id=tenant)

-        endpoints.append(env.endpoints.create_start("b{}".format(i + 1), tenant_id=tenant))
+        endpoints.append(env.endpoints.create_start(f"b{i + 1}", tenant_id=tenant))

         threads.append(
             threading.Thread(target=run_pgbench, args=(endpoints[-1].connstr(),), daemon=True)
@@ -74,8 +74,8 @@ def test_large_schema(neon_env_builder: NeonEnvBuilder):
            cur.execute("select * from pg_depend order by refclassid, refobjid, refobjsubid")

     # Check layer file sizes
-    timeline_path = "{}/tenants/{}/timelines/{}/".format(
-        env.pageserver.workdir, env.initial_tenant, env.initial_timeline
+    timeline_path = (
+        f"{env.pageserver.workdir}/tenants/{env.initial_tenant}/timelines/{env.initial_timeline}/"
     )
     for filename in os.listdir(timeline_path):
         if filename.startswith("00000"):
@@ -57,9 +57,7 @@ def test_layer_bloating(neon_simple_env: NeonEnv, vanilla_pg):
     time.sleep(10)

     # Check layer file sizes
-    timeline_path = "{}/tenants/{}/timelines/{}/".format(
-        env.pageserver.workdir, env.initial_tenant, timeline
-    )
+    timeline_path = f"{env.pageserver.workdir}/tenants/{env.initial_tenant}/timelines/{timeline}/"
     log.info(f"Check {timeline_path}")
     for filename in os.listdir(timeline_path):
         if filename.startswith("00000"):
@@ -9,7 +9,6 @@ of the pageserver are:
 - Updates to remote_consistent_lsn may only be made visible after validating generation
 """

-
 import enum
 import re
 import time
@@ -22,7 +22,7 @@ def test_read_validation(neon_simple_env: NeonEnv):
     with closing(endpoint.connect()) as con:
         with con.cursor() as c:
             for e in extensions:
-                c.execute("create extension if not exists {};".format(e))
+                c.execute(f"create extension if not exists {e};")

             c.execute("create table foo (c int) with (autovacuum_enabled = false)")
             c.execute("insert into foo values (1)")
@@ -42,14 +42,12 @@ def test_read_validation(neon_simple_env: NeonEnv):
             log.info("Test table is populated, validating buffer cache")

             cache_entries = query_scalar(
-                c, "select count(*) from pg_buffercache where relfilenode = {}".format(relfilenode)
+                c, f"select count(*) from pg_buffercache where relfilenode = {relfilenode}"
             )
             assert cache_entries > 0, "No buffers cached for the test relation"

             c.execute(
-                "select reltablespace, reldatabase, relfilenode from pg_buffercache where relfilenode = {}".format(
-                    relfilenode
-                )
+                f"select reltablespace, reldatabase, relfilenode from pg_buffercache where relfilenode = {relfilenode}"
             )
             reln = c.fetchone()
             assert reln is not None
@@ -59,22 +57,20 @@ def test_read_validation(neon_simple_env: NeonEnv):
             c.execute("select clear_buffer_cache()")

             cache_entries = query_scalar(
-                c, "select count(*) from pg_buffercache where relfilenode = {}".format(relfilenode)
+                c, f"select count(*) from pg_buffercache where relfilenode = {relfilenode}"
             )
             assert cache_entries == 0, "Failed to clear buffer cache"

             log.info("Cache is clear, reading stale page version")

             c.execute(
-                "select lsn, lower, upper from page_header(get_raw_page_at_lsn('foo', 'main', 0, '{}'))".format(
-                    first[0]
-                )
+                f"select lsn, lower, upper from page_header(get_raw_page_at_lsn('foo', 'main', 0, '{first[0]}'))"
             )
             direct_first = c.fetchone()
             assert first == direct_first, "Failed fetch page at historic lsn"

             cache_entries = query_scalar(
-                c, "select count(*) from pg_buffercache where relfilenode = {}".format(relfilenode)
+                c, f"select count(*) from pg_buffercache where relfilenode = {relfilenode}"
             )
             assert cache_entries == 0, "relation buffers detected after invalidation"
@@ -87,7 +83,7 @@ def test_read_validation(neon_simple_env: NeonEnv):
             assert second == direct_latest, "Failed fetch page at latest lsn"

             cache_entries = query_scalar(
-                c, "select count(*) from pg_buffercache where relfilenode = {}".format(relfilenode)
+                c, f"select count(*) from pg_buffercache where relfilenode = {relfilenode}"
             )
             assert cache_entries == 0, "relation buffers detected after invalidation"

@@ -96,9 +92,7 @@ def test_read_validation(neon_simple_env: NeonEnv):
             )

             c.execute(
-                "select lsn, lower, upper from page_header(get_raw_page_at_lsn( {}, {}, {}, 0, 0, '{}' ))".format(
-                    reln[0], reln[1], reln[2], first[0]
-                )
+                f"select lsn, lower, upper from page_header(get_raw_page_at_lsn({reln[0]}, {reln[1]}, {reln[2]}, 0, 0, '{first[0]}'))"
             )
             direct_first = c.fetchone()
             assert first == direct_first, "Failed fetch page at historic lsn using oid"
@@ -108,9 +102,7 @@ def test_read_validation(neon_simple_env: NeonEnv):
             )

             c.execute(
-                "select lsn, lower, upper from page_header(get_raw_page_at_lsn( {}, {}, {}, 0, 0, NULL ))".format(
-                    reln[0], reln[1], reln[2]
-                )
+                f"select lsn, lower, upper from page_header(get_raw_page_at_lsn({reln[0]}, {reln[1]}, {reln[2]}, 0, 0, NULL))"
             )
             direct_latest = c.fetchone()
             assert second == direct_latest, "Failed fetch page at latest lsn"
@@ -122,9 +114,7 @@ def test_read_validation(neon_simple_env: NeonEnv):
             )

             c.execute(
-                "select lsn, lower, upper from page_header(get_raw_page_at_lsn( {}, {}, {}, 0, 0, '{}' ))".format(
-                    reln[0], reln[1], reln[2], first[0]
-                )
+                f"select lsn, lower, upper from page_header(get_raw_page_at_lsn({reln[0]}, {reln[1]}, {reln[2]}, 0, 0, '{first[0]}'))"
             )
             direct_first = c.fetchone()
             assert first == direct_first, "Failed fetch page at historic lsn using oid"
@@ -134,7 +124,7 @@ def test_read_validation(neon_simple_env: NeonEnv):
                 c.execute("select * from page_header(get_raw_page('foo', 'main', 0));")
                 raise AssertionError("query should have failed")
             except UndefinedTable as e:
-                log.info("Caught an expected failure: {}".format(e))
+                log.info(f"Caught an expected failure: {e}")


 def test_read_validation_neg(neon_simple_env: NeonEnv):
@@ -148,7 +138,7 @@ def test_read_validation_neg(neon_simple_env: NeonEnv):
     with closing(endpoint.connect()) as con:
         with con.cursor() as c:
             for e in extensions:
-                c.execute("create extension if not exists {};".format(e))
+                c.execute(f"create extension if not exists {e};")

             log.info("read a page of a missing relation")
             try:
@@ -157,7 +147,7 @@ def test_read_validation_neg(neon_simple_env: NeonEnv):
                 )
                 raise AssertionError("query should have failed")
             except UndefinedTable as e:
-                log.info("Caught an expected failure: {}".format(e))
+                log.info(f"Caught an expected failure: {e}")

             c.execute("create table foo (c int) with (autovacuum_enabled = false)")
             c.execute("insert into foo values (1)")
@@ -169,7 +159,7 @@ def test_read_validation_neg(neon_simple_env: NeonEnv):
                 )
                 raise AssertionError("query should have failed")
             except IoError as e:
-                log.info("Caught an expected failure: {}".format(e))
+                log.info(f"Caught an expected failure: {e}")

             log.info("Pass NULL as an input")
             expected = (None, None, None)
@@ -103,9 +103,7 @@ def test_many_timelines(neon_env_builder: NeonEnvBuilder):

     n_timelines = 3

-    branch_names = [
-        "test_safekeepers_many_timelines_{}".format(tlin) for tlin in range(n_timelines)
-    ]
+    branch_names = [f"test_safekeepers_many_timelines_{tlin}" for tlin in range(n_timelines)]
     # pageserver, safekeeper operate timelines via their ids (can be represented in hex as 'ad50847381e248feaac9876cc71ae418')
     # that's not really human readable, so the branch names are introduced in Neon CLI.
     # Neon CLI stores its branch <-> timeline mapping in its internals,
@@ -1136,13 +1134,13 @@ def cmp_sk_wal(sks: List[Safekeeper], tenant_id: TenantId, timeline_id: Timeline
     for f in mismatch:
         f1 = os.path.join(sk0.timeline_dir(tenant_id, timeline_id), f)
         f2 = os.path.join(sk.timeline_dir(tenant_id, timeline_id), f)
-        stdout_filename = "{}.filediff".format(f2)
+        stdout_filename = f"{f2}.filediff"

         with open(stdout_filename, "w") as stdout_f:
-            subprocess.run("xxd {} > {}.hex ".format(f1, f1), shell=True)
-            subprocess.run("xxd {} > {}.hex ".format(f2, f2), shell=True)
+            subprocess.run(f"xxd {f1} > {f1}.hex ", shell=True)
+            subprocess.run(f"xxd {f2} > {f2}.hex ", shell=True)

-            cmd = "diff {}.hex {}.hex".format(f1, f2)
+            cmd = f"diff {f1}.hex {f2}.hex"
             subprocess.run([cmd], stdout=stdout_f, shell=True)

     assert (mismatch, not_regular) == (
@@ -76,20 +76,20 @@ class WorkerStats(object):
         self.counters[worker_id] += 1

     def check_progress(self):
-        log.debug("Workers progress: {}".format(self.counters))
+        log.debug(f"Workers progress: {self.counters}")

         # every worker should finish at least one tx
         assert all(cnt > 0 for cnt in self.counters)

         progress = sum(self.counters)
-        log.info("All workers made {} transactions".format(progress))
+        log.info(f"All workers made {progress} transactions")


 async def run_random_worker(
     stats: WorkerStats, endpoint: Endpoint, worker_id, n_accounts, max_transfer
 ):
     pg_conn = await endpoint.connect_async()
-    log.debug("Started worker {}".format(worker_id))
+    log.debug(f"Started worker {worker_id}")

     while stats.running:
         from_uid = random.randint(0, n_accounts - 1)
@@ -99,9 +99,9 @@ async def run_random_worker(
         await bank_transfer(pg_conn, from_uid, to_uid, amount)
         stats.inc_progress(worker_id)

-        log.debug("Executed transfer({}) {} => {}".format(amount, from_uid, to_uid))
+        log.debug(f"Executed transfer({amount}) {from_uid} => {to_uid}")

-    log.debug("Finished worker {}".format(worker_id))
+    log.debug(f"Finished worker {worker_id}")

     await pg_conn.close()
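A detail worth noting from the PgBin hunks above: before Python 3.12, an f-string expression cannot reuse the quote character of the enclosing literal, so the converted print calls keep the outer string on single quotes, leaving double quotes free for both the quoted output and the " ".join(...) expression inside the braces. A standalone sketch, with an illustrative command value:

    command = ["pg_ctl", "start", "-D", "data"]

    # Outer quotes are single so the expression inside the braces can use double quotes.
    print(f'Running command "{" ".join(command)}"')  # -> Running command "pg_ctl start -D data"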