test_runner: fix flake8 warnings

Alexander Bayandin
2022-08-18 20:41:13 +01:00
committed by Alexander Bayandin
parent ae3227509c
commit 39a3bcac36
35 changed files with 92 additions and 140 deletions
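
Most of the changes below are mechanical and fall into a handful of flake8 categories: unused imports and variables (F401, F841), f-strings with no placeholders (F541), comparisons to True/False with == instead of is (E712), lambdas assigned to names (E731), and intentional patterns waived inline (# noqa: E722 for a bare except, # noqa: E741 for the ambiguous name "l"). A minimal, self-contained Python sketch of those before/after patterns (names here are illustrative, not taken from the diff):

# F541: f-string without any placeholders -> plain string
print("Done import")  # was: print(f"Done import")

# E712: comparison to True/False -> identity check
running = False
assert running is False  # was: assert running == False

# E731: assigned lambda -> def, so the callable gets a real name
def double(x):  # was: double = lambda x: 2 * x
    return 2 * x

# E722: a bare except that is intentional gets an inline waiver
try:
    value = int("not-a-number")
except:  # noqa: E722
    value = 0

print(double(21), value)  # -> 42 0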

View File

@@ -28,13 +28,13 @@ import tempfile
 import time
 import uuid
 from contextlib import closing
-from os import path
 from pathlib import Path
-from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, TypeVar, Union, cast
+from typing import Any, Dict, List, Optional, Tuple, cast
 import psycopg2
 import requests
 from psycopg2.extensions import connection as PgConnection
-from psycopg2.extensions import parse_dsn
 ###############################################
 ### client-side utils copied from test fixtures
@@ -149,10 +149,8 @@ class PgProtocol:
         # enough for our tests, but if you need a longer, you can
         # change it by calling "SET statement_timeout" after
         # connecting.
-        if "options" in conn_options:
-            conn_options["options"] = f"-cstatement_timeout=120s " + conn_options["options"]
-        else:
-            conn_options["options"] = "-cstatement_timeout=120s"
+        conn_options["options"] = f"-cstatement_timeout=120s {conn_options.get('options', '')}"
         return conn_options
     # autocommit=True here by default because that's what we need most of the time
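
The four-line if/else above collapses into a single expression because dict.get supplies an empty-string default. One side effect, harmless as far as libpq option parsing goes, is a trailing space when no "options" key was present; a quick standalone check (the variable name mirrors the hunk):

# Both shapes of conn_options end up with a statement_timeout-prefixed options string.
for conn_options in ({}, {"options": "-csearch_path=public"}):
    conn_options["options"] = f"-cstatement_timeout=120s {conn_options.get('options', '')}"
    print(repr(conn_options["options"]))
# '-cstatement_timeout=120s '
# '-cstatement_timeout=120s -csearch_path=public'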
@@ -250,7 +248,7 @@ class NeonPageserverHttpClient(requests.Session):
         except requests.RequestException as e:
             try:
                 msg = res.json()["msg"]
-            except:
+            except:  # noqa: E722
                 msg = ""
             raise NeonPageserverApiException(msg) from e
@@ -477,8 +475,8 @@ def import_timeline(
     import_cmd = f"import basebackup {tenant_id} {timeline_id} {last_lsn} {last_lsn}"
     full_cmd = rf"""cat {tar_filename} | {psql_path} {pageserver_connstr} -c '{import_cmd}' """
-    stderr_filename2 = path.join(args.work_dir, f"import_{tenant_id}_{timeline_id}.stderr")
-    stdout_filename = path.join(args.work_dir, f"import_{tenant_id}_{timeline_id}.stdout")
+    stderr_filename2 = os.path.join(args.work_dir, f"import_{tenant_id}_{timeline_id}.stderr")
+    stdout_filename = os.path.join(args.work_dir, f"import_{tenant_id}_{timeline_id}.stdout")
     print(f"Running: {full_cmd}")
@@ -495,7 +493,7 @@ def import_timeline(
         check=True,
     )
-    print(f"Done import")
+    print("Done import")
     # Wait until pageserver persists the files
     wait_for_upload(
@@ -508,7 +506,7 @@ def export_timeline(
 ):
     # Choose filenames
     incomplete_filename = tar_filename + ".incomplete"
-    stderr_filename = path.join(args.work_dir, f"{tenant_id}_{timeline_id}.stderr")
+    stderr_filename = os.path.join(args.work_dir, f"{tenant_id}_{timeline_id}.stderr")
     # Construct export command
     query = f"fullbackup {tenant_id} {timeline_id} {last_lsn} {prev_lsn}"
@@ -563,7 +561,7 @@ def main(args: argparse.Namespace):
             continue
         # Choose filenames
-        tar_filename = path.join(
+        tar_filename = os.path.join(
             args.work_dir, f"{timeline['tenant_id']}_{timeline['timeline_id']}.tar"
         )

View File

@@ -11,7 +11,7 @@ def test_basebackup_error(neon_simple_env: NeonEnv):
     env.neon_cli.create_branch("test_basebackup_error", "empty")
     # Introduce failpoint
-    env.pageserver.safe_psql(f"failpoints basebackup-before-control-file=return")
+    env.pageserver.safe_psql("failpoints basebackup-before-control-file=return")
     with pytest.raises(Exception, match="basebackup-before-control-file"):
-        pg = env.postgres.create_start("test_basebackup_error")
+        env.postgres.create_start("test_basebackup_error")

View File

@@ -65,7 +65,7 @@ def test_branch_and_gc(neon_simple_env: NeonEnv):
         }
     )
-    timeline_main = env.neon_cli.create_timeline(f"test_main", tenant_id=tenant)
+    timeline_main = env.neon_cli.create_timeline("test_main", tenant_id=tenant)
     pg_main = env.postgres.create_start("test_main", tenant_id=tenant)
     main_cur = pg_main.connect().cursor()
@@ -148,7 +148,7 @@ def test_branch_creation_before_gc(neon_simple_env: NeonEnv):
     # Use `failpoint=sleep` and `threading` to make the GC iteration triggers *before* the
     # branch creation task but the individual timeline GC iteration happens *after*
     # the branch creation task.
-    env.pageserver.safe_psql(f"failpoints before-timeline-gc=sleep(2000)")
+    env.pageserver.safe_psql("failpoints before-timeline-gc=sleep(2000)")
     def do_gc():
         env.pageserver.safe_psql(f"do_gc {tenant.hex} {b0.hex} 0")

View File

@@ -1,8 +1,6 @@
 import concurrent.futures
 import os
-from contextlib import closing
 from typing import List, Tuple
-from uuid import UUID
 import pytest
 from fixtures.log_helper import log
@@ -24,7 +22,7 @@ def test_broken_timeline(neon_env_builder: NeonEnvBuilder):
     tenant_id = tenant_id_uuid.hex
     timeline_id = timeline_id_uuid.hex
-    pg = env.postgres.create_start(f"main", tenant_id=tenant_id_uuid)
+    pg = env.postgres.create_start("main", tenant_id=tenant_id_uuid)
     with pg.cursor() as cur:
         cur.execute("CREATE TABLE t(key int primary key, value text)")
         cur.execute("INSERT INTO t SELECT generate_series(1,100), 'payload'")
@@ -102,7 +100,7 @@ def test_fix_broken_timelines_on_startup(neon_simple_env: NeonEnv):
     tenant_id, _ = env.neon_cli.create_tenant()
     # Introduce failpoint when creating a new timeline
-    env.pageserver.safe_psql(f"failpoints before-checkpoint-new-timeline=return")
+    env.pageserver.safe_psql("failpoints before-checkpoint-new-timeline=return")
     with pytest.raises(Exception, match="before-checkpoint-new-timeline"):
         _ = env.neon_cli.create_timeline("test_fix_broken_timelines", tenant_id)

View File

@@ -1,6 +1,5 @@
 import os
 import time
-from contextlib import closing
 from fixtures.log_helper import log
 from fixtures.neon_fixtures import NeonEnv
@@ -49,7 +48,7 @@ def test_clog_truncate(neon_simple_env: NeonEnv):
log.info(f"pg_xact_0000_path = {pg_xact_0000_path}") log.info(f"pg_xact_0000_path = {pg_xact_0000_path}")
while os.path.isfile(pg_xact_0000_path): while os.path.isfile(pg_xact_0000_path):
log.info(f"file exists. wait for truncation. " "pg_xact_0000_path = {pg_xact_0000_path}") log.info(f"file exists. wait for truncation: {pg_xact_0000_path=}")
time.sleep(5) time.sleep(5)
# checkpoint to advance latest lsn # checkpoint to advance latest lsn

View File

@@ -1,6 +1,5 @@
 import os
 import pathlib
-from contextlib import closing
 from fixtures.log_helper import log
 from fixtures.neon_fixtures import NeonEnv, check_restored_datadir_content
@@ -92,14 +91,14 @@ def test_dropdb(neon_simple_env: NeonEnv, test_output_dir):
     dbpath = pathlib.Path(pg_before.pgdata_dir) / "base" / str(dboid)
     log.info(dbpath)
-    assert os.path.isdir(dbpath) == True
+    assert os.path.isdir(dbpath) is True
     # Test that database subdir doesn't exist on the branch after drop
     assert pg_after.pgdata_dir
     dbpath = pathlib.Path(pg_after.pgdata_dir) / "base" / str(dboid)
     log.info(dbpath)
-    assert os.path.isdir(dbpath) == False
+    assert os.path.isdir(dbpath) is False
     # Check that we restore the content of the datadir correctly
     check_restored_datadir_content(test_output_dir, env, pg)

View File

@@ -1,6 +1,4 @@
-import pytest
-from fixtures.log_helper import log
-from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder, NeonPageserverHttpClient
+from fixtures.neon_fixtures import NeonEnvBuilder
 def test_fsm_truncate(neon_env_builder: NeonEnvBuilder):

View File

@@ -24,7 +24,7 @@ async def update_table(pg: Postgres):
     while updates_performed < updates_to_perform:
         updates_performed += 1
         id = random.randrange(1, num_rows)
-        row = await pg_conn.fetchrow(f"UPDATE foo SET counter = counter + 1 WHERE id = {id}")
+        await pg_conn.fetchrow(f"UPDATE foo SET counter = counter + 1 WHERE id = {id}")
     # Perform aggressive GC with 0 horizon

View File

@@ -1,13 +1,7 @@
-import math
-import time
-from contextlib import closing
-from datetime import timedelta, timezone, tzinfo
-from uuid import UUID
-import psycopg2.errors
-import psycopg2.extras
+from datetime import timedelta
 from fixtures.log_helper import log
-from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder, Postgres
+from fixtures.neon_fixtures import NeonEnvBuilder
 from fixtures.utils import query_scalar

View File

@@ -1,16 +1,12 @@
-import os
 import pathlib
 import subprocess
 from typing import Optional
 from uuid import UUID, uuid4
-import pytest
-from fixtures.log_helper import log
 from fixtures.neon_fixtures import (
     DEFAULT_BRANCH_NAME,
     NeonEnv,
     NeonEnvBuilder,
-    NeonPageserverApiException,
     NeonPageserverHttpClient,
     neon_binpath,
     pg_distrib_dir,
@@ -24,13 +20,15 @@ def test_pageserver_init_node_id(neon_simple_env: NeonEnv):
     repo_dir = neon_simple_env.repo_dir
     pageserver_config = repo_dir / "pageserver.toml"
     pageserver_bin = pathlib.Path(neon_binpath) / "pageserver"
-    run_pageserver = lambda args: subprocess.run(
-        [str(pageserver_bin), "-D", str(repo_dir), *args],
-        check=False,
-        universal_newlines=True,
-        stdout=subprocess.PIPE,
-        stderr=subprocess.PIPE,
-    )
+
+    def run_pageserver(args):
+        return subprocess.run(
+            [str(pageserver_bin), "-D", str(repo_dir), *args],
+            check=False,
+            universal_newlines=True,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+        )
+
     # remove initial config
     pageserver_config.unlink()
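
The hunk above fixes flake8's E731, which flags binding a lambda to a name; an equivalent def behaves identically but gets a real __name__ for tracebacks and profiling. A toy stand-in (using echo rather than the pageserver binary):

import subprocess

# was: run_echo = lambda args: subprocess.run(["echo", *args], ...)
def run_echo(args):
    return subprocess.run(
        ["echo", *args],
        check=False,
        universal_newlines=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )

print(run_echo(["hello"]).stdout.strip())  # -> hello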

View File

@@ -32,7 +32,7 @@ async def copy_test_data_to_table(pg: Postgres, worker_id: int, table_name: str)
 async def parallel_load_same_table(pg: Postgres, n_parallel: int):
     workers = []
     for worker_id in range(n_parallel):
-        worker = copy_test_data_to_table(pg, worker_id, f"copytest")
+        worker = copy_test_data_to_table(pg, worker_id, "copytest")
         workers.append(asyncio.create_task(worker))
     # await all workers
@@ -49,7 +49,7 @@ def test_parallel_copy(neon_simple_env: NeonEnv, n_parallel=5):
     # Create test table
     conn = pg.connect()
     cur = conn.cursor()
-    cur.execute(f"CREATE TABLE copytest (i int, t text)")
+    cur.execute("CREATE TABLE copytest (i int, t text)")
     # Run COPY TO to load the table with parallel connections.
     asyncio.run(parallel_load_same_table(pg, n_parallel))

View File

@@ -1,7 +1,4 @@
-import json
-import os
 import time
-from ast import Assert
 from contextlib import closing
 import psycopg2.extras
@@ -33,8 +30,6 @@ def test_pageserver_recovery(neon_env_builder: NeonEnvBuilder):
pg = env.postgres.create_start("test_pageserver_recovery") pg = env.postgres.create_start("test_pageserver_recovery")
log.info("postgres is running on 'test_pageserver_recovery' branch") log.info("postgres is running on 'test_pageserver_recovery' branch")
connstr = pg.connstr()
with closing(pg.connect()) as conn: with closing(pg.connect()) as conn:
with conn.cursor() as cur: with conn.cursor() as cur:
with closing(env.pageserver.connect()) as psconn: with closing(env.pageserver.connect()) as psconn:

View File

@@ -99,7 +99,7 @@ def test_remote_storage_backup_and_restore(
     env.pageserver.start()
     # Introduce failpoint in download
-    env.pageserver.safe_psql(f"failpoints remote-storage-download-pre-rename=return")
+    env.pageserver.safe_psql("failpoints remote-storage-download-pre-rename=return")
     client.tenant_attach(UUID(tenant_id))

View File

@@ -1,7 +1,6 @@
 from contextlib import closing
 import psycopg2.extras
-import pytest
 from fixtures.log_helper import log
 from fixtures.neon_fixtures import NeonEnvBuilder
@@ -22,8 +21,8 @@ tenant_config={checkpoint_distance = 10000, compaction_target_size = 1048576}"""
         }
     )
-    env.neon_cli.create_timeline(f"test_tenant_conf", tenant_id=tenant)
-    pg = env.postgres.create_start(
+    env.neon_cli.create_timeline("test_tenant_conf", tenant_id=tenant)
+    env.postgres.create_start(
         "test_tenant_conf",
         "main",
         tenant,

View File

@@ -14,7 +14,6 @@ from fixtures.neon_fixtures import (
     NeonEnv,
     NeonEnvBuilder,
     NeonPageserverHttpClient,
-    PageserverPort,
     PortDistributor,
     Postgres,
     assert_no_in_progress_downloads_for_tenant,
@@ -56,7 +55,7 @@ def new_pageserver_helper(
f"-c listen_pg_addr='localhost:{pg_port}'", f"-c listen_pg_addr='localhost:{pg_port}'",
f"-c listen_http_addr='localhost:{http_port}'", f"-c listen_http_addr='localhost:{http_port}'",
f"-c pg_distrib_dir='{pg_distrib_dir}'", f"-c pg_distrib_dir='{pg_distrib_dir}'",
f"-c id=2", "-c id=2",
f"-c remote_storage={{local_path='{remote_storage_mock_path}'}}", f"-c remote_storage={{local_path='{remote_storage_mock_path}'}}",
] ]
if broker is not None: if broker is not None:
@@ -92,7 +91,7 @@ def load(pg: Postgres, stop_event: threading.Event, load_ok_event: threading.Eve
             with pg_cur(pg) as cur:
                 cur.execute("INSERT INTO load VALUES ('some payload')")
                 inserted_ctr += 1
-        except:
+        except:  # noqa: E722
             if not failed:
                 log.info("load failed")
                 failed = True

View File

@@ -1,10 +1,9 @@
-import time
 from uuid import UUID
 from fixtures.neon_fixtures import NeonEnvBuilder, wait_until
-def get_only_element(l):
+def get_only_element(l):  # noqa: E741
     assert len(l) == 1
     return l[0]
@@ -46,7 +45,7 @@ def test_tenant_tasks(neon_env_builder: NeonEnvBuilder):
     # Create tenant, start compute
     tenant, _ = env.neon_cli.create_tenant()
-    timeline = env.neon_cli.create_timeline(name, tenant_id=tenant)
+    env.neon_cli.create_timeline(name, tenant_id=tenant)
     pg = env.postgres.create_start(name, tenant_id=tenant)
     assert get_state(tenant) == "Active"

View File

@@ -7,7 +7,6 @@
 #
 import asyncio
-from contextlib import closing
 from typing import List, Tuple
 from uuid import UUID
@@ -25,12 +24,12 @@ from fixtures.utils import lsn_from_hex
 async def tenant_workload(env: NeonEnv, pg: Postgres):
-    pageserver_conn = await env.pageserver.connect_async()
+    await env.pageserver.connect_async()
     pg_conn = await pg.connect_async()
-    tenant_id = await pg_conn.fetchval("show neon.tenant_id")
-    timeline_id = await pg_conn.fetchval("show neon.timeline_id")
+    await pg_conn.fetchval("show neon.tenant_id")
+    await pg_conn.fetchval("show neon.timeline_id")
     await pg_conn.execute("CREATE TABLE t(key int primary key, value text)")
     for i in range(1, 100):
@@ -72,10 +71,10 @@ def test_tenants_many(neon_env_builder: NeonEnvBuilder, remote_storatge_kind: Re
"checkpoint_distance": "5000000", "checkpoint_distance": "5000000",
} }
) )
env.neon_cli.create_timeline(f"test_tenants_many", tenant_id=tenant) env.neon_cli.create_timeline("test_tenants_many", tenant_id=tenant)
pg = env.postgres.create_start( pg = env.postgres.create_start(
f"test_tenants_many", "test_tenants_many",
tenant_id=tenant, tenant_id=tenant,
) )
tenants_pgs.append((tenant, pg)) tenants_pgs.append((tenant, pg))

View File

@@ -125,7 +125,7 @@ def wait_for_pageserver_catchup(pgmain: Postgres, polling_interval=1, timeout=60
         elapsed = time.time() - started_at
         if elapsed > timeout:
             raise RuntimeError(
-                f"timed out waiting for pageserver to reach pg_current_wal_flush_lsn()"
+                "timed out waiting for pageserver to reach pg_current_wal_flush_lsn()"
             )
         res = pgmain.safe_psql(
@@ -390,7 +390,7 @@ def test_tenant_physical_size(neon_simple_env: NeonEnv):
     tenant, timeline = env.neon_cli.create_tenant()
     def get_timeline_physical_size(timeline: UUID):
-        res = client.timeline_detail(tenant, timeline)
+        res = client.timeline_detail(tenant, timeline, include_non_incremental_physical_size=True)
         return res["local"]["current_physical_size_non_incremental"]
     timeline_total_size = get_timeline_physical_size(timeline)

View File

@@ -180,7 +180,7 @@ def test_many_timelines(neon_env_builder: NeonEnvBuilder):
             while not self.should_stop.is_set():
                 collect_metrics("during INSERT INTO")
                 time.sleep(1)
-        except:
+        except:  # noqa: E722
             log.error(
                 "MetricsChecker's thread failed, the test will be failed on .stop() call",
                 exc_info=True,
@@ -552,7 +552,7 @@ def test_s3_wal_replay(neon_env_builder: NeonEnvBuilder, remote_storatge_kind: R
     while True:
         elapsed = time.time() - started_at
         if elapsed > wait_lsn_timeout:
-            raise RuntimeError(f"Timed out waiting for WAL redo")
+            raise RuntimeError("Timed out waiting for WAL redo")
         pageserver_lsn = env.pageserver.http_client().timeline_detail(
             uuid.UUID(tenant_id), uuid.UUID((timeline_id))
@@ -615,7 +615,7 @@ class ProposerPostgres(PgProtocol):
"shared_preload_libraries = 'neon'\n", "shared_preload_libraries = 'neon'\n",
f"neon.timeline_id = '{self.timeline_id.hex}'\n", f"neon.timeline_id = '{self.timeline_id.hex}'\n",
f"neon.tenant_id = '{self.tenant_id.hex}'\n", f"neon.tenant_id = '{self.tenant_id.hex}'\n",
f"neon.pageserver_connstring = ''\n", "neon.pageserver_connstring = ''\n",
f"neon.safekeepers = '{safekeepers}'\n", f"neon.safekeepers = '{safekeepers}'\n",
f"listen_addresses = '{self.listen_addr}'\n", f"listen_addresses = '{self.listen_addr}'\n",
f"port = '{self.port}'\n", f"port = '{self.port}'\n",

View File

@@ -49,7 +49,7 @@ def test_neon_regress(neon_simple_env: NeonEnv, test_output_dir: Path, pg_bin, c
     # checkpoint one more time to ensure that the lsn we get is the latest one
     pg.safe_psql("CHECKPOINT")
-    lsn = pg.safe_psql("select pg_current_wal_insert_lsn()")[0][0]
+    pg.safe_psql("select pg_current_wal_insert_lsn()")[0][0]
     # Check that we restore the content of the datadir correctly
     check_restored_datadir_content(test_output_dir, env, pg)

View File

@@ -60,6 +60,7 @@ class PgBenchRunResult:
     run_duration: float
     run_start_timestamp: int
     run_end_timestamp: int
+    scale: int
     # TODO progress
@@ -130,6 +131,7 @@ class PgBenchRunResult:
             run_duration=run_duration,
             run_start_timestamp=run_start_timestamp,
             run_end_timestamp=run_end_timestamp,
+            scale=scale,
         )
@@ -304,6 +306,12 @@ class NeonBenchmarker:
"", "",
MetricReport.TEST_PARAM, MetricReport.TEST_PARAM,
) )
self.record(
f"{prefix}.scale",
pg_bench_result.scale,
"",
MetricReport.TEST_PARAM,
)
def record_pg_bench_init_result(self, prefix: str, result: PgBenchInitResult): def record_pg_bench_init_result(self, prefix: str, result: PgBenchInitResult):
test_params = [ test_params = [

View File

@@ -1,8 +1,6 @@
 from collections import defaultdict
-from dataclasses import dataclass
 from typing import Dict, List
-from fixtures.log_helper import log
 from prometheus_client.parser import text_string_to_metric_families
 from prometheus_client.samples import Sample

View File

@@ -838,7 +838,7 @@ class NeonEnv:
             )
         if config.auth_enabled:
             toml += textwrap.dedent(
-                f"""
+                """
                 auth_enabled = true
                 """
             )
@@ -985,7 +985,7 @@ class NeonPageserverHttpClient(requests.Session):
         except requests.RequestException as e:
             try:
                 msg = res.json()["msg"]
-            except:
+            except:  # noqa: E722
                 msg = ""
             raise NeonPageserverApiException(msg) from e
@@ -1065,19 +1065,15 @@ class NeonPageserverHttpClient(requests.Session):
         include_non_incremental_logical_size: bool = False,
         include_non_incremental_physical_size: bool = False,
     ) -> Dict[Any, Any]:
-        include_non_incremental_logical_size_str = "0"
+        params = {}
         if include_non_incremental_logical_size:
-            include_non_incremental_logical_size_str = "1"
-        include_non_incremental_physical_size_str = "0"
+            params["include-non-incremental-logical-size"] = "yes"
         if include_non_incremental_physical_size:
-            include_non_incremental_physical_size_str = "1"
+            params["include-non-incremental-physical-size"] = "yes"
         res = self.get(
-            f"http://localhost:{self.port}/v1/tenant/{tenant_id.hex}/timeline/{timeline_id.hex}"
-            + "?include-non-incremental-logical-size={include_non_incremental_logical_size_str}"
-            + "&include-non-incremental-physical-size={include_non_incremental_physical_size_str}"
+            f"http://localhost:{self.port}/v1/tenant/{tenant_id.hex}/timeline/{timeline_id.hex}",
+            params=params,
         )
         self.verbose_error(res)
         res_json = res.json()
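
Worth noting: the removed continuation strings were plain strings, not f-strings, so the literal text {include_non_incremental_logical_size_str} was being sent as part of the query string. Building the query via params= sidesteps that class of bug and URL-encodes values; when the dict is empty, no query string is appended at all. A small sketch with a hypothetical host and port (requests.Request lets us inspect the URL without a live server):

import requests

include_non_incremental_logical_size = True
params = {}
if include_non_incremental_logical_size:
    params["include-non-incremental-logical-size"] = "yes"

# Hypothetical endpoint, for illustration only.
req = requests.Request("GET", "http://localhost:9898/v1/tenant/abc/timeline/def", params=params)
print(req.prepare().url)
# http://localhost:9898/v1/tenant/abc/timeline/def?include-non-incremental-logical-size=yes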
@@ -1532,7 +1528,7 @@ class NeonPageserver(PgProtocol):
         `overrides` allows to add some config to this pageserver start.
         Returns self.
         """
-        assert self.running == False
+        assert self.running is False
         self.env.neon_cli.pageserver_start(overrides=overrides)
         self.running = True
@@ -1867,9 +1863,7 @@ class Postgres(PgProtocol):
log.info(f"Starting postgres node {self.node_name}") log.info(f"Starting postgres node {self.node_name}")
run_result = self.env.neon_cli.pg_start( self.env.neon_cli.pg_start(self.node_name, tenant_id=self.tenant_id, port=self.port)
self.node_name, tenant_id=self.tenant_id, port=self.port
)
self.running = True self.running = True
return self return self
@@ -2078,7 +2072,7 @@ class Safekeeper:
     running: bool = False
     def start(self) -> "Safekeeper":
-        assert self.running == False
+        assert self.running is False
         self.env.neon_cli.safekeeper_start(self.id)
         self.running = True
         # wait for wal acceptor start by checking its status
@@ -2270,7 +2264,7 @@ class Etcd:
             # Set --quota-backend-bytes to keep the etcd virtual memory
             # size smaller. Our test etcd clusters are very small.
             # See https://github.com/etcd-io/etcd/issues/7910
-            f"--quota-backend-bytes=100000000",
+            "--quota-backend-bytes=100000000",
         ]
         self.handle = subprocess.Popen(args, stdout=log_file, stderr=log_file)
@@ -2395,7 +2389,7 @@ def should_skip_file(filename: str) -> bool:
     try:
         list(map(int, tmp_name))
-    except:
+    except:  # noqa: E722
         return False
     return True
@@ -2508,7 +2502,12 @@ def wait_until(number_of_iterations: int, interval: float, func):
 def assert_timeline_local(
     pageserver_http_client: NeonPageserverHttpClient, tenant: uuid.UUID, timeline: uuid.UUID
 ):
-    timeline_detail = pageserver_http_client.timeline_detail(tenant, timeline)
+    timeline_detail = pageserver_http_client.timeline_detail(
+        tenant,
+        timeline,
+        include_non_incremental_logical_size=True,
+        include_non_incremental_physical_size=True,
+    )
     assert timeline_detail.get("local", {}).get("disk_consistent_lsn"), timeline_detail
     return timeline_detail

View File

@@ -110,7 +110,7 @@ def get_dir_size(path: str) -> int:
         for name in files:
             try:
                 totalbytes += os.path.getsize(os.path.join(root, name))
-            except FileNotFoundError as e:
+            except FileNotFoundError:
                 pass  # file could be concurrently removed
     return totalbytes

View File

@@ -1,9 +1,6 @@
 from contextlib import closing
-from fixtures.benchmark_fixture import MetricReport, NeonBenchmarker
-from fixtures.compare_fixtures import NeonCompare, PgCompare, VanillaCompare
-from fixtures.log_helper import log
-from fixtures.neon_fixtures import NeonEnv
+from fixtures.compare_fixtures import PgCompare
 #

View File

@@ -1,11 +1,7 @@
 from contextlib import closing
 from io import BufferedReader, RawIOBase
-from itertools import repeat
-from fixtures.benchmark_fixture import MetricReport, NeonBenchmarker
-from fixtures.compare_fixtures import NeonCompare, PgCompare, VanillaCompare
-from fixtures.log_helper import log
-from fixtures.neon_fixtures import NeonEnv
+from fixtures.compare_fixtures import PgCompare
 class CopyTestData(RawIOBase):
@@ -28,7 +24,7 @@ class CopyTestData(RawIOBase):
             self.rownum += 1
         # Number of bytes to read in this call
-        l = min(len(self.linebuf) - self.ptr, len(b))
+        l = min(len(self.linebuf) - self.ptr, len(b))  # noqa: E741
         b[:l] = self.linebuf[self.ptr : (self.ptr + l)]
         self.ptr += l

View File

@@ -46,7 +46,7 @@ $$;
     # Write 3-4 MB to evict t from compute cache
     cur.execute("create table f (i integer);")
-    cur.execute(f"insert into f values (generate_series(1,100000));")
+    cur.execute("insert into f values (generate_series(1,100000));")
     # Read
     with env.record_duration("read"):

View File

@@ -1,10 +1,6 @@
-import os
 from contextlib import closing
-from fixtures.benchmark_fixture import MetricReport
-from fixtures.compare_fixtures import NeonCompare, PgCompare, VanillaCompare
-from fixtures.log_helper import log
-from fixtures.neon_fixtures import NeonEnv
+from fixtures.compare_fixtures import PgCompare
 #

View File

@@ -31,7 +31,7 @@ def test_hot_page(env: PgCompare):
     # Write 3-4 MB to evict t from compute cache
     cur.execute("create table f (i integer);")
-    cur.execute(f"insert into f values (generate_series(1,100000));")
+    cur.execute("insert into f values (generate_series(1,100000));")
     # Read
     with env.record_duration("read"):

View File

@@ -1,11 +1,8 @@
 import asyncio
 from io import BytesIO
-import asyncpg
-from fixtures.benchmark_fixture import MetricReport, NeonBenchmarker
-from fixtures.compare_fixtures import NeonCompare, PgCompare, VanillaCompare
-from fixtures.log_helper import log
-from fixtures.neon_fixtures import NeonEnv, PgProtocol, Postgres
+from fixtures.compare_fixtures import PgCompare
+from fixtures.neon_fixtures import PgProtocol
 async def repeat_bytes(buf, repetitions: int):
@@ -59,7 +56,7 @@ def test_parallel_copy_different_tables(neon_with_baseline: PgCompare, n_paralle
 async def parallel_load_same_table(pg: PgProtocol, n_parallel: int):
     workers = []
     for worker_id in range(n_parallel):
-        worker = copy_test_data_to_table(pg, worker_id, f"copytest")
+        worker = copy_test_data_to_table(pg, worker_id, "copytest")
         workers.append(asyncio.create_task(worker))
     # await all workers
@@ -72,7 +69,7 @@ def test_parallel_copy_same_table(neon_with_baseline: PgCompare, n_parallel=5):
     conn = env.pg.connect()
     cur = conn.cursor()
-    cur.execute(f"CREATE TABLE copytest (i int, t text)")
+    cur.execute("CREATE TABLE copytest (i int, t text)")
     with env.record_pageserver_writes("pageserver_writes"):
         with env.record_duration("load"):

View File

@@ -1,13 +1,8 @@
-import os
 import random
-import time
 from contextlib import closing
-import psycopg2.extras
 from fixtures.benchmark_fixture import MetricReport
-from fixtures.compare_fixtures import NeonCompare, PgCompare, VanillaCompare
-from fixtures.log_helper import log
-from fixtures.neon_fixtures import NeonEnv
+from fixtures.compare_fixtures import PgCompare
 from fixtures.utils import query_scalar

View File

@@ -1,13 +1,11 @@
 # Test sequential scan speed
 #
 from contextlib import closing
-from dataclasses import dataclass
 import pytest
-from fixtures.benchmark_fixture import MetricReport, NeonBenchmarker
+from fixtures.benchmark_fixture import MetricReport
 from fixtures.compare_fixtures import PgCompare
 from fixtures.log_helper import log
-from fixtures.neon_fixtures import NeonEnv
 @pytest.mark.parametrize(

View File

@@ -10,13 +10,9 @@
 # in LSN order, writing the oldest layer first. That creates a new 10 MB image
 # layer to be created for each of those small updates. This is the Write
 # Amplification problem at its finest.
-import os
 from contextlib import closing
-from fixtures.benchmark_fixture import MetricReport
-from fixtures.compare_fixtures import NeonCompare, PgCompare, VanillaCompare
-from fixtures.log_helper import log
-from fixtures.neon_fixtures import NeonEnv
+from fixtures.compare_fixtures import PgCompare
 def test_write_amplification(neon_with_baseline: PgCompare):

View File

@@ -1,7 +1,6 @@
 #! /usr/bin/env python3
 import os
-import ssl
 import pg8000.dbapi

View File

@@ -1,6 +1,4 @@
-import os
 import shutil
-import subprocess
 from pathlib import Path
 from tempfile import NamedTemporaryFile