test_runner: fix flake8 warnings

Alexander Bayandin
2022-08-18 20:41:13 +01:00
committed by Alexander Bayandin
parent ae3227509c
commit 39a3bcac36
35 changed files with 92 additions and 140 deletions

View File

@@ -28,13 +28,13 @@ import tempfile
import time
import uuid
from contextlib import closing
from os import path
from pathlib import Path
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, TypeVar, Union, cast
from typing import Any, Dict, List, Optional, Tuple, cast
import psycopg2
import requests
from psycopg2.extensions import connection as PgConnection
from psycopg2.extensions import parse_dsn
###############################################
### client-side utils copied from test fixtures
@@ -149,10 +149,8 @@ class PgProtocol:
# enough for our tests, but if you need a longer, you can
# change it by calling "SET statement_timeout" after
# connecting.
if "options" in conn_options:
conn_options["options"] = f"-cstatement_timeout=120s " + conn_options["options"]
else:
conn_options["options"] = "-cstatement_timeout=120s"
conn_options["options"] = f"-cstatement_timeout=120s {conn_options.get('options', '')}"
return conn_options
# autocommit=True here by default because that's what we need most of the time
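
The four-line if/else above collapses into a single assignment because dict.get supplies a default for the missing key. A minimal standalone sketch of the pattern (the function name is illustrative, not from the fixtures):

    def with_timeout(conn_options: dict) -> dict:
        # dict.get("options", "") returns "" when the key is absent, so the
        # "key present" and "key missing" branches become one line.
        conn_options["options"] = f"-cstatement_timeout=120s {conn_options.get('options', '')}"
        return conn_options

    assert with_timeout({})["options"].startswith("-cstatement_timeout=120s")
    assert (with_timeout({"options": "-csearch_path=public"})["options"]
            == "-cstatement_timeout=120s -csearch_path=public")
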
@@ -250,7 +248,7 @@ class NeonPageserverHttpClient(requests.Session):
except requests.RequestException as e:
try:
msg = res.json()["msg"]
except:
except: # noqa: E722
msg = ""
raise NeonPageserverApiException(msg) from e
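
flake8's E722 flags a bare except: because it also catches KeyboardInterrupt and SystemExit. The commit keeps the bare form deliberately (any failure to parse the error body should fall back to an empty message) and silences the warning with # noqa: E722. A narrower alternative, sketched as a standalone helper:

    import requests

    def error_message(res: requests.Response) -> str:
        # "except Exception" tolerates any JSON or shape error while still
        # letting KeyboardInterrupt and SystemExit propagate.
        try:
            return res.json()["msg"]
        except Exception:
            return ""
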
@@ -477,8 +475,8 @@ def import_timeline(
import_cmd = f"import basebackup {tenant_id} {timeline_id} {last_lsn} {last_lsn}"
full_cmd = rf"""cat {tar_filename} | {psql_path} {pageserver_connstr} -c '{import_cmd}' """
stderr_filename2 = path.join(args.work_dir, f"import_{tenant_id}_{timeline_id}.stderr")
stdout_filename = path.join(args.work_dir, f"import_{tenant_id}_{timeline_id}.stdout")
stderr_filename2 = os.path.join(args.work_dir, f"import_{tenant_id}_{timeline_id}.stderr")
stdout_filename = os.path.join(args.work_dir, f"import_{tenant_id}_{timeline_id}.stdout")
print(f"Running: {full_cmd}")
@@ -495,7 +493,7 @@ def import_timeline(
check=True,
)
print(f"Done import")
print("Done import")
# Wait until pageserver persists the files
wait_for_upload(
@@ -508,7 +506,7 @@ def export_timeline(
):
# Choose filenames
incomplete_filename = tar_filename + ".incomplete"
stderr_filename = path.join(args.work_dir, f"{tenant_id}_{timeline_id}.stderr")
stderr_filename = os.path.join(args.work_dir, f"{tenant_id}_{timeline_id}.stderr")
# Construct export command
query = f"fullbackup {tenant_id} {timeline_id} {last_lsn} {prev_lsn}"
@@ -563,7 +561,7 @@ def main(args: argparse.Namespace):
continue
# Choose filenames
tar_filename = path.join(
tar_filename = os.path.join(
args.work_dir, f"{timeline['tenant_id']}_{timeline['timeline_id']}.tar"
)

View File

@@ -11,7 +11,7 @@ def test_basebackup_error(neon_simple_env: NeonEnv):
env.neon_cli.create_branch("test_basebackup_error", "empty")
# Introduce failpoint
env.pageserver.safe_psql(f"failpoints basebackup-before-control-file=return")
env.pageserver.safe_psql("failpoints basebackup-before-control-file=return")
with pytest.raises(Exception, match="basebackup-before-control-file"):
pg = env.postgres.create_start("test_basebackup_error")
env.postgres.create_start("test_basebackup_error")
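
This hunk fixes two warnings at once: F541 (an f-string with no placeholders, where the f prefix does nothing) and F841 (the pg variable was assigned but never read, since the call is only expected to raise). The F541 distinction in isolation:

    name = "basebackup-before-control-file"
    static_cmd = "failpoints fp=return"        # no placeholder: a plain string avoids F541
    dynamic_cmd = f"failpoints {name}=return"  # interpolation present, so the f prefix is earned
    assert dynamic_cmd.endswith("=return")
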

View File

@@ -65,7 +65,7 @@ def test_branch_and_gc(neon_simple_env: NeonEnv):
}
)
timeline_main = env.neon_cli.create_timeline(f"test_main", tenant_id=tenant)
timeline_main = env.neon_cli.create_timeline("test_main", tenant_id=tenant)
pg_main = env.postgres.create_start("test_main", tenant_id=tenant)
main_cur = pg_main.connect().cursor()
@@ -148,7 +148,7 @@ def test_branch_creation_before_gc(neon_simple_env: NeonEnv):
# Use `failpoint=sleep` and `threading` to make the GC iteration triggers *before* the
# branch creation task but the individual timeline GC iteration happens *after*
# the branch creation task.
env.pageserver.safe_psql(f"failpoints before-timeline-gc=sleep(2000)")
env.pageserver.safe_psql("failpoints before-timeline-gc=sleep(2000)")
def do_gc():
env.pageserver.safe_psql(f"do_gc {tenant.hex} {b0.hex} 0")

View File

@@ -1,8 +1,6 @@
import concurrent.futures
import os
from contextlib import closing
from typing import List, Tuple
from uuid import UUID
import pytest
from fixtures.log_helper import log
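
The dropped imports (os, closing, List, Tuple, UUID) were never referenced in this file, which flake8 reports as F401. A tiny module that would reproduce the warning:

    import os                 # F401: imported but never used below
    from typing import List   # F401: same

    def greet() -> str:
        return "hello"        # neither os nor List is referenced
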
@@ -24,7 +22,7 @@ def test_broken_timeline(neon_env_builder: NeonEnvBuilder):
tenant_id = tenant_id_uuid.hex
timeline_id = timeline_id_uuid.hex
pg = env.postgres.create_start(f"main", tenant_id=tenant_id_uuid)
pg = env.postgres.create_start("main", tenant_id=tenant_id_uuid)
with pg.cursor() as cur:
cur.execute("CREATE TABLE t(key int primary key, value text)")
cur.execute("INSERT INTO t SELECT generate_series(1,100), 'payload'")
@@ -102,7 +100,7 @@ def test_fix_broken_timelines_on_startup(neon_simple_env: NeonEnv):
tenant_id, _ = env.neon_cli.create_tenant()
# Introduce failpoint when creating a new timeline
env.pageserver.safe_psql(f"failpoints before-checkpoint-new-timeline=return")
env.pageserver.safe_psql("failpoints before-checkpoint-new-timeline=return")
with pytest.raises(Exception, match="before-checkpoint-new-timeline"):
_ = env.neon_cli.create_timeline("test_fix_broken_timelines", tenant_id)

View File

@@ -1,6 +1,5 @@
import os
import time
from contextlib import closing
from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonEnv
@@ -49,7 +48,7 @@ def test_clog_truncate(neon_simple_env: NeonEnv):
log.info(f"pg_xact_0000_path = {pg_xact_0000_path}")
while os.path.isfile(pg_xact_0000_path):
log.info(f"file exists. wait for truncation. " "pg_xact_0000_path = {pg_xact_0000_path}")
log.info(f"file exists. wait for truncation: {pg_xact_0000_path=}")
time.sleep(5)
# checkpoint to advance latest lsn
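
The old line was a real logging bug, not just style: implicit string concatenation split the message, the second half lost its f prefix, and {pg_xact_0000_path} was logged literally. The replacement uses the name= debug specifier (Python 3.8+), which expands to name='value'. Reproduced standalone:

    pg_xact_0000_path = "/tmp/pg_xact/0000"
    broken = f"file exists. wait for truncation. " "pg_xact_0000_path = {pg_xact_0000_path}"
    fixed = f"file exists. wait for truncation: {pg_xact_0000_path=}"
    assert "{pg_xact_0000_path}" in broken  # the placeholder never expanded
    assert "/tmp/pg_xact/0000" in fixed     # name and value are both logged
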

View File

@@ -1,6 +1,5 @@
import os
import pathlib
from contextlib import closing
from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonEnv, check_restored_datadir_content
@@ -92,14 +91,14 @@ def test_dropdb(neon_simple_env: NeonEnv, test_output_dir):
dbpath = pathlib.Path(pg_before.pgdata_dir) / "base" / str(dboid)
log.info(dbpath)
assert os.path.isdir(dbpath) == True
assert os.path.isdir(dbpath) is True
# Test that database subdir doesn't exist on the branch after drop
assert pg_after.pgdata_dir
dbpath = pathlib.Path(pg_after.pgdata_dir) / "base" / str(dboid)
log.info(dbpath)
assert os.path.isdir(dbpath) == False
assert os.path.isdir(dbpath) is False
# Check that we restore the content of the datadir correctly
check_restored_datadir_content(test_output_dir, env, pg)
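
E712 flags equality comparisons to True/False. The commit switches to identity checks (is True / is False); for a call documented to return bool, relying on the value directly is the most idiomatic form:

    import os

    p = os.getcwd()
    assert os.path.isdir(p) == True  # the style this commit removes (E712)
    assert os.path.isdir(p) is True  # the style this commit adopts
    assert os.path.isdir(p)          # most idiomatic for a bool-returning call
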

View File

@@ -1,6 +1,4 @@
import pytest
from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder, NeonPageserverHttpClient
from fixtures.neon_fixtures import NeonEnvBuilder
def test_fsm_truncate(neon_env_builder: NeonEnvBuilder):

View File

@@ -24,7 +24,7 @@ async def update_table(pg: Postgres):
while updates_performed < updates_to_perform:
updates_performed += 1
id = random.randrange(1, num_rows)
row = await pg_conn.fetchrow(f"UPDATE foo SET counter = counter + 1 WHERE id = {id}")
await pg_conn.fetchrow(f"UPDATE foo SET counter = counter + 1 WHERE id = {id}")
# Perform aggressive GC with 0 horizon
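
F841 again: row was bound but never inspected, so the query is now awaited purely for its side effect. The same shape in a minimal asyncio program (no database required; update_row stands in for fetchrow):

    import asyncio

    async def update_row() -> str:
        # Stand-in for pg_conn.fetchrow(...); returns a status the caller may not need.
        return "UPDATE 1"

    async def main():
        await update_row()  # awaited for the effect alone: no unused binding, no F841

    asyncio.run(main())
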

View File

@@ -1,13 +1,7 @@
import math
import time
from contextlib import closing
from datetime import timedelta, timezone, tzinfo
from uuid import UUID
from datetime import timedelta
import psycopg2.errors
import psycopg2.extras
from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder, Postgres
from fixtures.neon_fixtures import NeonEnvBuilder
from fixtures.utils import query_scalar

View File

@@ -1,16 +1,12 @@
import os
import pathlib
import subprocess
from typing import Optional
from uuid import UUID, uuid4
import pytest
from fixtures.log_helper import log
from fixtures.neon_fixtures import (
DEFAULT_BRANCH_NAME,
NeonEnv,
NeonEnvBuilder,
NeonPageserverApiException,
NeonPageserverHttpClient,
neon_binpath,
pg_distrib_dir,
@@ -24,13 +20,15 @@ def test_pageserver_init_node_id(neon_simple_env: NeonEnv):
repo_dir = neon_simple_env.repo_dir
pageserver_config = repo_dir / "pageserver.toml"
pageserver_bin = pathlib.Path(neon_binpath) / "pageserver"
run_pageserver = lambda args: subprocess.run(
[str(pageserver_bin), "-D", str(repo_dir), *args],
check=False,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
def run_pageserver(args):
return subprocess.run(
[str(pageserver_bin), "-D", str(repo_dir), *args],
check=False,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
# remove initial config
pageserver_config.unlink()
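
E731 discourages binding a lambda to a name, because def gives a real __name__ for tracebacks and room for statements; the rewrite is mechanical. On a trivial case:

    square = lambda x: x * x  # E731: what flake8 flags

    def square_fn(x):
        # Equivalent def: same behavior, but tracebacks show a meaningful name.
        return x * x

    assert square(4) == square_fn(4) == 16
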

View File

@@ -32,7 +32,7 @@ async def copy_test_data_to_table(pg: Postgres, worker_id: int, table_name: str)
async def parallel_load_same_table(pg: Postgres, n_parallel: int):
workers = []
for worker_id in range(n_parallel):
worker = copy_test_data_to_table(pg, worker_id, f"copytest")
worker = copy_test_data_to_table(pg, worker_id, "copytest")
workers.append(asyncio.create_task(worker))
# await all workers
@@ -49,7 +49,7 @@ def test_parallel_copy(neon_simple_env: NeonEnv, n_parallel=5):
# Create test table
conn = pg.connect()
cur = conn.cursor()
cur.execute(f"CREATE TABLE copytest (i int, t text)")
cur.execute("CREATE TABLE copytest (i int, t text)")
# Run COPY TO to load the table with parallel connections.
asyncio.run(parallel_load_same_table(pg, n_parallel))

View File

@@ -1,7 +1,4 @@
import json
import os
import time
from ast import Assert
from contextlib import closing
import psycopg2.extras
@@ -33,8 +30,6 @@ def test_pageserver_recovery(neon_env_builder: NeonEnvBuilder):
pg = env.postgres.create_start("test_pageserver_recovery")
log.info("postgres is running on 'test_pageserver_recovery' branch")
connstr = pg.connstr()
with closing(pg.connect()) as conn:
with conn.cursor() as cur:
with closing(env.pageserver.connect()) as psconn:

View File

@@ -99,7 +99,7 @@ def test_remote_storage_backup_and_restore(
env.pageserver.start()
# Introduce failpoint in download
env.pageserver.safe_psql(f"failpoints remote-storage-download-pre-rename=return")
env.pageserver.safe_psql("failpoints remote-storage-download-pre-rename=return")
client.tenant_attach(UUID(tenant_id))

View File

@@ -1,7 +1,6 @@
from contextlib import closing
import psycopg2.extras
import pytest
from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonEnvBuilder
@@ -22,8 +21,8 @@ tenant_config={checkpoint_distance = 10000, compaction_target_size = 1048576}"""
}
)
env.neon_cli.create_timeline(f"test_tenant_conf", tenant_id=tenant)
pg = env.postgres.create_start(
env.neon_cli.create_timeline("test_tenant_conf", tenant_id=tenant)
env.postgres.create_start(
"test_tenant_conf",
"main",
tenant,

View File

@@ -14,7 +14,6 @@ from fixtures.neon_fixtures import (
NeonEnv,
NeonEnvBuilder,
NeonPageserverHttpClient,
PageserverPort,
PortDistributor,
Postgres,
assert_no_in_progress_downloads_for_tenant,
@@ -56,7 +55,7 @@ def new_pageserver_helper(
f"-c listen_pg_addr='localhost:{pg_port}'",
f"-c listen_http_addr='localhost:{http_port}'",
f"-c pg_distrib_dir='{pg_distrib_dir}'",
f"-c id=2",
"-c id=2",
f"-c remote_storage={{local_path='{remote_storage_mock_path}'}}",
]
if broker is not None:
@@ -92,7 +91,7 @@ def load(pg: Postgres, stop_event: threading.Event, load_ok_event: threading.Eve
with pg_cur(pg) as cur:
cur.execute("INSERT INTO load VALUES ('some payload')")
inserted_ctr += 1
except:
except: # noqa: E722
if not failed:
log.info("load failed")
failed = True

View File

@@ -1,10 +1,9 @@
import time
from uuid import UUID
from fixtures.neon_fixtures import NeonEnvBuilder, wait_until
def get_only_element(l):
def get_only_element(l): # noqa: E741
assert len(l) == 1
return l[0]
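
E741 flags the single-letter names l, O, and I as easily confused with the digits 1 and 0. Here the parameter is kept and the warning suppressed with # noqa: E741; renaming would remove the need for the comment:

    def get_only_element(items):
        # Renaming "l" to "items" satisfies E741 without a noqa comment.
        assert len(items) == 1
        return items[0]

    assert get_only_element([42]) == 42
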
@@ -46,7 +45,7 @@ def test_tenant_tasks(neon_env_builder: NeonEnvBuilder):
# Create tenant, start compute
tenant, _ = env.neon_cli.create_tenant()
timeline = env.neon_cli.create_timeline(name, tenant_id=tenant)
env.neon_cli.create_timeline(name, tenant_id=tenant)
pg = env.postgres.create_start(name, tenant_id=tenant)
assert get_state(tenant) == "Active"

View File

@@ -7,7 +7,6 @@
#
import asyncio
from contextlib import closing
from typing import List, Tuple
from uuid import UUID
@@ -25,12 +24,12 @@ from fixtures.utils import lsn_from_hex
async def tenant_workload(env: NeonEnv, pg: Postgres):
pageserver_conn = await env.pageserver.connect_async()
await env.pageserver.connect_async()
pg_conn = await pg.connect_async()
tenant_id = await pg_conn.fetchval("show neon.tenant_id")
timeline_id = await pg_conn.fetchval("show neon.timeline_id")
await pg_conn.fetchval("show neon.tenant_id")
await pg_conn.fetchval("show neon.timeline_id")
await pg_conn.execute("CREATE TABLE t(key int primary key, value text)")
for i in range(1, 100):
@@ -72,10 +71,10 @@ def test_tenants_many(neon_env_builder: NeonEnvBuilder, remote_storatge_kind: Re
"checkpoint_distance": "5000000",
}
)
env.neon_cli.create_timeline(f"test_tenants_many", tenant_id=tenant)
env.neon_cli.create_timeline("test_tenants_many", tenant_id=tenant)
pg = env.postgres.create_start(
f"test_tenants_many",
"test_tenants_many",
tenant_id=tenant,
)
tenants_pgs.append((tenant, pg))

View File

@@ -125,7 +125,7 @@ def wait_for_pageserver_catchup(pgmain: Postgres, polling_interval=1, timeout=60
elapsed = time.time() - started_at
if elapsed > timeout:
raise RuntimeError(
f"timed out waiting for pageserver to reach pg_current_wal_flush_lsn()"
"timed out waiting for pageserver to reach pg_current_wal_flush_lsn()"
)
res = pgmain.safe_psql(
@@ -390,7 +390,7 @@ def test_tenant_physical_size(neon_simple_env: NeonEnv):
tenant, timeline = env.neon_cli.create_tenant()
def get_timeline_physical_size(timeline: UUID):
res = client.timeline_detail(tenant, timeline)
res = client.timeline_detail(tenant, timeline, include_non_incremental_physical_size=True)
return res["local"]["current_physical_size_non_incremental"]
timeline_total_size = get_timeline_physical_size(timeline)

View File

@@ -180,7 +180,7 @@ def test_many_timelines(neon_env_builder: NeonEnvBuilder):
while not self.should_stop.is_set():
collect_metrics("during INSERT INTO")
time.sleep(1)
except:
except: # noqa: E722
log.error(
"MetricsChecker's thread failed, the test will be failed on .stop() call",
exc_info=True,
@@ -552,7 +552,7 @@ def test_s3_wal_replay(neon_env_builder: NeonEnvBuilder, remote_storatge_kind: R
while True:
elapsed = time.time() - started_at
if elapsed > wait_lsn_timeout:
raise RuntimeError(f"Timed out waiting for WAL redo")
raise RuntimeError("Timed out waiting for WAL redo")
pageserver_lsn = env.pageserver.http_client().timeline_detail(
uuid.UUID(tenant_id), uuid.UUID((timeline_id))
@@ -615,7 +615,7 @@ class ProposerPostgres(PgProtocol):
"shared_preload_libraries = 'neon'\n",
f"neon.timeline_id = '{self.timeline_id.hex}'\n",
f"neon.tenant_id = '{self.tenant_id.hex}'\n",
f"neon.pageserver_connstring = ''\n",
"neon.pageserver_connstring = ''\n",
f"neon.safekeepers = '{safekeepers}'\n",
f"listen_addresses = '{self.listen_addr}'\n",
f"port = '{self.port}'\n",

View File

@@ -49,7 +49,7 @@ def test_neon_regress(neon_simple_env: NeonEnv, test_output_dir: Path, pg_bin, c
# checkpoint one more time to ensure that the lsn we get is the latest one
pg.safe_psql("CHECKPOINT")
lsn = pg.safe_psql("select pg_current_wal_insert_lsn()")[0][0]
pg.safe_psql("select pg_current_wal_insert_lsn()")[0][0]
# Check that we restore the content of the datadir correctly
check_restored_datadir_content(test_output_dir, env, pg)

View File

@@ -60,6 +60,7 @@ class PgBenchRunResult:
run_duration: float
run_start_timestamp: int
run_end_timestamp: int
scale: int
# TODO progress
@@ -130,6 +131,7 @@ class PgBenchRunResult:
run_duration=run_duration,
run_start_timestamp=run_start_timestamp,
run_end_timestamp=run_end_timestamp,
scale=scale,
)
@@ -304,6 +306,12 @@ class NeonBenchmarker:
"",
MetricReport.TEST_PARAM,
)
self.record(
f"{prefix}.scale",
pg_bench_result.scale,
"",
MetricReport.TEST_PARAM,
)
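
Alongside the lint fixes, this hunk threads the pgbench scale factor through as a new dataclass field and records it as a test parameter. Reduced to a toy example (names hypothetical):

    from dataclasses import dataclass

    @dataclass
    class RunResult:
        run_duration: float
        scale: int  # new required field: every construction site must now pass it

    result = RunResult(run_duration=12.5, scale=10)
    print(f"pgbench.scale = {result.scale}")
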
def record_pg_bench_init_result(self, prefix: str, result: PgBenchInitResult):
test_params = [

View File

@@ -1,8 +1,6 @@
from collections import defaultdict
from dataclasses import dataclass
from typing import Dict, List
from fixtures.log_helper import log
from prometheus_client.parser import text_string_to_metric_families
from prometheus_client.samples import Sample

View File

@@ -838,7 +838,7 @@ class NeonEnv:
)
if config.auth_enabled:
toml += textwrap.dedent(
f"""
"""
auth_enabled = true
"""
)
@@ -985,7 +985,7 @@ class NeonPageserverHttpClient(requests.Session):
except requests.RequestException as e:
try:
msg = res.json()["msg"]
except:
except: # noqa: E722
msg = ""
raise NeonPageserverApiException(msg) from e
@@ -1065,19 +1065,15 @@ class NeonPageserverHttpClient(requests.Session):
include_non_incremental_logical_size: bool = False,
include_non_incremental_physical_size: bool = False,
) -> Dict[Any, Any]:
include_non_incremental_logical_size_str = "0"
params = {}
if include_non_incremental_logical_size:
include_non_incremental_logical_size_str = "1"
include_non_incremental_physical_size_str = "0"
params["include-non-incremental-logical-size"] = "yes"
if include_non_incremental_physical_size:
include_non_incremental_physical_size_str = "1"
params["include-non-incremental-physical-size"] = "yes"
res = self.get(
f"http://localhost:{self.port}/v1/tenant/{tenant_id.hex}/timeline/{timeline_id.hex}"
+ "?include-non-incremental-logical-size={include_non_incremental_logical_size_str}"
+ "&include-non-incremental-physical-size={include_non_incremental_physical_size_str}"
f"http://localhost:{self.port}/v1/tenant/{tenant_id.hex}/timeline/{timeline_id.hex}",
params=params,
)
self.verbose_error(res)
res_json = res.json()
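
This hunk fixes a genuine bug: the concatenated "?include-...={...}" pieces were plain strings, not f-strings, so the literal text {include_non_incremental_logical_size_str} was sent as the query value. Passing a dict via requests' params= both fixes that and handles URL encoding; a minimal sketch (httpbin.org used only as an echo endpoint, network required):

    import requests

    params = {}
    params["include-non-incremental-logical-size"] = "yes"
    params["include-non-incremental-physical-size"] = "yes"

    # requests builds and encodes the query string itself:
    # .../get?include-non-incremental-logical-size=yes&include-non-incremental-physical-size=yes
    res = requests.get("https://httpbin.org/get", params=params)
    res.raise_for_status()
    assert res.json()["args"]["include-non-incremental-logical-size"] == "yes"
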
@@ -1532,7 +1528,7 @@ class NeonPageserver(PgProtocol):
`overrides` allows to add some config to this pageserver start.
Returns self.
"""
assert self.running == False
assert self.running is False
self.env.neon_cli.pageserver_start(overrides=overrides)
self.running = True
@@ -1867,9 +1863,7 @@ class Postgres(PgProtocol):
log.info(f"Starting postgres node {self.node_name}")
run_result = self.env.neon_cli.pg_start(
self.node_name, tenant_id=self.tenant_id, port=self.port
)
self.env.neon_cli.pg_start(self.node_name, tenant_id=self.tenant_id, port=self.port)
self.running = True
return self
@@ -2078,7 +2072,7 @@ class Safekeeper:
running: bool = False
def start(self) -> "Safekeeper":
assert self.running == False
assert self.running is False
self.env.neon_cli.safekeeper_start(self.id)
self.running = True
# wait for wal acceptor start by checking its status
@@ -2270,7 +2264,7 @@ class Etcd:
# Set --quota-backend-bytes to keep the etcd virtual memory
# size smaller. Our test etcd clusters are very small.
# See https://github.com/etcd-io/etcd/issues/7910
f"--quota-backend-bytes=100000000",
"--quota-backend-bytes=100000000",
]
self.handle = subprocess.Popen(args, stdout=log_file, stderr=log_file)
@@ -2395,7 +2389,7 @@ def should_skip_file(filename: str) -> bool:
try:
list(map(int, tmp_name))
except:
except: # noqa: E722
return False
return True
@@ -2508,7 +2502,12 @@ def wait_until(number_of_iterations: int, interval: float, func):
def assert_timeline_local(
pageserver_http_client: NeonPageserverHttpClient, tenant: uuid.UUID, timeline: uuid.UUID
):
timeline_detail = pageserver_http_client.timeline_detail(tenant, timeline)
timeline_detail = pageserver_http_client.timeline_detail(
tenant,
timeline,
include_non_incremental_logical_size=True,
include_non_incremental_physical_size=True,
)
assert timeline_detail.get("local", {}).get("disk_consistent_lsn"), timeline_detail
return timeline_detail

View File

@@ -110,7 +110,7 @@ def get_dir_size(path: str) -> int:
for name in files:
try:
totalbytes += os.path.getsize(os.path.join(root, name))
except FileNotFoundError as e:
except FileNotFoundError:
pass # file could be concurrently removed
return totalbytes
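
When the exception object is never used, "except FileNotFoundError as e:" leaves e as an unused local (F841); dropping the "as e" binding is the whole fix:

    import os

    def size_or_zero(path: str) -> int:
        try:
            return os.path.getsize(path)
        except FileNotFoundError:  # no "as e": nothing is bound, nothing goes unused
            return 0  # the file may have been removed concurrently

    assert size_or_zero("/surely/not/a/real/path") == 0
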

View File

@@ -1,9 +1,6 @@
from contextlib import closing
from fixtures.benchmark_fixture import MetricReport, NeonBenchmarker
from fixtures.compare_fixtures import NeonCompare, PgCompare, VanillaCompare
from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonEnv
from fixtures.compare_fixtures import PgCompare
#

View File

@@ -1,11 +1,7 @@
from contextlib import closing
from io import BufferedReader, RawIOBase
from itertools import repeat
from fixtures.benchmark_fixture import MetricReport, NeonBenchmarker
from fixtures.compare_fixtures import NeonCompare, PgCompare, VanillaCompare
from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonEnv
from fixtures.compare_fixtures import PgCompare
class CopyTestData(RawIOBase):
@@ -28,7 +24,7 @@ class CopyTestData(RawIOBase):
self.rownum += 1
# Number of bytes to read in this call
l = min(len(self.linebuf) - self.ptr, len(b))
l = min(len(self.linebuf) - self.ptr, len(b)) # noqa: E741
b[:l] = self.linebuf[self.ptr : (self.ptr + l)]
self.ptr += l

View File

@@ -46,7 +46,7 @@ $$;
# Write 3-4 MB to evict t from compute cache
cur.execute("create table f (i integer);")
cur.execute(f"insert into f values (generate_series(1,100000));")
cur.execute("insert into f values (generate_series(1,100000));")
# Read
with env.record_duration("read"):

View File

@@ -1,10 +1,6 @@
import os
from contextlib import closing
from fixtures.benchmark_fixture import MetricReport
from fixtures.compare_fixtures import NeonCompare, PgCompare, VanillaCompare
from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonEnv
from fixtures.compare_fixtures import PgCompare
#

View File

@@ -31,7 +31,7 @@ def test_hot_page(env: PgCompare):
# Write 3-4 MB to evict t from compute cache
cur.execute("create table f (i integer);")
cur.execute(f"insert into f values (generate_series(1,100000));")
cur.execute("insert into f values (generate_series(1,100000));")
# Read
with env.record_duration("read"):

View File

@@ -1,11 +1,8 @@
import asyncio
from io import BytesIO
import asyncpg
from fixtures.benchmark_fixture import MetricReport, NeonBenchmarker
from fixtures.compare_fixtures import NeonCompare, PgCompare, VanillaCompare
from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonEnv, PgProtocol, Postgres
from fixtures.compare_fixtures import PgCompare
from fixtures.neon_fixtures import PgProtocol
async def repeat_bytes(buf, repetitions: int):
@@ -59,7 +56,7 @@ def test_parallel_copy_different_tables(neon_with_baseline: PgCompare, n_paralle
async def parallel_load_same_table(pg: PgProtocol, n_parallel: int):
workers = []
for worker_id in range(n_parallel):
worker = copy_test_data_to_table(pg, worker_id, f"copytest")
worker = copy_test_data_to_table(pg, worker_id, "copytest")
workers.append(asyncio.create_task(worker))
# await all workers
@@ -72,7 +69,7 @@ def test_parallel_copy_same_table(neon_with_baseline: PgCompare, n_parallel=5):
conn = env.pg.connect()
cur = conn.cursor()
cur.execute(f"CREATE TABLE copytest (i int, t text)")
cur.execute("CREATE TABLE copytest (i int, t text)")
with env.record_pageserver_writes("pageserver_writes"):
with env.record_duration("load"):

View File

@@ -1,13 +1,8 @@
import os
import random
import time
from contextlib import closing
import psycopg2.extras
from fixtures.benchmark_fixture import MetricReport
from fixtures.compare_fixtures import NeonCompare, PgCompare, VanillaCompare
from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonEnv
from fixtures.compare_fixtures import PgCompare
from fixtures.utils import query_scalar

View File

@@ -1,13 +1,11 @@
# Test sequential scan speed
#
from contextlib import closing
from dataclasses import dataclass
import pytest
from fixtures.benchmark_fixture import MetricReport, NeonBenchmarker
from fixtures.benchmark_fixture import MetricReport
from fixtures.compare_fixtures import PgCompare
from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonEnv
@pytest.mark.parametrize(

View File

@@ -10,13 +10,9 @@
# in LSN order, writing the oldest layer first. That creates a new 10 MB image
# layer to be created for each of those small updates. This is the Write
# Amplification problem at its finest.
import os
from contextlib import closing
from fixtures.benchmark_fixture import MetricReport
from fixtures.compare_fixtures import NeonCompare, PgCompare, VanillaCompare
from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonEnv
from fixtures.compare_fixtures import PgCompare
def test_write_amplification(neon_with_baseline: PgCompare):

View File

@@ -1,7 +1,6 @@
#! /usr/bin/env python3
import os
import ssl
import pg8000.dbapi

View File

@@ -1,6 +1,4 @@
import os
import shutil
import subprocess
from pathlib import Path
from tempfile import NamedTemporaryFile