Mirror of https://github.com/neondatabase/neon.git
Replace flake8 and isort with ruff (#3810)
- Introduce ruff (https://beta.ruff.rs/) to replace flake8 and isort
- Update mypy and black
committed by GitHub
parent 68ae020b37
commit 3d869cbcde
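In practice the swap means one tool run instead of two: ruff implements the flake8 rule set alongside isort-compatible import sorting (its I-prefixed rules). A minimal sketch of the replaced lint step, assuming ruff is installed and on PATH; the commands here are the tools' documented CLIs, not taken from this commit:

import subprocess

# Before this change, CI needed two separate checks:
#   flake8 .          -- style and lint rules
#   isort --check .   -- import ordering
# After it, a single ruff invocation covers both rule sets.
result = subprocess.run(["ruff", "check", "."], capture_output=True, text=True)
print(result.stdout or "no lint errors")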
@@ -17,6 +17,7 @@ import pytest
 from _pytest.config import Config
 from _pytest.config.argparsing import Parser
 from _pytest.terminal import TerminalReporter
+
 from fixtures.neon_fixtures import NeonPageserver
 from fixtures.types import TenantId, TimelineId
 
@@ -6,8 +6,15 @@ from typing import Dict, Iterator, List
 
 import pytest
 from _pytest.fixtures import FixtureRequest
+
 from fixtures.benchmark_fixture import MetricReport, NeonBenchmarker
-from fixtures.neon_fixtures import NeonEnv, PgBin, PgProtocol, RemotePostgres, VanillaPostgres
+from fixtures.neon_fixtures import (
+    NeonEnv,
+    PgBin,
+    PgProtocol,
+    RemotePostgres,
+    VanillaPostgres,
+)
 from fixtures.pg_stats import PgStatTable
 
 
@@ -35,6 +35,13 @@ import requests
 from _pytest.config import Config
 from _pytest.config.argparsing import Parser
 from _pytest.fixtures import FixtureRequest
+
+# Type-related stuff
+from psycopg2.extensions import connection as PgConnection
+from psycopg2.extensions import cursor as PgCursor
+from psycopg2.extensions import make_dsn, parse_dsn
+from typing_extensions import Literal
+
 from fixtures.log_helper import log
 from fixtures.metrics import Metrics, parse_metrics
 from fixtures.types import Lsn, TenantId, TimelineId
@@ -46,12 +53,6 @@ from fixtures.utils import (
     subprocess_capture,
 )
 
-# Type-related stuff
-from psycopg2.extensions import connection as PgConnection
-from psycopg2.extensions import cursor as PgCursor
-from psycopg2.extensions import make_dsn, parse_dsn
-from typing_extensions import Literal
-
 """
 This file contains pytest fixtures. A fixture is a test resource that can be
 summoned by placing its name in the test's arguments.
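The docstring above is the whole fixture contract in two sentences; a minimal self-contained sketch of the pattern it describes (hypothetical names, not from this repository):

import pytest


@pytest.fixture
def greeting() -> str:
    # The returned value is handed to any test that names this fixture.
    return "hello"


def test_greeting(greeting: str):
    # The fixture is summoned simply by appearing in the argument list.
    assert greeting == "hello"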
@@ -1243,7 +1244,6 @@ class PageserverHttpClient(requests.Session):
         include_non_incremental_logical_size: bool = False,
         include_timeline_dir_layer_file_size_sum: bool = False,
     ) -> List[Dict[str, Any]]:
-
         params = {}
         if include_non_incremental_logical_size:
             params["include-non-incremental-logical-size"] = "true"
@@ -1375,7 +1375,6 @@ class PageserverHttpClient(requests.Session):
         timeline_id: TimelineId,
         max_concurrent_downloads: int,
     ) -> dict[str, Any]:
-
         body = {
             "max_concurrent_downloads": max_concurrent_downloads,
         }
@@ -1668,7 +1667,7 @@ class AbstractNeonCli(abc.ABC):
         env_vars["POSTGRES_DISTRIB_DIR"] = str(self.env.pg_distrib_dir)
         if self.env.rust_log_override is not None:
             env_vars["RUST_LOG"] = self.env.rust_log_override
-        for (extra_env_key, extra_env_value) in (extra_env_vars or {}).items():
+        for extra_env_key, extra_env_value in (extra_env_vars or {}).items():
             env_vars[extra_env_key] = extra_env_value
 
         # Pass coverage settings
@@ -2852,7 +2851,6 @@ class PostgresFactory:
         lsn: Optional[Lsn] = None,
         config_lines: Optional[List[str]] = None,
     ) -> Postgres:
-
         pg = Postgres(
             self.env,
             tenant_id=tenant_id or self.env.initial_tenant,
@@ -2876,7 +2874,6 @@ class PostgresFactory:
         lsn: Optional[Lsn] = None,
         config_lines: Optional[List[str]] = None,
     ) -> Postgres:
-
         pg = Postgres(
             self.env,
             tenant_id=tenant_id or self.env.initial_tenant,
@@ -3323,7 +3320,6 @@ def check_restored_datadir_content(
     log.info(f"filecmp result mismatch and error lists:\n\t mismatch={mismatch}\n\t error={error}")
 
     for f in mismatch:
-
         f1 = os.path.join(pg.pgdata_dir, f)
         f2 = os.path.join(restored_dir_path, f)
         stdout_filename = "{}.filediff".format(f2)
@@ -17,8 +17,8 @@ class Lsn:
             self.lsn_int = x
         else:
             """Convert lsn from hex notation to int."""
-            l, r = x.split("/")
-            self.lsn_int = (int(l, 16) << 32) + int(r, 16)
+            left, right = x.split("/")
+            self.lsn_int = (int(left, 16) << 32) + int(right, 16)
         assert 0 <= self.lsn_int <= 0xFFFFFFFF_FFFFFFFF
 
     def __str__(self) -> str:
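The rename above fixes pycodestyle's E741 (ambiguous single-letter names l, O, I), which ruff enforces, without changing behavior. A worked example of the hex-to-int conversion, using a made-up LSN value:

# "A/B" hex notation packs two 32-bit halves into one 64-bit integer.
x = "1/16B3748"
left, right = x.split("/")
lsn_int = (int(left, 16) << 32) + int(right, 16)
assert lsn_int == 4318771016  # (0x1 << 32) + 0x16B3748 = 4294967296 + 23803720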
@@ -8,9 +8,10 @@ from pathlib import Path
 from typing import Any, Callable, Dict, List, Tuple, TypeVar
 
 import allure  # type: ignore
-from fixtures.log_helper import log
 from psycopg2.extensions import cursor
 
+from fixtures.log_helper import log
+
 Fn = TypeVar("Fn", bound=Callable[..., Any])
 
 
@@ -5,6 +5,7 @@ from typing import List
 from fixtures.benchmark_fixture import PgBenchRunResult
 from fixtures.compare_fixtures import NeonCompare
 from fixtures.neon_fixtures import fork_at_current_lsn
+
 from performance.test_perf_pgbench import utc_now_timestamp
 
 # -----------------------------------------------------------------------
@@ -13,7 +13,6 @@ from fixtures.neon_fixtures import NeonEnvBuilder, wait_for_last_flush_lsn
 @pytest.mark.timeout(10000)
 @pytest.mark.parametrize("fillfactor", [10, 50, 100])
 def test_bulk_update(neon_env_builder: NeonEnvBuilder, zenbenchmark, fillfactor):
-
     env = neon_env_builder.init_start()
     n_records = 1000000
 
@@ -6,6 +6,7 @@ from typing import List
 import pytest
 from fixtures.compare_fixtures import PgCompare
 from fixtures.pg_stats import PgStatTable
+
 from performance.test_perf_pgbench import get_durations_matrix, get_scales_matrix
 
 
@@ -13,7 +13,6 @@ def test_gist_buffering_build(neon_with_baseline: PgCompare):
 
     with closing(env.pg.connect()) as conn:
         with conn.cursor() as cur:
-
             # Create test table.
             cur.execute("create table gist_point_tbl(id int4, p point)")
             cur.execute(
@@ -3,6 +3,7 @@ import threading
 import pytest
 from fixtures.compare_fixtures import PgCompare
 from fixtures.neon_fixtures import Postgres
+
 from performance.test_perf_pgbench import get_scales_matrix
 from performance.test_wal_backpressure import record_read_latency
 
@@ -7,7 +7,6 @@ from fixtures.neon_fixtures import NeonEnvBuilder
 # Benchmark searching the layer map, when there are a lot of small layer files.
 #
 def test_layer_map(neon_env_builder: NeonEnvBuilder, zenbenchmark):
-
     env = neon_env_builder.init_start()
     n_iters = 10
     n_records = 100000
@@ -36,7 +36,6 @@ async def parallel_load_different_tables(pg: PgProtocol, n_parallel: int):
 
 # Load 5 different tables in parallel with COPY TO
 def test_parallel_copy_different_tables(neon_with_baseline: PgCompare, n_parallel=5):
-
     env = neon_with_baseline
     conn = env.pg.connect()
     cur = conn.cursor()
@@ -10,6 +10,7 @@ from fixtures.compare_fixtures import NeonCompare, PgCompare, VanillaCompare
 from fixtures.log_helper import log
 from fixtures.neon_fixtures import DEFAULT_BRANCH_NAME, NeonEnvBuilder, PgBin
 from fixtures.types import Lsn
+
 from performance.test_perf_pgbench import get_durations_matrix, get_scales_matrix
 
 
@@ -22,7 +22,6 @@ def test_write_amplification(neon_with_baseline: PgCompare):
     with conn.cursor() as cur:
         with env.record_pageserver_writes("pageserver_writes"):
             with env.record_duration("run"):
-
                 # NOTE: Because each iteration updates every table already created,
                 # the runtime and write amplification is O(n^2), where n is the
                 # number of iterations.
@@ -2,6 +2,7 @@ import pytest
 from fixtures.log_helper import log
 from fixtures.neon_fixtures import NeonEnvBuilder, WalCraft
 
+
 # Restart nodes with WAL end having specially crafted shape, like last record
 # crossing segment boundary, to test decoding issues.
 
@@ -2,7 +2,12 @@ import os
 from pathlib import Path
 
 from fixtures.log_helper import log
-from fixtures.neon_fixtures import NeonEnvBuilder, PgBin, PortDistributor, VanillaPostgres
+from fixtures.neon_fixtures import (
+    NeonEnvBuilder,
+    PgBin,
+    PortDistributor,
+    VanillaPostgres,
+)
 from fixtures.types import Lsn, TimelineId
 from fixtures.utils import query_scalar, subprocess_capture
 
@@ -68,7 +68,6 @@ async def update_and_gc(env: NeonEnv, pg: Postgres, timeline: TimelineId):
 # (repro for https://github.com/neondatabase/neon/issues/1047)
 #
 def test_gc_aggressive(neon_env_builder: NeonEnvBuilder):
-
     # Disable pitr, because here we want to test branch creation after GC
     neon_env_builder.pageserver_config_override = "tenant_config={pitr_interval = '0 sec'}"
     env = neon_env_builder.init_start()
@@ -101,7 +100,6 @@ def test_gc_aggressive(neon_env_builder: NeonEnvBuilder):
 #
 @pytest.mark.parametrize("remote_storage_kind", [RemoteStorageKind.LOCAL_FS])
 def test_gc_index_upload(neon_env_builder: NeonEnvBuilder, remote_storage_kind: RemoteStorageKind):
-
     # Disable time-based pitr, we will use LSN-based thresholds in the manual GC calls
     neon_env_builder.pageserver_config_override = "tenant_config={pitr_interval = '0 sec'}"
 
@@ -146,7 +146,6 @@ def test_basic_eviction(
 
 
 def test_gc_of_remote_layers(neon_env_builder: NeonEnvBuilder):
-
     neon_env_builder.enable_remote_storage(
         remote_storage_kind=RemoteStorageKind.LOCAL_FS,
         test_name="test_gc_of_remote_layers",
@@ -250,7 +250,7 @@ def test_ondemand_download_timetravel(
     # Run queries at different points in time
     num_layers_downloaded = [0]
     resident_size = [get_resident_physical_size()]
-    for (checkpoint_number, lsn) in lsns:
+    for checkpoint_number, lsn in lsns:
         pg_old = env.postgres.create_start(
             branch_name="main", node_name=f"test_old_lsn_{checkpoint_number}", lsn=lsn
         )
@@ -22,7 +22,6 @@ def test_read_validation(neon_simple_env: NeonEnv):
 
     with closing(pg.connect()) as con:
         with con.cursor() as c:
-
             for e in extensions:
                 c.execute("create extension if not exists {};".format(e))
 
@@ -150,7 +149,6 @@ def test_read_validation_neg(neon_simple_env: NeonEnv):
 
     with closing(pg.connect()) as con:
         with con.cursor() as c:
-
             for e in extensions:
                 c.execute("create extension if not exists {};".format(e))
 
@@ -146,7 +146,7 @@ def test_timetravel(neon_simple_env: NeonEnv):
     env.pageserver.stop()
     env.pageserver.start()
 
-    for (i, lsn) in lsns:
+    for i, lsn in lsns:
         pg_old = env.postgres.create_start(
             branch_name="test_timetravel", node_name=f"test_old_lsn_{i}", lsn=lsn
         )
@@ -212,7 +212,6 @@ def test_remote_storage_upload_queue_retries(
     neon_env_builder: NeonEnvBuilder,
     remote_storage_kind: RemoteStorageKind,
 ):
-
     neon_env_builder.enable_remote_storage(
         remote_storage_kind=remote_storage_kind,
         test_name="test_remote_storage_upload_queue_retries",
@@ -374,7 +373,6 @@ def test_remote_timeline_client_calls_started_metric(
     neon_env_builder: NeonEnvBuilder,
     remote_storage_kind: RemoteStorageKind,
 ):
-
     neon_env_builder.enable_remote_storage(
         remote_storage_kind=remote_storage_kind,
         test_name="test_remote_timeline_client_metrics",
@@ -177,6 +177,7 @@ async def reattach_while_busy(
     # running, and when we retry the queries, they should start working
     # after the attach has finished.
 
+
     # FIXME:
     #
     # This is pretty unstable at the moment. I've seen it fail with a warning like this:
@@ -375,7 +375,6 @@ def test_tenant_relocation(
         neon_env_builder.broker,
         neon_env_builder.pg_distrib_dir,
     ):
-
         # Migrate either by attaching from s3 or import/export basebackup
         if method == "major":
             cmd = [
@@ -40,7 +40,6 @@ def test_timeline_delete(neon_simple_env: NeonEnv):
     with pytest.raises(
         PageserverApiException, match="Cannot delete timeline which has child timelines"
     ):
-
         timeline_path = (
             env.repo_dir
             / "tenants"
@@ -90,7 +90,6 @@ def test_timeline_size_createdropdb(neon_simple_env: NeonEnv):
             cur.execute("CREATE DATABASE foodb")
             with closing(pgmain.connect(dbname="foodb")) as conn:
                 with conn.cursor() as cur2:
-
                     cur2.execute("CREATE TABLE foo (t text)")
                     cur2.execute(
                         """
@@ -308,7 +307,6 @@ def test_timeline_initial_logical_size_calculation_cancellation(
 def test_timeline_physical_size_init(
     neon_env_builder: NeonEnvBuilder, remote_storage_kind: Optional[RemoteStorageKind]
 ):
-
     if remote_storage_kind is not None:
         neon_env_builder.enable_remote_storage(
             remote_storage_kind, "test_timeline_physical_size_init"
@@ -385,7 +383,6 @@ def test_timeline_physical_size_post_checkpoint(
 def test_timeline_physical_size_post_compaction(
     neon_env_builder: NeonEnvBuilder, remote_storage_kind: Optional[RemoteStorageKind]
 ):
-
     if remote_storage_kind is not None:
         neon_env_builder.enable_remote_storage(
             remote_storage_kind, "test_timeline_physical_size_init"
@@ -440,7 +437,6 @@ def test_timeline_physical_size_post_compaction(
 def test_timeline_physical_size_post_gc(
     neon_env_builder: NeonEnvBuilder, remote_storage_kind: Optional[RemoteStorageKind]
 ):
-
     if remote_storage_kind is not None:
         neon_env_builder.enable_remote_storage(
             remote_storage_kind, "test_timeline_physical_size_init"
@@ -7,7 +7,6 @@ from fixtures.neon_fixtures import NeonEnvBuilder
 # Test truncation of FSM and VM forks of a relation
 #
 def test_truncate(neon_env_builder: NeonEnvBuilder, zenbenchmark):
-
     env = neon_env_builder.init_start()
     n_records = 10000
     n_iter = 10
@@ -709,7 +709,6 @@ def test_sync_safekeepers(
     pg_bin: PgBin,
     port_distributor: PortDistributor,
 ):
-
     # We don't really need the full environment for this test, just the
     # safekeepers would be enough.
     neon_env_builder.num_safekeepers = 3
@@ -521,7 +521,6 @@ async def run_race_conditions(env: NeonEnv, pg: Postgres):
 
 # do inserts while concurrently getting up/down subsets of acceptors
 def test_race_conditions(neon_env_builder: NeonEnvBuilder):
-
     neon_env_builder.num_safekeepers = 3
     env = neon_env_builder.init_start()
 
@@ -588,7 +587,6 @@ async def run_wal_lagging(env: NeonEnv, pg: Postgres):
 
 # do inserts while restarting postgres and messing with safekeeper addresses
 def test_wal_lagging(neon_env_builder: NeonEnvBuilder):
-
     neon_env_builder.num_safekeepers = 3
     env = neon_env_builder.init_start()
 
@@ -1,6 +1,11 @@
 from pathlib import Path
 
-from fixtures.neon_fixtures import NeonEnvBuilder, PgBin, PortDistributor, VanillaPostgres
+from fixtures.neon_fixtures import (
+    NeonEnvBuilder,
+    PgBin,
+    PortDistributor,
+    VanillaPostgres,
+)
 from fixtures.types import TenantId
 
 