diff --git a/poetry.lock b/poetry.lock
index 7c84b2969b..96c65fdf05 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -3111,30 +3111,30 @@ six = "*"
 
 [[package]]
 name = "ruff"
-version = "0.7.0"
+version = "0.11.2"
 description = "An extremely fast Python linter and code formatter, written in Rust."
 optional = false
 python-versions = ">=3.7"
 groups = ["dev"]
 files = [
-    {file = "ruff-0.7.0-py3-none-linux_armv6l.whl", hash = "sha256:0cdf20c2b6ff98e37df47b2b0bd3a34aaa155f59a11182c1303cce79be715628"},
-    {file = "ruff-0.7.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:496494d350c7fdeb36ca4ef1c9f21d80d182423718782222c29b3e72b3512737"},
-    {file = "ruff-0.7.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:214b88498684e20b6b2b8852c01d50f0651f3cc6118dfa113b4def9f14faaf06"},
-    {file = "ruff-0.7.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:630fce3fefe9844e91ea5bbf7ceadab4f9981f42b704fae011bb8efcaf5d84be"},
-    {file = "ruff-0.7.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:211d877674e9373d4bb0f1c80f97a0201c61bcd1e9d045b6e9726adc42c156aa"},
-    {file = "ruff-0.7.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:194d6c46c98c73949a106425ed40a576f52291c12bc21399eb8f13a0f7073495"},
-    {file = "ruff-0.7.0-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:82c2579b82b9973a110fab281860403b397c08c403de92de19568f32f7178598"},
-    {file = "ruff-0.7.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9af971fe85dcd5eaed8f585ddbc6bdbe8c217fb8fcf510ea6bca5bdfff56040e"},
-    {file = "ruff-0.7.0-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b641c7f16939b7d24b7bfc0be4102c56562a18281f84f635604e8a6989948914"},
-    {file = "ruff-0.7.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d71672336e46b34e0c90a790afeac8a31954fd42872c1f6adaea1dff76fd44f9"},
-    {file = "ruff-0.7.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:ab7d98c7eed355166f367597e513a6c82408df4181a937628dbec79abb2a1fe4"},
-    {file = "ruff-0.7.0-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:1eb54986f770f49edb14f71d33312d79e00e629a57387382200b1ef12d6a4ef9"},
-    {file = "ruff-0.7.0-py3-none-musllinux_1_2_i686.whl", hash = "sha256:dc452ba6f2bb9cf8726a84aa877061a2462afe9ae0ea1d411c53d226661c601d"},
-    {file = "ruff-0.7.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:4b406c2dce5be9bad59f2de26139a86017a517e6bcd2688da515481c05a2cb11"},
-    {file = "ruff-0.7.0-py3-none-win32.whl", hash = "sha256:f6c968509f767776f524a8430426539587d5ec5c662f6addb6aa25bc2e8195ec"},
-    {file = "ruff-0.7.0-py3-none-win_amd64.whl", hash = "sha256:ff4aabfbaaba880e85d394603b9e75d32b0693152e16fa659a3064a85df7fce2"},
-    {file = "ruff-0.7.0-py3-none-win_arm64.whl", hash = "sha256:10842f69c245e78d6adec7e1db0a7d9ddc2fff0621d730e61657b64fa36f207e"},
-    {file = "ruff-0.7.0.tar.gz", hash = "sha256:47a86360cf62d9cd53ebfb0b5eb0e882193fc191c6d717e8bef4462bc3b9ea2b"},
+    {file = "ruff-0.11.2-py3-none-linux_armv6l.whl", hash = "sha256:c69e20ea49e973f3afec2c06376eb56045709f0212615c1adb0eda35e8a4e477"},
+    {file = "ruff-0.11.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:2c5424cc1c4eb1d8ecabe6d4f1b70470b4f24a0c0171356290b1953ad8f0e272"},
+    {file = "ruff-0.11.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:ecf20854cc73f42171eedb66f006a43d0a21bfb98a2523a809931cda569552d9"},
+    {file = "ruff-0.11.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0c543bf65d5d27240321604cee0633a70c6c25c9a2f2492efa9f6d4b8e4199bb"},
+    {file = "ruff-0.11.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:20967168cc21195db5830b9224be0e964cc9c8ecf3b5a9e3ce19876e8d3a96e3"},
+    {file = "ruff-0.11.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:955a9ce63483999d9f0b8f0b4a3ad669e53484232853054cc8b9d51ab4c5de74"},
+    {file = "ruff-0.11.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:86b3a27c38b8fce73bcd262b0de32e9a6801b76d52cdb3ae4c914515f0cef608"},
+    {file = "ruff-0.11.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a3b66a03b248c9fcd9d64d445bafdf1589326bee6fc5c8e92d7562e58883e30f"},
+    {file = "ruff-0.11.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0397c2672db015be5aa3d4dac54c69aa012429097ff219392c018e21f5085147"},
+    {file = "ruff-0.11.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:869bcf3f9abf6457fbe39b5a37333aa4eecc52a3b99c98827ccc371a8e5b6f1b"},
+    {file = "ruff-0.11.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:2a2b50ca35457ba785cd8c93ebbe529467594087b527a08d487cf0ee7b3087e9"},
+    {file = "ruff-0.11.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:7c69c74bf53ddcfbc22e6eb2f31211df7f65054bfc1f72288fc71e5f82db3eab"},
+    {file = "ruff-0.11.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:6e8fb75e14560f7cf53b15bbc55baf5ecbe373dd5f3aab96ff7aa7777edd7630"},
+    {file = "ruff-0.11.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:842a472d7b4d6f5924e9297aa38149e5dcb1e628773b70e6387ae2c97a63c58f"},
+    {file = "ruff-0.11.2-py3-none-win32.whl", hash = "sha256:aca01ccd0eb5eb7156b324cfaa088586f06a86d9e5314b0eb330cb48415097cc"},
+    {file = "ruff-0.11.2-py3-none-win_amd64.whl", hash = "sha256:3170150172a8f994136c0c66f494edf199a0bbea7a409f649e4bc8f4d7084080"},
+    {file = "ruff-0.11.2-py3-none-win_arm64.whl", hash = "sha256:52933095158ff328f4c77af3d74f0379e34fd52f175144cefc1b192e7ccd32b4"},
+    {file = "ruff-0.11.2.tar.gz", hash = "sha256:ec47591497d5a1050175bdf4e1a4e6272cddff7da88a2ad595e1e326041d8d94"},
 ]
 
 [[package]]
@@ -3844,4 +3844,4 @@ cffi = ["cffi (>=1.11)"]
 [metadata]
 lock-version = "2.1"
 python-versions = "^3.11"
-content-hash = "715fc8c896dcfa1b15054deeddcdec557ef93af91b26e1c8e4688fe4dbef5296"
+content-hash = "fb50cb6b291169dce3188560cdb31a14af95647318f8f0f0d718131dbaf1817a"
diff --git a/pyproject.toml b/pyproject.toml
index e009b0773e..c5129fac35 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -53,7 +53,7 @@ jsonnet = "^0.21.0-rc2"
 
 [tool.poetry.group.dev.dependencies]
 mypy = "==1.13.0"
-ruff = "^0.7.0"
+ruff = "^0.11.2"
 
 [build-system]
 requires = ["poetry-core>=1.0.0"]
@@ -109,4 +109,5 @@ select = [
     "W", # pycodestyle
     "B", # bugbear
     "UP", # pyupgrade
+    "TC", # flake8-type-checking
 ]
diff --git a/scripts/download_basebackup.py b/scripts/download_basebackup.py
index e23e4f99c3..08a7128842 100755
--- a/scripts/download_basebackup.py
+++ b/scripts/download_basebackup.py
@@ -8,9 +8,12 @@ from __future__ import annotations
 
 import argparse
+from typing import TYPE_CHECKING
 
 import psycopg2
-from psycopg2.extensions import connection as PgConnection
+
+if TYPE_CHECKING:
+    from psycopg2.extensions import connection as PgConnection
 
 
 def main(args: argparse.Namespace):
diff --git a/scripts/force_layer_download.py b/scripts/force_layer_download.py
index 835e28c5d6..154150e922 100644
--- a/scripts/force_layer_download.py
+++ b/scripts/force_layer_download.py
@@ -7,13 +7,13 @@
 import logging
 import signal
 import sys
 from collections import defaultdict
-from collections.abc import Awaitable
 from dataclasses import dataclass
 from typing import TYPE_CHECKING
 
 import aiohttp
 
 if TYPE_CHECKING:
+    from collections.abc import Awaitable
     from typing import Any
diff --git a/test_runner/cloud_regress/test_cloud_regress.py b/test_runner/cloud_regress/test_cloud_regress.py
index 63427c1912..b90f5c5afc 100644
--- a/test_runner/cloud_regress/test_cloud_regress.py
+++ b/test_runner/cloud_regress/test_cloud_regress.py
@@ -4,11 +4,15 @@ Run the regression tests on the cloud instance of Neon
 
 from __future__ import annotations
 
-from pathlib import Path
+from typing import TYPE_CHECKING
 
 import pytest
-from fixtures.neon_fixtures import RemotePostgres
-from fixtures.pg_version import PgVersion
+
+if TYPE_CHECKING:
+    from pathlib import Path
+
+    from fixtures.neon_fixtures import RemotePostgres
+    from fixtures.pg_version import PgVersion
 
 
 @pytest.mark.timeout(7200)
diff --git a/test_runner/fixtures/auth_tokens.py b/test_runner/fixtures/auth_tokens.py
index 8382ce20b3..7d994b6cc0 100644
--- a/test_runner/fixtures/auth_tokens.py
+++ b/test_runner/fixtures/auth_tokens.py
@@ -2,11 +2,12 @@ from __future__ import annotations
 
 from dataclasses import dataclass
 from enum import StrEnum
-from typing import Any
+from typing import TYPE_CHECKING, Any
 
 import jwt
 
-from fixtures.common_types import TenantId
+if TYPE_CHECKING:
+    from fixtures.common_types import TenantId
 
 
 @dataclass
diff --git a/test_runner/fixtures/benchmark_fixture.py b/test_runner/fixtures/benchmark_fixture.py
index fa3747c08f..00e415cc98 100644
--- a/test_runner/fixtures/benchmark_fixture.py
+++ b/test_runner/fixtures/benchmark_fixture.py
@@ -15,18 +15,20 @@ from typing import TYPE_CHECKING
 
 import allure
 import pytest
-from _pytest.config import Config
-from _pytest.config.argparsing import Parser
-from _pytest.fixtures import FixtureRequest
-from _pytest.terminal import TerminalReporter
-from fixtures.common_types import TenantId, TimelineId
 from fixtures.log_helper import log
-from fixtures.neon_fixtures import NeonPageserver
 
 if TYPE_CHECKING:
     from collections.abc import Callable, Iterator, Mapping
 
+    from _pytest.config import Config
+    from _pytest.config.argparsing import Parser
+    from _pytest.fixtures import FixtureRequest
+    from _pytest.terminal import TerminalReporter
+
+    from fixtures.common_types import TenantId, TimelineId
+    from fixtures.neon_fixtures import NeonPageserver
+
 
 """
 This file contains fixtures for micro-benchmarks.
diff --git a/test_runner/fixtures/compare_fixtures.py b/test_runner/fixtures/compare_fixtures.py
index c0892399bd..150046b99a 100644
--- a/test_runner/fixtures/compare_fixtures.py
+++ b/test_runner/fixtures/compare_fixtures.py
@@ -11,7 +11,6 @@ from pathlib import Path
 from typing import TYPE_CHECKING, final
 
 import pytest
-from _pytest.fixtures import FixtureRequest
 from typing_extensions import override
 
 from fixtures.benchmark_fixture import MetricReport, NeonBenchmarker
@@ -24,11 +23,14 @@ from fixtures.neon_fixtures import (
     VanillaPostgres,
     wait_for_last_flush_lsn,
 )
-from fixtures.pg_stats import PgStatTable
 
 if TYPE_CHECKING:
     from collections.abc import Iterator
 
+    from _pytest.fixtures import FixtureRequest
+
+    from fixtures.pg_stats import PgStatTable
+
 
 class PgCompare(ABC):
     """Common interface of all postgres implementations, useful for benchmarks.
diff --git a/test_runner/fixtures/compute_reconfigure.py b/test_runner/fixtures/compute_reconfigure.py
index 205b9141e0..d49c3f5601 100644
--- a/test_runner/fixtures/compute_reconfigure.py
+++ b/test_runner/fixtures/compute_reconfigure.py
@@ -4,8 +4,6 @@ import concurrent.futures
 from typing import TYPE_CHECKING
 
 import pytest
-from pytest_httpserver import HTTPServer
-from werkzeug.wrappers.request import Request
 from werkzeug.wrappers.response import Response
 
 from fixtures.common_types import TenantId
@@ -15,6 +13,9 @@ if TYPE_CHECKING:
     from collections.abc import Callable
     from typing import Any
 
+    from pytest_httpserver import HTTPServer
+    from werkzeug.wrappers.request import Request
+
 
 class ComputeReconfigure:
     def __init__(self, server: HTTPServer):
diff --git a/test_runner/fixtures/fast_import.py b/test_runner/fixtures/fast_import.py
index d8fb189345..f9e5f9c1db 100644
--- a/test_runner/fixtures/fast_import.py
+++ b/test_runner/fixtures/fast_import.py
@@ -147,7 +147,7 @@ def fast_import(
         pg_distrib_dir,
         pg_version,
         workdir,
-        cleanup=not cast(bool, pytestconfig.getoption("--preserve-database-files")),
+        cleanup=not cast("bool", pytestconfig.getoption("--preserve-database-files")),
     ) as fi:
         yield fi
diff --git a/test_runner/fixtures/h2server.py b/test_runner/fixtures/h2server.py
index 3e35af3b5b..d6a5fe57a6 100644
--- a/test_runner/fixtures/h2server.py
+++ b/test_runner/fixtures/h2server.py
@@ -10,7 +10,6 @@ import asyncio
 import collections
 import io
 import json
-from collections.abc import AsyncIterable
 from typing import TYPE_CHECKING, final
 
 import pytest_asyncio
@@ -31,6 +30,7 @@ from h2.settings import SettingCodes
 from typing_extensions import override
 
 if TYPE_CHECKING:
+    from collections.abc import AsyncIterable
     from typing import Any
diff --git a/test_runner/fixtures/metrics.py b/test_runner/fixtures/metrics.py
index 7cf60f2ab2..106a588711 100644
--- a/test_runner/fixtures/metrics.py
+++ b/test_runner/fixtures/metrics.py
@@ -1,12 +1,15 @@
 from __future__ import annotations
 
 from collections import defaultdict
+from typing import TYPE_CHECKING
 
 from prometheus_client.parser import text_string_to_metric_families
-from prometheus_client.samples import Sample
 
 from fixtures.log_helper import log
 
+if TYPE_CHECKING:
+    from prometheus_client.samples import Sample
+
 
 class Metrics:
     metrics: dict[str, list[Sample]]
diff --git a/test_runner/fixtures/neon_cli.py b/test_runner/fixtures/neon_cli.py
index 6e53987e7c..d555ee2989 100644
--- a/test_runner/fixtures/neon_cli.py
+++ b/test_runner/fixtures/neon_cli.py
@@ -7,7 +7,6 @@ import subprocess
 import tempfile
 import textwrap
 from itertools import chain, product
-from pathlib import Path
 from typing import TYPE_CHECKING, cast
 
 import toml
@@ -15,14 +14,15 @@ import toml
 from fixtures.common_types import Lsn, TenantId, TimelineId
 from fixtures.log_helper import log
 from fixtures.pageserver.common_types import IndexPartDump
-from fixtures.pg_version import PgVersion
 
 if TYPE_CHECKING:
+    from pathlib import Path
     from typing import (
         Any,
-        cast,
     )
 
+    from fixtures.pg_version import PgVersion
+
 
 # Used to be an ABC. abc.ABC removed due to linter without name change.
 class AbstractNeonCli:
@@ -36,7 +36,7 @@ class AbstractNeonCli:
         self.extra_env = extra_env
         self.binpath = binpath
 
-    COMMAND: str = cast(str, None) # To be overwritten by the derived class.
+    COMMAND: str = cast("str", None) # To be overwritten by the derived class.
 
     def raw_cli(
         self,
diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py
index eeaf35b377..d3cb35fe49 100644
--- a/test_runner/fixtures/neon_fixtures.py
+++ b/test_runner/fixtures/neon_fixtures.py
@@ -14,14 +14,12 @@ import threading
 import time
 import uuid
 from collections import defaultdict
-from collections.abc import Iterable, Iterator
 from contextlib import closing, contextmanager
 from dataclasses import dataclass
 from datetime import datetime
 from enum import StrEnum
 from functools import cached_property
 from pathlib import Path
-from types import TracebackType
 from typing import TYPE_CHECKING, cast
 from urllib.parse import quote, urlparse
@@ -34,19 +32,12 @@ import psycopg2.sql
 import pytest
 import requests
 import toml
-from _pytest.config import Config
-from _pytest.config.argparsing import Parser
-from _pytest.fixtures import FixtureRequest
 from jwcrypto import jwk
-from mypy_boto3_kms import KMSClient
-from mypy_boto3_s3 import S3Client
 
 # Type-related stuff
 from psycopg2.extensions import connection as PgConnection
 from psycopg2.extensions import cursor as PgCursor
 from psycopg2.extensions import make_dsn, parse_dsn
-from pytest_httpserver import HTTPServer
-from urllib3.util.retry import Retry
 
 from fixtures import overlayfs
 from fixtures.auth_tokens import AuthKeys, TokenScope
@@ -60,7 +51,6 @@ from fixtures.common_types import (
 )
 from fixtures.compute_migrations import NUM_COMPUTE_MIGRATIONS
 from fixtures.endpoint.http import EndpointHttpClient
-from fixtures.h2server import H2Server
 from fixtures.log_helper import log
 from fixtures.metrics import Metrics, MetricsGetter, parse_metrics
 from fixtures.neon_cli import NeonLocalCli, Pagectl
@@ -78,7 +68,6 @@ from fixtures.pageserver.utils import (
     wait_for_last_record_lsn,
 )
 from fixtures.paths import get_test_repo_dir, shared_snapshot_dir
-from fixtures.pg_version import PgVersion
 from fixtures.port_distributor import PortDistributor
 from fixtures.remote_storage import (
     LocalFsStorage,
@@ -108,10 +97,21 @@ from fixtures.utils import (
 
 from .neon_api import NeonAPI, NeonApiEndpoint
 
 if TYPE_CHECKING:
-    from collections.abc import Callable
+    from collections.abc import Callable, Iterable, Iterator
+    from types import TracebackType
     from typing import Any, Self, TypeVar
 
+    from _pytest.config import Config
+    from _pytest.config.argparsing import Parser
+    from _pytest.fixtures import FixtureRequest
+    from mypy_boto3_kms import KMSClient
+    from mypy_boto3_s3 import S3Client
+    from pytest_httpserver import HTTPServer
+    from urllib3.util.retry import Retry
+
+    from fixtures.h2server import H2Server
     from fixtures.paths import SnapshotDirLocked
+    from fixtures.pg_version import PgVersion
 
 T = TypeVar("T")
@@ -497,9 +497,9 @@ class NeonEnvBuilder:
         else:
             self.pageserver_wal_receiver_protocol = PageserverWalReceiverProtocol.INTERPRETED
 
-        assert test_name.startswith(
-            "test_"
-        ), "Unexpectedly instantiated from outside a test function"
+        assert test_name.startswith("test_"), (
+            "Unexpectedly instantiated from outside a test function"
+        )
         self.test_name = test_name
         self.compatibility_neon_binpath = compatibility_neon_binpath
         self.compatibility_pg_distrib_dir = compatibility_pg_distrib_dir
@@ -508,12 +508,12 @@ class NeonEnvBuilder:
         self.mixdir = self.test_output_dir / "mixdir_neon"
 
         if self.version_combination is not None:
-            assert (
-                self.compatibility_neon_binpath is not None
-            ), "the environment variable COMPATIBILITY_NEON_BIN is required when using mixed versions"
-            assert (
-                self.compatibility_pg_distrib_dir is not None
-            ), "the environment variable COMPATIBILITY_POSTGRES_DISTRIB_DIR is required when using mixed versions"
+            assert self.compatibility_neon_binpath is not None, (
+                "the environment variable COMPATIBILITY_NEON_BIN is required when using mixed versions"
+            )
+            assert self.compatibility_pg_distrib_dir is not None, (
+                "the environment variable COMPATIBILITY_POSTGRES_DISTRIB_DIR is required when using mixed versions"
+            )
             self.mixdir.mkdir(mode=0o755, exist_ok=True)
             self._mix_versions()
             self.test_may_use_compatibility_snapshot_binaries = True
@@ -795,9 +795,9 @@ class NeonEnvBuilder:
         work = ident_state_dir / "work"
         assert upper.is_dir()
         assert work.is_dir()
-        assert (
-            self.test_overlay_dir not in dst.parents
-        ), "otherwise workdir cleanup below wouldn't work"
+        assert self.test_overlay_dir not in dst.parents, (
+            "otherwise workdir cleanup below wouldn't work"
+        )
         # find index, still not mutating state
         idxmap = {
             existing_ident: idx
@@ -863,9 +863,9 @@ class NeonEnvBuilder:
         self.pageserver_remote_storage = ret
 
     def enable_safekeeper_remote_storage(self, kind: RemoteStorageKind):
-        assert (
-            self.safekeepers_remote_storage is None
-        ), "safekeepers_remote_storage already configured"
+        assert self.safekeepers_remote_storage is None, (
+            "safekeepers_remote_storage already configured"
+        )
 
         self.safekeepers_remote_storage = self._configure_and_create_remote_storage(
             kind, RemoteStorageUser.SAFEKEEPER
@@ -1421,9 +1421,9 @@ class NeonEnv:
         assert that there is only one. Tests with multiple pageservers should always use
         get_pageserver with an explicit ID.
         """
-        assert (
-            len(self.pageservers) == 1
-        ), "env.pageserver must only be used with single pageserver NeonEnv"
+        assert len(self.pageservers) == 1, (
+            "env.pageserver must only be used with single pageserver NeonEnv"
+        )
         return self.pageservers[0]
 
     def get_pageserver(self, id: int | None) -> NeonPageserver:
@@ -1614,7 +1614,7 @@ def neon_simple_env(
         compatibility_pg_distrib_dir=compatibility_pg_distrib_dir,
         pg_version=pg_version,
         run_id=run_id,
-        preserve_database_files=cast(bool, pytestconfig.getoption("--preserve-database-files")),
+        preserve_database_files=cast("bool", pytestconfig.getoption("--preserve-database-files")),
         test_name=request.node.name,
         test_output_dir=test_output_dir,
         pageserver_virtual_file_io_engine=pageserver_virtual_file_io_engine,
@@ -1683,7 +1683,7 @@ def neon_env_builder(
         combination=combination,
         pg_version=pg_version,
         run_id=run_id,
-        preserve_database_files=cast(bool, pytestconfig.getoption("--preserve-database-files")),
+        preserve_database_files=cast("bool", pytestconfig.getoption("--preserve-database-files")),
         pageserver_virtual_file_io_engine=pageserver_virtual_file_io_engine,
         test_name=request.node.name,
         test_output_dir=test_output_dir,
@@ -3577,9 +3577,9 @@ class NeonProxy(PgProtocol):
 
     @backoff.on_exception(backoff.expo, requests.exceptions.RequestException, max_time=10)
     def _wait_until_ready(self):
-        assert (
-            self._popen and self._popen.poll() is None
-        ), "Proxy exited unexpectedly. Check test log."
+        assert self._popen and self._popen.poll() is None, (
+            "Proxy exited unexpectedly. Check test log."
+        )
         requests.get(f"http://{self.host}:{self.http_port}/v1/status")
 
     def http_query(self, query, args, **kwargs):
@@ -3787,9 +3787,9 @@ class NeonAuthBroker:
 
     @backoff.on_exception(backoff.expo, requests.exceptions.RequestException, max_time=10)
    def _wait_until_ready(self):
-        assert (
-            self._popen and self._popen.poll() is None
-        ), "Proxy exited unexpectedly. Check test log."
+        assert self._popen and self._popen.poll() is None, (
+            "Proxy exited unexpectedly. Check test log."
+        )
         requests.get(f"http://{self.host}:{self.http_port}/v1/status")
 
     async def query(self, query, args, **kwargs):
@@ -4069,9 +4069,9 @@ class Endpoint(PgProtocol, LogUtils):
             m = re.search(r"=\s*(\S+)", line)
             assert m is not None, f"malformed config line {line}"
             size = m.group(1)
-            assert size_to_bytes(size) >= size_to_bytes(
-                "1MB"
-            ), "LFC size cannot be set less than 1MB"
+            assert size_to_bytes(size) >= size_to_bytes("1MB"), (
+                "LFC size cannot be set less than 1MB"
+            )
             lfc_path_escaped = str(lfc_path).replace("'", "''")
             config_lines = [
                 f"neon.file_cache_path = '{lfc_path_escaped}'",
@@ -4082,12 +4082,12 @@ class Endpoint(PgProtocol, LogUtils):
             ] + config_lines
         else:
             for line in config_lines:
-                assert (
-                    line.find("neon.max_file_cache_size") == -1
-                ), "Setting LFC parameters is not allowed when LFC is disabled"
-                assert (
-                    line.find("neon.file_cache_size_limit") == -1
-                ), "Setting LFC parameters is not allowed when LFC is disabled"
+                assert line.find("neon.max_file_cache_size") == -1, (
+                    "Setting LFC parameters is not allowed when LFC is disabled"
+                )
+                assert line.find("neon.file_cache_size_limit") == -1, (
+                    "Setting LFC parameters is not allowed when LFC is disabled"
+                )
 
         self.config(config_lines)
@@ -4925,9 +4925,9 @@ class StorageScrubber:
             healthy = False
         else:
             for _, warnings in with_warnings.items():
-                assert (
-                    len(warnings) > 0
-                ), "with_warnings value should not be empty, running without verbose mode?"
+                assert len(warnings) > 0, (
+                    "with_warnings value should not be empty, running without verbose mode?"
+                )
                 if not self._check_line_list_allowed(warnings):
                     healthy = False
                     break
@@ -4941,9 +4941,9 @@ class StorageScrubber:
             healthy = False
         else:
             for _, errors in with_errors.items():
-                assert (
-                    len(errors) > 0
-                ), "with_errors value should not be empty, running without verbose mode?"
+                assert len(errors) > 0, (
+                    "with_errors value should not be empty, running without verbose mode?"
+                )
                 if not self._check_line_list_allowed(errors):
                     healthy = False
                     break
diff --git a/test_runner/fixtures/pageserver/allowed_errors.py b/test_runner/fixtures/pageserver/allowed_errors.py
index f4dede6548..c1c5f470cc 100755
--- a/test_runner/fixtures/pageserver/allowed_errors.py
+++ b/test_runner/fixtures/pageserver/allowed_errors.py
@@ -5,7 +5,10 @@ from __future__ import annotations
 import argparse
 import re
 import sys
-from collections.abc import Iterable
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from collections.abc import Iterable
 
 
 def scan_pageserver_log_for_errors(
diff --git a/test_runner/fixtures/pageserver/http.py b/test_runner/fixtures/pageserver/http.py
index 13cab448f3..347bceb785 100644
--- a/test_runner/fixtures/pageserver/http.py
+++ b/test_runner/fixtures/pageserver/http.py
@@ -7,8 +7,7 @@ import string
 import time
 from collections import defaultdict
 from dataclasses import dataclass
-from datetime import datetime
-from typing import Any
+from typing import TYPE_CHECKING, Any
 
 import requests
 from requests.adapters import HTTPAdapter
@@ -26,6 +25,9 @@ from fixtures.metrics import Metrics, MetricsGetter, parse_metrics
 from fixtures.pg_version import PgVersion
 from fixtures.utils import EnhancedJSONEncoder, Fn
 
+if TYPE_CHECKING:
+    from datetime import datetime
+
 
 class PageserverApiException(Exception):
     def __init__(self, message, status_code: int):
diff --git a/test_runner/fixtures/pageserver/many_tenants.py b/test_runner/fixtures/pageserver/many_tenants.py
index b6d19af84c..4b066d6cf3 100644
--- a/test_runner/fixtures/pageserver/many_tenants.py
+++ b/test_runner/fixtures/pageserver/many_tenants.py
@@ -4,18 +4,19 @@ import concurrent.futures
 from typing import TYPE_CHECKING
 
 import fixtures.pageserver.remote_storage
-from fixtures.common_types import TenantId, TimelineId
 from fixtures.log_helper import log
-from fixtures.neon_fixtures import (
-    NeonEnv,
-    NeonEnvBuilder,
-)
 from fixtures.remote_storage import LocalFsStorage, RemoteStorageKind
 
 if TYPE_CHECKING:
     from collections.abc import Callable
     from typing import Any
 
+    from fixtures.common_types import TenantId, TimelineId
+    from fixtures.neon_fixtures import (
+        NeonEnv,
+        NeonEnvBuilder,
+    )
+
 
 def single_timeline(
     neon_env_builder: NeonEnvBuilder,
diff --git a/test_runner/fixtures/pageserver/remote_storage.py b/test_runner/fixtures/pageserver/remote_storage.py
index 54acb9ce50..1fc473b633 100644
--- a/test_runner/fixtures/pageserver/remote_storage.py
+++ b/test_runner/fixtures/pageserver/remote_storage.py
@@ -5,11 +5,9 @@ import os
 import queue
 import shutil
 import threading
-from pathlib import Path
 from typing import TYPE_CHECKING
 
 from fixtures.common_types import TenantId, TimelineId
-from fixtures.neon_fixtures import NeonEnv
 from fixtures.pageserver.common_types import (
     InvalidFileName,
     parse_layer_file_name,
@@ -17,8 +15,11 @@ from fixtures.pageserver.common_types import (
 from fixtures.remote_storage import LocalFsStorage
 
 if TYPE_CHECKING:
+    from pathlib import Path
     from typing import Any
 
+    from fixtures.neon_fixtures import NeonEnv
+
 
 def duplicate_one_tenant(env: NeonEnv, template_tenant: TenantId, new_tenant: TenantId):
     remote_storage = env.pageserver_remote_storage
diff --git a/test_runner/fixtures/pageserver/utils.py b/test_runner/fixtures/pageserver/utils.py
index 66f61f9b4c..bc5076758d 100644
--- a/test_runner/fixtures/pageserver/utils.py
+++ b/test_runner/fixtures/pageserver/utils.py
@@ -3,13 +3,6 @@ from __future__ import annotations
 import time
 from typing import TYPE_CHECKING
 
-from mypy_boto3_s3.type_defs import (
-    DeleteObjectOutputTypeDef,
-    EmptyResponseMetadataTypeDef,
-    ListObjectsV2OutputTypeDef,
-    ObjectTypeDef,
-)
-
 from fixtures.common_types import Lsn, TenantId, TenantShardId, TimelineId
 from fixtures.log_helper import log
 from fixtures.pageserver.http import PageserverApiException, PageserverHttpClient
@@ -19,6 +12,13 @@ from fixtures.utils import wait_until
 if TYPE_CHECKING:
     from typing import Any
 
+    from mypy_boto3_s3.type_defs import (
+        DeleteObjectOutputTypeDef,
+        EmptyResponseMetadataTypeDef,
+        ListObjectsV2OutputTypeDef,
+        ObjectTypeDef,
+    )
+
 
 def assert_tenant_state(
     pageserver_http: PageserverHttpClient,
@@ -241,9 +241,9 @@ def wait_for_upload_queue_empty(
         found = False
         for f in finished:
             if all([s.labels[label] == f.labels[label] for label in remaining_labels]):
-                assert (
-                    not found
-                ), "duplicate match, remaining_labels don't uniquely identify sample"
+                assert not found, (
+                    "duplicate match, remaining_labels don't uniquely identify sample"
+                )
                 tl.append((s.labels, int(s.value) - int(f.value)))
                 found = True
         if not found:
diff --git a/test_runner/fixtures/parametrize.py b/test_runner/fixtures/parametrize.py
index c33342c89e..e5deb50d46 100644
--- a/test_runner/fixtures/parametrize.py
+++ b/test_runner/fixtures/parametrize.py
@@ -6,13 +6,14 @@ from typing import TYPE_CHECKING
 
 import allure
 import pytest
 import toml
-from _pytest.python import Metafunc
 
 from fixtures.pg_version import PgVersion
 
 if TYPE_CHECKING:
     from typing import Any
 
+    from _pytest.python import Metafunc
+
 
 """
 Dynamically parametrize tests by different parameters
diff --git a/test_runner/fixtures/paths.py b/test_runner/fixtures/paths.py
index fc4fb3629b..ddf2e8f4f0 100644
--- a/test_runner/fixtures/paths.py
+++ b/test_runner/fixtures/paths.py
@@ -6,7 +6,6 @@ import subprocess
 import threading
 from fcntl import LOCK_EX, LOCK_UN, flock
 from pathlib import Path
-from types import TracebackType
 from typing import TYPE_CHECKING
 
 import pytest
@@ -18,6 +17,7 @@ from fixtures.utils import allure_attach_from_dir
 
 if TYPE_CHECKING:
     from collections.abc import Iterator
+    from types import TracebackType
 
 
 BASE_DIR = Path(__file__).parents[2]
@@ -101,9 +101,9 @@ def compatibility_snapshot_dir() -> Iterator[Path]:
     if os.getenv("REMOTE_ENV"):
         return
     compatibility_snapshot_dir_env = os.environ.get("COMPATIBILITY_SNAPSHOT_DIR")
-    assert (
-        compatibility_snapshot_dir_env is not None
-    ), "COMPATIBILITY_SNAPSHOT_DIR is not set. It should be set to `compatibility_snapshot_pg(PG_VERSION)` path generateted by test_create_snapshot (ideally generated by the previous version of Neon)"
+    assert compatibility_snapshot_dir_env is not None, (
+        "COMPATIBILITY_SNAPSHOT_DIR is not set. It should be set to `compatibility_snapshot_pg(PG_VERSION)` path generateted by test_create_snapshot (ideally generated by the previous version of Neon)"
+    )
     compatibility_snapshot_dir = Path(compatibility_snapshot_dir_env).resolve()
     yield compatibility_snapshot_dir
diff --git a/test_runner/fixtures/remote_storage.py b/test_runner/fixtures/remote_storage.py
index cac84c07e7..f98ac4b92e 100644
--- a/test_runner/fixtures/remote_storage.py
+++ b/test_runner/fixtures/remote_storage.py
@@ -7,22 +7,24 @@ import os
 import re
 from dataclasses import dataclass
 from enum import StrEnum
-from pathlib import Path
 from typing import TYPE_CHECKING
 
 import boto3
 import toml
 from moto.server import ThreadedMotoServer
-from mypy_boto3_s3 import S3Client
 from typing_extensions import override
 
-from fixtures.common_types import TenantId, TenantShardId, TimelineId
 from fixtures.log_helper import log
 from fixtures.pageserver.common_types import IndexPartDump
 
 if TYPE_CHECKING:
+    from pathlib import Path
     from typing import Any
 
+    from mypy_boto3_s3 import S3Client
+
+    from fixtures.common_types import TenantId, TenantShardId, TimelineId
+
 
 TIMELINE_INDEX_PART_FILE_NAME = "index_part.json"
 TENANT_HEATMAP_FILE_NAME = "heatmap-v1.json"
@@ -448,9 +450,9 @@ class RemoteStorageKind(StrEnum):
         env_secret_key = os.getenv("AWS_SECRET_ACCESS_KEY")
         env_access_token = os.getenv("AWS_SESSION_TOKEN")
         env_profile = os.getenv("AWS_PROFILE")
-        assert (
-            env_access_key and env_secret_key and env_access_token
-        ) or env_profile, "need to specify either access key and secret access key or profile"
+        assert (env_access_key and env_secret_key and env_access_token) or env_profile, (
+            "need to specify either access key and secret access key or profile"
+        )
 
         bucket_name = bucket_name or os.getenv("REMOTE_STORAGE_S3_BUCKET")
         assert bucket_name is not None, "no remote storage bucket name provided"
diff --git a/test_runner/fixtures/reruns.py b/test_runner/fixtures/reruns.py
index f2a25ae8f6..a68b287f8a 100644
--- a/test_runner/fixtures/reruns.py
+++ b/test_runner/fixtures/reruns.py
@@ -3,12 +3,11 @@ from __future__ import annotations
 from collections.abc import MutableMapping
 from typing import TYPE_CHECKING, cast
 
-import pytest
-
 if TYPE_CHECKING:
     from collections.abc import MutableMapping
     from typing import Any
 
+    import pytest
     from _pytest.config import Config
diff --git a/test_runner/fixtures/safekeeper/utils.py b/test_runner/fixtures/safekeeper/utils.py
index 922cdedccc..5608b8504e 100644
--- a/test_runner/fixtures/safekeeper/utils.py
+++ b/test_runner/fixtures/safekeeper/utils.py
@@ -1,10 +1,14 @@
 from __future__ import annotations
 
-from fixtures.common_types import TenantId, TimelineId
+from typing import TYPE_CHECKING
+
 from fixtures.log_helper import log
-from fixtures.safekeeper.http import SafekeeperHttpClient
 from fixtures.utils import wait_until
 
+if TYPE_CHECKING:
+    from fixtures.common_types import TenantId, TimelineId
+    from fixtures.safekeeper.http import SafekeeperHttpClient
+
 
 def wait_walreceivers_absent(
     sk_http_cli: SafekeeperHttpClient, tenant_id: TenantId, timeline_id: TimelineId
diff --git a/test_runner/fixtures/slow.py b/test_runner/fixtures/slow.py
index 4c6372d515..d6f74b2b7f 100644
--- a/test_runner/fixtures/slow.py
+++ b/test_runner/fixtures/slow.py
@@ -3,12 +3,13 @@ from __future__ import annotations
 from typing import TYPE_CHECKING
 
 import pytest
-from _pytest.config import Config
-from _pytest.config.argparsing import Parser
 
 if TYPE_CHECKING:
     from typing import Any
 
+    from _pytest.config import Config
+    from _pytest.config.argparsing import Parser
+
 
 """
 This plugin allows tests to be marked as slow using pytest.mark.slow. By default slow
diff --git a/test_runner/fixtures/storage_controller_proxy.py b/test_runner/fixtures/storage_controller_proxy.py
index be95a98ff9..ca3ad43774 100644
--- a/test_runner/fixtures/storage_controller_proxy.py
+++ b/test_runner/fixtures/storage_controller_proxy.py
@@ -5,9 +5,7 @@ from typing import TYPE_CHECKING
 
 import pytest
 import requests
-from pytest_httpserver import HTTPServer
 from werkzeug.datastructures import Headers
-from werkzeug.wrappers.request import Request
 from werkzeug.wrappers.response import Response
 
 from fixtures.log_helper import log
@@ -15,6 +13,9 @@ from fixtures.log_helper import log
 if TYPE_CHECKING:
     from typing import Any
 
+    from pytest_httpserver import HTTPServer
+    from werkzeug.wrappers.request import Request
+
 
 class StorageControllerProxy:
     def __init__(self, server: HTTPServer):
diff --git a/test_runner/fixtures/utils.py b/test_runner/fixtures/utils.py
index d1b2a5a400..4ece6e89a8 100644
--- a/test_runner/fixtures/utils.py
+++ b/test_runner/fixtures/utils.py
@@ -19,7 +19,6 @@ from urllib.parse import urlencode
 import allure
 import pytest
 import zstandard
-from psycopg2.extensions import cursor
 from typing_extensions import override
 
 from fixtures.common_types import Id, Lsn
@@ -34,6 +33,8 @@ if TYPE_CHECKING:
     from collections.abc import Iterable
     from typing import IO
 
+    from psycopg2.extensions import cursor
+
     from fixtures.common_types import TimelineId
     from fixtures.neon_fixtures import PgBin
@@ -512,7 +513,9 @@ def assert_no_errors(log_file: Path, service: str, allowed_errors: list[str]):
     for _lineno, error in errors:
         log.info(f"not allowed {service} error: {error.strip()}")
 
-    assert not errors, f"First log error on {service}: {errors[0]}\nHint: use scripts/check_allowed_errors.sh to test any new allowed_error you add"
+    assert not errors, (
+        f"First log error on {service}: {errors[0]}\nHint: use scripts/check_allowed_errors.sh to test any new allowed_error you add"
+    )
 
 
 def assert_pageserver_backups_equal(left: Path, right: Path, skip_files: set[str]):
@@ -550,18 +553,18 @@
     left_list, right_list = map(build_hash_list, [left, right])
 
-    assert len(left_list) == len(
-        right_list
-    ), f"unexpected number of files on tar files, {len(left_list)} != {len(right_list)}"
+    assert len(left_list) == len(right_list), (
+        f"unexpected number of files on tar files, {len(left_list)} != {len(right_list)}"
+    )
 
     mismatching: set[str] = set()
     for left_tuple, right_tuple in zip(left_list, right_list, strict=False):
         left_path, left_hash = left_tuple
         right_path, right_hash = right_tuple
-        assert (
-            left_path == right_path
-        ), f"file count matched, expected these to be same paths: {left_path}, {right_path}"
+        assert left_path == right_path, (
+            f"file count matched, expected these to be same paths: {left_path}, {right_path}"
+        )
         if left_hash != right_hash:
             mismatching.add(left_path)
diff --git a/test_runner/fixtures/workload.py b/test_runner/fixtures/workload.py
index 1947a9c3fb..e17a8e989b 100644
--- a/test_runner/fixtures/workload.py
+++ b/test_runner/fixtures/workload.py
@@ -3,7 +3,6 @@ from __future__ import annotations
 import threading
 from typing import TYPE_CHECKING
 
-from fixtures.common_types import TenantId, TimelineId
 from fixtures.log_helper import log
 from fixtures.neon_fixtures import (
     Endpoint,
@@ -17,6 +16,8 @@ from fixtures.pageserver.utils import wait_for_last_record_lsn
 if TYPE_CHECKING:
     from typing import Any
 
+    from fixtures.common_types import TenantId, TimelineId
+
 # neon_local doesn't handle creating/modifying endpoints concurrently, so we use a mutex
 # to ensure we don't do that: this enables running lots of Workloads in parallel safely.
 ENDPOINT_LOCK = threading.Lock()
diff --git a/test_runner/logical_repl/test_clickhouse.py b/test_runner/logical_repl/test_clickhouse.py
index 6b522fa46d..c05684baf9 100644
--- a/test_runner/logical_repl/test_clickhouse.py
+++ b/test_runner/logical_repl/test_clickhouse.py
@@ -7,14 +7,17 @@ from __future__ import annotations
 
 import hashlib
 import os
 import time
+from typing import TYPE_CHECKING
 
 import clickhouse_connect
 import psycopg2
 import pytest
 
 from fixtures.log_helper import log
-from fixtures.neon_fixtures import RemotePostgres
 from fixtures.utils import wait_until
 
+if TYPE_CHECKING:
+    from fixtures.neon_fixtures import RemotePostgres
+
 
 def query_clickhouse(
     client,
diff --git a/test_runner/logical_repl/test_debezium.py b/test_runner/logical_repl/test_debezium.py
index 8023d64d3d..a53e6cef92 100644
--- a/test_runner/logical_repl/test_debezium.py
+++ b/test_runner/logical_repl/test_debezium.py
@@ -7,14 +7,17 @@ from __future__ import annotations
 
 import json
 import os
 import time
+from typing import TYPE_CHECKING
 
 import psycopg2
 import pytest
 import requests
 
 from fixtures.log_helper import log
-from fixtures.neon_fixtures import RemotePostgres
 from fixtures.utils import wait_until
 
+if TYPE_CHECKING:
+    from fixtures.neon_fixtures import RemotePostgres
+
 
 class DebeziumAPI:
     """
diff --git a/test_runner/performance/pageserver/util.py b/test_runner/performance/pageserver/util.py
index bcc3db69f0..7a6d88f79c 100644
--- a/test_runner/performance/pageserver/util.py
+++ b/test_runner/performance/pageserver/util.py
@@ -7,18 +7,19 @@ from __future__ import annotations
 
 from typing import TYPE_CHECKING
 
 import fixtures.pageserver.many_tenants as many_tenants
-from fixtures.common_types import TenantId, TimelineId
 from fixtures.log_helper import log
-from fixtures.neon_fixtures import (
-    NeonEnv,
-    NeonEnvBuilder,
-)
 from fixtures.pageserver.utils import wait_until_all_tenants_state
 
 if TYPE_CHECKING:
     from collections.abc import Callable
     from typing import Any
 
+    from fixtures.common_types import TenantId, TimelineId
+    from fixtures.neon_fixtures import (
+        NeonEnv,
+        NeonEnvBuilder,
+    )
+
 
 def ensure_pageserver_ready_for_benchmarking(env: NeonEnv, n_tenants: int):
     """
diff --git a/test_runner/performance/test_branch_creation.py b/test_runner/performance/test_branch_creation.py
index cf2212d447..b2bd94fae7 100644
--- a/test_runner/performance/test_branch_creation.py
+++ b/test_runner/performance/test_branch_creation.py
@@ -7,16 +7,19 @@ import threading
 import time
 import timeit
 from contextlib import closing
+from typing import TYPE_CHECKING
 
 import pytest
 
 from fixtures.benchmark_fixture import MetricReport, NeonBenchmarker
 from fixtures.common_types import Lsn
-from fixtures.compare_fixtures import NeonCompare
 from fixtures.log_helper import log
-from fixtures.neon_fixtures import NeonPageserver
 from fixtures.pageserver.utils import wait_for_last_record_lsn
 from fixtures.utils import wait_until
-from prometheus_client.samples import Sample
+
+if TYPE_CHECKING:
+    from fixtures.compare_fixtures import NeonCompare
+    from fixtures.neon_fixtures import NeonPageserver
+    from prometheus_client.samples import Sample
 
 
 def _record_branch_creation_durations(neon_compare: NeonCompare, durs: list[float]):
@@ -45,9 +48,9 @@ def test_branch_creation_heavy_write(neon_compare: NeonCompare, n_branches: int)
     tenant, _ = env.create_tenant(
         conf={
             "gc_period": "5 s",
-            "gc_horizon": f"{4 * 1024 ** 2}",
-            "checkpoint_distance": f"{2 * 1024 ** 2}",
-            "compaction_target_size": f"{1024 ** 2}",
+            "gc_horizon": f"{4 * 1024**2}",
+            "checkpoint_distance": f"{2 * 1024**2}",
+            "compaction_target_size": f"{1024**2}",
             "compaction_threshold": "2",
             # set PITR interval to be small, so we can do GC
             "pitr_interval": "5 s",
@@ -82,10 +85,10 @@ def test_branch_creation_heavy_write(neon_compare: NeonCompare, n_branches: int)
         env.create_branch(f"b{i + 1}", ancestor_branch_name=f"b{p}", tenant_id=tenant)
 
         dur = timeit.default_timer() - timer
-        log.info(f"Creating branch b{i+1} took {dur}s")
+        log.info(f"Creating branch b{i + 1} took {dur}s")
         branch_creation_durations.append(dur)
 
-        threads.append(threading.Thread(target=run_pgbench, args=(f"b{i+1}",), daemon=True))
+        threads.append(threading.Thread(target=run_pgbench, args=(f"b{i + 1}",), daemon=True))
         threads[-1].start()
 
     for thread in threads:
diff --git a/test_runner/performance/test_branching.py b/test_runner/performance/test_branching.py
index dbff116360..1b29dab288 100644
--- a/test_runner/performance/test_branching.py
+++ b/test_runner/performance/test_branching.py
@@ -2,13 +2,16 @@ from __future__ import annotations
 
 import timeit
 from pathlib import Path
+from typing import TYPE_CHECKING
 
 from fixtures.benchmark_fixture import PgBenchRunResult
-from fixtures.compare_fixtures import NeonCompare
 from fixtures.neon_fixtures import fork_at_current_lsn
 
 from performance.test_perf_pgbench import utc_now_timestamp
 
+if TYPE_CHECKING:
+    from fixtures.compare_fixtures import NeonCompare
+
 
 # -----------------------------------------------------------------------
 # Start of `test_compare_child_and_root_*` tests
 # -----------------------------------------------------------------------
diff --git a/test_runner/performance/test_bulk_tenant_create.py b/test_runner/performance/test_bulk_tenant_create.py
index 15a03ba456..4307e815d2 100644
--- a/test_runner/performance/test_bulk_tenant_create.py
+++ b/test_runner/performance/test_bulk_tenant_create.py
@@ -1,10 +1,13 @@
 from __future__ import annotations
 
 import timeit
+from typing import TYPE_CHECKING
 
 import pytest
 
 from fixtures.benchmark_fixture import MetricReport
-from fixtures.neon_fixtures import NeonEnvBuilder
+
+if TYPE_CHECKING:
+    from fixtures.neon_fixtures import NeonEnvBuilder
 
 # Run bulk tenant creation test.
 #
diff --git a/test_runner/performance/test_compaction.py b/test_runner/performance/test_compaction.py
index eaa89ae754..3df7710494 100644
--- a/test_runner/performance/test_compaction.py
+++ b/test_runner/performance/test_compaction.py
@@ -1,12 +1,15 @@
 from __future__ import annotations
 
 from contextlib import closing
+from typing import TYPE_CHECKING
 
 import pytest
 
-from fixtures.compare_fixtures import NeonCompare
 from fixtures.log_helper import log
 from fixtures.neon_fixtures import wait_for_last_flush_lsn
 
+if TYPE_CHECKING:
+    from fixtures.compare_fixtures import NeonCompare
+
 
 #
 # Test compaction and image layer creation performance.
diff --git a/test_runner/performance/test_compare_pg_stats.py b/test_runner/performance/test_compare_pg_stats.py
index a86995d6d3..c6289e39e1 100644
--- a/test_runner/performance/test_compare_pg_stats.py
+++ b/test_runner/performance/test_compare_pg_stats.py
@@ -3,13 +3,16 @@ from __future__ import annotations
 import os
 import threading
 import time
+from typing import TYPE_CHECKING
 
 import pytest
-from fixtures.compare_fixtures import PgCompare
-from fixtures.pg_stats import PgStatTable
 
 from performance.test_perf_pgbench import get_durations_matrix, get_scales_matrix
 
+if TYPE_CHECKING:
+    from fixtures.compare_fixtures import PgCompare
+    from fixtures.pg_stats import PgStatTable
+
 
 def get_seeds_matrix(default: int = 100):
     seeds = os.getenv("TEST_PG_BENCH_SEEDS_MATRIX", default=str(default))
diff --git a/test_runner/performance/test_compute_ctl_api.py b/test_runner/performance/test_compute_ctl_api.py
index d6d0a84e8e..fc65cb969d 100644
--- a/test_runner/performance/test_compute_ctl_api.py
+++ b/test_runner/performance/test_compute_ctl_api.py
@@ -1,10 +1,13 @@
 from __future__ import annotations
 
 import datetime
+from typing import TYPE_CHECKING
 
 import pytest
 
 from fixtures.benchmark_fixture import MetricReport, NeonBenchmarker
-from fixtures.neon_fixtures import NeonEnv
+
+if TYPE_CHECKING:
+    from fixtures.neon_fixtures import NeonEnv
 
 
 @pytest.mark.timeout(120)
diff --git a/test_runner/performance/test_compute_startup.py b/test_runner/performance/test_compute_startup.py
index d002d2e221..abedb4be27 100644
--- a/test_runner/performance/test_compute_startup.py
+++ b/test_runner/performance/test_compute_startup.py
@@ -1,9 +1,13 @@
 from __future__ import annotations
 
+from typing import TYPE_CHECKING
+
 import pytest
 import requests
 
 from fixtures.benchmark_fixture import MetricReport, NeonBenchmarker
-from fixtures.neon_fixtures import NeonEnvBuilder, PgBin
+
+if TYPE_CHECKING:
+    from fixtures.neon_fixtures import NeonEnvBuilder, PgBin
 
 
 # Just start and measure duration.
diff --git a/test_runner/performance/test_copy.py b/test_runner/performance/test_copy.py
index 0e56fdc96f..8535e6843d 100644
--- a/test_runner/performance/test_copy.py
+++ b/test_runner/performance/test_copy.py
@@ -2,11 +2,13 @@ from __future__ import annotations
 
 from contextlib import closing
 from io import BufferedReader, RawIOBase
-from typing import final
+from typing import TYPE_CHECKING, final
 
-from fixtures.compare_fixtures import PgCompare
 from typing_extensions import override
 
+if TYPE_CHECKING:
+    from fixtures.compare_fixtures import PgCompare
+
 
 @final
 class CopyTestData(RawIOBase):
diff --git a/test_runner/performance/test_dup_key.py b/test_runner/performance/test_dup_key.py
index f7e4a629d6..34a10499e7 100644
--- a/test_runner/performance/test_dup_key.py
+++ b/test_runner/performance/test_dup_key.py
@@ -1,11 +1,14 @@
 from __future__ import annotations
 
 from contextlib import closing
+from typing import TYPE_CHECKING
 
 import pytest
-from fixtures.compare_fixtures import PgCompare
 from pytest_lazyfixture import lazy_fixture
 
+if TYPE_CHECKING:
+    from fixtures.compare_fixtures import PgCompare
+
 
 @pytest.mark.parametrize(
     "env",
diff --git a/test_runner/performance/test_gc_feedback.py b/test_runner/performance/test_gc_feedback.py
index 7c9e9f47c8..804933d3a5 100644
--- a/test_runner/performance/test_gc_feedback.py
+++ b/test_runner/performance/test_gc_feedback.py
@@ -1,11 +1,14 @@
 from __future__ import annotations
 
 import json
+from typing import TYPE_CHECKING
 
 import pytest
 from fixtures.benchmark_fixture import MetricReport, NeonBenchmarker
 from fixtures.log_helper import log
-from fixtures.neon_fixtures import NeonEnvBuilder
+
+if TYPE_CHECKING:
+    from fixtures.neon_fixtures import NeonEnvBuilder
 
 
 def gc_feedback_impl(neon_env_builder: NeonEnvBuilder, zenbenchmark: NeonBenchmarker, mode: str):
@@ -18,9 +21,9 @@ def gc_feedback_impl(neon_env_builder: NeonEnvBuilder, zenbenchmark: NeonBenchma
         # disable default GC and compaction
         "gc_period": "1000 m",
         "compaction_period": "0 s",
-        "gc_horizon": f"{1024 ** 2}",
-        "checkpoint_distance": f"{1024 ** 2}",
-        "compaction_target_size": f"{1024 ** 2}",
+        "gc_horizon": f"{1024**2}",
+        "checkpoint_distance": f"{1024**2}",
+        "compaction_target_size": f"{1024**2}",
         # set PITR interval to be small, so we can do GC
         "pitr_interval": "10 s",
         # "compaction_threshold": "3",
diff --git a/test_runner/performance/test_gist_build.py b/test_runner/performance/test_gist_build.py
index e8ef59722d..b82f3ba9d4 100644
--- a/test_runner/performance/test_gist_build.py
+++ b/test_runner/performance/test_gist_build.py
@@ -1,8 +1,10 @@
 from __future__ import annotations
 
 from contextlib import closing
+from typing import TYPE_CHECKING
 
-from fixtures.compare_fixtures import PgCompare
+if TYPE_CHECKING:
+    from fixtures.compare_fixtures import PgCompare
 
 
 #
diff --git a/test_runner/performance/test_hot_page.py b/test_runner/performance/test_hot_page.py
index d025566919..a2b109bc59 100644
--- a/test_runner/performance/test_hot_page.py
+++ b/test_runner/performance/test_hot_page.py
@@ -1,11 +1,14 @@
 from __future__ import annotations
 
 from contextlib import closing
+from typing import TYPE_CHECKING
 
 import pytest
-from fixtures.compare_fixtures import PgCompare
 from pytest_lazyfixture import lazy_fixture
 
+if TYPE_CHECKING:
+    from fixtures.compare_fixtures import PgCompare
+
 
 @pytest.mark.parametrize(
     "env",
diff --git a/test_runner/performance/test_hot_table.py b/test_runner/performance/test_hot_table.py
index 792d35321d..f4ea52dedc 100644
--- a/test_runner/performance/test_hot_table.py
+++ b/test_runner/performance/test_hot_table.py
@@ -1,11 +1,14 @@
 from __future__ import annotations
 
 from contextlib import closing
+from typing import TYPE_CHECKING
 
 import pytest
-from fixtures.compare_fixtures import PgCompare
 from pytest_lazyfixture import lazy_fixture
 
+if TYPE_CHECKING:
+    from fixtures.compare_fixtures import PgCompare
+
 
 @pytest.mark.parametrize(
     "env",
diff --git a/test_runner/performance/test_latency.py b/test_runner/performance/test_latency.py
index 133a2cfd8a..0431f0bf42 100644
--- a/test_runner/performance/test_latency.py
+++ b/test_runner/performance/test_latency.py
@@ -1,18 +1,21 @@
 from __future__ import annotations
 
 import threading
+from typing import TYPE_CHECKING
 
 import pytest
-from fixtures.compare_fixtures import PgCompare
-from fixtures.neon_fixtures import PgProtocol
 
 from performance.test_perf_pgbench import get_scales_matrix
 from performance.test_wal_backpressure import record_read_latency
 
+if TYPE_CHECKING:
+    from fixtures.compare_fixtures import PgCompare
+    from fixtures.neon_fixtures import PgProtocol
+
 
 def start_write_workload(pg: PgProtocol, scale: int = 10):
     with pg.connect().cursor() as cur:
-        cur.execute(f"create table big as select generate_series(1,{scale*100_000})")
+        cur.execute(f"create table big as select generate_series(1,{scale * 100_000})")
 
 
 # Measure latency of reads on one table, while lots of writes are happening on another table.
@@ -24,7 +27,7 @@ def test_measure_read_latency_heavy_write_workload(neon_with_baseline: PgCompare
     pg = env.pg
 
     with pg.connect().cursor() as cur:
-        cur.execute(f"create table small as select generate_series(1,{scale*100_000})")
+        cur.execute(f"create table small as select generate_series(1,{scale * 100_000})")
 
     write_thread = threading.Thread(target=start_write_workload, args=(pg, scale * 100))
     write_thread.start()
diff --git a/test_runner/performance/test_logical_replication.py b/test_runner/performance/test_logical_replication.py
index 807ed522e1..361dedc12f 100644
--- a/test_runner/performance/test_logical_replication.py
+++ b/test_runner/performance/test_logical_replication.py
@@ -1,7 +1,6 @@
 from __future__ import annotations
 
 import time
-from collections.abc import Iterator
 from contextlib import contextmanager
 from typing import TYPE_CHECKING, cast
 
@@ -14,6 +13,7 @@ from fixtures.log_helper import log
 from fixtures.neon_fixtures import logical_replication_sync
 
 if TYPE_CHECKING:
+    from collections.abc import Iterator
     from subprocess import Popen
     from typing import AnyStr
diff --git a/test_runner/performance/test_parallel_copy.py b/test_runner/performance/test_parallel_copy.py
index 1689755b6f..f1d1c1904b 100644
--- a/test_runner/performance/test_parallel_copy.py
+++ b/test_runner/performance/test_parallel_copy.py
@@ -2,8 +2,10 @@ from __future__ import annotations
 
 import asyncio
 from io import BytesIO
+from typing import TYPE_CHECKING
 
-from fixtures.neon_fixtures import Endpoint, NeonEnv
+if TYPE_CHECKING:
+    from fixtures.neon_fixtures import Endpoint, NeonEnv
 
 
 async def repeat_bytes(buf, repetitions: int):
diff --git a/test_runner/performance/test_parallel_copy_to.py b/test_runner/performance/test_parallel_copy_to.py
index ddee0ebcd1..0427ecaf0a 100644
--- a/test_runner/performance/test_parallel_copy_to.py
+++ b/test_runner/performance/test_parallel_copy_to.py
@@ -2,9 +2,11 @@ from __future__ import annotations
 
 import asyncio
 from io import BytesIO
+from typing import TYPE_CHECKING
 
-from fixtures.compare_fixtures import PgCompare
-from fixtures.neon_fixtures import PgProtocol
+if TYPE_CHECKING:
+    from fixtures.compare_fixtures import PgCompare
+    from fixtures.neon_fixtures import PgProtocol
 
 
 async def repeat_bytes(buf, repetitions: int):
diff --git a/test_runner/performance/test_perf_ingest_using_pgcopydb.py b/test_runner/performance/test_perf_ingest_using_pgcopydb.py
index da62422fca..1aff58bcc6 100644
--- a/test_runner/performance/test_perf_ingest_using_pgcopydb.py
+++ b/test_runner/performance/test_perf_ingest_using_pgcopydb.py
@@ -134,8 +134,8 @@ def run_command_and_log_output(command, log_file_path: Path):
     # Define a list of necessary environment variables for pgcopydb
     custom_env_vars = {
         "LD_LIBRARY_PATH": f"{os.getenv('PGCOPYDB_LIB_PATH')}:{os.getenv('PG_16_LIB_PATH')}",
-        "PGCOPYDB_SOURCE_PGURI": cast(str, os.getenv("BENCHMARK_INGEST_SOURCE_CONNSTR")),
-        "PGCOPYDB_TARGET_PGURI": cast(str, os.getenv("BENCHMARK_INGEST_TARGET_CONNSTR")),
+        "PGCOPYDB_SOURCE_PGURI": cast("str", os.getenv("BENCHMARK_INGEST_SOURCE_CONNSTR")),
+        "PGCOPYDB_TARGET_PGURI": cast("str", os.getenv("BENCHMARK_INGEST_TARGET_CONNSTR")),
         "PGOPTIONS": "-c idle_in_transaction_session_timeout=0 -c maintenance_work_mem=8388608 -c max_parallel_maintenance_workers=7",
     }
     # Combine the current environment with custom variables
diff --git a/test_runner/performance/test_perf_olap.py b/test_runner/performance/test_perf_olap.py
index bc4ab64105..90e69565ec 100644
--- a/test_runner/performance/test_perf_olap.py
+++ b/test_runner/performance/test_perf_olap.py
@@ -3,12 +3,15 @@ from __future__ import annotations
 import os
 from dataclasses import dataclass
 from pathlib import Path
+from typing import TYPE_CHECKING
 
 import pytest
-from _pytest.mark import ParameterSet
-from fixtures.compare_fixtures import RemoteCompare
 from fixtures.log_helper import log
 
+if TYPE_CHECKING:
+    from _pytest.mark import ParameterSet
+    from fixtures.compare_fixtures import RemoteCompare
+
 
 @dataclass
 class LabelledQuery:
diff --git a/test_runner/performance/test_perf_oltp_large_tenant.py b/test_runner/performance/test_perf_oltp_large_tenant.py
index 842e6a904b..957a4ec796 100644
--- a/test_runner/performance/test_perf_oltp_large_tenant.py
+++ b/test_runner/performance/test_perf_oltp_large_tenant.py
@@ -4,14 +4,17 @@ import os
 import timeit
 from contextlib import closing
 from pathlib import Path
+from typing import TYPE_CHECKING
 
 import pytest
 
 from fixtures.benchmark_fixture import PgBenchRunResult
-from fixtures.compare_fixtures import PgCompare
 from fixtures.log_helper import log
 
 from performance.test_perf_pgbench import get_durations_matrix, utc_now_timestamp
 
+if TYPE_CHECKING:
+    from fixtures.compare_fixtures import PgCompare
+
 
 def get_custom_scripts(
     default: str = "insert_webhooks.sql@2 select_any_webhook_with_skew.sql@4 select_recent_webhook.sql@4",
diff --git a/test_runner/performance/test_perf_pgbench.py b/test_runner/performance/test_perf_pgbench.py
index 24ff3d23fa..57889ceadf 100644
--- a/test_runner/performance/test_perf_pgbench.py
+++ b/test_runner/performance/test_perf_pgbench.py
@@ -6,12 +6,15 @@ import os
 import timeit
 from datetime import datetime
 from pathlib import Path
+from typing import TYPE_CHECKING
 
 import pytest
 
 from fixtures.benchmark_fixture import MetricReport, PgBenchInitResult, PgBenchRunResult
-from fixtures.compare_fixtures import PgCompare
 from fixtures.utils import get_scale_for_db
 
+if TYPE_CHECKING:
+    from fixtures.compare_fixtures import PgCompare
+
 
 @enum.unique
 class PgBenchLoadType(enum.Enum):
diff --git a/test_runner/performance/test_perf_pgvector_queries.py b/test_runner/performance/test_perf_pgvector_queries.py
index 4a5ea94c4b..372aab276c 100644
--- a/test_runner/performance/test_perf_pgvector_queries.py
+++ b/test_runner/performance/test_perf_pgvector_queries.py
@@ -1,10 +1,14 @@
 from __future__ import annotations
 
+from typing import TYPE_CHECKING
+
 import pytest
-from fixtures.compare_fixtures import PgCompare
 
 from performance.test_perf_pgbench import PgBenchLoadType, get_durations_matrix, run_test_pgbench
 
+if TYPE_CHECKING:
+    from fixtures.compare_fixtures import PgCompare
+
 
 # The following test runs on an existing database that has pgvector extension installed
 # and a table with 1 million embedding vectors loaded and indexed with HNSW.
diff --git a/test_runner/performance/test_physical_replication.py b/test_runner/performance/test_physical_replication.py
index 38b04b9114..6351f03e08 100644
--- a/test_runner/performance/test_physical_replication.py
+++ b/test_runner/performance/test_physical_replication.py
@@ -2,10 +2,8 @@ from __future__ import annotations
 
 import csv
 import os
-import subprocess
 import time
 import traceback
-from pathlib import Path
 from typing import TYPE_CHECKING
 
 import psycopg2
@@ -15,14 +13,16 @@ from fixtures.benchmark_fixture import MetricReport
 from fixtures.common_types import Lsn
 from fixtures.log_helper import log
 from fixtures.neon_api import connection_parameters_to_env
-from fixtures.pg_version import PgVersion
 
 if TYPE_CHECKING:
+    import subprocess
+    from pathlib import Path
     from typing import Any
 
     from fixtures.benchmark_fixture import NeonBenchmarker
     from fixtures.neon_api import NeonAPI
     from fixtures.neon_fixtures import PgBin
+    from fixtures.pg_version import PgVersion
 
 
 # Granularity of ~0.5 sec
@@ -186,7 +186,7 @@ def test_replication_start_stop(
     prefix = "pgbench_agg"
     num_replicas = 2
     configuration_test_time_sec = 10 * 60
-    pgbench_duration = f"-T{2 ** num_replicas * configuration_test_time_sec}"
+    pgbench_duration = f"-T{2**num_replicas * configuration_test_time_sec}"
     error_occurred = False
 
     project = neon_api.create_project(pg_version)
diff --git a/test_runner/performance/test_random_writes.py b/test_runner/performance/test_random_writes.py
index 46848a8af8..bb8048e97d 100644
--- a/test_runner/performance/test_random_writes.py
+++ b/test_runner/performance/test_random_writes.py
@@ -2,11 +2,14 @@ from __future__ import annotations
 
 import random
 from contextlib import closing
+from typing import TYPE_CHECKING
 
 from fixtures.benchmark_fixture import MetricReport
-from fixtures.compare_fixtures import PgCompare
 from fixtures.utils import query_scalar
 
+if TYPE_CHECKING:
+    from fixtures.compare_fixtures import PgCompare
+
 
 # This is a clear-box test that demonstrates the worst case scenario for the
 # "1 segment per layer" implementation of the pageserver. It writes to random
diff --git a/test_runner/performance/test_seqscans.py b/test_runner/performance/test_seqscans.py
index 36ee4eb201..37854df1fa 100644
--- a/test_runner/performance/test_seqscans.py
+++ b/test_runner/performance/test_seqscans.py
@@ -4,13 +4,16 @@ from __future__ import annotations
 
 from contextlib import closing
+from typing import TYPE_CHECKING
 
 import pytest
 from fixtures.benchmark_fixture import MetricReport
-from fixtures.compare_fixtures import PgCompare
 from fixtures.log_helper import log
 from pytest_lazyfixture import lazy_fixture
 
+if TYPE_CHECKING:
+    from fixtures.compare_fixtures import PgCompare
+
 
 @pytest.mark.parametrize(
     "rows,iters,workers",
diff --git a/test_runner/performance/test_storage_controller_scale.py b/test_runner/performance/test_storage_controller_scale.py
index 00470f27f1..58436c4739 100644
--- a/test_runner/performance/test_storage_controller_scale.py
+++ b/test_runner/performance/test_storage_controller_scale.py
@@ -5,10 +5,10 @@ import random
 import time
 from collections import defaultdict
 from enum import StrEnum
+from typing import TYPE_CHECKING
 
 import pytest
 
 from fixtures.common_types import TenantId, TenantShardId, TimelineArchivalState, TimelineId
-from fixtures.compute_reconfigure import ComputeReconfigure
 from fixtures.log_helper import log
 from fixtures.neon_fixtures import (
     NeonEnv,
@@ -22,6 +22,9 @@ from fixtures.pageserver.http import PageserverApiException, PageserverHttpClien
 from fixtures.pg_version import PgVersion
 from fixtures.utils import wait_until
 
+if TYPE_CHECKING:
+    from fixtures.compute_reconfigure import ComputeReconfigure
+
 
 def get_consistent_node_shard_counts(env: NeonEnv, total_shards) -> defaultdict[str, int]:
     """
@@ -171,7 +174,7 @@ def test_storage_controller_many_tenants(
 
     rss = env.storage_controller.get_metric_value("process_resident_memory_bytes")
     assert rss is not None
-    log.info(f"Resident memory: {rss} ({ rss / total_shards} per shard)")
+    log.info(f"Resident memory: {rss} ({rss / total_shards} per shard)")
     assert rss < expect_memory_per_shard * total_shards
 
     def assert_all_tenants_scheduled_in_home_az():
@@ -186,15 +189,15 @@ def test_storage_controller_many_tenants(
             assert preferred_az == shard["preferred_az_id"]
 
             # Attachment should be in the preferred AZ
-            assert shard["preferred_az_id"] == az_selector(
-                shard["node_attached"]
-            ), f"Shard {shard['tenant_shard_id']} not in {shard['preferred_az_id']}"
+            assert shard["preferred_az_id"] == az_selector(shard["node_attached"]), (
+                f"Shard {shard['tenant_shard_id']} not in {shard['preferred_az_id']}"
+            )
 
             # Secondary locations should not be in the preferred AZ
             for node_secondary in shard["node_secondary"]:
-                assert (
-                    shard["preferred_az_id"] != az_selector(node_secondary)
-                ), f"Shard {shard['tenant_shard_id']} secondary should be in {shard['preferred_az_id']}"
+                assert shard["preferred_az_id"] != az_selector(node_secondary), (
+                    f"Shard {shard['tenant_shard_id']} secondary should be in {shard['preferred_az_id']}"
+                )
 
             # There should only be one secondary location (i.e. no migrations in flight)
             assert len(shard["node_secondary"]) == 1
@@ -531,9 +534,9 @@ def test_storage_controller_many_tenants(
         for node in nodes:
             if node["id"] in node_ids:
                 checked_any = True
-                assert (
-                    node["availability"] == expected_availability
-                ), f"Node {node['id']} is not {expected_availability} yet: {node['availability']}"
+                assert node["availability"] == expected_availability, (
+                    f"Node {node['id']} is not {expected_availability} yet: {node['availability']}"
+                )
 
         assert checked_any
@@ -550,9 +553,9 @@ def test_storage_controller_many_tenants(
         desc = env.storage_controller.tenant_describe(tenant_id)
         for shard in desc["shards"]:
            # Attachment should be outside the AZ where we killed the pageservers
-            assert (
-                az_selector(shard["node_attached"]) != victim_az
-            ), f"Shard {shard['tenant_shard_id']} still in {victim_az} (node {shard['node_attached']})"
+            assert az_selector(shard["node_attached"]) != victim_az, (
+                f"Shard {shard['tenant_shard_id']} still in {victim_az} (node {shard['node_attached']})"
+            )
 
     # Bring back the pageservers
     for ps in killed_pageservers:
diff --git a/test_runner/performance/test_wal_backpressure.py b/test_runner/performance/test_wal_backpressure.py
index c6d795ce4d..4824fa1ba8 100644
--- a/test_runner/performance/test_wal_backpressure.py
+++ b/test_runner/performance/test_wal_backpressure.py
@@ -4,7 +4,6 @@ import statistics
 import threading
 import time
 import timeit
-from collections.abc import Generator
 from typing import TYPE_CHECKING
 
 import pytest
@@ -17,7 +16,7 @@ from fixtures.neon_fixtures import NeonEnvBuilder, PgBin, flush_ep_to_pageserver
 from performance.test_perf_pgbench import get_durations_matrix, get_scales_matrix
 
 if TYPE_CHECKING:
-    from collections.abc import Callable
+    from collections.abc import Callable, Generator
     from typing import Any
@@ -37,9 +36,9 @@ def pg_compare(request) -> Generator[PgCompare, None, None]:
         yield fixture
 
     else:
-        assert (
-            len(x) == 2
-        ), f"request param ({request.param}) should have a format of `neon_{{safekeepers_enable_fsync}}`"
+        assert len(x) == 2, (
+            f"request param ({request.param}) should have a format of `neon_{{safekeepers_enable_fsync}}`"
+        )
 
         # `NeonCompare` interface
         neon_env_builder = request.getfixturevalue("neon_env_builder")
@@ -278,7 +277,7 @@ def record_read_latency(
                 t2 = timeit.default_timer()
 
                 log.info(
-                    f"Executed read query {read_query}, got {cur.fetchall()}, read time {t2-t1:.2f}s"
+                    f"Executed read query {read_query}, got {cur.fetchall()}, read time {t2 - t1:.2f}s"
                 )
                 read_latencies.append(t2 - t1)
             except Exception as err:
diff --git a/test_runner/performance/test_write_amplification.py b/test_runner/performance/test_write_amplification.py
index 87824604f8..55ebd1cd23 100644
--- a/test_runner/performance/test_write_amplification.py
+++ b/test_runner/performance/test_write_amplification.py
@@ -14,8 +14,10 @@ from __future__ import annotations
 
 from contextlib import closing
+from typing import TYPE_CHECKING
 
-from fixtures.compare_fixtures import PgCompare
+if TYPE_CHECKING:
+    from fixtures.compare_fixtures import PgCompare
 
 
 def test_write_amplification(neon_with_baseline: PgCompare):
diff --git a/test_runner/pg_clients/test_pg_clients.py b/test_runner/pg_clients/test_pg_clients.py
index ffa710da06..d402a9ffa8 100644
--- a/test_runner/pg_clients/test_pg_clients.py
+++ b/test_runner/pg_clients/test_pg_clients.py
@@ -3,11 +3,14 @@ from __future__ import annotations
 
 import shutil
 from pathlib import Path
 from tempfile import NamedTemporaryFile
+from typing import TYPE_CHECKING
 
 import pytest
-from fixtures.neon_fixtures import RemotePostgres from fixtures.utils import subprocess_capture +if TYPE_CHECKING: + from fixtures.neon_fixtures import RemotePostgres + @pytest.mark.remote_cluster @pytest.mark.parametrize( diff --git a/test_runner/regress/test_ancestor_branch.py b/test_runner/regress/test_ancestor_branch.py index 8cd49d480f..92c8ae9456 100644 --- a/test_runner/regress/test_ancestor_branch.py +++ b/test_runner/regress/test_ancestor_branch.py @@ -1,10 +1,14 @@ from __future__ import annotations +from typing import TYPE_CHECKING + from fixtures.common_types import TimelineId from fixtures.log_helper import log -from fixtures.neon_fixtures import NeonEnvBuilder from fixtures.utils import query_scalar +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnvBuilder + # # Create ancestor branches off the main branch. diff --git a/test_runner/regress/test_attach_tenant_config.py b/test_runner/regress/test_attach_tenant_config.py index 8568bec8b2..22dfcbda92 100644 --- a/test_runner/regress/test_attach_tenant_config.py +++ b/test_runner/regress/test_attach_tenant_config.py @@ -1,19 +1,23 @@ from __future__ import annotations -from collections.abc import Generator from dataclasses import dataclass +from typing import TYPE_CHECKING import pytest from fixtures.common_types import TenantId from fixtures.log_helper import log -from fixtures.neon_fixtures import ( - NeonEnv, - NeonEnvBuilder, -) -from fixtures.pageserver.http import PageserverHttpClient, TenantConfig from fixtures.remote_storage import LocalFsStorage, RemoteStorageKind from fixtures.utils import wait_until +if TYPE_CHECKING: + from collections.abc import Generator + + from fixtures.neon_fixtures import ( + NeonEnv, + NeonEnvBuilder, + ) + from fixtures.pageserver.http import PageserverHttpClient, TenantConfig + @pytest.fixture def positive_env(neon_env_builder: NeonEnvBuilder) -> NeonEnv: @@ -53,9 +57,9 @@ def negative_env(neon_env_builder: NeonEnvBuilder) -> Generator[NegativeTests, N yield NegativeTests(env, tenant_id, config_pre_detach) - assert tenant_id not in [ - TenantId(t["id"]) for t in ps_http.tenant_list() - ], "tenant should not be attached after negative test" + assert tenant_id not in [TenantId(t["id"]) for t in ps_http.tenant_list()], ( + "tenant should not be attached after negative test" + ) env.pageserver.allowed_errors.extend( [ @@ -214,9 +218,9 @@ def test_fully_custom_config(positive_env: NeonEnv): # also self-test that our fully_custom_config covers all of them initial_tenant_config = get_config(ps_http, env.initial_tenant) assert initial_tenant_config.tenant_specific_overrides == {} - assert set(initial_tenant_config.effective_config.keys()) == set( - fully_custom_config.keys() - ), "ensure we cover all config options" + assert set(initial_tenant_config.effective_config.keys()) == set(fully_custom_config.keys()), ( + "ensure we cover all config options" + ) # create a new tenant to test overrides (tenant_id, _) = env.create_tenant() @@ -237,17 +241,15 @@ def test_fully_custom_config(positive_env: NeonEnv): # some more self-validation: assert that none of the values in our # fully custom config are the same as the default values - assert set(our_tenant_config.effective_config.keys()) == set( - fully_custom_config.keys() - ), "ensure we cover all config options" - assert ( - { - k: initial_tenant_config.effective_config[k] - != our_tenant_config.effective_config[k] - for k in fully_custom_config.keys() - } - == {k: True for k in fully_custom_config.keys()} - ), "ensure our custom config has 
different values than the default config for all config options, so we know we overrode everything" + assert set(our_tenant_config.effective_config.keys()) == set(fully_custom_config.keys()), ( + "ensure we cover all config options" + ) + assert { + k: initial_tenant_config.effective_config[k] != our_tenant_config.effective_config[k] + for k in fully_custom_config.keys() + } == {k: True for k in fully_custom_config.keys()}, ( + "ensure our custom config has different values than the default config for all config options, so we know we overrode everything" + ) # ensure customizations survive reattach env.pageserver.tenant_detach(tenant_id) diff --git a/test_runner/regress/test_auth_broker.py b/test_runner/regress/test_auth_broker.py index 11dc7d56b5..bc3f220011 100644 --- a/test_runner/regress/test_auth_broker.py +++ b/test_runner/regress/test_auth_broker.py @@ -23,13 +23,13 @@ async def test_auth_broker_happy( # local proxy mock just echos back the request # check that we forward the correct data - assert ( - res["headers"]["authorization"] == f"Bearer {token.serialize()}" - ), "JWT should be forwarded" + assert res["headers"]["authorization"] == f"Bearer {token.serialize()}", ( + "JWT should be forwarded" + ) - assert ( - "anonymous" in res["headers"]["neon-connection-string"] - ), "conn string should be forwarded" + assert "anonymous" in res["headers"]["neon-connection-string"], ( + "conn string should be forwarded" + ) assert json.loads(res["body"]) == { "query": "foo", diff --git a/test_runner/regress/test_backpressure.py b/test_runner/regress/test_backpressure.py index c75419b786..da548721cf 100644 --- a/test_runner/regress/test_backpressure.py +++ b/test_runner/regress/test_backpressure.py @@ -3,11 +3,14 @@ from __future__ import annotations import threading import time from contextlib import closing, contextmanager +from typing import TYPE_CHECKING import psycopg2.extras import pytest from fixtures.log_helper import log -from fixtures.neon_fixtures import Endpoint, NeonEnvBuilder + +if TYPE_CHECKING: + from fixtures.neon_fixtures import Endpoint, NeonEnvBuilder pytest_plugins = "fixtures.neon_fixtures" diff --git a/test_runner/regress/test_bad_connection.py b/test_runner/regress/test_bad_connection.py index bfc5cb174e..d31c0c95d3 100644 --- a/test_runner/regress/test_bad_connection.py +++ b/test_runner/regress/test_bad_connection.py @@ -2,13 +2,16 @@ from __future__ import annotations import random import time +from typing import TYPE_CHECKING import psycopg2.errors import pytest from fixtures.log_helper import log -from fixtures.neon_fixtures import NeonEnvBuilder from fixtures.utils import USE_LFC +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnvBuilder + @pytest.mark.timeout(600) def test_compute_pageserver_connection_stress(neon_env_builder: NeonEnvBuilder): diff --git a/test_runner/regress/test_basebackup_error.py b/test_runner/regress/test_basebackup_error.py index 2dd1a88ad7..7a21712ef9 100644 --- a/test_runner/regress/test_basebackup_error.py +++ b/test_runner/regress/test_basebackup_error.py @@ -1,7 +1,11 @@ from __future__ import annotations +from typing import TYPE_CHECKING + import pytest -from fixtures.neon_fixtures import NeonEnv + +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnv # diff --git a/test_runner/regress/test_branch_and_gc.py b/test_runner/regress/test_branch_and_gc.py index 0e28231a86..8447c9bf2d 100644 --- a/test_runner/regress/test_branch_and_gc.py +++ b/test_runner/regress/test_branch_and_gc.py @@ -2,14 +2,17 @@ from 
__future__ import annotations import threading import time +from typing import TYPE_CHECKING import pytest from fixtures.common_types import Lsn, TimelineId from fixtures.log_helper import log -from fixtures.neon_fixtures import NeonEnv from fixtures.pageserver.http import TimelineCreate406 from fixtures.utils import query_scalar, skip_in_debug_build +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnv + # Test the GC implementation when running with branching. # This test reproduces the issue https://github.com/neondatabase/neon/issues/707. @@ -58,9 +61,9 @@ def test_branch_and_gc(neon_simple_env: NeonEnv): # disable background GC "gc_period": "0s", # small checkpoint distance to create more delta layer files - "checkpoint_distance": f"{1024 ** 2}", + "checkpoint_distance": f"{1024**2}", # set the target size to be large to allow the image layer to cover the whole key space - "compaction_target_size": f"{1024 ** 3}", + "compaction_target_size": f"{1024**3}", # tweak the default settings to allow quickly create image layers and L1 layers "compaction_period": "1 s", "compaction_threshold": "2", @@ -134,9 +137,9 @@ def test_branch_creation_before_gc(neon_simple_env: NeonEnv): # disable background GC "gc_period": "0s", # small checkpoint distance to create more delta layer files - "checkpoint_distance": f"{1024 ** 2}", + "checkpoint_distance": f"{1024**2}", # set the target size to be large to allow the image layer to cover the whole key space - "compaction_target_size": f"{1024 ** 3}", + "compaction_target_size": f"{1024**3}", # tweak the default settings to allow quickly create image layers and L1 layers "compaction_period": "1 s", "compaction_threshold": "2", diff --git a/test_runner/regress/test_branch_behind.py b/test_runner/regress/test_branch_behind.py index 619fc15aa3..34680cffce 100644 --- a/test_runner/regress/test_branch_behind.py +++ b/test_runner/regress/test_branch_behind.py @@ -1,12 +1,16 @@ from __future__ import annotations +from typing import TYPE_CHECKING + import pytest from fixtures.common_types import Lsn, TimelineId from fixtures.log_helper import log -from fixtures.neon_fixtures import NeonEnvBuilder from fixtures.pageserver.http import TimelineCreate406 from fixtures.utils import print_gc_result, query_scalar +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnvBuilder + # # Create a couple of branches off the main branch, at a historical point in time. 
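The import moves in these test modules all follow the flake8-type-checking pattern (ruff's TC rule group): an import used only in type annotations is relocated under an `if TYPE_CHECKING:` guard. This is safe here because each touched module starts with `from __future__ import annotations`, which turns every annotation into a lazily evaluated string. A minimal sketch of the resulting shape, using a fixture class that appears in this diff (the test body is illustrative):

    from __future__ import annotations

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # Resolved by type checkers only; never imported at runtime,
        # which trims import time and avoids import cycles.
        from fixtures.neon_fixtures import NeonEnvBuilder

    def test_example(neon_env_builder: NeonEnvBuilder):
        # At runtime the annotation above is just the string
        # "NeonEnvBuilder", so the guarded import suffices.
        ...

The cast(str, ...) -> cast("str", ...) hunks come from the same rule family: quoting the type argument keeps typing.cast from evaluating it at runtime, which is the form the TC rules prefer.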
diff --git a/test_runner/regress/test_branching.py b/test_runner/regress/test_branching.py index 85d0cfbf1d..9ce618b2ad 100644 --- a/test_runner/regress/test_branching.py +++ b/test_runner/regress/test_branching.py @@ -4,16 +4,11 @@ import random import threading import time from concurrent.futures import ThreadPoolExecutor +from typing import TYPE_CHECKING import pytest from fixtures.common_types import Lsn, TimelineId from fixtures.log_helper import log -from fixtures.neon_fixtures import ( - Endpoint, - NeonEnv, - NeonEnvBuilder, - PgBin, -) from fixtures.pageserver.http import PageserverApiException from fixtures.pageserver.utils import wait_until_tenant_active from fixtures.utils import query_scalar @@ -21,6 +16,14 @@ from performance.test_perf_pgbench import get_scales_matrix from requests import RequestException from requests.exceptions import RetryError +if TYPE_CHECKING: + from fixtures.neon_fixtures import ( + Endpoint, + NeonEnv, + NeonEnvBuilder, + PgBin, + ) + # Test branch creation # @@ -43,9 +46,9 @@ def test_branching_with_pgbench( tenant, _ = env.create_tenant( conf={ "gc_period": "5 s", - "gc_horizon": f"{1024 ** 2}", - "checkpoint_distance": f"{1024 ** 2}", - "compaction_target_size": f"{1024 ** 2}", + "gc_horizon": f"{1024**2}", + "checkpoint_distance": f"{1024**2}", + "compaction_target_size": f"{1024**2}", # set PITR interval to be small, so we can do GC "pitr_interval": "5 s", } diff --git a/test_runner/regress/test_broken_timeline.py b/test_runner/regress/test_broken_timeline.py index d49686b57c..1209b3a818 100644 --- a/test_runner/regress/test_broken_timeline.py +++ b/test_runner/regress/test_broken_timeline.py @@ -124,14 +124,14 @@ def test_timeline_init_break_before_checkpoint(neon_env_builder: NeonEnvBuilder) # Creating the timeline didn't finish. The other timelines on tenant should still be present and work normally. new_tenant_timelines = env.neon_cli.timeline_list(tenant_id) - assert ( - new_tenant_timelines == old_tenant_timelines - ), f"Pageserver after restart should ignore non-initialized timelines for tenant {tenant_id}" + assert new_tenant_timelines == old_tenant_timelines, ( + f"Pageserver after restart should ignore non-initialized timelines for tenant {tenant_id}" + ) timeline_dirs = [d for d in timelines_dir.iterdir()] - assert ( - timeline_dirs == initial_timeline_dirs - ), "pageserver should clean its temp timeline files on timeline creation failure" + assert timeline_dirs == initial_timeline_dirs, ( + "pageserver should clean its temp timeline files on timeline creation failure" + ) # The "exit" case is for a reproducer of issue 6007: an unclean shutdown where we can't do local fs cleanups @@ -176,14 +176,14 @@ def test_timeline_init_break_before_checkpoint_recreate( # Creating the timeline didn't finish. The other timelines on tenant should still be present and work normally. 
new_tenant_timelines = env.neon_cli.timeline_list(tenant_id) - assert ( - new_tenant_timelines == old_tenant_timelines - ), f"Pageserver after restart should ignore non-initialized timelines for tenant {tenant_id}" + assert new_tenant_timelines == old_tenant_timelines, ( + f"Pageserver after restart should ignore non-initialized timelines for tenant {tenant_id}" + ) timeline_dirs = [d for d in timelines_dir.iterdir()] - assert ( - timeline_dirs == initial_timeline_dirs - ), "pageserver should clean its temp timeline files on timeline creation failure" + assert timeline_dirs == initial_timeline_dirs, ( + "pageserver should clean its temp timeline files on timeline creation failure" + ) # creating the branch should have worked now new_timeline_id = TimelineId( @@ -211,11 +211,11 @@ def test_timeline_create_break_after_dir_creation(neon_env_builder: NeonEnvBuild # Creating the timeline didn't finish. The other timelines on tenant should still be present and work normally. # "New" timeline is not present in the list, allowing pageserver to retry the same request new_tenant_timelines = env.neon_cli.timeline_list(tenant_id) - assert ( - new_tenant_timelines == old_tenant_timelines - ), f"Pageserver after restart should ignore non-initialized timelines for tenant {tenant_id}" + assert new_tenant_timelines == old_tenant_timelines, ( + f"Pageserver after restart should ignore non-initialized timelines for tenant {tenant_id}" + ) timeline_dirs = [d for d in timelines_dir.iterdir()] - assert ( - timeline_dirs == initial_timeline_dirs - ), "pageserver should clean its temp timeline files on timeline creation failure" + assert timeline_dirs == initial_timeline_dirs, ( + "pageserver should clean its temp timeline files on timeline creation failure" + ) diff --git a/test_runner/regress/test_build_info_metric.py b/test_runner/regress/test_build_info_metric.py index 9a8744571a..200662efd2 100644 --- a/test_runner/regress/test_build_info_metric.py +++ b/test_runner/regress/test_build_info_metric.py @@ -1,7 +1,11 @@ from __future__ import annotations +from typing import TYPE_CHECKING + from fixtures.metrics import parse_metrics -from fixtures.neon_fixtures import NeonEnvBuilder, NeonProxy + +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnvBuilder, NeonProxy def test_build_info_metric(neon_env_builder: NeonEnvBuilder, link_proxy: NeonProxy): diff --git a/test_runner/regress/test_change_pageserver.py b/test_runner/regress/test_change_pageserver.py index 7c1f78e476..b004db310c 100644 --- a/test_runner/regress/test_change_pageserver.py +++ b/test_runner/regress/test_change_pageserver.py @@ -1,11 +1,14 @@ from __future__ import annotations import asyncio +from typing import TYPE_CHECKING from fixtures.log_helper import log -from fixtures.neon_fixtures import NeonEnvBuilder from fixtures.remote_storage import RemoteStorageKind +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnvBuilder + def test_change_pageserver(neon_env_builder: NeonEnvBuilder): """ diff --git a/test_runner/regress/test_clog_truncate.py b/test_runner/regress/test_clog_truncate.py index 2ae38e6d88..1780ceb203 100644 --- a/test_runner/regress/test_clog_truncate.py +++ b/test_runner/regress/test_clog_truncate.py @@ -1,11 +1,14 @@ from __future__ import annotations import os +from typing import TYPE_CHECKING from fixtures.log_helper import log -from fixtures.neon_fixtures import NeonEnvBuilder from fixtures.utils import query_scalar, wait_until +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnvBuilder + # # 
Test compute node start after clog truncation diff --git a/test_runner/regress/test_close_fds.py b/test_runner/regress/test_close_fds.py index c0bf7d2462..6911fbf5df 100644 --- a/test_runner/regress/test_close_fds.py +++ b/test_runner/regress/test_close_fds.py @@ -6,9 +6,12 @@ import subprocess import threading import time from contextlib import closing +from typing import TYPE_CHECKING from fixtures.log_helper import log -from fixtures.neon_fixtures import NeonEnv + +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnv def lsof_path() -> str: diff --git a/test_runner/regress/test_compaction.py b/test_runner/regress/test_compaction.py index c8cce7a4e7..e5f5b80d2d 100644 --- a/test_runner/regress/test_compaction.py +++ b/test_runner/regress/test_compaction.py @@ -177,7 +177,7 @@ def test_pageserver_gc_compaction_smoke(neon_env_builder: NeonEnvBuilder, with_b "compaction_period": "5s", # No PiTR interval and small GC horizon "pitr_interval": "0s", - "gc_horizon": f"{1024 ** 2}", + "gc_horizon": f"{1024**2}", "lsn_lease_length": "0s", } @@ -867,7 +867,7 @@ def test_image_layer_compression(neon_env_builder: NeonEnvBuilder, enabled: bool ) assert bytes_in is not None assert bytes_out is not None - log.info(f"Compression ratio: {bytes_out/bytes_in} ({bytes_out} in, {bytes_out} out)") + log.info(f"Compression ratio: {bytes_out / bytes_in} ({bytes_out} in, {bytes_out} out)") if enabled: # We are writing high compressible repetitive plain text, expect excellent compression diff --git a/test_runner/regress/test_compatibility.py b/test_runner/regress/test_compatibility.py index c1508887ec..f61778e4c5 100644 --- a/test_runner/regress/test_compatibility.py +++ b/test_runner/regress/test_compatibility.py @@ -7,12 +7,12 @@ import subprocess import tempfile from dataclasses import dataclass from pathlib import Path +from typing import TYPE_CHECKING import fixtures.utils import pytest import toml from fixtures.common_types import TenantId, TimelineId -from fixtures.compute_reconfigure import ComputeReconfigure from fixtures.log_helper import log from fixtures.neon_fixtures import ( NeonEnv, @@ -28,6 +28,9 @@ from fixtures.pg_version import PgVersion from fixtures.remote_storage import RemoteStorageKind, S3Storage, s3_storage from fixtures.workload import Workload +if TYPE_CHECKING: + from fixtures.compute_reconfigure import ComputeReconfigure + # # A test suite that help to prevent unintentionally breaking backward or forward compatibility between Neon releases. # - `test_create_snapshot` a script wrapped in a test that creates a data snapshot. @@ -232,7 +235,9 @@ def test_backward_compatibility( else: raise - assert not breaking_changes_allowed, "Breaking changes are allowed by ALLOW_BACKWARD_COMPATIBILITY_BREAKAGE, but the test has passed without any breakage" + assert not breaking_changes_allowed, ( + "Breaking changes are allowed by ALLOW_BACKWARD_COMPATIBILITY_BREAKAGE, but the test has passed without any breakage" + ) @check_ondisk_data_compatibility_if_enabled @@ -260,12 +265,12 @@ def test_forward_compatibility( # Use previous version's production binaries (pageserver, safekeeper, pg_distrib_dir, etc.). # But always use the current version's neon_local binary. # This is because we want to test the compatibility of the data format, not the compatibility of the neon_local CLI. 
- assert ( - neon_env_builder.compatibility_neon_binpath is not None - ), "the environment variable COMPATIBILITY_NEON_BIN is required" - assert ( - neon_env_builder.compatibility_pg_distrib_dir is not None - ), "the environment variable COMPATIBILITY_POSTGRES_DISTRIB_DIR is required" + assert neon_env_builder.compatibility_neon_binpath is not None, ( + "the environment variable COMPATIBILITY_NEON_BIN is required" + ) + assert neon_env_builder.compatibility_pg_distrib_dir is not None, ( + "the environment variable COMPATIBILITY_POSTGRES_DISTRIB_DIR is required" + ) neon_env_builder.neon_binpath = neon_env_builder.compatibility_neon_binpath neon_env_builder.pg_distrib_dir = neon_env_builder.compatibility_pg_distrib_dir @@ -311,7 +316,9 @@ def test_forward_compatibility( else: raise - assert not breaking_changes_allowed, "Breaking changes are allowed by ALLOW_FORWARD_COMPATIBILITY_BREAKAGE, but the test has passed without any breakage" + assert not breaking_changes_allowed, ( + "Breaking changes are allowed by ALLOW_FORWARD_COMPATIBILITY_BREAKAGE, but the test has passed without any breakage" + ) def check_neon_works(env: NeonEnv, test_output_dir: Path, sql_dump_path: Path, repo_dir: Path): diff --git a/test_runner/regress/test_compute_catalog.py b/test_runner/regress/test_compute_catalog.py index 0d3618d1b8..c1f05830b7 100644 --- a/test_runner/regress/test_compute_catalog.py +++ b/test_runner/regress/test_compute_catalog.py @@ -103,22 +103,22 @@ def test_compute_catalog(neon_simple_env: NeonEnv): objects = client.dbs_and_roles() # Assert that 'cloud_admin' role exists in the 'roles' list - assert any( - role["name"] == "cloud_admin" for role in objects["roles"] - ), "The 'cloud_admin' role is missing" + assert any(role["name"] == "cloud_admin" for role in objects["roles"]), ( + "The 'cloud_admin' role is missing" + ) # Assert that 'postgres' database exists in the 'databases' list - assert any( - db["name"] == "postgres" for db in objects["databases"] - ), "The 'postgres' database is missing" + assert any(db["name"] == "postgres" for db in objects["databases"]), ( + "The 'postgres' database is missing" + ) # Check other databases for test_db in TEST_DB_NAMES: db = next((db for db in objects["databases"] if db["name"] == test_db["name"]), None) assert db is not None, f"The '{test_db['name']}' database is missing" - assert ( - db["owner"] == test_db["owner"] - ), f"The '{test_db['name']}' database has incorrect owner" + assert db["owner"] == test_db["owner"], ( + f"The '{test_db['name']}' database has incorrect owner" + ) ddl = client.database_schema(database=test_db["name"]) @@ -135,9 +135,9 @@ def test_compute_catalog(neon_simple_env: NeonEnv): client.database_schema(database="nonexistentdb") raise AssertionError("Expected HTTPError was not raised") except requests.exceptions.HTTPError as e: - assert ( - e.response.status_code == 404 - ), f"Expected 404 status code, but got {e.response.status_code}" + assert e.response.status_code == 404, ( + f"Expected 404 status code, but got {e.response.status_code}" + ) def test_compute_create_drop_dbs_and_roles(neon_simple_env: NeonEnv): diff --git a/test_runner/regress/test_compute_metrics.py b/test_runner/regress/test_compute_metrics.py index 85cd065a2f..5e3f8671a2 100644 --- a/test_runner/regress/test_compute_metrics.py +++ b/test_runner/regress/test_compute_metrics.py @@ -13,21 +13,21 @@ import _jsonnet import pytest import requests import yaml -from fixtures.endpoint.http import EndpointHttpClient from fixtures.log_helper import log from 
fixtures.metrics import parse_metrics from fixtures.paths import BASE_DIR, COMPUTE_CONFIG_DIR from fixtures.utils import wait_until -from prometheus_client.samples import Sample if TYPE_CHECKING: from collections.abc import Callable from types import TracebackType from typing import Self, TypedDict + from fixtures.endpoint.http import EndpointHttpClient from fixtures.neon_fixtures import NeonEnv from fixtures.pg_version import PgVersion from fixtures.port_distributor import PortDistributor + from prometheus_client.samples import Sample class Metric(TypedDict): metric_name: str diff --git a/test_runner/regress/test_compute_migrations.py b/test_runner/regress/test_compute_migrations.py index 0dbb187c39..dc555417b4 100644 --- a/test_runner/regress/test_compute_migrations.py +++ b/test_runner/regress/test_compute_migrations.py @@ -1,6 +1,5 @@ from __future__ import annotations -from pathlib import Path from typing import TYPE_CHECKING, cast import pytest @@ -9,6 +8,8 @@ from fixtures.metrics import parse_metrics from fixtures.utils import wait_until if TYPE_CHECKING: + from pathlib import Path + from fixtures.neon_fixtures import NeonEnv diff --git a/test_runner/regress/test_compute_reconfigure.py b/test_runner/regress/test_compute_reconfigure.py index 6619548811..ed453f3f8d 100644 --- a/test_runner/regress/test_compute_reconfigure.py +++ b/test_runner/regress/test_compute_reconfigure.py @@ -1,8 +1,12 @@ from __future__ import annotations -from fixtures.neon_fixtures import NeonEnv +from typing import TYPE_CHECKING + from fixtures.utils import wait_until +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnv + def test_compute_reconfigure(neon_simple_env: NeonEnv): """ diff --git a/test_runner/regress/test_config.py b/test_runner/regress/test_config.py index d48fd01fcb..7a0e4cb3d2 100644 --- a/test_runner/regress/test_config.py +++ b/test_runner/regress/test_config.py @@ -2,8 +2,10 @@ from __future__ import annotations import os from contextlib import closing +from typing import TYPE_CHECKING -from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder # diff --git a/test_runner/regress/test_createuser.py b/test_runner/regress/test_createuser.py index 236f4eb2fe..f136ac256f 100644 --- a/test_runner/regress/test_createuser.py +++ b/test_runner/regress/test_createuser.py @@ -1,8 +1,12 @@ from __future__ import annotations -from fixtures.neon_fixtures import NeonEnv +from typing import TYPE_CHECKING + from fixtures.utils import query_scalar +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnv + # # Test CREATE USER to check shared catalog restore diff --git a/test_runner/regress/test_ddl_forwarding.py b/test_runner/regress/test_ddl_forwarding.py index b10e38885e..9c924e9503 100644 --- a/test_runner/regress/test_ddl_forwarding.py +++ b/test_runner/regress/test_ddl_forwarding.py @@ -1,21 +1,21 @@ from __future__ import annotations -from types import TracebackType from typing import TYPE_CHECKING import psycopg2 import pytest from fixtures.log_helper import log -from fixtures.neon_fixtures import NeonEnv, VanillaPostgres from psycopg2.errors import UndefinedObject -from pytest_httpserver import HTTPServer -from werkzeug.wrappers.request import Request from werkzeug.wrappers.response import Response if TYPE_CHECKING: + from types import TracebackType from typing import Any, Self from fixtures.httpserver import ListenAddress + from fixtures.neon_fixtures import NeonEnv, VanillaPostgres + from 
pytest_httpserver import HTTPServer + from werkzeug.wrappers.request import Request def handle_db(dbs, roles, operation): diff --git a/test_runner/regress/test_disk_usage_eviction.py b/test_runner/regress/test_disk_usage_eviction.py index 7abcdb3838..b29610e021 100644 --- a/test_runner/regress/test_disk_usage_eviction.py +++ b/test_runner/regress/test_disk_usage_eviction.py @@ -3,7 +3,6 @@ from __future__ import annotations import enum import time from collections import Counter -from collections.abc import Iterable from dataclasses import dataclass from enum import StrEnum from typing import TYPE_CHECKING @@ -18,14 +17,16 @@ from fixtures.neon_fixtures import ( PgBin, wait_for_last_flush_lsn, ) -from fixtures.pageserver.http import PageserverHttpClient from fixtures.pageserver.utils import wait_for_upload_queue_empty from fixtures.remote_storage import RemoteStorageKind from fixtures.utils import human_bytes, wait_until if TYPE_CHECKING: + from collections.abc import Iterable from typing import Any + from fixtures.pageserver.http import PageserverHttpClient + GLOBAL_LRU_LOG_LINE = "tenant_min_resident_size-respecting LRU would not relieve pressure, evicting more following global LRU policy" @@ -323,9 +324,9 @@ def finish_tenant_creation( layers = pageserver_http.layer_map_info(tenant_id, timeline_id) # log.info(f"{layers}") - assert ( - len(layers.historic_layers) >= min_expected_layers - ), "evictions happen at layer granularity, but we often assert at byte-granularity" + assert len(layers.historic_layers) >= min_expected_layers, ( + "evictions happen at layer granularity, but we often assert at byte-granularity" + ) return pgbench_init_lsn @@ -421,9 +422,9 @@ def test_pageserver_evicts_until_pressure_is_relieved( assert 0 <= actual_change, "nothing can load layers during this test" assert actual_change >= target, "must evict more than half" - assert ( - response["Finished"]["assumed"]["projected_after"]["freed_bytes"] >= actual_change - ), "report accurately evicted bytes" + assert response["Finished"]["assumed"]["projected_after"]["freed_bytes"] >= actual_change, ( + "report accurately evicted bytes" + ) assert response["Finished"]["assumed"]["failed"]["count"] == 0, "zero failures expected" @@ -448,18 +449,18 @@ def test_pageserver_respects_overridden_resident_size( large_tenant = max(du_by_timeline, key=du_by_timeline.__getitem__) small_tenant = min(du_by_timeline, key=du_by_timeline.__getitem__) assert du_by_timeline[large_tenant] > du_by_timeline[small_tenant] - assert ( - du_by_timeline[large_tenant] - du_by_timeline[small_tenant] > 5 * env.layer_size - ), "ensure this test will do more than 1 eviction" + assert du_by_timeline[large_tenant] - du_by_timeline[small_tenant] > 5 * env.layer_size, ( + "ensure this test will do more than 1 eviction" + ) # Give the larger tenant a haircut while preventing the smaller tenant from getting one. # To prevent the smaller from getting a haircut, we set min_resident_size to its current size. # To ensure the larger tenant is getting a haircut, any non-zero `target` will do. 
min_resident_size = du_by_timeline[small_tenant] target = 1 - assert ( - du_by_timeline[large_tenant] > min_resident_size - ), "ensure the larger tenant will get a haircut" + assert du_by_timeline[large_tenant] > min_resident_size, ( + "ensure the larger tenant will get a haircut" + ) env.neon_env.storage_controller.pageserver_api().update_tenant_config( small_tenant[0], {"min_resident_size_override": min_resident_size} ) @@ -490,17 +491,17 @@ def test_pageserver_respects_overridden_resident_size( actual_change = total_on_disk - later_total_on_disk assert 0 <= actual_change, "nothing can load layers during this test" assert actual_change >= target, "eviction must always evict more than target" - assert ( - response["Finished"]["assumed"]["projected_after"]["freed_bytes"] >= actual_change - ), "report accurately evicted bytes" + assert response["Finished"]["assumed"]["projected_after"]["freed_bytes"] >= actual_change, ( + "report accurately evicted bytes" + ) assert response["Finished"]["assumed"]["failed"]["count"] == 0, "zero failures expected" - assert ( - later_du_by_timeline[small_tenant] == du_by_timeline[small_tenant] - ), "small tenant sees no haircut" - assert ( - later_du_by_timeline[large_tenant] < du_by_timeline[large_tenant] - ), "large tenant gets a haircut" + assert later_du_by_timeline[small_tenant] == du_by_timeline[small_tenant], ( + "small tenant sees no haircut" + ) + assert later_du_by_timeline[large_tenant] < du_by_timeline[large_tenant], ( + "large tenant gets a haircut" + ) assert du_by_timeline[large_tenant] - later_du_by_timeline[large_tenant] >= target @@ -579,9 +580,9 @@ def test_partial_evict_tenant(eviction_env: EvictionEnv, order: EvictionOrder): later_du_by_timeline = env.du_by_timeline(env.pageserver) for tenant, later_tenant_usage in later_du_by_timeline.items(): - assert ( - later_tenant_usage < du_by_timeline[tenant] - ), "all tenants should have lost some layers" + assert later_tenant_usage < du_by_timeline[tenant], ( + "all tenants should have lost some layers" + ) # with relative order what matters is the amount of layers, with a # fudge factor of whether the eviction bothers tenants with highest @@ -665,9 +666,9 @@ def test_fast_growing_tenant(neon_env_builder: NeonEnvBuilder, pg_bin: PgBin, or ratio = after / originally ratios.append(ratio) - assert ( - len(ratios) == 4 - ), "rest of the assertions expect 3 + 1 timelines, ratios, scales, all in order" + assert len(ratios) == 4, ( + "rest of the assertions expect 3 + 1 timelines, ratios, scales, all in order" + ) log.info(f"{ratios}") if order == EvictionOrder.RELATIVE_ORDER_EQUAL: @@ -829,9 +830,9 @@ def test_statvfs_pressure_min_avail_bytes(eviction_env: EvictionEnv): def more_than_min_avail_bytes_freed(): post_eviction_total_size, _, _ = env.timelines_du(env.pageserver) - assert ( - total_size - post_eviction_total_size >= min_avail_bytes - ), f"we requested at least {min_avail_bytes} worth of free space" + assert total_size - post_eviction_total_size >= min_avail_bytes, ( + f"we requested at least {min_avail_bytes} worth of free space" + ) wait_until(more_than_min_avail_bytes_freed, timeout=5) @@ -878,6 +879,6 @@ def test_secondary_mode_eviction(eviction_env_ha: EvictionEnv): post_eviction_total_size, _, _ = env.timelines_du(ps_secondary) - assert ( - total_size - post_eviction_total_size >= evict_bytes - ), "we requested at least evict_bytes worth of free space" + assert total_size - post_eviction_total_size >= evict_bytes, ( + "we requested at least evict_bytes worth of free space" + ) diff 
--git a/test_runner/regress/test_download_extensions.py b/test_runner/regress/test_download_extensions.py index 30f8c65cbd..77babe12cd 100644 --- a/test_runner/regress/test_download_extensions.py +++ b/test_runner/regress/test_download_extensions.py @@ -3,17 +3,16 @@ from __future__ import annotations import os import shutil import tarfile -from pathlib import Path from typing import TYPE_CHECKING import pytest import zstandard from fixtures.log_helper import log from fixtures.metrics import parse_metrics -from pytest_httpserver import HTTPServer from werkzeug.wrappers.response import Response if TYPE_CHECKING: + from pathlib import Path from typing import Any from fixtures.httpserver import ListenAddress @@ -21,6 +20,7 @@ if TYPE_CHECKING: NeonEnvBuilder, ) from fixtures.pg_version import PgVersion + from pytest_httpserver import HTTPServer from werkzeug.wrappers.request import Request diff --git a/test_runner/regress/test_endpoint_crash.py b/test_runner/regress/test_endpoint_crash.py index 03bfd1cb8d..feeb4c8f90 100644 --- a/test_runner/regress/test_endpoint_crash.py +++ b/test_runner/regress/test_endpoint_crash.py @@ -1,10 +1,14 @@ from __future__ import annotations +from typing import TYPE_CHECKING + import pytest -from fixtures.neon_fixtures import NeonEnvBuilder from fixtures.pg_version import PgVersion from fixtures.utils import WITH_SANITIZERS, run_only_on_postgres +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnvBuilder + @pytest.mark.parametrize( "sql_func", diff --git a/test_runner/regress/test_explain_with_lfc_stats.py b/test_runner/regress/test_explain_with_lfc_stats.py index 382556fd7e..a44ffcc4b0 100644 --- a/test_runner/regress/test_explain_with_lfc_stats.py +++ b/test_runner/regress/test_explain_with_lfc_stats.py @@ -1,12 +1,15 @@ from __future__ import annotations from pathlib import Path +from typing import TYPE_CHECKING import pytest from fixtures.log_helper import log -from fixtures.neon_fixtures import NeonEnv from fixtures.utils import USE_LFC +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnv + @pytest.mark.skipif(not USE_LFC, reason="LFC is disabled, skipping") def test_explain_with_lfc_stats(neon_simple_env: NeonEnv): diff --git a/test_runner/regress/test_fsm_truncate.py b/test_runner/regress/test_fsm_truncate.py index 55a010f26a..c6a43bafe1 100644 --- a/test_runner/regress/test_fsm_truncate.py +++ b/test_runner/regress/test_fsm_truncate.py @@ -1,6 +1,9 @@ from __future__ import annotations -from fixtures.neon_fixtures import NeonEnvBuilder +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnvBuilder def test_fsm_truncate(neon_env_builder: NeonEnvBuilder): diff --git a/test_runner/regress/test_fullbackup.py b/test_runner/regress/test_fullbackup.py index 62d59528cf..2625ae212a 100644 --- a/test_runner/regress/test_fullbackup.py +++ b/test_runner/regress/test_fullbackup.py @@ -1,7 +1,7 @@ from __future__ import annotations import os -from pathlib import Path +from typing import TYPE_CHECKING from fixtures.common_types import Lsn from fixtures.log_helper import log @@ -10,9 +10,13 @@ from fixtures.neon_fixtures import ( PgBin, VanillaPostgres, ) -from fixtures.port_distributor import PortDistributor from fixtures.utils import query_scalar, subprocess_capture +if TYPE_CHECKING: + from pathlib import Path + + from fixtures.port_distributor import PortDistributor + num_rows = 1000 diff --git a/test_runner/regress/test_gc_aggressive.py b/test_runner/regress/test_gc_aggressive.py index 
97c38cf658..4c196a099b 100644 --- a/test_runner/regress/test_gc_aggressive.py +++ b/test_runner/regress/test_gc_aggressive.py @@ -3,8 +3,8 @@ from __future__ import annotations import asyncio import concurrent.futures import random +from typing import TYPE_CHECKING -from fixtures.common_types import TimelineId from fixtures.log_helper import log from fixtures.neon_fixtures import ( Endpoint, @@ -14,6 +14,9 @@ from fixtures.neon_fixtures import ( ) from fixtures.remote_storage import RemoteStorageKind +if TYPE_CHECKING: + from fixtures.common_types import TimelineId + # Test configuration # # Create a table with {NUM_ROWS} rows, and perform {UPDATES_TO_PERFORM} random diff --git a/test_runner/regress/test_import.py b/test_runner/regress/test_import.py index 743fa72aba..55737c35f0 100644 --- a/test_runner/regress/test_import.py +++ b/test_runner/regress/test_import.py @@ -6,17 +6,11 @@ import re import shutil import tarfile from contextlib import closing -from pathlib import Path +from typing import TYPE_CHECKING import pytest from fixtures.common_types import Lsn, TenantId, TimelineId from fixtures.log_helper import log -from fixtures.neon_fixtures import ( - Endpoint, - NeonEnv, - NeonEnvBuilder, - PgBin, -) from fixtures.pageserver.utils import ( timeline_delete_wait_completed, wait_for_last_record_lsn, @@ -24,6 +18,16 @@ from fixtures.pageserver.utils import ( from fixtures.remote_storage import RemoteStorageKind from fixtures.utils import assert_pageserver_backups_equal, subprocess_capture +if TYPE_CHECKING: + from pathlib import Path + + from fixtures.neon_fixtures import ( + Endpoint, + NeonEnv, + NeonEnvBuilder, + PgBin, + ) + def test_import_from_vanilla(test_output_dir, pg_bin, vanilla_pg, neon_env_builder): # Put data in vanilla pg @@ -179,7 +183,7 @@ def test_import_from_pageserver_multisegment( logical_size = env.pageserver.http_client().timeline_detail(env.initial_tenant, timeline)[ "current_logical_size" ] - log.info(f"timeline logical size = {logical_size / (1024 ** 2)}MB") + log.info(f"timeline logical size = {logical_size / (1024**2)}MB") assert logical_size > 1024**3 # = 1GB tar_output_file = _import(num_rows, lsn, env, pg_bin, timeline, test_output_dir) diff --git a/test_runner/regress/test_import_pgdata.py b/test_runner/regress/test_import_pgdata.py index a3ef75ddb0..ca794f6685 100644 --- a/test_runner/regress/test_import_pgdata.py +++ b/test_runner/regress/test_import_pgdata.py @@ -287,9 +287,9 @@ def test_pgdata_import_smoke( shard_ps = env.get_pageserver(shard["node_id"]) result = shard_ps.timeline_scan_no_disposable_keys(shard["shard_id"], timeline_id) assert result.tally.disposable_count == 0 - assert ( - result.tally.not_disposable_count > 0 - ), "sanity check, each shard should have some data" + assert result.tally.not_disposable_count > 0, ( + "sanity check, each shard should have some data" + ) # # validate that we can write diff --git a/test_runner/regress/test_ingestion_layer_size.py b/test_runner/regress/test_ingestion_layer_size.py index 7e99d4b2f2..7ba044d5f3 100644 --- a/test_runner/regress/test_ingestion_layer_size.py +++ b/test_runner/regress/test_ingestion_layer_size.py @@ -1,13 +1,17 @@ from __future__ import annotations -from collections.abc import Iterable from dataclasses import dataclass +from typing import TYPE_CHECKING from fixtures.log_helper import log from fixtures.neon_fixtures import NeonEnvBuilder, wait_for_last_flush_lsn -from fixtures.pageserver.http import HistoricLayerInfo, LayerMapInfo from fixtures.utils import human_bytes, 
skip_in_debug_build +if TYPE_CHECKING: + from collections.abc import Iterable + + from fixtures.pageserver.http import HistoricLayerInfo, LayerMapInfo + @skip_in_debug_build("debug run is unnecessarily slow") def test_ingesting_large_batches_of_images(neon_env_builder: NeonEnvBuilder): @@ -27,9 +31,9 @@ def test_ingesting_large_batches_of_images(neon_env_builder: NeonEnvBuilder): # bucket lower limits buckets = [0, minimum_initdb_size, minimum_good_layer_size, minimum_too_large_layer_size] - assert ( - minimum_initdb_size < minimum_good_layer_size - ), "keep checkpoint_distance higher than the initdb size (find it by experimenting)" + assert minimum_initdb_size < minimum_good_layer_size, ( + "keep checkpoint_distance higher than the initdb size (find it by experimenting)" + ) env = neon_env_builder.init_start( initial_tenant_conf={ @@ -57,9 +61,9 @@ def test_ingesting_large_batches_of_images(neon_env_builder: NeonEnvBuilder): assert size is not None assert isinstance(size[0], int) log.info(f"gin index size: {human_bytes(size[0])}") - assert ( - size[0] > checkpoint_distance * 3 - ), f"gin index is not large enough: {human_bytes(size[0])}" + assert size[0] > checkpoint_distance * 3, ( + f"gin index is not large enough: {human_bytes(size[0])}" + ) wait_for_last_flush_lsn(env, ep, env.initial_tenant, env.initial_timeline) ps_http = env.pageserver.http_client() @@ -91,13 +95,13 @@ def test_ingesting_large_batches_of_images(neon_env_builder: NeonEnvBuilder): log.info("non-cumulative layer size distribution after compaction:") print_layer_size_histogram(post_compact) - assert ( - post_ingest.counts[3] == 0 - ), f"there should be no layers larger than 2*checkpoint_distance ({human_bytes(2*checkpoint_distance)})" + assert post_ingest.counts[3] == 0, ( + f"there should be no layers larger than 2*checkpoint_distance ({human_bytes(2 * checkpoint_distance)})" + ) assert post_ingest.counts[1] == 1, "expect one smaller layer for initdb" - assert ( - post_ingest.counts[0] <= 1 - ), "expect at most one tiny layer from shutting down the endpoint" + assert post_ingest.counts[0] <= 1, ( + "expect at most one tiny layer from shutting down the endpoint" + ) # just make sure we don't have trouble splitting the layers apart assert post_compact.counts[3] == 0 diff --git a/test_runner/regress/test_large_schema.py b/test_runner/regress/test_large_schema.py index ae5113ed45..7f56ef498b 100644 --- a/test_runner/regress/test_large_schema.py +++ b/test_runner/regress/test_large_schema.py @@ -2,9 +2,12 @@ from __future__ import annotations import os import time +from typing import TYPE_CHECKING from fixtures.log_helper import log -from fixtures.neon_fixtures import NeonEnvBuilder + +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnvBuilder # This test creates large number of tables which cause large catalog. 
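Most of the assertion hunks in this diff are mechanical output of a newer ruff formatter style: when `assert condition, message` exceeds the line length, the formatter now keeps the condition on the `assert` line and parenthesizes the message, instead of parenthesizing and splitting the condition. A before/after sketch with an illustrative assertion:

    # Old style: the condition is wrapped, the message trails on its own.
    assert (
        len(layers) >= min_expected
    ), f"expected at least {min_expected} layers, got {len(layers)}"

    # New style: the condition stays inline, the message wraps instead.
    assert len(layers) >= min_expected, (
        f"expected at least {min_expected} layers, got {len(layers)}"
    )

The two forms parse to the same AST, so none of these hunks alter behavior; only the line-breaking strategy changes.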
diff --git a/test_runner/regress/test_layer_eviction.py b/test_runner/regress/test_layer_eviction.py index 2eb38c49b2..2abe03a2e0 100644 --- a/test_runner/regress/test_layer_eviction.py +++ b/test_runner/regress/test_layer_eviction.py @@ -59,70 +59,72 @@ def test_basic_eviction(neon_env_builder: NeonEnvBuilder): (parse_layer_file_name(path.name), path) for path in env.pageserver.list_layers(tenant_id, timeline_id) ) - assert ( - len(initial_local_layers) > 1 - ), f"Should create multiple layers for timeline, but got {initial_local_layers}" + assert len(initial_local_layers) > 1, ( + f"Should create multiple layers for timeline, but got {initial_local_layers}" + ) # Compare layer map dump with the local layers, ensure everything's present locally and matches initial_layer_map_info = client.layer_map_info(tenant_id=tenant_id, timeline_id=timeline_id) - assert ( - not initial_layer_map_info.in_memory_layers - ), "Should have no in memory layers after flushing" - assert len(initial_local_layers) == len( - initial_layer_map_info.historic_layers - ), "Should have the same layers in memory and on disk" + assert not initial_layer_map_info.in_memory_layers, ( + "Should have no in memory layers after flushing" + ) + assert len(initial_local_layers) == len(initial_layer_map_info.historic_layers), ( + "Should have the same layers in memory and on disk" + ) for returned_layer in initial_layer_map_info.historic_layers: - assert ( - returned_layer.kind == "Delta" - ), f"Did not create and expect image layers, but got {returned_layer}" - assert ( - not returned_layer.remote - ), f"All created layers should be present locally, but got {returned_layer}" + assert returned_layer.kind == "Delta", ( + f"Did not create and expect image layers, but got {returned_layer}" + ) + assert not returned_layer.remote, ( + f"All created layers should be present locally, but got {returned_layer}" + ) returned_layer_name = parse_layer_file_name(returned_layer.layer_file_name) - assert ( - returned_layer_name in initial_local_layers - ), f"Did not find returned layer {returned_layer_name} in local layers {list(initial_local_layers.keys())}" + assert returned_layer_name in initial_local_layers, ( + f"Did not find returned layer {returned_layer_name} in local layers {list(initial_local_layers.keys())}" + ) local_layer_path = ( env.pageserver.timeline_dir(tenant_id, timeline_id) / initial_local_layers[returned_layer_name] ) - assert ( - returned_layer.layer_file_size == local_layer_path.stat().st_size - ), f"Returned layer {returned_layer} has a different file size than local layer {local_layer_path}" + assert returned_layer.layer_file_size == local_layer_path.stat().st_size, ( + f"Returned layer {returned_layer} has a different file size than local layer {local_layer_path}" + ) # Detach all layers, ensre they are not in the local FS, but are still dumped as part of the layer map for local_layer_name, local_layer_path in initial_local_layers.items(): client.evict_layer( tenant_id=tenant_id, timeline_id=timeline_id, layer_name=local_layer_path.name ) - assert not env.pageserver.layer_exists( - tenant_id, timeline_id, local_layer_name - ), f"Did not expect to find {local_layer_name} layer after evicting" + assert not env.pageserver.layer_exists(tenant_id, timeline_id, local_layer_name), ( + f"Did not expect to find {local_layer_name} layer after evicting" + ) empty_layers = env.pageserver.list_layers(tenant_id, timeline_id) - assert not empty_layers, f"After evicting all layers, timeline {tenant_id}/{timeline_id} should have no 
layers locally, but got: {empty_layers}" + assert not empty_layers, ( + f"After evicting all layers, timeline {tenant_id}/{timeline_id} should have no layers locally, but got: {empty_layers}" + ) evicted_layer_map_info = client.layer_map_info(tenant_id=tenant_id, timeline_id=timeline_id) - assert ( - not evicted_layer_map_info.in_memory_layers - ), "Should have no in memory layers after flushing and evicting" - assert len(initial_local_layers) == len( - evicted_layer_map_info.historic_layers - ), "Should have the same layers in memory and on disk initially" + assert not evicted_layer_map_info.in_memory_layers, ( + "Should have no in memory layers after flushing and evicting" + ) + assert len(initial_local_layers) == len(evicted_layer_map_info.historic_layers), ( + "Should have the same layers in memory and on disk initially" + ) for returned_layer in evicted_layer_map_info.historic_layers: - assert ( - returned_layer.kind == "Delta" - ), f"Did not create and expect image layers, but got {returned_layer}" - assert ( - returned_layer.remote - ), f"All layers should be evicted and not present locally, but got {returned_layer}" + assert returned_layer.kind == "Delta", ( + f"Did not create and expect image layers, but got {returned_layer}" + ) + assert returned_layer.remote, ( + f"All layers should be evicted and not present locally, but got {returned_layer}" + ) returned_layer_name = parse_layer_file_name(returned_layer.layer_file_name) - assert ( - returned_layer_name in initial_local_layers - ), f"Did not find returned layer {returned_layer} in local layers {initial_local_layers}" + assert returned_layer_name in initial_local_layers, ( + f"Did not find returned layer {returned_layer} in local layers {initial_local_layers}" + ) # redownload all evicted layers and ensure the initial state is restored for local_layer_name, _local_layer_path in initial_local_layers.items(): @@ -142,15 +144,15 @@ def test_basic_eviction(neon_env_builder: NeonEnvBuilder): (parse_layer_file_name(path.name), path) for path in env.pageserver.list_layers(tenant_id, timeline_id) ) - assert ( - redownloaded_layers == initial_local_layers - ), "Should have the same layers locally after redownloading the evicted layers" + assert redownloaded_layers == initial_local_layers, ( + "Should have the same layers locally after redownloading the evicted layers" + ) redownloaded_layer_map_info = client.layer_map_info( tenant_id=tenant_id, timeline_id=timeline_id ) - assert ( - redownloaded_layer_map_info == initial_layer_map_info - ), "Should have the same layer map after redownloading the evicted layers" + assert redownloaded_layer_map_info == initial_layer_map_info, ( + "Should have the same layer map after redownloading the evicted layers" + ) def test_gc_of_remote_layers(neon_env_builder: NeonEnvBuilder): @@ -266,9 +268,9 @@ def test_gc_of_remote_layers(neon_env_builder: NeonEnvBuilder): resident_physical_size_metric = ps_http.get_timeline_metric( tenant_id, timeline_id, "pageserver_resident_physical_size" ) - assert ( - resident_physical_size_metric == 0 - ), "ensure that resident_physical_size metric is zero" + assert resident_physical_size_metric == 0, ( + "ensure that resident_physical_size metric is zero" + ) assert resident_physical_size_metric == sum( layer.layer_file_size for layer in info.historic_layers if not layer.remote ), "ensure that resident_physical_size metric corresponds to layer map dump" diff --git a/test_runner/regress/test_layer_writers_fail.py b/test_runner/regress/test_layer_writers_fail.py index 
dd31e2725b..e07321e0ab 100644 --- a/test_runner/regress/test_layer_writers_fail.py +++ b/test_runner/regress/test_layer_writers_fail.py @@ -13,9 +13,9 @@ def test_image_layer_writer_fail_before_finish(neon_simple_env: NeonEnv): tenant_id, timeline_id = env.create_tenant( conf={ # small checkpoint distance to create more delta layer files - "checkpoint_distance": f"{1024 ** 2}", + "checkpoint_distance": f"{1024**2}", # set the target size to be large to allow the image layer to cover the whole key space - "compaction_target_size": f"{1024 ** 3}", + "compaction_target_size": f"{1024**3}", # tweak the default settings to allow quickly create image layers and L1 layers "compaction_period": "1 s", "compaction_threshold": "2", @@ -44,9 +44,9 @@ def test_image_layer_writer_fail_before_finish(neon_simple_env: NeonEnv): ) ) - assert ( - len(new_temp_layer_files) == 0 - ), "pageserver should clean its temporary new image layer files on failure" + assert len(new_temp_layer_files) == 0, ( + "pageserver should clean its temporary new image layer files on failure" + ) @pytest.mark.skip("See https://github.com/neondatabase/neon/issues/2703") @@ -57,9 +57,9 @@ def test_delta_layer_writer_fail_before_finish(neon_simple_env: NeonEnv): tenant_id, timeline_id = env.create_tenant( conf={ # small checkpoint distance to create more delta layer files - "checkpoint_distance": f"{1024 ** 2}", + "checkpoint_distance": f"{1024**2}", # set the target size to be large to allow the image layer to cover the whole key space - "compaction_target_size": f"{1024 ** 3}", + "compaction_target_size": f"{1024**3}", # tweak the default settings to allow quickly create image layers and L1 layers "compaction_period": "1 s", "compaction_threshold": "2", @@ -90,6 +90,6 @@ def test_delta_layer_writer_fail_before_finish(neon_simple_env: NeonEnv): ) ) - assert ( - len(new_temp_layer_files) == 0 - ), "pageserver should clean its temporary new delta layer files on failure" + assert len(new_temp_layer_files) == 0, ( + "pageserver should clean its temporary new delta layer files on failure" + ) diff --git a/test_runner/regress/test_layers_from_future.py b/test_runner/regress/test_layers_from_future.py index 872d3dc4cf..b4eba2779d 100644 --- a/test_runner/regress/test_layers_from_future.py +++ b/test_runner/regress/test_layers_from_future.py @@ -127,9 +127,9 @@ def test_issue_5878(neon_env_builder: NeonEnvBuilder, attach_mode: str): ip = get_index_part() assert len(ip.layer_metadata.keys()) - assert ( - ip.disk_consistent_lsn < last_record_lsn - ), "sanity check for what above loop is supposed to do" + assert ip.disk_consistent_lsn < last_record_lsn, ( + "sanity check for what above loop is supposed to do" + ) # create the image layer from the future env.storage_controller.pageserver_api().update_tenant_config( @@ -233,9 +233,9 @@ def test_issue_5878(neon_env_builder: NeonEnvBuilder, attach_mode: str): start = time.monotonic() while True: post_stat = future_layer_path.stat() - assert ( - pre_stat.st_mtime == post_stat.st_mtime - ), "observed PUT overtake the stucked DELETE => bug isn't fixed yet" + assert pre_stat.st_mtime == post_stat.st_mtime, ( + "observed PUT overtake the stucked DELETE => bug isn't fixed yet" + ) if time.monotonic() - start > max_race_opportunity_window: log.info( "a correct implementation would never let the later PUT overtake the earlier DELETE" diff --git a/test_runner/regress/test_lfc_prefetch.py b/test_runner/regress/test_lfc_prefetch.py index dd422d996e..27a5416eff 100644 --- 
+++ b/test_runner/regress/test_lfc_prefetch.py
@@ -1,12 +1,15 @@
 from __future__ import annotations
 
 import time
+from typing import TYPE_CHECKING
 
 import pytest
 from fixtures.log_helper import log
-from fixtures.neon_fixtures import NeonEnv
 from fixtures.utils import USE_LFC
 
+if TYPE_CHECKING:
+    from fixtures.neon_fixtures import NeonEnv
+
 
 @pytest.mark.timeout(600)
 @pytest.mark.skipif(not USE_LFC, reason="LFC is disabled, skipping")
diff --git a/test_runner/regress/test_lfc_resize.py b/test_runner/regress/test_lfc_resize.py
index ea7d38a3d9..51074751e0 100644
--- a/test_runner/regress/test_lfc_resize.py
+++ b/test_runner/regress/test_lfc_resize.py
@@ -5,12 +5,15 @@ import re
 import subprocess
 import threading
 import time
+from typing import TYPE_CHECKING
 
 import pytest
 from fixtures.log_helper import log
-from fixtures.neon_fixtures import NeonEnv, PgBin
 from fixtures.utils import USE_LFC
 
+if TYPE_CHECKING:
+    from fixtures.neon_fixtures import NeonEnv, PgBin
+
 
 @pytest.mark.timeout(600)
 @pytest.mark.skipif(not USE_LFC, reason="LFC is disabled, skipping")
diff --git a/test_runner/regress/test_lfc_working_set_approximation.py b/test_runner/regress/test_lfc_working_set_approximation.py
index ae0f26c69f..e422622167 100644
--- a/test_runner/regress/test_lfc_working_set_approximation.py
+++ b/test_runner/regress/test_lfc_working_set_approximation.py
@@ -2,12 +2,15 @@ from __future__ import annotations
 
 import time
 from pathlib import Path
+from typing import TYPE_CHECKING
 
 import pytest
 from fixtures.log_helper import log
-from fixtures.neon_fixtures import NeonEnv
 from fixtures.utils import USE_LFC, query_scalar
 
+if TYPE_CHECKING:
+    from fixtures.neon_fixtures import NeonEnv
+
 
 @pytest.mark.skipif(not USE_LFC, reason="LFC is disabled, skipping")
 def test_lfc_working_set_approximation(neon_simple_env: NeonEnv):
diff --git a/test_runner/regress/test_local_file_cache.py b/test_runner/regress/test_local_file_cache.py
index 52ee2f32a2..49d5c1916c 100644
--- a/test_runner/regress/test_local_file_cache.py
+++ b/test_runner/regress/test_local_file_cache.py
@@ -5,11 +5,14 @@ import queue
 import random
 import threading
 import time
+from typing import TYPE_CHECKING
 
 import pytest
-from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder
 from fixtures.utils import USE_LFC, query_scalar
 
+if TYPE_CHECKING:
+    from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder
+
 """
 Test whether LFC doesn't error out when the LRU is empty, but the LFC is
 already at its maximum size.
diff --git a/test_runner/regress/test_logging.py b/test_runner/regress/test_logging.py
index d94c786f49..9eafcf24bc 100644
--- a/test_runner/regress/test_logging.py
+++ b/test_runner/regress/test_logging.py
@@ -1,12 +1,15 @@
 from __future__ import annotations
 
 import uuid
+from typing import TYPE_CHECKING
 
 import pytest
 from fixtures.log_helper import log
-from fixtures.neon_fixtures import NeonEnvBuilder
 from fixtures.utils import run_only_on_default_postgres, wait_until
 
+if TYPE_CHECKING:
+    from fixtures.neon_fixtures import NeonEnvBuilder
+
 
 @pytest.mark.parametrize("level", ["trace", "debug", "info", "warn", "error"])
 @run_only_on_default_postgres("it does not use any postgres functionality")
diff --git a/test_runner/regress/test_lsn_mapping.py b/test_runner/regress/test_lsn_mapping.py
index e42e71646d..7280a91a12 100644
--- a/test_runner/regress/test_lsn_mapping.py
+++ b/test_runner/regress/test_lsn_mapping.py
@@ -39,9 +39,9 @@ def test_lsn_mapping(neon_env_builder: NeonEnvBuilder, with_lease: bool):
             # disable default GC and compaction
             "gc_period": "1000 m",
             "compaction_period": "0 s",
-            "gc_horizon": f"{1024 ** 2}",
-            "checkpoint_distance": f"{1024 ** 2}",
-            "compaction_target_size": f"{1024 ** 2}",
+            "gc_horizon": f"{1024**2}",
+            "checkpoint_distance": f"{1024**2}",
+            "compaction_target_size": f"{1024**2}",
         }
     )
diff --git a/test_runner/regress/test_nbtree_pagesplit_cycleid.py b/test_runner/regress/test_nbtree_pagesplit_cycleid.py
index 32ec6fcb92..f8e9a953bd 100644
--- a/test_runner/regress/test_nbtree_pagesplit_cycleid.py
+++ b/test_runner/regress/test_nbtree_pagesplit_cycleid.py
@@ -48,9 +48,9 @@ def test_nbtree_pagesplit_cycleid(neon_simple_env: NeonEnv):
 
     ses1.execute(BTREE_NUM_CYCLEID_PAGES)
     pages = ses1.fetchall()
-    assert (
-        len(pages) == 0
-    ), f"0 back splits with cycle ID expected, real {len(pages)} first {pages[0]}"
+    assert len(pages) == 0, (
+        f"0 back splits with cycle ID expected, real {len(pages)} first {pages[0]}"
+    )
     # Delete enough tuples to clear the first index page.
     # (there are up to 407 rows per 8KiB page; 406 for non-rightmost leafs.
ses1.execute("DELETE FROM t WHERE id <= 406;") @@ -119,9 +119,9 @@ def test_nbtree_pagesplit_cycleid(neon_simple_env: NeonEnv): # check that our expectations are correct ses1.execute(BTREE_NUM_CYCLEID_PAGES) pages = ses1.fetchall() - assert ( - len(pages) == 1 and pages[0][0] == 3 - ), f"3 page splits with cycle ID expected; actual {pages}" + assert len(pages) == 1 and pages[0][0] == 3, ( + f"3 page splits with cycle ID expected; actual {pages}" + ) # final cleanup ses3t.join() diff --git a/test_runner/regress/test_neon_cli.py b/test_runner/regress/test_neon_cli.py index 72db72f2b9..8bd0662ef8 100644 --- a/test_runner/regress/test_neon_cli.py +++ b/test_runner/regress/test_neon_cli.py @@ -1,8 +1,7 @@ from __future__ import annotations import subprocess -from pathlib import Path -from typing import cast +from typing import TYPE_CHECKING, cast import pytest import requests @@ -13,9 +12,13 @@ from fixtures.neon_fixtures import ( NeonEnvBuilder, parse_project_git_version_output, ) -from fixtures.pageserver.http import PageserverHttpClient from fixtures.utils import run_only_on_default_postgres, skip_in_debug_build +if TYPE_CHECKING: + from pathlib import Path + + from fixtures.pageserver.http import PageserverHttpClient + def helper_compare_timeline_list( pageserver_http_client: PageserverHttpClient, env: NeonEnv, initial_tenant: TenantId @@ -65,7 +68,7 @@ def test_cli_timeline_list(neon_simple_env: NeonEnv): def helper_compare_tenant_list(pageserver_http_client: PageserverHttpClient, env: NeonEnv): tenants = pageserver_http_client.tenant_list() - tenants_api = sorted(map(lambda t: cast(str, t["id"]), tenants)) + tenants_api = sorted(map(lambda t: cast("str", t["id"]), tenants)) res = env.neon_cli.tenant_list() tenants_cli = sorted(map(lambda t: t.split()[0], res.stdout.splitlines())) diff --git a/test_runner/regress/test_neon_extension.py b/test_runner/regress/test_neon_extension.py index 4035398a5f..e79ab458ca 100644 --- a/test_runner/regress/test_neon_extension.py +++ b/test_runner/regress/test_neon_extension.py @@ -2,9 +2,12 @@ from __future__ import annotations import time from contextlib import closing +from typing import TYPE_CHECKING from fixtures.log_helper import log -from fixtures.neon_fixtures import NeonEnvBuilder + +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnvBuilder # Verify that the neon extension is installed and has the correct version. diff --git a/test_runner/regress/test_neon_local_cli.py b/test_runner/regress/test_neon_local_cli.py index 8d9aab6848..00aeb6c4fe 100644 --- a/test_runner/regress/test_neon_local_cli.py +++ b/test_runner/regress/test_neon_local_cli.py @@ -1,9 +1,13 @@ from __future__ import annotations +from typing import TYPE_CHECKING + import pytest from fixtures.common_types import TimelineId -from fixtures.neon_fixtures import NeonEnvBuilder -from fixtures.port_distributor import PortDistributor + +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnvBuilder + from fixtures.port_distributor import PortDistributor # Test that neon cli is able to start and stop all processes with the user defaults. 
diff --git a/test_runner/regress/test_neon_superuser.py b/test_runner/regress/test_neon_superuser.py index 49cd91906f..f99d79e138 100644 --- a/test_runner/regress/test_neon_superuser.py +++ b/test_runner/regress/test_neon_superuser.py @@ -1,10 +1,14 @@ from __future__ import annotations +from typing import TYPE_CHECKING + from fixtures.log_helper import log -from fixtures.neon_fixtures import NeonEnv from fixtures.pg_version import PgVersion from fixtures.utils import wait_until +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnv + def test_neon_superuser(neon_simple_env: NeonEnv, pg_version: PgVersion): env = neon_simple_env diff --git a/test_runner/regress/test_next_xid.py b/test_runner/regress/test_next_xid.py index db8da51125..693dd628d7 100644 --- a/test_runner/regress/test_next_xid.py +++ b/test_runner/regress/test_next_xid.py @@ -2,7 +2,7 @@ from __future__ import annotations import os import time -from pathlib import Path +from typing import TYPE_CHECKING from fixtures.common_types import TenantId, TimelineId from fixtures.log_helper import log @@ -16,6 +16,9 @@ from fixtures.neon_fixtures import ( from fixtures.remote_storage import RemoteStorageKind from fixtures.utils import query_scalar +if TYPE_CHECKING: + from pathlib import Path + def test_next_xid(neon_env_builder: NeonEnvBuilder): env = neon_env_builder.init_start() diff --git a/test_runner/regress/test_normal_work.py b/test_runner/regress/test_normal_work.py index c8458b963e..44590ea4b9 100644 --- a/test_runner/regress/test_normal_work.py +++ b/test_runner/regress/test_normal_work.py @@ -1,9 +1,13 @@ from __future__ import annotations +from typing import TYPE_CHECKING + import pytest from fixtures.log_helper import log -from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder -from fixtures.pageserver.http import PageserverHttpClient + +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder + from fixtures.pageserver.http import PageserverHttpClient def check_tenant( diff --git a/test_runner/regress/test_oid_overflow.py b/test_runner/regress/test_oid_overflow.py index e2bde8be6f..76766a0754 100644 --- a/test_runner/regress/test_oid_overflow.py +++ b/test_runner/regress/test_oid_overflow.py @@ -1,7 +1,11 @@ from __future__ import annotations +from typing import TYPE_CHECKING + from fixtures.log_helper import log -from fixtures.neon_fixtures import NeonEnvBuilder + +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnvBuilder def test_oid_overflow(neon_env_builder: NeonEnvBuilder): @@ -32,7 +36,7 @@ def test_oid_overflow(neon_env_builder: NeonEnvBuilder): log.info(f"t1.relfilenode={oid}") cur.execute("set statement_timeout=0") - cur.execute(f"select test_consume_oids({oid-1})") + cur.execute(f"select test_consume_oids({oid - 1})") cur.execute("VACUUM FULL t2") cur.execute("SELECT relfilenode FROM pg_class where relname='t2'") diff --git a/test_runner/regress/test_old_request_lsn.py b/test_runner/regress/test_old_request_lsn.py index a615464582..8090077729 100644 --- a/test_runner/regress/test_old_request_lsn.py +++ b/test_runner/regress/test_old_request_lsn.py @@ -1,10 +1,14 @@ from __future__ import annotations +from typing import TYPE_CHECKING + from fixtures.common_types import TimelineId from fixtures.log_helper import log -from fixtures.neon_fixtures import NeonEnvBuilder from fixtures.utils import print_gc_result, query_scalar +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnvBuilder + # # Test where Postgres generates a lot of WAL, and it's garbage collected 
diff --git a/test_runner/regress/test_ondemand_download.py b/test_runner/regress/test_ondemand_download.py
index c344f30f4d..b292d08b60 100644
--- a/test_runner/regress/test_ondemand_download.py
+++ b/test_runner/regress/test_ondemand_download.py
@@ -61,12 +61,12 @@ def test_ondemand_download_large_rel(neon_env_builder: NeonEnvBuilder):
         initial_tenant_conf={
             # disable background GC
             "gc_period": "0s",
-            "gc_horizon": f"{10 * 1024 ** 3}",  # 10 GB
+            "gc_horizon": f"{10 * 1024**3}",  # 10 GB
             # small checkpoint distance to create more delta layer files
-            "checkpoint_distance": f"{10 * 1024 ** 2}",  # 10 MB
+            "checkpoint_distance": f"{10 * 1024**2}",  # 10 MB
             # allow compaction with the checkpoint
             "compaction_threshold": "3",
-            "compaction_target_size": f"{10 * 1024 ** 2}",  # 10 MB
+            "compaction_target_size": f"{10 * 1024**2}",  # 10 MB
             # but don't run compaction in background or on restart
             "compaction_period": "0s",
         }
@@ -160,10 +160,10 @@ def test_ondemand_download_timetravel(neon_env_builder: NeonEnvBuilder):
             "gc_period": "0s",
             "compaction_period": "0s",
             # small checkpoint distance to create more delta layer files
-            "checkpoint_distance": f"{1 * 1024 ** 2}",  # 1 MB
+            "checkpoint_distance": f"{1 * 1024**2}",  # 1 MB
             "compaction_threshold": "1",
             "image_creation_threshold": "1",
-            "compaction_target_size": f"{1 * 1024 ** 2}",  # 1 MB
+            "compaction_target_size": f"{1 * 1024**2}",  # 1 MB
         }
     )
     pageserver_http = env.pageserver.http_client()
@@ -334,10 +334,10 @@ def test_download_remote_layers_api(
             "gc_period": "0s",
             "compaction_period": "0s",
             # small checkpoint distance to create more delta layer files
-            "checkpoint_distance": f"{1 * 1024 ** 2}",  # 1 MB
+            "checkpoint_distance": f"{1 * 1024**2}",  # 1 MB
             "compaction_threshold": "999999",
             "image_creation_threshold": "999999",
-            "compaction_target_size": f"{1 * 1024 ** 2}",  # 1 MB
+            "compaction_target_size": f"{1 * 1024**2}",  # 1 MB
         }
     )
@@ -419,15 +419,15 @@ def test_download_remote_layers_api(
     ###### Phase 1: exercise download error code path
 
     this_time = get_api_current_physical_size()
-    assert (
-        filled_current_physical == this_time
-    ), "current_physical_size is sum of loaded layer sizes, independent of whether local or remote"
+    assert filled_current_physical == this_time, (
+        "current_physical_size is sum of loaded layer sizes, independent of whether local or remote"
+    )
 
     post_unlink_size = get_resident_physical_size()
     log.info(f"post_unlink_size: {post_unlink_size}")
-    assert (
-        post_unlink_size < filled_size
-    ), "we just deleted layers and didn't cause anything to re-download them yet"
+    assert post_unlink_size < filled_size, (
+        "we just deleted layers and didn't cause anything to re-download them yet"
+    )
 
     # issue downloads that we know will fail
     info = client.timeline_download_remote_layers(
@@ -449,9 +449,9 @@ def test_download_remote_layers_api(
         == info["successful_download_count"] + info["failed_download_count"]
     )
 
    assert get_api_current_physical_size() == filled_current_physical
-    assert (
-        get_resident_physical_size() == post_unlink_size
-    ), "didn't download anything new due to failpoint"
+    assert get_resident_physical_size() == post_unlink_size, (
+        "didn't download anything new due to failpoint"
+    )
 
     ##### Retry, this time without failpoints
     client.configure_failpoints(("remote-storage-download-pre-rename", "off"))
@@ -515,9 +515,9 @@ def test_compaction_downloads_on_demand_without_image_creation(neon_env_builder:
         m = pageserver_http.get_metrics()
         # these are global counters
         total_bytes = m.query_one("pageserver_remote_ondemand_downloaded_bytes_total").value
-        assert (
-            total_bytes < 2**53 and total_bytes.is_integer()
-        ), "bytes should still be safe integer-in-f64"
+        assert total_bytes < 2**53 and total_bytes.is_integer(), (
+            "bytes should still be safe integer-in-f64"
+        )
         count = m.query_one("pageserver_remote_ondemand_downloaded_layers_total").value
         assert count < 2**53 and count.is_integer(), "count should still be safe integer-in-f64"
         return (int(total_bytes), int(count))
diff --git a/test_runner/regress/test_pageserver_api.py b/test_runner/regress/test_pageserver_api.py
index 17ffeca23b..7f9207047e 100644
--- a/test_runner/regress/test_pageserver_api.py
+++ b/test_runner/regress/test_pageserver_api.py
@@ -1,14 +1,18 @@
 from __future__ import annotations
 
+from typing import TYPE_CHECKING
+
 from fixtures.common_types import Lsn, TenantId, TimelineId
 from fixtures.neon_fixtures import (
     DEFAULT_BRANCH_NAME,
     NeonEnv,
     NeonEnvBuilder,
 )
-from fixtures.pageserver.http import PageserverHttpClient
 from fixtures.utils import run_only_on_default_postgres, wait_until
 
+if TYPE_CHECKING:
+    from fixtures.pageserver.http import PageserverHttpClient
+
 
 def check_client(env: NeonEnv, client: PageserverHttpClient):
     pg_version = env.pg_version
@@ -65,15 +69,15 @@ def test_pageserver_http_get_wal_receiver_not_found(neon_simple_env: NeonEnv):
             tenant_id=tenant_id, timeline_id=timeline_id, include_non_incremental_logical_size=True
         )
 
-        assert (
-            timeline_details.get("wal_source_connstr") is None
-        ), "Should not be able to connect to WAL streaming without PG compute node running"
-        assert (
-            timeline_details.get("last_received_msg_lsn") is None
-        ), "Should not be able to connect to WAL streaming without PG compute node running"
-        assert (
-            timeline_details.get("last_received_msg_ts") is None
-        ), "Should not be able to connect to WAL streaming without PG compute node running"
+        assert timeline_details.get("wal_source_connstr") is None, (
+            "Should not be able to connect to WAL streaming without PG compute node running"
+        )
+        assert timeline_details.get("last_received_msg_lsn") is None, (
+            "Should not be able to connect to WAL streaming without PG compute node running"
+        )
+        assert timeline_details.get("last_received_msg_ts") is None, (
+            "Should not be able to connect to WAL streaming without PG compute node running"
+        )
 
 
 def expect_updated_msg_lsn(
@@ -89,14 +93,14 @@ def expect_updated_msg_lsn(
     assert "last_received_msg_lsn" in timeline_details.keys()
     assert "last_received_msg_ts" in timeline_details.keys()
 
-    assert (
-        timeline_details["last_received_msg_lsn"] is not None
-    ), "the last received message's LSN is empty"
+    assert timeline_details["last_received_msg_lsn"] is not None, (
+        "the last received message's LSN is empty"
+    )
 
     last_msg_lsn = Lsn(timeline_details["last_received_msg_lsn"])
-    assert (
-        prev_msg_lsn is None or prev_msg_lsn < last_msg_lsn
-    ), f"the last received message's LSN {last_msg_lsn} hasn't been updated compared to the previous message's LSN {prev_msg_lsn}"
+    assert prev_msg_lsn is None or prev_msg_lsn < last_msg_lsn, (
+        f"the last received message's LSN {last_msg_lsn} hasn't been updated compared to the previous message's LSN {prev_msg_lsn}"
+    )
 
     return last_msg_lsn
diff --git a/test_runner/regress/test_pageserver_catchup.py b/test_runner/regress/test_pageserver_catchup.py
index 3567e05f81..f3d7cd5bdb 100644
--- a/test_runner/regress/test_pageserver_catchup.py
+++ b/test_runner/regress/test_pageserver_catchup.py
@@ -1,6 +1,9 @@
 from __future__ import annotations
 
-from fixtures.neon_fixtures import NeonEnvBuilder
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from fixtures.neon_fixtures import NeonEnvBuilder
 
 
 # Test safekeeper sync and pageserver catch up
diff --git a/test_runner/regress/test_pageserver_crash_consistency.py b/test_runner/regress/test_pageserver_crash_consistency.py
index e9eee2760e..2e943e5bd8 100644
--- a/test_runner/regress/test_pageserver_crash_consistency.py
+++ b/test_runner/regress/test_pageserver_crash_consistency.py
@@ -92,9 +92,9 @@ def test_local_only_layers_after_crash(neon_env_builder: NeonEnvBuilder, pg_bin:
     env.pageserver.start()
     wait_until_tenant_active(pageserver_http, tenant_id)
 
-    assert not env.pageserver.layer_exists(
-        tenant_id, timeline_id, l1_found
-    ), "partial compaction result should had been removed during startup"
+    assert not env.pageserver.layer_exists(tenant_id, timeline_id, l1_found), (
+        "partial compaction result should have been removed during startup"
+    )
 
     # wait for us to catch up again
     wait_for_last_record_lsn(pageserver_http, tenant_id, timeline_id, lsn)
diff --git a/test_runner/regress/test_pageserver_getpage_throttle.py b/test_runner/regress/test_pageserver_getpage_throttle.py
index 9644ebe3e2..c5d6650ca8 100644
--- a/test_runner/regress/test_pageserver_getpage_throttle.py
+++ b/test_runner/regress/test_pageserver_getpage_throttle.py
@@ -3,15 +3,18 @@ from __future__ import annotations
 import copy
 import json
 import uuid
+from typing import TYPE_CHECKING
 
 import pytest
 from anyio import Path
 from fixtures.common_types import TenantId, TimelineId
 from fixtures.log_helper import log
-from fixtures.neon_fixtures import NeonEnvBuilder, PgBin
 from fixtures.pg_version import PgVersion
 from fixtures.utils import wait_until
 
+if TYPE_CHECKING:
+    from fixtures.neon_fixtures import NeonEnvBuilder, PgBin
+
 
 def test_pageserver_getpage_throttle(neon_env_builder: NeonEnvBuilder, pg_bin: PgBin):
     env = neon_env_builder.init_start()
@@ -101,12 +104,12 @@ def test_pageserver_getpage_throttle(neon_env_builder: NeonEnvBuilder, pg_bin: P
     expect_ncompleted = duration_secs * rate_limit_rps
     delta_abs = abs(expect_ncompleted - actual_ncompleted)
     threshold = 0.05 * expect_ncompleted
-    assert (
-        threshold / rate_limit_rps < 0.1 * duration_secs
-    ), "test self-test: unrealistic expecations regarding precision in this test"
-    assert (
-        delta_abs < 0.05 * expect_ncompleted
-    ), "the throttling deviates more than 5percent from the expectation"
+    assert threshold / rate_limit_rps < 0.1 * duration_secs, (
+        "test self-test: unrealistic expectations regarding precision in this test"
+    )
+    assert delta_abs < 0.05 * expect_ncompleted, (
+        "the throttling deviates more than 5 percent from the expectation"
+    )
 
     log.info("validate that we logged the throttling")
 
@@ -127,14 +130,14 @@ def test_pageserver_getpage_throttle(neon_env_builder: NeonEnvBuilder, pg_bin: P
     actual_throttled_secs = actual_throttled_usecs / 1_000_000
 
     log.info("validate that the metric doesn't include throttle wait time")
-    assert (
-        duration_secs >= 10 * actual_smgr_query_seconds
-    ), "smgr metrics should not include throttle wait time"
+    assert duration_secs >= 10 * actual_smgr_query_seconds, (
+        "smgr metrics should not include throttle wait time"
+    )
 
     log.info("validate that the throttling wait time metrics is correct")
-    assert (
-        pytest.approx(actual_throttled_secs + actual_smgr_query_seconds, 0.1) == duration_secs
-    ), "most of the time in this test is spent throttled because the rate-limit's contribution to latency dominates"
+    assert pytest.approx(actual_throttled_secs + actual_smgr_query_seconds, 0.1) == duration_secs, (
+        "most of the time in this test is spent throttled because the rate-limit's contribution to latency dominates"
+    )
 
 
 throttle_config_with_field_fair_set = {
diff --git a/test_runner/regress/test_pageserver_layer_rolling.py b/test_runner/regress/test_pageserver_layer_rolling.py
index fcc465f90a..91c4ef521c 100644
--- a/test_runner/regress/test_pageserver_layer_rolling.py
+++ b/test_runner/regress/test_pageserver_layer_rolling.py
@@ -2,6 +2,7 @@ from __future__ import annotations
 
 import asyncio
 import time
+from typing import TYPE_CHECKING
 
 import psutil
 import pytest
@@ -12,10 +13,12 @@ from fixtures.neon_fixtures import (
     NeonEnvBuilder,
     tenant_get_shards,
 )
-from fixtures.pageserver.http import PageserverHttpClient
 from fixtures.pageserver.utils import wait_for_last_record_lsn, wait_for_upload
 from fixtures.utils import skip_in_debug_build, wait_until
 
+if TYPE_CHECKING:
+    from fixtures.pageserver.http import PageserverHttpClient
+
 TIMELINE_COUNT = 10
 ENTRIES_PER_TIMELINE = 10_000
 CHECKPOINT_TIMEOUT_SECONDS = 60
@@ -130,7 +133,7 @@ def test_pageserver_small_inmemory_layers(
     tenant_conf = {
         # Large `checkpoint_distance` effectively disables size
        # based checkpointing.
-        "checkpoint_distance": f"{2 * 1024 ** 3}",
+        "checkpoint_distance": f"{2 * 1024**3}",
         "checkpoint_timeout": f"{CHECKPOINT_TIMEOUT_SECONDS}s",
         "compaction_period": "1s",
     }
@@ -179,7 +182,7 @@ def test_idle_checkpoints(neon_env_builder: NeonEnvBuilder):
     tenant_conf = {
         # Large `checkpoint_distance` effectively disables size
         # based checkpointing.
-        "checkpoint_distance": f"{2 * 1024 ** 3}",
+        "checkpoint_distance": f"{2 * 1024**3}",
         "checkpoint_timeout": f"{CHECKPOINT_TIMEOUT_SECONDS}s",
         "compaction_period": "1s",
     }
@@ -279,9 +282,9 @@ def test_total_size_limit(neon_env_builder: NeonEnvBuilder):
         http_client = env.pageserver.http_client()
         initdb_lsn = Lsn(http_client.timeline_detail(tenant, timeline)["initdb_lsn"])
         this_timeline_ingested = last_flush_lsn - initdb_lsn
-        assert (
-            this_timeline_ingested < checkpoint_distance * 0.8
-        ), "this test is supposed to fill InMemoryLayer"
+        assert this_timeline_ingested < checkpoint_distance * 0.8, (
+            "this test is supposed to fill InMemoryLayer"
+        )
         total_bytes_ingested += this_timeline_ingested
 
     log.info(f"Ingested {total_bytes_ingested} bytes since initdb (vs max dirty {max_dirty_data})")
diff --git a/test_runner/regress/test_pageserver_metric_collection.py b/test_runner/regress/test_pageserver_metric_collection.py
index aedfdbd210..f80edced5c 100644
--- a/test_runner/regress/test_pageserver_metric_collection.py
+++ b/test_runner/regress/test_pageserver_metric_collection.py
@@ -5,7 +5,6 @@ import json
 import os
 import time
 from dataclasses import dataclass
-from pathlib import Path
 from queue import SimpleQueue
 from typing import TYPE_CHECKING
 
@@ -20,14 +19,15 @@ from fixtures.remote_storage import (
     RemoteStorageKind,
     remote_storage_to_toml_inline_table,
 )
-from pytest_httpserver import HTTPServer
-from werkzeug.wrappers.request import Request
 from werkzeug.wrappers.response import Response
 
 if TYPE_CHECKING:
+    from pathlib import Path
     from typing import Any
 
     from fixtures.httpserver import ListenAddress
+    from pytest_httpserver import HTTPServer
+    from werkzeug.wrappers.request import Request
 
 
 # TODO: collect all of the env setup *AFTER* removal of RemoteStorageKind.NOOP
@@ -287,9 +287,9 @@ def test_metric_collection_cleans_up_tempfile(
     initially = iterate_pageserver_workdir(env.pageserver.workdir, "last_consumption_metrics.json")
"last_consumption_metrics.json") - assert ( - len(initially.matching) == 2 - ), f"expecting actual file and tempfile, but not found: {initially.matching}" + assert len(initially.matching) == 2, ( + f"expecting actual file and tempfile, but not found: {initially.matching}" + ) uploads.put("ready") env.pageserver.start() @@ -308,9 +308,9 @@ def test_metric_collection_cleans_up_tempfile( # it is possible we shutdown the pageserver right at the correct time, so the old tempfile # is gone, but we also have a new one. only = set(["last_consumption_metrics.json"]) - assert ( - initially.matching.intersection(later.matching) == only - ), "only initial tempfile should had been removed" + assert initially.matching.intersection(later.matching) == only, ( + "only initial tempfile should had been removed" + ) assert initially.other.issuperset(later.other), "no other files should had been removed" @@ -497,9 +497,9 @@ class SyntheticSizeVerifier: def post_batch(self, parent): if self.prev is not None: # this is assuming no one goes and deletes the cache file - assert ( - self.value is not None - ), "after calculating first synthetic size, cached or more recent should be sent" + assert self.value is not None, ( + "after calculating first synthetic size, cached or more recent should be sent" + ) self.prev = self.value self.value = None diff --git a/test_runner/regress/test_pageserver_reconnect.py b/test_runner/regress/test_pageserver_reconnect.py index be63208428..775ab37021 100644 --- a/test_runner/regress/test_pageserver_reconnect.py +++ b/test_runner/regress/test_pageserver_reconnect.py @@ -3,10 +3,13 @@ from __future__ import annotations import threading import time from contextlib import closing +from typing import TYPE_CHECKING import psycopg2.errors from fixtures.log_helper import log -from fixtures.neon_fixtures import NeonEnv, PgBin + +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnv, PgBin # Test updating neon.pageserver_connstring setting on the fly. @@ -25,7 +28,7 @@ def test_pageserver_reconnect(neon_simple_env: NeonEnv, pg_bin: PgBin): def run_pgbench(connstr: str): log.info(f"Start a pgbench workload on pg {connstr}") pg_bin.run_capture(["pgbench", "-i", "-I", "dtGvp", f"-s{scale}", connstr]) - pg_bin.run_capture(["pgbench", f"-T{int(n_reconnects*timeout)}", connstr]) + pg_bin.run_capture(["pgbench", f"-T{int(n_reconnects * timeout)}", connstr]) thread = threading.Thread(target=run_pgbench, args=(endpoint.connstr(),), daemon=True) thread.start() diff --git a/test_runner/regress/test_pageserver_restart.py b/test_runner/regress/test_pageserver_restart.py index 21cb780c06..ee5efd9398 100644 --- a/test_runner/regress/test_pageserver_restart.py +++ b/test_runner/regress/test_pageserver_restart.py @@ -2,14 +2,17 @@ from __future__ import annotations import random from contextlib import closing +from typing import TYPE_CHECKING import psycopg2.errors as pgerr import pytest from fixtures.log_helper import log -from fixtures.neon_fixtures import NeonEnvBuilder from fixtures.remote_storage import s3_storage from fixtures.utils import skip_in_debug_build, wait_until +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnvBuilder + # Test restarting page server, while safekeeper and compute node keep # running. 
@@ -135,9 +138,9 @@ def test_pageserver_restart(neon_env_builder: NeonEnvBuilder): for phase, expectation in expectations: assert phase in values, f"No data for phase {phase}" sample = values[phase] - assert expectation( - sample.value, prev_value - ), f"Unexpected value for {phase}: {sample.value}" + assert expectation(sample.value, prev_value), ( + f"Unexpected value for {phase}: {sample.value}" + ) prev_value = sample.value # Startup is complete, this metric should exist but be zero diff --git a/test_runner/regress/test_pageserver_restarts_under_workload.py b/test_runner/regress/test_pageserver_restarts_under_workload.py index ec74e03f89..9f19c887a4 100644 --- a/test_runner/regress/test_pageserver_restarts_under_workload.py +++ b/test_runner/regress/test_pageserver_restarts_under_workload.py @@ -6,9 +6,12 @@ from __future__ import annotations import threading import time +from typing import TYPE_CHECKING from fixtures.log_helper import log -from fixtures.neon_fixtures import NeonEnv, PgBin + +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnv, PgBin # Test restarting page server, while safekeeper and compute node keep diff --git a/test_runner/regress/test_pageserver_secondary.py b/test_runner/regress/test_pageserver_secondary.py index 9fcd9865d7..3749df2229 100644 --- a/test_runner/regress/test_pageserver_secondary.py +++ b/test_runner/regress/test_pageserver_secondary.py @@ -4,7 +4,6 @@ import json import os import random import time -from pathlib import Path from typing import TYPE_CHECKING import pytest @@ -24,12 +23,14 @@ from fixtures.pageserver.utils import ( from fixtures.remote_storage import LocalFsStorage, RemoteStorageKind, S3Storage, s3_storage from fixtures.utils import run_only_on_default_postgres, skip_in_debug_build, wait_until from fixtures.workload import Workload -from werkzeug.wrappers.request import Request from werkzeug.wrappers.response import Response if TYPE_CHECKING: + from pathlib import Path from typing import Any + from werkzeug.wrappers.request import Request + # A tenant configuration that is convenient for generating uploads and deletions # without a large amount of postgres traffic. 
@@ -628,7 +629,7 @@ def test_secondary_downloads(neon_env_builder: NeonEnvBuilder): except: # On assertion failures, log some details to help with debugging heatmap = env.pageserver_remote_storage.heatmap_content(tenant_id) - log.warn(f"heatmap contents: {json.dumps(heatmap,indent=2)}") + log.warn(f"heatmap contents: {json.dumps(heatmap, indent=2)}") raise # Scrub the remote storage diff --git a/test_runner/regress/test_pg_query_cancellation.py b/test_runner/regress/test_pg_query_cancellation.py index d4ed7230fa..782595fad0 100644 --- a/test_runner/regress/test_pg_query_cancellation.py +++ b/test_runner/regress/test_pg_query_cancellation.py @@ -1,13 +1,16 @@ from __future__ import annotations from contextlib import closing +from typing import TYPE_CHECKING import pytest from fixtures.log_helper import log -from fixtures.neon_fixtures import Endpoint, NeonEnv, NeonPageserver -from fixtures.pageserver.http import PageserverHttpClient from psycopg2.errors import QueryCanceled +if TYPE_CHECKING: + from fixtures.neon_fixtures import Endpoint, NeonEnv, NeonPageserver + from fixtures.pageserver.http import PageserverHttpClient + CRITICAL_PG_PS_WAIT_FAILPOINTS: set[str] = { "ps::connection-start::pre-login", "ps::connection-start::startup-packet", diff --git a/test_runner/regress/test_pg_regress.py b/test_runner/regress/test_pg_regress.py index 1d9f385358..a3fae97327 100644 --- a/test_runner/regress/test_pg_regress.py +++ b/test_runner/regress/test_pg_regress.py @@ -4,7 +4,6 @@ from __future__ import annotations from concurrent.futures import ThreadPoolExecutor -from pathlib import Path from typing import TYPE_CHECKING, Any, cast import pytest @@ -21,6 +20,8 @@ from fixtures.remote_storage import s3_storage from fixtures.utils import skip_in_debug_build if TYPE_CHECKING: + from pathlib import Path + from fixtures.neon_fixtures import PgBin from pytest import CaptureFixture diff --git a/test_runner/regress/test_pg_waldump.py b/test_runner/regress/test_pg_waldump.py index c98d395451..3ece555a72 100644 --- a/test_runner/regress/test_pg_waldump.py +++ b/test_runner/regress/test_pg_waldump.py @@ -2,10 +2,13 @@ from __future__ import annotations import os import shutil +from typing import TYPE_CHECKING -from fixtures.neon_fixtures import NeonEnv, PgBin from fixtures.utils import subprocess_capture +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnv, PgBin + def check_wal_segment(pg_waldump_path: str, segment_path: str, test_output_dir): # use special --ignore option to ignore the validation checks in pg_waldump diff --git a/test_runner/regress/test_pitr_gc.py b/test_runner/regress/test_pitr_gc.py index d983d77e72..0e3e667844 100644 --- a/test_runner/regress/test_pitr_gc.py +++ b/test_runner/regress/test_pitr_gc.py @@ -1,10 +1,14 @@ from __future__ import annotations +from typing import TYPE_CHECKING + from fixtures.common_types import TimelineId from fixtures.log_helper import log -from fixtures.neon_fixtures import NeonEnvBuilder from fixtures.utils import print_gc_result, query_scalar +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnvBuilder + # # Check pitr_interval GC behavior. 
diff --git a/test_runner/regress/test_postgres_version.py b/test_runner/regress/test_postgres_version.py
index 5eb743809f..da5b3993a4 100644
--- a/test_runner/regress/test_postgres_version.py
+++ b/test_runner/regress/test_postgres_version.py
@@ -3,9 +3,11 @@ from __future__ import annotations
 import json
 import re
 from pathlib import Path
+from typing import TYPE_CHECKING
 
-from fixtures.neon_fixtures import PgBin
-from fixtures.pg_version import PgVersion
+if TYPE_CHECKING:
+    from fixtures.neon_fixtures import PgBin
+    from fixtures.pg_version import PgVersion
 
 
 def test_postgres_version(base_dir: Path, pg_bin: PgBin, pg_version: PgVersion):
@@ -32,8 +34,8 @@ def test_postgres_version(base_dir: Path, pg_bin: PgBin, pg_version: PgVersion):
     version = match.group("version")
     commit = match.group("commit")
 
-    assert (
-        pg_version.v_prefixed in expected_revisions
-    ), f"Released PostgreSQL version `{pg_version.v_prefixed}` doesn't exist in `vendor/revisions.json`, please update it if these changes are intentional"
+    assert pg_version.v_prefixed in expected_revisions, (
+        f"Released PostgreSQL version `{pg_version.v_prefixed}` doesn't exist in `vendor/revisions.json`, please update it if these changes are intentional"
+    )
 
     msg = f"Unexpected Postgres {pg_version} version: `{output}`, please update `vendor/revisions.json` if these changes are intentional"
     assert [version, commit] == expected_revisions[pg_version.v_prefixed], msg
diff --git a/test_runner/regress/test_prefetch_buffer_resize.py b/test_runner/regress/test_prefetch_buffer_resize.py
index 99fe80e621..299320e770 100644
--- a/test_runner/regress/test_prefetch_buffer_resize.py
+++ b/test_runner/regress/test_prefetch_buffer_resize.py
@@ -1,9 +1,12 @@
 from __future__ import annotations
 
 import random
+from typing import TYPE_CHECKING
 
 import pytest
-from fixtures.neon_fixtures import NeonEnvBuilder
+
+if TYPE_CHECKING:
+    from fixtures.neon_fixtures import NeonEnvBuilder
 
 
 @pytest.mark.parametrize("shard_count", [None, 4])
diff --git a/test_runner/regress/test_proxy.py b/test_runner/regress/test_proxy.py
index 3c7fd0b897..9860658ba5 100644
--- a/test_runner/regress/test_proxy.py
+++ b/test_runner/regress/test_proxy.py
@@ -648,6 +648,6 @@ def test_sql_over_http_connection_cancel(static_proxy: NeonProxy):
     assert res["rowCount"] == 1, "HTTP query should insert"
 
     res = static_proxy.http_query(query, [0, 1], user="http", password="http", expected_code=400)
-    assert (
-        "duplicate key value violates unique constraint" in res["message"]
-    ), "HTTP query should conflict"
+    assert "duplicate key value violates unique constraint" in res["message"], (
+        "HTTP query should conflict"
+    )
diff --git a/test_runner/regress/test_proxy_allowed_ips.py b/test_runner/regress/test_proxy_allowed_ips.py
index c59da8c6b0..7384326385 100644
--- a/test_runner/regress/test_proxy_allowed_ips.py
+++ b/test_runner/regress/test_proxy_allowed_ips.py
@@ -1,11 +1,15 @@
 from __future__ import annotations
 
+from typing import TYPE_CHECKING
+
 import psycopg2
 import pytest
-from fixtures.neon_fixtures import (
-    NeonProxy,
-    VanillaPostgres,
-)
+
+if TYPE_CHECKING:
+    from fixtures.neon_fixtures import (
+        NeonProxy,
+        VanillaPostgres,
+    )
 
 TABLE_NAME = "neon_control_plane.endpoints"
diff --git a/test_runner/regress/test_proxy_metric_collection.py b/test_runner/regress/test_proxy_metric_collection.py
index 5ff4a99c51..85d8a6daaa 100644
--- a/test_runner/regress/test_proxy_metric_collection.py
+++ b/test_runner/regress/test_proxy_metric_collection.py
@@ -1,7 +1,5 @@
 from __future__ import annotations
 
-from collections.abc import Iterator
-from pathlib import Path
 from typing import TYPE_CHECKING
 
 import pytest
@@ -11,13 +9,16 @@ from fixtures.neon_fixtures import (
     NeonProxy,
     VanillaPostgres,
 )
-from fixtures.port_distributor import PortDistributor
-from pytest_httpserver import HTTPServer
-from werkzeug.wrappers.request import Request
 from werkzeug.wrappers.response import Response
 
 if TYPE_CHECKING:
+    from collections.abc import Iterator
+    from pathlib import Path
+
     from fixtures.httpserver import ListenAddress
+    from fixtures.port_distributor import PortDistributor
+    from pytest_httpserver import HTTPServer
+    from werkzeug.wrappers.request import Request
 
 
 def proxy_metrics_handler(request: Request) -> Response:
diff --git a/test_runner/regress/test_proxy_websockets.py b/test_runner/regress/test_proxy_websockets.py
index f14317a39f..8a7c0288e0 100644
--- a/test_runner/regress/test_proxy_websockets.py
+++ b/test_runner/regress/test_proxy_websockets.py
@@ -2,14 +2,17 @@ from __future__ import annotations
 
 import asyncio
 import ssl
+from typing import TYPE_CHECKING
 
 import asyncpg
 import pytest
 import websocket_tunnel
 import websockets
 from fixtures.log_helper import log
-from fixtures.neon_fixtures import NeonProxy
-from fixtures.port_distributor import PortDistributor
+
+if TYPE_CHECKING:
+    from fixtures.neon_fixtures import NeonProxy
+    from fixtures.port_distributor import PortDistributor
 
 
 @pytest.mark.asyncio
@@ -84,9 +87,9 @@ async def test_websockets(static_proxy: NeonProxy):
         assert query_response[0:1] == b"D", "should be data row message"
         data_row_len = int.from_bytes(query_response[1:5], byteorder="big") + 1
         data_row, query_response = query_response[:data_row_len], query_response[data_row_len:]
-        assert (
-            data_row == b"D\x00\x00\x00\x0b\x00\x01\x00\x00\x00\x011"
-        ), "should contain 1 column with text value 1"
+        assert data_row == b"D\x00\x00\x00\x0b\x00\x01\x00\x00\x00\x011", (
+            "should contain 1 column with text value 1"
+        )
 
         assert query_response[0:1] == b"C", "should be command complete message"
         command_complete_len = int.from_bytes(query_response[1:5], byteorder="big") + 1
@@ -184,9 +187,9 @@ async def test_websockets_pipelined(static_proxy: NeonProxy):
         assert query_response[0:1] == b"D", "should be data row message"
         data_row_len = int.from_bytes(query_response[1:5], byteorder="big") + 1
         data_row, query_response = query_response[:data_row_len], query_response[data_row_len:]
-        assert (
-            data_row == b"D\x00\x00\x00\x0b\x00\x01\x00\x00\x00\x011"
-        ), "should contain 1 column with text value 1"
+        assert data_row == b"D\x00\x00\x00\x0b\x00\x01\x00\x00\x00\x011", (
+            "should contain 1 column with text value 1"
+        )
 
         assert query_response[0:1] == b"C", "should be command complete message"
         command_complete_len = int.from_bytes(query_response[1:5], byteorder="big") + 1
diff --git a/test_runner/regress/test_read_validation.py b/test_runner/regress/test_read_validation.py
index 70a7a675df..88655c1d41 100644
--- a/test_runner/regress/test_read_validation.py
+++ b/test_runner/regress/test_read_validation.py
@@ -1,12 +1,15 @@
 from __future__ import annotations
 
 from contextlib import closing
+from typing import TYPE_CHECKING
 
 from fixtures.log_helper import log
-from fixtures.neon_fixtures import NeonEnv
 from fixtures.utils import query_scalar
 from psycopg2.errors import IoError, UndefinedTable
 
+if TYPE_CHECKING:
+    from fixtures.neon_fixtures import NeonEnv
+
 pytest_plugins = "fixtures.neon_fixtures"
 
 extensions = ["pageinspect", "neon_test_utils", "pg_buffercache"]
diff --git a/test_runner/regress/test_readonly_node.py b/test_runner/regress/test_readonly_node.py
index fe970a868c..ee934a900d 100644
--- a/test_runner/regress/test_readonly_node.py
+++ b/test_runner/regress/test_readonly_node.py
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 import time
+from typing import TYPE_CHECKING
 
 import pytest
 from fixtures.common_types import Lsn, TenantId, TenantShardId, TimelineId
@@ -13,10 +14,12 @@ from fixtures.neon_fixtures import (
     last_flush_lsn_upload,
     tenant_get_shards,
 )
-from fixtures.pageserver.http import PageserverHttpClient
 from fixtures.pageserver.utils import wait_for_last_record_lsn
 from fixtures.utils import query_scalar, wait_until
 
+if TYPE_CHECKING:
+    from fixtures.pageserver.http import PageserverHttpClient
+
 
 #
 # Create read-only compute nodes, anchored at historical points in time.
@@ -207,9 +210,9 @@ def test_readonly_node_gc(neon_env_builder: NeonEnvBuilder):
 
     # Note: cannot assert on `layers_removed` here because it could be layers
     # not guarded by the lease. Instead, use layer map dump.
-    assert layers_guarded_before_gc.issubset(
-        layers_guarded_after_gc
-    ), "Layers guarded by lease before GC should not be removed"
+    assert layers_guarded_before_gc.issubset(layers_guarded_after_gc), (
+        "Layers guarded by lease before GC should not be removed"
+    )
     log.info(f"{gc_result=}")
diff --git a/test_runner/regress/test_recovery.py b/test_runner/regress/test_recovery.py
index dab01fcd1a..5e081a4fda 100644
--- a/test_runner/regress/test_recovery.py
+++ b/test_runner/regress/test_recovery.py
@@ -2,9 +2,12 @@ from __future__ import annotations
 
 import time
 from contextlib import closing
+from typing import TYPE_CHECKING
 
 from fixtures.log_helper import log
-from fixtures.neon_fixtures import NeonEnvBuilder
+
+if TYPE_CHECKING:
+    from fixtures.neon_fixtures import NeonEnvBuilder
 
 
 #
diff --git a/test_runner/regress/test_relations.py b/test_runner/regress/test_relations.py
index 07eacfc775..b2ddcb1c2e 100644
--- a/test_runner/regress/test_relations.py
+++ b/test_runner/regress/test_relations.py
@@ -1,8 +1,11 @@
 from __future__ import annotations
 
-from fixtures.neon_fixtures import (
-    NeonEnvBuilder,
-)
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from fixtures.neon_fixtures import (
+        NeonEnvBuilder,
+    )
 
 
 def test_pageserver_reldir_v2(
diff --git a/test_runner/regress/test_remote_storage.py b/test_runner/regress/test_remote_storage.py
index e8721f1ea0..298aae39ee 100644
--- a/test_runner/regress/test_remote_storage.py
+++ b/test_runner/regress/test_remote_storage.py
@@ -180,9 +180,9 @@ def test_remote_storage_backup_and_restore(
     # The initiated attach operation should survive the restart, and continue from where it was.
     env.pageserver.stop()
     layer_download_failed_regex = r"Failed to download a remote file: simulated failure of remote operation Download.*[0-9A-F]+-[0-9A-F]+"
-    assert not env.pageserver.log_contains(
-        layer_download_failed_regex
-    ), "we shouldn't have tried any layer downloads yet since list remote timelines has a failpoint"
+    assert not env.pageserver.log_contains(layer_download_failed_regex), (
+        "we shouldn't have tried any layer downloads yet since list remote timelines has a failpoint"
+    )
     env.pageserver.start()
 
     # The attach should have got far enough that it recovers on restart (i.e. tenant's
@@ -197,9 +197,9 @@ def test_remote_storage_backup_and_restore(
     detail = client.timeline_detail(tenant_id, timeline_id)
     log.info("Timeline detail after attach completed: %s", detail)
-    assert (
-        Lsn(detail["last_record_lsn"]) >= current_lsn
-    ), "current db Lsn should should not be less than the one stored on remote storage"
+    assert Lsn(detail["last_record_lsn"]) >= current_lsn, (
+        "current db Lsn should not be less than the one stored on remote storage"
+    )
 
     log.info("select some data, this will cause layers to be downloaded")
     endpoint = env.endpoints.create_start("main")
@@ -456,9 +456,9 @@ def test_remote_timeline_client_calls_started_metric(
     def ensure_calls_started_grew():
         for (file_kind, op_kind), observations in calls_started.items():
             log.info(f"ensure_calls_started_grew: {file_kind} {op_kind}: {observations}")
-            assert all(
-                x < y for x, y in zip(observations, observations[1:], strict=False)
-            ), f"observations for {file_kind} {op_kind} did not grow monotonically: {observations}"
+            assert all(x < y for x, y in zip(observations, observations[1:], strict=False)), (
+                f"observations for {file_kind} {op_kind} did not grow monotonically: {observations}"
+            )
 
     def churn(data_pass1, data_pass2):
         # overwrite the same data in place, vacuum inbetween, and
@@ -540,7 +540,7 @@ def test_timeline_deletion_with_files_stuck_in_upload_queue(
         "l0_flush_stall_threshold": "0",
         "compaction_target_size": f"{64 * 1024}",
         # large horizon to avoid automatic GC (our assert on gc_result below relies on that)
-        "gc_horizon": f"{1024 ** 4}",
+        "gc_horizon": f"{1024**4}",
         "gc_period": "1h",
         # disable PITR so that GC considers just gc_horizon
         "pitr_interval": "0s",
@@ -574,9 +574,9 @@ def test_timeline_deletion_with_files_stuck_in_upload_queue(
         try:
             client.timeline_checkpoint(tenant_id, timeline_id)
         except PageserverApiException:
-            assert (
-                checkpoint_allowed_to_fail.is_set()
-            ), "checkpoint op should only fail in response to timeline deletion"
+            assert checkpoint_allowed_to_fail.is_set(), (
+                "checkpoint op should only fail in response to timeline deletion"
+            )
 
     checkpoint_thread = threading.Thread(target=checkpoint_thread_fn)
     checkpoint_thread.start()
@@ -662,9 +662,9 @@ def test_empty_branch_remote_storage_upload(neon_env_builder: NeonEnvBuilder):
         )
     )
     expected_timelines = set([env.initial_timeline, new_branch_timeline_id])
-    assert (
-        timelines_before_detach == expected_timelines
-    ), f"Expected to have an initial timeline and the branch timeline only, but got {timelines_before_detach}"
+    assert timelines_before_detach == expected_timelines, (
+        f"Expected to have an initial timeline and the branch timeline only, but got {timelines_before_detach}"
+    )
 
     client.tenant_detach(env.initial_tenant)
     env.pageserver.tenant_attach(env.initial_tenant)
@@ -677,9 +677,9 @@ def test_empty_branch_remote_storage_upload(neon_env_builder: NeonEnvBuilder):
         )
     )
 
-    assert (
-        timelines_before_detach == timelines_after_detach
-    ), f"Expected to have same timelines after reattach, but got {timelines_after_detach}"
+    assert timelines_before_detach == timelines_after_detach, (
+        f"Expected to have same timelines after reattach, but got {timelines_after_detach}"
+    )
 
 
 def test_empty_branch_remote_storage_upload_on_restart(neon_env_builder: NeonEnvBuilder):
@@ -724,9 +724,9 @@ def test_empty_branch_remote_storage_upload_on_restart(neon_env_builder: NeonEnv
     new_branch_on_remote_storage = env.pageserver_remote_storage.timeline_path(
         env.initial_tenant, new_branch_timeline_id
    )
-    assert (
-        not new_branch_on_remote_storage.exists()
-    ), "failpoint should had prohibited index_part.json upload"
+    assert not new_branch_on_remote_storage.exists(), (
+        "failpoint should have prohibited index_part.json upload"
+    )
 
     # during reconciliation we should had scheduled the uploads and on the
     # retried create_timeline, we will await for those to complete on next
@@ -768,9 +768,9 @@ def test_empty_branch_remote_storage_upload_on_restart(neon_env_builder: NeonEnv
     client.configure_failpoints(("before-upload-index", "off"))
 
     exception = q.get()
-    assert (
-        exception is None
-    ), "create_timeline should have succeeded, because we deleted unuploaded local state"
+    assert exception is None, (
+        "create_timeline should have succeeded, because we deleted unuploaded local state"
+    )
 
     # this is because creating a timeline always awaits for the uploads to complete
     assert_nothing_to_upload(client, env.initial_tenant, new_branch_timeline_id)
diff --git a/test_runner/regress/test_s3_restore.py b/test_runner/regress/test_s3_restore.py
index 8764da3c2f..082808f9ff 100644
--- a/test_runner/regress/test_s3_restore.py
+++ b/test_runner/regress/test_s3_restore.py
@@ -2,13 +2,10 @@ from __future__ import annotations
 
 import time
 from datetime import UTC, datetime
+from typing import TYPE_CHECKING
 
 from fixtures.common_types import Lsn
 from fixtures.log_helper import log
-from fixtures.neon_fixtures import (
-    NeonEnvBuilder,
-    PgBin,
-)
 from fixtures.pageserver.utils import (
     assert_prefix_empty,
     enable_remote_storage_versioning,
@@ -18,6 +15,12 @@ from fixtures.pageserver.utils import (
 from fixtures.remote_storage import RemoteStorageKind, s3_storage
 from fixtures.utils import run_pg_bench_small
 
+if TYPE_CHECKING:
+    from fixtures.neon_fixtures import (
+        NeonEnvBuilder,
+        PgBin,
+    )
+
 
 def test_tenant_s3_restore(
     neon_env_builder: NeonEnvBuilder,
@@ -80,14 +83,14 @@ def test_tenant_s3_restore(
     ts_before_deletion = datetime.now(tz=UTC).replace(tzinfo=None)
     time.sleep(4)
 
-    assert (
-        ps_http.get_metric_value("pageserver_tenant_manager_slots", {"mode": "attached"}) == 1
-    ), "tenant removed before we deletion was issued"
+    assert ps_http.get_metric_value("pageserver_tenant_manager_slots", {"mode": "attached"}) == 1, (
+        "tenant removed before the deletion was issued"
+    )
     ps_http.tenant_delete(tenant_id)
     ps_http.deletion_queue_flush(execute=True)
-    assert (
-        ps_http.get_metric_value("pageserver_tenant_manager_slots", {"mode": "attached"}) == 0
-    ), "tenant removed before we deletion was issued"
+    assert ps_http.get_metric_value("pageserver_tenant_manager_slots", {"mode": "attached"}) == 0, (
+        "tenant removed before the deletion was issued"
+    )
     env.storage_controller.attach_hook_drop(tenant_id)
 
     tenant_path = env.pageserver.tenant_dir(tenant_id)
diff --git a/test_runner/regress/test_safekeeper_deletion.py b/test_runner/regress/test_safekeeper_deletion.py
index b46095d583..b681a86103 100644
--- a/test_runner/regress/test_safekeeper_deletion.py
+++ b/test_runner/regress/test_safekeeper_deletion.py
@@ -4,19 +4,22 @@ import threading
 import time
 from contextlib import closing
 from enum import StrEnum
+from typing import TYPE_CHECKING
 
 import pytest
 import requests
 from fixtures.common_types import Lsn, TimelineId
 from fixtures.log_helper import log
-from fixtures.neon_fixtures import (
-    Endpoint,
-    NeonEnvBuilder,
-)
 from fixtures.remote_storage import S3Storage, s3_storage
 from fixtures.safekeeper_utils import is_segment_offloaded
 from fixtures.utils import wait_until
 
+if TYPE_CHECKING:
+    from fixtures.neon_fixtures import (
Endpoint, + NeonEnvBuilder, + ) + @pytest.mark.parametrize("auth_enabled", [False, True]) def test_safekeeper_delete_timeline(neon_env_builder: NeonEnvBuilder, auth_enabled: bool): diff --git a/test_runner/regress/test_setup.py b/test_runner/regress/test_setup.py index dfbbd575b7..fd0f5705c8 100644 --- a/test_runner/regress/test_setup.py +++ b/test_runner/regress/test_setup.py @@ -2,7 +2,10 @@ from __future__ import annotations -from fixtures.neon_fixtures import NeonEnvBuilder +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnvBuilder # Test that pageserver and safekeeper can restart quickly. diff --git a/test_runner/regress/test_sharding.py b/test_runner/regress/test_sharding.py index c07aa0d40e..0bfc4b1d8c 100644 --- a/test_runner/regress/test_sharding.py +++ b/test_runner/regress/test_sharding.py @@ -8,7 +8,6 @@ from typing import TYPE_CHECKING, Any import pytest import requests from fixtures.common_types import Lsn, TenantId, TenantShardId, TimelineArchivalState, TimelineId -from fixtures.compute_reconfigure import ComputeReconfigure from fixtures.log_helper import log from fixtures.neon_fixtures import ( DEFAULT_AZ_ID, @@ -23,13 +22,14 @@ from fixtures.pageserver.utils import assert_prefix_empty, assert_prefix_not_emp from fixtures.remote_storage import LocalFsStorage, RemoteStorageKind, s3_storage from fixtures.utils import skip_in_debug_build, wait_until from fixtures.workload import Workload -from pytest_httpserver import HTTPServer from typing_extensions import override -from werkzeug.wrappers.request import Request from werkzeug.wrappers.response import Response if TYPE_CHECKING: + from fixtures.compute_reconfigure import ComputeReconfigure from fixtures.httpserver import ListenAddress + from pytest_httpserver import HTTPServer + from werkzeug.wrappers.request import Request def test_sharding_smoke( @@ -334,9 +334,9 @@ def test_sharding_split_compaction( result = ps.timeline_scan_no_disposable_keys(shard, timeline_id) tally = result.tally raw_page_count = tally.not_disposable_count + tally.disposable_count - assert tally.not_disposable_count > ( - raw_page_count // 2 - ), "compaction doesn't rewrite layers that are >=50pct local" + assert tally.not_disposable_count > (raw_page_count // 2), ( + "compaction doesn't rewrite layers that are >=50pct local" + ) log.info("check sizes") timeline_info = ps.http_client().timeline_detail(shard, timeline_id) @@ -1601,7 +1601,7 @@ def test_sharding_backpressure(neon_env_builder: NeonEnvBuilder): delta_bytes = lsn - last_flush_lsn avg_speed = delta_bytes / delta / 1024 / 1024 log.info( - f"flush_lsn {lsn}, written {delta_bytes/1024}kb for {delta:.3f}s, avg_speed {avg_speed:.3f} MiB/s" + f"flush_lsn {lsn}, written {delta_bytes / 1024}kb for {delta:.3f}s, avg_speed {avg_speed:.3f} MiB/s" ) last_flush_lsn = lsn diff --git a/test_runner/regress/test_sni_router.py b/test_runner/regress/test_sni_router.py index 3487542d6e..19952fc71b 100644 --- a/test_runner/regress/test_sni_router.py +++ b/test_runner/regress/test_sni_router.py @@ -2,18 +2,19 @@ from __future__ import annotations import socket import subprocess -from pathlib import Path from typing import TYPE_CHECKING import backoff from fixtures.log_helper import log from fixtures.neon_fixtures import PgProtocol, VanillaPostgres -from fixtures.port_distributor import PortDistributor if TYPE_CHECKING: + from pathlib import Path from types import TracebackType from typing import Self + from fixtures.port_distributor import PortDistributor + def 
generate_tls_cert(cn, certout, keyout): subprocess.run( diff --git a/test_runner/regress/test_storage_controller.py b/test_runner/regress/test_storage_controller.py index f2386c9533..35a75ca607 100644 --- a/test_runner/regress/test_storage_controller.py +++ b/test_runner/regress/test_storage_controller.py @@ -13,7 +13,6 @@ import fixtures.utils import pytest from fixtures.auth_tokens import TokenScope from fixtures.common_types import TenantId, TenantShardId, TimelineId -from fixtures.compute_reconfigure import ComputeReconfigure from fixtures.log_helper import log from fixtures.neon_fixtures import ( DEFAULT_AZ_ID, @@ -40,9 +39,7 @@ from fixtures.pageserver.utils import ( timeline_delete_wait_completed, ) from fixtures.pg_version import PgVersion -from fixtures.port_distributor import PortDistributor from fixtures.remote_storage import RemoteStorageKind, s3_storage -from fixtures.storage_controller_proxy import StorageControllerProxy from fixtures.utils import ( run_only_on_default_postgres, run_pg_bench_small, @@ -50,18 +47,21 @@ from fixtures.utils import ( wait_until, ) from fixtures.workload import Workload -from mypy_boto3_s3.type_defs import ( - ObjectTypeDef, -) -from pytest_httpserver import HTTPServer from urllib3 import Retry -from werkzeug.wrappers.request import Request from werkzeug.wrappers.response import Response if TYPE_CHECKING: from typing import Any + from fixtures.compute_reconfigure import ComputeReconfigure from fixtures.httpserver import ListenAddress + from fixtures.port_distributor import PortDistributor + from fixtures.storage_controller_proxy import StorageControllerProxy + from mypy_boto3_s3.type_defs import ( + ObjectTypeDef, + ) + from pytest_httpserver import HTTPServer + from werkzeug.wrappers.request import Request def get_node_shard_counts(env: NeonEnv, tenant_ids): @@ -147,9 +147,9 @@ def test_storage_controller_smoke( for node_id, count in get_node_shard_counts(env, tenant_ids).items(): # we used a multiple of pageservers for the total shard count, # so expect an equal number on all pageservers - assert count == tenant_shard_count / len( - env.pageservers - ), f"Node {node_id} has bad count {count}" + assert count == tenant_shard_count / len(env.pageservers), ( + f"Node {node_id} has bad count {count}" + ) # Creating and deleting timelines should work, using identical API to pageserver timeline_crud_tenant = next(iter(tenant_ids)) diff --git a/test_runner/regress/test_storage_scrubber.py b/test_runner/regress/test_storage_scrubber.py index 0f4e5688a9..70af299de3 100644 --- a/test_runner/regress/test_storage_scrubber.py +++ b/test_runner/regress/test_storage_scrubber.py @@ -6,19 +6,22 @@ import shutil import threading import time from concurrent.futures import ThreadPoolExecutor +from typing import TYPE_CHECKING import pytest from fixtures.common_types import TenantId, TenantShardId, TimelineId from fixtures.log_helper import log -from fixtures.neon_fixtures import ( - NeonEnv, - NeonEnvBuilder, -) from fixtures.pg_version import PgVersion from fixtures.remote_storage import S3Storage, s3_storage from fixtures.utils import wait_until from fixtures.workload import Workload +if TYPE_CHECKING: + from fixtures.neon_fixtures import ( + NeonEnv, + NeonEnvBuilder, + ) + @pytest.mark.parametrize("shard_count", [None, 4]) def test_scrubber_tenant_snapshot(neon_env_builder: NeonEnvBuilder, shard_count: int | None): diff --git a/test_runner/regress/test_subscriber_restart.py b/test_runner/regress/test_subscriber_restart.py index 8ad7282ea2..f084911fcc 100644 ---
a/test_runner/regress/test_subscriber_restart.py +++ b/test_runner/regress/test_subscriber_restart.py @@ -2,11 +2,14 @@ from __future__ import annotations import threading import time +from typing import TYPE_CHECKING import pytest -from fixtures.neon_fixtures import NeonEnv from fixtures.utils import wait_until +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnv + # This test checks whether the logical replication subscriber is able to correctly restart replication without receiving duplicates. # It requires tracking information about replication origins on the page server side diff --git a/test_runner/regress/test_tenant_conf.py b/test_runner/regress/test_tenant_conf.py index 0c2d535af4..de6bdc0aec 100644 --- a/test_runner/regress/test_tenant_conf.py +++ b/test_runner/regress/test_tenant_conf.py @@ -5,9 +5,6 @@ from typing import TYPE_CHECKING import pytest from fixtures.common_types import Lsn -from fixtures.neon_fixtures import ( - NeonEnvBuilder, -) from fixtures.pageserver.utils import assert_tenant_state, wait_for_upload from fixtures.remote_storage import LocalFsStorage, RemoteStorageKind from fixtures.utils import run_only_on_default_postgres, wait_until @@ -16,6 +13,10 @@ from fixtures.workload import Workload if TYPE_CHECKING: from typing import Any + from fixtures.neon_fixtures import ( + NeonEnvBuilder, + ) + def test_tenant_config(neon_env_builder: NeonEnvBuilder): """Test per tenant configuration""" @@ -68,9 +69,9 @@ def test_tenant_config(neon_env_builder: NeonEnvBuilder): # check the configuration of the default tenant # it should match global configuration default_tenant_config = http_client.tenant_config(tenant_id=env.initial_tenant) - assert ( - not default_tenant_config.tenant_specific_overrides - ), "Should have no specific settings yet" + assert not default_tenant_config.tenant_specific_overrides, ( + "Should have no specific settings yet" + ) effective_config = default_tenant_config.effective_config assert effective_config["checkpoint_distance"] == 10000 assert effective_config["compaction_target_size"] == 1048576 @@ -92,22 +93,22 @@ def test_tenant_config(neon_env_builder: NeonEnvBuilder): new_specific_config = new_tenant_config.tenant_specific_overrides assert new_specific_config["checkpoint_distance"] == 20000 assert new_specific_config["gc_period"] == "30s" - assert len(new_specific_config) == len( - new_conf - ), f"No more specific properties were expected, but got: {new_specific_config}" + assert len(new_specific_config) == len(new_conf), ( + f"No more specific properties were expected, but got: {new_specific_config}" + ) new_effective_config = new_tenant_config.effective_config - assert ( - new_effective_config["checkpoint_distance"] == 20000 - ), "Specific 'checkpoint_distance' config should override the default value" - assert ( - new_effective_config["gc_period"] == "30s" - ), "Specific 'gc_period' config should override the default value" - assert ( - new_effective_config["evictions_low_residence_duration_metric_threshold"] == "42s" - ), "Should override default value" - assert new_effective_config["eviction_policy"] == { - "kind": "NoEviction" - }, "Specific 'eviction_policy' config should override the default value" + assert new_effective_config["checkpoint_distance"] == 20000, ( + "Specific 'checkpoint_distance' config should override the default value" + ) + assert new_effective_config["gc_period"] == "30s", ( + "Specific 'gc_period' config should override the default value" + ) + assert
new_effective_config["evictions_low_residence_duration_metric_threshold"] == "42s", ( + "Should override default value" + ) + assert new_effective_config["eviction_policy"] == {"kind": "NoEviction"}, ( + "Specific 'eviction_policy' config should override the default value" + ) assert new_effective_config["compaction_target_size"] == 1048576 assert new_effective_config["compaction_period"] == "20s" assert new_effective_config["compaction_threshold"] == 10 @@ -134,22 +135,22 @@ def test_tenant_config(neon_env_builder: NeonEnvBuilder): assert updated_specific_config["checkpoint_distance"] == 15000 assert updated_specific_config["gc_period"] == "1m 20s" assert updated_specific_config["compaction_period"] == "1m 20s" - assert len(updated_specific_config) == len( - conf_update - ), f"No more specific properties were expected, but got: {updated_specific_config}" + assert len(updated_specific_config) == len(conf_update), ( + f"No more specific properties were expected, but got: {updated_specific_config}" + ) updated_effective_config = updated_tenant_config.effective_config - assert ( - updated_effective_config["checkpoint_distance"] == 15000 - ), "Specific 'checkpoint_distance' config should override the default value" - assert ( - updated_effective_config["gc_period"] == "1m 20s" - ), "Specific 'gc_period' config should override the default value" - assert ( - updated_effective_config["compaction_period"] == "1m 20s" - ), "Specific 'compaction_period' config should override the default value" - assert ( - updated_effective_config["evictions_low_residence_duration_metric_threshold"] == "23h" - ), "Should override default value" + assert updated_effective_config["checkpoint_distance"] == 15000, ( + "Specific 'checkpoint_distance' config should override the default value" + ) + assert updated_effective_config["gc_period"] == "1m 20s", ( + "Specific 'gc_period' config should override the default value" + ) + assert updated_effective_config["compaction_period"] == "1m 20s", ( + "Specific 'compaction_period' config should override the default value" + ) + assert updated_effective_config["evictions_low_residence_duration_metric_threshold"] == "23h", ( + "Should override default value" + ) assert updated_effective_config["eviction_policy"] == { "kind": "LayerAccessThreshold", "period": "1m 20s", @@ -167,9 +168,9 @@ def test_tenant_config(neon_env_builder: NeonEnvBuilder): env.pageserver.start() restarted_tenant_config = http_client.tenant_config(tenant_id=tenant) - assert ( - restarted_tenant_config == updated_tenant_config - ), "Updated config should not change after the restart" + assert restarted_tenant_config == updated_tenant_config, ( + "Updated config should not change after the restart" + ) # update the config with very short config and make sure no trailing chars are left from previous config final_conf = { @@ -180,13 +181,13 @@ def test_tenant_config(neon_env_builder: NeonEnvBuilder): final_tenant_config = http_client.tenant_config(tenant_id=tenant) final_specific_config = final_tenant_config.tenant_specific_overrides assert final_specific_config["pitr_interval"] == "1m" - assert len(final_specific_config) == len( - final_conf - ), f"No more specific properties were expected, but got: {final_specific_config}" + assert len(final_specific_config) == len(final_conf), ( + f"No more specific properties were expected, but got: {final_specific_config}" + ) final_effective_config = final_tenant_config.effective_config - assert ( - final_effective_config["pitr_interval"] == "1m" - ), "Specific 
'pitr_interval' config should override the default value" + assert final_effective_config["pitr_interval"] == "1m", ( + "Specific 'pitr_interval' config should override the default value" + ) assert final_effective_config["checkpoint_distance"] == 10000 assert final_effective_config["compaction_target_size"] == 1048576 assert final_effective_config["compaction_period"] == "20s" @@ -207,9 +208,9 @@ def test_tenant_config(neon_env_builder: NeonEnvBuilder): env.pageserver.start() restarted_final_tenant_config = http_client.tenant_config(tenant_id=tenant) - assert ( - restarted_final_tenant_config == final_tenant_config - ), "Updated config should not change after the restart" + assert restarted_final_tenant_config == final_tenant_config, ( + "Updated config should not change after the restart" + ) def test_creating_tenant_conf_after_attach(neon_env_builder: NeonEnvBuilder): @@ -299,9 +300,9 @@ def test_live_reconfig_get_evictions_low_residence_duration_metric_threshold( tenant_id, {"evictions_low_residence_duration_metric_threshold": default_value} ) updated_metric = get_metric() - assert int(updated_metric.value) == int( - metric.value - ), "metric is unchanged when setting same value" + assert int(updated_metric.value) == int(metric.value), ( + "metric is unchanged when setting same value" + ) env.config_tenant(tenant_id, {"evictions_low_residence_duration_metric_threshold": "2day"}) metric = get_metric() diff --git a/test_runner/regress/test_tenant_delete.py b/test_runner/regress/test_tenant_delete.py index 3720f653c5..8379908631 100644 --- a/test_runner/regress/test_tenant_delete.py +++ b/test_runner/regress/test_tenant_delete.py @@ -3,6 +3,7 @@ from __future__ import annotations import json from concurrent.futures import ThreadPoolExecutor from threading import Thread +from typing import TYPE_CHECKING import pytest from fixtures.common_types import Lsn, TenantId, TimelineId @@ -23,9 +24,11 @@ from fixtures.remote_storage import RemoteStorageKind, s3_storage from fixtures.utils import run_pg_bench_small, wait_until from fixtures.workload import Workload from requests.exceptions import ReadTimeout -from werkzeug.wrappers.request import Request from werkzeug.wrappers.response import Response +if TYPE_CHECKING: + from werkzeug.wrappers.request import Request + def error_tolerant_delete(ps_http, tenant_id): """ diff --git a/test_runner/regress/test_tenant_detach.py b/test_runner/regress/test_tenant_detach.py index 3f21dc895a..7f80a9bcfd 100644 --- a/test_runner/regress/test_tenant_detach.py +++ b/test_runner/regress/test_tenant_detach.py @@ -5,16 +5,12 @@ import random import time from enum import StrEnum from threading import Thread +from typing import TYPE_CHECKING import asyncpg import pytest from fixtures.common_types import Lsn, TenantId, TimelineId from fixtures.log_helper import log -from fixtures.neon_fixtures import ( - Endpoint, - NeonEnv, - NeonEnvBuilder, -) from fixtures.pageserver.http import PageserverApiException, PageserverHttpClient from fixtures.pageserver.utils import ( wait_for_last_record_lsn, @@ -25,7 +21,14 @@ from fixtures.remote_storage import ( RemoteStorageKind, ) from fixtures.utils import query_scalar, wait_until -from prometheus_client.samples import Sample + +if TYPE_CHECKING: + from fixtures.neon_fixtures import ( + Endpoint, + NeonEnv, + NeonEnvBuilder, + ) + from prometheus_client.samples import Sample # In tests that overlap endpoint activity with tenant attach/detach, there are # a variety of warnings that the page service may emit when it cannot acquire 
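The hunk above is representative of most of this PR: ruff's flake8-type-checking ("TC") rules flag imports that are referenced only in annotations, and the fix is to move them under an if TYPE_CHECKING: guard. A minimal sketch of the resulting pattern, using the NeonEnvBuilder fixture type that appears throughout this diff (the test body itself is illustrative, not taken from the PR):

    from __future__ import annotations

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # Evaluated by type checkers only; this import never runs at runtime.
        from fixtures.neon_fixtures import NeonEnvBuilder

    def test_example(neon_env_builder: NeonEnvBuilder):
        # With postponed evaluation of annotations (PEP 563), the annotation
        # above stays a plain string at runtime, and pytest injects the
        # fixture by name, so the class is only needed when type checking.
        env = neon_env_builder.init_start()
        assert env is not None

This is why the moves are safe for fixture types: nothing dereferences the class at runtime, only the annotation mentions it.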
@@ -434,9 +437,9 @@ def test_detach_while_activating( tenants_after_detach = [tenant["id"] for tenant in pageserver_http.tenant_list()] assert tenant_id not in tenants_after_detach, "Detached tenant should be missing" - assert len(tenants_after_detach) + 1 == len( - tenants_before_detach - ), "Only ignored tenant should be missing" + assert len(tenants_after_detach) + 1 == len(tenants_before_detach), ( + "Only ignored tenant should be missing" + ) # Subsequently attaching it again should still work pageserver_http.configure_failpoints([("attach-before-activate-sleep", "off")]) @@ -478,9 +481,9 @@ def insert_test_data( def ensure_test_data(data_id: int, data: str, endpoint: Endpoint): with endpoint.cursor() as cur: - assert ( - query_scalar(cur, f"SELECT secret FROM test WHERE id = {data_id};") == data - ), "Should have timeline data back" + assert query_scalar(cur, f"SELECT secret FROM test WHERE id = {data_id};") == data, ( + "Should have timeline data back" + ) def test_metrics_while_ignoring_broken_tenant_and_reloading( diff --git a/test_runner/regress/test_tenant_relocation.py b/test_runner/regress/test_tenant_relocation.py index df53a98e92..f395aa665d 100644 --- a/test_runner/regress/test_tenant_relocation.py +++ b/test_runner/regress/test_tenant_relocation.py @@ -5,14 +5,11 @@ import shutil import threading import time from contextlib import closing, contextmanager -from pathlib import Path from typing import TYPE_CHECKING import pytest from fixtures.common_types import Lsn, TenantId, TimelineId from fixtures.log_helper import log -from fixtures.neon_fixtures import Endpoint, NeonEnvBuilder, NeonPageserver -from fixtures.pageserver.http import PageserverHttpClient from fixtures.pageserver.utils import ( assert_tenant_state, wait_for_last_record_lsn, @@ -28,8 +25,12 @@ from fixtures.utils import ( ) if TYPE_CHECKING: + from pathlib import Path from typing import Any + from fixtures.neon_fixtures import Endpoint, NeonEnvBuilder, NeonPageserver + from fixtures.pageserver.http import PageserverHttpClient + def assert_abs_margin_ratio(a: float, b: float, margin_ratio: float): assert abs(a - b) / a < margin_ratio, abs(a - b) / a @@ -158,9 +159,9 @@ def switch_pg_to_new_pageserver( timeline_to_detach_local_path = origin_ps.timeline_dir(tenant_id, timeline_id) files_before_detach = os.listdir(timeline_to_detach_local_path) - assert ( - len(files_before_detach) >= 1 - ), f"Regular timeline {timeline_to_detach_local_path} should have at least one layer file, but got {files_before_detach}" + assert len(files_before_detach) >= 1, ( + f"Regular timeline {timeline_to_detach_local_path} should have at least one layer file, but got {files_before_detach}" + ) return timeline_to_detach_local_path @@ -175,9 +176,9 @@ def post_migration_check(endpoint: Endpoint, sum_before_migration: int, old_loca cur.execute("SELECT sum(key) FROM t") assert cur.fetchone() == (sum_before_migration + 1500500,) - assert not os.path.exists( - old_local_path - ), f"After detach, local timeline dir {old_local_path} should be removed" + assert not os.path.exists(old_local_path), ( + f"After detach, local timeline dir {old_local_path} should be removed" + ) @pytest.mark.parametrize( diff --git a/test_runner/regress/test_tenant_size.py b/test_runner/regress/test_tenant_size.py index 81e727a3aa..a50a1beed6 100644 --- a/test_runner/regress/test_tenant_size.py +++ b/test_runner/regress/test_tenant_size.py @@ -1,10 +1,9 @@ from __future__ import annotations from concurrent.futures import ThreadPoolExecutor -from pathlib import Path 
+from typing import TYPE_CHECKING import pytest -from fixtures.common_types import Lsn, TenantId, TimelineId from fixtures.log_helper import log from fixtures.neon_fixtures import ( Endpoint, @@ -19,9 +18,14 @@ from fixtures.pageserver.utils import ( timeline_delete_wait_completed, wait_until_tenant_active, ) -from fixtures.pg_version import PgVersion from fixtures.utils import skip_in_debug_build, wait_until +if TYPE_CHECKING: + from pathlib import Path + + from fixtures.common_types import Lsn, TenantId, TimelineId + from fixtures.pg_version import PgVersion + def test_empty_tenant_size(neon_env_builder: NeonEnvBuilder): env = neon_env_builder.init_configs() @@ -578,9 +582,9 @@ def test_get_tenant_size_with_multiple_branches( wait_for_last_flush_lsn(env, second_branch_endpoint, tenant_id, second_branch_timeline_id) size_after_thinning_branch = http_client.tenant_size(tenant_id) - assert ( - size_after_thinning_branch > size_after_growing_second_branch - ), "tenant_size should grow with dropped tables and full vacuum" + assert size_after_thinning_branch > size_after_growing_second_branch, ( + "tenant_size should grow with dropped tables and full vacuum" + ) first_branch_endpoint.stop_and_destroy() second_branch_endpoint.stop_and_destroy() diff --git a/test_runner/regress/test_tenant_tasks.py b/test_runner/regress/test_tenant_tasks.py index 4c26b64d22..814ebc14f5 100644 --- a/test_runner/regress/test_tenant_tasks.py +++ b/test_runner/regress/test_tenant_tasks.py @@ -1,8 +1,9 @@ from __future__ import annotations +from typing import TYPE_CHECKING + from fixtures.common_types import TenantId, TimelineId from fixtures.log_helper import log -from fixtures.neon_fixtures import NeonEnvBuilder from fixtures.pageserver.utils import ( assert_tenant_state, timeline_delete_wait_completed, @@ -10,6 +11,9 @@ from fixtures.pageserver.utils import ( ) from fixtures.utils import wait_until +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnvBuilder + def get_only_element(l): # noqa: E741 assert len(l) == 1 diff --git a/test_runner/regress/test_tenants.py b/test_runner/regress/test_tenants.py index afe444f227..c613a79374 100644 --- a/test_runner/regress/test_tenants.py +++ b/test_runner/regress/test_tenants.py @@ -8,6 +8,7 @@ from contextlib import closing from datetime import datetime from itertools import chain from pathlib import Path +from typing import TYPE_CHECKING import pytest import requests @@ -29,7 +30,9 @@ from fixtures.pageserver.utils import timeline_delete_wait_completed, wait_until from fixtures.pg_version import PgVersion from fixtures.remote_storage import RemoteStorageKind from fixtures.utils import wait_until -from prometheus_client.samples import Sample + +if TYPE_CHECKING: + from prometheus_client.samples import Sample def test_tenant_creation_fails(neon_simple_env: NeonEnv): @@ -313,9 +316,9 @@ def test_pageserver_with_empty_tenants(neon_env_builder: NeonEnvBuilder): files_in_timelines_dir = sum( 1 for _p in Path.iterdir(env.pageserver.timeline_dir(tenant_with_empty_timelines)) ) - assert ( - files_in_timelines_dir == 0 - ), f"Tenant {tenant_with_empty_timelines} should have an empty timelines/ directory" + assert files_in_timelines_dir == 0, ( + f"Tenant {tenant_with_empty_timelines} should have an empty timelines/ directory" + ) # Trigger timeline re-initialization after pageserver restart env.endpoints.stop_all() @@ -335,14 +338,14 @@ def test_pageserver_with_empty_tenants(neon_env_builder: NeonEnvBuilder): tenants = client.tenant_list() [loaded_tenant] = [t for t in 
tenants if t["id"] == str(tenant_with_empty_timelines)] - assert ( - loaded_tenant["state"]["slug"] == "Active" - ), "Tenant {tenant_with_empty_timelines} with empty timelines dir should be active and ready for timeline creation" + assert loaded_tenant["state"]["slug"] == "Active", ( + "Tenant {tenant_with_empty_timelines} with empty timelines dir should be active and ready for timeline creation" + ) loaded_tenant_status = client.tenant_status(tenant_with_empty_timelines) - assert ( - loaded_tenant_status["state"]["slug"] == "Active" - ), f"Tenant {tenant_with_empty_timelines} without timelines dir should be active" + assert loaded_tenant_status["state"]["slug"] == "Active", ( + f"Tenant {tenant_with_empty_timelines} without timelines dir should be active" + ) time.sleep(1) # to allow metrics propagation @@ -357,9 +360,9 @@ def test_pageserver_with_empty_tenants(neon_env_builder: NeonEnvBuilder): ).value ) - assert ( - tenant_active_count == 1 - ), f"Tenant {tenant_with_empty_timelines} should have metric as active" + assert tenant_active_count == 1, ( + f"Tenant {tenant_with_empty_timelines} should have metric as active" + ) def test_create_churn_during_restart(neon_env_builder: NeonEnvBuilder): diff --git a/test_runner/regress/test_tenants_with_remote_storage.py b/test_runner/regress/test_tenants_with_remote_storage.py index 6b27c41d1c..47056e2786 100644 --- a/test_runner/regress/test_tenants_with_remote_storage.py +++ b/test_runner/regress/test_tenants_with_remote_storage.py @@ -181,13 +181,13 @@ def test_tenants_attached_after_download(neon_env_builder: NeonEnvBuilder): wait_until(lambda: assert_tenant_state(client, tenant_id, "Active")) restored_timelines = client.timeline_list(tenant_id) - assert ( - len(restored_timelines) == 1 - ), f"Tenant {tenant_id} should have its timeline reattached after its layer is downloaded from the remote storage" + assert len(restored_timelines) == 1, ( + f"Tenant {tenant_id} should have its timeline reattached after its layer is downloaded from the remote storage" + ) restored_timeline = restored_timelines[0] - assert ( - restored_timeline["timeline_id"] == str(timeline_id) - ), f"Tenant {tenant_id} should have its old timeline {timeline_id} restored from the remote storage" + assert restored_timeline["timeline_id"] == str(timeline_id), ( + f"Tenant {tenant_id} should have its old timeline {timeline_id} restored from the remote storage" + ) # Check that we had to retry the downloads assert env.pageserver.log_contains(".*download .* succeeded after 1 retries.*") @@ -235,9 +235,9 @@ def test_tenant_redownloads_truncated_file_on_startup( os.truncate(path, 0) local_layer_truncated = (path, correct_size) break - assert ( - local_layer_truncated is not None - ), f"Found no local layer files to delete in directory {timeline_dir}" + assert local_layer_truncated is not None, ( + f"Found no local layer files to delete in directory {timeline_dir}" + ) (path, expected_size) = local_layer_truncated @@ -256,13 +256,13 @@ def test_tenant_redownloads_truncated_file_on_startup( wait_until(lambda: assert_tenant_state(client, tenant_id, "Active")) restored_timelines = client.timeline_list(tenant_id) - assert ( - len(restored_timelines) == 1 - ), f"Tenant {tenant_id} should have its timeline reattached after its layer is downloaded from the remote storage" + assert len(restored_timelines) == 1, ( + f"Tenant {tenant_id} should have its timeline reattached after its layer is downloaded from the remote storage" + ) retored_timeline = restored_timelines[0] - assert ( - 
retored_timeline["timeline_id"] == str(timeline_id) - ), f"Tenant {tenant_id} should have its old timeline {timeline_id} restored from the remote storage" + assert retored_timeline["timeline_id"] == str(timeline_id), ( + f"Tenant {tenant_id} should have its old timeline {timeline_id} restored from the remote storage" + ) # Request non-incremental logical size. Calculating it needs the layer file that # we corrupted, forcing it to be redownloaded. @@ -277,9 +277,9 @@ def test_tenant_redownloads_truncated_file_on_startup( # if the upload ever was ongoing, this check would be racy, but at least one # extra http request has been made in between so assume it's enough delay - assert ( - os.stat(remote_layer_path).st_size == expected_size - ), "truncated file should not have been uploaded around re-download" + assert os.stat(remote_layer_path).st_size == expected_size, ( + "truncated file should not have been uploaded around re-download" + ) endpoint = env.endpoints.create_start("main") @@ -295,6 +295,6 @@ def test_tenant_redownloads_truncated_file_on_startup( # re-uploaded truncated. this is a rather bogus check given the current # implementation, but it's critical it doesn't happen so wasting a few # lines of python to do this. - assert ( - os.stat(remote_layer_path).st_size == expected_size - ), "truncated file should not have been uploaded after next checkpoint" + assert os.stat(remote_layer_path).st_size == expected_size, ( + "truncated file should not have been uploaded after next checkpoint" + ) diff --git a/test_runner/regress/test_threshold_based_eviction.py b/test_runner/regress/test_threshold_based_eviction.py index b16448ef00..8d3d7b623c 100644 --- a/test_runner/regress/test_threshold_based_eviction.py +++ b/test_runner/regress/test_threshold_based_eviction.py @@ -10,12 +10,12 @@ from fixtures.neon_fixtures import ( PgBin, last_flush_lsn_upload, ) -from fixtures.pageserver.http import LayerMapInfo from fixtures.remote_storage import RemoteStorageKind -from pytest_httpserver import HTTPServer if TYPE_CHECKING: from fixtures.httpserver import ListenAddress + from fixtures.pageserver.http import LayerMapInfo + from pytest_httpserver import HTTPServer # NB: basic config change tests are in test_tenant_conf.py @@ -181,14 +181,14 @@ def test_threshold_based_eviction( # TODO: can we be more precise here?
E.g., require we're stable _within_ X*threshold, # instead of what we do here, i.e., stable _for at least_ X*threshold toward the end of the observation window - assert ( - stable_for > consider_stable_when_no_change_for_seconds - ), "layer residencies did not become stable within the observation window" + assert stable_for > consider_stable_when_no_change_for_seconds, ( + "layer residencies did not become stable within the observation window" + ) post = map_info_changes[-1][1].by_local_and_remote() assert len(post.remote_layers) > 0, "some layers should be evicted once it's stabilized" assert len(post.local_layers) > 0, "the imitate accesses should keep some layers resident" - assert ( - env.pageserver.log_contains(metrics_refused_log_line) is not None - ), "ensure the metrics collection worker ran" + assert env.pageserver.log_contains(metrics_refused_log_line) is not None, ( + "ensure the metrics collection worker ran" + ) diff --git a/test_runner/regress/test_timeline_archive.py b/test_runner/regress/test_timeline_archive.py index 2bad0bb671..17abe1ea75 100644 --- a/test_runner/regress/test_timeline_archive.py +++ b/test_runner/regress/test_timeline_archive.py @@ -4,6 +4,7 @@ import json import random import threading import time +from typing import TYPE_CHECKING import pytest import requests @@ -23,11 +24,13 @@ from fixtures.pageserver.utils import ( from fixtures.pg_version import PgVersion from fixtures.remote_storage import S3Storage, s3_storage from fixtures.utils import run_only_on_default_postgres, skip_in_debug_build, wait_until -from mypy_boto3_s3.type_defs import ( - ObjectTypeDef, -) from psycopg2.errors import IoError, UndefinedTable +if TYPE_CHECKING: + from mypy_boto3_s3.type_defs import ( + ObjectTypeDef, + ) + @pytest.mark.parametrize("shard_count", [0, 4]) def test_timeline_archive(neon_env_builder: NeonEnvBuilder, shard_count: int): @@ -292,7 +295,7 @@ def test_timeline_offload_persist(neon_env_builder: NeonEnvBuilder, delete_timel conf={ "gc_period": "0s", "compaction_period": "0s", - "checkpoint_distance": f"{1024 ** 2}", + "checkpoint_distance": f"{1024**2}", } ) @@ -898,7 +901,7 @@ def test_timeline_offload_generations(neon_env_builder: NeonEnvBuilder): conf={ "gc_period": "0s", "compaction_period": "0s", - "checkpoint_distance": f"{1024 ** 2}", + "checkpoint_distance": f"{1024**2}", } ) diff --git a/test_runner/regress/test_timeline_delete.py b/test_runner/regress/test_timeline_delete.py index fbece68367..f7629edf7a 100644 --- a/test_runner/regress/test_timeline_delete.py +++ b/test_runner/regress/test_timeline_delete.py @@ -167,7 +167,7 @@ def test_delete_timeline_exercise_crash_safety_failpoints( initial_tenant_conf={ "gc_period": "0s", "compaction_period": "0s", - "checkpoint_distance": f"{1024 ** 2}", + "checkpoint_distance": f"{1024**2}", "image_creation_threshold": "100", } ) @@ -363,9 +363,9 @@ def test_timeline_resurrection_on_attach( wait_until_tenant_active(ps_http, tenant_id=tenant_id) timelines = ps_http.timeline_list(tenant_id=tenant_id) - assert {TimelineId(tl["timeline_id"]) for tl in timelines} == { - main_timeline_id - }, "the deleted timeline should not have been resurrected" + assert {TimelineId(tl["timeline_id"]) for tl in timelines} == {main_timeline_id}, ( + "the deleted timeline should not have been resurrected" + ) assert all([tl["state"] == "Active" for tl in timelines]) @@ -423,9 +423,9 @@ def test_timeline_delete_fail_before_local_delete(neon_env_builder: NeonEnvBuild wait_timeline_detail_404(ps_http, env.initial_tenant, 
leaf_timeline_id) - assert ( - not leaf_timeline_path.exists() - ), "timeline load procedure should have resumed the deletion interrupted by the failpoint" + assert not leaf_timeline_path.exists(), ( + "timeline load procedure should have resumed the deletion interrupted by the failpoint" + ) timelines = ps_http.timeline_list(env.initial_tenant) assert {TimelineId(tl["timeline_id"]) for tl in timelines} == { intermediate_timeline_id, @@ -705,7 +705,7 @@ def test_delete_orphaned_objects( initial_tenant_conf={ "gc_period": "0s", "compaction_period": "0s", - "checkpoint_distance": f"{1024 ** 2}", + "checkpoint_distance": f"{1024**2}", "image_creation_threshold": "100", } ) diff --git a/test_runner/regress/test_timeline_detach_ancestor.py b/test_runner/regress/test_timeline_detach_ancestor.py index 96664f2b8d..2a916438e5 100644 --- a/test_runner/regress/test_timeline_detach_ancestor.py +++ b/test_runner/regress/test_timeline_detach_ancestor.py @@ -431,9 +431,9 @@ def test_ancestor_detach_behavior_v2(neon_env_builder: NeonEnvBuilder): if expected_ancestor is None: assert ancestor_timeline_id is None else: - assert ( - TimelineId(ancestor_timeline_id) == expected_ancestor - ), f"when checking branch {branch_name}, mapping={expected_result}" + assert TimelineId(ancestor_timeline_id) == expected_ancestor, ( + f"when checking branch {branch_name}, mapping={expected_result}" + ) index_part = env.pageserver_remote_storage.index_content( env.initial_tenant, queried_timeline @@ -1301,9 +1301,9 @@ def test_sharded_tad_interleaved_after_partial_success(neon_env_builder: NeonEnv offset, ) if mode == "delete_reparentable_timeline": - assert ( - retried is None - ), "detaching should have converged after both nodes saw the deletion" + assert retried is None, ( + "detaching should have converged after both nodes saw the deletion" + ) elif mode == "create_reparentable_timeline": assert retried is not None, "detaching should not have converged" _, offset = retried @@ -1531,9 +1531,9 @@ def test_retried_detach_ancestor_after_failed_reparenting(neon_env_builder: Neon # first round -- do more checking to make sure the gc gets paused try_detach() - assert ( - http.timeline_detail(env.initial_tenant, detached)["ancestor_timeline_id"] is None - ), "first round should have detached 'detached'" + assert http.timeline_detail(env.initial_tenant, detached)["ancestor_timeline_id"] is None, ( + "first round should have detached 'detached'" + ) reparented, not_reparented = reparenting_progress(timelines) assert reparented == 1 @@ -1569,9 +1569,9 @@ def test_retried_detach_ancestor_after_failed_reparenting(neon_env_builder: Neon for _ in range(2): try_detach() - assert ( - http.timeline_detail(env.initial_tenant, detached)["ancestor_timeline_id"] is None - ), "first round should have detached 'detached'" + assert http.timeline_detail(env.initial_tenant, detached)["ancestor_timeline_id"] is None, ( + "first round should have detached 'detached'" + ) reparented, not_reparented = reparenting_progress(timelines) assert reparented == reparented_before + 1 @@ -1611,9 +1611,9 @@ def test_retried_detach_ancestor_after_failed_reparenting(neon_env_builder: Neon assert reparented == len(timelines) time.sleep(2) - assert ( - env.pageserver.log_contains(".*: attach finished, activating", offset) is None - ), "there should be no restart with the final detach_ancestor as it only completed" + assert env.pageserver.log_contains(".*: attach finished, activating", offset) is None, ( + "there should be no restart with the final detach_ancestor as it
only completed" + ) # gc is unblocked env.pageserver.assert_log_contains(".* gc_loop.*: 5 timelines need GC", offset) @@ -1702,7 +1702,7 @@ def test_pageserver_compaction_detach_ancestor_smoke(neon_env_builder: NeonEnvBu "compaction_period": "5s", # No PiTR interval and small GC horizon "pitr_interval": "0s", - "gc_horizon": f"{1024 ** 2}", + "gc_horizon": f"{1024**2}", "lsn_lease_length": "0s", # Small checkpoint distance to create many layers "checkpoint_distance": 1024**2, diff --git a/test_runner/regress/test_timeline_gc_blocking.py b/test_runner/regress/test_timeline_gc_blocking.py index 7605e1f758..9a710f5b80 100644 --- a/test_runner/regress/test_timeline_gc_blocking.py +++ b/test_runner/regress/test_timeline_gc_blocking.py @@ -3,16 +3,19 @@ from __future__ import annotations import time from concurrent.futures import ThreadPoolExecutor from dataclasses import dataclass +from typing import TYPE_CHECKING import pytest from fixtures.log_helper import log -from fixtures.neon_fixtures import ( - LogCursor, - NeonEnvBuilder, - NeonPageserver, -) from fixtures.pageserver.utils import wait_timeline_detail_404 +if TYPE_CHECKING: + from fixtures.neon_fixtures import ( + LogCursor, + NeonEnvBuilder, + NeonPageserver, + ) + @pytest.mark.parametrize("sharded", [True, False]) def test_gc_blocking_by_timeline(neon_env_builder: NeonEnvBuilder, sharded: bool): diff --git a/test_runner/regress/test_timeline_size.py b/test_runner/regress/test_timeline_size.py index e2fdacdbfc..7247027165 100644 --- a/test_runner/regress/test_timeline_size.py +++ b/test_runner/regress/test_timeline_size.py @@ -6,7 +6,7 @@ import random import time from collections import defaultdict from contextlib import closing -from pathlib import Path +from typing import TYPE_CHECKING import psycopg2.errors import psycopg2.extras @@ -22,18 +22,22 @@ from fixtures.neon_fixtures import ( VanillaPostgres, wait_for_last_flush_lsn, ) -from fixtures.pageserver.http import PageserverHttpClient from fixtures.pageserver.utils import ( assert_tenant_state, timeline_delete_wait_completed, wait_for_upload_queue_empty, wait_until_tenant_active, ) -from fixtures.pg_version import PgVersion -from fixtures.port_distributor import PortDistributor from fixtures.remote_storage import RemoteStorageKind from fixtures.utils import get_timeline_dir_size, wait_until +if TYPE_CHECKING: + from pathlib import Path + + from fixtures.pageserver.http import PageserverHttpClient + from fixtures.pg_version import PgVersion + from fixtures.port_distributor import PortDistributor + def test_timeline_size(neon_simple_env: NeonEnv): env = neon_simple_env @@ -310,9 +314,9 @@ def test_timeline_size_quota(neon_env_builder: NeonEnvBuilder): new_res = client.timeline_detail( env.initial_tenant, new_timeline_id, include_non_incremental_logical_size=True ) - assert ( - new_res["current_logical_size"] == new_res["current_logical_size_non_incremental"] - ), "after the WAL is streamed, current_logical_size is expected to be calculated and to be equal to its non-incremental value" + assert new_res["current_logical_size"] == new_res["current_logical_size_non_incremental"], ( + "after the WAL is streamed, current_logical_size is expected to be calculated and to be equal to its non-incremental value" + ) @pytest.mark.parametrize("deletion_method", ["tenant_detach", "timeline_delete"]) diff --git a/test_runner/regress/test_truncate.py b/test_runner/regress/test_truncate.py index 946dab2676..5c494a1368 100644 --- a/test_runner/regress/test_truncate.py +++
b/test_runner/regress/test_truncate.py @@ -1,8 +1,10 @@ from __future__ import annotations import time +from typing import TYPE_CHECKING -from fixtures.neon_fixtures import NeonEnvBuilder +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnvBuilder # @@ -34,10 +36,10 @@ def test_truncate(neon_env_builder: NeonEnvBuilder, zenbenchmark): cur.execute(f"insert into t1 values (generate_series(1,{n_records}))") cur.execute("vacuum t1") for _ in range(n_iter): - cur.execute(f"delete from t1 where x>{n_records//2}") + cur.execute(f"delete from t1 where x>{n_records // 2}") cur.execute("vacuum t1") time.sleep(1) # give the pageserver a chance to create image layers - cur.execute(f"insert into t1 values (generate_series({n_records//2+1}, {n_records}))") + cur.execute(f"insert into t1 values (generate_series({n_records // 2 + 1}, {n_records}))") cur.execute("vacuum t1") time.sleep(1) # give the pageserver a chance to create image layers diff --git a/test_runner/regress/test_twophase.py b/test_runner/regress/test_twophase.py index e37e8dd3e8..169c966fa5 100644 --- a/test_runner/regress/test_twophase.py +++ b/test_runner/regress/test_twophase.py @@ -1,7 +1,7 @@ from __future__ import annotations import os -from pathlib import Path +from typing import TYPE_CHECKING from fixtures.common_types import TimelineId from fixtures.log_helper import log @@ -13,6 +13,9 @@ from fixtures.neon_fixtures import ( wait_for_wal_insert_lsn, ) +if TYPE_CHECKING: + from pathlib import Path + # # Test branching, when a transaction is in prepared state diff --git a/test_runner/regress/test_vm_bits.py b/test_runner/regress/test_vm_bits.py index b30c02e0e4..85fa4c821e 100644 --- a/test_runner/regress/test_vm_bits.py +++ b/test_runner/regress/test_vm_bits.py @@ -163,9 +163,9 @@ def test_vm_bit_clear_on_heap_lock_whitebox(neon_env_builder: NeonEnvBuilder): relfrozenxid = int( query_scalar(cur, "SELECT relfrozenxid FROM pg_class WHERE relname='vmtest_lock'") ) - assert ( - relfrozenxid > xid - ), f"Inserted rows were not frozen. This can be caused by concurrent activity in the database. (XID {xid}, relfrozenxid {relfrozenxid})" + assert relfrozenxid > xid, ( + f"Inserted rows were not frozen. This can be caused by concurrent activity in the database. (XID {xid}, relfrozenxid {relfrozenxid})" + ) # Lock a row. This clears the all-frozen VM bit for that page. cur.execute("BEGIN") @@ -324,7 +324,7 @@ def test_check_visibility_map(neon_env_builder: NeonEnvBuilder, pg_bin: PgBin): # Run pgbench in 4 different databases, to exercise different shards. dbnames = [f"pgbench{i}" for i in range(PGBENCH_RUNS)] for i, dbname in enumerate(dbnames): - log.info(f"pgbench run {i+1}/{PGBENCH_RUNS}") + log.info(f"pgbench run {i + 1}/{PGBENCH_RUNS}") endpoint.safe_psql(f"create database {dbname}") connstr = endpoint.connstr(dbname=dbname) # Initialize the data set, but don't vacuum yet.
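The {n_records//2} to {n_records // 2}, {i+1} to {i + 1}, and f"{1024 ** 2}" to f"{1024**2}" edits scattered through these hunks come from the formatter rather than the linter: as I read the ruff changelog, versions in this range started formatting expressions inside f-string replacement fields like ordinary code, so binary operators gain spaces while ** stays tight for simple operands, as it always has outside f-strings. A small sketch of the effect (the variable values are illustrative):

    n_records, i = 100_000, 3
    # As ruff 0.7 left it: expressions inside the braces were untouched.
    old = f"delete from t1 where x>{n_records//2}"
    # What the newer formatter produces: spaces around // and +, none around **.
    new = f"delete from t1 where x>{n_records // 2}"
    run_label = f"pgbench run {i + 1}/4"
    checkpoint_distance = f"{1024**2}"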
diff --git a/test_runner/regress/test_wal_acceptor.py b/test_runner/regress/test_wal_acceptor.py index 89c4a96499..e3d39f9315 100644 --- a/test_runner/regress/test_wal_acceptor.py +++ b/test_runner/regress/test_wal_acceptor.py @@ -39,7 +39,6 @@ from fixtures.pageserver.utils import ( timeline_delete_wait_completed, ) from fixtures.pg_version import PgVersion -from fixtures.port_distributor import PortDistributor from fixtures.remote_storage import ( RemoteStorageKind, default_remote_storage, @@ -72,6 +71,8 @@ from fixtures.utils import ( if TYPE_CHECKING: from typing import Any, Self + from fixtures.port_distributor import PortDistributor + @dataclass class TimelineMetrics: @@ -138,20 +139,24 @@ def test_many_timelines(neon_env_builder: NeonEnvBuilder): for flush_lsn, commit_lsn in zip(m.flush_lsns, m.commit_lsns, strict=False): # Invariant. May be < when transaction is in progress. - assert ( - commit_lsn <= flush_lsn - ), f"timeline_id={timeline_id}, timeline_detail={timeline_detail}, sk_metrics={sk_metrics}" + assert commit_lsn <= flush_lsn, ( + f"timeline_id={timeline_id}, timeline_detail={timeline_detail}, sk_metrics={sk_metrics}" + ) # We only call collect_metrics() after a transaction is confirmed by # the compute node, which only happens after a consensus of safekeepers # has confirmed the transaction. We assume majority consensus here. assert ( 2 * sum(m.last_record_lsn <= lsn for lsn in m.flush_lsns) > neon_env_builder.num_safekeepers - ), f"timeline_id={timeline_id}, timeline_detail={timeline_detail}, sk_metrics={sk_metrics}" + ), ( + f"timeline_id={timeline_id}, timeline_detail={timeline_detail}, sk_metrics={sk_metrics}" + ) assert ( 2 * sum(m.last_record_lsn <= lsn for lsn in m.commit_lsns) > neon_env_builder.num_safekeepers - ), f"timeline_id={timeline_id}, timeline_detail={timeline_detail}, sk_metrics={sk_metrics}" + ), ( + f"timeline_id={timeline_id}, timeline_detail={timeline_detail}, sk_metrics={sk_metrics}" + ) timeline_metrics.append(m) log.info(f"{message}: {timeline_metrics}") return timeline_metrics @@ -1112,16 +1117,16 @@ def cmp_sk_wal(sks: list[Safekeeper], tenant_id: TenantId, timeline_id: Timeline statuses = [sk_http_cli.timeline_status(tenant_id, timeline_id) for sk_http_cli in sk_http_clis] term_flush_lsns = [(s.last_log_term, s.flush_lsn) for s in statuses] for tfl, sk in zip(term_flush_lsns[1:], sks[1:], strict=False): - assert ( - term_flush_lsns[0] == tfl - ), f"(last_log_term, flush_lsn) are not equal on sks {sks[0].id} and {sk.id}: {term_flush_lsns[0]} != {tfl}" + assert term_flush_lsns[0] == tfl, ( + f"(last_log_term, flush_lsn) are not equal on sks {sks[0].id} and {sk.id}: {term_flush_lsns[0]} != {tfl}" + ) # check that WALs are identical.
segs = [sk.list_segments(tenant_id, timeline_id) for sk in sks] for cmp_segs, sk in zip(segs[1:], sks[1:], strict=False): - assert ( - segs[0] == cmp_segs - ), f"lists of segments on sks {sks[0].id} and {sk.id} are not identical: {segs[0]} and {cmp_segs}" + assert segs[0] == cmp_segs, ( + f"lists of segments on sks {sks[0].id} and {sk.id} are not identical: {segs[0]} and {cmp_segs}" + ) log.info(f"comparing segs {segs[0]}") sk0 = sks[0] @@ -2418,7 +2423,7 @@ def test_s3_eviction( for j in range(n_timelines): detail = ps_client.timeline_detail(env.initial_tenant, timelines[j]) log.debug( - f'{branch_names[j]}: RCL={detail["remote_consistent_lsn"]}, LRL={detail["last_record_lsn"]}' + f"{branch_names[j]}: RCL={detail['remote_consistent_lsn']}, LRL={detail['last_record_lsn']}" ) i = random.randint(0, n_timelines - 1) diff --git a/test_runner/regress/test_wal_acceptor_async.py b/test_runner/regress/test_wal_acceptor_async.py index 56539a0a08..b7c7478e78 100644 --- a/test_runner/regress/test_wal_acceptor_async.py +++ b/test_runner/regress/test_wal_acceptor_async.py @@ -4,9 +4,8 @@ import asyncio import random import time from dataclasses import dataclass -from pathlib import Path +from typing import TYPE_CHECKING -import asyncpg import pytest import toml from fixtures.common_types import Lsn, TenantId, TimelineId @@ -21,6 +20,11 @@ from fixtures.neon_fixtures import ( from fixtures.remote_storage import RemoteStorageKind from fixtures.utils import skip_in_debug_build +if TYPE_CHECKING: + from pathlib import Path + + import asyncpg + log = getLogger("root.safekeeper_async") @@ -692,7 +696,7 @@ async def run_race_conditions(env: NeonEnv, endpoint: Endpoint): expected_sum += i i += 1 - log.info(f"Executed {i-1} queries") + log.info(f"Executed {i - 1} queries") res = await conn.fetchval("SELECT sum(key) FROM t") assert res == expected_sum @@ -766,7 +770,7 @@ async def run_wal_lagging(env: NeonEnv, endpoint: Endpoint, test_output_dir: Pat endpoint.start() conn = await endpoint.connect_async() - log.info(f"Executed {i-1} queries") + log.info(f"Executed {i - 1} queries") res = await conn.fetchval("SELECT sum(key) FROM t") assert res == expected_sum diff --git a/test_runner/regress/test_wal_receiver.py b/test_runner/regress/test_wal_receiver.py index d22a900c59..0252b590cc 100644 --- a/test_runner/regress/test_wal_receiver.py +++ b/test_runner/regress/test_wal_receiver.py @@ -5,12 +5,13 @@ from typing import TYPE_CHECKING from fixtures.common_types import Lsn, TenantId from fixtures.log_helper import log -from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder from fixtures.utils import wait_until if TYPE_CHECKING: from typing import Any + from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder + # Checks that pageserver's walreceiver state is printed in the logs during WAL wait timeout. # Ensures that walreceiver does not run without any data inserted and only starts after the insertion.
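The other change repeated through this file, and through most of the section, is purely mechanical reformatting of long asserts: the old formatter wrapped the condition in parentheses and left the message dangling after the comma, while the newer ruff style keeps the condition on one line and parenthesizes the message instead. Side by side, using an assert taken from the hunk that follows (the assignment is added only to make the sketch self-contained):

    exception_string = "WalReceiver status: Not active"

    # Old (ruff 0.7) layout: wrapped condition, trailing message.
    assert (
        "WalReceiver status: Not active" in exception_string
    ), "Walreceiver should not be active before any data writes"

    # New (ruff 0.11) layout: one-line condition, parenthesized message.
    assert "WalReceiver status: Not active" in exception_string, (
        "Walreceiver should not be active before any data writes"
    )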
@@ -33,9 +34,9 @@ def test_pageserver_lsn_wait_error_start(neon_env_builder: NeonEnvBuilder): except Exception as e: exception_string = str(e) assert expected_timeout_error in exception_string, "Should time out while waiting for WAL" - assert ( - "WalReceiver status: Not active" in exception_string - ), "Walreceiver should not be active before any data writes" + assert "WalReceiver status: Not active" in exception_string, ( + "Walreceiver should not be active before any data writes" + ) insert_test_elements(env, tenant_id, start=0, count=1_000) try: @@ -43,9 +44,9 @@ def test_pageserver_lsn_wait_error_start(neon_env_builder: NeonEnvBuilder): except Exception as e: exception_string = str(e) assert expected_timeout_error in exception_string, "Should time out while waiting for WAL" - assert ( - "WalReceiver status: Not active" not in exception_string - ), "Should not be inactive anymore after INSERTs are made" + assert "WalReceiver status: Not active" not in exception_string, ( + "Should not be inactive anymore after INSERTs are made" + ) assert "WalReceiver status" in exception_string, "But still should have some other status" @@ -88,14 +89,14 @@ def test_pageserver_lsn_wait_error_safekeeper_stop(neon_env_builder: NeonEnvBuil trigger_wait_lsn_timeout(env, tenant_id) except Exception as e: exception_string = str(e) - assert ( - expected_timeout_error in exception_string - ), "Should time out while waiting for WAL" + assert expected_timeout_error in exception_string, ( + "Should time out while waiting for WAL" + ) for safekeeper in env.safekeepers: - assert ( - str(safekeeper.id) in exception_string - ), f"Should have safekeeper {safekeeper.id} printed in walreceiver state after WAL wait timeout" + assert str(safekeeper.id) in exception_string, ( + f"Should have safekeeper {safekeeper.id} printed in walreceiver state after WAL wait timeout" + ) wait_until(all_sks_in_wareceiver_state, timeout=30) @@ -110,19 +111,19 @@ def test_pageserver_lsn_wait_error_safekeeper_stop(neon_env_builder: NeonEnvBuil except Exception as e: # Strip out the part before stdout, as it contains full command with the list of all safekeepers exception_string = str(e).split("stdout", 1)[-1] - assert ( - expected_timeout_error in exception_string - ), "Should time out while waiting for WAL" + assert expected_timeout_error in exception_string, ( + "Should time out while waiting for WAL" + ) for safekeeper in env.safekeepers: if safekeeper.id == stopped_safekeeper_id: - assert ( - str(safekeeper.id) not in exception_string - ), f"Should not have stopped safekeeper {safekeeper.id} printed in walreceiver state after 2nd WAL wait timeout" + assert str(safekeeper.id) not in exception_string, ( + f"Should not have stopped safekeeper {safekeeper.id} printed in walreceiver state after 2nd WAL wait timeout" + ) else: - assert ( - str(safekeeper.id) in exception_string - ), f"Should have safekeeper {safekeeper.id} printed in walreceiver state after 2nd WAL wait timeout" + assert str(safekeeper.id) in exception_string, ( + f"Should have safekeeper {safekeeper.id} printed in walreceiver state after 2nd WAL wait timeout" + ) wait_until(all_but_stopped_sks_in_wareceiver_state, timeout=30) diff --git a/test_runner/regress/test_wal_restore.py b/test_runner/regress/test_wal_restore.py index c8e51fde13..0bb63308bb 100644 --- a/test_runner/regress/test_wal_restore.py +++ b/test_runner/regress/test_wal_restore.py @@ -3,7 +3,7 @@ from __future__ import annotations import sys import tarfile import tempfile -from pathlib import Path +from
typing import TYPE_CHECKING import pytest import zstandard @@ -19,11 +19,15 @@ from fixtures.pageserver.utils import ( remote_storage_delete_key, timeline_delete_wait_completed, ) -from fixtures.port_distributor import PortDistributor from fixtures.remote_storage import LocalFsStorage, S3Storage, s3_storage -from mypy_boto3_s3.type_defs import ( - ObjectTypeDef, -) + +if TYPE_CHECKING: + from pathlib import Path + + from fixtures.port_distributor import PortDistributor + from mypy_boto3_s3.type_defs import ( + ObjectTypeDef, + ) @pytest.mark.skipif( diff --git a/test_runner/regress/test_walredo_not_left_behind_on_detach.py b/test_runner/regress/test_walredo_not_left_behind_on_detach.py index 182e57b8a4..7e9e9481a8 100644 --- a/test_runner/regress/test_walredo_not_left_behind_on_detach.py +++ b/test_runner/regress/test_walredo_not_left_behind_on_detach.py @@ -1,14 +1,17 @@ from __future__ import annotations import time +from typing import TYPE_CHECKING import psutil import pytest from fixtures.common_types import TenantId from fixtures.log_helper import log -from fixtures.neon_fixtures import NeonEnvBuilder from fixtures.pageserver.http import PageserverApiException +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnvBuilder + def assert_child_processes(pageserver_pid, wal_redo_present=False, defunct_present=False): children = psutil.Process(pageserver_pid).children() diff --git a/test_runner/test_broken.py b/test_runner/test_broken.py index 112e699395..8d04a16f1a 100644 --- a/test_runner/test_broken.py +++ b/test_runner/test_broken.py @@ -1,10 +1,13 @@ from __future__ import annotations import os +from typing import TYPE_CHECKING import pytest from fixtures.log_helper import log -from fixtures.neon_fixtures import NeonEnv + +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnv """ Use this test to see what happens when tests fail.
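One caveat to keep in mind while reviewing the TYPE_CHECKING moves in this PR: they are only valid for names used purely in annotations. Anything dereferenced at runtime, such as an isinstance() check or a constructor call, still needs a regular import, which is presumably why some fixture imports stay at top level elsewhere in the diff. A sketch of the failure mode, with a hypothetical helper that is not from this PR:

    from __future__ import annotations

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        from fixtures.neon_fixtures import NeonEnv

    def describe(env: NeonEnv) -> str:
        # Fine: 'NeonEnv' in the signature is never evaluated at runtime.
        # Not fine: isinstance(env, NeonEnv) here would raise NameError,
        # because the guarded import above never executed; such uses must
        # keep a top-level import instead.
        return repr(env)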