diff --git a/pre-commit.py b/pre-commit.py index ae432e8225..c9567e0c50 100755 --- a/pre-commit.py +++ b/pre-commit.py @@ -1,11 +1,12 @@ #!/usr/bin/env python3 +from __future__ import annotations + import argparse import enum import os import subprocess import sys -from typing import List @enum.unique @@ -55,12 +56,12 @@ def mypy() -> str: return "poetry run mypy" -def get_commit_files() -> List[str]: +def get_commit_files() -> list[str]: files = subprocess.check_output("git diff --cached --name-only --diff-filter=ACM".split()) return files.decode().splitlines() -def check(name: str, suffix: str, cmd: str, changed_files: List[str], no_color: bool = False): +def check(name: str, suffix: str, cmd: str, changed_files: list[str], no_color: bool = False): print(f"Checking: {name} ", end="") applicable_files = list(filter(lambda fname: fname.strip().endswith(suffix), changed_files)) if not applicable_files: diff --git a/pyproject.toml b/pyproject.toml index 556edf5589..9cd315bb96 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,5 +97,8 @@ select = [ "I", # isort "W", # pycodestyle "B", # bugbear - "UP032", # f-string + "UP", # pyupgrade ] + +[tool.ruff.lint.pyupgrade] +keep-runtime-typing = true # Remove this stanza when we require Python 3.10 diff --git a/scripts/benchmark_durations.py b/scripts/benchmark_durations.py index 4ca433679a..a9a90c7370 100755 --- a/scripts/benchmark_durations.py +++ b/scripts/benchmark_durations.py @@ -1,9 +1,10 @@ #! /usr/bin/env python3 +from __future__ import annotations + import argparse import json import logging -from typing import Dict import psycopg2 import psycopg2.extras @@ -110,7 +111,7 @@ def main(args: argparse.Namespace): output = args.output percentile = args.percentile - res: Dict[str, float] = {} + res: dict[str, float] = {} try: logging.info("connecting to the database...") diff --git a/scripts/download_basebackup.py b/scripts/download_basebackup.py index 1f84e41fef..f00ee87eb7 100755 --- a/scripts/download_basebackup.py +++ b/scripts/download_basebackup.py @@ -4,6 +4,9 @@ # # This can be useful in disaster recovery. # + +from __future__ import annotations + import argparse import psycopg2 diff --git a/scripts/flaky_tests.py b/scripts/flaky_tests.py index 919a9278a9..9312f8b3e7 100755 --- a/scripts/flaky_tests.py +++ b/scripts/flaky_tests.py @@ -1,16 +1,21 @@ #! 
/usr/bin/env python3 +from __future__ import annotations + import argparse import json import logging import os from collections import defaultdict -from typing import Any, DefaultDict, Dict, Optional +from typing import TYPE_CHECKING import psycopg2 import psycopg2.extras import toml +if TYPE_CHECKING: + from typing import Any, Optional + FLAKY_TESTS_QUERY = """ SELECT DISTINCT parent_suite, suite, name @@ -33,7 +38,7 @@ def main(args: argparse.Namespace): build_type = args.build_type pg_version = args.pg_version - res: DefaultDict[str, DefaultDict[str, Dict[str, bool]]] + res: defaultdict[str, defaultdict[str, dict[str, bool]]] res = defaultdict(lambda: defaultdict(dict)) try: @@ -60,7 +65,7 @@ def main(args: argparse.Namespace): pageserver_virtual_file_io_engine_parameter = "" # re-use existing records of flaky tests from before parametrization by compaction_algorithm - def get_pageserver_default_tenant_config_compaction_algorithm() -> Optional[Dict[str, Any]]: + def get_pageserver_default_tenant_config_compaction_algorithm() -> Optional[dict[str, Any]]: """Duplicated from parametrize.py""" toml_table = os.getenv("PAGESERVER_DEFAULT_TENANT_CONFIG_COMPACTION_ALGORITHM") if toml_table is None: diff --git a/scripts/force_layer_download.py b/scripts/force_layer_download.py index 5472d86d8f..a4fd3f6132 100644 --- a/scripts/force_layer_download.py +++ b/scripts/force_layer_download.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import argparse import asyncio import json @@ -5,11 +7,15 @@ import logging import signal import sys from collections import defaultdict +from collections.abc import Awaitable from dataclasses import dataclass -from typing import Any, Awaitable, Dict, List, Tuple +from typing import TYPE_CHECKING import aiohttp +if TYPE_CHECKING: + from typing import Any + class ClientException(Exception): pass @@ -89,7 +95,7 @@ class Client: class Completed: """The status dict returned by the API""" - status: Dict[str, Any] + status: dict[str, Any] sigint_received = asyncio.Event() @@ -179,7 +185,7 @@ async def main_impl(args, report_out, client: Client): """ Returns OS exit status. 
""" - tenant_and_timline_ids: List[Tuple[str, str]] = [] + tenant_and_timline_ids: list[tuple[str, str]] = [] # fill tenant_and_timline_ids based on spec for spec in args.what: comps = spec.split(":") @@ -215,14 +221,14 @@ async def main_impl(args, report_out, client: Client): tenant_and_timline_ids = tmp logging.info("create tasks and process them at specified concurrency") - task_q: asyncio.Queue[Tuple[str, Awaitable[Any]]] = asyncio.Queue() + task_q: asyncio.Queue[tuple[str, Awaitable[Any]]] = asyncio.Queue() tasks = { f"{tid}:{tlid}": do_timeline(client, tid, tlid) for tid, tlid in tenant_and_timline_ids } for task in tasks.items(): task_q.put_nowait(task) - result_q: asyncio.Queue[Tuple[str, Any]] = asyncio.Queue() + result_q: asyncio.Queue[tuple[str, Any]] = asyncio.Queue() taskq_handlers = [] for _ in range(0, args.concurrent_tasks): taskq_handlers.append(taskq_handler(task_q, result_q)) diff --git a/scripts/ingest_perf_test_result.py b/scripts/ingest_perf_test_result.py index 35a1e29720..40071c01b0 100644 --- a/scripts/ingest_perf_test_result.py +++ b/scripts/ingest_perf_test_result.py @@ -1,4 +1,7 @@ #!/usr/bin/env python3 + +from __future__ import annotations + import argparse import json import logging diff --git a/scripts/ingest_regress_test_result-new-format.py b/scripts/ingest_regress_test_result-new-format.py index 40d7254e00..e0dd0a7189 100644 --- a/scripts/ingest_regress_test_result-new-format.py +++ b/scripts/ingest_regress_test_result-new-format.py @@ -1,5 +1,7 @@ #! /usr/bin/env python3 +from __future__ import annotations + import argparse import dataclasses import json @@ -11,7 +13,6 @@ from contextlib import contextmanager from dataclasses import dataclass from datetime import datetime, timezone from pathlib import Path -from typing import Tuple import backoff import psycopg2 @@ -91,7 +92,7 @@ def create_table(cur): cur.execute(CREATE_TABLE) -def parse_test_name(test_name: str) -> Tuple[str, int, str]: +def parse_test_name(test_name: str) -> tuple[str, int, str]: build_type, pg_version = None, None if match := TEST_NAME_RE.search(test_name): found = match.groupdict() diff --git a/scripts/sk_cleanup_tenants/script.py b/scripts/sk_cleanup_tenants/script.py index c20a4bb830..8af19ae7bd 100644 --- a/scripts/sk_cleanup_tenants/script.py +++ b/scripts/sk_cleanup_tenants/script.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import argparse import logging import os diff --git a/test_runner/cloud_regress/test_cloud_regress.py b/test_runner/cloud_regress/test_cloud_regress.py index de71357232..715d4a4881 100644 --- a/test_runner/cloud_regress/test_cloud_regress.py +++ b/test_runner/cloud_regress/test_cloud_regress.py @@ -2,6 +2,8 @@ Run the regression tests on the cloud instance of Neon """ +from __future__ import annotations + from pathlib import Path from typing import Any diff --git a/test_runner/conftest.py b/test_runner/conftest.py index 996ca4d652..d6e7fcf7ca 100644 --- a/test_runner/conftest.py +++ b/test_runner/conftest.py @@ -1,3 +1,5 @@ +from __future__ import annotations + pytest_plugins = ( "fixtures.pg_version", "fixtures.parametrize", diff --git a/test_runner/fixtures/__init__.py b/test_runner/fixtures/__init__.py index e69de29bb2..9d48db4f9f 100644 --- a/test_runner/fixtures/__init__.py +++ b/test_runner/fixtures/__init__.py @@ -0,0 +1 @@ +from __future__ import annotations diff --git a/test_runner/fixtures/benchmark_fixture.py b/test_runner/fixtures/benchmark_fixture.py index 0c36cd6ef7..88f9ec1cd0 100644 --- a/test_runner/fixtures/benchmark_fixture.py 
+++ b/test_runner/fixtures/benchmark_fixture.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import calendar import dataclasses import enum @@ -5,12 +7,11 @@ import json import os import re import timeit +from collections.abc import Iterator from contextlib import contextmanager from datetime import datetime from pathlib import Path - -# Type-related stuff -from typing import Callable, ClassVar, Dict, Iterator, Optional +from typing import TYPE_CHECKING, ClassVar import allure import pytest @@ -23,6 +24,10 @@ from fixtures.common_types import TenantId, TimelineId from fixtures.log_helper import log from fixtures.neon_fixtures import NeonPageserver +if TYPE_CHECKING: + from typing import Callable, Optional + + """ This file contains fixtures for micro-benchmarks. @@ -138,18 +143,6 @@ class PgBenchRunResult: @dataclasses.dataclass class PgBenchInitResult: - # Taken from https://github.com/postgres/postgres/blob/REL_15_1/src/bin/pgbench/pgbench.c#L5144-L5171 - EXTRACTORS: ClassVar[Dict[str, re.Pattern]] = { # type: ignore[type-arg] - "drop_tables": re.compile(r"drop tables (\d+\.\d+) s"), - "create_tables": re.compile(r"create tables (\d+\.\d+) s"), - "client_side_generate": re.compile(r"client-side generate (\d+\.\d+) s"), - "server_side_generate": re.compile(r"server-side generate (\d+\.\d+) s"), - "vacuum": re.compile(r"vacuum (\d+\.\d+) s"), - "primary_keys": re.compile(r"primary keys (\d+\.\d+) s"), - "foreign_keys": re.compile(r"foreign keys (\d+\.\d+) s"), - "total": re.compile(r"done in (\d+\.\d+) s"), # Total time printed by pgbench - } - total: Optional[float] drop_tables: Optional[float] create_tables: Optional[float] @@ -162,6 +155,20 @@ class PgBenchInitResult: start_timestamp: int end_timestamp: int + # Taken from https://github.com/postgres/postgres/blob/REL_15_1/src/bin/pgbench/pgbench.c#L5144-L5171 + # Class-level constant: parse_from_stderr reads cls.EXTRACTORS, so ClassVar must stay importable + # at runtime (dataclasses only honors it then) and the value must be a plain dict, not dataclasses.field(). + EXTRACTORS: ClassVar[dict[str, re.Pattern[str]]] = { + "drop_tables": re.compile(r"drop tables (\d+\.\d+) s"), + "create_tables": re.compile(r"create tables (\d+\.\d+) s"), + "client_side_generate": re.compile(r"client-side generate (\d+\.\d+) s"), + "server_side_generate": re.compile(r"server-side generate (\d+\.\d+) s"), + "vacuum": re.compile(r"vacuum (\d+\.\d+) s"), + "primary_keys": re.compile(r"primary keys (\d+\.\d+) s"), + "foreign_keys": re.compile(r"foreign keys (\d+\.\d+) s"), + "total": re.compile(r"done in (\d+\.\d+) s"), # Total time printed by pgbench + } + @classmethod def parse_from_stderr( cls, @@ -175,7 +182,7 @@ class PgBenchInitResult: last_line = stderr.splitlines()[-1] - timings: Dict[str, Optional[float]] = {} + timings: dict[str, Optional[float]] = {} last_line_items = re.split(r"\(|\)|,", last_line) for item in last_line_items: for key, regex in cls.EXTRACTORS.items(): @@ -385,7 +392,7 @@ class NeonBenchmarker: self, pageserver: NeonPageserver, metric_name: str, - label_filters: Optional[Dict[str, str]] = None, + label_filters: Optional[dict[str, str]] = None, ) -> int: """Fetch the value of given int counter from pageserver metrics.""" all_metrics = pageserver.http_client().get_metrics() diff --git a/test_runner/fixtures/common_types.py b/test_runner/fixtures/common_types.py index d8390138c9..3022c0279f 100644 --- a/test_runner/fixtures/common_types.py +++ b/test_runner/fixtures/common_types.py @@ -1,10 +1,16 @@ +from __future__ import annotations + import random from dataclasses import dataclass from enum import Enum from functools import total_ordering -from typing import Any, Dict, Type, 
TypeVar, Union +from typing import TYPE_CHECKING, TypeVar + +if TYPE_CHECKING: + from typing import Any, Union + + T = TypeVar("T", bound="Id") -T = TypeVar("T", bound="Id") DEFAULT_WAL_SEG_SIZE = 16 * 1024 * 1024 @@ -56,7 +62,7 @@ class Lsn: return NotImplemented return self.lsn_int - other.lsn_int - def __add__(self, other: Union[int, "Lsn"]) -> "Lsn": + def __add__(self, other: Union[int, Lsn]) -> Lsn: if isinstance(other, int): return Lsn(self.lsn_int + other) elif isinstance(other, Lsn): @@ -70,7 +76,7 @@ class Lsn: def as_int(self) -> int: return self.lsn_int - def segment_lsn(self, seg_sz: int = DEFAULT_WAL_SEG_SIZE) -> "Lsn": + def segment_lsn(self, seg_sz: int = DEFAULT_WAL_SEG_SIZE) -> Lsn: return Lsn(self.lsn_int - (self.lsn_int % seg_sz)) def segno(self, seg_sz: int = DEFAULT_WAL_SEG_SIZE) -> int: @@ -127,7 +133,7 @@ class Id: return hash(str(self.id)) @classmethod - def generate(cls: Type[T]) -> T: + def generate(cls: type[T]) -> T: """Generate a random ID""" return cls(random.randbytes(16).hex()) @@ -162,7 +168,7 @@ class TenantTimelineId: timeline_id: TimelineId @classmethod - def from_json(cls, d: Dict[str, Any]) -> "TenantTimelineId": + def from_json(cls, d: dict[str, Any]) -> TenantTimelineId: return TenantTimelineId( tenant_id=TenantId(d["tenant_id"]), timeline_id=TimelineId(d["timeline_id"]), @@ -181,7 +187,7 @@ class TenantShardId: assert self.shard_number < self.shard_count or self.shard_count == 0 @classmethod - def parse(cls: Type[TTenantShardId], input) -> TTenantShardId: + def parse(cls: type[TTenantShardId], input) -> TTenantShardId: if len(input) == 32: return cls( tenant_id=TenantId(input), diff --git a/test_runner/fixtures/compare_fixtures.py b/test_runner/fixtures/compare_fixtures.py index fb9c2d2b86..ce191ac91c 100644 --- a/test_runner/fixtures/compare_fixtures.py +++ b/test_runner/fixtures/compare_fixtures.py @@ -1,11 +1,13 @@ +from __future__ import annotations + import os import time from abc import ABC, abstractmethod +from collections.abc import Iterator from contextlib import _GeneratorContextManager, contextmanager # Type-related stuff from pathlib import Path -from typing import Dict, Iterator, List import pytest from _pytest.fixtures import FixtureRequest @@ -72,7 +74,7 @@ class PgCompare(ABC): pass @contextmanager - def record_pg_stats(self, pg_stats: List[PgStatTable]) -> Iterator[None]: + def record_pg_stats(self, pg_stats: list[PgStatTable]) -> Iterator[None]: init_data = self._retrieve_pg_stats(pg_stats) yield @@ -82,8 +84,8 @@ class PgCompare(ABC): for k in set(init_data) & set(data): self.zenbenchmark.record(k, data[k] - init_data[k], "", MetricReport.HIGHER_IS_BETTER) - def _retrieve_pg_stats(self, pg_stats: List[PgStatTable]) -> Dict[str, int]: - results: Dict[str, int] = {} + def _retrieve_pg_stats(self, pg_stats: list[PgStatTable]) -> dict[str, int]: + results: dict[str, int] = {} with self.pg.connect().cursor() as cur: for pg_stat in pg_stats: diff --git a/test_runner/fixtures/compute_reconfigure.py b/test_runner/fixtures/compute_reconfigure.py index 66fc35b6aa..d2305ea431 100644 --- a/test_runner/fixtures/compute_reconfigure.py +++ b/test_runner/fixtures/compute_reconfigure.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import concurrent.futures from typing import Any diff --git a/test_runner/fixtures/endpoint/__init__.py b/test_runner/fixtures/endpoint/__init__.py index e69de29bb2..9d48db4f9f 100644 --- a/test_runner/fixtures/endpoint/__init__.py +++ b/test_runner/fixtures/endpoint/__init__.py @@ -0,0 +1 @@ +from __future__ 
import annotations diff --git a/test_runner/fixtures/endpoint/http.py b/test_runner/fixtures/endpoint/http.py index 42f0539c19..aedd711dbd 100644 --- a/test_runner/fixtures/endpoint/http.py +++ b/test_runner/fixtures/endpoint/http.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import requests from requests.adapters import HTTPAdapter diff --git a/test_runner/fixtures/flaky.py b/test_runner/fixtures/flaky.py index d13f3318b0..4ca87520a0 100644 --- a/test_runner/fixtures/flaky.py +++ b/test_runner/fixtures/flaky.py @@ -1,6 +1,9 @@ +from __future__ import annotations + import json +from collections.abc import MutableMapping from pathlib import Path -from typing import Any, List, MutableMapping, cast +from typing import TYPE_CHECKING, cast import pytest from _pytest.config import Config @@ -10,6 +13,9 @@ from allure_pytest.utils import allure_name, allure_suite_labels from fixtures.log_helper import log +if TYPE_CHECKING: + from typing import Any + """ The plugin reruns flaky tests. It uses `pytest.mark.flaky` provided by `pytest-rerunfailures` plugin and flaky tests detected by `scripts/flaky_tests.py` @@ -27,7 +33,7 @@ def pytest_addoption(parser: Parser): ) -def pytest_collection_modifyitems(config: Config, items: List[pytest.Item]): +def pytest_collection_modifyitems(config: Config, items: list[pytest.Item]): if not config.getoption("--flaky-tests-json"): return @@ -66,5 +72,5 @@ def pytest_collection_modifyitems(config: Config, items: List[pytest.Item]): # - [2] https://github.com/pytest-dev/pytest-timeout/issues/142 timeout_marker = item.get_closest_marker("timeout") if timeout_marker is not None: - kwargs = cast(MutableMapping[str, Any], timeout_marker.kwargs) + kwargs = cast("MutableMapping[str, Any]", timeout_marker.kwargs) kwargs["func_only"] = True diff --git a/test_runner/fixtures/httpserver.py b/test_runner/fixtures/httpserver.py index a321d59266..9d5b5d6422 100644 --- a/test_runner/fixtures/httpserver.py +++ b/test_runner/fixtures/httpserver.py @@ -1,4 +1,4 @@ -from typing import Tuple +from __future__ import annotations import pytest from pytest_httpserver import HTTPServer @@ -40,6 +40,6 @@ def httpserver(make_httpserver): @pytest.fixture(scope="function") -def httpserver_listen_address(port_distributor) -> Tuple[str, int]: +def httpserver_listen_address(port_distributor) -> tuple[str, int]: port = port_distributor.get_port() return ("localhost", port) diff --git a/test_runner/fixtures/log_helper.py b/test_runner/fixtures/log_helper.py index 17f2402391..70d76a39c4 100644 --- a/test_runner/fixtures/log_helper.py +++ b/test_runner/fixtures/log_helper.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import logging import logging.config diff --git a/test_runner/fixtures/metrics.py b/test_runner/fixtures/metrics.py index 005dc6cb0d..adc90a41d0 100644 --- a/test_runner/fixtures/metrics.py +++ b/test_runner/fixtures/metrics.py @@ -1,21 +1,26 @@ +from __future__ import annotations + from collections import defaultdict -from typing import Dict, List, Optional, Tuple +from typing import TYPE_CHECKING from prometheus_client.parser import text_string_to_metric_families from prometheus_client.samples import Sample from fixtures.log_helper import log +if TYPE_CHECKING: + from typing import Optional + class Metrics: - metrics: Dict[str, List[Sample]] + metrics: dict[str, list[Sample]] name: str def __init__(self, name: str = ""): self.metrics = defaultdict(list) self.name = name - def query_all(self, name: str, filter: Optional[Dict[str, str]] = None) -> List[Sample]: + def 
query_all(self, name: str, filter: Optional[dict[str, str]] = None) -> list[Sample]: filter = filter or {} res = [] @@ -27,7 +32,7 @@ class Metrics: pass return res - def query_one(self, name: str, filter: Optional[Dict[str, str]] = None) -> Sample: + def query_one(self, name: str, filter: Optional[dict[str, str]] = None) -> Sample: res = self.query_all(name, filter or {}) assert len(res) == 1, f"expected single sample for {name} {filter}, found {res}" return res[0] @@ -43,7 +48,7 @@ class MetricsGetter: raise NotImplementedError() def get_metric_value( - self, name: str, filter: Optional[Dict[str, str]] = None + self, name: str, filter: Optional[dict[str, str]] = None ) -> Optional[float]: metrics = self.get_metrics() results = metrics.query_all(name, filter=filter) @@ -54,8 +59,8 @@ class MetricsGetter: return results[0].value def get_metrics_values( - self, names: list[str], filter: Optional[Dict[str, str]] = None, absence_ok=False - ) -> Dict[str, float]: + self, names: list[str], filter: Optional[dict[str, str]] = None, absence_ok=False + ) -> dict[str, float]: """ When fetching multiple named metrics, it is more efficient to use this than to call `get_metric_value` repeatedly. @@ -97,7 +102,7 @@ def parse_metrics(text: str, name: str = "") -> Metrics: return metrics -def histogram(prefix_without_trailing_underscore: str) -> List[str]: +def histogram(prefix_without_trailing_underscore: str) -> list[str]: assert not prefix_without_trailing_underscore.endswith("_") return [f"{prefix_without_trailing_underscore}_{x}" for x in ["bucket", "count", "sum"]] @@ -107,7 +112,7 @@ def counter(name: str) -> str: return f"{name}_total" -PAGESERVER_PER_TENANT_REMOTE_TIMELINE_CLIENT_METRICS: Tuple[str, ...] = ( +PAGESERVER_PER_TENANT_REMOTE_TIMELINE_CLIENT_METRICS: tuple[str, ...] = ( "pageserver_remote_timeline_client_calls_started_total", "pageserver_remote_timeline_client_calls_finished_total", "pageserver_remote_physical_size", @@ -115,7 +120,7 @@ PAGESERVER_PER_TENANT_REMOTE_TIMELINE_CLIENT_METRICS: Tuple[str, ...] = ( "pageserver_remote_timeline_client_bytes_finished_total", ) -PAGESERVER_GLOBAL_METRICS: Tuple[str, ...] = ( +PAGESERVER_GLOBAL_METRICS: tuple[str, ...] = ( "pageserver_storage_operations_seconds_global_count", "pageserver_storage_operations_seconds_global_sum", "pageserver_storage_operations_seconds_global_bucket", @@ -147,7 +152,7 @@ PAGESERVER_GLOBAL_METRICS: Tuple[str, ...] = ( counter("pageserver_tenant_throttling_count_global"), ) -PAGESERVER_PER_TENANT_METRICS: Tuple[str, ...] = ( +PAGESERVER_PER_TENANT_METRICS: tuple[str, ...] 
= ( "pageserver_current_logical_size", "pageserver_resident_physical_size", "pageserver_io_operations_bytes_total", diff --git a/test_runner/fixtures/neon_api.py b/test_runner/fixtures/neon_api.py index 0636cfad06..846a790f1f 100644 --- a/test_runner/fixtures/neon_api.py +++ b/test_runner/fixtures/neon_api.py @@ -6,12 +6,12 @@ from typing import TYPE_CHECKING, cast import requests if TYPE_CHECKING: - from typing import Any, Dict, Literal, Optional, Union + from typing import Any, Literal, Optional, Union from fixtures.pg_version import PgVersion -def connection_parameters_to_env(params: Dict[str, str]) -> Dict[str, str]: +def connection_parameters_to_env(params: dict[str, str]) -> dict[str, str]: return { "PGHOST": params["host"], "PGDATABASE": params["database"], @@ -41,8 +41,8 @@ class NeonAPI: branch_name: Optional[str] = None, branch_role_name: Optional[str] = None, branch_database_name: Optional[str] = None, - ) -> Dict[str, Any]: - data: Dict[str, Any] = { + ) -> dict[str, Any]: + data: dict[str, Any] = { "project": { "branch": {}, }, @@ -70,9 +70,9 @@ class NeonAPI: assert resp.status_code == 201 - return cast("Dict[str, Any]", resp.json()) + return cast("dict[str, Any]", resp.json()) - def get_project_details(self, project_id: str) -> Dict[str, Any]: + def get_project_details(self, project_id: str) -> dict[str, Any]: resp = self.__request( "GET", f"/projects/{project_id}", @@ -82,12 +82,12 @@ class NeonAPI: }, ) assert resp.status_code == 200 - return cast("Dict[str, Any]", resp.json()) + return cast("dict[str, Any]", resp.json()) def delete_project( self, project_id: str, - ) -> Dict[str, Any]: + ) -> dict[str, Any]: resp = self.__request( "DELETE", f"/projects/{project_id}", @@ -99,13 +99,13 @@ class NeonAPI: assert resp.status_code == 200 - return cast("Dict[str, Any]", resp.json()) + return cast("dict[str, Any]", resp.json()) def start_endpoint( self, project_id: str, endpoint_id: str, - ) -> Dict[str, Any]: + ) -> dict[str, Any]: resp = self.__request( "POST", f"/projects/{project_id}/endpoints/{endpoint_id}/start", @@ -116,13 +116,13 @@ class NeonAPI: assert resp.status_code == 200 - return cast("Dict[str, Any]", resp.json()) + return cast("dict[str, Any]", resp.json()) def suspend_endpoint( self, project_id: str, endpoint_id: str, - ) -> Dict[str, Any]: + ) -> dict[str, Any]: resp = self.__request( "POST", f"/projects/{project_id}/endpoints/{endpoint_id}/suspend", @@ -133,13 +133,13 @@ class NeonAPI: assert resp.status_code == 200 - return cast("Dict[str, Any]", resp.json()) + return cast("dict[str, Any]", resp.json()) def restart_endpoint( self, project_id: str, endpoint_id: str, - ) -> Dict[str, Any]: + ) -> dict[str, Any]: resp = self.__request( "POST", f"/projects/{project_id}/endpoints/{endpoint_id}/restart", @@ -150,16 +150,16 @@ class NeonAPI: assert resp.status_code == 200 - return cast("Dict[str, Any]", resp.json()) + return cast("dict[str, Any]", resp.json()) def create_endpoint( self, project_id: str, branch_id: str, endpoint_type: Literal["read_write", "read_only"], - settings: Dict[str, Any], - ) -> Dict[str, Any]: - data: Dict[str, Any] = { + settings: dict[str, Any], + ) -> dict[str, Any]: + data: dict[str, Any] = { "endpoint": { "branch_id": branch_id, }, @@ -182,7 +182,7 @@ class NeonAPI: assert resp.status_code == 201 - return cast("Dict[str, Any]", resp.json()) + return cast("dict[str, Any]", resp.json()) def get_connection_uri( self, @@ -192,7 +192,7 @@ class NeonAPI: database_name: str = "neondb", role_name: str = "neondb_owner", pooled: bool = True, - ) 
-> Dict[str, Any]: + ) -> dict[str, Any]: resp = self.__request( "GET", f"/projects/{project_id}/connection_uri", @@ -210,9 +210,9 @@ assert resp.status_code == 200 - return cast("Dict[str, Any]", resp.json()) + return cast("dict[str, Any]", resp.json()) - def get_branches(self, project_id: str) -> Dict[str, Any]: + def get_branches(self, project_id: str) -> dict[str, Any]: resp = self.__request( "GET", f"/projects/{project_id}/branches", @@ -223,9 +223,9 @@ assert resp.status_code == 200 - return cast("Dict[str, Any]", resp.json()) + return cast("dict[str, Any]", resp.json()) - def get_endpoints(self, project_id: str) -> Dict[str, Any]: + def get_endpoints(self, project_id: str) -> dict[str, Any]: resp = self.__request( "GET", f"/projects/{project_id}/endpoints", @@ -236,9 +236,9 @@ assert resp.status_code == 200 - return cast("Dict[str, Any]", resp.json()) + return cast("dict[str, Any]", resp.json()) - def get_operations(self, project_id: str) -> Dict[str, Any]: + def get_operations(self, project_id: str) -> dict[str, Any]: resp = self.__request( "GET", f"/projects/{project_id}/operations", @@ -250,7 +250,7 @@ assert resp.status_code == 200 - return cast("Dict[str, Any]", resp.json()) + return cast("dict[str, Any]", resp.json()) def wait_for_operation_to_finish(self, project_id: str): has_running = True diff --git a/test_runner/fixtures/neon_cli.py b/test_runner/fixtures/neon_cli.py index c27d22620e..2d499fd982 100644 --- a/test_runner/fixtures/neon_cli.py +++ b/test_runner/fixtures/neon_cli.py @@ -9,15 +9,7 @@ import tempfile import textwrap from itertools import chain, product from pathlib import Path -from typing import ( - Any, - Dict, - List, - Optional, - Tuple, - TypeVar, - cast, -) +from typing import TYPE_CHECKING, cast import toml @@ -27,7 +19,15 @@ from fixtures.pageserver.common_types import IndexPartDump from fixtures.pg_version import PgVersion from fixtures.utils import AuxFileStore -T = TypeVar("T") +if TYPE_CHECKING: + from typing import ( + Any, + Optional, + TypeVar, + # NB: cast is needed at runtime and is imported eagerly above; do not re-import it here. + ) + + T = TypeVar("T") class AbstractNeonCli(abc.ABC): """ A typed wrapper around an arbitrary Neon CLI tool. Do not use directly, use specific subclasses instead. """ - def __init__(self, extra_env: Optional[Dict[str, str]], binpath: Path): + def __init__(self, extra_env: Optional[dict[str, str]], binpath: Path): self.extra_env = extra_env self.binpath = binpath def raw_cli( self, - arguments: List[str], - extra_env_vars: Optional[Dict[str, str]] = None, + arguments: list[str], + extra_env_vars: Optional[dict[str, str]] = None, check_return_code=True, timeout=None, - ) -> "subprocess.CompletedProcess[str]": + ) -> subprocess.CompletedProcess[str]: """ Run the command with the specified arguments. 
@@ -92,9 +92,8 @@ class AbstractNeonCli(abc.ABC): args, env=env_vars, check=False, - universal_newlines=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, + text=True, + capture_output=True, timeout=timeout, ) except subprocess.TimeoutExpired as e: @@ -118,7 +117,7 @@ class AbstractNeonCli(abc.ABC): if len(lines) < 2: log.debug(f"Run {res.args} success: {stripped}") else: - log.debug("Run %s success:\n%s" % (res.args, textwrap.indent(stripped, indent))) + log.debug("Run %s success:\n%s", res.args, textwrap.indent(stripped, indent)) elif check_return_code: # this way command output will be in recorded and shown in CI in failure message indent = indent * 2 @@ -175,7 +174,7 @@ class NeonLocalCli(AbstractNeonCli): def __init__( self, - extra_env: Optional[Dict[str, str]], + extra_env: Optional[dict[str, str]], binpath: Path, repo_dir: Path, pg_distrib_dir: Path, @@ -197,7 +196,7 @@ class NeonLocalCli(AbstractNeonCli): tenant_id: TenantId, timeline_id: TimelineId, pg_version: PgVersion, - conf: Optional[Dict[str, Any]] = None, + conf: Optional[dict[str, Any]] = None, shard_count: Optional[int] = None, shard_stripe_size: Optional[int] = None, placement_policy: Optional[str] = None, @@ -258,7 +257,7 @@ class NeonLocalCli(AbstractNeonCli): res = self.raw_cli(["tenant", "set-default", "--tenant-id", str(tenant_id)]) res.check_returncode() - def tenant_config(self, tenant_id: TenantId, conf: Dict[str, str]): + def tenant_config(self, tenant_id: TenantId, conf: dict[str, str]): """ Update tenant config. """ @@ -274,7 +273,7 @@ class NeonLocalCli(AbstractNeonCli): res = self.raw_cli(args) res.check_returncode() - def tenant_list(self) -> "subprocess.CompletedProcess[str]": + def tenant_list(self) -> subprocess.CompletedProcess[str]: res = self.raw_cli(["tenant", "list"]) res.check_returncode() return res @@ -368,7 +367,7 @@ class NeonLocalCli(AbstractNeonCli): res = self.raw_cli(cmd) res.check_returncode() - def timeline_list(self, tenant_id: TenantId) -> List[Tuple[str, TimelineId]]: + def timeline_list(self, tenant_id: TenantId) -> list[tuple[str, TimelineId]]: """ Returns a list of (branch_name, timeline_id) tuples out of parsed `neon timeline list` CLI output. 
""" @@ -389,9 +388,9 @@ class NeonLocalCli(AbstractNeonCli): def init( self, - init_config: Dict[str, Any], + init_config: dict[str, Any], force: Optional[str] = None, - ) -> "subprocess.CompletedProcess[str]": + ) -> subprocess.CompletedProcess[str]: with tempfile.NamedTemporaryFile(mode="w+") as init_config_tmpfile: init_config_tmpfile.write(toml.dumps(init_config)) init_config_tmpfile.flush() @@ -434,15 +433,15 @@ class NeonLocalCli(AbstractNeonCli): def pageserver_start( self, id: int, - extra_env_vars: Optional[Dict[str, str]] = None, + extra_env_vars: Optional[dict[str, str]] = None, timeout_in_seconds: Optional[int] = None, - ) -> "subprocess.CompletedProcess[str]": + ) -> subprocess.CompletedProcess[str]: start_args = ["pageserver", "start", f"--id={id}"] if timeout_in_seconds is not None: start_args.append(f"--start-timeout={timeout_in_seconds}s") return self.raw_cli(start_args, extra_env_vars=extra_env_vars) - def pageserver_stop(self, id: int, immediate=False) -> "subprocess.CompletedProcess[str]": + def pageserver_stop(self, id: int, immediate=False) -> subprocess.CompletedProcess[str]: cmd = ["pageserver", "stop", f"--id={id}"] if immediate: cmd.extend(["-m", "immediate"]) @@ -453,10 +452,10 @@ class NeonLocalCli(AbstractNeonCli): def safekeeper_start( self, id: int, - extra_opts: Optional[List[str]] = None, - extra_env_vars: Optional[Dict[str, str]] = None, + extra_opts: Optional[list[str]] = None, + extra_env_vars: Optional[dict[str, str]] = None, timeout_in_seconds: Optional[int] = None, - ) -> "subprocess.CompletedProcess[str]": + ) -> subprocess.CompletedProcess[str]: if extra_opts is not None: extra_opts = [f"-e={opt}" for opt in extra_opts] else: @@ -469,7 +468,7 @@ class NeonLocalCli(AbstractNeonCli): def safekeeper_stop( self, id: Optional[int] = None, immediate=False - ) -> "subprocess.CompletedProcess[str]": + ) -> subprocess.CompletedProcess[str]: args = ["safekeeper", "stop"] if id is not None: args.append(str(id)) @@ -479,13 +478,13 @@ class NeonLocalCli(AbstractNeonCli): def storage_broker_start( self, timeout_in_seconds: Optional[int] = None - ) -> "subprocess.CompletedProcess[str]": + ) -> subprocess.CompletedProcess[str]: cmd = ["storage_broker", "start"] if timeout_in_seconds is not None: cmd.append(f"--start-timeout={timeout_in_seconds}s") return self.raw_cli(cmd) - def storage_broker_stop(self) -> "subprocess.CompletedProcess[str]": + def storage_broker_stop(self) -> subprocess.CompletedProcess[str]: cmd = ["storage_broker", "stop"] return self.raw_cli(cmd) @@ -501,7 +500,7 @@ class NeonLocalCli(AbstractNeonCli): lsn: Optional[Lsn] = None, pageserver_id: Optional[int] = None, allow_multiple=False, - ) -> "subprocess.CompletedProcess[str]": + ) -> subprocess.CompletedProcess[str]: args = [ "endpoint", "create", @@ -534,12 +533,12 @@ class NeonLocalCli(AbstractNeonCli): def endpoint_start( self, endpoint_id: str, - safekeepers: Optional[List[int]] = None, + safekeepers: Optional[list[int]] = None, remote_ext_config: Optional[str] = None, pageserver_id: Optional[int] = None, allow_multiple=False, basebackup_request_tries: Optional[int] = None, - ) -> "subprocess.CompletedProcess[str]": + ) -> subprocess.CompletedProcess[str]: args = [ "endpoint", "start", @@ -568,9 +567,9 @@ class NeonLocalCli(AbstractNeonCli): endpoint_id: str, tenant_id: Optional[TenantId] = None, pageserver_id: Optional[int] = None, - safekeepers: Optional[List[int]] = None, + safekeepers: Optional[list[int]] = None, check_return_code=True, - ) -> "subprocess.CompletedProcess[str]": + ) -> 
subprocess.CompletedProcess[str]: args = ["endpoint", "reconfigure", endpoint_id] if tenant_id is not None: args.extend(["--tenant-id", str(tenant_id)]) @@ -586,7 +585,7 @@ class NeonLocalCli(AbstractNeonCli): destroy=False, check_return_code=True, mode: Optional[str] = None, - ) -> "subprocess.CompletedProcess[str]": + ) -> subprocess.CompletedProcess[str]: args = [ "endpoint", "stop", @@ -602,7 +601,7 @@ class NeonLocalCli(AbstractNeonCli): def mappings_map_branch( self, name: str, tenant_id: TenantId, timeline_id: TimelineId - ) -> "subprocess.CompletedProcess[str]": + ) -> subprocess.CompletedProcess[str]: """ Map tenant id and timeline id to a neon_local branch name. They do not have to exist. Usually needed when creating branches via PageserverHttpClient and not neon_local. @@ -623,10 +622,10 @@ class NeonLocalCli(AbstractNeonCli): return self.raw_cli(args, check_return_code=True) - def start(self, check_return_code=True) -> "subprocess.CompletedProcess[str]": + def start(self, check_return_code=True) -> subprocess.CompletedProcess[str]: return self.raw_cli(["start"], check_return_code=check_return_code) - def stop(self, check_return_code=True) -> "subprocess.CompletedProcess[str]": + def stop(self, check_return_code=True) -> subprocess.CompletedProcess[str]: return self.raw_cli(["stop"], check_return_code=check_return_code) @@ -638,7 +637,7 @@ class WalCraft(AbstractNeonCli): COMMAND = "wal_craft" - def postgres_config(self) -> List[str]: + def postgres_config(self) -> list[str]: res = self.raw_cli(["print-postgres-config"]) res.check_returncode() return res.stdout.split("\n") diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index 21cc7565c9..5a7f6e46d3 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -13,6 +13,7 @@ import threading import time import uuid from collections import defaultdict +from collections.abc import Iterable, Iterator from contextlib import closing, contextmanager from dataclasses import dataclass from datetime import datetime @@ -21,20 +22,7 @@ from fcntl import LOCK_EX, LOCK_UN, flock from functools import cached_property from pathlib import Path from types import TracebackType -from typing import ( - Any, - Callable, - Dict, - Iterable, - Iterator, - List, - Optional, - Tuple, - Type, - TypeVar, - Union, - cast, -) +from typing import TYPE_CHECKING, cast from urllib.parse import quote, urlparse import asyncpg @@ -99,7 +87,17 @@ from fixtures.utils import AuxFileStore as AuxFileStore # reexport from .neon_api import NeonAPI, NeonApiEndpoint -T = TypeVar("T") +if TYPE_CHECKING: + from typing import ( + Any, + Callable, + Optional, + TypeVar, + Union, + ) + + T = TypeVar("T") + """ This file contains pytest fixtures. A fixture is a test resource that can be @@ -118,7 +116,7 @@ Don't import functions from this file, or pytest will emit warnings. Instead put directly-importable functions into utils.py or another separate file. """ -Env = Dict[str, str] +Env = dict[str, str] DEFAULT_OUTPUT_DIR: str = "test_output" DEFAULT_BRANCH_NAME: str = "main" @@ -250,7 +248,7 @@ class PgProtocol: """ return str(make_dsn(**self.conn_options(**kwargs))) - def conn_options(self, **kwargs: Any) -> Dict[str, Any]: + def conn_options(self, **kwargs: Any) -> dict[str, Any]: """ Construct a dictionary of connection options from default values and extra parameters. An option can be dropped from the returning dictionary by None-valued extra parameter. 
@@ -319,7 +317,7 @@ class PgProtocol: conn_options["server_settings"] = {key: val} return await asyncpg.connect(**conn_options) - def safe_psql(self, query: str, **kwargs: Any) -> List[Tuple[Any, ...]]: + def safe_psql(self, query: str, **kwargs: Any) -> list[tuple[Any, ...]]: """ Execute query against the node and return all rows. This method passes all extra params to connstr. @@ -328,12 +326,12 @@ class PgProtocol: def safe_psql_many( self, queries: Iterable[str], log_query=True, **kwargs: Any - ) -> List[List[Tuple[Any, ...]]]: + ) -> list[list[tuple[Any, ...]]]: """ Execute queries against the node and return all rows. This method passes all extra params to connstr. """ - result: List[List[Any]] = [] + result: list[list[Any]] = [] with closing(self.connect(**kwargs)) as conn: with conn.cursor() as cur: for query in queries: @@ -379,7 +377,7 @@ class NeonEnvBuilder: test_overlay_dir: Optional[Path] = None, pageserver_remote_storage: Optional[RemoteStorage] = None, # toml that will be decomposed into `--config-override` flags during `pageserver --init` - pageserver_config_override: Optional[str | Callable[[Dict[str, Any]], None]] = None, + pageserver_config_override: Optional[str | Callable[[dict[str, Any]], None]] = None, num_safekeepers: int = 1, num_pageservers: int = 1, # Use non-standard SK ids to check for various parsing bugs @@ -394,7 +392,7 @@ class NeonEnvBuilder: initial_timeline: Optional[TimelineId] = None, pageserver_virtual_file_io_engine: Optional[str] = None, pageserver_aux_file_policy: Optional[AuxFileStore] = None, - pageserver_default_tenant_config_compaction_algorithm: Optional[Dict[str, Any]] = None, + pageserver_default_tenant_config_compaction_algorithm: Optional[dict[str, Any]] = None, safekeeper_extra_opts: Optional[list[str]] = None, storage_controller_port_override: Optional[int] = None, pageserver_io_buffer_alignment: Optional[int] = None, @@ -429,7 +427,7 @@ class NeonEnvBuilder: self.enable_scrub_on_exit = True self.test_output_dir = test_output_dir self.test_overlay_dir = test_overlay_dir - self.overlay_mounts_created_by_us: List[Tuple[str, Path]] = [] + self.overlay_mounts_created_by_us: list[tuple[str, Path]] = [] self.config_init_force: Optional[str] = None self.top_output_dir = top_output_dir self.control_plane_compute_hook_api: Optional[str] = None @@ -438,7 +436,7 @@ class NeonEnvBuilder: self.pageserver_virtual_file_io_engine: Optional[str] = pageserver_virtual_file_io_engine self.pageserver_default_tenant_config_compaction_algorithm: Optional[ - Dict[str, Any] + dict[str, Any] ] = pageserver_default_tenant_config_compaction_algorithm if self.pageserver_default_tenant_config_compaction_algorithm is not None: log.debug( @@ -468,7 +466,7 @@ class NeonEnvBuilder: def init_start( self, - initial_tenant_conf: Optional[Dict[str, Any]] = None, + initial_tenant_conf: Optional[dict[str, Any]] = None, default_remote_storage_if_missing: bool = True, initial_tenant_shard_count: Optional[int] = None, initial_tenant_shard_stripe_size: Optional[int] = None, @@ -823,7 +821,7 @@ class NeonEnvBuilder: overlayfs_mounts = {mountpoint for _, mountpoint in self.overlay_mounts_created_by_us} - directories_to_clean: List[Path] = [] + directories_to_clean: list[Path] = [] for test_entry in Path(self.repo_dir).glob("**/*"): if test_entry in overlayfs_mounts: continue @@ -854,12 +852,12 @@ class NeonEnvBuilder: if isinstance(x, S3Storage): x.do_cleanup() - def __enter__(self) -> "NeonEnvBuilder": + def __enter__(self) -> NeonEnvBuilder: return self def __exit__( self, - 
exc_type: Optional[Type[BaseException]], + exc_type: Optional[type[BaseException]], exc_value: Optional[BaseException], traceback: Optional[TracebackType], ): @@ -970,8 +968,8 @@ class NeonEnv: self.port_distributor = config.port_distributor self.s3_mock_server = config.mock_s3_server self.endpoints = EndpointFactory(self) - self.safekeepers: List[Safekeeper] = [] - self.pageservers: List[NeonPageserver] = [] + self.safekeepers: list[Safekeeper] = [] + self.pageservers: list[NeonPageserver] = [] self.broker = NeonBroker(self) self.pageserver_remote_storage = config.pageserver_remote_storage self.safekeepers_remote_storage = config.safekeepers_remote_storage @@ -1043,7 +1041,7 @@ class NeonEnv: self.pageserver_io_buffer_alignment = config.pageserver_io_buffer_alignment # Create the neon_local's `NeonLocalInitConf` - cfg: Dict[str, Any] = { + cfg: dict[str, Any] = { "default_tenant_id": str(self.initial_tenant), "broker": { "listen_addr": self.broker.listen_addr(), @@ -1072,7 +1070,7 @@ class NeonEnv: http=self.port_distributor.get_port(), ) - ps_cfg: Dict[str, Any] = { + ps_cfg: dict[str, Any] = { "id": ps_id, "listen_pg_addr": f"localhost:{pageserver_port.pg}", "listen_http_addr": f"localhost:{pageserver_port.http}", @@ -1120,7 +1118,7 @@ class NeonEnv: http=self.port_distributor.get_port(), ) id = config.safekeepers_id_start + i # assign ids sequentially - sk_cfg: Dict[str, Any] = { + sk_cfg: dict[str, Any] = { "id": id, "pg_port": port.pg, "pg_tenant_only_port": port.pg_tenant_only, @@ -1285,9 +1283,8 @@ class NeonEnv: res = subprocess.run( [bin_pageserver, "--version"], check=True, - universal_newlines=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, + text=True, + capture_output=True, ) return res.stdout @@ -1330,13 +1327,13 @@ class NeonEnv: self, tenant_id: Optional[TenantId] = None, timeline_id: Optional[TimelineId] = None, - conf: Optional[Dict[str, Any]] = None, + conf: Optional[dict[str, Any]] = None, shard_count: Optional[int] = None, shard_stripe_size: Optional[int] = None, placement_policy: Optional[str] = None, set_default: bool = False, aux_file_policy: Optional[AuxFileStore] = None, - ) -> Tuple[TenantId, TimelineId]: + ) -> tuple[TenantId, TimelineId]: """ Creates a new tenant, returns its id and its initial timeline's id. """ @@ -1357,7 +1354,7 @@ class NeonEnv: return tenant_id, timeline_id - def config_tenant(self, tenant_id: Optional[TenantId], conf: Dict[str, str]): + def config_tenant(self, tenant_id: Optional[TenantId], conf: dict[str, str]): """ Update tenant config. 
""" @@ -1409,7 +1406,7 @@ def neon_simple_env( pg_version: PgVersion, pageserver_virtual_file_io_engine: str, pageserver_aux_file_policy: Optional[AuxFileStore], - pageserver_default_tenant_config_compaction_algorithm: Optional[Dict[str, Any]], + pageserver_default_tenant_config_compaction_algorithm: Optional[dict[str, Any]], pageserver_io_buffer_alignment: Optional[int], ) -> Iterator[NeonEnv]: """ @@ -1457,7 +1454,7 @@ def neon_env_builder( test_overlay_dir: Path, top_output_dir: Path, pageserver_virtual_file_io_engine: str, - pageserver_default_tenant_config_compaction_algorithm: Optional[Dict[str, Any]], + pageserver_default_tenant_config_compaction_algorithm: Optional[dict[str, Any]], pageserver_aux_file_policy: Optional[AuxFileStore], record_property: Callable[[str, object], None], pageserver_io_buffer_alignment: Optional[int], @@ -1519,7 +1516,7 @@ class LogUtils: def assert_log_contains( self, pattern: str, offset: None | LogCursor = None - ) -> Tuple[str, LogCursor]: + ) -> tuple[str, LogCursor]: """Convenient for use inside wait_until()""" res = self.log_contains(pattern, offset=offset) @@ -1528,7 +1525,7 @@ class LogUtils: def log_contains( self, pattern: str, offset: None | LogCursor = None - ) -> Optional[Tuple[str, LogCursor]]: + ) -> Optional[tuple[str, LogCursor]]: """Check that the log contains a line that matches the given regex""" logfile = self.logfile if not logfile.exists(): @@ -1609,7 +1606,7 @@ class NeonStorageController(MetricsGetter, LogUtils): self.running = True return self - def stop(self, immediate: bool = False) -> "NeonStorageController": + def stop(self, immediate: bool = False) -> NeonStorageController: if self.running: self.env.neon_cli.storage_controller_stop(immediate) self.running = False @@ -1671,7 +1668,7 @@ class NeonStorageController(MetricsGetter, LogUtils): return resp - def headers(self, scope: Optional[TokenScope]) -> Dict[str, str]: + def headers(self, scope: Optional[TokenScope]) -> dict[str, str]: headers = {} if self.auth_enabled and scope is not None: jwt_token = self.env.auth_keys.generate_token(scope=scope) @@ -1857,13 +1854,13 @@ class NeonStorageController(MetricsGetter, LogUtils): tenant_id: TenantId, shard_count: Optional[int] = None, shard_stripe_size: Optional[int] = None, - tenant_config: Optional[Dict[Any, Any]] = None, - placement_policy: Optional[Union[Dict[Any, Any] | str]] = None, + tenant_config: Optional[dict[Any, Any]] = None, + placement_policy: Optional[Union[dict[Any, Any] | str]] = None, ): """ Use this rather than pageserver_api() when you need to include shard parameters """ - body: Dict[str, Any] = {"new_tenant_id": str(tenant_id)} + body: dict[str, Any] = {"new_tenant_id": str(tenant_id)} if shard_count is not None: shard_params = {"count": shard_count} @@ -2079,8 +2076,8 @@ class NeonStorageController(MetricsGetter, LogUtils): time.sleep(backoff) - def metadata_health_update(self, healthy: List[TenantShardId], unhealthy: List[TenantShardId]): - body: Dict[str, Any] = { + def metadata_health_update(self, healthy: list[TenantShardId], unhealthy: list[TenantShardId]): + body: dict[str, Any] = { "healthy_tenant_shards": [str(t) for t in healthy], "unhealthy_tenant_shards": [str(t) for t in unhealthy], } @@ -2101,7 +2098,7 @@ class NeonStorageController(MetricsGetter, LogUtils): return response.json() def metadata_health_list_outdated(self, duration: str): - body: Dict[str, Any] = {"not_scrubbed_for": duration} + body: dict[str, Any] = {"not_scrubbed_for": duration} response = self.request( "POST", @@ -2135,7 +2132,7 
@@ class NeonStorageController(MetricsGetter, LogUtils): response.raise_for_status() return response.json() - def configure_failpoints(self, config_strings: Tuple[str, str] | List[Tuple[str, str]]): + def configure_failpoints(self, config_strings: tuple[str, str] | list[tuple[str, str]]): if isinstance(config_strings, tuple): pairs = [config_strings] else: @@ -2152,13 +2149,13 @@ class NeonStorageController(MetricsGetter, LogUtils): log.info(f"Got failpoints request response code {res.status_code}") res.raise_for_status() - def get_tenants_placement(self) -> defaultdict[str, Dict[str, Any]]: + def get_tenants_placement(self) -> defaultdict[str, dict[str, Any]]: """ Get the intent and observed placements of all tenants known to the storage controller. """ tenants = self.tenant_list() - tenant_placement: defaultdict[str, Dict[str, Any]] = defaultdict( + tenant_placement: defaultdict[str, dict[str, Any]] = defaultdict( lambda: { "observed": {"attached": None, "secondary": []}, "intent": {"attached": None, "secondary": []}, @@ -2265,12 +2262,12 @@ class NeonStorageController(MetricsGetter, LogUtils): response.raise_for_status() return [TenantShardId.parse(tid) for tid in response.json()["updated"]] - def __enter__(self) -> "NeonStorageController": + def __enter__(self) -> NeonStorageController: return self def __exit__( self, - exc_type: Optional[Type[BaseException]], + exc_type: Optional[type[BaseException]], exc: Optional[BaseException], tb: Optional[TracebackType], ): @@ -2279,7 +2276,7 @@ class NeonStorageController(MetricsGetter, LogUtils): class NeonProxiedStorageController(NeonStorageController): def __init__(self, env: NeonEnv, proxy_port: int, auth_enabled: bool): - super(NeonProxiedStorageController, self).__init__(env, proxy_port, auth_enabled) + super().__init__(env, proxy_port, auth_enabled) self.instances: dict[int, dict[str, Any]] = {} def start( @@ -2298,7 +2295,7 @@ class NeonProxiedStorageController(NeonStorageController): def stop_instance( self, immediate: bool = False, instance_id: Optional[int] = None - ) -> "NeonStorageController": + ) -> NeonStorageController: assert instance_id in self.instances if self.instances[instance_id]["running"]: self.env.neon_cli.storage_controller_stop(immediate, instance_id) @@ -2307,7 +2304,7 @@ class NeonProxiedStorageController(NeonStorageController): self.running = any(meta["running"] for meta in self.instances.values()) return self - def stop(self, immediate: bool = False) -> "NeonStorageController": + def stop(self, immediate: bool = False) -> NeonStorageController: for iid, details in self.instances.items(): if details["running"]: self.env.neon_cli.storage_controller_stop(immediate, iid) @@ -2326,7 +2323,7 @@ class NeonProxiedStorageController(NeonStorageController): def log_contains( self, pattern: str, offset: None | LogCursor = None - ) -> Optional[Tuple[str, LogCursor]]: + ) -> Optional[tuple[str, LogCursor]]: raise NotImplementedError() @@ -2358,7 +2355,7 @@ class NeonPageserver(PgProtocol, LogUtils): # env.pageserver.allowed_errors.append(".*could not open garage door.*") # # The entries in the list are regular expressions. 
- self.allowed_errors: List[str] = list(DEFAULT_PAGESERVER_ALLOWED_ERRORS) + self.allowed_errors: list[str] = list(DEFAULT_PAGESERVER_ALLOWED_ERRORS) def timeline_dir( self, @@ -2383,19 +2380,19 @@ class NeonPageserver(PgProtocol, LogUtils): def config_toml_path(self) -> Path: return self.workdir / "pageserver.toml" - def edit_config_toml(self, edit_fn: Callable[[Dict[str, Any]], T]) -> T: + def edit_config_toml(self, edit_fn: Callable[[dict[str, Any]], T]) -> T: """ Edit the pageserver's config toml file in place. """ path = self.config_toml_path - with open(path, "r") as f: + with open(path) as f: config = toml.load(f) res = edit_fn(config) with open(path, "w") as f: toml.dump(config, f) return res - def patch_config_toml_nonrecursive(self, patch: Dict[str, Any]) -> Dict[str, Any]: + def patch_config_toml_nonrecursive(self, patch: dict[str, Any]) -> dict[str, Any]: """ Non-recursively merge the given `patch` dict into the existing config toml, using `dict.update()`. Returns the replaced values. @@ -2404,7 +2401,7 @@ class NeonPageserver(PgProtocol, LogUtils): """ replacements = {} - def doit(config: Dict[str, Any]): + def doit(config: dict[str, Any]): while len(patch) > 0: key, new = patch.popitem() old = config.get(key, None) @@ -2416,9 +2413,9 @@ class NeonPageserver(PgProtocol, LogUtils): def start( self, - extra_env_vars: Optional[Dict[str, str]] = None, + extra_env_vars: Optional[dict[str, str]] = None, timeout_in_seconds: Optional[int] = None, - ) -> "NeonPageserver": + ) -> NeonPageserver: """ Start the page server. `overrides` allows to add some config to this pageserver start. @@ -2444,7 +2441,7 @@ class NeonPageserver(PgProtocol, LogUtils): return self - def stop(self, immediate: bool = False) -> "NeonPageserver": + def stop(self, immediate: bool = False) -> NeonPageserver: """ Stop the page server. Returns self. 
@@ -2492,12 +2489,12 @@ class NeonPageserver(PgProtocol, LogUtils): wait_until(20, 0.5, complete) - def __enter__(self) -> "NeonPageserver": + def __enter__(self) -> NeonPageserver: return self def __exit__( self, - exc_type: Optional[Type[BaseException]], + exc_type: Optional[type[BaseException]], exc: Optional[BaseException], tb: Optional[TracebackType], ): @@ -2544,7 +2541,7 @@ class NeonPageserver(PgProtocol, LogUtils): def tenant_attach( self, tenant_id: TenantId, - config: None | Dict[str, Any] = None, + config: None | dict[str, Any] = None, generation: Optional[int] = None, override_storage_controller_generation: bool = False, ): @@ -2583,7 +2580,7 @@ class NeonPageserver(PgProtocol, LogUtils): ) -> dict[str, Any]: path = self.tenant_dir(tenant_shard_id) / "config-v1" log.info(f"Reading location conf from {path}") - bytes = open(path, "r").read() + bytes = open(path).read() try: decoded: dict[str, Any] = toml.loads(bytes) return decoded @@ -2594,7 +2591,7 @@ class NeonPageserver(PgProtocol, LogUtils): def tenant_create( self, tenant_id: TenantId, - conf: Optional[Dict[str, Any]] = None, + conf: Optional[dict[str, Any]] = None, auth_token: Optional[str] = None, generation: Optional[int] = None, ) -> TenantId: @@ -2660,7 +2657,7 @@ class PgBin: self.env = os.environ.copy() self.env["LD_LIBRARY_PATH"] = str(self.pg_lib_dir) - def _fixpath(self, command: List[str]): + def _fixpath(self, command: list[str]): if "/" not in str(command[0]): command[0] = str(self.pg_bin_path / command[0]) @@ -2680,7 +2677,7 @@ class PgBin: def run_nonblocking( self, - command: List[str], + command: list[str], env: Optional[Env] = None, cwd: Optional[Union[str, Path]] = None, ) -> subprocess.Popen[Any]: @@ -2704,7 +2701,7 @@ class PgBin: def run( self, - command: List[str], + command: list[str], env: Optional[Env] = None, cwd: Optional[Union[str, Path]] = None, ) -> None: @@ -2727,7 +2724,7 @@ class PgBin: def run_capture( self, - command: List[str], + command: list[str], env: Optional[Env] = None, cwd: Optional[str] = None, with_command_header=True, @@ -2840,14 +2837,14 @@ class VanillaPostgres(PgProtocol): ] ) - def configure(self, options: List[str]): + def configure(self, options: list[str]): """Append lines into postgresql.conf file.""" assert not self.running with open(os.path.join(self.pgdatadir, "postgresql.conf"), "a") as conf_file: conf_file.write("\n".join(options)) conf_file.write("\n") - def edit_hba(self, hba: List[str]): + def edit_hba(self, hba: list[str]): """Prepend hba lines into pg_hba.conf file.""" assert not self.running with open(os.path.join(self.pgdatadir, "pg_hba.conf"), "r+") as conf_file: @@ -2875,12 +2872,12 @@ class VanillaPostgres(PgProtocol): """Return size of pgdatadir subdirectory in bytes.""" return get_dir_size(self.pgdatadir / subdir) - def __enter__(self) -> "VanillaPostgres": + def __enter__(self) -> VanillaPostgres: return self def __exit__( self, - exc_type: Optional[Type[BaseException]], + exc_type: Optional[type[BaseException]], exc: Optional[BaseException], tb: Optional[TracebackType], ): @@ -2910,7 +2907,7 @@ class RemotePostgres(PgProtocol): # The remote server is assumed to be running already self.running = True - def configure(self, options: List[str]): + def configure(self, options: list[str]): raise Exception("cannot change configuration of remote Posgres instance") def start(self): @@ -2924,12 +2921,12 @@ class RemotePostgres(PgProtocol): # See https://www.postgresql.org/docs/14/functions-admin.html#FUNCTIONS-ADMIN-GENFILE raise Exception("cannot get size of 
a Postgres instance") - def __enter__(self) -> "RemotePostgres": + def __enter__(self) -> RemotePostgres: return self def __exit__( self, - exc_type: Optional[Type[BaseException]], + exc_type: Optional[type[BaseException]], exc: Optional[BaseException], tb: Optional[TracebackType], ): @@ -3265,7 +3262,7 @@ class NeonProxy(PgProtocol): def __exit__( self, - exc_type: Optional[Type[BaseException]], + exc_type: Optional[type[BaseException]], exc: Optional[BaseException], tb: Optional[TracebackType], ): @@ -3403,7 +3400,7 @@ class Endpoint(PgProtocol, LogUtils): self.http_port = http_port self.check_stop_result = check_stop_result # passed to endpoint create and endpoint reconfigure - self.active_safekeepers: List[int] = list(map(lambda sk: sk.id, env.safekeepers)) + self.active_safekeepers: list[int] = list(map(lambda sk: sk.id, env.safekeepers)) # path to conf is /endpoints//pgdata/postgresql.conf # Semaphore is set to 1 when we start, and acquire'd back to zero when we stop @@ -3426,10 +3423,10 @@ class Endpoint(PgProtocol, LogUtils): endpoint_id: Optional[str] = None, hot_standby: bool = False, lsn: Optional[Lsn] = None, - config_lines: Optional[List[str]] = None, + config_lines: Optional[list[str]] = None, pageserver_id: Optional[int] = None, allow_multiple: bool = False, - ) -> "Endpoint": + ) -> Endpoint: """ Create a new Postgres endpoint. Returns self. @@ -3472,10 +3469,10 @@ class Endpoint(PgProtocol, LogUtils): self, remote_ext_config: Optional[str] = None, pageserver_id: Optional[int] = None, - safekeepers: Optional[List[int]] = None, + safekeepers: Optional[list[int]] = None, allow_multiple: bool = False, basebackup_request_tries: Optional[int] = None, - ) -> "Endpoint": + ) -> Endpoint: """ Start the Postgres instance. Returns self. @@ -3524,7 +3521,7 @@ class Endpoint(PgProtocol, LogUtils): """Path to the postgresql.conf in the endpoint directory (not the one in pgdata)""" return self.endpoint_path() / "postgresql.conf" - def config(self, lines: List[str]) -> "Endpoint": + def config(self, lines: list[str]) -> Endpoint: """ Add lines to postgresql.conf. Lines should be an array of valid postgresql.conf rows. @@ -3538,7 +3535,7 @@ class Endpoint(PgProtocol, LogUtils): return self - def edit_hba(self, hba: List[str]): + def edit_hba(self, hba: list[str]): """Prepend hba lines into pg_hba.conf file.""" with open(os.path.join(self.pg_data_dir_path(), "pg_hba.conf"), "r+") as conf_file: data = conf_file.read() @@ -3553,7 +3550,7 @@ class Endpoint(PgProtocol, LogUtils): return self._running._value > 0 def reconfigure( - self, pageserver_id: Optional[int] = None, safekeepers: Optional[List[int]] = None + self, pageserver_id: Optional[int] = None, safekeepers: Optional[list[int]] = None ): assert self.endpoint_id is not None # If `safekeepers` is not None, they are remember them as active and use @@ -3568,7 +3565,7 @@ class Endpoint(PgProtocol, LogUtils): """Update the endpoint.json file used by control_plane.""" # Read config config_path = os.path.join(self.endpoint_path(), "endpoint.json") - with open(config_path, "r") as f: + with open(config_path) as f: data_dict: dict[str, Any] = json.load(f) # Write it back updated @@ -3601,8 +3598,8 @@ class Endpoint(PgProtocol, LogUtils): def stop( self, mode: str = "fast", - sks_wait_walreceiver_gone: Optional[tuple[List[Safekeeper], TimelineId]] = None, - ) -> "Endpoint": + sks_wait_walreceiver_gone: Optional[tuple[list[Safekeeper], TimelineId]] = None, + ) -> Endpoint: """ Stop the Postgres instance if it's running. 
@@ -3636,7 +3633,7 @@ class Endpoint(PgProtocol, LogUtils): return self - def stop_and_destroy(self, mode: str = "immediate") -> "Endpoint": + def stop_and_destroy(self, mode: str = "immediate") -> Endpoint: """ Stop the Postgres instance, then destroy the endpoint. Returns self. @@ -3658,12 +3655,12 @@ class Endpoint(PgProtocol, LogUtils): endpoint_id: Optional[str] = None, hot_standby: bool = False, lsn: Optional[Lsn] = None, - config_lines: Optional[List[str]] = None, + config_lines: Optional[list[str]] = None, remote_ext_config: Optional[str] = None, pageserver_id: Optional[int] = None, allow_multiple=False, basebackup_request_tries: Optional[int] = None, - ) -> "Endpoint": + ) -> Endpoint: """ Create an endpoint, apply config, and start Postgres. Returns self. @@ -3690,12 +3687,12 @@ class Endpoint(PgProtocol, LogUtils): return self - def __enter__(self) -> "Endpoint": + def __enter__(self) -> Endpoint: return self def __exit__( self, - exc_type: Optional[Type[BaseException]], + exc_type: Optional[type[BaseException]], exc: Optional[BaseException], tb: Optional[TracebackType], ): @@ -3726,7 +3723,7 @@ class EndpointFactory: def __init__(self, env: NeonEnv): self.env = env self.num_instances: int = 0 - self.endpoints: List[Endpoint] = [] + self.endpoints: list[Endpoint] = [] def create_start( self, @@ -3735,7 +3732,7 @@ class EndpointFactory: tenant_id: Optional[TenantId] = None, lsn: Optional[Lsn] = None, hot_standby: bool = False, - config_lines: Optional[List[str]] = None, + config_lines: Optional[list[str]] = None, remote_ext_config: Optional[str] = None, pageserver_id: Optional[int] = None, basebackup_request_tries: Optional[int] = None, @@ -3767,7 +3764,7 @@ class EndpointFactory: tenant_id: Optional[TenantId] = None, lsn: Optional[Lsn] = None, hot_standby: bool = False, - config_lines: Optional[List[str]] = None, + config_lines: Optional[list[str]] = None, pageserver_id: Optional[int] = None, ) -> Endpoint: ep = Endpoint( @@ -3791,7 +3788,7 @@ class EndpointFactory: pageserver_id=pageserver_id, ) - def stop_all(self, fail_on_error=True) -> "EndpointFactory": + def stop_all(self, fail_on_error=True) -> EndpointFactory: exception = None for ep in self.endpoints: try: @@ -3806,7 +3803,7 @@ class EndpointFactory: return self def new_replica( - self, origin: Endpoint, endpoint_id: str, config_lines: Optional[List[str]] = None + self, origin: Endpoint, endpoint_id: str, config_lines: Optional[list[str]] = None ): branch_name = origin.branch_name assert origin in self.endpoints @@ -3822,7 +3819,7 @@ class EndpointFactory: ) def new_replica_start( - self, origin: Endpoint, endpoint_id: str, config_lines: Optional[List[str]] = None + self, origin: Endpoint, endpoint_id: str, config_lines: Optional[list[str]] = None ): branch_name = origin.branch_name assert origin in self.endpoints @@ -3860,7 +3857,7 @@ class Safekeeper(LogUtils): port: SafekeeperPort, id: int, running: bool = False, - extra_opts: Optional[List[str]] = None, + extra_opts: Optional[list[str]] = None, ): self.env = env self.port = port @@ -3886,8 +3883,8 @@ class Safekeeper(LogUtils): self.extra_opts = extra_opts def start( - self, extra_opts: Optional[List[str]] = None, timeout_in_seconds: Optional[int] = None - ) -> "Safekeeper": + self, extra_opts: Optional[list[str]] = None, timeout_in_seconds: Optional[int] = None + ) -> Safekeeper: if extra_opts is None: # Apply either the extra_opts passed in, or the ones from our constructor: we do not merge the two. 
extra_opts = self.extra_opts @@ -3922,7 +3919,7 @@ class Safekeeper(LogUtils): break # success return self - def stop(self, immediate: bool = False) -> "Safekeeper": + def stop(self, immediate: bool = False) -> Safekeeper: log.info(f"Stopping safekeeper {self.id}") self.env.neon_cli.safekeeper_stop(self.id, immediate) self.running = False @@ -3934,8 +3931,8 @@ class Safekeeper(LogUtils): assert not self.log_contains("timeout while acquiring WalResidentTimeline guard") def append_logical_message( - self, tenant_id: TenantId, timeline_id: TimelineId, request: Dict[str, Any] - ) -> Dict[str, Any]: + self, tenant_id: TenantId, timeline_id: TimelineId, request: dict[str, Any] + ) -> dict[str, Any]: """ Send JSON_CTRL query to append LogicalMessage to WAL and modify safekeeper state. It will construct LogicalMessage from provided @@ -3988,7 +3985,7 @@ class Safekeeper(LogUtils): def pull_timeline( self, srcs: list[Safekeeper], tenant_id: TenantId, timeline_id: TimelineId - ) -> Dict[str, Any]: + ) -> dict[str, Any]: """ pull_timeline from srcs to self. """ @@ -4024,7 +4021,7 @@ class Safekeeper(LogUtils): mysegs = [s for s in segs if f"sk{self.id}" in s] return mysegs - def list_segments(self, tenant_id, timeline_id) -> List[str]: + def list_segments(self, tenant_id, timeline_id) -> list[str]: """ Get list of segment names of the given timeline. """ @@ -4129,7 +4126,7 @@ class StorageScrubber: self.log_dir = log_dir def scrubber_cli( - self, args: list[str], timeout, extra_env: Optional[Dict[str, str]] = None + self, args: list[str], timeout, extra_env: Optional[dict[str, str]] = None ) -> str: assert isinstance(self.env.pageserver_remote_storage, S3Storage) s3_storage = self.env.pageserver_remote_storage @@ -4176,10 +4173,10 @@ class StorageScrubber: def scan_metadata_safekeeper( self, - timeline_lsns: List[Dict[str, Any]], + timeline_lsns: list[dict[str, Any]], cloud_admin_api_url: str, cloud_admin_api_token: str, - ) -> Tuple[bool, Any]: + ) -> tuple[bool, Any]: extra_env = { "CLOUD_ADMIN_API_URL": cloud_admin_api_url, "CLOUD_ADMIN_API_TOKEN": cloud_admin_api_token, @@ -4192,9 +4189,9 @@ class StorageScrubber: self, post_to_storage_controller: bool = False, node_kind: NodeKind = NodeKind.PAGESERVER, - timeline_lsns: Optional[List[Dict[str, Any]]] = None, - extra_env: Optional[Dict[str, str]] = None, - ) -> Tuple[bool, Any]: + timeline_lsns: Optional[list[dict[str, Any]]] = None, + extra_env: Optional[dict[str, str]] = None, + ) -> tuple[bool, Any]: """ Returns the health status and the metadata summary. 
""" @@ -4501,7 +4498,7 @@ def should_skip_file(filename: str) -> bool: # # Test helpers # -def list_files_to_compare(pgdata_dir: Path) -> List[str]: +def list_files_to_compare(pgdata_dir: Path) -> list[str]: pgdata_files = [] for root, _dirs, filenames in os.walk(pgdata_dir): for filename in filenames: diff --git a/test_runner/fixtures/overlayfs.py b/test_runner/fixtures/overlayfs.py index 3e2f661893..e0ebfeb8f4 100644 --- a/test_runner/fixtures/overlayfs.py +++ b/test_runner/fixtures/overlayfs.py @@ -1,5 +1,7 @@ +from __future__ import annotations + +from collections.abc import Iterator from pathlib import Path -from typing import Iterator import psutil diff --git a/test_runner/fixtures/pageserver/__init__.py b/test_runner/fixtures/pageserver/__init__.py index e69de29bb2..9d48db4f9f 100644 --- a/test_runner/fixtures/pageserver/__init__.py +++ b/test_runner/fixtures/pageserver/__init__.py @@ -0,0 +1 @@ +from __future__ import annotations diff --git a/test_runner/fixtures/pageserver/allowed_errors.py b/test_runner/fixtures/pageserver/allowed_errors.py index f8d9a51c91..fa85563e35 100755 --- a/test_runner/fixtures/pageserver/allowed_errors.py +++ b/test_runner/fixtures/pageserver/allowed_errors.py @@ -1,14 +1,16 @@ #! /usr/bin/env python3 +from __future__ import annotations + import argparse import re import sys -from typing import Iterable, List, Tuple +from collections.abc import Iterable def scan_pageserver_log_for_errors( - input: Iterable[str], allowed_errors: List[str] -) -> List[Tuple[int, str]]: + input: Iterable[str], allowed_errors: list[str] +) -> list[tuple[int, str]]: error_or_warn = re.compile(r"\s(ERROR|WARN)") errors = [] for lineno, line in enumerate(input, start=1): @@ -113,7 +115,7 @@ DEFAULT_STORAGE_CONTROLLER_ALLOWED_ERRORS = [ def _check_allowed_errors(input): - allowed_errors: List[str] = list(DEFAULT_PAGESERVER_ALLOWED_ERRORS) + allowed_errors: list[str] = list(DEFAULT_PAGESERVER_ALLOWED_ERRORS) # add any test specifics here; cli parsing is not provided for the # difficulty of copypasting regexes as arguments without any quoting diff --git a/test_runner/fixtures/pageserver/common_types.py b/test_runner/fixtures/pageserver/common_types.py index a6c327a8a0..2319701e0b 100644 --- a/test_runner/fixtures/pageserver/common_types.py +++ b/test_runner/fixtures/pageserver/common_types.py @@ -1,9 +1,14 @@ +from __future__ import annotations + import re from dataclasses import dataclass -from typing import Any, Dict, Tuple, Union +from typing import TYPE_CHECKING, Union from fixtures.common_types import KEY_MAX, KEY_MIN, Key, Lsn +if TYPE_CHECKING: + from typing import Any + @dataclass class IndexLayerMetadata: @@ -53,7 +58,7 @@ IMAGE_LAYER_FILE_NAME = re.compile( ) -def parse_image_layer(f_name: str) -> Tuple[int, int, int]: +def parse_image_layer(f_name: str) -> tuple[int, int, int]: """Parse an image layer file name. Return key start, key end, and snapshot lsn""" match = IMAGE_LAYER_FILE_NAME.match(f_name) @@ -68,7 +73,7 @@ DELTA_LAYER_FILE_NAME = re.compile( ) -def parse_delta_layer(f_name: str) -> Tuple[int, int, int, int]: +def parse_delta_layer(f_name: str) -> tuple[int, int, int, int]: """Parse a delta layer file name. 
Return key start, key end, lsn start, and lsn end"""
     match = DELTA_LAYER_FILE_NAME.match(f_name)
     if match is None:
@@ -121,11 +126,11 @@ def is_future_layer(layer_file_name: LayerName, disk_consistent_lsn: Lsn):

 @dataclass
 class IndexPartDump:
-    layer_metadata: Dict[LayerName, IndexLayerMetadata]
+    layer_metadata: dict[LayerName, IndexLayerMetadata]
     disk_consistent_lsn: Lsn

     @classmethod
-    def from_json(cls, d: Dict[str, Any]) -> "IndexPartDump":
+    def from_json(cls, d: dict[str, Any]) -> IndexPartDump:
         return IndexPartDump(
             layer_metadata={
                 parse_layer_file_name(n): IndexLayerMetadata(v["file_size"], v["generation"])
diff --git a/test_runner/fixtures/pageserver/http.py b/test_runner/fixtures/pageserver/http.py
index 49ad54d456..84a7e5f0a2 100644
--- a/test_runner/fixtures/pageserver/http.py
+++ b/test_runner/fixtures/pageserver/http.py
@@ -4,7 +4,7 @@ import time
 from collections import defaultdict
 from dataclasses import dataclass
 from datetime import datetime
-from typing import Any, Dict, List, Optional, Set, Tuple, Union
+from typing import TYPE_CHECKING, Any

 import requests
 from requests.adapters import HTTPAdapter
@@ -16,6 +16,9 @@ from fixtures.metrics import Metrics, MetricsGetter, parse_metrics
 from fixtures.pg_version import PgVersion
 from fixtures.utils import Fn

+if TYPE_CHECKING:
+    from typing import Optional, Union
+

 class PageserverApiException(Exception):
     def __init__(self, message, status_code: int):
@@ -43,7 +46,7 @@ class InMemoryLayerInfo:
     lsn_end: Optional[str]

     @classmethod
-    def from_json(cls, d: Dict[str, Any]) -> InMemoryLayerInfo:
+    def from_json(cls, d: dict[str, Any]) -> InMemoryLayerInfo:
         return InMemoryLayerInfo(
             kind=d["kind"],
             lsn_start=d["lsn_start"],
@@ -64,7 +67,7 @@ class HistoricLayerInfo:
     visible: bool

     @classmethod
-    def from_json(cls, d: Dict[str, Any]) -> HistoricLayerInfo:
+    def from_json(cls, d: dict[str, Any]) -> HistoricLayerInfo:
         # instead of parsing the key range let's keep the definition of "L0" in pageserver
         l0_ness = d.get("l0")
         assert l0_ness is None or isinstance(l0_ness, bool)
@@ -86,53 +89,53 @@ class HistoricLayerInfo:

 @dataclass
 class LayerMapInfo:
-    in_memory_layers: List[InMemoryLayerInfo]
-    historic_layers: List[HistoricLayerInfo]
+    in_memory_layers: list[InMemoryLayerInfo]
+    historic_layers: list[HistoricLayerInfo]

     @classmethod
-    def from_json(cls, d: Dict[str, Any]) -> LayerMapInfo:
+    def from_json(cls, d: dict[str, Any]) -> LayerMapInfo:
         info = LayerMapInfo(in_memory_layers=[], historic_layers=[])

         json_in_memory_layers = d["in_memory_layers"]
-        assert isinstance(json_in_memory_layers, List)
+        assert isinstance(json_in_memory_layers, list)
         for json_in_memory_layer in json_in_memory_layers:
             info.in_memory_layers.append(InMemoryLayerInfo.from_json(json_in_memory_layer))

         json_historic_layers = d["historic_layers"]
-        assert isinstance(json_historic_layers, List)
+        assert isinstance(json_historic_layers, list)
         for json_historic_layer in json_historic_layers:
             info.historic_layers.append(HistoricLayerInfo.from_json(json_historic_layer))

         return info

-    def kind_count(self) -> Dict[str, int]:
-        counts: Dict[str, int] = defaultdict(int)
+    def kind_count(self) -> dict[str, int]:
+        counts: dict[str, int] = defaultdict(int)
         for inmem_layer in self.in_memory_layers:
             counts[inmem_layer.kind] += 1
         for hist_layer in self.historic_layers:
             counts[hist_layer.kind] += 1
         return counts

-    def delta_layers(self) -> List[HistoricLayerInfo]:
+    def delta_layers(self) -> list[HistoricLayerInfo]:
         return [x for x in self.historic_layers if x.kind == "Delta"]
-    def image_layers(self) -> List[HistoricLayerInfo]:
+    def image_layers(self) -> list[HistoricLayerInfo]:
         return [x for x in self.historic_layers if x.kind == "Image"]

-    def delta_l0_layers(self) -> List[HistoricLayerInfo]:
+    def delta_l0_layers(self) -> list[HistoricLayerInfo]:
         return [x for x in self.historic_layers if x.kind == "Delta" and x.l0]

-    def historic_by_name(self) -> Set[str]:
+    def historic_by_name(self) -> set[str]:
         return set(x.layer_file_name for x in self.historic_layers)

 @dataclass
 class TenantConfig:
-    tenant_specific_overrides: Dict[str, Any]
-    effective_config: Dict[str, Any]
+    tenant_specific_overrides: dict[str, Any]
+    effective_config: dict[str, Any]

     @classmethod
-    def from_json(cls, d: Dict[str, Any]) -> TenantConfig:
+    def from_json(cls, d: dict[str, Any]) -> TenantConfig:
         return TenantConfig(
             tenant_specific_overrides=d["tenant_specific_overrides"],
             effective_config=d["effective_config"],
@@ -209,7 +212,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter):
     def check_status(self):
         self.get(f"http://localhost:{self.port}/v1/status").raise_for_status()

-    def configure_failpoints(self, config_strings: Tuple[str, str] | List[Tuple[str, str]]):
+    def configure_failpoints(self, config_strings: tuple[str, str] | list[tuple[str, str]]):
         self.is_testing_enabled_or_skip()

         if isinstance(config_strings, tuple):
@@ -233,7 +236,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter):
         res = self.post(f"http://localhost:{self.port}/v1/reload_auth_validation_keys")
         self.verbose_error(res)

-    def tenant_list(self) -> List[Dict[Any, Any]]:
+    def tenant_list(self) -> list[dict[Any, Any]]:
         res = self.get(f"http://localhost:{self.port}/v1/tenant")
         self.verbose_error(res)
         res_json = res.json()
@@ -244,7 +247,7 @@
         self,
         tenant_id: Union[TenantId, TenantShardId],
         generation: int,
-        config: None | Dict[str, Any] = None,
+        config: None | dict[str, Any] = None,
     ):
         config = config or {}
@@ -324,7 +327,7 @@
     def tenant_status(
         self, tenant_id: Union[TenantId, TenantShardId], activate: bool = False
-    ) -> Dict[Any, Any]:
+    ) -> dict[Any, Any]:
         """
         :activate: hint the server not to accelerate activation of this tenant in response to
         this query. False by default for tests, because they generally want to observe the
@@ -378,8 +381,8 @@
     def patch_tenant_config_client_side(
         self,
         tenant_id: TenantId,
-        inserts: Optional[Dict[str, Any]] = None,
-        removes: Optional[List[str]] = None,
+        inserts: Optional[dict[str, Any]] = None,
+        removes: Optional[list[str]] = None,
     ):
         current = self.tenant_config(tenant_id).tenant_specific_overrides
         if inserts is not None:
@@ -394,7 +397,7 @@
     def tenant_size_and_modelinputs(
         self, tenant_id: Union[TenantId, TenantShardId]
-    ) -> Tuple[int, Dict[str, Any]]:
+    ) -> tuple[int, dict[str, Any]]:
         """
         Returns the tenant size, together with the model inputs as the second tuple item.
""" @@ -424,7 +427,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter): tenant_id: Union[TenantId, TenantShardId], timestamp: datetime, done_if_after: datetime, - shard_counts: Optional[List[int]] = None, + shard_counts: Optional[list[int]] = None, ): """ Issues a request to perform time travel operations on the remote storage @@ -432,7 +435,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter): if shard_counts is None: shard_counts = [] - body: Dict[str, Any] = { + body: dict[str, Any] = { "shard_counts": shard_counts, } res = self.put( @@ -446,7 +449,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter): tenant_id: Union[TenantId, TenantShardId], include_non_incremental_logical_size: bool = False, include_timeline_dir_layer_file_size_sum: bool = False, - ) -> List[Dict[str, Any]]: + ) -> list[dict[str, Any]]: params = {} if include_non_incremental_logical_size: params["include-non-incremental-logical-size"] = "true" @@ -470,8 +473,8 @@ class PageserverHttpClient(requests.Session, MetricsGetter): ancestor_start_lsn: Optional[Lsn] = None, existing_initdb_timeline_id: Optional[TimelineId] = None, **kwargs, - ) -> Dict[Any, Any]: - body: Dict[str, Any] = { + ) -> dict[Any, Any]: + body: dict[str, Any] = { "new_timeline_id": str(new_timeline_id), "ancestor_start_lsn": str(ancestor_start_lsn) if ancestor_start_lsn else None, "ancestor_timeline_id": str(ancestor_timeline_id) if ancestor_timeline_id else None, @@ -504,7 +507,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter): include_timeline_dir_layer_file_size_sum: bool = False, force_await_initial_logical_size: bool = False, **kwargs, - ) -> Dict[Any, Any]: + ) -> dict[Any, Any]: params = {} if include_non_incremental_logical_size: params["include-non-incremental-logical-size"] = "true" @@ -844,7 +847,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter): ) if len(res) != 2: return None - inc, dec = [res[metric] for metric in metrics] + inc, dec = (res[metric] for metric in metrics) queue_count = int(inc) - int(dec) assert queue_count >= 0 return queue_count @@ -885,7 +888,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter): timeline_id: TimelineId, batch_size: int | None = None, **kwargs, - ) -> Set[TimelineId]: + ) -> set[TimelineId]: params = {} if batch_size is not None: params["batch_size"] = batch_size diff --git a/test_runner/fixtures/pageserver/many_tenants.py b/test_runner/fixtures/pageserver/many_tenants.py index 97e63ed4ba..37b4246d40 100644 --- a/test_runner/fixtures/pageserver/many_tenants.py +++ b/test_runner/fixtures/pageserver/many_tenants.py @@ -1,5 +1,7 @@ +from __future__ import annotations + import concurrent.futures -from typing import Any, Callable, Dict, Tuple +from typing import TYPE_CHECKING import fixtures.pageserver.remote_storage from fixtures.common_types import TenantId, TimelineId @@ -10,10 +12,13 @@ from fixtures.neon_fixtures import ( ) from fixtures.remote_storage import LocalFsStorage, RemoteStorageKind +if TYPE_CHECKING: + from typing import Any, Callable + def single_timeline( neon_env_builder: NeonEnvBuilder, - setup_template: Callable[[NeonEnv], Tuple[TenantId, TimelineId, Dict[str, Any]]], + setup_template: Callable[[NeonEnv], tuple[TenantId, TimelineId, dict[str, Any]]], ncopies: int, ) -> NeonEnv: """ diff --git a/test_runner/fixtures/pageserver/remote_storage.py b/test_runner/fixtures/pageserver/remote_storage.py index bc54fc4c8d..54acb9ce50 100644 --- a/test_runner/fixtures/pageserver/remote_storage.py +++ 
b/test_runner/fixtures/pageserver/remote_storage.py @@ -1,10 +1,12 @@ +from __future__ import annotations + import concurrent.futures import os import queue import shutil import threading from pathlib import Path -from typing import Any, List, Tuple +from typing import TYPE_CHECKING from fixtures.common_types import TenantId, TimelineId from fixtures.neon_fixtures import NeonEnv @@ -14,6 +16,9 @@ from fixtures.pageserver.common_types import ( ) from fixtures.remote_storage import LocalFsStorage +if TYPE_CHECKING: + from typing import Any + def duplicate_one_tenant(env: NeonEnv, template_tenant: TenantId, new_tenant: TenantId): remote_storage = env.pageserver_remote_storage @@ -50,13 +55,13 @@ def duplicate_one_tenant(env: NeonEnv, template_tenant: TenantId, new_tenant: Te return None -def duplicate_tenant(env: NeonEnv, template_tenant: TenantId, ncopies: int) -> List[TenantId]: +def duplicate_tenant(env: NeonEnv, template_tenant: TenantId, ncopies: int) -> list[TenantId]: assert isinstance(env.pageserver_remote_storage, LocalFsStorage) def work(tenant_id): duplicate_one_tenant(env, template_tenant, tenant_id) - new_tenants: List[TenantId] = [TenantId.generate() for _ in range(0, ncopies)] + new_tenants: list[TenantId] = [TenantId.generate() for _ in range(0, ncopies)] with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor: executor.map(work, new_tenants) return new_tenants @@ -79,7 +84,7 @@ def local_layer_name_from_remote_name(remote_name: str) -> str: def copy_all_remote_layer_files_to_local_tenant_dir( - env: NeonEnv, tenant_timelines: List[Tuple[TenantId, TimelineId]] + env: NeonEnv, tenant_timelines: list[tuple[TenantId, TimelineId]] ): remote_storage = env.pageserver_remote_storage assert isinstance(remote_storage, LocalFsStorage) diff --git a/test_runner/fixtures/pageserver/utils.py b/test_runner/fixtures/pageserver/utils.py index a74fef6a60..377a95fbeb 100644 --- a/test_runner/fixtures/pageserver/utils.py +++ b/test_runner/fixtures/pageserver/utils.py @@ -1,5 +1,7 @@ +from __future__ import annotations + import time -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import TYPE_CHECKING from mypy_boto3_s3.type_defs import ( DeleteObjectOutputTypeDef, @@ -14,6 +16,9 @@ from fixtures.pageserver.http import PageserverApiException, PageserverHttpClien from fixtures.remote_storage import RemoteStorage, RemoteStorageKind, S3Storage from fixtures.utils import wait_until +if TYPE_CHECKING: + from typing import Any, Optional, Union + def assert_tenant_state( pageserver_http: PageserverHttpClient, @@ -66,7 +71,7 @@ def wait_for_upload( ) -def _tenant_in_expected_state(tenant_info: Dict[str, Any], expected_state: str): +def _tenant_in_expected_state(tenant_info: dict[str, Any], expected_state: str): if tenant_info["state"]["slug"] == expected_state: return True if tenant_info["state"]["slug"] == "Broken": @@ -80,7 +85,7 @@ def wait_until_tenant_state( expected_state: str, iterations: int, period: float = 1.0, -) -> Dict[str, Any]: +) -> dict[str, Any]: """ Does not use `wait_until` for debugging purposes """ @@ -136,7 +141,7 @@ def wait_until_timeline_state( expected_state: str, iterations: int, period: float = 1.0, -) -> Dict[str, Any]: +) -> dict[str, Any]: """ Does not use `wait_until` for debugging purposes """ @@ -147,7 +152,7 @@ def wait_until_timeline_state( if isinstance(timeline["state"], str): if timeline["state"] == expected_state: return timeline - elif isinstance(timeline, Dict): + elif isinstance(timeline, dict): if 
timeline["state"].get(expected_state): return timeline @@ -235,7 +240,7 @@ def wait_for_upload_queue_empty( # this is `started left join finished`; if match, subtracting start from finished, resulting in queue depth remaining_labels = ["shard_id", "file_kind", "op_kind"] - tl: List[Tuple[Any, float]] = [] + tl: list[tuple[Any, float]] = [] for s in started: found = False for f in finished: @@ -302,7 +307,7 @@ def assert_prefix_empty( assert remote_storage is not None response = list_prefix(remote_storage, prefix) keys = response["KeyCount"] - objects: List[ObjectTypeDef] = response.get("Contents", []) + objects: list[ObjectTypeDef] = response.get("Contents", []) common_prefixes = response.get("CommonPrefixes", []) is_mock_s3 = isinstance(remote_storage, S3Storage) and not remote_storage.cleanup @@ -430,7 +435,7 @@ def enable_remote_storage_versioning( return response -def many_small_layers_tenant_config() -> Dict[str, Any]: +def many_small_layers_tenant_config() -> dict[str, Any]: """ Create a new dict to avoid issues with deleting from the global value. In python, the global is mutable. diff --git a/test_runner/fixtures/parametrize.py b/test_runner/fixtures/parametrize.py index 2c8e71526c..b408d83cf3 100644 --- a/test_runner/fixtures/parametrize.py +++ b/test_runner/fixtures/parametrize.py @@ -1,5 +1,7 @@ +from __future__ import annotations + import os -from typing import Any, Dict, Optional +from typing import TYPE_CHECKING import allure import pytest @@ -9,6 +11,10 @@ from _pytest.python import Metafunc from fixtures.pg_version import PgVersion from fixtures.utils import AuxFileStore +if TYPE_CHECKING: + from typing import Any, Optional + + """ Dynamically parametrize tests by different parameters """ @@ -44,7 +50,7 @@ def pageserver_aux_file_policy() -> Optional[AuxFileStore]: return None -def get_pageserver_default_tenant_config_compaction_algorithm() -> Optional[Dict[str, Any]]: +def get_pageserver_default_tenant_config_compaction_algorithm() -> Optional[dict[str, Any]]: toml_table = os.getenv("PAGESERVER_DEFAULT_TENANT_CONFIG_COMPACTION_ALGORITHM") if toml_table is None: return None @@ -54,7 +60,7 @@ def get_pageserver_default_tenant_config_compaction_algorithm() -> Optional[Dict @pytest.fixture(scope="function", autouse=True) -def pageserver_default_tenant_config_compaction_algorithm() -> Optional[Dict[str, Any]]: +def pageserver_default_tenant_config_compaction_algorithm() -> Optional[dict[str, Any]]: return get_pageserver_default_tenant_config_compaction_algorithm() diff --git a/test_runner/fixtures/pg_stats.py b/test_runner/fixtures/pg_stats.py index adb3a7730e..d334d07b2b 100644 --- a/test_runner/fixtures/pg_stats.py +++ b/test_runner/fixtures/pg_stats.py @@ -1,15 +1,16 @@ +from __future__ import annotations + from functools import cached_property -from typing import List import pytest class PgStatTable: table: str - columns: List[str] + columns: list[str] additional_query: str - def __init__(self, table: str, columns: List[str], filter_query: str = ""): + def __init__(self, table: str, columns: list[str], filter_query: str = ""): self.table = table self.columns = columns self.additional_query = filter_query @@ -20,7 +21,7 @@ class PgStatTable: @pytest.fixture(scope="function") -def pg_stats_rw() -> List[PgStatTable]: +def pg_stats_rw() -> list[PgStatTable]: return [ PgStatTable( "pg_stat_database", @@ -31,7 +32,7 @@ def pg_stats_rw() -> List[PgStatTable]: @pytest.fixture(scope="function") -def pg_stats_ro() -> List[PgStatTable]: +def pg_stats_ro() -> list[PgStatTable]: return 
[ PgStatTable( "pg_stat_database", ["tup_returned", "tup_fetched"], "WHERE datname='postgres'" @@ -40,7 +41,7 @@ def pg_stats_ro() -> List[PgStatTable]: @pytest.fixture(scope="function") -def pg_stats_wo() -> List[PgStatTable]: +def pg_stats_wo() -> list[PgStatTable]: return [ PgStatTable( "pg_stat_database", @@ -51,7 +52,7 @@ def pg_stats_wo() -> List[PgStatTable]: @pytest.fixture(scope="function") -def pg_stats_wal() -> List[PgStatTable]: +def pg_stats_wal() -> list[PgStatTable]: return [ PgStatTable( "pg_stat_wal", diff --git a/test_runner/fixtures/pg_version.py b/test_runner/fixtures/pg_version.py index 258935959b..5820b50a46 100644 --- a/test_runner/fixtures/pg_version.py +++ b/test_runner/fixtures/pg_version.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import enum import os from typing import Optional @@ -36,7 +38,7 @@ class PgVersion(str, enum.Enum): return f"v{self.value}" @classmethod - def _missing_(cls, value) -> Optional["PgVersion"]: + def _missing_(cls, value) -> Optional[PgVersion]: known_values = {v.value for _, v in cls.__members__.items()} # Allow passing version as a string with "v" prefix (e.g. "v14") diff --git a/test_runner/fixtures/port_distributor.py b/test_runner/fixtures/port_distributor.py index fd808d7a5f..435f452a02 100644 --- a/test_runner/fixtures/port_distributor.py +++ b/test_runner/fixtures/port_distributor.py @@ -1,10 +1,15 @@ +from __future__ import annotations + import re import socket from contextlib import closing -from typing import Dict, Union +from typing import TYPE_CHECKING from fixtures.log_helper import log +if TYPE_CHECKING: + from typing import Union + def can_bind(host: str, port: int) -> bool: """ @@ -24,7 +29,7 @@ def can_bind(host: str, port: int) -> bool: sock.bind((host, port)) sock.listen() return True - except socket.error: + except OSError: log.info(f"Port {port} is in use, skipping") return False finally: @@ -34,7 +39,7 @@ def can_bind(host: str, port: int) -> bool: class PortDistributor: def __init__(self, base_port: int, port_number: int): self.iterator = iter(range(base_port, base_port + port_number)) - self.port_map: Dict[int, int] = {} + self.port_map: dict[int, int] = {} def get_port(self) -> int: for port in self.iterator: diff --git a/test_runner/fixtures/remote_storage.py b/test_runner/fixtures/remote_storage.py index 1b6c3c23ba..a527b810df 100644 --- a/test_runner/fixtures/remote_storage.py +++ b/test_runner/fixtures/remote_storage.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import enum import hashlib import json @@ -6,7 +8,7 @@ import re import subprocess from dataclasses import dataclass from pathlib import Path -from typing import Any, Dict, List, Optional, Union +from typing import TYPE_CHECKING, Union import boto3 import toml @@ -16,6 +18,10 @@ from fixtures.common_types import TenantId, TenantShardId, TimelineId from fixtures.log_helper import log from fixtures.pageserver.common_types import IndexPartDump +if TYPE_CHECKING: + from typing import Any, Optional + + TIMELINE_INDEX_PART_FILE_NAME = "index_part.json" TENANT_HEATMAP_FILE_NAME = "heatmap-v1.json" @@ -142,7 +148,7 @@ class LocalFsStorage: with self.heatmap_path(tenant_id).open("r") as f: return json.load(f) - def to_toml_dict(self) -> Dict[str, Any]: + def to_toml_dict(self) -> dict[str, Any]: return { "local_path": str(self.root), } @@ -175,7 +181,7 @@ class S3Storage: """formatting deserialized with humantime crate, for example "1s".""" custom_timeout: Optional[str] = None - def access_env_vars(self) -> Dict[str, str]: + def 
access_env_vars(self) -> dict[str, str]: if self.aws_profile is not None: env = { "AWS_PROFILE": self.aws_profile, @@ -204,7 +210,7 @@ class S3Storage: } ) - def to_toml_dict(self) -> Dict[str, Any]: + def to_toml_dict(self) -> dict[str, Any]: rv = { "bucket_name": self.bucket_name, "bucket_region": self.bucket_region, @@ -279,7 +285,7 @@ class S3Storage: ) -> str: return f"{self.tenant_path(tenant_id)}/timelines/{timeline_id}" - def get_latest_index_key(self, index_keys: List[str]) -> str: + def get_latest_index_key(self, index_keys: list[str]) -> str: """ Gets the latest index file key. @@ -419,7 +425,7 @@ class RemoteStorageKind(str, enum.Enum): ) -def available_remote_storages() -> List[RemoteStorageKind]: +def available_remote_storages() -> list[RemoteStorageKind]: remote_storages = [RemoteStorageKind.LOCAL_FS, RemoteStorageKind.MOCK_S3] if os.getenv("ENABLE_REAL_S3_REMOTE_STORAGE") is not None: remote_storages.append(RemoteStorageKind.REAL_S3) @@ -429,7 +435,7 @@ def available_remote_storages() -> List[RemoteStorageKind]: return remote_storages -def available_s3_storages() -> List[RemoteStorageKind]: +def available_s3_storages() -> list[RemoteStorageKind]: remote_storages = [RemoteStorageKind.MOCK_S3] if os.getenv("ENABLE_REAL_S3_REMOTE_STORAGE") is not None: remote_storages.append(RemoteStorageKind.REAL_S3) @@ -459,7 +465,7 @@ def default_remote_storage() -> RemoteStorageKind: return RemoteStorageKind.LOCAL_FS -def remote_storage_to_toml_dict(remote_storage: RemoteStorage) -> Dict[str, Any]: +def remote_storage_to_toml_dict(remote_storage: RemoteStorage) -> dict[str, Any]: if not isinstance(remote_storage, (LocalFsStorage, S3Storage)): raise Exception("invalid remote storage type") diff --git a/test_runner/fixtures/safekeeper/__init__.py b/test_runner/fixtures/safekeeper/__init__.py index e69de29bb2..9d48db4f9f 100644 --- a/test_runner/fixtures/safekeeper/__init__.py +++ b/test_runner/fixtures/safekeeper/__init__.py @@ -0,0 +1 @@ +from __future__ import annotations diff --git a/test_runner/fixtures/safekeeper/http.py b/test_runner/fixtures/safekeeper/http.py index 7f170eeea3..5d9a3bd149 100644 --- a/test_runner/fixtures/safekeeper/http.py +++ b/test_runner/fixtures/safekeeper/http.py @@ -1,6 +1,8 @@ +from __future__ import annotations + import json from dataclasses import dataclass -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import TYPE_CHECKING import pytest import requests @@ -10,6 +12,9 @@ from fixtures.log_helper import log from fixtures.metrics import Metrics, MetricsGetter, parse_metrics from fixtures.utils import wait_until +if TYPE_CHECKING: + from typing import Any, Optional, Union + # Walreceiver as returned by sk's timeline status endpoint. 
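The `if TYPE_CHECKING:` block added a few lines up is the companion pattern to the future import: names used only in annotations are imported under a guard that type checkers evaluate but the interpreter skips, keeping the runtime import graph smaller. A minimal sketch of the same pattern (`fetch_status` is an illustrative function, not part of this diff):

from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # seen by type checkers only; the interpreter never executes this import
    from typing import Any, Optional


def fetch_status(payload: dict[str, Any]) -> Optional[str]:
    # annotations stay unevaluated strings, so the guarded names need not exist at runtime
    value = payload.get("status")
    return None if value is None else str(value)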
@dataclass @@ -29,7 +34,7 @@ class SafekeeperTimelineStatus: backup_lsn: Lsn peer_horizon_lsn: Lsn remote_consistent_lsn: Lsn - walreceivers: List[Walreceiver] + walreceivers: list[Walreceiver] class SafekeeperMetrics(Metrics): @@ -57,7 +62,7 @@ class TermBumpResponse: current_term: int @classmethod - def from_json(cls, d: Dict[str, Any]) -> "TermBumpResponse": + def from_json(cls, d: dict[str, Any]) -> TermBumpResponse: return TermBumpResponse( previous_term=d["previous_term"], current_term=d["current_term"], @@ -93,7 +98,7 @@ class SafekeeperHttpClient(requests.Session, MetricsGetter): if not self.is_testing_enabled: pytest.skip("safekeeper was built without 'testing' feature") - def configure_failpoints(self, config_strings: Union[Tuple[str, str], List[Tuple[str, str]]]): + def configure_failpoints(self, config_strings: Union[tuple[str, str], list[tuple[str, str]]]): self.is_testing_enabled_or_skip() if isinstance(config_strings, tuple): @@ -113,14 +118,14 @@ class SafekeeperHttpClient(requests.Session, MetricsGetter): assert res_json is None return res_json - def tenant_delete_force(self, tenant_id: TenantId) -> Dict[Any, Any]: + def tenant_delete_force(self, tenant_id: TenantId) -> dict[Any, Any]: res = self.delete(f"http://localhost:{self.port}/v1/tenant/{tenant_id}") res.raise_for_status() res_json = res.json() assert isinstance(res_json, dict) return res_json - def timeline_list(self) -> List[TenantTimelineId]: + def timeline_list(self) -> list[TenantTimelineId]: res = self.get(f"http://localhost:{self.port}/v1/tenant/timeline") res.raise_for_status() resj = res.json() @@ -178,7 +183,7 @@ class SafekeeperHttpClient(requests.Session, MetricsGetter): # only_local doesn't remove segments in the remote storage. def timeline_delete( self, tenant_id: TenantId, timeline_id: TimelineId, only_local: bool = False - ) -> Dict[Any, Any]: + ) -> dict[Any, Any]: res = self.delete( f"http://localhost:{self.port}/v1/tenant/{tenant_id}/timeline/{timeline_id}", params={ @@ -190,7 +195,7 @@ class SafekeeperHttpClient(requests.Session, MetricsGetter): assert isinstance(res_json, dict) return res_json - def debug_dump(self, params: Optional[Dict[str, str]] = None) -> Dict[str, Any]: + def debug_dump(self, params: Optional[dict[str, str]] = None) -> dict[str, Any]: params = params or {} res = self.get(f"http://localhost:{self.port}/v1/debug_dump", params=params) res.raise_for_status() @@ -199,7 +204,7 @@ class SafekeeperHttpClient(requests.Session, MetricsGetter): return res_json def debug_dump_timeline( - self, timeline_id: TimelineId, params: Optional[Dict[str, str]] = None + self, timeline_id: TimelineId, params: Optional[dict[str, str]] = None ) -> Any: params = params or {} params["timeline_id"] = str(timeline_id) @@ -214,14 +219,14 @@ class SafekeeperHttpClient(requests.Session, MetricsGetter): dump = self.debug_dump_timeline(timeline_id, {"dump_control_file": "true"}) return dump["control_file"]["eviction_state"] - def pull_timeline(self, body: Dict[str, Any]) -> Dict[str, Any]: + def pull_timeline(self, body: dict[str, Any]) -> dict[str, Any]: res = self.post(f"http://localhost:{self.port}/v1/pull_timeline", json=body) res.raise_for_status() res_json = res.json() assert isinstance(res_json, dict) return res_json - def copy_timeline(self, tenant_id: TenantId, timeline_id: TimelineId, body: Dict[str, Any]): + def copy_timeline(self, tenant_id: TenantId, timeline_id: TimelineId, body: dict[str, Any]): res = self.post( 
f"http://localhost:{self.port}/v1/tenant/{tenant_id}/timeline/{timeline_id}/copy", json=body, @@ -232,8 +237,8 @@ class SafekeeperHttpClient(requests.Session, MetricsGetter): self, tenant_id: TenantId, timeline_id: TimelineId, - patch: Dict[str, Any], - ) -> Dict[str, Any]: + patch: dict[str, Any], + ) -> dict[str, Any]: res = self.patch( f"http://localhost:{self.port}/v1/tenant/{tenant_id}/timeline/{timeline_id}/control_file", json={ @@ -255,7 +260,7 @@ class SafekeeperHttpClient(requests.Session, MetricsGetter): def timeline_digest( self, tenant_id: TenantId, timeline_id: TimelineId, from_lsn: Lsn, until_lsn: Lsn - ) -> Dict[str, Any]: + ) -> dict[str, Any]: res = self.get( f"http://localhost:{self.port}/v1/tenant/{tenant_id}/timeline/{timeline_id}/digest", params={ diff --git a/test_runner/fixtures/safekeeper/utils.py b/test_runner/fixtures/safekeeper/utils.py index 2a081c6ccb..0246916470 100644 --- a/test_runner/fixtures/safekeeper/utils.py +++ b/test_runner/fixtures/safekeeper/utils.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from fixtures.common_types import TenantId, TimelineId from fixtures.log_helper import log from fixtures.safekeeper.http import SafekeeperHttpClient diff --git a/test_runner/fixtures/slow.py b/test_runner/fixtures/slow.py index ae0e87b553..4c6372d515 100644 --- a/test_runner/fixtures/slow.py +++ b/test_runner/fixtures/slow.py @@ -1,9 +1,15 @@ -from typing import Any, List +from __future__ import annotations + +from typing import TYPE_CHECKING import pytest from _pytest.config import Config from _pytest.config.argparsing import Parser +if TYPE_CHECKING: + from typing import Any + + """ This plugin allows tests to be marked as slow using pytest.mark.slow. By default slow tests are excluded. They need to be specifically requested with the --runslow flag in @@ -21,7 +27,7 @@ def pytest_configure(config: Config): config.addinivalue_line("markers", "slow: mark test as slow to run") -def pytest_collection_modifyitems(config: Config, items: List[Any]): +def pytest_collection_modifyitems(config: Config, items: list[Any]): if config.getoption("--runslow"): # --runslow given in cli: do not skip slow tests return diff --git a/test_runner/fixtures/storage_controller_proxy.py b/test_runner/fixtures/storage_controller_proxy.py index 3477f8b1f2..02cf6fc33f 100644 --- a/test_runner/fixtures/storage_controller_proxy.py +++ b/test_runner/fixtures/storage_controller_proxy.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import re from typing import Any, Optional diff --git a/test_runner/fixtures/utils.py b/test_runner/fixtures/utils.py index 44a884624e..23381e258a 100644 --- a/test_runner/fixtures/utils.py +++ b/test_runner/fixtures/utils.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import contextlib import enum import json @@ -7,22 +9,10 @@ import subprocess import tarfile import threading import time +from collections.abc import Iterable from hashlib import sha256 from pathlib import Path -from typing import ( - IO, - TYPE_CHECKING, - Any, - Callable, - Dict, - Iterable, - List, - Optional, - Set, - Tuple, - TypeVar, - Union, -) +from typing import TYPE_CHECKING, Any, Callable, TypeVar from urllib.parse import urlencode import allure @@ -36,6 +26,12 @@ from fixtures.pageserver.common_types import ( ) if TYPE_CHECKING: + from typing import ( + IO, + Optional, + Union, + ) + from fixtures.neon_fixtures import PgBin from fixtures.common_types import TimelineId @@ -44,7 +40,7 @@ Fn = TypeVar("Fn", bound=Callable[..., Any]) def subprocess_capture( 
capture_dir: Path, - cmd: List[str], + cmd: list[str], *, check=False, echo_stderr=False, @@ -53,7 +49,7 @@ def subprocess_capture( timeout=None, with_command_header=True, **popen_kwargs: Any, -) -> Tuple[str, Optional[str], int]: +) -> tuple[str, Optional[str], int]: """Run a process and bifurcate its output to files and the `log` logger stderr and stdout are always captured in files. They are also optionally @@ -98,7 +94,7 @@ def subprocess_capture( first = False # prefix the files with the command line so that we can # later understand which file is for what command - self.out_file.write((f"# {' '.join(cmd)}\n\n").encode("utf-8")) + self.out_file.write((f"# {' '.join(cmd)}\n\n").encode()) # Only bother decoding if we are going to do something more than stream to a file if self.echo or self.capture: @@ -166,7 +162,7 @@ def global_counter() -> int: return _global_counter -def print_gc_result(row: Dict[str, Any]): +def print_gc_result(row: dict[str, Any]): log.info("GC duration {elapsed} ms".format_map(row)) log.info( " total: {layers_total}, needed_by_cutoff {layers_needed_by_cutoff}, needed_by_pitr {layers_needed_by_pitr}" @@ -304,7 +300,7 @@ def allure_add_grafana_links(host: str, timeline_id: TimelineId, start_ms: int, "proxy logs": f'{{neon_service="proxy-scram", neon_region="{region_id}"}}', } - params: Dict[str, Any] = { + params: dict[str, Any] = { "datasource": LOGS_STAGING_DATASOURCE_ID, "queries": [ { @@ -420,7 +416,7 @@ def assert_ge(a, b) -> None: assert a >= b -def run_pg_bench_small(pg_bin: "PgBin", connstr: str): +def run_pg_bench_small(pg_bin: PgBin, connstr: str): """ Fast way to populate data. For more layers consider combining with these tenant settings: @@ -465,7 +461,7 @@ def humantime_to_ms(humantime: str) -> float: return round(total_ms, 3) -def scan_log_for_errors(input: Iterable[str], allowed_errors: List[str]) -> List[Tuple[int, str]]: +def scan_log_for_errors(input: Iterable[str], allowed_errors: list[str]) -> list[tuple[int, str]]: # FIXME: this duplicates test_runner/fixtures/pageserver/allowed_errors.py error_or_warn = re.compile(r"\s(ERROR|WARN)") errors = [] @@ -515,7 +511,7 @@ class AuxFileStore(str, enum.Enum): return f"'aux-{self.value}'" -def assert_pageserver_backups_equal(left: Path, right: Path, skip_files: Set[str]): +def assert_pageserver_backups_equal(left: Path, right: Path, skip_files: set[str]): """ This is essentially: @@ -539,7 +535,7 @@ def assert_pageserver_backups_equal(left: Path, right: Path, skip_files: Set[str digest.update(buf) return digest.digest() - def build_hash_list(p: Path) -> List[Tuple[str, bytes]]: + def build_hash_list(p: Path) -> list[tuple[str, bytes]]: with tarfile.open(p) as f: matching_files = (info for info in f if info.isreg() and info.name not in skip_files) ret = list( @@ -587,7 +583,7 @@ class PropagatingThread(threading.Thread): self.exc = e def join(self, timeout=None): - super(PropagatingThread, self).join(timeout) + super().join(timeout) if self.exc: raise self.exc return self.ret diff --git a/test_runner/fixtures/workload.py b/test_runner/fixtures/workload.py index 1ea0267e87..4f9c1125bf 100644 --- a/test_runner/fixtures/workload.py +++ b/test_runner/fixtures/workload.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import threading from typing import Any, Optional diff --git a/test_runner/logical_repl/test_clickhouse.py b/test_runner/logical_repl/test_clickhouse.py index c5ed9bc8af..8e03bbe5d4 100644 --- a/test_runner/logical_repl/test_clickhouse.py +++ b/test_runner/logical_repl/test_clickhouse.py 
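The test-module hunks that follow all insert the future import directly after the module docstring. That placement is required: `from __future__ import ...` must be the first statement in a file apart from the docstring and comments, or the interpreter raises a SyntaxError. A minimal sketch of a module laid out this way (the module contents are illustrative, not part of this diff):

"""
Illustrative test module layout (not part of this diff).
"""

from __future__ import annotations  # must precede every other statement

import json


def load(payload: str) -> dict[str, object]:
    return json.loads(payload)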
@@ -2,6 +2,8 @@ Test the logical replication in Neon with ClickHouse as a consumer """ +from __future__ import annotations + import hashlib import os import time diff --git a/test_runner/logical_repl/test_debezium.py b/test_runner/logical_repl/test_debezium.py index 5426a06ca1..d2cb087c92 100644 --- a/test_runner/logical_repl/test_debezium.py +++ b/test_runner/logical_repl/test_debezium.py @@ -2,6 +2,8 @@ Test the logical replication in Neon with Debezium as a consumer """ +from __future__ import annotations + import json import os import time diff --git a/test_runner/performance/__init__.py b/test_runner/performance/__init__.py index e69de29bb2..9d48db4f9f 100644 --- a/test_runner/performance/__init__.py +++ b/test_runner/performance/__init__.py @@ -0,0 +1 @@ +from __future__ import annotations diff --git a/test_runner/performance/pageserver/__init__.py b/test_runner/performance/pageserver/__init__.py index e69de29bb2..9d48db4f9f 100644 --- a/test_runner/performance/pageserver/__init__.py +++ b/test_runner/performance/pageserver/__init__.py @@ -0,0 +1 @@ +from __future__ import annotations diff --git a/test_runner/performance/pageserver/interactive/__init__.py b/test_runner/performance/pageserver/interactive/__init__.py index 29644c240e..1133d116a5 100644 --- a/test_runner/performance/pageserver/interactive/__init__.py +++ b/test_runner/performance/pageserver/interactive/__init__.py @@ -6,3 +6,5 @@ but then debug a performance problem interactively. It's kind of an abuse of the test framework, but, it's our only tool right now to automate a complex test bench setup. """ + +from __future__ import annotations diff --git a/test_runner/performance/pageserver/interactive/test_many_small_tenants.py b/test_runner/performance/pageserver/interactive/test_many_small_tenants.py index 0a5a2c10d6..4931295beb 100644 --- a/test_runner/performance/pageserver/interactive/test_many_small_tenants.py +++ b/test_runner/performance/pageserver/interactive/test_many_small_tenants.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import os import pdb diff --git a/test_runner/performance/pageserver/pagebench/__init__.py b/test_runner/performance/pageserver/pagebench/__init__.py index 9f5e45c0a0..4ed774cf2d 100644 --- a/test_runner/performance/pageserver/pagebench/__init__.py +++ b/test_runner/performance/pageserver/pagebench/__init__.py @@ -8,3 +8,5 @@ instead of benchmarking the full stack. See https://github.com/neondatabase/neon/issues/5771 for the context in which this was developed. 
""" + +from __future__ import annotations diff --git a/test_runner/performance/pageserver/pagebench/test_large_slru_basebackup.py b/test_runner/performance/pageserver/pagebench/test_large_slru_basebackup.py index c3ba5afc24..efd423104d 100644 --- a/test_runner/performance/pageserver/pagebench/test_large_slru_basebackup.py +++ b/test_runner/performance/pageserver/pagebench/test_large_slru_basebackup.py @@ -1,7 +1,9 @@ +from __future__ import annotations + import asyncio import json from pathlib import Path -from typing import Any, Dict, Tuple +from typing import TYPE_CHECKING import pytest from fixtures.benchmark_fixture import MetricReport, NeonBenchmarker @@ -13,6 +15,9 @@ from performance.pageserver.util import ( setup_pageserver_with_tenants, ) +if TYPE_CHECKING: + from typing import Any + @pytest.mark.parametrize("duration", [30]) @pytest.mark.parametrize("pgbench_scale", [get_scale_for_db(200)]) @@ -29,7 +34,7 @@ def test_basebackup_with_high_slru_count( def record(metric, **kwargs): zenbenchmark.record(metric_name=f"pageserver_basebackup.{metric}", **kwargs) - params: Dict[str, Tuple[Any, Dict[str, Any]]] = {} + params: dict[str, tuple[Any, dict[str, Any]]] = {} # params from fixtures params.update( @@ -157,7 +162,7 @@ def run_benchmark(env: NeonEnv, pg_bin: PgBin, record, duration_secs: int): results_path = Path(basepath + ".stdout") log.info(f"Benchmark results at: {results_path}") - with open(results_path, "r") as f: + with open(results_path) as f: results = json.load(f) log.info(f"Results:\n{json.dumps(results, sort_keys=True, indent=2)}") diff --git a/test_runner/performance/pageserver/pagebench/test_ondemand_download_churn.py b/test_runner/performance/pageserver/pagebench/test_ondemand_download_churn.py index 9ad6e7907c..8738f93a06 100644 --- a/test_runner/performance/pageserver/pagebench/test_ondemand_download_churn.py +++ b/test_runner/performance/pageserver/pagebench/test_ondemand_download_churn.py @@ -1,6 +1,8 @@ +from __future__ import annotations + import json from pathlib import Path -from typing import Any, Dict, Tuple +from typing import TYPE_CHECKING import pytest from fixtures.benchmark_fixture import MetricReport, NeonBenchmarker @@ -14,6 +16,9 @@ from fixtures.neon_fixtures import ( from fixtures.remote_storage import s3_storage from fixtures.utils import humantime_to_ms +if TYPE_CHECKING: + from typing import Any + @pytest.mark.parametrize("duration", [30]) @pytest.mark.parametrize("io_engine", ["tokio-epoll-uring", "std-fs"]) @@ -30,7 +35,7 @@ def test_download_churn( def record(metric, **kwargs): zenbenchmark.record(metric_name=f"pageserver_ondemand_download_churn.{metric}", **kwargs) - params: Dict[str, Tuple[Any, Dict[str, Any]]] = {} + params: dict[str, tuple[Any, dict[str, Any]]] = {} # params from fixtures params.update( @@ -134,7 +139,7 @@ def run_benchmark( results_path = Path(basepath + ".stdout") log.info(f"Benchmark results at: {results_path}") - with open(results_path, "r") as f: + with open(results_path) as f: results = json.load(f) log.info(f"Results:\n{json.dumps(results, sort_keys=True, indent=2)}") diff --git a/test_runner/performance/pageserver/pagebench/test_pageserver_max_throughput_getpage_at_latest_lsn.py b/test_runner/performance/pageserver/pagebench/test_pageserver_max_throughput_getpage_at_latest_lsn.py index 97eed88473..c038fc3fd2 100644 --- a/test_runner/performance/pageserver/pagebench/test_pageserver_max_throughput_getpage_at_latest_lsn.py +++ 
b/test_runner/performance/pageserver/pagebench/test_pageserver_max_throughput_getpage_at_latest_lsn.py
@@ -1,7 +1,9 @@
+from __future__ import annotations
+
 import json
 import os
 from pathlib import Path
-from typing import Any, Dict, Tuple
+from typing import TYPE_CHECKING

 import pytest
 from fixtures.benchmark_fixture import MetricReport, NeonBenchmarker
@@ -18,6 +20,10 @@ from performance.pageserver.util import (
     setup_pageserver_with_tenants,
 )

+if TYPE_CHECKING:
+    from typing import Any
+
+
 # The following tests use pagebench "getpage at latest LSN" to characterize the throughput of the pageserver.
 # originally there was a single test named `test_pageserver_max_throughput_getpage_at_latest_lsn`
 # so you still see some references to this name in the code.
@@ -92,7 +98,7 @@ def setup_and_run_pagebench_benchmark(
         metric_name=f"pageserver_max_throughput_getpage_at_latest_lsn.{metric}", **kwargs
     )

-    params: Dict[str, Tuple[Any, Dict[str, Any]]] = {}
+    params: dict[str, tuple[Any, dict[str, Any]]] = {}

     # params from fixtures
     params.update(
@@ -225,7 +231,7 @@ def run_pagebench_benchmark(
     results_path = Path(basepath + ".stdout")
     log.info(f"Benchmark results at: {results_path}")

-    with open(results_path, "r") as f:
+    with open(results_path) as f:
         results = json.load(f)

     log.info(f"Results:\n{json.dumps(results, sort_keys=True, indent=2)}")
diff --git a/test_runner/performance/pageserver/util.py b/test_runner/performance/pageserver/util.py
index 88296a7fbd..227319c425 100644
--- a/test_runner/performance/pageserver/util.py
+++ b/test_runner/performance/pageserver/util.py
@@ -2,7 +2,9 @@
 Utilities used by all code in this sub-directory
 """

-from typing import Any, Callable, Dict, Optional, Tuple
+from __future__ import annotations
+
+from typing import TYPE_CHECKING

 import fixtures.pageserver.many_tenants as many_tenants
 from fixtures.common_types import TenantId, TimelineId
@@ -13,6 +15,9 @@ from fixtures.neon_fixtures import (
 )
 from fixtures.pageserver.utils import wait_until_all_tenants_state

+if TYPE_CHECKING:
+    from typing import Any, Callable, Optional
+

 def ensure_pageserver_ready_for_benchmarking(env: NeonEnv, n_tenants: int):
     """
@@ -40,7 +45,7 @@ def setup_pageserver_with_tenants(
     neon_env_builder: NeonEnvBuilder,
     name: str,
     n_tenants: int,
-    setup: Callable[[NeonEnv], Tuple[TenantId, TimelineId, Dict[str, Any]]],
+    setup: Callable[[NeonEnv], tuple[TenantId, TimelineId, dict[str, Any]]],
     timeout_in_seconds: Optional[int] = None,
 ) -> NeonEnv:
     """
diff --git a/test_runner/performance/pgvector/loaddata.py b/test_runner/performance/pgvector/loaddata.py
index 36c209aed3..207f5657fc 100644
--- a/test_runner/performance/pgvector/loaddata.py
+++ b/test_runner/performance/pgvector/loaddata.py
@@ -1,10 +1,12 @@
+from __future__ import annotations
+
 import sys
 from pathlib import Path

-import numpy as np
-import pandas as pd
+import numpy as np  # type: ignore [import]
+import pandas as pd  # type: ignore [import]
 import psycopg2
-from pgvector.psycopg2 import register_vector
+from pgvector.psycopg2 import register_vector  # type: ignore [import]
 from psycopg2.extras import execute_values
diff --git a/test_runner/performance/test_branch_creation.py b/test_runner/performance/test_branch_creation.py
index 1fdb06785b..c50c4ad432 100644
--- a/test_runner/performance/test_branch_creation.py
+++ b/test_runner/performance/test_branch_creation.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import random
 import re
 import statistics
@@ -5,7 +7,6 @@
 import threading
 import time
 import timeit
 from
contextlib import closing -from typing import List import pytest from fixtures.benchmark_fixture import MetricReport, NeonBenchmarker @@ -18,7 +19,7 @@ from fixtures.utils import wait_until from prometheus_client.samples import Sample -def _record_branch_creation_durations(neon_compare: NeonCompare, durs: List[float]): +def _record_branch_creation_durations(neon_compare: NeonCompare, durs: list[float]): neon_compare.zenbenchmark.record( "branch_creation_duration_max", max(durs), "s", MetricReport.LOWER_IS_BETTER ) @@ -66,7 +67,7 @@ def test_branch_creation_heavy_write(neon_compare: NeonCompare, n_branches: int) env.create_branch("b0", tenant_id=tenant) - threads: List[threading.Thread] = [] + threads: list[threading.Thread] = [] threads.append(threading.Thread(target=run_pgbench, args=("b0",), daemon=True)) threads[-1].start() @@ -194,7 +195,7 @@ def wait_and_record_startup_metrics( ] ) - def metrics_are_filled() -> List[Sample]: + def metrics_are_filled() -> list[Sample]: m = client.get_metrics() samples = m.query_all("pageserver_startup_duration_seconds") # we should not have duplicate labels diff --git a/test_runner/performance/test_branching.py b/test_runner/performance/test_branching.py index 36c821795a..dbff116360 100644 --- a/test_runner/performance/test_branching.py +++ b/test_runner/performance/test_branching.py @@ -1,6 +1,7 @@ +from __future__ import annotations + import timeit from pathlib import Path -from typing import List from fixtures.benchmark_fixture import PgBenchRunResult from fixtures.compare_fixtures import NeonCompare @@ -22,7 +23,7 @@ def test_compare_child_and_root_pgbench_perf(neon_compare: NeonCompare): env = neon_compare.env pg_bin = neon_compare.pg_bin - def run_pgbench_on_branch(branch: str, cmd: List[str]): + def run_pgbench_on_branch(branch: str, cmd: list[str]): run_start_timestamp = utc_now_timestamp() t0 = timeit.default_timer() out = pg_bin.run_capture( diff --git a/test_runner/performance/test_bulk_insert.py b/test_runner/performance/test_bulk_insert.py index 69df7974b9..36090dcad7 100644 --- a/test_runner/performance/test_bulk_insert.py +++ b/test_runner/performance/test_bulk_insert.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from contextlib import closing from fixtures.benchmark_fixture import MetricReport diff --git a/test_runner/performance/test_bulk_tenant_create.py b/test_runner/performance/test_bulk_tenant_create.py index 188ff5e3ad..15a03ba456 100644 --- a/test_runner/performance/test_bulk_tenant_create.py +++ b/test_runner/performance/test_bulk_tenant_create.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import timeit import pytest diff --git a/test_runner/performance/test_bulk_update.py b/test_runner/performance/test_bulk_update.py index 13c48e1174..6946bc66f2 100644 --- a/test_runner/performance/test_bulk_update.py +++ b/test_runner/performance/test_bulk_update.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import pytest from fixtures.neon_fixtures import NeonEnvBuilder, wait_for_last_flush_lsn diff --git a/test_runner/performance/test_compaction.py b/test_runner/performance/test_compaction.py index 54b17ebf8a..8868dddf39 100644 --- a/test_runner/performance/test_compaction.py +++ b/test_runner/performance/test_compaction.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from contextlib import closing import pytest diff --git a/test_runner/performance/test_compare_pg_stats.py b/test_runner/performance/test_compare_pg_stats.py index d5dd1b4bd0..a86995d6d3 100644 --- 
a/test_runner/performance/test_compare_pg_stats.py +++ b/test_runner/performance/test_compare_pg_stats.py @@ -1,7 +1,8 @@ +from __future__ import annotations + import os import threading import time -from typing import List import pytest from fixtures.compare_fixtures import PgCompare @@ -23,7 +24,7 @@ def test_compare_pg_stats_rw_with_pgbench_default( seed: int, scale: int, duration: int, - pg_stats_rw: List[PgStatTable], + pg_stats_rw: list[PgStatTable], ): env = neon_with_baseline # initialize pgbench @@ -45,7 +46,7 @@ def test_compare_pg_stats_wo_with_pgbench_simple_update( seed: int, scale: int, duration: int, - pg_stats_wo: List[PgStatTable], + pg_stats_wo: list[PgStatTable], ): env = neon_with_baseline # initialize pgbench @@ -67,7 +68,7 @@ def test_compare_pg_stats_ro_with_pgbench_select_only( seed: int, scale: int, duration: int, - pg_stats_ro: List[PgStatTable], + pg_stats_ro: list[PgStatTable], ): env = neon_with_baseline # initialize pgbench @@ -89,7 +90,7 @@ def test_compare_pg_stats_wal_with_pgbench_default( seed: int, scale: int, duration: int, - pg_stats_wal: List[PgStatTable], + pg_stats_wal: list[PgStatTable], ): env = neon_with_baseline # initialize pgbench @@ -106,7 +107,7 @@ def test_compare_pg_stats_wal_with_pgbench_default( @pytest.mark.parametrize("n_tables", [1, 10]) @pytest.mark.parametrize("duration", get_durations_matrix(10)) def test_compare_pg_stats_wo_with_heavy_write( - neon_with_baseline: PgCompare, n_tables: int, duration: int, pg_stats_wo: List[PgStatTable] + neon_with_baseline: PgCompare, n_tables: int, duration: int, pg_stats_wo: list[PgStatTable] ): env = neon_with_baseline with env.pg.connect().cursor() as cur: diff --git a/test_runner/performance/test_copy.py b/test_runner/performance/test_copy.py index a91c78e867..743604a381 100644 --- a/test_runner/performance/test_copy.py +++ b/test_runner/performance/test_copy.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from contextlib import closing from io import BufferedReader, RawIOBase from typing import Optional diff --git a/test_runner/performance/test_dup_key.py b/test_runner/performance/test_dup_key.py index 60a4d91313..f7e4a629d6 100644 --- a/test_runner/performance/test_dup_key.py +++ b/test_runner/performance/test_dup_key.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from contextlib import closing import pytest diff --git a/test_runner/performance/test_gc_feedback.py b/test_runner/performance/test_gc_feedback.py index 2ba1018b33..07f244da0c 100644 --- a/test_runner/performance/test_gc_feedback.py +++ b/test_runner/performance/test_gc_feedback.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import json import pytest diff --git a/test_runner/performance/test_gist_build.py b/test_runner/performance/test_gist_build.py index 45900d0c7f..e8ef59722d 100644 --- a/test_runner/performance/test_gist_build.py +++ b/test_runner/performance/test_gist_build.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from contextlib import closing from fixtures.compare_fixtures import PgCompare diff --git a/test_runner/performance/test_hot_page.py b/test_runner/performance/test_hot_page.py index 5e97c7cddf..d025566919 100644 --- a/test_runner/performance/test_hot_page.py +++ b/test_runner/performance/test_hot_page.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from contextlib import closing import pytest diff --git a/test_runner/performance/test_hot_table.py b/test_runner/performance/test_hot_table.py index 9a78c92ec0..792d35321d 100644 --- 
a/test_runner/performance/test_hot_table.py +++ b/test_runner/performance/test_hot_table.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from contextlib import closing import pytest diff --git a/test_runner/performance/test_latency.py b/test_runner/performance/test_latency.py index 6c94ecc482..133a2cfd8a 100644 --- a/test_runner/performance/test_latency.py +++ b/test_runner/performance/test_latency.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import threading import pytest diff --git a/test_runner/performance/test_layer_map.py b/test_runner/performance/test_layer_map.py index fb2ac14a92..8a4ad2d399 100644 --- a/test_runner/performance/test_layer_map.py +++ b/test_runner/performance/test_layer_map.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import time from fixtures.neon_fixtures import NeonEnvBuilder, flush_ep_to_pageserver diff --git a/test_runner/performance/test_lazy_startup.py b/test_runner/performance/test_lazy_startup.py index 5af10bc491..704073fe3b 100644 --- a/test_runner/performance/test_lazy_startup.py +++ b/test_runner/performance/test_lazy_startup.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import pytest import requests from fixtures.benchmark_fixture import MetricReport, NeonBenchmarker diff --git a/test_runner/performance/test_parallel_copy_to.py b/test_runner/performance/test_parallel_copy_to.py index 9a0b7723ac..ddee0ebcd1 100644 --- a/test_runner/performance/test_parallel_copy_to.py +++ b/test_runner/performance/test_parallel_copy_to.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import asyncio from io import BytesIO diff --git a/test_runner/performance/test_perf_olap.py b/test_runner/performance/test_perf_olap.py index 4e93ae6a09..6dcde91b76 100644 --- a/test_runner/performance/test_perf_olap.py +++ b/test_runner/performance/test_perf_olap.py @@ -1,7 +1,8 @@ +from __future__ import annotations + import os from dataclasses import dataclass from pathlib import Path -from typing import Dict, List, Tuple import pytest from _pytest.mark import ParameterSet @@ -45,7 +46,7 @@ def test_clickbench_create_pg_stat_statements(remote_compare: RemoteCompare): # # Disable auto formatting for the list of queries so that it's easier to read # fmt: off -QUERIES: Tuple[LabelledQuery, ...] = ( +QUERIES: tuple[LabelledQuery, ...] = ( ### ClickBench queries: LabelledQuery("Q0", r"SELECT COUNT(*) FROM hits;"), LabelledQuery("Q1", r"SELECT COUNT(*) FROM hits WHERE AdvEngineID <> 0;"), @@ -105,7 +106,7 @@ QUERIES: Tuple[LabelledQuery, ...] = ( # # Disable auto formatting for the list of queries so that it's easier to read # fmt: off -PGVECTOR_QUERIES: Tuple[LabelledQuery, ...] = ( +PGVECTOR_QUERIES: tuple[LabelledQuery, ...] = ( LabelledQuery("PGVPREP", r"ALTER EXTENSION VECTOR UPDATE;"), LabelledQuery("PGV0", r"DROP TABLE IF EXISTS hnsw_test_table;"), LabelledQuery("PGV1", r"CREATE TABLE hnsw_test_table AS TABLE documents WITH NO DATA;"), @@ -127,7 +128,7 @@ PGVECTOR_QUERIES: Tuple[LabelledQuery, ...] = ( EXPLAIN_STRING: str = "EXPLAIN (ANALYZE, VERBOSE, BUFFERS, COSTS, SETTINGS, FORMAT JSON)" -def get_scale() -> List[str]: +def get_scale() -> list[str]: # We parametrize each tpc-h and clickbench test with scale # to distinguish them from each other, but don't really use it inside. # Databases are pre-created and passed through BENCHMARK_CONNSTR env variable. 
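A note on the builtin-generic annotations above (QUERIES: tuple[LabelledQuery, ...], get_scale() -> list[str]): they stay compatible with pre-3.9 interpreters only because each file now starts with from __future__ import annotations. A minimal sketch, with hypothetical names, of the distinction that matters:

from __future__ import annotations  # stores every annotation as a string

# Safe even on Python 3.8: the annotation is never evaluated, so the
# builtin tuple is never subscripted at import time.
SCALES: tuple[str, ...] = ("10",)


def get_scale_sketch() -> list[str]:  # hypothetical helper, not in the patch
    return list(SCALES)


# Outside an annotation the subscript IS evaluated, so on Python < 3.9
# the following alias would raise TypeError at runtime:
# ScaleList = list[str]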
@@ -147,7 +148,7 @@ def run_psql( options = f"-cstatement_timeout=0 {env.pg.default_options.get('options', '')}" connstr = env.pg.connstr(password=None, options=options) - environ: Dict[str, str] = {} + environ: dict[str, str] = {} if password is not None: environ["PGPASSWORD"] = password @@ -185,7 +186,7 @@ def test_clickbench(query: LabelledQuery, remote_compare: RemoteCompare, scale: run_psql(remote_compare, query, times=3, explain=explain) -def tpch_queuies() -> Tuple[ParameterSet, ...]: +def tpch_queuies() -> tuple[ParameterSet, ...]: """ A list of queries to run for the TPC-H benchmark. - queries in the returned tuple are ordered by query number diff --git a/test_runner/performance/test_perf_pgbench.py b/test_runner/performance/test_perf_pgbench.py index 6eaa29e4f8..24ff3d23fa 100644 --- a/test_runner/performance/test_perf_pgbench.py +++ b/test_runner/performance/test_perf_pgbench.py @@ -1,10 +1,11 @@ +from __future__ import annotations + import calendar import enum import os import timeit from datetime import datetime from pathlib import Path -from typing import Dict, List import pytest from fixtures.benchmark_fixture import MetricReport, PgBenchInitResult, PgBenchRunResult @@ -26,7 +27,7 @@ def utc_now_timestamp() -> int: def init_pgbench(env: PgCompare, cmdline, password: None): - environ: Dict[str, str] = {} + environ: dict[str, str] = {} if password is not None: environ["PGPASSWORD"] = password @@ -54,7 +55,7 @@ def init_pgbench(env: PgCompare, cmdline, password: None): def run_pgbench(env: PgCompare, prefix: str, cmdline, password: None): - environ: Dict[str, str] = {} + environ: dict[str, str] = {} if password is not None: environ["PGPASSWORD"] = password @@ -177,7 +178,7 @@ def run_test_pgbench(env: PgCompare, scale: int, duration: int, workload_type: P env.report_size() -def get_durations_matrix(default: int = 45) -> List[int]: +def get_durations_matrix(default: int = 45) -> list[int]: durations = os.getenv("TEST_PG_BENCH_DURATIONS_MATRIX", default=str(default)) rv = [] for d in durations.split(","): @@ -193,7 +194,7 @@ def get_durations_matrix(default: int = 45) -> list[int]: return rv -def get_scales_matrix(default: int = 10) -> List[int]: +def get_scales_matrix(default: int = 10) -> list[int]: scales = os.getenv("TEST_PG_BENCH_SCALES_MATRIX", default=str(default)) rv = [] for s in scales.split(","): diff --git a/test_runner/performance/test_perf_pgvector_queries.py b/test_runner/performance/test_perf_pgvector_queries.py index bb3db16305..4a5ea94c4b 100644 --- a/test_runner/performance/test_perf_pgvector_queries.py +++ b/test_runner/performance/test_perf_pgvector_queries.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import pytest from fixtures.compare_fixtures import PgCompare diff --git a/test_runner/performance/test_physical_replication.py b/test_runner/performance/test_physical_replication.py index 49b1176d34..14b527acca 100644 --- a/test_runner/performance/test_physical_replication.py +++ b/test_runner/performance/test_physical_replication.py @@ -18,7 +18,7 @@ from fixtures.neon_api import connection_parameters_to_env from fixtures.pg_version import PgVersion if TYPE_CHECKING: - from typing import Any, List, Optional + from typing import Any, Optional from fixtures.benchmark_fixture import NeonBenchmarker from fixtures.neon_api import NeonAPI @@ -233,7 +233,7 @@ def test_replication_start_stop( ], env=master_env, ) - replica_pgbench: List[Optional[subprocess.Popen[Any]]] = [None for _ in range(num_replicas)] + replica_pgbench:
list[Optional[subprocess.Popen[Any]]] = [None for _ in range(num_replicas)] # Use the bits of iconfig to tell us which configuration we are on. For example # a iconfig of 2 is 10 in binary, indicating replica 0 is suspended and replica 1 is diff --git a/test_runner/performance/test_random_writes.py b/test_runner/performance/test_random_writes.py index c1a59ebb31..46848a8af8 100644 --- a/test_runner/performance/test_random_writes.py +++ b/test_runner/performance/test_random_writes.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import random from contextlib import closing diff --git a/test_runner/performance/test_seqscans.py b/test_runner/performance/test_seqscans.py index 67d4f3ae9b..36ee4eb201 100644 --- a/test_runner/performance/test_seqscans.py +++ b/test_runner/performance/test_seqscans.py @@ -1,5 +1,8 @@ # Test sequential scan speed # + +from __future__ import annotations + from contextlib import closing import pytest diff --git a/test_runner/performance/test_sharding_autosplit.py b/test_runner/performance/test_sharding_autosplit.py index 35793e41d7..caa89955e3 100644 --- a/test_runner/performance/test_sharding_autosplit.py +++ b/test_runner/performance/test_sharding_autosplit.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import concurrent.futures import re from pathlib import Path diff --git a/test_runner/performance/test_startup.py b/test_runner/performance/test_startup.py index 514d8bae2a..d051717e92 100644 --- a/test_runner/performance/test_startup.py +++ b/test_runner/performance/test_startup.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import requests from fixtures.benchmark_fixture import MetricReport, NeonBenchmarker from fixtures.neon_fixtures import NeonEnvBuilder diff --git a/test_runner/performance/test_storage_controller_scale.py b/test_runner/performance/test_storage_controller_scale.py index a186bbaceb..452a856714 100644 --- a/test_runner/performance/test_storage_controller_scale.py +++ b/test_runner/performance/test_storage_controller_scale.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import concurrent.futures import random import time diff --git a/test_runner/performance/test_wal_backpressure.py b/test_runner/performance/test_wal_backpressure.py index c824e60c29..576a4f0467 100644 --- a/test_runner/performance/test_wal_backpressure.py +++ b/test_runner/performance/test_wal_backpressure.py @@ -1,8 +1,11 @@ +from __future__ import annotations + import statistics import threading import time import timeit -from typing import Any, Callable, Generator, List +from collections.abc import Generator +from typing import TYPE_CHECKING import pytest from fixtures.benchmark_fixture import MetricReport, NeonBenchmarker @@ -13,6 +16,9 @@ from fixtures.neon_fixtures import NeonEnvBuilder, PgBin, flush_ep_to_pageserver from performance.test_perf_pgbench import get_durations_matrix, get_scales_matrix +if TYPE_CHECKING: + from typing import Any, Callable + @pytest.fixture(params=["vanilla", "neon_off", "neon_on"]) # This fixture constructs multiple `PgCompare` interfaces using a builder pattern. 
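The if TYPE_CHECKING: blocks added throughout the patch all follow the same shape, sketched below with a hypothetical function: imports under the guard exist for type checkers only, and the future import keeps annotations unevaluated, so the guarded names are never needed at runtime.

from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # seen by mypy/pyright only; this import never executes at runtime
    from typing import Any, Callable


def sample_lag(probe: Callable[[], Any]) -> None:  # hypothetical function
    # Fine at runtime: with postponed evaluation the annotation stays a
    # plain string, so Callable and Any are never looked up.
    print(probe())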
@@ -202,7 +208,7 @@ def record_lsn_write_lag(env: PgCompare, run_cond: Callable[[], bool], pool_inte if not isinstance(env, NeonCompare): return - lsn_write_lags: List[Any] = [] + lsn_write_lags: list[Any] = [] last_received_lsn = Lsn(0) last_pg_flush_lsn = Lsn(0) diff --git a/test_runner/performance/test_write_amplification.py b/test_runner/performance/test_write_amplification.py index 3e290b3996..87824604f8 100644 --- a/test_runner/performance/test_write_amplification.py +++ b/test_runner/performance/test_write_amplification.py @@ -10,6 +10,9 @@ # in LSN order, writing the oldest layer first. That creates a new 10 MB image # layer to be created for each of those small updates. This is the Write # Amplification problem at its finest. + +from __future__ import annotations + from contextlib import closing from fixtures.compare_fixtures import PgCompare diff --git a/test_runner/pg_clients/python/asyncpg/asyncpg_example.py b/test_runner/pg_clients/python/asyncpg/asyncpg_example.py index de86fe482d..9077a07444 100755 --- a/test_runner/pg_clients/python/asyncpg/asyncpg_example.py +++ b/test_runner/pg_clients/python/asyncpg/asyncpg_example.py @@ -1,5 +1,7 @@ #! /usr/bin/env python3 +from __future__ import annotations + import asyncio import os diff --git a/test_runner/pg_clients/python/pg8000/pg8000_example.py b/test_runner/pg_clients/python/pg8000/pg8000_example.py index 840ed97c97..2e92806602 100755 --- a/test_runner/pg_clients/python/pg8000/pg8000_example.py +++ b/test_runner/pg_clients/python/pg8000/pg8000_example.py @@ -1,5 +1,7 @@ #! /usr/bin/env python3 +from __future__ import annotations + import os import pg8000.dbapi diff --git a/test_runner/pg_clients/test_pg_clients.py b/test_runner/pg_clients/test_pg_clients.py index 3579c92b0c..ffa710da06 100644 --- a/test_runner/pg_clients/test_pg_clients.py +++ b/test_runner/pg_clients/test_pg_clients.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import shutil from pathlib import Path from tempfile import NamedTemporaryFile diff --git a/test_runner/regress/test_ancestor_branch.py b/test_runner/regress/test_ancestor_branch.py index 67a38ab471..8cd49d480f 100644 --- a/test_runner/regress/test_ancestor_branch.py +++ b/test_runner/regress/test_ancestor_branch.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from fixtures.common_types import TimelineId from fixtures.log_helper import log from fixtures.neon_fixtures import NeonEnvBuilder diff --git a/test_runner/regress/test_attach_tenant_config.py b/test_runner/regress/test_attach_tenant_config.py index a4e557a863..4a7017994d 100644 --- a/test_runner/regress/test_attach_tenant_config.py +++ b/test_runner/regress/test_attach_tenant_config.py @@ -1,5 +1,8 @@ +from __future__ import annotations + +from collections.abc import Generator from dataclasses import dataclass -from typing import Generator, Optional +from typing import Optional import pytest from fixtures.common_types import TenantId diff --git a/test_runner/regress/test_auth.py b/test_runner/regress/test_auth.py index 6b06092183..eba8197116 100644 --- a/test_runner/regress/test_auth.py +++ b/test_runner/regress/test_auth.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import os from contextlib import closing from pathlib import Path diff --git a/test_runner/regress/test_aux_files.py b/test_runner/regress/test_aux_files.py index 5328aef156..91d674d0db 100644 --- a/test_runner/regress/test_aux_files.py +++ b/test_runner/regress/test_aux_files.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from 
fixtures.log_helper import log from fixtures.neon_fixtures import ( AuxFileStore, diff --git a/test_runner/regress/test_backpressure.py b/test_runner/regress/test_backpressure.py index 3d7a52ca77..c75419b786 100644 --- a/test_runner/regress/test_backpressure.py +++ b/test_runner/regress/test_backpressure.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import threading import time from contextlib import closing, contextmanager diff --git a/test_runner/regress/test_bad_connection.py b/test_runner/regress/test_bad_connection.py index 98842e64f4..c0c9537421 100644 --- a/test_runner/regress/test_bad_connection.py +++ b/test_runner/regress/test_bad_connection.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import random import time diff --git a/test_runner/regress/test_basebackup_error.py b/test_runner/regress/test_basebackup_error.py index 13c080ea0e..2dd1a88ad7 100644 --- a/test_runner/regress/test_basebackup_error.py +++ b/test_runner/regress/test_basebackup_error.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import pytest from fixtures.neon_fixtures import NeonEnv diff --git a/test_runner/regress/test_branch_and_gc.py b/test_runner/regress/test_branch_and_gc.py index afeea55fc2..6d1565c5e5 100644 --- a/test_runner/regress/test_branch_and_gc.py +++ b/test_runner/regress/test_branch_and_gc.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import threading import time diff --git a/test_runner/regress/test_branch_behind.py b/test_runner/regress/test_branch_behind.py index cceb7b3d60..619fc15aa3 100644 --- a/test_runner/regress/test_branch_behind.py +++ b/test_runner/regress/test_branch_behind.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import pytest from fixtures.common_types import Lsn, TimelineId from fixtures.log_helper import log diff --git a/test_runner/regress/test_branching.py b/test_runner/regress/test_branching.py index 8d07dfd511..34e4e994cb 100644 --- a/test_runner/regress/test_branching.py +++ b/test_runner/regress/test_branching.py @@ -1,8 +1,9 @@ +from __future__ import annotations + import random import threading import time from concurrent.futures import ThreadPoolExecutor -from typing import List import pytest from fixtures.common_types import Lsn, TimelineId @@ -56,10 +57,10 @@ def test_branching_with_pgbench( pg_bin.run_capture(["pgbench", "-T15", connstr]) env.create_branch("b0", tenant_id=tenant) - endpoints: List[Endpoint] = [] + endpoints: list[Endpoint] = [] endpoints.append(env.endpoints.create_start("b0", tenant_id=tenant)) - threads: List[threading.Thread] = [] + threads: list[threading.Thread] = [] threads.append( threading.Thread(target=run_pgbench, args=(endpoints[0].connstr(),), daemon=True) ) diff --git a/test_runner/regress/test_broken_timeline.py b/test_runner/regress/test_broken_timeline.py index 6b6af481aa..99e0e23b4a 100644 --- a/test_runner/regress/test_broken_timeline.py +++ b/test_runner/regress/test_broken_timeline.py @@ -1,6 +1,7 @@ +from __future__ import annotations + import concurrent.futures import os -from typing import List, Tuple import pytest from fixtures.common_types import TenantId, TimelineId @@ -31,7 +32,7 @@ def test_local_corruption(neon_env_builder: NeonEnvBuilder): ] ) - tenant_timelines: List[Tuple[TenantId, TimelineId, Endpoint]] = [] + tenant_timelines: list[tuple[TenantId, TimelineId, Endpoint]] = [] for _ in range(3): tenant_id, timeline_id = env.create_tenant() diff --git a/test_runner/regress/test_build_info_metric.py b/test_runner/regress/test_build_info_metric.py index 
8f714dae67..9a8744571a 100644 --- a/test_runner/regress/test_build_info_metric.py +++ b/test_runner/regress/test_build_info_metric.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from fixtures.metrics import parse_metrics from fixtures.neon_fixtures import NeonEnvBuilder, NeonProxy diff --git a/test_runner/regress/test_change_pageserver.py b/test_runner/regress/test_change_pageserver.py index d3aa49f374..41aa5b47ca 100644 --- a/test_runner/regress/test_change_pageserver.py +++ b/test_runner/regress/test_change_pageserver.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import asyncio from fixtures.log_helper import log diff --git a/test_runner/regress/test_clog_truncate.py b/test_runner/regress/test_clog_truncate.py index bfce795d14..10027ce689 100644 --- a/test_runner/regress/test_clog_truncate.py +++ b/test_runner/regress/test_clog_truncate.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import os import time diff --git a/test_runner/regress/test_close_fds.py b/test_runner/regress/test_close_fds.py index 3957d0b3b0..c0bf7d2462 100644 --- a/test_runner/regress/test_close_fds.py +++ b/test_runner/regress/test_close_fds.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import os.path import shutil import subprocess @@ -39,9 +41,8 @@ def test_lsof_pageserver_pid(neon_simple_env: NeonEnv): res = subprocess.run( [lsof, path], check=False, - universal_newlines=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, + text=True, + capture_output=True, ) # parse the `lsof` command's output to get only the list of commands diff --git a/test_runner/regress/test_combocid.py b/test_runner/regress/test_combocid.py index 41907b1f20..57d5b2d8b3 100644 --- a/test_runner/regress/test_combocid.py +++ b/test_runner/regress/test_combocid.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from fixtures.neon_fixtures import NeonEnvBuilder, flush_ep_to_pageserver diff --git a/test_runner/regress/test_compaction.py b/test_runner/regress/test_compaction.py index 98bd3a6a5f..39d4a3a6d7 100644 --- a/test_runner/regress/test_compaction.py +++ b/test_runner/regress/test_compaction.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import enum import json import os diff --git a/test_runner/regress/test_compatibility.py b/test_runner/regress/test_compatibility.py index 1f960b6b75..791e38383e 100644 --- a/test_runner/regress/test_compatibility.py +++ b/test_runner/regress/test_compatibility.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import os import re import shutil @@ -5,7 +7,7 @@ import subprocess import tempfile from dataclasses import dataclass from pathlib import Path -from typing import List, Optional +from typing import TYPE_CHECKING import pytest import toml @@ -25,6 +27,10 @@ from fixtures.pg_version import PgVersion from fixtures.remote_storage import RemoteStorageKind, S3Storage, s3_storage from fixtures.workload import Workload +if TYPE_CHECKING: + from typing import Optional + + # # A test suite that helps to prevent unintentionally breaking backward or forward compatibility between Neon releases. # - `test_create_snapshot` a script wrapped in a test that creates a data snapshot.
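On the test_close_fds.py hunk above: text=True is the modern alias of universal_newlines=True, and capture_output=True (Python 3.7+) is shorthand for piping both stdout and stderr. A small equivalence sketch, assuming a POSIX echo, with a generic command rather than the test's lsof invocation:

import subprocess

legacy = subprocess.run(
    ["echo", "hello"],
    check=False,
    universal_newlines=True,
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
)
modern = subprocess.run(["echo", "hello"], check=False, text=True, capture_output=True)

# Both capture and decode the streams the same way.
assert legacy.stdout == modern.stdout == "hello\n"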
@@ -366,7 +372,7 @@ def check_neon_works(env: NeonEnv, test_output_dir: Path, sql_dump_path: Path, r def dump_differs( - first: Path, second: Path, output: Path, allowed_diffs: Optional[List[str]] = None + first: Path, second: Path, output: Path, allowed_diffs: Optional[list[str]] = None ) -> bool: """ Runs the diff(1) command on two SQL dumps and writes the output to the given output file. diff --git a/test_runner/regress/test_compute_catalog.py b/test_runner/regress/test_compute_catalog.py index 8b8c970357..d43c71ceac 100644 --- a/test_runner/regress/test_compute_catalog.py +++ b/test_runner/regress/test_compute_catalog.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import requests from fixtures.neon_fixtures import NeonEnv diff --git a/test_runner/regress/test_compute_metrics.py b/test_runner/regress/test_compute_metrics.py index 6138c322d7..6c75765632 100644 --- a/test_runner/regress/test_compute_metrics.py +++ b/test_runner/regress/test_compute_metrics.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from fixtures.neon_fixtures import NeonEnv diff --git a/test_runner/regress/test_config.py b/test_runner/regress/test_config.py index 5aba1f265f..d48fd01fcb 100644 --- a/test_runner/regress/test_config.py +++ b/test_runner/regress/test_config.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import os from contextlib import closing @@ -66,7 +68,7 @@ def test_safekeepers_reconfigure_reorder( assert new_sks != old_sks, "GUC changes were applied" log_path = os.path.join(endpoint.endpoint_path(), "compute.log") - with open(log_path, "r") as log_file: + with open(log_path) as log_file: logs = log_file.read() # Check that walproposer was not restarted assert "restarting walproposer" not in logs diff --git a/test_runner/regress/test_crafted_wal_end.py b/test_runner/regress/test_crafted_wal_end.py index 71369ab131..23c6fa3a5a 100644 --- a/test_runner/regress/test_crafted_wal_end.py +++ b/test_runner/regress/test_crafted_wal_end.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import pytest from fixtures.log_helper import log from fixtures.neon_cli import WalCraft diff --git a/test_runner/regress/test_createdropdb.py b/test_runner/regress/test_createdropdb.py index cdf048ac26..97e185ceb5 100644 --- a/test_runner/regress/test_createdropdb.py +++ b/test_runner/regress/test_createdropdb.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import os import pathlib diff --git a/test_runner/regress/test_createuser.py b/test_runner/regress/test_createuser.py index 96b38f8fb0..236f4eb2fe 100644 --- a/test_runner/regress/test_createuser.py +++ b/test_runner/regress/test_createuser.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from fixtures.neon_fixtures import NeonEnv from fixtures.utils import query_scalar diff --git a/test_runner/regress/test_ddl_forwarding.py b/test_runner/regress/test_ddl_forwarding.py index 65f310c27a..96657b3ce4 100644 --- a/test_runner/regress/test_ddl_forwarding.py +++ b/test_runner/regress/test_ddl_forwarding.py @@ -1,5 +1,7 @@ +from __future__ import annotations + from types import TracebackType -from typing import Any, Dict, List, Optional, Tuple, Type +from typing import TYPE_CHECKING import psycopg2 import pytest @@ -9,6 +11,9 @@ from pytest_httpserver import HTTPServer from werkzeug.wrappers.request import Request from werkzeug.wrappers.response import Response +if TYPE_CHECKING: + from typing import Any, Optional + def handle_db(dbs, roles, operation): if operation["op"] == "set": @@ -43,7 +48,7 @@ def handle_role(dbs, roles,
operation): def ddl_forward_handler( - request: Request, dbs: Dict[str, str], roles: Dict[str, str], ddl: "DdlForwardingContext" + request: Request, dbs: dict[str, str], roles: dict[str, str], ddl: DdlForwardingContext ) -> Response: log.info(f"Received request with data {request.get_data(as_text=True)}") if ddl.fail: @@ -69,8 +74,8 @@ class DdlForwardingContext: self.pg = vanilla_pg self.host = host self.port = port - self.dbs: Dict[str, str] = {} - self.roles: Dict[str, str] = {} + self.dbs: dict[str, str] = {} + self.roles: dict[str, str] = {} self.fail = False endpoint = "/test/roles_and_databases" ddl_url = f"http://{host}:{port}{endpoint}" @@ -91,13 +96,13 @@ class DdlForwardingContext: def __exit__( self, - exc_type: Optional[Type[BaseException]], + exc_type: Optional[type[BaseException]], exc: Optional[BaseException], tb: Optional[TracebackType], ): self.pg.stop() - def send(self, query: str) -> List[Tuple[Any, ...]]: + def send(self, query: str) -> list[tuple[Any, ...]]: return self.pg.safe_psql(query) def wait(self, timeout=3): @@ -106,7 +111,7 @@ class DdlForwardingContext: def failures(self, bool): self.fail = bool - def send_and_wait(self, query: str, timeout=3) -> List[Tuple[Any, ...]]: + def send_and_wait(self, query: str, timeout=3) -> list[tuple[Any, ...]]: res = self.send(query) self.wait(timeout=timeout) return res diff --git a/test_runner/regress/test_disk_usage_eviction.py b/test_runner/regress/test_disk_usage_eviction.py index 4fcdef0ca3..72866766de 100644 --- a/test_runner/regress/test_disk_usage_eviction.py +++ b/test_runner/regress/test_disk_usage_eviction.py @@ -1,8 +1,11 @@ +from __future__ import annotations + import enum import time from collections import Counter +from collections.abc import Iterable from dataclasses import dataclass -from typing import Any, Dict, Iterable, Tuple +from typing import TYPE_CHECKING import pytest from fixtures.common_types import Lsn, TenantId, TimelineId @@ -19,6 +22,10 @@ from fixtures.pageserver.utils import wait_for_upload_queue_empty from fixtures.remote_storage import RemoteStorageKind from fixtures.utils import human_bytes, wait_until +if TYPE_CHECKING: + from typing import Any + + GLOBAL_LRU_LOG_LINE = "tenant_min_resident_size-respecting LRU would not relieve pressure, evicting more following global LRU policy" # access times in the pageserver are stored at a very low resolution: to generate meaningfully different @@ -74,7 +81,7 @@ class EvictionOrder(str, enum.Enum): RELATIVE_ORDER_EQUAL = "relative_equal" RELATIVE_ORDER_SPARE = "relative_spare" - def config(self) -> Dict[str, Any]: + def config(self) -> dict[str, Any]: if self == EvictionOrder.RELATIVE_ORDER_EQUAL: return { "type": "RelativeAccessed", @@ -91,12 +98,12 @@ class EvictionOrder(str, enum.Enum): @dataclass class EvictionEnv: - timelines: list[Tuple[TenantId, TimelineId]] + timelines: list[tuple[TenantId, TimelineId]] neon_env: NeonEnv pg_bin: PgBin pageserver_http: PageserverHttpClient layer_size: int - pgbench_init_lsns: Dict[TenantId, Lsn] + pgbench_init_lsns: dict[TenantId, Lsn] @property def pageserver(self): @@ -105,7 +112,7 @@ class EvictionEnv: """ return self.neon_env.pageserver - def timelines_du(self, pageserver: NeonPageserver) -> Tuple[int, int, int]: + def timelines_du(self, pageserver: NeonPageserver) -> tuple[int, int, int]: return poor_mans_du( self.neon_env, [(tid, tlid) for tid, tlid in self.timelines], @@ -113,13 +120,13 @@ class EvictionEnv: verbose=False, ) - def du_by_timeline(self, pageserver: NeonPageserver) -> Dict[Tuple[TenantId, 
TimelineId], int]: + def du_by_timeline(self, pageserver: NeonPageserver) -> dict[tuple[TenantId, TimelineId], int]: return { (tid, tlid): poor_mans_du(self.neon_env, [(tid, tlid)], pageserver, verbose=True)[0] for tid, tlid in self.timelines } - def count_layers_per_tenant(self, pageserver: NeonPageserver) -> Dict[TenantId, int]: + def count_layers_per_tenant(self, pageserver: NeonPageserver) -> dict[TenantId, int]: return count_layers_per_tenant(pageserver, self.timelines) def warm_up_tenant(self, tenant_id: TenantId): @@ -204,8 +211,8 @@ class EvictionEnv: def count_layers_per_tenant( - pageserver: NeonPageserver, timelines: Iterable[Tuple[TenantId, TimelineId]] -) -> Dict[TenantId, int]: + pageserver: NeonPageserver, timelines: Iterable[tuple[TenantId, TimelineId]] +) -> dict[TenantId, int]: ret: Counter[TenantId] = Counter() for tenant_id, timeline_id in timelines: @@ -279,7 +286,7 @@ def _eviction_env( def pgbench_init_tenant( layer_size: int, scale: int, env: NeonEnv, pg_bin: PgBin -) -> Tuple[TenantId, TimelineId]: +) -> tuple[TenantId, TimelineId]: tenant_id, timeline_id = env.create_tenant( conf={ "gc_period": "0s", @@ -672,10 +679,10 @@ def test_fast_growing_tenant(neon_env_builder: NeonEnvBuilder, pg_bin: PgBin, or def poor_mans_du( env: NeonEnv, - timelines: Iterable[Tuple[TenantId, TimelineId]], + timelines: Iterable[tuple[TenantId, TimelineId]], pageserver: NeonPageserver, verbose: bool = False, -) -> Tuple[int, int, int]: +) -> tuple[int, int, int]: """ Disk usage, largest, smallest layer for layer files over the given (tenant, timeline) tuples; this could be done over layers endpoint just as well. diff --git a/test_runner/regress/test_download_extensions.py b/test_runner/regress/test_download_extensions.py index c89a82965e..04916a6b6f 100644 --- a/test_runner/regress/test_download_extensions.py +++ b/test_runner/regress/test_download_extensions.py @@ -1,8 +1,10 @@ +from __future__ import annotations + import os import shutil from contextlib import closing from pathlib import Path -from typing import Any, Dict +from typing import TYPE_CHECKING import pytest from fixtures.log_helper import log @@ -14,6 +16,9 @@ from pytest_httpserver import HTTPServer from werkzeug.wrappers.request import Request from werkzeug.wrappers.response import Response +if TYPE_CHECKING: + from typing import Any + # use neon_env_builder_local fixture to override the default neon_env_builder fixture # and use a test-specific pg_install instead of shared one @@ -88,7 +93,7 @@ def test_remote_extensions( ) # mock remote_extensions spec - spec: Dict[str, Any] = { + spec: dict[str, Any] = { "library_index": { "anon": "anon", }, diff --git a/test_runner/regress/test_endpoint_crash.py b/test_runner/regress/test_endpoint_crash.py index e34dfab6c4..0217cd0d03 100644 --- a/test_runner/regress/test_endpoint_crash.py +++ b/test_runner/regress/test_endpoint_crash.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import pytest from fixtures.neon_fixtures import NeonEnvBuilder diff --git a/test_runner/regress/test_explain_with_lfc_stats.py b/test_runner/regress/test_explain_with_lfc_stats.py index 0217c9ac7b..2128bd93dd 100644 --- a/test_runner/regress/test_explain_with_lfc_stats.py +++ b/test_runner/regress/test_explain_with_lfc_stats.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from pathlib import Path from fixtures.log_helper import log diff --git a/test_runner/regress/test_fsm_truncate.py b/test_runner/regress/test_fsm_truncate.py index 691f96ab0a..55a010f26a 100644 --- 
a/test_runner/regress/test_fsm_truncate.py +++ b/test_runner/regress/test_fsm_truncate.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from fixtures.neon_fixtures import NeonEnvBuilder diff --git a/test_runner/regress/test_fullbackup.py b/test_runner/regress/test_fullbackup.py index e6d51a77a6..62d59528cf 100644 --- a/test_runner/regress/test_fullbackup.py +++ b/test_runner/regress/test_fullbackup.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import os from pathlib import Path diff --git a/test_runner/regress/test_gc_aggressive.py b/test_runner/regress/test_gc_aggressive.py index 3d472f9720..97c38cf658 100644 --- a/test_runner/regress/test_gc_aggressive.py +++ b/test_runner/regress/test_gc_aggressive.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import asyncio import concurrent.futures import random diff --git a/test_runner/regress/test_gin_redo.py b/test_runner/regress/test_gin_redo.py index 9205882239..71382990dc 100644 --- a/test_runner/regress/test_gin_redo.py +++ b/test_runner/regress/test_gin_redo.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import time from fixtures.neon_fixtures import NeonEnv, wait_replica_caughtup diff --git a/test_runner/regress/test_hot_standby.py b/test_runner/regress/test_hot_standby.py index be8f70bb70..a906e7a243 100644 --- a/test_runner/regress/test_hot_standby.py +++ b/test_runner/regress/test_hot_standby.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import asyncio import os import threading diff --git a/test_runner/regress/test_import.py b/test_runner/regress/test_import.py index 87b44e4e3e..e367db33ff 100644 --- a/test_runner/regress/test_import.py +++ b/test_runner/regress/test_import.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import json import os import re diff --git a/test_runner/regress/test_ingestion_layer_size.py b/test_runner/regress/test_ingestion_layer_size.py index 44c77b3410..2edbf4d6d3 100644 --- a/test_runner/regress/test_ingestion_layer_size.py +++ b/test_runner/regress/test_ingestion_layer_size.py @@ -1,5 +1,8 @@ +from __future__ import annotations + +from collections.abc import Iterable from dataclasses import dataclass -from typing import Iterable, List, Union +from typing import TYPE_CHECKING import pytest from fixtures.log_helper import log @@ -7,6 +10,9 @@ from fixtures.neon_fixtures import NeonEnvBuilder, wait_for_last_flush_lsn from fixtures.pageserver.http import HistoricLayerInfo, LayerMapInfo from fixtures.utils import human_bytes +if TYPE_CHECKING: + from typing import Union + def test_ingesting_large_batches_of_images(neon_env_builder: NeonEnvBuilder, build_type: str): """ @@ -106,13 +112,13 @@ def test_ingesting_large_batches_of_images(neon_env_builder: NeonEnvBuilder, bui @dataclass class Histogram: - buckets: List[Union[int, float]] - counts: List[int] - sums: List[int] + buckets: list[Union[int, float]] + counts: list[int] + sums: list[int] def histogram_historic_layers( - infos: LayerMapInfo, minimum_sizes: List[Union[int, float]] + infos: LayerMapInfo, minimum_sizes: list[Union[int, float]] ) -> Histogram: def log_layer(layer: HistoricLayerInfo) -> HistoricLayerInfo: log.info( @@ -125,7 +131,7 @@ def histogram_historic_layers( return histogram(sizes, minimum_sizes) -def histogram(sizes: Iterable[int], minimum_sizes: List[Union[int, float]]) -> Histogram: +def histogram(sizes: Iterable[int], minimum_sizes: list[Union[int, float]]) -> Histogram: assert all(minimum_sizes[i] < minimum_sizes[i + 1] for i in range(len(minimum_sizes) - 1)) buckets = 
list(enumerate(minimum_sizes)) counts = [0 for _ in buckets] diff --git a/test_runner/regress/test_large_schema.py b/test_runner/regress/test_large_schema.py index c5d5b5fe64..ae5113ed45 100644 --- a/test_runner/regress/test_large_schema.py +++ b/test_runner/regress/test_large_schema.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import os import time diff --git a/test_runner/regress/test_layer_bloating.py b/test_runner/regress/test_layer_bloating.py index b8126395fd..a08d522fc2 100644 --- a/test_runner/regress/test_layer_bloating.py +++ b/test_runner/regress/test_layer_bloating.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import os import pytest diff --git a/test_runner/regress/test_layer_eviction.py b/test_runner/regress/test_layer_eviction.py index 82cfe08bc0..c49ac6893e 100644 --- a/test_runner/regress/test_layer_eviction.py +++ b/test_runner/regress/test_layer_eviction.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import time import pytest diff --git a/test_runner/regress/test_layer_writers_fail.py b/test_runner/regress/test_layer_writers_fail.py index 1711cc1414..dd31e2725b 100644 --- a/test_runner/regress/test_layer_writers_fail.py +++ b/test_runner/regress/test_layer_writers_fail.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import pytest from fixtures.neon_fixtures import NeonEnv, NeonPageserver from fixtures.pageserver.http import PageserverApiException diff --git a/test_runner/regress/test_layers_from_future.py b/test_runner/regress/test_layers_from_future.py index 2857df8ef7..2536ec1b3c 100644 --- a/test_runner/regress/test_layers_from_future.py +++ b/test_runner/regress/test_layers_from_future.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import time from fixtures.common_types import Lsn diff --git a/test_runner/regress/test_lfc_resize.py b/test_runner/regress/test_lfc_resize.py index 0f791e9247..3083128d87 100644 --- a/test_runner/regress/test_lfc_resize.py +++ b/test_runner/regress/test_lfc_resize.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import os import random import re diff --git a/test_runner/regress/test_lfc_working_set_approximation.py b/test_runner/regress/test_lfc_working_set_approximation.py index 4a3a949d1a..36dfec969f 100644 --- a/test_runner/regress/test_lfc_working_set_approximation.py +++ b/test_runner/regress/test_lfc_working_set_approximation.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import time from pathlib import Path diff --git a/test_runner/regress/test_local_file_cache.py b/test_runner/regress/test_local_file_cache.py index 9c38200937..fbf018a167 100644 --- a/test_runner/regress/test_local_file_cache.py +++ b/test_runner/regress/test_local_file_cache.py @@ -1,9 +1,10 @@ +from __future__ import annotations + import os import queue import random import threading import time -from typing import List from fixtures.neon_fixtures import NeonEnvBuilder from fixtures.utils import query_scalar @@ -57,7 +58,7 @@ def test_local_file_cache_unlink(neon_env_builder: NeonEnvBuilder): n_updates_performed_q.put(n_updates_performed) n_updates_performed_q: queue.Queue[int] = queue.Queue() - threads: List[threading.Thread] = [] + threads: list[threading.Thread] = [] for _i in range(n_threads): thread = threading.Thread(target=run_updates, args=(n_updates_performed_q,), daemon=True) thread.start() diff --git a/test_runner/regress/test_logging.py b/test_runner/regress/test_logging.py index bfffad7572..9a3fdd835d 100644 --- a/test_runner/regress/test_logging.py +++ 
b/test_runner/regress/test_logging.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import uuid import pytest diff --git a/test_runner/regress/test_logical_replication.py b/test_runner/regress/test_logical_replication.py index 1aa1bdf366..87991eadf1 100644 --- a/test_runner/regress/test_logical_replication.py +++ b/test_runner/regress/test_logical_replication.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import time from functools import partial from random import choice @@ -336,7 +338,7 @@ FROM generate_series(1, 16384) AS seq; -- Inserts enough rows to exceed 16MB of assert [r[0] for r in vanilla_pg.safe_psql("select * from t")] == [1, 2, 3] log_path = vanilla_pg.pgdatadir / "pg.log" - with open(log_path, "r") as log_file: + with open(log_path) as log_file: logs = log_file.read() assert "could not receive data from WAL stream" not in logs diff --git a/test_runner/regress/test_lsn_mapping.py b/test_runner/regress/test_lsn_mapping.py index ab43e32146..8b41d0cb1c 100644 --- a/test_runner/regress/test_lsn_mapping.py +++ b/test_runner/regress/test_lsn_mapping.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import re import time from concurrent.futures import ThreadPoolExecutor diff --git a/test_runner/regress/test_multixact.py b/test_runner/regress/test_multixact.py index 742d03e464..e8bbe5aa97 100644 --- a/test_runner/regress/test_multixact.py +++ b/test_runner/regress/test_multixact.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from fixtures.neon_fixtures import NeonEnv, check_restored_datadir_content from fixtures.utils import query_scalar diff --git a/test_runner/regress/test_neon_cli.py b/test_runner/regress/test_neon_cli.py index 04780ebcf1..3a0a4b10bf 100644 --- a/test_runner/regress/test_neon_cli.py +++ b/test_runner/regress/test_neon_cli.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import os import subprocess from pathlib import Path diff --git a/test_runner/regress/test_neon_extension.py b/test_runner/regress/test_neon_extension.py index a99e9e15af..4035398a5f 100644 --- a/test_runner/regress/test_neon_extension.py +++ b/test_runner/regress/test_neon_extension.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import time from contextlib import closing diff --git a/test_runner/regress/test_neon_local_cli.py b/test_runner/regress/test_neon_local_cli.py index 0fdc5960e3..80e26d9432 100644 --- a/test_runner/regress/test_neon_local_cli.py +++ b/test_runner/regress/test_neon_local_cli.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import pytest from fixtures.common_types import TimelineId from fixtures.neon_fixtures import NeonEnvBuilder diff --git a/test_runner/regress/test_neon_superuser.py b/test_runner/regress/test_neon_superuser.py index dc1c9d3fd9..7118127a1f 100644 --- a/test_runner/regress/test_neon_superuser.py +++ b/test_runner/regress/test_neon_superuser.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from fixtures.log_helper import log from fixtures.neon_fixtures import NeonEnv from fixtures.pg_version import PgVersion diff --git a/test_runner/regress/test_next_xid.py b/test_runner/regress/test_next_xid.py index cac74492d7..980f6b5694 100644 --- a/test_runner/regress/test_next_xid.py +++ b/test_runner/regress/test_next_xid.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import os import time from pathlib import Path @@ -189,7 +191,7 @@ def test_import_at_2bil( # calculate the SLRU segments that a particular multixid or multixid-offsets falls into. 
BLCKSZ = 8192 MULTIXACT_OFFSETS_PER_PAGE = int(BLCKSZ / 4) -SLRU_PAGES_PER_SEGMENT = int(32) +SLRU_PAGES_PER_SEGMENT = 32 MXACT_MEMBER_BITS_PER_XACT = 8 MXACT_MEMBER_FLAGS_PER_BYTE = 1 MULTIXACT_FLAGBYTES_PER_GROUP = 4 diff --git a/test_runner/regress/test_normal_work.py b/test_runner/regress/test_normal_work.py index 54433769fd..ae2d171058 100644 --- a/test_runner/regress/test_normal_work.py +++ b/test_runner/regress/test_normal_work.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import pytest from fixtures.log_helper import log from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder diff --git a/test_runner/regress/test_oid_overflow.py b/test_runner/regress/test_oid_overflow.py index e8eefc2414..f69c1112c7 100644 --- a/test_runner/regress/test_oid_overflow.py +++ b/test_runner/regress/test_oid_overflow.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from fixtures.log_helper import log from fixtures.neon_fixtures import NeonEnvBuilder diff --git a/test_runner/regress/test_old_request_lsn.py b/test_runner/regress/test_old_request_lsn.py index dfd0271c10..a615464582 100644 --- a/test_runner/regress/test_old_request_lsn.py +++ b/test_runner/regress/test_old_request_lsn.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from fixtures.common_types import TimelineId from fixtures.log_helper import log from fixtures.neon_fixtures import NeonEnvBuilder diff --git a/test_runner/regress/test_ondemand_download.py b/test_runner/regress/test_ondemand_download.py index 0d712d06f1..e1caaeb6c1 100644 --- a/test_runner/regress/test_ondemand_download.py +++ b/test_runner/regress/test_ondemand_download.py @@ -1,10 +1,12 @@ # It's possible to run any regular test with the local fs remote storage via # env ZENITH_PAGESERVER_OVERRIDES="remote_storage={local_path='/tmp/neon_zzz/'}" poetry ...... 
+from __future__ import annotations + import time from collections import defaultdict from concurrent.futures import ThreadPoolExecutor -from typing import Any, DefaultDict, Dict, Tuple +from typing import TYPE_CHECKING import pytest from fixtures.common_types import Lsn @@ -26,6 +28,9 @@ from fixtures.pageserver.utils import ( from fixtures.remote_storage import RemoteStorageKind, S3Storage, s3_storage from fixtures.utils import query_scalar, wait_until +if TYPE_CHECKING: + from typing import Any + def get_num_downloaded_layers(client: PageserverHttpClient): """ @@ -505,7 +510,7 @@ def test_compaction_downloads_on_demand_without_image_creation(neon_env_builder: env = neon_env_builder.init_start(initial_tenant_conf=stringify(conf)) - def downloaded_bytes_and_count(pageserver_http: PageserverHttpClient) -> Tuple[int, int]: + def downloaded_bytes_and_count(pageserver_http: PageserverHttpClient) -> tuple[int, int]: m = pageserver_http.get_metrics() # these are global counters total_bytes = m.query_one("pageserver_remote_ondemand_downloaded_bytes_total").value @@ -634,7 +639,7 @@ def test_compaction_downloads_on_demand_with_image_creation(neon_env_builder: Ne layers = pageserver_http.layer_map_info(tenant_id, timeline_id) assert not layers.in_memory_layers, "no inmemory layers expected after post-commit checkpoint" - kinds_before: DefaultDict[str, int] = defaultdict(int) + kinds_before: defaultdict[str, int] = defaultdict(int) for layer in layers.historic_layers: kinds_before[layer.kind] += 1 @@ -651,7 +656,7 @@ def test_compaction_downloads_on_demand_with_image_creation(neon_env_builder: Ne pageserver_http.timeline_compact(tenant_id, timeline_id) layers = pageserver_http.layer_map_info(tenant_id, timeline_id) - kinds_after: DefaultDict[str, int] = defaultdict(int) + kinds_after: defaultdict[str, int] = defaultdict(int) for layer in layers.historic_layers: kinds_after[layer.kind] += 1 @@ -855,5 +860,5 @@ def test_layer_download_timeouted(neon_env_builder: NeonEnvBuilder): assert elapsed < 30, "too long passed: {elapsed=}" -def stringify(conf: Dict[str, Any]) -> Dict[str, str]: +def stringify(conf: dict[str, Any]) -> dict[str, str]: return dict(map(lambda x: (x[0], str(x[1])), conf.items())) diff --git a/test_runner/regress/test_ondemand_slru_download.py b/test_runner/regress/test_ondemand_slru_download.py index d6babe4393..5eaba78331 100644 --- a/test_runner/regress/test_ondemand_slru_download.py +++ b/test_runner/regress/test_ondemand_slru_download.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from typing import Optional import pytest diff --git a/test_runner/regress/test_pageserver_api.py b/test_runner/regress/test_pageserver_api.py index a19bc785f8..d1b70b9ee6 100644 --- a/test_runner/regress/test_pageserver_api.py +++ b/test_runner/regress/test_pageserver_api.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from typing import Optional from fixtures.common_types import Lsn, TenantId, TimelineId diff --git a/test_runner/regress/test_pageserver_catchup.py b/test_runner/regress/test_pageserver_catchup.py index d020104431..3567e05f81 100644 --- a/test_runner/regress/test_pageserver_catchup.py +++ b/test_runner/regress/test_pageserver_catchup.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from fixtures.neon_fixtures import NeonEnvBuilder diff --git a/test_runner/regress/test_pageserver_crash_consistency.py b/test_runner/regress/test_pageserver_crash_consistency.py index 2d6b50490e..ac46d3e62a 100644 --- a/test_runner/regress/test_pageserver_crash_consistency.py +++ 
b/test_runner/regress/test_pageserver_crash_consistency.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import pytest from fixtures.neon_fixtures import NeonEnvBuilder, PgBin, wait_for_last_flush_lsn from fixtures.pageserver.common_types import ImageLayerName, parse_layer_file_name diff --git a/test_runner/regress/test_pageserver_generations.py b/test_runner/regress/test_pageserver_generations.py index a135b3da1a..d13cf0019d 100644 --- a/test_runner/regress/test_pageserver_generations.py +++ b/test_runner/regress/test_pageserver_generations.py @@ -9,6 +9,8 @@ of the pageserver are: - Updates to remote_consistent_lsn may only be made visible after validating generation """ +from __future__ import annotations + import enum import os import re diff --git a/test_runner/regress/test_pageserver_getpage_throttle.py b/test_runner/regress/test_pageserver_getpage_throttle.py index 4c9eac5cd7..6811d09cff 100644 --- a/test_runner/regress/test_pageserver_getpage_throttle.py +++ b/test_runner/regress/test_pageserver_getpage_throttle.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import copy import json import uuid @@ -61,7 +63,7 @@ def test_pageserver_getpage_throttle(neon_env_builder: NeonEnvBuilder, pg_bin: P results_path = Path(basepath + ".stdout") log.info(f"Benchmark results at: {results_path}") - with open(results_path, "r") as f: + with open(results_path) as f: results = json.load(f) log.info(f"Results:\n{json.dumps(results, sort_keys=True, indent=2)}") return int(results["total"]["request_count"]) diff --git a/test_runner/regress/test_pageserver_layer_rolling.py b/test_runner/regress/test_pageserver_layer_rolling.py index 8c6e563357..c0eb598891 100644 --- a/test_runner/regress/test_pageserver_layer_rolling.py +++ b/test_runner/regress/test_pageserver_layer_rolling.py @@ -1,7 +1,9 @@ +from __future__ import annotations + import asyncio import os import time -from typing import Optional, Tuple +from typing import TYPE_CHECKING import psutil import pytest @@ -16,6 +18,10 @@ from fixtures.pageserver.http import PageserverHttpClient from fixtures.pageserver.utils import wait_for_last_record_lsn, wait_for_upload from fixtures.utils import wait_until +if TYPE_CHECKING: + from typing import Optional + + TIMELINE_COUNT = 10 ENTRIES_PER_TIMELINE = 10_000 CHECKPOINT_TIMEOUT_SECONDS = 60 @@ -41,7 +47,7 @@ async def run_worker_for_tenant( return last_flush_lsn -async def run_worker(env: NeonEnv, tenant_conf, entries: int) -> Tuple[TenantId, TimelineId, Lsn]: +async def run_worker(env: NeonEnv, tenant_conf, entries: int) -> tuple[TenantId, TimelineId, Lsn]: tenant, timeline = env.create_tenant(conf=tenant_conf) last_flush_lsn = await run_worker_for_tenant(env, entries, tenant) return tenant, timeline, last_flush_lsn @@ -49,13 +55,13 @@ async def run_worker(env: NeonEnv, tenant_conf, entries: int) -> Tuple[TenantId, async def workload( env: NeonEnv, tenant_conf, timelines: int, entries: int -) -> list[Tuple[TenantId, TimelineId, Lsn]]: +) -> list[tuple[TenantId, TimelineId, Lsn]]: workers = [asyncio.create_task(run_worker(env, tenant_conf, entries)) for _ in range(timelines)] return await asyncio.gather(*workers) def wait_until_pageserver_is_caught_up( - env: NeonEnv, last_flush_lsns: list[Tuple[TenantId, TimelineId, Lsn]] + env: NeonEnv, last_flush_lsns: list[tuple[TenantId, TimelineId, Lsn]] ): for tenant, timeline, last_flush_lsn in last_flush_lsns: shards = tenant_get_shards(env, tenant) @@ -67,7 +73,7 @@ def wait_until_pageserver_is_caught_up( def wait_until_pageserver_has_uploaded( - 
env: NeonEnv, last_flush_lsns: list[Tuple[TenantId, TimelineId, Lsn]] + env: NeonEnv, last_flush_lsns: list[tuple[TenantId, TimelineId, Lsn]] ): for tenant, timeline, last_flush_lsn in last_flush_lsns: shards = tenant_get_shards(env, tenant) diff --git a/test_runner/regress/test_pageserver_metric_collection.py b/test_runner/regress/test_pageserver_metric_collection.py index 37ab51f9fb..5ec8357597 100644 --- a/test_runner/regress/test_pageserver_metric_collection.py +++ b/test_runner/regress/test_pageserver_metric_collection.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import gzip import json import os @@ -5,7 +7,7 @@ import time from dataclasses import dataclass from pathlib import Path from queue import SimpleQueue -from typing import Any, Dict, Set +from typing import TYPE_CHECKING from fixtures.common_types import TenantId, TimelineId from fixtures.log_helper import log @@ -22,6 +24,10 @@ from pytest_httpserver import HTTPServer from werkzeug.wrappers.request import Request from werkzeug.wrappers.response import Response +if TYPE_CHECKING: + from typing import Any + + # TODO: collect all of the env setup *AFTER* removal of RemoteStorageKind.NOOP @@ -308,8 +314,8 @@ def test_metric_collection_cleans_up_tempfile( @dataclass class PrefixPartitionedFiles: - matching: Set[str] - other: Set[str] + matching: set[str] + other: set[str] def iterate_pageserver_workdir(path: Path, prefix: str) -> PrefixPartitionedFiles: @@ -340,7 +346,7 @@ class MetricsVerifier: """ def __init__(self): - self.tenants: Dict[TenantId, TenantMetricsVerifier] = {} + self.tenants: dict[TenantId, TenantMetricsVerifier] = {} pass def ingest(self, events, is_last): @@ -357,8 +363,8 @@ class MetricsVerifier: for t in self.tenants.values(): t.post_batch() - def accepted_event_names(self) -> Set[str]: - names: Set[str] = set() + def accepted_event_names(self) -> set[str]: + names: set[str] = set() for t in self.tenants.values(): names = names.union(t.accepted_event_names()) return names @@ -367,8 +373,8 @@ class MetricsVerifier: class TenantMetricsVerifier: def __init__(self, id: TenantId): self.id = id - self.timelines: Dict[TimelineId, TimelineMetricsVerifier] = {} - self.state: Dict[str, Any] = {} + self.timelines: dict[TimelineId, TimelineMetricsVerifier] = {} + self.state: dict[str, Any] = {} def ingest(self, event): assert TenantId(event["tenant_id"]) == self.id @@ -392,7 +398,7 @@ class TenantMetricsVerifier: for tl in self.timelines.values(): tl.post_batch(self) - def accepted_event_names(self) -> Set[str]: + def accepted_event_names(self) -> set[str]: names = set(self.state.keys()) for t in self.timelines.values(): names = names.union(t.accepted_event_names()) @@ -402,7 +408,7 @@ class TenantMetricsVerifier: class TimelineMetricsVerifier: def __init__(self, tenant_id: TenantId, timeline_id: TimelineId): self.id = timeline_id - self.state: Dict[str, Any] = {} + self.state: dict[str, Any] = {} def ingest(self, event): name = event["metric"] @@ -414,7 +420,7 @@ class TimelineMetricsVerifier: for v in self.state.values(): v.post_batch(self) - def accepted_event_names(self) -> Set[str]: + def accepted_event_names(self) -> set[str]: return set(self.state.keys()) diff --git a/test_runner/regress/test_pageserver_reconnect.py b/test_runner/regress/test_pageserver_reconnect.py index 7f10c36db8..be63208428 100644 --- a/test_runner/regress/test_pageserver_reconnect.py +++ b/test_runner/regress/test_pageserver_reconnect.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import threading import time from 
contextlib import closing diff --git a/test_runner/regress/test_pageserver_restart.py b/test_runner/regress/test_pageserver_restart.py index 86313ca91e..f7c42fc893 100644 --- a/test_runner/regress/test_pageserver_restart.py +++ b/test_runner/regress/test_pageserver_restart.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import random from contextlib import closing from typing import Optional diff --git a/test_runner/regress/test_pageserver_restarts_under_workload.py b/test_runner/regress/test_pageserver_restarts_under_workload.py index 637e1a87d3..ec74e03f89 100644 --- a/test_runner/regress/test_pageserver_restarts_under_workload.py +++ b/test_runner/regress/test_pageserver_restarts_under_workload.py @@ -1,6 +1,9 @@ # This test spawns pgbench in a thread in the background and concurrently restarts pageserver, # checking how client is able to transparently restore connection to pageserver # + +from __future__ import annotations + import threading import time diff --git a/test_runner/regress/test_pageserver_secondary.py b/test_runner/regress/test_pageserver_secondary.py index cd772beace..705b4ff054 100644 --- a/test_runner/regress/test_pageserver_secondary.py +++ b/test_runner/regress/test_pageserver_secondary.py @@ -1,9 +1,11 @@ +from __future__ import annotations + import json import os import random import time from pathlib import Path -from typing import Any, Dict, Optional, Union +from typing import TYPE_CHECKING import pytest from fixtures.common_types import TenantId, TenantShardId, TimelineId @@ -20,6 +22,10 @@ from fixtures.workload import Workload from werkzeug.wrappers.request import Request from werkzeug.wrappers.response import Response +if TYPE_CHECKING: + from typing import Any, Optional, Union + + # A tenant configuration that is convenient for generating uploads and deletions # without a large amount of postgres traffic. 
TENANT_CONF = { @@ -193,11 +199,11 @@ def test_location_conf_churn(neon_env_builder: NeonEnvBuilder, make_httpserver, # state if it was running attached with a stale generation last_state[pageserver.id] = ("Detached", None) else: - secondary_conf: Optional[Dict[str, Any]] = None + secondary_conf: Optional[dict[str, Any]] = None if mode == "Secondary": secondary_conf = {"warm": rng.choice([True, False])} - location_conf: Dict[str, Any] = { + location_conf: dict[str, Any] = { "mode": mode, "secondary_conf": secondary_conf, "tenant_conf": {}, diff --git a/test_runner/regress/test_parallel_copy.py b/test_runner/regress/test_parallel_copy.py index a5037e8694..1689755b6f 100644 --- a/test_runner/regress/test_parallel_copy.py +++ b/test_runner/regress/test_parallel_copy.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import asyncio from io import BytesIO diff --git a/test_runner/regress/test_pg_query_cancellation.py b/test_runner/regress/test_pg_query_cancellation.py index c6b4eff516..d4ed7230fa 100644 --- a/test_runner/regress/test_pg_query_cancellation.py +++ b/test_runner/regress/test_pg_query_cancellation.py @@ -1,5 +1,6 @@ +from __future__ import annotations + from contextlib import closing -from typing import Set import pytest from fixtures.log_helper import log @@ -7,7 +8,7 @@ from fixtures.neon_fixtures import Endpoint, NeonEnv, NeonPageserver from fixtures.pageserver.http import PageserverHttpClient from psycopg2.errors import QueryCanceled -CRITICAL_PG_PS_WAIT_FAILPOINTS: Set[str] = { +CRITICAL_PG_PS_WAIT_FAILPOINTS: set[str] = { "ps::connection-start::pre-login", "ps::connection-start::startup-packet", "ps::connection-start::process-query", @@ -92,7 +93,7 @@ def test_cancellations(neon_simple_env: NeonEnv): connect_works_correctly(failpoint, ep, ps, ps_http) -ENABLED_FAILPOINTS: Set[str] = set() +ENABLED_FAILPOINTS: set[str] = set() def connect_works_correctly( diff --git a/test_runner/regress/test_pg_waldump.py b/test_runner/regress/test_pg_waldump.py index 1990d69b6a..c98d395451 100644 --- a/test_runner/regress/test_pg_waldump.py +++ b/test_runner/regress/test_pg_waldump.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import os import shutil @@ -12,7 +14,7 @@ def check_wal_segment(pg_waldump_path: str, segment_path: str, test_output_dir): test_output_dir, [pg_waldump_path, "--ignore", segment_path] ) - with open(f"{output_path}.stdout", "r") as f: + with open(f"{output_path}.stdout") as f: stdout = f.read() assert "ABORT" in stdout assert "COMMIT" in stdout diff --git a/test_runner/regress/test_pitr_gc.py b/test_runner/regress/test_pitr_gc.py index 871a31b9ba..d983d77e72 100644 --- a/test_runner/regress/test_pitr_gc.py +++ b/test_runner/regress/test_pitr_gc.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from fixtures.common_types import TimelineId from fixtures.log_helper import log from fixtures.neon_fixtures import NeonEnvBuilder diff --git a/test_runner/regress/test_postgres_version.py b/test_runner/regress/test_postgres_version.py index d8626c15a5..5eb743809f 100644 --- a/test_runner/regress/test_postgres_version.py +++ b/test_runner/regress/test_postgres_version.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import json import re from pathlib import Path diff --git a/test_runner/regress/test_proxy.py b/test_runner/regress/test_proxy.py index d2b8c2ed8b..f598900af9 100644 --- a/test_runner/regress/test_proxy.py +++ b/test_runner/regress/test_proxy.py @@ -1,15 +1,21 @@ +from __future__ import annotations + import asyncio import json import 
diff --git a/test_runner/regress/test_proxy.py b/test_runner/regress/test_proxy.py
index d2b8c2ed8b..f598900af9 100644
--- a/test_runner/regress/test_proxy.py
+++ b/test_runner/regress/test_proxy.py
@@ -1,15 +1,21 @@
+from __future__ import annotations
+
 import asyncio
 import json
 import subprocess
 import time
 import urllib.parse
-from typing import Any, List, Optional, Tuple
+from typing import TYPE_CHECKING
 
 import psycopg2
 import pytest
 import requests
 from fixtures.neon_fixtures import PSQL, NeonProxy, VanillaPostgres
 
+if TYPE_CHECKING:
+    from typing import Any, Optional
+
+
 GET_CONNECTION_PID_QUERY = "SELECT pid FROM pg_stat_activity WHERE state = 'active'"
@@ -222,7 +228,7 @@ def test_sql_over_http_serverless_driver(static_proxy: NeonProxy):
 def test_sql_over_http(static_proxy: NeonProxy):
     static_proxy.safe_psql("create role http with login password 'http' superuser")
 
-    def q(sql: str, params: Optional[List[Any]] = None) -> Any:
+    def q(sql: str, params: Optional[list[Any]] = None) -> Any:
         params = params or []
         connstr = f"postgresql://http:http@{static_proxy.domain}:{static_proxy.proxy_port}/postgres"
         response = requests.post(
@@ -285,7 +291,7 @@ def test_sql_over_http_db_name_with_space(static_proxy: NeonProxy):
         )
     )
 
-    def q(sql: str, params: Optional[List[Any]] = None) -> Any:
+    def q(sql: str, params: Optional[list[Any]] = None) -> Any:
         params = params or []
         connstr = f"postgresql://http:http@{static_proxy.domain}:{static_proxy.proxy_port}/{urllib.parse.quote(db)}"
         response = requests.post(
@@ -304,7 +310,7 @@ def test_sql_over_http_db_name_with_space(static_proxy: NeonProxy):
 def test_sql_over_http_output_options(static_proxy: NeonProxy):
     static_proxy.safe_psql("create role http2 with login password 'http2' superuser")
 
-    def q(sql: str, raw_text: bool, array_mode: bool, params: Optional[List[Any]] = None) -> Any:
+    def q(sql: str, raw_text: bool, array_mode: bool, params: Optional[list[Any]] = None) -> Any:
         params = params or []
         connstr = (
             f"postgresql://http2:http2@{static_proxy.domain}:{static_proxy.proxy_port}/postgres"
@@ -340,7 +346,7 @@ def test_sql_over_http_batch(static_proxy: NeonProxy):
     static_proxy.safe_psql("create role http with login password 'http' superuser")
 
     def qq(
-        queries: List[Tuple[str, Optional[List[Any]]]],
+        queries: list[tuple[str, Optional[list[Any]]]],
         read_only: bool = False,
         deferrable: bool = False,
     ) -> Any:
diff --git a/test_runner/regress/test_proxy_allowed_ips.py b/test_runner/regress/test_proxy_allowed_ips.py
index 7a804114ba..902da1942e 100644
--- a/test_runner/regress/test_proxy_allowed_ips.py
+++ b/test_runner/regress/test_proxy_allowed_ips.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import psycopg2
 import pytest
 from fixtures.neon_fixtures import (
diff --git a/test_runner/regress/test_proxy_metric_collection.py b/test_runner/regress/test_proxy_metric_collection.py
index f57b47f4da..dd63256388 100644
--- a/test_runner/regress/test_proxy_metric_collection.py
+++ b/test_runner/regress/test_proxy_metric_collection.py
@@ -1,5 +1,7 @@
+from __future__ import annotations
+
+from collections.abc import Iterator
 from pathlib import Path
-from typing import Iterator
 
 import pytest
 from fixtures.log_helper import log
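
The test_proxy_metric_collection.py hunk imports Iterator from collections.abc instead of typing; since Python 3.9 the collections.abc classes are subscriptable in annotations and the typing aliases are deprecated. A sketch of the import in use, assuming a toy generator:

from __future__ import annotations

from collections.abc import Iterator


def countdown(n: int) -> Iterator[int]:
    # A generator function: callers see an iterator of ints.
    while n > 0:
        yield n
        n -= 1


assert list(countdown(3)) == [3, 2, 1]
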
diff --git a/test_runner/regress/test_proxy_websockets.py b/test_runner/regress/test_proxy_websockets.py
index 6211446a40..071ca7c54e 100644
--- a/test_runner/regress/test_proxy_websockets.py
+++ b/test_runner/regress/test_proxy_websockets.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import ssl
 
 import pytest
@@ -53,7 +55,7 @@ async def test_websockets(static_proxy: NeonProxy):
         assert auth_response[1:5] == b"\x00\x00\x00\x08", "should be 8 bytes long message"
         assert auth_response[5:9] == b"\x00\x00\x00\x00", "should be authenticated"
 
-        query_message = "SELECT 1".encode("utf-8") + b"\0"
+        query_message = b"SELECT 1" + b"\0"
         length = (4 + len(query_message)).to_bytes(4, byteorder="big")
 
         await websocket.send([b"Q", length, query_message])
@@ -132,7 +134,7 @@ async def test_websockets_pipelined(static_proxy: NeonProxy):
         auth_message = password.encode("utf-8") + b"\0"
         length1 = (4 + len(auth_message)).to_bytes(4, byteorder="big")
 
-        query_message = "SELECT 1".encode("utf-8") + b"\0"
+        query_message = b"SELECT 1" + b"\0"
         length2 = (4 + len(query_message)).to_bytes(4, byteorder="big")
 
         await websocket.send(
             length0
diff --git a/test_runner/regress/test_read_validation.py b/test_runner/regress/test_read_validation.py
index 78798c5abf..471a3b406a 100644
--- a/test_runner/regress/test_read_validation.py
+++ b/test_runner/regress/test_read_validation.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 from contextlib import closing
 
 from fixtures.log_helper import log
diff --git a/test_runner/regress/test_readonly_node.py b/test_runner/regress/test_readonly_node.py
index b08fcc0da1..30c69cb883 100644
--- a/test_runner/regress/test_readonly_node.py
+++ b/test_runner/regress/test_readonly_node.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import time
 
 import pytest
diff --git a/test_runner/regress/test_recovery.py b/test_runner/regress/test_recovery.py
index 8556103458..b43a443149 100644
--- a/test_runner/regress/test_recovery.py
+++ b/test_runner/regress/test_recovery.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import time
 from contextlib import closing
diff --git a/test_runner/regress/test_remote_storage.py b/test_runner/regress/test_remote_storage.py
index c955dce4dc..79b5ebe39a 100644
--- a/test_runner/regress/test_remote_storage.py
+++ b/test_runner/regress/test_remote_storage.py
@@ -1,9 +1,11 @@
+from __future__ import annotations
+
 import os
 import queue
 import shutil
 import threading
 import time
-from typing import Dict, List, Optional, Tuple
+from typing import TYPE_CHECKING
 
 import pytest
 from fixtures.common_types import Lsn, TenantId, TimelineId
@@ -35,6 +37,9 @@ from fixtures.utils import (
 )
 from requests import ReadTimeout
 
+if TYPE_CHECKING:
+    from typing import Optional
+
 
 #
 # Tests that a piece of data is backed up and restored correctly:
@@ -423,7 +428,7 @@ def test_remote_timeline_client_calls_started_metric(
     assert timeline_id is not None
     wait_for_last_flush_lsn(env, endpoint, tenant_id, timeline_id)
 
-    calls_started: Dict[Tuple[str, str], List[int]] = {
+    calls_started: dict[tuple[str, str], list[int]] = {
         ("layer", "upload"): [0],
         ("index", "upload"): [0],
         ("layer", "delete"): [0],
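
In the websockets hunks above, "SELECT 1".encode("utf-8") becomes the literal b"SELECT 1": encoding a constant ASCII string at runtime buys nothing, since the bytes are knowable when the code is written. For instance:

# Encoding a constant produces the same bytes object every time,
# so it can be written directly as a bytes literal.
assert "SELECT 1".encode("utf-8") == b"SELECT 1"

query_message = b"SELECT 1" + b"\0"
length = (4 + len(query_message)).to_bytes(4, byteorder="big")
assert length == b"\x00\x00\x00\r"  # 4-byte length prefix: 4 + 9 == 13
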
""" +from __future__ import annotations + import threading from contextlib import closing diff --git a/test_runner/regress/test_s3_restore.py b/test_runner/regress/test_s3_restore.py index 721c391544..bedc9b5865 100644 --- a/test_runner/regress/test_s3_restore.py +++ b/test_runner/regress/test_s3_restore.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import time from datetime import datetime, timezone diff --git a/test_runner/regress/test_setup.py b/test_runner/regress/test_setup.py index 02710fc807..dfbbd575b7 100644 --- a/test_runner/regress/test_setup.py +++ b/test_runner/regress/test_setup.py @@ -1,5 +1,7 @@ """Tests for the code in test fixtures""" +from __future__ import annotations + from fixtures.neon_fixtures import NeonEnvBuilder diff --git a/test_runner/regress/test_sharding.py b/test_runner/regress/test_sharding.py index a3d4b5baca..d1d6b3af75 100644 --- a/test_runner/regress/test_sharding.py +++ b/test_runner/regress/test_sharding.py @@ -1,7 +1,9 @@ +from __future__ import annotations + import os import time from collections import defaultdict -from typing import Dict, List, Optional, Union +from typing import TYPE_CHECKING import pytest import requests @@ -24,6 +26,9 @@ from pytest_httpserver import HTTPServer from werkzeug.wrappers.request import Request from werkzeug.wrappers.response import Response +if TYPE_CHECKING: + from typing import Optional, Union + def test_sharding_smoke( neon_env_builder: NeonEnvBuilder, @@ -635,7 +640,7 @@ def test_sharding_split_stripe_size( tenant_id = env.initial_tenant assert len(notifications) == 1 - expect: Dict[str, Union[List[Dict[str, int]], str, None, int]] = { + expect: dict[str, Union[list[dict[str, int]], str, None, int]] = { "tenant_id": str(env.initial_tenant), "stripe_size": None, "shards": [{"node_id": int(env.pageservers[0].id), "shard_number": 0}], @@ -651,7 +656,7 @@ def test_sharding_split_stripe_size( # Check that we ended up with the stripe size that we expected, both on the pageserver # and in the notifications to compute assert len(notifications) == 2 - expect_after: Dict[str, Union[List[Dict[str, int]], str, None, int]] = { + expect_after: dict[str, Union[list[dict[str, int]], str, None, int]] = { "tenant_id": str(env.initial_tenant), "stripe_size": new_stripe_size, "shards": [ diff --git a/test_runner/regress/test_sni_router.py b/test_runner/regress/test_sni_router.py index 4336e6551d..402f27b384 100644 --- a/test_runner/regress/test_sni_router.py +++ b/test_runner/regress/test_sni_router.py @@ -1,14 +1,19 @@ +from __future__ import annotations + import socket import subprocess from pathlib import Path from types import TracebackType -from typing import Optional, Type +from typing import TYPE_CHECKING import backoff from fixtures.log_helper import log from fixtures.neon_fixtures import PgProtocol, VanillaPostgres from fixtures.port_distributor import PortDistributor +if TYPE_CHECKING: + from typing import Optional + def generate_tls_cert(cn, certout, keyout): subprocess.run( @@ -53,7 +58,7 @@ class PgSniRouter(PgProtocol): self._popen: Optional[subprocess.Popen[bytes]] = None self.test_output_dir = test_output_dir - def start(self) -> "PgSniRouter": + def start(self) -> PgSniRouter: assert self._popen is None args = [ str(self.neon_binpath / "pg_sni_router"), @@ -86,12 +91,12 @@ class PgSniRouter(PgProtocol): if self._popen: self._popen.wait(timeout=2) - def __enter__(self) -> "PgSniRouter": + def __enter__(self) -> PgSniRouter: return self def __exit__( self, - exc_type: Optional[Type[BaseException]], + 
diff --git a/test_runner/regress/test_sni_router.py b/test_runner/regress/test_sni_router.py
index 4336e6551d..402f27b384 100644
--- a/test_runner/regress/test_sni_router.py
+++ b/test_runner/regress/test_sni_router.py
@@ -1,14 +1,19 @@
+from __future__ import annotations
+
 import socket
 import subprocess
 from pathlib import Path
 from types import TracebackType
-from typing import Optional, Type
+from typing import TYPE_CHECKING
 
 import backoff
 from fixtures.log_helper import log
 from fixtures.neon_fixtures import PgProtocol, VanillaPostgres
 from fixtures.port_distributor import PortDistributor
 
+if TYPE_CHECKING:
+    from typing import Optional
+
 
 def generate_tls_cert(cn, certout, keyout):
     subprocess.run(
@@ -53,7 +58,7 @@ class PgSniRouter(PgProtocol):
         self._popen: Optional[subprocess.Popen[bytes]] = None
         self.test_output_dir = test_output_dir
 
-    def start(self) -> "PgSniRouter":
+    def start(self) -> PgSniRouter:
         assert self._popen is None
         args = [
             str(self.neon_binpath / "pg_sni_router"),
@@ -86,12 +91,12 @@ class PgSniRouter(PgProtocol):
         if self._popen:
             self._popen.wait(timeout=2)
 
-    def __enter__(self) -> "PgSniRouter":
+    def __enter__(self) -> PgSniRouter:
         return self
 
     def __exit__(
         self,
-        exc_type: Optional[Type[BaseException]],
+        exc_type: Optional[type[BaseException]],
         exc: Optional[BaseException],
         tb: Optional[TracebackType],
     ):
diff --git a/test_runner/regress/test_storage_controller.py b/test_runner/regress/test_storage_controller.py
index 016d36301b..202634477c 100644
--- a/test_runner/regress/test_storage_controller.py
+++ b/test_runner/regress/test_storage_controller.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import concurrent.futures
 import json
 import threading
@@ -5,7 +7,7 @@ import time
 from collections import defaultdict
 from datetime import datetime, timezone
 from enum import Enum
-from typing import Any, Dict, List, Optional, Set, Tuple, Union
+from typing import TYPE_CHECKING
 
 import pytest
 from fixtures.auth_tokens import TokenScope
@@ -46,6 +48,9 @@ from urllib3 import Retry
 from werkzeug.wrappers.request import Request
 from werkzeug.wrappers.response import Response
 
+if TYPE_CHECKING:
+    from typing import Any, Optional, Union
+
 
 def get_node_shard_counts(env: NeonEnv, tenant_ids):
     counts: defaultdict[int, int] = defaultdict(int)
@@ -490,7 +495,7 @@ def test_storage_controller_compute_hook(
 
     # Initial notification from tenant creation
     assert len(notifications) == 1
-    expect: Dict[str, Union[List[Dict[str, int]], str, None, int]] = {
+    expect: dict[str, Union[list[dict[str, int]], str, None, int]] = {
        "tenant_id": str(env.initial_tenant),
        "stripe_size": None,
        "shards": [{"node_id": int(env.pageservers[0].id), "shard_number": 0}],
@@ -597,7 +602,7 @@ def test_storage_controller_stuck_compute_hook(
 
     # Initial notification from tenant creation
     assert len(notifications) == 1
-    expect: Dict[str, Union[List[Dict[str, int]], str, None, int]] = {
+    expect: dict[str, Union[list[dict[str, int]], str, None, int]] = {
        "tenant_id": str(env.initial_tenant),
        "stripe_size": None,
        "shards": [{"node_id": int(env.pageservers[0].id), "shard_number": 0}],
@@ -834,7 +839,7 @@ def test_storage_controller_s3_time_travel_recovery(
     # Simulate a "disaster": delete some random files from remote storage for one of the shards
     assert env.pageserver_remote_storage
     shard_id_for_list = "0002"
-    objects: List[ObjectTypeDef] = list_prefix(
+    objects: list[ObjectTypeDef] = list_prefix(
         env.pageserver_remote_storage,
         f"tenants/{tenant_id}-{shard_id_for_list}/timelines/{timeline_id}/",
     ).get("Contents", [])
@@ -885,7 +890,7 @@ def test_storage_controller_auth(neon_env_builder: NeonEnvBuilder):
     api = env.storage_controller_api
 
     tenant_id = TenantId.generate()
-    body: Dict[str, Any] = {"new_tenant_id": str(tenant_id)}
+    body: dict[str, Any] = {"new_tenant_id": str(tenant_id)}
 
     env.storage_controller.allowed_errors.append(".*Unauthorized.*")
     env.storage_controller.allowed_errors.append(".*Forbidden.*")
@@ -1228,9 +1233,9 @@ def test_storage_controller_heartbeats(
         log.info(f"{node_to_tenants=}")
 
         # Check that all the tenants have been attached
-        assert sum((len(ts) for ts in node_to_tenants.values())) == len(tenant_ids)
+        assert sum(len(ts) for ts in node_to_tenants.values()) == len(tenant_ids)
         # Check that each node got one tenant
-        assert all((len(ts) == 1 for ts in node_to_tenants.values()))
+        assert all(len(ts) == 1 for ts in node_to_tenants.values())
 
     wait_until(10, 1, tenants_placed)
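
The heartbeat assertions above drop a redundant pair of parentheses: a generator expression that is the sole argument of a call needs no parentheses of its own. Equivalent forms, with toy data:

node_to_tenants = {1: ["t-a"], 2: ["t-b"]}

# sum((x for x in ...)) and sum(x for x in ...) compile to the same thing;
# the inner parentheses are purely visual noise.
assert sum(len(ts) for ts in node_to_tenants.values()) == 2
assert all(len(ts) == 1 for ts in node_to_tenants.values())
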
""" Update metadata health. Then list tenant shards with unhealthy and outdated metadata health status. @@ -2389,7 +2394,7 @@ def test_storage_controller_ps_restarted_during_drain(neon_env_builder: NeonEnvB env.storage_controller.reconcile_until_idle() attached_id = int(env.storage_controller.locate(env.initial_tenant)[0]["node_id"]) - attached = next((ps for ps in env.pageservers if ps.id == attached_id)) + attached = next(ps for ps in env.pageservers if ps.id == attached_id) def attached_is_draining(): details = env.storage_controller.node_status(attached.id) diff --git a/test_runner/regress/test_storage_scrubber.py b/test_runner/regress/test_storage_scrubber.py index 7ecd0cf748..f999edc067 100644 --- a/test_runner/regress/test_storage_scrubber.py +++ b/test_runner/regress/test_storage_scrubber.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import os import pprint import shutil diff --git a/test_runner/regress/test_subscriber_restart.py b/test_runner/regress/test_subscriber_restart.py index e67001ef41..d37eeb1e6e 100644 --- a/test_runner/regress/test_subscriber_restart.py +++ b/test_runner/regress/test_subscriber_restart.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import threading import time diff --git a/test_runner/regress/test_subxacts.py b/test_runner/regress/test_subxacts.py index 82075bd723..7a46f0140c 100644 --- a/test_runner/regress/test_subxacts.py +++ b/test_runner/regress/test_subxacts.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from fixtures.neon_fixtures import NeonEnv, check_restored_datadir_content diff --git a/test_runner/regress/test_tenant_conf.py b/test_runner/regress/test_tenant_conf.py index d13cbe45e9..1dd46ec3d1 100644 --- a/test_runner/regress/test_tenant_conf.py +++ b/test_runner/regress/test_tenant_conf.py @@ -1,5 +1,7 @@ +from __future__ import annotations + import json -from typing import Any, Dict +from typing import TYPE_CHECKING from fixtures.common_types import Lsn from fixtures.neon_fixtures import ( @@ -10,11 +12,14 @@ from fixtures.remote_storage import LocalFsStorage, RemoteStorageKind from fixtures.utils import wait_until from fixtures.workload import Workload +if TYPE_CHECKING: + from typing import Any + def test_tenant_config(neon_env_builder: NeonEnvBuilder): """Test per tenant configuration""" - def set_some_nondefault_global_config(ps_cfg: Dict[str, Any]): + def set_some_nondefault_global_config(ps_cfg: dict[str, Any]): ps_cfg["page_cache_size"] = 444 ps_cfg["wait_lsn_timeout"] = "111 s" diff --git a/test_runner/regress/test_tenant_delete.py b/test_runner/regress/test_tenant_delete.py index eafd159ac0..294c1248c5 100644 --- a/test_runner/regress/test_tenant_delete.py +++ b/test_runner/regress/test_tenant_delete.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import json from threading import Thread diff --git a/test_runner/regress/test_tenant_detach.py b/test_runner/regress/test_tenant_detach.py index 6de22f262d..59c14b3263 100644 --- a/test_runner/regress/test_tenant_detach.py +++ b/test_runner/regress/test_tenant_detach.py @@ -1,9 +1,11 @@ +from __future__ import annotations + import asyncio import enum import random import time from threading import Thread -from typing import List, Optional +from typing import TYPE_CHECKING import asyncpg import pytest @@ -26,6 +28,10 @@ from fixtures.remote_storage import ( from fixtures.utils import query_scalar, wait_until from prometheus_client.samples import Sample +if TYPE_CHECKING: + from typing import Optional + + # In tests that overlap endpoint activity 
diff --git a/test_runner/regress/test_tenant_detach.py b/test_runner/regress/test_tenant_detach.py
index 6de22f262d..59c14b3263 100644
--- a/test_runner/regress/test_tenant_detach.py
+++ b/test_runner/regress/test_tenant_detach.py
@@ -1,9 +1,11 @@
+from __future__ import annotations
+
 import asyncio
 import enum
 import random
 import time
 from threading import Thread
-from typing import List, Optional
+from typing import TYPE_CHECKING
 
 import asyncpg
 import pytest
@@ -26,6 +28,10 @@ from fixtures.remote_storage import (
 from fixtures.utils import query_scalar, wait_until
 from prometheus_client.samples import Sample
 
+if TYPE_CHECKING:
+    from typing import Optional
+
+
 # In tests that overlap endpoint activity with tenant attach/detach, there are
 # a variety of warnings that the page service may emit when it cannot acquire
 # an active tenant to serve a request
@@ -492,7 +498,7 @@ def test_metrics_while_ignoring_broken_tenant_and_reloading(
         r".* Changing Active tenant to Broken state, reason: broken from test"
     )
 
-    def only_int(samples: List[Sample]) -> Optional[int]:
+    def only_int(samples: list[Sample]) -> Optional[int]:
         if len(samples) == 1:
             return int(samples[0].value)
         assert len(samples) == 0
diff --git a/test_runner/regress/test_tenant_relocation.py b/test_runner/regress/test_tenant_relocation.py
index 645e22af1f..5561a128b7 100644
--- a/test_runner/regress/test_tenant_relocation.py
+++ b/test_runner/regress/test_tenant_relocation.py
@@ -1,10 +1,12 @@
+from __future__ import annotations
+
 import os
 import shutil
 import threading
 import time
 from contextlib import closing, contextmanager
 from pathlib import Path
-from typing import Any, Dict, Optional, Tuple
+from typing import TYPE_CHECKING
 
 import pytest
 from fixtures.common_types import Lsn, TenantId, TimelineId
@@ -25,6 +27,9 @@ from fixtures.utils import (
     wait_until,
 )
 
+if TYPE_CHECKING:
+    from typing import Any, Optional
+
 
 def assert_abs_margin_ratio(a: float, b: float, margin_ratio: float):
     assert abs(a - b) / a < margin_ratio, abs(a - b) / a
@@ -74,7 +79,7 @@ def populate_branch(
     ps_http: PageserverHttpClient,
     create_table: bool,
     expected_sum: Optional[int],
-) -> Tuple[TimelineId, Lsn]:
+) -> tuple[TimelineId, Lsn]:
     # insert some data
     with pg_cur(endpoint) as cur:
         cur.execute("SHOW neon.timeline_id")
@@ -120,7 +125,7 @@ def check_timeline_attached(
     new_pageserver_http_client: PageserverHttpClient,
     tenant_id: TenantId,
     timeline_id: TimelineId,
-    old_timeline_detail: Dict[str, Any],
+    old_timeline_detail: dict[str, Any],
     old_current_lsn: Lsn,
 ):
     # new pageserver should be in sync (modulo wal tail or vacuum activity) with the old one because there was no new writes since checkpoint
diff --git a/test_runner/regress/test_tenant_size.py b/test_runner/regress/test_tenant_size.py
index 867c0021cd..9ea09d10d7 100644
--- a/test_runner/regress/test_tenant_size.py
+++ b/test_runner/regress/test_tenant_size.py
@@ -1,7 +1,8 @@
+from __future__ import annotations
+
 import os
 from concurrent.futures import ThreadPoolExecutor
 from pathlib import Path
-from typing import List, Tuple
 
 import pytest
 from fixtures.common_types import Lsn, TenantId, TimelineId
@@ -302,7 +303,7 @@ def test_single_branch_get_tenant_size_grows(
     http_client = env.pageserver.http_client()
 
-    collected_responses: List[Tuple[str, Lsn, int]] = []
+    collected_responses: list[tuple[str, Lsn, int]] = []
 
     size_debug_file = open(test_output_dir / "size_debug.html", "w")
 
@@ -313,7 +314,7 @@ def test_single_branch_get_tenant_size_grows(
         http_client: PageserverHttpClient,
         tenant_id: TenantId,
         timeline_id: TimelineId,
-    ) -> Tuple[Lsn, int]:
+    ) -> tuple[Lsn, int]:
         consistent = False
         size_debug = None
diff --git a/test_runner/regress/test_tenant_tasks.py b/test_runner/regress/test_tenant_tasks.py
index 2bf930d767..72183f5778 100644
--- a/test_runner/regress/test_tenant_tasks.py
+++ b/test_runner/regress/test_tenant_tasks.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 from fixtures.common_types import TenantId, TimelineId
 from fixtures.log_helper import log
 from fixtures.neon_fixtures import NeonEnvBuilder
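
populate_branch's return type above gets the same builtin-generic rewrite, Tuple[TimelineId, Lsn] to tuple[TimelineId, Lsn]. A toy function of the same shape:

from __future__ import annotations


def min_max(values: list[int]) -> tuple[int, int]:
    # tuple[int, int] names the type of each position in the returned pair.
    return min(values), max(values)


lo, hi = min_max([3, 1, 4, 1, 5])
assert (lo, hi) == (1, 5)
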
diff --git a/test_runner/regress/test_tenants.py b/test_runner/regress/test_tenants.py
index 7b194d40dd..95dc0fec78 100644
--- a/test_runner/regress/test_tenants.py
+++ b/test_runner/regress/test_tenants.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import concurrent.futures
 import os
 import time
@@ -5,7 +7,6 @@ from contextlib import closing
 from datetime import datetime
 from itertools import chain
 from pathlib import Path
-from typing import List
 
 import pytest
 import requests
@@ -272,7 +273,7 @@ def test_pageserver_metrics_removed_after_detach(neon_env_builder: NeonEnvBuilde
         assert cur.fetchone() == (5000050000,)
     endpoint.stop()
 
-    def get_ps_metric_samples_for_tenant(tenant_id: TenantId) -> List[Sample]:
+    def get_ps_metric_samples_for_tenant(tenant_id: TenantId) -> list[Sample]:
         ps_metrics = env.pageserver.http_client().get_metrics()
         samples = []
         for metric_name in ps_metrics.metrics:
@@ -459,7 +460,7 @@ def test_pageserver_metrics_many_relations(neon_env_builder: NeonEnvBuilder):
         "pageserver_directory_entries_count", {"tenant_id": str(env.initial_tenant)}
     )
 
-    def only_int(samples: List[Sample]) -> int:
+    def only_int(samples: list[Sample]) -> int:
         assert len(samples) == 1
         return int(samples[0].value)
diff --git a/test_runner/regress/test_tenants_with_remote_storage.py b/test_runner/regress/test_tenants_with_remote_storage.py
index 9310786da7..8d3ddf7e54 100644
--- a/test_runner/regress/test_tenants_with_remote_storage.py
+++ b/test_runner/regress/test_tenants_with_remote_storage.py
@@ -6,10 +6,11 @@
 # checkpoint_distance setting so that a lot of layer files are created.
 #
 
+from __future__ import annotations
+
 import asyncio
 import os
 from pathlib import Path
-from typing import List, Tuple
 
 from fixtures.common_types import Lsn, TenantId, TimelineId
 from fixtures.log_helper import log
@@ -62,7 +63,7 @@ async def all_tenants_workload(env: NeonEnv, tenants_endpoints):
 def test_tenants_many(neon_env_builder: NeonEnvBuilder):
     env = neon_env_builder.init_start()
 
-    tenants_endpoints: List[Tuple[TenantId, Endpoint]] = []
+    tenants_endpoints: list[tuple[TenantId, Endpoint]] = []
 
     for _ in range(1, 5):
         # Use a tiny checkpoint distance, to create a lot of layers quickly
diff --git a/test_runner/regress/test_threshold_based_eviction.py b/test_runner/regress/test_threshold_based_eviction.py
index 094dd20529..5f211ec4d4 100644
--- a/test_runner/regress/test_threshold_based_eviction.py
+++ b/test_runner/regress/test_threshold_based_eviction.py
@@ -1,6 +1,7 @@
+from __future__ import annotations
+
 import time
 from dataclasses import dataclass
-from typing import List, Set, Tuple
 
 from fixtures.log_helper import log
 from fixtures.neon_fixtures import (
@@ -116,8 +117,8 @@ def test_threshold_based_eviction(
     # wait for evictions and assert that they stabilize
     @dataclass
     class ByLocalAndRemote:
-        remote_layers: Set[str]
-        local_layers: Set[str]
+        remote_layers: set[str]
+        local_layers: set[str]
 
     class MapInfoProjection:
         def __init__(self, info: LayerMapInfo):
@@ -149,7 +150,7 @@ def test_threshold_based_eviction(
     consider_stable_when_no_change_for_seconds = 3 * eviction_threshold
     poll_interval = eviction_threshold / 3
     started_waiting_at = time.time()
-    map_info_changes: List[Tuple[float, MapInfoProjection]] = []
+    map_info_changes: list[tuple[float, MapInfoProjection]] = []
     while time.time() - started_waiting_at < observation_window:
         current = (
             time.time(),
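
ByLocalAndRemote above shows the new syntax is also safe inside dataclasses. A sketch, with a hypothetical class name:

from __future__ import annotations

from dataclasses import dataclass


@dataclass(frozen=True)
class LayerSets:
    # @dataclass reads these annotations as strings and never evaluates them,
    # so builtin generics are safe here even on pre-3.9 interpreters.
    remote_layers: set[str]
    local_layers: set[str]


s = LayerSets(remote_layers={"a"}, local_layers={"a", "b"})
assert s.local_layers - s.remote_layers == {"b"}
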
diff --git a/test_runner/regress/test_timeline_archive.py b/test_runner/regress/test_timeline_archive.py
index 16e0521890..841707d32e 100644
--- a/test_runner/regress/test_timeline_archive.py
+++ b/test_runner/regress/test_timeline_archive.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import pytest
 from fixtures.common_types import TenantId, TimelineArchivalState, TimelineId
 from fixtures.neon_fixtures import (
diff --git a/test_runner/regress/test_timeline_delete.py b/test_runner/regress/test_timeline_delete.py
index 7b6f6ac3c6..306f22acf9 100644
--- a/test_runner/regress/test_timeline_delete.py
+++ b/test_runner/regress/test_timeline_delete.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import enum
 import os
 import queue
diff --git a/test_runner/regress/test_timeline_detach_ancestor.py b/test_runner/regress/test_timeline_detach_ancestor.py
index 7f148a4b9b..0c8554bb54 100644
--- a/test_runner/regress/test_timeline_detach_ancestor.py
+++ b/test_runner/regress/test_timeline_detach_ancestor.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import datetime
 import enum
 import threading
@@ -5,7 +7,6 @@ import time
 from concurrent.futures import ThreadPoolExecutor
 from queue import Empty, Queue
 from threading import Barrier
-from typing import List, Set, Tuple
 
 import pytest
 from fixtures.common_types import Lsn, TimelineId
@@ -48,7 +49,7 @@ class Branchpoint(str, enum.Enum):
         return self.value
 
     @staticmethod
-    def all() -> List["Branchpoint"]:
+    def all() -> list[Branchpoint]:
         return [
             Branchpoint.EARLIER,
             Branchpoint.AT_L0,
@@ -473,7 +474,7 @@ def test_compaction_induced_by_detaches_in_history(
     more_good_numbers = range(0, 3)
 
-    branches: List[Tuple[str, TimelineId]] = [("main", env.initial_timeline)]
+    branches: list[tuple[str, TimelineId]] = [("main", env.initial_timeline)]
 
     for num in more_good_numbers:
         branch_name = f"br-{len(branches)}"
@@ -1270,7 +1271,7 @@ def test_retried_detach_ancestor_after_failed_reparenting(neon_env_builder: Neon
         {"request_type": "copy_object", "result": "ok"},
     )
 
-    def reparenting_progress(timelines: List[TimelineId]) -> Tuple[int, Set[TimelineId]]:
+    def reparenting_progress(timelines: list[TimelineId]) -> tuple[int, set[TimelineId]]:
         reparented = 0
         not_reparented = set()
         for timeline in timelines:
@@ -1306,7 +1307,7 @@ def test_retried_detach_ancestor_after_failed_reparenting(neon_env_builder: Neon
     http.configure_failpoints(("timeline-detach-ancestor::allow_one_reparented", "return"))
 
-    not_reparented: Set[TimelineId] = set()
+    not_reparented: set[TimelineId] = set()
     # tracked offset in the pageserver log which is at least at the most recent activation
     offset = None
diff --git a/test_runner/regress/test_timeline_gc_blocking.py b/test_runner/regress/test_timeline_gc_blocking.py
index 1540cbbcee..c19c78e251 100644
--- a/test_runner/regress/test_timeline_gc_blocking.py
+++ b/test_runner/regress/test_timeline_gc_blocking.py
@@ -1,7 +1,9 @@
+from __future__ import annotations
+
 import time
 from concurrent.futures import ThreadPoolExecutor
 from dataclasses import dataclass
-from typing import List, Optional
+from typing import TYPE_CHECKING
 
 import pytest
 from fixtures.log_helper import log
@@ -12,6 +14,9 @@ from fixtures.neon_fixtures import (
 )
 from fixtures.pageserver.utils import wait_timeline_detail_404
 
+if TYPE_CHECKING:
+    from typing import Optional
+
 
 @pytest.mark.parametrize("sharded", [True, False])
 def test_gc_blocking_by_timeline(neon_env_builder: NeonEnvBuilder, sharded: bool):
@@ -98,7 +103,7 @@ class ScrollableLog:
 
 @dataclass(frozen=True)
 class ManyPageservers:
-    many: List[ScrollableLog]
+    many: list[ScrollableLog]
 
     def assert_log_contains(self, what: str):
         for one in self.many:
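
Branchpoint.all() loses the quotes around its return type above: under postponed evaluation the class name in an annotation is resolved lazily, after the class exists. Minimal form:

from __future__ import annotations

import enum


class Branchpoint(str, enum.Enum):
    EARLIER = "earlier"
    AT_L0 = "at_l0"

    @staticmethod
    def all() -> list[Branchpoint]:
        # Without postponed evaluation this annotation would raise NameError:
        # the class name does not exist yet while the class body executes.
        return list(Branchpoint)


assert Branchpoint.all() == [Branchpoint.EARLIER, Branchpoint.AT_L0]
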
diff --git a/test_runner/regress/test_timeline_size.py b/test_runner/regress/test_timeline_size.py
index aa77474097..85c6d17142 100644
--- a/test_runner/regress/test_timeline_size.py
+++ b/test_runner/regress/test_timeline_size.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import concurrent.futures
 import math
 import random
diff --git a/test_runner/regress/test_truncate.py b/test_runner/regress/test_truncate.py
index 4fc0601a18..946dab2676 100644
--- a/test_runner/regress/test_truncate.py
+++ b/test_runner/regress/test_truncate.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import time
 
 from fixtures.neon_fixtures import NeonEnvBuilder
diff --git a/test_runner/regress/test_twophase.py b/test_runner/regress/test_twophase.py
index 1d9fe9d21d..e37e8dd3e8 100644
--- a/test_runner/regress/test_twophase.py
+++ b/test_runner/regress/test_twophase.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import os
 from pathlib import Path
diff --git a/test_runner/regress/test_unlogged.py b/test_runner/regress/test_unlogged.py
index 4431ccd959..a89391425e 100644
--- a/test_runner/regress/test_unlogged.py
+++ b/test_runner/regress/test_unlogged.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 from fixtures.neon_fixtures import NeonEnv, fork_at_current_lsn
 from fixtures.pg_version import PgVersion
diff --git a/test_runner/regress/test_vm_bits.py b/test_runner/regress/test_vm_bits.py
index ae1b6fdab3..d4c2ca7e07 100644
--- a/test_runner/regress/test_vm_bits.py
+++ b/test_runner/regress/test_vm_bits.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import time
 from contextlib import closing
diff --git a/test_runner/regress/test_wal_acceptor.py b/test_runner/regress/test_wal_acceptor.py
index 44ca9f90a4..d372e2d461 100644
--- a/test_runner/regress/test_wal_acceptor.py
+++ b/test_runner/regress/test_wal_acceptor.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import filecmp
 import logging
 import os
@@ -12,7 +14,7 @@ from contextlib import closing
 from dataclasses import dataclass, field
 from functools import partial
 from pathlib import Path
-from typing import Any, Dict, List, Optional
+from typing import TYPE_CHECKING
 
 import psycopg2
 import psycopg2.errors
@@ -56,6 +58,9 @@ from fixtures.utils import (
     wait_until,
 )
 
+if TYPE_CHECKING:
+    from typing import Any, Optional
+
 
 def wait_lsn_force_checkpoint(
     tenant_id: TenantId,
@@ -124,8 +129,8 @@ class TimelineMetrics:
     timeline_id: TimelineId
     last_record_lsn: Lsn
     # One entry per each Safekeeper, order is the same
-    flush_lsns: List[Lsn] = field(default_factory=list)
-    commit_lsns: List[Lsn] = field(default_factory=list)
+    flush_lsns: list[Lsn] = field(default_factory=list)
+    commit_lsns: list[Lsn] = field(default_factory=list)
 
 
 # Run page server and multiple acceptors, and multiple compute nodes running
@@ -152,7 +157,7 @@ def test_many_timelines(neon_env_builder: NeonEnvBuilder):
 
     tenant_id = env.initial_tenant
 
-    def collect_metrics(message: str) -> List[TimelineMetrics]:
+    def collect_metrics(message: str) -> list[TimelineMetrics]:
         with env.pageserver.http_client() as pageserver_http:
             timeline_details = [
                 pageserver_http.timeline_detail(
@@ -765,7 +770,7 @@ class ProposerPostgres(PgProtocol):
 
         stdout_filename = basepath + ".stdout"
 
-        with open(stdout_filename, "r") as stdout_f:
+        with open(stdout_filename) as stdout_f:
             stdout = stdout_f.read()
 
         return Lsn(stdout.strip("\n "))
@@ -934,7 +939,7 @@ def test_timeline_status(neon_env_builder: NeonEnvBuilder, auth_enabled: bool):
     assert debug_dump_1["config"]["id"] == env.safekeepers[0].id
 
 
-class DummyConsumer(object):
+class DummyConsumer:
     def __call__(self, msg):
         pass
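
class DummyConsumer(object) becoming class DummyConsumer removes a Python 2 relic, as in:

class DummyConsumer:
    def __call__(self, msg):
        pass


# The explicit base added nothing: every Python 3 class derives from object.
assert DummyConsumer.__mro__ == (DummyConsumer, object)
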
@@ -1162,7 +1167,7 @@ def is_flush_lsn_aligned(sk_http_clis, tenant_id, timeline_id):
 
 # Assert by xxd that WAL on given safekeepers is identical. No compute must be
 # running for this to be reliable.
-def cmp_sk_wal(sks: List[Safekeeper], tenant_id: TenantId, timeline_id: TimelineId):
+def cmp_sk_wal(sks: list[Safekeeper], tenant_id: TenantId, timeline_id: TimelineId):
     assert len(sks) >= 2, "cmp_sk_wal makes sense with >= 2 safekeepers passed"
     sk_http_clis = [sk.http_client() for sk in sks]
 
@@ -1448,12 +1453,12 @@ class SafekeeperEnv:
         self.pg_bin = pg_bin
         self.num_safekeepers = num_safekeepers
         self.bin_safekeeper = str(neon_binpath / "safekeeper")
-        self.safekeepers: Optional[List[subprocess.CompletedProcess[Any]]] = None
+        self.safekeepers: Optional[list[subprocess.CompletedProcess[Any]]] = None
         self.postgres: Optional[ProposerPostgres] = None
         self.tenant_id: Optional[TenantId] = None
         self.timeline_id: Optional[TimelineId] = None
 
-    def init(self) -> "SafekeeperEnv":
+    def init(self) -> SafekeeperEnv:
         assert self.postgres is None, "postgres is already initialized"
         assert self.safekeepers is None, "safekeepers are already initialized"
@@ -1534,7 +1539,7 @@ class SafekeeperEnv:
     def kill_safekeeper(self, sk_dir):
         """Read pid file and kill process"""
         pid_file = os.path.join(sk_dir, "safekeeper.pid")
-        with open(pid_file, "r") as f:
+        with open(pid_file) as f:
             pid = int(f.read())
             log.info(f"Killing safekeeper with pid {pid}")
             os.kill(pid, signal.SIGKILL)
@@ -1593,7 +1598,7 @@ def test_replace_safekeeper(neon_env_builder: NeonEnvBuilder):
             sum_after = query_scalar(cur, "SELECT SUM(key) FROM t")
             assert sum_after == sum_before + 5000050000
 
-    def show_statuses(safekeepers: List[Safekeeper], tenant_id: TenantId, timeline_id: TimelineId):
+    def show_statuses(safekeepers: list[Safekeeper], tenant_id: TenantId, timeline_id: TimelineId):
         for sk in safekeepers:
             http_cli = sk.http_client()
             try:
@@ -1802,7 +1807,7 @@ def test_pull_timeline(neon_env_builder: NeonEnvBuilder, live_sk_change: bool):
             sum_after = query_scalar(cur, "SELECT SUM(key) FROM t")
             assert sum_after == sum_before + 5000050000
 
-    def show_statuses(safekeepers: List[Safekeeper], tenant_id: TenantId, timeline_id: TimelineId):
+    def show_statuses(safekeepers: list[Safekeeper], tenant_id: TenantId, timeline_id: TimelineId):
         for sk in safekeepers:
             http_cli = sk.http_client(auth_token=env.auth_keys.generate_tenant_token(tenant_id))
             try:
@@ -2011,14 +2016,14 @@ def test_idle_reconnections(neon_env_builder: NeonEnvBuilder):
     tenant_id = env.initial_tenant
     timeline_id = env.create_branch("test_idle_reconnections")
 
-    def collect_stats() -> Dict[str, float]:
+    def collect_stats() -> dict[str, float]:
         # we need to collect safekeeper_pg_queries_received_total metric from all safekeepers
         sk_metrics = [
             parse_metrics(sk.http_client().get_metrics_str(), f"safekeeper_{sk.id}")
             for sk in env.safekeepers
         ]
 
-        total: Dict[str, float] = {}
+        total: dict[str, float] = {}
         for sk in sk_metrics:
             queries_received = sk.query_all("safekeeper_pg_queries_received_total")
diff --git a/test_runner/regress/test_wal_acceptor_async.py b/test_runner/regress/test_wal_acceptor_async.py
index 74d114e976..92306469f8 100644
--- a/test_runner/regress/test_wal_acceptor_async.py
+++ b/test_runner/regress/test_wal_acceptor_async.py
@@ -1,9 +1,11 @@
+from __future__ import annotations
+
 import asyncio
 import random
 import time
 from dataclasses import dataclass
 from pathlib import Path
-from typing import List, Optional
+from typing import TYPE_CHECKING
 
 import asyncpg
 import pytest
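
SafekeeperEnv above drops the quotes from its self-referencing return type and subscripts subprocess.CompletedProcess, a stdlib class that is only generic at runtime on newer interpreters; both are free once annotations are strings. A condensed sketch (Env is a stand-in name):

from __future__ import annotations

import subprocess
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from typing import Any, Optional


class Env:
    def __init__(self) -> None:
        # Stored as a string annotation, so subscripting CompletedProcess
        # costs nothing at runtime.
        self.procs: Optional[list[subprocess.CompletedProcess[Any]]] = None

    def init(self) -> Env:  # no quotes needed around the class's own name
        self.procs = []
        return self


assert Env().init().procs == []
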
@@ -13,10 +15,14 @@ from fixtures.log_helper import getLogger
 from fixtures.neon_fixtures import Endpoint, NeonEnv, NeonEnvBuilder, Safekeeper
 from fixtures.remote_storage import RemoteStorageKind
 
+if TYPE_CHECKING:
+    from typing import Optional
+
+
 log = getLogger("root.safekeeper_async")
 
 
-class BankClient(object):
+class BankClient:
     def __init__(self, conn: asyncpg.Connection, n_accounts, init_amount):
         self.conn: asyncpg.Connection = conn
         self.n_accounts = n_accounts
@@ -65,7 +71,7 @@ async def bank_transfer(conn: asyncpg.Connection, from_uid, to_uid, amount):
     )
 
 
-class WorkerStats(object):
+class WorkerStats:
     def __init__(self, n_workers):
         self.counters = [0] * n_workers
         self.running = True
@@ -148,7 +154,7 @@ async def wait_for_lsn(
 async def run_restarts_under_load(
     env: NeonEnv,
     endpoint: Endpoint,
-    acceptors: List[Safekeeper],
+    acceptors: list[Safekeeper],
     n_workers=10,
     n_accounts=100,
     init_amount=100000,
@@ -329,7 +335,7 @@ def test_compute_restarts(neon_env_builder: NeonEnvBuilder):
     asyncio.run(run_compute_restarts(env))
 
 
-class BackgroundCompute(object):
+class BackgroundCompute:
     MAX_QUERY_GAP_SECONDS = 2
 
     def __init__(self, index: int, env: NeonEnv, branch: str):
@@ -339,7 +345,7 @@ class BackgroundCompute(object):
         self.running = False
         self.stopped = False
         self.total_tries = 0
-        self.successful_queries: List[int] = []
+        self.successful_queries: list[int] = []
 
     async def run(self):
         if self.running:
@@ -634,7 +640,7 @@ class RaceConditionTest:
 
 # shut down random subset of safekeeper, sleep, wake them up, rinse, repeat
-async def xmas_garland(safekeepers: List[Safekeeper], data: RaceConditionTest):
+async def xmas_garland(safekeepers: list[Safekeeper], data: RaceConditionTest):
     while not data.is_stopped:
         data.iteration += 1
         victims = []
@@ -693,7 +699,7 @@ def test_race_conditions(neon_env_builder: NeonEnvBuilder):
 
 # Check that pageserver can select safekeeper with largest commit_lsn
 # and switch if LSN is not updated for some time (NoWalTimeout).
 async def run_wal_lagging(env: NeonEnv, endpoint: Endpoint, test_output_dir: Path):
-    def adjust_safekeepers(env: NeonEnv, active_sk: List[bool]):
+    def adjust_safekeepers(env: NeonEnv, active_sk: list[bool]):
         # Change the pg ports of the inactive safekeepers in the config file to be
         # invalid, to make them unavailable to the endpoint. We use
         # ports 10, 11 and 12 to simulate unavailable safekeepers.
diff --git a/test_runner/regress/test_wal_receiver.py b/test_runner/regress/test_wal_receiver.py
index 3c73df68e0..be2aa2b346 100644
--- a/test_runner/regress/test_wal_receiver.py
+++ b/test_runner/regress/test_wal_receiver.py
@@ -1,10 +1,15 @@
+from __future__ import annotations
+
 import time
-from typing import Any, Dict
+from typing import TYPE_CHECKING
 
 from fixtures.common_types import Lsn, TenantId
 from fixtures.log_helper import log
 from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder
 
+if TYPE_CHECKING:
+    from typing import Any
+
 
 # Checks that pageserver's walreceiver state is printed in the logs during WAL wait timeout.
 # Ensures that walreceiver does not run without any data inserted and only starts after the insertion.
@@ -43,7 +48,7 @@ def test_pageserver_lsn_wait_error_start(neon_env_builder: NeonEnvBuilder):
 # Kills one of the safekeepers and ensures that only the active ones are printed in the state.
 def test_pageserver_lsn_wait_error_safekeeper_stop(neon_env_builder: NeonEnvBuilder):
     # Trigger WAL wait timeout faster
-    def customize_pageserver_toml(ps_cfg: Dict[str, Any]):
+    def customize_pageserver_toml(ps_cfg: dict[str, Any]):
         ps_cfg["wait_lsn_timeout"] = "1s"
         tenant_config = ps_cfg.setdefault("tenant_config", {})
         tenant_config["walreceiver_connect_timeout"] = "2s"
diff --git a/test_runner/regress/test_wal_restore.py b/test_runner/regress/test_wal_restore.py
index 46366f0e2c..05b6ad8a9b 100644
--- a/test_runner/regress/test_wal_restore.py
+++ b/test_runner/regress/test_wal_restore.py
@@ -1,8 +1,9 @@
+from __future__ import annotations
+
 import sys
 import tarfile
 import tempfile
 from pathlib import Path
-from typing import List
 
 import pytest
 import zstandard
@@ -165,7 +166,7 @@ def test_wal_restore_http(neon_env_builder: NeonEnvBuilder, broken_tenant: bool)
     if broken_tenant:
         ps_client.tenant_detach(tenant_id)
 
-    objects: List[ObjectTypeDef] = list_prefix(
+    objects: list[ObjectTypeDef] = list_prefix(
         env.pageserver_remote_storage, f"tenants/{tenant_id}/timelines/{timeline_id}/"
     ).get("Contents", [])
     for obj in objects:
diff --git a/test_runner/regress/test_walredo_not_left_behind_on_detach.py b/test_runner/regress/test_walredo_not_left_behind_on_detach.py
index ae8e276a1a..182e57b8a4 100644
--- a/test_runner/regress/test_walredo_not_left_behind_on_detach.py
+++ b/test_runner/regress/test_walredo_not_left_behind_on_detach.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import time
 
 import psutil
diff --git a/test_runner/test_broken.py b/test_runner/test_broken.py
index d710b53528..112e699395 100644
--- a/test_runner/test_broken.py
+++ b/test_runner/test_broken.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import os
 
 import pytest