Mirror of https://github.com/neondatabase/neon.git (synced 2025-12-22 21:59:59 +00:00)
Python 3.11 (#9515)
## Problem

On Debian 12 (Bookworm), Python 3.11 is the latest available version.

## Summary of changes

- Update Python to 3.11 in build-tools
- Fix ruff check / format
- Fix mypy
- Use `StrEnum` instead of the `str`, `Enum` pair
- Update docs
committed by GitHub
parent 0713ff3176
commit 8d1c44039e
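For context on the `StrEnum` item: Python 3.11's `enum.StrEnum` replaces the `class Foo(str, Enum)` mixin pattern used throughout the test fixtures. A minimal before/after sketch with an illustrative enum (not code from this commit):

```python
from enum import Enum, StrEnum  # StrEnum requires Python 3.11+


# Before: mixing in str so members behave like plain strings.
class ColorOld(str, Enum):
    RED = "red"


# After: StrEnum gives the same behaviour without the mixin pair.
class ColorNew(StrEnum):
    RED = "red"


assert ColorNew.RED == "red"       # members still compare equal to their value
assert f"{ColorNew.RED}" == "red"  # and format as the plain string
```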
@@ -132,7 +132,7 @@ make -j`sysctl -n hw.logicalcpu` -s
To run the `psql` client, install the `postgresql-client` package or modify `PATH` and `LD_LIBRARY_PATH` to include `pg_install/bin` and `pg_install/lib`, respectively.

To run the integration tests or Python scripts (not required to use the code), install
Python (3.9 or higher), and install the python3 packages using `./scripts/pysync` (requires [poetry>=1.8](https://python-poetry.org/)) in the project directory.
Python (3.11 or higher), and install the python3 packages using `./scripts/pysync` (requires [poetry>=1.8](https://python-poetry.org/)) in the project directory.

#### Running neon database
@@ -234,7 +234,7 @@ USER nonroot:nonroot
WORKDIR /home/nonroot

# Python
ENV PYTHON_VERSION=3.9.19 \
ENV PYTHON_VERSION=3.11.10 \
PYENV_ROOT=/home/nonroot/.pyenv \
PATH=/home/nonroot/.pyenv/shims:/home/nonroot/.pyenv/bin:/home/nonroot/.poetry/bin:$PATH
RUN set -e \
@@ -113,21 +113,21 @@ so manual installation of dependencies is not recommended.
A single virtual environment with all dependencies is described in the single `Pipfile`.

### Prerequisites
- Install Python 3.9 (the minimal supported version) or greater.
- Install Python 3.11 (the minimal supported version) or greater.
- Our setup with poetry should work with newer python versions too. So feel free to open an issue with a `c/test-runner` label if something doesn't work as expected.
- If you have some trouble with other version you can resolve it by installing Python 3.9 separately, via [pyenv](https://github.com/pyenv/pyenv) or via system package manager e.g.:
- If you have some trouble with other version you can resolve it by installing Python 3.11 separately, via [pyenv](https://github.com/pyenv/pyenv) or via system package manager e.g.:
```bash
# In Ubuntu
sudo add-apt-repository ppa:deadsnakes/ppa
sudo apt update
sudo apt install python3.9
sudo apt install python3.11
```
- Install `poetry`
- Exact version of `poetry` is not important, see installation instructions available at poetry's [website](https://python-poetry.org/docs/#installation).
- Install dependencies via `./scripts/pysync`.
- Note that CI uses specific Python version (look for `PYTHON_VERSION` [here](https://github.com/neondatabase/docker-images/blob/main/rust/Dockerfile))
so if you have different version some linting tools can yield different result locally vs in the CI.
- You can explicitly specify which Python to use by running `poetry env use /path/to/python`, e.g. `poetry env use python3.11`.
This may also disable the `The currently activated Python version X.Y.Z is not supported by the project` warning.

Run `poetry shell` to activate the virtual environment.
poetry.lock (generated, 92 changed lines)
@@ -1,4 +1,4 @@
|
||||
# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
|
||||
# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand.
|
||||
|
||||
[[package]]
|
||||
name = "aiohappyeyeballs"
|
||||
@@ -114,7 +114,6 @@ files = [
|
||||
[package.dependencies]
|
||||
aiohappyeyeballs = ">=2.3.0"
|
||||
aiosignal = ">=1.1.2"
|
||||
async-timeout = {version = ">=4.0,<6.0", markers = "python_version < \"3.11\""}
|
||||
attrs = ">=17.3.0"
|
||||
frozenlist = ">=1.1.1"
|
||||
multidict = ">=4.5,<7.0"
|
||||
@@ -219,10 +218,8 @@ files = [
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""}
|
||||
idna = ">=2.8"
|
||||
sniffio = ">=1.1"
|
||||
typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""}
|
||||
|
||||
[package.extras]
|
||||
doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
|
||||
@@ -737,10 +734,7 @@ files = [
|
||||
[package.dependencies]
|
||||
jmespath = ">=0.7.1,<2.0.0"
|
||||
python-dateutil = ">=2.1,<3.0.0"
|
||||
urllib3 = [
|
||||
{version = ">=1.25.4,<1.27", markers = "python_version < \"3.10\""},
|
||||
{version = ">=1.25.4,<2.1", markers = "python_version >= \"3.10\""},
|
||||
]
|
||||
urllib3 = {version = ">=1.25.4,<2.1", markers = "python_version >= \"3.10\""}
|
||||
|
||||
[package.extras]
|
||||
crt = ["awscrt (==0.19.19)"]
|
||||
@@ -1069,20 +1063,6 @@ docs = ["myst-parser (==0.18.0)", "sphinx (==5.1.1)"]
|
||||
ssh = ["paramiko (>=2.4.3)"]
|
||||
websockets = ["websocket-client (>=1.3.0)"]
|
||||
|
||||
[[package]]
|
||||
name = "exceptiongroup"
|
||||
version = "1.1.1"
|
||||
description = "Backport of PEP 654 (exception groups)"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
files = [
|
||||
{file = "exceptiongroup-1.1.1-py3-none-any.whl", hash = "sha256:232c37c63e4f682982c8b6459f33a8981039e5fb8756b2074364e5055c498c9e"},
|
||||
{file = "exceptiongroup-1.1.1.tar.gz", hash = "sha256:d484c3090ba2889ae2928419117447a14daf3c1231d5e30d0aae34f354f01785"},
|
||||
]
|
||||
|
||||
[package.extras]
|
||||
test = ["pytest (>=6)"]
|
||||
|
||||
[[package]]
|
||||
name = "execnet"
|
||||
version = "1.9.0"
|
||||
@@ -1110,7 +1090,6 @@ files = [
|
||||
|
||||
[package.dependencies]
|
||||
click = ">=8.0"
|
||||
importlib-metadata = {version = ">=3.6.0", markers = "python_version < \"3.10\""}
|
||||
itsdangerous = ">=2.0"
|
||||
Jinja2 = ">=3.0"
|
||||
Werkzeug = ">=2.2.2"
|
||||
@@ -1319,25 +1298,6 @@ files = [
|
||||
{file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "importlib-metadata"
|
||||
version = "4.12.0"
|
||||
description = "Read metadata from Python packages"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
files = [
|
||||
{file = "importlib_metadata-4.12.0-py3-none-any.whl", hash = "sha256:7401a975809ea1fdc658c3aa4f78cc2195a0e019c5cbc4c06122884e9ae80c23"},
|
||||
{file = "importlib_metadata-4.12.0.tar.gz", hash = "sha256:637245b8bab2b6502fcbc752cc4b7a6f6243bb02b31c5c26156ad103d3d45670"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
zipp = ">=0.5"
|
||||
|
||||
[package.extras]
|
||||
docs = ["jaraco.packaging (>=9)", "rst.linker (>=1.9)", "sphinx"]
|
||||
perf = ["ipython"]
|
||||
testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)"]
|
||||
|
||||
[[package]]
|
||||
name = "iniconfig"
|
||||
version = "1.1.1"
|
||||
@@ -1933,7 +1893,6 @@ files = [
|
||||
|
||||
[package.dependencies]
|
||||
mypy-extensions = ">=1.0.0"
|
||||
tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
|
||||
typing-extensions = ">=3.10"
|
||||
|
||||
[package.extras]
|
||||
@@ -2514,11 +2473,9 @@ files = [
|
||||
|
||||
[package.dependencies]
|
||||
colorama = {version = "*", markers = "sys_platform == \"win32\""}
|
||||
exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""}
|
||||
iniconfig = "*"
|
||||
packaging = "*"
|
||||
pluggy = ">=0.12,<2.0"
|
||||
tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""}
|
||||
|
||||
[package.extras]
|
||||
testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]
|
||||
@@ -2581,10 +2538,7 @@ files = [
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
pytest = [
|
||||
{version = ">=5.0", markers = "python_version < \"3.10\""},
|
||||
{version = ">=6.2.4", markers = "python_version >= \"3.10\""},
|
||||
]
|
||||
pytest = {version = ">=6.2.4", markers = "python_version >= \"3.10\""}
|
||||
|
||||
[[package]]
|
||||
name = "pytest-repeat"
|
||||
@@ -3092,17 +3046,6 @@ files = [
|
||||
{file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tomli"
|
||||
version = "2.0.1"
|
||||
description = "A lil' TOML parser"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
files = [
|
||||
{file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"},
|
||||
{file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "types-jwcrypto"
|
||||
version = "1.5.0.20240925"
|
||||
@@ -3359,16 +3302,6 @@ files = [
|
||||
{file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8ad85f7f4e20964db4daadcab70b47ab05c7c1cf2a7c1e51087bfaa83831854c"},
|
||||
{file = "wrapt-1.14.1-cp310-cp310-win32.whl", hash = "sha256:a9a52172be0b5aae932bef82a79ec0a0ce87288c7d132946d645eba03f0ad8a8"},
|
||||
{file = "wrapt-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:6d323e1554b3d22cfc03cd3243b5bb815a51f5249fdcbb86fda4bf62bab9e164"},
|
||||
{file = "wrapt-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ecee4132c6cd2ce5308e21672015ddfed1ff975ad0ac8d27168ea82e71413f55"},
|
||||
{file = "wrapt-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2020f391008ef874c6d9e208b24f28e31bcb85ccff4f335f15a3251d222b92d9"},
|
||||
{file = "wrapt-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2feecf86e1f7a86517cab34ae6c2f081fd2d0dac860cb0c0ded96d799d20b335"},
|
||||
{file = "wrapt-1.14.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:240b1686f38ae665d1b15475966fe0472f78e71b1b4903c143a842659c8e4cb9"},
|
||||
{file = "wrapt-1.14.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9008dad07d71f68487c91e96579c8567c98ca4c3881b9b113bc7b33e9fd78b8"},
|
||||
{file = "wrapt-1.14.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6447e9f3ba72f8e2b985a1da758767698efa72723d5b59accefd716e9e8272bf"},
|
||||
{file = "wrapt-1.14.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:acae32e13a4153809db37405f5eba5bac5fbe2e2ba61ab227926a22901051c0a"},
|
||||
{file = "wrapt-1.14.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:49ef582b7a1152ae2766557f0550a9fcbf7bbd76f43fbdc94dd3bf07cc7168be"},
|
||||
{file = "wrapt-1.14.1-cp311-cp311-win32.whl", hash = "sha256:358fe87cc899c6bb0ddc185bf3dbfa4ba646f05b1b0b9b5a27c2cb92c2cea204"},
|
||||
{file = "wrapt-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:26046cd03936ae745a502abf44dac702a5e6880b2b01c29aea8ddf3353b68224"},
|
||||
{file = "wrapt-1.14.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:43ca3bbbe97af00f49efb06e352eae40434ca9d915906f77def219b88e85d907"},
|
||||
{file = "wrapt-1.14.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:6b1a564e6cb69922c7fe3a678b9f9a3c54e72b469875aa8018f18b4d1dd1adf3"},
|
||||
{file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:00b6d4ea20a906c0ca56d84f93065b398ab74b927a7a3dbd470f6fc503f95dc3"},
|
||||
@@ -3523,21 +3456,6 @@ idna = ">=2.0"
|
||||
multidict = ">=4.0"
|
||||
propcache = ">=0.2.0"
|
||||
|
||||
[[package]]
|
||||
name = "zipp"
|
||||
version = "3.19.1"
|
||||
description = "Backport of pathlib-compatible object wrapper for zip files"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
files = [
|
||||
{file = "zipp-3.19.1-py3-none-any.whl", hash = "sha256:2828e64edb5386ea6a52e7ba7cdb17bb30a73a858f5eb6eb93d8d36f5ea26091"},
|
||||
{file = "zipp-3.19.1.tar.gz", hash = "sha256:35427f6d5594f4acf82d25541438348c26736fa9b3afa2754bcd63cdb99d8e8f"},
|
||||
]
|
||||
|
||||
[package.extras]
|
||||
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
|
||||
test = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"]
|
||||
|
||||
[[package]]
|
||||
name = "zstandard"
|
||||
version = "0.21.0"
|
||||
@@ -3598,5 +3516,5 @@ cffi = ["cffi (>=1.11)"]
|
||||
|
||||
[metadata]
|
||||
lock-version = "2.0"
|
||||
python-versions = "^3.9"
|
||||
content-hash = "8cb9c38d83eec441391c0528ac2fbefde18c734373b2399e07c69382044e8ced"
|
||||
python-versions = "^3.11"
|
||||
content-hash = "5a9b8c8d409acb840c0a94dcdec6aac9777ccec443d74c78dbd511fa223cd6f6"
|
||||
|
||||
@@ -4,7 +4,7 @@ authors = []
package-mode = false

[tool.poetry.dependencies]
python = "^3.9"
python = "^3.11"
pytest = "^7.4.4"
psycopg2-binary = "^2.9.10"
typing-extensions = "^4.6.1"

@@ -89,7 +89,7 @@ module = [
ignore_missing_imports = true

[tool.ruff]
target-version = "py39"
target-version = "py311"
extend-exclude = [
"vendor/",
"target/",

@@ -108,6 +108,3 @@ select = [
"B", # bugbear
"UP", # pyupgrade
]

[tool.ruff.lint.pyupgrade]
keep-runtime-typing = true # Remove this stanza when we require Python 3.10
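Raising `target-version` to `py311` (and dropping the `keep-runtime-typing` stanza) lets ruff's pyupgrade (`UP`) rules rewrite pre-3.10 typing constructs, which is what the bulk of the fixture diffs below consist of. A hedged illustration with generic names, not taken from this repository:

```python
from typing import Optional, Union


# Before: the style that keep-runtime-typing used to preserve.
def lookup_old(key: str, default: Optional[int] = None) -> Union[int, str]:
    return default if default is not None else key


# After pyupgrade under target-version "py311": PEP 604 unions.
def lookup_new(key: str, default: int | None = None) -> int | str:
    return default if default is not None else key
```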
@@ -14,7 +14,7 @@ import psycopg2.extras
import toml

if TYPE_CHECKING:
from typing import Any, Optional
from typing import Any

FLAKY_TESTS_QUERY = """
SELECT

@@ -65,7 +65,7 @@ def main(args: argparse.Namespace):
pageserver_virtual_file_io_engine_parameter = ""

# re-use existing records of flaky tests from before parametrization by compaction_algorithm
def get_pageserver_default_tenant_config_compaction_algorithm() -> Optional[dict[str, Any]]:
def get_pageserver_default_tenant_config_compaction_algorithm() -> dict[str, Any] | None:
"""Duplicated from parametrize.py"""
toml_table = os.getenv("PAGESERVER_DEFAULT_TENANT_CONFIG_COMPACTION_ALGORITHM")
if toml_table is None:

@@ -196,7 +196,7 @@ async def main_impl(args, report_out, client: Client):
gathered = await asyncio.gather(*get_timeline_id_coros, return_exceptions=True)
assert len(tenant_ids) == len(gathered)
tenant_and_timline_ids = []
for tid, tlids in zip(tenant_ids, gathered):
for tid, tlids in zip(tenant_ids, gathered, strict=False):
for tlid in tlids:
tenant_and_timline_ids.append((tid, tlid))
elif len(comps) == 1:
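The explicit `strict=False` added to `zip()` keeps the old truncate-at-the-shortest behaviour while satisfying the bugbear zip-without-strict lint (B905); the keyword itself has existed since Python 3.10. A small sketch with made-up data:

```python
tenant_ids = ["t1", "t2", "t3"]
gathered = [["tl1"], ["tl2"]]  # deliberately one element short

# strict=False behaves like plain zip(): it stops at the shorter iterable.
assert list(zip(tenant_ids, gathered, strict=False)) == [("t1", ["tl1"]), ("t2", ["tl2"])]

# strict=True would instead raise ValueError on the length mismatch.
try:
    list(zip(tenant_ids, gathered, strict=True))
except ValueError:
    print("length mismatch detected")
```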
@@ -11,7 +11,7 @@ import re
import sys
from contextlib import contextmanager
from dataclasses import dataclass
from datetime import datetime, timezone
from datetime import UTC, datetime
from pathlib import Path

import backoff

@@ -140,8 +140,8 @@ def ingest_test_result(
suite=labels["suite"],
name=unparametrized_name,
status=test["status"],
started_at=datetime.fromtimestamp(test["time"]["start"] / 1000, tz=timezone.utc),
stopped_at=datetime.fromtimestamp(test["time"]["stop"] / 1000, tz=timezone.utc),
started_at=datetime.fromtimestamp(test["time"]["start"] / 1000, tz=UTC),
stopped_at=datetime.fromtimestamp(test["time"]["stop"] / 1000, tz=UTC),
duration=test["time"]["duration"],
flaky=test["flaky"] or test["retriesStatusChange"],
arch=arch,
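`datetime.UTC` is an alias for `datetime.timezone.utc` introduced in Python 3.11, so the timestamps produced here are unchanged; a quick check:

```python
from datetime import UTC, datetime, timezone

assert UTC is timezone.utc  # alias added in Python 3.11

ts = 1_700_000_000.0
assert datetime.fromtimestamp(ts, tz=UTC) == datetime.fromtimestamp(ts, tz=timezone.utc)
```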
@@ -113,7 +113,7 @@ The test suite has a Python enum with equal name but different meaning:

```python
@enum.unique
class RemoteStorageKind(str, enum.Enum):
class RemoteStorageKind(StrEnum):
LOCAL_FS = "local_fs"
MOCK_S3 = "mock_s3"
REAL_S3 = "real_s3"
@@ -1,7 +1,7 @@
from __future__ import annotations

from dataclasses import dataclass
from enum import Enum
from enum import StrEnum
from typing import Any

import jwt

@@ -37,8 +37,7 @@ class AuthKeys:
return self.generate_token(scope=TokenScope.TENANT, tenant_id=str(tenant_id))


# TODO: Replace with `StrEnum` when we upgrade to python 3.11
class TokenScope(str, Enum):
class TokenScope(StrEnum):
ADMIN = "admin"
PAGE_SERVER_API = "pageserverapi"
GENERATIONS_API = "generations_api"
@@ -9,6 +9,7 @@ import re
import timeit
from contextlib import contextmanager
from datetime import datetime
from enum import StrEnum
from pathlib import Path
from typing import TYPE_CHECKING

@@ -24,8 +25,7 @@ from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonPageserver

if TYPE_CHECKING:
from collections.abc import Iterator, Mapping
from typing import Callable, Optional
from collections.abc import Callable, Iterator, Mapping


"""

@@ -61,7 +61,7 @@ class PgBenchRunResult:
number_of_threads: int
number_of_transactions_actually_processed: int
latency_average: float
latency_stddev: Optional[float]
latency_stddev: float | None
tps: float
run_duration: float
run_start_timestamp: int

@@ -171,14 +171,14 @@ _PGBENCH_INIT_EXTRACTORS: Mapping[str, re.Pattern[str]] = {

@dataclasses.dataclass
class PgBenchInitResult:
total: Optional[float]
drop_tables: Optional[float]
create_tables: Optional[float]
client_side_generate: Optional[float]
server_side_generate: Optional[float]
vacuum: Optional[float]
primary_keys: Optional[float]
foreign_keys: Optional[float]
total: float | None
drop_tables: float | None
create_tables: float | None
client_side_generate: float | None
server_side_generate: float | None
vacuum: float | None
primary_keys: float | None
foreign_keys: float | None
duration: float
start_timestamp: int
end_timestamp: int

@@ -196,7 +196,7 @@ class PgBenchInitResult:

last_line = stderr.splitlines()[-1]

timings: dict[str, Optional[float]] = {}
timings: dict[str, float | None] = {}
last_line_items = re.split(r"\(|\)|,", last_line)
for item in last_line_items:
for key, regex in _PGBENCH_INIT_EXTRACTORS.items():

@@ -227,7 +227,7 @@ class PgBenchInitResult:


@enum.unique
class MetricReport(str, enum.Enum): # str is a hack to make it json serializable
class MetricReport(StrEnum): # str is a hack to make it json serializable
# this means that this is a constant test parameter
# like number of transactions, or number of clients
TEST_PARAM = "test_param"

@@ -256,9 +256,8 @@ class NeonBenchmarker:
metric_value: float,
unit: str,
report: MetricReport,
labels: Optional[
dict[str, str]
] = None, # use this to associate additional key/value pairs in json format for associated Neon object IDs like project ID with the metric
# use this to associate additional key/value pairs in json format for associated Neon object IDs like project ID with the metric
labels: dict[str, str] | None = None,
):
"""
Record a benchmark result.

@@ -412,7 +411,7 @@ class NeonBenchmarker:
self,
pageserver: NeonPageserver,
metric_name: str,
label_filters: Optional[dict[str, str]] = None,
label_filters: dict[str, str] | None = None,
) -> int:
"""Fetch the value of given int counter from pageserver metrics."""
all_metrics = pageserver.http_client().get_metrics()
@@ -2,14 +2,14 @@ from __future__ import annotations

import random
from dataclasses import dataclass
from enum import Enum
from enum import StrEnum
from functools import total_ordering
from typing import TYPE_CHECKING, TypeVar

from typing_extensions import override

if TYPE_CHECKING:
from typing import Any, Union
from typing import Any

T = TypeVar("T", bound="Id")

@@ -24,7 +24,7 @@ class Lsn:
representation is like "1/0123abcd". See also pg_lsn datatype in Postgres
"""

def __init__(self, x: Union[int, str]):
def __init__(self, x: int | str):
if isinstance(x, int):
self.lsn_int = x
else:

@@ -67,7 +67,7 @@ class Lsn:
return NotImplemented
return self.lsn_int - other.lsn_int

def __add__(self, other: Union[int, Lsn]) -> Lsn:
def __add__(self, other: int | Lsn) -> Lsn:
if isinstance(other, int):
return Lsn(self.lsn_int + other)
elif isinstance(other, Lsn):

@@ -249,7 +249,6 @@ class TenantShardId:
return hash(self._tuple())


# TODO: Replace with `StrEnum` when we upgrade to python 3.11
class TimelineArchivalState(str, Enum):
class TimelineArchivalState(StrEnum):
ARCHIVED = "Archived"
UNARCHIVED = "Unarchived"

@@ -99,7 +99,7 @@ class PgCompare(ABC):
assert row is not None
assert len(row) == len(pg_stat.columns)

for col, val in zip(pg_stat.columns, row):
for col, val in zip(pg_stat.columns, row, strict=False):
results[f"{pg_stat.table}.{col}"] = int(val)

return results
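The `Union[...]`/`Optional[...]` to `X | Y`/`X | None` rewrites above are purely syntactic in annotations, and since Python 3.10 the same `X | Y` form is also accepted at runtime, for example by `isinstance()`. A minimal sketch (hypothetical parser, not the `Lsn` class itself):

```python
def parse(x: int | str) -> int:
    # `int | str` works both as an annotation and as an isinstance() target.
    if not isinstance(x, int | str):
        raise TypeError(f"unsupported type: {type(x)!r}")
    return x if isinstance(x, int) else int(x, 16)


assert parse(10) == 10
assert parse("ff") == 255
```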
@@ -12,7 +12,8 @@ from fixtures.common_types import TenantId
from fixtures.log_helper import log

if TYPE_CHECKING:
from typing import Any, Callable, Optional
from collections.abc import Callable
from typing import Any


class ComputeReconfigure:

@@ -20,12 +21,12 @@ class ComputeReconfigure:
self.server = server
self.control_plane_compute_hook_api = f"http://{server.host}:{server.port}/notify-attach"
self.workloads: dict[TenantId, Any] = {}
self.on_notify: Optional[Callable[[Any], None]] = None
self.on_notify: Callable[[Any], None] | None = None

def register_workload(self, workload: Any):
self.workloads[workload.tenant_id] = workload

def register_on_notify(self, fn: Optional[Callable[[Any], None]]):
def register_on_notify(self, fn: Callable[[Any], None] | None):
"""
Add some extra work during a notification, like sleeping to slow things down, or
logging what was notified.
@@ -31,7 +31,7 @@ from h2.settings import SettingCodes
from typing_extensions import override

if TYPE_CHECKING:
from typing import Any, Optional
from typing import Any


RequestData = collections.namedtuple("RequestData", ["headers", "data"])

@@ -49,7 +49,7 @@ class H2Protocol(asyncio.Protocol):
def __init__(self):
config = H2Configuration(client_side=False, header_encoding="utf-8")
self.conn = H2Connection(config=config)
self.transport: Optional[asyncio.Transport] = None
self.transport: asyncio.Transport | None = None
self.stream_data: dict[int, RequestData] = {}
self.flow_control_futures: dict[int, asyncio.Future[Any]] = {}

@@ -61,7 +61,7 @@ class H2Protocol(asyncio.Protocol):
self.transport.write(self.conn.data_to_send())

@override
def connection_lost(self, exc: Optional[Exception]):
def connection_lost(self, exc: Exception | None):
for future in self.flow_control_futures.values():
future.cancel()
self.flow_control_futures = {}
@@ -1,16 +1,12 @@
from __future__ import annotations

from collections import defaultdict
from typing import TYPE_CHECKING

from prometheus_client.parser import text_string_to_metric_families
from prometheus_client.samples import Sample

from fixtures.log_helper import log

if TYPE_CHECKING:
from typing import Optional


class Metrics:
metrics: dict[str, list[Sample]]

@@ -20,7 +16,7 @@ class Metrics:
self.metrics = defaultdict(list)
self.name = name

def query_all(self, name: str, filter: Optional[dict[str, str]] = None) -> list[Sample]:
def query_all(self, name: str, filter: dict[str, str] | None = None) -> list[Sample]:
filter = filter or {}
res: list[Sample] = []

@@ -32,7 +28,7 @@ class Metrics:
pass
return res

def query_one(self, name: str, filter: Optional[dict[str, str]] = None) -> Sample:
def query_one(self, name: str, filter: dict[str, str] | None = None) -> Sample:
res = self.query_all(name, filter or {})
assert len(res) == 1, f"expected single sample for {name} {filter}, found {res}"
return res[0]

@@ -47,9 +43,7 @@ class MetricsGetter:
def get_metrics(self) -> Metrics:
raise NotImplementedError()

def get_metric_value(
self, name: str, filter: Optional[dict[str, str]] = None
) -> Optional[float]:
def get_metric_value(self, name: str, filter: dict[str, str] | None = None) -> float | None:
metrics = self.get_metrics()
results = metrics.query_all(name, filter=filter)
if not results:

@@ -59,7 +53,7 @@ class MetricsGetter:
return results[0].value

def get_metrics_values(
self, names: list[str], filter: Optional[dict[str, str]] = None, absence_ok: bool = False
self, names: list[str], filter: dict[str, str] | None = None, absence_ok: bool = False
) -> dict[str, float]:
"""
When fetching multiple named metrics, it is more efficient to use this
@@ -8,7 +8,7 @@ import requests
from fixtures.log_helper import log

if TYPE_CHECKING:
from typing import Any, Literal, Optional
from typing import Any, Literal

from fixtures.pg_version import PgVersion

@@ -40,11 +40,11 @@ class NeonAPI:

def create_project(
self,
pg_version: Optional[PgVersion] = None,
name: Optional[str] = None,
branch_name: Optional[str] = None,
branch_role_name: Optional[str] = None,
branch_database_name: Optional[str] = None,
pg_version: PgVersion | None = None,
name: str | None = None,
branch_name: str | None = None,
branch_role_name: str | None = None,
branch_database_name: str | None = None,
) -> dict[str, Any]:
data: dict[str, Any] = {
"project": {

@@ -179,8 +179,8 @@ class NeonAPI:
def get_connection_uri(
self,
project_id: str,
branch_id: Optional[str] = None,
endpoint_id: Optional[str] = None,
branch_id: str | None = None,
endpoint_id: str | None = None,
database_name: str = "neondb",
role_name: str = "neondb_owner",
pooled: bool = True,

@@ -249,7 +249,7 @@ class NeonAPI:

@final
class NeonApiEndpoint:
def __init__(self, neon_api: NeonAPI, pg_version: PgVersion, project_id: Optional[str]):
def __init__(self, neon_api: NeonAPI, pg_version: PgVersion, project_id: str | None):
self.neon_api = neon_api
self.project_id: str
self.endpoint_id: str
@@ -20,7 +20,6 @@ from fixtures.pg_version import PgVersion
if TYPE_CHECKING:
from typing import (
Any,
Optional,
TypeVar,
cast,
)

@@ -36,7 +35,7 @@ class AbstractNeonCli:
Do not use directly, use specific subclasses instead.
"""

def __init__(self, extra_env: Optional[dict[str, str]], binpath: Path):
def __init__(self, extra_env: dict[str, str] | None, binpath: Path):
self.extra_env = extra_env
self.binpath = binpath

@@ -45,7 +44,7 @@ class AbstractNeonCli:
def raw_cli(
self,
arguments: list[str],
extra_env_vars: Optional[dict[str, str]] = None,
extra_env_vars: dict[str, str] | None = None,
check_return_code=True,
timeout=None,
) -> subprocess.CompletedProcess[str]:

@@ -173,7 +172,7 @@ class NeonLocalCli(AbstractNeonCli):

def __init__(
self,
extra_env: Optional[dict[str, str]],
extra_env: dict[str, str] | None,
binpath: Path,
repo_dir: Path,
pg_distrib_dir: Path,

@@ -195,10 +194,10 @@ class NeonLocalCli(AbstractNeonCli):
tenant_id: TenantId,
timeline_id: TimelineId,
pg_version: PgVersion,
conf: Optional[dict[str, Any]] = None,
shard_count: Optional[int] = None,
shard_stripe_size: Optional[int] = None,
placement_policy: Optional[str] = None,
conf: dict[str, Any] | None = None,
shard_count: int | None = None,
shard_stripe_size: int | None = None,
placement_policy: str | None = None,
set_default: bool = False,
):
"""

@@ -302,8 +301,8 @@ class NeonLocalCli(AbstractNeonCli):
tenant_id: TenantId,
timeline_id: TimelineId,
new_branch_name,
ancestor_branch_name: Optional[str] = None,
ancestor_start_lsn: Optional[Lsn] = None,
ancestor_branch_name: str | None = None,
ancestor_start_lsn: Lsn | None = None,
):
cmd = [
"timeline",

@@ -331,8 +330,8 @@ class NeonLocalCli(AbstractNeonCli):
base_lsn: Lsn,
base_tarfile: Path,
pg_version: PgVersion,
end_lsn: Optional[Lsn] = None,
wal_tarfile: Optional[Path] = None,
end_lsn: Lsn | None = None,
wal_tarfile: Path | None = None,
):
cmd = [
"timeline",

@@ -380,7 +379,7 @@ class NeonLocalCli(AbstractNeonCli):
def init(
self,
init_config: dict[str, Any],
force: Optional[str] = None,
force: str | None = None,
) -> subprocess.CompletedProcess[str]:
with tempfile.NamedTemporaryFile(mode="w+") as init_config_tmpfile:
init_config_tmpfile.write(toml.dumps(init_config))

@@ -400,9 +399,9 @@ class NeonLocalCli(AbstractNeonCli):

def storage_controller_start(
self,
timeout_in_seconds: Optional[int] = None,
instance_id: Optional[int] = None,
base_port: Optional[int] = None,
timeout_in_seconds: int | None = None,
instance_id: int | None = None,
base_port: int | None = None,
):
cmd = ["storage_controller", "start"]
if timeout_in_seconds is not None:

@@ -413,7 +412,7 @@ class NeonLocalCli(AbstractNeonCli):
cmd.append(f"--base-port={base_port}")
return self.raw_cli(cmd)

def storage_controller_stop(self, immediate: bool, instance_id: Optional[int] = None):
def storage_controller_stop(self, immediate: bool, instance_id: int | None = None):
cmd = ["storage_controller", "stop"]
if immediate:
cmd.extend(["-m", "immediate"])

@@ -424,8 +423,8 @@ class NeonLocalCli(AbstractNeonCli):
def pageserver_start(
self,
id: int,
extra_env_vars: Optional[dict[str, str]] = None,
timeout_in_seconds: Optional[int] = None,
extra_env_vars: dict[str, str] | None = None,
timeout_in_seconds: int | None = None,
) -> subprocess.CompletedProcess[str]:
start_args = ["pageserver", "start", f"--id={id}"]
if timeout_in_seconds is not None:

@@ -442,9 +441,9 @@ class NeonLocalCli(AbstractNeonCli):
def safekeeper_start(
self,
id: int,
extra_opts: Optional[list[str]] = None,
extra_env_vars: Optional[dict[str, str]] = None,
timeout_in_seconds: Optional[int] = None,
extra_opts: list[str] | None = None,
extra_env_vars: dict[str, str] | None = None,
timeout_in_seconds: int | None = None,
) -> subprocess.CompletedProcess[str]:
if extra_opts is not None:
extra_opts = [f"-e={opt}" for opt in extra_opts]

@@ -457,7 +456,7 @@ class NeonLocalCli(AbstractNeonCli):
)

def safekeeper_stop(
self, id: Optional[int] = None, immediate=False
self, id: int | None = None, immediate=False
) -> subprocess.CompletedProcess[str]:
args = ["safekeeper", "stop"]
if id is not None:

@@ -467,7 +466,7 @@ class NeonLocalCli(AbstractNeonCli):
return self.raw_cli(args)

def storage_broker_start(
self, timeout_in_seconds: Optional[int] = None
self, timeout_in_seconds: int | None = None
) -> subprocess.CompletedProcess[str]:
cmd = ["storage_broker", "start"]
if timeout_in_seconds is not None:

@@ -485,10 +484,10 @@ class NeonLocalCli(AbstractNeonCli):
http_port: int,
tenant_id: TenantId,
pg_version: PgVersion,
endpoint_id: Optional[str] = None,
endpoint_id: str | None = None,
hot_standby: bool = False,
lsn: Optional[Lsn] = None,
pageserver_id: Optional[int] = None,
lsn: Lsn | None = None,
pageserver_id: int | None = None,
allow_multiple=False,
) -> subprocess.CompletedProcess[str]:
args = [

@@ -523,11 +522,11 @@ class NeonLocalCli(AbstractNeonCli):
def endpoint_start(
self,
endpoint_id: str,
safekeepers: Optional[list[int]] = None,
remote_ext_config: Optional[str] = None,
pageserver_id: Optional[int] = None,
safekeepers: list[int] | None = None,
remote_ext_config: str | None = None,
pageserver_id: int | None = None,
allow_multiple=False,
basebackup_request_tries: Optional[int] = None,
basebackup_request_tries: int | None = None,
) -> subprocess.CompletedProcess[str]:
args = [
"endpoint",

@@ -555,9 +554,9 @@ class NeonLocalCli(AbstractNeonCli):
def endpoint_reconfigure(
self,
endpoint_id: str,
tenant_id: Optional[TenantId] = None,
pageserver_id: Optional[int] = None,
safekeepers: Optional[list[int]] = None,
tenant_id: TenantId | None = None,
pageserver_id: int | None = None,
safekeepers: list[int] | None = None,
check_return_code=True,
) -> subprocess.CompletedProcess[str]:
args = ["endpoint", "reconfigure", endpoint_id]

@@ -574,7 +573,7 @@ class NeonLocalCli(AbstractNeonCli):
endpoint_id: str,
destroy=False,
check_return_code=True,
mode: Optional[str] = None,
mode: str | None = None,
) -> subprocess.CompletedProcess[str]:
args = [
"endpoint",
@@ -17,7 +17,7 @@ from collections.abc import Iterable, Iterator
|
||||
from contextlib import closing, contextmanager
|
||||
from dataclasses import dataclass
|
||||
from datetime import datetime
|
||||
from enum import Enum
|
||||
from enum import StrEnum
|
||||
from functools import cached_property
|
||||
from pathlib import Path
|
||||
from types import TracebackType
|
||||
@@ -101,12 +101,10 @@ from fixtures.utils import (
|
||||
from .neon_api import NeonAPI, NeonApiEndpoint
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from collections.abc import Callable
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
Optional,
|
||||
TypeVar,
|
||||
Union,
|
||||
)
|
||||
|
||||
from fixtures.paths import SnapshotDirLocked
|
||||
@@ -338,10 +336,10 @@ class NeonEnvBuilder:
|
||||
top_output_dir: Path,
|
||||
test_output_dir: Path,
|
||||
combination,
|
||||
test_overlay_dir: Optional[Path] = None,
|
||||
pageserver_remote_storage: Optional[RemoteStorage] = None,
|
||||
test_overlay_dir: Path | None = None,
|
||||
pageserver_remote_storage: RemoteStorage | None = None,
|
||||
# toml that will be decomposed into `--config-override` flags during `pageserver --init`
|
||||
pageserver_config_override: Optional[str | Callable[[dict[str, Any]], None]] = None,
|
||||
pageserver_config_override: str | Callable[[dict[str, Any]], None] | None = None,
|
||||
num_safekeepers: int = 1,
|
||||
num_pageservers: int = 1,
|
||||
# Use non-standard SK ids to check for various parsing bugs
|
||||
@@ -349,16 +347,16 @@ class NeonEnvBuilder:
|
||||
# fsync is disabled by default to make the tests go faster
|
||||
safekeepers_enable_fsync: bool = False,
|
||||
auth_enabled: bool = False,
|
||||
rust_log_override: Optional[str] = None,
|
||||
rust_log_override: str | None = None,
|
||||
default_branch_name: str = DEFAULT_BRANCH_NAME,
|
||||
preserve_database_files: bool = False,
|
||||
initial_tenant: Optional[TenantId] = None,
|
||||
initial_timeline: Optional[TimelineId] = None,
|
||||
pageserver_virtual_file_io_engine: Optional[str] = None,
|
||||
pageserver_default_tenant_config_compaction_algorithm: Optional[dict[str, Any]] = None,
|
||||
safekeeper_extra_opts: Optional[list[str]] = None,
|
||||
storage_controller_port_override: Optional[int] = None,
|
||||
pageserver_virtual_file_io_mode: Optional[str] = None,
|
||||
initial_tenant: TenantId | None = None,
|
||||
initial_timeline: TimelineId | None = None,
|
||||
pageserver_virtual_file_io_engine: str | None = None,
|
||||
pageserver_default_tenant_config_compaction_algorithm: dict[str, Any] | None = None,
|
||||
safekeeper_extra_opts: list[str] | None = None,
|
||||
storage_controller_port_override: int | None = None,
|
||||
pageserver_virtual_file_io_mode: str | None = None,
|
||||
):
|
||||
self.repo_dir = repo_dir
|
||||
self.rust_log_override = rust_log_override
|
||||
@@ -367,7 +365,7 @@ class NeonEnvBuilder:
|
||||
# Pageserver remote storage
|
||||
self.pageserver_remote_storage = pageserver_remote_storage
|
||||
# Safekeepers remote storage
|
||||
self.safekeepers_remote_storage: Optional[RemoteStorage] = None
|
||||
self.safekeepers_remote_storage: RemoteStorage | None = None
|
||||
|
||||
self.run_id = run_id
|
||||
self.mock_s3_server: MockS3Server = mock_s3_server
|
||||
@@ -378,7 +376,7 @@ class NeonEnvBuilder:
|
||||
self.safekeepers_enable_fsync = safekeepers_enable_fsync
|
||||
self.auth_enabled = auth_enabled
|
||||
self.default_branch_name = default_branch_name
|
||||
self.env: Optional[NeonEnv] = None
|
||||
self.env: NeonEnv | None = None
|
||||
self.keep_remote_storage_contents: bool = True
|
||||
self.neon_binpath = neon_binpath
|
||||
self.neon_local_binpath = neon_binpath
|
||||
@@ -391,14 +389,14 @@ class NeonEnvBuilder:
|
||||
self.test_output_dir = test_output_dir
|
||||
self.test_overlay_dir = test_overlay_dir
|
||||
self.overlay_mounts_created_by_us: list[tuple[str, Path]] = []
|
||||
self.config_init_force: Optional[str] = None
|
||||
self.config_init_force: str | None = None
|
||||
self.top_output_dir = top_output_dir
|
||||
self.control_plane_compute_hook_api: Optional[str] = None
|
||||
self.storage_controller_config: Optional[dict[Any, Any]] = None
|
||||
self.control_plane_compute_hook_api: str | None = None
|
||||
self.storage_controller_config: dict[Any, Any] | None = None
|
||||
|
||||
self.pageserver_virtual_file_io_engine: Optional[str] = pageserver_virtual_file_io_engine
|
||||
self.pageserver_virtual_file_io_engine: str | None = pageserver_virtual_file_io_engine
|
||||
|
||||
self.pageserver_default_tenant_config_compaction_algorithm: Optional[dict[str, Any]] = (
|
||||
self.pageserver_default_tenant_config_compaction_algorithm: dict[str, Any] | None = (
|
||||
pageserver_default_tenant_config_compaction_algorithm
|
||||
)
|
||||
if self.pageserver_default_tenant_config_compaction_algorithm is not None:
|
||||
@@ -440,10 +438,10 @@ class NeonEnvBuilder:
|
||||
|
||||
def init_start(
|
||||
self,
|
||||
initial_tenant_conf: Optional[dict[str, Any]] = None,
|
||||
initial_tenant_conf: dict[str, Any] | None = None,
|
||||
default_remote_storage_if_missing: bool = True,
|
||||
initial_tenant_shard_count: Optional[int] = None,
|
||||
initial_tenant_shard_stripe_size: Optional[int] = None,
|
||||
initial_tenant_shard_count: int | None = None,
|
||||
initial_tenant_shard_stripe_size: int | None = None,
|
||||
) -> NeonEnv:
|
||||
"""
|
||||
Default way to create and start NeonEnv. Also creates the initial_tenant with root initial_timeline.
|
||||
@@ -781,8 +779,8 @@ class NeonEnvBuilder:
|
||||
self,
|
||||
kind: RemoteStorageKind,
|
||||
user: RemoteStorageUser,
|
||||
bucket_name: Optional[str] = None,
|
||||
bucket_region: Optional[str] = None,
|
||||
bucket_name: str | None = None,
|
||||
bucket_region: str | None = None,
|
||||
) -> RemoteStorage:
|
||||
ret = kind.configure(
|
||||
self.repo_dir,
|
||||
@@ -845,9 +843,9 @@ class NeonEnvBuilder:
|
||||
|
||||
def __exit__(
|
||||
self,
|
||||
exc_type: Optional[type[BaseException]],
|
||||
exc_value: Optional[BaseException],
|
||||
traceback: Optional[TracebackType],
|
||||
exc_type: type[BaseException] | None,
|
||||
exc_value: BaseException | None,
|
||||
traceback: TracebackType | None,
|
||||
):
|
||||
# Stop all the nodes.
|
||||
if self.env:
|
||||
@@ -1136,7 +1134,7 @@ class NeonEnv:
|
||||
force=config.config_init_force,
|
||||
)
|
||||
|
||||
def start(self, timeout_in_seconds: Optional[int] = None):
|
||||
def start(self, timeout_in_seconds: int | None = None):
|
||||
# Storage controller starts first, so that pageserver /re-attach calls don't
|
||||
# bounce through retries on startup
|
||||
self.storage_controller.start(timeout_in_seconds=timeout_in_seconds)
|
||||
@@ -1237,7 +1235,7 @@ class NeonEnv:
|
||||
), "env.pageserver must only be used with single pageserver NeonEnv"
|
||||
return self.pageservers[0]
|
||||
|
||||
def get_pageserver(self, id: Optional[int]) -> NeonPageserver:
|
||||
def get_pageserver(self, id: int | None) -> NeonPageserver:
|
||||
"""
|
||||
Look up a pageserver by its node ID.
|
||||
|
||||
@@ -1254,7 +1252,7 @@ class NeonEnv:
|
||||
|
||||
raise RuntimeError(f"Pageserver with ID {id} not found")
|
||||
|
||||
def get_tenant_pageserver(self, tenant_id: Union[TenantId, TenantShardId]):
|
||||
def get_tenant_pageserver(self, tenant_id: TenantId | TenantShardId):
|
||||
"""
|
||||
Get the NeonPageserver where this tenant shard is currently attached, according
|
||||
to the storage controller.
|
||||
@@ -1316,12 +1314,12 @@ class NeonEnv:
|
||||
|
||||
def create_tenant(
|
||||
self,
|
||||
tenant_id: Optional[TenantId] = None,
|
||||
timeline_id: Optional[TimelineId] = None,
|
||||
conf: Optional[dict[str, Any]] = None,
|
||||
shard_count: Optional[int] = None,
|
||||
shard_stripe_size: Optional[int] = None,
|
||||
placement_policy: Optional[str] = None,
|
||||
tenant_id: TenantId | None = None,
|
||||
timeline_id: TimelineId | None = None,
|
||||
conf: dict[str, Any] | None = None,
|
||||
shard_count: int | None = None,
|
||||
shard_stripe_size: int | None = None,
|
||||
placement_policy: str | None = None,
|
||||
set_default: bool = False,
|
||||
) -> tuple[TenantId, TimelineId]:
|
||||
"""
|
||||
@@ -1343,7 +1341,7 @@ class NeonEnv:
|
||||
|
||||
return tenant_id, timeline_id
|
||||
|
||||
def config_tenant(self, tenant_id: Optional[TenantId], conf: dict[str, str]):
|
||||
def config_tenant(self, tenant_id: TenantId | None, conf: dict[str, str]):
|
||||
"""
|
||||
Update tenant config.
|
||||
"""
|
||||
@@ -1353,10 +1351,10 @@ class NeonEnv:
|
||||
def create_branch(
|
||||
self,
|
||||
new_branch_name: str = DEFAULT_BRANCH_NAME,
|
||||
tenant_id: Optional[TenantId] = None,
|
||||
ancestor_branch_name: Optional[str] = None,
|
||||
ancestor_start_lsn: Optional[Lsn] = None,
|
||||
new_timeline_id: Optional[TimelineId] = None,
|
||||
tenant_id: TenantId | None = None,
|
||||
ancestor_branch_name: str | None = None,
|
||||
ancestor_start_lsn: Lsn | None = None,
|
||||
new_timeline_id: TimelineId | None = None,
|
||||
) -> TimelineId:
|
||||
new_timeline_id = new_timeline_id or TimelineId.generate()
|
||||
tenant_id = tenant_id or self.initial_tenant
|
||||
@@ -1370,8 +1368,8 @@ class NeonEnv:
|
||||
def create_timeline(
|
||||
self,
|
||||
new_branch_name: str,
|
||||
tenant_id: Optional[TenantId] = None,
|
||||
timeline_id: Optional[TimelineId] = None,
|
||||
tenant_id: TenantId | None = None,
|
||||
timeline_id: TimelineId | None = None,
|
||||
) -> TimelineId:
|
||||
timeline_id = timeline_id or TimelineId.generate()
|
||||
tenant_id = tenant_id or self.initial_tenant
|
||||
@@ -1396,8 +1394,8 @@ def neon_simple_env(
|
||||
compatibility_pg_distrib_dir: Path,
|
||||
pg_version: PgVersion,
|
||||
pageserver_virtual_file_io_engine: str,
|
||||
pageserver_default_tenant_config_compaction_algorithm: Optional[dict[str, Any]],
|
||||
pageserver_virtual_file_io_mode: Optional[str],
|
||||
pageserver_default_tenant_config_compaction_algorithm: dict[str, Any] | None,
|
||||
pageserver_virtual_file_io_mode: str | None,
|
||||
) -> Iterator[NeonEnv]:
|
||||
"""
|
||||
Simple Neon environment, with 1 safekeeper and 1 pageserver. No authentication, no fsync.
|
||||
@@ -1453,9 +1451,9 @@ def neon_env_builder(
|
||||
test_overlay_dir: Path,
|
||||
top_output_dir: Path,
|
||||
pageserver_virtual_file_io_engine: str,
|
||||
pageserver_default_tenant_config_compaction_algorithm: Optional[dict[str, Any]],
|
||||
pageserver_default_tenant_config_compaction_algorithm: dict[str, Any] | None,
|
||||
record_property: Callable[[str, object], None],
|
||||
pageserver_virtual_file_io_mode: Optional[str],
|
||||
pageserver_virtual_file_io_mode: str | None,
|
||||
) -> Iterator[NeonEnvBuilder]:
|
||||
"""
|
||||
Fixture to create a Neon environment for test.
|
||||
@@ -1530,7 +1528,7 @@ class LogUtils:
|
||||
|
||||
def log_contains(
|
||||
self, pattern: str, offset: None | LogCursor = None
|
||||
) -> Optional[tuple[str, LogCursor]]:
|
||||
) -> tuple[str, LogCursor] | None:
|
||||
"""Check that the log contains a line that matches the given regex"""
|
||||
logfile = self.logfile
|
||||
if not logfile.exists():
|
||||
@@ -1569,14 +1567,13 @@ class StorageControllerApiException(Exception):
|
||||
|
||||
# See libs/pageserver_api/src/controller_api.rs
|
||||
# for the rust definitions of the enums below
|
||||
# TODO: Replace with `StrEnum` when we upgrade to python 3.11
|
||||
class PageserverAvailability(str, Enum):
|
||||
class PageserverAvailability(StrEnum):
|
||||
ACTIVE = "Active"
|
||||
UNAVAILABLE = "Unavailable"
|
||||
OFFLINE = "Offline"
|
||||
|
||||
|
||||
class PageserverSchedulingPolicy(str, Enum):
|
||||
class PageserverSchedulingPolicy(StrEnum):
|
||||
ACTIVE = "Active"
|
||||
DRAINING = "Draining"
|
||||
FILLING = "Filling"
|
||||
@@ -1584,7 +1581,7 @@ class PageserverSchedulingPolicy(str, Enum):
|
||||
PAUSE_FOR_RESTART = "PauseForRestart"
|
||||
|
||||
|
||||
class StorageControllerLeadershipStatus(str, Enum):
|
||||
class StorageControllerLeadershipStatus(StrEnum):
|
||||
LEADER = "leader"
|
||||
STEPPED_DOWN = "stepped_down"
|
||||
CANDIDATE = "candidate"
|
||||
@@ -1602,9 +1599,9 @@ class NeonStorageController(MetricsGetter, LogUtils):
|
||||
|
||||
def start(
|
||||
self,
|
||||
timeout_in_seconds: Optional[int] = None,
|
||||
instance_id: Optional[int] = None,
|
||||
base_port: Optional[int] = None,
|
||||
timeout_in_seconds: int | None = None,
|
||||
instance_id: int | None = None,
|
||||
base_port: int | None = None,
|
||||
):
|
||||
assert not self.running
|
||||
self.env.neon_cli.storage_controller_start(timeout_in_seconds, instance_id, base_port)
|
||||
@@ -1673,7 +1670,7 @@ class NeonStorageController(MetricsGetter, LogUtils):
|
||||
|
||||
return resp
|
||||
|
||||
def headers(self, scope: Optional[TokenScope]) -> dict[str, str]:
|
||||
def headers(self, scope: TokenScope | None) -> dict[str, str]:
|
||||
headers = {}
|
||||
if self.auth_enabled and scope is not None:
|
||||
jwt_token = self.env.auth_keys.generate_token(scope=scope)
|
||||
@@ -1711,9 +1708,9 @@ class NeonStorageController(MetricsGetter, LogUtils):
|
||||
|
||||
def attach_hook_issue(
|
||||
self,
|
||||
tenant_shard_id: Union[TenantId, TenantShardId],
|
||||
tenant_shard_id: TenantId | TenantShardId,
|
||||
pageserver_id: int,
|
||||
generation_override: Optional[int] = None,
|
||||
generation_override: int | None = None,
|
||||
) -> int:
|
||||
body = {"tenant_shard_id": str(tenant_shard_id), "node_id": pageserver_id}
|
||||
if generation_override is not None:
|
||||
@@ -1729,7 +1726,7 @@ class NeonStorageController(MetricsGetter, LogUtils):
|
||||
assert isinstance(gen, int)
|
||||
return gen
|
||||
|
||||
def attach_hook_drop(self, tenant_shard_id: Union[TenantId, TenantShardId]):
|
||||
def attach_hook_drop(self, tenant_shard_id: TenantId | TenantShardId):
|
||||
self.request(
|
||||
"POST",
|
||||
f"{self.api}/debug/v1/attach-hook",
|
||||
@@ -1737,7 +1734,7 @@ class NeonStorageController(MetricsGetter, LogUtils):
|
||||
headers=self.headers(TokenScope.ADMIN),
|
||||
)
|
||||
|
||||
def inspect(self, tenant_shard_id: Union[TenantId, TenantShardId]) -> Optional[tuple[int, int]]:
|
||||
def inspect(self, tenant_shard_id: TenantId | TenantShardId) -> tuple[int, int] | None:
|
||||
"""
|
||||
:return: 2-tuple of (generation, pageserver id), or None if unknown
|
||||
"""
|
||||
@@ -1857,10 +1854,10 @@ class NeonStorageController(MetricsGetter, LogUtils):
|
||||
def tenant_create(
|
||||
self,
|
||||
tenant_id: TenantId,
|
||||
shard_count: Optional[int] = None,
|
||||
shard_stripe_size: Optional[int] = None,
|
||||
tenant_config: Optional[dict[Any, Any]] = None,
|
||||
placement_policy: Optional[Union[dict[Any, Any], str]] = None,
|
||||
shard_count: int | None = None,
|
||||
shard_stripe_size: int | None = None,
|
||||
tenant_config: dict[Any, Any] | None = None,
|
||||
placement_policy: dict[Any, Any] | str | None = None,
|
||||
):
|
||||
"""
|
||||
Use this rather than pageserver_api() when you need to include shard parameters
|
||||
@@ -1941,7 +1938,7 @@ class NeonStorageController(MetricsGetter, LogUtils):
|
||||
return response.json()
|
||||
|
||||
def tenant_shard_split(
|
||||
self, tenant_id: TenantId, shard_count: int, shard_stripe_size: Optional[int] = None
|
||||
self, tenant_id: TenantId, shard_count: int, shard_stripe_size: int | None = None
|
||||
) -> list[TenantShardId]:
|
||||
response = self.request(
|
||||
"PUT",
|
||||
@@ -2039,8 +2036,8 @@ class NeonStorageController(MetricsGetter, LogUtils):
|
||||
def poll_node_status(
|
||||
self,
|
||||
node_id: int,
|
||||
desired_availability: Optional[PageserverAvailability],
|
||||
desired_scheduling_policy: Optional[PageserverSchedulingPolicy],
|
||||
desired_availability: PageserverAvailability | None,
|
||||
desired_scheduling_policy: PageserverSchedulingPolicy | None,
|
||||
max_attempts: int,
|
||||
backoff: float,
|
||||
):
|
||||
@@ -2259,7 +2256,7 @@ class NeonStorageController(MetricsGetter, LogUtils):
|
||||
json=body,
|
||||
)
|
||||
|
||||
def get_safekeeper(self, id: int) -> Optional[dict[str, Any]]:
|
||||
def get_safekeeper(self, id: int) -> dict[str, Any] | None:
|
||||
try:
|
||||
response = self.request(
|
||||
"GET",
|
||||
@@ -2290,9 +2287,9 @@ class NeonStorageController(MetricsGetter, LogUtils):
|
||||
|
||||
def __exit__(
|
||||
self,
|
||||
exc_type: Optional[type[BaseException]],
|
||||
exc: Optional[BaseException],
|
||||
tb: Optional[TracebackType],
|
||||
exc_type: type[BaseException] | None,
|
||||
exc: BaseException | None,
|
||||
tb: TracebackType | None,
|
||||
):
|
||||
self.stop(immediate=True)
|
||||
|
||||
@@ -2304,9 +2301,9 @@ class NeonProxiedStorageController(NeonStorageController):
|
||||
|
||||
def start(
|
||||
self,
|
||||
timeout_in_seconds: Optional[int] = None,
|
||||
instance_id: Optional[int] = None,
|
||||
base_port: Optional[int] = None,
|
||||
timeout_in_seconds: int | None = None,
|
||||
instance_id: int | None = None,
|
||||
base_port: int | None = None,
|
||||
):
|
||||
assert instance_id is not None and base_port is not None
|
||||
|
||||
@@ -2317,7 +2314,7 @@ class NeonProxiedStorageController(NeonStorageController):
|
||||
return self
|
||||
|
||||
def stop_instance(
|
||||
self, immediate: bool = False, instance_id: Optional[int] = None
|
||||
self, immediate: bool = False, instance_id: int | None = None
|
||||
) -> NeonStorageController:
|
||||
assert instance_id in self.instances
|
||||
if self.instances[instance_id]["running"]:
|
||||
@@ -2346,7 +2343,7 @@ class NeonProxiedStorageController(NeonStorageController):
|
||||
|
||||
def log_contains(
|
||||
self, pattern: str, offset: None | LogCursor = None
|
||||
) -> Optional[tuple[str, LogCursor]]:
) -> tuple[str, LogCursor] | None:
raise NotImplementedError()

@@ -2393,8 +2390,8 @@ class NeonPageserver(PgProtocol, LogUtils):

def timeline_dir(
self,
tenant_shard_id: Union[TenantId, TenantShardId],
timeline_id: Optional[TimelineId] = None,
tenant_shard_id: TenantId | TenantShardId,
timeline_id: TimelineId | None = None,
) -> Path:
"""Get a timeline directory's path based on the repo directory of the test environment"""
if timeline_id is None:

@@ -2403,7 +2400,7 @@ class NeonPageserver(PgProtocol, LogUtils):

def tenant_dir(
self,
tenant_shard_id: Optional[Union[TenantId, TenantShardId]] = None,
tenant_shard_id: TenantId | TenantShardId | None = None,
) -> Path:
"""Get a tenant directory's path based on the repo directory of the test environment"""
if tenant_shard_id is None:

@@ -2447,8 +2444,8 @@ class NeonPageserver(PgProtocol, LogUtils):

def start(
self,
extra_env_vars: Optional[dict[str, str]] = None,
timeout_in_seconds: Optional[int] = None,
extra_env_vars: dict[str, str] | None = None,
timeout_in_seconds: int | None = None,
) -> NeonPageserver:
"""
Start the page server.

@@ -2497,7 +2494,7 @@ class NeonPageserver(PgProtocol, LogUtils):

def restart(
self,
immediate: bool = False,
timeout_in_seconds: Optional[int] = None,
timeout_in_seconds: int | None = None,
):
"""
High level wrapper for restart: restarts the process, and waits for

@@ -2537,9 +2534,9 @@ class NeonPageserver(PgProtocol, LogUtils):

def __exit__(
self,
exc_type: Optional[type[BaseException]],
exc: Optional[BaseException],
tb: Optional[TracebackType],
exc_type: type[BaseException] | None,
exc: BaseException | None,
tb: TracebackType | None,
):
self.stop(immediate=True)

@@ -2548,7 +2545,7 @@ class NeonPageserver(PgProtocol, LogUtils):
pytest.skip("pageserver was built without 'testing' feature")

def http_client(
self, auth_token: Optional[str] = None, retries: Optional[Retry] = None
self, auth_token: str | None = None, retries: Retry | None = None
) -> PageserverHttpClient:
return PageserverHttpClient(
port=self.service_port.http,

@@ -2585,7 +2582,7 @@ class NeonPageserver(PgProtocol, LogUtils):
self,
tenant_id: TenantId,
config: None | dict[str, Any] = None,
generation: Optional[int] = None,
generation: int | None = None,
override_storage_controller_generation: bool = False,
):
"""

@@ -2619,7 +2616,7 @@ class NeonPageserver(PgProtocol, LogUtils):
return client.tenant_location_conf(tenant_id, config, **kwargs)

def read_tenant_location_conf(
self, tenant_shard_id: Union[TenantId, TenantShardId]
self, tenant_shard_id: TenantId | TenantShardId
) -> dict[str, Any]:
path = self.tenant_dir(tenant_shard_id) / "config-v1"
log.info(f"Reading location conf from {path}")

@@ -2634,9 +2631,9 @@ class NeonPageserver(PgProtocol, LogUtils):
def tenant_create(
self,
tenant_id: TenantId,
conf: Optional[dict[str, Any]] = None,
auth_token: Optional[str] = None,
generation: Optional[int] = None,
conf: dict[str, Any] | None = None,
auth_token: str | None = None,
generation: int | None = None,
) -> TenantId:
if generation is None:
generation = self.env.storage_controller.attach_hook_issue(tenant_id, self.id)

@@ -2656,7 +2653,7 @@ class NeonPageserver(PgProtocol, LogUtils):
return tenant_id

def list_layers(
self, tenant_id: Union[TenantId, TenantShardId], timeline_id: TimelineId
self, tenant_id: TenantId | TenantShardId, timeline_id: TimelineId
) -> list[Path]:
"""
Inspect local storage on a pageserver to discover which layer files are present.

@@ -2749,7 +2746,7 @@ class PgBin:
if "/" not in str(command[0]):
command[0] = str(self.pg_bin_path / command[0])

def _build_env(self, env_add: Optional[Env]) -> Env:
def _build_env(self, env_add: Env | None) -> Env:
if env_add is None:
return self.env
env = self.env.copy()

@@ -2766,8 +2763,8 @@ class PgBin:
def run_nonblocking(
self,
command: list[str],
env: Optional[Env] = None,
cwd: Optional[Union[str, Path]] = None,
env: Env | None = None,
cwd: str | Path | None = None,
) -> subprocess.Popen[Any]:
"""
Run one of the postgres binaries, not waiting for it to finish

@@ -2790,8 +2787,8 @@ class PgBin:
def run(
self,
command: list[str],
env: Optional[Env] = None,
cwd: Optional[Union[str, Path]] = None,
env: Env | None = None,
cwd: str | Path | None = None,
) -> None:
"""
Run one of the postgres binaries, waiting for it to finish

@@ -2813,8 +2810,8 @@ class PgBin:
def run_capture(
self,
command: list[str],
env: Optional[Env] = None,
cwd: Optional[str] = None,
env: Env | None = None,
cwd: str | None = None,
with_command_header=True,
**popen_kwargs: Any,
) -> str:

@@ -2941,7 +2938,7 @@ class VanillaPostgres(PgProtocol):
conf_file.write("\n".join(hba) + "\n")
conf_file.write(data)

def start(self, log_path: Optional[str] = None):
def start(self, log_path: str | None = None):
assert not self.running
self.running = True

@@ -2965,9 +2962,9 @@ class VanillaPostgres(PgProtocol):

def __exit__(
self,
exc_type: Optional[type[BaseException]],
exc: Optional[BaseException],
tb: Optional[TracebackType],
exc_type: type[BaseException] | None,
exc: BaseException | None,
tb: TracebackType | None,
):
if self.running:
self.stop()

@@ -3014,9 +3011,9 @@ class RemotePostgres(PgProtocol):

def __exit__(
self,
exc_type: Optional[type[BaseException]],
exc: Optional[BaseException],
tb: Optional[TracebackType],
exc_type: type[BaseException] | None,
exc: BaseException | None,
tb: TracebackType | None,
):
# do nothing
pass

@@ -3092,7 +3089,7 @@ class PSQL:
self.path = full_path
self.database_url = f"postgres://{host}:{port}/main?options=project%3Dgeneric-project-name"

async def run(self, query: Optional[str] = None) -> asyncio.subprocess.Process:
async def run(self, query: str | None = None) -> asyncio.subprocess.Process:
run_args = [self.path, "--no-psqlrc", "--quiet", "--tuples-only", self.database_url]
if query is not None:
run_args += ["--command", query]

@@ -3138,7 +3135,7 @@ class NeonProxy(PgProtocol):
"""All auth backends must inherit from this class"""

@property
def default_conn_url(self) -> Optional[str]:
def default_conn_url(self) -> str | None:
return None

@abc.abstractmethod

@@ -3155,7 +3152,7 @@ class NeonProxy(PgProtocol):
]

class Console(AuthBackend):
def __init__(self, endpoint: str, fixed_rate_limit: Optional[int] = None):
def __init__(self, endpoint: str, fixed_rate_limit: int | None = None):
self.endpoint = endpoint
self.fixed_rate_limit = fixed_rate_limit

@@ -3183,7 +3180,7 @@ class NeonProxy(PgProtocol):
pg_conn_url: str

@property
def default_conn_url(self) -> Optional[str]:
def default_conn_url(self) -> str | None:
return self.pg_conn_url

def extra_args(self) -> list[str]:

@@ -3202,8 +3199,8 @@ class NeonProxy(PgProtocol):
mgmt_port: int,
external_http_port: int,
auth_backend: NeonProxy.AuthBackend,
metric_collection_endpoint: Optional[str] = None,
metric_collection_interval: Optional[str] = None,
metric_collection_endpoint: str | None = None,
metric_collection_interval: str | None = None,
):
host = "127.0.0.1"
domain = "proxy.localtest.me" # resolves to 127.0.0.1

@@ -3221,7 +3218,7 @@ class NeonProxy(PgProtocol):
self.metric_collection_endpoint = metric_collection_endpoint
self.metric_collection_interval = metric_collection_interval
self.http_timeout_seconds = 15
self._popen: Optional[subprocess.Popen[bytes]] = None
self._popen: subprocess.Popen[bytes] | None = None

def start(self) -> NeonProxy:
assert self._popen is None

@@ -3356,9 +3353,9 @@ class NeonProxy(PgProtocol):

def __exit__(
self,
exc_type: Optional[type[BaseException]],
exc: Optional[BaseException],
tb: Optional[TracebackType],
exc_type: type[BaseException] | None,
exc: BaseException | None,
tb: TracebackType | None,
):
if self._popen is not None:
self._popen.terminate()

@@ -3439,7 +3436,7 @@ class NeonAuthBroker:
self.mgmt_port = mgmt_port
self.auth_backend = auth_backend
self.http_timeout_seconds = 15
self._popen: Optional[subprocess.Popen[bytes]] = None
self._popen: subprocess.Popen[bytes] | None = None

def start(self) -> NeonAuthBroker:
assert self._popen is None

@@ -3515,9 +3512,9 @@ class NeonAuthBroker:

def __exit__(
self,
_exc_type: Optional[type[BaseException]],
_exc_value: Optional[BaseException],
_traceback: Optional[TracebackType],
_exc_type: type[BaseException] | None,
_exc_value: BaseException | None,
_traceback: TracebackType | None,
):
if self._popen is not None:
self._popen.terminate()

@@ -3673,9 +3670,9 @@ class Endpoint(PgProtocol, LogUtils):
):
super().__init__(host="localhost", port=pg_port, user="cloud_admin", dbname="postgres")
self.env = env
self.branch_name: Optional[str] = None # dubious
self.endpoint_id: Optional[str] = None # dubious, see asserts below
self.pgdata_dir: Optional[Path] = None # Path to computenode PGDATA
self.branch_name: str | None = None # dubious
self.endpoint_id: str | None = None # dubious, see asserts below
self.pgdata_dir: Path | None = None # Path to computenode PGDATA
self.tenant_id = tenant_id
self.pg_port = pg_port
self.http_port = http_port

@@ -3692,7 +3689,7 @@ class Endpoint(PgProtocol, LogUtils):
self._running = threading.Semaphore(0)

def http_client(
self, auth_token: Optional[str] = None, retries: Optional[Retry] = None
self, auth_token: str | None = None, retries: Retry | None = None
) -> EndpointHttpClient:
return EndpointHttpClient(
port=self.http_port,

@@ -3701,11 +3698,11 @@ class Endpoint(PgProtocol, LogUtils):
def create(
self,
branch_name: str,
endpoint_id: Optional[str] = None,
endpoint_id: str | None = None,
hot_standby: bool = False,
lsn: Optional[Lsn] = None,
config_lines: Optional[list[str]] = None,
pageserver_id: Optional[int] = None,
lsn: Lsn | None = None,
config_lines: list[str] | None = None,
pageserver_id: int | None = None,
allow_multiple: bool = False,
) -> Endpoint:
"""

@@ -3748,11 +3745,11 @@ class Endpoint(PgProtocol, LogUtils):

def start(
self,
remote_ext_config: Optional[str] = None,
pageserver_id: Optional[int] = None,
safekeepers: Optional[list[int]] = None,
remote_ext_config: str | None = None,
pageserver_id: int | None = None,
safekeepers: list[int] | None = None,
allow_multiple: bool = False,
basebackup_request_tries: Optional[int] = None,
basebackup_request_tries: int | None = None,
) -> Endpoint:
"""
Start the Postgres instance.

@@ -3828,9 +3825,7 @@ class Endpoint(PgProtocol, LogUtils):
def is_running(self):
return self._running._value > 0

def reconfigure(
self, pageserver_id: Optional[int] = None, safekeepers: Optional[list[int]] = None
):
def reconfigure(self, pageserver_id: int | None = None, safekeepers: list[int] | None = None):
assert self.endpoint_id is not None
# If `safekeepers` is not None, they are remember them as active and use
# in the following commands.

@@ -3877,7 +3872,7 @@ class Endpoint(PgProtocol, LogUtils):
def stop(
self,
mode: str = "fast",
sks_wait_walreceiver_gone: Optional[tuple[list[Safekeeper], TimelineId]] = None,
sks_wait_walreceiver_gone: tuple[list[Safekeeper], TimelineId] | None = None,
) -> Endpoint:
"""
Stop the Postgres instance if it's running.

@@ -3931,14 +3926,14 @@ class Endpoint(PgProtocol, LogUtils):
def create_start(
self,
branch_name: str,
endpoint_id: Optional[str] = None,
endpoint_id: str | None = None,
hot_standby: bool = False,
lsn: Optional[Lsn] = None,
config_lines: Optional[list[str]] = None,
remote_ext_config: Optional[str] = None,
pageserver_id: Optional[int] = None,
lsn: Lsn | None = None,
config_lines: list[str] | None = None,
remote_ext_config: str | None = None,
pageserver_id: int | None = None,
allow_multiple: bool = False,
basebackup_request_tries: Optional[int] = None,
basebackup_request_tries: int | None = None,
) -> Endpoint:
"""
Create an endpoint, apply config, and start Postgres.

@@ -3967,9 +3962,9 @@ class Endpoint(PgProtocol, LogUtils):

def __exit__(
self,
exc_type: Optional[type[BaseException]],
exc: Optional[BaseException],
tb: Optional[TracebackType],
exc_type: type[BaseException] | None,
exc: BaseException | None,
tb: TracebackType | None,
):
self.stop()

@@ -3980,7 +3975,7 @@ class Endpoint(PgProtocol, LogUtils):
assert self.pgdata_dir is not None # please mypy
return get_dir_size(self.pgdata_dir / "pg_wal") / 1024 / 1024

def clear_shared_buffers(self, cursor: Optional[Any] = None):
def clear_shared_buffers(self, cursor: Any | None = None):
"""
Best-effort way to clear postgres buffers. Pinned buffers will not be 'cleared.'

@@ -4003,14 +3998,14 @@ class EndpointFactory:
def create_start(
self,
branch_name: str,
endpoint_id: Optional[str] = None,
tenant_id: Optional[TenantId] = None,
lsn: Optional[Lsn] = None,
endpoint_id: str | None = None,
tenant_id: TenantId | None = None,
lsn: Lsn | None = None,
hot_standby: bool = False,
config_lines: Optional[list[str]] = None,
remote_ext_config: Optional[str] = None,
pageserver_id: Optional[int] = None,
basebackup_request_tries: Optional[int] = None,
config_lines: list[str] | None = None,
remote_ext_config: str | None = None,
pageserver_id: int | None = None,
basebackup_request_tries: int | None = None,
) -> Endpoint:
ep = Endpoint(
self.env,

@@ -4035,12 +4030,12 @@ class EndpointFactory:
def create(
self,
branch_name: str,
endpoint_id: Optional[str] = None,
tenant_id: Optional[TenantId] = None,
lsn: Optional[Lsn] = None,
endpoint_id: str | None = None,
tenant_id: TenantId | None = None,
lsn: Lsn | None = None,
hot_standby: bool = False,
config_lines: Optional[list[str]] = None,
pageserver_id: Optional[int] = None,
config_lines: list[str] | None = None,
pageserver_id: int | None = None,
) -> Endpoint:
ep = Endpoint(
self.env,

@@ -4078,7 +4073,7 @@ class EndpointFactory:
return self

def new_replica(
self, origin: Endpoint, endpoint_id: str, config_lines: Optional[list[str]] = None
self, origin: Endpoint, endpoint_id: str, config_lines: list[str] | None = None
):
branch_name = origin.branch_name
assert origin in self.endpoints

@@ -4094,7 +4089,7 @@ class EndpointFactory:
)

def new_replica_start(
self, origin: Endpoint, endpoint_id: str, config_lines: Optional[list[str]] = None
self, origin: Endpoint, endpoint_id: str, config_lines: list[str] | None = None
):
branch_name = origin.branch_name
assert origin in self.endpoints

@@ -4132,7 +4127,7 @@ class Safekeeper(LogUtils):
port: SafekeeperPort,
id: int,
running: bool = False,
extra_opts: Optional[list[str]] = None,
extra_opts: list[str] | None = None,
):
self.env = env
self.port = port

@@ -4158,7 +4153,7 @@ class Safekeeper(LogUtils):
self.extra_opts = extra_opts

def start(
self, extra_opts: Optional[list[str]] = None, timeout_in_seconds: Optional[int] = None
self, extra_opts: list[str] | None = None, timeout_in_seconds: int | None = None
) -> Safekeeper:
if extra_opts is None:
# Apply either the extra_opts passed in, or the ones from our constructor: we do not merge the two.

@@ -4238,7 +4233,7 @@ class Safekeeper(LogUtils):
return res

def http_client(
self, auth_token: Optional[str] = None, gen_sk_wide_token: bool = True
self, auth_token: str | None = None, gen_sk_wide_token: bool = True
) -> SafekeeperHttpClient:
"""
When auth_token is None but gen_sk_wide is True creates safekeeper wide

@@ -4371,7 +4366,7 @@ class NeonBroker(LogUtils):

def start(
self,
timeout_in_seconds: Optional[int] = None,
timeout_in_seconds: int | None = None,
):
assert not self.running
self.env.neon_cli.storage_broker_start(timeout_in_seconds)

@@ -4394,8 +4389,7 @@ class NeonBroker(LogUtils):
assert_no_errors(self.logfile, "storage_controller", [])

# TODO: Replace with `StrEnum` when we upgrade to python 3.11
class NodeKind(str, Enum):
class NodeKind(StrEnum):
PAGESERVER = "pageserver"
SAFEKEEPER = "safekeeper"

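The `class NodeKind(str, Enum)` → `class NodeKind(StrEnum)` swap above is the pattern this commit applies to every string-valued enum in the fixtures. A minimal sketch of why the swap is safe on Python 3.11 (the enum below is illustrative, not one of the fixtures' own classes):

```python
from enum import StrEnum  # available since Python 3.11

class Kind(StrEnum):
    PAGESERVER = "pageserver"
    SAFEKEEPER = "safekeeper"

# StrEnum members are real strings, so existing comparisons and
# formatting keep working without a custom __str__ override.
assert Kind.PAGESERVER == "pageserver"
assert str(Kind.PAGESERVER) == "pageserver"
assert f"{Kind.SAFEKEEPER}" == "safekeeper"
```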
@@ -4406,7 +4400,7 @@ class StorageScrubber:
self.log_dir = log_dir

def scrubber_cli(
self, args: list[str], timeout, extra_env: Optional[dict[str, str]] = None
self, args: list[str], timeout, extra_env: dict[str, str] | None = None
) -> str:
assert isinstance(self.env.pageserver_remote_storage, S3Storage)
s3_storage = self.env.pageserver_remote_storage

@@ -4469,8 +4463,8 @@ class StorageScrubber:
self,
post_to_storage_controller: bool = False,
node_kind: NodeKind = NodeKind.PAGESERVER,
timeline_lsns: Optional[list[dict[str, Any]]] = None,
extra_env: Optional[dict[str, str]] = None,
timeline_lsns: list[dict[str, Any]] | None = None,
extra_env: dict[str, str] | None = None,
) -> tuple[bool, Any]:
"""
Returns the health status and the metadata summary.

@@ -4504,8 +4498,8 @@ class StorageScrubber:
def pageserver_physical_gc(
self,
min_age_secs: int,
tenant_ids: Optional[list[TenantId]] = None,
mode: Optional[str] = None,
tenant_ids: list[TenantId] | None = None,
mode: str | None = None,
):
args = ["pageserver-physical-gc", "--min-age", f"{min_age_secs}s"]

@@ -4619,7 +4613,7 @@ def check_restored_datadir_content(
test_output_dir: Path,
env: NeonEnv,
endpoint: Endpoint,
ignored_files: Optional[list[str]] = None,
ignored_files: list[str] | None = None,
):
pg_bin = PgBin(test_output_dir, env.pg_distrib_dir, env.pg_version)

@@ -4721,7 +4715,7 @@ def logical_replication_sync(subscriber: PgProtocol, publisher: PgProtocol) -> L

def tenant_get_shards(
env: NeonEnv, tenant_id: TenantId, pageserver_id: Optional[int] = None
env: NeonEnv, tenant_id: TenantId, pageserver_id: int | None = None
) -> list[tuple[TenantShardId, NeonPageserver]]:
"""
Helper for when you want to talk to one or more pageservers, and the

@@ -4784,8 +4778,8 @@ def wait_for_last_flush_lsn(
endpoint: Endpoint,
tenant: TenantId,
timeline: TimelineId,
pageserver_id: Optional[int] = None,
auth_token: Optional[str] = None,
pageserver_id: int | None = None,
auth_token: str | None = None,
) -> Lsn:
"""Wait for pageserver to catch up the latest flush LSN, returns the last observed lsn."""

@@ -4814,7 +4808,7 @@ def flush_ep_to_pageserver(
ep: Endpoint,
tenant: TenantId,
timeline: TimelineId,
pageserver_id: Optional[int] = None,
pageserver_id: int | None = None,
) -> Lsn:
"""
Stop endpoint and wait until all committed WAL reaches the pageserver

@@ -4857,7 +4851,7 @@ def wait_for_wal_insert_lsn(
endpoint: Endpoint,
tenant: TenantId,
timeline: TimelineId,
pageserver_id: Optional[int] = None,
pageserver_id: int | None = None,
) -> Lsn:
"""Wait for pageserver to catch up the latest flush LSN, returns the last observed lsn."""
last_flush_lsn = Lsn(endpoint.safe_psql("SELECT pg_current_wal_insert_lsn()")[0][0])

@@ -4878,7 +4872,7 @@ def fork_at_current_lsn(
endpoint: Endpoint,
new_branch_name: str,
ancestor_branch_name: str,
tenant_id: Optional[TenantId] = None,
tenant_id: TenantId | None = None,
) -> TimelineId:
"""
Create new branch at the last LSN of an existing branch.

@@ -4951,8 +4945,8 @@ def last_flush_lsn_upload(
endpoint: Endpoint,
tenant_id: TenantId,
timeline_id: TimelineId,
pageserver_id: Optional[int] = None,
auth_token: Optional[str] = None,
pageserver_id: int | None = None,
auth_token: str | None = None,
) -> Lsn:
"""
Wait for pageserver to catch to the latest flush LSN of given endpoint,

@@ -4987,9 +4981,9 @@ def generate_uploads_and_deletions(
env: NeonEnv,
*,
init: bool = True,
tenant_id: Optional[TenantId] = None,
timeline_id: Optional[TimelineId] = None,
data: Optional[str] = None,
tenant_id: TenantId | None = None,
timeline_id: TimelineId | None = None,
data: str | None = None,
pageserver: NeonPageserver,
):
"""

@@ -2,7 +2,7 @@ from __future__ import annotations

import re
from dataclasses import dataclass
from typing import TYPE_CHECKING, Union
from typing import TYPE_CHECKING

from fixtures.common_types import KEY_MAX, KEY_MIN, Key, Lsn

@@ -46,7 +46,7 @@ class DeltaLayerName:
return ret

LayerName = Union[ImageLayerName, DeltaLayerName]
LayerName = ImageLayerName | DeltaLayerName

class InvalidFileName(Exception):

@@ -4,7 +4,7 @@ import time
from collections import defaultdict
from dataclasses import dataclass
from datetime import datetime
from typing import TYPE_CHECKING, Any
from typing import Any

import requests
from requests.adapters import HTTPAdapter

@@ -16,9 +16,6 @@ from fixtures.metrics import Metrics, MetricsGetter, parse_metrics
from fixtures.pg_version import PgVersion
from fixtures.utils import Fn

if TYPE_CHECKING:
from typing import Optional, Union

class PageserverApiException(Exception):
def __init__(self, message, status_code: int):

@@ -43,7 +40,7 @@ class TimelineCreate409(PageserverApiException):
class InMemoryLayerInfo:
kind: str
lsn_start: str
lsn_end: Optional[str]
lsn_end: str | None

@classmethod
def from_json(cls, d: dict[str, Any]) -> InMemoryLayerInfo:

@@ -60,10 +57,10 @@ class HistoricLayerInfo:
layer_file_name: str
layer_file_size: int
lsn_start: str
lsn_end: Optional[str]
lsn_end: str | None
remote: bool
# None for image layers, true if pageserver thinks this is an L0 delta layer
l0: Optional[bool]
l0: bool | None
visible: bool

@classmethod

@@ -180,8 +177,8 @@ class PageserverHttpClient(requests.Session, MetricsGetter):
self,
port: int,
is_testing_enabled_or_skip: Fn,
auth_token: Optional[str] = None,
retries: Optional[Retry] = None,
auth_token: str | None = None,
retries: Retry | None = None,
):
super().__init__()
self.port = port

@@ -278,7 +275,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter):

def tenant_attach(
self,
tenant_id: Union[TenantId, TenantShardId],
tenant_id: TenantId | TenantShardId,
generation: int,
config: None | dict[str, Any] = None,
):

@@ -305,7 +302,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter):
},
)

def tenant_reset(self, tenant_id: Union[TenantId, TenantShardId], drop_cache: bool):
def tenant_reset(self, tenant_id: TenantId | TenantShardId, drop_cache: bool):
params = {}
if drop_cache:
params["drop_cache"] = "true"

@@ -315,10 +312,10 @@ class PageserverHttpClient(requests.Session, MetricsGetter):

def tenant_location_conf(
self,
tenant_id: Union[TenantId, TenantShardId],
tenant_id: TenantId | TenantShardId,
location_conf: dict[str, Any],
flush_ms=None,
lazy: Optional[bool] = None,
lazy: bool | None = None,
):
body = location_conf.copy()

@@ -353,13 +350,13 @@ class PageserverHttpClient(requests.Session, MetricsGetter):
self.verbose_error(res)
return res.json()

def tenant_delete(self, tenant_id: Union[TenantId, TenantShardId]):
def tenant_delete(self, tenant_id: TenantId | TenantShardId):
res = self.delete(f"http://localhost:{self.port}/v1/tenant/{tenant_id}")
self.verbose_error(res)
return res

def tenant_status(
self, tenant_id: Union[TenantId, TenantShardId], activate: bool = False
self, tenant_id: TenantId | TenantShardId, activate: bool = False
) -> dict[Any, Any]:
"""
:activate: hint the server not to accelerate activation of this tenant in response

@@ -378,17 +375,17 @@ class PageserverHttpClient(requests.Session, MetricsGetter):
assert isinstance(res_json, dict)
return res_json

def tenant_config(self, tenant_id: Union[TenantId, TenantShardId]) -> TenantConfig:
def tenant_config(self, tenant_id: TenantId | TenantShardId) -> TenantConfig:
res = self.get(f"http://localhost:{self.port}/v1/tenant/{tenant_id}/config")
self.verbose_error(res)
return TenantConfig.from_json(res.json())

def tenant_heatmap_upload(self, tenant_id: Union[TenantId, TenantShardId]):
def tenant_heatmap_upload(self, tenant_id: TenantId | TenantShardId):
res = self.post(f"http://localhost:{self.port}/v1/tenant/{tenant_id}/heatmap_upload")
self.verbose_error(res)

def tenant_secondary_download(
self, tenant_id: Union[TenantId, TenantShardId], wait_ms: Optional[int] = None
self, tenant_id: TenantId | TenantShardId, wait_ms: int | None = None
) -> tuple[int, dict[Any, Any]]:
url = f"http://localhost:{self.port}/v1/tenant/{tenant_id}/secondary/download"
if wait_ms is not None:

@@ -397,13 +394,13 @@ class PageserverHttpClient(requests.Session, MetricsGetter):
self.verbose_error(res)
return (res.status_code, res.json())

def tenant_secondary_status(self, tenant_id: Union[TenantId, TenantShardId]):
def tenant_secondary_status(self, tenant_id: TenantId | TenantShardId):
url = f"http://localhost:{self.port}/v1/tenant/{tenant_id}/secondary/status"
res = self.get(url)
self.verbose_error(res)
return res.json()

def set_tenant_config(self, tenant_id: Union[TenantId, TenantShardId], config: dict[str, Any]):
def set_tenant_config(self, tenant_id: TenantId | TenantShardId, config: dict[str, Any]):
"""
Only use this via storage_controller.pageserver_api().

@@ -420,8 +417,8 @@ class PageserverHttpClient(requests.Session, MetricsGetter):
def patch_tenant_config_client_side(
self,
tenant_id: TenantId,
inserts: Optional[dict[str, Any]] = None,
removes: Optional[list[str]] = None,
inserts: dict[str, Any] | None = None,
removes: list[str] | None = None,
):
"""
Only use this via storage_controller.pageserver_api().

@@ -436,11 +433,11 @@ class PageserverHttpClient(requests.Session, MetricsGetter):
del current[key]
self.set_tenant_config(tenant_id, current)

def tenant_size(self, tenant_id: Union[TenantId, TenantShardId]) -> int:
def tenant_size(self, tenant_id: TenantId | TenantShardId) -> int:
return self.tenant_size_and_modelinputs(tenant_id)[0]

def tenant_size_and_modelinputs(
self, tenant_id: Union[TenantId, TenantShardId]
self, tenant_id: TenantId | TenantShardId
) -> tuple[int, dict[str, Any]]:
"""
Returns the tenant size, together with the model inputs as the second tuple item.

@@ -456,7 +453,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter):
assert isinstance(inputs, dict)
return (size, inputs)

def tenant_size_debug(self, tenant_id: Union[TenantId, TenantShardId]) -> str:
def tenant_size_debug(self, tenant_id: TenantId | TenantShardId) -> str:
"""
Returns the tenant size debug info, as an HTML string
"""

@@ -468,10 +465,10 @@ class PageserverHttpClient(requests.Session, MetricsGetter):

def tenant_time_travel_remote_storage(
self,
tenant_id: Union[TenantId, TenantShardId],
tenant_id: TenantId | TenantShardId,
timestamp: datetime,
done_if_after: datetime,
shard_counts: Optional[list[int]] = None,
shard_counts: list[int] | None = None,
):
"""
Issues a request to perform time travel operations on the remote storage

@@ -490,7 +487,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter):

def timeline_list(
self,
tenant_id: Union[TenantId, TenantShardId],
tenant_id: TenantId | TenantShardId,
include_non_incremental_logical_size: bool = False,
include_timeline_dir_layer_file_size_sum: bool = False,
) -> list[dict[str, Any]]:

@@ -510,7 +507,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter):

def timeline_and_offloaded_list(
self,
tenant_id: Union[TenantId, TenantShardId],
tenant_id: TenantId | TenantShardId,
) -> TimelinesInfoAndOffloaded:
res = self.get(
f"http://localhost:{self.port}/v1/tenant/{tenant_id}/timeline_and_offloaded",

@@ -523,11 +520,11 @@ class PageserverHttpClient(requests.Session, MetricsGetter):
def timeline_create(
self,
pg_version: PgVersion,
tenant_id: Union[TenantId, TenantShardId],
tenant_id: TenantId | TenantShardId,
new_timeline_id: TimelineId,
ancestor_timeline_id: Optional[TimelineId] = None,
ancestor_start_lsn: Optional[Lsn] = None,
existing_initdb_timeline_id: Optional[TimelineId] = None,
ancestor_timeline_id: TimelineId | None = None,
ancestor_start_lsn: Lsn | None = None,
existing_initdb_timeline_id: TimelineId | None = None,
**kwargs,
) -> dict[Any, Any]:
body: dict[str, Any] = {

@@ -558,7 +555,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter):

def timeline_detail(
self,
tenant_id: Union[TenantId, TenantShardId],
tenant_id: TenantId | TenantShardId,
timeline_id: TimelineId,
include_non_incremental_logical_size: bool = False,
include_timeline_dir_layer_file_size_sum: bool = False,

@@ -584,7 +581,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter):
return res_json

def timeline_delete(
self, tenant_id: Union[TenantId, TenantShardId], timeline_id: TimelineId, **kwargs
self, tenant_id: TenantId | TenantShardId, timeline_id: TimelineId, **kwargs
):
"""
Note that deletion is not instant, it is scheduled and performed mostly in the background.

@@ -600,9 +597,9 @@ class PageserverHttpClient(requests.Session, MetricsGetter):

def timeline_gc(
self,
tenant_id: Union[TenantId, TenantShardId],
tenant_id: TenantId | TenantShardId,
timeline_id: TimelineId,
gc_horizon: Optional[int],
gc_horizon: int | None,
) -> dict[str, Any]:
"""
Unlike most handlers, this will wait for the layers to be actually

@@ -624,16 +621,14 @@ class PageserverHttpClient(requests.Session, MetricsGetter):
assert isinstance(res_json, dict)
return res_json

def timeline_block_gc(self, tenant_id: Union[TenantId, TenantShardId], timeline_id: TimelineId):
def timeline_block_gc(self, tenant_id: TenantId | TenantShardId, timeline_id: TimelineId):
res = self.post(
f"http://localhost:{self.port}/v1/tenant/{tenant_id}/timeline/{timeline_id}/block_gc",
)
log.info(f"Got GC request response code: {res.status_code}")
self.verbose_error(res)

def timeline_unblock_gc(
self, tenant_id: Union[TenantId, TenantShardId], timeline_id: TimelineId
):
def timeline_unblock_gc(self, tenant_id: TenantId | TenantShardId, timeline_id: TimelineId):
res = self.post(
f"http://localhost:{self.port}/v1/tenant/{tenant_id}/timeline/{timeline_id}/unblock_gc",
)

@@ -642,7 +637,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter):

def timeline_offload(
self,
tenant_id: Union[TenantId, TenantShardId],
tenant_id: TenantId | TenantShardId,
timeline_id: TimelineId,
):
self.is_testing_enabled_or_skip()

@@ -658,14 +653,14 @@ class PageserverHttpClient(requests.Session, MetricsGetter):

def timeline_compact(
self,
tenant_id: Union[TenantId, TenantShardId],
tenant_id: TenantId | TenantShardId,
timeline_id: TimelineId,
force_repartition=False,
force_image_layer_creation=False,
force_l0_compaction=False,
wait_until_uploaded=False,
enhanced_gc_bottom_most_compaction=False,
body: Optional[dict[str, Any]] = None,
body: dict[str, Any] | None = None,
):
self.is_testing_enabled_or_skip()
query = {}

@@ -692,7 +687,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter):
assert res_json is None

def timeline_preserve_initdb_archive(
self, tenant_id: Union[TenantId, TenantShardId], timeline_id: TimelineId
self, tenant_id: TenantId | TenantShardId, timeline_id: TimelineId
):
log.info(
f"Requesting initdb archive preservation for tenant {tenant_id} and timeline {timeline_id}"

@@ -704,7 +699,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter):

def timeline_archival_config(
self,
tenant_id: Union[TenantId, TenantShardId],
tenant_id: TenantId | TenantShardId,
timeline_id: TimelineId,
state: TimelineArchivalState,
):

@@ -720,7 +715,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter):

def timeline_get_lsn_by_timestamp(
self,
tenant_id: Union[TenantId, TenantShardId],
tenant_id: TenantId | TenantShardId,
timeline_id: TimelineId,
timestamp: datetime,
with_lease: bool = False,

@@ -739,7 +734,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter):
return res_json

def timeline_lsn_lease(
self, tenant_id: Union[TenantId, TenantShardId], timeline_id: TimelineId, lsn: Lsn
self, tenant_id: TenantId | TenantShardId, timeline_id: TimelineId, lsn: Lsn
):
data = {
"lsn": str(lsn),

@@ -755,7 +750,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter):
return res_json

def timeline_get_timestamp_of_lsn(
self, tenant_id: Union[TenantId, TenantShardId], timeline_id: TimelineId, lsn: Lsn
self, tenant_id: TenantId | TenantShardId, timeline_id: TimelineId, lsn: Lsn
):
log.info(f"Requesting time range of lsn {lsn}, tenant {tenant_id}, timeline {timeline_id}")
res = self.get(

@@ -765,9 +760,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter):
res_json = res.json()
return res_json

def timeline_layer_map_info(
self, tenant_id: Union[TenantId, TenantShardId], timeline_id: TimelineId
):
def timeline_layer_map_info(self, tenant_id: TenantId | TenantShardId, timeline_id: TimelineId):
log.info(f"Requesting layer map info of tenant {tenant_id}, timeline {timeline_id}")
res = self.get(
f"http://localhost:{self.port}/v1/tenant/{tenant_id}/timeline/{timeline_id}/layer",

@@ -778,13 +771,13 @@ class PageserverHttpClient(requests.Session, MetricsGetter):

def timeline_checkpoint(
self,
tenant_id: Union[TenantId, TenantShardId],
tenant_id: TenantId | TenantShardId,
timeline_id: TimelineId,
force_repartition=False,
force_image_layer_creation=False,
force_l0_compaction=False,
wait_until_uploaded=False,
compact: Optional[bool] = None,
compact: bool | None = None,
**kwargs,
):
self.is_testing_enabled_or_skip()

@@ -814,7 +807,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter):

def timeline_spawn_download_remote_layers(
self,
tenant_id: Union[TenantId, TenantShardId],
tenant_id: TenantId | TenantShardId,
timeline_id: TimelineId,
max_concurrent_downloads: int,
) -> dict[str, Any]:

@@ -833,7 +826,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter):

def timeline_poll_download_remote_layers_status(
self,
tenant_id: Union[TenantId, TenantShardId],
tenant_id: TenantId | TenantShardId,
timeline_id: TimelineId,
spawn_response: dict[str, Any],
poll_state=None,

@@ -855,7 +848,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter):

def timeline_download_remote_layers(
self,
tenant_id: Union[TenantId, TenantShardId],
tenant_id: TenantId | TenantShardId,
timeline_id: TimelineId,
max_concurrent_downloads: int,
errors_ok=False,

@@ -905,7 +898,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter):
timeline_id: TimelineId,
file_kind: str,
op_kind: str,
) -> Optional[int]:
) -> int | None:
metrics = [
"pageserver_remote_timeline_client_calls_started_total",
"pageserver_remote_timeline_client_calls_finished_total",

@@ -929,7 +922,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter):

def layer_map_info(
self,
tenant_id: Union[TenantId, TenantShardId],
tenant_id: TenantId | TenantShardId,
timeline_id: TimelineId,
) -> LayerMapInfo:
res = self.get(

@@ -939,7 +932,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter):
return LayerMapInfo.from_json(res.json())

def timeline_layer_scan_disposable_keys(
self, tenant_id: Union[TenantId, TenantShardId], timeline_id: TimelineId, layer_name: str
self, tenant_id: TenantId | TenantShardId, timeline_id: TimelineId, layer_name: str
) -> ScanDisposableKeysResponse:
res = self.post(
f"http://localhost:{self.port}/v1/tenant/{tenant_id}/timeline/{timeline_id}/layer/{layer_name}/scan_disposable_keys",

@@ -949,7 +942,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter):
return ScanDisposableKeysResponse.from_json(res.json())

def download_layer(
self, tenant_id: Union[TenantId, TenantShardId], timeline_id: TimelineId, layer_name: str
self, tenant_id: TenantId | TenantShardId, timeline_id: TimelineId, layer_name: str
):
res = self.get(
f"http://localhost:{self.port}/v1/tenant/{tenant_id}/timeline/{timeline_id}/layer/{layer_name}",

@@ -958,9 +951,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter):

assert res.status_code == 200

def download_all_layers(
self, tenant_id: Union[TenantId, TenantShardId], timeline_id: TimelineId
):
def download_all_layers(self, tenant_id: TenantId | TenantShardId, timeline_id: TimelineId):
info = self.layer_map_info(tenant_id, timeline_id)
for layer in info.historic_layers:
if not layer.remote:

@@ -969,9 +960,9 @@ class PageserverHttpClient(requests.Session, MetricsGetter):

def detach_ancestor(
self,
tenant_id: Union[TenantId, TenantShardId],
tenant_id: TenantId | TenantShardId,
timeline_id: TimelineId,
batch_size: Optional[int] = None,
batch_size: int | None = None,
**kwargs,
) -> set[TimelineId]:
params = {}

@@ -987,7 +978,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter):
return set(map(TimelineId, json["reparented_timelines"]))

def evict_layer(
self, tenant_id: Union[TenantId, TenantShardId], timeline_id: TimelineId, layer_name: str
self, tenant_id: TenantId | TenantShardId, timeline_id: TimelineId, layer_name: str
):
res = self.delete(
f"http://localhost:{self.port}/v1/tenant/{tenant_id}/timeline/{timeline_id}/layer/{layer_name}",

@@ -996,7 +987,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter):

assert res.status_code in (200, 304)

def evict_all_layers(self, tenant_id: Union[TenantId, TenantShardId], timeline_id: TimelineId):
def evict_all_layers(self, tenant_id: TenantId | TenantShardId, timeline_id: TimelineId):
info = self.layer_map_info(tenant_id, timeline_id)
for layer in info.historic_layers:
self.evict_layer(tenant_id, timeline_id, layer.layer_file_name)

@@ -1009,7 +1000,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter):
self.verbose_error(res)
return res.json()

def tenant_break(self, tenant_id: Union[TenantId, TenantShardId]):
def tenant_break(self, tenant_id: TenantId | TenantShardId):
res = self.put(f"http://localhost:{self.port}/v1/tenant/{tenant_id}/break")
self.verbose_error(res)

@@ -1058,7 +1049,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter):

def perf_info(
self,
tenant_id: Union[TenantId, TenantShardId],
tenant_id: TenantId | TenantShardId,
timeline_id: TimelineId,
):
self.is_testing_enabled_or_skip()

@@ -13,7 +13,8 @@ from fixtures.neon_fixtures import (
from fixtures.remote_storage import LocalFsStorage, RemoteStorageKind

if TYPE_CHECKING:
from typing import Any, Callable
from collections.abc import Callable
from typing import Any

def single_timeline(

@@ -17,14 +17,14 @@ from fixtures.remote_storage import RemoteStorage, RemoteStorageKind, S3Storage
from fixtures.utils import wait_until

if TYPE_CHECKING:
from typing import Any, Optional, Union
from typing import Any

def assert_tenant_state(
pageserver_http: PageserverHttpClient,
tenant: TenantId,
expected_state: str,
message: Optional[str] = None,
message: str | None = None,
) -> None:
tenant_status = pageserver_http.tenant_status(tenant)
log.info(f"tenant_status: {tenant_status}")

@@ -33,7 +33,7 @@ def assert_tenant_state(

def remote_consistent_lsn(
pageserver_http: PageserverHttpClient,
tenant: Union[TenantId, TenantShardId],
tenant: TenantId | TenantShardId,
timeline: TimelineId,
) -> Lsn:
detail = pageserver_http.timeline_detail(tenant, timeline)

@@ -51,7 +51,7 @@ def remote_consistent_lsn(

def wait_for_upload(
pageserver_http: PageserverHttpClient,
tenant: Union[TenantId, TenantShardId],
tenant: TenantId | TenantShardId,
timeline: TimelineId,
lsn: Lsn,
):

@@ -138,7 +138,7 @@ def wait_until_all_tenants_state(

def wait_until_timeline_state(
pageserver_http: PageserverHttpClient,
tenant_id: Union[TenantId, TenantShardId],
tenant_id: TenantId | TenantShardId,
timeline_id: TimelineId,
expected_state: str,
iterations: int,

@@ -188,7 +188,7 @@ def wait_until_tenant_active(

def last_record_lsn(
pageserver_http_client: PageserverHttpClient,
tenant: Union[TenantId, TenantShardId],
tenant: TenantId | TenantShardId,
timeline: TimelineId,
) -> Lsn:
detail = pageserver_http_client.timeline_detail(tenant, timeline)

@@ -200,7 +200,7 @@ def last_record_lsn(

def wait_for_last_record_lsn(
pageserver_http: PageserverHttpClient,
tenant: Union[TenantId, TenantShardId],
tenant: TenantId | TenantShardId,
timeline: TimelineId,
lsn: Lsn,
) -> Lsn:

@@ -267,10 +267,10 @@ def wait_for_upload_queue_empty(

def wait_timeline_detail_404(
pageserver_http: PageserverHttpClient,
tenant_id: Union[TenantId, TenantShardId],
tenant_id: TenantId | TenantShardId,
timeline_id: TimelineId,
iterations: int,
interval: Optional[float] = None,
interval: float | None = None,
):
if interval is None:
interval = 0.25

@@ -292,10 +292,10 @@ def wait_timeline_detail_404(

def timeline_delete_wait_completed(
pageserver_http: PageserverHttpClient,
tenant_id: Union[TenantId, TenantShardId],
tenant_id: TenantId | TenantShardId,
timeline_id: TimelineId,
iterations: int = 20,
interval: Optional[float] = None,
interval: float | None = None,
**delete_args,
) -> None:
pageserver_http.timeline_delete(tenant_id=tenant_id, timeline_id=timeline_id, **delete_args)

@@ -304,9 +304,9 @@ def timeline_delete_wait_completed(

# remote_storage must not be None, but that's easier for callers to make mypy happy
def assert_prefix_empty(
remote_storage: Optional[RemoteStorage],
prefix: Optional[str] = None,
allowed_postfix: Optional[str] = None,
remote_storage: RemoteStorage | None,
prefix: str | None = None,
allowed_postfix: str | None = None,
delimiter: str = "/",
) -> None:
assert remote_storage is not None

@@ -348,8 +348,8 @@ def assert_prefix_empty(

# remote_storage must not be None, but that's easier for callers to make mypy happy
def assert_prefix_not_empty(
remote_storage: Optional[RemoteStorage],
prefix: Optional[str] = None,
remote_storage: RemoteStorage | None,
prefix: str | None = None,
delimiter: str = "/",
):
assert remote_storage is not None

@@ -358,7 +358,7 @@ def assert_prefix_not_empty(

def list_prefix(
remote: RemoteStorage, prefix: Optional[str] = None, delimiter: str = "/"
remote: RemoteStorage, prefix: str | None = None, delimiter: str = "/"
) -> ListObjectsV2OutputTypeDef:
"""
Note that this function takes into account prefix_in_bucket.

@@ -11,7 +11,7 @@ from _pytest.python import Metafunc
from fixtures.pg_version import PgVersion

if TYPE_CHECKING:
from typing import Any, Optional
from typing import Any

"""
@@ -20,31 +20,31 @@ Dynamically parametrize tests by different parameters

@pytest.fixture(scope="function", autouse=True)
def pg_version() -> Optional[PgVersion]:
def pg_version() -> PgVersion | None:
return None

@pytest.fixture(scope="function", autouse=True)
def build_type() -> Optional[str]:
def build_type() -> str | None:
return None

@pytest.fixture(scope="session", autouse=True)
def platform() -> Optional[str]:
def platform() -> str | None:
return None

@pytest.fixture(scope="function", autouse=True)
def pageserver_virtual_file_io_engine() -> Optional[str]:
def pageserver_virtual_file_io_engine() -> str | None:
return os.getenv("PAGESERVER_VIRTUAL_FILE_IO_ENGINE")

@pytest.fixture(scope="function", autouse=True)
def pageserver_virtual_file_io_mode() -> Optional[str]:
def pageserver_virtual_file_io_mode() -> str | None:
return os.getenv("PAGESERVER_VIRTUAL_FILE_IO_MODE")

def get_pageserver_default_tenant_config_compaction_algorithm() -> Optional[dict[str, Any]]:
def get_pageserver_default_tenant_config_compaction_algorithm() -> dict[str, Any] | None:
toml_table = os.getenv("PAGESERVER_DEFAULT_TENANT_CONFIG_COMPACTION_ALGORITHM")
if toml_table is None:
return None

@@ -54,7 +54,7 @@ def get_pageserver_default_tenant_config_compaction_algorithm() -> Optional[dict

@pytest.fixture(scope="function", autouse=True)
def pageserver_default_tenant_config_compaction_algorithm() -> Optional[dict[str, Any]]:
def pageserver_default_tenant_config_compaction_algorithm() -> dict[str, Any] | None:
return get_pageserver_default_tenant_config_compaction_algorithm()

@@ -18,7 +18,6 @@ from fixtures.utils import allure_attach_from_dir

if TYPE_CHECKING:
from collections.abc import Iterator
from typing import Optional

BASE_DIR = Path(__file__).parents[2]

@@ -26,9 +25,7 @@ COMPUTE_CONFIG_DIR = BASE_DIR / "compute" / "etc"
DEFAULT_OUTPUT_DIR: str = "test_output"

def get_test_dir(
request: FixtureRequest, top_output_dir: Path, prefix: Optional[str] = None
) -> Path:
def get_test_dir(request: FixtureRequest, top_output_dir: Path, prefix: str | None = None) -> Path:
"""Compute the path to a working directory for an individual test."""
test_name = request.node.name
test_dir = top_output_dir / f"{prefix or ''}{test_name.replace('/', '-')}"

@@ -112,7 +109,7 @@ def compatibility_snapshot_dir() -> Iterator[Path]:

@pytest.fixture(scope="session")
def compatibility_neon_binpath() -> Iterator[Optional[Path]]:
def compatibility_neon_binpath() -> Iterator[Path | None]:
if os.getenv("REMOTE_ENV"):
return
comp_binpath = None

@@ -133,7 +130,7 @@ def pg_distrib_dir(base_dir: Path) -> Iterator[Path]:

@pytest.fixture(scope="session")
def compatibility_pg_distrib_dir() -> Iterator[Optional[Path]]:
def compatibility_pg_distrib_dir() -> Iterator[Path | None]:
compat_distrib_dir = None
if env_compat_postgres_bin := os.environ.get("COMPATIBILITY_POSTGRES_DISTRIB_DIR"):
compat_distrib_dir = Path(env_compat_postgres_bin).resolve()

@@ -197,7 +194,7 @@ class FileAndThreadLock:
def __init__(self, path: Path):
self.path = path
self.thread_lock = threading.Lock()
self.fd: Optional[int] = None
self.fd: int | None = None

def __enter__(self):
self.fd = os.open(self.path, os.O_CREAT | os.O_WRONLY)

@@ -208,9 +205,9 @@ class FileAndThreadLock:

def __exit__(
self,
exc_type: Optional[type[BaseException]],
exc_value: Optional[BaseException],
exc_traceback: Optional[TracebackType],
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
exc_traceback: TracebackType | None,
):
assert self.fd is not None
assert self.thread_lock.locked() # ... by us

@@ -263,9 +260,9 @@ class SnapshotDir:

def __exit__(
self,
exc_type: Optional[type[BaseException]],
exc_value: Optional[BaseException],
exc_traceback: Optional[TracebackType],
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
exc_traceback: TracebackType | None,
):
self._lock.__exit__(exc_type, exc_value, exc_traceback)

@@ -277,7 +274,7 @@ def shared_snapshot_dir(top_output_dir: Path, ident: str) -> SnapshotDir:

@pytest.fixture(scope="function")
def test_overlay_dir(request: FixtureRequest, top_output_dir: Path) -> Optional[Path]:
def test_overlay_dir(request: FixtureRequest, top_output_dir: Path) -> Path | None:
"""
Idempotently create a test's overlayfs mount state directory.
If the functionality isn't enabled via env var, returns None.

@@ -1,22 +1,16 @@
from __future__ import annotations

import enum
from typing import TYPE_CHECKING
from enum import StrEnum

from typing_extensions import override

if TYPE_CHECKING:
from typing import Optional

"""
This fixture is used to determine which version of Postgres to use for tests.
"""

# Inherit PgVersion from str rather than int to make it easier to pass as a command-line argument
# TODO: use enum.StrEnum for Python >= 3.11
class PgVersion(str, enum.Enum):
class PgVersion(StrEnum):
V14 = "14"
V15 = "15"
V16 = "16"

@@ -34,7 +28,6 @@ class PgVersion(str, enum.Enum):
def __repr__(self) -> str:
return f"'{self.value}'"

# Make this explicit for Python 3.11 compatibility, which changes the behavior of enums
@override
def __str__(self) -> str:
return self.value

@@ -47,16 +40,18 @@ class PgVersion(str, enum.Enum):

@classmethod
@override
def _missing_(cls, value: object) -> Optional[PgVersion]:
known_values = {v.value for _, v in cls.__members__.items()}
def _missing_(cls, value: object) -> PgVersion | None:
if not isinstance(value, str):
return None

# Allow passing version as a string with "v" prefix (e.g. "v14")
if isinstance(value, str) and value.lower().startswith("v") and value[1:] in known_values:
return cls(value[1:])
# Allow passing version as an int (e.g. 15 or 150002, both will be converted to PgVersion.V15)
elif isinstance(value, int) and str(value)[:2] in known_values:
return cls(str(value)[:2])
known_values = set(cls.__members__.values())

# Allow passing version as v-prefixed string (e.g. "v14")
if value.lower().startswith("v") and (v := value[1:]) in known_values:
return cls(v)

# Allow passing version as an int (i.e. both "15" and "150002" matches PgVersion.V15)
if value.isdigit() and (v := value[:2]) in known_values:
return cls(v)

# Make mypy happy
# See https://github.com/python/mypy/issues/3974
return None

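Judging from the rewritten `_missing_` hook above, the enum still accepts the same loosely formatted inputs as before, now as strings only. A hedged usage sketch, derived purely from the code shown in this hunk:

```python
# Illustrative only; PgVersion is the fixtures' own class shown above.
from fixtures.pg_version import PgVersion

assert PgVersion("16") is PgVersion.V16      # exact member value
assert PgVersion("v16") is PgVersion.V16     # "v"-prefixed string handled by _missing_
assert PgVersion("160001") is PgVersion.V16  # server version number passed as a digit string
```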
@@ -3,13 +3,9 @@ from __future__ import annotations
import re
import socket
from contextlib import closing
from typing import TYPE_CHECKING

from fixtures.log_helper import log

if TYPE_CHECKING:
from typing import Union

def can_bind(host: str, port: int) -> bool:
"""

@@ -49,17 +45,19 @@ class PortDistributor:
"port range configured for test is exhausted, consider enlarging the range"
)

def replace_with_new_port(self, value: Union[int, str]) -> Union[int, str]:
def replace_with_new_port(self, value: int | str) -> int | str:
"""
Returns a new port for a port number in a string (like "localhost:1234") or int.
Replacements are memorised, so a substitution for the same port is always the same.
"""

# TODO: replace with structural pattern matching for Python >= 3.10
if isinstance(value, int):
return self._replace_port_int(value)

return self._replace_port_str(value)
match value:
case int():
return self._replace_port_int(value)
case str():
return self._replace_port_str(value)
case _:
raise TypeError(f"Unsupported type {type(value)}, should be int | str")

def _replace_port_int(self, value: int) -> int:
known_port = self.port_map.get(value)

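The `match` block above resolves the old "structural pattern matching" TODO: `case int():` and `case str():` are class patterns, so dispatch is still by runtime type, just without the `isinstance` chain. A small self-contained sketch of the same idiom (the function and its behaviour here are illustrative, not the fixture's actual logic):

```python
def bump_port(value: int | str) -> int | str:
    match value:
        case int():  # class pattern: matches any int
            return value + 1
        case str():  # class pattern: matches any str like "host:port"
            host, _, port = value.rpartition(":")
            return f"{host}:{int(port) + 1}"
        case _:
            raise TypeError(f"Unsupported type {type(value)}, should be int | str")

assert bump_port(5432) == 5433
assert bump_port("localhost:5432") == "localhost:5433"
```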
@@ -6,8 +6,9 @@ import json
import os
import re
from dataclasses import dataclass
from enum import StrEnum
from pathlib import Path
from typing import TYPE_CHECKING, Union
from typing import TYPE_CHECKING

import boto3
import toml

@@ -20,7 +21,7 @@ from fixtures.log_helper import log
from fixtures.pageserver.common_types import IndexPartDump

if TYPE_CHECKING:
from typing import Any, Optional
from typing import Any

TIMELINE_INDEX_PART_FILE_NAME = "index_part.json"

@@ -28,7 +29,7 @@ TENANT_HEATMAP_FILE_NAME = "heatmap-v1.json"

@enum.unique
class RemoteStorageUser(str, enum.Enum):
class RemoteStorageUser(StrEnum):
"""
Instead of using strings for the users, use a more strict enum.
"""

@@ -77,21 +78,19 @@ class MockS3Server:
class LocalFsStorage:
root: Path

def tenant_path(self, tenant_id: Union[TenantId, TenantShardId]) -> Path:
def tenant_path(self, tenant_id: TenantId | TenantShardId) -> Path:
return self.root / "tenants" / str(tenant_id)

def timeline_path(
self, tenant_id: Union[TenantId, TenantShardId], timeline_id: TimelineId
) -> Path:
def timeline_path(self, tenant_id: TenantId | TenantShardId, timeline_id: TimelineId) -> Path:
return self.tenant_path(tenant_id) / "timelines" / str(timeline_id)

def timeline_latest_generation(
self, tenant_id: Union[TenantId, TenantShardId], timeline_id: TimelineId
) -> Optional[int]:
self, tenant_id: TenantId | TenantShardId, timeline_id: TimelineId
) -> int | None:
timeline_files = os.listdir(self.timeline_path(tenant_id, timeline_id))
index_parts = [f for f in timeline_files if f.startswith("index_part")]

def parse_gen(filename: str) -> Optional[int]:
def parse_gen(filename: str) -> int | None:
log.info(f"parsing index_part '{filename}'")
parts = filename.split("-")
if len(parts) == 2:

@@ -104,9 +103,7 @@ class LocalFsStorage:
raise RuntimeError(f"No index_part found for {tenant_id}/{timeline_id}")
return generations[-1]

def index_path(
self, tenant_id: Union[TenantId, TenantShardId], timeline_id: TimelineId
) -> Path:
def index_path(self, tenant_id: TenantId | TenantShardId, timeline_id: TimelineId) -> Path:
latest_gen = self.timeline_latest_generation(tenant_id, timeline_id)
if latest_gen is None:
filename = TIMELINE_INDEX_PART_FILE_NAME

@@ -120,7 +117,7 @@ class LocalFsStorage:
tenant_id: TenantId,
timeline_id: TimelineId,
local_name: str,
generation: Optional[int] = None,
generation: int | None = None,
):
if generation is None:
generation = self.timeline_latest_generation(tenant_id, timeline_id)

@@ -130,9 +127,7 @@ class LocalFsStorage:
filename = f"{local_name}-{generation:08x}"
return self.timeline_path(tenant_id, timeline_id) / filename

def index_content(
self, tenant_id: Union[TenantId, TenantShardId], timeline_id: TimelineId
) -> Any:
def index_content(self, tenant_id: TenantId | TenantShardId, timeline_id: TimelineId) -> Any:
with self.index_path(tenant_id, timeline_id).open("r") as f:
return json.load(f)

@@ -164,17 +159,17 @@ class LocalFsStorage:
class S3Storage:
bucket_name: str
bucket_region: str
access_key: Optional[str]
secret_key: Optional[str]
aws_profile: Optional[str]
access_key: str | None
secret_key: str | None
aws_profile: str | None
prefix_in_bucket: str
client: S3Client
cleanup: bool
"""Is this MOCK_S3 (false) or REAL_S3 (true)"""
real: bool
endpoint: Optional[str] = None
endpoint: str | None = None
"""formatting deserialized with humantime crate, for example "1s"."""
custom_timeout: Optional[str] = None
custom_timeout: str | None = None

def access_env_vars(self) -> dict[str, str]:
if self.aws_profile is not None:

@@ -272,12 +267,10 @@ class S3Storage:
def tenants_path(self) -> str:
return f"{self.prefix_in_bucket}/tenants"

def tenant_path(self, tenant_id: Union[TenantShardId, TenantId]) -> str:
def tenant_path(self, tenant_id: TenantShardId | TenantId) -> str:
return f"{self.tenants_path()}/{tenant_id}"

def timeline_path(
self, tenant_id: Union[TenantShardId, TenantId], timeline_id: TimelineId
) -> str:
def timeline_path(self, tenant_id: TenantShardId | TenantId, timeline_id: TimelineId) -> str:
return f"{self.tenant_path(tenant_id)}/timelines/{timeline_id}"

def get_latest_index_key(self, index_keys: list[str]) -> str:

@@ -315,11 +308,11 @@ class S3Storage:
assert self.real is False

RemoteStorage = Union[LocalFsStorage, S3Storage]
RemoteStorage = LocalFsStorage | S3Storage

@enum.unique
class RemoteStorageKind(str, enum.Enum):
class RemoteStorageKind(StrEnum):
LOCAL_FS = "local_fs"
MOCK_S3 = "mock_s3"
|
||||
REAL_S3 = "real_s3"
|
||||
@@ -331,8 +324,8 @@ class RemoteStorageKind(str, enum.Enum):
|
||||
run_id: str,
|
||||
test_name: str,
|
||||
user: RemoteStorageUser,
|
||||
bucket_name: Optional[str] = None,
|
||||
bucket_region: Optional[str] = None,
|
||||
bucket_name: str | None = None,
|
||||
bucket_region: str | None = None,
|
||||
) -> RemoteStorage:
|
||||
if self == RemoteStorageKind.LOCAL_FS:
|
||||
return LocalFsStorage(LocalFsStorage.component_path(repo_dir, user))
|
||||
|
||||
@@ -13,7 +13,7 @@ from fixtures.metrics import Metrics, MetricsGetter, parse_metrics
|
||||
from fixtures.utils import wait_until
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from typing import Any, Optional, Union
|
||||
from typing import Any
|
||||
|
||||
|
||||
# Walreceiver as returned by sk's timeline status endpoint.
|
||||
@@ -72,7 +72,7 @@ class TermBumpResponse:
|
||||
class SafekeeperHttpClient(requests.Session, MetricsGetter):
|
||||
HTTPError = requests.HTTPError
|
||||
|
||||
def __init__(self, port: int, auth_token: Optional[str] = None, is_testing_enabled=False):
|
||||
def __init__(self, port: int, auth_token: str | None = None, is_testing_enabled=False):
|
||||
super().__init__()
|
||||
self.port = port
|
||||
self.auth_token = auth_token
|
||||
@@ -98,7 +98,7 @@ class SafekeeperHttpClient(requests.Session, MetricsGetter):
|
||||
if not self.is_testing_enabled:
|
||||
pytest.skip("safekeeper was built without 'testing' feature")
|
||||
|
||||
def configure_failpoints(self, config_strings: Union[tuple[str, str], list[tuple[str, str]]]):
|
||||
def configure_failpoints(self, config_strings: tuple[str, str] | list[tuple[str, str]]):
|
||||
self.is_testing_enabled_or_skip()
|
||||
|
||||
if isinstance(config_strings, tuple):
|
||||
@@ -195,7 +195,7 @@ class SafekeeperHttpClient(requests.Session, MetricsGetter):
|
||||
assert isinstance(res_json, dict)
|
||||
return res_json
|
||||
|
||||
def debug_dump(self, params: Optional[dict[str, str]] = None) -> dict[str, Any]:
|
||||
def debug_dump(self, params: dict[str, str] | None = None) -> dict[str, Any]:
|
||||
params = params or {}
|
||||
res = self.get(f"http://localhost:{self.port}/v1/debug_dump", params=params)
|
||||
res.raise_for_status()
|
||||
@@ -204,7 +204,7 @@ class SafekeeperHttpClient(requests.Session, MetricsGetter):
|
||||
return res_json
|
||||
|
||||
def debug_dump_timeline(
|
||||
self, timeline_id: TimelineId, params: Optional[dict[str, str]] = None
|
||||
self, timeline_id: TimelineId, params: dict[str, str] | None = None
|
||||
) -> Any:
|
||||
params = params or {}
|
||||
params["timeline_id"] = str(timeline_id)
|
||||
@@ -285,7 +285,7 @@ class SafekeeperHttpClient(requests.Session, MetricsGetter):
|
||||
self,
|
||||
tenant_id: TenantId,
|
||||
timeline_id: TimelineId,
|
||||
term: Optional[int],
|
||||
term: int | None,
|
||||
) -> TermBumpResponse:
|
||||
body = {}
|
||||
if term is not None:
|
||||
|
||||
@@ -13,14 +13,14 @@ from werkzeug.wrappers.response import Response
|
||||
from fixtures.log_helper import log
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from typing import Any, Optional
|
||||
from typing import Any
|
||||
|
||||
|
||||
class StorageControllerProxy:
|
||||
def __init__(self, server: HTTPServer):
|
||||
self.server: HTTPServer = server
|
||||
self.listen: str = f"http://{server.host}:{server.port}"
|
||||
self.routing_to: Optional[str] = None
|
||||
self.routing_to: str | None = None
|
||||
|
||||
def route_to(self, storage_controller_api: str):
|
||||
self.routing_to = storage_controller_api
|
||||
|
||||
@@ -8,10 +8,10 @@ import subprocess
|
||||
import tarfile
|
||||
import threading
|
||||
import time
|
||||
from collections.abc import Iterable
|
||||
from collections.abc import Callable, Iterable
|
||||
from hashlib import sha256
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING, Any, Callable, TypeVar
|
||||
from typing import TYPE_CHECKING, Any, TypeVar
|
||||
from urllib.parse import urlencode
|
||||
|
||||
import allure
|
||||
@@ -29,7 +29,7 @@ from fixtures.pg_version import PgVersion
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from collections.abc import Iterable
|
||||
from typing import IO, Optional
|
||||
from typing import IO
|
||||
|
||||
from fixtures.common_types import TimelineId
|
||||
from fixtures.neon_fixtures import PgBin
|
||||
@@ -66,10 +66,10 @@ def subprocess_capture(
|
||||
echo_stderr: bool = False,
|
||||
echo_stdout: bool = False,
|
||||
capture_stdout: bool = False,
|
||||
timeout: Optional[float] = None,
|
||||
timeout: float | None = None,
|
||||
with_command_header: bool = True,
|
||||
**popen_kwargs: Any,
|
||||
) -> tuple[str, Optional[str], int]:
|
||||
) -> tuple[str, str | None, int]:
|
||||
"""Run a process and bifurcate its output to files and the `log` logger
|
||||
|
||||
stderr and stdout are always captured in files. They are also optionally
|
||||
@@ -536,7 +536,7 @@ def assert_pageserver_backups_equal(left: Path, right: Path, skip_files: set[str
|
||||
"""
|
||||
started_at = time.time()
|
||||
|
||||
def hash_extracted(reader: Optional[IO[bytes]]) -> bytes:
|
||||
def hash_extracted(reader: IO[bytes] | None) -> bytes:
|
||||
assert reader is not None
|
||||
digest = sha256(usedforsecurity=False)
|
||||
while True:
|
||||
@@ -563,7 +563,7 @@ def assert_pageserver_backups_equal(left: Path, right: Path, skip_files: set[str
|
||||
|
||||
mismatching: set[str] = set()
|
||||
|
||||
for left_tuple, right_tuple in zip(left_list, right_list):
|
||||
for left_tuple, right_tuple in zip(left_list, right_list, strict=False):
|
||||
left_path, left_hash = left_tuple
|
||||
right_path, right_hash = right_tuple
|
||||
assert (
|
||||
@@ -595,7 +595,7 @@ class PropagatingThread(threading.Thread):
|
||||
self.exc = e
|
||||
|
||||
@override
|
||||
def join(self, timeout: Optional[float] = None) -> Any:
|
||||
def join(self, timeout: float | None = None) -> Any:
|
||||
super().join(timeout)
|
||||
if self.exc:
|
||||
raise self.exc
|
||||
|
||||
@@ -15,7 +15,7 @@ from fixtures.neon_fixtures import (
|
||||
from fixtures.pageserver.utils import wait_for_last_record_lsn
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from typing import Any, Optional
|
||||
from typing import Any
|
||||
|
||||
# neon_local doesn't handle creating/modifying endpoints concurrently, so we use a mutex
|
||||
# to ensure we don't do that: this enables running lots of Workloads in parallel safely.
|
||||
@@ -36,8 +36,8 @@ class Workload:
|
||||
env: NeonEnv,
|
||||
tenant_id: TenantId,
|
||||
timeline_id: TimelineId,
|
||||
branch_name: Optional[str] = None,
|
||||
endpoint_opts: Optional[dict[str, Any]] = None,
|
||||
branch_name: str | None = None,
|
||||
endpoint_opts: dict[str, Any] | None = None,
|
||||
):
|
||||
self.env = env
|
||||
self.tenant_id = tenant_id
|
||||
@@ -50,7 +50,7 @@ class Workload:
|
||||
self.expect_rows = 0
|
||||
self.churn_cursor = 0
|
||||
|
||||
self._endpoint: Optional[Endpoint] = None
|
||||
self._endpoint: Endpoint | None = None
|
||||
self._endpoint_opts = endpoint_opts or {}
|
||||
|
||||
def reconfigure(self):
|
||||
@@ -61,7 +61,7 @@ class Workload:
|
||||
with ENDPOINT_LOCK:
|
||||
self._endpoint.reconfigure()
|
||||
|
||||
def endpoint(self, pageserver_id: Optional[int] = None) -> Endpoint:
|
||||
def endpoint(self, pageserver_id: int | None = None) -> Endpoint:
|
||||
# We may be running alongside other Workloads for different tenants. Full TTID is
|
||||
# obnoxiously long for use here, but a cut-down version is still unique enough for tests.
|
||||
endpoint_id = f"ep-workload-{str(self.tenant_id)[0:4]}-{str(self.timeline_id)[0:4]}"
|
||||
@@ -94,7 +94,7 @@ class Workload:
|
||||
def __del__(self):
|
||||
self.stop()
|
||||
|
||||
def init(self, pageserver_id: Optional[int] = None):
|
||||
def init(self, pageserver_id: int | None = None):
|
||||
endpoint = self.endpoint(pageserver_id)
|
||||
|
||||
endpoint.safe_psql(f"CREATE TABLE {self.table} (id INTEGER PRIMARY KEY, val text);")
|
||||
@@ -103,7 +103,7 @@ class Workload:
|
||||
self.env, endpoint, self.tenant_id, self.timeline_id, pageserver_id=pageserver_id
|
||||
)
|
||||
|
||||
def write_rows(self, n: int, pageserver_id: Optional[int] = None, upload: bool = True):
|
||||
def write_rows(self, n: int, pageserver_id: int | None = None, upload: bool = True):
|
||||
endpoint = self.endpoint(pageserver_id)
|
||||
start = self.expect_rows
|
||||
end = start + n - 1
|
||||
@@ -125,7 +125,7 @@ class Workload:
|
||||
return False
|
||||
|
||||
def churn_rows(
|
||||
self, n: int, pageserver_id: Optional[int] = None, upload: bool = True, ingest: bool = True
|
||||
self, n: int, pageserver_id: int | None = None, upload: bool = True, ingest: bool = True
|
||||
):
|
||||
assert self.expect_rows >= n
|
||||
|
||||
@@ -190,7 +190,7 @@ class Workload:
|
||||
else:
|
||||
log.info(f"Churn: not waiting for upload, disk LSN {last_flush_lsn}")
|
||||
|
||||
def validate(self, pageserver_id: Optional[int] = None):
|
||||
def validate(self, pageserver_id: int | None = None):
|
||||
endpoint = self.endpoint(pageserver_id)
|
||||
endpoint.clear_shared_buffers()
|
||||
result = endpoint.safe_psql(f"SELECT COUNT(*) FROM {self.table}")
|
||||
|
||||
@@ -16,7 +16,8 @@ from fixtures.neon_fixtures import (
|
||||
from fixtures.pageserver.utils import wait_until_all_tenants_state
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from typing import Any, Callable, Optional
|
||||
from collections.abc import Callable
|
||||
from typing import Any
|
||||
|
||||
|
||||
def ensure_pageserver_ready_for_benchmarking(env: NeonEnv, n_tenants: int):
|
||||
@@ -46,7 +47,7 @@ def setup_pageserver_with_tenants(
|
||||
name: str,
|
||||
n_tenants: int,
|
||||
setup: Callable[[NeonEnv], tuple[TenantId, TimelineId, dict[str, Any]]],
|
||||
timeout_in_seconds: Optional[int] = None,
|
||||
timeout_in_seconds: int | None = None,
|
||||
) -> NeonEnv:
|
||||
"""
|
||||
Utility function to set up a pageserver with a given number of identical tenants.
|
||||
|
||||
@@ -2,7 +2,7 @@ from __future__ import annotations
|
||||
|
||||
from contextlib import closing
|
||||
from io import BufferedReader, RawIOBase
|
||||
from typing import Optional, final
|
||||
from typing import final
|
||||
|
||||
from fixtures.compare_fixtures import PgCompare
|
||||
from typing_extensions import override
|
||||
@@ -13,7 +13,7 @@ class CopyTestData(RawIOBase):
|
||||
def __init__(self, rows: int):
|
||||
self.rows = rows
|
||||
self.rownum = 0
|
||||
self.linebuf: Optional[bytes] = None
|
||||
self.linebuf: bytes | None = None
|
||||
self.ptr = 0
|
||||
|
||||
@override
|
||||
|
||||
@@ -18,7 +18,7 @@ from fixtures.neon_api import connection_parameters_to_env
|
||||
from fixtures.pg_version import PgVersion
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from typing import Any, Optional
|
||||
from typing import Any
|
||||
|
||||
from fixtures.benchmark_fixture import NeonBenchmarker
|
||||
from fixtures.neon_api import NeonAPI
|
||||
@@ -247,7 +247,7 @@ def test_replication_start_stop(
|
||||
],
|
||||
env=master_env,
|
||||
)
|
||||
replica_pgbench: list[Optional[subprocess.Popen[Any]]] = [None for _ in range(num_replicas)]
|
||||
replica_pgbench: list[subprocess.Popen[Any] | None] = [None] * num_replicas
|
||||
|
||||
# Use the bits of iconfig to tell us which configuration we are on. For example
|
||||
# a iconfig of 2 is 10 in binary, indicating replica 0 is suspended and replica 1 is
|
||||
|
||||
@@ -4,7 +4,7 @@ import concurrent.futures
|
||||
import random
|
||||
import time
|
||||
from collections import defaultdict
|
||||
from enum import Enum
|
||||
from enum import StrEnum
|
||||
|
||||
import pytest
|
||||
from fixtures.common_types import TenantId, TenantShardId, TimelineArchivalState, TimelineId
|
||||
@@ -139,7 +139,7 @@ def test_storage_controller_many_tenants(
|
||||
tenant_timelines_count = 100
|
||||
|
||||
# These lists are maintained for use with rng.choice
|
||||
tenants_with_timelines = list(rng.sample(tenants.keys(), tenant_timelines_count))
|
||||
tenants_with_timelines = list(rng.sample(list(tenants.keys()), tenant_timelines_count))
|
||||
tenants_without_timelines = list(
|
||||
tenant_id for tenant_id in tenants if tenant_id not in tenants_with_timelines
|
||||
)
|
||||
@@ -171,7 +171,7 @@ def test_storage_controller_many_tenants(
|
||||
# start timing on test nodes if we aren't a bit careful.
|
||||
create_concurrency = 16
|
||||
|
||||
class Operation(str, Enum):
|
||||
class Operation(StrEnum):
|
||||
TIMELINE_OPS = "timeline_ops"
|
||||
SHARD_MIGRATE = "shard_migrate"
|
||||
TENANT_PASSTHROUGH = "tenant_passthrough"
|
||||
|
||||
@@ -17,7 +17,8 @@ from fixtures.neon_fixtures import NeonEnvBuilder, PgBin, flush_ep_to_pageserver
|
||||
from performance.test_perf_pgbench import get_durations_matrix, get_scales_matrix
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from typing import Any, Callable
|
||||
from collections.abc import Callable
|
||||
from typing import Any
|
||||
|
||||
|
||||
@pytest.fixture(params=["vanilla", "neon_off", "neon_on"])
|
||||
|
||||
@@ -2,7 +2,6 @@ from __future__ import annotations
|
||||
|
||||
from collections.abc import Generator
|
||||
from dataclasses import dataclass
|
||||
from typing import Optional
|
||||
|
||||
import pytest
|
||||
from fixtures.common_types import TenantId
|
||||
@@ -105,7 +104,7 @@ def test_null_config(negative_env: NegativeTests):
|
||||
|
||||
|
||||
@pytest.mark.parametrize("content_type", [None, "application/json"])
|
||||
def test_empty_config(positive_env: NeonEnv, content_type: Optional[str]):
|
||||
def test_empty_config(positive_env: NeonEnv, content_type: str | None):
|
||||
"""
|
||||
When the 'config' body attribute is omitted, the request should be accepted
|
||||
and the tenant should use the default configuration
|
||||
|
||||
@@ -1,9 +1,8 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import enum
|
||||
import json
|
||||
import time
|
||||
from typing import TYPE_CHECKING
|
||||
from enum import StrEnum
|
||||
|
||||
import pytest
|
||||
from fixtures.log_helper import log
|
||||
@@ -15,10 +14,6 @@ from fixtures.pageserver.http import PageserverApiException
|
||||
from fixtures.utils import skip_in_debug_build, wait_until
|
||||
from fixtures.workload import Workload
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from typing import Optional
|
||||
|
||||
|
||||
AGGRESIVE_COMPACTION_TENANT_CONF = {
|
||||
# Disable gc and compaction. The test runs compaction manually.
|
||||
"gc_period": "0s",
|
||||
@@ -172,7 +167,7 @@ LARGE_STRIPES = 32768
|
||||
def test_sharding_compaction(
|
||||
neon_env_builder: NeonEnvBuilder,
|
||||
stripe_size: int,
|
||||
shard_count: Optional[int],
|
||||
shard_count: int | None,
|
||||
gc_compaction: bool,
|
||||
):
|
||||
"""
|
||||
@@ -277,7 +272,7 @@ def test_sharding_compaction(
|
||||
)
|
||||
|
||||
|
||||
class CompactionAlgorithm(str, enum.Enum):
|
||||
class CompactionAlgorithm(StrEnum):
|
||||
LEGACY = "legacy"
|
||||
TIERED = "tiered"
|
||||
|
||||
|
||||
@@ -7,7 +7,6 @@ import subprocess
|
||||
import tempfile
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import fixtures.utils
|
||||
import pytest
|
||||
@@ -28,10 +27,6 @@ from fixtures.pg_version import PgVersion
|
||||
from fixtures.remote_storage import RemoteStorageKind, S3Storage, s3_storage
|
||||
from fixtures.workload import Workload
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from typing import Optional
|
||||
|
||||
|
||||
#
|
||||
# A test suite that help to prevent unintentionally breaking backward or forward compatibility between Neon releases.
|
||||
# - `test_create_snapshot` a script wrapped in a test that creates a data snapshot.
|
||||
@@ -385,7 +380,7 @@ def check_neon_works(env: NeonEnv, test_output_dir: Path, sql_dump_path: Path, r
|
||||
|
||||
|
||||
def dump_differs(
|
||||
first: Path, second: Path, output: Path, allowed_diffs: Optional[list[str]] = None
|
||||
first: Path, second: Path, output: Path, allowed_diffs: list[str] | None = None
|
||||
) -> bool:
|
||||
"""
|
||||
Runs diff(1) command on two SQL dumps and write the output to the given output file.
|
||||
|
||||
@@ -3,6 +3,7 @@ from __future__ import annotations
|
||||
import enum
|
||||
import os
|
||||
import shutil
|
||||
from enum import StrEnum
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING, cast
|
||||
|
||||
@@ -16,7 +17,7 @@ from fixtures.paths import BASE_DIR, COMPUTE_CONFIG_DIR
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from types import TracebackType
|
||||
from typing import Optional, TypedDict, Union
|
||||
from typing import TypedDict
|
||||
|
||||
from fixtures.neon_fixtures import NeonEnv
|
||||
from fixtures.pg_version import PgVersion
|
||||
@@ -26,15 +27,15 @@ if TYPE_CHECKING:
|
||||
metric_name: str
|
||||
type: str
|
||||
help: str
|
||||
key_labels: Optional[list[str]]
|
||||
values: Optional[list[str]]
|
||||
query: Optional[str]
|
||||
query_ref: Optional[str]
|
||||
key_labels: list[str] | None
|
||||
values: list[str] | None
|
||||
query: str | None
|
||||
query_ref: str | None
|
||||
|
||||
class Collector(TypedDict):
|
||||
collector_name: str
|
||||
metrics: list[Metric]
|
||||
queries: Optional[list[Query]]
|
||||
queries: list[Query] | None
|
||||
|
||||
class Query(TypedDict):
|
||||
query_name: str
|
||||
@@ -53,12 +54,12 @@ def __import_callback(dir: str, rel: str) -> tuple[str, bytes]:
|
||||
if not rel:
|
||||
raise RuntimeError("Empty filename")
|
||||
|
||||
full_path: Optional[str] = None
|
||||
full_path: str | None = None
|
||||
if os.path.isabs(rel):
|
||||
full_path = rel
|
||||
else:
|
||||
for p in (dir, *JSONNET_PATH):
|
||||
assert isinstance(p, (str, Path)), "for mypy"
|
||||
assert isinstance(p, str | Path), "for mypy"
|
||||
full_path = os.path.join(p, rel)
|
||||
|
||||
assert isinstance(full_path, str), "for mypy"
|
||||
@@ -82,9 +83,9 @@ def __import_callback(dir: str, rel: str) -> tuple[str, bytes]:
|
||||
|
||||
|
||||
def jsonnet_evaluate_file(
|
||||
jsonnet_file: Union[str, Path],
|
||||
ext_vars: Optional[Union[str, dict[str, str]]] = None,
|
||||
tla_vars: Optional[Union[str, dict[str, str]]] = None,
|
||||
jsonnet_file: str | Path,
|
||||
ext_vars: str | dict[str, str] | None = None,
|
||||
tla_vars: str | dict[str, str] | None = None,
|
||||
) -> str:
|
||||
return cast(
|
||||
"str",
|
||||
@@ -102,7 +103,7 @@ def evaluate_collector(jsonnet_file: Path, pg_version: PgVersion) -> str:
|
||||
|
||||
|
||||
def evaluate_config(
|
||||
jsonnet_file: Path, collector_name: str, collector_file: Union[str, Path], connstr: str
|
||||
jsonnet_file: Path, collector_name: str, collector_file: str | Path, connstr: str
|
||||
) -> str:
|
||||
return jsonnet_evaluate_file(
|
||||
jsonnet_file,
|
||||
@@ -115,7 +116,7 @@ def evaluate_config(
|
||||
|
||||
|
||||
@enum.unique
|
||||
class SqlExporterProcess(str, enum.Enum):
|
||||
class SqlExporterProcess(StrEnum):
|
||||
COMPUTE = "compute"
|
||||
AUTOSCALING = "autoscaling"
|
||||
|
||||
@@ -191,9 +192,9 @@ class SqlExporterRunner:
|
||||
|
||||
def __exit__(
|
||||
self,
|
||||
exc_type: Optional[type[BaseException]],
|
||||
exc: Optional[BaseException],
|
||||
tb: Optional[TracebackType],
|
||||
exc_type: type[BaseException] | None,
|
||||
exc: BaseException | None,
|
||||
tb: TracebackType | None,
|
||||
):
|
||||
self.stop()
|
||||
|
||||
|
||||
@@ -13,7 +13,7 @@ from werkzeug.wrappers.request import Request
|
||||
from werkzeug.wrappers.response import Response
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from typing import Any, Optional
|
||||
from typing import Any
|
||||
|
||||
|
||||
def handle_db(dbs, roles, operation):
|
||||
@@ -97,9 +97,9 @@ class DdlForwardingContext:
|
||||
|
||||
def __exit__(
|
||||
self,
|
||||
exc_type: Optional[type[BaseException]],
|
||||
exc: Optional[BaseException],
|
||||
tb: Optional[TracebackType],
|
||||
exc_type: type[BaseException] | None,
|
||||
exc: BaseException | None,
|
||||
tb: TracebackType | None,
|
||||
):
|
||||
self.pg.stop()
|
||||
|
||||
|
||||
@@ -5,6 +5,7 @@ import time
|
||||
from collections import Counter
|
||||
from collections.abc import Iterable
|
||||
from dataclasses import dataclass
|
||||
from enum import StrEnum
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import pytest
|
||||
@@ -80,7 +81,7 @@ def test_min_resident_size_override_handling(
|
||||
|
||||
|
||||
@enum.unique
|
||||
class EvictionOrder(str, enum.Enum):
|
||||
class EvictionOrder(StrEnum):
|
||||
RELATIVE_ORDER_EQUAL = "relative_equal"
|
||||
RELATIVE_ORDER_SPARE = "relative_spare"
|
||||
|
||||
|
||||
@@ -2,16 +2,12 @@ from __future__ import annotations
|
||||
|
||||
from collections.abc import Iterable
|
||||
from dataclasses import dataclass
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from fixtures.log_helper import log
|
||||
from fixtures.neon_fixtures import NeonEnvBuilder, wait_for_last_flush_lsn
|
||||
from fixtures.pageserver.http import HistoricLayerInfo, LayerMapInfo
|
||||
from fixtures.utils import human_bytes, skip_in_debug_build
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from typing import Union
|
||||
|
||||
|
||||
@skip_in_debug_build("debug run is unnecessarily slow")
|
||||
def test_ingesting_large_batches_of_images(neon_env_builder: NeonEnvBuilder):
|
||||
@@ -109,14 +105,12 @@ def test_ingesting_large_batches_of_images(neon_env_builder: NeonEnvBuilder):
|
||||
|
||||
@dataclass
|
||||
class Histogram:
|
||||
buckets: list[Union[int, float]]
|
||||
buckets: list[int | float]
|
||||
counts: list[int]
|
||||
sums: list[int]
|
||||
|
||||
|
||||
def histogram_historic_layers(
|
||||
infos: LayerMapInfo, minimum_sizes: list[Union[int, float]]
|
||||
) -> Histogram:
|
||||
def histogram_historic_layers(infos: LayerMapInfo, minimum_sizes: list[int | float]) -> Histogram:
|
||||
def log_layer(layer: HistoricLayerInfo) -> HistoricLayerInfo:
|
||||
log.info(
|
||||
f"{layer.layer_file_name} {human_bytes(layer.layer_file_size)} ({layer.layer_file_size} bytes)"
|
||||
@@ -128,7 +122,7 @@ def histogram_historic_layers(
|
||||
return histogram(sizes, minimum_sizes)
|
||||
|
||||
|
||||
def histogram(sizes: Iterable[int], minimum_sizes: list[Union[int, float]]) -> Histogram:
|
||||
def histogram(sizes: Iterable[int], minimum_sizes: list[int | float]) -> Histogram:
|
||||
assert all(minimum_sizes[i] < minimum_sizes[i + 1] for i in range(len(minimum_sizes) - 1))
|
||||
buckets = list(enumerate(minimum_sizes))
|
||||
counts = [0 for _ in buckets]
|
||||
|
||||
@@ -3,7 +3,7 @@ from __future__ import annotations
|
||||
import re
|
||||
import time
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from datetime import UTC, datetime, timedelta
|
||||
|
||||
import pytest
|
||||
from fixtures.common_types import Lsn
|
||||
@@ -207,7 +207,7 @@ def test_ts_of_lsn_api(neon_env_builder: NeonEnvBuilder):
|
||||
for i in range(1000):
|
||||
cur.execute("INSERT INTO foo VALUES(%s)", (i,))
|
||||
# Get the timestamp at UTC
|
||||
after_timestamp = query_scalar(cur, "SELECT clock_timestamp()").replace(tzinfo=timezone.utc)
|
||||
after_timestamp = query_scalar(cur, "SELECT clock_timestamp()").replace(tzinfo=UTC)
|
||||
after_lsn = query_scalar(cur, "SELECT pg_current_wal_lsn()")
|
||||
tbl.append([i, after_timestamp, after_lsn])
|
||||
time.sleep(0.02)
|
||||
@@ -273,11 +273,7 @@ def test_ts_of_lsn_api(neon_env_builder: NeonEnvBuilder):
|
||||
)
|
||||
log.info("result: %s, after_ts: %s", result, after_timestamp)
|
||||
|
||||
# TODO use fromisoformat once we have Python 3.11+
|
||||
# which has https://github.com/python/cpython/pull/92177
|
||||
timestamp = datetime.strptime(result, "%Y-%m-%dT%H:%M:%S.%f000Z").replace(
|
||||
tzinfo=timezone.utc
|
||||
)
|
||||
timestamp = datetime.fromisoformat(result).replace(tzinfo=UTC)
|
||||
assert timestamp < after_timestamp, "after_timestamp after timestamp"
|
||||
if i > 1:
|
||||
before_timestamp = tbl[i - step_size][1]
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Optional
|
||||
|
||||
import pytest
|
||||
from fixtures.common_types import Lsn
|
||||
from fixtures.log_helper import log
|
||||
@@ -13,7 +11,7 @@ from fixtures.utils import query_scalar
|
||||
# Test on-demand download of the pg_xact SLRUs
|
||||
#
|
||||
@pytest.mark.parametrize("shard_count", [None, 4])
|
||||
def test_ondemand_download_pg_xact(neon_env_builder: NeonEnvBuilder, shard_count: Optional[int]):
|
||||
def test_ondemand_download_pg_xact(neon_env_builder: NeonEnvBuilder, shard_count: int | None):
|
||||
if shard_count is not None:
|
||||
neon_env_builder.num_pageservers = shard_count
|
||||
|
||||
@@ -79,7 +77,7 @@ def test_ondemand_download_pg_xact(neon_env_builder: NeonEnvBuilder, shard_count
|
||||
|
||||
|
||||
@pytest.mark.parametrize("shard_count", [None, 4])
|
||||
def test_ondemand_download_replica(neon_env_builder: NeonEnvBuilder, shard_count: Optional[int]):
|
||||
def test_ondemand_download_replica(neon_env_builder: NeonEnvBuilder, shard_count: int | None):
|
||||
if shard_count is not None:
|
||||
neon_env_builder.num_pageservers = shard_count
|
||||
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Optional
|
||||
|
||||
from fixtures.common_types import Lsn, TenantId, TimelineId
|
||||
from fixtures.neon_fixtures import (
|
||||
DEFAULT_BRANCH_NAME,
|
||||
@@ -82,7 +80,7 @@ def expect_updated_msg_lsn(
|
||||
client: PageserverHttpClient,
|
||||
tenant_id: TenantId,
|
||||
timeline_id: TimelineId,
|
||||
prev_msg_lsn: Optional[Lsn],
|
||||
prev_msg_lsn: Lsn | None,
|
||||
) -> Lsn:
|
||||
timeline_details = client.timeline_detail(tenant_id, timeline_id=timeline_id)
|
||||
|
||||
|
||||
@@ -11,11 +11,10 @@ of the pageserver are:
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import enum
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
from typing import TYPE_CHECKING
|
||||
from enum import StrEnum
|
||||
|
||||
import pytest
|
||||
from fixtures.common_types import TenantId, TimelineId
|
||||
@@ -41,10 +40,6 @@ from fixtures.remote_storage import (
|
||||
from fixtures.utils import run_only_on_default_postgres, wait_until
|
||||
from fixtures.workload import Workload
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from typing import Optional
|
||||
|
||||
|
||||
# A tenant configuration that is convenient for generating uploads and deletions
|
||||
# without a large amount of postgres traffic.
|
||||
TENANT_CONF = {
|
||||
@@ -65,7 +60,7 @@ TENANT_CONF = {
|
||||
|
||||
|
||||
def read_all(
|
||||
env: NeonEnv, tenant_id: Optional[TenantId] = None, timeline_id: Optional[TimelineId] = None
|
||||
env: NeonEnv, tenant_id: TenantId | None = None, timeline_id: TimelineId | None = None
|
||||
):
|
||||
if tenant_id is None:
|
||||
tenant_id = env.initial_tenant
|
||||
@@ -286,12 +281,12 @@ def test_deferred_deletion(neon_env_builder: NeonEnvBuilder):
|
||||
assert get_deletion_queue_unexpected_errors(ps_http) == 0
|
||||
|
||||
|
||||
class KeepAttachment(str, enum.Enum):
|
||||
class KeepAttachment(StrEnum):
|
||||
KEEP = "keep"
|
||||
LOSE = "lose"
|
||||
|
||||
|
||||
class ValidateBefore(str, enum.Enum):
|
||||
class ValidateBefore(StrEnum):
|
||||
VALIDATE = "validate"
|
||||
NO_VALIDATE = "no-validate"
|
||||
|
||||
|
||||
@@ -2,7 +2,6 @@ from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import time
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import psutil
|
||||
import pytest
|
||||
@@ -17,17 +16,13 @@ from fixtures.pageserver.http import PageserverHttpClient
|
||||
from fixtures.pageserver.utils import wait_for_last_record_lsn, wait_for_upload
|
||||
from fixtures.utils import skip_in_debug_build, wait_until
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from typing import Optional
|
||||
|
||||
|
||||
TIMELINE_COUNT = 10
|
||||
ENTRIES_PER_TIMELINE = 10_000
|
||||
CHECKPOINT_TIMEOUT_SECONDS = 60
|
||||
|
||||
|
||||
async def run_worker_for_tenant(
|
||||
env: NeonEnv, entries: int, tenant: TenantId, offset: Optional[int] = None
|
||||
env: NeonEnv, entries: int, tenant: TenantId, offset: int | None = None
|
||||
) -> Lsn:
|
||||
if offset is None:
|
||||
offset = 0
|
||||
|
||||
@@ -2,7 +2,6 @@ from __future__ import annotations
|
||||
|
||||
import random
|
||||
from contextlib import closing
|
||||
from typing import Optional
|
||||
|
||||
import pytest
|
||||
from fixtures.log_helper import log
|
||||
@@ -156,7 +155,7 @@ def test_pageserver_restart(neon_env_builder: NeonEnvBuilder):
|
||||
@pytest.mark.timeout(540)
|
||||
@pytest.mark.parametrize("shard_count", [None, 4])
|
||||
@skip_in_debug_build("times out in debug builds")
|
||||
def test_pageserver_chaos(neon_env_builder: NeonEnvBuilder, shard_count: Optional[int]):
|
||||
def test_pageserver_chaos(neon_env_builder: NeonEnvBuilder, shard_count: int | None):
|
||||
# same rationale as with the immediate stop; we might leave orphan layers behind.
|
||||
neon_env_builder.disable_scrub_on_exit()
|
||||
neon_env_builder.enable_pageserver_remote_storage(s3_storage())
|
||||
|
||||
@@ -23,7 +23,7 @@ from werkzeug.wrappers.request import Request
|
||||
from werkzeug.wrappers.response import Response
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from typing import Any, Optional, Union
|
||||
from typing import Any
|
||||
|
||||
|
||||
# A tenant configuration that is convenient for generating uploads and deletions
|
||||
@@ -199,7 +199,7 @@ def test_location_conf_churn(neon_env_builder: NeonEnvBuilder, make_httpserver,
|
||||
# state if it was running attached with a stale generation
|
||||
last_state[pageserver.id] = ("Detached", None)
|
||||
else:
|
||||
secondary_conf: Optional[dict[str, Any]] = None
|
||||
secondary_conf: dict[str, Any] | None = None
|
||||
if mode == "Secondary":
|
||||
secondary_conf = {"warm": rng.choice([True, False])}
|
||||
|
||||
@@ -469,7 +469,7 @@ def test_heatmap_uploads(neon_env_builder: NeonEnvBuilder):
|
||||
|
||||
|
||||
def list_elegible_layers(
|
||||
pageserver, tenant_id: Union[TenantId, TenantShardId], timeline_id: TimelineId
|
||||
pageserver, tenant_id: TenantId | TenantShardId, timeline_id: TimelineId
|
||||
) -> list[Path]:
|
||||
"""
|
||||
The subset of layer filenames that are elegible for secondary download: at time of writing this
|
||||
|
||||
@@ -21,8 +21,6 @@ from fixtures.remote_storage import s3_storage
|
||||
from fixtures.utils import skip_in_debug_build
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from typing import Optional
|
||||
|
||||
from fixtures.neon_fixtures import PgBin
|
||||
from pytest import CaptureFixture
|
||||
|
||||
@@ -48,7 +46,7 @@ def post_checks(env: NeonEnv, test_output_dir: Path, db_name: str, endpoint: End
|
||||
data properly.
|
||||
"""
|
||||
|
||||
ignored_files: Optional[list[str]] = None
|
||||
ignored_files: list[str] | None = None
|
||||
|
||||
# Neon handles unlogged relations in a special manner. During a
|
||||
# basebackup, we ship the init fork as the main fork. This presents a
|
||||
@@ -131,7 +129,7 @@ def test_pg_regress(
|
||||
capsys: CaptureFixture[str],
|
||||
base_dir: Path,
|
||||
pg_distrib_dir: Path,
|
||||
shard_count: Optional[int],
|
||||
shard_count: int | None,
|
||||
):
|
||||
DBNAME = "regression"
|
||||
|
||||
@@ -205,7 +203,7 @@ def test_isolation(
|
||||
capsys: CaptureFixture[str],
|
||||
base_dir: Path,
|
||||
pg_distrib_dir: Path,
|
||||
shard_count: Optional[int],
|
||||
shard_count: int | None,
|
||||
):
|
||||
DBNAME = "isolation_regression"
|
||||
|
||||
@@ -274,7 +272,7 @@ def test_sql_regress(
|
||||
capsys: CaptureFixture[str],
|
||||
base_dir: Path,
|
||||
pg_distrib_dir: Path,
|
||||
shard_count: Optional[int],
|
||||
shard_count: int | None,
|
||||
):
|
||||
DBNAME = "regression"
|
||||
|
||||
|
||||
@@ -13,7 +13,7 @@ import requests
|
||||
from fixtures.neon_fixtures import PSQL, NeonProxy, VanillaPostgres
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from typing import Any, Optional
|
||||
from typing import Any
|
||||
|
||||
|
||||
GET_CONNECTION_PID_QUERY = "SELECT pid FROM pg_stat_activity WHERE state = 'active'"
|
||||
@@ -228,7 +228,7 @@ def test_sql_over_http_serverless_driver(static_proxy: NeonProxy):
|
||||
def test_sql_over_http(static_proxy: NeonProxy):
|
||||
static_proxy.safe_psql("create role http with login password 'http' superuser")
|
||||
|
||||
def q(sql: str, params: Optional[list[Any]] = None) -> Any:
|
||||
def q(sql: str, params: list[Any] | None = None) -> Any:
|
||||
params = params or []
|
||||
connstr = f"postgresql://http:http@{static_proxy.domain}:{static_proxy.proxy_port}/postgres"
|
||||
response = requests.post(
|
||||
@@ -291,7 +291,7 @@ def test_sql_over_http_db_name_with_space(static_proxy: NeonProxy):
|
||||
)
|
||||
)
|
||||
|
||||
def q(sql: str, params: Optional[list[Any]] = None) -> Any:
|
||||
def q(sql: str, params: list[Any] | None = None) -> Any:
|
||||
params = params or []
|
||||
connstr = f"postgresql://http:http@{static_proxy.domain}:{static_proxy.proxy_port}/{urllib.parse.quote(db)}"
|
||||
response = requests.post(
|
||||
@@ -310,7 +310,7 @@ def test_sql_over_http_db_name_with_space(static_proxy: NeonProxy):
|
||||
def test_sql_over_http_output_options(static_proxy: NeonProxy):
|
||||
static_proxy.safe_psql("create role http2 with login password 'http2' superuser")
|
||||
|
||||
def q(sql: str, raw_text: bool, array_mode: bool, params: Optional[list[Any]] = None) -> Any:
|
||||
def q(sql: str, raw_text: bool, array_mode: bool, params: list[Any] | None = None) -> Any:
|
||||
params = params or []
|
||||
connstr = (
|
||||
f"postgresql://http2:http2@{static_proxy.domain}:{static_proxy.proxy_port}/postgres"
|
||||
@@ -346,7 +346,7 @@ def test_sql_over_http_batch(static_proxy: NeonProxy):
|
||||
static_proxy.safe_psql("create role http with login password 'http' superuser")
|
||||
|
||||
def qq(
|
||||
queries: list[tuple[str, Optional[list[Any]]]],
|
||||
queries: list[tuple[str, list[Any] | None]],
|
||||
read_only: bool = False,
|
||||
deferrable: bool = False,
|
||||
) -> Any:
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import time
|
||||
from typing import Union
|
||||
|
||||
import pytest
|
||||
from fixtures.common_types import Lsn, TenantId, TenantShardId, TimelineId
|
||||
@@ -175,7 +174,7 @@ def test_readonly_node_gc(neon_env_builder: NeonEnvBuilder):
|
||||
|
||||
def get_layers_protected_by_lease(
|
||||
ps_http: PageserverHttpClient,
|
||||
tenant_id: Union[TenantId, TenantShardId],
|
||||
tenant_id: TenantId | TenantShardId,
|
||||
timeline_id: TimelineId,
|
||||
lease_lsn: Lsn,
|
||||
) -> set[str]:
|
||||
|
||||
@@ -5,7 +5,6 @@ import queue
|
||||
import shutil
|
||||
import threading
|
||||
import time
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import pytest
|
||||
from fixtures.common_types import Lsn, TenantId, TimelineId
|
||||
@@ -37,9 +36,6 @@ from fixtures.utils import (
|
||||
)
|
||||
from requests import ReadTimeout
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from typing import Optional
|
||||
|
||||
|
||||
#
|
||||
# Tests that a piece of data is backed up and restored correctly:
|
||||
@@ -452,7 +448,7 @@ def test_remote_timeline_client_calls_started_metric(
|
||||
for (file_kind, op_kind), observations in calls_started.items():
|
||||
log.info(f"ensure_calls_started_grew: {file_kind} {op_kind}: {observations}")
|
||||
assert all(
|
||||
x < y for x, y in zip(observations, observations[1:])
|
||||
x < y for x, y in zip(observations, observations[1:], strict=False)
|
||||
), f"observations for {file_kind} {op_kind} did not grow monotonically: {observations}"
|
||||
|
||||
def churn(data_pass1, data_pass2):
|
||||
@@ -731,7 +727,7 @@ def test_empty_branch_remote_storage_upload_on_restart(neon_env_builder: NeonEnv
|
||||
# sleep a bit to force the upload task go into exponential backoff
|
||||
time.sleep(1)
|
||||
|
||||
q: queue.Queue[Optional[PageserverApiException]] = queue.Queue()
|
||||
q: queue.Queue[PageserverApiException | None] = queue.Queue()
|
||||
barrier = threading.Barrier(2)
|
||||
|
||||
def create_in_background():
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import time
|
||||
from datetime import datetime, timezone
|
||||
from datetime import UTC, datetime
|
||||
|
||||
from fixtures.common_types import Lsn
|
||||
from fixtures.log_helper import log
|
||||
@@ -77,7 +77,7 @@ def test_tenant_s3_restore(
|
||||
|
||||
# These sleeps are important because they fend off differences in clocks between us and S3
|
||||
time.sleep(4)
|
||||
ts_before_deletion = datetime.now(tz=timezone.utc).replace(tzinfo=None)
|
||||
ts_before_deletion = datetime.now(tz=UTC).replace(tzinfo=None)
|
||||
time.sleep(4)
|
||||
|
||||
assert (
|
||||
@@ -104,7 +104,7 @@ def test_tenant_s3_restore(
|
||||
)
|
||||
|
||||
time.sleep(4)
|
||||
ts_after_deletion = datetime.now(tz=timezone.utc).replace(tzinfo=None)
|
||||
ts_after_deletion = datetime.now(tz=UTC).replace(tzinfo=None)
|
||||
time.sleep(4)
|
||||
|
||||
ps_http.tenant_time_travel_remote_storage(
|
||||
|
||||
@@ -3,7 +3,7 @@ from __future__ import annotations
|
||||
import os
|
||||
import time
|
||||
from collections import defaultdict
|
||||
from typing import TYPE_CHECKING, Any
|
||||
from typing import Any
|
||||
|
||||
import pytest
|
||||
import requests
|
||||
@@ -27,9 +27,6 @@ from typing_extensions import override
|
||||
from werkzeug.wrappers.request import Request
|
||||
from werkzeug.wrappers.response import Response
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from typing import Optional, Union
|
||||
|
||||
|
||||
def test_sharding_smoke(
|
||||
neon_env_builder: NeonEnvBuilder,
|
||||
@@ -189,7 +186,7 @@ def test_sharding_split_unsharded(
|
||||
],
|
||||
)
|
||||
def test_sharding_split_compaction(
|
||||
neon_env_builder: NeonEnvBuilder, failpoint: Optional[str], build_type: str
|
||||
neon_env_builder: NeonEnvBuilder, failpoint: str | None, build_type: str
|
||||
):
|
||||
"""
|
||||
Test that after a split, we clean up parent layer data in the child shards via compaction.
|
||||
@@ -782,7 +779,7 @@ def test_sharding_split_stripe_size(
|
||||
tenant_id = env.initial_tenant
|
||||
|
||||
assert len(notifications) == 1
|
||||
expect: dict[str, Union[list[dict[str, int]], str, None, int]] = {
|
||||
expect: dict[str, list[dict[str, int]] | str | None | int] = {
|
||||
"tenant_id": str(env.initial_tenant),
|
||||
"stripe_size": None,
|
||||
"shards": [{"node_id": int(env.pageservers[0].id), "shard_number": 0}],
|
||||
@@ -798,7 +795,7 @@ def test_sharding_split_stripe_size(
|
||||
# Check that we ended up with the stripe size that we expected, both on the pageserver
|
||||
# and in the notifications to compute
|
||||
assert len(notifications) == 2
|
||||
expect_after: dict[str, Union[list[dict[str, int]], str, None, int]] = {
|
||||
expect_after: dict[str, list[dict[str, int]] | str | None | int] = {
|
||||
"tenant_id": str(env.initial_tenant),
|
||||
"stripe_size": new_stripe_size,
|
||||
"shards": [
|
||||
@@ -1046,7 +1043,7 @@ def test_sharding_ingest_gaps(
|
||||
|
||||
|
||||
class Failure:
|
||||
pageserver_id: Optional[int]
|
||||
pageserver_id: int | None
|
||||
|
||||
def apply(self, env: NeonEnv):
|
||||
raise NotImplementedError()
|
||||
@@ -1370,7 +1367,7 @@ def test_sharding_split_failures(
|
||||
|
||||
assert attached_count == initial_shard_count
|
||||
|
||||
def assert_split_done(exclude_ps_id: Optional[int] = None) -> None:
|
||||
def assert_split_done(exclude_ps_id: int | None = None) -> None:
|
||||
secondary_count = 0
|
||||
attached_count = 0
|
||||
for ps in env.pageservers:
|
||||
|
||||
@@ -4,16 +4,12 @@ import socket
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
from types import TracebackType
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import backoff
|
||||
from fixtures.log_helper import log
|
||||
from fixtures.neon_fixtures import PgProtocol, VanillaPostgres
|
||||
from fixtures.port_distributor import PortDistributor
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from typing import Optional
|
||||
|
||||
|
||||
def generate_tls_cert(cn, certout, keyout):
|
||||
subprocess.run(
|
||||
@@ -55,7 +51,7 @@ class PgSniRouter(PgProtocol):
|
||||
self.destination = destination
|
||||
self.tls_cert = tls_cert
|
||||
self.tls_key = tls_key
|
||||
self._popen: Optional[subprocess.Popen[bytes]] = None
|
||||
self._popen: subprocess.Popen[bytes] | None = None
|
||||
self.test_output_dir = test_output_dir
|
||||
|
||||
def start(self) -> PgSniRouter:
|
||||
@@ -96,9 +92,9 @@ class PgSniRouter(PgProtocol):
|
||||
|
||||
def __exit__(
|
||||
self,
|
||||
exc_type: Optional[type[BaseException]],
|
||||
exc: Optional[BaseException],
|
||||
tb: Optional[TracebackType],
|
||||
exc_type: type[BaseException] | None,
|
||||
exc: BaseException | None,
|
||||
tb: TracebackType | None,
|
||||
):
|
||||
if self._popen is not None:
|
||||
self._popen.terminate()
|
||||
|
||||
@@ -5,7 +5,7 @@ import json
|
||||
import threading
|
||||
import time
|
||||
from collections import defaultdict
|
||||
from datetime import datetime, timezone
|
||||
from datetime import UTC, datetime
|
||||
from enum import Enum
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
@@ -56,7 +56,7 @@ from werkzeug.wrappers.request import Request
|
||||
from werkzeug.wrappers.response import Response
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from typing import Any, Optional, Union
|
||||
from typing import Any
|
||||
|
||||
|
||||
def get_node_shard_counts(env: NeonEnv, tenant_ids):
|
||||
@@ -593,7 +593,7 @@ def test_storage_controller_compute_hook(
|
||||
|
||||
# Initial notification from tenant creation
|
||||
assert len(notifications) == 1
|
||||
expect: dict[str, Union[list[dict[str, int]], str, None, int]] = {
|
||||
expect: dict[str, list[dict[str, int]] | str | None | int] = {
|
||||
"tenant_id": str(env.initial_tenant),
|
||||
"stripe_size": None,
|
||||
"shards": [{"node_id": int(env.pageservers[0].id), "shard_number": 0}],
|
||||
@@ -708,7 +708,7 @@ def test_storage_controller_stuck_compute_hook(
|
||||
|
||||
# Initial notification from tenant creation
|
||||
assert len(notifications) == 1
|
||||
expect: dict[str, Union[list[dict[str, int]], str, None, int]] = {
|
||||
expect: dict[str, list[dict[str, int]] | str | None | int] = {
|
||||
"tenant_id": str(env.initial_tenant),
|
||||
"stripe_size": None,
|
||||
"shards": [{"node_id": int(env.pageservers[0].id), "shard_number": 0}],
|
||||
@@ -1048,7 +1048,7 @@ def test_storage_controller_s3_time_travel_recovery(
|
||||
)
|
||||
|
||||
time.sleep(4)
|
||||
ts_before_disaster = datetime.now(tz=timezone.utc).replace(tzinfo=None)
|
||||
ts_before_disaster = datetime.now(tz=UTC).replace(tzinfo=None)
|
||||
time.sleep(4)
|
||||
|
||||
# Simulate a "disaster": delete some random files from remote storage for one of the shards
|
||||
@@ -1072,7 +1072,7 @@ def test_storage_controller_s3_time_travel_recovery(
|
||||
pass
|
||||
|
||||
time.sleep(4)
|
||||
ts_after_disaster = datetime.now(tz=timezone.utc).replace(tzinfo=None)
|
||||
ts_after_disaster = datetime.now(tz=UTC).replace(tzinfo=None)
|
||||
time.sleep(4)
|
||||
|
||||
# Do time travel recovery
|
||||
@@ -2274,7 +2274,7 @@ def test_storage_controller_node_deletion(
|
||||
@pytest.mark.parametrize("shard_count", [None, 2])
|
||||
def test_storage_controller_metadata_health(
|
||||
neon_env_builder: NeonEnvBuilder,
|
||||
shard_count: Optional[int],
|
||||
shard_count: int | None,
|
||||
):
|
||||
"""
|
||||
Create three tenants A, B, C.
|
||||
|
||||
@@ -6,7 +6,6 @@ import shutil
|
||||
import threading
|
||||
import time
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import pytest
|
||||
from fixtures.common_types import TenantId, TenantShardId, TimelineId
|
||||
@@ -20,12 +19,9 @@ from fixtures.remote_storage import S3Storage, s3_storage
|
||||
from fixtures.utils import wait_until
|
||||
from fixtures.workload import Workload
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from typing import Optional
|
||||
|
||||
|
||||
@pytest.mark.parametrize("shard_count", [None, 4])
|
||||
def test_scrubber_tenant_snapshot(neon_env_builder: NeonEnvBuilder, shard_count: Optional[int]):
|
||||
def test_scrubber_tenant_snapshot(neon_env_builder: NeonEnvBuilder, shard_count: int | None):
|
||||
"""
|
||||
Test the `tenant-snapshot` subcommand, which grabs data from remote storage
|
||||
|
||||
@@ -131,7 +127,7 @@ def drop_local_state(env: NeonEnv, tenant_id: TenantId):
|
||||
|
||||
|
||||
@pytest.mark.parametrize("shard_count", [None, 4])
|
||||
def test_scrubber_physical_gc(neon_env_builder: NeonEnvBuilder, shard_count: Optional[int]):
|
||||
def test_scrubber_physical_gc(neon_env_builder: NeonEnvBuilder, shard_count: int | None):
|
||||
neon_env_builder.enable_pageserver_remote_storage(s3_storage())
|
||||
neon_env_builder.num_pageservers = 2
|
||||
|
||||
@@ -179,9 +175,7 @@ def test_scrubber_physical_gc(neon_env_builder: NeonEnvBuilder, shard_count: Opt
|
||||
|
||||
|
||||
@pytest.mark.parametrize("shard_count", [None, 2])
|
||||
def test_scrubber_physical_gc_ancestors(
|
||||
neon_env_builder: NeonEnvBuilder, shard_count: Optional[int]
|
||||
):
|
||||
def test_scrubber_physical_gc_ancestors(neon_env_builder: NeonEnvBuilder, shard_count: int | None):
|
||||
neon_env_builder.enable_pageserver_remote_storage(s3_storage())
|
||||
neon_env_builder.num_pageservers = 2
|
||||
|
||||
@@ -499,7 +493,7 @@ def test_scrubber_physical_gc_ancestors_split(neon_env_builder: NeonEnvBuilder):
|
||||
|
||||
@pytest.mark.parametrize("shard_count", [None, 4])
|
||||
def test_scrubber_scan_pageserver_metadata(
|
||||
neon_env_builder: NeonEnvBuilder, shard_count: Optional[int]
|
||||
neon_env_builder: NeonEnvBuilder, shard_count: int | None
|
||||
):
|
||||
"""
|
||||
Create some layers. Delete an object listed in index. Run scrubber and see if it detects the defect.
|
||||
|
||||
@@ -1,11 +1,10 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import enum
|
||||
import random
|
||||
import time
|
||||
from enum import StrEnum
|
||||
from threading import Thread
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import asyncpg
|
||||
import pytest
|
||||
@@ -28,10 +27,6 @@ from fixtures.remote_storage import (
|
||||
from fixtures.utils import query_scalar, wait_until
|
||||
from prometheus_client.samples import Sample
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from typing import Optional
|
||||
|
||||
|
||||
# In tests that overlap endpoint activity with tenant attach/detach, there are
|
||||
# a variety of warnings that the page service may emit when it cannot acquire
|
||||
# an active tenant to serve a request
|
||||
@@ -57,7 +52,7 @@ def do_gc_target(
|
||||
log.info("gc http thread returning")
|
||||
|
||||
|
||||
class ReattachMode(str, enum.Enum):
|
||||
class ReattachMode(StrEnum):
|
||||
REATTACH_EXPLICIT = "explicit"
|
||||
REATTACH_RESET = "reset"
|
||||
REATTACH_RESET_DROP = "reset_drop"
|
||||
@@ -498,7 +493,7 @@ def test_metrics_while_ignoring_broken_tenant_and_reloading(
|
||||
r".* Changing Active tenant to Broken state, reason: broken from test"
|
||||
)
|
||||
|
||||
def only_int(samples: list[Sample]) -> Optional[int]:
|
||||
def only_int(samples: list[Sample]) -> int | None:
|
||||
if len(samples) == 1:
|
||||
return int(samples[0].value)
|
||||
assert len(samples) == 0
|
||||
|
||||
@@ -28,7 +28,7 @@ from fixtures.utils import (
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from typing import Any, Optional
|
||||
from typing import Any
|
||||
|
||||
|
||||
def assert_abs_margin_ratio(a: float, b: float, margin_ratio: float):
|
||||
@@ -78,7 +78,7 @@ def populate_branch(
|
||||
tenant_id: TenantId,
|
||||
ps_http: PageserverHttpClient,
|
||||
create_table: bool,
|
||||
expected_sum: Optional[int],
|
||||
expected_sum: int | None,
|
||||
) -> tuple[TimelineId, Lsn]:
|
||||
# insert some data
|
||||
with pg_cur(endpoint) as cur:
|
||||
|
||||
@@ -4,7 +4,6 @@ import json
|
||||
import random
|
||||
import threading
|
||||
import time
|
||||
from typing import Optional
|
||||
|
||||
import pytest
|
||||
import requests
|
||||
@@ -661,7 +660,7 @@ def test_timeline_archival_chaos(neon_env_builder: NeonEnvBuilder):
|
||||
],
|
||||
)
|
||||
def test_timeline_retain_lsn(
|
||||
neon_env_builder: NeonEnvBuilder, with_intermediary: bool, offload_child: Optional[str]
|
||||
neon_env_builder: NeonEnvBuilder, with_intermediary: bool, offload_child: str | None
|
||||
):
|
||||
"""
|
||||
Ensure that retain_lsn functionality for timelines works, both for offloaded and non-offloaded ones
|
||||
|
||||
@@ -5,6 +5,7 @@ import enum
|
||||
import threading
|
||||
import time
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from enum import StrEnum
|
||||
from queue import Empty, Queue
|
||||
from threading import Barrier
|
||||
|
||||
@@ -36,7 +37,7 @@ def layer_name(info: HistoricLayerInfo) -> str:
|
||||
|
||||
|
||||
@enum.unique
|
||||
class Branchpoint(str, enum.Enum):
|
||||
class Branchpoint(StrEnum):
|
||||
"""
|
||||
Have branches at these Lsns possibly relative to L0 layer boundary.
|
||||
"""
|
||||
|
||||
@@ -3,7 +3,6 @@ from __future__ import annotations
|
||||
import time
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from dataclasses import dataclass
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import pytest
|
||||
from fixtures.log_helper import log
|
||||
@@ -14,9 +13,6 @@ from fixtures.neon_fixtures import (
|
||||
)
|
||||
from fixtures.pageserver.utils import wait_timeline_detail_404
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from typing import Optional
|
||||
|
||||
|
||||
@pytest.mark.parametrize("sharded", [True, False])
|
||||
def test_gc_blocking_by_timeline(neon_env_builder: NeonEnvBuilder, sharded: bool):
|
||||
@@ -89,7 +85,7 @@ def wait_for_another_gc_round():
|
||||
@dataclass
|
||||
class ScrollableLog:
|
||||
pageserver: NeonPageserver
|
||||
offset: Optional[LogCursor]
|
||||
offset: LogCursor | None
|
||||
|
||||
def assert_log_contains(self, what: str):
|
||||
msg, offset = self.pageserver.assert_log_contains(what, offset=self.offset)
|
||||
|
||||
@@ -7,7 +7,6 @@ import time
|
||||
from collections import defaultdict
|
||||
from contextlib import closing
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
import psycopg2.errors
|
||||
import psycopg2.extras
|
||||
@@ -668,7 +667,7 @@ def test_tenant_physical_size(neon_env_builder: NeonEnvBuilder):
|
||||
class TimelinePhysicalSizeValues:
|
||||
api_current_physical: int
|
||||
prometheus_resident_physical: float
|
||||
prometheus_remote_physical: Optional[float] = None
|
||||
prometheus_remote_physical: float | None = None
|
||||
python_timelinedir_layerfiles_physical: int
|
||||
layer_map_file_size_sum: int
|
||||
|
||||
|
||||
@@ -61,7 +61,7 @@ from fixtures.utils import (
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from typing import Any, Optional
|
||||
from typing import Any
|
||||
|
||||
|
||||
def wait_lsn_force_checkpoint(
|
||||
@@ -189,7 +189,7 @@ def test_many_timelines(neon_env_builder: NeonEnvBuilder):
|
||||
m.flush_lsns.append(Lsn(int(sk_m.flush_lsn_inexact(tenant_id, timeline_id))))
|
||||
m.commit_lsns.append(Lsn(int(sk_m.commit_lsn_inexact(tenant_id, timeline_id))))
|
||||
|
||||
for flush_lsn, commit_lsn in zip(m.flush_lsns, m.commit_lsns):
|
||||
for flush_lsn, commit_lsn in zip(m.flush_lsns, m.commit_lsns, strict=False):
|
||||
# Invariant. May be < when transaction is in progress.
|
||||
assert (
|
||||
commit_lsn <= flush_lsn
|
||||
@@ -224,7 +224,7 @@ def test_many_timelines(neon_env_builder: NeonEnvBuilder):
|
||||
def __init__(self) -> None:
|
||||
super().__init__(daemon=True)
|
||||
self.should_stop = threading.Event()
|
||||
self.exception: Optional[BaseException] = None
|
||||
self.exception: BaseException | None = None
|
||||
|
||||
def run(self) -> None:
|
||||
try:
|
||||
@@ -521,7 +521,7 @@ def test_wal_backup(neon_env_builder: NeonEnvBuilder):
|
||||
# Shut down subsequently each of safekeepers and fill a segment while sk is
|
||||
# down; ensure segment gets offloaded by others.
|
||||
offloaded_seg_end = [Lsn("0/2000000"), Lsn("0/3000000"), Lsn("0/4000000")]
|
||||
for victim, seg_end in zip(env.safekeepers, offloaded_seg_end):
|
||||
for victim, seg_end in zip(env.safekeepers, offloaded_seg_end, strict=False):
|
||||
victim.stop()
|
||||
# roughly fills one segment
|
||||
cur.execute("insert into t select generate_series(1,250000), 'payload'")
|
||||
@@ -666,7 +666,7 @@ def test_s3_wal_replay(neon_env_builder: NeonEnvBuilder):
|
||||
|
||||
# recreate timeline on pageserver from scratch
|
||||
ps_http.timeline_create(
|
||||
pg_version=PgVersion(pg_version),
|
||||
pg_version=PgVersion(str(pg_version)),
|
||||
tenant_id=tenant_id,
|
||||
new_timeline_id=timeline_id,
|
||||
)
|
||||
@@ -1177,14 +1177,14 @@ def cmp_sk_wal(sks: list[Safekeeper], tenant_id: TenantId, timeline_id: Timeline
|
||||
# report/understand if WALs are different due to that.
|
||||
statuses = [sk_http_cli.timeline_status(tenant_id, timeline_id) for sk_http_cli in sk_http_clis]
|
||||
term_flush_lsns = [(s.last_log_term, s.flush_lsn) for s in statuses]
|
||||
for tfl, sk in zip(term_flush_lsns[1:], sks[1:]):
|
||||
for tfl, sk in zip(term_flush_lsns[1:], sks[1:], strict=False):
|
||||
assert (
|
||||
term_flush_lsns[0] == tfl
|
||||
), f"(last_log_term, flush_lsn) are not equal on sks {sks[0].id} and {sk.id}: {term_flush_lsns[0]} != {tfl}"
|
||||
|
||||
# check that WALs are identic.
|
||||
segs = [sk.list_segments(tenant_id, timeline_id) for sk in sks]
|
||||
for cmp_segs, sk in zip(segs[1:], sks[1:]):
|
||||
for cmp_segs, sk in zip(segs[1:], sks[1:], strict=False):
|
||||
assert (
|
||||
segs[0] == cmp_segs
|
||||
), f"lists of segments on sks {sks[0].id} and {sk.id} are not identic: {segs[0]} and {cmp_segs}"
|
||||
@@ -1455,10 +1455,10 @@ class SafekeeperEnv:
|
||||
self.pg_bin = pg_bin
|
||||
self.num_safekeepers = num_safekeepers
|
||||
self.bin_safekeeper = str(neon_binpath / "safekeeper")
|
||||
self.safekeepers: Optional[list[subprocess.CompletedProcess[Any]]] = None
|
||||
self.postgres: Optional[ProposerPostgres] = None
|
||||
self.tenant_id: Optional[TenantId] = None
|
||||
self.timeline_id: Optional[TimelineId] = None
|
||||
self.safekeepers: list[subprocess.CompletedProcess[Any]] | None = None
|
||||
self.postgres: ProposerPostgres | None = None
|
||||
self.tenant_id: TenantId | None = None
|
||||
self.timeline_id: TimelineId | None = None
|
||||
|
||||
def init(self) -> SafekeeperEnv:
|
||||
assert self.postgres is None, "postgres is already initialized"
|
||||
|
||||
@@ -5,7 +5,6 @@ import random
|
||||
import time
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import asyncpg
|
||||
import pytest
|
||||
@@ -16,10 +15,6 @@ from fixtures.neon_fixtures import Endpoint, NeonEnv, NeonEnvBuilder, Safekeeper
|
||||
from fixtures.remote_storage import RemoteStorageKind
|
||||
from fixtures.utils import skip_in_debug_build
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from typing import Optional
|
||||
|
||||
|
||||
log = getLogger("root.safekeeper_async")
|
||||
|
||||
|
||||
@@ -261,7 +256,7 @@ def test_restarts_frequent_checkpoints(neon_env_builder: NeonEnvBuilder):
|
||||
|
||||
|
||||
def endpoint_create_start(
|
||||
env: NeonEnv, branch: str, pgdir_name: Optional[str], allow_multiple: bool = False
|
||||
env: NeonEnv, branch: str, pgdir_name: str | None, allow_multiple: bool = False
|
||||
):
|
||||
endpoint = Endpoint(
|
||||
env,
|
||||
@@ -287,7 +282,7 @@ async def exec_compute_query(
|
||||
env: NeonEnv,
|
||||
branch: str,
|
||||
query: str,
|
||||
pgdir_name: Optional[str] = None,
|
||||
pgdir_name: str | None = None,
|
||||
allow_multiple: bool = False,
|
||||
):
|
||||
with endpoint_create_start(
|
||||
@@ -705,7 +700,7 @@ async def run_wal_lagging(env: NeonEnv, endpoint: Endpoint, test_output_dir: Pat
|
||||
# invalid, to make them unavailable to the endpoint. We use
|
||||
# ports 10, 11 and 12 to simulate unavailable safekeepers.
|
||||
config = toml.load(test_output_dir / "repo" / "config")
|
||||
for i, (_sk, active) in enumerate(zip(env.safekeepers, active_sk)):
|
||||
for i, (_sk, active) in enumerate(zip(env.safekeepers, active_sk, strict=False)):
|
||||
if active:
|
||||
config["safekeepers"][i]["pg_port"] = env.safekeepers[i].port.pg
|
||||
else:
|
||||
|
||||
Reference in New Issue
Block a user