mirror of https://github.com/neondatabase/neon.git, synced 2025-12-23 06:09:59 +00:00
ruff: enable TC — flake8-type-checking (#11368)
## Problem

`TYPE_CHECKING` is used inconsistently across Python tests.

## Summary of changes

- Update `ruff`: 0.7.0 -> 0.11.2
- Enable TC (flake8-type-checking): https://docs.astral.sh/ruff/rules/#flake8-type-checking-tc
- (auto)fix all new issues
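The TC rules move imports that are only needed for annotations into a block that runs solely under the type checker. A minimal sketch of the before/after shape these autofixes produce (the `count_rows` helper is illustrative, not from this commit; the `PgConnection` import is the same one the first file diff below relocates):

```python
from __future__ import annotations  # annotations stay lazy, so type-only names need not exist at runtime

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Imported only while mypy/pyright analyzes the file; skipped at runtime,
    # which avoids import cost and breaks import cycles.
    from psycopg2.extensions import connection as PgConnection


def count_rows(conn: PgConnection, table: str) -> int:
    with conn.cursor() as cur:
        cur.execute(f"SELECT count(*) FROM {table}")
        return cur.fetchone()[0]
```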
committed by: GitHub
parent: db5384e1b0
commit: 30a7dd630c

poetry.lock (generated): 40 lines changed
@@ -3111,30 +3111,30 @@ six = "*"

 [[package]]
 name = "ruff"
-version = "0.7.0"
+version = "0.11.2"
 description = "An extremely fast Python linter and code formatter, written in Rust."
 optional = false
 python-versions = ">=3.7"
 groups = ["dev"]
 files = [
-    {file = "ruff-0.7.0-py3-none-linux_armv6l.whl", hash = "sha256:0cdf20c2b6ff98e37df47b2b0bd3a34aaa155f59a11182c1303cce79be715628"},
-    {file = "ruff-0.7.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:496494d350c7fdeb36ca4ef1c9f21d80d182423718782222c29b3e72b3512737"},
-    {file = "ruff-0.7.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:214b88498684e20b6b2b8852c01d50f0651f3cc6118dfa113b4def9f14faaf06"},
-    {file = "ruff-0.7.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:630fce3fefe9844e91ea5bbf7ceadab4f9981f42b704fae011bb8efcaf5d84be"},
-    {file = "ruff-0.7.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:211d877674e9373d4bb0f1c80f97a0201c61bcd1e9d045b6e9726adc42c156aa"},
-    {file = "ruff-0.7.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:194d6c46c98c73949a106425ed40a576f52291c12bc21399eb8f13a0f7073495"},
-    {file = "ruff-0.7.0-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:82c2579b82b9973a110fab281860403b397c08c403de92de19568f32f7178598"},
-    {file = "ruff-0.7.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9af971fe85dcd5eaed8f585ddbc6bdbe8c217fb8fcf510ea6bca5bdfff56040e"},
-    {file = "ruff-0.7.0-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b641c7f16939b7d24b7bfc0be4102c56562a18281f84f635604e8a6989948914"},
-    {file = "ruff-0.7.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d71672336e46b34e0c90a790afeac8a31954fd42872c1f6adaea1dff76fd44f9"},
-    {file = "ruff-0.7.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:ab7d98c7eed355166f367597e513a6c82408df4181a937628dbec79abb2a1fe4"},
-    {file = "ruff-0.7.0-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:1eb54986f770f49edb14f71d33312d79e00e629a57387382200b1ef12d6a4ef9"},
-    {file = "ruff-0.7.0-py3-none-musllinux_1_2_i686.whl", hash = "sha256:dc452ba6f2bb9cf8726a84aa877061a2462afe9ae0ea1d411c53d226661c601d"},
-    {file = "ruff-0.7.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:4b406c2dce5be9bad59f2de26139a86017a517e6bcd2688da515481c05a2cb11"},
-    {file = "ruff-0.7.0-py3-none-win32.whl", hash = "sha256:f6c968509f767776f524a8430426539587d5ec5c662f6addb6aa25bc2e8195ec"},
-    {file = "ruff-0.7.0-py3-none-win_amd64.whl", hash = "sha256:ff4aabfbaaba880e85d394603b9e75d32b0693152e16fa659a3064a85df7fce2"},
-    {file = "ruff-0.7.0-py3-none-win_arm64.whl", hash = "sha256:10842f69c245e78d6adec7e1db0a7d9ddc2fff0621d730e61657b64fa36f207e"},
-    {file = "ruff-0.7.0.tar.gz", hash = "sha256:47a86360cf62d9cd53ebfb0b5eb0e882193fc191c6d717e8bef4462bc3b9ea2b"},
+    {file = "ruff-0.11.2-py3-none-linux_armv6l.whl", hash = "sha256:c69e20ea49e973f3afec2c06376eb56045709f0212615c1adb0eda35e8a4e477"},
+    {file = "ruff-0.11.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:2c5424cc1c4eb1d8ecabe6d4f1b70470b4f24a0c0171356290b1953ad8f0e272"},
+    {file = "ruff-0.11.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:ecf20854cc73f42171eedb66f006a43d0a21bfb98a2523a809931cda569552d9"},
+    {file = "ruff-0.11.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0c543bf65d5d27240321604cee0633a70c6c25c9a2f2492efa9f6d4b8e4199bb"},
+    {file = "ruff-0.11.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:20967168cc21195db5830b9224be0e964cc9c8ecf3b5a9e3ce19876e8d3a96e3"},
+    {file = "ruff-0.11.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:955a9ce63483999d9f0b8f0b4a3ad669e53484232853054cc8b9d51ab4c5de74"},
+    {file = "ruff-0.11.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:86b3a27c38b8fce73bcd262b0de32e9a6801b76d52cdb3ae4c914515f0cef608"},
+    {file = "ruff-0.11.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a3b66a03b248c9fcd9d64d445bafdf1589326bee6fc5c8e92d7562e58883e30f"},
+    {file = "ruff-0.11.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0397c2672db015be5aa3d4dac54c69aa012429097ff219392c018e21f5085147"},
+    {file = "ruff-0.11.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:869bcf3f9abf6457fbe39b5a37333aa4eecc52a3b99c98827ccc371a8e5b6f1b"},
+    {file = "ruff-0.11.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:2a2b50ca35457ba785cd8c93ebbe529467594087b527a08d487cf0ee7b3087e9"},
+    {file = "ruff-0.11.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:7c69c74bf53ddcfbc22e6eb2f31211df7f65054bfc1f72288fc71e5f82db3eab"},
+    {file = "ruff-0.11.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:6e8fb75e14560f7cf53b15bbc55baf5ecbe373dd5f3aab96ff7aa7777edd7630"},
+    {file = "ruff-0.11.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:842a472d7b4d6f5924e9297aa38149e5dcb1e628773b70e6387ae2c97a63c58f"},
+    {file = "ruff-0.11.2-py3-none-win32.whl", hash = "sha256:aca01ccd0eb5eb7156b324cfaa088586f06a86d9e5314b0eb330cb48415097cc"},
+    {file = "ruff-0.11.2-py3-none-win_amd64.whl", hash = "sha256:3170150172a8f994136c0c66f494edf199a0bbea7a409f649e4bc8f4d7084080"},
+    {file = "ruff-0.11.2-py3-none-win_arm64.whl", hash = "sha256:52933095158ff328f4c77af3d74f0379e34fd52f175144cefc1b192e7ccd32b4"},
+    {file = "ruff-0.11.2.tar.gz", hash = "sha256:ec47591497d5a1050175bdf4e1a4e6272cddff7da88a2ad595e1e326041d8d94"},
 ]

 [[package]]
@@ -3844,4 +3844,4 @@ cffi = ["cffi (>=1.11)"]
 [metadata]
 lock-version = "2.1"
 python-versions = "^3.11"
-content-hash = "715fc8c896dcfa1b15054deeddcdec557ef93af91b26e1c8e4688fe4dbef5296"
+content-hash = "fb50cb6b291169dce3188560cdb31a14af95647318f8f0f0d718131dbaf1817a"
@@ -53,7 +53,7 @@ jsonnet = "^0.21.0-rc2"

 [tool.poetry.group.dev.dependencies]
 mypy = "==1.13.0"
-ruff = "^0.7.0"
+ruff = "^0.11.2"

 [build-system]
 requires = ["poetry-core>=1.0.0"]
@@ -109,4 +109,5 @@ select = [
     "W", # pycodestyle
     "B", # bugbear
     "UP", # pyupgrade
+    "TC", # flake8-type-checking
 ]
@@ -8,9 +8,12 @@
 from __future__ import annotations

 import argparse
+from typing import TYPE_CHECKING

 import psycopg2
-from psycopg2.extensions import connection as PgConnection
+
+if TYPE_CHECKING:
+    from psycopg2.extensions import connection as PgConnection


 def main(args: argparse.Namespace):
@@ -7,13 +7,13 @@ import logging
 import signal
 import sys
 from collections import defaultdict
-from collections.abc import Awaitable
 from dataclasses import dataclass
 from typing import TYPE_CHECKING

 import aiohttp

 if TYPE_CHECKING:
+    from collections.abc import Awaitable
     from typing import Any
@@ -4,11 +4,15 @@ Run the regression tests on the cloud instance of Neon

 from __future__ import annotations

-from pathlib import Path
+from typing import TYPE_CHECKING

 import pytest
-from fixtures.neon_fixtures import RemotePostgres
-from fixtures.pg_version import PgVersion
+
+if TYPE_CHECKING:
+    from pathlib import Path
+
+    from fixtures.neon_fixtures import RemotePostgres
+    from fixtures.pg_version import PgVersion


 @pytest.mark.timeout(7200)
@@ -2,11 +2,12 @@ from __future__ import annotations

 from dataclasses import dataclass
 from enum import StrEnum
-from typing import Any
+from typing import TYPE_CHECKING, Any

 import jwt

-from fixtures.common_types import TenantId
+if TYPE_CHECKING:
+    from fixtures.common_types import TenantId


 @dataclass
@@ -15,18 +15,20 @@ from typing import TYPE_CHECKING

 import allure
 import pytest
-from _pytest.config import Config
-from _pytest.config.argparsing import Parser
-from _pytest.fixtures import FixtureRequest
-from _pytest.terminal import TerminalReporter

-from fixtures.common_types import TenantId, TimelineId
 from fixtures.log_helper import log
-from fixtures.neon_fixtures import NeonPageserver
+
+if TYPE_CHECKING:
+    from collections.abc import Callable, Iterator, Mapping
+
+    from _pytest.config import Config
+    from _pytest.config.argparsing import Parser
+    from _pytest.fixtures import FixtureRequest
+    from _pytest.terminal import TerminalReporter
+
+    from fixtures.common_types import TenantId, TimelineId
+    from fixtures.neon_fixtures import NeonPageserver

 """
 This file contains fixtures for micro-benchmarks.
@@ -11,7 +11,6 @@ from pathlib import Path
 from typing import TYPE_CHECKING, final

 import pytest
-from _pytest.fixtures import FixtureRequest
 from typing_extensions import override

 from fixtures.benchmark_fixture import MetricReport, NeonBenchmarker
@@ -24,11 +23,14 @@ from fixtures.neon_fixtures import (
     VanillaPostgres,
     wait_for_last_flush_lsn,
 )
-from fixtures.pg_stats import PgStatTable
+
+if TYPE_CHECKING:
+    from collections.abc import Iterator
+
+    from _pytest.fixtures import FixtureRequest
+
+    from fixtures.pg_stats import PgStatTable


 class PgCompare(ABC):
     """Common interface of all postgres implementations, useful for benchmarks.
@@ -4,8 +4,6 @@ import concurrent.futures
 from typing import TYPE_CHECKING

 import pytest
-from pytest_httpserver import HTTPServer
-from werkzeug.wrappers.request import Request
 from werkzeug.wrappers.response import Response

 from fixtures.common_types import TenantId
@@ -15,6 +13,9 @@ if TYPE_CHECKING:
     from collections.abc import Callable
     from typing import Any

+    from pytest_httpserver import HTTPServer
+    from werkzeug.wrappers.request import Request
+

 class ComputeReconfigure:
     def __init__(self, server: HTTPServer):
@@ -147,7 +147,7 @@ def fast_import(
         pg_distrib_dir,
         pg_version,
         workdir,
-        cleanup=not cast(bool, pytestconfig.getoption("--preserve-database-files")),
+        cleanup=not cast("bool", pytestconfig.getoption("--preserve-database-files")),
     ) as fi:
         yield fi
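The quoted `cast("bool", ...)` rewrites here and below come from TC006 (runtime-cast-value): quoting the first argument of `cast()` turns it into a string annotation that is never evaluated, so the target type can live behind `TYPE_CHECKING`. A hedged sketch (the `as_path` helper is illustrative, not from this commit):

```python
from typing import TYPE_CHECKING, cast

if TYPE_CHECKING:
    from pathlib import Path  # resolved only while the type checker runs


def as_path(value: object) -> "Path":
    # cast() performs no conversion at runtime; with the quoted form, the
    # name "Path" is never looked up when the function executes, so the
    # import above can stay type-checking-only.
    return cast("Path", value)
```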
@@ -10,7 +10,6 @@ import asyncio
 import collections
 import io
 import json
-from collections.abc import AsyncIterable
 from typing import TYPE_CHECKING, final

 import pytest_asyncio
@@ -31,6 +30,7 @@ from h2.settings import SettingCodes
 from typing_extensions import override

 if TYPE_CHECKING:
+    from collections.abc import AsyncIterable
     from typing import Any
@@ -1,12 +1,15 @@
 from __future__ import annotations

 from collections import defaultdict
 from typing import TYPE_CHECKING

 from prometheus_client.parser import text_string_to_metric_families
-from prometheus_client.samples import Sample

 from fixtures.log_helper import log

+if TYPE_CHECKING:
+    from prometheus_client.samples import Sample
+

 class Metrics:
     metrics: dict[str, list[Sample]]
@@ -7,7 +7,6 @@ import subprocess
 import tempfile
 import textwrap
 from itertools import chain, product
-from pathlib import Path
 from typing import TYPE_CHECKING, cast

 import toml
@@ -15,14 +14,15 @@ import toml
 from fixtures.common_types import Lsn, TenantId, TimelineId
 from fixtures.log_helper import log
 from fixtures.pageserver.common_types import IndexPartDump
-from fixtures.pg_version import PgVersion

 if TYPE_CHECKING:
+    from pathlib import Path
     from typing import (
         Any,
         cast,
     )

+    from fixtures.pg_version import PgVersion


 # Used to be an ABC. abc.ABC removed due to linter without name change.
 class AbstractNeonCli:
@@ -36,7 +36,7 @@ class AbstractNeonCli:
         self.extra_env = extra_env
         self.binpath = binpath

-    COMMAND: str = cast(str, None)  # To be overwritten by the derived class.
+    COMMAND: str = cast("str", None)  # To be overwritten by the derived class.

     def raw_cli(
         self,
@@ -14,14 +14,12 @@ import threading
 import time
 import uuid
 from collections import defaultdict
-from collections.abc import Iterable, Iterator
 from contextlib import closing, contextmanager
 from dataclasses import dataclass
 from datetime import datetime
 from enum import StrEnum
 from functools import cached_property
 from pathlib import Path
-from types import TracebackType
 from typing import TYPE_CHECKING, cast
 from urllib.parse import quote, urlparse
@@ -34,19 +32,12 @@ import psycopg2.sql
 import pytest
 import requests
 import toml
-from _pytest.config import Config
-from _pytest.config.argparsing import Parser
-from _pytest.fixtures import FixtureRequest
 from jwcrypto import jwk
-from mypy_boto3_kms import KMSClient
-from mypy_boto3_s3 import S3Client

 # Type-related stuff
 from psycopg2.extensions import connection as PgConnection
 from psycopg2.extensions import cursor as PgCursor
 from psycopg2.extensions import make_dsn, parse_dsn
-from pytest_httpserver import HTTPServer
-from urllib3.util.retry import Retry

 from fixtures import overlayfs
 from fixtures.auth_tokens import AuthKeys, TokenScope
@@ -60,7 +51,6 @@ from fixtures.common_types import (
 )
 from fixtures.compute_migrations import NUM_COMPUTE_MIGRATIONS
 from fixtures.endpoint.http import EndpointHttpClient
-from fixtures.h2server import H2Server
 from fixtures.log_helper import log
 from fixtures.metrics import Metrics, MetricsGetter, parse_metrics
 from fixtures.neon_cli import NeonLocalCli, Pagectl
@@ -78,7 +68,6 @@ from fixtures.pageserver.utils import (
     wait_for_last_record_lsn,
 )
 from fixtures.paths import get_test_repo_dir, shared_snapshot_dir
-from fixtures.pg_version import PgVersion
 from fixtures.port_distributor import PortDistributor
 from fixtures.remote_storage import (
     LocalFsStorage,
@@ -108,10 +97,21 @@ from fixtures.utils import (
 from .neon_api import NeonAPI, NeonApiEndpoint

 if TYPE_CHECKING:
-    from collections.abc import Callable
+    from collections.abc import Callable, Iterable, Iterator
+    from types import TracebackType
     from typing import Any, Self, TypeVar

+    from _pytest.config import Config
+    from _pytest.config.argparsing import Parser
+    from _pytest.fixtures import FixtureRequest
+    from mypy_boto3_kms import KMSClient
+    from mypy_boto3_s3 import S3Client
+    from pytest_httpserver import HTTPServer
+    from urllib3.util.retry import Retry
+
+    from fixtures.h2server import H2Server
+    from fixtures.paths import SnapshotDirLocked
+    from fixtures.pg_version import PgVersion
+
     T = TypeVar("T")
@@ -497,9 +497,9 @@ class NeonEnvBuilder:
         else:
             self.pageserver_wal_receiver_protocol = PageserverWalReceiverProtocol.INTERPRETED

-        assert test_name.startswith(
-            "test_"
-        ), "Unexpectedly instantiated from outside a test function"
+        assert test_name.startswith("test_"), (
+            "Unexpectedly instantiated from outside a test function"
+        )
         self.test_name = test_name
         self.compatibility_neon_binpath = compatibility_neon_binpath
         self.compatibility_pg_distrib_dir = compatibility_pg_distrib_dir
@@ -508,12 +508,12 @@ class NeonEnvBuilder:
         self.mixdir = self.test_output_dir / "mixdir_neon"

         if self.version_combination is not None:
-            assert (
-                self.compatibility_neon_binpath is not None
-            ), "the environment variable COMPATIBILITY_NEON_BIN is required when using mixed versions"
-            assert (
-                self.compatibility_pg_distrib_dir is not None
-            ), "the environment variable COMPATIBILITY_POSTGRES_DISTRIB_DIR is required when using mixed versions"
+            assert self.compatibility_neon_binpath is not None, (
+                "the environment variable COMPATIBILITY_NEON_BIN is required when using mixed versions"
+            )
+            assert self.compatibility_pg_distrib_dir is not None, (
+                "the environment variable COMPATIBILITY_POSTGRES_DISTRIB_DIR is required when using mixed versions"
+            )
             self.mixdir.mkdir(mode=0o755, exist_ok=True)
             self._mix_versions()
             self.test_may_use_compatibility_snapshot_binaries = True
@@ -795,9 +795,9 @@ class NeonEnvBuilder:
         work = ident_state_dir / "work"
         assert upper.is_dir()
         assert work.is_dir()
-        assert (
-            self.test_overlay_dir not in dst.parents
-        ), "otherwise workdir cleanup below wouldn't work"
+        assert self.test_overlay_dir not in dst.parents, (
+            "otherwise workdir cleanup below wouldn't work"
+        )
         # find index, still not mutating state
         idxmap = {
             existing_ident: idx
@@ -863,9 +863,9 @@ class NeonEnvBuilder:
         self.pageserver_remote_storage = ret

     def enable_safekeeper_remote_storage(self, kind: RemoteStorageKind):
-        assert (
-            self.safekeepers_remote_storage is None
-        ), "safekeepers_remote_storage already configured"
+        assert self.safekeepers_remote_storage is None, (
+            "safekeepers_remote_storage already configured"
+        )

         self.safekeepers_remote_storage = self._configure_and_create_remote_storage(
             kind, RemoteStorageUser.SAFEKEEPER
@@ -1421,9 +1421,9 @@ class NeonEnv:
         assert that there is only one. Tests with multiple pageservers should always use
         get_pageserver with an explicit ID.
         """
-        assert (
-            len(self.pageservers) == 1
-        ), "env.pageserver must only be used with single pageserver NeonEnv"
+        assert len(self.pageservers) == 1, (
+            "env.pageserver must only be used with single pageserver NeonEnv"
+        )
         return self.pageservers[0]

     def get_pageserver(self, id: int | None) -> NeonPageserver:
@@ -1614,7 +1614,7 @@ def neon_simple_env(
         compatibility_pg_distrib_dir=compatibility_pg_distrib_dir,
         pg_version=pg_version,
         run_id=run_id,
-        preserve_database_files=cast(bool, pytestconfig.getoption("--preserve-database-files")),
+        preserve_database_files=cast("bool", pytestconfig.getoption("--preserve-database-files")),
         test_name=request.node.name,
         test_output_dir=test_output_dir,
         pageserver_virtual_file_io_engine=pageserver_virtual_file_io_engine,
@@ -1683,7 +1683,7 @@ def neon_env_builder(
         combination=combination,
         pg_version=pg_version,
         run_id=run_id,
-        preserve_database_files=cast(bool, pytestconfig.getoption("--preserve-database-files")),
+        preserve_database_files=cast("bool", pytestconfig.getoption("--preserve-database-files")),
         pageserver_virtual_file_io_engine=pageserver_virtual_file_io_engine,
         test_name=request.node.name,
         test_output_dir=test_output_dir,
@@ -3577,9 +3577,9 @@ class NeonProxy(PgProtocol):

     @backoff.on_exception(backoff.expo, requests.exceptions.RequestException, max_time=10)
     def _wait_until_ready(self):
-        assert (
-            self._popen and self._popen.poll() is None
-        ), "Proxy exited unexpectedly. Check test log."
+        assert self._popen and self._popen.poll() is None, (
+            "Proxy exited unexpectedly. Check test log."
+        )
         requests.get(f"http://{self.host}:{self.http_port}/v1/status")

     def http_query(self, query, args, **kwargs):
@@ -3787,9 +3787,9 @@ class NeonAuthBroker:

     @backoff.on_exception(backoff.expo, requests.exceptions.RequestException, max_time=10)
     def _wait_until_ready(self):
-        assert (
-            self._popen and self._popen.poll() is None
-        ), "Proxy exited unexpectedly. Check test log."
+        assert self._popen and self._popen.poll() is None, (
+            "Proxy exited unexpectedly. Check test log."
+        )
         requests.get(f"http://{self.host}:{self.http_port}/v1/status")

     async def query(self, query, args, **kwargs):
@@ -4069,9 +4069,9 @@ class Endpoint(PgProtocol, LogUtils):
             m = re.search(r"=\s*(\S+)", line)
             assert m is not None, f"malformed config line {line}"
             size = m.group(1)
-            assert size_to_bytes(size) >= size_to_bytes(
-                "1MB"
-            ), "LFC size cannot be set less than 1MB"
+            assert size_to_bytes(size) >= size_to_bytes("1MB"), (
+                "LFC size cannot be set less than 1MB"
+            )
             lfc_path_escaped = str(lfc_path).replace("'", "''")
             config_lines = [
                 f"neon.file_cache_path = '{lfc_path_escaped}'",
@@ -4082,12 +4082,12 @@ class Endpoint(PgProtocol, LogUtils):
             ] + config_lines
         else:
             for line in config_lines:
-                assert (
-                    line.find("neon.max_file_cache_size") == -1
-                ), "Setting LFC parameters is not allowed when LFC is disabled"
-                assert (
-                    line.find("neon.file_cache_size_limit") == -1
-                ), "Setting LFC parameters is not allowed when LFC is disabled"
+                assert line.find("neon.max_file_cache_size") == -1, (
+                    "Setting LFC parameters is not allowed when LFC is disabled"
+                )
+                assert line.find("neon.file_cache_size_limit") == -1, (
+                    "Setting LFC parameters is not allowed when LFC is disabled"
+                )

         self.config(config_lines)
@@ -4925,9 +4925,9 @@ class StorageScrubber:
             healthy = False
         else:
             for _, warnings in with_warnings.items():
-                assert (
-                    len(warnings) > 0
-                ), "with_warnings value should not be empty, running without verbose mode?"
+                assert len(warnings) > 0, (
+                    "with_warnings value should not be empty, running without verbose mode?"
+                )
                 if not self._check_line_list_allowed(warnings):
                     healthy = False
                     break
@@ -4941,9 +4941,9 @@ class StorageScrubber:
             healthy = False
         else:
             for _, errors in with_errors.items():
-                assert (
-                    len(errors) > 0
-                ), "with_errors value should not be empty, running without verbose mode?"
+                assert len(errors) > 0, (
+                    "with_errors value should not be empty, running without verbose mode?"
+                )
                 if not self._check_line_list_allowed(errors):
                     healthy = False
                     break
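The many assert rewrites above are not TC fixes; they appear to be the newer ruff formatter style that ships in this version range (exact version gate is an assumption), which keeps the condition on one line and parenthesizes only the message. A minimal sketch with an illustrative `response` object:

```python
# Old formatter output: the condition is split across lines and the
# message trails the closing parenthesis.
assert (
    response.status_code == 200
), "expected the status endpoint to answer 200"

# New style: the condition stays inline; only the long message is wrapped.
assert response.status_code == 200, (
    "expected the status endpoint to answer 200"
)
```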
@@ -5,7 +5,10 @@ from __future__ import annotations
 import argparse
 import re
 import sys
-from collections.abc import Iterable
 from typing import TYPE_CHECKING

+if TYPE_CHECKING:
+    from collections.abc import Iterable
+

 def scan_pageserver_log_for_errors(
@@ -7,8 +7,7 @@ import string
 import time
 from collections import defaultdict
 from dataclasses import dataclass
-from datetime import datetime
-from typing import Any
+from typing import TYPE_CHECKING, Any

 import requests
 from requests.adapters import HTTPAdapter
@@ -26,6 +25,9 @@ from fixtures.metrics import Metrics, MetricsGetter, parse_metrics
 from fixtures.pg_version import PgVersion
 from fixtures.utils import EnhancedJSONEncoder, Fn

+if TYPE_CHECKING:
+    from datetime import datetime
+

 class PageserverApiException(Exception):
     def __init__(self, message, status_code: int):
@@ -4,18 +4,19 @@ import concurrent.futures
 from typing import TYPE_CHECKING

 import fixtures.pageserver.remote_storage
-from fixtures.common_types import TenantId, TimelineId
 from fixtures.log_helper import log
-from fixtures.neon_fixtures import (
-    NeonEnv,
-    NeonEnvBuilder,
-)
 from fixtures.remote_storage import LocalFsStorage, RemoteStorageKind

 if TYPE_CHECKING:
     from collections.abc import Callable
     from typing import Any

+    from fixtures.common_types import TenantId, TimelineId
+    from fixtures.neon_fixtures import (
+        NeonEnv,
+        NeonEnvBuilder,
+    )
+

 def single_timeline(
     neon_env_builder: NeonEnvBuilder,
@@ -5,11 +5,9 @@ import os
 import queue
 import shutil
 import threading
-from pathlib import Path
 from typing import TYPE_CHECKING

 from fixtures.common_types import TenantId, TimelineId
-from fixtures.neon_fixtures import NeonEnv
 from fixtures.pageserver.common_types import (
     InvalidFileName,
     parse_layer_file_name,
@@ -17,8 +15,11 @@ from fixtures.pageserver.common_types import (
 from fixtures.remote_storage import LocalFsStorage

 if TYPE_CHECKING:
+    from pathlib import Path
     from typing import Any

+    from fixtures.neon_fixtures import NeonEnv
+

 def duplicate_one_tenant(env: NeonEnv, template_tenant: TenantId, new_tenant: TenantId):
     remote_storage = env.pageserver_remote_storage
@@ -3,13 +3,6 @@ from __future__ import annotations
 import time
 from typing import TYPE_CHECKING

-from mypy_boto3_s3.type_defs import (
-    DeleteObjectOutputTypeDef,
-    EmptyResponseMetadataTypeDef,
-    ListObjectsV2OutputTypeDef,
-    ObjectTypeDef,
-)
-
 from fixtures.common_types import Lsn, TenantId, TenantShardId, TimelineId
 from fixtures.log_helper import log
 from fixtures.pageserver.http import PageserverApiException, PageserverHttpClient
@@ -19,6 +12,13 @@ from fixtures.utils import wait_until
 if TYPE_CHECKING:
     from typing import Any

+    from mypy_boto3_s3.type_defs import (
+        DeleteObjectOutputTypeDef,
+        EmptyResponseMetadataTypeDef,
+        ListObjectsV2OutputTypeDef,
+        ObjectTypeDef,
+    )
+

 def assert_tenant_state(
     pageserver_http: PageserverHttpClient,
@@ -241,9 +241,9 @@ def wait_for_upload_queue_empty(
     found = False
     for f in finished:
         if all([s.labels[label] == f.labels[label] for label in remaining_labels]):
-            assert (
-                not found
-            ), "duplicate match, remaining_labels don't uniquely identify sample"
+            assert not found, (
+                "duplicate match, remaining_labels don't uniquely identify sample"
+            )
             tl.append((s.labels, int(s.value) - int(f.value)))
             found = True
     if not found:
@@ -6,13 +6,14 @@ from typing import TYPE_CHECKING

 import allure
 import pytest
 import toml
-from _pytest.python import Metafunc

 from fixtures.pg_version import PgVersion

 if TYPE_CHECKING:
     from typing import Any

+    from _pytest.python import Metafunc
+

 """
 Dynamically parametrize tests by different parameters
@@ -6,7 +6,6 @@ import subprocess
 import threading
 from fcntl import LOCK_EX, LOCK_UN, flock
 from pathlib import Path
-from types import TracebackType
 from typing import TYPE_CHECKING

 import pytest
@@ -18,6 +17,7 @@ from fixtures.utils import allure_attach_from_dir

 if TYPE_CHECKING:
     from collections.abc import Iterator
+    from types import TracebackType


 BASE_DIR = Path(__file__).parents[2]
@@ -101,9 +101,9 @@ def compatibility_snapshot_dir() -> Iterator[Path]:
     if os.getenv("REMOTE_ENV"):
         return
     compatibility_snapshot_dir_env = os.environ.get("COMPATIBILITY_SNAPSHOT_DIR")
-    assert (
-        compatibility_snapshot_dir_env is not None
-    ), "COMPATIBILITY_SNAPSHOT_DIR is not set. It should be set to `compatibility_snapshot_pg(PG_VERSION)` path generateted by test_create_snapshot (ideally generated by the previous version of Neon)"
+    assert compatibility_snapshot_dir_env is not None, (
+        "COMPATIBILITY_SNAPSHOT_DIR is not set. It should be set to `compatibility_snapshot_pg(PG_VERSION)` path generateted by test_create_snapshot (ideally generated by the previous version of Neon)"
+    )
     compatibility_snapshot_dir = Path(compatibility_snapshot_dir_env).resolve()
     yield compatibility_snapshot_dir
@@ -7,22 +7,24 @@ import os
 import re
 from dataclasses import dataclass
 from enum import StrEnum
-from pathlib import Path
 from typing import TYPE_CHECKING

 import boto3
 import toml
 from moto.server import ThreadedMotoServer
-from mypy_boto3_s3 import S3Client
 from typing_extensions import override

-from fixtures.common_types import TenantId, TenantShardId, TimelineId
 from fixtures.log_helper import log
 from fixtures.pageserver.common_types import IndexPartDump

 if TYPE_CHECKING:
+    from pathlib import Path
     from typing import Any

+    from mypy_boto3_s3 import S3Client
+
+    from fixtures.common_types import TenantId, TenantShardId, TimelineId
+

 TIMELINE_INDEX_PART_FILE_NAME = "index_part.json"
 TENANT_HEATMAP_FILE_NAME = "heatmap-v1.json"
@@ -448,9 +450,9 @@ class RemoteStorageKind(StrEnum):
         env_secret_key = os.getenv("AWS_SECRET_ACCESS_KEY")
         env_access_token = os.getenv("AWS_SESSION_TOKEN")
         env_profile = os.getenv("AWS_PROFILE")
-        assert (
-            env_access_key and env_secret_key and env_access_token
-        ) or env_profile, "need to specify either access key and secret access key or profile"
+        assert (env_access_key and env_secret_key and env_access_token) or env_profile, (
+            "need to specify either access key and secret access key or profile"
+        )

         bucket_name = bucket_name or os.getenv("REMOTE_STORAGE_S3_BUCKET")
         assert bucket_name is not None, "no remote storage bucket name provided"
@@ -3,12 +3,11 @@ from __future__ import annotations
-from collections.abc import MutableMapping
 from typing import TYPE_CHECKING, cast

 import pytest

 if TYPE_CHECKING:
+    from collections.abc import MutableMapping
     from typing import Any

     from _pytest.config import Config
@@ -1,10 +1,14 @@
 from __future__ import annotations

-from fixtures.common_types import TenantId, TimelineId
+from typing import TYPE_CHECKING
+
 from fixtures.log_helper import log
-from fixtures.safekeeper.http import SafekeeperHttpClient
 from fixtures.utils import wait_until

+if TYPE_CHECKING:
+    from fixtures.common_types import TenantId, TimelineId
+    from fixtures.safekeeper.http import SafekeeperHttpClient
+

 def wait_walreceivers_absent(
     sk_http_cli: SafekeeperHttpClient, tenant_id: TenantId, timeline_id: TimelineId
@@ -3,12 +3,13 @@ from __future__ import annotations
 from typing import TYPE_CHECKING

 import pytest
-from _pytest.config import Config
-from _pytest.config.argparsing import Parser

 if TYPE_CHECKING:
     from typing import Any

+    from _pytest.config import Config
+    from _pytest.config.argparsing import Parser
+

 """
 This plugin allows tests to be marked as slow using pytest.mark.slow. By default slow
@@ -5,9 +5,7 @@ from typing import TYPE_CHECKING

 import pytest
 import requests
-from pytest_httpserver import HTTPServer
 from werkzeug.datastructures import Headers
-from werkzeug.wrappers.request import Request
 from werkzeug.wrappers.response import Response

 from fixtures.log_helper import log
@@ -15,6 +13,9 @@ from fixtures.log_helper import log
 if TYPE_CHECKING:
     from typing import Any

+    from pytest_httpserver import HTTPServer
+    from werkzeug.wrappers.request import Request
+

 class StorageControllerProxy:
     def __init__(self, server: HTTPServer):
@@ -19,7 +19,6 @@ from urllib.parse import urlencode
 import allure
 import pytest
 import zstandard
-from psycopg2.extensions import cursor
 from typing_extensions import override

 from fixtures.common_types import Id, Lsn
@@ -34,6 +33,8 @@ if TYPE_CHECKING:
     from collections.abc import Iterable
     from typing import IO

+    from psycopg2.extensions import cursor
+
     from fixtures.common_types import TimelineId
     from fixtures.neon_fixtures import PgBin
@@ -512,7 +513,9 @@ def assert_no_errors(log_file: Path, service: str, allowed_errors: list[str]):
     for _lineno, error in errors:
         log.info(f"not allowed {service} error: {error.strip()}")

-    assert not errors, f"First log error on {service}: {errors[0]}\nHint: use scripts/check_allowed_errors.sh to test any new allowed_error you add"
+    assert not errors, (
+        f"First log error on {service}: {errors[0]}\nHint: use scripts/check_allowed_errors.sh to test any new allowed_error you add"
+    )


 def assert_pageserver_backups_equal(left: Path, right: Path, skip_files: set[str]):
@@ -550,18 +553,18 @@ def assert_pageserver_backups_equal(left: Path, right: Path, skip_files: set[str

     left_list, right_list = map(build_hash_list, [left, right])

-    assert len(left_list) == len(
-        right_list
-    ), f"unexpected number of files on tar files, {len(left_list)} != {len(right_list)}"
+    assert len(left_list) == len(right_list), (
+        f"unexpected number of files on tar files, {len(left_list)} != {len(right_list)}"
+    )

     mismatching: set[str] = set()

     for left_tuple, right_tuple in zip(left_list, right_list, strict=False):
         left_path, left_hash = left_tuple
         right_path, right_hash = right_tuple
-        assert (
-            left_path == right_path
-        ), f"file count matched, expected these to be same paths: {left_path}, {right_path}"
+        assert left_path == right_path, (
+            f"file count matched, expected these to be same paths: {left_path}, {right_path}"
+        )
         if left_hash != right_hash:
             mismatching.add(left_path)
@@ -3,7 +3,6 @@ from __future__ import annotations
 import threading
 from typing import TYPE_CHECKING

-from fixtures.common_types import TenantId, TimelineId
 from fixtures.log_helper import log
 from fixtures.neon_fixtures import (
     Endpoint,
@@ -17,6 +16,8 @@ from fixtures.pageserver.utils import wait_for_last_record_lsn
 if TYPE_CHECKING:
     from typing import Any

+    from fixtures.common_types import TenantId, TimelineId
+
 # neon_local doesn't handle creating/modifying endpoints concurrently, so we use a mutex
 # to ensure we don't do that: this enables running lots of Workloads in parallel safely.
 ENDPOINT_LOCK = threading.Lock()
@@ -7,14 +7,17 @@ from __future__ import annotations
 import hashlib
 import os
 import time
 from typing import TYPE_CHECKING

 import clickhouse_connect
 import psycopg2
 import pytest
 from fixtures.log_helper import log
-from fixtures.neon_fixtures import RemotePostgres
 from fixtures.utils import wait_until

+if TYPE_CHECKING:
+    from fixtures.neon_fixtures import RemotePostgres
+

 def query_clickhouse(
     client,
@@ -7,14 +7,17 @@ from __future__ import annotations
 import json
 import os
 import time
 from typing import TYPE_CHECKING

 import psycopg2
 import pytest
 import requests
 from fixtures.log_helper import log
-from fixtures.neon_fixtures import RemotePostgres
 from fixtures.utils import wait_until

+if TYPE_CHECKING:
+    from fixtures.neon_fixtures import RemotePostgres
+

 class DebeziumAPI:
     """
@@ -7,18 +7,19 @@ from __future__ import annotations
 from typing import TYPE_CHECKING

 import fixtures.pageserver.many_tenants as many_tenants
-from fixtures.common_types import TenantId, TimelineId
 from fixtures.log_helper import log
-from fixtures.neon_fixtures import (
-    NeonEnv,
-    NeonEnvBuilder,
-)
 from fixtures.pageserver.utils import wait_until_all_tenants_state

 if TYPE_CHECKING:
     from collections.abc import Callable
     from typing import Any

+    from fixtures.common_types import TenantId, TimelineId
+    from fixtures.neon_fixtures import (
+        NeonEnv,
+        NeonEnvBuilder,
+    )
+

 def ensure_pageserver_ready_for_benchmarking(env: NeonEnv, n_tenants: int):
     """
@@ -7,16 +7,19 @@ import threading
 import time
 import timeit
 from contextlib import closing
 from typing import TYPE_CHECKING

 import pytest
+
 from fixtures.benchmark_fixture import MetricReport, NeonBenchmarker
 from fixtures.common_types import Lsn
-from fixtures.compare_fixtures import NeonCompare
 from fixtures.log_helper import log
-from fixtures.neon_fixtures import NeonPageserver
 from fixtures.pageserver.utils import wait_for_last_record_lsn
 from fixtures.utils import wait_until
-from prometheus_client.samples import Sample

 if TYPE_CHECKING:
+    from fixtures.compare_fixtures import NeonCompare
+    from fixtures.neon_fixtures import NeonPageserver
+    from prometheus_client.samples import Sample
+

 def _record_branch_creation_durations(neon_compare: NeonCompare, durs: list[float]):
@@ -45,9 +48,9 @@ def test_branch_creation_heavy_write(neon_compare: NeonCompare, n_branches: int)
     tenant, _ = env.create_tenant(
         conf={
             "gc_period": "5 s",
-            "gc_horizon": f"{4 * 1024 ** 2}",
-            "checkpoint_distance": f"{2 * 1024 ** 2}",
-            "compaction_target_size": f"{1024 ** 2}",
+            "gc_horizon": f"{4 * 1024**2}",
+            "checkpoint_distance": f"{2 * 1024**2}",
+            "compaction_target_size": f"{1024**2}",
             "compaction_threshold": "2",
             # set PITR interval to be small, so we can do GC
             "pitr_interval": "5 s",
@@ -82,10 +85,10 @@ def test_branch_creation_heavy_write(neon_compare: NeonCompare, n_branches: int)
         env.create_branch(f"b{i + 1}", ancestor_branch_name=f"b{p}", tenant_id=tenant)
         dur = timeit.default_timer() - timer

-        log.info(f"Creating branch b{i+1} took {dur}s")
+        log.info(f"Creating branch b{i + 1} took {dur}s")
         branch_creation_durations.append(dur)

-        threads.append(threading.Thread(target=run_pgbench, args=(f"b{i+1}",), daemon=True))
+        threads.append(threading.Thread(target=run_pgbench, args=(f"b{i + 1}",), daemon=True))
         threads[-1].start()

     for thread in threads:
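The `f"b{i + 1}"` and `f"{4 * 1024**2}"` rewrites above are formatter changes too: newer ruff versions format expressions inside f-strings, spacing out ordinary binary operators while hugging the power operator (the exact version that introduced this is an assumption). A tiny runnable sketch:

```python
size = 4
print(f"b{size + 1}")    # binary operators gain surrounding spaces
print(f"{4 * 1024**2}")  # the ** operator stays hugged to its operands
```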
@@ -2,13 +2,16 @@ from __future__ import annotations

 import timeit
 from pathlib import Path
 from typing import TYPE_CHECKING

 from fixtures.benchmark_fixture import PgBenchRunResult
-from fixtures.compare_fixtures import NeonCompare
 from fixtures.neon_fixtures import fork_at_current_lsn

 from performance.test_perf_pgbench import utc_now_timestamp

+if TYPE_CHECKING:
+    from fixtures.compare_fixtures import NeonCompare
+

 # -----------------------------------------------------------------------
 # Start of `test_compare_child_and_root_*` tests
 # -----------------------------------------------------------------------
@@ -1,10 +1,13 @@
 from __future__ import annotations

 import timeit
 from typing import TYPE_CHECKING

 import pytest
+
 from fixtures.benchmark_fixture import MetricReport
-from fixtures.neon_fixtures import NeonEnvBuilder
+
+if TYPE_CHECKING:
+    from fixtures.neon_fixtures import NeonEnvBuilder

 # Run bulk tenant creation test.
 #
@@ -1,12 +1,15 @@
 from __future__ import annotations

 from contextlib import closing
 from typing import TYPE_CHECKING

 import pytest
-from fixtures.compare_fixtures import NeonCompare
 from fixtures.log_helper import log
 from fixtures.neon_fixtures import wait_for_last_flush_lsn

+if TYPE_CHECKING:
+    from fixtures.compare_fixtures import NeonCompare
+

 #
 # Test compaction and image layer creation performance.
@@ -3,13 +3,16 @@ from __future__ import annotations
 import os
 import threading
 import time
 from typing import TYPE_CHECKING

 import pytest
-from fixtures.compare_fixtures import PgCompare
-from fixtures.pg_stats import PgStatTable

 from performance.test_perf_pgbench import get_durations_matrix, get_scales_matrix

+if TYPE_CHECKING:
+    from fixtures.compare_fixtures import PgCompare
+    from fixtures.pg_stats import PgStatTable
+

 def get_seeds_matrix(default: int = 100):
     seeds = os.getenv("TEST_PG_BENCH_SEEDS_MATRIX", default=str(default))
@@ -1,10 +1,13 @@
 from __future__ import annotations

 import datetime
 from typing import TYPE_CHECKING

 import pytest
 from fixtures.benchmark_fixture import MetricReport, NeonBenchmarker
-from fixtures.neon_fixtures import NeonEnv
+
+if TYPE_CHECKING:
+    from fixtures.neon_fixtures import NeonEnv


 @pytest.mark.timeout(120)
@@ -1,9 +1,13 @@
 from __future__ import annotations

 from typing import TYPE_CHECKING

 import pytest
 import requests
 from fixtures.benchmark_fixture import MetricReport, NeonBenchmarker
-from fixtures.neon_fixtures import NeonEnvBuilder, PgBin
+
+if TYPE_CHECKING:
+    from fixtures.neon_fixtures import NeonEnvBuilder, PgBin


 # Just start and measure duration.
@@ -2,11 +2,13 @@ from __future__ import annotations

 from contextlib import closing
 from io import BufferedReader, RawIOBase
-from typing import final
+from typing import TYPE_CHECKING, final

-from fixtures.compare_fixtures import PgCompare
 from typing_extensions import override

+if TYPE_CHECKING:
+    from fixtures.compare_fixtures import PgCompare
+

 @final
 class CopyTestData(RawIOBase):
@@ -1,11 +1,14 @@
 from __future__ import annotations

 from contextlib import closing
 from typing import TYPE_CHECKING

 import pytest
-from fixtures.compare_fixtures import PgCompare
 from pytest_lazyfixture import lazy_fixture

+if TYPE_CHECKING:
+    from fixtures.compare_fixtures import PgCompare
+

 @pytest.mark.parametrize(
     "env",
@@ -1,11 +1,14 @@
 from __future__ import annotations

 import json
 from typing import TYPE_CHECKING

 import pytest
 from fixtures.benchmark_fixture import MetricReport, NeonBenchmarker
 from fixtures.log_helper import log
-from fixtures.neon_fixtures import NeonEnvBuilder
+
+if TYPE_CHECKING:
+    from fixtures.neon_fixtures import NeonEnvBuilder


 def gc_feedback_impl(neon_env_builder: NeonEnvBuilder, zenbenchmark: NeonBenchmarker, mode: str):
@@ -18,9 +21,9 @@ def gc_feedback_impl(neon_env_builder: NeonEnvBuilder, zenbenchmark: NeonBenchma
             # disable default GC and compaction
             "gc_period": "1000 m",
             "compaction_period": "0 s",
-            "gc_horizon": f"{1024 ** 2}",
-            "checkpoint_distance": f"{1024 ** 2}",
-            "compaction_target_size": f"{1024 ** 2}",
+            "gc_horizon": f"{1024**2}",
+            "checkpoint_distance": f"{1024**2}",
+            "compaction_target_size": f"{1024**2}",
             # set PITR interval to be small, so we can do GC
             "pitr_interval": "10 s",
             # "compaction_threshold": "3",
@@ -1,8 +1,10 @@
 from __future__ import annotations

 from contextlib import closing
 from typing import TYPE_CHECKING

-from fixtures.compare_fixtures import PgCompare
+if TYPE_CHECKING:
+    from fixtures.compare_fixtures import PgCompare


 #
@@ -1,11 +1,14 @@
 from __future__ import annotations

 from contextlib import closing
 from typing import TYPE_CHECKING

 import pytest
-from fixtures.compare_fixtures import PgCompare
 from pytest_lazyfixture import lazy_fixture

+if TYPE_CHECKING:
+    from fixtures.compare_fixtures import PgCompare
+

 @pytest.mark.parametrize(
     "env",
@@ -1,11 +1,14 @@
 from __future__ import annotations

 from contextlib import closing
 from typing import TYPE_CHECKING

 import pytest
-from fixtures.compare_fixtures import PgCompare
 from pytest_lazyfixture import lazy_fixture

+if TYPE_CHECKING:
+    from fixtures.compare_fixtures import PgCompare
+

 @pytest.mark.parametrize(
     "env",
@@ -1,18 +1,21 @@
 from __future__ import annotations

 import threading
 from typing import TYPE_CHECKING

 import pytest
-from fixtures.compare_fixtures import PgCompare
-from fixtures.neon_fixtures import PgProtocol

 from performance.test_perf_pgbench import get_scales_matrix
 from performance.test_wal_backpressure import record_read_latency

+if TYPE_CHECKING:
+    from fixtures.compare_fixtures import PgCompare
+    from fixtures.neon_fixtures import PgProtocol
+

 def start_write_workload(pg: PgProtocol, scale: int = 10):
     with pg.connect().cursor() as cur:
-        cur.execute(f"create table big as select generate_series(1,{scale*100_000})")
+        cur.execute(f"create table big as select generate_series(1,{scale * 100_000})")


 # Measure latency of reads on one table, while lots of writes are happening on another table.
@@ -24,7 +27,7 @@ def test_measure_read_latency_heavy_write_workload(neon_with_baseline: PgCompare
     pg = env.pg

     with pg.connect().cursor() as cur:
-        cur.execute(f"create table small as select generate_series(1,{scale*100_000})")
+        cur.execute(f"create table small as select generate_series(1,{scale * 100_000})")

     write_thread = threading.Thread(target=start_write_workload, args=(pg, scale * 100))
     write_thread.start()
@@ -1,7 +1,6 @@
 from __future__ import annotations

 import time
-from collections.abc import Iterator
 from contextlib import contextmanager
 from typing import TYPE_CHECKING, cast
@@ -14,6 +13,7 @@ from fixtures.log_helper import log
 from fixtures.neon_fixtures import logical_replication_sync

 if TYPE_CHECKING:
+    from collections.abc import Iterator
     from subprocess import Popen
     from typing import AnyStr
@@ -2,8 +2,10 @@ from __future__ import annotations

 import asyncio
 from io import BytesIO
+from typing import TYPE_CHECKING

-from fixtures.neon_fixtures import Endpoint, NeonEnv
+if TYPE_CHECKING:
+    from fixtures.neon_fixtures import Endpoint, NeonEnv


 async def repeat_bytes(buf, repetitions: int):
@@ -2,9 +2,11 @@ from __future__ import annotations

 import asyncio
 from io import BytesIO
+from typing import TYPE_CHECKING

-from fixtures.compare_fixtures import PgCompare
-from fixtures.neon_fixtures import PgProtocol
+if TYPE_CHECKING:
+    from fixtures.compare_fixtures import PgCompare
+    from fixtures.neon_fixtures import PgProtocol


 async def repeat_bytes(buf, repetitions: int):
@@ -134,8 +134,8 @@ def run_command_and_log_output(command, log_file_path: Path):
     # Define a list of necessary environment variables for pgcopydb
     custom_env_vars = {
         "LD_LIBRARY_PATH": f"{os.getenv('PGCOPYDB_LIB_PATH')}:{os.getenv('PG_16_LIB_PATH')}",
-        "PGCOPYDB_SOURCE_PGURI": cast(str, os.getenv("BENCHMARK_INGEST_SOURCE_CONNSTR")),
-        "PGCOPYDB_TARGET_PGURI": cast(str, os.getenv("BENCHMARK_INGEST_TARGET_CONNSTR")),
+        "PGCOPYDB_SOURCE_PGURI": cast("str", os.getenv("BENCHMARK_INGEST_SOURCE_CONNSTR")),
+        "PGCOPYDB_TARGET_PGURI": cast("str", os.getenv("BENCHMARK_INGEST_TARGET_CONNSTR")),
         "PGOPTIONS": "-c idle_in_transaction_session_timeout=0 -c maintenance_work_mem=8388608 -c max_parallel_maintenance_workers=7",
     }
     # Combine the current environment with custom variables
@@ -3,12 +3,15 @@ from __future__ import annotations
 import os
 from dataclasses import dataclass
 from pathlib import Path
 from typing import TYPE_CHECKING

 import pytest
-from _pytest.mark import ParameterSet
-from fixtures.compare_fixtures import RemoteCompare
 from fixtures.log_helper import log

+if TYPE_CHECKING:
+    from _pytest.mark import ParameterSet
+    from fixtures.compare_fixtures import RemoteCompare
+

 @dataclass
 class LabelledQuery:
@@ -4,14 +4,17 @@ import os
 import timeit
 from contextlib import closing
 from pathlib import Path
 from typing import TYPE_CHECKING

 import pytest
+
 from fixtures.benchmark_fixture import PgBenchRunResult
-from fixtures.compare_fixtures import PgCompare
 from fixtures.log_helper import log

 from performance.test_perf_pgbench import get_durations_matrix, utc_now_timestamp

+if TYPE_CHECKING:
+    from fixtures.compare_fixtures import PgCompare
+

 def get_custom_scripts(
     default: str = "insert_webhooks.sql@2 select_any_webhook_with_skew.sql@4 select_recent_webhook.sql@4",
@@ -6,12 +6,15 @@ import os
 import timeit
 from datetime import datetime
 from pathlib import Path
 from typing import TYPE_CHECKING

 import pytest
+
 from fixtures.benchmark_fixture import MetricReport, PgBenchInitResult, PgBenchRunResult
-from fixtures.compare_fixtures import PgCompare
 from fixtures.utils import get_scale_for_db

+if TYPE_CHECKING:
+    from fixtures.compare_fixtures import PgCompare
+

 @enum.unique
 class PgBenchLoadType(enum.Enum):
@@ -1,10 +1,14 @@
 from __future__ import annotations

 from typing import TYPE_CHECKING

 import pytest
-from fixtures.compare_fixtures import PgCompare

 from performance.test_perf_pgbench import PgBenchLoadType, get_durations_matrix, run_test_pgbench

+if TYPE_CHECKING:
+    from fixtures.compare_fixtures import PgCompare
+

 # The following test runs on an existing database that has pgvector extension installed
 # and a table with 1 million embedding vectors loaded and indexed with HNSW.
@@ -2,10 +2,8 @@ from __future__ import annotations

 import csv
 import os
-import subprocess
 import time
 import traceback
-from pathlib import Path
 from typing import TYPE_CHECKING

 import psycopg2
@@ -15,14 +13,16 @@ from fixtures.benchmark_fixture import MetricReport
 from fixtures.common_types import Lsn
 from fixtures.log_helper import log
 from fixtures.neon_api import connection_parameters_to_env
-from fixtures.pg_version import PgVersion

 if TYPE_CHECKING:
+    import subprocess
+    from pathlib import Path
     from typing import Any

     from fixtures.benchmark_fixture import NeonBenchmarker
     from fixtures.neon_api import NeonAPI
     from fixtures.neon_fixtures import PgBin
+    from fixtures.pg_version import PgVersion


 # Granularity of ~0.5 sec
@@ -186,7 +186,7 @@ def test_replication_start_stop(
     prefix = "pgbench_agg"
     num_replicas = 2
     configuration_test_time_sec = 10 * 60
-    pgbench_duration = f"-T{2 ** num_replicas * configuration_test_time_sec}"
+    pgbench_duration = f"-T{2**num_replicas * configuration_test_time_sec}"
     error_occurred = False

     project = neon_api.create_project(pg_version)
@@ -2,11 +2,14 @@ from __future__ import annotations

 import random
 from contextlib import closing
 from typing import TYPE_CHECKING

 from fixtures.benchmark_fixture import MetricReport
-from fixtures.compare_fixtures import PgCompare
 from fixtures.utils import query_scalar

+if TYPE_CHECKING:
+    from fixtures.compare_fixtures import PgCompare
+

 # This is a clear-box test that demonstrates the worst case scenario for the
 # "1 segment per layer" implementation of the pageserver. It writes to random
@@ -4,13 +4,16 @@
 from __future__ import annotations

 from contextlib import closing
 from typing import TYPE_CHECKING

 import pytest
+
 from fixtures.benchmark_fixture import MetricReport
-from fixtures.compare_fixtures import PgCompare
 from fixtures.log_helper import log
 from pytest_lazyfixture import lazy_fixture

+if TYPE_CHECKING:
+    from fixtures.compare_fixtures import PgCompare
+

 @pytest.mark.parametrize(
     "rows,iters,workers",
@@ -5,10 +5,10 @@ import random
import time
from collections import defaultdict
from enum import StrEnum
from typing import TYPE_CHECKING

import pytest
from fixtures.common_types import TenantId, TenantShardId, TimelineArchivalState, TimelineId
from fixtures.compute_reconfigure import ComputeReconfigure
from fixtures.log_helper import log
from fixtures.neon_fixtures import (
    NeonEnv,
@@ -22,6 +22,9 @@ from fixtures.pageserver.http import PageserverApiException, PageserverHttpClien
from fixtures.pg_version import PgVersion
from fixtures.utils import wait_until

if TYPE_CHECKING:
    from fixtures.compute_reconfigure import ComputeReconfigure


def get_consistent_node_shard_counts(env: NeonEnv, total_shards) -> defaultdict[str, int]:
    """
@@ -171,7 +174,7 @@ def test_storage_controller_many_tenants(

    rss = env.storage_controller.get_metric_value("process_resident_memory_bytes")
    assert rss is not None
    log.info(f"Resident memory: {rss} ({ rss / total_shards} per shard)")
    log.info(f"Resident memory: {rss} ({rss / total_shards} per shard)")
    assert rss < expect_memory_per_shard * total_shards

    def assert_all_tenants_scheduled_in_home_az():
@@ -186,15 +189,15 @@ def test_storage_controller_many_tenants(
            assert preferred_az == shard["preferred_az_id"]

            # Attachment should be in the preferred AZ
            assert shard["preferred_az_id"] == az_selector(
                shard["node_attached"]
            ), f"Shard {shard['tenant_shard_id']} not in {shard['preferred_az_id']}"
            assert shard["preferred_az_id"] == az_selector(shard["node_attached"]), (
                f"Shard {shard['tenant_shard_id']} not in {shard['preferred_az_id']}"
            )

            # Secondary locations should not be in the preferred AZ
            for node_secondary in shard["node_secondary"]:
                assert (
                    shard["preferred_az_id"] != az_selector(node_secondary)
                ), f"Shard {shard['tenant_shard_id']} secondary should be in {shard['preferred_az_id']}"
                assert shard["preferred_az_id"] != az_selector(node_secondary), (
                    f"Shard {shard['tenant_shard_id']} secondary should be in {shard['preferred_az_id']}"
                )

            # There should only be one secondary location (i.e. no migrations in flight)
            assert len(shard["node_secondary"]) == 1
@@ -531,9 +534,9 @@ def test_storage_controller_many_tenants(
        for node in nodes:
            if node["id"] in node_ids:
                checked_any = True
                assert (
                    node["availability"] == expected_availability
                ), f"Node {node['id']} is not {expected_availability} yet: {node['availability']}"
                assert node["availability"] == expected_availability, (
                    f"Node {node['id']} is not {expected_availability} yet: {node['availability']}"
                )

        assert checked_any

@@ -550,9 +553,9 @@ def test_storage_controller_many_tenants(
        desc = env.storage_controller.tenant_describe(tenant_id)
        for shard in desc["shards"]:
            # Attachment should be outside the AZ where we killed the pageservers
            assert (
                az_selector(shard["node_attached"]) != victim_az
            ), f"Shard {shard['tenant_shard_id']} still in {victim_az} (node {shard['node_attached']})"
            assert az_selector(shard["node_attached"]) != victim_az, (
                f"Shard {shard['tenant_shard_id']} still in {victim_az} (node {shard['node_attached']})"
            )

    # Bring back the pageservers
    for ps in killed_pageservers:

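Most hunks from here on are one mechanical rewrite by the updated formatter: a wrapped assert keeps its condition on the `assert` line and moves the message into parentheses instead of parenthesizing the condition. A side-by-side sketch with placeholder values:

value, expected = "Active", "Active"

# Old layout: the condition is parenthesized and wrapped; the message trails.
assert (
    value == expected
), f"node is not {expected} yet: {value}"

# New layout: the condition stays on one line and the message wraps, which
# keeps the asserted expression itself easy to grep for.
assert value == expected, (
    f"node is not {expected} yet: {value}"
)
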
@@ -4,7 +4,6 @@ import statistics
import threading
import time
import timeit
from collections.abc import Generator
from typing import TYPE_CHECKING

import pytest
@@ -17,7 +16,7 @@ from fixtures.neon_fixtures import NeonEnvBuilder, PgBin, flush_ep_to_pageserver
from performance.test_perf_pgbench import get_durations_matrix, get_scales_matrix

if TYPE_CHECKING:
    from collections.abc import Callable
    from collections.abc import Callable, Generator
    from typing import Any


@@ -37,9 +36,9 @@ def pg_compare(request) -> Generator[PgCompare, None, None]:

        yield fixture
    else:
        assert (
            len(x) == 2
        ), f"request param ({request.param}) should have a format of `neon_{{safekeepers_enable_fsync}}`"
        assert len(x) == 2, (
            f"request param ({request.param}) should have a format of `neon_{{safekeepers_enable_fsync}}`"
        )

        # `NeonCompare` interface
        neon_env_builder = request.getfixturevalue("neon_env_builder")
@@ -278,7 +277,7 @@ def record_read_latency(
                t2 = timeit.default_timer()

                log.info(
                    f"Executed read query {read_query}, got {cur.fetchall()}, read time {t2-t1:.2f}s"
                    f"Executed read query {read_query}, got {cur.fetchall()}, read time {t2 - t1:.2f}s"
                )
                read_latencies.append(t2 - t1)
            except Exception as err:

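The `t2-t1` to `t2 - t1` change reflects that newer ruff versions also format expressions inside f-strings, normalizing operator spacing there the same way as in ordinary code. A tiny sketch:

import timeit

t1 = timeit.default_timer()
t2 = timeit.default_timer()
# Previously written as {t2-t1:.2f}; the formatter now inserts the spaces.
print(f"elapsed {t2 - t1:.2f}s")
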
@@ -14,8 +14,10 @@
from __future__ import annotations

from contextlib import closing
from typing import TYPE_CHECKING

from fixtures.compare_fixtures import PgCompare
if TYPE_CHECKING:
    from fixtures.compare_fixtures import PgCompare


def test_write_amplification(neon_with_baseline: PgCompare):

@@ -3,11 +3,14 @@ from __future__ import annotations
import shutil
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING

import pytest
from fixtures.neon_fixtures import RemotePostgres
from fixtures.utils import subprocess_capture

if TYPE_CHECKING:
    from fixtures.neon_fixtures import RemotePostgres


@pytest.mark.remote_cluster
@pytest.mark.parametrize(

@@ -1,10 +1,14 @@
from __future__ import annotations

from typing import TYPE_CHECKING

from fixtures.common_types import TimelineId
from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonEnvBuilder
from fixtures.utils import query_scalar

if TYPE_CHECKING:
    from fixtures.neon_fixtures import NeonEnvBuilder


#
# Create ancestor branches off the main branch.

@@ -1,19 +1,23 @@
from __future__ import annotations

from collections.abc import Generator
from dataclasses import dataclass
from typing import TYPE_CHECKING

import pytest
from fixtures.common_types import TenantId
from fixtures.log_helper import log
from fixtures.neon_fixtures import (
    NeonEnv,
    NeonEnvBuilder,
)
from fixtures.pageserver.http import PageserverHttpClient, TenantConfig
from fixtures.remote_storage import LocalFsStorage, RemoteStorageKind
from fixtures.utils import wait_until

if TYPE_CHECKING:
    from collections.abc import Generator

    from fixtures.neon_fixtures import (
        NeonEnv,
        NeonEnvBuilder,
    )
    from fixtures.pageserver.http import PageserverHttpClient, TenantConfig


@pytest.fixture
def positive_env(neon_env_builder: NeonEnvBuilder) -> NeonEnv:
@@ -53,9 +57,9 @@ def negative_env(neon_env_builder: NeonEnvBuilder) -> Generator[NegativeTests, N

    yield NegativeTests(env, tenant_id, config_pre_detach)

    assert tenant_id not in [
        TenantId(t["id"]) for t in ps_http.tenant_list()
    ], "tenant should not be attached after negative test"
    assert tenant_id not in [TenantId(t["id"]) for t in ps_http.tenant_list()], (
        "tenant should not be attached after negative test"
    )

    env.pageserver.allowed_errors.extend(
        [
@@ -214,9 +218,9 @@ def test_fully_custom_config(positive_env: NeonEnv):
    # also self-test that our fully_custom_config covers all of them
    initial_tenant_config = get_config(ps_http, env.initial_tenant)
    assert initial_tenant_config.tenant_specific_overrides == {}
    assert set(initial_tenant_config.effective_config.keys()) == set(
        fully_custom_config.keys()
    ), "ensure we cover all config options"
    assert set(initial_tenant_config.effective_config.keys()) == set(fully_custom_config.keys()), (
        "ensure we cover all config options"
    )

    # create a new tenant to test overrides
    (tenant_id, _) = env.create_tenant()
@@ -237,17 +241,15 @@ def test_fully_custom_config(positive_env: NeonEnv):

    # some more self-validation: assert that none of the values in our
    # fully custom config are the same as the default values
    assert set(our_tenant_config.effective_config.keys()) == set(
        fully_custom_config.keys()
    ), "ensure we cover all config options"
    assert (
        {
            k: initial_tenant_config.effective_config[k]
            != our_tenant_config.effective_config[k]
            for k in fully_custom_config.keys()
        }
        == {k: True for k in fully_custom_config.keys()}
    ), "ensure our custom config has different values than the default config for all config options, so we know we overrode everything"
    assert set(our_tenant_config.effective_config.keys()) == set(fully_custom_config.keys()), (
        "ensure we cover all config options"
    )
    assert {
        k: initial_tenant_config.effective_config[k] != our_tenant_config.effective_config[k]
        for k in fully_custom_config.keys()
    } == {k: True for k in fully_custom_config.keys()}, (
        "ensure our custom config has different values than the default config for all config options, so we know we overrode everything"
    )

    # ensure customizations survive reattach
    env.pageserver.tenant_detach(tenant_id)

@@ -23,13 +23,13 @@ async def test_auth_broker_happy(
    # local proxy mock just echos back the request
    # check that we forward the correct data

    assert (
        res["headers"]["authorization"] == f"Bearer {token.serialize()}"
    ), "JWT should be forwarded"
    assert res["headers"]["authorization"] == f"Bearer {token.serialize()}", (
        "JWT should be forwarded"
    )

    assert (
        "anonymous" in res["headers"]["neon-connection-string"]
    ), "conn string should be forwarded"
    assert "anonymous" in res["headers"]["neon-connection-string"], (
        "conn string should be forwarded"
    )

    assert json.loads(res["body"]) == {
        "query": "foo",

@@ -3,11 +3,14 @@ from __future__ import annotations

import threading
import time
from contextlib import closing, contextmanager
from typing import TYPE_CHECKING

import psycopg2.extras
import pytest
from fixtures.log_helper import log
from fixtures.neon_fixtures import Endpoint, NeonEnvBuilder

if TYPE_CHECKING:
    from fixtures.neon_fixtures import Endpoint, NeonEnvBuilder

pytest_plugins = "fixtures.neon_fixtures"


@@ -2,13 +2,16 @@ from __future__ import annotations

import random
import time
from typing import TYPE_CHECKING

import psycopg2.errors
import pytest
from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonEnvBuilder
from fixtures.utils import USE_LFC

if TYPE_CHECKING:
    from fixtures.neon_fixtures import NeonEnvBuilder


@pytest.mark.timeout(600)
def test_compute_pageserver_connection_stress(neon_env_builder: NeonEnvBuilder):

@@ -1,7 +1,11 @@
from __future__ import annotations

from typing import TYPE_CHECKING

import pytest
from fixtures.neon_fixtures import NeonEnv

if TYPE_CHECKING:
    from fixtures.neon_fixtures import NeonEnv


#

@@ -2,14 +2,17 @@ from __future__ import annotations

import threading
import time
from typing import TYPE_CHECKING

import pytest
from fixtures.common_types import Lsn, TimelineId
from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonEnv
from fixtures.pageserver.http import TimelineCreate406
from fixtures.utils import query_scalar, skip_in_debug_build

if TYPE_CHECKING:
    from fixtures.neon_fixtures import NeonEnv


# Test the GC implementation when running with branching.
# This test reproduces the issue https://github.com/neondatabase/neon/issues/707.
@@ -58,9 +61,9 @@ def test_branch_and_gc(neon_simple_env: NeonEnv):
            # disable background GC
            "gc_period": "0s",
            # small checkpoint distance to create more delta layer files
            "checkpoint_distance": f"{1024 ** 2}",
            "checkpoint_distance": f"{1024**2}",
            # set the target size to be large to allow the image layer to cover the whole key space
            "compaction_target_size": f"{1024 ** 3}",
            "compaction_target_size": f"{1024**3}",
            # tweak the default settings to allow quickly create image layers and L1 layers
            "compaction_period": "1 s",
            "compaction_threshold": "2",
@@ -134,9 +137,9 @@ def test_branch_creation_before_gc(neon_simple_env: NeonEnv):
            # disable background GC
            "gc_period": "0s",
            # small checkpoint distance to create more delta layer files
            "checkpoint_distance": f"{1024 ** 2}",
            "checkpoint_distance": f"{1024**2}",
            # set the target size to be large to allow the image layer to cover the whole key space
            "compaction_target_size": f"{1024 ** 3}",
            "compaction_target_size": f"{1024**3}",
            # tweak the default settings to allow quickly create image layers and L1 layers
            "compaction_period": "1 s",
            "compaction_threshold": "2",

@@ -1,12 +1,16 @@
from __future__ import annotations

from typing import TYPE_CHECKING

import pytest
from fixtures.common_types import Lsn, TimelineId
from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonEnvBuilder
from fixtures.pageserver.http import TimelineCreate406
from fixtures.utils import print_gc_result, query_scalar

if TYPE_CHECKING:
    from fixtures.neon_fixtures import NeonEnvBuilder


#
# Create a couple of branches off the main branch, at a historical point in time.

@@ -4,16 +4,11 @@ import random
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from typing import TYPE_CHECKING

import pytest
from fixtures.common_types import Lsn, TimelineId
from fixtures.log_helper import log
from fixtures.neon_fixtures import (
    Endpoint,
    NeonEnv,
    NeonEnvBuilder,
    PgBin,
)
from fixtures.pageserver.http import PageserverApiException
from fixtures.pageserver.utils import wait_until_tenant_active
from fixtures.utils import query_scalar
@@ -21,6 +16,14 @@ from performance.test_perf_pgbench import get_scales_matrix
from requests import RequestException
from requests.exceptions import RetryError

if TYPE_CHECKING:
    from fixtures.neon_fixtures import (
        Endpoint,
        NeonEnv,
        NeonEnvBuilder,
        PgBin,
    )


# Test branch creation
#
@@ -43,9 +46,9 @@ def test_branching_with_pgbench(
    tenant, _ = env.create_tenant(
        conf={
            "gc_period": "5 s",
            "gc_horizon": f"{1024 ** 2}",
            "checkpoint_distance": f"{1024 ** 2}",
            "compaction_target_size": f"{1024 ** 2}",
            "gc_horizon": f"{1024**2}",
            "checkpoint_distance": f"{1024**2}",
            "compaction_target_size": f"{1024**2}",
            # set PITR interval to be small, so we can do GC
            "pitr_interval": "5 s",
        }

@@ -124,14 +124,14 @@ def test_timeline_init_break_before_checkpoint(neon_env_builder: NeonEnvBuilder)

    # Creating the timeline didn't finish. The other timelines on tenant should still be present and work normally.
    new_tenant_timelines = env.neon_cli.timeline_list(tenant_id)
    assert (
        new_tenant_timelines == old_tenant_timelines
    ), f"Pageserver after restart should ignore non-initialized timelines for tenant {tenant_id}"
    assert new_tenant_timelines == old_tenant_timelines, (
        f"Pageserver after restart should ignore non-initialized timelines for tenant {tenant_id}"
    )

    timeline_dirs = [d for d in timelines_dir.iterdir()]
    assert (
        timeline_dirs == initial_timeline_dirs
    ), "pageserver should clean its temp timeline files on timeline creation failure"
    assert timeline_dirs == initial_timeline_dirs, (
        "pageserver should clean its temp timeline files on timeline creation failure"
    )


# The "exit" case is for a reproducer of issue 6007: an unclean shutdown where we can't do local fs cleanups
@@ -176,14 +176,14 @@ def test_timeline_init_break_before_checkpoint_recreate(

    # Creating the timeline didn't finish. The other timelines on tenant should still be present and work normally.
    new_tenant_timelines = env.neon_cli.timeline_list(tenant_id)
    assert (
        new_tenant_timelines == old_tenant_timelines
    ), f"Pageserver after restart should ignore non-initialized timelines for tenant {tenant_id}"
    assert new_tenant_timelines == old_tenant_timelines, (
        f"Pageserver after restart should ignore non-initialized timelines for tenant {tenant_id}"
    )

    timeline_dirs = [d for d in timelines_dir.iterdir()]
    assert (
        timeline_dirs == initial_timeline_dirs
    ), "pageserver should clean its temp timeline files on timeline creation failure"
    assert timeline_dirs == initial_timeline_dirs, (
        "pageserver should clean its temp timeline files on timeline creation failure"
    )

    # creating the branch should have worked now
    new_timeline_id = TimelineId(
@@ -211,11 +211,11 @@ def test_timeline_create_break_after_dir_creation(neon_env_builder: NeonEnvBuild
    # Creating the timeline didn't finish. The other timelines on tenant should still be present and work normally.
    # "New" timeline is not present in the list, allowing pageserver to retry the same request
    new_tenant_timelines = env.neon_cli.timeline_list(tenant_id)
    assert (
        new_tenant_timelines == old_tenant_timelines
    ), f"Pageserver after restart should ignore non-initialized timelines for tenant {tenant_id}"
    assert new_tenant_timelines == old_tenant_timelines, (
        f"Pageserver after restart should ignore non-initialized timelines for tenant {tenant_id}"
    )

    timeline_dirs = [d for d in timelines_dir.iterdir()]
    assert (
        timeline_dirs == initial_timeline_dirs
    ), "pageserver should clean its temp timeline files on timeline creation failure"
    assert timeline_dirs == initial_timeline_dirs, (
        "pageserver should clean its temp timeline files on timeline creation failure"
    )

@@ -1,7 +1,11 @@
from __future__ import annotations

from typing import TYPE_CHECKING

from fixtures.metrics import parse_metrics
from fixtures.neon_fixtures import NeonEnvBuilder, NeonProxy

if TYPE_CHECKING:
    from fixtures.neon_fixtures import NeonEnvBuilder, NeonProxy


def test_build_info_metric(neon_env_builder: NeonEnvBuilder, link_proxy: NeonProxy):

@@ -1,11 +1,14 @@
from __future__ import annotations

import asyncio
from typing import TYPE_CHECKING

from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonEnvBuilder
from fixtures.remote_storage import RemoteStorageKind

if TYPE_CHECKING:
    from fixtures.neon_fixtures import NeonEnvBuilder


def test_change_pageserver(neon_env_builder: NeonEnvBuilder):
    """

@@ -1,11 +1,14 @@
from __future__ import annotations

import os
from typing import TYPE_CHECKING

from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonEnvBuilder
from fixtures.utils import query_scalar, wait_until

if TYPE_CHECKING:
    from fixtures.neon_fixtures import NeonEnvBuilder


#
# Test compute node start after clog truncation

@@ -6,9 +6,12 @@ import subprocess
import threading
import time
from contextlib import closing
from typing import TYPE_CHECKING

from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonEnv

if TYPE_CHECKING:
    from fixtures.neon_fixtures import NeonEnv


def lsof_path() -> str:

@@ -177,7 +177,7 @@ def test_pageserver_gc_compaction_smoke(neon_env_builder: NeonEnvBuilder, with_b
        "compaction_period": "5s",
        # No PiTR interval and small GC horizon
        "pitr_interval": "0s",
        "gc_horizon": f"{1024 ** 2}",
        "gc_horizon": f"{1024**2}",
        "lsn_lease_length": "0s",
    }

@@ -867,7 +867,7 @@ def test_image_layer_compression(neon_env_builder: NeonEnvBuilder, enabled: bool
    )
    assert bytes_in is not None
    assert bytes_out is not None
    log.info(f"Compression ratio: {bytes_out/bytes_in} ({bytes_out} in, {bytes_out} out)")
    log.info(f"Compression ratio: {bytes_out / bytes_in} ({bytes_out} in, {bytes_out} out)")

    if enabled:
        # We are writing high compressible repetitive plain text, expect excellent compression

@@ -7,12 +7,12 @@ import subprocess
import tempfile
from dataclasses import dataclass
from pathlib import Path
from typing import TYPE_CHECKING

import fixtures.utils
import pytest
import toml
from fixtures.common_types import TenantId, TimelineId
from fixtures.compute_reconfigure import ComputeReconfigure
from fixtures.log_helper import log
from fixtures.neon_fixtures import (
    NeonEnv,
@@ -28,6 +28,9 @@ from fixtures.pg_version import PgVersion
from fixtures.remote_storage import RemoteStorageKind, S3Storage, s3_storage
from fixtures.workload import Workload

if TYPE_CHECKING:
    from fixtures.compute_reconfigure import ComputeReconfigure

#
# A test suite that help to prevent unintentionally breaking backward or forward compatibility between Neon releases.
# - `test_create_snapshot` a script wrapped in a test that creates a data snapshot.
@@ -232,7 +235,9 @@ def test_backward_compatibility(
        else:
            raise

    assert not breaking_changes_allowed, "Breaking changes are allowed by ALLOW_BACKWARD_COMPATIBILITY_BREAKAGE, but the test has passed without any breakage"
    assert not breaking_changes_allowed, (
        "Breaking changes are allowed by ALLOW_BACKWARD_COMPATIBILITY_BREAKAGE, but the test has passed without any breakage"
    )


@check_ondisk_data_compatibility_if_enabled
@@ -260,12 +265,12 @@ def test_forward_compatibility(
    # Use previous version's production binaries (pageserver, safekeeper, pg_distrib_dir, etc.).
    # But always use the current version's neon_local binary.
    # This is because we want to test the compatibility of the data format, not the compatibility of the neon_local CLI.
    assert (
        neon_env_builder.compatibility_neon_binpath is not None
    ), "the environment variable COMPATIBILITY_NEON_BIN is required"
    assert (
        neon_env_builder.compatibility_pg_distrib_dir is not None
    ), "the environment variable COMPATIBILITY_POSTGRES_DISTRIB_DIR is required"
    assert neon_env_builder.compatibility_neon_binpath is not None, (
        "the environment variable COMPATIBILITY_NEON_BIN is required"
    )
    assert neon_env_builder.compatibility_pg_distrib_dir is not None, (
        "the environment variable COMPATIBILITY_POSTGRES_DISTRIB_DIR is required"
    )
    neon_env_builder.neon_binpath = neon_env_builder.compatibility_neon_binpath
    neon_env_builder.pg_distrib_dir = neon_env_builder.compatibility_pg_distrib_dir

@@ -311,7 +316,9 @@ def test_forward_compatibility(
        else:
            raise

    assert not breaking_changes_allowed, "Breaking changes are allowed by ALLOW_FORWARD_COMPATIBILITY_BREAKAGE, but the test has passed without any breakage"
    assert not breaking_changes_allowed, (
        "Breaking changes are allowed by ALLOW_FORWARD_COMPATIBILITY_BREAKAGE, but the test has passed without any breakage"
    )


def check_neon_works(env: NeonEnv, test_output_dir: Path, sql_dump_path: Path, repo_dir: Path):

@@ -103,22 +103,22 @@ def test_compute_catalog(neon_simple_env: NeonEnv):
    objects = client.dbs_and_roles()

    # Assert that 'cloud_admin' role exists in the 'roles' list
    assert any(
        role["name"] == "cloud_admin" for role in objects["roles"]
    ), "The 'cloud_admin' role is missing"
    assert any(role["name"] == "cloud_admin" for role in objects["roles"]), (
        "The 'cloud_admin' role is missing"
    )

    # Assert that 'postgres' database exists in the 'databases' list
    assert any(
        db["name"] == "postgres" for db in objects["databases"]
    ), "The 'postgres' database is missing"
    assert any(db["name"] == "postgres" for db in objects["databases"]), (
        "The 'postgres' database is missing"
    )

    # Check other databases
    for test_db in TEST_DB_NAMES:
        db = next((db for db in objects["databases"] if db["name"] == test_db["name"]), None)
        assert db is not None, f"The '{test_db['name']}' database is missing"
        assert (
            db["owner"] == test_db["owner"]
        ), f"The '{test_db['name']}' database has incorrect owner"
        assert db["owner"] == test_db["owner"], (
            f"The '{test_db['name']}' database has incorrect owner"
        )

        ddl = client.database_schema(database=test_db["name"])

@@ -135,9 +135,9 @@ def test_compute_catalog(neon_simple_env: NeonEnv):
        client.database_schema(database="nonexistentdb")
        raise AssertionError("Expected HTTPError was not raised")
    except requests.exceptions.HTTPError as e:
        assert (
            e.response.status_code == 404
        ), f"Expected 404 status code, but got {e.response.status_code}"
        assert e.response.status_code == 404, (
            f"Expected 404 status code, but got {e.response.status_code}"
        )


def test_compute_create_drop_dbs_and_roles(neon_simple_env: NeonEnv):

@@ -13,21 +13,21 @@ import _jsonnet
import pytest
import requests
import yaml
from fixtures.endpoint.http import EndpointHttpClient
from fixtures.log_helper import log
from fixtures.metrics import parse_metrics
from fixtures.paths import BASE_DIR, COMPUTE_CONFIG_DIR
from fixtures.utils import wait_until
from prometheus_client.samples import Sample

if TYPE_CHECKING:
    from collections.abc import Callable
    from types import TracebackType
    from typing import Self, TypedDict

    from fixtures.endpoint.http import EndpointHttpClient
    from fixtures.neon_fixtures import NeonEnv
    from fixtures.pg_version import PgVersion
    from fixtures.port_distributor import PortDistributor
    from prometheus_client.samples import Sample

    class Metric(TypedDict):
        metric_name: str

@@ -1,6 +1,5 @@
from __future__ import annotations

from pathlib import Path
from typing import TYPE_CHECKING, cast

import pytest
@@ -9,6 +8,8 @@ from fixtures.metrics import parse_metrics
from fixtures.utils import wait_until

if TYPE_CHECKING:
    from pathlib import Path

    from fixtures.neon_fixtures import NeonEnv


@@ -1,8 +1,12 @@
from __future__ import annotations

from fixtures.neon_fixtures import NeonEnv
from typing import TYPE_CHECKING

from fixtures.utils import wait_until

if TYPE_CHECKING:
    from fixtures.neon_fixtures import NeonEnv


def test_compute_reconfigure(neon_simple_env: NeonEnv):
    """

@@ -2,8 +2,10 @@ from __future__ import annotations

import os
from contextlib import closing
from typing import TYPE_CHECKING

from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder
if TYPE_CHECKING:
    from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder


#

@@ -1,8 +1,12 @@
from __future__ import annotations

from fixtures.neon_fixtures import NeonEnv
from typing import TYPE_CHECKING

from fixtures.utils import query_scalar

if TYPE_CHECKING:
    from fixtures.neon_fixtures import NeonEnv


#
# Test CREATE USER to check shared catalog restore

@@ -1,21 +1,21 @@
from __future__ import annotations

from types import TracebackType
from typing import TYPE_CHECKING

import psycopg2
import pytest
from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonEnv, VanillaPostgres
from psycopg2.errors import UndefinedObject
from pytest_httpserver import HTTPServer
from werkzeug.wrappers.request import Request
from werkzeug.wrappers.response import Response

if TYPE_CHECKING:
    from types import TracebackType
    from typing import Any, Self

    from fixtures.httpserver import ListenAddress
    from fixtures.neon_fixtures import NeonEnv, VanillaPostgres
    from pytest_httpserver import HTTPServer
    from werkzeug.wrappers.request import Request


def handle_db(dbs, roles, operation):

@@ -3,7 +3,6 @@ from __future__ import annotations
import enum
import time
from collections import Counter
from collections.abc import Iterable
from dataclasses import dataclass
from enum import StrEnum
from typing import TYPE_CHECKING
@@ -18,14 +17,16 @@ from fixtures.neon_fixtures import (
    PgBin,
    wait_for_last_flush_lsn,
)
from fixtures.pageserver.http import PageserverHttpClient
from fixtures.pageserver.utils import wait_for_upload_queue_empty
from fixtures.remote_storage import RemoteStorageKind
from fixtures.utils import human_bytes, wait_until

if TYPE_CHECKING:
    from collections.abc import Iterable
    from typing import Any

    from fixtures.pageserver.http import PageserverHttpClient


GLOBAL_LRU_LOG_LINE = "tenant_min_resident_size-respecting LRU would not relieve pressure, evicting more following global LRU policy"

@@ -323,9 +324,9 @@ def finish_tenant_creation(

    layers = pageserver_http.layer_map_info(tenant_id, timeline_id)
    # log.info(f"{layers}")
    assert (
        len(layers.historic_layers) >= min_expected_layers
    ), "evictions happen at layer granularity, but we often assert at byte-granularity"
    assert len(layers.historic_layers) >= min_expected_layers, (
        "evictions happen at layer granularity, but we often assert at byte-granularity"
    )

    return pgbench_init_lsn

@@ -421,9 +422,9 @@ def test_pageserver_evicts_until_pressure_is_relieved(

    assert 0 <= actual_change, "nothing can load layers during this test"
    assert actual_change >= target, "must evict more than half"
    assert (
        response["Finished"]["assumed"]["projected_after"]["freed_bytes"] >= actual_change
    ), "report accurately evicted bytes"
    assert response["Finished"]["assumed"]["projected_after"]["freed_bytes"] >= actual_change, (
        "report accurately evicted bytes"
    )
    assert response["Finished"]["assumed"]["failed"]["count"] == 0, "zero failures expected"


@@ -448,18 +449,18 @@ def test_pageserver_respects_overridden_resident_size(
    large_tenant = max(du_by_timeline, key=du_by_timeline.__getitem__)
    small_tenant = min(du_by_timeline, key=du_by_timeline.__getitem__)
    assert du_by_timeline[large_tenant] > du_by_timeline[small_tenant]
    assert (
        du_by_timeline[large_tenant] - du_by_timeline[small_tenant] > 5 * env.layer_size
    ), "ensure this test will do more than 1 eviction"
    assert du_by_timeline[large_tenant] - du_by_timeline[small_tenant] > 5 * env.layer_size, (
        "ensure this test will do more than 1 eviction"
    )

    # Give the larger tenant a haircut while preventing the smaller tenant from getting one.
    # To prevent the smaller from getting a haircut, we set min_resident_size to its current size.
    # To ensure the larger tenant is getting a haircut, any non-zero `target` will do.
    min_resident_size = du_by_timeline[small_tenant]
    target = 1
    assert (
        du_by_timeline[large_tenant] > min_resident_size
    ), "ensure the larger tenant will get a haircut"
    assert du_by_timeline[large_tenant] > min_resident_size, (
        "ensure the larger tenant will get a haircut"
    )
    env.neon_env.storage_controller.pageserver_api().update_tenant_config(
        small_tenant[0], {"min_resident_size_override": min_resident_size}
    )
@@ -490,17 +491,17 @@ def test_pageserver_respects_overridden_resident_size(
    actual_change = total_on_disk - later_total_on_disk
    assert 0 <= actual_change, "nothing can load layers during this test"
    assert actual_change >= target, "eviction must always evict more than target"
    assert (
        response["Finished"]["assumed"]["projected_after"]["freed_bytes"] >= actual_change
    ), "report accurately evicted bytes"
    assert response["Finished"]["assumed"]["projected_after"]["freed_bytes"] >= actual_change, (
        "report accurately evicted bytes"
    )
    assert response["Finished"]["assumed"]["failed"]["count"] == 0, "zero failures expected"

    assert (
        later_du_by_timeline[small_tenant] == du_by_timeline[small_tenant]
    ), "small tenant sees no haircut"
    assert (
        later_du_by_timeline[large_tenant] < du_by_timeline[large_tenant]
    ), "large tenant gets a haircut"
    assert later_du_by_timeline[small_tenant] == du_by_timeline[small_tenant], (
        "small tenant sees no haircut"
    )
    assert later_du_by_timeline[large_tenant] < du_by_timeline[large_tenant], (
        "large tenant gets a haircut"
    )
    assert du_by_timeline[large_tenant] - later_du_by_timeline[large_tenant] >= target


@@ -579,9 +580,9 @@ def test_partial_evict_tenant(eviction_env: EvictionEnv, order: EvictionOrder):

    later_du_by_timeline = env.du_by_timeline(env.pageserver)
    for tenant, later_tenant_usage in later_du_by_timeline.items():
        assert (
            later_tenant_usage < du_by_timeline[tenant]
        ), "all tenants should have lost some layers"
        assert later_tenant_usage < du_by_timeline[tenant], (
            "all tenants should have lost some layers"
        )

    # with relative order what matters is the amount of layers, with a
    # fudge factor of whether the eviction bothers tenants with highest
@@ -665,9 +666,9 @@ def test_fast_growing_tenant(neon_env_builder: NeonEnvBuilder, pg_bin: PgBin, or
        ratio = after / originally
        ratios.append(ratio)

    assert (
        len(ratios) == 4
    ), "rest of the assertions expect 3 + 1 timelines, ratios, scales, all in order"
    assert len(ratios) == 4, (
        "rest of the assertions expect 3 + 1 timelines, ratios, scales, all in order"
    )
    log.info(f"{ratios}")

    if order == EvictionOrder.RELATIVE_ORDER_EQUAL:
@@ -829,9 +830,9 @@ def test_statvfs_pressure_min_avail_bytes(eviction_env: EvictionEnv):

    def more_than_min_avail_bytes_freed():
        post_eviction_total_size, _, _ = env.timelines_du(env.pageserver)
        assert (
            total_size - post_eviction_total_size >= min_avail_bytes
        ), f"we requested at least {min_avail_bytes} worth of free space"
        assert total_size - post_eviction_total_size >= min_avail_bytes, (
            f"we requested at least {min_avail_bytes} worth of free space"
        )

    wait_until(more_than_min_avail_bytes_freed, timeout=5)

@@ -878,6 +879,6 @@ def test_secondary_mode_eviction(eviction_env_ha: EvictionEnv):

    post_eviction_total_size, _, _ = env.timelines_du(ps_secondary)

    assert (
        total_size - post_eviction_total_size >= evict_bytes
    ), "we requested at least evict_bytes worth of free space"
    assert total_size - post_eviction_total_size >= evict_bytes, (
        "we requested at least evict_bytes worth of free space"
    )

@@ -3,17 +3,16 @@ from __future__ import annotations
import os
import shutil
import tarfile
from pathlib import Path
from typing import TYPE_CHECKING

import pytest
import zstandard
from fixtures.log_helper import log
from fixtures.metrics import parse_metrics
from pytest_httpserver import HTTPServer
from werkzeug.wrappers.response import Response

if TYPE_CHECKING:
    from pathlib import Path
    from typing import Any

    from fixtures.httpserver import ListenAddress
@@ -21,6 +20,7 @@ if TYPE_CHECKING:
        NeonEnvBuilder,
    )
    from fixtures.pg_version import PgVersion
    from pytest_httpserver import HTTPServer
    from werkzeug.wrappers.request import Request


@@ -1,10 +1,14 @@
from __future__ import annotations

from typing import TYPE_CHECKING

import pytest
from fixtures.neon_fixtures import NeonEnvBuilder
from fixtures.pg_version import PgVersion
from fixtures.utils import WITH_SANITIZERS, run_only_on_postgres

if TYPE_CHECKING:
    from fixtures.neon_fixtures import NeonEnvBuilder


@pytest.mark.parametrize(
    "sql_func",

@@ -1,12 +1,15 @@
from __future__ import annotations

from pathlib import Path
from typing import TYPE_CHECKING

import pytest
from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonEnv
from fixtures.utils import USE_LFC

if TYPE_CHECKING:
    from fixtures.neon_fixtures import NeonEnv


@pytest.mark.skipif(not USE_LFC, reason="LFC is disabled, skipping")
def test_explain_with_lfc_stats(neon_simple_env: NeonEnv):

@@ -1,6 +1,9 @@
from __future__ import annotations

from fixtures.neon_fixtures import NeonEnvBuilder
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from fixtures.neon_fixtures import NeonEnvBuilder


def test_fsm_truncate(neon_env_builder: NeonEnvBuilder):

@@ -1,7 +1,7 @@
from __future__ import annotations

import os
from pathlib import Path
from typing import TYPE_CHECKING

from fixtures.common_types import Lsn
from fixtures.log_helper import log
@@ -10,9 +10,13 @@ from fixtures.neon_fixtures import (
    PgBin,
    VanillaPostgres,
)
from fixtures.port_distributor import PortDistributor
from fixtures.utils import query_scalar, subprocess_capture

if TYPE_CHECKING:
    from pathlib import Path

    from fixtures.port_distributor import PortDistributor

num_rows = 1000


@@ -3,8 +3,8 @@ from __future__ import annotations
import asyncio
import concurrent.futures
import random
from typing import TYPE_CHECKING

from fixtures.common_types import TimelineId
from fixtures.log_helper import log
from fixtures.neon_fixtures import (
    Endpoint,
@@ -14,6 +14,9 @@ from fixtures.neon_fixtures import (
)
from fixtures.remote_storage import RemoteStorageKind

if TYPE_CHECKING:
    from fixtures.common_types import TimelineId

# Test configuration
#
# Create a table with {NUM_ROWS} rows, and perform {UPDATES_TO_PERFORM} random

@@ -6,17 +6,11 @@ import re
import shutil
import tarfile
from contextlib import closing
from pathlib import Path
from typing import TYPE_CHECKING

import pytest
from fixtures.common_types import Lsn, TenantId, TimelineId
from fixtures.log_helper import log
from fixtures.neon_fixtures import (
    Endpoint,
    NeonEnv,
    NeonEnvBuilder,
    PgBin,
)
from fixtures.pageserver.utils import (
    timeline_delete_wait_completed,
    wait_for_last_record_lsn,
@@ -24,6 +18,16 @@ from fixtures.pageserver.utils import (
from fixtures.remote_storage import RemoteStorageKind
from fixtures.utils import assert_pageserver_backups_equal, subprocess_capture

if TYPE_CHECKING:
    from pathlib import Path

    from fixtures.neon_fixtures import (
        Endpoint,
        NeonEnv,
        NeonEnvBuilder,
        PgBin,
    )


def test_import_from_vanilla(test_output_dir, pg_bin, vanilla_pg, neon_env_builder):
    # Put data in vanilla pg
@@ -179,7 +183,7 @@ def test_import_from_pageserver_multisegment(
    logical_size = env.pageserver.http_client().timeline_detail(env.initial_tenant, timeline)[
        "current_logical_size"
    ]
    log.info(f"timeline logical size = {logical_size / (1024 ** 2)}MB")
    log.info(f"timeline logical size = {logical_size / (1024**2)}MB")
    assert logical_size > 1024**3  # = 1GB

    tar_output_file = _import(num_rows, lsn, env, pg_bin, timeline, test_output_dir)

@@ -287,9 +287,9 @@ def test_pgdata_import_smoke(
        shard_ps = env.get_pageserver(shard["node_id"])
        result = shard_ps.timeline_scan_no_disposable_keys(shard["shard_id"], timeline_id)
        assert result.tally.disposable_count == 0
        assert (
            result.tally.not_disposable_count > 0
        ), "sanity check, each shard should have some data"
        assert result.tally.not_disposable_count > 0, (
            "sanity check, each shard should have some data"
        )

        #
        # validate that we can write

@@ -1,13 +1,17 @@
from __future__ import annotations

from collections.abc import Iterable
from dataclasses import dataclass
from typing import TYPE_CHECKING

from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonEnvBuilder, wait_for_last_flush_lsn
from fixtures.pageserver.http import HistoricLayerInfo, LayerMapInfo
from fixtures.utils import human_bytes, skip_in_debug_build

if TYPE_CHECKING:
    from collections.abc import Iterable

    from fixtures.pageserver.http import HistoricLayerInfo, LayerMapInfo


@skip_in_debug_build("debug run is unnecessarily slow")
def test_ingesting_large_batches_of_images(neon_env_builder: NeonEnvBuilder):
@@ -27,9 +31,9 @@ def test_ingesting_large_batches_of_images(neon_env_builder: NeonEnvBuilder):
    # bucket lower limits
    buckets = [0, minimum_initdb_size, minimum_good_layer_size, minimum_too_large_layer_size]

    assert (
        minimum_initdb_size < minimum_good_layer_size
    ), "keep checkpoint_distance higher than the initdb size (find it by experimenting)"
    assert minimum_initdb_size < minimum_good_layer_size, (
        "keep checkpoint_distance higher than the initdb size (find it by experimenting)"
    )

    env = neon_env_builder.init_start(
        initial_tenant_conf={
@@ -57,9 +61,9 @@ def test_ingesting_large_batches_of_images(neon_env_builder: NeonEnvBuilder):
    assert size is not None
    assert isinstance(size[0], int)
    log.info(f"gin index size: {human_bytes(size[0])}")
    assert (
        size[0] > checkpoint_distance * 3
    ), f"gin index is not large enough: {human_bytes(size[0])}"
    assert size[0] > checkpoint_distance * 3, (
        f"gin index is not large enough: {human_bytes(size[0])}"
    )
    wait_for_last_flush_lsn(env, ep, env.initial_tenant, env.initial_timeline)

    ps_http = env.pageserver.http_client()
@@ -91,13 +95,13 @@ def test_ingesting_large_batches_of_images(neon_env_builder: NeonEnvBuilder):
    log.info("non-cumulative layer size distribution after compaction:")
    print_layer_size_histogram(post_compact)

    assert (
        post_ingest.counts[3] == 0
    ), f"there should be no layers larger than 2*checkpoint_distance ({human_bytes(2*checkpoint_distance)})"
    assert post_ingest.counts[3] == 0, (
        f"there should be no layers larger than 2*checkpoint_distance ({human_bytes(2 * checkpoint_distance)})"
    )
    assert post_ingest.counts[1] == 1, "expect one smaller layer for initdb"
    assert (
        post_ingest.counts[0] <= 1
    ), "expect at most one tiny layer from shutting down the endpoint"
    assert post_ingest.counts[0] <= 1, (
        "expect at most one tiny layer from shutting down the endpoint"
    )

    # just make sure we don't have trouble splitting the layers apart
    assert post_compact.counts[3] == 0

@@ -2,9 +2,12 @@ from __future__ import annotations

import os
import time
from typing import TYPE_CHECKING

from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonEnvBuilder

if TYPE_CHECKING:
    from fixtures.neon_fixtures import NeonEnvBuilder


# This test creates large number of tables which cause large catalog.

@@ -59,70 +59,72 @@ def test_basic_eviction(neon_env_builder: NeonEnvBuilder):
        (parse_layer_file_name(path.name), path)
        for path in env.pageserver.list_layers(tenant_id, timeline_id)
    )
    assert (
        len(initial_local_layers) > 1
    ), f"Should create multiple layers for timeline, but got {initial_local_layers}"
    assert len(initial_local_layers) > 1, (
        f"Should create multiple layers for timeline, but got {initial_local_layers}"
    )

    # Compare layer map dump with the local layers, ensure everything's present locally and matches
    initial_layer_map_info = client.layer_map_info(tenant_id=tenant_id, timeline_id=timeline_id)
    assert (
        not initial_layer_map_info.in_memory_layers
    ), "Should have no in memory layers after flushing"
    assert len(initial_local_layers) == len(
        initial_layer_map_info.historic_layers
    ), "Should have the same layers in memory and on disk"
    assert not initial_layer_map_info.in_memory_layers, (
        "Should have no in memory layers after flushing"
    )
    assert len(initial_local_layers) == len(initial_layer_map_info.historic_layers), (
        "Should have the same layers in memory and on disk"
    )

    for returned_layer in initial_layer_map_info.historic_layers:
        assert (
            returned_layer.kind == "Delta"
        ), f"Did not create and expect image layers, but got {returned_layer}"
        assert (
            not returned_layer.remote
        ), f"All created layers should be present locally, but got {returned_layer}"
        assert returned_layer.kind == "Delta", (
            f"Did not create and expect image layers, but got {returned_layer}"
        )
        assert not returned_layer.remote, (
            f"All created layers should be present locally, but got {returned_layer}"
        )

        returned_layer_name = parse_layer_file_name(returned_layer.layer_file_name)
        assert (
            returned_layer_name in initial_local_layers
        ), f"Did not find returned layer {returned_layer_name} in local layers {list(initial_local_layers.keys())}"
        assert returned_layer_name in initial_local_layers, (
            f"Did not find returned layer {returned_layer_name} in local layers {list(initial_local_layers.keys())}"
        )

        local_layer_path = (
            env.pageserver.timeline_dir(tenant_id, timeline_id)
            / initial_local_layers[returned_layer_name]
        )
        assert (
            returned_layer.layer_file_size == local_layer_path.stat().st_size
        ), f"Returned layer {returned_layer} has a different file size than local layer {local_layer_path}"
        assert returned_layer.layer_file_size == local_layer_path.stat().st_size, (
            f"Returned layer {returned_layer} has a different file size than local layer {local_layer_path}"
        )

    # Detach all layers, ensre they are not in the local FS, but are still dumped as part of the layer map
    for local_layer_name, local_layer_path in initial_local_layers.items():
        client.evict_layer(
            tenant_id=tenant_id, timeline_id=timeline_id, layer_name=local_layer_path.name
        )
        assert not env.pageserver.layer_exists(
            tenant_id, timeline_id, local_layer_name
        ), f"Did not expect to find {local_layer_name} layer after evicting"
        assert not env.pageserver.layer_exists(tenant_id, timeline_id, local_layer_name), (
            f"Did not expect to find {local_layer_name} layer after evicting"
        )

    empty_layers = env.pageserver.list_layers(tenant_id, timeline_id)
    assert not empty_layers, f"After evicting all layers, timeline {tenant_id}/{timeline_id} should have no layers locally, but got: {empty_layers}"
    assert not empty_layers, (
        f"After evicting all layers, timeline {tenant_id}/{timeline_id} should have no layers locally, but got: {empty_layers}"
    )

    evicted_layer_map_info = client.layer_map_info(tenant_id=tenant_id, timeline_id=timeline_id)
    assert (
        not evicted_layer_map_info.in_memory_layers
    ), "Should have no in memory layers after flushing and evicting"
    assert len(initial_local_layers) == len(
        evicted_layer_map_info.historic_layers
    ), "Should have the same layers in memory and on disk initially"
    assert not evicted_layer_map_info.in_memory_layers, (
        "Should have no in memory layers after flushing and evicting"
    )
    assert len(initial_local_layers) == len(evicted_layer_map_info.historic_layers), (
        "Should have the same layers in memory and on disk initially"
    )
    for returned_layer in evicted_layer_map_info.historic_layers:
        assert (
            returned_layer.kind == "Delta"
        ), f"Did not create and expect image layers, but got {returned_layer}"
        assert (
            returned_layer.remote
        ), f"All layers should be evicted and not present locally, but got {returned_layer}"
        assert returned_layer.kind == "Delta", (
            f"Did not create and expect image layers, but got {returned_layer}"
        )
        assert returned_layer.remote, (
            f"All layers should be evicted and not present locally, but got {returned_layer}"
        )
        returned_layer_name = parse_layer_file_name(returned_layer.layer_file_name)
        assert (
            returned_layer_name in initial_local_layers
        ), f"Did not find returned layer {returned_layer} in local layers {initial_local_layers}"
        assert returned_layer_name in initial_local_layers, (
            f"Did not find returned layer {returned_layer} in local layers {initial_local_layers}"
        )

    # redownload all evicted layers and ensure the initial state is restored
    for local_layer_name, _local_layer_path in initial_local_layers.items():
@@ -142,15 +144,15 @@ def test_basic_eviction(neon_env_builder: NeonEnvBuilder):
        (parse_layer_file_name(path.name), path)
        for path in env.pageserver.list_layers(tenant_id, timeline_id)
    )
    assert (
        redownloaded_layers == initial_local_layers
    ), "Should have the same layers locally after redownloading the evicted layers"
    assert redownloaded_layers == initial_local_layers, (
        "Should have the same layers locally after redownloading the evicted layers"
    )
    redownloaded_layer_map_info = client.layer_map_info(
        tenant_id=tenant_id, timeline_id=timeline_id
    )
    assert (
        redownloaded_layer_map_info == initial_layer_map_info
    ), "Should have the same layer map after redownloading the evicted layers"
    assert redownloaded_layer_map_info == initial_layer_map_info, (
        "Should have the same layer map after redownloading the evicted layers"
    )


def test_gc_of_remote_layers(neon_env_builder: NeonEnvBuilder):
@@ -266,9 +268,9 @@ def test_gc_of_remote_layers(neon_env_builder: NeonEnvBuilder):
    resident_physical_size_metric = ps_http.get_timeline_metric(
        tenant_id, timeline_id, "pageserver_resident_physical_size"
    )
    assert (
        resident_physical_size_metric == 0
    ), "ensure that resident_physical_size metric is zero"
    assert resident_physical_size_metric == 0, (
        "ensure that resident_physical_size metric is zero"
    )
    assert resident_physical_size_metric == sum(
        layer.layer_file_size for layer in info.historic_layers if not layer.remote
    ), "ensure that resident_physical_size metric corresponds to layer map dump"

@@ -13,9 +13,9 @@ def test_image_layer_writer_fail_before_finish(neon_simple_env: NeonEnv):
    tenant_id, timeline_id = env.create_tenant(
        conf={
            # small checkpoint distance to create more delta layer files
            "checkpoint_distance": f"{1024 ** 2}",
            "checkpoint_distance": f"{1024**2}",
            # set the target size to be large to allow the image layer to cover the whole key space
            "compaction_target_size": f"{1024 ** 3}",
            "compaction_target_size": f"{1024**3}",
            # tweak the default settings to allow quickly create image layers and L1 layers
            "compaction_period": "1 s",
            "compaction_threshold": "2",
@@ -44,9 +44,9 @@ def test_image_layer_writer_fail_before_finish(neon_simple_env: NeonEnv):
        )
    )

    assert (
        len(new_temp_layer_files) == 0
    ), "pageserver should clean its temporary new image layer files on failure"
    assert len(new_temp_layer_files) == 0, (
        "pageserver should clean its temporary new image layer files on failure"
    )


@pytest.mark.skip("See https://github.com/neondatabase/neon/issues/2703")
@@ -57,9 +57,9 @@ def test_delta_layer_writer_fail_before_finish(neon_simple_env: NeonEnv):
    tenant_id, timeline_id = env.create_tenant(
        conf={
            # small checkpoint distance to create more delta layer files
            "checkpoint_distance": f"{1024 ** 2}",
            "checkpoint_distance": f"{1024**2}",
            # set the target size to be large to allow the image layer to cover the whole key space
            "compaction_target_size": f"{1024 ** 3}",
            "compaction_target_size": f"{1024**3}",
            # tweak the default settings to allow quickly create image layers and L1 layers
            "compaction_period": "1 s",
            "compaction_threshold": "2",
@@ -90,6 +90,6 @@ def test_delta_layer_writer_fail_before_finish(neon_simple_env: NeonEnv):
        )
    )

    assert (
        len(new_temp_layer_files) == 0
    ), "pageserver should clean its temporary new delta layer files on failure"
    assert len(new_temp_layer_files) == 0, (
        "pageserver should clean its temporary new delta layer files on failure"
    )

@@ -127,9 +127,9 @@ def test_issue_5878(neon_env_builder: NeonEnvBuilder, attach_mode: str):

    ip = get_index_part()
    assert len(ip.layer_metadata.keys())
    assert (
        ip.disk_consistent_lsn < last_record_lsn
    ), "sanity check for what above loop is supposed to do"
    assert ip.disk_consistent_lsn < last_record_lsn, (
        "sanity check for what above loop is supposed to do"
    )

    # create the image layer from the future
    env.storage_controller.pageserver_api().update_tenant_config(
@@ -233,9 +233,9 @@ def test_issue_5878(neon_env_builder: NeonEnvBuilder, attach_mode: str):
        start = time.monotonic()
        while True:
            post_stat = future_layer_path.stat()
            assert (
                pre_stat.st_mtime == post_stat.st_mtime
            ), "observed PUT overtake the stucked DELETE => bug isn't fixed yet"
            assert pre_stat.st_mtime == post_stat.st_mtime, (
                "observed PUT overtake the stucked DELETE => bug isn't fixed yet"
            )
            if time.monotonic() - start > max_race_opportunity_window:
                log.info(
                    "a correct implementation would never let the later PUT overtake the earlier DELETE"

@@ -1,12 +1,15 @@
from __future__ import annotations

import time
from typing import TYPE_CHECKING

import pytest
from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonEnv
from fixtures.utils import USE_LFC

if TYPE_CHECKING:
    from fixtures.neon_fixtures import NeonEnv


@pytest.mark.timeout(600)
@pytest.mark.skipif(not USE_LFC, reason="LFC is disabled, skipping")

@@ -5,12 +5,15 @@ import re
import subprocess
import threading
import time
from typing import TYPE_CHECKING

import pytest
from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonEnv, PgBin
from fixtures.utils import USE_LFC

if TYPE_CHECKING:
    from fixtures.neon_fixtures import NeonEnv, PgBin


@pytest.mark.timeout(600)
@pytest.mark.skipif(not USE_LFC, reason="LFC is disabled, skipping")

@@ -2,12 +2,15 @@ from __future__ import annotations

import time
from pathlib import Path
from typing import TYPE_CHECKING

import pytest
from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonEnv
from fixtures.utils import USE_LFC, query_scalar

if TYPE_CHECKING:
    from fixtures.neon_fixtures import NeonEnv


@pytest.mark.skipif(not USE_LFC, reason="LFC is disabled, skipping")
def test_lfc_working_set_approximation(neon_simple_env: NeonEnv):
Some files were not shown because too many files have changed in this diff