Commit "wip" (mirror of https://github.com/neondatabase/neon.git)
@@ -485,7 +485,7 @@ class NeonEnvBuilder:

        self.env = None

-        self.__setattr__ = self.my_setattr
+        self.armed = True

    def __getattr__(self, attribute: str) -> Any:
        if self.storage_env_builder is not None:
@@ -493,19 +493,24 @@ class NeonEnvBuilder:
        else:
            raise AttributeError(f"NeonEnvBuilder doesn't have attribute '{attribute}'")

-    def my_setattr(self, attribute: str, value: Any) -> Any:
+    def __setattr__(self, attribute: str, value: Any) -> Any:
+        if attribute == 'armed' or self.__dict__.get('armed') is not True:
+            self.__dict__[attribute] = value
+            return
+
        if self.storage_env_builder is not None:
-            return self.server_env_builder.__setattribute__(attribute, value)
+            return setattr(self.storage_env_builder, attribute, value)
        else:
            raise AttributeError(f"NeonEnvBuilder doesn't have attribute '{attribute}'")

    def init_configs(self, default_remote_storage_if_missing: bool = True) -> NeonEnv:
+        self.armed = False
        # Cannot create more than one environment from one builder
        assert self.env is None, "environment already initialized"

        env = NeonEnv(self)
        if self.storage_env_builder is not None:
-            self.storage_env = self.storage_env_builder.init_configs(neon_cli, default_remote_storage_if_missing=default_remote_storage_if_missing)
+            self.storage_env = self.storage_env_builder.init_configs(default_remote_storage_if_missing=default_remote_storage_if_missing)
            env.storage_env = self.storage_env
        self.env = env
        return self.env
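
For context, a minimal, self-contained sketch of the armed-delegation pattern that the __getattr__/__setattr__ methods above rely on. All names below (Wrapper, Inner, num_safekeepers) are illustrative only and not part of this commit; the sketch just shows why init_configs can set self.armed = False before assigning self.env, so those writes stay in the builder's own __dict__ instead of being forwarded.

# Illustrative sketch only, not code from this repository.
from typing import Any, Optional


class Inner:
    def __init__(self) -> None:
        self.num_safekeepers = 1


class Wrapper:
    def __init__(self, inner: Optional[Inner]) -> None:
        # While 'armed' is absent or False, assignments land in our own __dict__;
        # only after arming do they get forwarded to 'inner'.
        self.inner = inner
        self.armed = True

    def __getattr__(self, attribute: str) -> Any:
        # Called only when normal lookup fails: forward reads to 'inner'.
        inner = self.__dict__.get("inner")
        if inner is not None:
            return getattr(inner, attribute)
        raise AttributeError(f"Wrapper doesn't have attribute '{attribute}'")

    def __setattr__(self, attribute: str, value: Any) -> None:
        if attribute == "armed" or self.__dict__.get("armed") is not True:
            self.__dict__[attribute] = value
            return
        if self.__dict__.get("inner") is not None:
            setattr(self.__dict__["inner"], attribute, value)
            return
        raise AttributeError(f"Wrapper doesn't have attribute '{attribute}'")


w = Wrapper(Inner())
w.num_safekeepers = 3  # forwarded to the Inner instance because 'armed' is True
assert w.inner.num_safekeepers == 3
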
@@ -1554,10 +1559,10 @@ def neon_shared_storage_env(
    pg_distrib_dir: Path,
    request: FixtureRequest,
    neon_shared_storage_env_cache: Dict[str, NeonStorageEnv],
    shared_initdb_cache_dir: Path,
) -> Iterator[NeonEnv]:

    repo_dir = top_output_dir / f"shared_repo-{build_type}"

    if neon_shared_storage_env_cache.get(build_type) is None:
        # Create the environment in the per-test output directory
        shutil.rmtree(repo_dir, ignore_errors=True)
@@ -1673,6 +1678,7 @@ def neon_simple_env(
        shared_initdb_cache_dir=shared_initdb_cache_dir
    ) as storage_env_builder:
        storage_env = storage_env_builder.init_configs()
        storage_env.start()
        with NeonEnvBuilder(
            top_output_dir=top_output_dir,
            repo_dir=repo_dir,
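
For context, a minimal sketch of the cache-per-build-type fixture shape that neon_shared_storage_env and neon_simple_env follow above. Every name here (FakeStorageEnv, shared_storage_env, the build_type params) is hypothetical; the real fixtures build and start a NeonStorageEnv as shown in the hunks above, and this sketch only illustrates the caching pattern.

# Illustrative sketch only: hypothetical fixtures, not this repository's code.
from typing import Dict, Iterator

import pytest


class FakeStorageEnv:
    """Stand-in for an expensive-to-start shared environment."""

    def __init__(self, build_type: str) -> None:
        self.build_type = build_type


@pytest.fixture(params=["debug", "release"])
def build_type(request) -> str:
    return request.param


@pytest.fixture(scope="session")
def shared_storage_env_cache() -> Dict[str, FakeStorageEnv]:
    # One dict per test session; environments are created lazily below.
    return {}


@pytest.fixture
def shared_storage_env(
    build_type: str,
    shared_storage_env_cache: Dict[str, FakeStorageEnv],
) -> Iterator[FakeStorageEnv]:
    # Reuse the environment for this build type if one exists already,
    # otherwise create it once and keep it around for later tests.
    env = shared_storage_env_cache.get(build_type)
    if env is None:
        env = FakeStorageEnv(build_type)
        shared_storage_env_cache[build_type] = env
    yield env
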
@@ -4254,7 +4260,7 @@ class Endpoint(PgProtocol, LogUtils):
        self.http_port = http_port
        self.check_stop_result = check_stop_result
        # passed to endpoint create and endpoint reconfigure
-        self.active_safekeepers: List[int] = list(map(lambda sk: sk.id, env.safekeepers))
+        self.active_safekeepers: List[int] = list(map(lambda sk: sk.id, env.storage_env.safekeepers))
        # path to conf is <repo_dir>/endpoints/<endpoint_id>/pgdata/postgresql.conf XXX

        # Semaphore is set to 1 when we start, and acquire'd back to zero when we stop
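
The comment above describes tracking running state with a semaphore. Purely as an illustration (a hypothetical standalone class, not the Endpoint implementation), a semaphore used that way looks like this:

# Illustrative sketch only, not code from this repository.
import threading


class RunningFlag:
    def __init__(self) -> None:
        # 0 permits while stopped; release() on start brings it to 1,
        # acquire() on stop takes it back to 0.
        self._running = threading.Semaphore(0)

    def start(self) -> None:
        self._running.release()

    def stop(self) -> None:
        # Blocks unless a prior start() released the permit.
        self._running.acquire()

    def is_running(self) -> bool:
        # Non-blocking probe: take the permit if present, then put it back.
        if self._running.acquire(blocking=False):
            self._running.release()
            return True
        return False


flag = RunningFlag()
flag.start()
assert flag.is_running()
flag.stop()
assert not flag.is_running()
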
@@ -6,8 +6,8 @@ from fixtures.neon_fixtures import (
)


-def test_aux_v2_config_switch(neon_env_builder: NeonEnvBuilder, vanilla_pg):
-    env = neon_env_builder.init_start()
+def test_aux_v2_config_switch(neon_shared_env: NeonEnv, vanilla_pg):
+    env = neon_shared_env
    endpoint = env.endpoints.create_start("main")
    client = env.pageserver.http_client()
@@ -46,11 +46,11 @@ from fixtures.utils import query_scalar
# Because the delta layer D covering lsn1 is corrupted, creating a branch
# starting from lsn1 should return an error as follows:
# could not find data for key ... at LSN ..., for request at LSN ...
-def test_branch_and_gc(neon_simple_env: NeonEnv, build_type: str):
+def test_branch_and_gc(neon_shared_env: NeonEnv, build_type: str):
    if build_type == "debug":
        pytest.skip("times out in debug builds")

-    env = neon_simple_env
+    env = neon_shared_env
    pageserver_http_client = env.pageserver.http_client()

    tenant, _ = env.neon_cli.create_tenant(
@@ -116,8 +116,8 @@ def test_branch_and_gc(neon_simple_env: NeonEnv, build_type: str):
# and prevent creating branches with invalid starting LSNs.
#
# For more details, see discussion in https://github.com/neondatabase/neon/pull/2101#issuecomment-1185273447.
-def test_branch_creation_before_gc(neon_simple_env: NeonEnv):
-    env = neon_simple_env
+def test_branch_creation_before_gc(neon_shared_env: NeonEnv):
+    env = neon_shared_env
    pageserver_http_client = env.pageserver.http_client()

    error_regexes = [
@@ -33,7 +33,7 @@ from requests import RequestException
@pytest.mark.parametrize("scale", get_scales_matrix(1))
@pytest.mark.parametrize("ty", ["cascade", "flat"])
def test_branching_with_pgbench(
-    neon_simple_env: NeonEnv, pg_bin: PgBin, n_branches: int, scale: int, ty: str
+    neon_shared_env: NeonEnv, pg_bin: PgBin, n_branches: int, scale: int, ty: str
):
-    env = neon_simple_env
+    env = neon_shared_env
@@ -2,9 +2,8 @@ from fixtures.metrics import parse_metrics
from fixtures.neon_fixtures import NeonEnvBuilder, NeonProxy


-def test_build_info_metric(neon_env_builder: NeonEnvBuilder, link_proxy: NeonProxy):
-    neon_env_builder.num_safekeepers = 1
-    env = neon_env_builder.init_start()
+def test_build_info_metric(neon_shared_env: NeonEnv, link_proxy: NeonProxy):
+    env = neon_shared_env

    parsed_metrics = {}
@@ -9,8 +9,8 @@ from fixtures.utils import query_scalar
#
# Test compute node start after clog truncation
#
-def test_clog_truncate(neon_simple_env: NeonEnv):
-    env = neon_simple_env
+def test_clog_truncate(neon_shared_env: NeonEnv):
+    env = neon_shared_env

    # set aggressive autovacuum to make sure that truncation will happen
    config = [
@@ -1,8 +1,8 @@
from fixtures.neon_fixtures import NeonEnvBuilder, flush_ep_to_pageserver


-def do_combocid_op(neon_env_builder: NeonEnvBuilder, op):
-    env = neon_env_builder.init_start()
+def do_combocid_op(neon_shared_env: NeonEnv, op):
+    env = neon_shared_env
    endpoint = env.endpoints.create_start(
        "main",
        config_lines=[
@@ -49,20 +49,20 @@ def do_combocid_op(neon_env_builder: NeonEnvBuilder, op):
    )


-def test_combocid_delete(neon_env_builder: NeonEnvBuilder):
-    do_combocid_op(neon_env_builder, "delete from t")
+def test_combocid_delete(neon_shared_env: NeonEnv):
+    do_combocid_op(neon_shared_env, "delete from t")


-def test_combocid_update(neon_env_builder: NeonEnvBuilder):
-    do_combocid_op(neon_env_builder, "update t set val=val+1")
+def test_combocid_update(neon_shared_env: NeonEnv):
+    do_combocid_op(neon_shared_env, "update t set val=val+1")


-def test_combocid_lock(neon_env_builder: NeonEnvBuilder):
-    do_combocid_op(neon_env_builder, "select * from t for update")
+def test_combocid_lock(neon_shared_env: NeonEnv):
+    do_combocid_op(neon_shared_env, "select * from t for update")


-def test_combocid_multi_insert(neon_env_builder: NeonEnvBuilder):
-    env = neon_env_builder.init_start()
+def test_combocid_multi_insert(neon_shared_env: NeonEnv):
+    env = neon_shared_env
    endpoint = env.endpoints.create_start(
        "main",
        config_lines=[
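
Each wrapper above must forward the exact fixture object it declared as a parameter (neon_shared_env), since do_combocid_op is a plain helper rather than a fixture. Purely as an illustration, with hypothetical names throughout, the helper-plus-wrapper pattern looks like this:

# Illustrative sketch only: hypothetical fixture and helper names.
import pytest


class FakeEnv:
    def run_sql(self, sql: str) -> str:
        return f"ran: {sql}"


@pytest.fixture
def shared_env() -> FakeEnv:
    return FakeEnv()


def do_op(shared_env: FakeEnv, op: str) -> str:
    # The helper is not a fixture itself; it just uses whatever env it is given.
    return shared_env.run_sql(op)


def test_delete(shared_env: FakeEnv):
    # Pass the fixture value through; referring to any other, undefined name
    # here would raise NameError when the test runs.
    assert do_op(shared_env, "delete from t") == "ran: delete from t"
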
@@ -112,8 +112,8 @@ def test_combocid_multi_insert(neon_env_builder: NeonEnvBuilder):
    )


-def test_combocid(neon_env_builder: NeonEnvBuilder):
-    env = neon_env_builder.init_start()
+def test_combocid(neon_shared_env: NeonEnv):
+    env = neon_shared_env
    endpoint = env.endpoints.create_start("main")

    conn = endpoint.connect()
@@ -2,8 +2,8 @@ import requests
from fixtures.neon_fixtures import NeonEnv


-def test_compute_catalog(neon_simple_env: NeonEnv):
-    env = neon_simple_env
+def test_compute_catalog(neon_shared_env: NeonEnv):
+    env = neon_shared_env

    endpoint = env.endpoints.create_start("main", config_lines=["log_min_messages=debug1"])
    client = endpoint.http_client()
@@ -29,7 +29,7 @@ def test_config(neon_simple_env: NeonEnv):
            # check that config change was applied
            assert cur.fetchone() == ("debug1",)

-def test_shared_config1(neon_shared_env: NeonEnv):
+def test_shared_config(neon_shared_env: NeonEnv):
    env = neon_shared_env

    # change config
@@ -51,29 +51,6 @@ def test_shared_config1(neon_shared_env: NeonEnv):
            # check that config change was applied
            assert cur.fetchone() == ("debug1",)

-def test_shared_config2(neon_shared_env: NeonEnv):
-    env = neon_shared_env
-
-    # change config
-    endpoint = env.endpoints.create_start("main", config_lines=["log_min_messages=debug1"])
-
-    with closing(endpoint.connect()) as conn:
-        with conn.cursor() as cur:
-            cur.execute(
-                """
-                SELECT setting
-                FROM pg_settings
-                WHERE
-                    source != 'default'
-                    AND source != 'override'
-                    AND name = 'log_min_messages'
-                """
-            )
-
-            # check that config change was applied
-            assert cur.fetchone() == ("debug1",)
-
-
#
# Test that reordering of safekeepers does not restart walproposer
#
@@ -12,8 +12,8 @@ from fixtures.utils import query_scalar
# Test CREATE DATABASE when there have been relmapper changes
#
@pytest.mark.parametrize("strategy", ["file_copy", "wal_log"])
-def test_createdb(neon_simple_env: NeonEnv, strategy: str):
-    env = neon_simple_env
+def test_createdb(neon_shared_env: NeonEnv, strategy: str):
+    env = neon_shared_env
    if env.pg_version == PgVersion.V14 and strategy == "wal_log":
        pytest.skip("wal_log strategy not supported on PostgreSQL 14")
@@ -58,8 +58,8 @@ def test_createdb(neon_simple_env: NeonEnv, strategy: str):
#
# Test DROP DATABASE
#
-def test_dropdb(neon_simple_env: NeonEnv, test_output_dir):
-    env = neon_simple_env
+def test_dropdb(neon_shared_env: NeonEnv, test_output_dir):
+    env = neon_shared_env
    endpoint = env.endpoints.create_start("main")

    with endpoint.cursor() as cur:
@@ -5,8 +5,8 @@ from fixtures.utils import query_scalar
#
# Test CREATE USER to check shared catalog restore
#
-def test_createuser(neon_simple_env: NeonEnv):
-    env = neon_simple_env
+def test_createuser(neon_shared_env: NeonEnv):
+    env = neon_shared_env
    endpoint = env.endpoints.create_start("main")

    with endpoint.cursor() as cur:
@@ -10,11 +10,11 @@ from fixtures.neon_fixtures import NeonEnvBuilder
        "💣", # calls `trigger_segfault` internally
    ],
)
-def test_endpoint_crash(neon_env_builder: NeonEnvBuilder, sql_func: str):
+def test_endpoint_crash(neon_shared_env: NeonEnv, sql_func: str):
    """
    Test that triggering crash from neon_test_utils crashes the endpoint
    """
-    env = neon_env_builder.init_start()
+    env = neon_shared_env
    env.neon_cli.create_branch("test_endpoint_crash")
    endpoint = env.endpoints.create_start("test_endpoint_crash")
@@ -1,8 +1,8 @@
-from fixtures.neon_fixtures import NeonEnvBuilder
+from fixtures.neon_fixtures import NeonEnv


-def test_fsm_truncate(neon_env_builder: NeonEnvBuilder):
-    env = neon_env_builder.init_start()
+def test_fsm_truncate(neon_shared_env: NeonEnv):
+    env = neon_shared_env
    env.neon_cli.create_branch("test_fsm_truncate")
    endpoint = env.endpoints.create_start("test_fsm_truncate")
    endpoint.safe_psql(
@@ -16,12 +16,12 @@ num_rows = 1000

# Ensure that regular postgres can start from fullbackup
def test_fullbackup(
-    neon_env_builder: NeonEnvBuilder,
+    neon_shared_env: NeonEnv,
    pg_bin: PgBin,
    port_distributor: PortDistributor,
    test_output_dir: Path,
):
-    env = neon_env_builder.init_start()
+    env = neon_shared_env

    # endpoint needs to be alive until the fullbackup so that we have
    # prev_record_lsn for the vanilla_pg to start in read-write mode
@@ -87,8 +87,8 @@ def test_hot_standby(neon_simple_env: NeonEnv):
    sk_http.configure_failpoints(("sk-send-wal-replica-sleep", "off"))


-def test_2_replicas_start(neon_simple_env: NeonEnv):
-    env = neon_simple_env
+def test_2_replicas_start(neon_shared_env: NeonEnv):
+    env = neon_shared_env

    with env.endpoints.create_start(
        branch_name="main",
@@ -286,8 +286,8 @@ def test_hot_standby_feedback(neon_env_builder: NeonEnvBuilder, pg_bin: PgBin):

# Test race condition between WAL replay and backends performing queries
# https://github.com/neondatabase/neon/issues/7791
-def test_replica_query_race(neon_simple_env: NeonEnv):
-    env = neon_simple_env
+def test_replica_query_race(neon_shared_env: NeonEnv):
+    env = neon_shared_env

    primary_ep = env.endpoints.create_start(
        branch_name="main",
@@ -12,8 +12,8 @@ from fixtures.neon_fixtures import NeonEnvBuilder
# to large (several gigabytes) layer files (both ephemeral and delta layers).
# It may cause problems with uploading to S3 and also degrade performance because of ephemeral file swapping.
#
-def test_large_schema(neon_env_builder: NeonEnvBuilder):
-    env = neon_env_builder.init_start()
+def test_large_schema(neon_shared_env: NeonEnv):
+    env = neon_shared_env

    endpoint = env.endpoints.create_start("main")
@@ -14,8 +14,8 @@ from fixtures.neon_fixtures import NeonEnv, PgBin
# Test branching, when a transaction is in prepared state
#
@pytest.mark.timeout(600)
-def test_lfc_resize(neon_simple_env: NeonEnv, pg_bin: PgBin):
-    env = neon_simple_env
+def test_lfc_resize(neon_shared_env: NeonEnv, pg_bin: PgBin):
+    env = neon_shared_env
    endpoint = env.endpoints.create_start(
        "main",
        config_lines=[
@@ -9,8 +9,8 @@ from fixtures.neon_fixtures import NeonEnvBuilder
from fixtures.utils import query_scalar


-def test_local_file_cache_unlink(neon_env_builder: NeonEnvBuilder):
-    env = neon_env_builder.init_start()
+def test_local_file_cache_unlink(neon_shared_env: NeonEnv):
+    env = neon_shared_env

    cache_dir = os.path.join(env.repo_dir, "file_cache")
    os.mkdir(cache_dir)
@@ -24,13 +24,13 @@ def assert_lsn_lease_granted(result, with_lease: bool):


@pytest.mark.parametrize("with_lease", [True, False])
-def test_lsn_mapping(neon_env_builder: NeonEnvBuilder, with_lease: bool):
+def test_lsn_mapping(neon_shared_env: NeonEnv, with_lease: bool):
    """
    Test pageserver get_lsn_by_timestamp API.

    :param with_lease: Whether to get a lease associated with returned LSN.
    """
-    env = neon_env_builder.init_start()
+    env = neon_shared_env

    tenant_id, _ = env.neon_cli.create_tenant(
        conf={
@@ -7,8 +7,8 @@ if TYPE_CHECKING:
    from fixtures.neon_fixtures import NeonEnv


-def test_migrations(neon_simple_env: NeonEnv):
-    env = neon_simple_env
+def test_migrations(neon_shared_env: NeonEnv):
+    env = neon_shared_env

    endpoint = env.endpoints.create("main")
    endpoint.respec(skip_pg_catalog_updates=False)
@@ -12,8 +12,8 @@ from fixtures.utils import query_scalar
# is enough to verify that the WAL records are handled correctly
# in the pageserver.
#
-def test_multixact(neon_simple_env: NeonEnv, test_output_dir):
-    env = neon_simple_env
+def test_multixact(neon_shared_env: NeonEnv, test_output_dir):
+    env = neon_shared_env
    endpoint = env.endpoints.create_start("main")

    cur = endpoint.connect().cursor()
@@ -4,8 +4,8 @@ from fixtures.pg_version import PgVersion
from fixtures.utils import wait_until


-def test_neon_superuser(neon_simple_env: NeonEnv, pg_version: PgVersion):
-    env = neon_simple_env
+def test_neon_superuser(neon_shared_env: NeonEnv, pg_version: PgVersion):
+    env = neon_shared_env
    env.neon_cli.create_branch("test_neon_superuser_publisher", "main")
    pub = env.endpoints.create("test_neon_superuser_publisher")
@@ -2,8 +2,8 @@ from fixtures.log_helper import log
from fixtures.neon_fixtures import NeonEnvBuilder


-def test_oid_overflow(neon_env_builder: NeonEnvBuilder):
-    env = neon_env_builder.init_start()
+def test_oid_overflow(neon_shared_env: NeonEnv):
+    env = neon_shared_env

    endpoint = env.endpoints.create_start("main")
@@ -56,8 +56,8 @@ def check_client(env: NeonEnv, client: PageserverHttpClient):
    assert TimelineId(timeline_details["timeline_id"]) == timeline_id


-def test_pageserver_http_get_wal_receiver_not_found(neon_simple_env: NeonEnv):
-    env = neon_simple_env
+def test_pageserver_http_get_wal_receiver_not_found(neon_shared_env: NeonEnv):
+    env = neon_shared_env
    with env.pageserver.http_client() as client:
        tenant_id, timeline_id = env.neon_cli.create_tenant()
@@ -105,8 +105,8 @@ def expect_updated_msg_lsn(
#
# These fields used to be returned by a separate API call, but they're part of
# `timeline_details` now.
-def test_pageserver_http_get_wal_receiver_success(neon_simple_env: NeonEnv):
-    env = neon_simple_env
+def test_pageserver_http_get_wal_receiver_success(neon_shared_env: NeonEnv):
+    env = neon_shared_env
    with env.pageserver.http_client() as client:
        tenant_id, timeline_id = env.neon_cli.create_tenant()
        endpoint = env.endpoints.create_start(DEFAULT_BRANCH_NAME, tenant_id=tenant_id)
@@ -39,8 +39,8 @@ async def parallel_load_same_table(endpoint: Endpoint, n_parallel: int):


# Load data into one table with COPY TO from 5 parallel connections
-def test_parallel_copy(neon_simple_env: NeonEnv, n_parallel=5):
-    env = neon_simple_env
+def test_parallel_copy(neon_shared_env: NeonEnv, n_parallel=5):
+    env = neon_shared_env
    endpoint = env.endpoints.create_start("main")

    # Create test table
@@ -19,8 +19,8 @@ def check_wal_segment(pg_waldump_path: str, segment_path: str, test_output_dir):


# Simple test to check that pg_waldump works with neon WAL files
-def test_pg_waldump(neon_simple_env: NeonEnv, test_output_dir, pg_bin: PgBin):
-    env = neon_simple_env
+def test_pg_waldump(neon_shared_env: NeonEnv, test_output_dir, pg_bin: PgBin):
+    env = neon_shared_env
    tenant_id = env.initial_tenant
    timeline_id = env.initial_timeline
    endpoint = env.endpoints.create_start("main")
@@ -13,8 +13,8 @@ extensions = ["pageinspect", "neon_test_utils", "pg_buffercache"]
#
# Validation of reading different page versions
#
-def test_read_validation(neon_simple_env: NeonEnv):
-    env = neon_simple_env
+def test_read_validation(neon_shared_env: NeonEnv):
+    env = neon_shared_env

    endpoint = env.endpoints.create_start("main")
    with closing(endpoint.connect()) as con:
@@ -125,8 +125,8 @@ def test_read_validation(neon_simple_env: NeonEnv):
        log.info(f"Caught an expected failure: {e}")


-def test_read_validation_neg(neon_simple_env: NeonEnv):
-    env = neon_simple_env
+def test_read_validation_neg(neon_shared_env: NeonEnv):
+    env = neon_shared_env
    env.pageserver.allowed_errors.append(".*invalid LSN\\(0\\) in request.*")

    endpoint = env.endpoints.create_start("main")
@@ -215,8 +215,8 @@ def test_readonly_node_gc(neon_env_builder: NeonEnvBuilder):


# Similar test, but with more data, and we force checkpoints
-def test_timetravel(neon_simple_env: NeonEnv):
-    env = neon_simple_env
+def test_timetravel(neon_shared_env: NeonEnv):
+    env = neon_shared_env
    tenant_id = env.initial_tenant
    timeline_id = env.initial_timeline
    client = env.pageserver.http_client()
@@ -7,8 +7,8 @@ from fixtures.utils import wait_until

# This test checks if the logical replication subscriber is able to correctly restart replication without receiving duplicates.
# It requires tracking information about replication origins at page server side
-def test_subscriber_restart(neon_simple_env: NeonEnv):
-    env = neon_simple_env
+def test_subscriber_restart(neon_shared_env: NeonEnv):
+    env = neon_shared_env
    env.neon_cli.create_branch("publisher")
    pub = env.endpoints.create("publisher")
    pub.start()
@@ -7,8 +7,8 @@ from fixtures.neon_fixtures import NeonEnv, check_restored_datadir_content
# maintained in the pageserver, so subtransactions are not very exciting for
# Neon. They are included in the commit record though and updated in the
# CLOG.
-def test_subxacts(neon_simple_env: NeonEnv, test_output_dir):
-    env = neon_simple_env
+def test_subxacts(neon_shared_env: NeonEnv, test_output_dir):
+    env = neon_shared_env
    endpoint = env.endpoints.create_start("main")

    pg_conn = endpoint.connect()
@@ -36,8 +36,8 @@ from fixtures.utils import query_scalar, run_pg_bench_small, wait_until
from urllib3.util.retry import Retry


-def test_timeline_delete(neon_simple_env: NeonEnv):
-    env = neon_simple_env
+def test_timeline_delete(neon_shared_env: NeonEnv):
+    env = neon_shared_env

    env.pageserver.allowed_errors.extend(
        [
@@ -1,13 +1,13 @@
import time

-from fixtures.neon_fixtures import NeonEnvBuilder
+from fixtures.neon_fixtures import NeonEnv


#
# Test truncation of FSM and VM forks of a relation
#
-def test_truncate(neon_env_builder: NeonEnvBuilder, zenbenchmark):
-    env = neon_env_builder.init_start()
+def test_truncate(neon_shared_env: NeonEnv, zenbenchmark):
+    env = neon_shared_env
    n_records = 10000
    n_iter = 10
@@ -7,8 +7,8 @@ from fixtures.pg_version import PgVersion
# fork to reset them during recovery. In Neon, pageserver directly sends init
# fork contents as main fork during basebackup.
#
-def test_unlogged(neon_simple_env: NeonEnv):
-    env = neon_simple_env
+def test_unlogged(neon_shared_env: NeonEnv):
+    env = neon_shared_env
    endpoint = env.endpoints.create_start("main")

    conn = endpoint.connect()
@@ -10,8 +10,8 @@ from fixtures.utils import query_scalar
# Test that the VM bit is cleared correctly at a HEAP_DELETE and
# HEAP_UPDATE record.
#
-def test_vm_bit_clear(neon_simple_env: NeonEnv):
-    env = neon_simple_env
+def test_vm_bit_clear(neon_shared_env: NeonEnv):
+    env = neon_shared_env

    endpoint = env.endpoints.create_start("main")
@@ -114,13 +114,13 @@ def test_vm_bit_clear(neon_simple_env: NeonEnv):
    assert cur_new.fetchall() == []


-def test_vm_bit_clear_on_heap_lock_whitebox(neon_env_builder: NeonEnvBuilder):
+def test_vm_bit_clear_on_heap_lock_whitebox(neon_shared_env: NeonEnv):
    """
    Test that the ALL_FROZEN VM bit is cleared correctly at a HEAP_LOCK record.

    This is a repro for the bug fixed in commit 66fa176cc8.
    """
-    env = neon_env_builder.init_start()
+    env = neon_shared_env
    endpoint = env.endpoints.create_start(
        "main",
        config_lines=[