diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py
index f909069157..57bda0d253 100644
--- a/test_runner/fixtures/neon_fixtures.py
+++ b/test_runner/fixtures/neon_fixtures.py
@@ -485,7 +485,7 @@ class NeonEnvBuilder:
         self.env = None
-        self.__setattr__ = self.my_setattr
+        self.armed = True

     def __getattr__(self, attribute: str) -> Any:
         if self.storage_env_builder is not None:
@@ -493,19 +493,24 @@ class NeonEnvBuilder:
         else:
             raise AttributeError(f"NeonEnvBuilder doesn't have attribute '{attribute}'")

-    def my_setattr(self, attribute: str, value: Any) -> Any:
+    def __setattr__(self, attribute: str, value: Any) -> Any:
+        if attribute == 'armed' or self.__dict__.get('armed') is not True:
+            self.__dict__[attribute] = value
+            return
+
         if self.storage_env_builder is not None:
-            return self.server_env_builder.__setattribute__(attribute, value)
+            return setattr(self.storage_env_builder, attribute, value)
         else:
             raise AttributeError(f"NeonEnvBuilder doesn't have attribute '{attribute}'")

     def init_configs(self, default_remote_storage_if_missing: bool = True) -> NeonEnv:
+        self.armed = False
         # Cannot create more than one environment from one builder
         assert self.env is None, "environment already initialized"
         env = NeonEnv(self)
         if self.storage_env_builder is not None:
-            self.storage_env = self.storage_env_builder.init_configs(neon_cli, default_remote_storage_if_missing=default_remote_storage_if_missing)
+            self.storage_env = self.storage_env_builder.init_configs(default_remote_storage_if_missing=default_remote_storage_if_missing)
             env.storage_env = self.storage_env
         self.env = env
         return self.env
@@ -1554,10 +1559,10 @@ def neon_shared_storage_env(
     pg_distrib_dir: Path,
     request: FixtureRequest,
     neon_shared_storage_env_cache: Dict[str, NeonStorageEnv],
+    shared_initdb_cache_dir: Path,
 ) -> Iterator[NeonEnv]:
     repo_dir = top_output_dir / f"shared_repo-{build_type}"
-
     if neon_shared_storage_env_cache.get(build_type) is None:
         # Create the environment in the per-test output directory
         shutil.rmtree(repo_dir, ignore_errors=True)
@@ -1673,6 +1678,7 @@ def neon_simple_env(
         shared_initdb_cache_dir=shared_initdb_cache_dir
     ) as storage_env_builder:
         storage_env = storage_env_builder.init_configs()
+        storage_env.start()
         with NeonEnvBuilder(
             top_output_dir=top_output_dir,
             repo_dir=repo_dir,
@@ -4254,7 +4260,7 @@ class Endpoint(PgProtocol, LogUtils):
         self.http_port = http_port
         self.check_stop_result = check_stop_result
         # passed to endpoint create and endpoint reconfigure
-        self.active_safekeepers: List[int] = list(map(lambda sk: sk.id, env.safekeepers))
+        self.active_safekeepers: List[int] = list(map(lambda sk: sk.id, env.storage_env.safekeepers))
         # path to conf is /endpoints//pgdata/postgresql.conf XXX
         # Semaphore is set to 1 when we start, and acquire'd back to zero when we stop
diff --git a/test_runner/regress/test_aux_files.py b/test_runner/regress/test_aux_files.py
index 5328aef156..2a71798a77 100644
--- a/test_runner/regress/test_aux_files.py
+++ b/test_runner/regress/test_aux_files.py
@@ -6,8 +6,8 @@ from fixtures.neon_fixtures import (
 )


-def test_aux_v2_config_switch(neon_env_builder: NeonEnvBuilder, vanilla_pg):
-    env = neon_env_builder.init_start()
+def test_aux_v2_config_switch(neon_shared_env: NeonEnv, vanilla_pg):
+    env = neon_shared_env
     endpoint = env.endpoints.create_start("main")
     client = env.pageserver.http_client()
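The first NeonEnvBuilder hunk replaces the instance-level assignment `self.__setattr__ = self.my_setattr` (which does not intercept normal attribute assignment, since Python looks special methods up on the type, not the instance) with a real `__setattr__` gated by an `armed` flag: the flag itself and anything written before arming stay on the builder, while later writes are forwarded to the wrapped storage-env builder. Below is a minimal, self-contained sketch of that pattern only; the `Inner`/`Outer` names and the `num_safekeepers` attribute are illustrative and not taken from this patch.

from typing import Any


class Inner:
    """Stand-in for the wrapped storage-env builder."""

    def __init__(self) -> None:
        self.num_safekeepers = 1


class Outer:
    """Stand-in for NeonEnvBuilder: forwards attribute writes once 'armed'."""

    def __init__(self, inner: Inner) -> None:
        self.inner = inner      # written locally; 'armed' is not set yet
        self.armed = True       # from here on, unknown writes are forwarded

    def __getattr__(self, attribute: str) -> Any:
        # Only called when normal attribute lookup fails.
        return getattr(self.__dict__["inner"], attribute)

    def __setattr__(self, attribute: str, value: Any) -> None:
        # The flag itself, and anything set before arming, stays on this object.
        if attribute == "armed" or self.__dict__.get("armed") is not True:
            self.__dict__[attribute] = value
            return
        # Once armed, writes are forwarded to the wrapped object.
        setattr(self.__dict__["inner"], attribute, value)


outer = Outer(Inner())
outer.num_safekeepers = 3                # forwarded to Inner
assert outer.inner.num_safekeepers == 3
assert outer.num_safekeepers == 3        # read back through __getattr__

In the patch itself, `init_configs` sets `self.armed = False` before doing anything else, presumably so that the subsequent `self.storage_env` and `self.env` assignments land on the builder instead of being forwarded.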
diff --git a/test_runner/regress/test_branch_and_gc.py b/test_runner/regress/test_branch_and_gc.py
index f2e3855c12..40c1fdf67e 100644
--- a/test_runner/regress/test_branch_and_gc.py
+++ b/test_runner/regress/test_branch_and_gc.py
@@ -46,11 +46,11 @@ from fixtures.utils import query_scalar
 # Because the delta layer D covering lsn1 is corrupted, creating a branch
 # starting from lsn1 should return an error as follows:
 # could not find data for key ... at LSN ..., for request at LSN ...
-def test_branch_and_gc(neon_simple_env: NeonEnv, build_type: str):
+def test_branch_and_gc(neon_shared_env: NeonEnv, build_type: str):
     if build_type == "debug":
         pytest.skip("times out in debug builds")

-    env = neon_simple_env
+    env = neon_shared_env
     pageserver_http_client = env.pageserver.http_client()

     tenant, _ = env.neon_cli.create_tenant(
@@ -116,8 +116,8 @@ def test_branch_and_gc(neon_simple_env: NeonEnv, build_type: str):
 # and prevent creating branches with invalid starting LSNs.
 #
 # For more details, see discussion in https://github.com/neondatabase/neon/pull/2101#issuecomment-1185273447.
-def test_branch_creation_before_gc(neon_simple_env: NeonEnv):
-    env = neon_simple_env
+def test_branch_creation_before_gc(neon_shared_env: NeonEnv):
+    env = neon_shared_env
     pageserver_http_client = env.pageserver.http_client()

     error_regexes = [
diff --git a/test_runner/regress/test_branching.py b/test_runner/regress/test_branching.py
index fc74707639..ef21bb2b9f 100644
--- a/test_runner/regress/test_branching.py
+++ b/test_runner/regress/test_branching.py
@@ -33,7 +33,7 @@ from requests import RequestException
 @pytest.mark.parametrize("scale", get_scales_matrix(1))
 @pytest.mark.parametrize("ty", ["cascade", "flat"])
 def test_branching_with_pgbench(
-    neon_simple_env: NeonEnv, pg_bin: PgBin, n_branches: int, scale: int, ty: str
+    neon_shared_env: NeonEnv, pg_bin: PgBin, n_branches: int, scale: int, ty: str
 ):
-    env = neon_simple_env
+    env = neon_shared_env
diff --git a/test_runner/regress/test_build_info_metric.py b/test_runner/regress/test_build_info_metric.py
index 8f714dae67..3eca9e8e5b 100644
--- a/test_runner/regress/test_build_info_metric.py
+++ b/test_runner/regress/test_build_info_metric.py
@@ -2,9 +2,8 @@ from fixtures.metrics import parse_metrics
 from fixtures.neon_fixtures import NeonEnvBuilder, NeonProxy


-def test_build_info_metric(neon_env_builder: NeonEnvBuilder, link_proxy: NeonProxy):
-    neon_env_builder.num_safekeepers = 1
-    env = neon_env_builder.init_start()
+def test_build_info_metric(neon_shared_env: NeonEnv, link_proxy: NeonProxy):
+    env = neon_shared_env

     parsed_metrics = {}
diff --git a/test_runner/regress/test_clog_truncate.py b/test_runner/regress/test_clog_truncate.py
index 6e4880841a..1bdc3604dd 100644
--- a/test_runner/regress/test_clog_truncate.py
+++ b/test_runner/regress/test_clog_truncate.py
@@ -9,8 +9,8 @@ from fixtures.utils import query_scalar
 #
 # Test compute node start after clog truncation
 #
-def test_clog_truncate(neon_simple_env: NeonEnv):
-    env = neon_simple_env
+def test_clog_truncate(neon_shared_env: NeonEnv):
+    env = neon_shared_env

     # set aggressive autovacuum to make sure that truncation will happen
     config = [
diff --git a/test_runner/regress/test_combocid.py b/test_runner/regress/test_combocid.py
index 41907b1f20..fef5e64914 100644
--- a/test_runner/regress/test_combocid.py
+++ b/test_runner/regress/test_combocid.py
@@ -1,8 +1,8 @@
 from fixtures.neon_fixtures import NeonEnvBuilder, flush_ep_to_pageserver


-def do_combocid_op(neon_env_builder: NeonEnvBuilder, op):
-    env = neon_env_builder.init_start()
+def do_combocid_op(neon_shared_env: NeonEnv, op):
+    env = neon_shared_env
     endpoint = env.endpoints.create_start(
         "main",
         config_lines=[
@@ -49,20 +49,20 @@ def do_combocid_op(neon_env_builder: NeonEnvBuilder, op):
     )


-def test_combocid_delete(neon_env_builder: NeonEnvBuilder):
-    do_combocid_op(neon_env_builder, "delete from t")
+def test_combocid_delete(neon_shared_env: NeonEnv):
+    do_combocid_op(neon_shared_env, "delete from t")


-def test_combocid_update(neon_env_builder: NeonEnvBuilder):
-    do_combocid_op(neon_env_builder, "update t set val=val+1")
+def test_combocid_update(neon_shared_env: NeonEnv):
+    do_combocid_op(neon_shared_env, "update t set val=val+1")


-def test_combocid_lock(neon_env_builder: NeonEnvBuilder):
-    do_combocid_op(neon_env_builder, "select * from t for update")
+def test_combocid_lock(neon_shared_env: NeonEnv):
+    do_combocid_op(neon_shared_env, "select * from t for update")


-def test_combocid_multi_insert(neon_env_builder: NeonEnvBuilder):
-    env = neon_env_builder.init_start()
+def test_combocid_multi_insert(neon_shared_env: NeonEnv):
+    env = neon_shared_env
     endpoint = env.endpoints.create_start(
         "main",
         config_lines=[
@@ -112,8 +112,8 @@ def test_combocid_multi_insert(neon_env_builder: NeonEnvBuilder):
     )


-def test_combocid(neon_env_builder: NeonEnvBuilder):
-    env = neon_env_builder.init_start()
+def test_combocid(neon_shared_env: NeonEnv):
+    env = neon_shared_env
     endpoint = env.endpoints.create_start("main")

     conn = endpoint.connect()
diff --git a/test_runner/regress/test_compute_catalog.py b/test_runner/regress/test_compute_catalog.py
index 8b8c970357..e0f3fe7329 100644
--- a/test_runner/regress/test_compute_catalog.py
+++ b/test_runner/regress/test_compute_catalog.py
@@ -2,8 +2,8 @@ import requests
 from fixtures.neon_fixtures import NeonEnv


-def test_compute_catalog(neon_simple_env: NeonEnv):
-    env = neon_simple_env
+def test_compute_catalog(neon_shared_env: NeonEnv):
+    env = neon_shared_env
     endpoint = env.endpoints.create_start("main", config_lines=["log_min_messages=debug1"])
     client = endpoint.http_client()
diff --git a/test_runner/regress/test_config.py b/test_runner/regress/test_config.py
index ba227c73a6..a7b56dd2bf 100644
--- a/test_runner/regress/test_config.py
+++ b/test_runner/regress/test_config.py
@@ -29,7 +29,7 @@ def test_config(neon_simple_env: NeonEnv):
     # check that config change was applied
     assert cur.fetchone() == ("debug1",)

-def test_shared_config1(neon_shared_env: NeonEnv):
+def test_shared_config(neon_shared_env: NeonEnv):
     env = neon_shared_env

     # change config
@@ -51,29 +51,6 @@ def test_shared_config1(neon_shared_env: NeonEnv):
     # check that config change was applied
     assert cur.fetchone() == ("debug1",)

-def test_shared_config2(neon_shared_env: NeonEnv):
-    env = neon_shared_env
-
-    # change config
-    endpoint = env.endpoints.create_start("main", config_lines=["log_min_messages=debug1"])
-
-    with closing(endpoint.connect()) as conn:
-        with conn.cursor() as cur:
-            cur.execute(
-                """
-                SELECT setting
-                FROM pg_settings
-                WHERE
-                    source != 'default'
-                    AND source != 'override'
-                    AND name = 'log_min_messages'
-                """
-            )
-
-            # check that config change was applied
-            assert cur.fetchone() == ("debug1",)
-
-
 #
 # Test that reordering of safekeepers does not restart walproposer
 #
diff --git a/test_runner/regress/test_createdropdb.py b/test_runner/regress/test_createdropdb.py
index af643f45d7..78bf290bb8 100644
--- a/test_runner/regress/test_createdropdb.py
+++ b/test_runner/regress/test_createdropdb.py
@@ -12,8 +12,8 @@ from fixtures.utils import query_scalar
 # Test CREATE DATABASE when there have been relmapper changes
 #
 @pytest.mark.parametrize("strategy", ["file_copy", "wal_log"])
-def test_createdb(neon_simple_env: NeonEnv, strategy: str):
-    env = neon_simple_env
+def test_createdb(neon_shared_env: NeonEnv, strategy: str):
+    env = neon_shared_env
     if env.pg_version == PgVersion.V14 and strategy == "wal_log":
         pytest.skip("wal_log strategy not supported on PostgreSQL 14")
@@ -58,8 +58,8 @@ def test_createdb(neon_simple_env: NeonEnv, strategy: str):
 #
 # Test DROP DATABASE
 #
-def test_dropdb(neon_simple_env: NeonEnv, test_output_dir):
-    env = neon_simple_env
+def test_dropdb(neon_shared_env: NeonEnv, test_output_dir):
+    env = neon_shared_env
     endpoint = env.endpoints.create_start("main")

     with endpoint.cursor() as cur:
diff --git a/test_runner/regress/test_createuser.py b/test_runner/regress/test_createuser.py
index d6f138e126..1bb986f1df 100644
--- a/test_runner/regress/test_createuser.py
+++ b/test_runner/regress/test_createuser.py
@@ -5,8 +5,8 @@ from fixtures.utils import query_scalar
 #
 # Test CREATE USER to check shared catalog restore
 #
-def test_createuser(neon_simple_env: NeonEnv):
-    env = neon_simple_env
+def test_createuser(neon_shared_env: NeonEnv):
+    env = neon_shared_env
     endpoint = env.endpoints.create_start("main")

     with endpoint.cursor() as cur:
diff --git a/test_runner/regress/test_endpoint_crash.py b/test_runner/regress/test_endpoint_crash.py
index ae3dded437..8035b928ed 100644
--- a/test_runner/regress/test_endpoint_crash.py
+++ b/test_runner/regress/test_endpoint_crash.py
@@ -10,11 +10,11 @@ from fixtures.neon_fixtures import NeonEnvBuilder
         "💣",  # calls `trigger_segfault` internally
     ],
 )
-def test_endpoint_crash(neon_env_builder: NeonEnvBuilder, sql_func: str):
+def test_endpoint_crash(neon_shared_env: NeonEnv, sql_func: str):
     """
     Test that triggering crash from neon_test_utils crashes the endpoint
     """
-    env = neon_env_builder.init_start()
+    env = neon_shared_env
     env.neon_cli.create_branch("test_endpoint_crash")
     endpoint = env.endpoints.create_start("test_endpoint_crash")
diff --git a/test_runner/regress/test_fsm_truncate.py b/test_runner/regress/test_fsm_truncate.py
index 80e4da8380..120774ec95 100644
--- a/test_runner/regress/test_fsm_truncate.py
+++ b/test_runner/regress/test_fsm_truncate.py
@@ -1,8 +1,8 @@
-from fixtures.neon_fixtures import NeonEnvBuilder
+from fixtures.neon_fixtures import NeonEnv


-def test_fsm_truncate(neon_env_builder: NeonEnvBuilder):
-    env = neon_env_builder.init_start()
+def test_fsm_truncate(neon_shared_env: NeonEnv):
+    env = neon_shared_env
     env.neon_cli.create_branch("test_fsm_truncate")
     endpoint = env.endpoints.create_start("test_fsm_truncate")
     endpoint.safe_psql(
diff --git a/test_runner/regress/test_fullbackup.py b/test_runner/regress/test_fullbackup.py
index e6d51a77a6..84d40fcee1 100644
--- a/test_runner/regress/test_fullbackup.py
+++ b/test_runner/regress/test_fullbackup.py
@@ -16,12 +16,12 @@ num_rows = 1000

 # Ensure that regular postgres can start from fullbackup
 def test_fullbackup(
-    neon_env_builder: NeonEnvBuilder,
+    neon_shared_env: NeonEnv,
     pg_bin: PgBin,
     port_distributor: PortDistributor,
     test_output_dir: Path,
 ):
-    env = neon_env_builder.init_start()
+    env = neon_shared_env

     # endpoint needs to be alive until the fullbackup so that we have
     # prev_record_lsn for the vanilla_pg to start in read-write mode
diff --git a/test_runner/regress/test_hot_standby.py b/test_runner/regress/test_hot_standby.py
index ae63136abb..6aa6ed780a 100644
--- a/test_runner/regress/test_hot_standby.py
+++ b/test_runner/regress/test_hot_standby.py
@@ -87,8 +87,8 @@ def test_hot_standby(neon_simple_env: NeonEnv):
         sk_http.configure_failpoints(("sk-send-wal-replica-sleep", "off"))


-def test_2_replicas_start(neon_simple_env: NeonEnv):
-    env = neon_simple_env
+def test_2_replicas_start(neon_shared_env: NeonEnv):
+    env = neon_shared_env

     with env.endpoints.create_start(
         branch_name="main",
@@ -286,8 +286,8 @@ def test_hot_standby_feedback(neon_env_builder: NeonEnvBuilder, pg_bin: PgBin):

 # Test race condition between WAL replay and backends performing queries
 # https://github.com/neondatabase/neon/issues/7791
-def test_replica_query_race(neon_simple_env: NeonEnv):
-    env = neon_simple_env
+def test_replica_query_race(neon_shared_env: NeonEnv):
+    env = neon_shared_env

     primary_ep = env.endpoints.create_start(
         branch_name="main",
diff --git a/test_runner/regress/test_large_schema.py b/test_runner/regress/test_large_schema.py
index c5d5b5fe64..bde685ebe3 100644
--- a/test_runner/regress/test_large_schema.py
+++ b/test_runner/regress/test_large_schema.py
@@ -12,8 +12,8 @@ from fixtures.neon_fixtures import NeonEnvBuilder
 # to large (several gigabytes) layer files (both ephemeral and delta layers).
 # It may cause problems with uploading to S3 and also degrade performance because ephemeral file swapping.
 #
-def test_large_schema(neon_env_builder: NeonEnvBuilder):
-    env = neon_env_builder.init_start()
+def test_large_schema(neon_shared_env: NeonEnv):
+    env = neon_shared_env

     endpoint = env.endpoints.create_start("main")
diff --git a/test_runner/regress/test_lfc_resize.py b/test_runner/regress/test_lfc_resize.py
index cb0b30d9c6..8716283162 100644
--- a/test_runner/regress/test_lfc_resize.py
+++ b/test_runner/regress/test_lfc_resize.py
@@ -14,8 +14,8 @@ from fixtures.neon_fixtures import NeonEnv, PgBin
 # Test branching, when a transaction is in prepared state
 #
 @pytest.mark.timeout(600)
-def test_lfc_resize(neon_simple_env: NeonEnv, pg_bin: PgBin):
-    env = neon_simple_env
+def test_lfc_resize(neon_shared_env: NeonEnv, pg_bin: PgBin):
+    env = neon_shared_env
     endpoint = env.endpoints.create_start(
         "main",
         config_lines=[
diff --git a/test_runner/regress/test_local_file_cache.py b/test_runner/regress/test_local_file_cache.py
index 9c38200937..c12d45d8ae 100644
--- a/test_runner/regress/test_local_file_cache.py
+++ b/test_runner/regress/test_local_file_cache.py
@@ -9,8 +9,8 @@ from fixtures.neon_fixtures import NeonEnvBuilder
 from fixtures.utils import query_scalar


-def test_local_file_cache_unlink(neon_env_builder: NeonEnvBuilder):
-    env = neon_env_builder.init_start()
+def test_local_file_cache_unlink(neon_shared_env: NeonEnv):
+    env = neon_shared_env

     cache_dir = os.path.join(env.repo_dir, "file_cache")
     os.mkdir(cache_dir)
diff --git a/test_runner/regress/test_lsn_mapping.py b/test_runner/regress/test_lsn_mapping.py
index 67e82f8d30..67e58c4151 100644
--- a/test_runner/regress/test_lsn_mapping.py
+++ b/test_runner/regress/test_lsn_mapping.py
@@ -24,13 +24,13 @@ def assert_lsn_lease_granted(result, with_lease: bool):


 @pytest.mark.parametrize("with_lease", [True, False])
-def test_lsn_mapping(neon_env_builder: NeonEnvBuilder, with_lease: bool):
+def test_lsn_mapping(neon_shared_env: NeonEnv, with_lease: bool):
     """
     Test pageserver get_lsn_by_timestamp API.

     :param with_lease: Whether to get a lease associated with returned LSN.
""" - env = neon_env_builder.init_start() + env = neon_shared_env tenant_id, _ = env.neon_cli.create_tenant( conf={ diff --git a/test_runner/regress/test_migrations.py b/test_runner/regress/test_migrations.py index e88e56d030..9004557aea 100644 --- a/test_runner/regress/test_migrations.py +++ b/test_runner/regress/test_migrations.py @@ -7,8 +7,8 @@ if TYPE_CHECKING: from fixtures.neon_fixtures import NeonEnv -def test_migrations(neon_simple_env: NeonEnv): - env = neon_simple_env +def test_migrations(neon_shared_env: NeonEnv): + env = neon_shared_env endpoint = env.endpoints.create("main") endpoint.respec(skip_pg_catalog_updates=False) diff --git a/test_runner/regress/test_multixact.py b/test_runner/regress/test_multixact.py index 8a00f8835f..3b67f638ef 100644 --- a/test_runner/regress/test_multixact.py +++ b/test_runner/regress/test_multixact.py @@ -12,8 +12,8 @@ from fixtures.utils import query_scalar # is enough to verify that the WAL records are handled correctly # in the pageserver. # -def test_multixact(neon_simple_env: NeonEnv, test_output_dir): - env = neon_simple_env +def test_multixact(neon_shared_env: NeonEnv, test_output_dir): + env = neon_shared_env endpoint = env.endpoints.create_start("main") cur = endpoint.connect().cursor() diff --git a/test_runner/regress/test_neon_superuser.py b/test_runner/regress/test_neon_superuser.py index 7825ec772c..7e3f8b4b63 100644 --- a/test_runner/regress/test_neon_superuser.py +++ b/test_runner/regress/test_neon_superuser.py @@ -4,8 +4,8 @@ from fixtures.pg_version import PgVersion from fixtures.utils import wait_until -def test_neon_superuser(neon_simple_env: NeonEnv, pg_version: PgVersion): - env = neon_simple_env +def test_neon_superuser(neon_shared_env: NeonEnv, pg_version: PgVersion): + env = neon_shared_env env.neon_cli.create_branch("test_neon_superuser_publisher", "main") pub = env.endpoints.create("test_neon_superuser_publisher") diff --git a/test_runner/regress/test_oid_overflow.py b/test_runner/regress/test_oid_overflow.py index e8eefc2414..40126d0e68 100644 --- a/test_runner/regress/test_oid_overflow.py +++ b/test_runner/regress/test_oid_overflow.py @@ -2,8 +2,8 @@ from fixtures.log_helper import log from fixtures.neon_fixtures import NeonEnvBuilder -def test_oid_overflow(neon_env_builder: NeonEnvBuilder): - env = neon_env_builder.init_start() +def test_oid_overflow(neon_shared_env: NeonEnv): + env = neon_shared_env endpoint = env.endpoints.create_start("main") diff --git a/test_runner/regress/test_pageserver_api.py b/test_runner/regress/test_pageserver_api.py index 28dbf40bed..fee6230160 100644 --- a/test_runner/regress/test_pageserver_api.py +++ b/test_runner/regress/test_pageserver_api.py @@ -56,8 +56,8 @@ def check_client(env: NeonEnv, client: PageserverHttpClient): assert TimelineId(timeline_details["timeline_id"]) == timeline_id -def test_pageserver_http_get_wal_receiver_not_found(neon_simple_env: NeonEnv): - env = neon_simple_env +def test_pageserver_http_get_wal_receiver_not_found(neon_shared_env: NeonEnv): + env = neon_shared_env with env.pageserver.http_client() as client: tenant_id, timeline_id = env.neon_cli.create_tenant() @@ -105,8 +105,8 @@ def expect_updated_msg_lsn( # # These fields used to be returned by a separate API call, but they're part of # `timeline_details` now. 
-def test_pageserver_http_get_wal_receiver_success(neon_simple_env: NeonEnv):
-    env = neon_simple_env
+def test_pageserver_http_get_wal_receiver_success(neon_shared_env: NeonEnv):
+    env = neon_shared_env
     with env.pageserver.http_client() as client:
         tenant_id, timeline_id = env.neon_cli.create_tenant()
         endpoint = env.endpoints.create_start(DEFAULT_BRANCH_NAME, tenant_id=tenant_id)
diff --git a/test_runner/regress/test_parallel_copy.py b/test_runner/regress/test_parallel_copy.py
index a5037e8694..18b070daee 100644
--- a/test_runner/regress/test_parallel_copy.py
+++ b/test_runner/regress/test_parallel_copy.py
@@ -39,8 +39,8 @@ async def parallel_load_same_table(endpoint: Endpoint, n_parallel: int):


 # Load data into one table with COPY TO from 5 parallel connections
-def test_parallel_copy(neon_simple_env: NeonEnv, n_parallel=5):
-    env = neon_simple_env
+def test_parallel_copy(neon_shared_env: NeonEnv, n_parallel=5):
+    env = neon_shared_env
     endpoint = env.endpoints.create_start("main")

     # Create test table
diff --git a/test_runner/regress/test_pg_waldump.py b/test_runner/regress/test_pg_waldump.py
index 1990d69b6a..1a753f966c 100644
--- a/test_runner/regress/test_pg_waldump.py
+++ b/test_runner/regress/test_pg_waldump.py
@@ -19,8 +19,8 @@ def check_wal_segment(pg_waldump_path: str, segment_path: str, test_output_dir):


 # Simple test to check that pg_waldump works with neon WAL files
-def test_pg_waldump(neon_simple_env: NeonEnv, test_output_dir, pg_bin: PgBin):
-    env = neon_simple_env
+def test_pg_waldump(neon_shared_env: NeonEnv, test_output_dir, pg_bin: PgBin):
+    env = neon_shared_env
     tenant_id = env.initial_tenant
     timeline_id = env.initial_timeline
     endpoint = env.endpoints.create_start("main")
diff --git a/test_runner/regress/test_read_validation.py b/test_runner/regress/test_read_validation.py
index 78798c5abf..32c89ce54b 100644
--- a/test_runner/regress/test_read_validation.py
+++ b/test_runner/regress/test_read_validation.py
@@ -13,8 +13,8 @@ extensions = ["pageinspect", "neon_test_utils", "pg_buffercache"]
 #
 # Validation of reading different page versions
 #
-def test_read_validation(neon_simple_env: NeonEnv):
-    env = neon_simple_env
+def test_read_validation(neon_shared_env: NeonEnv):
+    env = neon_shared_env

     endpoint = env.endpoints.create_start("main")
     with closing(endpoint.connect()) as con:
@@ -125,8 +125,8 @@ def test_read_validation(neon_simple_env: NeonEnv):
         log.info(f"Caught an expected failure: {e}")


-def test_read_validation_neg(neon_simple_env: NeonEnv):
-    env = neon_simple_env
+def test_read_validation_neg(neon_shared_env: NeonEnv):
+    env = neon_shared_env
     env.pageserver.allowed_errors.append(".*invalid LSN\\(0\\) in request.*")

     endpoint = env.endpoints.create_start("main")
diff --git a/test_runner/regress/test_readonly_node.py b/test_runner/regress/test_readonly_node.py
index 347fc3a04d..3deaee6e7c 100644
--- a/test_runner/regress/test_readonly_node.py
+++ b/test_runner/regress/test_readonly_node.py
@@ -215,8 +215,8 @@ def test_readonly_node_gc(neon_env_builder: NeonEnvBuilder):


 # Similar test, but with more data, and we force checkpoints
-def test_timetravel(neon_simple_env: NeonEnv):
-    env = neon_simple_env
+def test_timetravel(neon_shared_env: NeonEnv):
+    env = neon_shared_env
     tenant_id = env.initial_tenant
     timeline_id = env.initial_timeline
     client = env.pageserver.http_client()
diff --git a/test_runner/regress/test_subscriber_restart.py b/test_runner/regress/test_subscriber_restart.py
index 91caad7220..95221b8b6a 100644
--- a/test_runner/regress/test_subscriber_restart.py
+++ b/test_runner/regress/test_subscriber_restart.py
@@ -7,8 +7,8 @@ from fixtures.utils import wait_until

 # This test checks of logical replication subscriber is able to correctly restart replication without receiving duplicates.
 # It requires tracking information about replication origins at page server side
-def test_subscriber_restart(neon_simple_env: NeonEnv):
-    env = neon_simple_env
+def test_subscriber_restart(neon_shared_env: NeonEnv):
+    env = neon_shared_env
     env.neon_cli.create_branch("publisher")
     pub = env.endpoints.create("publisher")
     pub.start()
diff --git a/test_runner/regress/test_subxacts.py b/test_runner/regress/test_subxacts.py
index 82075bd723..c9895a8fb4 100644
--- a/test_runner/regress/test_subxacts.py
+++ b/test_runner/regress/test_subxacts.py
@@ -7,8 +7,8 @@ from fixtures.neon_fixtures import NeonEnv, check_restored_datadir_content
 # maintained in the pageserver, so subtransactions are not very exciting for
 # Neon. They are included in the commit record though and updated in the
 # CLOG.
-def test_subxacts(neon_simple_env: NeonEnv, test_output_dir):
-    env = neon_simple_env
+def test_subxacts(neon_shared_env: NeonEnv, test_output_dir):
+    env = neon_shared_env
     endpoint = env.endpoints.create_start("main")

     pg_conn = endpoint.connect()
diff --git a/test_runner/regress/test_timeline_delete.py b/test_runner/regress/test_timeline_delete.py
index 711fcd5016..72bcba9d87 100644
--- a/test_runner/regress/test_timeline_delete.py
+++ b/test_runner/regress/test_timeline_delete.py
@@ -36,8 +36,8 @@ from fixtures.utils import query_scalar, run_pg_bench_small, wait_until
 from urllib3.util.retry import Retry


-def test_timeline_delete(neon_simple_env: NeonEnv):
-    env = neon_simple_env
+def test_timeline_delete(neon_shared_env: NeonEnv):
+    env = neon_shared_env

     env.pageserver.allowed_errors.extend(
         [
diff --git a/test_runner/regress/test_truncate.py b/test_runner/regress/test_truncate.py
index 52f125ce0b..39d9c40d58 100644
--- a/test_runner/regress/test_truncate.py
+++ b/test_runner/regress/test_truncate.py
@@ -1,13 +1,13 @@
 import time

-from fixtures.neon_fixtures import NeonEnvBuilder
+from fixtures.neon_fixtures import NeonEnv


 #
 # Test truncation of FSM and VM forks of a relation
 #
-def test_truncate(neon_env_builder: NeonEnvBuilder, zenbenchmark):
-    env = neon_env_builder.init_start()
+def test_truncate(neon_shared_env: NeonEnv, zenbenchmark):
+    env = neon_shared_env
     n_records = 10000
     n_iter = 10
diff --git a/test_runner/regress/test_unlogged.py b/test_runner/regress/test_unlogged.py
index deba29536c..bb03033391 100644
--- a/test_runner/regress/test_unlogged.py
+++ b/test_runner/regress/test_unlogged.py
@@ -7,8 +7,8 @@ from fixtures.pg_version import PgVersion
 # fork to reset them during recovery. In Neon, pageserver directly sends init
 # fork contents as main fork during basebackup.
 #
-def test_unlogged(neon_simple_env: NeonEnv):
-    env = neon_simple_env
+def test_unlogged(neon_shared_env: NeonEnv):
+    env = neon_shared_env
     endpoint = env.endpoints.create_start("main")

     conn = endpoint.connect()
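The regression-test hunks above and below are the same mechanical conversion: a test that used to build and start its own environment with neon_env_builder.init_start() now receives an already-running environment from the neon_shared_env fixture, which the neon_fixtures.py hunk suggests is cached per build type (see neon_shared_storage_env_cache). The fixture definition itself is not part of this diff; the following standalone pytest sketch only illustrates the create-once-then-reuse idea, with FakeEnv, _shared_cache, and the hard-coded "release" key as hypothetical stand-ins for the real machinery.

from typing import Dict

import pytest


class FakeEnv:
    """Illustrative stand-in for NeonEnv; not the real fixture machinery."""

    def __init__(self, build_type: str) -> None:
        self.build_type = build_type
        self.started = False

    def start(self) -> None:
        self.started = True


_shared_cache: Dict[str, FakeEnv] = {}


@pytest.fixture
def shared_env() -> FakeEnv:
    # In the real suite the cache key would come from a build_type fixture.
    build_type = "release"
    env = _shared_cache.get(build_type)
    if env is None:
        # Create and start the environment only once; later tests reuse it.
        env = FakeEnv(build_type)
        env.start()
        _shared_cache[build_type] = env
    return env


def test_shared_env_started(shared_env: FakeEnv) -> None:
    assert shared_env.started


def test_shared_env_is_reused(shared_env: FakeEnv) -> None:
    assert shared_env is _shared_cache["release"]

The trade-off this sketch illustrates is that tests sharing one environment run faster but can no longer assume a pristine cluster, which is presumably why the converted tests create their own branches or tenants rather than mutating shared state.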
diff --git a/test_runner/regress/test_vm_bits.py b/test_runner/regress/test_vm_bits.py
index 3075211ada..8aed99e5d3 100644
--- a/test_runner/regress/test_vm_bits.py
+++ b/test_runner/regress/test_vm_bits.py
@@ -10,8 +10,8 @@ from fixtures.utils import query_scalar
 # Test that the VM bit is cleared correctly at a HEAP_DELETE and
 # HEAP_UPDATE record.
 #
-def test_vm_bit_clear(neon_simple_env: NeonEnv):
-    env = neon_simple_env
+def test_vm_bit_clear(neon_shared_env: NeonEnv):
+    env = neon_shared_env

     endpoint = env.endpoints.create_start("main")

@@ -114,13 +114,13 @@ def test_vm_bit_clear(neon_simple_env: NeonEnv):
     assert cur_new.fetchall() == []


-def test_vm_bit_clear_on_heap_lock_whitebox(neon_env_builder: NeonEnvBuilder):
+def test_vm_bit_clear_on_heap_lock_whitebox(neon_shared_env: NeonEnv):
     """
     Test that the ALL_FROZEN VM bit is cleared correctly at a HEAP_LOCK record.

     This is a repro for the bug fixed in commit 66fa176cc8.
     """
-    env = neon_env_builder.init_start()
+    env = neon_shared_env
     endpoint = env.endpoints.create_start(
         "main",
         config_lines=[