diff --git a/test_runner/README.md b/test_runner/README.md
index 647b930b26..d754e60d17 100644
--- a/test_runner/README.md
+++ b/test_runner/README.md
@@ -257,9 +257,8 @@ compute Postgres nodes. The connections between them can be configured to use JW
 authentication tokens, and some other configuration options can be tweaked too.
 
 The easiest way to get access to a Neon Environment is by using the `neon_simple_env`
-fixture. For convenience, there is a branch called `empty` in environments created with
-'neon_simple_env'. The convention is to create a test-specific branch of that and load any
-test data there, instead of the 'main' branch.
+fixture. For convenience, there is a branch called `main` in environments created with
+'neon_simple_env', ready to be used in the test.
 
 For more complicated cases, you can build a custom Neon Environment, with the `neon_env`
 fixture:
diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py
index 3047dcc4f7..60887b9aed 100644
--- a/test_runner/fixtures/neon_fixtures.py
+++ b/test_runner/fixtures/neon_fixtures.py
@@ -57,7 +57,6 @@ from _pytest.fixtures import FixtureRequest
 from psycopg2.extensions import connection as PgConnection
 from psycopg2.extensions import cursor as PgCursor
 from psycopg2.extensions import make_dsn, parse_dsn
-from typing_extensions import Literal
 from urllib3.util.retry import Retry
 
 from fixtures import overlayfs
@@ -1451,11 +1450,9 @@ def neon_simple_env(
     ) as builder:
         env = builder.init_start()
 
-        # For convenience in tests, create a branch from the freshly-initialized cluster.
-        env.neon_cli.create_branch("empty", ancestor_branch_name=DEFAULT_BRANCH_NAME)
-
         yield env
 
+
 @pytest.fixture(scope="function")
 def neon_env_builder(
     pytestconfig: Config,
diff --git a/test_runner/performance/test_logical_replication.py b/test_runner/performance/test_logical_replication.py
index 077f73ac06..29a0380524 100644
--- a/test_runner/performance/test_logical_replication.py
+++ b/test_runner/performance/test_logical_replication.py
@@ -22,10 +22,8 @@ if TYPE_CHECKING:
 def test_logical_replication(neon_simple_env: NeonEnv, pg_bin: PgBin, vanilla_pg):
     env = neon_simple_env
 
-    env.neon_cli.create_branch("test_logical_replication", "empty")
-    endpoint = env.endpoints.create_start("test_logical_replication")
+    endpoint = env.endpoints.create_start("main")
 
-    log.info("postgres is running on 'test_logical_replication' branch")
     pg_bin.run_capture(["pgbench", "-i", "-s10", endpoint.connstr()])
 
     endpoint.safe_psql("create publication pub1 for table pgbench_accounts, pgbench_history")
diff --git a/test_runner/regress/test_basebackup_error.py b/test_runner/regress/test_basebackup_error.py
index 170b494884..13c080ea0e 100644
--- a/test_runner/regress/test_basebackup_error.py
+++ b/test_runner/regress/test_basebackup_error.py
@@ -8,11 +8,10 @@ from fixtures.neon_fixtures import NeonEnv
 #
 def test_basebackup_error(neon_simple_env: NeonEnv):
     env = neon_simple_env
-    env.neon_cli.create_branch("test_basebackup_error", "empty")
     pageserver_http = env.pageserver.http_client()
 
     # Introduce failpoint
     pageserver_http.configure_failpoints(("basebackup-before-control-file", "return"))
 
     with pytest.raises(Exception, match="basebackup-before-control-file"):
-        env.endpoints.create_start("test_basebackup_error")
+        env.endpoints.create_start("main")
diff --git a/test_runner/regress/test_clog_truncate.py b/test_runner/regress/test_clog_truncate.py
index 26e6e336b9..6e4880841a 100644
--- a/test_runner/regress/test_clog_truncate.py
+++ b/test_runner/regress/test_clog_truncate.py
@@ -11,7 +11,6 @@ from fixtures.utils import query_scalar
 #
 def test_clog_truncate(neon_simple_env: NeonEnv):
     env = neon_simple_env
-    env.neon_cli.create_branch("test_clog_truncate", "empty")
 
     # set aggressive autovacuum to make sure that truncation will happen
     config = [
@@ -24,7 +23,7 @@ def test_clog_truncate(neon_simple_env: NeonEnv):
         "autovacuum_freeze_max_age=100000",
     ]
 
-    endpoint = env.endpoints.create_start("test_clog_truncate", config_lines=config)
+    endpoint = env.endpoints.create_start("main", config_lines=config)
 
     # Install extension containing function needed for test
     endpoint.safe_psql("CREATE EXTENSION neon_test_utils")
@@ -58,7 +57,7 @@ def test_clog_truncate(neon_simple_env: NeonEnv):
     # create new branch after clog truncation and start a compute node on it
     log.info(f"create branch at lsn_after_truncation {lsn_after_truncation}")
     env.neon_cli.create_branch(
-        "test_clog_truncate_new", "test_clog_truncate", ancestor_start_lsn=lsn_after_truncation
+        "test_clog_truncate_new", "main", ancestor_start_lsn=lsn_after_truncation
     )
     endpoint2 = env.endpoints.create_start("test_clog_truncate_new")
 
diff --git a/test_runner/regress/test_compute_catalog.py b/test_runner/regress/test_compute_catalog.py
index dd36190fcd..8b8c970357 100644
--- a/test_runner/regress/test_compute_catalog.py
+++ b/test_runner/regress/test_compute_catalog.py
@@ -4,9 +4,8 @@ from fixtures.neon_fixtures import NeonEnv
 
 def test_compute_catalog(neon_simple_env: NeonEnv):
     env = neon_simple_env
-    env.neon_cli.create_branch("test_config", "empty")
 
-    endpoint = env.endpoints.create_start("test_config", config_lines=["log_min_messages=debug1"])
+    endpoint = env.endpoints.create_start("main", config_lines=["log_min_messages=debug1"])
     client = endpoint.http_client()
 
     objects = client.dbs_and_roles()
diff --git a/test_runner/regress/test_config.py b/test_runner/regress/test_config.py
index 2ef28eb94b..d8ef0b8dbd 100644
--- a/test_runner/regress/test_config.py
+++ b/test_runner/regress/test_config.py
@@ -9,10 +9,9 @@ from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder
 #
 def test_config(neon_simple_env: NeonEnv):
     env = neon_simple_env
-    env.neon_cli.create_branch("test_config", "empty")
 
     # change config
-    endpoint = env.endpoints.create_start("test_config", config_lines=["log_min_messages=debug1"])
+    endpoint = env.endpoints.create_start("main", config_lines=["log_min_messages=debug1"])
 
     with closing(endpoint.connect()) as conn:
         with conn.cursor() as cur:
diff --git a/test_runner/regress/test_createdropdb.py b/test_runner/regress/test_createdropdb.py
index f741a9fc87..af643f45d7 100644
--- a/test_runner/regress/test_createdropdb.py
+++ b/test_runner/regress/test_createdropdb.py
@@ -17,9 +17,7 @@ def test_createdb(neon_simple_env: NeonEnv, strategy: str):
     if env.pg_version == PgVersion.V14 and strategy == "wal_log":
         pytest.skip("wal_log strategy not supported on PostgreSQL 14")
 
-    env.neon_cli.create_branch("test_createdb", "empty")
-
-    endpoint = env.endpoints.create_start("test_createdb")
+    endpoint = env.endpoints.create_start("main")
 
     with endpoint.cursor() as cur:
         # Cause a 'relmapper' change in the original branch
@@ -33,7 +31,7 @@ def test_createdb(neon_simple_env: NeonEnv, strategy: str):
         lsn = query_scalar(cur, "SELECT pg_current_wal_insert_lsn()")
 
     # Create a branch
-    env.neon_cli.create_branch("test_createdb2", "test_createdb", ancestor_start_lsn=lsn)
+    env.neon_cli.create_branch("test_createdb2", "main", ancestor_start_lsn=lsn)
     endpoint2 = env.endpoints.create_start("test_createdb2")
 
     # Test that you can connect to the new database on both branches
@@ -62,8 +60,7 @@ def test_createdb(neon_simple_env: NeonEnv, strategy: str):
 #
 def test_dropdb(neon_simple_env: NeonEnv, test_output_dir):
     env = neon_simple_env
-    env.neon_cli.create_branch("test_dropdb", "empty")
-    endpoint = env.endpoints.create_start("test_dropdb")
+    endpoint = env.endpoints.create_start("main")
 
     with endpoint.cursor() as cur:
         cur.execute("CREATE DATABASE foodb")
@@ -80,14 +77,10 @@ def test_dropdb(neon_simple_env: NeonEnv, test_output_dir):
         lsn_after_drop = query_scalar(cur, "SELECT pg_current_wal_insert_lsn()")
 
     # Create two branches before and after database drop.
-    env.neon_cli.create_branch(
-        "test_before_dropdb", "test_dropdb", ancestor_start_lsn=lsn_before_drop
-    )
+    env.neon_cli.create_branch("test_before_dropdb", "main", ancestor_start_lsn=lsn_before_drop)
     endpoint_before = env.endpoints.create_start("test_before_dropdb")
 
-    env.neon_cli.create_branch(
-        "test_after_dropdb", "test_dropdb", ancestor_start_lsn=lsn_after_drop
-    )
+    env.neon_cli.create_branch("test_after_dropdb", "main", ancestor_start_lsn=lsn_after_drop)
     endpoint_after = env.endpoints.create_start("test_after_dropdb")
 
     # Test that database exists on the branch before drop
diff --git a/test_runner/regress/test_createuser.py b/test_runner/regress/test_createuser.py
index 17d9824f52..d6f138e126 100644
--- a/test_runner/regress/test_createuser.py
+++ b/test_runner/regress/test_createuser.py
@@ -7,8 +7,7 @@ from fixtures.utils import query_scalar
 #
 def test_createuser(neon_simple_env: NeonEnv):
     env = neon_simple_env
-    env.neon_cli.create_branch("test_createuser", "empty")
-    endpoint = env.endpoints.create_start("test_createuser")
+    endpoint = env.endpoints.create_start("main")
 
     with endpoint.cursor() as cur:
         # Cause a 'relmapper' change in the original branch
@@ -19,7 +18,7 @@ def test_createuser(neon_simple_env: NeonEnv):
         lsn = query_scalar(cur, "SELECT pg_current_wal_insert_lsn()")
 
     # Create a branch
-    env.neon_cli.create_branch("test_createuser2", "test_createuser", ancestor_start_lsn=lsn)
+    env.neon_cli.create_branch("test_createuser2", "main", ancestor_start_lsn=lsn)
     endpoint2 = env.endpoints.create_start("test_createuser2")
 
     # Test that you can connect to new branch as a new user
diff --git a/test_runner/regress/test_ddl_forwarding.py b/test_runner/regress/test_ddl_forwarding.py
index 50da673d87..65f310c27a 100644
--- a/test_runner/regress/test_ddl_forwarding.py
+++ b/test_runner/regress/test_ddl_forwarding.py
@@ -290,9 +290,8 @@ def assert_db_connlimit(endpoint: Any, db_name: str, connlimit: int, msg: str):
 # Here we test the latter. The first one is tested in test_ddl_forwarding
 def test_ddl_forwarding_invalid_db(neon_simple_env: NeonEnv):
     env = neon_simple_env
-    env.neon_cli.create_branch("test_ddl_forwarding_invalid_db", "empty")
     endpoint = env.endpoints.create_start(
-        "test_ddl_forwarding_invalid_db",
+        "main",
         # Some non-existent url
         config_lines=["neon.console_url=http://localhost:9999/unknown/api/v0/roles_and_databases"],
     )
diff --git a/test_runner/regress/test_explain_with_lfc_stats.py b/test_runner/regress/test_explain_with_lfc_stats.py
index 5231dedcda..0217c9ac7b 100644
--- a/test_runner/regress/test_explain_with_lfc_stats.py
+++ b/test_runner/regress/test_explain_with_lfc_stats.py
@@ -10,11 +10,9 @@ def test_explain_with_lfc_stats(neon_simple_env: NeonEnv):
     cache_dir = Path(env.repo_dir) / "file_cache"
     cache_dir.mkdir(exist_ok=True)
 
-    branchname = "test_explain_with_lfc_stats"
-    env.neon_cli.create_branch(branchname, "empty")
-    log.info(f"Creating endopint with 1MB shared_buffers and 64 MB LFC for branch {branchname}")
+    log.info("Creating endpoint with 1MB shared_buffers and 64 MB LFC")
     endpoint = env.endpoints.create_start(
-        branchname,
+        "main",
         config_lines=[
             "shared_buffers='1MB'",
             f"neon.file_cache_path='{cache_dir}/file.cache'",
diff --git a/test_runner/regress/test_lfc_resize.py b/test_runner/regress/test_lfc_resize.py
index 1b2c7f808f..cb0b30d9c6 100644
--- a/test_runner/regress/test_lfc_resize.py
+++ b/test_runner/regress/test_lfc_resize.py
@@ -16,9 +16,8 @@ from fixtures.neon_fixtures import NeonEnv, PgBin
 @pytest.mark.timeout(600)
 def test_lfc_resize(neon_simple_env: NeonEnv, pg_bin: PgBin):
     env = neon_simple_env
-    env.neon_cli.create_branch("test_lfc_resize", "empty")
     endpoint = env.endpoints.create_start(
-        "test_lfc_resize",
+        "main",
         config_lines=[
             "neon.file_cache_path='file.cache'",
             "neon.max_file_cache_size=512MB",
diff --git a/test_runner/regress/test_lfc_working_set_approximation.py b/test_runner/regress/test_lfc_working_set_approximation.py
index 4c53e4e2fd..4a3a949d1a 100644
--- a/test_runner/regress/test_lfc_working_set_approximation.py
+++ b/test_runner/regress/test_lfc_working_set_approximation.py
@@ -12,11 +12,9 @@ def test_lfc_working_set_approximation(neon_simple_env: NeonEnv):
     cache_dir = Path(env.repo_dir) / "file_cache"
     cache_dir.mkdir(exist_ok=True)
 
-    branchname = "test_approximate_working_set_size"
-    env.neon_cli.create_branch(branchname, "empty")
-    log.info(f"Creating endopint with 1MB shared_buffers and 64 MB LFC for branch {branchname}")
+    log.info("Creating endpoint with 1MB shared_buffers and 64 MB LFC")
     endpoint = env.endpoints.create_start(
-        branchname,
+        "main",
         config_lines=[
             "shared_buffers='1MB'",
             f"neon.file_cache_path='{cache_dir}/file.cache'",
diff --git a/test_runner/regress/test_local_file_cache.py b/test_runner/regress/test_local_file_cache.py
index 3c404c3b23..9c38200937 100644
--- a/test_runner/regress/test_local_file_cache.py
+++ b/test_runner/regress/test_local_file_cache.py
@@ -5,7 +5,7 @@ import threading
 import time
 from typing import List
 
-from fixtures.neon_fixtures import DEFAULT_BRANCH_NAME, NeonEnvBuilder
+from fixtures.neon_fixtures import NeonEnvBuilder
 from fixtures.utils import query_scalar
 
 
@@ -15,11 +15,8 @@ def test_local_file_cache_unlink(neon_env_builder: NeonEnvBuilder):
     cache_dir = os.path.join(env.repo_dir, "file_cache")
     os.mkdir(cache_dir)
 
-    env.neon_cli.create_branch("empty", ancestor_branch_name=DEFAULT_BRANCH_NAME)
-    env.neon_cli.create_branch("test_local_file_cache_unlink", "empty")
-
     endpoint = env.endpoints.create_start(
-        "test_local_file_cache_unlink",
+        "main",
         config_lines=[
             "shared_buffers='1MB'",
             f"neon.file_cache_path='{cache_dir}/file.cache'",
diff --git a/test_runner/regress/test_logical_replication.py b/test_runner/regress/test_logical_replication.py
index f83a833dda..15a3719e0b 100644
--- a/test_runner/regress/test_logical_replication.py
+++ b/test_runner/regress/test_logical_replication.py
@@ -36,10 +36,8 @@ def test_logical_replication(neon_simple_env: NeonEnv, vanilla_pg):
     env = neon_simple_env
 
     tenant_id = env.initial_tenant
-    timeline_id = env.neon_cli.create_branch("test_logical_replication", "empty")
-    endpoint = env.endpoints.create_start(
-        "test_logical_replication", config_lines=["log_statement=all"]
-    )
+    timeline_id = env.initial_timeline
+    endpoint = env.endpoints.create_start("main", config_lines=["log_statement=all"])
 
     pg_conn = endpoint.connect()
     cur = pg_conn.cursor()
@@ -185,10 +183,9 @@ def test_obsolete_slot_drop(neon_simple_env: NeonEnv, vanilla_pg):
 
     env = neon_simple_env
 
-    env.neon_cli.create_branch("test_logical_replication", "empty")
     # set low neon.logical_replication_max_snap_files
     endpoint = env.endpoints.create_start(
-        "test_logical_replication",
+        "main",
         config_lines=["log_statement=all", "neon.logical_replication_max_snap_files=1"],
     )
 
@@ -472,7 +469,7 @@ def test_slots_and_branching(neon_simple_env: NeonEnv):
 def test_replication_shutdown(neon_simple_env: NeonEnv):
     # Ensure Postgres can exit without stuck when a replication job is active + neon extension installed
     env = neon_simple_env
-    env.neon_cli.create_branch("test_replication_shutdown_publisher", "empty")
+    env.neon_cli.create_branch("test_replication_shutdown_publisher", "main")
     pub = env.endpoints.create("test_replication_shutdown_publisher")
 
     env.neon_cli.create_branch("test_replication_shutdown_subscriber")
diff --git a/test_runner/regress/test_migrations.py b/test_runner/regress/test_migrations.py
index bdc5ca907e..e88e56d030 100644
--- a/test_runner/regress/test_migrations.py
+++ b/test_runner/regress/test_migrations.py
@@ -9,9 +9,8 @@ if TYPE_CHECKING:
 
 def test_migrations(neon_simple_env: NeonEnv):
     env = neon_simple_env
-    env.neon_cli.create_branch("test_migrations", "empty")
 
-    endpoint = env.endpoints.create("test_migrations")
+    endpoint = env.endpoints.create("main")
     endpoint.respec(skip_pg_catalog_updates=False)
     endpoint.start()
 
diff --git a/test_runner/regress/test_multixact.py b/test_runner/regress/test_multixact.py
index 88f7a5db59..8a00f8835f 100644
--- a/test_runner/regress/test_multixact.py
+++ b/test_runner/regress/test_multixact.py
@@ -14,8 +14,7 @@ from fixtures.utils import query_scalar
 #
 def test_multixact(neon_simple_env: NeonEnv, test_output_dir):
     env = neon_simple_env
-    env.neon_cli.create_branch("test_multixact", "empty")
-    endpoint = env.endpoints.create_start("test_multixact")
+    endpoint = env.endpoints.create_start("main")
 
     cur = endpoint.connect().cursor()
     cur.execute(
@@ -73,7 +72,9 @@ def test_multixact(neon_simple_env: NeonEnv, test_output_dir):
     assert int(next_multixact_id) > int(next_multixact_id_old)
 
     # Branch at this point
-    env.neon_cli.create_branch("test_multixact_new", "test_multixact", ancestor_start_lsn=lsn)
+    env.neon_cli.create_branch(
+        "test_multixact_new", ancestor_branch_name="main", ancestor_start_lsn=lsn
+    )
     endpoint_new = env.endpoints.create_start("test_multixact_new")
 
     next_multixact_id_new = endpoint_new.safe_psql(
diff --git a/test_runner/regress/test_neon_superuser.py b/test_runner/regress/test_neon_superuser.py
index fd31df84da..7825ec772c 100644
--- a/test_runner/regress/test_neon_superuser.py
+++ b/test_runner/regress/test_neon_superuser.py
@@ -6,7 +6,7 @@ from fixtures.utils import wait_until
 
 def test_neon_superuser(neon_simple_env: NeonEnv, pg_version: PgVersion):
     env = neon_simple_env
-    env.neon_cli.create_branch("test_neon_superuser_publisher", "empty")
+    env.neon_cli.create_branch("test_neon_superuser_publisher", "main")
     pub = env.endpoints.create("test_neon_superuser_publisher")
 
     env.neon_cli.create_branch("test_neon_superuser_subscriber")
diff --git a/test_runner/regress/test_parallel_copy.py b/test_runner/regress/test_parallel_copy.py
index b33e387a66..a5037e8694 100644
--- a/test_runner/regress/test_parallel_copy.py
+++ b/test_runner/regress/test_parallel_copy.py
@@ -41,8 +41,7 @@ async def parallel_load_same_table(endpoint: Endpoint, n_parallel: int):
 # Load data into one table with COPY TO from 5 parallel connections
 def test_parallel_copy(neon_simple_env: NeonEnv, n_parallel=5):
     env = neon_simple_env
-    env.neon_cli.create_branch("test_parallel_copy", "empty")
-    endpoint = env.endpoints.create_start("test_parallel_copy")
+    endpoint = env.endpoints.create_start("main")
 
     # Create test table
     conn = endpoint.connect()
diff --git a/test_runner/regress/test_pg_query_cancellation.py b/test_runner/regress/test_pg_query_cancellation.py
index bad2e5865e..c6b4eff516 100644
--- a/test_runner/regress/test_pg_query_cancellation.py
+++ b/test_runner/regress/test_pg_query_cancellation.py
@@ -42,11 +42,9 @@ def test_cancellations(neon_simple_env: NeonEnv):
     ps_http = ps.http_client()
     ps_http.is_testing_enabled_or_skip()
 
-    env.neon_cli.create_branch("test_config", "empty")
-
     # We don't want to have any racy behaviour with autovacuum IOs
     ep = env.endpoints.create_start(
-        "test_config",
+        "main",
         config_lines=[
             "autovacuum = off",
             "shared_buffers = 128MB",
diff --git a/test_runner/regress/test_pg_waldump.py b/test_runner/regress/test_pg_waldump.py
index 8e80efd9ba..1990d69b6a 100644
--- a/test_runner/regress/test_pg_waldump.py
+++ b/test_runner/regress/test_pg_waldump.py
@@ -22,8 +22,8 @@ def check_wal_segment(pg_waldump_path: str, segment_path: str, test_output_dir):
 def test_pg_waldump(neon_simple_env: NeonEnv, test_output_dir, pg_bin: PgBin):
     env = neon_simple_env
     tenant_id = env.initial_tenant
-    timeline_id = env.neon_cli.create_branch("test_pg_waldump", "empty")
-    endpoint = env.endpoints.create_start("test_pg_waldump")
+    timeline_id = env.initial_timeline
+    endpoint = env.endpoints.create_start("main")
 
     cur = endpoint.connect().cursor()
     cur.execute(
diff --git a/test_runner/regress/test_read_validation.py b/test_runner/regress/test_read_validation.py
index 1ac881553f..78798c5abf 100644
--- a/test_runner/regress/test_read_validation.py
+++ b/test_runner/regress/test_read_validation.py
@@ -15,12 +15,8 @@ extensions = ["pageinspect", "neon_test_utils", "pg_buffercache"]
 #
 def test_read_validation(neon_simple_env: NeonEnv):
     env = neon_simple_env
-    env.neon_cli.create_branch("test_read_validation", "empty")
-
-    endpoint = env.endpoints.create_start(
-        "test_read_validation",
-    )
+    endpoint = env.endpoints.create_start("main")
 
     with closing(endpoint.connect()) as con:
         with con.cursor() as c:
             for e in extensions:
@@ -131,13 +127,9 @@ def test_read_validation(neon_simple_env: NeonEnv):
 
 def test_read_validation_neg(neon_simple_env: NeonEnv):
     env = neon_simple_env
-    env.neon_cli.create_branch("test_read_validation_neg", "empty")
-
     env.pageserver.allowed_errors.append(".*invalid LSN\\(0\\) in request.*")
 
-    endpoint = env.endpoints.create_start(
-        "test_read_validation_neg",
-    )
+    endpoint = env.endpoints.create_start("main")
 
     with closing(endpoint.connect()) as con:
         with con.cursor() as c:
diff --git a/test_runner/regress/test_readonly_node.py b/test_runner/regress/test_readonly_node.py
index 368f60127e..347fc3a04d 100644
--- a/test_runner/regress/test_readonly_node.py
+++ b/test_runner/regress/test_readonly_node.py
@@ -22,8 +22,7 @@ from fixtures.utils import query_scalar
 #
 def test_readonly_node(neon_simple_env: NeonEnv):
     env = neon_simple_env
-    env.neon_cli.create_branch("test_readonly_node", "empty")
-    endpoint_main = env.endpoints.create_start("test_readonly_node")
+    endpoint_main = env.endpoints.create_start("main")
 
     env.pageserver.allowed_errors.extend(
         [
@@ -74,12 +73,12 @@ def test_readonly_node(neon_simple_env: NeonEnv):
 
     # Create first read-only node at the point where only 100 rows were inserted
     endpoint_hundred = env.endpoints.create_start(
-        branch_name="test_readonly_node", endpoint_id="ep-readonly_node_hundred", lsn=lsn_a
+        branch_name="main", endpoint_id="ep-readonly_node_hundred", lsn=lsn_a
     )
 
     # And another at the point where 200100 rows were inserted
     endpoint_more = env.endpoints.create_start(
-        branch_name="test_readonly_node", endpoint_id="ep-readonly_node_more", lsn=lsn_b
+        branch_name="main", endpoint_id="ep-readonly_node_more", lsn=lsn_b
     )
 
     # On the 'hundred' node, we should see only 100 rows
@@ -100,7 +99,7 @@ def test_readonly_node(neon_simple_env: NeonEnv):
 
     # Check creating a node at segment boundary
     endpoint = env.endpoints.create_start(
-        branch_name="test_readonly_node",
+        branch_name="main",
         endpoint_id="ep-branch_segment_boundary",
         lsn=Lsn("0/3000000"),
     )
@@ -112,7 +111,7 @@ def test_readonly_node(neon_simple_env: NeonEnv):
     with pytest.raises(Exception, match="invalid basebackup lsn"):
         # compute node startup with invalid LSN should fail
         env.endpoints.create_start(
-            branch_name="test_readonly_node",
+            branch_name="main",
             endpoint_id="ep-readonly_node_preinitdb",
             lsn=Lsn("0/42"),
         )
@@ -218,14 +217,10 @@ def test_readonly_node_gc(neon_env_builder: NeonEnvBuilder):
 # Similar test, but with more data, and we force checkpoints
 def test_timetravel(neon_simple_env: NeonEnv):
     env = neon_simple_env
-    pageserver_http_client = env.pageserver.http_client()
-    env.neon_cli.create_branch("test_timetravel", "empty")
-    endpoint = env.endpoints.create_start("test_timetravel")
-
+    tenant_id = env.initial_tenant
+    timeline_id = env.initial_timeline
     client = env.pageserver.http_client()
-
-    tenant_id = endpoint.safe_psql("show neon.tenant_id")[0][0]
-    timeline_id = endpoint.safe_psql("show neon.timeline_id")[0][0]
+    endpoint = env.endpoints.create_start("main")
 
     lsns = []
 
@@ -249,7 +244,7 @@ def test_timetravel(neon_simple_env: NeonEnv):
         wait_for_last_record_lsn(client, tenant_id, timeline_id, current_lsn)
 
         # run checkpoint manually to force a new layer file
-        pageserver_http_client.timeline_checkpoint(tenant_id, timeline_id)
+        client.timeline_checkpoint(tenant_id, timeline_id)
 
     ##### Restart pageserver
     env.endpoints.stop_all()
@@ -258,7 +253,7 @@ def test_timetravel(neon_simple_env: NeonEnv):
 
     for i, lsn in lsns:
         endpoint_old = env.endpoints.create_start(
-            branch_name="test_timetravel", endpoint_id=f"ep-old_lsn_{i}", lsn=lsn
+            branch_name="main", endpoint_id=f"ep-old_lsn_{i}", lsn=lsn
         )
         with endpoint_old.cursor() as cur:
             assert query_scalar(cur, f"select count(*) from testtab where iteration={i}") == 100000
diff --git a/test_runner/regress/test_subxacts.py b/test_runner/regress/test_subxacts.py
index 10cb00c780..82075bd723 100644
--- a/test_runner/regress/test_subxacts.py
+++ b/test_runner/regress/test_subxacts.py
@@ -9,8 +9,7 @@ from fixtures.neon_fixtures import NeonEnv, check_restored_datadir_content
 # CLOG.
 def test_subxacts(neon_simple_env: NeonEnv, test_output_dir):
     env = neon_simple_env
-    env.neon_cli.create_branch("test_subxacts", "empty")
-    endpoint = env.endpoints.create_start("test_subxacts")
+    endpoint = env.endpoints.create_start("main")
 
     pg_conn = endpoint.connect()
     cur = pg_conn.cursor()
diff --git a/test_runner/regress/test_timeline_delete.py b/test_runner/regress/test_timeline_delete.py
index 328131cd08..711fcd5016 100644
--- a/test_runner/regress/test_timeline_delete.py
+++ b/test_runner/regress/test_timeline_delete.py
@@ -68,10 +68,13 @@ def test_timeline_delete(neon_simple_env: NeonEnv):
 
     # construct pair of branches to validate that pageserver prohibits
     # deletion of ancestor timelines when they have child branches
-    parent_timeline_id = env.neon_cli.create_branch("test_ancestor_branch_delete_parent", "empty")
+    parent_timeline_id = env.neon_cli.create_branch(
+        new_branch_name="test_ancestor_branch_delete_parent", ancestor_branch_name="main"
+    )
 
     leaf_timeline_id = env.neon_cli.create_branch(
-        "test_ancestor_branch_delete_branch1", "test_ancestor_branch_delete_parent"
+        new_branch_name="test_ancestor_branch_delete_branch1",
+        ancestor_branch_name="test_ancestor_branch_delete_parent",
     )
 
     timeline_path = env.pageserver.timeline_dir(env.initial_tenant, parent_timeline_id)
diff --git a/test_runner/regress/test_timeline_size.py b/test_runner/regress/test_timeline_size.py
index 9bf5f8680b..f2265dd3d9 100644
--- a/test_runner/regress/test_timeline_size.py
+++ b/test_runner/regress/test_timeline_size.py
@@ -36,7 +36,7 @@ from fixtures.utils import get_timeline_dir_size, wait_until
 
 def test_timeline_size(neon_simple_env: NeonEnv):
     env = neon_simple_env
-    new_timeline_id = env.neon_cli.create_branch("test_timeline_size", "empty")
+    new_timeline_id = env.neon_cli.create_branch("test_timeline_size", "main")
 
     client = env.pageserver.http_client()
     client.timeline_wait_logical_size(env.initial_tenant, new_timeline_id)
@@ -68,7 +68,7 @@ def test_timeline_size(neon_simple_env: NeonEnv):
 
 def test_timeline_size_createdropdb(neon_simple_env: NeonEnv):
     env = neon_simple_env
-    new_timeline_id = env.neon_cli.create_branch("test_timeline_size_createdropdb", "empty")
+    new_timeline_id = env.neon_cli.create_branch("test_timeline_size_createdropdb", "main")
 
     client = env.pageserver.http_client()
     client.timeline_wait_logical_size(env.initial_tenant, new_timeline_id)
diff --git a/test_runner/regress/test_twophase.py b/test_runner/regress/test_twophase.py
index dd76689008..ea900b07b8 100644
--- a/test_runner/regress/test_twophase.py
+++ b/test_runner/regress/test_twophase.py
@@ -9,10 +9,7 @@ from fixtures.neon_fixtures import NeonEnv, fork_at_current_lsn
 #
 def test_twophase(neon_simple_env: NeonEnv):
     env = neon_simple_env
-    env.neon_cli.create_branch("test_twophase", "empty")
-    endpoint = env.endpoints.create_start(
-        "test_twophase", config_lines=["max_prepared_transactions=5"]
-    )
+    endpoint = env.endpoints.create_start("main", config_lines=["max_prepared_transactions=5"])
 
     conn = endpoint.connect()
     cur = conn.cursor()
@@ -56,7 +53,7 @@ def test_twophase(neon_simple_env: NeonEnv):
     assert len(twophase_files) == 2
 
     # Create a branch with the transaction in prepared state
-    fork_at_current_lsn(env, endpoint, "test_twophase_prepared", "test_twophase")
+    fork_at_current_lsn(env, endpoint, "test_twophase_prepared", "main")
 
     # Start compute on the new branch
     endpoint2 = env.endpoints.create_start(
diff --git a/test_runner/regress/test_unlogged.py b/test_runner/regress/test_unlogged.py
index 137d28b9fa..deba29536c 100644
--- a/test_runner/regress/test_unlogged.py
+++ b/test_runner/regress/test_unlogged.py
@@ -9,8 +9,7 @@ from fixtures.pg_version import PgVersion
 #
 def test_unlogged(neon_simple_env: NeonEnv):
     env = neon_simple_env
-    env.neon_cli.create_branch("test_unlogged", "empty")
-    endpoint = env.endpoints.create_start("test_unlogged")
+    endpoint = env.endpoints.create_start("main")
 
     conn = endpoint.connect()
     cur = conn.cursor()
@@ -22,7 +21,7 @@ def test_unlogged(neon_simple_env: NeonEnv):
     cur.execute("INSERT INTO iut (id) values (42);")
 
     # create another compute to fetch inital empty contents from pageserver
-    fork_at_current_lsn(env, endpoint, "test_unlogged_basebackup", "test_unlogged")
+    fork_at_current_lsn(env, endpoint, "test_unlogged_basebackup", "main")
     endpoint2 = env.endpoints.create_start("test_unlogged_basebackup")
 
     conn2 = endpoint2.connect()
diff --git a/test_runner/regress/test_vm_bits.py b/test_runner/regress/test_vm_bits.py
index 7272979c4a..3075211ada 100644
--- a/test_runner/regress/test_vm_bits.py
+++ b/test_runner/regress/test_vm_bits.py
@@ -13,8 +13,7 @@ from fixtures.utils import query_scalar
 
 def test_vm_bit_clear(neon_simple_env: NeonEnv):
     env = neon_simple_env
-    env.neon_cli.create_branch("test_vm_bit_clear", "empty")
-    endpoint = env.endpoints.create_start("test_vm_bit_clear")
+    endpoint = env.endpoints.create_start("main")
 
     pg_conn = endpoint.connect()
     cur = pg_conn.cursor()
@@ -58,7 +57,7 @@ def test_vm_bit_clear(neon_simple_env: NeonEnv):
     cur.execute("UPDATE vmtest_cold_update2 SET id = 5000, filler=repeat('x', 200) WHERE id = 1")
 
     # Branch at this point, to test that later
-    fork_at_current_lsn(env, endpoint, "test_vm_bit_clear_new", "test_vm_bit_clear")
+    fork_at_current_lsn(env, endpoint, "test_vm_bit_clear_new", "main")
 
     # Clear the buffer cache, to force the VM page to be re-fetched from
     # the page server
diff --git a/test_runner/test_broken.py b/test_runner/test_broken.py
index 7e8aef5a5f..d710b53528 100644
--- a/test_runner/test_broken.py
+++ b/test_runner/test_broken.py
@@ -23,8 +23,7 @@ run_broken = pytest.mark.skipif(
 def test_broken(neon_simple_env: NeonEnv, pg_bin):
     env = neon_simple_env
 
-    env.neon_cli.create_branch("test_broken", "empty")
-    env.endpoints.create_start("test_broken")
+    env.endpoints.create_start("main")
 
     log.info("postgres is running")
     log.info("THIS NEXT COMMAND WILL FAIL:")
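For reference, the test pattern that the updated README text describes looks roughly like the sketch below. This is an illustrative example only, not part of the patch: the test name, table, and child branch name are invented, while `neon_simple_env`, `env.endpoints.create_start("main")`, `fork_at_current_lsn`, and `safe_psql` are used as they appear in the hunks above.

# Illustrative sketch only (not part of this patch): a test that runs directly
# on the default 'main' branch and forks a child branch from it when needed.
from fixtures.neon_fixtures import NeonEnv, fork_at_current_lsn


def test_example_uses_main(neon_simple_env: NeonEnv):
    env = neon_simple_env

    # Start a compute on 'main'; no per-test branch of 'empty' is needed anymore.
    endpoint = env.endpoints.create_start("main")
    endpoint.safe_psql("CREATE TABLE t (x int)")
    endpoint.safe_psql("INSERT INTO t SELECT generate_series(1, 100)")

    # A test that still wants its own timeline branches it off 'main' instead of 'empty'.
    fork_at_current_lsn(env, endpoint, "test_example_child", "main")
    child = env.endpoints.create_start("test_example_child")
    assert child.safe_psql("SELECT count(*) FROM t")[0][0] == 100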