tests: Replace direct calls to neon_cli with wrappers in NeonEnv (#9195)

Add wrappers for a few commands that didn't have them before. Move the
logic to generate tenant and timeline IDs from NeonCli to the callers,
so that NeonCli is more purely just a type-safe wrapper around
'neon_local'.
Author: Heikki Linnakangas
Date:   2024-10-03 22:03:22 +03:00
Parent: 4e9b32c442
Commit: 19db9e9aad
85 changed files with 491 additions and 404 deletions
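
In practice the change moves test code off the CLI wrapper and onto NeonEnv itself. A minimal before/after sketch of a typical call site, condensed from the diff below (illustrative only; the variable names are placeholders):

    # before: tests reached into the CLI wrapper, which also generated any missing IDs
    timeline_id = env.neon_cli.create_branch("b1", "b0", tenant_id=tenant, ancestor_start_lsn=lsn)

    # after: NeonEnv generates the IDs and passes explicit arguments down to 'neon_local'
    timeline_id = env.create_branch(
        "b1", ancestor_branch_name="b0", ancestor_start_lsn=lsn, tenant_id=tenant
    )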

View File

@@ -491,7 +491,7 @@ class NeonEnvBuilder:
log.debug(
f"Services started, creating initial tenant {env.initial_tenant} and its initial timeline"
)
initial_tenant, initial_timeline = env.neon_cli.create_tenant(
initial_tenant, initial_timeline = env.create_tenant(
tenant_id=env.initial_tenant,
conf=initial_tenant_conf,
timeline_id=env.initial_timeline,
@@ -954,8 +954,14 @@ class NeonEnv:
neon_cli - can be used to run the 'neon' CLI tool
create_tenant() - initializes a new tenant in the page server, returns
the tenant id
create_tenant() - initializes a new tenant and an initial empty timeline on it,
returns the tenant and timeline id
create_branch() - branch a new timeline from an existing one, returns
the new timeline id
create_timeline() - initializes a new timeline by running initdb, returns
the new timeline id
"""
BASE_PAGESERVER_ID = 1
@@ -1310,6 +1316,74 @@ class NeonEnv:
self.endpoint_counter += 1
return "ep-" + str(self.endpoint_counter)
def create_tenant(
self,
tenant_id: Optional[TenantId] = None,
timeline_id: Optional[TimelineId] = None,
conf: Optional[Dict[str, Any]] = None,
shard_count: Optional[int] = None,
shard_stripe_size: Optional[int] = None,
placement_policy: Optional[str] = None,
set_default: bool = False,
aux_file_policy: Optional[AuxFileStore] = None,
) -> Tuple[TenantId, TimelineId]:
"""
Creates a new tenant, returns its id and its initial timeline's id.
"""
tenant_id = tenant_id or TenantId.generate()
timeline_id = timeline_id or TimelineId.generate()
self.neon_cli.create_tenant(
tenant_id=tenant_id,
timeline_id=timeline_id,
pg_version=self.pg_version,
conf=conf,
shard_count=shard_count,
shard_stripe_size=shard_stripe_size,
placement_policy=placement_policy,
set_default=set_default,
aux_file_policy=aux_file_policy,
)
return tenant_id, timeline_id
def config_tenant(self, tenant_id: Optional[TenantId], conf: Dict[str, str]):
"""
Update tenant config.
"""
tenant_id = tenant_id or self.initial_tenant
self.neon_cli.config_tenant(tenant_id, conf)
def create_branch(
self,
new_branch_name: str = DEFAULT_BRANCH_NAME,
tenant_id: Optional[TenantId] = None,
ancestor_branch_name: Optional[str] = None,
ancestor_start_lsn: Optional[Lsn] = None,
new_timeline_id: Optional[TimelineId] = None,
) -> TimelineId:
new_timeline_id = new_timeline_id or TimelineId.generate()
tenant_id = tenant_id or self.initial_tenant
self.neon_cli.create_branch(
tenant_id, new_timeline_id, new_branch_name, ancestor_branch_name, ancestor_start_lsn
)
return new_timeline_id
def create_timeline(
self,
new_branch_name: str,
tenant_id: Optional[TenantId] = None,
timeline_id: Optional[TimelineId] = None,
) -> TimelineId:
timeline_id = timeline_id or TimelineId.generate()
tenant_id = tenant_id or self.initial_tenant
self.neon_cli.create_timeline(new_branch_name, tenant_id, timeline_id, self.pg_version)
return timeline_id
@pytest.fixture(scope="function")
def neon_simple_env(
@@ -1559,21 +1633,19 @@ class NeonCli(AbstractNeonCli):
def create_tenant(
self,
tenant_id: Optional[TenantId] = None,
timeline_id: Optional[TimelineId] = None,
tenant_id: TenantId,
timeline_id: TimelineId,
pg_version: PgVersion,
conf: Optional[Dict[str, Any]] = None,
shard_count: Optional[int] = None,
shard_stripe_size: Optional[int] = None,
placement_policy: Optional[str] = None,
set_default: bool = False,
aux_file_policy: Optional[AuxFileStore] = None,
) -> Tuple[TenantId, TimelineId]:
):
"""
Creates a new tenant, returns its id and its initial timeline's id.
"""
tenant_id = tenant_id or TenantId.generate()
timeline_id = timeline_id or TimelineId.generate()
args = [
"tenant",
"create",
@@ -1582,7 +1654,7 @@ class NeonCli(AbstractNeonCli):
"--timeline-id",
str(timeline_id),
"--pg-version",
self.env.pg_version,
pg_version,
]
if conf is not None:
args.extend(
@@ -1612,7 +1684,6 @@ class NeonCli(AbstractNeonCli):
res = self.raw_cli(args)
res.check_returncode()
return tenant_id, timeline_id
def import_tenant(self, tenant_id: TenantId):
args = ["tenant", "import", "--tenant-id", str(tenant_id)]
@@ -1650,8 +1721,9 @@ class NeonCli(AbstractNeonCli):
def create_timeline(
self,
new_branch_name: str,
tenant_id: Optional[TenantId] = None,
timeline_id: Optional[TimelineId] = None,
tenant_id: TenantId,
timeline_id: TimelineId,
pg_version: PgVersion,
) -> TimelineId:
if timeline_id is None:
timeline_id = TimelineId.generate()
@@ -1662,11 +1734,11 @@ class NeonCli(AbstractNeonCli):
"--branch-name",
new_branch_name,
"--tenant-id",
str(tenant_id or self.env.initial_tenant),
str(tenant_id),
"--timeline-id",
str(timeline_id),
"--pg-version",
self.env.pg_version,
pg_version,
]
res = self.raw_cli(cmd)
@@ -1676,23 +1748,21 @@ class NeonCli(AbstractNeonCli):
def create_branch(
self,
tenant_id: TenantId,
timeline_id: TimelineId,
new_branch_name: str = DEFAULT_BRANCH_NAME,
ancestor_branch_name: Optional[str] = None,
tenant_id: Optional[TenantId] = None,
ancestor_start_lsn: Optional[Lsn] = None,
new_timeline_id: Optional[TimelineId] = None,
) -> TimelineId:
if new_timeline_id is None:
new_timeline_id = TimelineId.generate()
):
cmd = [
"timeline",
"branch",
"--branch-name",
new_branch_name,
"--timeline-id",
str(new_timeline_id),
str(timeline_id),
"--tenant-id",
str(tenant_id or self.env.initial_tenant),
str(tenant_id),
]
if ancestor_branch_name is not None:
cmd.extend(["--ancestor-branch-name", ancestor_branch_name])
@@ -1702,8 +1772,6 @@ class NeonCli(AbstractNeonCli):
res = self.raw_cli(cmd)
res.check_returncode()
return TimelineId(str(new_timeline_id))
def list_timelines(self, tenant_id: Optional[TenantId] = None) -> List[Tuple[str, TimelineId]]:
"""
Returns a list of (branch_name, timeline_id) tuples out of parsed `neon timeline list` CLI output.
@@ -1841,8 +1909,9 @@ class NeonCli(AbstractNeonCli):
branch_name: str,
pg_port: int,
http_port: int,
tenant_id: TenantId,
pg_version: PgVersion,
endpoint_id: Optional[str] = None,
tenant_id: Optional[TenantId] = None,
hot_standby: bool = False,
lsn: Optional[Lsn] = None,
pageserver_id: Optional[int] = None,
@@ -1852,11 +1921,11 @@ class NeonCli(AbstractNeonCli):
"endpoint",
"create",
"--tenant-id",
str(tenant_id or self.env.initial_tenant),
str(tenant_id),
"--branch-name",
branch_name,
"--pg-version",
self.env.pg_version,
pg_version,
]
if lsn is not None:
args.extend(["--lsn", str(lsn)])
@@ -3953,6 +4022,7 @@ class Endpoint(PgProtocol, LogUtils):
hot_standby=hot_standby,
pg_port=self.pg_port,
http_port=self.http_port,
pg_version=self.env.pg_version,
pageserver_id=pageserver_id,
allow_multiple=allow_multiple,
)
@@ -5282,7 +5352,12 @@ def fork_at_current_lsn(
the WAL up to that LSN to arrive in the pageserver before creating the branch.
"""
current_lsn = endpoint.safe_psql("SELECT pg_current_wal_lsn()")[0][0]
return env.neon_cli.create_branch(new_branch_name, ancestor_branch_name, tenant_id, current_lsn)
return env.create_branch(
new_branch_name=new_branch_name,
tenant_id=tenant_id,
ancestor_branch_name=ancestor_branch_name,
ancestor_start_lsn=current_lsn,
)
def import_timeline_from_vanilla_postgres(
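
With the wrappers in place, a test that needs its own tenant and branches can stay entirely on NeonEnv; a hedged sketch of the resulting call pattern (branch names and config values are illustrative):

    tenant_id, timeline_id = env.create_tenant(conf={"gc_period": "0s"})
    child_timeline_id = env.create_branch(
        "child", ancestor_branch_name="main", tenant_id=tenant_id
    )
    endpoint = env.endpoints.create_start("child", tenant_id=tenant_id)

Tests that need deterministic IDs generate them up front with TenantId.generate() and TimelineId.generate() and pass them in explicitly, as the sharding and secondary-download tests further down do.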

View File

@@ -53,7 +53,7 @@ def setup_env(
"checkpoint_distance": 268435456,
"image_creation_threshold": 3,
}
template_tenant, template_timeline = env.neon_cli.create_tenant(set_default=True)
template_tenant, template_timeline = env.create_tenant(set_default=True)
env.pageserver.tenant_detach(template_tenant)
env.pageserver.tenant_attach(template_tenant, config)
ep = env.endpoints.create_start("main", tenant_id=template_tenant)

View File

@@ -81,7 +81,7 @@ def setup_tenant_template(env: NeonEnv, n_txns: int):
"image_creation_threshold": 3,
}
template_tenant, template_timeline = env.neon_cli.create_tenant(set_default=True)
template_tenant, template_timeline = env.create_tenant(set_default=True)
env.pageserver.tenant_detach(template_tenant)
env.pageserver.tenant_attach(template_tenant, config)

View File

@@ -162,7 +162,7 @@ def setup_tenant_template(env: NeonEnv, pg_bin: PgBin, scale: int):
"checkpoint_distance": 268435456,
"image_creation_threshold": 3,
}
template_tenant, template_timeline = env.neon_cli.create_tenant(set_default=True)
template_tenant, template_timeline = env.create_tenant(set_default=True)
env.pageserver.tenant_detach(template_tenant)
env.pageserver.tenant_attach(template_tenant, config)
ps_http = env.pageserver.http_client()

View File

@@ -41,7 +41,7 @@ def test_branch_creation_heavy_write(neon_compare: NeonCompare, n_branches: int)
pg_bin = neon_compare.pg_bin
# Use aggressive GC and checkpoint settings, so GC and compaction happen more often during the test
tenant, _ = env.neon_cli.create_tenant(
tenant, _ = env.create_tenant(
conf={
"gc_period": "5 s",
"gc_horizon": f"{4 * 1024 ** 2}",
@@ -64,7 +64,7 @@ def test_branch_creation_heavy_write(neon_compare: NeonCompare, n_branches: int)
endpoint.stop()
env.neon_cli.create_branch("b0", tenant_id=tenant)
env.create_branch("b0", tenant_id=tenant)
threads: List[threading.Thread] = []
threads.append(threading.Thread(target=run_pgbench, args=("b0",), daemon=True))
@@ -78,7 +78,7 @@ def test_branch_creation_heavy_write(neon_compare: NeonCompare, n_branches: int)
p = random.randint(0, i)
timer = timeit.default_timer()
env.neon_cli.create_branch(f"b{i + 1}", f"b{p}", tenant_id=tenant)
env.create_branch(f"b{i + 1}", ancestor_branch_name=f"b{p}", tenant_id=tenant)
dur = timeit.default_timer() - timer
log.info(f"Creating branch b{i+1} took {dur}s")
@@ -104,7 +104,7 @@ def test_branch_creation_many(neon_compare: NeonCompare, n_branches: int, shape:
# seed the prng so we will measure the same structure every time
rng = random.Random("2024-02-29")
env.neon_cli.create_branch("b0")
env.create_branch("b0")
endpoint = env.endpoints.create_start("b0")
neon_compare.pg_bin.run_capture(["pgbench", "-i", "-I", "dtGvp", "-s10", endpoint.connstr()])
@@ -121,7 +121,7 @@ def test_branch_creation_many(neon_compare: NeonCompare, n_branches: int, shape:
timer = timeit.default_timer()
# each of these uploads to remote storage before completion
env.neon_cli.create_branch(f"b{i + 1}", parent)
env.create_branch(f"b{i + 1}", ancestor_branch_name=parent)
dur = timeit.default_timer() - timer
branch_creation_durations.append(dur)
@@ -222,7 +222,7 @@ def wait_and_record_startup_metrics(
def test_branch_creation_many_relations(neon_compare: NeonCompare):
env = neon_compare.env
timeline_id = env.neon_cli.create_branch("root")
timeline_id = env.create_branch("root")
endpoint = env.endpoints.create_start("root")
with closing(endpoint.connect()) as conn:
@@ -238,7 +238,7 @@ def test_branch_creation_many_relations(neon_compare: NeonCompare):
)
with neon_compare.record_duration("create_branch_time_not_busy_root"):
env.neon_cli.create_branch("child_not_busy", "root")
env.create_branch("child_not_busy", ancestor_branch_name="root")
# run a concurrent insertion to make the ancestor "busy" during the branch creation
thread = threading.Thread(
@@ -247,6 +247,6 @@ def test_branch_creation_many_relations(neon_compare: NeonCompare):
thread.start()
with neon_compare.record_duration("create_branch_time_busy_root"):
env.neon_cli.create_branch("child_busy", "root")
env.create_branch("child_busy", ancestor_branch_name="root")
thread.join()

View File

@@ -41,7 +41,7 @@ def test_compare_child_and_root_pgbench_perf(neon_compare: NeonCompare):
)
neon_compare.zenbenchmark.record_pg_bench_result(branch, res)
env.neon_cli.create_branch("root")
env.create_branch("root")
endpoint_root = env.endpoints.create_start("root")
pg_bin.run_capture(["pgbench", "-i", "-I", "dtGvp", endpoint_root.connstr(), "-s10"])
@@ -55,14 +55,14 @@ def test_compare_child_and_root_pgbench_perf(neon_compare: NeonCompare):
def test_compare_child_and_root_write_perf(neon_compare: NeonCompare):
env = neon_compare.env
env.neon_cli.create_branch("root")
env.create_branch("root")
endpoint_root = env.endpoints.create_start("root")
endpoint_root.safe_psql(
"CREATE TABLE foo(key serial primary key, t text default 'foooooooooooooooooooooooooooooooooooooooooooooooooooo')",
)
env.neon_cli.create_branch("child", "root")
env.create_branch("child", ancestor_branch_name="root")
endpoint_child = env.endpoints.create_start("child")
with neon_compare.record_duration("root_run_duration"):
@@ -73,7 +73,7 @@ def test_compare_child_and_root_write_perf(neon_compare: NeonCompare):
def test_compare_child_and_root_read_perf(neon_compare: NeonCompare):
env = neon_compare.env
env.neon_cli.create_branch("root")
env.create_branch("root")
endpoint_root = env.endpoints.create_start("root")
endpoint_root.safe_psql_many(
@@ -83,7 +83,7 @@ def test_compare_child_and_root_read_perf(neon_compare: NeonCompare):
]
)
env.neon_cli.create_branch("child", "root")
env.create_branch("child", ancestor_branch_name="root")
endpoint_child = env.endpoints.create_start("child")
with neon_compare.record_duration("root_run_duration"):

View File

@@ -26,10 +26,8 @@ def test_bulk_tenant_create(
for i in range(tenants_count):
start = timeit.default_timer()
tenant, _ = env.neon_cli.create_tenant()
env.neon_cli.create_timeline(
f"test_bulk_tenant_create_{tenants_count}_{i}", tenant_id=tenant
)
tenant, _ = env.create_tenant()
env.create_timeline(f"test_bulk_tenant_create_{tenants_count}_{i}", tenant_id=tenant)
# FIXME: We used to start new safekeepers here. Did that make sense? Should we do it now?
# if use_safekeepers == 'with_sa':

View File

@@ -16,7 +16,7 @@ def test_bulk_update(neon_env_builder: NeonEnvBuilder, zenbenchmark, fillfactor)
env = neon_env_builder.init_start()
n_records = 1000000
timeline_id = env.neon_cli.create_branch("test_bulk_update")
timeline_id = env.create_branch("test_bulk_update")
tenant_id = env.initial_tenant
endpoint = env.endpoints.create_start("test_bulk_update")
cur = endpoint.connect().cursor()

View File

@@ -17,7 +17,7 @@ def test_compaction(neon_compare: NeonCompare):
env = neon_compare.env
pageserver_http = env.pageserver.http_client()
tenant_id, timeline_id = env.neon_cli.create_tenant(
tenant_id, timeline_id = env.create_tenant(
conf={
# Disable background GC and compaction, we'll run compaction manually.
"gc_period": "0s",
@@ -68,7 +68,7 @@ def test_compaction_l0_memory(neon_compare: NeonCompare):
env = neon_compare.env
pageserver_http = env.pageserver.http_client()
tenant_id, timeline_id = env.neon_cli.create_tenant(
tenant_id, timeline_id = env.create_tenant(
conf={
# Initially disable compaction so that we will build up a stack of L0s
"compaction_period": "0s",

View File

@@ -11,7 +11,7 @@ def gc_feedback_impl(neon_env_builder: NeonEnvBuilder, zenbenchmark: NeonBenchma
env = neon_env_builder.init_start()
client = env.pageserver.http_client()
tenant_id, _ = env.neon_cli.create_tenant(
tenant_id, _ = env.create_tenant(
conf={
# disable default GC and compaction
"gc_period": "1000 m",
@@ -63,7 +63,7 @@ def gc_feedback_impl(neon_env_builder: NeonEnvBuilder, zenbenchmark: NeonBenchma
log.info(f"Physical storage size {physical_size}")
if mode == "with_snapshots":
if step == n_steps / 2:
env.neon_cli.create_branch("child")
env.create_branch("child")
max_num_of_deltas_above_image = 0
max_total_num_of_deltas = 0

View File

@@ -15,7 +15,7 @@ def test_layer_map(neon_env_builder: NeonEnvBuilder, zenbenchmark):
# We want to have a lot of layer files to exercise the layer map. Disable
# GC, and make checkpoint_distance very small, so that we get a lot of small layer
# files.
tenant, timeline = env.neon_cli.create_tenant(
tenant, timeline = env.create_tenant(
conf={
"gc_period": "0s",
"checkpoint_distance": "16384",

View File

@@ -33,7 +33,7 @@ def test_lazy_startup(slru: str, neon_env_builder: NeonEnvBuilder, zenbenchmark:
env = neon_env_builder.init_start()
lazy_slru_download = "true" if slru == "lazy" else "false"
tenant, _ = env.neon_cli.create_tenant(
tenant, _ = env.create_tenant(
conf={
"lazy_slru_download": lazy_slru_download,
}

View File

@@ -85,7 +85,7 @@ def test_sharding_autosplit(neon_env_builder: NeonEnvBuilder, pg_bin: PgBin):
tenants = {}
for tenant_id in set(TenantId.generate() for _i in range(0, tenant_count)):
timeline_id = TimelineId.generate()
env.neon_cli.create_tenant(tenant_id, timeline_id, conf=tenant_conf)
env.create_tenant(tenant_id, timeline_id, conf=tenant_conf)
endpoint = env.endpoints.create("main", tenant_id=tenant_id)
tenants[tenant_id] = TenantState(timeline_id, endpoint)
endpoint.start()

View File

@@ -27,7 +27,7 @@ def test_startup_simple(neon_env_builder: NeonEnvBuilder, zenbenchmark: NeonBenc
neon_env_builder.num_safekeepers = 3
env = neon_env_builder.init_start()
env.neon_cli.create_branch("test_startup")
env.create_branch("test_startup")
endpoint = None

View File

@@ -12,7 +12,7 @@ def test_ancestor_branch(neon_env_builder: NeonEnvBuilder):
pageserver_http = env.pageserver.http_client()
# Override defaults: 4M checkpoint_distance, disable background compaction and gc.
tenant, _ = env.neon_cli.create_tenant(
tenant, _ = env.create_tenant(
conf={
"checkpoint_distance": "4194304",
"gc_period": "0s",
@@ -45,7 +45,9 @@ def test_ancestor_branch(neon_env_builder: NeonEnvBuilder):
log.info(f"LSN after 100k rows: {lsn_100}")
# Create branch1.
env.neon_cli.create_branch("branch1", "main", tenant_id=tenant, ancestor_start_lsn=lsn_100)
env.create_branch(
"branch1", ancestor_branch_name="main", ancestor_start_lsn=lsn_100, tenant_id=tenant
)
endpoint_branch1 = env.endpoints.create_start("branch1", tenant_id=tenant)
branch1_cur = endpoint_branch1.connect().cursor()
@@ -67,7 +69,9 @@ def test_ancestor_branch(neon_env_builder: NeonEnvBuilder):
log.info(f"LSN after 200k rows: {lsn_200}")
# Create branch2.
env.neon_cli.create_branch("branch2", "branch1", tenant_id=tenant, ancestor_start_lsn=lsn_200)
env.create_branch(
"branch2", ancestor_branch_name="branch1", ancestor_start_lsn=lsn_200, tenant_id=tenant
)
endpoint_branch2 = env.endpoints.create_start("branch2", tenant_id=tenant)
branch2_cur = endpoint_branch2.connect().cursor()

View File

@@ -41,7 +41,7 @@ def negative_env(neon_env_builder: NeonEnvBuilder) -> Generator[NegativeTests, N
assert isinstance(env.pageserver_remote_storage, LocalFsStorage)
ps_http = env.pageserver.http_client()
(tenant_id, _) = env.neon_cli.create_tenant()
(tenant_id, _) = env.create_tenant()
assert ps_http.tenant_config(tenant_id).tenant_specific_overrides == {}
config_pre_detach = ps_http.tenant_config(tenant_id)
assert tenant_id in [TenantId(t["id"]) for t in ps_http.tenant_list()]
@@ -109,7 +109,7 @@ def test_empty_config(positive_env: NeonEnv, content_type: Optional[str]):
"""
env = positive_env
ps_http = env.pageserver.http_client()
(tenant_id, _) = env.neon_cli.create_tenant()
(tenant_id, _) = env.create_tenant()
assert ps_http.tenant_config(tenant_id).tenant_specific_overrides == {}
config_pre_detach = ps_http.tenant_config(tenant_id)
assert tenant_id in [TenantId(t["id"]) for t in ps_http.tenant_list()]
@@ -182,7 +182,7 @@ def test_fully_custom_config(positive_env: NeonEnv):
fully_custom_config.keys()
), "ensure we cover all config options"
(tenant_id, _) = env.neon_cli.create_tenant()
(tenant_id, _) = env.create_tenant()
ps_http.set_tenant_config(tenant_id, fully_custom_config)
our_tenant_config = ps_http.tenant_config(tenant_id)
assert our_tenant_config.tenant_specific_overrides == fully_custom_config

View File

@@ -76,7 +76,7 @@ def test_compute_auth_to_pageserver(neon_env_builder: NeonEnvBuilder):
env = neon_env_builder.init_start()
branch = "test_compute_auth_to_pageserver"
env.neon_cli.create_branch(branch)
env.create_branch(branch)
endpoint = env.endpoints.create_start(branch)
with closing(endpoint.connect()) as conn:
@@ -186,7 +186,7 @@ def test_auth_failures(neon_env_builder: NeonEnvBuilder, auth_enabled: bool):
env = neon_env_builder.init_start()
branch = f"test_auth_failures_auth_enabled_{auth_enabled}"
timeline_id = env.neon_cli.create_branch(branch)
timeline_id = env.create_branch(branch)
env.endpoints.create_start(branch)
tenant_token = env.auth_keys.generate_tenant_token(env.initial_tenant)

View File

@@ -98,7 +98,7 @@ def check_backpressure(endpoint: Endpoint, stop_event: threading.Event, polling_
def test_backpressure_received_lsn_lag(neon_env_builder: NeonEnvBuilder):
env = neon_env_builder.init_start()
# Create a branch for us
env.neon_cli.create_branch("test_backpressure")
env.create_branch("test_backpressure")
endpoint = env.endpoints.create(
"test_backpressure", config_lines=["max_replication_write_lag=30MB"]

View File

@@ -22,7 +22,7 @@ def test_compute_pageserver_connection_stress(neon_env_builder: NeonEnvBuilder):
pageserver_http = env.pageserver.http_client()
pageserver_http.configure_failpoints(("simulated-bad-compute-connection", "50%return(15)"))
env.neon_cli.create_branch("test_compute_pageserver_connection_stress")
env.create_branch("test_compute_pageserver_connection_stress")
endpoint = env.endpoints.create_start("test_compute_pageserver_connection_stress")
pg_conn = endpoint.connect()

View File

@@ -53,7 +53,7 @@ def test_branch_and_gc(neon_simple_env: NeonEnv, build_type: str):
env = neon_simple_env
pageserver_http_client = env.pageserver.http_client()
tenant, timeline_main = env.neon_cli.create_tenant(
tenant, timeline_main = env.create_tenant(
conf={
# disable background GC
"gc_period": "0s",
@@ -90,7 +90,7 @@ def test_branch_and_gc(neon_simple_env: NeonEnv, build_type: str):
pageserver_http_client.timeline_checkpoint(tenant, timeline_main)
pageserver_http_client.timeline_gc(tenant, timeline_main, lsn2 - lsn1 + 1024)
env.neon_cli.create_branch(
env.create_branch(
"test_branch", ancestor_branch_name="main", ancestor_start_lsn=lsn1, tenant_id=tenant
)
endpoint_branch = env.endpoints.create_start("test_branch", tenant_id=tenant)
@@ -127,7 +127,7 @@ def test_branch_creation_before_gc(neon_simple_env: NeonEnv):
env.storage_controller.allowed_errors.extend(error_regexes)
# Disable background GC but set the `pitr_interval` to be small, so GC can delete something
tenant, _ = env.neon_cli.create_tenant(
tenant, _ = env.create_tenant(
conf={
# disable background GC
"gc_period": "0s",
@@ -145,7 +145,7 @@ def test_branch_creation_before_gc(neon_simple_env: NeonEnv):
}
)
b0 = env.neon_cli.create_branch("b0", tenant_id=tenant)
b0 = env.create_branch("b0", tenant_id=tenant)
endpoint0 = env.endpoints.create_start("b0", tenant_id=tenant)
res = endpoint0.safe_psql_many(
queries=[
@@ -176,7 +176,7 @@ def test_branch_creation_before_gc(neon_simple_env: NeonEnv):
# The starting LSN is invalid as the corresponding record is scheduled to be removed by in-queue GC.
with pytest.raises(Exception, match="invalid branch start lsn: .*"):
env.neon_cli.create_branch("b1", "b0", tenant_id=tenant, ancestor_start_lsn=lsn)
env.create_branch("b1", ancestor_branch_name="b0", ancestor_start_lsn=lsn, tenant_id=tenant)
# retry the same with the HTTP API, so that we can inspect the status code
with pytest.raises(TimelineCreate406):
new_timeline_id = TimelineId.generate()

View File

@@ -23,7 +23,7 @@ def test_branch_behind(neon_env_builder: NeonEnvBuilder):
env.storage_controller.allowed_errors.extend(error_regexes)
# Branch at the point where only 100 rows were inserted
branch_behind_timeline_id = env.neon_cli.create_branch("test_branch_behind")
branch_behind_timeline_id = env.create_branch("test_branch_behind")
endpoint_main = env.endpoints.create_start("test_branch_behind")
main_cur = endpoint_main.connect().cursor()
@@ -58,8 +58,10 @@ def test_branch_behind(neon_env_builder: NeonEnvBuilder):
log.info(f"LSN after 200100 rows: {lsn_b}")
# Branch at the point where only 100 rows were inserted
env.neon_cli.create_branch(
"test_branch_behind_hundred", "test_branch_behind", ancestor_start_lsn=lsn_a
env.create_branch(
"test_branch_behind_hundred",
ancestor_branch_name="test_branch_behind",
ancestor_start_lsn=lsn_a,
)
# Insert many more rows. This generates enough WAL to fill a few segments.
@@ -75,8 +77,10 @@ def test_branch_behind(neon_env_builder: NeonEnvBuilder):
log.info(f"LSN after 400100 rows: {lsn_c}")
# Branch at the point where only 200100 rows were inserted
env.neon_cli.create_branch(
"test_branch_behind_more", "test_branch_behind", ancestor_start_lsn=lsn_b
env.create_branch(
"test_branch_behind_more",
ancestor_branch_name="test_branch_behind",
ancestor_start_lsn=lsn_b,
)
endpoint_hundred = env.endpoints.create_start("test_branch_behind_hundred")
@@ -97,15 +101,17 @@ def test_branch_behind(neon_env_builder: NeonEnvBuilder):
pageserver_http = env.pageserver.http_client()
# branch at segment boundary
env.neon_cli.create_branch(
"test_branch_segment_boundary", "test_branch_behind", ancestor_start_lsn=Lsn("0/3000000")
env.create_branch(
"test_branch_segment_boundary",
ancestor_branch_name="test_branch_behind",
ancestor_start_lsn=Lsn("0/3000000"),
)
endpoint = env.endpoints.create_start("test_branch_segment_boundary")
assert endpoint.safe_psql("SELECT 1")[0][0] == 1
# branch at pre-initdb lsn (from main branch)
with pytest.raises(Exception, match="invalid branch start lsn: .*"):
env.neon_cli.create_branch("test_branch_preinitdb", ancestor_start_lsn=Lsn("0/42"))
env.create_branch("test_branch_preinitdb", ancestor_start_lsn=Lsn("0/42"))
# retry the same with the HTTP API, so that we can inspect the status code
with pytest.raises(TimelineCreate406):
new_timeline_id = TimelineId.generate()
@@ -116,8 +122,10 @@ def test_branch_behind(neon_env_builder: NeonEnvBuilder):
# branch at pre-ancestor lsn
with pytest.raises(Exception, match="less than timeline ancestor lsn"):
env.neon_cli.create_branch(
"test_branch_preinitdb", "test_branch_behind", ancestor_start_lsn=Lsn("0/42")
env.create_branch(
"test_branch_preinitdb",
ancestor_branch_name="test_branch_behind",
ancestor_start_lsn=Lsn("0/42"),
)
# retry the same with the HTTP API, so that we can inspect the status code
with pytest.raises(TimelineCreate406):
@@ -139,8 +147,10 @@ def test_branch_behind(neon_env_builder: NeonEnvBuilder):
print_gc_result(gc_result)
with pytest.raises(Exception, match="invalid branch start lsn: .*"):
# this gced_lsn is pretty random, so if gc is disabled this wouldn't fail
env.neon_cli.create_branch(
"test_branch_create_fail", "test_branch_behind", ancestor_start_lsn=gced_lsn
env.create_branch(
"test_branch_create_fail",
ancestor_branch_name="test_branch_behind",
ancestor_start_lsn=gced_lsn,
)
# retry the same with the HTTP API, so that we can inspect the status code
with pytest.raises(TimelineCreate406):

View File

@@ -38,7 +38,7 @@ def test_branching_with_pgbench(
env = neon_simple_env
# Use aggressive GC and checkpoint settings, so that we also exercise GC during the test
tenant, _ = env.neon_cli.create_tenant(
tenant, _ = env.create_tenant(
conf={
"gc_period": "5 s",
"gc_horizon": f"{1024 ** 2}",
@@ -55,7 +55,7 @@ def test_branching_with_pgbench(
pg_bin.run_capture(["pgbench", "-i", "-I", "dtGvp", f"-s{scale}", connstr])
pg_bin.run_capture(["pgbench", "-T15", connstr])
env.neon_cli.create_branch("b0", tenant_id=tenant)
env.create_branch("b0", tenant_id=tenant)
endpoints: List[Endpoint] = []
endpoints.append(env.endpoints.create_start("b0", tenant_id=tenant))
@@ -84,9 +84,9 @@ def test_branching_with_pgbench(
threads = []
if ty == "cascade":
env.neon_cli.create_branch(f"b{i + 1}", f"b{i}", tenant_id=tenant)
env.create_branch(f"b{i + 1}", ancestor_branch_name=f"b{i}", tenant_id=tenant)
else:
env.neon_cli.create_branch(f"b{i + 1}", "b0", tenant_id=tenant)
env.create_branch(f"b{i + 1}", ancestor_branch_name="b0", tenant_id=tenant)
endpoints.append(env.endpoints.create_start(f"b{i + 1}", tenant_id=tenant))
@@ -120,7 +120,7 @@ def test_branching_unnormalized_start_lsn(neon_simple_env: NeonEnv, pg_bin: PgBi
env = neon_simple_env
env.neon_cli.create_branch("b0")
env.create_branch("b0")
endpoint0 = env.endpoints.create_start("b0")
pg_bin.run_capture(["pgbench", "-i", endpoint0.connstr()])
@@ -133,7 +133,7 @@ def test_branching_unnormalized_start_lsn(neon_simple_env: NeonEnv, pg_bin: PgBi
start_lsn = Lsn((int(curr_lsn) - XLOG_BLCKSZ) // XLOG_BLCKSZ * XLOG_BLCKSZ)
log.info(f"Branching b1 from b0 starting at lsn {start_lsn}...")
env.neon_cli.create_branch("b1", "b0", ancestor_start_lsn=start_lsn)
env.create_branch("b1", ancestor_branch_name="b0", ancestor_start_lsn=start_lsn)
endpoint1 = env.endpoints.create_start("b1")
pg_bin.run_capture(["pgbench", "-i", endpoint1.connstr()])
@@ -432,9 +432,7 @@ def test_branching_while_stuck_find_gc_cutoffs(neon_env_builder: NeonEnvBuilder)
wait_until_paused(env, failpoint)
env.neon_cli.create_branch(
tenant_id=env.initial_tenant, ancestor_branch_name="main", new_branch_name="branch"
)
env.create_branch("branch", ancestor_branch_name="main")
client.configure_failpoints((failpoint, "off"))

View File

@@ -34,7 +34,7 @@ def test_local_corruption(neon_env_builder: NeonEnvBuilder):
tenant_timelines: List[Tuple[TenantId, TimelineId, Endpoint]] = []
for _ in range(3):
tenant_id, timeline_id = env.neon_cli.create_tenant()
tenant_id, timeline_id = env.create_tenant()
endpoint = env.endpoints.create_start("main", tenant_id=tenant_id)
with endpoint.cursor() as cur:
@@ -84,13 +84,11 @@ def test_local_corruption(neon_env_builder: NeonEnvBuilder):
def test_create_multiple_timelines_parallel(neon_simple_env: NeonEnv):
env = neon_simple_env
tenant_id, _ = env.neon_cli.create_tenant()
tenant_id, _ = env.create_tenant()
with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
futures = [
executor.submit(
env.neon_cli.create_timeline, f"test-create-multiple-timelines-{i}", tenant_id
)
executor.submit(env.create_timeline, f"test-create-multiple-timelines-{i}", tenant_id)
for i in range(4)
]
for future in futures:
@@ -151,7 +149,7 @@ def test_timeline_init_break_before_checkpoint_recreate(
]
)
env.neon_cli.create_tenant(env.initial_tenant)
env.create_tenant(env.initial_tenant)
tenant_id = env.initial_tenant
timelines_dir = env.pageserver.timeline_dir(tenant_id)

View File

@@ -34,7 +34,7 @@ def test_change_pageserver(neon_env_builder: NeonEnvBuilder, make_httpserver):
ignore_notify
)
env.neon_cli.create_branch("test_change_pageserver")
env.create_branch("test_change_pageserver")
endpoint = env.endpoints.create_start("test_change_pageserver")
# Put this tenant into a dual-attached state

View File

@@ -56,8 +56,10 @@ def test_clog_truncate(neon_simple_env: NeonEnv):
# create new branch after clog truncation and start a compute node on it
log.info(f"create branch at lsn_after_truncation {lsn_after_truncation}")
env.neon_cli.create_branch(
"test_clog_truncate_new", "main", ancestor_start_lsn=lsn_after_truncation
env.create_branch(
"test_clog_truncate_new",
ancestor_branch_name="main",
ancestor_start_lsn=lsn_after_truncation,
)
endpoint2 = env.endpoints.create_start("test_clog_truncate_new")

View File

@@ -23,7 +23,7 @@ def test_lsof_pageserver_pid(neon_simple_env: NeonEnv):
env = neon_simple_env
def start_workload():
env.neon_cli.create_branch("test_lsof_pageserver_pid")
env.create_branch("test_lsof_pageserver_pid")
endpoint = env.endpoints.create_start("test_lsof_pageserver_pid")
with closing(endpoint.connect()) as conn:
with conn.cursor() as cur:

View File

@@ -38,7 +38,7 @@ def test_safekeepers_reconfigure_reorder(
):
neon_env_builder.num_safekeepers = 3
env = neon_env_builder.init_start()
env.neon_cli.create_branch("test_safekeepers_reconfigure_reorder")
env.create_branch("test_safekeepers_reconfigure_reorder")
endpoint = env.endpoints.create_start("test_safekeepers_reconfigure_reorder")

View File

@@ -18,7 +18,7 @@ from fixtures.neon_fixtures import NeonEnvBuilder, WalCraft
)
def test_crafted_wal_end(neon_env_builder: NeonEnvBuilder, wal_type: str):
env = neon_env_builder.init_start()
env.neon_cli.create_branch("test_crafted_wal_end")
env.create_branch("test_crafted_wal_end")
env.pageserver.allowed_errors.extend(
[
# seems like pageserver stop triggers these

View File

@@ -31,7 +31,7 @@ def test_createdb(neon_simple_env: NeonEnv, strategy: str):
lsn = query_scalar(cur, "SELECT pg_current_wal_insert_lsn()")
# Create a branch
env.neon_cli.create_branch("test_createdb2", "main", ancestor_start_lsn=lsn)
env.create_branch("test_createdb2", ancestor_branch_name="main", ancestor_start_lsn=lsn)
endpoint2 = env.endpoints.create_start("test_createdb2")
# Test that you can connect to the new database on both branches
@@ -77,10 +77,14 @@ def test_dropdb(neon_simple_env: NeonEnv, test_output_dir):
lsn_after_drop = query_scalar(cur, "SELECT pg_current_wal_insert_lsn()")
# Create two branches before and after database drop.
env.neon_cli.create_branch("test_before_dropdb", "main", ancestor_start_lsn=lsn_before_drop)
env.create_branch(
"test_before_dropdb", ancestor_branch_name="main", ancestor_start_lsn=lsn_before_drop
)
endpoint_before = env.endpoints.create_start("test_before_dropdb")
env.neon_cli.create_branch("test_after_dropdb", "main", ancestor_start_lsn=lsn_after_drop)
env.create_branch(
"test_after_dropdb", ancestor_branch_name="main", ancestor_start_lsn=lsn_after_drop
)
endpoint_after = env.endpoints.create_start("test_after_dropdb")
# Test that database exists on the branch before drop

View File

@@ -18,7 +18,7 @@ def test_createuser(neon_simple_env: NeonEnv):
lsn = query_scalar(cur, "SELECT pg_current_wal_insert_lsn()")
# Create a branch
env.neon_cli.create_branch("test_createuser2", "main", ancestor_start_lsn=lsn)
env.create_branch("test_createuser2", ancestor_branch_name="main", ancestor_start_lsn=lsn)
endpoint2 = env.endpoints.create_start("test_createuser2")
# Test that you can connect to new branch as a new user

View File

@@ -59,11 +59,11 @@ def test_min_resident_size_override_handling(
env.pageserver.stop()
env.pageserver.start()
tenant_id, _ = env.neon_cli.create_tenant()
tenant_id, _ = env.create_tenant()
assert_overrides(tenant_id, config_level_override)
# Also ensure that specifying the parameter to create_tenant works, in addition to http-level reconfig.
tenant_id, _ = env.neon_cli.create_tenant(conf={"min_resident_size_override": "100"})
tenant_id, _ = env.create_tenant(conf={"min_resident_size_override": "100"})
assert_config(tenant_id, 100, 100)
ps_http.set_tenant_config(tenant_id, {})
assert_config(tenant_id, None, config_level_override)
@@ -280,7 +280,7 @@ def _eviction_env(
def pgbench_init_tenant(
layer_size: int, scale: int, env: NeonEnv, pg_bin: PgBin
) -> Tuple[TenantId, TimelineId]:
tenant_id, timeline_id = env.neon_cli.create_tenant(
tenant_id, timeline_id = env.create_tenant(
conf={
"gc_period": "0s",
"compaction_period": "0s",

View File

@@ -81,7 +81,7 @@ def test_remote_extensions(
# Start a compute node with remote_extension spec
# and check that it can download the extensions and use them to CREATE EXTENSION.
env = neon_env_builder_local.init_start()
env.neon_cli.create_branch("test_remote_extensions")
env.create_branch("test_remote_extensions")
endpoint = env.endpoints.create(
"test_remote_extensions",
config_lines=["log_min_messages=debug3"],

View File

@@ -15,7 +15,7 @@ def test_endpoint_crash(neon_env_builder: NeonEnvBuilder, sql_func: str):
Test that triggering crash from neon_test_utils crashes the endpoint
"""
env = neon_env_builder.init_start()
env.neon_cli.create_branch("test_endpoint_crash")
env.create_branch("test_endpoint_crash")
endpoint = env.endpoints.create_start("test_endpoint_crash")
endpoint.safe_psql("CREATE EXTENSION neon_test_utils;")

View File

@@ -3,7 +3,7 @@ from fixtures.neon_fixtures import NeonEnvBuilder
def test_fsm_truncate(neon_env_builder: NeonEnvBuilder):
env = neon_env_builder.init_start()
env.neon_cli.create_branch("test_fsm_truncate")
env.create_branch("test_fsm_truncate")
endpoint = env.endpoints.create_start("test_fsm_truncate")
endpoint.safe_psql(
"CREATE TABLE t1(key int); CREATE TABLE t2(key int); TRUNCATE TABLE t1; TRUNCATE TABLE t2;"

View File

@@ -68,7 +68,7 @@ async def update_and_gc(env: NeonEnv, endpoint: Endpoint, timeline: TimelineId):
def test_gc_aggressive(neon_env_builder: NeonEnvBuilder):
# Disable pitr, because here we want to test branch creation after GC
env = neon_env_builder.init_start(initial_tenant_conf={"pitr_interval": "0 sec"})
timeline = env.neon_cli.create_branch("test_gc_aggressive", "main")
timeline = env.create_branch("test_gc_aggressive", ancestor_branch_name="main")
endpoint = env.endpoints.create_start("test_gc_aggressive")
with endpoint.cursor() as cur:
@@ -99,7 +99,7 @@ def test_gc_index_upload(neon_env_builder: NeonEnvBuilder):
# Disable time-based pitr, we will use LSN-based thresholds in the manual GC calls
env = neon_env_builder.init_start(initial_tenant_conf={"pitr_interval": "0 sec"})
tenant_id = env.initial_tenant
timeline_id = env.neon_cli.create_branch("test_gc_index_upload", "main")
timeline_id = env.create_branch("test_gc_index_upload", ancestor_branch_name="main")
endpoint = env.endpoints.create_start("test_gc_index_upload")
pageserver_http = env.pageserver.http_client()

View File

@@ -158,7 +158,7 @@ def test_import_from_pageserver_small(
neon_env_builder.enable_pageserver_remote_storage(RemoteStorageKind.LOCAL_FS)
env = neon_env_builder.init_start()
timeline = env.neon_cli.create_branch("test_import_from_pageserver_small")
timeline = env.create_branch("test_import_from_pageserver_small")
endpoint = env.endpoints.create_start("test_import_from_pageserver_small")
num_rows = 3000
@@ -177,7 +177,7 @@ def test_import_from_pageserver_multisegment(
neon_env_builder.enable_pageserver_remote_storage(RemoteStorageKind.LOCAL_FS)
env = neon_env_builder.init_start()
timeline = env.neon_cli.create_branch("test_import_from_pageserver_multisegment")
timeline = env.create_branch("test_import_from_pageserver_multisegment")
endpoint = env.endpoints.create_start("test_import_from_pageserver_multisegment")
# For `test_import_from_pageserver_multisegment`, we want to make sure that the data

View File

@@ -178,9 +178,9 @@ def test_gc_of_remote_layers(neon_env_builder: NeonEnvBuilder):
def tenant_update_config(changes):
tenant_config.update(changes)
env.neon_cli.config_tenant(tenant_id, tenant_config)
env.config_tenant(tenant_id, tenant_config)
tenant_id, timeline_id = env.neon_cli.create_tenant(conf=tenant_config)
tenant_id, timeline_id = env.create_tenant(conf=tenant_config)
log.info("tenant id is %s", tenant_id)
env.initial_tenant = tenant_id # update_and_gc relies on this
ps_http = env.pageserver.http_client()

View File

@@ -8,7 +8,7 @@ def test_image_layer_writer_fail_before_finish(neon_simple_env: NeonEnv):
env = neon_simple_env
pageserver_http = env.pageserver.http_client()
tenant_id, timeline_id = env.neon_cli.create_tenant(
tenant_id, timeline_id = env.create_tenant(
conf={
# small checkpoint distance to create more delta layer files
"checkpoint_distance": f"{1024 ** 2}",
@@ -52,7 +52,7 @@ def test_delta_layer_writer_fail_before_finish(neon_simple_env: NeonEnv):
env = neon_simple_env
pageserver_http = env.pageserver.http_client()
tenant_id, timeline_id = env.neon_cli.create_tenant(
tenant_id, timeline_id = env.create_tenant(
conf={
# small checkpoint distance to create more delta layer files
"checkpoint_distance": f"{1024 ** 2}",

View File

@@ -56,7 +56,7 @@ def test_issue_5878(neon_env_builder: NeonEnvBuilder):
"compaction_target_size": f"{128 * (1024**3)}", # make it so that we only have 1 partition => image coverage for delta layers => enables gc of delta layers
}
tenant_id, timeline_id = env.neon_cli.create_tenant(conf=tenant_config)
tenant_id, timeline_id = env.create_tenant(conf=tenant_config)
endpoint = env.endpoints.create_start("main", tenant_id=tenant_id)

View File

@@ -219,7 +219,7 @@ def test_ondemand_wal_download_in_replication_slot_funcs(neon_env_builder: NeonE
neon_env_builder.num_safekeepers = 3
env = neon_env_builder.init_start()
env.neon_cli.create_branch("init")
env.create_branch("init")
endpoint = env.endpoints.create_start("init")
with endpoint.connect().cursor() as cur:
@@ -270,7 +270,7 @@ def test_lr_with_slow_safekeeper(neon_env_builder: NeonEnvBuilder, vanilla_pg):
neon_env_builder.num_safekeepers = 3
env = neon_env_builder.init_start()
env.neon_cli.create_branch("init")
env.create_branch("init")
endpoint = env.endpoints.create_start("init")
with endpoint.connect().cursor() as cur:
@@ -352,7 +352,7 @@ FROM generate_series(1, 16384) AS seq; -- Inserts enough rows to exceed 16MB of
def test_restart_endpoint(neon_simple_env: NeonEnv, vanilla_pg):
env = neon_simple_env
env.neon_cli.create_branch("init")
env.create_branch("init")
endpoint = env.endpoints.create_start("init")
tenant_id = endpoint.safe_psql("show neon.tenant_id")[0][0]
timeline_id = endpoint.safe_psql("show neon.timeline_id")[0][0]
@@ -397,7 +397,7 @@ def test_restart_endpoint(neon_simple_env: NeonEnv, vanilla_pg):
def test_large_records(neon_simple_env: NeonEnv, vanilla_pg):
env = neon_simple_env
env.neon_cli.create_branch("init")
env.create_branch("init")
endpoint = env.endpoints.create_start("init")
cur = endpoint.connect().cursor()
@@ -445,7 +445,7 @@ def test_large_records(neon_simple_env: NeonEnv, vanilla_pg):
def test_slots_and_branching(neon_simple_env: NeonEnv):
env = neon_simple_env
tenant, timeline = env.neon_cli.create_tenant()
tenant, timeline = env.create_tenant()
env.pageserver.http_client()
main_branch = env.endpoints.create_start("main", tenant_id=tenant)
@@ -457,7 +457,7 @@ def test_slots_and_branching(neon_simple_env: NeonEnv):
wait_for_last_flush_lsn(env, main_branch, tenant, timeline)
# Create branch ws.
env.neon_cli.create_branch("ws", "main", tenant_id=tenant)
env.create_branch("ws", ancestor_branch_name="main", tenant_id=tenant)
ws_branch = env.endpoints.create_start("ws", tenant_id=tenant)
# Check that we can create slot with the same name
@@ -469,10 +469,10 @@ def test_slots_and_branching(neon_simple_env: NeonEnv):
def test_replication_shutdown(neon_simple_env: NeonEnv):
# Ensure Postgres can exit without getting stuck when a replication job is active + neon extension installed
env = neon_simple_env
env.neon_cli.create_branch("test_replication_shutdown_publisher", "main")
env.create_branch("test_replication_shutdown_publisher", ancestor_branch_name="main")
pub = env.endpoints.create("test_replication_shutdown_publisher")
env.neon_cli.create_branch("test_replication_shutdown_subscriber")
env.create_branch("test_replication_shutdown_subscriber")
sub = env.endpoints.create("test_replication_shutdown_subscriber")
pub.respec(skip_pg_catalog_updates=False)
@@ -575,7 +575,7 @@ def test_subscriber_synchronous_commit(neon_simple_env: NeonEnv, vanilla_pg):
vanilla_pg.start()
vanilla_pg.safe_psql("create extension neon;")
env.neon_cli.create_branch("subscriber")
env.create_branch("subscriber")
sub = env.endpoints.create("subscriber")
sub.start()

View File

@@ -32,7 +32,7 @@ def test_lsn_mapping(neon_env_builder: NeonEnvBuilder, with_lease: bool):
"""
env = neon_env_builder.init_start()
tenant_id, _ = env.neon_cli.create_tenant(
tenant_id, _ = env.create_tenant(
conf={
# disable default GC and compaction
"gc_period": "1000 m",
@@ -43,7 +43,7 @@ def test_lsn_mapping(neon_env_builder: NeonEnvBuilder, with_lease: bool):
}
)
timeline_id = env.neon_cli.create_branch("test_lsn_mapping", tenant_id=tenant_id)
timeline_id = env.create_branch("test_lsn_mapping", tenant_id=tenant_id)
endpoint_main = env.endpoints.create_start("test_lsn_mapping", tenant_id=tenant_id)
timeline_id = endpoint_main.safe_psql("show neon.timeline_id")[0][0]
@@ -123,8 +123,8 @@ def test_lsn_mapping(neon_env_builder: NeonEnvBuilder, with_lease: bool):
endpoint_here.stop_and_destroy()
# Do the "past" check again at a new branch to ensure that we don't return something before the branch cutoff
timeline_id_child = env.neon_cli.create_branch(
"test_lsn_mapping_child", tenant_id=tenant_id, ancestor_branch_name="test_lsn_mapping"
timeline_id_child = env.create_branch(
"test_lsn_mapping_child", ancestor_branch_name="test_lsn_mapping", tenant_id=tenant_id
)
# Timestamp is in the unreachable past
@@ -190,7 +190,7 @@ def test_ts_of_lsn_api(neon_env_builder: NeonEnvBuilder):
env = neon_env_builder.init_start()
new_timeline_id = env.neon_cli.create_branch("test_ts_of_lsn_api")
new_timeline_id = env.create_branch("test_ts_of_lsn_api")
endpoint_main = env.endpoints.create_start("test_ts_of_lsn_api")
cur = endpoint_main.connect().cursor()

View File

@@ -72,9 +72,7 @@ def test_multixact(neon_simple_env: NeonEnv, test_output_dir):
assert int(next_multixact_id) > int(next_multixact_id_old)
# Branch at this point
env.neon_cli.create_branch(
"test_multixact_new", ancestor_branch_name="main", ancestor_start_lsn=lsn
)
env.create_branch("test_multixact_new", ancestor_branch_name="main", ancestor_start_lsn=lsn)
endpoint_new = env.endpoints.create_start("test_multixact_new")
next_multixact_id_new = endpoint_new.safe_psql(

View File

@@ -44,12 +44,12 @@ def test_cli_timeline_list(neon_simple_env: NeonEnv):
helper_compare_timeline_list(pageserver_http_client, env, env.initial_tenant)
# Create a branch for us
main_timeline_id = env.neon_cli.create_branch("test_cli_branch_list_main")
main_timeline_id = env.create_branch("test_cli_branch_list_main")
helper_compare_timeline_list(pageserver_http_client, env, env.initial_tenant)
# Create a nested branch
nested_timeline_id = env.neon_cli.create_branch(
"test_cli_branch_list_nested", "test_cli_branch_list_main"
nested_timeline_id = env.create_branch(
"test_cli_branch_list_nested", ancestor_branch_name="test_cli_branch_list_main"
)
helper_compare_timeline_list(pageserver_http_client, env, env.initial_tenant)
@@ -77,13 +77,13 @@ def test_cli_tenant_list(neon_simple_env: NeonEnv):
helper_compare_tenant_list(pageserver_http_client, env)
# Create new tenant
tenant1, _ = env.neon_cli.create_tenant()
tenant1, _ = env.create_tenant()
# check tenant1 appeared
helper_compare_tenant_list(pageserver_http_client, env)
# Create new tenant
tenant2, _ = env.neon_cli.create_tenant()
tenant2, _ = env.create_tenant()
# check tenant2 appeared
helper_compare_tenant_list(pageserver_http_client, env)
@@ -98,7 +98,7 @@ def test_cli_tenant_list(neon_simple_env: NeonEnv):
def test_cli_tenant_create(neon_simple_env: NeonEnv):
env = neon_simple_env
tenant_id, _ = env.neon_cli.create_tenant()
tenant_id, _ = env.create_tenant()
timelines = env.neon_cli.list_timelines(tenant_id)
# an initial timeline should be created upon tenant creation

View File

@@ -8,7 +8,7 @@ from fixtures.neon_fixtures import NeonEnvBuilder
# Verify that the neon extension is installed and has the correct version.
def test_neon_extension(neon_env_builder: NeonEnvBuilder):
env = neon_env_builder.init_start()
env.neon_cli.create_branch("test_create_extension_neon")
env.create_branch("test_create_extension_neon")
endpoint_main = env.endpoints.create("test_create_extension_neon")
# don't skip pg_catalog updates - it runs CREATE EXTENSION neon
@@ -35,7 +35,7 @@ def test_neon_extension(neon_env_builder: NeonEnvBuilder):
# Verify that the neon extension can be upgraded/downgraded.
def test_neon_extension_compatibility(neon_env_builder: NeonEnvBuilder):
env = neon_env_builder.init_start()
env.neon_cli.create_branch("test_neon_extension_compatibility")
env.create_branch("test_neon_extension_compatibility")
endpoint_main = env.endpoints.create("test_neon_extension_compatibility")
# don't skip pg_catalog updates - it runs CREATE EXTENSION neon
@@ -72,7 +72,7 @@ def test_neon_extension_compatibility(neon_env_builder: NeonEnvBuilder):
# Verify that the neon extension can be auto-upgraded to the latest version.
def test_neon_extension_auto_upgrade(neon_env_builder: NeonEnvBuilder):
env = neon_env_builder.init_start()
env.neon_cli.create_branch("test_neon_extension_auto_upgrade")
env.create_branch("test_neon_extension_auto_upgrade")
endpoint_main = env.endpoints.create("test_neon_extension_auto_upgrade")
# don't skip pg_catalog updates - it runs CREATE EXTENSION neon

View File

@@ -1,4 +1,5 @@
import pytest
from fixtures.common_types import TimelineId
from fixtures.neon_fixtures import NeonEnvBuilder
from fixtures.port_distributor import PortDistributor
@@ -10,22 +11,36 @@ def test_neon_cli_basics(neon_env_builder: NeonEnvBuilder, port_distributor: Por
# Skipping the init step that creates a local tenant in Pytest tests
try:
env.neon_cli.start()
env.neon_cli.create_tenant(tenant_id=env.initial_tenant, set_default=True)
env.create_tenant(tenant_id=env.initial_tenant, set_default=True)
main_branch_name = "main"
pg_port = port_distributor.get_port()
http_port = port_distributor.get_port()
env.neon_cli.endpoint_create(
main_branch_name, pg_port, http_port, endpoint_id="ep-basic-main"
main_branch_name,
pg_port,
http_port,
endpoint_id="ep-basic-main",
tenant_id=env.initial_tenant,
pg_version=env.pg_version,
)
env.neon_cli.endpoint_start("ep-basic-main")
branch_name = "migration-check"
env.neon_cli.create_branch(branch_name)
env.neon_cli.create_branch(
tenant_id=env.initial_tenant,
timeline_id=TimelineId.generate(),
new_branch_name=branch_name,
)
pg_port = port_distributor.get_port()
http_port = port_distributor.get_port()
env.neon_cli.endpoint_create(
branch_name, pg_port, http_port, endpoint_id=f"ep-{branch_name}"
branch_name,
pg_port,
http_port,
endpoint_id=f"ep-{branch_name}",
tenant_id=env.initial_tenant,
pg_version=env.pg_version,
)
env.neon_cli.endpoint_start(f"ep-{branch_name}")
finally:
@@ -43,12 +58,26 @@ def test_neon_two_primary_endpoints_fail(
pg_port = port_distributor.get_port()
http_port = port_distributor.get_port()
env.neon_cli.endpoint_create(branch_name, pg_port, http_port, "ep1")
env.neon_cli.endpoint_create(
branch_name,
pg_port,
http_port,
endpoint_id="ep1",
tenant_id=env.initial_tenant,
pg_version=env.pg_version,
)
pg_port = port_distributor.get_port()
http_port = port_distributor.get_port()
# ep1 is not running so create will succeed
env.neon_cli.endpoint_create(branch_name, pg_port, http_port, "ep2")
env.neon_cli.endpoint_create(
branch_name,
pg_port,
http_port,
endpoint_id="ep2",
tenant_id=env.initial_tenant,
pg_version=env.pg_version,
)
env.neon_cli.endpoint_start("ep1")

View File

@@ -6,10 +6,10 @@ from fixtures.utils import wait_until
def test_neon_superuser(neon_simple_env: NeonEnv, pg_version: PgVersion):
env = neon_simple_env
env.neon_cli.create_branch("test_neon_superuser_publisher", "main")
env.create_branch("test_neon_superuser_publisher", ancestor_branch_name="main")
pub = env.endpoints.create("test_neon_superuser_publisher")
env.neon_cli.create_branch("test_neon_superuser_subscriber")
env.create_branch("test_neon_superuser_subscriber")
sub = env.endpoints.create("test_neon_superuser_subscriber")
pub.respec(skip_pg_catalog_updates=False)

View File

@@ -5,7 +5,7 @@ from fixtures.pageserver.http import PageserverHttpClient
def check_tenant(env: NeonEnv, pageserver_http: PageserverHttpClient):
tenant_id, timeline_id = env.neon_cli.create_tenant()
tenant_id, timeline_id = env.create_tenant()
endpoint = env.endpoints.create_start("main", tenant_id=tenant_id)
# we rely upon autocommit after each statement
res_1 = endpoint.safe_psql_many(

View File

@@ -17,7 +17,7 @@ from fixtures.utils import print_gc_result, query_scalar
def test_old_request_lsn(neon_env_builder: NeonEnvBuilder):
# Disable pitr, because here we want to test branch creation after GC
env = neon_env_builder.init_start(initial_tenant_conf={"pitr_interval": "0 sec"})
env.neon_cli.create_branch("test_old_request_lsn", "main")
env.create_branch("test_old_request_lsn", ancestor_branch_name="main")
endpoint = env.endpoints.create_start("test_old_request_lsn")
pg_conn = endpoint.connect()

View File

@@ -545,7 +545,7 @@ def test_compaction_downloads_on_demand_without_image_creation(neon_env_builder:
layer_sizes += layer.layer_file_size
pageserver_http.evict_layer(tenant_id, timeline_id, layer.layer_file_name)
env.neon_cli.config_tenant(tenant_id, {"compaction_threshold": "3"})
env.config_tenant(tenant_id, {"compaction_threshold": "3"})
pageserver_http.timeline_compact(tenant_id, timeline_id)
layers = pageserver_http.layer_map_info(tenant_id, timeline_id)
@@ -647,7 +647,7 @@ def test_compaction_downloads_on_demand_with_image_creation(neon_env_builder: Ne
# layers -- threshold of 2 would sound more reasonable, but keeping it as 1
# to be less flaky
conf["image_creation_threshold"] = "1"
env.neon_cli.config_tenant(tenant_id, {k: str(v) for k, v in conf.items()})
env.config_tenant(tenant_id, {k: str(v) for k, v in conf.items()})
pageserver_http.timeline_compact(tenant_id, timeline_id)
layers = pageserver_http.layer_map_info(tenant_id, timeline_id)

View File

@@ -59,7 +59,7 @@ def check_client(env: NeonEnv, client: PageserverHttpClient):
def test_pageserver_http_get_wal_receiver_not_found(neon_simple_env: NeonEnv):
env = neon_simple_env
with env.pageserver.http_client() as client:
tenant_id, timeline_id = env.neon_cli.create_tenant()
tenant_id, timeline_id = env.create_tenant()
timeline_details = client.timeline_detail(
tenant_id=tenant_id, timeline_id=timeline_id, include_non_incremental_logical_size=True
@@ -108,7 +108,7 @@ def expect_updated_msg_lsn(
def test_pageserver_http_get_wal_receiver_success(neon_simple_env: NeonEnv):
env = neon_simple_env
with env.pageserver.http_client() as client:
tenant_id, timeline_id = env.neon_cli.create_tenant()
tenant_id, timeline_id = env.create_tenant()
endpoint = env.endpoints.create_start(DEFAULT_BRANCH_NAME, tenant_id=tenant_id)
# insert something to force sk -> ps message

View File

@@ -9,7 +9,7 @@ def test_pageserver_catchup_while_compute_down(neon_env_builder: NeonEnvBuilder)
neon_env_builder.num_safekeepers = 3
env = neon_env_builder.init_start()
env.neon_cli.create_branch("test_pageserver_catchup_while_compute_down")
env.create_branch("test_pageserver_catchup_while_compute_down")
# Make shared_buffers large to ensure we won't query pageserver while it is down.
endpoint = env.endpoints.create_start(
"test_pageserver_catchup_while_compute_down", config_lines=["shared_buffers=512MB"]

View File

@@ -150,7 +150,7 @@ def test_generations_upgrade(neon_env_builder: NeonEnvBuilder):
env.pageserver.start()
env.storage_controller.node_configure(env.pageserver.id, {"availability": "Active"})
env.neon_cli.create_tenant(
env.create_tenant(
tenant_id=env.initial_tenant, conf=TENANT_CONF, timeline_id=env.initial_timeline
)
@@ -643,9 +643,7 @@ def test_upgrade_generationless_local_file_paths(
tenant_id = TenantId.generate()
timeline_id = TimelineId.generate()
env.neon_cli.create_tenant(
tenant_id, timeline_id, conf=TENANT_CONF, placement_policy='{"Attached":1}'
)
env.create_tenant(tenant_id, timeline_id, conf=TENANT_CONF, placement_policy='{"Attached":1}')
workload = Workload(env, tenant_id, timeline_id)
workload.init()

View File

@@ -42,7 +42,7 @@ async def run_worker_for_tenant(
async def run_worker(env: NeonEnv, tenant_conf, entries: int) -> Tuple[TenantId, TimelineId, Lsn]:
tenant, timeline = env.neon_cli.create_tenant(conf=tenant_conf)
tenant, timeline = env.create_tenant(conf=tenant_conf)
last_flush_lsn = await run_worker_for_tenant(env, entries, tenant)
return tenant, timeline, last_flush_lsn

View File

@@ -14,7 +14,7 @@ from fixtures.neon_fixtures import NeonEnv, PgBin
# least the code gets exercised.
def test_pageserver_reconnect(neon_simple_env: NeonEnv, pg_bin: PgBin):
env = neon_simple_env
env.neon_cli.create_branch("test_pageserver_restarts")
env.create_branch("test_pageserver_restarts")
endpoint = env.endpoints.create_start("test_pageserver_restarts")
n_reconnects = 1000
timeout = 0.01
@@ -46,7 +46,7 @@ def test_pageserver_reconnect(neon_simple_env: NeonEnv, pg_bin: PgBin):
# Test handling errors during page server reconnect
def test_pageserver_reconnect_failure(neon_simple_env: NeonEnv):
env = neon_simple_env
env.neon_cli.create_branch("test_pageserver_reconnect")
env.create_branch("test_pageserver_reconnect")
endpoint = env.endpoints.create_start("test_pageserver_reconnect")
con = endpoint.connect()

View File

@@ -169,7 +169,7 @@ def test_pageserver_chaos(
# Use a tiny checkpoint distance, to create a lot of layers quickly.
# That allows us to stress the compaction and layer flushing logic more.
tenant, _ = env.neon_cli.create_tenant(
tenant, _ = env.create_tenant(
conf={
"checkpoint_distance": "5000000",
}

View File

@@ -12,7 +12,7 @@ from fixtures.neon_fixtures import NeonEnv, PgBin
# running.
def test_pageserver_restarts_under_worload(neon_simple_env: NeonEnv, pg_bin: PgBin):
env = neon_simple_env
env.neon_cli.create_branch("test_pageserver_restarts")
env.create_branch("test_pageserver_restarts")
endpoint = env.endpoints.create_start("test_pageserver_restarts")
n_restarts = 10
scale = 10

View File

@@ -650,7 +650,7 @@ def test_secondary_background_downloads(neon_env_builder: NeonEnvBuilder):
tenant_id = TenantId.generate()
timeline_a = TimelineId.generate()
timeline_b = TimelineId.generate()
env.neon_cli.create_tenant(
env.create_tenant(
tenant_id,
timeline_a,
placement_policy='{"Attached":1}',
@@ -658,7 +658,7 @@ def test_secondary_background_downloads(neon_env_builder: NeonEnvBuilder):
# to trigger the upload promptly.
conf={"heatmap_period": f"{upload_period_secs}s"},
)
env.neon_cli.create_timeline("main2", tenant_id, timeline_b)
env.create_timeline("main2", tenant_id, timeline_b)
tenant_timelines[tenant_id] = [timeline_a, timeline_b]
@@ -778,9 +778,7 @@ def test_slow_secondary_downloads(neon_env_builder: NeonEnvBuilder, via_controll
tenant_id = TenantId.generate()
timeline_id = TimelineId.generate()
env.neon_cli.create_tenant(
tenant_id, timeline_id, conf=TENANT_CONF, placement_policy='{"Attached":1}'
)
env.create_tenant(tenant_id, timeline_id, conf=TENANT_CONF, placement_policy='{"Attached":1}')
attached_to_id = env.storage_controller.locate(tenant_id)[0]["node_id"]
ps_attached = env.get_pageserver(attached_to_id)

View File

@@ -57,7 +57,7 @@ def test_pitr_gc(neon_env_builder: NeonEnvBuilder):
# Branch at the point where only 100 rows were inserted
# It must have been preserved by PITR setting
env.neon_cli.create_branch("test_pitr_gc_hundred", "main", ancestor_start_lsn=lsn_a)
env.create_branch("test_pitr_gc_hundred", ancestor_branch_name="main", ancestor_start_lsn=lsn_a)
endpoint_hundred = env.endpoints.create_start("test_pitr_gc_hundred")
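
For readers scanning the call sites, a minimal sketch of the branch-at-LSN pattern used above, written against the env-level wrapper (fixture names follow the surrounding tests; the import path of wait_for_last_flush_lsn is an assumption):

from fixtures.neon_fixtures import NeonEnv, wait_for_last_flush_lsn  # helper location assumed

def test_branch_at_lsn_sketch(neon_simple_env: NeonEnv):
    env = neon_simple_env
    # Write some rows on main and remember the LSN they were flushed at.
    with env.endpoints.create_start("main") as endpoint:
        endpoint.safe_psql("CREATE TABLE t AS SELECT generate_series(1, 100) AS i")
        lsn = wait_for_last_flush_lsn(env, endpoint, env.initial_tenant, env.initial_timeline)
    # Branch from main at that LSN; the wrapper generates the new timeline id.
    env.create_branch("hundred_rows", ancestor_branch_name="main", ancestor_start_lsn=lsn)
    with env.endpoints.create_start("hundred_rows") as ep, ep.cursor() as cur:
        cur.execute("SELECT count(*) FROM t")
        assert cur.fetchone()[0] == 100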

View File

@@ -25,7 +25,7 @@ def test_pageserver_recovery(neon_env_builder: NeonEnvBuilder):
)
# Create a branch for us
env.neon_cli.create_branch("test_pageserver_recovery", "main")
env.create_branch("test_pageserver_recovery", ancestor_branch_name="main")
endpoint = env.endpoints.create_start("test_pageserver_recovery")

View File

@@ -230,7 +230,7 @@ def test_remote_storage_upload_queue_retries(
# create tenant with config that will deterministically allow
# compaction and gc
tenant_id, timeline_id = env.neon_cli.create_tenant(
tenant_id, timeline_id = env.create_tenant(
conf={
# small checkpointing and compaction targets to ensure we generate many upload operations
"checkpoint_distance": f"{64 * 1024}",
@@ -640,7 +640,9 @@ def test_empty_branch_remote_storage_upload(neon_env_builder: NeonEnvBuilder):
client = env.pageserver.http_client()
new_branch_name = "new_branch"
new_branch_timeline_id = env.neon_cli.create_branch(new_branch_name, "main", env.initial_tenant)
new_branch_timeline_id = env.create_branch(
new_branch_name, ancestor_branch_name="main", tenant_id=env.initial_tenant
)
assert_nothing_to_upload(client, env.initial_tenant, new_branch_timeline_id)
timelines_before_detach = set(

View File

@@ -60,9 +60,7 @@ def test_tenant_s3_restore(
last_flush_lsns = []
for timeline in ["first", "second"]:
timeline_id = env.neon_cli.create_branch(
timeline, tenant_id=tenant_id, ancestor_branch_name=parent
)
timeline_id = env.create_branch(timeline, ancestor_branch_name=parent, tenant_id=tenant_id)
with env.endpoints.create_start(timeline, tenant_id=tenant_id) as endpoint:
run_pg_bench_small(pg_bin, endpoint.connstr())
endpoint.safe_psql(f"CREATE TABLE created_{timeline}(id integer);")

View File

@@ -77,7 +77,7 @@ def test_sharding_smoke(
assert all(s < expect_initdb_size // 2 for s in sizes.values())
# Test that timeline creation works on a sharded tenant
timeline_b = env.neon_cli.create_branch("branch_b", tenant_id=tenant_id)
timeline_b = env.create_branch("branch_b", tenant_id=tenant_id)
# Test that we can write data to a sharded tenant
workload = Workload(env, tenant_id, timeline_b, branch_name="branch_b")
@@ -378,7 +378,7 @@ def test_sharding_split_smoke(
env.start()
tenant_id = TenantId.generate()
timeline_id = TimelineId.generate()
env.neon_cli.create_tenant(
env.create_tenant(
tenant_id,
timeline_id,
shard_count=shard_count,
@@ -1127,7 +1127,7 @@ def test_sharding_split_failures(
timeline_id = TimelineId.generate()
# Create a tenant with secondary locations enabled
env.neon_cli.create_tenant(
env.create_tenant(
tenant_id, timeline_id, shard_count=initial_shard_count, placement_policy='{"Attached":1}'
)
@@ -1441,7 +1441,7 @@ def test_sharding_unlogged_relation(neon_env_builder: NeonEnvBuilder):
tenant_id = TenantId.generate()
timeline_id = TimelineId.generate()
env.neon_cli.create_tenant(tenant_id, timeline_id, shard_count=8)
env.create_tenant(tenant_id, timeline_id, shard_count=8)
# We will create many tables to ensure it's overwhelmingly likely that at least one
# of them doesn't land on shard 0
@@ -1483,7 +1483,7 @@ def test_top_tenants(neon_env_builder: NeonEnvBuilder):
for i in range(0, n_tenants):
tenant_id = TenantId.generate()
timeline_id = TimelineId.generate()
env.neon_cli.create_tenant(tenant_id, timeline_id)
env.create_tenant(tenant_id, timeline_id)
# Write a different amount of data to each tenant
w = Workload(env, tenant_id, timeline_id)
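
As a consolidated example of the sharded-creation hunks above, a minimal sketch (fixture names follow the surrounding tests; the Workload import path and the shape of the locate() response are assumptions):

from fixtures.neon_fixtures import NeonEnvBuilder
from fixtures.workload import Workload  # import path assumed

def test_sharded_tenant_sketch(neon_env_builder: NeonEnvBuilder):
    env = neon_env_builder.init_start()
    # When ids are not supplied, the wrapper generates fresh tenant and timeline ids.
    tenant_id, timeline_id = env.create_tenant(shard_count=4)
    # One entry per shard is expected back from the storage controller.
    assert len(env.storage_controller.locate(tenant_id)) == 4
    workload = Workload(env, tenant_id, timeline_id)
    workload.init()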

View File

@@ -96,7 +96,7 @@ def test_storage_controller_smoke(
# Creating several tenants should spread out across the pageservers
for tid in tenant_ids:
env.neon_cli.create_tenant(tid, shard_count=shards_per_tenant)
env.create_tenant(tid, shard_count=shards_per_tenant)
# Repeating a creation should be idempotent (we are just testing it doesn't return an error)
env.storage_controller.tenant_create(
@@ -172,7 +172,7 @@ def test_storage_controller_smoke(
# Create some fresh tenants
tenant_ids = set(TenantId.generate() for i in range(0, tenant_count))
for tid in tenant_ids:
env.neon_cli.create_tenant(tid, shard_count=shards_per_tenant)
env.create_tenant(tid, shard_count=shards_per_tenant)
counts = get_node_shard_counts(env, tenant_ids)
# Nothing should have been scheduled on the node in Draining
@@ -806,10 +806,7 @@ def test_storage_controller_s3_time_travel_recovery(
env.storage_controller.consistency_check()
branch_name = "main"
timeline_id = env.neon_cli.create_timeline(
branch_name,
tenant_id=tenant_id,
)
timeline_id = env.create_timeline(branch_name, tenant_id=tenant_id)
# Write some nontrivial amount of data into the endpoint and wait until it is uploaded
with env.endpoints.create_start("main", tenant_id=tenant_id) as endpoint:
run_pg_bench_small(pg_bin, endpoint.connstr())
@@ -1009,9 +1006,7 @@ def test_storage_controller_tenant_deletion(
tenant_id = TenantId.generate()
timeline_id = TimelineId.generate()
env.neon_cli.create_tenant(
tenant_id, timeline_id, shard_count=2, placement_policy='{"Attached":1}'
)
env.create_tenant(tenant_id, timeline_id, shard_count=2, placement_policy='{"Attached":1}')
# Ensure all the locations are configured, including secondaries
env.storage_controller.reconcile_until_idle()
@@ -1217,10 +1212,7 @@ def test_storage_controller_heartbeats(
env.storage_controller.tenant_create(tid)
branch_name = "main"
env.neon_cli.create_timeline(
branch_name,
tenant_id=tid,
)
env.create_timeline(branch_name, tenant_id=tid)
with env.endpoints.create_start("main", tenant_id=tid) as endpoint:
run_pg_bench_small(pg_bin, endpoint.connstr())
@@ -1322,9 +1314,9 @@ def test_storage_controller_re_attach(neon_env_builder: NeonEnvBuilder):
# We'll have two tenants.
tenant_a = TenantId.generate()
env.neon_cli.create_tenant(tenant_a, placement_policy='{"Attached":1}')
env.create_tenant(tenant_a, placement_policy='{"Attached":1}')
tenant_b = TenantId.generate()
env.neon_cli.create_tenant(tenant_b, placement_policy='{"Attached":1}')
env.create_tenant(tenant_b, placement_policy='{"Attached":1}')
# Each pageserver will have one attached and one secondary location
env.storage_controller.tenant_shard_migrate(
@@ -1647,7 +1639,7 @@ def test_tenant_import(neon_env_builder: NeonEnvBuilder, shard_count, remote_sto
# Create a second timeline to ensure that import finds both
timeline_a = env.initial_timeline
timeline_b = env.neon_cli.create_branch("branch_b", tenant_id=tenant_id)
timeline_b = env.create_branch("branch_b", tenant_id=tenant_id)
workload_a = Workload(env, tenant_id, timeline_a, branch_name="main")
workload_a.init()
@@ -1731,7 +1723,7 @@ def test_graceful_cluster_restart(neon_env_builder: NeonEnvBuilder):
for _ in range(0, tenant_count):
tid = TenantId.generate()
tenant_ids.append(tid)
env.neon_cli.create_tenant(
env.create_tenant(
tid, placement_policy='{"Attached":1}', shard_count=shard_count_per_tenant
)
@@ -1818,7 +1810,7 @@ def test_skip_drain_on_secondary_lag(neon_env_builder: NeonEnvBuilder, pg_bin: P
env = neon_env_builder.init_configs()
env.start()
tid, timeline_id = env.neon_cli.create_tenant(placement_policy='{"Attached":1}')
tid, timeline_id = env.create_tenant(placement_policy='{"Attached":1}')
# Give things a chance to settle.
env.storage_controller.reconcile_until_idle(timeout_secs=30)
@@ -1924,7 +1916,7 @@ def test_background_operation_cancellation(neon_env_builder: NeonEnvBuilder):
for _ in range(0, tenant_count):
tid = TenantId.generate()
tenant_ids.append(tid)
env.neon_cli.create_tenant(
env.create_tenant(
tid, placement_policy='{"Attached":1}', shard_count=shard_count_per_tenant
)
@@ -1984,7 +1976,7 @@ def test_storage_controller_node_deletion(
for _ in range(0, tenant_count):
tid = TenantId.generate()
tenant_ids.append(tid)
env.neon_cli.create_tenant(
env.create_tenant(
tid, placement_policy='{"Attached":1}', shard_count=shard_count_per_tenant
)
@@ -2109,7 +2101,7 @@ def test_storage_controller_metadata_health(
)
# Mock tenant with unhealthy scrubber scan result
tenant_b, _ = env.neon_cli.create_tenant(shard_count=shard_count)
tenant_b, _ = env.create_tenant(shard_count=shard_count)
tenant_b_shard_ids = (
env.storage_controller.tenant_shard_split(tenant_b, shard_count=shard_count)
if shard_count is not None
@@ -2117,7 +2109,7 @@ def test_storage_controller_metadata_health(
)
# Mock tenant that never gets a health update from scrubber
tenant_c, _ = env.neon_cli.create_tenant(shard_count=shard_count)
tenant_c, _ = env.create_tenant(shard_count=shard_count)
tenant_c_shard_ids = (
env.storage_controller.tenant_shard_split(tenant_c, shard_count=shard_count)
@@ -2517,7 +2509,7 @@ def test_storage_controller_validate_during_migration(neon_env_builder: NeonEnvB
tenant_id = env.initial_tenant
timeline_id = env.initial_timeline
env.neon_cli.create_tenant(tenant_id, timeline_id)
env.create_tenant(tenant_id, timeline_id)
env.storage_controller.pageserver_api().set_tenant_config(tenant_id, TENANT_CONF)
# Write enough data that a compaction would do some work (deleting some L0s)
@@ -2652,7 +2644,7 @@ def test_storage_controller_proxy_during_migration(
tenant_id = env.initial_tenant
timeline_id = env.initial_timeline
env.neon_cli.create_tenant(tenant_id, timeline_id)
env.create_tenant(tenant_id, timeline_id)
# The test stalls a reconcile on purpose to check if the long running
# reconcile alert fires.
@@ -2831,7 +2823,7 @@ def test_shard_preferred_azs(neon_env_builder: NeonEnvBuilder):
# Generate a layer to avoid shard split handling on ps from tripping
# up on debug assert.
timeline_id = TimelineId.generate()
env.neon_cli.create_timeline("bar", tids[0], timeline_id)
env.create_timeline("bar", tids[0], timeline_id)
workload = Workload(env, tids[0], timeline_id, branch_name="bar")
workload.init()

View File

@@ -135,7 +135,7 @@ def test_scrubber_physical_gc(neon_env_builder: NeonEnvBuilder, shard_count: Opt
tenant_id = TenantId.generate()
timeline_id = TimelineId.generate()
env.neon_cli.create_tenant(tenant_id, timeline_id, shard_count=shard_count)
env.create_tenant(tenant_id, timeline_id, shard_count=shard_count)
workload = Workload(env, tenant_id, timeline_id)
workload.init()
@@ -185,7 +185,7 @@ def test_scrubber_physical_gc_ancestors(
tenant_id = TenantId.generate()
timeline_id = TimelineId.generate()
env.neon_cli.create_tenant(
env.create_tenant(
tenant_id,
timeline_id,
shard_count=shard_count,
@@ -303,7 +303,7 @@ def test_scrubber_physical_gc_timeline_deletion(neon_env_builder: NeonEnvBuilder
tenant_id = TenantId.generate()
timeline_id = TimelineId.generate()
env.neon_cli.create_tenant(
env.create_tenant(
tenant_id,
timeline_id,
shard_count=None,
@@ -385,7 +385,7 @@ def test_scrubber_physical_gc_ancestors_split(neon_env_builder: NeonEnvBuilder):
tenant_id = TenantId.generate()
timeline_id = TimelineId.generate()
initial_shard_count = 2
env.neon_cli.create_tenant(
env.create_tenant(
tenant_id,
timeline_id,
shard_count=initial_shard_count,

View File

@@ -9,11 +9,11 @@ from fixtures.utils import wait_until
# It requires tracking information about replication origins at page server side
def test_subscriber_restart(neon_simple_env: NeonEnv):
env = neon_simple_env
env.neon_cli.create_branch("publisher")
env.create_branch("publisher")
pub = env.endpoints.create("publisher")
pub.start()
sub_timeline_id = env.neon_cli.create_branch("subscriber")
sub_timeline_id = env.create_branch("subscriber")
sub = env.endpoints.create("subscriber")
sub.start()

View File

@@ -38,7 +38,7 @@ def test_tenant_config(neon_env_builder: NeonEnvBuilder):
# Check that we raise on misspelled configs
invalid_conf_key = "some_invalid_setting_name_blah_blah_123"
try:
env.neon_cli.create_tenant(
env.create_tenant(
conf={
invalid_conf_key: "20000",
}
@@ -54,9 +54,9 @@ def test_tenant_config(neon_env_builder: NeonEnvBuilder):
"evictions_low_residence_duration_metric_threshold": "42s",
"eviction_policy": json.dumps({"kind": "NoEviction"}),
}
tenant, _ = env.neon_cli.create_tenant(conf=new_conf)
tenant, _ = env.create_tenant(conf=new_conf)
env.neon_cli.create_timeline("test_tenant_conf", tenant_id=tenant)
env.create_timeline("test_tenant_conf", tenant_id=tenant)
env.endpoints.create_start("test_tenant_conf", "main", tenant)
# check the configuration of the default tenant
@@ -121,10 +121,7 @@ def test_tenant_config(neon_env_builder: NeonEnvBuilder):
),
"max_lsn_wal_lag": "13000000",
}
env.neon_cli.config_tenant(
tenant_id=tenant,
conf=conf_update,
)
env.config_tenant(tenant_id=tenant, conf=conf_update)
updated_tenant_config = http_client.tenant_config(tenant_id=tenant)
updated_specific_config = updated_tenant_config.tenant_specific_overrides
@@ -172,10 +169,8 @@ def test_tenant_config(neon_env_builder: NeonEnvBuilder):
final_conf = {
"pitr_interval": "1 min",
}
env.neon_cli.config_tenant(
tenant_id=tenant,
conf=final_conf,
)
env.config_tenant(tenant_id=tenant, conf=final_conf)
final_tenant_config = http_client.tenant_config(tenant_id=tenant)
final_specific_config = final_tenant_config.tenant_specific_overrides
assert final_specific_config["pitr_interval"] == "1m"
@@ -218,7 +213,7 @@ def test_creating_tenant_conf_after_attach(neon_env_builder: NeonEnvBuilder):
assert isinstance(env.pageserver_remote_storage, LocalFsStorage)
# tenant is created with defaults, as in without config file
(tenant_id, timeline_id) = env.neon_cli.create_tenant()
(tenant_id, timeline_id) = env.create_tenant()
config_path = env.pageserver.tenant_dir(tenant_id) / "config-v1"
http_client = env.pageserver.http_client()
@@ -240,9 +235,9 @@ def test_creating_tenant_conf_after_attach(neon_env_builder: NeonEnvBuilder):
func=lambda: assert_tenant_state(http_client, tenant_id, "Active"),
)
env.neon_cli.config_tenant(tenant_id, {"gc_horizon": "1000000"})
env.config_tenant(tenant_id, {"gc_horizon": "1000000"})
contents_first = config_path.read_text()
env.neon_cli.config_tenant(tenant_id, {"gc_horizon": "0"})
env.config_tenant(tenant_id, {"gc_horizon": "0"})
contents_later = config_path.read_text()
# don't test applying the setting here, we have another test case to show it
@@ -298,7 +293,7 @@ def test_live_reconfig_get_evictions_low_residence_duration_metric_threshold(
metric = get_metric()
assert int(metric.value) > 0, "metric is updated"
env.neon_cli.config_tenant(
env.config_tenant(
tenant_id, {"evictions_low_residence_duration_metric_threshold": default_value}
)
updated_metric = get_metric()
@@ -306,9 +301,7 @@ def test_live_reconfig_get_evictions_low_residence_duration_metric_threshold(
metric.value
), "metric is unchanged when setting same value"
env.neon_cli.config_tenant(
tenant_id, {"evictions_low_residence_duration_metric_threshold": "2day"}
)
env.config_tenant(tenant_id, {"evictions_low_residence_duration_metric_threshold": "2day"})
metric = get_metric()
assert int(metric.labels["low_threshold_secs"]) == 2 * 24 * 60 * 60
assert int(metric.value) == 0
@@ -320,9 +313,7 @@ def test_live_reconfig_get_evictions_low_residence_duration_metric_threshold(
assert int(metric.labels["low_threshold_secs"]) == 2 * 24 * 60 * 60
assert int(metric.value) > 0
env.neon_cli.config_tenant(
tenant_id, {"evictions_low_residence_duration_metric_threshold": "2h"}
)
env.config_tenant(tenant_id, {"evictions_low_residence_duration_metric_threshold": "2h"})
metric = get_metric()
assert int(metric.labels["low_threshold_secs"]) == 2 * 60 * 60
assert int(metric.value) == 0, "value resets if label changes"
@@ -334,7 +325,7 @@ def test_live_reconfig_get_evictions_low_residence_duration_metric_threshold(
assert int(metric.labels["low_threshold_secs"]) == 2 * 60 * 60
assert int(metric.value) > 0, "set a non-zero value for next step"
env.neon_cli.config_tenant(tenant_id, {})
env.config_tenant(tenant_id, {})
metric = get_metric()
assert int(metric.labels["low_threshold_secs"]) == 24 * 60 * 60, "label resets to default"
assert int(metric.value) == 0, "value resets to default"
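
A minimal sketch of the live-reconfiguration round trip exercised above, going through env.config_tenant (fixture names follow the surrounding tests):

from fixtures.neon_fixtures import NeonEnvBuilder

def test_config_tenant_sketch(neon_env_builder: NeonEnvBuilder):
    env = neon_env_builder.init_start()
    tenant_id, _ = env.create_tenant(conf={"gc_horizon": "1000000"})
    http_client = env.pageserver.http_client()
    # Overrides take effect without restarting the pageserver.
    env.config_tenant(tenant_id, {"pitr_interval": "1 min"})
    overrides = http_client.tenant_config(tenant_id=tenant_id).tenant_specific_overrides
    assert overrides["pitr_interval"] == "1m"
    # An empty dict clears the per-tenant overrides back to defaults.
    env.config_tenant(tenant_id, {})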

View File

@@ -78,7 +78,7 @@ def test_tenant_delete_smoke(
# may need to retry on some remote storage errors injected by the test harness
error_tolerant_delete(ps_http, tenant_id)
env.neon_cli.create_tenant(
env.create_tenant(
tenant_id=tenant_id,
conf=many_small_layers_tenant_config(),
)
@@ -89,9 +89,7 @@ def test_tenant_delete_smoke(
# create two timelines one being the parent of another
parent = None
for timeline in ["first", "second"]:
timeline_id = env.neon_cli.create_branch(
timeline, tenant_id=tenant_id, ancestor_branch_name=parent
)
timeline_id = env.create_branch(timeline, ancestor_branch_name=parent, tenant_id=tenant_id)
with env.endpoints.create_start(timeline, tenant_id=tenant_id) as endpoint:
run_pg_bench_small(pg_bin, endpoint.connstr())
wait_for_last_flush_lsn(env, endpoint, tenant=tenant_id, timeline=timeline_id)
@@ -339,7 +337,7 @@ def test_tenant_delete_scrubber(pg_bin: PgBin, make_httpserver, neon_env_builder
ps_http = env.pageserver.http_client()
# create a tenant separate from the main tenant so that we have one remaining
# after we deleted it, as the scrubber treats empty buckets as an error.
(tenant_id, timeline_id) = env.neon_cli.create_tenant()
(tenant_id, timeline_id) = env.create_tenant()
with env.endpoints.create_start("main", tenant_id=tenant_id) as endpoint:
run_pg_bench_small(pg_bin, endpoint.connstr())

View File

@@ -72,7 +72,7 @@ def test_tenant_reattach(neon_env_builder: NeonEnvBuilder, mode: str):
pageserver_http = env.pageserver.http_client()
# create new tenant
tenant_id, timeline_id = env.neon_cli.create_tenant()
tenant_id, timeline_id = env.create_tenant()
env.pageserver.allowed_errors.extend(PERMIT_PAGE_SERVICE_ERRORS)
@@ -241,7 +241,7 @@ def test_tenant_reattach_while_busy(
pageserver_http = env.pageserver.http_client()
# create new tenant
tenant_id, timeline_id = env.neon_cli.create_tenant(
tenant_id, timeline_id = env.create_tenant(
# Create layers aggressively
conf={"checkpoint_distance": "100000"}
)

View File

@@ -219,7 +219,7 @@ def test_tenant_relocation(
log.info("tenant to relocate %s initial_timeline_id %s", tenant_id, env.initial_timeline)
env.neon_cli.create_branch("test_tenant_relocation_main", tenant_id=tenant_id)
env.create_branch("test_tenant_relocation_main", tenant_id=tenant_id)
ep_main = env.endpoints.create_start(
branch_name="test_tenant_relocation_main", tenant_id=tenant_id
)
@@ -232,7 +232,7 @@ def test_tenant_relocation(
expected_sum=500500,
)
env.neon_cli.create_branch(
env.create_branch(
new_branch_name="test_tenant_relocation_second",
ancestor_branch_name="test_tenant_relocation_main",
ancestor_start_lsn=current_lsn_main,
@@ -404,7 +404,7 @@ def test_emergency_relocate_with_branches_slow_replay(
# - A logical replication message between the inserts, so that we can conveniently
# pause the WAL ingestion between the two inserts.
# - Child branch, created after the inserts
tenant_id, _ = env.neon_cli.create_tenant()
tenant_id, _ = env.create_tenant()
main_endpoint = env.endpoints.create_start("main", tenant_id=tenant_id)
with main_endpoint.cursor() as cur:
@@ -417,7 +417,7 @@ def test_emergency_relocate_with_branches_slow_replay(
current_lsn = Lsn(query_scalar(cur, "SELECT pg_current_wal_flush_lsn()"))
main_endpoint.stop()
env.neon_cli.create_branch("child", tenant_id=tenant_id, ancestor_start_lsn=current_lsn)
env.create_branch("child", tenant_id=tenant_id, ancestor_start_lsn=current_lsn)
# Now kill the pageserver, remove the tenant directory, and restart. This simulates
# the scenario that a pageserver dies unexpectedly and cannot be recovered, so we relocate
@@ -548,7 +548,7 @@ def test_emergency_relocate_with_branches_createdb(
pageserver_http = env.pageserver.http_client()
# create new tenant
tenant_id, _ = env.neon_cli.create_tenant()
tenant_id, _ = env.create_tenant()
main_endpoint = env.endpoints.create_start("main", tenant_id=tenant_id)
with main_endpoint.cursor() as cur:
@@ -556,7 +556,7 @@ def test_emergency_relocate_with_branches_createdb(
cur.execute("CREATE DATABASE neondb")
current_lsn = Lsn(query_scalar(cur, "SELECT pg_current_wal_flush_lsn()"))
env.neon_cli.create_branch("child", tenant_id=tenant_id, ancestor_start_lsn=current_lsn)
env.create_branch("child", tenant_id=tenant_id, ancestor_start_lsn=current_lsn)
with main_endpoint.cursor(dbname="neondb") as cur:
cur.execute("CREATE TABLE test_migrate_one AS SELECT generate_series(1,100)")

View File

@@ -27,7 +27,7 @@ def test_empty_tenant_size(neon_env_builder: NeonEnvBuilder):
env = neon_env_builder.init_configs()
env.start()
(tenant_id, timeline_id) = env.neon_cli.create_tenant()
(tenant_id, timeline_id) = env.create_tenant()
http_client = env.pageserver.http_client()
initial_size = http_client.tenant_size(tenant_id)
@@ -67,12 +67,12 @@ def test_branched_empty_timeline_size(neon_simple_env: NeonEnv, test_output_dir:
gc_horizon
"""
env = neon_simple_env
(tenant_id, _) = env.neon_cli.create_tenant()
(tenant_id, _) = env.create_tenant()
http_client = env.pageserver.http_client()
initial_size = http_client.tenant_size(tenant_id)
first_branch_timeline_id = env.neon_cli.create_branch("first-branch", tenant_id=tenant_id)
first_branch_timeline_id = env.create_branch("first-branch", tenant_id=tenant_id)
with env.endpoints.create_start("first-branch", tenant_id=tenant_id) as endpoint:
with endpoint.cursor() as cur:
@@ -104,13 +104,13 @@ def test_branched_from_many_empty_parents_size(neon_simple_env: NeonEnv, test_ou
nth_n: 10------------I--------100
"""
env = neon_simple_env
(tenant_id, _) = env.neon_cli.create_tenant()
(tenant_id, _) = env.create_tenant()
http_client = env.pageserver.http_client()
initial_size = http_client.tenant_size(tenant_id)
first_branch_name = "first"
env.neon_cli.create_branch(first_branch_name, tenant_id=tenant_id)
env.create_branch(first_branch_name, tenant_id=tenant_id)
size_after_branching = http_client.tenant_size(tenant_id)
@@ -123,7 +123,7 @@ def test_branched_from_many_empty_parents_size(neon_simple_env: NeonEnv, test_ou
for i in range(0, 4):
latest_branch_name = f"nth_{i}"
last_branch = env.neon_cli.create_branch(
last_branch = env.create_branch(
latest_branch_name, ancestor_branch_name=last_branch_name, tenant_id=tenant_id
)
last_branch_name = latest_branch_name
@@ -159,7 +159,7 @@ def test_branch_point_within_horizon(neon_simple_env: NeonEnv, test_output_dir:
env = neon_simple_env
gc_horizon = 20_000
(tenant_id, main_id) = env.neon_cli.create_tenant(conf={"gc_horizon": str(gc_horizon)})
(tenant_id, main_id) = env.create_tenant(conf={"gc_horizon": str(gc_horizon)})
http_client = env.pageserver.http_client()
with env.endpoints.create_start("main", tenant_id=tenant_id) as endpoint:
@@ -172,9 +172,7 @@ def test_branch_point_within_horizon(neon_simple_env: NeonEnv, test_output_dir:
assert flushed_lsn.lsn_int - gc_horizon > initdb_lsn.lsn_int
branch_id = env.neon_cli.create_branch(
"branch", tenant_id=tenant_id, ancestor_start_lsn=flushed_lsn
)
branch_id = env.create_branch("branch", tenant_id=tenant_id, ancestor_start_lsn=flushed_lsn)
with env.endpoints.create_start("branch", tenant_id=tenant_id) as endpoint:
with endpoint.cursor() as cur:
@@ -201,7 +199,7 @@ def test_parent_within_horizon(neon_simple_env: NeonEnv, test_output_dir: Path):
env = neon_simple_env
gc_horizon = 5_000
(tenant_id, main_id) = env.neon_cli.create_tenant(conf={"gc_horizon": str(gc_horizon)})
(tenant_id, main_id) = env.create_tenant(conf={"gc_horizon": str(gc_horizon)})
http_client = env.pageserver.http_client()
with env.endpoints.create_start("main", tenant_id=tenant_id) as endpoint:
@@ -220,9 +218,7 @@ def test_parent_within_horizon(neon_simple_env: NeonEnv, test_output_dir: Path):
assert flushed_lsn.lsn_int - gc_horizon > initdb_lsn.lsn_int
branch_id = env.neon_cli.create_branch(
"branch", tenant_id=tenant_id, ancestor_start_lsn=flushed_lsn
)
branch_id = env.create_branch("branch", tenant_id=tenant_id, ancestor_start_lsn=flushed_lsn)
with env.endpoints.create_start("branch", tenant_id=tenant_id) as endpoint:
with endpoint.cursor() as cur:
@@ -248,13 +244,13 @@ def test_only_heads_within_horizon(neon_simple_env: NeonEnv, test_output_dir: Pa
"""
env = neon_simple_env
(tenant_id, main_id) = env.neon_cli.create_tenant(conf={"gc_horizon": "1024"})
(tenant_id, main_id) = env.create_tenant(conf={"gc_horizon": "1024"})
http_client = env.pageserver.http_client()
initial_size = http_client.tenant_size(tenant_id)
first_id = env.neon_cli.create_branch("first", tenant_id=tenant_id)
second_id = env.neon_cli.create_branch("second", tenant_id=tenant_id)
first_id = env.create_branch("first", tenant_id=tenant_id)
second_id = env.create_branch("second", tenant_id=tenant_id)
ids = {"main": main_id, "first": first_id, "second": second_id}
@@ -530,8 +526,8 @@ def test_get_tenant_size_with_multiple_branches(
size_at_branch = http_client.tenant_size(tenant_id)
assert size_at_branch > 0
first_branch_timeline_id = env.neon_cli.create_branch(
"first-branch", main_branch_name, tenant_id
first_branch_timeline_id = env.create_branch(
"first-branch", ancestor_branch_name=main_branch_name, tenant_id=tenant_id
)
size_after_first_branch = http_client.tenant_size(tenant_id)
@@ -557,8 +553,8 @@ def test_get_tenant_size_with_multiple_branches(
size_after_continuing_on_main = http_client.tenant_size(tenant_id)
assert size_after_continuing_on_main > size_after_growing_first_branch
second_branch_timeline_id = env.neon_cli.create_branch(
"second-branch", main_branch_name, tenant_id
second_branch_timeline_id = env.create_branch(
"second-branch", ancestor_branch_name=main_branch_name, tenant_id=tenant_id
)
size_after_second_branch = http_client.tenant_size(tenant_id)
assert_size_approx_equal(size_after_second_branch, size_after_continuing_on_main)
@@ -633,8 +629,8 @@ def test_synthetic_size_while_deleting(neon_env_builder: NeonEnvBuilder):
orig_size = client.tenant_size(env.initial_tenant)
branch_id = env.neon_cli.create_branch(
tenant_id=env.initial_tenant, ancestor_branch_name="main", new_branch_name="branch"
branch_id = env.create_branch(
"branch", ancestor_branch_name="main", tenant_id=env.initial_tenant
)
client.configure_failpoints((failpoint, "pause"))
@@ -651,8 +647,8 @@ def test_synthetic_size_while_deleting(neon_env_builder: NeonEnvBuilder):
assert_size_approx_equal(orig_size, size)
branch_id = env.neon_cli.create_branch(
tenant_id=env.initial_tenant, ancestor_branch_name="main", new_branch_name="branch2"
branch_id = env.create_branch(
"branch2", ancestor_branch_name="main", tenant_id=env.initial_tenant
)
client.configure_failpoints((failpoint, "pause"))
@@ -749,7 +745,7 @@ def test_lsn_lease_size(neon_env_builder: NeonEnvBuilder, test_output_dir: Path,
env, env.initial_tenant, env.initial_timeline, test_output_dir, action="branch"
)
tenant, timeline = env.neon_cli.create_tenant(conf=conf)
tenant, timeline = env.create_tenant(conf=conf)
lease_res = insert_with_action(env, tenant, timeline, test_output_dir, action="lease")
assert_size_approx_equal_for_lease_test(lease_res, ro_branch_res)
@@ -793,8 +789,8 @@ def insert_with_action(
res = client.timeline_lsn_lease(tenant, timeline, last_flush_lsn)
log.info(f"result from lsn_lease api: {res}")
elif action == "branch":
ro_branch = env.neon_cli.create_branch(
"ro_branch", tenant_id=tenant, ancestor_start_lsn=last_flush_lsn
ro_branch = env.create_branch(
"ro_branch", ancestor_start_lsn=last_flush_lsn, tenant_id=tenant
)
log.info(f"{ro_branch=} created")
else:

View File

@@ -31,8 +31,8 @@ def test_tenant_tasks(neon_env_builder: NeonEnvBuilder):
timeline_delete_wait_completed(client, tenant, t)
# Create tenant, start compute
tenant, _ = env.neon_cli.create_tenant()
env.neon_cli.create_timeline(name, tenant_id=tenant)
tenant, _ = env.create_tenant()
env.create_timeline(name, tenant_id=tenant)
endpoint = env.endpoints.create_start(name, tenant_id=tenant)
assert_tenant_state(
client,

View File

@@ -63,7 +63,7 @@ def test_tenant_creation_fails(neon_simple_env: NeonEnv):
)
assert initial_tenants == new_tenants, "should not create new tenants"
neon_simple_env.neon_cli.create_tenant()
neon_simple_env.create_tenant()
def test_tenants_normal_work(neon_env_builder: NeonEnvBuilder):
@@ -71,11 +71,11 @@ def test_tenants_normal_work(neon_env_builder: NeonEnvBuilder):
env = neon_env_builder.init_start()
"""Tests tenants with and without wal acceptors"""
tenant_1, _ = env.neon_cli.create_tenant()
tenant_2, _ = env.neon_cli.create_tenant()
tenant_1, _ = env.create_tenant()
tenant_2, _ = env.create_tenant()
env.neon_cli.create_timeline("test_tenants_normal_work", tenant_id=tenant_1)
env.neon_cli.create_timeline("test_tenants_normal_work", tenant_id=tenant_2)
env.create_timeline("test_tenants_normal_work", tenant_id=tenant_1)
env.create_timeline("test_tenants_normal_work", tenant_id=tenant_2)
endpoint_tenant1 = env.endpoints.create_start(
"test_tenants_normal_work",
@@ -102,11 +102,11 @@ def test_metrics_normal_work(neon_env_builder: NeonEnvBuilder):
neon_env_builder.pageserver_config_override = "availability_zone='test_ps_az'"
env = neon_env_builder.init_start()
tenant_1, _ = env.neon_cli.create_tenant()
tenant_2, _ = env.neon_cli.create_tenant()
tenant_1, _ = env.create_tenant()
tenant_2, _ = env.create_tenant()
timeline_1 = env.neon_cli.create_timeline("test_metrics_normal_work", tenant_id=tenant_1)
timeline_2 = env.neon_cli.create_timeline("test_metrics_normal_work", tenant_id=tenant_2)
timeline_1 = env.create_timeline("test_metrics_normal_work", tenant_id=tenant_1)
timeline_2 = env.create_timeline("test_metrics_normal_work", tenant_id=tenant_2)
endpoint_tenant1 = env.endpoints.create_start("test_metrics_normal_work", tenant_id=tenant_1)
endpoint_tenant2 = env.endpoints.create_start("test_metrics_normal_work", tenant_id=tenant_2)
@@ -250,11 +250,11 @@ def test_pageserver_metrics_removed_after_detach(neon_env_builder: NeonEnvBuilde
neon_env_builder.num_safekeepers = 3
env = neon_env_builder.init_start()
tenant_1, _ = env.neon_cli.create_tenant()
tenant_2, _ = env.neon_cli.create_tenant()
tenant_1, _ = env.create_tenant()
tenant_2, _ = env.create_tenant()
env.neon_cli.create_timeline("test_metrics_removed_after_detach", tenant_id=tenant_1)
env.neon_cli.create_timeline("test_metrics_removed_after_detach", tenant_id=tenant_2)
env.create_timeline("test_metrics_removed_after_detach", tenant_id=tenant_1)
env.create_timeline("test_metrics_removed_after_detach", tenant_id=tenant_2)
endpoint_tenant1 = env.endpoints.create_start(
"test_metrics_removed_after_detach", tenant_id=tenant_1

View File

@@ -66,7 +66,7 @@ def test_tenants_many(neon_env_builder: NeonEnvBuilder):
for _ in range(1, 5):
# Use a tiny checkpoint distance, to create a lot of layers quickly
tenant, _ = env.neon_cli.create_tenant(
tenant, _ = env.create_tenant(
conf={
"checkpoint_distance": "5000000",
}

View File

@@ -46,10 +46,11 @@ def test_timeline_archive(neon_env_builder: NeonEnvBuilder, shard_count: int):
# construct a pair of branches to validate that pageserver prohibits
# archival of ancestor timelines when they have non-archived child branches
parent_timeline_id = env.neon_cli.create_branch("test_ancestor_branch_archive_parent")
parent_timeline_id = env.create_branch("test_ancestor_branch_archive_parent")
leaf_timeline_id = env.neon_cli.create_branch(
"test_ancestor_branch_archive_branch1", "test_ancestor_branch_archive_parent"
leaf_timeline_id = env.create_branch(
"test_ancestor_branch_archive_branch1",
ancestor_branch_name="test_ancestor_branch_archive_parent",
)
with pytest.raises(

View File

@@ -68,12 +68,12 @@ def test_timeline_delete(neon_simple_env: NeonEnv):
# construct pair of branches to validate that pageserver prohibits
# deletion of ancestor timelines when they have child branches
parent_timeline_id = env.neon_cli.create_branch(
new_branch_name="test_ancestor_branch_delete_parent", ancestor_branch_name="main"
parent_timeline_id = env.create_branch(
"test_ancestor_branch_delete_parent", ancestor_branch_name="main"
)
leaf_timeline_id = env.neon_cli.create_branch(
new_branch_name="test_ancestor_branch_delete_branch1",
leaf_timeline_id = env.create_branch(
"test_ancestor_branch_delete_branch1",
ancestor_branch_name="test_ancestor_branch_delete_parent",
)
@@ -184,7 +184,7 @@ def test_delete_timeline_exercise_crash_safety_failpoints(
ps_http = env.pageserver.http_client()
timeline_id = env.neon_cli.create_timeline("delete")
timeline_id = env.create_timeline("delete")
with env.endpoints.create_start("delete") as endpoint:
# generate enough layers
run_pg_bench_small(pg_bin, endpoint.connstr())
@@ -334,7 +334,7 @@ def test_timeline_resurrection_on_attach(
wait_for_upload(ps_http, tenant_id, main_timeline_id, current_lsn)
log.info("upload of checkpoint is done")
branch_timeline_id = env.neon_cli.create_branch("new", "main")
branch_timeline_id = env.create_branch("new", ancestor_branch_name="main")
# Two variants of this test:
# - In fill_branch=True, the deleted branch has layer files.
@@ -409,13 +409,11 @@ def test_timeline_delete_fail_before_local_delete(neon_env_builder: NeonEnvBuild
ps_http.configure_failpoints(("timeline-delete-before-rm", "return"))
# construct pair of branches
intermediate_timeline_id = env.neon_cli.create_branch(
"test_timeline_delete_fail_before_local_delete"
)
intermediate_timeline_id = env.create_branch("test_timeline_delete_fail_before_local_delete")
leaf_timeline_id = env.neon_cli.create_branch(
leaf_timeline_id = env.create_branch(
"test_timeline_delete_fail_before_local_delete1",
"test_timeline_delete_fail_before_local_delete",
ancestor_branch_name="test_timeline_delete_fail_before_local_delete",
)
leaf_timeline_path = env.pageserver.timeline_dir(env.initial_tenant, leaf_timeline_id)
@@ -514,7 +512,7 @@ def test_concurrent_timeline_delete_stuck_on(
env = neon_env_builder.init_start()
child_timeline_id = env.neon_cli.create_branch("child", "main")
child_timeline_id = env.create_branch("child", ancestor_branch_name="main")
ps_http = env.pageserver.http_client()
@@ -591,7 +589,7 @@ def test_delete_timeline_client_hangup(neon_env_builder: NeonEnvBuilder):
env = neon_env_builder.init_start()
child_timeline_id = env.neon_cli.create_branch("child", "main")
child_timeline_id = env.create_branch("child", ancestor_branch_name="main")
ps_http = env.pageserver.http_client(retries=Retry(0, read=False))
@@ -656,7 +654,7 @@ def test_timeline_delete_works_for_remote_smoke(
timeline_ids = [env.initial_timeline]
for i in range(2):
branch_timeline_id = env.neon_cli.create_branch(f"new{i}", "main")
branch_timeline_id = env.create_branch(f"new{i}", ancestor_branch_name="main")
with env.endpoints.create_start(f"new{i}") as pg, pg.cursor() as cur:
cur.execute("CREATE TABLE f (i integer);")
cur.execute("INSERT INTO f VALUES (generate_series(1,1000));")
@@ -733,7 +731,7 @@ def test_delete_orphaned_objects(
ps_http = env.pageserver.http_client()
timeline_id = env.neon_cli.create_timeline("delete")
timeline_id = env.create_timeline("delete")
with env.endpoints.create_start("delete") as endpoint:
# generate enough layers
run_pg_bench_small(pg_bin, endpoint.connstr())
@@ -791,7 +789,7 @@ def test_timeline_delete_resumed_on_attach(
ps_http = env.pageserver.http_client()
timeline_id = env.neon_cli.create_timeline("delete")
timeline_id = env.create_timeline("delete")
with env.endpoints.create_start("delete") as endpoint:
# generate enough layers
run_pg_bench_small(pg_bin, endpoint.connstr())

View File

@@ -133,9 +133,7 @@ def test_ancestor_detach_branched_from(
name = "new main"
timeline_id = env.neon_cli.create_branch(
name, "main", env.initial_tenant, ancestor_start_lsn=branch_at
)
timeline_id = env.create_branch(name, ancestor_branch_name="main", ancestor_start_lsn=branch_at)
recorded = Lsn(client.timeline_detail(env.initial_tenant, timeline_id)["ancestor_lsn"])
if branch_at is None:
@@ -262,19 +260,19 @@ def test_ancestor_detach_reparents_earlier(neon_env_builder: NeonEnvBuilder):
wait_for_last_flush_lsn(env, ep, env.initial_tenant, env.initial_timeline)
# as this only gets reparented, we don't need to write to it like new main
reparented = env.neon_cli.create_branch(
"reparented", "main", env.initial_tenant, ancestor_start_lsn=branchpoint_pipe
reparented = env.create_branch(
"reparented", ancestor_branch_name="main", ancestor_start_lsn=branchpoint_pipe
)
same_branchpoint = env.neon_cli.create_branch(
"same_branchpoint", "main", env.initial_tenant, ancestor_start_lsn=branchpoint_x
same_branchpoint = env.create_branch(
"same_branchpoint", ancestor_branch_name="main", ancestor_start_lsn=branchpoint_x
)
timeline_id = env.neon_cli.create_branch(
"new main", "main", env.initial_tenant, ancestor_start_lsn=branchpoint_x
timeline_id = env.create_branch(
"new main", ancestor_branch_name="main", ancestor_start_lsn=branchpoint_x
)
after = env.neon_cli.create_branch("after", "main", env.initial_tenant, ancestor_start_lsn=None)
after = env.create_branch("after", ancestor_branch_name="main", ancestor_start_lsn=None)
all_reparented = client.detach_ancestor(env.initial_tenant, timeline_id)
assert set(all_reparented) == {reparented, same_branchpoint}
@@ -365,8 +363,8 @@ def test_detached_receives_flushes_while_being_detached(neon_env_builder: NeonEn
branchpoint = wait_for_last_flush_lsn(env, ep, env.initial_tenant, env.initial_timeline)
timeline_id = env.neon_cli.create_branch(
"new main", "main", tenant_id=env.initial_tenant, ancestor_start_lsn=branchpoint
timeline_id = env.create_branch(
"new main", ancestor_branch_name="main", ancestor_start_lsn=branchpoint
)
log.info("starting the new main endpoint")
@@ -479,10 +477,9 @@ def test_compaction_induced_by_detaches_in_history(
for num in more_good_numbers:
branch_name = f"br-{len(branches)}"
branch_timeline_id = env.neon_cli.create_branch(
branch_timeline_id = env.create_branch(
branch_name,
ancestor_branch_name=branches[-1][0],
tenant_id=env.initial_tenant,
ancestor_start_lsn=branch_lsn,
)
branches.append((branch_name, branch_timeline_id))
@@ -599,15 +596,15 @@ def test_timeline_ancestor_detach_idempotent_success(
else:
client = env.pageserver.http_client()
first_branch = env.neon_cli.create_branch("first_branch")
first_branch = env.create_branch("first_branch")
_ = env.neon_cli.create_branch("second_branch", ancestor_branch_name="first_branch")
_ = env.create_branch("second_branch", ancestor_branch_name="first_branch")
# these two will be reparented, and they should be returned in stable order
# from pageservers OR otherwise there will be an `error!` logging from
# storage controller
reparented1 = env.neon_cli.create_branch("first_reparented", ancestor_branch_name="main")
reparented2 = env.neon_cli.create_branch("second_reparented", ancestor_branch_name="main")
reparented1 = env.create_branch("first_reparented", ancestor_branch_name="main")
reparented2 = env.create_branch("second_reparented", ancestor_branch_name="main")
first_reparenting_response = client.detach_ancestor(env.initial_tenant, first_branch)
assert set(first_reparenting_response) == {reparented1, reparented2}
@@ -658,9 +655,9 @@ def test_timeline_ancestor_detach_errors(neon_env_builder: NeonEnvBuilder, shard
client.detach_ancestor(env.initial_tenant, env.initial_timeline)
assert info.value.status_code == 409
_ = env.neon_cli.create_branch("first_branch")
_ = env.create_branch("first_branch")
second_branch = env.neon_cli.create_branch("second_branch", ancestor_branch_name="first_branch")
second_branch = env.create_branch("second_branch", ancestor_branch_name="first_branch")
# funnily enough this does not have a prefix
with pytest.raises(PageserverApiException, match="too many ancestors") as info:
@@ -697,7 +694,7 @@ def test_sharded_timeline_detach_ancestor(neon_env_builder: NeonEnvBuilder):
utilized_pageservers = {x["node_id"] for x in shards}
assert len(utilized_pageservers) > 1, "all shards got placed on single pageserver?"
branch_timeline_id = env.neon_cli.create_branch(branch_name, tenant_id=env.initial_tenant)
branch_timeline_id = env.create_branch(branch_name)
with env.endpoints.create_start(branch_name, tenant_id=env.initial_tenant) as ep:
ep.safe_psql(
@@ -849,7 +846,7 @@ def test_timeline_detach_ancestor_interrupted_by_deletion(
pageservers = dict((int(p.id), p) for p in env.pageservers)
detached_timeline = env.neon_cli.create_branch("detached soon", "main")
detached_timeline = env.create_branch("detached soon", ancestor_branch_name="main")
pausepoint = "timeline-detach-ancestor::before_starting_after_locking-pausable"
@@ -993,7 +990,7 @@ def test_sharded_tad_interleaved_after_partial_success(neon_env_builder: NeonEnv
ps.http_client().timeline_checkpoint(shard_id, env.initial_timeline)
def create_reparentable_timeline() -> TimelineId:
return env.neon_cli.create_branch(
return env.create_branch(
"first_branch", ancestor_branch_name="main", ancestor_start_lsn=first_branch_lsn
)
@@ -1002,7 +999,7 @@ def test_sharded_tad_interleaved_after_partial_success(neon_env_builder: NeonEnv
else:
first_branch = None
detached_branch = env.neon_cli.create_branch(
detached_branch = env.create_branch(
"detached_branch", ancestor_branch_name="main", ancestor_start_lsn=detached_branch_lsn
)
@@ -1169,7 +1166,7 @@ def test_retryable_500_hit_through_storcon_during_timeline_detach_ancestor(
shards = env.storage_controller.locate(env.initial_tenant)
assert len(set(x["node_id"] for x in shards)) == shard_count
detached_branch = env.neon_cli.create_branch("detached_branch", ancestor_branch_name="main")
detached_branch = env.create_branch("detached_branch", ancestor_branch_name="main")
pausepoint = "timeline-detach-ancestor::before_starting_after_locking-pausable"
failpoint = "timeline-detach-ancestor::before_starting_after_locking"
@@ -1294,8 +1291,8 @@ def test_retried_detach_ancestor_after_failed_reparenting(neon_env_builder: Neon
)
branch_lsn = wait_for_last_flush_lsn(env, ep, env.initial_tenant, env.initial_timeline)
http.timeline_checkpoint(env.initial_tenant, env.initial_timeline)
branch = env.neon_cli.create_branch(
f"branch_{counter}", "main", ancestor_start_lsn=branch_lsn
branch = env.create_branch(
f"branch_{counter}", ancestor_branch_name="main", ancestor_start_lsn=branch_lsn
)
timelines.append(branch)
@@ -1432,7 +1429,7 @@ def test_timeline_is_deleted_before_timeline_detach_ancestor_completes(
http = env.pageserver.http_client()
detached = env.neon_cli.create_branch("detached")
detached = env.create_branch("detached")
failpoint = "timeline-detach-ancestor::after_activating_before_finding-pausable"
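
A minimal sketch tying the branch wrapper to the detach-ancestor API used throughout this file (fixture names follow the surrounding tests; behavior for a branch with no reparentable siblings is not asserted):

from fixtures.neon_fixtures import NeonEnvBuilder

def test_detach_ancestor_sketch(neon_env_builder: NeonEnvBuilder):
    env = neon_env_builder.init_start()
    client = env.pageserver.http_client()
    # Branch from main, then detach it so it no longer depends on its ancestor.
    detached = env.create_branch("detached", ancestor_branch_name="main")
    # detach_ancestor returns the sibling timelines that were reparented.
    reparented = client.detach_ancestor(env.initial_tenant, detached)
    # The detached timeline itself is never part of the reparented set.
    assert detached not in set(reparented)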

View File

@@ -28,7 +28,7 @@ def test_gc_blocking_by_timeline(neon_env_builder: NeonEnvBuilder, sharded: bool
pss = ManyPageservers(list(map(lambda ps: ScrollableLog(ps, None), env.pageservers)))
foo_branch = env.neon_cli.create_branch("foo", "main", env.initial_tenant)
foo_branch = env.create_branch("foo", ancestor_branch_name="main", tenant_id=env.initial_tenant)
gc_active_line = ".* gc_loop.*: [12] timelines need GC"
gc_skipped_line = ".* gc_loop.*: Skipping GC: .*"

View File

@@ -36,7 +36,7 @@ from fixtures.utils import get_timeline_dir_size, wait_until
def test_timeline_size(neon_simple_env: NeonEnv):
env = neon_simple_env
new_timeline_id = env.neon_cli.create_branch("test_timeline_size", "main")
new_timeline_id = env.create_branch("test_timeline_size", ancestor_branch_name="main")
client = env.pageserver.http_client()
client.timeline_wait_logical_size(env.initial_tenant, new_timeline_id)
@@ -68,7 +68,9 @@ def test_timeline_size(neon_simple_env: NeonEnv):
def test_timeline_size_createdropdb(neon_simple_env: NeonEnv):
env = neon_simple_env
new_timeline_id = env.neon_cli.create_branch("test_timeline_size_createdropdb", "main")
new_timeline_id = env.create_branch(
"test_timeline_size_createdropdb", ancestor_branch_name="main"
)
client = env.pageserver.http_client()
client.timeline_wait_logical_size(env.initial_tenant, new_timeline_id)
@@ -148,7 +150,7 @@ def wait_for_pageserver_catchup(endpoint_main: Endpoint, polling_interval=1, tim
def test_timeline_size_quota_on_startup(neon_env_builder: NeonEnvBuilder):
env = neon_env_builder.init_start()
client = env.pageserver.http_client()
new_timeline_id = env.neon_cli.create_branch("test_timeline_size_quota_on_startup")
new_timeline_id = env.create_branch("test_timeline_size_quota_on_startup")
client.timeline_wait_logical_size(env.initial_tenant, new_timeline_id)
@@ -236,7 +238,7 @@ def test_timeline_size_quota_on_startup(neon_env_builder: NeonEnvBuilder):
def test_timeline_size_quota(neon_env_builder: NeonEnvBuilder):
env = neon_env_builder.init_start()
client = env.pageserver.http_client()
new_timeline_id = env.neon_cli.create_branch("test_timeline_size_quota")
new_timeline_id = env.create_branch("test_timeline_size_quota")
client.timeline_wait_logical_size(env.initial_tenant, new_timeline_id)
@@ -373,7 +375,7 @@ def test_timeline_physical_size_init(neon_env_builder: NeonEnvBuilder):
env = neon_env_builder.init_start()
new_timeline_id = env.neon_cli.create_branch("test_timeline_physical_size_init")
new_timeline_id = env.create_branch("test_timeline_physical_size_init")
endpoint = env.endpoints.create_start("test_timeline_physical_size_init")
endpoint.safe_psql_many(
@@ -410,7 +412,7 @@ def test_timeline_physical_size_post_checkpoint(neon_env_builder: NeonEnvBuilder
env = neon_env_builder.init_start()
pageserver_http = env.pageserver.http_client()
new_timeline_id = env.neon_cli.create_branch("test_timeline_physical_size_post_checkpoint")
new_timeline_id = env.create_branch("test_timeline_physical_size_post_checkpoint")
endpoint = env.endpoints.create_start("test_timeline_physical_size_post_checkpoint")
endpoint.safe_psql_many(
@@ -446,7 +448,7 @@ def test_timeline_physical_size_post_compaction(neon_env_builder: NeonEnvBuilder
)
pageserver_http = env.pageserver.http_client()
new_timeline_id = env.neon_cli.create_branch("test_timeline_physical_size_post_compaction")
new_timeline_id = env.create_branch("test_timeline_physical_size_post_compaction")
endpoint = env.endpoints.create_start("test_timeline_physical_size_post_compaction")
# We don't want autovacuum to run on the table, while we are calculating the
@@ -496,7 +498,7 @@ def test_timeline_physical_size_post_gc(neon_env_builder: NeonEnvBuilder):
)
pageserver_http = env.pageserver.http_client()
new_timeline_id = env.neon_cli.create_branch("test_timeline_physical_size_post_gc")
new_timeline_id = env.create_branch("test_timeline_physical_size_post_gc")
endpoint = env.endpoints.create_start("test_timeline_physical_size_post_gc")
# Like in test_timeline_physical_size_post_compaction, disable autovacuum
@@ -543,7 +545,7 @@ def test_timeline_size_metrics(
env = neon_simple_env
pageserver_http = env.pageserver.http_client()
new_timeline_id = env.neon_cli.create_branch("test_timeline_size_metrics")
new_timeline_id = env.create_branch("test_timeline_size_metrics")
endpoint = env.endpoints.create_start("test_timeline_size_metrics")
endpoint.safe_psql_many(
@@ -620,7 +622,7 @@ def test_tenant_physical_size(neon_env_builder: NeonEnvBuilder):
pageserver_http = env.pageserver.http_client()
client = env.pageserver.http_client()
tenant, timeline = env.neon_cli.create_tenant()
tenant, timeline = env.create_tenant()
def get_timeline_resident_physical_size(timeline: TimelineId):
sizes = get_physical_size_values(env, tenant, timeline)
@@ -631,7 +633,7 @@ def test_tenant_physical_size(neon_env_builder: NeonEnvBuilder):
for i in range(10):
n_rows = random.randint(100, 1000)
timeline = env.neon_cli.create_branch(f"test_tenant_physical_size_{i}", tenant_id=tenant)
timeline = env.create_branch(f"test_tenant_physical_size_{i}", tenant_id=tenant)
endpoint = env.endpoints.create_start(f"test_tenant_physical_size_{i}", tenant_id=tenant)
endpoint.safe_psql_many(
@@ -743,7 +745,7 @@ def test_ondemand_activation(neon_env_builder: NeonEnvBuilder):
tenant_ids = {env.initial_tenant}
for _i in range(0, n_tenants - 1):
tenant_id = TenantId.generate()
env.neon_cli.create_tenant(tenant_id)
env.create_tenant(tenant_id)
tenant_ids.add(tenant_id)
# Restart pageserver with logical size calculations paused
@@ -990,8 +992,8 @@ def test_eager_attach_does_not_queue_up(neon_env_builder: NeonEnvBuilder):
# the supporting_second does nothing except queue behind env.initial_tenant
# for purposes of showing that eager_tenant breezes past the queue
supporting_second, _ = env.neon_cli.create_tenant()
eager_tenant, _ = env.neon_cli.create_tenant()
supporting_second, _ = env.create_tenant()
eager_tenant, _ = env.create_tenant()
client = env.pageserver.http_client()
client.tenant_location_conf(
@@ -1067,7 +1069,7 @@ def test_lazy_attach_activation(neon_env_builder: NeonEnvBuilder, activation_met
env = neon_env_builder.init_start()
# because this returns (also elsewhere in this file), we know that SpawnMode::Create skips the queue
lazy_tenant, _ = env.neon_cli.create_tenant()
lazy_tenant, _ = env.create_tenant()
client = env.pageserver.http_client()
client.tenant_location_conf(
@@ -1131,7 +1133,7 @@ def test_lazy_attach_activation(neon_env_builder: NeonEnvBuilder, activation_met
# starting up the endpoint should make it jump the queue
wait_until(10, 1, lazy_tenant_is_active)
elif activation_method == "branch":
env.neon_cli.create_timeline("second_branch", lazy_tenant)
env.create_timeline("second_branch", lazy_tenant)
wait_until(10, 1, lazy_tenant_is_active)
elif activation_method == "delete":
delete_lazy_activating(lazy_tenant, env.pageserver, expect_attaching=True)
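
A minimal sketch of bootstrapping a brand-new timeline on a fresh tenant with env.create_timeline, as the "branch" activation path above does (fixture names follow the surrounding tests):

from fixtures.neon_fixtures import NeonEnvBuilder

def test_create_timeline_sketch(neon_env_builder: NeonEnvBuilder):
    env = neon_env_builder.init_start()
    tenant_id, _ = env.create_tenant()
    # Creates a fresh timeline on the tenant, not a branch of an existing one.
    timeline_id = env.create_timeline("second_branch", tenant_id=tenant_id)
    endpoint = env.endpoints.create_start("second_branch", tenant_id=tenant_id)
    endpoint.safe_psql("SELECT 1")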

View File

@@ -13,7 +13,7 @@ def test_truncate(neon_env_builder: NeonEnvBuilder, zenbenchmark):
# Problems with FSM/VM forks truncation are most frequently detected during page reconstruction triggered
# by image layer generation. So adjust default parameters to make it happen more frequently.
tenant, _ = env.neon_cli.create_tenant(
tenant, _ = env.create_tenant(
conf={
# disable automatic GC
"gc_period": "0s",

View File

@@ -96,7 +96,7 @@ def test_twophase(neon_simple_env: NeonEnv):
Test branching, when a transaction is in prepared state
"""
env = neon_simple_env
env.neon_cli.create_branch("test_twophase")
env.create_branch("test_twophase")
twophase_test_on_timeline(env)
@@ -147,7 +147,7 @@ def test_twophase_at_wal_segment_start(neon_simple_env: NeonEnv):
very first page of a WAL segment and the server was started up at that first page.
"""
env = neon_simple_env
timeline_id = env.neon_cli.create_branch("test_twophase", "main")
timeline_id = env.create_branch("test_twophase", ancestor_branch_name="main")
endpoint = env.endpoints.create_start(
"test_twophase", config_lines=["max_prepared_transactions=5"]

View File

@@ -146,7 +146,7 @@ def test_many_timelines(neon_env_builder: NeonEnvBuilder):
# start postgres on each timeline
endpoints = []
for branch_name in branch_names:
new_timeline_id = env.neon_cli.create_branch(branch_name)
new_timeline_id = env.create_branch(branch_name)
endpoints.append(env.endpoints.create_start(branch_name))
branch_names_to_timeline_ids[branch_name] = new_timeline_id
@@ -284,7 +284,7 @@ def test_restarts(neon_env_builder: NeonEnvBuilder):
neon_env_builder.num_safekeepers = n_acceptors
env = neon_env_builder.init_start()
env.neon_cli.create_branch("test_safekeepers_restarts")
env.create_branch("test_safekeepers_restarts")
endpoint = env.endpoints.create_start("test_safekeepers_restarts")
# we rely upon autocommit after each statement
@@ -314,7 +314,7 @@ def test_broker(neon_env_builder: NeonEnvBuilder):
env = neon_env_builder.init_start()
tenant_id = env.initial_tenant
timeline_id = env.neon_cli.create_branch("test_broker", "main")
timeline_id = env.create_branch("test_broker", ancestor_branch_name="main")
endpoint = env.endpoints.create_start("test_broker")
endpoint.safe_psql("CREATE TABLE t(key int primary key, value text)")
@@ -374,7 +374,7 @@ def test_wal_removal(neon_env_builder: NeonEnvBuilder, auth_enabled: bool):
env = neon_env_builder.init_start()
tenant_id = env.initial_tenant
timeline_id = env.neon_cli.create_branch("test_safekeepers_wal_removal")
timeline_id = env.create_branch("test_safekeepers_wal_removal")
endpoint = env.endpoints.create_start("test_safekeepers_wal_removal")
# Note: it is important to insert at least two segments, as currently
@@ -504,7 +504,7 @@ def test_wal_backup(neon_env_builder: NeonEnvBuilder):
)
tenant_id = env.initial_tenant
timeline_id = env.neon_cli.create_branch("test_safekeepers_wal_backup")
timeline_id = env.create_branch("test_safekeepers_wal_backup")
endpoint = env.endpoints.create_start("test_safekeepers_wal_backup")
pg_conn = endpoint.connect()
@@ -561,7 +561,7 @@ def test_s3_wal_replay(neon_env_builder: NeonEnvBuilder):
env = neon_env_builder.init_start()
tenant_id = env.initial_tenant
timeline_id = env.neon_cli.create_branch("test_s3_wal_replay")
timeline_id = env.create_branch("test_s3_wal_replay")
endpoint = env.endpoints.create_start("test_s3_wal_replay")
@@ -849,7 +849,7 @@ def test_timeline_status(neon_env_builder: NeonEnvBuilder, auth_enabled: bool):
env = neon_env_builder.init_start()
tenant_id = env.initial_tenant
timeline_id = env.neon_cli.create_branch("test_timeline_status")
timeline_id = env.create_branch("test_timeline_status")
endpoint = env.endpoints.create_start("test_timeline_status")
wa = env.safekeepers[0]
@@ -948,7 +948,7 @@ def test_start_replication_term(neon_env_builder: NeonEnvBuilder):
env = neon_env_builder.init_start()
tenant_id = env.initial_tenant
timeline_id = env.neon_cli.create_branch("test_start_replication_term")
timeline_id = env.create_branch("test_start_replication_term")
endpoint = env.endpoints.create_start("test_start_replication_term")
endpoint.safe_psql("CREATE TABLE t(key int primary key, value text)")
@@ -980,7 +980,7 @@ def test_sk_auth(neon_env_builder: NeonEnvBuilder):
env = neon_env_builder.init_start()
tenant_id = env.initial_tenant
timeline_id = env.neon_cli.create_branch("test_sk_auth")
timeline_id = env.create_branch("test_sk_auth")
env.endpoints.create_start("test_sk_auth")
sk = env.safekeepers[0]
@@ -1041,7 +1041,7 @@ def test_restart_endpoint(neon_env_builder: NeonEnvBuilder):
neon_env_builder.num_safekeepers = 3
env = neon_env_builder.init_start()
env.neon_cli.create_branch("test_sk_auth_restart_endpoint")
env.create_branch("test_sk_auth_restart_endpoint")
endpoint = env.endpoints.create_start("test_sk_auth_restart_endpoint")
with closing(endpoint.connect()) as conn:
@@ -1125,7 +1125,7 @@ def test_late_init(neon_env_builder: NeonEnvBuilder):
sk1.stop()
tenant_id = env.initial_tenant
timeline_id = env.neon_cli.create_branch("test_late_init")
timeline_id = env.create_branch("test_late_init")
endpoint = env.endpoints.create_start("test_late_init")
# create and insert smth while safekeeper is down...
endpoint.safe_psql("create table t(key int, value text)")
@@ -1261,7 +1261,7 @@ def test_lagging_sk(neon_env_builder: NeonEnvBuilder):
# create and insert smth while safekeeper is down...
sk1.stop()
tenant_id = env.initial_tenant
timeline_id = env.neon_cli.create_branch("test_lagging_sk")
timeline_id = env.create_branch("test_lagging_sk")
ep = env.endpoints.create_start("test_lagging_sk")
ep.safe_psql("create table t(key int, value text)")
# make small insert to be on the same segment
@@ -1348,7 +1348,7 @@ def test_peer_recovery(neon_env_builder: NeonEnvBuilder):
env = neon_env_builder.init_start()
tenant_id = env.initial_tenant
timeline_id = env.neon_cli.create_branch("test_peer_recovery")
timeline_id = env.create_branch("test_peer_recovery")
endpoint = env.endpoints.create_start("test_peer_recovery")
endpoint.safe_psql("create table t(key int, value text)")
@@ -1412,7 +1412,7 @@ def test_wp_graceful_shutdown(neon_env_builder: NeonEnvBuilder, pg_bin: PgBin):
env = neon_env_builder.init_start()
tenant_id = env.initial_tenant
timeline_id = env.neon_cli.create_branch("test_wp_graceful_shutdown")
timeline_id = env.create_branch("test_wp_graceful_shutdown")
ep = env.endpoints.create_start("test_wp_graceful_shutdown")
ep.safe_psql("create table t(key int, value text)")
ep.stop()
@@ -1605,7 +1605,7 @@ def test_replace_safekeeper(neon_env_builder: NeonEnvBuilder):
neon_env_builder.num_safekeepers = 4
env = neon_env_builder.init_start()
tenant_id = env.initial_tenant
timeline_id = env.neon_cli.create_branch("test_replace_safekeeper")
timeline_id = env.create_branch("test_replace_safekeeper")
log.info("Use only first 3 safekeepers")
env.safekeepers[3].stop()
@@ -1672,12 +1672,12 @@ def test_delete_force(neon_env_builder: NeonEnvBuilder, auth_enabled: bool):
# Create two tenants: one will be deleted, other should be preserved.
tenant_id = env.initial_tenant
timeline_id_1 = env.neon_cli.create_branch("br1") # Active, delete explicitly
timeline_id_2 = env.neon_cli.create_branch("br2") # Inactive, delete explicitly
timeline_id_3 = env.neon_cli.create_branch("br3") # Active, delete with the tenant
timeline_id_4 = env.neon_cli.create_branch("br4") # Inactive, delete with the tenant
timeline_id_1 = env.create_branch("br1") # Active, delete explicitly
timeline_id_2 = env.create_branch("br2") # Inactive, delete explicitly
timeline_id_3 = env.create_branch("br3") # Active, delete with the tenant
timeline_id_4 = env.create_branch("br4") # Inactive, delete with the tenant
tenant_id_other, timeline_id_other = env.neon_cli.create_tenant()
tenant_id_other, timeline_id_other = env.create_tenant()
# Populate branches
endpoint_1 = env.endpoints.create_start("br1")
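This hunk exercises both wrappers at once: env.create_branch() hands back each new timeline's ID, while env.create_tenant() with no arguments returns a fresh (tenant ID, timeline ID) pair for a second, independent tenant. A condensed sketch of that setup, with illustrative branch and table names:

# Sketch only; names are assumptions, not part of this diff.
from fixtures.neon_fixtures import NeonEnvBuilder


def test_two_tenants_sketch(neon_env_builder: NeonEnvBuilder):
    env = neon_env_builder.init_start()
    # Branch on the initial tenant; the call returns the new timeline's ID.
    doomed_timeline = env.create_branch("doomed")
    # Second tenant that the test later checks is left untouched.
    other_tenant, other_timeline = env.create_tenant()
    endpoint = env.endpoints.create_start("doomed")
    endpoint.safe_psql("CREATE TABLE t(key int PRIMARY KEY, value text)")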
@@ -2009,7 +2009,7 @@ def test_idle_reconnections(neon_env_builder: NeonEnvBuilder):
env = neon_env_builder.init_start()
tenant_id = env.initial_tenant
timeline_id = env.neon_cli.create_branch("test_idle_reconnections")
timeline_id = env.create_branch("test_idle_reconnections")
def collect_stats() -> Dict[str, float]:
# we need to collect safekeeper_pg_queries_received_total metric from all safekeepers
@@ -2244,7 +2244,7 @@ def test_broker_discovery(neon_env_builder: NeonEnvBuilder):
neon_env_builder.enable_safekeeper_remote_storage(RemoteStorageKind.LOCAL_FS)
env = neon_env_builder.init_start()
env.neon_cli.create_branch("test_broker_discovery")
env.create_branch("test_broker_discovery")
endpoint = env.endpoints.create_start(
"test_broker_discovery",
@@ -2325,7 +2325,7 @@ def test_s3_eviction(
# start postgres on each timeline
endpoints: list[Endpoint] = []
for branch_name in branch_names:
timeline_id = env.neon_cli.create_branch(branch_name)
timeline_id = env.create_branch(branch_name)
timelines.append(timeline_id)
endpoints.append(env.endpoints.create_start(branch_name))



@@ -218,7 +218,7 @@ def test_restarts_under_load(neon_env_builder: NeonEnvBuilder):
neon_env_builder.enable_safekeeper_remote_storage(RemoteStorageKind.LOCAL_FS)
env = neon_env_builder.init_start()
env.neon_cli.create_branch("test_safekeepers_restarts_under_load")
env.create_branch("test_safekeepers_restarts_under_load")
# Enable backpressure with 1MB maximal lag, because we don't want to block on `wait_for_lsn()` for too long
endpoint = env.endpoints.create_start(
"test_safekeepers_restarts_under_load", config_lines=["max_replication_write_lag=1MB"]
@@ -234,7 +234,7 @@ def test_restarts_frequent_checkpoints(neon_env_builder: NeonEnvBuilder):
neon_env_builder.num_safekeepers = 3
env = neon_env_builder.init_start()
env.neon_cli.create_branch("test_restarts_frequent_checkpoints")
env.create_branch("test_restarts_frequent_checkpoints")
# Enable backpressure with 1MB maximal lag, because we don't want to block on `wait_for_lsn()` for too long
endpoint = env.endpoints.create_start(
"test_restarts_frequent_checkpoints",
@@ -325,7 +325,7 @@ def test_compute_restarts(neon_env_builder: NeonEnvBuilder):
neon_env_builder.num_safekeepers = 3
env = neon_env_builder.init_start()
env.neon_cli.create_branch("test_compute_restarts")
env.create_branch("test_compute_restarts")
asyncio.run(run_compute_restarts(env))
@@ -435,7 +435,7 @@ def test_concurrent_computes(neon_env_builder: NeonEnvBuilder):
neon_env_builder.num_safekeepers = 3
env = neon_env_builder.init_start()
env.neon_cli.create_branch("test_concurrent_computes")
env.create_branch("test_concurrent_computes")
asyncio.run(run_concurrent_computes(env))
@@ -484,7 +484,7 @@ def test_unavailability(neon_env_builder: NeonEnvBuilder):
neon_env_builder.num_safekeepers = 2
env = neon_env_builder.init_start()
env.neon_cli.create_branch("test_safekeepers_unavailability")
env.create_branch("test_safekeepers_unavailability")
endpoint = env.endpoints.create_start("test_safekeepers_unavailability")
asyncio.run(run_unavailability(env, endpoint))
@@ -493,7 +493,7 @@ def test_unavailability(neon_env_builder: NeonEnvBuilder):
async def run_recovery_uncommitted(env: NeonEnv):
(sk1, sk2, _) = env.safekeepers
env.neon_cli.create_branch("test_recovery_uncommitted")
env.create_branch("test_recovery_uncommitted")
ep = env.endpoints.create_start("test_recovery_uncommitted")
ep.safe_psql("create table t(key int, value text)")
ep.safe_psql("insert into t select generate_series(1, 100), 'payload'")
@@ -589,7 +589,7 @@ def test_wal_truncation(neon_env_builder: NeonEnvBuilder):
async def run_segment_init_failure(env: NeonEnv):
env.neon_cli.create_branch("test_segment_init_failure")
env.create_branch("test_segment_init_failure")
ep = env.endpoints.create_start("test_segment_init_failure")
ep.safe_psql("create table t(key int, value text)")
ep.safe_psql("insert into t select generate_series(1, 100), 'payload'")
@@ -684,7 +684,7 @@ def test_race_conditions(neon_env_builder: NeonEnvBuilder):
neon_env_builder.num_safekeepers = 3
env = neon_env_builder.init_start()
env.neon_cli.create_branch("test_safekeepers_race_conditions")
env.create_branch("test_safekeepers_race_conditions")
endpoint = env.endpoints.create_start("test_safekeepers_race_conditions")
asyncio.run(run_race_conditions(env, endpoint))
@@ -761,7 +761,7 @@ def test_wal_lagging(neon_env_builder: NeonEnvBuilder, test_output_dir: Path, bu
neon_env_builder.num_safekeepers = 3
env = neon_env_builder.init_start()
env.neon_cli.create_branch("test_wal_lagging")
env.create_branch("test_wal_lagging")
endpoint = env.endpoints.create_start("test_wal_lagging")
asyncio.run(run_wal_lagging(env, endpoint, test_output_dir))


@@ -14,7 +14,7 @@ def test_pageserver_lsn_wait_error_start(neon_env_builder: NeonEnvBuilder):
env = neon_env_builder.init_start()
env.pageserver.http_client()
tenant_id, timeline_id = env.neon_cli.create_tenant()
tenant_id, timeline_id = env.create_tenant()
expected_timeout_error = f"Timed out while waiting for WAL record at LSN {future_lsn} to arrive"
env.pageserver.allowed_errors.append(f".*{expected_timeout_error}.*")
@@ -57,7 +57,7 @@ def test_pageserver_lsn_wait_error_safekeeper_stop(neon_env_builder: NeonEnvBuil
env = neon_env_builder.init_start()
env.pageserver.http_client()
tenant_id, timeline_id = env.neon_cli.create_tenant()
tenant_id, timeline_id = env.create_tenant()
elements_to_insert = 1_000_000
expected_timeout_error = f"Timed out while waiting for WAL record at LSN {future_lsn} to arrive"
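Both wait-for-LSN tests now obtain their tenant the same way before provoking the timeout. A trimmed sketch of that shared setup; the regex below is a shortened form of the expected_timeout_error shown above, and the test name is illustrative:

# Sketch only; names and the shortened regex are assumptions.
from fixtures.neon_fixtures import NeonEnvBuilder


def test_lsn_wait_setup_sketch(neon_env_builder: NeonEnvBuilder):
    env = neon_env_builder.init_start()
    env.pageserver.http_client()
    # Fresh tenant plus its initial timeline, via the env-level wrapper.
    tenant_id, timeline_id = env.create_tenant()
    # Tolerate the expected timeout message in the pageserver log.
    env.pageserver.allowed_errors.append(".*Timed out while waiting for WAL record.*")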


@@ -38,7 +38,7 @@ def test_wal_restore(
pg_distrib_dir: Path,
):
env = neon_env_builder.init_start()
env.neon_cli.create_branch("test_wal_restore")
env.create_branch("test_wal_restore")
endpoint = env.endpoints.create_start("test_wal_restore")
endpoint.safe_psql("create table t as select generate_series(1,300000)")
tenant_id = TenantId(endpoint.safe_psql("show neon.tenant_id")[0][0])


@@ -40,7 +40,7 @@ def test_walredo_not_left_behind_on_detach(neon_env_builder: NeonEnvBuilder):
pageserver_http.tenant_status(tenant_id)
# create new tenant
tenant_id, _ = env.neon_cli.create_tenant()
tenant_id, _ = env.create_tenant()
# assert tenant exists on disk
assert (env.pageserver.tenant_dir(tenant_id)).exists()
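The detach test ends with the same wrapper call followed by an on-disk check; a minimal sketch of that final assertion, with the test name again illustrative:

# Sketch only; test name is an assumption.
from fixtures.neon_fixtures import NeonEnvBuilder


def test_tenant_dir_sketch(neon_env_builder: NeonEnvBuilder):
    env = neon_env_builder.init_start()
    # Create a tenant through the env wrapper and confirm its directory exists.
    tenant_id, _ = env.create_tenant()
    assert env.pageserver.tenant_dir(tenant_id).exists()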