tests: Rename NeonLocalCli functions to match the 'neon_local' commands (#9195)

This makes it clearer that the functions in NeonLocalCli are just typed
wrappers around the corresponding 'neon_local' commands (a brief sketch of the
convention follows the change summary below).
Author: Heikki Linnakangas
Date: 2024-10-03 22:03:27 +03:00
Parent: 56bb1ac458
Commit: 8ef0c38b23
9 changed files with 42 additions and 43 deletions
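As a minimal, hypothetical sketch of the convention the rename follows (illustrative only, not code from this commit; TenantId is simplified to a plain string here): each NeonLocalCli method is named after the 'neon_local' subcommand it wraps, e.g. 'neon_local tenant list' -> tenant_list() and 'neon_local tenant import' -> tenant_import().

import subprocess
from typing import List


class NeonLocalCliSketch:
    """Hypothetical, simplified stand-in for the test fixture's NeonLocalCli."""

    def __init__(self, binary: str = "neon_local"):
        # Path to the CLI binary; the real fixture wires this up from the test environment.
        self.binary = binary

    def raw_cli(self, args: List[str]) -> "subprocess.CompletedProcess[str]":
        # Invoke the CLI and capture its output as text.
        return subprocess.run([self.binary, *args], capture_output=True, text=True)

    def tenant_list(self) -> "subprocess.CompletedProcess[str]":
        # Typed wrapper over `neon_local tenant list`, named after the command it runs.
        res = self.raw_cli(["tenant", "list"])
        res.check_returncode()
        return res

    def tenant_import(self, tenant_id: str) -> None:
        # Typed wrapper over `neon_local tenant import --tenant-id <id>`.
        res = self.raw_cli(["tenant", "import", "--tenant-id", str(tenant_id)])
        res.check_returncode()

Under this convention a test calls env.neon_cli.tenant_create(...) instead of create_tenant(...), matching 'neon_local tenant create'.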


@@ -170,7 +170,7 @@ class NeonLocalCli(AbstractNeonCli):
def raw_cli(self, *args, **kwargs) -> subprocess.CompletedProcess[str]:
return super().raw_cli(*args, **kwargs)
-def create_tenant(
+def tenant_create(
self,
tenant_id: TenantId,
timeline_id: TimelineId,
@@ -224,19 +224,19 @@ class NeonLocalCli(AbstractNeonCli):
res = self.raw_cli(args)
res.check_returncode()
-def import_tenant(self, tenant_id: TenantId):
+def tenant_import(self, tenant_id: TenantId):
args = ["tenant", "import", "--tenant-id", str(tenant_id)]
res = self.raw_cli(args)
res.check_returncode()
-def set_default(self, tenant_id: TenantId):
+def tenant_set_default(self, tenant_id: TenantId):
"""
Update default tenant for future operations that require tenant_id.
"""
res = self.raw_cli(["tenant", "set-default", "--tenant-id", str(tenant_id)])
res.check_returncode()
-def config_tenant(self, tenant_id: TenantId, conf: Dict[str, str]):
+def tenant_config(self, tenant_id: TenantId, conf: Dict[str, str]):
"""
Update tenant config.
"""
@@ -252,12 +252,12 @@ class NeonLocalCli(AbstractNeonCli):
res = self.raw_cli(args)
res.check_returncode()
-def list_tenants(self) -> "subprocess.CompletedProcess[str]":
+def tenant_list(self) -> "subprocess.CompletedProcess[str]":
res = self.raw_cli(["tenant", "list"])
res.check_returncode()
return res
-def create_timeline(
+def timeline_create(
self,
new_branch_name: str,
tenant_id: TenantId,
@@ -285,7 +285,7 @@ class NeonLocalCli(AbstractNeonCli):
return timeline_id
-def create_branch(
+def timeline_branch(
self,
tenant_id: TenantId,
timeline_id: TimelineId,
@@ -346,7 +346,7 @@ class NeonLocalCli(AbstractNeonCli):
res = self.raw_cli(cmd)
res.check_returncode()
-def list_timelines(self, tenant_id: TenantId) -> List[Tuple[str, TimelineId]]:
+def timeline_list(self, tenant_id: TenantId) -> List[Tuple[str, TimelineId]]:
"""
Returns a list of (branch_name, timeline_id) tuples out of parsed `neon timeline list` CLI output.
"""
@@ -455,7 +455,7 @@ class NeonLocalCli(AbstractNeonCli):
args.extend(["-m", "immediate"])
return self.raw_cli(args)
-def broker_start(
+def storage_broker_start(
self, timeout_in_seconds: Optional[int] = None
) -> "subprocess.CompletedProcess[str]":
cmd = ["storage_broker", "start"]
@@ -463,7 +463,7 @@ class NeonLocalCli(AbstractNeonCli):
cmd.append(f"--start-timeout={timeout_in_seconds}s")
return self.raw_cli(cmd)
-def broker_stop(self) -> "subprocess.CompletedProcess[str]":
+def storage_broker_stop(self) -> "subprocess.CompletedProcess[str]":
cmd = ["storage_broker", "stop"]
return self.raw_cli(cmd)
@@ -578,7 +578,7 @@ class NeonLocalCli(AbstractNeonCli):
return self.raw_cli(args, check_return_code=check_return_code)
-def map_branch(
+def mappings_map_branch(
self, name: str, tenant_id: TenantId, timeline_id: TimelineId
) -> "subprocess.CompletedProcess[str]":
"""


@@ -1344,7 +1344,7 @@ class NeonEnv:
tenant_id = tenant_id or TenantId.generate()
timeline_id = timeline_id or TimelineId.generate()
-self.neon_cli.create_tenant(
+self.neon_cli.tenant_create(
tenant_id=tenant_id,
timeline_id=timeline_id,
pg_version=self.pg_version,
@@ -1363,7 +1363,7 @@ class NeonEnv:
Update tenant config.
"""
tenant_id = tenant_id or self.initial_tenant
-self.neon_cli.config_tenant(tenant_id, conf)
+self.neon_cli.tenant_config(tenant_id, conf)
def create_branch(
self,
@@ -1376,7 +1376,7 @@ class NeonEnv:
new_timeline_id = new_timeline_id or TimelineId.generate()
tenant_id = tenant_id or self.initial_tenant
-self.neon_cli.create_branch(
+self.neon_cli.timeline_branch(
tenant_id, new_timeline_id, new_branch_name, ancestor_branch_name, ancestor_start_lsn
)
@@ -1391,7 +1391,7 @@ class NeonEnv:
timeline_id = timeline_id or TimelineId.generate()
tenant_id = tenant_id or self.initial_tenant
-self.neon_cli.create_timeline(new_branch_name, tenant_id, timeline_id, self.pg_version)
+self.neon_cli.timeline_create(new_branch_name, tenant_id, timeline_id, self.pg_version)
return timeline_id
@@ -4049,7 +4049,7 @@ class Safekeeper(LogUtils):
1) wait for remote_consistent_lsn and wal_backup_lsn on safekeeper to reach it.
2) checkpoint timeline on safekeeper, which should remove WAL before this LSN; optionally wait for that.
"""
-cli = self.http_client()
+client = self.http_client()
target_segment_file = lsn.segment_name()
@@ -4061,7 +4061,7 @@ class Safekeeper(LogUtils):
assert all(target_segment_file <= s for s in segments)
def are_lsns_advanced():
-stat = cli.timeline_status(tenant_id, timeline_id)
+stat = client.timeline_status(tenant_id, timeline_id)
log.info(
f"waiting for remote_consistent_lsn and backup_lsn on sk {self.id} to reach {lsn}, currently remote_consistent_lsn={stat.remote_consistent_lsn}, backup_lsn={stat.backup_lsn}"
)
@@ -4070,7 +4070,7 @@ class Safekeeper(LogUtils):
# xxx: max wait is long because we might be waiting for reconnection from
# pageserver to this safekeeper
wait_until(30, 1, are_lsns_advanced)
-cli.checkpoint(tenant_id, timeline_id)
+client.checkpoint(tenant_id, timeline_id)
if wait_wal_removal:
wait_until(30, 1, are_segments_removed)
@@ -4098,13 +4098,13 @@ class NeonBroker(LogUtils):
timeout_in_seconds: Optional[int] = None,
):
assert not self.running
-self.env.neon_cli.broker_start(timeout_in_seconds)
+self.env.neon_cli.storage_broker_start(timeout_in_seconds)
self.running = True
return self
def stop(self):
if self.running:
-self.env.neon_cli.broker_stop()
+self.env.neon_cli.storage_broker_stop()
self.running = False
return self
@@ -4733,10 +4733,10 @@ def flush_ep_to_pageserver(
commit_lsn: Lsn = Lsn(0)
# In principle in the absence of failures polling single sk would be enough.
for sk in env.safekeepers:
-cli = sk.http_client()
+client = sk.http_client()
# wait until compute connections are gone
-wait_walreceivers_absent(cli, tenant, timeline)
-commit_lsn = max(cli.get_commit_lsn(tenant, timeline), commit_lsn)
+wait_walreceivers_absent(client, tenant, timeline)
+commit_lsn = max(client.get_commit_lsn(tenant, timeline), commit_lsn)
# Note: depending on WAL filtering implementation, probably most shards
# won't be able to reach commit_lsn (unless gaps are also ack'ed), so this


@@ -173,7 +173,7 @@ def test_cannot_create_endpoint_on_non_uploaded_timeline(neon_env_builder: NeonE
wait_until_paused(env, "before-upload-index-pausable")
-env.neon_cli.map_branch(initial_branch, env.initial_tenant, env.initial_timeline)
+env.neon_cli.mappings_map_branch(initial_branch, env.initial_tenant, env.initial_timeline)
with pytest.raises(RuntimeError, match="ERROR: Not found: Timeline"):
env.endpoints.create_start(


@@ -109,7 +109,7 @@ def test_timeline_init_break_before_checkpoint(neon_env_builder: NeonEnvBuilder)
tenant_id = env.initial_tenant
timelines_dir = env.pageserver.timeline_dir(tenant_id)
-old_tenant_timelines = env.neon_cli.list_timelines(tenant_id)
+old_tenant_timelines = env.neon_cli.timeline_list(tenant_id)
initial_timeline_dirs = [d for d in timelines_dir.iterdir()]
# Introduce failpoint during timeline init (some intermediate files are on disk), before it's checkpointed.
@@ -121,7 +121,7 @@ def test_timeline_init_break_before_checkpoint(neon_env_builder: NeonEnvBuilder)
env.pageserver.restart(immediate=True)
# Creating the timeline didn't finish. The other timelines on tenant should still be present and work normally.
-new_tenant_timelines = env.neon_cli.list_timelines(tenant_id)
+new_tenant_timelines = env.neon_cli.timeline_list(tenant_id)
assert (
new_tenant_timelines == old_tenant_timelines
), f"Pageserver after restart should ignore non-initialized timelines for tenant {tenant_id}"
@@ -153,7 +153,7 @@ def test_timeline_init_break_before_checkpoint_recreate(
tenant_id = env.initial_tenant
timelines_dir = env.pageserver.timeline_dir(tenant_id)
-old_tenant_timelines = env.neon_cli.list_timelines(tenant_id)
+old_tenant_timelines = env.neon_cli.timeline_list(tenant_id)
initial_timeline_dirs = [d for d in timelines_dir.iterdir()]
# Some fixed timeline ID (like control plane does)
@@ -174,7 +174,7 @@ def test_timeline_init_break_before_checkpoint_recreate(
env.pageserver.restart(immediate=True)
# Creating the timeline didn't finish. The other timelines on tenant should still be present and work normally.
-new_tenant_timelines = env.neon_cli.list_timelines(tenant_id)
+new_tenant_timelines = env.neon_cli.timeline_list(tenant_id)
assert (
new_tenant_timelines == old_tenant_timelines
), f"Pageserver after restart should ignore non-initialized timelines for tenant {tenant_id}"
@@ -199,7 +199,7 @@ def test_timeline_create_break_after_dir_creation(neon_env_builder: NeonEnvBuild
tenant_id = env.initial_tenant
timelines_dir = env.pageserver.timeline_dir(tenant_id)
-old_tenant_timelines = env.neon_cli.list_timelines(tenant_id)
+old_tenant_timelines = env.neon_cli.timeline_list(tenant_id)
initial_timeline_dirs = [d for d in timelines_dir.iterdir()]
# Introduce failpoint when creating a new timeline, right after creating its directory
@@ -209,7 +209,7 @@ def test_timeline_create_break_after_dir_creation(neon_env_builder: NeonEnvBuild
# Creating the timeline didn't finish. The other timelines on tenant should still be present and work normally.
# "New" timeline is not present in the list, allowing pageserver to retry the same request
-new_tenant_timelines = env.neon_cli.list_timelines(tenant_id)
+new_tenant_timelines = env.neon_cli.timeline_list(tenant_id)
assert (
new_tenant_timelines == old_tenant_timelines
), f"Pageserver after restart should ignore non-initialized timelines for tenant {tenant_id}"


@@ -517,7 +517,7 @@ def test_historic_storage_formats(
assert metadata_summary["tenant_count"] >= 1
assert metadata_summary["timeline_count"] >= 1
-env.neon_cli.import_tenant(dataset.tenant_id)
+env.neon_cli.tenant_import(dataset.tenant_id)
# Discover timelines
timelines = env.pageserver.http_client().timeline_list(dataset.tenant_id)


@@ -31,7 +31,7 @@ def helper_compare_timeline_list(
)
)
-timelines_cli = env.neon_cli.list_timelines(initial_tenant)
+timelines_cli = env.neon_cli.timeline_list(initial_tenant)
cli_timeline_ids = sorted([timeline_id for (_, timeline_id) in timelines_cli])
assert timelines_api == cli_timeline_ids
@@ -55,8 +55,7 @@ def test_cli_timeline_list(neon_simple_env: NeonEnv):
# Check that all new branches are visible via CLI
timelines_cli = [
-timeline_id
-for (_, timeline_id) in env.neon_cli.list_timelines(tenant_id=env.initial_tenant)
+timeline_id for (_, timeline_id) in env.neon_cli.timeline_list(env.initial_tenant)
]
assert main_timeline_id in timelines_cli
@@ -67,7 +66,7 @@ def helper_compare_tenant_list(pageserver_http_client: PageserverHttpClient, env
tenants = pageserver_http_client.tenant_list()
tenants_api = sorted(map(lambda t: cast(str, t["id"]), tenants))
-res = env.neon_cli.list_tenants()
+res = env.neon_cli.tenant_list()
tenants_cli = sorted(map(lambda t: t.split()[0], res.stdout.splitlines()))
assert tenants_api == tenants_cli
@@ -91,7 +90,7 @@ def test_cli_tenant_list(neon_simple_env: NeonEnv):
# check tenant2 appeared
helper_compare_tenant_list(pageserver_http_client, env)
-res = env.neon_cli.list_tenants()
+res = env.neon_cli.tenant_list()
tenants = sorted(map(lambda t: TenantId(t.split()[0]), res.stdout.splitlines()))
assert env.initial_tenant in tenants
@@ -102,7 +101,7 @@ def test_cli_tenant_list(neon_simple_env: NeonEnv):
def test_cli_tenant_create(neon_simple_env: NeonEnv):
env = neon_simple_env
tenant_id, _ = env.create_tenant()
-timelines = env.neon_cli.list_timelines(tenant_id)
+timelines = env.neon_cli.timeline_list(tenant_id)
# an initial timeline should be created upon tenant creation
assert len(timelines) == 1
@@ -135,7 +134,7 @@ def test_cli_start_stop(neon_env_builder: NeonEnvBuilder):
env.neon_cli.pageserver_stop(env.pageserver.id)
env.neon_cli.safekeeper_stop()
env.neon_cli.storage_controller_stop(False)
-env.neon_cli.broker_stop()
+env.neon_cli.storage_broker_stop()
# Keep NeonEnv state up to date, it usually owns starting/stopping services
env.pageserver.running = False
@@ -178,7 +177,7 @@ def test_cli_start_stop_multi(neon_env_builder: NeonEnvBuilder):
# Stop this to get out of the way of the following `start`
env.neon_cli.storage_controller_stop(False)
-env.neon_cli.broker_stop()
+env.neon_cli.storage_broker_stop()
# Default start
res = env.neon_cli.raw_cli(["start"])


@@ -27,7 +27,7 @@ def test_neon_cli_basics(neon_env_builder: NeonEnvBuilder, port_distributor: Por
env.neon_cli.endpoint_start("ep-basic-main")
branch_name = "migration-check"
-env.neon_cli.create_branch(
+env.neon_cli.timeline_branch(
tenant_id=env.initial_tenant,
timeline_id=TimelineId.generate(),
new_branch_name=branch_name,


@@ -1681,7 +1681,7 @@ def test_tenant_import(neon_env_builder: NeonEnvBuilder, shard_count, remote_sto
)
# Now import it again
-env.neon_cli.import_tenant(tenant_id)
+env.neon_cli.tenant_import(tenant_id)
# Check we found the shards
describe = env.storage_controller.tenant_describe(tenant_id)


@@ -32,7 +32,7 @@ from prometheus_client.samples import Sample
def test_tenant_creation_fails(neon_simple_env: NeonEnv):
tenants_dir = neon_simple_env.pageserver.tenant_dir()
initial_tenants = sorted(
-map(lambda t: t.split()[0], neon_simple_env.neon_cli.list_tenants().stdout.splitlines())
+map(lambda t: t.split()[0], neon_simple_env.neon_cli.tenant_list().stdout.splitlines())
)
[d for d in tenants_dir.iterdir()]
@@ -59,7 +59,7 @@ def test_tenant_creation_fails(neon_simple_env: NeonEnv):
# an empty tenant dir with no config in it.
neon_simple_env.pageserver.allowed_errors.append(".*Failed to load tenant config.*")
new_tenants = sorted(
-map(lambda t: t.split()[0], neon_simple_env.neon_cli.list_tenants().stdout.splitlines())
+map(lambda t: t.split()[0], neon_simple_env.neon_cli.tenant_list().stdout.splitlines())
)
assert initial_tenants == new_tenants, "should not create new tenants"