test: copy dict to avoid error on retry (#8811)
There is no "const" in Python, so when a test modifies the global dict, the mutation persists into the retry and into any other test that runs in the same process on the runner. Fix: hand out a fresh copy of the dict so one test cannot influence another. Evidence: <https://neon-github-public-dev.s3.amazonaws.com/reports/pr-8625/10513146742/index.html#/testresult/453c4ce05ada7496>
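The failure mode, as a minimal standalone sketch (hypothetical names, not code from this commit): a test that deletes a key from a module-level dict mutates the shared object, so a retry of that test, and any later test in the same process, sees the already-modified dict.

CONFIG = {"checkpoint_distance": 1024**2, "gc_period": "0s"}

def test_attempt():
    conf = CONFIG  # aliases the global; no copy is made
    del conf["checkpoint_distance"]  # mutates the module-level dict in place

test_attempt()
test_attempt()  # retry: raises KeyError, the key is already gone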
@@ -430,12 +430,17 @@ def enable_remote_storage_versioning(
     return response
 
 
-MANY_SMALL_LAYERS_TENANT_CONFIG = {
-    "gc_period": "0s",
-    "compaction_period": "0s",
-    "checkpoint_distance": 1024**2,
-    "image_creation_threshold": 100,
-}
+def many_small_layers_tenant_config() -> Dict[str, Any]:
+    """
+    Create a new dict to avoid issues with deleting from the global value.
+    In python, the global is mutable.
+    """
+    return {
+        "gc_period": "0s",
+        "compaction_period": "0s",
+        "checkpoint_distance": 1024**2,
+        "image_creation_threshold": 100,
+    }
 
 
 def poll_for_remote_storage_iterations(remote_storage_kind: RemoteStorageKind) -> int:
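Because the helper builds the dict literal on every call, each caller gets an independent object, so deleting or overwriting keys in one test cannot leak into another. A quick demonstration of that property (assumed usage, not part of the commit):

conf_a = many_small_layers_tenant_config()
conf_b = many_small_layers_tenant_config()
del conf_a["checkpoint_distance"]
assert "checkpoint_distance" in conf_b  # each call returns a fresh dict

An equivalent fix would have been `copy.deepcopy` of the old constant; returning a new literal instead avoids keeping a mutable global around at all.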
@@ -8,9 +8,9 @@ from fixtures.neon_fixtures import (
     PgBin,
 )
 from fixtures.pageserver.utils import (
-    MANY_SMALL_LAYERS_TENANT_CONFIG,
     assert_prefix_empty,
     enable_remote_storage_versioning,
+    many_small_layers_tenant_config,
     wait_for_upload,
 )
 from fixtures.remote_storage import RemoteStorageKind, s3_storage
@@ -33,7 +33,7 @@ def test_tenant_s3_restore(
 
     # change it back after initdb, recovery doesn't work if the two
     # index_part.json uploads happen at same second or too close to each other.
-    initial_tenant_conf = MANY_SMALL_LAYERS_TENANT_CONFIG
+    initial_tenant_conf = many_small_layers_tenant_config()
     del initial_tenant_conf["checkpoint_distance"]
 
     env = neon_env_builder.init_start(initial_tenant_conf)
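This `del` is the call site that originally corrupted the global: with `initial_tenant_conf` aliasing `MANY_SMALL_LAYERS_TENANT_CONFIG`, the first run removed "checkpoint_distance" from the constant itself, and the retry's `del` raised `KeyError`. With the helper, each run deletes from its own copy (sketch):

conf = many_small_layers_tenant_config()
del conf["checkpoint_distance"]  # only this run's dict is touched
conf = many_small_layers_tenant_config()  # a retry starts from a clean copy
del conf["checkpoint_distance"]  # no KeyError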
@@ -50,7 +50,7 @@ def test_tenant_s3_restore(
     tenant_id = env.initial_tenant
 
     # now lets create the small layers
-    ps_http.set_tenant_config(tenant_id, MANY_SMALL_LAYERS_TENANT_CONFIG)
+    ps_http.set_tenant_config(tenant_id, many_small_layers_tenant_config())
 
     # Default tenant and the one we created
     assert ps_http.get_metric_value("pageserver_tenant_manager_slots", {"mode": "attached"}) == 1
@@ -23,11 +23,11 @@ from fixtures.neon_fixtures import (
 )
 from fixtures.pageserver.http import PageserverHttpClient
 from fixtures.pageserver.utils import (
-    MANY_SMALL_LAYERS_TENANT_CONFIG,
     assert_prefix_empty,
     assert_prefix_not_empty,
     enable_remote_storage_versioning,
     list_prefix,
+    many_small_layers_tenant_config,
     remote_storage_delete_key,
     timeline_delete_wait_completed,
 )
@@ -654,7 +654,7 @@ def test_storage_controller_s3_time_travel_recovery(
         tenant_id,
         shard_count=2,
         shard_stripe_size=8192,
-        tenant_config=MANY_SMALL_LAYERS_TENANT_CONFIG,
+        tenant_config=many_small_layers_tenant_config(),
     )
 
     # Check that the consistency check passes
@@ -9,9 +9,9 @@ from fixtures.neon_fixtures import (
 )
 from fixtures.pageserver.http import PageserverApiException
 from fixtures.pageserver.utils import (
-    MANY_SMALL_LAYERS_TENANT_CONFIG,
     assert_prefix_empty,
     assert_prefix_not_empty,
+    many_small_layers_tenant_config,
     wait_for_upload,
 )
 from fixtures.remote_storage import RemoteStorageKind, s3_storage
@@ -76,7 +76,7 @@ def test_tenant_delete_smoke(
 
     env.neon_cli.create_tenant(
         tenant_id=tenant_id,
-        conf=MANY_SMALL_LAYERS_TENANT_CONFIG,
+        conf=many_small_layers_tenant_config(),
     )
 
     # Default tenant and the one we created
@@ -215,7 +215,7 @@ def test_tenant_delete_races_timeline_creation(neon_env_builder: NeonEnvBuilder)
     # (and there is no way to reconstruct the used remote storage kind)
     remote_storage_kind = RemoteStorageKind.MOCK_S3
     neon_env_builder.enable_pageserver_remote_storage(remote_storage_kind)
-    env = neon_env_builder.init_start(initial_tenant_conf=MANY_SMALL_LAYERS_TENANT_CONFIG)
+    env = neon_env_builder.init_start(initial_tenant_conf=many_small_layers_tenant_config())
     ps_http = env.pageserver.http_client()
     tenant_id = env.initial_tenant
 
@@ -330,7 +330,7 @@ def test_tenant_delete_scrubber(pg_bin: PgBin, neon_env_builder: NeonEnvBuilder)
 
     remote_storage_kind = RemoteStorageKind.MOCK_S3
     neon_env_builder.enable_pageserver_remote_storage(remote_storage_kind)
-    env = neon_env_builder.init_start(initial_tenant_conf=MANY_SMALL_LAYERS_TENANT_CONFIG)
+    env = neon_env_builder.init_start(initial_tenant_conf=many_small_layers_tenant_config())
 
     ps_http = env.pageserver.http_client()
     # create a tenant separate from the main tenant so that we have one remaining
@@ -16,9 +16,9 @@ from fixtures.neon_fixtures import (
 )
 from fixtures.pageserver.http import PageserverApiException
 from fixtures.pageserver.utils import (
-    MANY_SMALL_LAYERS_TENANT_CONFIG,
     assert_prefix_empty,
     assert_prefix_not_empty,
+    many_small_layers_tenant_config,
     poll_for_remote_storage_iterations,
     timeline_delete_wait_completed,
     wait_for_last_record_lsn,
@@ -782,7 +782,7 @@ def test_timeline_delete_resumed_on_attach(
     remote_storage_kind = s3_storage()
     neon_env_builder.enable_pageserver_remote_storage(remote_storage_kind)
 
-    env = neon_env_builder.init_start(initial_tenant_conf=MANY_SMALL_LAYERS_TENANT_CONFIG)
+    env = neon_env_builder.init_start(initial_tenant_conf=many_small_layers_tenant_config())
 
     tenant_id = env.initial_tenant
 