Infra paths are slightly different from those in our testing environment. Added a test against the actual infra, and restructured our tests to match the infra layout, so the existing tests should still pass.
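The heart of the change is the remote path layout. A minimal sketch of the before/after, using an illustrative `pg_version` (the value is a placeholder, not taken from this commit):

```python
# Illustration only: the remote prefix layouts before and after this commit.
pg_version = "14"  # placeholder version

# Old test assumption: the bucket mirrored the full local PostgreSQL layout.
old_pub_ext_root = f"v{pg_version}/share/postgresql/extension"

# Actual infra: control/SQL files sit directly under share/extension.
new_pub_ext_root = f"v{pg_version}/share/extension"

print(old_pub_ext_root)  # v14/share/postgresql/extension
print(new_pub_ext_root)  # v14/share/extension
```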
@@ -12,7 +12,7 @@ use std::str;
 use tokio::io::AsyncReadExt;
 use tracing::info;
 
-const SHARE_EXT_PATH: &str = "share/postgresql/extension";
+const SHARE_EXT_PATH: &str = "share/extension";
 
 fn get_pg_config(argument: &str, pgbin: &str) -> String {
     // gives the result of `pg_config [argument]`
@@ -174,6 +174,20 @@ pub async fn get_available_libraries(
         );
     }
 
+    // TODO: shouldn't we filter this by .so?
+    /*
+    right now we are getting weird stuff like
+    "pg_isolation_regress": RemotePath("v14/lib/pgxs/src/test/isolation/pg_isolation_regress"),
+    "Makefile.shlib": RemotePath("v14/lib/pgxs/src/Makefile.shlib")
+    which we should not try to download...
+
+    the expected format is
+    "timescaledb-2.10.1.so": RemotePath("v14/lib/timescaledb-2.10.1.so")
+
+    Q: do we care about files like this?
+    "libecpg.so.6": RemotePath("v14/lib/libecpg.so.6")
+    "libpgtypes.so.3": RemotePath("v14/lib/libpgtypes.so.3")
+    */
     let all_available_libraries = list_files_in_prefixes(remote_storage, &paths).await?;
 
     info!("list of library files {:?}", &all_available_libraries);
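The TODO above asks whether the listing should be filtered down to shared libraries. A hedged sketch of that idea, in Python rather than the repo's Rust for brevity, with a plain dict standing in for the map of `RemotePath`s (this is not the repo's actual implementation):

```python
# Stand-in for the name -> RemotePath map produced by list_files_in_prefixes.
all_available_libraries = {
    "pg_isolation_regress": "v14/lib/pgxs/src/test/isolation/pg_isolation_regress",
    "Makefile.shlib": "v14/lib/pgxs/src/Makefile.shlib",
    "timescaledb-2.10.1.so": "v14/lib/timescaledb-2.10.1.so",
    "libecpg.so.6": "v14/lib/libecpg.so.6",
}


def is_shared_library(name: str) -> bool:
    # Matches plain *.so files as well as versioned ones like libecpg.so.6.
    return ".so" in name


libraries = {k: v for k, v in all_available_libraries.items() if is_shared_library(k)}
print(sorted(libraries))  # ['libecpg.so.6', 'timescaledb-2.10.1.so']
```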
@@ -1,3 +1,4 @@
+import json
 import os
 from contextlib import closing
 from io import BytesIO
@@ -22,7 +23,7 @@ comment = 'This is a mock extension'
 default_version = '1.0'
 module_pathname = '$libdir/test_ext{i}'
 relocatable = true"""
-    return output
+    return BytesIO(bytes(output, "utf-8"))
 
 
 def sql_file_content():
@@ -33,7 +34,11 @@ def sql_file_content():
     IMMUTABLE
     RETURNS NULL ON NULL INPUT;
     """
-    return output
+    return BytesIO(bytes(output, "utf-8"))
+
+
+def fake_library_content():
+    return BytesIO(bytes("\n111\n", "utf-8"))
 
 
 # Prepare some mock extension files and upload them to the bucket
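The refactor above makes the content helpers return `BytesIO` objects instead of strings: `upload_fileobj` (the boto3 S3 client API these calls match) expects a file-like object, so call sites can now pass helper results straight through. A minimal standalone sketch; the endpoint URL and bucket name are hypothetical:

```python
from io import BytesIO

import boto3  # upload_fileobj below is the boto3 S3 client API


def fake_library_content() -> BytesIO:
    # Any readable binary stream works; real .so contents are not needed.
    return BytesIO(bytes("\n111\n", "utf-8"))


s3 = boto3.client("s3", endpoint_url="http://127.0.0.1:9000")  # hypothetical mock-S3 endpoint
s3.upload_fileobj(fake_library_content(), "mock-bucket", "v14/lib/test_lib0.so")
```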
@@ -47,9 +52,10 @@ def prepare_mock_ext_storage(
 ):
     bucket_prefix = ext_remote_storage.prefix_in_bucket
     custom_prefix = str(tenant_id)
-    PUB_EXT_ROOT = f"v{pg_version}/share/postgresql/extension"
-    PRIVATE_EXT_ROOT = f"v{pg_version}/{custom_prefix}/share/postgresql/extension"
-    LOCAL_EXT_ROOT = f"pg_install/{PUB_EXT_ROOT}"
+    PUB_EXT_ROOT = f"v{pg_version}/share/extension"
+    PRIVATE_EXT_ROOT = f"v{pg_version}/{custom_prefix}/share/extension"
+    LOCAL_EXT_ROOT = f"pg_install/{pg_version}/share/postgresql/extension"
+
 
     PUB_LIB_ROOT = f"v{pg_version}/lib"
     PRIVATE_LIB_ROOT = f"v{pg_version}/{custom_prefix}/lib"
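For concreteness, the new roots resolved for an illustrative version and tenant (both values are placeholders, not taken from the commit):

```python
pg_version = "14"               # placeholder
custom_prefix = "a1b2c3d4e5f6"  # hypothetical tenant id

PUB_EXT_ROOT = f"v{pg_version}/share/extension"
PRIVATE_EXT_ROOT = f"v{pg_version}/{custom_prefix}/share/extension"
PUB_LIB_ROOT = f"v{pg_version}/lib"
PRIVATE_LIB_ROOT = f"v{pg_version}/{custom_prefix}/lib"

print(PRIVATE_EXT_ROOT)  # v14/a1b2c3d4e5f6/share/extension
print(PRIVATE_LIB_ROOT)  # v14/a1b2c3d4e5f6/lib
```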
@@ -70,56 +76,53 @@ def prepare_mock_ext_storage(
 
     # Upload several test_ext{i}.control files to the bucket
     for i in range(NUM_EXT):
-        public_ext = BytesIO(bytes(control_file_content("public", i), "utf-8"))
         public_remote_name = f"{bucket_prefix}/{PUB_EXT_ROOT}/test_ext{i}.control"
-        public_local_name = f"{LOCAL_EXT_ROOT}/test_ext{i}.control"
-        custom_ext = BytesIO(bytes(control_file_content(str(tenant_id), i), "utf-8"))
         custom_remote_name = f"{bucket_prefix}/{PRIVATE_EXT_ROOT}/custom_ext{i}.control"
-        custom_local_name = f"{LOCAL_EXT_ROOT}/custom_ext{i}.control"
-        cleanup_files += [public_local_name, custom_local_name]
+        cleanup_files += [
+            f"{LOCAL_EXT_ROOT}/test_ext{i}.control",
+            f"{LOCAL_EXT_ROOT}/custom_ext{i}.control",
+        ]
 
         log.info(f"Uploading control file to {public_remote_name}")
         remote_storage_client.upload_fileobj(
-            public_ext, ext_remote_storage.bucket_name, public_remote_name
+            control_file_content("public", i), ext_remote_storage.bucket_name, public_remote_name
         )
         log.info(f"Uploading control file to {custom_remote_name}")
         remote_storage_client.upload_fileobj(
-            custom_ext, ext_remote_storage.bucket_name, custom_remote_name
+            control_file_content(str(tenant_id), i),
+            ext_remote_storage.bucket_name,
+            custom_remote_name,
         )
 
     # Upload SQL file for the extension we're going to create
     sql_filename = "test_ext0--1.0.sql"
     test_sql_public_remote_path = f"{bucket_prefix}/{PUB_EXT_ROOT}/{sql_filename}"
-    test_sql_local_path = f"{LOCAL_EXT_ROOT}/{sql_filename}"
-    test_ext_sql_file = BytesIO(bytes(sql_file_content(), "utf-8"))
     remote_storage_client.upload_fileobj(
-        test_ext_sql_file,
+        sql_file_content(),
         ext_remote_storage.bucket_name,
         test_sql_public_remote_path,
    )
-    cleanup_files += [test_sql_local_path]
+    cleanup_files += [f"{LOCAL_EXT_ROOT}/{sql_filename}"]
 
     # upload some fake library files
     for i in range(2):
-        public_library = BytesIO(bytes("\n111\n", "utf-8"))
         public_remote_name = f"{bucket_prefix}/{PUB_LIB_ROOT}/test_lib{i}.so"
-        public_local_name = f"{LOCAL_LIB_ROOT}/test_lib{i}.so"
-        custom_library = BytesIO(bytes("\n111\n", "utf-8"))
         custom_remote_name = f"{bucket_prefix}/{PRIVATE_LIB_ROOT}/custom_lib{i}.so"
-        custom_local_name = f"{LOCAL_LIB_ROOT}/custom_lib{i}.so"
 
-        log.info(f"uploading library to {public_remote_name}")
-        log.info(f"uploading library to {custom_remote_name}")
+
+        log.info(f"uploading fake library to {public_remote_name}")
         remote_storage_client.upload_fileobj(
-            public_library,
+            fake_library_content(),
             ext_remote_storage.bucket_name,
             public_remote_name,
         )
+
+        log.info(f"uploading fake library to {custom_remote_name}")
         remote_storage_client.upload_fileobj(
-            custom_library,
+            fake_library_content(),
             ext_remote_storage.bucket_name,
             custom_remote_name,
         )
-        cleanup_files += [public_local_name, custom_local_name]
+        cleanup_files += [f"{LOCAL_LIB_ROOT}/test_lib{i}.so", f"{LOCAL_LIB_ROOT}/custom_lib{i}.so"]
 
     return cleanup_files
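`prepare_mock_ext_storage` returns `cleanup_files` so the test can delete whatever the compute node downloaded locally. A sketch of the consuming loop, mirroring the deletion code visible in the final hunk (the path is illustrative):

```python
import os

cleanup_files = ["pg_install/14/share/postgresql/extension/test_ext0.control"]  # illustrative
for file in cleanup_files:
    try:
        os.remove(file)
        print(f"Deleted {file}")
    except FileNotFoundError:
        print(f"{file} does not exist, so cannot be deleted")
```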
@@ -136,7 +139,8 @@ def prepare_mock_ext_storage(
 # export AWS_SECURITY_TOKEN='test'
 # export AWS_SESSION_TOKEN='test'
 # export AWS_DEFAULT_REGION='us-east-1'
 #
+
 
 @pytest.mark.parametrize("remote_storage_kind", available_s3_storages())
 def test_remote_extensions(
     neon_env_builder: NeonEnvBuilder,
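The commented exports above show the shell environment used when running the S3-backed tests; the same setup as a Python sketch, using only the variables visible in this hunk:

```python
import os

# Dummy credentials for a mock S3 endpoint; real runs need real values.
os.environ.update(
    {
        "AWS_SECURITY_TOKEN": "test",
        "AWS_SESSION_TOKEN": "test",
        "AWS_DEFAULT_REGION": "us-east-1",
    }
)
```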
@@ -245,3 +249,47 @@ def test_remote_extensions(
                 log.info(f"Deleted {file}")
             except FileNotFoundError:
                 log.info(f"{file} does not exist, so cannot be deleted")
+
+
+"""
+This test runs against the actual infra (real S3).
+Note in particular that we don't need to set up a bucket (real or mock),
+because we are testing files already uploaded as part of CI/CD.
+"""
+
+
+def test_remote_extensions_in_bucket(neon_env_builder: NeonEnvBuilder):
+    neon_env_builder.enable_remote_storage(
+        remote_storage_kind=RemoteStorageKind.REAL_S3,
+        test_name="test_remote_extensions_in_bucket",
+        # Don't set up a mock extension bucket; we test against the real one below.
+        enable_remote_extensions=False,
+    )
+    neon_env_builder.num_safekeepers = 3
+    env = neon_env_builder.init_start()
+    tenant_id, _ = env.neon_cli.create_tenant()
+    env.neon_cli.create_timeline("test_remote_extensions_in_bucket", tenant_id=tenant_id)
+
+    # Start a compute node and check that it can download the extensions
+    # and use them to CREATE EXTENSION and LOAD 'library.so'
+    remote_ext_config = {
+        "bucket": "neon-dev-extensions-eu-central-1",
+        "region": "eu-central-1",
+        "endpoint": None,
+        "prefix": "5412197734",  # build tag
+    }
+    endpoint = env.endpoints.create_start(
+        "test_remote_extensions_in_bucket",
+        tenant_id=tenant_id,
+        remote_ext_config=json.dumps(remote_ext_config),
+    )
+
+    with closing(endpoint.connect()) as conn:
+        with conn.cursor() as cur:
+            cur.execute("SELECT * FROM pg_available_extensions")
+            all_extensions = [x[0] for x in cur.fetchall()]
+            log.info(all_extensions)
+
+    # TODO: test download anon
+    # TODO: test load library / create extension
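A hedged sketch of what those trailing TODOs might look like once filled in. It reuses `endpoint` and `closing` from the test above, and `anon` is the extension named in the first TODO; the exact checks are not part of this commit:

```python
with closing(endpoint.connect()) as conn:
    with conn.cursor() as cur:
        # CREATE EXTENSION runs the downloaded SQL script;
        # LOAD takes the shared library name.
        cur.execute("CREATE EXTENSION anon")
        cur.execute("LOAD 'anon'")
```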