Real S3 and tenant-specific files

This commit is contained in:
Alek Westover
2023-06-26 15:25:20 -04:00
parent 7667fdc5c8
commit a2e154f07b
5 changed files with 39 additions and 58 deletions

View File

@@ -190,21 +190,22 @@ fn main() -> Result<()> {
if let Some(ref ext_remote_storage) = ext_remote_storage {
new_state.extensions.available_extensions =
rt.block_on(get_available_extensions(&ext_remote_storage, pgbin, None))?;
}
// append private tenant extensions
// TODO not implemented yet
// let private_ext_list = rt.block_on(get_available_extensions(
// &ext_remote_storage,
// pgbin,
// tenant_id,
// ))?;
// new_state.extensions.available_extensions.extend(private_ext_list);
// append private tenant extensions
let private_ext_list = rt.block_on(get_available_extensions(
&ext_remote_storage,
pgbin,
tenant_id,
))?;
new_state
.extensions
.available_extensions
.extend(private_ext_list);
}
spec_set = true;
} else {
spec_set = false;
tenant_id = None;
}
let compute_node = ComputeNode {
connstr: Url::parse(connstr).context("cannot parse connstr as a URL")?,

View File

@@ -92,9 +92,10 @@ pub async fn get_available_extensions(
let remote_sharedir = match tenant_id {
None => RemotePath::new(&Path::new(&pg_version).join("share/postgresql/extension"))?,
Some(tenant_id) => RemotePath::new(
&Path::new(&pg_version)
.join(&tenant_id.to_string())
.join("share/postgresql/extension"),
// &Path::new(&pg_version)
// .join(&tenant_id.to_string())
// .join("share/postgresql/extension"),
Path::new(&tenant_id.to_string()),
)?,
};

View File

@@ -349,11 +349,17 @@ impl RemoteStorage for S3Bucket {
/// See the doc for `RemoteStorage::list_files`
async fn list_files(&self, folder: Option<&RemotePath>) -> anyhow::Result<Vec<RemotePath>> {
// TODO: if bucket prefix is empty, folder is prefixed with a "/" I think. Is this desired?
let folder_name = folder
let mut folder_name = folder
.map(|p| self.relative_path_to_s3_object(p))
.or_else(|| self.prefix_in_bucket.clone());
// remove leading "/" if one exists
if let Some(folder_name_slash) = folder_name.clone() {
if folder_name_slash.starts_with(REMOTE_STORAGE_PREFIX_SEPARATOR) {
folder_name = Some(folder_name_slash[1..].to_string());
}
}
// AWS may need to break the response into several parts
let mut continuation_token = None;
let mut all_files = vec![];

View File

@@ -667,7 +667,7 @@ class NeonEnvBuilder:
)
elif remote_storage_kind == RemoteStorageKind.REAL_S3:
self.enable_real_s3_remote_storage(
test_name=test_name,
test_name=test_name,
force_enable=force_enable,
enable_remote_extensions=enable_remote_extensions,
)

View File

@@ -11,28 +11,6 @@ from fixtures.neon_fixtures import (
)
from fixtures.pg_version import PgVersion
"""
TODO Alek:
Calling list_files on a non-existent path returns [] (as expected) but then
somehow causes the program to crash (for both real and mock S3 storage)
stderr: command failed: unexpected compute status: Empty
- real S3 tests: I think the paths were slightly different from what I was expecting
- clean up the junk I put in the bucket
- libs/remote_storage/src/s3_bucket.rs TODO // TODO: if bucket prefix is empty,
the folder is prefixed with a "/" I think. Is this desired?
"""
"""
For local test runs with mock S3, make sure the following environment variables are set:
export AWS_ACCESS_KEY_ID='test'
export AWS_SECRET_ACCESS_KEY='test'
export AWS_SECURITY_TOKEN='test'
export AWS_SESSION_TOKEN='test'
export AWS_DEFAULT_REGION='us-east-1'
"""
def ext_contents(owner, i):
output = f"""# mock {owner} extension{i}
@@ -43,7 +21,10 @@ relocatable = true"""
return output
@pytest.mark.parametrize("remote_storage_kind", [RemoteStorageKind.MOCK_S3])
# NOTE: you must have appropriate AWS credentials to run REAL_S3 test.
@pytest.mark.parametrize(
"remote_storage_kind", [RemoteStorageKind.MOCK_S3, RemoteStorageKind.REAL_S3]
)
def test_file_download(
neon_env_builder: NeonEnvBuilder, remote_storage_kind: RemoteStorageKind, pg_version: PgVersion
):
@@ -54,8 +35,6 @@ def test_file_download(
Finally, we list available extensions and assert that test_ext is present
"""
## temporarily disable RemoteStorageKind.REAL_S3
neon_env_builder.enable_remote_storage(
remote_storage_kind=remote_storage_kind,
test_name="test_file_download",
@@ -81,19 +60,18 @@ def test_file_download(
public_remote_name = f"{BUCKET_PREFIX}/{PUB_EXT_ROOT}/test_ext{i}.control"
public_local_name = f"pg_install/{PUB_EXT_ROOT}/test_ext{i}.control"
# private extensions
BytesIO(bytes(ext_contents(str(tenant_id), i), "utf-8"))
f"{BUCKET_PREFIX}/{str(tenant_id)}/private_ext{i}.control"
private_ext = BytesIO(bytes(ext_contents(str(tenant_id), i), "utf-8"))
private_remote_name = f"{BUCKET_PREFIX}/{str(tenant_id)}/private_ext{i}.control"
private_local_name = f"pg_install/{PUB_EXT_ROOT}/private_ext{i}.control"
cleanup_files += [public_local_name, private_local_name]
if remote_storage_kind == RemoteStorageKind.MOCK_S3:
env.remote_storage_client.upload_fileobj(
public_ext, env.ext_remote_storage.bucket_name, public_remote_name
)
# env.remote_storage_client.upload_fileobj(
# private_ext, env.ext_remote_storage.bucket_name, private_remote_name
# )
env.remote_storage_client.upload_fileobj(
public_ext, env.ext_remote_storage.bucket_name, public_remote_name
)
env.remote_storage_client.upload_fileobj(
private_ext, env.ext_remote_storage.bucket_name, private_remote_name
)
TEST_EXT_SQL_PATH = f"v{pg_version}/share/postgresql/extension/test_ext0--1.0.sql"
test_ext_sql_file = BytesIO(
@@ -119,17 +97,12 @@ def test_file_download(
111
"""
)
# TODO: maybe if we are using REAL_S3 storage, we should not upload files
# or at least, maybe we should delete them afterwards
env.remote_storage_client.upload_fileobj(
test_lib_file,
env.ext_remote_storage.bucket_name,
os.path.join(BUCKET_PREFIX, TEST_LIB_PATH),
)
tenant, _ = env.neon_cli.create_tenant()
env.neon_cli.create_timeline("test_file_download", tenant_id=tenant)
region = "us-east-1"
if remote_storage_kind == RemoteStorageKind.REAL_S3:
region = "eu-central-1"
@@ -146,7 +119,7 @@ def test_file_download(
try:
endpoint = env.endpoints.create_start(
"test_file_download",
tenant_id=tenant,
tenant_id=tenant_id,
remote_ext_config=remote_ext_config,
config_lines=["log_min_messages=debug3"],
)
@@ -159,7 +132,7 @@ def test_file_download(
log.info(all_extensions)
for i in range(5):
assert f"test_ext{i}" in all_extensions
# assert f"private_ext{i}" in all_extensions
assert f"private_ext{i}" in all_extensions
cur.execute("CREATE EXTENSION test_ext0")
cur.execute("SELECT extname FROM pg_extension")
@@ -181,7 +154,7 @@ def test_file_download(
finally:
# cleanup downloaded extensions
# TODO: clean up downloaded libraries too
# TODO: make sure this runs even if the test fails
# This runs even if the test fails
# this is important because if the files aren't cleaned up then the test can
# pass even without successfully downloading the files if a previous run (or
# run with different type of remote storage) of the test did download the