get ready for next step

Alek Westover
2023-06-12 14:04:12 -04:00
parent 6990102bb2
commit eebe9c513f
5 changed files with 63 additions and 106 deletions

View File

@@ -1,3 +1,7 @@
/*
* This is a MWE of using the aws-sdk-s3 crate to download a file from an S3 bucket
* */
use aws_sdk_s3::{self, config::Region, Error};
use aws_config::{self, meta::region::RegionProviderChain};
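
Only the header of this MWE changes in this hunk; for context, the direct-SDK flow it exercises looks roughly like the commented-out snippet near the end of this commit. A minimal sketch, assuming the bucket and key from the comments below and the usual tokio/anyhow setup:

```
use aws_config::{self, meta::region::RegionProviderChain};
use aws_sdk_s3::{self, config::Region, Client};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Resolve a region: prefer the environment/profile, fall back to eu-central-1.
    let region_provider =
        RegionProviderChain::default_provider().or_else(Region::new("eu-central-1"));
    let shared_config = aws_config::from_env().region(region_provider).load().await;
    let client = Client::new(&shared_config);

    // Fetch one object and collect its body into memory.
    // Bucket and key are taken from the comments elsewhere in this commit.
    let response = client
        .get_object()
        .bucket("neon-dev-extensions")
        .key("fuzzystrmatch.control")
        .send()
        .await?;
    let data = response.body.collect().await?.to_vec();
    println!("downloaded {} bytes", data.len());
    Ok(())
}
```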

View File

@@ -1,77 +0,0 @@
/*
* The following code attempts to actually download a file from the S3 bucket specified in
* `pageserver.toml` as:
* ```
* [remote_storage]
* bucket_name = 'neon-dev-extensions'
* bucket_region = 'eu-central-1'
* ```
*
* Note: must run `export AWS_PROFILE=PowerUserAccess-11111111111` for your SSO credentials
* to get loaded; alternatively go get other credentials. But SSO is better.
*
* Next steps:
* 1. **make it work with AWS** (this code hopefully!)
* 2. make it work for downloading multiple files, not just a single file
* 3. integrate it with compute_ctl
* 4. actually upload stuff to the bucket so that it can be downloaded. Does this let us
* modify `Dockerfile.computenode` to delete the extension loading that happens there?
* (a rough upload sketch follows after this file)
* 5. How do the tenants upload extensions?
* 6. Maybe think about duplicating less stuff.
* */
use remote_storage::*;
use std::path::Path;
use std::fs::File;
use std::io::{BufWriter, Write};
use toml_edit;
use anyhow;
use tokio::io::AsyncReadExt;
use remote_storage::GenericRemoteStorage::AwsS3;
async fn download_file() -> anyhow::Result<()> {
// read configurations from `pageserver.toml`
let cfg_file_path = Path::new("./../.neon/pageserver.toml");
let cfg_file_contents = std::fs::read_to_string(cfg_file_path).unwrap();
let toml = cfg_file_contents
.parse::<toml_edit::Document>()
.expect("Error parsing toml");
let remote_storage_data = toml.get("remote_storage")
.expect("field should be present");
let remote_storage_config = RemoteStorageConfig::from_toml(remote_storage_data)
.expect("error parsing toml")
.expect("error parsing toml");
println!("CONFIG LGTM!!!\n {:?}", remote_storage_config);
// query S3 bucket
let remote_storage = GenericRemoteStorage::from_config(&remote_storage_config)?;
let from_path = "neon-dev-extensions/fuzzystrmatch.control";
let remote_from_path = RemotePath::new(Path::new(from_path))?;
if let AwsS3(printablebucket) = &remote_storage {
println!("S3Bucket looks fine, AFAICT{:?}", printablebucket);
}
println!("{:?}"&remote_from_path);
let mut data = remote_storage.download(&remote_from_path).await.expect("download failed");
/*
let mut write_data_buffer = Vec::new();
data.download_stream.read_to_end(&mut write_data_buffer).await?;
// write `data` to a file locally
let f = File::create("alek.out").expect("problem creating file");
let mut f = BufWriter::new(f);
f.write_all(&mut write_data_buffer).expect("error writing data");
*/
Ok(())
}
#[tokio::main]
async fn main() {
match download_file().await {
Err(e) => println!("Err: {:?}", e),
_ => println!("SUCCESS"),
}
}
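
Step 4 in the list above (actually uploading extensions to the bucket) has no code in this commit yet. A rough, hypothetical sketch with the plain SDK, assuming the same bucket as the comments and that `ByteStream` lives under `aws_sdk_s3::primitives` in the SDK version in use:

```
use aws_sdk_s3::{primitives::ByteStream, Client};

// Hypothetical helper: push a local extension file into the bucket so the
// download path has something to fetch. The client would be built the same
// way as in the get_object sketch earlier in this commit.
async fn upload_extension(client: &Client, local_path: &str, key: &str) -> anyhow::Result<()> {
    let bytes = std::fs::read(local_path)?;
    client
        .put_object()
        .bucket("neon-dev-extensions")
        .key(key)
        .body(ByteStream::from(bytes))
        .send()
        .await?;
    Ok(())
}
```

It would be called from an async context, e.g. `upload_extension(&client, "fuzzystrmatch.control", "fuzzystrmatch.control").await?`.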

View File

@@ -0,0 +1,54 @@
/* This is a MWE of using our RemoteStorage API to call the aws stuff and download a file
*
* */
use remote_storage::*;
use std::path::Path;
use std::fs::File;
use std::io::{BufWriter, Write};
use toml_edit;
use anyhow;
use tokio::io::AsyncReadExt;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let from_path = "fuzzystrmatch.control";
let remote_from_path = RemotePath::new(Path::new(from_path))?;
println!("{:?}", remote_from_path.clone());
// read configurations from `pageserver.toml`
let cfg_file_path = Path::new("./../.neon/pageserver.toml");
let cfg_file_contents = std::fs::read_to_string(cfg_file_path).unwrap();
let toml = cfg_file_contents
.parse::<toml_edit::Document>()
.expect("Error parsing toml");
let remote_storage_data = toml.get("remote_storage")
.expect("field should be present");
let remote_storage_config = RemoteStorageConfig::from_toml(remote_storage_data)
.expect("error parsing toml")
.expect("error parsing toml");
// query S3 bucket
let remote_storage = GenericRemoteStorage::from_config(&remote_storage_config)?;
let from_path = "fuzzystrmatch.control";
let remote_from_path = RemotePath::new(Path::new(from_path))?;
println!("{:?}", remote_from_path.clone());
// if let GenericRemoteStorage::AwsS3(mybucket) = remote_storage {
// println!("{:?}",mybucket.relative_path_to_s3_object(&remote_from_path));
// }
let mut data = remote_storage.download(&remote_from_path).await.expect("data yay");
let mut write_data_buffer = Vec::new();
data.download_stream.read_to_end(&mut write_data_buffer).await?;
// write `data` to a file locally
let f = File::create("alek.out").expect("problem creating file");
let mut f = BufWriter::new(f);
f.write_all(&mut write_data_buffer).expect("error writing data");
// let stuff = response.body;
// let data = stuff.collect().await.expect("error reading data").to_vec();
// println!("data: {:?}", std::str::from_utf8(&data));
Ok(())
}

View File

@@ -1,17 +1,5 @@
/*
* The following code **works** for "downloading" a file stored at the fake bucket specified in
* `pageserver.toml` as:
* ```
* [remote_storage]
* local_path = '../fakes3'
* ```
*
* Next steps:
* 1. make it work with AWS
* 2. make it work for downloading multiple files, not just a single file
* 3. integrate it with compute_ctl
* 4. actually upload stuff to the bucket? so that it can be downloaded. Does this allow us to
* modify `Dockerfile.computenode`? to delete the extension loading that is happening there?
* 5. How do the tenants upload extensions?
* 6. Maybe think about duplicating less stuff.
* This is a MWE of "downloading" a local file from a fake local bucket
* */
use remote_storage::*;
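
For comparison with the S3 path, the fake-local-bucket flow described above can be sketched end to end through the same config machinery; a minimal sketch, assuming the `remote_storage` APIs used elsewhere in this commit and a `../fakes3` directory that already contains the file:

```
use std::path::Path;

use remote_storage::{GenericRemoteStorage, RemotePath, RemoteStorageConfig};
use tokio::io::AsyncReadExt;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Parse a [remote_storage] section that points at a local directory
    // instead of a real S3 bucket.
    let toml = "[remote_storage]\nlocal_path = '../fakes3'"
        .parse::<toml_edit::Document>()
        .expect("Error parsing toml");
    let remote_storage_config = RemoteStorageConfig::from_toml(
        toml.get("remote_storage").expect("field should be present"),
    )
    .expect("error parsing toml")
    .expect("remote_storage section should be present");

    // The same GenericRemoteStorage interface serves both the fake local
    // bucket and the real S3 bucket.
    let remote_storage = GenericRemoteStorage::from_config(&remote_storage_config)?;
    let remote_from_path = RemotePath::new(Path::new("fuzzystrmatch.control"))?;
    let mut download = remote_storage
        .download(&remote_from_path)
        .await
        .expect("download failed");

    let mut buf = Vec::new();
    download.download_stream.read_to_end(&mut buf).await?;
    println!("downloaded {} bytes", buf.len());
    Ok(())
}
```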

View File

@@ -1,3 +1,7 @@
/* **WIP**
* This is a MWE of using our RemoteStorage API to call the aws stuff and download multiple files
* */
use remote_storage::*;
use std::path::Path;
use std::fs::File;
@@ -6,22 +10,6 @@ use toml_edit;
use anyhow;
use tokio::io::AsyncReadExt;
// let region_provider = RegionProviderChain::first_try(Region::new("eu-central-1"))
// .or_default_provider()
// .or_else(Region::new("eu-central-1"));
// let shared_config = aws_config::from_env().region(region_provider).load().await;
// let client = aws_sdk_s3::Client::new(&shared_config);
// let bucket_name = "neon-dev-extensions";
// let object_key = "fuzzystrmatch.control";
// let response = client
// .get_object()
// .bucket(bucket_name)
// .key(object_key)
// .send()
// .await?;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let from_path = "fuzzystrmatch.control";