diff --git a/Cargo.lock b/Cargo.lock
index 3c862241a4..ef1da386e4 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -907,12 +907,14 @@ dependencies = [
 "opentelemetry",
 "postgres",
 "regex",
+ "remote_storage",
 "reqwest",
 "serde",
 "serde_json",
 "tar",
 "tokio",
 "tokio-postgres",
+ "toml_edit",
 "tracing",
 "tracing-opentelemetry",
 "tracing-subscriber",
@@ -980,6 +982,7 @@ dependencies = [
 "tar",
 "thiserror",
 "toml",
+ "tracing",
 "url",
 "utils",
 "workspace_hack",
diff --git a/compute_tools/Cargo.toml b/compute_tools/Cargo.toml
index f8f8f729ce..11121ecc90 100644
--- a/compute_tools/Cargo.toml
+++ b/compute_tools/Cargo.toml
@@ -32,3 +32,5 @@ url.workspace = true
compute_api.workspace = true
utils.workspace = true
workspace_hack.workspace = true
+toml_edit.workspace = true
+remote_storage = { version = "0.1", path = "../libs/remote_storage/" }
diff --git a/compute_tools/src/bin/compute_ctl.rs b/compute_tools/src/bin/compute_ctl.rs
index 68f6bf3844..35d52a4e4a 100644
--- a/compute_tools/src/bin/compute_ctl.rs
+++ b/compute_tools/src/bin/compute_ctl.rs
@@ -5,6 +5,8 @@
//! - `compute_ctl` accepts cluster (compute node) specification as a JSON file.
//! - Every start is a fresh start, so the data directory is removed and
//!   initialized again on each run.
+//! - If remote_extension_config is provided, it is used to fetch the extension list
+//!   and to download `shared_preload_libraries` from remote storage.
//! - Next it will put configuration files into the `PGDATA` directory.
//! - Sync safekeepers and get commit LSN.
//! - Get `basebackup` from pageserver using the returned on the previous step LSN.
@@ -27,7 +29,8 @@
//! compute_ctl -D /var/db/postgres/compute \
//!             -C 'postgresql://cloud_admin@localhost/postgres' \
//!             -S /var/db/postgres/specs/current.json \
-//!             -b /usr/local/bin/postgres
+//!             -b /usr/local/bin/postgres \
+//!             -r {"bucket": "my-bucket", "region": "eu-central-1", "endpoint": "http://localhost:9000"} \
//! ```
//!
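+//! The `--remote-ext-config` (`-r`) value is a JSON object. Based on
+//! `init_remote_storage` in `extension_server.rs`, the recognized fields are
+//! `bucket`, `region`, an optional `endpoint` (for S3-compatible stores), and an
+//! optional `prefix` that falls back to the build tag when omitted, e.g.
+//! `{"bucket": "neon-dev-extensions", "region": "eu-central-1", "endpoint": null, "prefix": "5555"}`
+//! (the same shape as the commented-out override in `main()` below; the bucket
+//! and prefix values here are illustrative only).
+//!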
use std::collections::HashMap; @@ -35,7 +38,7 @@ use std::fs::File; use std::panic; use std::path::Path; use std::process::exit; -use std::sync::{mpsc, Arc, Condvar, Mutex}; +use std::sync::{mpsc, Arc, Condvar, Mutex, OnceLock}; use std::{thread, time::Duration}; use anyhow::{Context, Result}; @@ -48,6 +51,8 @@ use compute_api::responses::ComputeStatus; use compute_tools::compute::{ComputeNode, ComputeState, ParsedSpec}; use compute_tools::configurator::launch_configurator; +use compute_tools::extension_server::launch_download_extensions; +use compute_tools::extension_server::{get_pg_version, init_remote_storage}; use compute_tools::http::api::launch_http_server; use compute_tools::logger::*; use compute_tools::monitor::launch_monitor; @@ -60,10 +65,21 @@ fn main() -> Result<()> { init_tracing_and_logging(DEFAULT_LOG_LEVEL)?; let build_tag = option_env!("BUILD_TAG").unwrap_or(BUILD_TAG_DEFAULT); - info!("build_tag: {build_tag}"); let matches = cli().get_matches(); + let pgbin_default = String::from("postgres"); + let pgbin = matches.get_one::("pgbin").unwrap_or(&pgbin_default); + + let remote_ext_config = matches.get_one::("remote-ext-config"); + // NOTE TODO: until control-plane changes, we can use the following line to forcibly enable remote extensions + // let remote_ext_config = Some( + // r#"{"bucket": "neon-dev-extensions", "region": "eu-central-1", "endpoint": null, "prefix": "5555"}"#.to_string(), + // ); + let ext_remote_storage = remote_ext_config.map(|x| { + init_remote_storage(x, build_tag) + .expect("cannot initialize remote extension storage from config") + }); let http_port = *matches .get_one::("http-port") @@ -128,9 +144,6 @@ fn main() -> Result<()> { let compute_id = matches.get_one::("compute-id"); let control_plane_uri = matches.get_one::("control-plane-uri"); - // Try to use just 'postgres' if no path is provided - let pgbin = matches.get_one::("pgbin").unwrap(); - let spec; let mut live_config_allowed = false; match spec_json { @@ -168,6 +181,7 @@ fn main() -> Result<()> { let mut new_state = ComputeState::new(); let spec_set; + if let Some(spec) = spec { let pspec = ParsedSpec::try_from(spec).map_err(|msg| anyhow::anyhow!(msg))?; new_state.pspec = Some(pspec); @@ -179,9 +193,12 @@ fn main() -> Result<()> { connstr: Url::parse(connstr).context("cannot parse connstr as a URL")?, pgdata: pgdata.to_string(), pgbin: pgbin.to_string(), + pgversion: get_pg_version(pgbin), live_config_allowed, state: Mutex::new(new_state), state_changed: Condvar::new(), + ext_remote_storage, + available_extensions: OnceLock::new(), }; let compute = Arc::new(compute_node); @@ -190,6 +207,8 @@ fn main() -> Result<()> { let _http_handle = launch_http_server(http_port, &compute).expect("cannot launch http endpoint thread"); + let extension_server_port: u16 = http_port; + if !spec_set { // No spec provided, hang waiting for it. 
info!("no compute spec provided, waiting"); @@ -227,10 +246,13 @@ fn main() -> Result<()> { let _configurator_handle = launch_configurator(&compute).expect("cannot launch configurator thread"); + let _download_extensions_handle = + launch_download_extensions(&compute).expect("cannot launch download extensions thread"); + // Start Postgres let mut delay_exit = false; let mut exit_code = None; - let pg = match compute.start_compute() { + let pg = match compute.start_compute(extension_server_port) { Ok(pg) => Some(pg), Err(err) => { error!("could not start the compute node: {:?}", err); @@ -359,6 +381,12 @@ fn cli() -> clap::Command { .long("control-plane-uri") .value_name("CONTROL_PLANE_API_BASE_URI"), ) + .arg( + Arg::new("remote-ext-config") + .short('r') + .long("remote-ext-config") + .value_name("REMOTE_EXT_CONFIG"), + ) } #[test] diff --git a/compute_tools/src/compute.rs b/compute_tools/src/compute.rs index b33f4f05dd..63ef984ede 100644 --- a/compute_tools/src/compute.rs +++ b/compute_tools/src/compute.rs @@ -1,14 +1,17 @@ +use std::collections::HashSet; use std::fs; use std::io::BufRead; use std::os::unix::fs::PermissionsExt; use std::path::Path; use std::process::{Command, Stdio}; use std::str::FromStr; -use std::sync::{Condvar, Mutex}; +use std::sync::{Condvar, Mutex, OnceLock}; use anyhow::{Context, Result}; use chrono::{DateTime, Utc}; +use futures::future::join_all; use postgres::{Client, NoTls}; +use tokio; use tokio_postgres; use tracing::{info, instrument, warn}; use utils::id::{TenantId, TimelineId}; @@ -18,9 +21,11 @@ use compute_api::responses::{ComputeMetrics, ComputeStatus}; use compute_api::spec::{ComputeMode, ComputeSpec}; use utils::measured_stream::MeasuredReader; -use crate::config; +use remote_storage::GenericRemoteStorage; + use crate::pg_helpers::*; use crate::spec::*; +use crate::{config, extension_server}; /// Compute node info shared across several `compute_ctl` threads. pub struct ComputeNode { @@ -28,6 +33,7 @@ pub struct ComputeNode { pub connstr: url::Url, pub pgdata: String, pub pgbin: String, + pub pgversion: String, /// We should only allow live re- / configuration of the compute node if /// it uses 'pull model', i.e. it can go to control-plane and fetch /// the latest configuration. Otherwise, there could be a case: @@ -47,6 +53,10 @@ pub struct ComputeNode { pub state: Mutex, /// `Condvar` to allow notifying waiters about state changes. pub state_changed: Condvar, + /// the S3 bucket that we search for extensions in + pub ext_remote_storage: Option, + // cached lists of available extensions and libraries + pub available_extensions: OnceLock>, } #[derive(Clone, Debug)] @@ -357,14 +367,22 @@ impl ComputeNode { /// Do all the preparations like PGDATA directory creation, configuration, /// safekeepers sync, basebackup, etc. #[instrument(skip_all)] - pub fn prepare_pgdata(&self, compute_state: &ComputeState) -> Result<()> { + pub fn prepare_pgdata( + &self, + compute_state: &ComputeState, + extension_server_port: u16, + ) -> Result<()> { let pspec = compute_state.pspec.as_ref().expect("spec must be set"); let spec = &pspec.spec; let pgdata_path = Path::new(&self.pgdata); // Remove/create an empty pgdata directory and put configuration there. 
self.create_pgdata()?; - config::write_postgres_conf(&pgdata_path.join("postgresql.conf"), &pspec.spec)?; + config::write_postgres_conf( + &pgdata_path.join("postgresql.conf"), + &pspec.spec, + Some(extension_server_port), + )?; // Syncing safekeepers is only safe with primary nodes: if a primary // is already connected it will be kicked out, so a secondary (standby) @@ -506,7 +524,7 @@ impl ComputeNode { // Write new config let pgdata_path = Path::new(&self.pgdata); - config::write_postgres_conf(&pgdata_path.join("postgresql.conf"), &spec)?; + config::write_postgres_conf(&pgdata_path.join("postgresql.conf"), &spec, None)?; let mut client = Client::connect(self.connstr.as_str(), NoTls)?; self.pg_reload_conf(&mut client)?; @@ -536,7 +554,7 @@ impl ComputeNode { } #[instrument(skip_all)] - pub fn start_compute(&self) -> Result { + pub fn start_compute(&self, extension_server_port: u16) -> Result { let compute_state = self.state.lock().unwrap().clone(); let pspec = compute_state.pspec.as_ref().expect("spec must be set"); info!( @@ -547,7 +565,26 @@ impl ComputeNode { pspec.timeline_id, ); - self.prepare_pgdata(&compute_state)?; + // This part is sync, because we need to download + // remote shared_preload_libraries before postgres start (if any) + { + let library_load_start_time = Utc::now(); + self.prepare_preload_libraries(&compute_state)?; + + let library_load_time = Utc::now() + .signed_duration_since(library_load_start_time) + .to_std() + .unwrap() + .as_millis() as u64; + let mut state = self.state.lock().unwrap(); + state.metrics.load_libraries_ms = library_load_time; + info!( + "Loading shared_preload_libraries took {:?}ms", + library_load_time + ); + } + + self.prepare_pgdata(&compute_state, extension_server_port)?; let start_time = Utc::now(); let pg = self.start_postgres(pspec.storage_auth_token.clone())?; @@ -695,4 +732,92 @@ LIMIT 100", "{{\"pg_stat_statements\": []}}".to_string() } } + + // If remote extension storage is configured, + // download extension control files + #[tokio::main] + pub async fn prepare_external_extensions(&self, compute_state: &ComputeState) -> Result<()> { + if let Some(ref ext_remote_storage) = self.ext_remote_storage { + let pspec = compute_state.pspec.as_ref().expect("spec must be set"); + let spec = &pspec.spec; + let custom_ext_prefixes = spec.custom_extensions.clone().unwrap_or(Vec::new()); + info!("custom_ext_prefixes: {:?}", &custom_ext_prefixes); + let available_extensions = extension_server::get_available_extensions( + ext_remote_storage, + &self.pgbin, + &self.pgversion, + &custom_ext_prefixes, + ) + .await?; + self.available_extensions + .set(available_extensions) + .expect("available_extensions.set error"); + } + Ok(()) + } + + pub async fn download_extension(&self, ext_name: &str) -> Result<()> { + match &self.ext_remote_storage { + None => anyhow::bail!("No remote extension storage"), + Some(remote_storage) => { + extension_server::download_extension( + ext_name, + remote_storage, + &self.pgbin, + &self.pgversion, + ) + .await + } + } + } + + #[tokio::main] + pub async fn prepare_preload_libraries(&self, compute_state: &ComputeState) -> Result<()> { + if self.ext_remote_storage.is_none() { + return Ok(()); + } + let pspec = compute_state.pspec.as_ref().expect("spec must be set"); + let spec = &pspec.spec; + + info!("parse shared_preload_libraries from spec.cluster.settings"); + let mut libs_vec = Vec::new(); + if let Some(libs) = spec.cluster.settings.find("shared_preload_libraries") { + libs_vec = libs + .split(&[',', '\'', ' ']) + 
.filter(|s| *s != "neon" && !s.is_empty())
+                .map(str::to_string)
+                .collect();
+        }
+        info!("parse shared_preload_libraries from provided postgresql.conf");
+        // that is used in neon_local and python tests
+        if let Some(conf) = &spec.cluster.postgresql_conf {
+            let conf_lines = conf.split('\n').collect::>();
+            let mut shared_preload_libraries_line = "";
+            for line in conf_lines {
+                if line.starts_with("shared_preload_libraries") {
+                    shared_preload_libraries_line = line;
+                }
+            }
+            let mut preload_libs_vec = Vec::new();
+            if let Some(libs) = shared_preload_libraries_line.split("='").nth(1) {
+                preload_libs_vec = libs
+                    .split(&[',', '\'', ' '])
+                    .filter(|s| *s != "neon" && !s.is_empty())
+                    .map(str::to_string)
+                    .collect();
+            }
+            libs_vec.extend(preload_libs_vec);
+        }
+
+        info!("Downloading shared_preload_libraries: {:?}", &libs_vec);
+        let mut download_tasks = Vec::new();
+        for library in &libs_vec {
+            download_tasks.push(self.download_extension(library));
+        }
+        let results = join_all(download_tasks).await;
+        for result in results {
+            result?; // propagate any errors
+        }
+        Ok(())
+    }
}
diff --git a/compute_tools/src/config.rs b/compute_tools/src/config.rs
index 68b943eec8..2da671a149 100644
--- a/compute_tools/src/config.rs
+++ b/compute_tools/src/config.rs
@@ -33,7 +33,11 @@ pub fn line_in_file(path: &Path, line: &str) -> Result {
}

/// Create or completely rewrite configuration file specified by `path`
-pub fn write_postgres_conf(path: &Path, spec: &ComputeSpec) -> Result<()> {
+pub fn write_postgres_conf(
+    path: &Path,
+    spec: &ComputeSpec,
+    extension_server_port: Option,
+) -> Result<()> {
    // File::create() destroys the file content if it exists.
    let mut file = File::create(path)?;

@@ -87,5 +91,9 @@ pub fn write_postgres_conf(path: &Path, spec: &ComputeSpec) -> Result<()> {
        writeln!(file, "# Managed by compute_ctl: end")?;
    }

+    if let Some(port) = extension_server_port {
+        writeln!(file, "neon.extension_server_port={}", port)?;
+    }
+
    Ok(())
}
diff --git a/compute_tools/src/configurator.rs b/compute_tools/src/configurator.rs
index 13550e0176..b39481d20b 100644
--- a/compute_tools/src/configurator.rs
+++ b/compute_tools/src/configurator.rs
@@ -42,13 +42,15 @@ fn configurator_main_loop(compute: &Arc) {
    }
}

-pub fn launch_configurator(compute: &Arc) -> Result> {
+pub fn launch_configurator(
+    compute: &Arc,
+) -> Result, std::io::Error> {
    let compute = Arc::clone(compute);

-    Ok(thread::Builder::new()
+    thread::Builder::new()
        .name("compute-configurator".into())
        .spawn(move || {
            configurator_main_loop(&compute);
            info!("configurator thread is exited");
-        })?)
+        })
}
diff --git a/compute_tools/src/extension_server.rs b/compute_tools/src/extension_server.rs
new file mode 100644
index 0000000000..bff8fca303
--- /dev/null
+++ b/compute_tools/src/extension_server.rs
@@ -0,0 +1,237 @@
+// Download extension files from the extension store
+// and put them in the right place in the postgres directory
+/*
+The layout of the S3 bucket is as follows:
+
+v14/ext_index.json
+  -- this contains information necessary to create control files
+v14/extensions/test_ext1.tar.gz
+  -- this contains the library files and sql files necessary to create this extension
+v14/extensions/custom_ext1.tar.gz
+
+The difference between private and public extensions is determined by who can
+load the extension; this is specified in ext_index.json
+
+Specifically, ext_index.json has a list of public extensions, and a list of
+extensions enabled for specific tenant-ids.
+*/ +use crate::compute::ComputeNode; +use anyhow::Context; +use anyhow::{self, Result}; +use flate2::read::GzDecoder; +use remote_storage::*; +use serde_json::{self, Value}; +use std::collections::HashSet; +use std::num::{NonZeroU32, NonZeroUsize}; +use std::path::Path; +use std::str; +use std::sync::Arc; +use std::thread; +use tar::Archive; +use tokio::io::AsyncReadExt; +use tracing::info; + +fn get_pg_config(argument: &str, pgbin: &str) -> String { + // gives the result of `pg_config [argument]` + // where argument is a flag like `--version` or `--sharedir` + let pgconfig = pgbin + .strip_suffix("postgres") + .expect("bad pgbin") + .to_owned() + + "/pg_config"; + let config_output = std::process::Command::new(pgconfig) + .arg(argument) + .output() + .expect("pg_config error"); + std::str::from_utf8(&config_output.stdout) + .expect("pg_config error") + .trim() + .to_string() +} + +pub fn get_pg_version(pgbin: &str) -> String { + // pg_config --version returns a (platform specific) human readable string + // such as "PostgreSQL 15.4". We parse this to v14/v15 + let human_version = get_pg_config("--version", pgbin); + if human_version.contains("15") { + return "v15".to_string(); + } else if human_version.contains("14") { + return "v14".to_string(); + } + panic!("Unsuported postgres version {human_version}"); +} + +// download extension control files +// if custom_ext_prefixes is provided - search also in custom extension paths +pub async fn get_available_extensions( + remote_storage: &GenericRemoteStorage, + pgbin: &str, + pg_version: &str, + custom_ext_prefixes: &[String], +) -> Result> { + let local_sharedir = Path::new(&get_pg_config("--sharedir", pgbin)).join("extension"); + let index_path = pg_version.to_owned() + "/ext_index.json"; + let index_path = RemotePath::new(Path::new(&index_path)).context("error forming path")?; + info!("download ext_index.json: {:?}", &index_path); + + // TODO: potential optimization: cache ext_index.json + let mut download = remote_storage.download(&index_path).await?; + let mut write_data_buffer = Vec::new(); + download + .download_stream + .read_to_end(&mut write_data_buffer) + .await?; + let ext_index_str = match str::from_utf8(&write_data_buffer) { + Ok(v) => v, + Err(e) => panic!("Invalid UTF-8 sequence: {}", e), + }; + + let ext_index_full: Value = serde_json::from_str(ext_index_str)?; + let ext_index_full = ext_index_full.as_object().context("error parsing json")?; + let control_data = ext_index_full["control_data"] + .as_object() + .context("json parse error")?; + let enabled_extensions = ext_index_full["enabled_extensions"] + .as_object() + .context("json parse error")?; + info!("{:?}", control_data.clone()); + info!("{:?}", enabled_extensions.clone()); + + let mut prefixes = vec!["public".to_string()]; + prefixes.extend(custom_ext_prefixes.to_owned()); + info!("{:?}", &prefixes); + let mut all_extensions = HashSet::new(); + for prefix in prefixes { + let prefix_extensions = match enabled_extensions.get(&prefix) { + Some(Value::Array(ext_name)) => ext_name, + _ => { + info!("prefix {} has no extensions", prefix); + continue; + } + }; + info!("{:?}", prefix_extensions); + for ext_name in prefix_extensions { + all_extensions.insert(ext_name.as_str().context("json parse error")?.to_string()); + } + } + + for prefix in &all_extensions { + let control_contents = control_data[prefix].as_str().context("json parse error")?; + let control_path = local_sharedir.join(prefix.to_owned() + ".control"); + + info!("WRITING FILE {:?}{:?}", control_path, 
control_contents); + std::fs::write(control_path, control_contents)?; + } + + Ok(all_extensions.into_iter().collect()) +} + +// download all sqlfiles (and possibly data files) for a given extension name +pub async fn download_extension( + ext_name: &str, + remote_storage: &GenericRemoteStorage, + pgbin: &str, + pg_version: &str, +) -> Result<()> { + // TODO: potential optimization: only download the extension if it doesn't exist + // problem: how would we tell if it exists? + let ext_name = ext_name.replace(".so", ""); + let ext_name_targz = ext_name.to_owned() + ".tar.gz"; + if Path::new(&ext_name_targz).exists() { + info!("extension {:?} already exists", ext_name_targz); + return Ok(()); + } + let ext_path = RemotePath::new( + &Path::new(pg_version) + .join("extensions") + .join(ext_name_targz.clone()), + )?; + info!( + "Start downloading extension {:?} from {:?}", + ext_name, ext_path + ); + let mut download = remote_storage.download(&ext_path).await?; + let mut write_data_buffer = Vec::new(); + download + .download_stream + .read_to_end(&mut write_data_buffer) + .await?; + let unzip_dest = pgbin.strip_suffix("/bin/postgres").expect("bad pgbin"); + let tar = GzDecoder::new(std::io::Cursor::new(write_data_buffer)); + let mut archive = Archive::new(tar); + archive.unpack(unzip_dest)?; + info!("Download + unzip {:?} completed successfully", &ext_path); + + let local_sharedir = Path::new(&get_pg_config("--sharedir", pgbin)).join("extension"); + let zip_sharedir = format!("{unzip_dest}/extensions/{ext_name}/share/extension"); + info!("mv {zip_sharedir:?}/* {local_sharedir:?}"); + for file in std::fs::read_dir(zip_sharedir)? { + let old_file = file?.path(); + let new_file = + Path::new(&local_sharedir).join(old_file.file_name().context("error parsing file")?); + std::fs::rename(old_file, new_file)?; + } + let local_libdir = Path::new(&get_pg_config("--libdir", pgbin)).join("postgresql"); + let zip_libdir = format!("{unzip_dest}/extensions/{ext_name}/lib"); + info!("mv {zip_libdir:?}/* {local_libdir:?}"); + for file in std::fs::read_dir(zip_libdir)? 
{ + let old_file = file?.path(); + let new_file = + Path::new(&local_libdir).join(old_file.file_name().context("error parsing file")?); + std::fs::rename(old_file, new_file)?; + } + Ok(()) +} + +// This function initializes the necessary structs to use remmote storage (should be fairly cheap) +pub fn init_remote_storage( + remote_ext_config: &str, + default_prefix: &str, +) -> anyhow::Result { + let remote_ext_config: serde_json::Value = serde_json::from_str(remote_ext_config)?; + + let remote_ext_bucket = remote_ext_config["bucket"] + .as_str() + .context("config parse error")?; + let remote_ext_region = remote_ext_config["region"] + .as_str() + .context("config parse error")?; + let remote_ext_endpoint = remote_ext_config["endpoint"].as_str(); + let remote_ext_prefix = remote_ext_config["prefix"] + .as_str() + .unwrap_or(default_prefix) + .to_string(); + + // TODO: potentially allow modification of other parameters + // however, default values should be fine for now + let config = S3Config { + bucket_name: remote_ext_bucket.to_string(), + bucket_region: remote_ext_region.to_string(), + prefix_in_bucket: Some(remote_ext_prefix), + endpoint: remote_ext_endpoint.map(|x| x.to_string()), + concurrency_limit: NonZeroUsize::new(100).expect("100 != 0"), + max_keys_per_list_response: None, + }; + let config = RemoteStorageConfig { + max_concurrent_syncs: NonZeroUsize::new(100).expect("100 != 0"), + max_sync_errors: NonZeroU32::new(100).expect("100 != 0"), + storage: RemoteStorageKind::AwsS3(config), + }; + GenericRemoteStorage::from_config(&config) +} + +pub fn launch_download_extensions( + compute: &Arc, +) -> Result, std::io::Error> { + let compute = Arc::clone(compute); + thread::Builder::new() + .name("download-extensions".into()) + .spawn(move || { + info!("start download_extension_files"); + let compute_state = compute.state.lock().expect("error unlocking compute.state"); + compute + .prepare_external_extensions(&compute_state) + .expect("error preparing extensions"); + info!("download_extension_files done, exiting thread"); + }) +} diff --git a/compute_tools/src/http/api.rs b/compute_tools/src/http/api.rs index afd9c2fb54..8fd40cdfe1 100644 --- a/compute_tools/src/http/api.rs +++ b/compute_tools/src/http/api.rs @@ -121,6 +121,27 @@ async fn routes(req: Request, compute: &Arc) -> Response { + info!("serving {:?} POST request", route); + info!("req.uri {:?}", req.uri()); + let filename = route.split('/').last().unwrap().to_string(); + info!( + "serving /extension_server POST request, filename: {:?}", + &filename + ); + + match compute.download_extension(&filename).await { + Ok(_) => Response::new(Body::from("OK")), + Err(e) => { + error!("extension download failed: {}", e); + let mut resp = Response::new(Body::from(e.to_string())); + *resp.status_mut() = StatusCode::INTERNAL_SERVER_ERROR; + resp + } + } + } + // Return the `404 Not Found` for any other routes. _ => { let mut not_found = Response::new(Body::from("404 Not Found")); diff --git a/compute_tools/src/http/openapi_spec.yaml b/compute_tools/src/http/openapi_spec.yaml index 2680269756..dc26cc63eb 100644 --- a/compute_tools/src/http/openapi_spec.yaml +++ b/compute_tools/src/http/openapi_spec.yaml @@ -139,6 +139,34 @@ paths: application/json: schema: $ref: "#/components/schemas/GenericError" + /extension_server: + post: + tags: + - Extension + summary: Download extension from S3 to local folder. 
+ description: "" + operationId: downloadExtension + responses: + 200: + description: Extension downloaded + content: + text/plain: + schema: + type: string + description: Error text or 'OK' if download succeeded. + example: "OK" + 400: + description: Request is invalid. + content: + application/json: + schema: + $ref: "#/components/schemas/GenericError" + 500: + description: Extension download request failed. + content: + application/json: + schema: + $ref: "#/components/schemas/GenericError" components: securitySchemes: diff --git a/compute_tools/src/lib.rs b/compute_tools/src/lib.rs index 24811f75ee..c061ab2da3 100644 --- a/compute_tools/src/lib.rs +++ b/compute_tools/src/lib.rs @@ -9,6 +9,7 @@ pub mod http; #[macro_use] pub mod logger; pub mod compute; +pub mod extension_server; pub mod monitor; pub mod params; pub mod pg_helpers; diff --git a/compute_tools/src/monitor.rs b/compute_tools/src/monitor.rs index d2e7b698dd..03d0d021d0 100644 --- a/compute_tools/src/monitor.rs +++ b/compute_tools/src/monitor.rs @@ -105,10 +105,10 @@ fn watch_compute_activity(compute: &ComputeNode) { } /// Launch a separate compute monitor thread and return its `JoinHandle`. -pub fn launch_monitor(state: &Arc) -> Result> { +pub fn launch_monitor(state: &Arc) -> Result, std::io::Error> { let state = Arc::clone(state); - Ok(thread::Builder::new() + thread::Builder::new() .name("compute-monitor".into()) - .spawn(move || watch_compute_activity(&state))?) + .spawn(move || watch_compute_activity(&state)) } diff --git a/compute_tools/src/spec.rs b/compute_tools/src/spec.rs index 575a5332a8..eff7c93b46 100644 --- a/compute_tools/src/spec.rs +++ b/compute_tools/src/spec.rs @@ -124,7 +124,7 @@ pub fn get_spec_from_control_plane( pub fn handle_configuration(spec: &ComputeSpec, pgdata_path: &Path) -> Result<()> { // File `postgresql.conf` is no longer included into `basebackup`, so just // always write all config into it creating new file. - config::write_postgres_conf(&pgdata_path.join("postgresql.conf"), spec)?; + config::write_postgres_conf(&pgdata_path.join("postgresql.conf"), spec, None)?; update_pg_hba(pgdata_path)?; diff --git a/control_plane/Cargo.toml b/control_plane/Cargo.toml index a341ff0263..d2c99c5f36 100644 --- a/control_plane/Cargo.toml +++ b/control_plane/Cargo.toml @@ -32,3 +32,4 @@ utils.workspace = true compute_api.workspace = true workspace_hack.workspace = true +tracing.workspace = true diff --git a/control_plane/src/bin/neon_local.rs b/control_plane/src/bin/neon_local.rs index 8995a18564..8f71cb65e2 100644 --- a/control_plane/src/bin/neon_local.rs +++ b/control_plane/src/bin/neon_local.rs @@ -658,6 +658,8 @@ fn handle_endpoint(ep_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<( .get_one::("endpoint_id") .ok_or_else(|| anyhow!("No endpoint ID was provided to start"))?; + let remote_ext_config = sub_args.get_one::("remote-ext-config"); + // If --safekeepers argument is given, use only the listed safekeeper nodes. 
let safekeepers = if let Some(safekeepers_str) = sub_args.get_one::("safekeepers") { @@ -699,7 +701,7 @@ fn handle_endpoint(ep_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<( _ => {} } println!("Starting existing endpoint {endpoint_id}..."); - endpoint.start(&auth_token, safekeepers)?; + endpoint.start(&auth_token, safekeepers, remote_ext_config)?; } else { let branch_name = sub_args .get_one::("branch-name") @@ -743,7 +745,7 @@ fn handle_endpoint(ep_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<( pg_version, mode, )?; - ep.start(&auth_token, safekeepers)?; + ep.start(&auth_token, safekeepers, remote_ext_config)?; } } "stop" => { @@ -1003,6 +1005,12 @@ fn cli() -> Command { .help("Additional pageserver's configuration options or overrides, refer to pageserver's 'config-override' CLI parameter docs for more") .required(false); + let remote_ext_config_args = Arg::new("remote-ext-config") + .long("remote-ext-config") + .num_args(1) + .help("Configure the S3 bucket that we search for extensions in.") + .required(false); + let lsn_arg = Arg::new("lsn") .long("lsn") .help("Specify Lsn on the timeline to start from. By default, end of the timeline would be used.") @@ -1161,6 +1169,7 @@ fn cli() -> Command { .arg(pg_version_arg) .arg(hot_standby_arg) .arg(safekeepers_arg) + .arg(remote_ext_config_args) ) .subcommand( Command::new("stop") diff --git a/control_plane/src/endpoint.rs b/control_plane/src/endpoint.rs index 6df6e47f29..35e863b98e 100644 --- a/control_plane/src/endpoint.rs +++ b/control_plane/src/endpoint.rs @@ -313,7 +313,7 @@ impl Endpoint { // TODO: use future host field from safekeeper spec // Pass the list of safekeepers to the replica so that it can connect to any of them, - // whichever is availiable. + // whichever is available. let sk_ports = self .env .safekeepers @@ -420,7 +420,12 @@ impl Endpoint { Ok(()) } - pub fn start(&self, auth_token: &Option, safekeepers: Vec) -> Result<()> { + pub fn start( + &self, + auth_token: &Option, + safekeepers: Vec, + remote_ext_config: Option<&String>, + ) -> Result<()> { if self.status() == "running" { anyhow::bail!("The endpoint is already running"); } @@ -488,6 +493,15 @@ impl Endpoint { pageserver_connstring: Some(pageserver_connstring), safekeeper_connstrings, storage_auth_token: auth_token.clone(), + // TODO FIXME: This is a hack to test custom extensions locally. + // In test_download_extensions, we assume that the custom extension + // prefix is the tenant ID. So we set it here. + // + // The proper way to implement this is to pass the custom extension + // in spec, but we don't have a way to do that yet in the python tests. + // NEW HACK: we enable the anon custom extension for everyone! this is of course just for testing + // how will we do it for real? + custom_extensions: Some(vec!["123454321".to_string(), self.tenant_id.to_string()]), }; let spec_path = self.endpoint_path().join("spec.json"); std::fs::write(spec_path, serde_json::to_string_pretty(&spec)?)?; @@ -519,6 +533,11 @@ impl Endpoint { .stdin(std::process::Stdio::null()) .stderr(logfile.try_clone()?) 
.stdout(logfile);
+
+        if let Some(remote_ext_config) = remote_ext_config {
+            cmd.args(["--remote-ext-config", remote_ext_config]);
+        }
+
        let child = cmd.spawn()?;

        // Write down the pid so we can wait for it when we want to stop
diff --git a/docs/rfcs/024-extension-loading.md b/docs/rfcs/024-extension-loading.md
new file mode 100644
index 0000000000..bc9ed4407b
--- /dev/null
+++ b/docs/rfcs/024-extension-loading.md
@@ -0,0 +1,183 @@
+# Supporting custom user Extensions (Dynamic Extension Loading)
+Created 2023-05-03
+
+## Motivation
+
+There are many extensions in the PostgreSQL ecosystem, and not all extensions
+are of a quality that we can confidently support. Additionally, our
+current extension inclusion mechanism has several problems because we build all
+extensions into the primary Compute image: we build the extensions every time
+we build the compute image regardless of whether we actually need to rebuild
+the image, and the inclusion of these extensions in the image adds a hard
+dependency on all supported extensions - thus increasing the image size, and
+with it the time it takes to download that image - increasing first start
+latency.
+
+This RFC proposes a dynamic loading mechanism that solves most of these
+problems.
+
+## Summary
+
+`compute_ctl` is made responsible for downloading dynamically loaded extensions
+on demand into the container's file system, and will also
+make sure that the extensions in `shared_preload_libraries` are downloaded
+before the compute node starts.
+
+## Components
+
+compute_ctl, PostgreSQL, neon (extension), Compute Host Node, Extension Store
+
+## Requirements
+
+Compute nodes with no extra extensions should not be negatively impacted by
+the existence of support for many extensions.
+
+Installing an extension into PostgreSQL should be easy.
+
+Non-preloaded extensions shouldn't impact startup latency.
+
+Uninstalled extensions shouldn't impact query latency.
+
+A small latency penalty for dynamically loaded extensions is acceptable in
+the first seconds of compute startup, but not in steady-state operations.
+
+## Proposed implementation
+
+### On-demand, JIT-loading of extensions
+
+Before postgres starts, we download
+- control files for all extensions available to that compute node;
+- all `shared_preload_libraries`;
+
+After postgres is running, `compute_ctl` listens for requests to load files.
+When PostgreSQL requests a file, `compute_ctl` downloads it.
+
+PostgreSQL requests files in the following cases:
+- When loading a preload library set in `local_preload_libraries`
+- When explicitly loading a library with `LOAD`
+- When creating an extension with `CREATE EXTENSION` (download SQL scripts, and
+  optionally extension data files and library files)
+
+
+#### Summary
+
+Pros:
+ - Startup is only as slow as it takes to load all (shared_)preload_libraries
+ - Supports BYO Extension
+
+Cons:
+ - O(sizeof(extensions)) IO requirement for loading all extensions.
+
+### Alternative solutions
+
+1. Allow users to add their extensions to the base image
+
+   Pros:
+   - Easy to deploy
+
+   Cons:
+   - Doesn't scale - first start size is dependent on image size;
+   - All extensions are shared across all users: it doesn't allow users to
+     bring their own restrictive-licensed extensions
+
+2.
Bring Your Own compute image
+
+   Pros:
+   - Still easy to deploy
+   - User can bring own patched version of PostgreSQL
+
+   Cons:
+   - First start latency is O(sizeof(extensions image))
+   - Warm instance pool for skipping pod schedule latency is not feasible with
+     O(n) custom images
+   - Support channels are difficult to manage
+
+3. Download all user extensions in bulk on compute start
+
+   Pros:
+   - Easy to deploy
+   - No startup latency issues for "clean" users.
+   - Warm instance pool for skipping pod schedule latency is possible
+
+   Cons:
+   - Downloading all extensions in advance takes a lot of time, thus startup
+     latency issues
+
+4. Store user's extensions in persistent storage
+
+   Pros:
+   - Easy to deploy
+   - No startup latency issues
+   - Warm instance pool for skipping pod schedule latency is possible
+
+   Cons:
+   - EC2 instances have only a limited number of attachments shared between EBS
+     volumes, direct-attached NVMe drives, and ENIs.
+   - Compute instance migration isn't trivially solved for EBS mounts (e.g.
+     the device is unavailable whilst moving the mount between instances).
+   - EBS can only mount on one instance at a time (except the expensive IO2
+     device type).
+
+5. Store user's extensions in network drive
+
+   Pros:
+   - Easy to deploy
+   - Few startup latency issues
+   - Warm instance pool for skipping pod schedule latency is possible
+
+   Cons:
+   - We'd need networked drives, and a lot of them, which would store many
+     duplicate extensions.
+   - **UNCHECKED:** Compute instance migration may not work nicely with
+     networked IOs
+
+
+### Possible extensions of this idea
+
+The extension store does not have to be S3 directly, but could be a Node-local
+caching service on top of S3. This would reduce the load on the network for
+popular extensions.
+
+## Extension Storage implementation
+
+Extension Storage in our case is an S3 bucket with a "directory" per build and postgres version,
+where extension files are stored as plain files in the bucket, following the same directory structure as in the postgres install directory.
+
+i.e.
+
+`s3://<bucket>/<build_tag>/<pg_version>/lib/postgis-3.1.so`
+`s3://<bucket>/<build_tag>/<pg_version>/share/extension/postgis.control`
+`s3://<bucket>/<build_tag>/<pg_version>/share/extension/postgis--3.1.sql`
+
+To handle custom extensions that are available only to specific users, we use per-extension subdirectories:
+
+i.e.
+`s3://<bucket>/<build_tag>/<pg_version>/<ext_name>/lib/ext-name.so`, etc.
+`s3://<bucket>/<build_tag>/<pg_version>/<ext_name>/share/extension/ext-name.control`, etc.
+
+On compute start, `compute_ctl` accepts a list of custom_ext_prefixes.
+
+To get the list of available extensions, `compute_ctl` downloads control files from all prefixes
+(a condensed sketch of this download flow appears in the appendix at the end of this document):
+
+`s3://<bucket>/<build_tag>/<pg_version>/share/extension/`
+`s3://<bucket>/<build_tag>/<pg_version>/<custom_prefix>/share/extension/`
+`s3://<bucket>/<build_tag>/<pg_version>/<custom_prefix>/share/extension/`
+
+
+
+### How to add a new extension to the Extension Storage?
+
+Simply upload the build artifacts to the S3 bucket.
+Implement a CI step for that, splitting it from the compute-node-image build.
+
+### How do we deal with extension versions and updates?
+
+Currently, we rebuild extensions on every compute-node-image build and store them in the `<build_tag>` prefix.
+This is needed to ensure that `/share` and `/lib` files are in sync.
+
+For extension updates, we rely on the PostgreSQL extension versioning mechanism (sql update scripts) and on extension authors to not break backwards compatibility within one major version of PostgreSQL.
+
+### Alternatives
+
+For extensions written in trusted languages we can also adopt
+the `dbdev` PostgreSQL Package Manager based on `pg_tle` by Supabase.
+This would increase the number of supported extensions and decrease the amount of work required to support them.
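+### Appendix: sketch of the on-demand download flow
+
+For illustration only: a condensed, hypothetical helper (the name
+`fetch_and_unpack` and the exact signature are not part of the implementation)
+showing how a compute can fetch one extension archive from the extension store
+and unpack it over the local postgres install directory. It mirrors
+`download_extension` in `compute_tools/src/extension_server.rs`, which uses the
+`v{14,15}/extensions/<name>.tar.gz` layout; error handling, `pg_config` lookups
+and the moves into `--sharedir`/`--libdir` are omitted.
+
+```rust
+use std::path::Path;
+
+use anyhow::Result;
+use flate2::read::GzDecoder;
+use remote_storage::{GenericRemoteStorage, RemotePath};
+use tar::Archive;
+use tokio::io::AsyncReadExt;
+
+async fn fetch_and_unpack(
+    remote_storage: &GenericRemoteStorage,
+    pg_version: &str,      // e.g. "v15"
+    ext_name: &str,        // e.g. "embedding"
+    pg_install_dir: &Path, // e.g. ".../pg_install/v15"
+) -> Result<()> {
+    // Remote key: "<pg_version>/extensions/<ext_name>.tar.gz"
+    let remote = RemotePath::new(
+        &Path::new(pg_version)
+            .join("extensions")
+            .join(format!("{ext_name}.tar.gz")),
+    )?;
+
+    // Buffer the whole archive, then gunzip + untar it in place.
+    let mut download = remote_storage.download(&remote).await?;
+    let mut buf = Vec::new();
+    download.download_stream.read_to_end(&mut buf).await?;
+    Archive::new(GzDecoder::new(std::io::Cursor::new(buf))).unpack(pg_install_dir)?;
+    Ok(())
+}
+```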
diff --git a/libs/compute_api/src/responses.rs b/libs/compute_api/src/responses.rs index 6124c81f50..fd57ff43bd 100644 --- a/libs/compute_api/src/responses.rs +++ b/libs/compute_api/src/responses.rs @@ -75,6 +75,7 @@ pub struct ComputeMetrics { pub start_postgres_ms: u64, pub config_ms: u64, pub total_startup_ms: u64, + pub load_libraries_ms: u64, } /// Response of the `/computes/{compute_id}/spec` control-plane API. diff --git a/libs/compute_api/src/spec.rs b/libs/compute_api/src/spec.rs index b3f0e9ba43..293f6dc294 100644 --- a/libs/compute_api/src/spec.rs +++ b/libs/compute_api/src/spec.rs @@ -60,6 +60,9 @@ pub struct ComputeSpec { /// If set, 'storage_auth_token' is used as the password to authenticate to /// the pageserver and safekeepers. pub storage_auth_token: Option, + + // list of prefixes to search for custom extensions in remote extension storage + pub custom_extensions: Option>, } #[serde_as] diff --git a/libs/remote_storage/src/lib.rs b/libs/remote_storage/src/lib.rs index 92ef793a34..5e311b3cdc 100644 --- a/libs/remote_storage/src/lib.rs +++ b/libs/remote_storage/src/lib.rs @@ -190,6 +190,20 @@ pub enum GenericRemoteStorage { } impl GenericRemoteStorage { + // A function for listing all the files in a "directory" + // Example: + // list_files("foo/bar") = ["foo/bar/a.txt", "foo/bar/b.txt"] + pub async fn list_files(&self, folder: Option<&RemotePath>) -> anyhow::Result> { + match self { + Self::LocalFs(s) => s.list_files(folder).await, + Self::AwsS3(s) => s.list_files(folder).await, + Self::Unreliable(s) => s.list_files(folder).await, + } + } + + // lists common *prefixes*, if any of files + // Example: + // list_prefixes("foo123","foo567","bar123","bar432") = ["foo", "bar"] pub async fn list_prefixes( &self, prefix: Option<&RemotePath>, @@ -201,14 +215,6 @@ impl GenericRemoteStorage { } } - pub async fn list_files(&self, folder: Option<&RemotePath>) -> anyhow::Result> { - match self { - Self::LocalFs(s) => s.list_files(folder).await, - Self::AwsS3(s) => s.list_files(folder).await, - Self::Unreliable(s) => s.list_files(folder).await, - } - } - pub async fn upload( &self, from: impl io::AsyncRead + Unpin + Send + Sync + 'static, diff --git a/libs/remote_storage/src/s3_bucket.rs b/libs/remote_storage/src/s3_bucket.rs index 43d818dfb9..d71592eb93 100644 --- a/libs/remote_storage/src/s3_bucket.rs +++ b/libs/remote_storage/src/s3_bucket.rs @@ -349,10 +349,17 @@ impl RemoteStorage for S3Bucket { /// See the doc for `RemoteStorage::list_files` async fn list_files(&self, folder: Option<&RemotePath>) -> anyhow::Result> { - let folder_name = folder + let mut folder_name = folder .map(|p| self.relative_path_to_s3_object(p)) .or_else(|| self.prefix_in_bucket.clone()); + // remove leading "/" if one exists + if let Some(folder_name_slash) = folder_name.clone() { + if folder_name_slash.starts_with(REMOTE_STORAGE_PREFIX_SEPARATOR) { + folder_name = Some(folder_name_slash[1..].to_string()); + } + } + // AWS may need to break the response into several parts let mut continuation_token = None; let mut all_files = vec![]; diff --git a/pgxn/neon/Makefile b/pgxn/neon/Makefile index 1948023472..53917d8bc4 100644 --- a/pgxn/neon/Makefile +++ b/pgxn/neon/Makefile @@ -4,6 +4,7 @@ MODULE_big = neon OBJS = \ $(WIN32RES) \ + extension_server.o \ file_cache.o \ libpagestore.o \ libpqwalproposer.o \ diff --git a/pgxn/neon/extension_server.c b/pgxn/neon/extension_server.c new file mode 100644 index 0000000000..01c86867db --- /dev/null +++ b/pgxn/neon/extension_server.c @@ -0,0 +1,104 @@ + 
+/*------------------------------------------------------------------------- + * + * extension_server.c + * Request compute_ctl to download extension files. + * + * IDENTIFICATION + * contrib/neon/extension_server.c + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" +#include "tcop/pquery.h" +#include "tcop/utility.h" +#include "access/xact.h" +#include "utils/hsearch.h" +#include "utils/memutils.h" +#include "commands/defrem.h" +#include "miscadmin.h" +#include "utils/acl.h" +#include "fmgr.h" +#include "utils/guc.h" +#include "port.h" +#include "fmgr.h" + +#include + +static int extension_server_port = 0; + +static download_extension_file_hook_type prev_download_extension_file_hook = NULL; + +// to download all SQL (and data) files for an extension: +// curl -X POST http://localhost:8080/extension_server/postgis +// it covers two possible extension files layouts: +// 1. extension_name--version--platform.sql +// 2. extension_name/extension_name--version.sql +// extension_name/extra_files.csv +// +// to download specific library file: +// curl -X POST http://localhost:8080/extension_server/postgis-3.so?is_library=true +static bool +neon_download_extension_file_http(const char *filename, bool is_library) +{ + CURL *curl; + CURLcode res; + char *compute_ctl_url; + char *postdata; + bool ret = false; + + if ((curl = curl_easy_init()) == NULL) + { + elog(ERROR, "Failed to initialize curl handle"); + } + + compute_ctl_url = psprintf("http://localhost:%d/extension_server/%s%s", + extension_server_port, filename, is_library ? "?is_library=true" : ""); + + elog(LOG, "Sending request to compute_ctl: %s", compute_ctl_url); + + curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, "POST"); + curl_easy_setopt(curl, CURLOPT_URL, compute_ctl_url); + // NOTE: 15L may be insufficient time for large extensions like postgis + curl_easy_setopt(curl, CURLOPT_TIMEOUT, 15L /* seconds */); + + if (curl) + { + /* Perform the request, res will get the return code */ + res = curl_easy_perform(curl); + /* Check for errors */ + if (res == CURLE_OK) + { + ret = true; + } + else + { + // Don't error here because postgres will try to find the file + // and will fail with some proper error message if it's not found. + elog(WARNING, "neon_download_extension_file_http failed: %s\n", curl_easy_strerror(res)); + } + + /* always cleanup */ + curl_easy_cleanup(curl); + } + + return ret; +} + +void pg_init_extension_server() +{ + // Port to connect to compute_ctl on localhost + // to request extension files. 
+ DefineCustomIntVariable("neon.extension_server_port", + "connection string to the compute_ctl", + NULL, + &extension_server_port, + 0, 0, INT_MAX, + PGC_POSTMASTER, + 0, /* no flags required */ + NULL, NULL, NULL); + + // set download_extension_file_hook + prev_download_extension_file_hook = download_extension_file_hook; + download_extension_file_hook = neon_download_extension_file_http; +} diff --git a/pgxn/neon/extension_server.h b/pgxn/neon/extension_server.h new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/pgxn/neon/extension_server.h @@ -0,0 +1 @@ + diff --git a/pgxn/neon/neon.c b/pgxn/neon/neon.c index b45d7cfc32..c7211ea05a 100644 --- a/pgxn/neon/neon.c +++ b/pgxn/neon/neon.c @@ -35,8 +35,11 @@ _PG_init(void) { pg_init_libpagestore(); pg_init_walproposer(); + InitControlPlaneConnector(); + pg_init_extension_server(); + // Important: This must happen after other parts of the extension // are loaded, otherwise any settings to GUCs that were set before // the extension was loaded will be removed. diff --git a/pgxn/neon/neon.h b/pgxn/neon/neon.h index 60d321a945..2610da4311 100644 --- a/pgxn/neon/neon.h +++ b/pgxn/neon/neon.h @@ -21,6 +21,8 @@ extern char *neon_tenant; extern void pg_init_libpagestore(void); extern void pg_init_walproposer(void); +extern void pg_init_extension_server(void); + /* * Returns true if we shouldn't do REDO on that block in record indicated by * block_id; false otherwise. diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index eafc061ab9..0bb40d36c3 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -530,6 +530,16 @@ def available_remote_storages() -> List[RemoteStorageKind]: return remote_storages +def available_s3_storages() -> List[RemoteStorageKind]: + remote_storages = [RemoteStorageKind.MOCK_S3] + if os.getenv("ENABLE_REAL_S3_REMOTE_STORAGE") is not None: + remote_storages.append(RemoteStorageKind.REAL_S3) + log.info("Enabling real s3 storage for tests") + else: + log.info("Using mock implementations to test remote storage") + return remote_storages + + @dataclass class LocalFsStorage: root: Path @@ -550,6 +560,16 @@ class S3Storage: "AWS_SECRET_ACCESS_KEY": self.secret_key, } + def to_string(self) -> str: + return json.dumps( + { + "bucket": self.bucket_name, + "region": self.bucket_region, + "endpoint": self.endpoint, + "prefix": self.prefix_in_bucket, + } + ) + RemoteStorage = Union[LocalFsStorage, S3Storage] @@ -616,10 +636,12 @@ class NeonEnvBuilder: self.rust_log_override = rust_log_override self.port_distributor = port_distributor self.remote_storage = remote_storage + self.ext_remote_storage: Optional[S3Storage] = None + self.remote_storage_client: Optional[Any] = None self.remote_storage_users = remote_storage_users self.broker = broker self.run_id = run_id - self.mock_s3_server = mock_s3_server + self.mock_s3_server: MockS3Server = mock_s3_server self.pageserver_config_override = pageserver_config_override self.num_safekeepers = num_safekeepers self.safekeepers_id_start = safekeepers_id_start @@ -667,15 +689,24 @@ class NeonEnvBuilder: remote_storage_kind: RemoteStorageKind, test_name: str, force_enable: bool = True, + enable_remote_extensions: bool = False, ): if remote_storage_kind == RemoteStorageKind.NOOP: return elif remote_storage_kind == RemoteStorageKind.LOCAL_FS: self.enable_local_fs_remote_storage(force_enable=force_enable) elif remote_storage_kind == RemoteStorageKind.MOCK_S3: - 
self.enable_mock_s3_remote_storage(bucket_name=test_name, force_enable=force_enable) + self.enable_mock_s3_remote_storage( + bucket_name=test_name, + force_enable=force_enable, + enable_remote_extensions=enable_remote_extensions, + ) elif remote_storage_kind == RemoteStorageKind.REAL_S3: - self.enable_real_s3_remote_storage(test_name=test_name, force_enable=force_enable) + self.enable_real_s3_remote_storage( + test_name=test_name, + force_enable=force_enable, + enable_remote_extensions=enable_remote_extensions, + ) else: raise RuntimeError(f"Unknown storage type: {remote_storage_kind}") @@ -689,11 +720,15 @@ class NeonEnvBuilder: assert force_enable or self.remote_storage is None, "remote storage is enabled already" self.remote_storage = LocalFsStorage(Path(self.repo_dir / "local_fs_remote_storage")) - def enable_mock_s3_remote_storage(self, bucket_name: str, force_enable: bool = True): + def enable_mock_s3_remote_storage( + self, bucket_name: str, force_enable: bool = True, enable_remote_extensions: bool = False + ): """ Sets up the pageserver to use the S3 mock server, creates the bucket, if it's not present already. Starts up the mock server, if that does not run yet. Errors, if the pageserver has some remote storage configuration already, unless `force_enable` is not set to `True`. + + Also creates the bucket for extensions, self.ext_remote_storage bucket """ assert force_enable or self.remote_storage is None, "remote storage is enabled already" mock_endpoint = self.mock_s3_server.endpoint() @@ -714,9 +749,22 @@ class NeonEnvBuilder: bucket_region=mock_region, access_key=self.mock_s3_server.access_key(), secret_key=self.mock_s3_server.secret_key(), + prefix_in_bucket="pageserver", ) - def enable_real_s3_remote_storage(self, test_name: str, force_enable: bool = True): + if enable_remote_extensions: + self.ext_remote_storage = S3Storage( + bucket_name=bucket_name, + endpoint=mock_endpoint, + bucket_region=mock_region, + access_key=self.mock_s3_server.access_key(), + secret_key=self.mock_s3_server.secret_key(), + prefix_in_bucket="ext", + ) + + def enable_real_s3_remote_storage( + self, test_name: str, force_enable: bool = True, enable_remote_extensions: bool = False + ): """ Sets up configuration to use real s3 endpoint without mock server """ @@ -756,6 +804,15 @@ class NeonEnvBuilder: prefix_in_bucket=self.remote_storage_prefix, ) + if enable_remote_extensions: + self.ext_remote_storage = S3Storage( + bucket_name="neon-dev-extensions", + bucket_region="eu-central-1", + access_key=access_key, + secret_key=secret_key, + prefix_in_bucket="5555", + ) + def cleanup_local_storage(self): if self.preserve_database_files: return @@ -789,6 +846,7 @@ class NeonEnvBuilder: # `self.remote_storage_prefix` is coupled with `S3Storage` storage type, # so this line effectively a no-op assert isinstance(self.remote_storage, S3Storage) + assert self.remote_storage_client is not None if self.keep_remote_storage_contents: log.info("keep_remote_storage_contents skipping remote storage cleanup") @@ -918,6 +976,8 @@ class NeonEnv: self.neon_binpath = config.neon_binpath self.pg_distrib_dir = config.pg_distrib_dir self.endpoint_counter = 0 + self.remote_storage_client = config.remote_storage_client + self.ext_remote_storage = config.ext_remote_storage # generate initial tenant ID here instead of letting 'neon init' generate it, # so that we don't need to dig it out of the config file afterwards. 
@@ -1504,6 +1564,7 @@ class NeonCli(AbstractNeonCli): safekeepers: Optional[List[int]] = None, tenant_id: Optional[TenantId] = None, lsn: Optional[Lsn] = None, + remote_ext_config: Optional[str] = None, ) -> "subprocess.CompletedProcess[str]": args = [ "endpoint", @@ -1513,6 +1574,8 @@ class NeonCli(AbstractNeonCli): "--pg-version", self.env.pg_version, ] + if remote_ext_config is not None: + args.extend(["--remote-ext-config", remote_ext_config]) if lsn is not None: args.append(f"--lsn={lsn}") args.extend(["--pg-port", str(pg_port)]) @@ -2371,7 +2434,7 @@ class Endpoint(PgProtocol): return self - def start(self) -> "Endpoint": + def start(self, remote_ext_config: Optional[str] = None) -> "Endpoint": """ Start the Postgres instance. Returns self. @@ -2387,6 +2450,7 @@ class Endpoint(PgProtocol): http_port=self.http_port, tenant_id=self.tenant_id, safekeepers=self.active_safekeepers, + remote_ext_config=remote_ext_config, ) self.running = True @@ -2476,6 +2540,7 @@ class Endpoint(PgProtocol): hot_standby: bool = False, lsn: Optional[Lsn] = None, config_lines: Optional[List[str]] = None, + remote_ext_config: Optional[str] = None, ) -> "Endpoint": """ Create an endpoint, apply config, and start Postgres. @@ -2490,7 +2555,7 @@ class Endpoint(PgProtocol): config_lines=config_lines, hot_standby=hot_standby, lsn=lsn, - ).start() + ).start(remote_ext_config=remote_ext_config) log.info(f"Postgres startup took {time.time() - started_at} seconds") @@ -2524,6 +2589,7 @@ class EndpointFactory: lsn: Optional[Lsn] = None, hot_standby: bool = False, config_lines: Optional[List[str]] = None, + remote_ext_config: Optional[str] = None, ) -> Endpoint: ep = Endpoint( self.env, @@ -2540,6 +2606,7 @@ class EndpointFactory: hot_standby=hot_standby, config_lines=config_lines, lsn=lsn, + remote_ext_config=remote_ext_config, ) def create( diff --git a/test_runner/fixtures/types.py b/test_runner/fixtures/types.py index 7d179cc7fb..ef88e09de4 100644 --- a/test_runner/fixtures/types.py +++ b/test_runner/fixtures/types.py @@ -89,6 +89,9 @@ class TenantId(Id): def __repr__(self) -> str: return f'`TenantId("{self.id.hex()}")' + def __str__(self) -> str: + return self.id.hex() + class TimelineId(Id): def __repr__(self) -> str: diff --git a/test_runner/regress/data/extension_test/v14/anon.tar.gz b/test_runner/regress/data/extension_test/v14/anon.tar.gz new file mode 100644 index 0000000000..4c7959fe8b Binary files /dev/null and b/test_runner/regress/data/extension_test/v14/anon.tar.gz differ diff --git a/test_runner/regress/data/extension_test/v14/embedding.tar.gz b/test_runner/regress/data/extension_test/v14/embedding.tar.gz new file mode 100644 index 0000000000..98ba0a5c2c Binary files /dev/null and b/test_runner/regress/data/extension_test/v14/embedding.tar.gz differ diff --git a/test_runner/regress/data/extension_test/v14/ext_index.json b/test_runner/regress/data/extension_test/v14/ext_index.json new file mode 100644 index 0000000000..dd84369e30 --- /dev/null +++ b/test_runner/regress/data/extension_test/v14/ext_index.json @@ -0,0 +1,14 @@ +{ + "enabled_extensions": { + "123454321": [ + "anon" + ], + "public": [ + "embedding" + ] + }, + "control_data": { + "embedding": "comment = 'hnsw index' \ndefault_version = '0.1.0' \nmodule_pathname = '$libdir/embedding' \nrelocatable = true \ntrusted = true", + "anon": "# PostgreSQL Anonymizer (anon) extension \ncomment = 'Data anonymization tools' \ndefault_version = '1.1.0' \ndirectory='extension/anon' \nrelocatable = false \nrequires = 'pgcrypto' \nsuperuser = false 
\nmodule_pathname = '$libdir/anon' \ntrusted = true \n" + } +} diff --git a/test_runner/regress/data/extension_test/v15/anon.tar.gz b/test_runner/regress/data/extension_test/v15/anon.tar.gz new file mode 100644 index 0000000000..8c4fc44967 Binary files /dev/null and b/test_runner/regress/data/extension_test/v15/anon.tar.gz differ diff --git a/test_runner/regress/data/extension_test/v15/embedding.tar.gz b/test_runner/regress/data/extension_test/v15/embedding.tar.gz new file mode 100644 index 0000000000..4fa980ba13 Binary files /dev/null and b/test_runner/regress/data/extension_test/v15/embedding.tar.gz differ diff --git a/test_runner/regress/data/extension_test/v15/ext_index.json b/test_runner/regress/data/extension_test/v15/ext_index.json new file mode 100644 index 0000000000..7fa10701f4 --- /dev/null +++ b/test_runner/regress/data/extension_test/v15/ext_index.json @@ -0,0 +1,14 @@ +{ + "enabled_extensions": { + "123454321": [ + "anon" + ], + "public": [ + "embedding" + ] + }, + "control_data": { + "embedding": "comment = 'hnsw index' \ndefault_version = '0.1.0' \nmodule_pathname = '$libdir/embedding' \nrelocatable = true \ntrusted = true", + "anon": "# PostgreSQL Anonymizer (anon) extension \ncomment = 'Data anonymization tools' \ndefault_version = '1.1.0' \ndirectory='extension/anon' \nrelocatable = false \nrequires = 'pgcrypto' \nsuperuser = false \nmodule_pathname = '$libdir/anon' \ntrusted = true \n" + } +} \ No newline at end of file diff --git a/test_runner/regress/test_download_extensions.py b/test_runner/regress/test_download_extensions.py new file mode 100644 index 0000000000..2ce96eb7c0 --- /dev/null +++ b/test_runner/regress/test_download_extensions.py @@ -0,0 +1,122 @@ +import os +import shutil +from contextlib import closing + +import pytest +from fixtures.log_helper import log +from fixtures.neon_fixtures import ( + NeonEnvBuilder, + RemoteStorageKind, + available_s3_storages, +) +from fixtures.pg_version import PgVersion + +# Generate mock extension files and upload them to the mock bucket. +# +# NOTE: You must have appropriate AWS credentials to run REAL_S3 test. +# It may also be necessary to set the following environment variables for MOCK_S3 test: +# export AWS_ACCESS_KEY_ID='test' # export AWS_SECRET_ACCESS_KEY='test' +# export AWS_SECURITY_TOKEN='test' # export AWS_SESSION_TOKEN='test' +# export AWS_DEFAULT_REGION='us-east-1' + + +@pytest.mark.parametrize("remote_storage_kind", available_s3_storages()) +def test_remote_extensions( + neon_env_builder: NeonEnvBuilder, + remote_storage_kind: RemoteStorageKind, + pg_version: PgVersion, +): + neon_env_builder.enable_remote_storage( + remote_storage_kind=remote_storage_kind, + test_name="test_remote_extensions", + enable_remote_extensions=True, + ) + neon_env_builder.num_safekeepers = 3 + env = neon_env_builder.init_start() + tenant_id, _ = env.neon_cli.create_tenant() + env.neon_cli.create_timeline("test_remote_extensions", tenant_id=tenant_id) + + # For MOCK_S3 we upload test files. 
+    # For REAL_S3 we use the files already in the bucket
+    if remote_storage_kind == RemoteStorageKind.MOCK_S3:
+        log.info("Uploading test files to mock bucket")
+
+        def upload_test_file(from_path, to_path):
+            assert env.ext_remote_storage is not None  # satisfy mypy
+            assert env.remote_storage_client is not None  # satisfy mypy
+            with open(
+                f"test_runner/regress/data/extension_test/v{pg_version}/{from_path}", "rb"
+            ) as f:
+                env.remote_storage_client.upload_fileobj(
+                    f,
+                    env.ext_remote_storage.bucket_name,
+                    f"ext/v{pg_version}/{to_path}",
+                )
+
+        upload_test_file("ext_index.json", "ext_index.json")
+        upload_test_file("anon.tar.gz", "extensions/anon.tar.gz")
+        upload_test_file("embedding.tar.gz", "extensions/embedding.tar.gz")
+
+    assert env.ext_remote_storage is not None  # satisfy mypy
+    assert env.remote_storage_client is not None  # satisfy mypy
+    try:
+        # Start a compute node and check that it can download the extensions
+        # and use them to CREATE EXTENSION and LOAD
+        endpoint = env.endpoints.create_start(
+            "test_remote_extensions",
+            tenant_id=tenant_id,
+            remote_ext_config=env.ext_remote_storage.to_string(),
+            # config_lines=["log_min_messages=debug3"],
+        )
+        with closing(endpoint.connect()) as conn:
+            with conn.cursor() as cur:
+                # Check that appropriate control files were downloaded
+                cur.execute("SELECT * FROM pg_available_extensions")
+                all_extensions = [x[0] for x in cur.fetchall()]
+                log.info(all_extensions)
+                assert "anon" in all_extensions
+                assert "embedding" in all_extensions
+                # TODO: check that we can't download custom extensions for other tenant ids
+
+                # check that we can download a public extension
+                cur.execute("CREATE EXTENSION embedding")
+                cur.execute("SELECT extname FROM pg_extension")
+                assert "embedding" in [x[0] for x in cur.fetchall()]
+
+                # check that we can download a private extension
+                try:
+                    cur.execute("CREATE EXTENSION anon")
+                except Exception as err:
+                    log.info("error creating anon extension")
+                    assert "pgcrypto" in str(err), "unexpected error creating anon extension"
+
+        # TODO: try to load libraries as well
+
+    finally:
+        # Cleaning up downloaded files is important for local tests
+        # or else one test could reuse the files from another test or another test run
+        cleanup_files = [
+            "lib/postgresql/anon.so",
+            "lib/postgresql/embedding.so",
+            "share/postgresql/extension/anon.control",
+            "share/postgresql/extension/embedding--0.1.0.sql",
+            "share/postgresql/extension/embedding.control",
+        ]
+        cleanup_files = [f"pg_install/v{pg_version}/" + x for x in cleanup_files]
+        cleanup_folders = [
+            "extensions",
+            f"pg_install/v{pg_version}/share/postgresql/extension/anon",
+            f"pg_install/v{pg_version}/extensions",
+        ]
+        for file in cleanup_files:
+            try:
+                os.remove(file)
+                log.info(f"removed file {file}")
+            except Exception as err:
+                log.info(f"error removing file {file}: {err}")
+        for folder in cleanup_folders:
+            try:
+                shutil.rmtree(folder)
+                log.info(f"removed folder {folder}")
+            except Exception as err:
+                log.info(f"error removing folder {folder}: {err}")
diff --git a/test_runner/regress/test_timeline_delete.py b/test_runner/regress/test_timeline_delete.py
index a4c5bf626a..1f6e702ac0 100644
--- a/test_runner/regress/test_timeline_delete.py
+++ b/test_runner/regress/test_timeline_delete.py
@@ -276,6 +276,7 @@ def assert_prefix_empty(neon_env_builder: NeonEnvBuilder, prefix: Optional[str]
    assert isinstance(neon_env_builder.remote_storage, S3Storage)

    # Note that this doesnt use pagination, so list is not guaranteed to be exhaustive.
+ assert neon_env_builder.remote_storage_client is not None response = neon_env_builder.remote_storage_client.list_objects_v2( Bucket=neon_env_builder.remote_storage.bucket_name, Prefix=prefix or neon_env_builder.remote_storage.prefix_in_bucket or "", @@ -630,7 +631,7 @@ def test_timeline_delete_works_for_remote_smoke( ) # for some reason the check above doesnt immediately take effect for the below. - # Assume it is mock server incosistency and check twice. + # Assume it is mock server inconsistency and check twice. wait_until( 2, 0.5, diff --git a/vendor/postgres-v14 b/vendor/postgres-v14 index 12c5dc8281..93a5ee7749 160000 --- a/vendor/postgres-v14 +++ b/vendor/postgres-v14 @@ -1 +1 @@ -Subproject commit 12c5dc8281d20b5bd636e1097eea80a7bc609591 +Subproject commit 93a5ee7749f109ecb9e5481be485c8cb17fe72ce diff --git a/vendor/postgres-v15 b/vendor/postgres-v15 index e3fbfc4d14..293a06e5e1 160000 --- a/vendor/postgres-v15 +++ b/vendor/postgres-v15 @@ -1 +1 @@ -Subproject commit e3fbfc4d143b2d3c3c1813ce747f8af35aa9405e +Subproject commit 293a06e5e14ed9be3f5002c63b4fac391491ec17 diff --git a/vendor/revisions.json b/vendor/revisions.json index 18da5900a8..52971b0587 100644 --- a/vendor/revisions.json +++ b/vendor/revisions.json @@ -1,4 +1,4 @@ { - "postgres-v15": "e3fbfc4d143b2d3c3c1813ce747f8af35aa9405e", - "postgres-v14": "12c5dc8281d20b5bd636e1097eea80a7bc609591" + "postgres-v15": "293a06e5e14ed9be3f5002c63b4fac391491ec17", + "postgres-v14": "93a5ee7749f109ecb9e5481be485c8cb17fe72ce" }