mirror of https://github.com/neondatabase/neon.git
Merge branch 'main' into communicator-rewrite
@@ -48,7 +48,7 @@ use postgres_connection::parse_host_port;
 use safekeeper_api::membership::{SafekeeperGeneration, SafekeeperId};
 use safekeeper_api::{
     DEFAULT_HTTP_LISTEN_PORT as DEFAULT_SAFEKEEPER_HTTP_PORT,
-    DEFAULT_PG_LISTEN_PORT as DEFAULT_SAFEKEEPER_PG_PORT,
+    DEFAULT_PG_LISTEN_PORT as DEFAULT_SAFEKEEPER_PG_PORT, PgMajorVersion, PgVersionId,
 };
 use storage_broker::DEFAULT_LISTEN_ADDR as DEFAULT_BROKER_ADDR;
 use tokio::task::JoinSet;
@@ -64,7 +64,7 @@ const DEFAULT_PAGESERVER_ID: NodeId = NodeId(1);
 const DEFAULT_BRANCH_NAME: &str = "main";
 project_git_version!(GIT_VERSION);

-const DEFAULT_PG_VERSION: u32 = 17;
+const DEFAULT_PG_VERSION: PgMajorVersion = PgMajorVersion::PG17;

 const DEFAULT_PAGESERVER_CONTROL_PLANE_API: &str = "http://127.0.0.1:1234/upcall/v1/";

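Note: the definition of `safekeeper_api::PgMajorVersion` is not part of this diff. Judging from how it is used below (ordered comparisons, a `v_str()` helper, conversion into `PgVersionId`, clap's `default_value_t`), a minimal sketch of such a type might look like the following; this is an illustrative assumption, not the crate's actual API:

// Hypothetical sketch of a typed Postgres major version; the real
// safekeeper_api::PgMajorVersion may differ in derives and methods.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum PgMajorVersion {
    PG14,
    PG15,
    PG16,
    PG17,
}

impl PgMajorVersion {
    /// Directory-style name used under the Postgres install dir, e.g. "v17".
    pub fn v_str(&self) -> &'static str {
        match self {
            PgMajorVersion::PG14 => "v14",
            PgMajorVersion::PG15 => "v15",
            PgMajorVersion::PG16 => "v16",
            PgMajorVersion::PG17 => "v17",
        }
    }
}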
@@ -169,7 +169,7 @@ struct TenantCreateCmdArgs {

     #[arg(default_value_t = DEFAULT_PG_VERSION)]
     #[clap(long, help = "Postgres version to use for the initial timeline")]
-    pg_version: u32,
+    pg_version: PgMajorVersion,

     #[clap(
         long,
@@ -292,7 +292,7 @@ struct TimelineCreateCmdArgs {

     #[arg(default_value_t = DEFAULT_PG_VERSION)]
     #[clap(long, help = "Postgres version")]
-    pg_version: u32,
+    pg_version: PgMajorVersion,
 }

 #[derive(clap::Args)]
@@ -324,7 +324,7 @@ struct TimelineImportCmdArgs {

     #[arg(default_value_t = DEFAULT_PG_VERSION)]
     #[clap(long, help = "Postgres version of the backup being imported")]
-    pg_version: u32,
+    pg_version: PgMajorVersion,
 }

 #[derive(clap::Subcommand)]
@@ -603,7 +603,7 @@ struct EndpointCreateCmdArgs {

     #[arg(default_value_t = DEFAULT_PG_VERSION)]
     #[clap(long, help = "Postgres version")]
-    pg_version: u32,
+    pg_version: PgMajorVersion,

     /// Use gRPC to communicate with Pageservers, by generating grpc:// connstrings.
     ///
@@ -919,7 +919,7 @@ fn print_timeline(
            br_sym = "┗━";
        }

-        print!("{} @{}: ", br_sym, ancestor_lsn);
+        print!("{br_sym} @{ancestor_lsn}: ");
    }

    // Finally print a timeline id and name with new line
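The `print!`/`eprintln!`/`format!` edits in this and many later hunks are the same mechanical modernization: Rust 2021 format strings can capture identifiers directly, which clippy's `uninlined_format_args` lint encourages. A standalone illustration (not taken from the diff):

fn main() {
    let name = "pageserver";
    println!("{} stopped", name); // older positional style
    println!("{name} stopped");   // captured-identifier style used throughout this diff
}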
@@ -1295,7 +1295,7 @@ async fn handle_timeline(cmd: &TimelineCmd, env: &mut local_env::LocalEnv) -> Re
                },
                new_members: None,
            };
-            let pg_version = args.pg_version * 10000;
+            let pg_version = PgVersionId::from(args.pg_version);
            let req = safekeeper_api::models::TimelineCreateRequest {
                tenant_id,
                timeline_id,
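For context on the change above: the removed code produced Postgres's numeric version-id form by hand (major * 10000, so 17 becomes 170000). `PgVersionId::from(PgMajorVersion)` presumably encapsulates the same conversion; a hypothetical stand-in to show the arithmetic:

// Hypothetical helper mirroring what PgVersionId::from is assumed to do:
// Postgres encodes its server version number as major * 10000 (+ minor).
fn version_id_from_major(major: u32) -> u32 {
    major * 10000
}

fn main() {
    assert_eq!(version_id_from_major(17), 170_000);
}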
@@ -1677,7 +1677,7 @@ async fn handle_endpoint(subcmd: &EndpointCmd, env: &local_env::LocalEnv) -> Res
            // safekeeper nodes; otherwise all from the env.
            let safekeepers = parse_safekeepers(&args.safekeepers)?;
            endpoint
-                .reconfigure(pageserver_conninfo, None, safekeepers)
+                .reconfigure(Some(pageserver_conninfo), None, safekeepers, None)
                .await?;
        }
        EndpointCmd::Stop(args) => {
@@ -1771,7 +1771,7 @@ async fn handle_pageserver(subcmd: &PageserverCmd, env: &local_env::LocalEnv) ->
                StopMode::Immediate => true,
            };
            if let Err(e) = get_pageserver(env, args.pageserver_id)?.stop(immediate) {
-                eprintln!("pageserver stop failed: {}", e);
+                eprintln!("pageserver stop failed: {e}");
                exit(1);
            }
        }
@@ -1780,7 +1780,7 @@ async fn handle_pageserver(subcmd: &PageserverCmd, env: &local_env::LocalEnv) ->
            let pageserver = get_pageserver(env, args.pageserver_id)?;
            //TODO what shutdown strategy should we use here?
            if let Err(e) = pageserver.stop(false) {
-                eprintln!("pageserver stop failed: {}", e);
+                eprintln!("pageserver stop failed: {e}");
                exit(1);
            }

@@ -1797,7 +1797,7 @@ async fn handle_pageserver(subcmd: &PageserverCmd, env: &local_env::LocalEnv) ->
            {
                Ok(_) => println!("Page server is up and running"),
                Err(err) => {
-                    eprintln!("Page server is not available: {}", err);
+                    eprintln!("Page server is not available: {err}");
                    exit(1);
                }
            }
@@ -1834,7 +1834,7 @@ async fn handle_storage_controller(
            },
        };
        if let Err(e) = svc.stop(stop_args).await {
-            eprintln!("stop failed: {}", e);
+            eprintln!("stop failed: {e}");
            exit(1);
        }
    }
@@ -1856,7 +1856,7 @@ async fn handle_safekeeper(subcmd: &SafekeeperCmd, env: &local_env::LocalEnv) ->
            let safekeeper = get_safekeeper(env, args.id)?;

            if let Err(e) = safekeeper.start(&args.extra_opt, &args.start_timeout).await {
-                eprintln!("safekeeper start failed: {}", e);
+                eprintln!("safekeeper start failed: {e}");
                exit(1);
            }
        }
@@ -1868,7 +1868,7 @@ async fn handle_safekeeper(subcmd: &SafekeeperCmd, env: &local_env::LocalEnv) ->
                StopMode::Immediate => true,
            };
            if let Err(e) = safekeeper.stop(immediate) {
-                eprintln!("safekeeper stop failed: {}", e);
+                eprintln!("safekeeper stop failed: {e}");
                exit(1);
            }
        }
@@ -1881,12 +1881,12 @@ async fn handle_safekeeper(subcmd: &SafekeeperCmd, env: &local_env::LocalEnv) ->
            };

            if let Err(e) = safekeeper.stop(immediate) {
-                eprintln!("safekeeper stop failed: {}", e);
+                eprintln!("safekeeper stop failed: {e}");
                exit(1);
            }

            if let Err(e) = safekeeper.start(&args.extra_opt, &args.start_timeout).await {
-                eprintln!("safekeeper start failed: {}", e);
+                eprintln!("safekeeper start failed: {e}");
                exit(1);
            }
        }
@@ -2142,7 +2142,7 @@ async fn try_stop_all(env: &local_env::LocalEnv, immediate: bool) {

    let storage = EndpointStorage::from_env(env);
    if let Err(e) = storage.stop(immediate) {
-        eprintln!("endpoint_storage stop failed: {:#}", e);
+        eprintln!("endpoint_storage stop failed: {e:#}");
    }

    for ps_conf in &env.pageservers {
@@ -71,6 +71,7 @@ use nix::sys::signal::{Signal, kill};
 use pageserver_api::shard::ShardStripeSize;
 use pem::Pem;
 use reqwest::header::CONTENT_TYPE;
+use safekeeper_api::PgMajorVersion;
 use safekeeper_api::membership::SafekeeperGeneration;
 use serde::{Deserialize, Serialize};
 use sha2::{Digest, Sha256};
@@ -92,7 +93,7 @@ pub struct EndpointConf {
     pg_port: u16,
     external_http_port: u16,
     internal_http_port: u16,
-    pg_version: u32,
+    pg_version: PgMajorVersion,
     grpc: bool,
     skip_pg_catalog_updates: bool,
     reconfigure_concurrency: usize,
@@ -195,7 +196,7 @@ impl ComputeControlPlane {
        pg_port: Option<u16>,
        external_http_port: Option<u16>,
        internal_http_port: Option<u16>,
-        pg_version: u32,
+        pg_version: PgMajorVersion,
        mode: ComputeMode,
        grpc: bool,
        skip_pg_catalog_updates: bool,
@@ -315,7 +316,7 @@ pub struct Endpoint {
    pub internal_http_address: SocketAddr,

    // postgres major version in the format: 14, 15, etc.
-    pg_version: u32,
+    pg_version: PgMajorVersion,

    // These are not part of the endpoint as such, but the environment
    // the endpoint runs in.
@@ -375,29 +376,6 @@ impl std::fmt::Display for EndpointTerminateMode {
    }
}

-/// Protocol used to connect to a Pageserver.
-#[derive(Clone, Copy, Debug)]
-pub enum PageserverProtocol {
-    Libpq,
-    Grpc,
-}
-
-impl PageserverProtocol {
-    /// Returns the URL scheme for the protocol, used in connstrings.
-    pub fn scheme(&self) -> &'static str {
-        match self {
-            Self::Libpq => "postgresql",
-            Self::Grpc => "grpc",
-        }
-    }
-}
-
-impl Display for PageserverProtocol {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        f.write_str(self.scheme())
-    }
-}
-
impl Endpoint {
    fn from_dir_entry(entry: std::fs::DirEntry, env: &LocalEnv) -> Result<Endpoint> {
        if !entry.file_type()?.is_dir() {
@@ -560,7 +538,7 @@ impl Endpoint {
                conf.append("hot_standby", "on");
                // prefetching of blocks referenced in WAL doesn't make sense for us
                // Neon hot standby ignores pages that are not in the shared_buffers
-                if self.pg_version >= 15 {
+                if self.pg_version >= PgMajorVersion::PG15 {
                    conf.append("recovery_prefetch", "off");
                }
            }
@@ -838,10 +816,10 @@ impl Endpoint {

        // Launch compute_ctl
        let conn_str = self.connstr("cloud_admin", "postgres");
-        println!("Starting postgres node at '{}'", conn_str);
+        println!("Starting postgres node at '{conn_str}'");
        if create_test_user {
            let conn_str = self.connstr("test", "neondb");
-            println!("Also at '{}'", conn_str);
+            println!("Also at '{conn_str}'");
        }
        let mut cmd = Command::new(self.env.neon_distrib_dir.join("compute_ctl"));
        cmd.args([
@@ -940,8 +918,7 @@ impl Endpoint {
                Err(e) => {
                    if Instant::now().duration_since(start_at) > start_timeout {
                        return Err(e).context(format!(
-                            "timed out {:?} waiting to connect to compute_ctl HTTP",
-                            start_timeout,
+                            "timed out {start_timeout:?} waiting to connect to compute_ctl HTTP",
                        ));
                    }
                }
@@ -980,7 +957,7 @@ impl Endpoint {
            // reqwest does not export its error construction utility functions, so let's craft the message ourselves
            let url = response.url().to_owned();
            let msg = match response.text().await {
-                Ok(err_body) => format!("Error: {}", err_body),
+                Ok(err_body) => format!("Error: {err_body}"),
                Err(_) => format!("Http error ({}) at {}.", status.as_u16(), url),
            };
            Err(anyhow::anyhow!(msg))
@@ -989,15 +966,11 @@ impl Endpoint {

    pub async fn reconfigure(
        &self,
-        pageserver_conninfo: PageserverConnectionInfo,
+        pageserver_conninfo: Option<PageserverConnectionInfo>,
        stripe_size: Option<ShardStripeSize>,
        safekeepers: Option<Vec<NodeId>>,
+        safekeeper_generation: Option<SafekeeperGeneration>,
    ) -> Result<()> {
-        anyhow::ensure!(
-            !pageserver_conninfo.shards.is_empty(),
-            "no pageservers provided"
-        );
-
        let (mut spec, compute_ctl_config) = {
            let config_path = self.endpoint_path().join("config.json");
            let file = std::fs::File::open(config_path)?;
@@ -1009,7 +982,15 @@ impl Endpoint {
        let postgresql_conf = self.read_postgresql_conf()?;
        spec.cluster.postgresql_conf = Some(postgresql_conf);

-        spec.pageserver_connection_info = Some(pageserver_conninfo);
+        if let Some(pageserver_conninfo) = pageserver_conninfo {
+            // If pageservers are provided, we need to ensure that they are not empty.
+            // This is a requirement for the compute_ctl configuration.
+            anyhow::ensure!(
+                !pageserver_conninfo.shards.is_empty(),
+                "no pageservers provided"
+            );
+            spec.pageserver_connection_info = Some(pageserver_conninfo);
+        }
        if stripe_size.is_some() {
            spec.shard_stripe_size = stripe_size.map(|s| s.0 as usize);
        }
@@ -1018,6 +999,9 @@ impl Endpoint {
        if let Some(safekeepers) = safekeepers {
            let safekeeper_connstrings = self.build_safekeepers_connstrs(safekeepers)?;
            spec.safekeeper_connstrings = safekeeper_connstrings;
+            if let Some(g) = safekeeper_generation {
+                spec.safekeepers_generation = Some(g.into_inner());
+            }
        }

        let client = reqwest::Client::builder()
@@ -1048,13 +1032,31 @@ impl Endpoint {
        } else {
            let url = response.url().to_owned();
            let msg = match response.text().await {
-                Ok(err_body) => format!("Error: {}", err_body),
+                Ok(err_body) => format!("Error: {err_body}"),
                Err(_) => format!("Http error ({}) at {}.", status.as_u16(), url),
            };
            Err(anyhow::anyhow!(msg))
        }
    }

+    pub async fn reconfigure_pageservers(
+        &self,
+        pageservers: PageserverConnectionInfo,
+        stripe_size: Option<ShardStripeSize>,
+    ) -> Result<()> {
+        self.reconfigure(Some(pageservers), stripe_size, None, None)
+            .await
+    }
+
+    pub async fn reconfigure_safekeepers(
+        &self,
+        safekeepers: Vec<NodeId>,
+        generation: SafekeeperGeneration,
+    ) -> Result<()> {
+        self.reconfigure(None, None, Some(safekeepers), Some(generation))
+            .await
+    }
+
    pub async fn stop(
        &self,
        mode: EndpointTerminateMode,
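Taken together, the hunks above make every part of `Endpoint::reconfigure` optional and add two thin wrappers. A usage sketch, with imports omitted; `endpoint`, `conninfo`, `sk_ids`, and `generation` are placeholders, not values appearing in this diff, while the method signatures match the hunk above:

// Illustrative only, assuming the types from the surrounding crate are in scope.
async fn example(
    endpoint: &Endpoint,
    conninfo: PageserverConnectionInfo,
    sk_ids: Vec<NodeId>,
    generation: SafekeeperGeneration,
) -> anyhow::Result<()> {
    // Repoint the endpoint at new pageservers, leaving safekeepers untouched:
    endpoint.reconfigure_pageservers(conninfo, None).await?;
    // Switch safekeepers and record the new membership generation:
    endpoint.reconfigure_safekeepers(sk_ids, generation).await?;
    Ok(())
}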
@@ -12,9 +12,11 @@ use std::{env, fs};

 use anyhow::{Context, bail};
 use clap::ValueEnum;
+use pageserver_api::config::PostHogConfig;
 use pem::Pem;
 use postgres_backend::AuthType;
 use reqwest::{Certificate, Url};
+use safekeeper_api::PgMajorVersion;
 use serde::{Deserialize, Serialize};
 use utils::auth::encode_from_key_file;
 use utils::id::{NodeId, TenantId, TenantTimelineId, TimelineId};
@@ -210,7 +212,11 @@ pub struct NeonStorageControllerConf {

    pub use_local_compute_notifications: bool,

-    pub timeline_safekeeper_count: Option<i64>,
+    pub timeline_safekeeper_count: Option<usize>,
+
+    pub posthog_config: Option<PostHogConfig>,
+
+    pub kick_secondary_downloads: Option<bool>,
}

impl NeonStorageControllerConf {
@@ -242,6 +248,8 @@ impl Default for NeonStorageControllerConf {
            use_https_safekeeper_api: false,
            use_local_compute_notifications: true,
            timeline_safekeeper_count: None,
+            posthog_config: None,
+            kick_secondary_downloads: None,
        }
    }
}
@@ -257,7 +265,7 @@ impl Default for EndpointStorageConf {
impl NeonBroker {
    pub fn client_url(&self) -> Url {
        let url = if let Some(addr) = self.listen_https_addr {
-            format!("https://{}", addr)
+            format!("https://{addr}")
        } else {
            format!(
                "http://{}",
@@ -421,25 +429,21 @@ impl LocalEnv {
        self.pg_distrib_dir.clone()
    }

-    pub fn pg_distrib_dir(&self, pg_version: u32) -> anyhow::Result<PathBuf> {
+    pub fn pg_distrib_dir(&self, pg_version: PgMajorVersion) -> anyhow::Result<PathBuf> {
        let path = self.pg_distrib_dir.clone();

-        #[allow(clippy::manual_range_patterns)]
-        match pg_version {
-            14 | 15 | 16 | 17 => Ok(path.join(format!("v{pg_version}"))),
-            _ => bail!("Unsupported postgres version: {}", pg_version),
-        }
+        Ok(path.join(pg_version.v_str()))
    }

-    pub fn pg_dir(&self, pg_version: u32, dir_name: &str) -> anyhow::Result<PathBuf> {
+    pub fn pg_dir(&self, pg_version: PgMajorVersion, dir_name: &str) -> anyhow::Result<PathBuf> {
        Ok(self.pg_distrib_dir(pg_version)?.join(dir_name))
    }

-    pub fn pg_bin_dir(&self, pg_version: u32) -> anyhow::Result<PathBuf> {
+    pub fn pg_bin_dir(&self, pg_version: PgMajorVersion) -> anyhow::Result<PathBuf> {
        self.pg_dir(pg_version, "bin")
    }

-    pub fn pg_lib_dir(&self, pg_version: u32) -> anyhow::Result<PathBuf> {
+    pub fn pg_lib_dir(&self, pg_version: PgMajorVersion) -> anyhow::Result<PathBuf> {
        self.pg_dir(pg_version, "lib")
    }

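The practical effect of the `pg_distrib_dir` change above: the per-version subdirectory name now comes from the version type itself rather than a hand-written match over supported numbers, and unsupported versions become unrepresentable instead of a runtime `bail!`. A self-contained approximation of the resulting path logic (the base directory name is a made-up example):

use std::path::PathBuf;

// Approximation of the new behaviour: join the distrib dir with the
// version's directory name ("v14".."v17").
fn pg_distrib_subdir(base: &str, v_str: &str) -> PathBuf {
    PathBuf::from(base).join(v_str)
}

fn main() {
    assert_eq!(
        pg_distrib_subdir("pg_install", "v17"),
        PathBuf::from("pg_install/v17")
    );
}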
@@ -730,7 +734,7 @@ impl LocalEnv {
            let config_toml_path = dentry.path().join("pageserver.toml");
            let config_toml: PageserverConfigTomlSubset = toml_edit::de::from_str(
                &std::fs::read_to_string(&config_toml_path)
-                    .with_context(|| format!("read {:?}", config_toml_path))?,
+                    .with_context(|| format!("read {config_toml_path:?}"))?,
            )
            .context("parse pageserver.toml")?;
            let identity_toml_path = dentry.path().join("identity.toml");
@@ -740,7 +744,7 @@ impl LocalEnv {
            }
            let identity_toml: IdentityTomlSubset = toml_edit::de::from_str(
                &std::fs::read_to_string(&identity_toml_path)
-                    .with_context(|| format!("read {:?}", identity_toml_path))?,
+                    .with_context(|| format!("read {identity_toml_path:?}"))?,
            )
            .context("parse identity.toml")?;
            let PageserverConfigTomlSubset {
@@ -22,6 +22,7 @@ use pageserver_api::shard::TenantShardId;
 use pageserver_client::mgmt_api;
 use postgres_backend::AuthType;
 use postgres_connection::{PgConnectionConfig, parse_host_port};
+use safekeeper_api::PgMajorVersion;
 use utils::auth::{Claims, Scope};
 use utils::id::{NodeId, TenantId, TimelineId};
 use utils::lsn::Lsn;
@@ -121,7 +122,7 @@ impl PageServerNode {
                .env
                .generate_auth_token(&Claims::new(None, Scope::GenerationsApi))
                .unwrap();
-            overrides.push(format!("control_plane_api_token='{}'", jwt_token));
+            overrides.push(format!("control_plane_api_token='{jwt_token}'"));
        }

        if !conf.other.contains_key("remote_storage") {
@@ -607,7 +608,7 @@ impl PageServerNode {
        timeline_id: TimelineId,
        base: (Lsn, PathBuf),
        pg_wal: Option<(Lsn, PathBuf)>,
-        pg_version: u32,
+        pg_version: PgMajorVersion,
    ) -> anyhow::Result<()> {
        // Init base reader
        let (start_lsn, base_tarfile_path) = base;
@@ -143,7 +143,7 @@ impl SafekeeperNode {
        let id_string = id.to_string();
        // TODO: add availability_zone to the config.
        // Right now we just specify any value here and use it to check metrics in tests.
-        let availability_zone = format!("sk-{}", id_string);
+        let availability_zone = format!("sk-{id_string}");

        let mut args = vec![
            "-D".to_owned(),
@@ -6,6 +6,8 @@ use std::str::FromStr;
 use std::sync::OnceLock;
 use std::time::{Duration, Instant};

+use crate::background_process;
+use crate::local_env::{LocalEnv, NeonStorageControllerConf};
 use camino::{Utf8Path, Utf8PathBuf};
 use hyper0::Uri;
 use nix::unistd::Pid;
@@ -22,6 +24,7 @@ use pageserver_client::mgmt_api::ResponseErrorMessageExt;
 use pem::Pem;
 use postgres_backend::AuthType;
 use reqwest::{Method, Response};
+use safekeeper_api::PgMajorVersion;
 use serde::de::DeserializeOwned;
 use serde::{Deserialize, Serialize};
 use tokio::process::Command;
@@ -31,9 +34,6 @@ use utils::auth::{Claims, Scope, encode_from_key_file};
 use utils::id::{NodeId, TenantId};
 use whoami::username;

-use crate::background_process;
-use crate::local_env::{LocalEnv, NeonStorageControllerConf};
-
pub struct StorageController {
    env: LocalEnv,
    private_key: Option<Pem>,
@@ -48,7 +48,7 @@ pub struct StorageController {

const COMMAND: &str = "storage_controller";

-const STORAGE_CONTROLLER_POSTGRES_VERSION: u32 = 16;
+const STORAGE_CONTROLLER_POSTGRES_VERSION: PgMajorVersion = PgMajorVersion::PG16;

const DB_NAME: &str = "storage_controller";

@@ -167,7 +167,7 @@ impl StorageController {
    fn storage_controller_instance_dir(&self, instance_id: u8) -> PathBuf {
        self.env
            .base_data_dir
-            .join(format!("storage_controller_{}", instance_id))
+            .join(format!("storage_controller_{instance_id}"))
    }

    fn pid_file(&self, instance_id: u8) -> Utf8PathBuf {
@@ -184,9 +184,15 @@ impl StorageController {
    /// to other versions if that one isn't found. Some automated tests create circumstances
    /// where only one version is available in pg_distrib_dir, such as `test_remote_extensions`.
    async fn get_pg_dir(&self, dir_name: &str) -> anyhow::Result<Utf8PathBuf> {
-        let prefer_versions = [STORAGE_CONTROLLER_POSTGRES_VERSION, 16, 15, 14];
+        const PREFER_VERSIONS: [PgMajorVersion; 5] = [
+            STORAGE_CONTROLLER_POSTGRES_VERSION,
+            PgMajorVersion::PG16,
+            PgMajorVersion::PG15,
+            PgMajorVersion::PG14,
+            PgMajorVersion::PG17,
+        ];

-        for v in prefer_versions {
+        for v in PREFER_VERSIONS {
            let path = Utf8PathBuf::from_path_buf(self.env.pg_dir(v, dir_name)?).unwrap();
            if tokio::fs::try_exists(&path).await? {
                return Ok(path);
@@ -220,7 +226,7 @@ impl StorageController {
            "-d",
            DB_NAME,
            "-p",
-            &format!("{}", postgres_port),
+            &format!("{postgres_port}"),
        ];
        let pg_lib_dir = self.get_pg_lib_dir().await.unwrap();
        let envs = [
@@ -263,7 +269,7 @@ impl StorageController {
            "-h",
            "localhost",
            "-p",
-            &format!("{}", postgres_port),
+            &format!("{postgres_port}"),
            "-U",
            &username(),
            "-O",
@@ -425,7 +431,7 @@ impl StorageController {
        // from `LocalEnv`'s config file (`.neon/config`).
        tokio::fs::write(
            &pg_data_path.join("postgresql.conf"),
-            format!("port = {}\nfsync=off\n", postgres_port),
+            format!("port = {postgres_port}\nfsync=off\n"),
        )
        .await?;

@@ -477,7 +483,7 @@ impl StorageController {
            self.setup_database(postgres_port).await?;
        }

-        let database_url = format!("postgresql://localhost:{}/{DB_NAME}", postgres_port);
+        let database_url = format!("postgresql://localhost:{postgres_port}/{DB_NAME}");

        // We support running a startup SQL script to fiddle with the database before we launch storcon.
        // This is used by the test suite.
@@ -508,7 +514,7 @@ impl StorageController {
        drop(client);
        conn.await??;

-        let addr = format!("{}:{}", host, listen_port);
+        let addr = format!("{host}:{listen_port}");
        let address_for_peers = Uri::builder()
            .scheme(scheme)
            .authority(addr.clone())
@@ -557,6 +563,10 @@ impl StorageController {
            args.push("--use-local-compute-notifications".to_string());
        }

+        if let Some(value) = self.config.kick_secondary_downloads {
+            args.push(format!("--kick-secondary-downloads={value}"));
+        }
+
        if let Some(ssl_ca_file) = self.env.ssl_ca_cert_path() {
            args.push(format!("--ssl-ca-file={}", ssl_ca_file.to_str().unwrap()));
        }
@@ -628,10 +638,28 @@ impl StorageController {
            args.push("--timelines-onto-safekeepers".to_string());
        }

-        if let Some(sk_cnt) = self.config.timeline_safekeeper_count {
+        // neon_local is used in test environments where we often have less than 3 safekeepers.
+        if self.config.timeline_safekeeper_count.is_some() || self.env.safekeepers.len() < 3 {
+            let sk_cnt = self
+                .config
+                .timeline_safekeeper_count
+                .unwrap_or(self.env.safekeepers.len());
+
            args.push(format!("--timeline-safekeeper-count={sk_cnt}"));
        }

+        let mut envs = vec![
+            ("LD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
+            ("DYLD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
+        ];
+
+        if let Some(posthog_config) = &self.config.posthog_config {
+            envs.push((
+                "POSTHOG_CONFIG".to_string(),
+                serde_json::to_string(posthog_config)?,
+            ));
+        }
+
        println!("Starting storage controller");

        background_process::start_process(
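In plain terms, the hunk above now also emits `--timeline-safekeeper-count` when the local environment has fewer than three safekeepers, defaulting the count to however many are configured; an explicit `timeline_safekeeper_count` still takes precedence. A small standalone rendering of that decision (assumed equivalent to, not copied from, the code above):

// Returns the count to pass via --timeline-safekeeper-count, if the flag
// should be emitted at all.
fn timeline_safekeeper_count_arg(configured: Option<usize>, num_safekeepers: usize) -> Option<usize> {
    if configured.is_some() || num_safekeepers < 3 {
        Some(configured.unwrap_or(num_safekeepers))
    } else {
        None
    }
}

fn main() {
    assert_eq!(timeline_safekeeper_count_arg(None, 1), Some(1));
    assert_eq!(timeline_safekeeper_count_arg(Some(3), 5), Some(3));
    assert_eq!(timeline_safekeeper_count_arg(None, 3), None);
}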
@@ -639,10 +667,7 @@ impl StorageController {
            &instance_dir,
            &self.env.storage_controller_bin(),
            args,
-            vec![
-                ("LD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
-                ("DYLD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()),
-            ],
+            envs,
            background_process::InitialPidFile::Create(self.pid_file(start_args.instance_id)),
            &start_args.start_timeout,
            || async {
@@ -806,9 +831,9 @@ impl StorageController {
            builder = builder.json(&body)
        }
        if let Some(private_key) = &self.private_key {
-            println!("Getting claims for path {}", path);
+            println!("Getting claims for path {path}");
            if let Some(required_claims) = Self::get_claims_for_path(&path)? {
-                println!("Got claims {:?} for path {}", required_claims, path);
+                println!("Got claims {required_claims:?} for path {path}");
                let jwt_token = encode_from_key_file(&required_claims, private_key)?;
                builder = builder.header(
                    reqwest::header::AUTHORIZATION,