Mirror of https://github.com/neondatabase/neon.git, synced 2025-12-27 08:09:58 +00:00
[proxy] fix sql-over-http password setting (#12177)
## Problem

Our sql-over-http tests had come to rely on "trust" authentication, so the code path that makes sure the auth-keys data is set was never being hit.

## Summary of changes

Slight refactor of the `WakeComputeBackend` trait and its implementations, plus making sure the auth keys are propagated to the compute connection. Fix the tests so that passwords are actually exercised.
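At a high level, the change separates "which compute to wake" from "how to authenticate to it". Below is a minimal sketch of that shape, assuming the `async-trait` crate already used in this diff; every type here is a simplified stand-in, not the proxy's real definition, and is only meant to make the split readable:

```rust
// Hypothetical stand-ins for the shape of this refactor; not the real proxy types.
use async_trait::async_trait;

pub struct RequestContext;
pub struct CachedNodeInfo;
pub struct WakeComputeError;

pub struct ComputeUserInfo; // endpoint, user, options in the real type

pub enum ComputeCredentialKeys {
    AuthKeys([u8; 64]), // SCRAM client/server keys in the real type
    None,
}

// Renamed from `ComputeConnectBackend`: its only job is waking a compute,
// and it is now keyed on user info rather than on full credentials.
#[async_trait]
pub trait WakeComputeBackend {
    async fn wake_compute(
        &self,
        ctx: &RequestContext,
    ) -> Result<CachedNodeInfo, WakeComputeError>;
}

// Credentials get split at the call sites: `info` drives `wake_compute`,
// while `keys` is handed to the connect mechanism so the SCRAM auth keys
// actually end up on the outgoing compute connection.
pub struct ComputeCredentials {
    pub info: ComputeUserInfo,
    pub keys: ComputeCredentialKeys,
}
```

The hunks below thread the two halves separately: the user info drives waking, and the keys are applied where the connection is built.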
@@ -14,7 +14,7 @@ use crate::context::RequestContext;
 use crate::control_plane::client::cplane_proxy_v1;
 use crate::control_plane::{self, CachedNodeInfo, NodeInfo};
 use crate::error::{ReportableError, UserFacingError};
-use crate::pglb::connect_compute::ComputeConnectBackend;
+use crate::pglb::connect_compute::WakeComputeBackend;
 use crate::pqproto::BeMessage;
 use crate::proxy::NeonOptions;
 use crate::stream::PqStream;
@@ -109,7 +109,7 @@ impl ConsoleRedirectBackend {
 pub struct ConsoleRedirectNodeInfo(pub(super) NodeInfo);
 
 #[async_trait]
-impl ComputeConnectBackend for ConsoleRedirectNodeInfo {
+impl WakeComputeBackend for ConsoleRedirectNodeInfo {
     async fn wake_compute(
         &self,
         _ctx: &RequestContext,
@@ -25,7 +25,7 @@ use crate::control_plane::{
     RoleAccessControl,
 };
 use crate::intern::EndpointIdInt;
-use crate::pglb::connect_compute::ComputeConnectBackend;
+use crate::pglb::connect_compute::WakeComputeBackend;
 use crate::pqproto::BeMessage;
 use crate::proxy::NeonOptions;
 use crate::rate_limiter::EndpointRateLimiter;
@@ -407,13 +407,13 @@ impl Backend<'_, ComputeUserInfo> {
 }
 
 #[async_trait::async_trait]
-impl ComputeConnectBackend for Backend<'_, ComputeCredentials> {
+impl WakeComputeBackend for Backend<'_, ComputeUserInfo> {
     async fn wake_compute(
         &self,
         ctx: &RequestContext,
     ) -> Result<CachedNodeInfo, control_plane::errors::WakeComputeError> {
         match self {
-            Self::ControlPlane(api, creds) => api.wake_compute(ctx, &creds.info).await,
+            Self::ControlPlane(api, info) => api.wake_compute(ctx, info).await,
             Self::Local(local) => Ok(Cached::new_uncached(local.node_info.clone())),
         }
     }
@@ -136,11 +136,11 @@ impl AuthInfo {
         }
     }
 
-    pub(crate) fn with_auth_keys(keys: &ComputeCredentialKeys) -> Self {
+    pub(crate) fn with_auth_keys(keys: ComputeCredentialKeys) -> Self {
         Self {
             auth: match keys {
                 ComputeCredentialKeys::AuthKeys(AuthKeys::ScramSha256(auth_keys)) => {
-                    Some(Auth::Scram(Box::new(*auth_keys)))
+                    Some(Auth::Scram(Box::new(auth_keys)))
                 }
                 ComputeCredentialKeys::JwtPayload(_) | ComputeCredentialKeys::None => None,
             },
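`with_auth_keys` now takes `ComputeCredentialKeys` by value, so the SCRAM keys are moved into the `AuthInfo` rather than copied out from behind a reference. A self-contained sketch of that shape, using simplified stand-in types (not the proxy's real definitions):

```rust
// Simplified stand-ins for the shape of AuthInfo::with_auth_keys after this change.
struct ScramKeys {
    client_key: [u8; 32],
    server_key: [u8; 32],
}

enum AuthKeys {
    ScramSha256(ScramKeys),
}

enum ComputeCredentialKeys {
    AuthKeys(AuthKeys),
    JwtPayload(Vec<u8>),
    None,
}

enum Auth {
    Scram(Box<ScramKeys>),
}

struct AuthInfo {
    auth: Option<Auth>,
}

impl AuthInfo {
    // Keys are consumed, so the SCRAM keys move straight in; no `*auth_keys`
    // copy out of a reference is needed any more.
    fn with_auth_keys(keys: ComputeCredentialKeys) -> Self {
        Self {
            auth: match keys {
                ComputeCredentialKeys::AuthKeys(AuthKeys::ScramSha256(auth_keys)) => {
                    Some(Auth::Scram(Box::new(auth_keys)))
                }
                ComputeCredentialKeys::JwtPayload(_) | ComputeCredentialKeys::None => None,
            },
        }
    }
}

fn main() {
    let keys = ComputeCredentialKeys::AuthKeys(AuthKeys::ScramSha256(ScramKeys {
        client_key: [0; 32],
        server_key: [0; 32],
    }));
    assert!(AuthInfo::with_auth_keys(keys).auth.is_some());
}
```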
@@ -50,7 +50,7 @@ pub(crate) trait ConnectMechanism {
 }
 
 #[async_trait]
-pub(crate) trait ComputeConnectBackend {
+pub(crate) trait WakeComputeBackend {
     async fn wake_compute(
         &self,
         ctx: &RequestContext,
@@ -91,7 +91,7 @@ impl ConnectMechanism for TcpMechanism {
 
 /// Try to connect to the compute node, retrying if necessary.
 #[tracing::instrument(skip_all)]
-pub(crate) async fn connect_to_compute<M: ConnectMechanism, B: ComputeConnectBackend>(
+pub(crate) async fn connect_to_compute<M: ConnectMechanism, B: WakeComputeBackend>(
     ctx: &RequestContext,
     mechanism: &M,
     user_info: &B,
@@ -358,12 +358,12 @@ pub(crate) async fn handle_client<S: AsyncRead + AsyncWrite + Unpin + Send>(
         }
     };
 
-    let creds = match &user_info {
-        auth::Backend::ControlPlane(_, creds) => creds,
+    let (cplane, creds) = match user_info {
+        auth::Backend::ControlPlane(cplane, creds) => (cplane, creds),
         auth::Backend::Local(_) => unreachable!("local proxy does not run tcp proxy service"),
     };
     let params_compat = creds.info.options.get(NeonOptions::PARAMS_COMPAT).is_some();
-    let mut auth_info = compute::AuthInfo::with_auth_keys(&creds.keys);
+    let mut auth_info = compute::AuthInfo::with_auth_keys(creds.keys);
    auth_info.set_startup_params(&params, params_compat);
 
    let res = connect_to_compute(
@@ -373,7 +373,7 @@ pub(crate) async fn handle_client<S: AsyncRead + AsyncWrite + Unpin + Send>(
            auth: auth_info,
            locks: &config.connect_compute_locks,
        },
-       &user_info,
+       &auth::Backend::ControlPlane(cplane, creds.info),
        config.wake_compute_retry_config,
        &config.connect_to_compute,
    )
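Worth noting: `handle_client` now matches on `user_info` by value rather than by reference, so the credentials come out owned; the keys can then be moved into the connection's `AuthInfo`, while a fresh `auth::Backend::ControlPlane(cplane, creds.info)` is rebuilt for `connect_to_compute`. A stand-in sketch of that ownership pattern (all types here are illustrative only):

```rust
// Illustrative stand-ins for the ownership pattern: consume the enum to get
// owned parts, move the keys one way and the user info the other.
enum Backend<T> {
    ControlPlane(&'static str, T), // &'static str stands in for the cplane handle
    Local,
}

struct Creds {
    info: String,
    keys: Vec<u8>,
}

fn main() {
    let user_info = Backend::ControlPlane(
        "cplane-handle",
        Creds { info: "endpoint/user".to_owned(), keys: vec![1, 2, 3] },
    );

    // By-value match: both halves come out owned.
    let (cplane, creds) = match user_info {
        Backend::ControlPlane(cplane, creds) => (cplane, creds),
        Backend::Local => unreachable!("local proxy does not run tcp proxy service"),
    };

    // The keys move one way (into AuthInfo in the real code)…
    let auth_keys = creds.keys;
    // …and the user info is wrapped back into a backend for connect_to_compute.
    let backend = Backend::ControlPlane(cplane, creds.info);
    let _ = (auth_keys, backend);
}
```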
@@ -8,7 +8,7 @@ use std::time::Duration;
 use anyhow::{Context, bail};
 use async_trait::async_trait;
 use http::StatusCode;
-use postgres_client::config::{AuthKeys, ScramKeys, SslMode};
+use postgres_client::config::SslMode;
 use postgres_client::tls::{MakeTlsConnect, NoTls};
 use retry::{ShouldRetryWakeCompute, retry_after};
 use rstest::rstest;
@@ -19,9 +19,7 @@ use tracing_test::traced_test;
 
 use super::retry::CouldRetry;
 use super::*;
-use crate::auth::backend::{
-    ComputeCredentialKeys, ComputeCredentials, ComputeUserInfo, MaybeOwned,
-};
+use crate::auth::backend::{ComputeUserInfo, MaybeOwned};
 use crate::config::{ComputeConfig, RetryConfig};
 use crate::control_plane::client::{ControlPlaneClient, TestControlPlaneClient};
 use crate::control_plane::messages::{ControlPlaneErrorMessage, Details, MetricsAuxInfo, Status};
@@ -575,19 +573,13 @@ fn helper_create_cached_node_info(cache: &'static NodeInfoCache) -> CachedNodeIn
 
 fn helper_create_connect_info(
     mechanism: &TestConnectMechanism,
-) -> auth::Backend<'static, ComputeCredentials> {
+) -> auth::Backend<'static, ComputeUserInfo> {
     auth::Backend::ControlPlane(
         MaybeOwned::Owned(ControlPlaneClient::Test(Box::new(mechanism.clone()))),
-        ComputeCredentials {
-            info: ComputeUserInfo {
-                endpoint: "endpoint".into(),
-                user: "user".into(),
-                options: NeonOptions::parse_options_raw(""),
-            },
-            keys: ComputeCredentialKeys::AuthKeys(AuthKeys::ScramSha256(ScramKeys {
-                client_key: [0; 32],
-                server_key: [0; 32],
-            })),
+        ComputeUserInfo {
+            endpoint: "endpoint".into(),
+            user: "user".into(),
+            options: NeonOptions::parse_options_raw(""),
         },
     )
 }
@@ -8,7 +8,7 @@ use crate::error::ReportableError;
 use crate::metrics::{
     ConnectOutcome, ConnectionFailuresBreakdownGroup, Metrics, RetriesMetricGroup, RetryType,
 };
-use crate::pglb::connect_compute::ComputeConnectBackend;
+use crate::pglb::connect_compute::WakeComputeBackend;
 use crate::proxy::retry::{retry_after, should_retry};
 
 // Use macro to retain original callsite.
@@ -23,7 +23,7 @@ macro_rules! log_wake_compute_error {
     };
 }
 
-pub(crate) async fn wake_compute<B: ComputeConnectBackend>(
+pub(crate) async fn wake_compute<B: WakeComputeBackend>(
     num_retries: &mut u32,
     ctx: &RequestContext,
     api: &B,
@@ -21,7 +21,7 @@ use super::conn_pool_lib::{Client, ConnInfo, EndpointConnPool, GlobalConnPool};
 use super::http_conn_pool::{self, HttpConnPool, Send, poll_http2_client};
 use super::local_conn_pool::{self, EXT_NAME, EXT_SCHEMA, EXT_VERSION, LocalConnPool};
 use crate::auth::backend::local::StaticAuthRules;
-use crate::auth::backend::{ComputeCredentials, ComputeUserInfo};
+use crate::auth::backend::{ComputeCredentialKeys, ComputeCredentials, ComputeUserInfo};
 use crate::auth::{self, AuthError};
 use crate::compute_ctl::{
     ComputeCtlError, ExtensionInstallRequest, Privilege, SetRoleGrantsRequest,
@@ -180,7 +180,7 @@ impl PoolingBackend {
         let conn_id = uuid::Uuid::new_v4();
         tracing::Span::current().record("conn_id", display(conn_id));
         info!(%conn_id, "pool: opening a new connection '{conn_info}'");
-        let backend = self.auth_backend.as_ref().map(|()| keys);
+        let backend = self.auth_backend.as_ref().map(|()| keys.info);
         crate::pglb::connect_compute::connect_to_compute(
             ctx,
             &TokioMechanism {
@@ -188,6 +188,7 @@ impl PoolingBackend {
                 conn_info,
                 pool: self.pool.clone(),
                 locks: &self.config.connect_compute_locks,
+                keys: keys.keys,
             },
             &backend,
             self.config.wake_compute_retry_config,
@@ -214,16 +215,13 @@ impl PoolingBackend {
         let conn_id = uuid::Uuid::new_v4();
         tracing::Span::current().record("conn_id", display(conn_id));
         debug!(%conn_id, "pool: opening a new connection '{conn_info}'");
-        let backend = self.auth_backend.as_ref().map(|()| ComputeCredentials {
-            info: ComputeUserInfo {
-                user: conn_info.user_info.user.clone(),
-                endpoint: EndpointId::from(format!(
-                    "{}{LOCAL_PROXY_SUFFIX}",
-                    conn_info.user_info.endpoint.normalize()
-                )),
-                options: conn_info.user_info.options.clone(),
-            },
-            keys: crate::auth::backend::ComputeCredentialKeys::None,
+        let backend = self.auth_backend.as_ref().map(|()| ComputeUserInfo {
+            user: conn_info.user_info.user.clone(),
+            endpoint: EndpointId::from(format!(
+                "{}{LOCAL_PROXY_SUFFIX}",
+                conn_info.user_info.endpoint.normalize()
+            )),
+            options: conn_info.user_info.options.clone(),
         });
         crate::pglb::connect_compute::connect_to_compute(
             ctx,
@@ -495,6 +493,7 @@ struct TokioMechanism {
     pool: Arc<GlobalConnPool<postgres_client::Client, EndpointConnPool<postgres_client::Client>>>,
     conn_info: ConnInfo,
     conn_id: uuid::Uuid,
+    keys: ComputeCredentialKeys,
 
     /// connect_to_compute concurrency lock
     locks: &'static ApiLocks<Host>,
@@ -520,6 +519,10 @@ impl ConnectMechanism for TokioMechanism {
             .dbname(&self.conn_info.dbname)
             .connect_timeout(compute_config.timeout);
 
+        if let ComputeCredentialKeys::AuthKeys(auth_keys) = self.keys {
+            config.auth_keys(auth_keys);
+        }
+
         let pause = ctx.latency_timer_pause(crate::metrics::Waiting::Compute);
         let res = config.connect(compute_config).await;
         drop(pause);
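This is the core of the sql-over-http password fix: `TokioMechanism` now carries the `ComputeCredentialKeys`, and when they contain SCRAM auth keys it sets them on the connection config before dialing the compute. A simplified, self-contained sketch of that conditional; `Config` here is a stand-in, not the vendored `postgres_client` config:

```rust
// Simplified stand-ins, not the real postgres_client / proxy types.
#[derive(Clone, Copy)]
struct AuthKeys; // SCRAM client/server keys in the real type

enum ComputeCredentialKeys {
    AuthKeys(AuthKeys),
    None,
}

#[derive(Default)]
struct Config {
    auth_keys: Option<AuthKeys>,
}

impl Config {
    // Stand-in for the builder-style setter the connect path calls before dialing.
    fn auth_keys(&mut self, keys: AuthKeys) -> &mut Self {
        self.auth_keys = Some(keys);
        self
    }
}

fn main() {
    let keys = ComputeCredentialKeys::AuthKeys(AuthKeys);
    let mut config = Config::default();

    // Only set auth keys when the credentials actually carry them
    // (the JWT / None paths skip this, matching the diff above).
    if let ComputeCredentialKeys::AuthKeys(auth_keys) = keys {
        config.auth_keys(auth_keys);
    }

    assert!(config.auth_keys.is_some());
}
```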
@@ -4046,6 +4046,16 @@ def static_proxy(
         "CREATE TABLE neon_control_plane.endpoints (endpoint_id VARCHAR(255) PRIMARY KEY, allowed_ips VARCHAR(255))"
     )
 
+    vanilla_pg.stop()
+    vanilla_pg.edit_hba(
+        [
+            "local all all trust",
+            "host all all 127.0.0.1/32 scram-sha-256",
+            "host all all ::1/128 scram-sha-256",
+        ]
+    )
+    vanilla_pg.start()
+
     proxy_port = port_distributor.get_port()
     mgmt_port = port_distributor.get_port()
     http_port = port_distributor.get_port()
@@ -19,11 +19,15 @@ TABLE_NAME = "neon_control_plane.endpoints"
 async def test_proxy_psql_allowed_ips(static_proxy: NeonProxy, vanilla_pg: VanillaPostgres):
     # Shouldn't be able to connect to this project
     vanilla_pg.safe_psql(
-        f"INSERT INTO {TABLE_NAME} (endpoint_id, allowed_ips) VALUES ('private-project', '8.8.8.8')"
+        f"INSERT INTO {TABLE_NAME} (endpoint_id, allowed_ips) VALUES ('private-project', '8.8.8.8')",
+        user="proxy",
+        password="password",
     )
     # Should be able to connect to this project
     vanilla_pg.safe_psql(
-        f"INSERT INTO {TABLE_NAME} (endpoint_id, allowed_ips) VALUES ('generic-project', '::1,127.0.0.1')"
+        f"INSERT INTO {TABLE_NAME} (endpoint_id, allowed_ips) VALUES ('generic-project', '::1,127.0.0.1')",
+        user="proxy",
+        password="password",
     )
 
     def check_cannot_connect(**kwargs):
@@ -60,7 +64,9 @@ async def test_proxy_http_allowed_ips(static_proxy: NeonProxy, vanilla_pg: Vanil
 
     # Shouldn't be able to connect to this project
     vanilla_pg.safe_psql(
-        f"INSERT INTO {TABLE_NAME} (endpoint_id, allowed_ips) VALUES ('proxy', '8.8.8.8')"
+        f"INSERT INTO {TABLE_NAME} (endpoint_id, allowed_ips) VALUES ('proxy', '8.8.8.8')",
+        user="proxy",
+        password="password",
     )
 
     def query(status: int, query: str, *args):
@@ -75,6 +81,8 @@ async def test_proxy_http_allowed_ips(static_proxy: NeonProxy, vanilla_pg: Vanil
     query(400, "select 1;") # ip address is not allowed
     # Should be able to connect to this project
     vanilla_pg.safe_psql(
-        f"UPDATE {TABLE_NAME} SET allowed_ips = '8.8.8.8,127.0.0.1' WHERE endpoint_id = 'proxy'"
+        f"UPDATE {TABLE_NAME} SET allowed_ips = '8.8.8.8,127.0.0.1' WHERE endpoint_id = 'proxy'",
+        user="proxy",
+        password="password",
     )
     query(200, "select 1;") # should work now