proxy: Remove the endpoint filter cache (#12488)

## Problem

The endpoint filter cache is still unused because it is not yet reliable enough to enable; all it does is consume a lot of memory.

## Summary of changes

Remove the code. Needs a new design.

neondatabase/cloud#30634
Folke Behrens, 2025-07-07 19:46:33 +02:00 (committed by GitHub)
parent 55aef2993d · commit e65d5f7369
11 changed files with 11 additions and 504 deletions


@@ -7,7 +7,7 @@ use once_cell::sync::OnceCell;
 use smol_str::SmolStr;
 use tokio::sync::mpsc;
 use tracing::field::display;
-use tracing::{Span, debug, error, info_span};
+use tracing::{Span, error, info_span};
 use try_lock::TryLock;
 use uuid::Uuid;
@@ -15,10 +15,7 @@ use self::parquet::RequestData;
 use crate::control_plane::messages::{ColdStartInfo, MetricsAuxInfo};
 use crate::error::ErrorKind;
 use crate::intern::{BranchIdInt, ProjectIdInt};
-use crate::metrics::{
-    ConnectOutcome, InvalidEndpointsGroup, LatencyAccumulated, LatencyTimer, Metrics, Protocol,
-    Waiting,
-};
+use crate::metrics::{LatencyAccumulated, LatencyTimer, Metrics, Protocol, Waiting};
 use crate::pqproto::StartupMessageParams;
 use crate::protocol2::{ConnectionInfo, ConnectionInfoExtra};
 use crate::types::{DbName, EndpointId, RoleName};
@@ -70,8 +67,6 @@ struct RequestContextInner {
     // This sender is only used to log the length of session in case of success.
     disconnect_sender: Option<mpsc::UnboundedSender<RequestData>>,
     pub(crate) latency_timer: LatencyTimer,
-    // Whether proxy decided that it's not a valid endpoint end rejected it before going to cplane.
-    rejected: Option<bool>,
     disconnect_timestamp: Option<chrono::DateTime<Utc>>,
 }
@@ -106,7 +101,6 @@ impl Clone for RequestContext {
             auth_method: inner.auth_method.clone(),
             jwt_issuer: inner.jwt_issuer.clone(),
             success: inner.success,
-            rejected: inner.rejected,
             cold_start_info: inner.cold_start_info,
             pg_options: inner.pg_options.clone(),
             testodrome_query_id: inner.testodrome_query_id.clone(),
@@ -151,7 +145,6 @@ impl RequestContext {
             auth_method: None,
             jwt_issuer: None,
             success: false,
-            rejected: None,
             cold_start_info: ColdStartInfo::Unknown,
             pg_options: None,
             testodrome_query_id: None,
@@ -183,11 +176,6 @@ impl RequestContext {
         )
     }

-    pub(crate) fn set_rejected(&self, rejected: bool) {
-        let mut this = self.0.try_lock().expect("should not deadlock");
-        this.rejected = Some(rejected);
-    }
-
     pub(crate) fn set_cold_start_info(&self, info: ColdStartInfo) {
         self.0
             .try_lock()
@@ -461,38 +449,6 @@ impl RequestContextInner {
     }

     fn log_connect(&mut self) {
-        let outcome = if self.success {
-            ConnectOutcome::Success
-        } else {
-            ConnectOutcome::Failed
-        };
-
-        // TODO: get rid of entirely/refactor
-        // check for false positives
-        // AND false negatives
-        if let Some(rejected) = self.rejected {
-            let ep = self
-                .endpoint_id
-                .as_ref()
-                .map(|x| x.as_str())
-                .unwrap_or_default();
-            // This makes sense only if cache is disabled
-            debug!(
-                ?outcome,
-                ?rejected,
-                ?ep,
-                "check endpoint is valid with outcome"
-            );
-            Metrics::get()
-                .proxy
-                .invalid_endpoints_total
-                .inc(InvalidEndpointsGroup {
-                    protocol: self.protocol,
-                    rejected: rejected.into(),
-                    outcome,
-                });
-        }
-
         if let Some(tx) = self.sender.take() {
             // If type changes, this error handling needs to be updated.
             let tx: mpsc::UnboundedSender<RequestData> = tx;