diff --git a/proxy/src/serverless/backend.rs b/proxy/src/serverless/backend.rs
index 13058f08f1..81cf37a996 100644
--- a/proxy/src/serverless/backend.rs
+++ b/proxy/src/serverless/backend.rs
@@ -42,7 +42,7 @@ use crate::rate_limiter::EndpointRateLimiter;
 use crate::types::{EndpointId, Host, LOCAL_PROXY_SUFFIX};
 
 pub(crate) struct PoolingBackend {
-    pub(crate) http_conn_pool: Arc>>,
+    pub(crate) http_conn_pool: Arc>,
     pub(crate) local_pool: Arc>,
     pub(crate) pool: Arc>>,
 
@@ -593,7 +593,7 @@ impl ConnectMechanism for TokioMechanism {
 }
 
 struct HyperMechanism {
-    pool: Arc>>,
+    pool: Arc>,
     conn_info: ConnInfo,
     conn_id: uuid::Uuid,
 
diff --git a/proxy/src/serverless/http_conn_pool.rs b/proxy/src/serverless/http_conn_pool.rs
index 1c6574e57e..f233294009 100644
--- a/proxy/src/serverless/http_conn_pool.rs
+++ b/proxy/src/serverless/http_conn_pool.rs
@@ -29,7 +29,7 @@ pub(crate) struct ClientDataHttp();
 
 // Per-endpoint connection pool
 // Number of open connections is limited by the `max_conns_per_endpoint`.
-pub(crate) struct HttpConnPool {
+pub(crate) struct HttpConnPool {
     // TODO(conrad):
     // either we should open more connections depending on stream count
     // (not exposed by hyper, need our own counter)
@@ -39,13 +39,13 @@ pub(crate) struct HttpConnPool {
     // seems somewhat redundant though.
     //
     // Probably we should run a semaphore and just the single conn. TBD.
-    conns: VecDeque>,
+    conns: VecDeque>,
     _guard: HttpEndpointPoolsGuard<'static>,
     global_connections_count: Arc,
 }
 
-impl HttpConnPool {
-    fn get_conn_entry(&mut self) -> Option> {
+impl HttpConnPool {
+    fn get_conn_entry(&mut self) -> Option> {
         let Self { conns, .. } = self;
 
         loop {
@@ -85,7 +85,7 @@ impl HttpConnPool {
     }
 }
 
-impl EndpointConnPoolExt for HttpConnPool {
+impl EndpointConnPoolExt for HttpConnPool {
     fn clear_closed(&mut self) -> usize {
         let Self { conns, .. } = self;
         let old_len = conns.len();
@@ -100,7 +100,7 @@ impl EndpointConnPoolExt for HttpConnPool {
     }
 }
 
-impl Drop for HttpConnPool {
+impl Drop for HttpConnPool {
     fn drop(&mut self) {
         if !self.conns.is_empty() {
             self.global_connections_count
@@ -114,14 +114,14 @@ impl Drop for HttpConnPool {
     }
 }
 
-impl GlobalConnPool> {
+impl GlobalConnPool {
     #[expect(unused_results)]
     pub(crate) fn get(
         self: &Arc,
         ctx: &RequestContext,
         conn_info: &ConnInfo,
-    ) -> Result>, HttpConnError> {
-        let result: Result>, HttpConnError>;
+    ) -> Result>, HttpConnError> {
+        let result: Result>, HttpConnError>;
         let Some(endpoint) = conn_info.endpoint_cache_key() else {
             result = Ok(None);
             return result;
@@ -146,7 +146,7 @@ impl GlobalConnPool> {
     fn get_or_create_endpoint_pool(
         self: &Arc,
         endpoint: &EndpointCacheKey,
-    ) -> Arc>> {
+    ) -> Arc> {
         // fast path
         if let Some(pool) = self.global_pool.get(endpoint) {
             return pool.clone();
@@ -186,7 +186,7 @@ impl GlobalConnPool> {
 }
 
 pub(crate) fn poll_http2_client(
-    global_pool: Arc>>,
+    global_pool: Arc>,
     ctx: &RequestContext,
     conn_info: &ConnInfo,
     client: Send,