diff --git a/proxy/src/proxy/connect_compute.rs b/proxy/src/proxy/connect_compute.rs
index 6e57caf998..c76e2ff6d9 100644
--- a/proxy/src/proxy/connect_compute.rs
+++ b/proxy/src/proxy/connect_compute.rs
@@ -122,25 +122,24 @@ where

         error!(error = ?err, "could not connect to compute node");

-        let node_info =
-            if err.get_error_kind() == crate::error::ErrorKind::Postgres || !node_info.cached() {
-                // If the error is Postgres, that means that we managed to connect to the compute node, but there was an error.
-                // Do not need to retrieve a new node_info, just return the old one.
-                if !err.should_retry(num_retries) {
-                    return Err(err.into());
-                }
-                node_info
-            } else {
-                // if we failed to connect, it's likely that the compute node was suspended, wake a new compute node
-                info!("compute node's state has likely changed; requesting a wake-up");
-                ctx.latency_timer.cache_miss();
-                let old_node_info = invalidate_cache(node_info);
-                let mut node_info = wake_compute(&mut num_retries, ctx, user_info).await?;
-                node_info.reuse_settings(old_node_info);
+        let node_info = if !node_info.cached() {
+            // If we just received this from cplane and didn't get it from the cache, waking again won't help.
+            // No need to retrieve a new node_info; just return the old one.
+            if !err.should_retry(num_retries) {
+                return Err(err.into());
+            }
+            node_info
+        } else {
+            // if we failed to connect, it's likely that the compute node was suspended; wake a new compute node
+            info!("compute node's state has likely changed; requesting a wake-up");
+            ctx.latency_timer.cache_miss();
+            let old_node_info = invalidate_cache(node_info);
+            let mut node_info = wake_compute(&mut num_retries, ctx, user_info).await?;
+            node_info.reuse_settings(old_node_info);

-                mechanism.update_connect_config(&mut node_info.config);
-                node_info
-            };
+            mechanism.update_connect_config(&mut node_info.config);
+            node_info
+        };

         // now that we have a new node, try connect to it repeatedly.
         // this can error for a few reasons, for instance:
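The behavioral change in this hunk boils down to a two-variable decision: a `node_info` that came straight from cplane is retried in place until the retry budget runs out, while a cached one triggers cache invalidation and a wake-up. Previously, `ErrorKind::Postgres` also forced the retry-in-place branch; dropping that condition is the whole semantic change. A minimal standalone sketch of the new decision table, where `Decision`, `was_cached`, and `can_retry` are hypothetical names rather than types from the proxy crate:

```rust
/// Hypothetical stand-in for the three outcomes of the rewritten branch.
#[derive(Debug, PartialEq)]
enum Decision {
    /// node_info came straight from cplane: keep it and retry the connect.
    RetrySameNode,
    /// node_info came from the cache: invalidate it and wake a new compute.
    InvalidateAndWake,
    /// fresh node_info and the retry budget is exhausted: surface the error.
    Bail,
}

/// Sketch of the new control flow: `was_cached` mirrors `node_info.cached()`
/// and `can_retry` mirrors `err.should_retry(num_retries)`.
fn decide(was_cached: bool, can_retry: bool) -> Decision {
    if !was_cached {
        if can_retry {
            Decision::RetrySameNode
        } else {
            Decision::Bail
        }
    } else {
        // Note: the cached branch wakes unconditionally; in the real code,
        // wake_compute enforces its own limit via num_retries.
        Decision::InvalidateAndWake
    }
}

fn main() {
    assert_eq!(decide(false, true), Decision::RetrySameNode);
    assert_eq!(decide(false, false), Decision::Bail);
    assert_eq!(decide(true, true), Decision::InvalidateAndWake);
    assert_eq!(decide(true, false), Decision::InvalidateAndWake);
    println!("decision table matches the rewritten branch");
}
```

Since connection errors no longer branch on `ErrorKind::Postgres`, the Postgres-specific test actions in the next file become unreachable and are deleted.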
diff --git a/proxy/src/proxy/tests.rs b/proxy/src/proxy/tests.rs
index efbd661bbf..1a01f32339 100644
--- a/proxy/src/proxy/tests.rs
+++ b/proxy/src/proxy/tests.rs
@@ -375,8 +375,6 @@ enum ConnectAction {
     Connect,
     Retry,
     Fail,
-    RetryPg,
-    FailPg,
 }

 #[derive(Clone)]
@@ -466,14 +464,6 @@ impl ConnectMechanism for TestConnectMechanism {
                 retryable: false,
                 kind: ErrorKind::Compute,
             }),
-            ConnectAction::FailPg => Err(TestConnectError {
-                retryable: false,
-                kind: ErrorKind::Postgres,
-            }),
-            ConnectAction::RetryPg => Err(TestConnectError {
-                retryable: true,
-                kind: ErrorKind::Postgres,
-            }),
             x => panic!("expecting action {:?}, connect is called instead", x),
         }
     }
@@ -572,32 +562,6 @@ async fn connect_to_compute_retry() {
     mechanism.verify();
 }

-#[tokio::test]
-async fn connect_to_compute_retry_pg() {
-    let _ = env_logger::try_init();
-    use ConnectAction::*;
-    let mut ctx = RequestMonitoring::test();
-    let mechanism = TestConnectMechanism::new(vec![Wake, RetryPg, Connect]);
-    let user_info = helper_create_connect_info(&mechanism);
-    connect_to_compute(&mut ctx, &mechanism, &user_info, false)
-        .await
-        .unwrap();
-    mechanism.verify();
-}
-
-#[tokio::test]
-async fn connect_to_compute_fail_pg() {
-    let _ = env_logger::try_init();
-    use ConnectAction::*;
-    let mut ctx = RequestMonitoring::test();
-    let mechanism = TestConnectMechanism::new(vec![Wake, FailPg]);
-    let user_info = helper_create_connect_info(&mechanism);
-    connect_to_compute(&mut ctx, &mechanism, &user_info, false)
-        .await
-        .unwrap_err();
-    mechanism.verify();
-}
-
 /// Test that we don't retry if the error is not retryable.
 #[tokio::test]
 async fn connect_to_compute_non_retry_1() {
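The surviving tests keep driving `connect_to_compute` through the same scripted-mechanism pattern the deleted ones used: a queue of actions feeds a fake `connect`, and `verify()` asserts the whole script was consumed. A self-contained sketch of that pattern, assuming hypothetical `Action` and `ScriptedMechanism` names (the real `TestConnectMechanism` also tracks wake-ups and richer error kinds):

```rust
use std::collections::VecDeque;
use std::sync::Mutex;

/// Illustrative counterpart of ConnectAction after the Pg variants are gone.
#[derive(Debug, Clone, Copy)]
enum Action {
    Connect,
    Retry,
    Fail,
}

struct ScriptedMechanism {
    sequence: Mutex<VecDeque<Action>>,
}

impl ScriptedMechanism {
    fn new(script: Vec<Action>) -> Self {
        Self {
            sequence: Mutex::new(script.into_iter().collect()),
        }
    }

    /// Pop the next scripted action and turn it into a connect result;
    /// the bool in the error plays the role of the "retryable" flag.
    fn connect(&self) -> Result<(), (bool, &'static str)> {
        match self
            .sequence
            .lock()
            .unwrap()
            .pop_front()
            .expect("connect called more times than the script allows")
        {
            Action::Connect => Ok(()),
            Action::Retry => Err((true, "transient compute error")),
            Action::Fail => Err((false, "permanent compute error")),
        }
    }

    /// Like TestConnectMechanism::verify: the whole script must be consumed.
    fn verify(&self) {
        assert!(
            self.sequence.lock().unwrap().is_empty(),
            "script not fully consumed"
        );
    }
}

fn main() {
    // Mirrors a retry scenario: the first attempt fails retryably, the second succeeds.
    let mech = ScriptedMechanism::new(vec![Action::Retry, Action::Connect]);
    assert!(mech.connect().is_err());
    assert!(mech.connect().is_ok());
    mech.verify();

    // A non-retryable failure surfaces retryable = false to the caller.
    let failing = ScriptedMechanism::new(vec![Action::Fail]);
    assert!(!failing.connect().unwrap_err().0);
    failing.verify();
}
```

With the `RetryPg`/`FailPg` variants removed, the scripted coverage of Postgres-kind errors moves entirely into the generic `Retry`/`Fail` paths, matching the production code's new kind-agnostic branch.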