diff --git a/libs/remote_storage/src/s3_bucket.rs b/libs/remote_storage/src/s3_bucket.rs
index 9d05fa32b3..a856e758db 100644
--- a/libs/remote_storage/src/s3_bucket.rs
+++ b/libs/remote_storage/src/s3_bucket.rs
@@ -22,7 +22,7 @@ use aws_sdk_s3::{
     Client,
 };
 use aws_smithy_http::body::SdkBody;
-use hyper::Body;
+use hyper::{Body, StatusCode};
 use scopeguard::ScopeGuard;
 use tokio::{
     io::{self, AsyncRead},
@@ -529,7 +529,16 @@ impl RemoteStorage for S3Bucket {
                     }
                 }
                 Err(e) => {
-                    return Err(e.into());
+                    if let Some(r) = e.raw_response() {
+                        if r.http().status() == StatusCode::NOT_FOUND {
+                            // 404 is acceptable for deletions. AWS S3 does not return this,
+                            // but some other implementations might (e.g. the GCS XML API
+                            // returns 404 on DeleteObject for a missing key).
+                            continue;
+                        }
+                    }
+                    // No raw HTTP response, or a non-404 status: fail the whole batch.
+                    // (Falling through here would silently swallow the error.)
+                    return Err(anyhow::format_err!("DeleteObjects response error: {e}"));
                 }
             }
         }
diff --git a/pageserver/src/deletion_queue.rs b/pageserver/src/deletion_queue.rs
index 21d45dfad3..3b934fd351 100644
--- a/pageserver/src/deletion_queue.rs
+++ b/pageserver/src/deletion_queue.rs
@@ -310,6 +310,8 @@ impl BackendQueueWorker {
 
         match self.remote_storage.delete_objects(&self.accumulator).await {
             Ok(()) => {
+                // Note: we assume that the remote storage layer returns Ok(()) if some
+                // or all of the deleted objects were already gone.
                 DELETION_QUEUE_EXECUTED.inc_by(self.accumulator.len() as u64);
                 info!(
                     "Executed deletion batch {}..{}",