From 72832b32140a78db7612af626d7c69079d73f445 Mon Sep 17 00:00:00 2001
From: Conrad Ludgate
Date: Wed, 9 Apr 2025 16:04:42 +0100
Subject: [PATCH] chore: fix clippy lints from nightly-2025-03-16 (#11273)

I like to run nightly clippy every so often to make our future Rust
upgrades easier.

Some notable changes (illustrative sketches below):

* Prefer `next_back()` over `last()`. Generic iterators implement
  `last()` by running forward through the whole iterator until the end,
  whereas `next_back()` gets the final element directly.
* Prefer `io::Error::other()`.
* Use implicit returns.

One case where I haven't dealt with the issues is the now
[more-sensitive "large enum variant" lint](https://github.com/rust-lang/rust-clippy/pull/13833).
I chose not to take any decisions around it here, and simply marked the
offending items as `allow` for now.
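The sketches below are illustrative only -- they are not code from this
repository, and all names in them are made up.

`last()` vs `next_back()`:

```rust
fn main() {
    let v = vec![1, 2, 3, 4];

    // The default `Iterator::last()` implementation consumes the whole
    // iterator, walking forward element by element.
    let via_last = v.iter().last();

    // `DoubleEndedIterator::next_back()` takes the final element directly.
    let via_back = v.iter().next_back();

    assert_eq!(via_last, via_back);
}
```

`io::Error::other()` (stable since Rust 1.74) is shorthand for
`io::Error::new(io::ErrorKind::Other, ..)`:

```rust
use std::io;

// Before: spell out the error kind explicitly.
fn old_style() -> io::Error {
    io::Error::new(io::ErrorKind::Other, "something went wrong")
}

// After: same error, less ceremony. This is the form the patch migrates to.
fn new_style() -> io::Error {
    io::Error::other("something went wrong")
}
```

For `large_enum_variant`, the fix clippy usually suggests is boxing the
large variant; this patch defers that decision and adds `#[allow]`s
instead:

```rust
// An enum is as big as its biggest variant, so one oversized variant
// taxes every value of the type.
#[allow(dead_code)]
enum Unboxed {
    Small(u8),
    Large([u8; 1024]),
}

// Boxing the payload keeps the enum itself small, at the cost of a heap
// allocation and a pointer chase.
#[allow(dead_code)]
enum Boxed {
    Small(u8),
    Large(Box<[u8; 1024]>),
}
```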
---
 compute_tools/src/catalog.rs                   | 17 +++++++++--------
 control_plane/storcon_cli/src/main.rs          |  2 +-
 libs/postgres_backend/src/lib.rs               |  7 +++----
 libs/postgres_backend/tests/simple_select.rs   |  4 ++--
 libs/pq_proto/src/framed.rs                    |  2 +-
 libs/pq_proto/src/lib.rs                       |  2 +-
 .../src/authentication/sasl.rs                 |  9 +++------
 libs/remote_storage/src/azure_blob.rs          |  5 ++---
 libs/utils/src/crashsafe.rs                    |  9 +++------
 pageserver/src/http/routes.rs                  | 12 ++++++------
 pageserver/src/tenant.rs                       |  7 +++----
 pageserver/src/tenant/blob_io.rs               |  7 ++-----
 pageserver/src/tenant/block_io.rs              |  8 ++------
 .../tenant/storage_layer/batch_split_writer.rs |  2 +-
 .../inmemory_layer/vectored_dio_read.rs        |  2 +-
 .../src/tenant/storage_layer/merge_iterator.rs |  1 +
 pageserver/src/tenant/timeline.rs              |  1 +
 pageserver/src/tenant/upload_queue.rs          |  1 +
 proxy/src/cancellation.rs                      |  7 +------
 proxy/src/proxy/tests/mod.rs                   |  5 ++---
 proxy/src/serverless/conn_pool_lib.rs          |  1 +
 safekeeper/src/timeline.rs                     |  1 +
 safekeeper/src/timeline_eviction.rs            |  5 ++---
 storage_broker/src/bin/storage_broker.rs       |  6 ++++++
 storage_broker/src/lib.rs                      |  1 +
 25 files changed, 57 insertions(+), 67 deletions(-)

diff --git a/compute_tools/src/catalog.rs b/compute_tools/src/catalog.rs
index db3e07e086..082ba62b8e 100644
--- a/compute_tools/src/catalog.rs
+++ b/compute_tools/src/catalog.rs
@@ -98,13 +98,15 @@ pub async fn get_database_schema(
         .kill_on_drop(true)
         .spawn()?;
 
-    let stdout = cmd.stdout.take().ok_or_else(|| {
-        std::io::Error::new(std::io::ErrorKind::Other, "Failed to capture stdout.")
-    })?;
+    let stdout = cmd
+        .stdout
+        .take()
+        .ok_or_else(|| std::io::Error::other("Failed to capture stdout."))?;
 
-    let stderr = cmd.stderr.take().ok_or_else(|| {
-        std::io::Error::new(std::io::ErrorKind::Other, "Failed to capture stderr.")
-    })?;
+    let stderr = cmd
+        .stderr
+        .take()
+        .ok_or_else(|| std::io::Error::other("Failed to capture stderr."))?;
 
     let mut stdout_reader = FramedRead::new(stdout, BytesCodec::new());
     let stderr_reader = BufReader::new(stderr);
@@ -128,8 +130,7 @@ pub async fn get_database_schema(
             }
         });
 
-        return Err(SchemaDumpError::IO(std::io::Error::new(
-            std::io::ErrorKind::Other,
+        return Err(SchemaDumpError::IO(std::io::Error::other(
             "failed to start pg_dump",
         )));
     }
diff --git a/control_plane/storcon_cli/src/main.rs b/control_plane/storcon_cli/src/main.rs
index b7e479d90c..19c686dcfd 100644
--- a/control_plane/storcon_cli/src/main.rs
+++ b/control_plane/storcon_cli/src/main.rs
@@ -941,7 +941,7 @@ async fn main() -> anyhow::Result<()> {
             let mut node_to_fill_descs = Vec::new();
 
             for desc in node_descs {
-                let to_drain = nodes.iter().any(|id| *id == desc.id);
+                let to_drain = nodes.contains(&desc.id);
                 if to_drain {
                     node_to_drain_descs.push(desc);
                 } else {
diff --git a/libs/postgres_backend/src/lib.rs b/libs/postgres_backend/src/lib.rs
index a0a891f0dc..654dde8da6 100644
--- a/libs/postgres_backend/src/lib.rs
+++ b/libs/postgres_backend/src/lib.rs
@@ -5,7 +5,6 @@
 #![deny(unsafe_code)]
 #![deny(clippy::undocumented_unsafe_blocks)]
 use std::future::Future;
-use std::io::ErrorKind;
 use std::net::SocketAddr;
 use std::os::fd::{AsRawFd, RawFd};
 use std::pin::Pin;
@@ -227,7 +226,7 @@ impl MaybeWriteOnly {
         match self {
             MaybeWriteOnly::Full(framed) => framed.read_startup_message().await,
             MaybeWriteOnly::WriteOnly(_) => {
-                Err(io::Error::new(ErrorKind::Other, "reading from write only half").into())
+                Err(io::Error::other("reading from write only half").into())
             }
             MaybeWriteOnly::Broken => panic!("IO on invalid MaybeWriteOnly"),
         }
@@ -237,7 +236,7 @@ impl MaybeWriteOnly {
         match self {
             MaybeWriteOnly::Full(framed) => framed.read_message().await,
             MaybeWriteOnly::WriteOnly(_) => {
-                Err(io::Error::new(ErrorKind::Other, "reading from write only half").into())
+                Err(io::Error::other("reading from write only half").into())
             }
             MaybeWriteOnly::Broken => panic!("IO on invalid MaybeWriteOnly"),
         }
@@ -975,7 +974,7 @@ impl AsyncWrite for CopyDataWriter<'_, IO> {
             .write_message_noflush(&BeMessage::CopyData(buf))
             // write_message only writes to the buffer, so it can fail iff the
             // message is invaid, but CopyData can't be invalid.
-            .map_err(|_| io::Error::new(ErrorKind::Other, "failed to serialize CopyData"))?;
+            .map_err(|_| io::Error::other("failed to serialize CopyData"))?;
 
         Poll::Ready(Ok(buf.len()))
     }
diff --git a/libs/postgres_backend/tests/simple_select.rs b/libs/postgres_backend/tests/simple_select.rs
index 907ef9eed3..75ca123014 100644
--- a/libs/postgres_backend/tests/simple_select.rs
+++ b/libs/postgres_backend/tests/simple_select.rs
@@ -85,8 +85,8 @@ static KEY: Lazy<PrivateKeyDer<'static>> = Lazy::new(|| {
 
 static CERT: Lazy<CertificateDer<'static>> = Lazy::new(|| {
     let mut cursor = Cursor::new(include_bytes!("cert.pem"));
-    let cert = rustls_pemfile::certs(&mut cursor).next().unwrap().unwrap();
-    cert
+
+    rustls_pemfile::certs(&mut cursor).next().unwrap().unwrap()
 });
 
 // test that basic select with ssl works
diff --git a/libs/pq_proto/src/framed.rs b/libs/pq_proto/src/framed.rs
index 8e216d0f44..4e5e48ecf5 100644
--- a/libs/pq_proto/src/framed.rs
+++ b/libs/pq_proto/src/framed.rs
@@ -35,7 +35,7 @@ impl ConnectionError {
     pub fn into_io_error(self) -> io::Error {
         match self {
             ConnectionError::Io(io) => io,
-            ConnectionError::Protocol(pe) => io::Error::new(io::ErrorKind::Other, pe.to_string()),
+            ConnectionError::Protocol(pe) => io::Error::other(pe.to_string()),
         }
     }
 }
diff --git a/libs/pq_proto/src/lib.rs b/libs/pq_proto/src/lib.rs
index e435ffbf7e..e7afc64564 100644
--- a/libs/pq_proto/src/lib.rs
+++ b/libs/pq_proto/src/lib.rs
@@ -257,7 +257,7 @@ pub enum ProtocolError {
 impl ProtocolError {
     /// Proxy stream.rs uses only io::Error; provide it.
     pub fn into_io_error(self) -> io::Error {
-        io::Error::new(io::ErrorKind::Other, self.to_string())
+        io::Error::other(self.to_string())
     }
 }
 
diff --git a/libs/proxy/postgres-protocol2/src/authentication/sasl.rs b/libs/proxy/postgres-protocol2/src/authentication/sasl.rs
index 27e05e24ec..2daf9a80d4 100644
--- a/libs/proxy/postgres-protocol2/src/authentication/sasl.rs
+++ b/libs/proxy/postgres-protocol2/src/authentication/sasl.rs
@@ -212,7 +212,7 @@ impl ScramSha256 {
                 password,
                 channel_binding,
             } => (nonce, password, channel_binding),
-            _ => return Err(io::Error::new(io::ErrorKind::Other, "invalid SCRAM state")),
+            _ => return Err(io::Error::other("invalid SCRAM state")),
         };
 
         let message =
@@ -291,7 +291,7 @@ impl ScramSha256 {
                 server_key,
                 auth_message,
             } => (server_key, auth_message),
-            _ => return Err(io::Error::new(io::ErrorKind::Other, "invalid SCRAM state")),
+            _ => return Err(io::Error::other("invalid SCRAM state")),
         };
 
         let message =
@@ -301,10 +301,7 @@ impl ScramSha256 {
 
         let verifier = match parsed {
             ServerFinalMessage::Error(e) => {
-                return Err(io::Error::new(
-                    io::ErrorKind::Other,
-                    format!("SCRAM error: {}", e),
-                ));
+                return Err(io::Error::other(format!("SCRAM error: {}", e)));
             }
             ServerFinalMessage::Verifier(verifier) => verifier,
         };
diff --git a/libs/remote_storage/src/azure_blob.rs b/libs/remote_storage/src/azure_blob.rs
index dee61a410d..18146c5464 100644
--- a/libs/remote_storage/src/azure_blob.rs
+++ b/libs/remote_storage/src/azure_blob.rs
@@ -801,8 +801,7 @@ where
             // that support needs to be hacked in.
             //
             // including {self:?} into the message would be useful, but unsure how to unproject.
-            _ => std::task::Poll::Ready(Err(std::io::Error::new(
-                std::io::ErrorKind::Other,
+            _ => std::task::Poll::Ready(Err(std::io::Error::other(
                 "cloned or initial values cannot be read",
             ))),
         }
     }
@@ -855,7 +854,7 @@ where
         };
         Err(azure_core::error::Error::new(
             azure_core::error::ErrorKind::Io,
-            std::io::Error::new(std::io::ErrorKind::Other, msg),
+            std::io::Error::other(msg),
         ))
     }
 
diff --git a/libs/utils/src/crashsafe.rs b/libs/utils/src/crashsafe.rs
index 290a5b2686..215fa36df4 100644
--- a/libs/utils/src/crashsafe.rs
+++ b/libs/utils/src/crashsafe.rs
@@ -81,12 +81,9 @@ pub fn path_with_suffix_extension(
 }
 
 pub fn fsync_file_and_parent(file_path: &Utf8Path) -> io::Result<()> {
-    let parent = file_path.parent().ok_or_else(|| {
-        io::Error::new(
-            io::ErrorKind::Other,
-            format!("File {file_path:?} has no parent"),
-        )
-    })?;
+    let parent = file_path
+        .parent()
+        .ok_or_else(|| io::Error::other(format!("File {file_path:?} has no parent")))?;
 
     fsync(file_path)?;
     fsync(parent)?;
diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs
index e979a70aec..200b91fc82 100644
--- a/pageserver/src/http/routes.rs
+++ b/pageserver/src/http/routes.rs
@@ -3381,11 +3381,11 @@ async fn put_tenant_timeline_import_basebackup(
 
     let broker_client = state.broker_client.clone();
 
-    let mut body = StreamReader::new(request.into_body().map(|res| {
-        res.map_err(|error| {
-            std::io::Error::new(std::io::ErrorKind::Other, anyhow::anyhow!(error))
-        })
-    }));
+    let mut body = StreamReader::new(
+        request
+            .into_body()
+            .map(|res| res.map_err(|error| std::io::Error::other(anyhow::anyhow!(error)))),
+    );
 
     tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
 
@@ -3459,7 +3459,7 @@ async fn put_tenant_timeline_import_wal(
 
     let mut body = StreamReader::new(request.into_body().map(|res| {
         res.map_err(|error| {
-            std::io::Error::new(std::io::ErrorKind::Other, anyhow::anyhow!(error))
+            std::io::Error::other(anyhow::anyhow!(error))
         })
     }));
 
diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs
index 900e98d7e9..2ac2fd0b81 100644
--- a/pageserver/src/tenant.rs
+++ b/pageserver/src/tenant.rs
@@ -920,6 +920,7 @@ enum StartCreatingTimelineResult {
     Idempotent(Arc<Timeline>),
 }
 
+#[allow(clippy::large_enum_variant, reason = "TODO")]
 enum TimelineInitAndSyncResult {
     ReadyToActivate(Arc<Timeline>),
     NeedsSpawnImportPgdata(TimelineInitAndSyncNeedsSpawnImportPgdata),
@@ -1006,6 +1007,7 @@ enum CreateTimelineCause {
     Delete,
 }
 
+#[allow(clippy::large_enum_variant, reason = "TODO")]
 enum LoadTimelineCause {
     Attach,
     Unoffload,
@@ -4399,10 +4401,7 @@ impl Tenant {
             .to_string();
 
         fail::fail_point!("tenant-config-before-write", |_| {
-            Err(std::io::Error::new(
-                std::io::ErrorKind::Other,
-                "tenant-config-before-write",
-            ))
+            Err(std::io::Error::other("tenant-config-before-write"))
         });
 
         // Convert the config to a toml file.
diff --git a/pageserver/src/tenant/blob_io.rs b/pageserver/src/tenant/blob_io.rs
index ff9a7e57b6..b0b2a16c2f 100644
--- a/pageserver/src/tenant/blob_io.rs
+++ b/pageserver/src/tenant/blob_io.rs
@@ -15,7 +15,7 @@
 //! len >= 128: 1CCCXXXX XXXXXXXX XXXXXXXX XXXXXXXX
 //!
 use std::cmp::min;
-use std::io::{Error, ErrorKind};
+use std::io::Error;
 
 use async_compression::Level;
 use bytes::{BufMut, BytesMut};
@@ -331,10 +331,7 @@ impl BlobWriter {
                 return (
                     (
                         io_buf.slice_len(),
-                        Err(Error::new(
-                            ErrorKind::Other,
-                            format!("blob too large ({len} bytes)"),
-                        )),
+                        Err(Error::other(format!("blob too large ({len} bytes)"))),
                     ),
                     srcbuf,
                 );
diff --git a/pageserver/src/tenant/block_io.rs b/pageserver/src/tenant/block_io.rs
index 66c586daff..6723155626 100644
--- a/pageserver/src/tenant/block_io.rs
+++ b/pageserver/src/tenant/block_io.rs
@@ -216,12 +216,8 @@ impl<'a> FileBlockReader<'a> {
         match cache
             .read_immutable_buf(self.file_id, blknum, ctx)
             .await
-            .map_err(|e| {
-                std::io::Error::new(
-                    std::io::ErrorKind::Other,
-                    format!("Failed to read immutable buf: {e:#}"),
-                )
-            })? {
+            .map_err(|e| std::io::Error::other(format!("Failed to read immutable buf: {e:#}")))?
+        {
             ReadBufResult::Found(guard) => Ok(guard.into()),
             ReadBufResult::NotFound(write_guard) => {
                 // Read the page from disk into the buffer
diff --git a/pageserver/src/tenant/storage_layer/batch_split_writer.rs b/pageserver/src/tenant/storage_layer/batch_split_writer.rs
index fd50e4805d..29ada15c36 100644
--- a/pageserver/src/tenant/storage_layer/batch_split_writer.rs
+++ b/pageserver/src/tenant/storage_layer/batch_split_writer.rs
@@ -366,7 +366,7 @@ impl SplitDeltaLayerWriter {
             )
             .await?;
             let (start_key, prev_delta_writer) =
-                std::mem::replace(&mut self.inner, Some((key, next_delta_writer))).unwrap();
+                self.inner.replace((key, next_delta_writer)).unwrap();
             self.batches.add_unfinished_delta_writer(
                 prev_delta_writer,
                 start_key..key,
diff --git a/pageserver/src/tenant/storage_layer/inmemory_layer/vectored_dio_read.rs b/pageserver/src/tenant/storage_layer/inmemory_layer/vectored_dio_read.rs
index 90455fd0ca..ea354fc716 100644
--- a/pageserver/src/tenant/storage_layer/inmemory_layer/vectored_dio_read.rs
+++ b/pageserver/src/tenant/storage_layer/inmemory_layer/vectored_dio_read.rs
@@ -766,7 +766,7 @@ mod tests {
                     rand::Rng::fill(&mut rand::thread_rng(), &mut dst_slice[len..]); // to discover bugs
                     Ok((dst, len))
                 }
-                Err(e) => Err(std::io::Error::new(std::io::ErrorKind::Other, e)),
+                Err(e) => Err(std::io::Error::other(e)),
             }
         }
     }
diff --git a/pageserver/src/tenant/storage_layer/merge_iterator.rs b/pageserver/src/tenant/storage_layer/merge_iterator.rs
index 76cdddd06a..55db9fe06a 100644
--- a/pageserver/src/tenant/storage_layer/merge_iterator.rs
+++ b/pageserver/src/tenant/storage_layer/merge_iterator.rs
@@ -59,6 +59,7 @@ impl LayerIterRef<'_> {
 /// 1. Unified iterator for image and delta layers.
 /// 2. `Ord` for use in [`MergeIterator::heap`] (for the k-merge).
 /// 3. Lazy creation of the real delta/image iterator.
+#[allow(clippy::large_enum_variant, reason = "TODO")]
 pub(crate) enum IteratorWrapper<'a> {
     NotLoaded {
         ctx: &'a RequestContext,
diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs
index ca34cbaf48..b10409689c 100644
--- a/pageserver/src/tenant/timeline.rs
+++ b/pageserver/src/tenant/timeline.rs
@@ -1039,6 +1039,7 @@ pub(crate) enum ShutdownMode {
     Hard,
 }
 
+#[allow(clippy::large_enum_variant, reason = "TODO")]
 enum ImageLayerCreationOutcome {
     /// We generated an image layer
     Generated {
diff --git a/pageserver/src/tenant/upload_queue.rs b/pageserver/src/tenant/upload_queue.rs
index d5dc9666ce..be1b55ffa3 100644
--- a/pageserver/src/tenant/upload_queue.rs
+++ b/pageserver/src/tenant/upload_queue.rs
@@ -302,6 +302,7 @@ pub struct UploadQueueStoppedDeletable {
     pub(super) deleted_at: SetDeletedFlagProgress,
 }
 
+#[allow(clippy::large_enum_variant, reason = "TODO")]
 pub enum UploadQueueStopped {
     Deletable(UploadQueueStoppedDeletable),
     Uninitialized,
diff --git a/proxy/src/cancellation.rs b/proxy/src/cancellation.rs
index 8263e5aa2a..d6a7406f67 100644
--- a/proxy/src/cancellation.rs
+++ b/proxy/src/cancellation.rs
@@ -425,12 +425,7 @@ impl CancelClosure {
             &mut mk_tls,
             &self.hostname,
         )
-        .map_err(|e| {
-            CancelError::IO(std::io::Error::new(
-                std::io::ErrorKind::Other,
-                e.to_string(),
-            ))
-        })?;
+        .map_err(|e| CancelError::IO(std::io::Error::other(e.to_string())))?;
 
         self.cancel_token.cancel_query_raw(socket, tls).await?;
         debug!("query was cancelled");
diff --git a/proxy/src/proxy/tests/mod.rs b/proxy/src/proxy/tests/mod.rs
index 2c3e70138d..2268e60d25 100644
--- a/proxy/src/proxy/tests/mod.rs
+++ b/proxy/src/proxy/tests/mod.rs
@@ -568,7 +568,7 @@ fn helper_create_cached_node_info(cache: &'static NodeInfoCache) -> CachedNodeIn
 fn helper_create_connect_info(
     mechanism: &TestConnectMechanism,
 ) -> auth::Backend<'static, ComputeCredentials> {
-    let user_info = auth::Backend::ControlPlane(
+    auth::Backend::ControlPlane(
         MaybeOwned::Owned(ControlPlaneClient::Test(Box::new(mechanism.clone()))),
         ComputeCredentials {
             info: ComputeUserInfo {
@@ -578,8 +578,7 @@ fn helper_create_connect_info(
             },
             keys: ComputeCredentialKeys::Password("password".into()),
         },
-    );
-    user_info
+    )
 }
 
 fn config() -> ComputeConfig {
diff --git a/proxy/src/serverless/conn_pool_lib.rs b/proxy/src/serverless/conn_pool_lib.rs
index 77b548cc43..42a3ea17a2 100644
--- a/proxy/src/serverless/conn_pool_lib.rs
+++ b/proxy/src/serverless/conn_pool_lib.rs
@@ -47,6 +47,7 @@ impl ConnInfo {
 }
 
 #[derive(Clone)]
+#[allow(clippy::large_enum_variant, reason = "TODO")]
 pub(crate) enum ClientDataEnum {
     Remote(ClientDataRemote),
     Local(ClientDataLocal),
diff --git a/safekeeper/src/timeline.rs b/safekeeper/src/timeline.rs
index e6a7ade9f2..b7ba28f435 100644
--- a/safekeeper/src/timeline.rs
+++ b/safekeeper/src/timeline.rs
@@ -138,6 +138,7 @@ impl Drop for WriteGuardSharedState<'_> {
 /// Usually it holds SafeKeeper, but it also supports offloaded timeline state. In this
 /// case, SafeKeeper is not available (because WAL is not present on disk) and all
 /// operations can be done only with control file.
+#[allow(clippy::large_enum_variant, reason = "TODO")]
 pub enum StateSK {
     Loaded(SafeKeeper<control_file::FileStorage, wal_storage::PhysicalStorage>),
     Offloaded(Box<TimelineState<TimelinePersistentState>>),
diff --git a/safekeeper/src/timeline_eviction.rs b/safekeeper/src/timeline_eviction.rs
index 06ccb32d03..84c636daf6 100644
--- a/safekeeper/src/timeline_eviction.rs
+++ b/safekeeper/src/timeline_eviction.rs
@@ -35,7 +35,7 @@ impl Manager {
         next_event: &Option<Instant>,
         state: &StateSnapshot,
     ) -> bool {
-        let ready = self.backup_task.is_none()
+        self.backup_task.is_none()
             && self.recovery_task.is_none()
             && self.wal_removal_task.is_none()
             && self.partial_backup_task.is_none()
@@ -61,8 +61,7 @@ impl Manager {
                 .unwrap()
                 .flush_lsn
                 .segment_number(self.wal_seg_size)
-                == self.last_removed_segno + 1;
-        ready
+                == self.last_removed_segno + 1
     }
 
     /// Evict the timeline to remote storage. Returns whether the eviction was successful.
diff --git a/storage_broker/src/bin/storage_broker.rs b/storage_broker/src/bin/storage_broker.rs
index f1bd7ba708..a7e0c986e6 100644
--- a/storage_broker/src/bin/storage_broker.rs
+++ b/storage_broker/src/bin/storage_broker.rs
@@ -96,6 +96,7 @@ enum Message {
 
 impl Message {
     /// Convert proto message to internal message.
+    #[allow(clippy::result_large_err, reason = "TODO")]
     pub fn from(proto_msg: TypedMessage) -> Result<Self, Status> {
         match proto_msg.r#type() {
             MessageType::SafekeeperTimelineInfo => Ok(Message::SafekeeperTimelineInfo(
@@ -127,6 +128,7 @@ impl Message {
     }
 
     /// Get the tenant_timeline_id from the message.
+    #[allow(clippy::result_large_err, reason = "TODO")]
     pub fn tenant_timeline_id(&self) -> Result<Option<TenantTimelineId>, Status> {
         match self {
             Message::SafekeeperTimelineInfo(msg) => Ok(msg
@@ -185,6 +187,7 @@ enum SubscriptionKey {
 
 impl SubscriptionKey {
     /// Parse protobuf subkey (protobuf doesn't have fixed size bytes, we get vectors).
+    #[allow(clippy::result_large_err, reason = "TODO")]
     pub fn from_proto_subscription_key(key: ProtoSubscriptionKey) -> Result<Self, Status> {
         match key {
             ProtoSubscriptionKey::All(_) => Ok(SubscriptionKey::All),
@@ -195,6 +198,7 @@ impl SubscriptionKey {
     }
 
     /// Parse from FilterTenantTimelineId
+    #[allow(clippy::result_large_err, reason = "TODO")]
     pub fn from_proto_filter_tenant_timeline_id(
         opt: Option<&FilterTenantTimelineId>,
     ) -> Result<Self, Status> {
@@ -385,6 +389,7 @@ impl Registry {
     }
 
     /// Send msg to relevant subscribers.
+    #[allow(clippy::result_large_err, reason = "TODO")]
     pub fn send_msg(&self, msg: &Message) -> Result<(), Status> {
         PROCESSED_MESSAGES_TOTAL.inc();
 
@@ -436,6 +441,7 @@ struct Publisher {
 
 impl Publisher {
     /// Send msg to relevant subscribers.
+    #[allow(clippy::result_large_err, reason = "TODO")]
     pub fn send_msg(&mut self, msg: &Message) -> Result<(), Status> {
         self.registry.send_msg(msg)
     }
diff --git a/storage_broker/src/lib.rs b/storage_broker/src/lib.rs
index 55d411f607..7b36f5e948 100644
--- a/storage_broker/src/lib.rs
+++ b/storage_broker/src/lib.rs
@@ -79,6 +79,7 @@ impl BrokerClientChannel {
 }
 
 // parse variable length bytes from protobuf
+#[allow(clippy::result_large_err, reason = "TODO")]
 pub fn parse_proto_ttid(proto_ttid: &ProtoTenantTimelineId) -> Result<TenantTimelineId, Status> {
     let tenant_id = TenantId::from_slice(&proto_ttid.tenant_id)
         .map_err(|e| Status::new(Code::InvalidArgument, format!("malformed tenant_id: {}", e)))?;