chore: fix clippy lints from nightly-2025-03-16 (#11273)

I like to run nightly clippy every so often to make our future Rust
upgrades easier. Some notable changes:

* Prefer `next_back()` over `last()`. The default `Iterator::last()`
implementation runs forward through the entire iterator to reach the end,
whereas `next_back()` on a `DoubleEndedIterator` goes straight to the final
element (see the sketch after this list).

* Prefer `io::Error::other()` over `io::Error::new(io::ErrorKind::Other, ...)`.

* Use implicit returns (drop trailing `let` bindings and redundant `return`s).
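
A minimal sketch of the first two rewrites; the values below are illustrative,
not code from this commit:

```rust
use std::io;

fn main() {
    // The default `Iterator::last()` walks forward through every element,
    // while `next_back()` on a `DoubleEndedIterator` goes straight to the end.
    let path = "usr/local/bin/pg_dump";
    assert_eq!(path.split('/').last(), path.split('/').next_back());

    // `io::Error::other(e)` is shorthand for
    // `io::Error::new(io::ErrorKind::Other, e)`; the two are equivalent.
    let old = io::Error::new(io::ErrorKind::Other, "failed to start pg_dump");
    let new = io::Error::other("failed to start pg_dump");
    assert_eq!(old.kind(), new.kind());
}
```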

One case where I haven't dealt with the fallout is the now
[more sensitive "large enum variant"
lint](https://github.com/rust-lang/rust-clippy/pull/13833). I chose not
to make any decisions around it here, and simply marked the affected enums
with `#[allow(clippy::large_enum_variant, reason = "TODO")]` for now.
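
For context, a rough sketch of what the sharper lint flags and the
conventional `Box` fix that this commit defers; the enum below is made up for
illustration:

```rust
use std::mem::size_of;

// One oversized variant makes every value of the enum pay for the worst
// case; this is what `clippy::large_enum_variant` flags.
#[allow(dead_code)]
enum Response {
    Ok,
    Error([u8; 4096]), // inline payload dominates the enum's size
}

// The conventional fix is to box the large variant so the enum stays small.
#[allow(dead_code)]
enum BoxedResponse {
    Ok,
    Error(Box<[u8; 4096]>),
}

fn main() {
    assert!(size_of::<Response>() >= 4096);
    assert!(size_of::<BoxedResponse>() <= size_of::<usize>() * 2);
    // This commit instead defers that decision per enum with
    // `#[allow(clippy::large_enum_variant, reason = "TODO")]`.
}
```
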
Author: Conrad Ludgate
Date: 2025-04-09 16:04:42 +01:00
Committed by: GitHub
Parent: d11f23a341
Commit: 72832b3214
25 changed files with 57 additions and 67 deletions

View File

@@ -98,13 +98,15 @@ pub async fn get_database_schema(
.kill_on_drop(true)
.spawn()?;
let stdout = cmd.stdout.take().ok_or_else(|| {
std::io::Error::new(std::io::ErrorKind::Other, "Failed to capture stdout.")
})?;
let stdout = cmd
.stdout
.take()
.ok_or_else(|| std::io::Error::other("Failed to capture stdout."))?;
let stderr = cmd.stderr.take().ok_or_else(|| {
std::io::Error::new(std::io::ErrorKind::Other, "Failed to capture stderr.")
})?;
let stderr = cmd
.stderr
.take()
.ok_or_else(|| std::io::Error::other("Failed to capture stderr."))?;
let mut stdout_reader = FramedRead::new(stdout, BytesCodec::new());
let stderr_reader = BufReader::new(stderr);
@@ -128,8 +130,7 @@ pub async fn get_database_schema(
}
});
return Err(SchemaDumpError::IO(std::io::Error::new(
std::io::ErrorKind::Other,
return Err(SchemaDumpError::IO(std::io::Error::other(
"failed to start pg_dump",
)));
}

View File

@@ -941,7 +941,7 @@ async fn main() -> anyhow::Result<()> {
let mut node_to_fill_descs = Vec::new();
for desc in node_descs {
let to_drain = nodes.iter().any(|id| *id == desc.id);
let to_drain = nodes.contains(&desc.id);
if to_drain {
node_to_drain_descs.push(desc);
} else {

View File

@@ -5,7 +5,6 @@
#![deny(unsafe_code)]
#![deny(clippy::undocumented_unsafe_blocks)]
use std::future::Future;
use std::io::ErrorKind;
use std::net::SocketAddr;
use std::os::fd::{AsRawFd, RawFd};
use std::pin::Pin;
@@ -227,7 +226,7 @@ impl<IO: AsyncRead + AsyncWrite + Unpin> MaybeWriteOnly<IO> {
match self {
MaybeWriteOnly::Full(framed) => framed.read_startup_message().await,
MaybeWriteOnly::WriteOnly(_) => {
Err(io::Error::new(ErrorKind::Other, "reading from write only half").into())
Err(io::Error::other("reading from write only half").into())
}
MaybeWriteOnly::Broken => panic!("IO on invalid MaybeWriteOnly"),
}
@@ -237,7 +236,7 @@ impl<IO: AsyncRead + AsyncWrite + Unpin> MaybeWriteOnly<IO> {
match self {
MaybeWriteOnly::Full(framed) => framed.read_message().await,
MaybeWriteOnly::WriteOnly(_) => {
Err(io::Error::new(ErrorKind::Other, "reading from write only half").into())
Err(io::Error::other("reading from write only half").into())
}
MaybeWriteOnly::Broken => panic!("IO on invalid MaybeWriteOnly"),
}
@@ -975,7 +974,7 @@ impl<IO: AsyncRead + AsyncWrite + Unpin> AsyncWrite for CopyDataWriter<'_, IO> {
.write_message_noflush(&BeMessage::CopyData(buf))
// write_message only writes to the buffer, so it can fail iff the
// message is invalid, but CopyData can't be invalid.
.map_err(|_| io::Error::new(ErrorKind::Other, "failed to serialize CopyData"))?;
.map_err(|_| io::Error::other("failed to serialize CopyData"))?;
Poll::Ready(Ok(buf.len()))
}

View File

@@ -85,8 +85,8 @@ static KEY: Lazy<rustls::pki_types::PrivateKeyDer<'static>> = Lazy::new(|| {
static CERT: Lazy<rustls::pki_types::CertificateDer<'static>> = Lazy::new(|| {
let mut cursor = Cursor::new(include_bytes!("cert.pem"));
let cert = rustls_pemfile::certs(&mut cursor).next().unwrap().unwrap();
cert
rustls_pemfile::certs(&mut cursor).next().unwrap().unwrap()
});
// test that basic select with ssl works

View File

@@ -35,7 +35,7 @@ impl ConnectionError {
pub fn into_io_error(self) -> io::Error {
match self {
ConnectionError::Io(io) => io,
ConnectionError::Protocol(pe) => io::Error::new(io::ErrorKind::Other, pe.to_string()),
ConnectionError::Protocol(pe) => io::Error::other(pe.to_string()),
}
}
}

View File

@@ -257,7 +257,7 @@ pub enum ProtocolError {
impl ProtocolError {
/// Proxy stream.rs uses only io::Error; provide it.
pub fn into_io_error(self) -> io::Error {
io::Error::new(io::ErrorKind::Other, self.to_string())
io::Error::other(self.to_string())
}
}

View File

@@ -212,7 +212,7 @@ impl ScramSha256 {
password,
channel_binding,
} => (nonce, password, channel_binding),
_ => return Err(io::Error::new(io::ErrorKind::Other, "invalid SCRAM state")),
_ => return Err(io::Error::other("invalid SCRAM state")),
};
let message =
@@ -291,7 +291,7 @@ impl ScramSha256 {
server_key,
auth_message,
} => (server_key, auth_message),
_ => return Err(io::Error::new(io::ErrorKind::Other, "invalid SCRAM state")),
_ => return Err(io::Error::other("invalid SCRAM state")),
};
let message =
@@ -301,10 +301,7 @@ impl ScramSha256 {
let verifier = match parsed {
ServerFinalMessage::Error(e) => {
return Err(io::Error::new(
io::ErrorKind::Other,
format!("SCRAM error: {}", e),
));
return Err(io::Error::other(format!("SCRAM error: {}", e)));
}
ServerFinalMessage::Verifier(verifier) => verifier,
};

View File

@@ -801,8 +801,7 @@ where
// that support needs to be hacked in.
//
// including {self:?} into the message would be useful, but unsure how to unproject.
_ => std::task::Poll::Ready(Err(std::io::Error::new(
std::io::ErrorKind::Other,
_ => std::task::Poll::Ready(Err(std::io::Error::other(
"cloned or initial values cannot be read",
))),
}
@@ -855,7 +854,7 @@ where
};
Err(azure_core::error::Error::new(
azure_core::error::ErrorKind::Io,
std::io::Error::new(std::io::ErrorKind::Other, msg),
std::io::Error::other(msg),
))
}

View File

@@ -81,12 +81,9 @@ pub fn path_with_suffix_extension(
}
pub fn fsync_file_and_parent(file_path: &Utf8Path) -> io::Result<()> {
let parent = file_path.parent().ok_or_else(|| {
io::Error::new(
io::ErrorKind::Other,
format!("File {file_path:?} has no parent"),
)
})?;
let parent = file_path
.parent()
.ok_or_else(|| io::Error::other(format!("File {file_path:?} has no parent")))?;
fsync(file_path)?;
fsync(parent)?;

View File

@@ -3381,11 +3381,11 @@ async fn put_tenant_timeline_import_basebackup(
let broker_client = state.broker_client.clone();
let mut body = StreamReader::new(request.into_body().map(|res| {
res.map_err(|error| {
std::io::Error::new(std::io::ErrorKind::Other, anyhow::anyhow!(error))
})
}));
let mut body = StreamReader::new(
request
.into_body()
.map(|res| res.map_err(|error| std::io::Error::other(anyhow::anyhow!(error)))),
);
tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
@@ -3459,7 +3459,7 @@ async fn put_tenant_timeline_import_wal(
let mut body = StreamReader::new(request.into_body().map(|res| {
res.map_err(|error| {
std::io::Error::new(std::io::ErrorKind::Other, anyhow::anyhow!(error))
std::io::Error::other(anyhow::anyhow!(error))
})
}));

View File

@@ -920,6 +920,7 @@ enum StartCreatingTimelineResult {
Idempotent(Arc<Timeline>),
}
#[allow(clippy::large_enum_variant, reason = "TODO")]
enum TimelineInitAndSyncResult {
ReadyToActivate(Arc<Timeline>),
NeedsSpawnImportPgdata(TimelineInitAndSyncNeedsSpawnImportPgdata),
@@ -1006,6 +1007,7 @@ enum CreateTimelineCause {
Delete,
}
#[allow(clippy::large_enum_variant, reason = "TODO")]
enum LoadTimelineCause {
Attach,
Unoffload,
@@ -4399,10 +4401,7 @@ impl Tenant {
.to_string();
fail::fail_point!("tenant-config-before-write", |_| {
Err(std::io::Error::new(
std::io::ErrorKind::Other,
"tenant-config-before-write",
))
Err(std::io::Error::other("tenant-config-before-write"))
});
// Convert the config to a toml file.

View File

@@ -15,7 +15,7 @@
//! len >= 128: 1CCCXXXX XXXXXXXX XXXXXXXX XXXXXXXX
//!
use std::cmp::min;
use std::io::{Error, ErrorKind};
use std::io::Error;
use async_compression::Level;
use bytes::{BufMut, BytesMut};
@@ -331,10 +331,7 @@ impl<const BUFFERED: bool> BlobWriter<BUFFERED> {
return (
(
io_buf.slice_len(),
Err(Error::new(
ErrorKind::Other,
format!("blob too large ({len} bytes)"),
)),
Err(Error::other(format!("blob too large ({len} bytes)"))),
),
srcbuf,
);

View File

@@ -216,12 +216,8 @@ impl<'a> FileBlockReader<'a> {
match cache
.read_immutable_buf(self.file_id, blknum, ctx)
.await
.map_err(|e| {
std::io::Error::new(
std::io::ErrorKind::Other,
format!("Failed to read immutable buf: {e:#}"),
)
})? {
.map_err(|e| std::io::Error::other(format!("Failed to read immutable buf: {e:#}")))?
{
ReadBufResult::Found(guard) => Ok(guard.into()),
ReadBufResult::NotFound(write_guard) => {
// Read the page from disk into the buffer

View File

@@ -366,7 +366,7 @@ impl SplitDeltaLayerWriter {
)
.await?;
let (start_key, prev_delta_writer) =
std::mem::replace(&mut self.inner, Some((key, next_delta_writer))).unwrap();
self.inner.replace((key, next_delta_writer)).unwrap();
self.batches.add_unfinished_delta_writer(
prev_delta_writer,
start_key..key,

View File

@@ -766,7 +766,7 @@ mod tests {
rand::Rng::fill(&mut rand::thread_rng(), &mut dst_slice[len..]); // to discover bugs
Ok((dst, len))
}
Err(e) => Err(std::io::Error::new(std::io::ErrorKind::Other, e)),
Err(e) => Err(std::io::Error::other(e)),
}
}
}

View File

@@ -59,6 +59,7 @@ impl LayerIterRef<'_> {
/// 1. Unified iterator for image and delta layers.
/// 2. `Ord` for use in [`MergeIterator::heap`] (for the k-merge).
/// 3. Lazy creation of the real delta/image iterator.
#[allow(clippy::large_enum_variant, reason = "TODO")]
pub(crate) enum IteratorWrapper<'a> {
NotLoaded {
ctx: &'a RequestContext,

View File

@@ -1039,6 +1039,7 @@ pub(crate) enum ShutdownMode {
Hard,
}
#[allow(clippy::large_enum_variant, reason = "TODO")]
enum ImageLayerCreationOutcome {
/// We generated an image layer
Generated {

View File

@@ -302,6 +302,7 @@ pub struct UploadQueueStoppedDeletable {
pub(super) deleted_at: SetDeletedFlagProgress,
}
#[allow(clippy::large_enum_variant, reason = "TODO")]
pub enum UploadQueueStopped {
Deletable(UploadQueueStoppedDeletable),
Uninitialized,

View File

@@ -425,12 +425,7 @@ impl CancelClosure {
&mut mk_tls,
&self.hostname,
)
.map_err(|e| {
CancelError::IO(std::io::Error::new(
std::io::ErrorKind::Other,
e.to_string(),
))
})?;
.map_err(|e| CancelError::IO(std::io::Error::other(e.to_string())))?;
self.cancel_token.cancel_query_raw(socket, tls).await?;
debug!("query was cancelled");

View File

@@ -568,7 +568,7 @@ fn helper_create_cached_node_info(cache: &'static NodeInfoCache) -> CachedNodeIn
fn helper_create_connect_info(
mechanism: &TestConnectMechanism,
) -> auth::Backend<'static, ComputeCredentials> {
let user_info = auth::Backend::ControlPlane(
auth::Backend::ControlPlane(
MaybeOwned::Owned(ControlPlaneClient::Test(Box::new(mechanism.clone()))),
ComputeCredentials {
info: ComputeUserInfo {
@@ -578,8 +578,7 @@ fn helper_create_connect_info(
},
keys: ComputeCredentialKeys::Password("password".into()),
},
);
user_info
)
}
fn config() -> ComputeConfig {

View File

@@ -47,6 +47,7 @@ impl ConnInfo {
}
#[derive(Clone)]
#[allow(clippy::large_enum_variant, reason = "TODO")]
pub(crate) enum ClientDataEnum {
Remote(ClientDataRemote),
Local(ClientDataLocal),

View File

@@ -138,6 +138,7 @@ impl Drop for WriteGuardSharedState<'_> {
/// Usually it holds SafeKeeper, but it also supports offloaded timeline state. In this
/// case, SafeKeeper is not available (because WAL is not present on disk) and all
/// operations can be done only with control file.
#[allow(clippy::large_enum_variant, reason = "TODO")]
pub enum StateSK {
Loaded(SafeKeeper<control_file::FileStorage, wal_storage::PhysicalStorage>),
Offloaded(Box<TimelineState<control_file::FileStorage>>),

View File

@@ -35,7 +35,7 @@ impl Manager {
next_event: &Option<tokio::time::Instant>,
state: &StateSnapshot,
) -> bool {
let ready = self.backup_task.is_none()
self.backup_task.is_none()
&& self.recovery_task.is_none()
&& self.wal_removal_task.is_none()
&& self.partial_backup_task.is_none()
@@ -61,8 +61,7 @@ impl Manager {
.unwrap()
.flush_lsn
.segment_number(self.wal_seg_size)
== self.last_removed_segno + 1;
ready
== self.last_removed_segno + 1
}
/// Evict the timeline to remote storage. Returns whether the eviction was successful.

View File

@@ -96,6 +96,7 @@ enum Message {
impl Message {
/// Convert proto message to internal message.
#[allow(clippy::result_large_err, reason = "TODO")]
pub fn from(proto_msg: TypedMessage) -> Result<Self, Status> {
match proto_msg.r#type() {
MessageType::SafekeeperTimelineInfo => Ok(Message::SafekeeperTimelineInfo(
@@ -127,6 +128,7 @@ impl Message {
}
/// Get the tenant_timeline_id from the message.
#[allow(clippy::result_large_err, reason = "TODO")]
pub fn tenant_timeline_id(&self) -> Result<Option<TenantTimelineId>, Status> {
match self {
Message::SafekeeperTimelineInfo(msg) => Ok(msg
@@ -185,6 +187,7 @@ enum SubscriptionKey {
impl SubscriptionKey {
/// Parse protobuf subkey (protobuf doesn't have fixed size bytes, we get vectors).
#[allow(clippy::result_large_err, reason = "TODO")]
pub fn from_proto_subscription_key(key: ProtoSubscriptionKey) -> Result<Self, Status> {
match key {
ProtoSubscriptionKey::All(_) => Ok(SubscriptionKey::All),
@@ -195,6 +198,7 @@ impl SubscriptionKey {
}
/// Parse from FilterTenantTimelineId
#[allow(clippy::result_large_err, reason = "TODO")]
pub fn from_proto_filter_tenant_timeline_id(
opt: Option<&FilterTenantTimelineId>,
) -> Result<Self, Status> {
@@ -385,6 +389,7 @@ impl Registry {
}
/// Send msg to relevant subscribers.
#[allow(clippy::result_large_err, reason = "TODO")]
pub fn send_msg(&self, msg: &Message) -> Result<(), Status> {
PROCESSED_MESSAGES_TOTAL.inc();
@@ -436,6 +441,7 @@ struct Publisher {
impl Publisher {
/// Send msg to relevant subscribers.
#[allow(clippy::result_large_err, reason = "TODO")]
pub fn send_msg(&mut self, msg: &Message) -> Result<(), Status> {
self.registry.send_msg(msg)
}

View File

@@ -79,6 +79,7 @@ impl BrokerClientChannel {
}
// parse variable length bytes from protobuf
#[allow(clippy::result_large_err, reason = "TODO")]
pub fn parse_proto_ttid(proto_ttid: &ProtoTenantTimelineId) -> Result<TenantTimelineId, Status> {
let tenant_id = TenantId::from_slice(&proto_ttid.tenant_id)
.map_err(|e| Status::new(Code::InvalidArgument, format!("malformed tenant_id: {}", e)))?;