Fix clippy lints and enable clippy checking in CI
@@ -128,6 +128,14 @@ jobs:
             - ~/.cargo/git
             - target
 
+      # Run style checks
+      # has to run separately from cargo fmt section
+      # since needs to run with dependencies
+      - run:
+          name: clippy
+          command: |
+            ./run_clippy.sh
+
       # Run rust unit tests
       - run: cargo test
 
@@ -95,7 +95,7 @@ impl ComputeControlPlane {
             .branch_get_by_name(&tenantid, branch_name)?
             .timeline_id;
 
-        let port = port.unwrap_or(self.get_port());
+        let port = port.unwrap_or_else(|| self.get_port());
         let node = Arc::new(PostgresNode {
             name: branch_name.to_owned(),
             address: SocketAddr::new("127.0.0.1".parse().unwrap(), port),

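The `unwrap_or` → `unwrap_or_else` change above (like the `ok_or` → `ok_or_else` changes later in this commit) addresses clippy's `or_fun_call` lint: the eager variant evaluates its argument even when it is never needed. A minimal standalone sketch of the difference, with made-up names rather than code from this repository:

fn expensive_default() -> u16 {
    // Stands in for real work: allocation, scanning a port range, etc.
    println!("computing default port");
    5432
}

fn main() {
    let port: Option<u16> = Some(1234);

    // Eager: expensive_default() runs even though `port` is Some.
    let a = port.unwrap_or(expensive_default());

    // Lazy: the closure only runs if `port` is None.
    let b = port.unwrap_or_else(expensive_default);

    assert_eq!(a, b);
}
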
@@ -385,7 +385,7 @@ impl PostgresNode {
         // procedure evolves quite actively right now, so let's think about it again
         // when things would be more stable (TODO).
         let lsn = self.sync_walkeepers()?;
-        if lsn == Lsn(0 as u64) {
+        if lsn == Lsn(0) {
             None
         } else {
             Some(lsn)

@@ -5,7 +5,6 @@
 // script which will use local paths.
 //
 use anyhow::{Context, Result};
-use hex;
 use serde::{Deserialize, Serialize};
 use std::env;
 use std::fs;

@@ -180,7 +180,7 @@ impl PageServerNode {
                 io::stdout().flush().unwrap();
             } else {
                 if retries == 5 {
-                    print!("\n") // put a line break after dots for second message
+                    println!() // put a line break after dots for second message
                 }
                 println!(
                     "Pageserver not responding yet, err {} retrying ({})...",

@@ -266,12 +266,7 @@ pub(crate) fn get_branches(conf: &PageServerConf, tenantid: &ZTenantId) -> Resul
     std::fs::read_dir(&branches_dir)?
         .map(|dir_entry_res| {
             let dir_entry = dir_entry_res?;
-            Ok(BranchInfo::from_path(
-                dir_entry.path(),
-                conf,
-                tenantid,
-                &repo,
-            )?)
+            BranchInfo::from_path(dir_entry.path(), conf, tenantid, &repo)
         })
         .collect()
 }

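Collapsing `Ok(BranchInfo::from_path(...)?)` into a direct call is what clippy's `needless_question_mark` lint flags: when the closure already returns the same `Result` type, unwrapping with `?` only to re-wrap in `Ok` is a no-op. A small sketch of the pattern, using a plain `ParseIntError` rather than the repository's error types:

use std::num::ParseIntError;

// Flagged by clippy::needless_question_mark: `?` unwraps the Result
// only for `Ok` to re-wrap it immediately.
fn parse_verbose(s: &str) -> Result<u32, ParseIntError> {
    Ok(s.parse::<u32>()?)
}

// Equivalent, and what clippy suggests: return the Result directly.
fn parse_direct(s: &str) -> Result<u32, ParseIntError> {
    s.parse::<u32>()
}

fn main() {
    assert_eq!(parse_verbose("42"), parse_direct("42"));
}
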
@@ -77,11 +77,9 @@ fn parse_request_param<T: FromStr>(
 ) -> Result<T, ApiError> {
     match get_request_param(request, param_name)?.parse() {
         Ok(v) => Ok(v),
-        Err(_) => {
-            return Err(ApiError::BadRequest(
-                "failed to parse tenant id".to_string(),
-            ))
-        }
+        Err(_) => Err(ApiError::BadRequest(
+            "failed to parse tenant id".to_string(),
+        )),
     }
 }

@@ -130,7 +128,7 @@ async fn branch_detail_handler(request: Request<Body>) -> Result<Response<Body>,
     let tenantid: ZTenantId = parse_request_param(&request, "tenant_id")?;
     let branch_name: &str = get_request_param(&request, "branch_name")?;
     let conf = get_state(&request).conf;
-    let path = conf.branch_path(&branch_name, &tenantid);
+    let path = conf.branch_path(branch_name, &tenantid);
 
     let response_data = tokio::task::spawn_blocking(move || {
         let repo = tenant_mgr::get_repository_for_tenant(&tenantid)?;

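Dropping the `&` in front of `branch_name` here (and in the `read_blob`, `create_from_src`, and `Arc::ptr_eq` hunks below) is clippy's `needless_borrow`: `branch_name` is already a `&str`, so `&branch_name` creates a `&&str` that the compiler silently dereferences again. A minimal sketch with a hypothetical helper, not an API from this codebase:

// A hypothetical function that borrows a string slice.
fn describe(name: &str) -> String {
    format!("branch {}", name)
}

fn main() {
    let name: &str = "main";

    // `&name` is a `&&str` that deref coercion turns back into `&str`;
    // clippy::needless_borrow suggests passing `name` directly.
    let verbose = describe(&name);
    let direct = describe(name);

    assert_eq!(verbose, direct);
}
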
@@ -260,7 +260,7 @@ impl LayeredRepository {
             }
 
             timelines.insert(timelineid, timeline.clone());
-            Ok(timeline.clone())
+            Ok(timeline)
         }
     }
 }

@@ -673,8 +673,8 @@ impl Timeline for LayeredTimeline {
 
     fn list_rels(&self, spcnode: u32, dbnode: u32, lsn: Lsn) -> Result<HashSet<RelishTag>> {
         let request_tag = RelTag {
-            spcnode: spcnode,
-            dbnode: dbnode,
+            spcnode,
+            dbnode,
             relnode: 0,
             forknum: 0,
         };

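`spcnode: spcnode` → `spcnode` is clippy's `redundant_field_names`: when a local variable and a struct field share a name, field init shorthand says the same thing once. A tiny sketch with a made-up `RelTagLike` struct:

#[derive(Debug, PartialEq)]
struct RelTagLike {
    spcnode: u32,
    dbnode: u32,
}

fn main() {
    let spcnode = 1663;
    let dbnode = 16384;

    // Flagged by clippy::redundant_field_names:
    let verbose = RelTagLike { spcnode: spcnode, dbnode: dbnode };

    // Field init shorthand, as used in the diff:
    let shorthand = RelTagLike { spcnode, dbnode };

    assert_eq!(verbose, shorthand);
}
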
@@ -298,7 +298,7 @@ impl Layer for DeltaLayer {
         let mut desc = String::new();
 
         if let Some(page_image_range) = v.page_image_range.as_ref() {
-            let image = read_blob(&chapter, &page_image_range)?;
+            let image = read_blob(&chapter, page_image_range)?;
             write!(&mut desc, " img {} bytes", image.len())?;
         }
         if let Some(record_range) = v.record_range.as_ref() {

@@ -647,7 +647,7 @@ impl InMemoryLayer {
 
         let inner = self.inner.lock().unwrap();
 
-        let drop_lsn = inner.drop_lsn.clone();
+        let drop_lsn = inner.drop_lsn;
         let predecessor = inner.predecessor.clone();
 
         let mut before_page_versions;

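Dropping `.clone()` on `drop_lsn` works because `Lsn` wraps a `u64` and is `Copy`, so `Option<Lsn>` is `Copy` as well; clippy flags the explicit clone as `clone_on_copy`. A sketch with a stand-in `Lsn` newtype, not the repository's real type:

// Stand-in for an LSN newtype that wraps a u64 and is Copy.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Lsn(u64);

fn main() {
    let drop_lsn: Option<Lsn> = Some(Lsn(0x16));

    // clippy::clone_on_copy: cloning a Copy value is just a verbose copy.
    let cloned = drop_lsn.clone();

    // The copy happens implicitly; `drop_lsn` stays usable afterwards.
    let copied = drop_lsn;

    assert_eq!(cloned, copied);
    assert_eq!(drop_lsn, copied);
}
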
@@ -702,7 +702,7 @@ impl InMemoryLayer {
 
         if drop_lsn.is_none() {
             // Write a new base image layer at the cutoff point
-            let image_layer = ImageLayer::create_from_src(self.conf, &timeline, self, end_lsn)?;
+            let image_layer = ImageLayer::create_from_src(self.conf, timeline, self, end_lsn)?;
             frozen_layers.push(Arc::new(image_layer));
             trace!("freeze: created image layer {} at {}", self.seg, end_lsn);
         }

@@ -107,7 +107,7 @@ impl LayerMap {
         // Also remove it from the SegEntry of this segment
         let mut segentry = self.segs.get_mut(&segtag).unwrap();
         assert!(Arc::ptr_eq(
-            &segentry.open.as_ref().unwrap(),
+            segentry.open.as_ref().unwrap(),
             &oldest_entry.layer
         ));
         segentry.open = None;

@@ -185,11 +185,9 @@ impl LayerMap {
 
     /// Return the oldest in-memory layer, along with its generation number.
     pub fn peek_oldest_open(&self) -> Option<(Arc<InMemoryLayer>, u64)> {
-        if let Some(oldest_entry) = self.open_layers.peek() {
-            Some((Arc::clone(&oldest_entry.layer), oldest_entry.generation))
-        } else {
-            None
-        }
+        self.open_layers
+            .peek()
+            .map(|oldest_entry| (Arc::clone(&oldest_entry.layer), oldest_entry.generation))
     }
 
     /// Increment the generation number used to stamp open in-memory layers. Layers

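The `peek_oldest_open` rewrite replaces an `if let Some(..) { Some(..) } else { None }` block with `Option::map`, which is what clippy's `manual_map` lint suggests. A small sketch over a `BinaryHeap` standing in for `open_layers`:

use std::collections::BinaryHeap;

// Manual version: the pattern clippy::manual_map flags.
fn peek_doubled_manual(heap: &BinaryHeap<u64>) -> Option<u64> {
    if let Some(top) = heap.peek() {
        Some(top * 2)
    } else {
        None
    }
}

// Suggested version: let Option::map do the wrapping.
fn peek_doubled(heap: &BinaryHeap<u64>) -> Option<u64> {
    heap.peek().map(|top| top * 2)
}

fn main() {
    let heap: BinaryHeap<u64> = [1, 5, 3].into_iter().collect();
    assert_eq!(peek_doubled_manual(&heap), Some(10));
    assert_eq!(peek_doubled(&heap), peek_doubled_manual(&heap));
}
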
@@ -336,7 +334,7 @@ impl PartialOrd for OpenLayerEntry {
 }
 impl PartialEq for OpenLayerEntry {
     fn eq(&self, other: &Self) -> bool {
-        self.cmp(&other) == Ordering::Equal
+        self.cmp(other) == Ordering::Equal
     }
 }
 impl Eq for OpenLayerEntry {}

@@ -32,5 +32,5 @@ pub const fn transaction_id_precedes(id1: TransactionId, id2: TransactionId) ->
     }
 
     let diff = id1.wrapping_sub(id2) as i32;
-    return diff < 0;
+    diff < 0
 }

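Removing the trailing `return` is clippy's `needless_return` (the earlier `parse_request_param` match-arm cleanup removes a similar `return`): the last expression of a block is already its value. A minimal sketch of the same wraparound comparison, using `u32` in place of `TransactionId`:

// Flagged by clippy::needless_return.
fn precedes_with_return(id1: u32, id2: u32) -> bool {
    let diff = id1.wrapping_sub(id2) as i32;
    return diff < 0;
}

// Idiomatic tail expression, as in the diff.
fn precedes(id1: u32, id2: u32) -> bool {
    let diff = id1.wrapping_sub(id2) as i32;
    diff < 0
}

fn main() {
    // 3 precedes 5, and, with wraparound, u32::MAX precedes 1.
    assert!(precedes(3, 5) && precedes_with_return(u32::MAX, 1));
}
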
@@ -8,4 +8,8 @@
 # warnings and errors right in the editor.
 # In vscode, this setting is Rust-analyzer>Check On Save:Command
 
-cargo clippy "${@:2}" -- -A clippy::new_without_default -A clippy::manual_range_contains -A clippy::comparison_chain
+# * `-A unknown_lints` – do not warn about unknown lint suppressions
+#   that people with newer toolchains might use
+# * `-D warnings` - fail on any warnings (`cargo` returns non-zero exit status)
+cargo clippy "${@:2}" --all-targets --all-features --all --tests -- -A unknown_lints -D warnings
+

@@ -74,7 +74,7 @@ fn request_callback(conf: WalAcceptorConf, timelineid: ZTimelineId, tenantid: ZT
 
 impl<'pg> ReceiveWalConn<'pg> {
     pub fn new(pg: &'pg mut PostgresBackend) -> Result<ReceiveWalConn<'pg>> {
-        let peer_addr = pg.get_peer_addr().clone();
+        let peer_addr = *pg.get_peer_addr();
         Ok(ReceiveWalConn {
             pg_backend: pg,
             peer_addr,

@@ -421,7 +421,7 @@ where
 
         // do the job
         let mut last_rec_lsn = Lsn(0);
-        if msg.wal_data.len() > 0 {
+        if !msg.wal_data.is_empty() {
             self.storage
                 .write_wal(&self.s, msg.h.begin_lsn, &msg.wal_data)?;
 

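`msg.wal_data.len() > 0` → `!msg.wal_data.is_empty()` matches clippy's `len_zero` lint: `is_empty` states the intent directly and exists on all the standard containers. A tiny sketch with a byte buffer standing in for `wal_data`:

fn main() {
    let wal_data: Vec<u8> = vec![0xD0, 0x0D];

    // clippy::len_zero flags comparisons of len() against zero...
    if wal_data.len() > 0 {
        println!("have {} bytes (len() > 0)", wal_data.len());
    }

    // ...and suggests the intent-revealing is_empty() instead.
    if !wal_data.is_empty() {
        println!("have {} bytes (!is_empty())", wal_data.len());
    }
}
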
@@ -33,13 +33,13 @@ impl postgres_backend::Handler for SendWalHandler {
         let ztimelineid = sm
             .params
             .get("ztimelineid")
-            .ok_or(anyhow!("timelineid is required"))?;
+            .ok_or_else(|| anyhow!("timelineid is required"))?;
         self.timelineid = Some(ZTimelineId::from_str(ztimelineid)?);
 
         let ztenantid = sm
             .params
             .get("ztenantid")
-            .ok_or(anyhow!("tenantid is required"))?;
+            .ok_or_else(|| anyhow!("tenantid is required"))?;
         self.tenantid = Some(ZTenantId::from_str(ztenantid)?);
 
         if let Some(app_name) = sm.params.get("application_name") {

@@ -1,7 +1,6 @@
 use anyhow::anyhow;
 use hyper::{header, Body, Response, StatusCode};
 use serde::{Deserialize, Serialize};
-use serde_json;
 use thiserror::Error;
 
 #[derive(Debug, Error)]

@@ -56,10 +55,10 @@ impl HttpErrorBody {
     }
 
     pub fn response_from_msg_and_status(msg: String, status: StatusCode) -> Response<Body> {
-        HttpErrorBody { msg }.into_response(status)
+        HttpErrorBody { msg }.to_response(status)
     }
 
-    pub fn into_response(&self, status: StatusCode) -> Response<Body> {
+    pub fn to_response(&self, status: StatusCode) -> Response<Body> {
         Response::builder()
             .status(status)
             .header(header::CONTENT_TYPE, "application/json")

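Renaming `into_response` to `to_response` follows clippy's `wrong_self_convention`: by convention `into_*` methods consume `self`, while a method that borrows `&self` and builds a new value is named `to_*`. A sketch with a made-up `ErrorBody` type, not the crate's `HttpErrorBody`:

// Made-up stand-in for an HTTP error body.
struct ErrorBody {
    msg: String,
}

impl ErrorBody {
    // Borrows self, so the `to_*` prefix is the conventional name.
    fn to_json(&self) -> String {
        format!("{{\"msg\":\"{}\"}}", self.msg)
    }

    // Consumes self, so `into_*` is appropriate here.
    fn into_msg(self) -> String {
        self.msg
    }
}

fn main() {
    let body = ErrorBody { msg: "failed to parse tenant id".to_string() };
    println!("{}", body.to_json()); // body is still usable afterwards
    println!("{}", body.into_msg()); // body is moved here
}
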
@@ -1,7 +1,6 @@
 use bytes::Buf;
 use hyper::{header, Body, Request, Response, StatusCode};
 use serde::{Deserialize, Serialize};
-use serde_json;
 
 use super::error::ApiError;
 