diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs
index 207d2420bd..a0d6e922a1 100644
--- a/pageserver/src/http/routes.rs
+++ b/pageserver/src/http/routes.rs
@@ -68,10 +68,7 @@ fn get_config(request: &Request<Body>) -> &'static PageServerConf {
 // healthcheck handler
 async fn status_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
     let config = get_config(&request);
-    Ok(json_response(
-        StatusCode::OK,
-        StatusResponse { id: config.id },
-    )?)
+    json_response(StatusCode::OK, StatusResponse { id: config.id })
 }
 
 async fn timeline_create_handler(mut request: Request<Body>) -> Result<Response<Body>, ApiError> {
@@ -131,7 +128,7 @@ async fn timeline_list_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
         })
     }
 
-    Ok(json_response(StatusCode::OK, response_data)?)
+    json_response(StatusCode::OK, response_data)
 }
 
 // Gate non incremental logical size calculation behind a flag
@@ -207,7 +204,7 @@ async fn timeline_detail_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
@@ -247,7 +244,7 @@ async fn timeline_attach_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
@@ -266,7 +263,7 @@ async fn timeline_detach_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
@@ -280,7 +277,7 @@ async fn tenant_list_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
     .await
     .map_err(ApiError::from_err)??;
 
-    Ok(json_response(StatusCode::OK, response_data)?)
+    json_response(StatusCode::OK, response_data)
 }
 
 async fn tenant_create_handler(mut request: Request<Body>) -> Result<Response<Body>, ApiError> {
diff --git a/pageserver/src/layered_repository.rs b/pageserver/src/layered_repository.rs
index d7a250f31e..5e93e3389b 100644
--- a/pageserver/src/layered_repository.rs
+++ b/pageserver/src/layered_repository.rs
@@ -1474,8 +1474,7 @@ impl LayeredTimeline {
         //
         // TODO: This perhaps should be done in 'flush_frozen_layers', after flushing
         // *all* the layers, to avoid fsyncing the file multiple times.
-        let disk_consistent_lsn;
-        disk_consistent_lsn = Lsn(frozen_layer.get_lsn_range().end.0 - 1);
+        let disk_consistent_lsn = Lsn(frozen_layer.get_lsn_range().end.0 - 1);
 
         // If we were able to advance 'disk_consistent_lsn', save it the metadata file.
         // After crash, we will restart WAL streaming and processing from that point.
diff --git a/pageserver/src/layered_repository/filename.rs b/pageserver/src/layered_repository/filename.rs
index cd63f014c4..497912b408 100644
--- a/pageserver/src/layered_repository/filename.rs
+++ b/pageserver/src/layered_repository/filename.rs
@@ -25,9 +25,7 @@ impl PartialOrd for DeltaFileName {
 
 impl Ord for DeltaFileName {
     fn cmp(&self, other: &Self) -> Ordering {
-        let mut cmp;
-
-        cmp = self.key_range.start.cmp(&other.key_range.start);
+        let mut cmp = self.key_range.start.cmp(&other.key_range.start);
         if cmp != Ordering::Equal {
             return cmp;
         }
@@ -117,9 +115,7 @@ impl PartialOrd for ImageFileName {
 
 impl Ord for ImageFileName {
     fn cmp(&self, other: &Self) -> Ordering {
-        let mut cmp;
-
-        cmp = self.key_range.start.cmp(&other.key_range.start);
+        let mut cmp = self.key_range.start.cmp(&other.key_range.start);
         if cmp != Ordering::Equal {
             return cmp;
         }
diff --git a/pageserver/src/layered_repository/layer_map.rs b/pageserver/src/layered_repository/layer_map.rs
index 8132ec9cc4..3984ee550f 100644
--- a/pageserver/src/layered_repository/layer_map.rs
+++ b/pageserver/src/layered_repository/layer_map.rs
@@ -296,9 +296,7 @@ impl LayerMap {
         key_range: &Range<Key>,
         lsn: Lsn,
     ) -> Result<Vec<(Range<Key>, Option<Arc<dyn Layer>>)>> {
-        let mut points: Vec<Key>;
-
-        points = vec![key_range.start];
+        let mut points = vec![key_range.start];
         for l in self.historic_layers.iter() {
             if l.get_lsn_range().start > lsn {
                 continue;
diff --git a/pageserver/src/reltag.rs b/pageserver/src/reltag.rs
index 46ff468f2f..18e26cc37a 100644
--- a/pageserver/src/reltag.rs
+++ b/pageserver/src/reltag.rs
@@ -39,9 +39,7 @@ impl PartialOrd for RelTag {
 
 impl Ord for RelTag {
     fn cmp(&self, other: &Self) -> Ordering {
-        let mut cmp;
-
-        cmp = self.spcnode.cmp(&other.spcnode);
+        let mut cmp = self.spcnode.cmp(&other.spcnode);
         if cmp != Ordering::Equal {
             return cmp;
         }
diff --git a/pageserver/src/remote_storage/local_fs.rs b/pageserver/src/remote_storage/local_fs.rs
index 846adf8e9b..b40089d53c 100644
--- a/pageserver/src/remote_storage/local_fs.rs
+++ b/pageserver/src/remote_storage/local_fs.rs
@@ -58,7 +58,7 @@ impl LocalFs {
         &self,
         file_path: &Path,
     ) -> anyhow::Result<Option<StorageMetadata>> {
-        let metadata_path = storage_metadata_path(&file_path);
+        let metadata_path = storage_metadata_path(file_path);
         if metadata_path.exists() && metadata_path.is_file() {
             let metadata_string = fs::read_to_string(&metadata_path).await.with_context(|| {
                 format!(
diff --git a/pageserver/src/remote_storage/storage_sync/compression.rs b/pageserver/src/remote_storage/storage_sync/compression.rs
index c5b041349a..511f79e0cf 100644
--- a/pageserver/src/remote_storage/storage_sync/compression.rs
+++ b/pageserver/src/remote_storage/storage_sync/compression.rs
@@ -201,8 +201,7 @@ pub async fn read_archive_header(
         .await
         .context("Failed to decompress a header from the archive")?;
 
-    Ok(ArchiveHeader::des(&header_bytes)
-        .context("Failed to deserialize a header from the archive")?)
+    ArchiveHeader::des(&header_bytes).context("Failed to deserialize a header from the archive")
 }
 
 /// Reads the archive metadata out of the archive name:
diff --git a/pageserver/src/remote_storage/storage_sync/download.rs b/pageserver/src/remote_storage/storage_sync/download.rs
index 32549c8650..773b4a12e5 100644
--- a/pageserver/src/remote_storage/storage_sync/download.rs
+++ b/pageserver/src/remote_storage/storage_sync/download.rs
@@ -225,8 +225,8 @@ async fn read_local_metadata(
     let local_metadata_bytes = fs::read(&local_metadata_path)
         .await
         .context("Failed to read local metadata file bytes")?;
-    Ok(TimelineMetadata::from_bytes(&local_metadata_bytes)
-        .context("Failed to read local metadata files bytes")?)
+    TimelineMetadata::from_bytes(&local_metadata_bytes)
+        .context("Failed to read local metadata files bytes")
 }
 
 #[cfg(test)]
diff --git a/walkeeper/src/http/routes.rs b/walkeeper/src/http/routes.rs
index 06a0682c37..26b23cddcc 100644
--- a/walkeeper/src/http/routes.rs
+++ b/walkeeper/src/http/routes.rs
@@ -31,7 +31,7 @@ struct SafekeeperStatus {
 async fn status_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
     let conf = get_conf(&request);
     let status = SafekeeperStatus { id: conf.my_id };
-    Ok(json_response(StatusCode::OK, status)?)
+    json_response(StatusCode::OK, status)
 }
 
 fn get_conf(request: &Request<Body>) -> &SafeKeeperConf {
@@ -106,7 +106,7 @@ async fn timeline_status_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
@@ -119,7 +119,7 @@ async fn timeline_create_handler(mut request: Request<Body>) -> Result<Response<Body>, ApiError> {
@@ ... @@ pub async fn json_request<T: for<'de> Deserialize<'de>>(
     let whole_body = hyper::body::aggregate(request.body_mut())
         .await
         .map_err(ApiError::from_err)?;
-    Ok(serde_json::from_reader(whole_body.reader())
-        .map_err(|err| ApiError::BadRequest(format!("Failed to parse json request {}", err)))?)
+    serde_json::from_reader(whole_body.reader())
+        .map_err(|err| ApiError::BadRequest(format!("Failed to parse json request {}", err)))
 }
 
 pub fn json_response(