diff --git a/pgxn/neon/communicator/src/backend_interface.rs b/pgxn/neon/communicator/src/backend_interface.rs
index 9ed9028b96..fd0081e837 100644
--- a/pgxn/neon/communicator/src/backend_interface.rs
+++ b/pgxn/neon/communicator/src/backend_interface.rs
@@ -161,8 +161,11 @@ pub extern "C" fn bcomm_get_request_slot_status(
             // leave a slot in this state, so if it sees that,
             // something's gone wrong and it's not clear what to do
             // with it.
-            panic!("unexpected Filling state in request slot {}", request_slot_idx);
-        },
+            panic!(
+                "unexpected Filling state in request slot {}",
+                request_slot_idx
+            );
+        }
         NeonIOHandleState::Submitted => true,
         NeonIOHandleState::Processing => true,
         NeonIOHandleState::Completed => true,
diff --git a/pgxn/neon/communicator/src/init.rs b/pgxn/neon/communicator/src/init.rs
index 5f7d593c35..20bb4923e8 100644
--- a/pgxn/neon/communicator/src/init.rs
+++ b/pgxn/neon/communicator/src/init.rs
@@ -46,10 +46,7 @@ impl std::fmt::Debug for CommunicatorInitStruct {
         fmt.debug_struct("CommunicatorInitStruct")
             .field("submission_pipe_read_fd", &self.submission_pipe_read_fd)
             .field("submission_pipe_write_fd", &self.submission_pipe_write_fd)
-            .field(
-                "num_neon_request_slots",
-                &self.num_neon_request_slots,
-            )
+            .field("num_neon_request_slots", &self.num_neon_request_slots)
             .field("neon_request_slots length", &self.neon_request_slots.len())
             .finish()
     }
diff --git a/pgxn/neon/communicator/src/integrated_cache.rs b/pgxn/neon/communicator/src/integrated_cache.rs
index a7009f0eb5..e00e49bf3d 100644
--- a/pgxn/neon/communicator/src/integrated_cache.rs
+++ b/pgxn/neon/communicator/src/integrated_cache.rs
@@ -526,9 +526,7 @@ impl<'t> IntegratedCacheWriteAccess<'t> {
         self.relsize_cache.remove(&RelKey::from(rel));
 
         // update with flush LSN
-        let _ = self
-            .global_lw_lsn
-            .fetch_max(flush_lsn.0, Ordering::Relaxed);
+        let _ = self.global_lw_lsn.fetch_max(flush_lsn.0, Ordering::Relaxed);
 
         // also forget all cached blocks for the relation
         // FIXME
diff --git a/pgxn/neon/communicator/src/neon_request.rs b/pgxn/neon/communicator/src/neon_request.rs
index 32a02cd8c3..1868147fbf 100644
--- a/pgxn/neon/communicator/src/neon_request.rs
+++ b/pgxn/neon/communicator/src/neon_request.rs
@@ -380,7 +380,6 @@ impl CRelUnlinkRequest {
     }
 }
 
-
 #[repr(C)]
 #[derive(Copy, Clone, Debug)]
 pub struct CForgetCacheRequest {
diff --git a/pgxn/neon/communicator/src/worker_process/main_loop.rs b/pgxn/neon/communicator/src/worker_process/main_loop.rs
index 2eacd13609..aadf9b3a60 100644
--- a/pgxn/neon/communicator/src/worker_process/main_loop.rs
+++ b/pgxn/neon/communicator/src/worker_process/main_loop.rs
@@ -359,8 +359,14 @@ impl<'t> CommunicatorWorkerProcessStruct<'t> {
         {
            Ok(nblocks) => {
                // update the cache
-                tracing::info!("updated relsize for {:?} in cache: {}, lsn {}", rel, nblocks, read_lsn);
-                self.cache.remember_rel_size(&rel, nblocks, not_modified_since);
+                tracing::info!(
+                    "updated relsize for {:?} in cache: {}, lsn {}",
+                    rel,
+                    nblocks,
+                    read_lsn
+                );
+                self.cache
+                    .remember_rel_size(&rel, nblocks, not_modified_since);
 
                 NeonIOResult::RelSize(nblocks)
             }
@@ -469,8 +475,11 @@ impl<'t> CommunicatorWorkerProcessStruct<'t> {
                 // TODO: We could put the empty pages to the cache. Maybe have
                 // a marker on the block entries for all-zero pages, instead of
                 // actually storing the empty pages.
-                self.cache
-                    .remember_rel_size(&req.reltag(), req.block_number + req.nblocks, Lsn(req.lsn));
+                self.cache.remember_rel_size(
+                    &req.reltag(),
+                    req.block_number + req.nblocks,
+                    Lsn(req.lsn),
+                );
                 NeonIOResult::WriteOK
             }
             NeonIORequest::RelCreate(req) => {
@@ -484,7 +493,8 @@ impl<'t> CommunicatorWorkerProcessStruct<'t> {
                 self.request_rel_truncate_counter.inc();
 
                 // TODO: need to grab an io-in-progress lock for this? I guess not
-                self.cache.remember_rel_size(&req.reltag(), req.nblocks, Lsn(req.lsn));
+                self.cache
+                    .remember_rel_size(&req.reltag(), req.nblocks, Lsn(req.lsn));
                 NeonIOResult::WriteOK
             }
             NeonIORequest::RelUnlink(req) => {
@@ -496,7 +506,8 @@ impl<'t> CommunicatorWorkerProcessStruct<'t> {
             }
             NeonIORequest::ForgetCache(req) => {
                 // TODO: need to grab an io-in-progress lock for this? I guess not
-                self.cache.forget_rel(&req.reltag(), Some(req.nblocks), Lsn(req.lsn));
+                self.cache
+                    .forget_rel(&req.reltag(), Some(req.nblocks), Lsn(req.lsn));
                 NeonIOResult::WriteOK
             }
         }