From 74df4a7b76dc3a171520979671c124f817ebf6ae Mon Sep 17 00:00:00 2001 From: Christian Schwarz Date: Thu, 1 Feb 2024 13:41:33 +0100 Subject: [PATCH] fix(walredo): walredo process that causes errors is never killed Before this PR, if walredo failed, we would still update `last_redo_at`. This means `maybe_quiesce()` would never kill that process, although clearly something is wrong. However, we don't want to kill the process immediately on each redo failure, because the redo failure could be caused by corrupted or malicious data. So, change `maybe_quiesce()` to determine inactivity based on the last *successful* redo. The result is that a transient redo failure won't cause a kill. If there are *only* redo failures, we'll spawn & kill a walredo process at frequency 1/(10*compaction_period). --- pageserver/src/walredo.rs | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/pageserver/src/walredo.rs b/pageserver/src/walredo.rs index cfb8052cf1..5b8cad062d 100644 --- a/pageserver/src/walredo.rs +++ b/pageserver/src/walredo.rs @@ -93,7 +93,7 @@ struct ProcessOutput { pub struct PostgresRedoManager { tenant_shard_id: TenantShardId, conf: &'static PageServerConf, - last_redo_at: std::sync::Mutex>, + last_successful_redo_at: std::sync::Mutex>, redo_process: RwLock>>, } @@ -165,7 +165,7 @@ impl PostgresRedoManager { } } // last batch - if batch_neon { + let res = if batch_neon { self.apply_batch_neon(key, lsn, img, &records[batch_start..]) } else { self.apply_batch_postgres( @@ -177,7 +177,11 @@ impl PostgresRedoManager { self.conf.wal_redo_timeout, pg_version, ) + }; + if res.is_ok() { + *(self.last_successful_redo_at.lock().unwrap()) = Some(Instant::now()); } + res } } @@ -193,7 +197,7 @@ impl PostgresRedoManager { PostgresRedoManager { tenant_shard_id, conf, - last_redo_at: std::sync::Mutex::default(), + last_successful_redo_at: std::sync::Mutex::default(), redo_process: RwLock::new(None), } } @@ -202,9 +206,9 @@ impl 
PostgresRedoManager { /// rely on our owner calling this function periodically in its own housekeeping /// loops. pub(crate) fn maybe_quiesce(&self, idle_timeout: Duration) { - if let Ok(g) = self.last_redo_at.try_lock() { - if let Some(last_redo_at) = *g { - if last_redo_at.elapsed() >= idle_timeout { + if let Ok(g) = self.last_successful_redo_at.try_lock() { + if let Some(last_successful_redo_at) = *g { + if last_successful_redo_at.elapsed() >= idle_timeout { drop(g); let mut guard = self.redo_process.write().unwrap(); *guard = None; @@ -227,8 +231,6 @@ impl PostgresRedoManager { wal_redo_timeout: Duration, pg_version: u32, ) -> anyhow::Result { - *(self.last_redo_at.lock().unwrap()) = Some(Instant::now()); - let (rel, blknum) = key_to_rel_block(key).context("invalid record")?; const MAX_RETRY_ATTEMPTS: u32 = 1; let mut n_attempts = 0u32;