From 3b4d4eb53502fc16f8bef65be814b31783106f23 Mon Sep 17 00:00:00 2001
From: Gleb Novikov
Date: Thu, 29 May 2025 19:25:42 +0100
Subject: [PATCH] fast_import.rs: log number of jobs for pg_dump/pg_restore
 (#12068)

## Problem

I have a hypothesis that the import might be using a lower number of jobs than the maximum available on the VM where the job is running. This change will help verify that from the logs.

## Summary of changes

Added logging of the number of jobs that is passed into both `pg_dump` and `pg_restore`.
---
 compute_tools/src/bin/fast_import.rs | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/compute_tools/src/bin/fast_import.rs b/compute_tools/src/bin/fast_import.rs
index 78acd78585..e65c210b23 100644
--- a/compute_tools/src/bin/fast_import.rs
+++ b/compute_tools/src/bin/fast_import.rs
@@ -339,6 +339,8 @@ async fn run_dump_restore(
     destination_connstring: String,
 ) -> Result<(), anyhow::Error> {
     let dumpdir = workdir.join("dumpdir");
+    let num_jobs = num_cpus::get().to_string();
+    info!("using {num_jobs} jobs for dump/restore");
 
     let common_args = [
         // schema mapping (prob suffices to specify them on one side)
@@ -354,7 +356,7 @@ async fn run_dump_restore(
         "directory".to_string(),
         // concurrency
         "--jobs".to_string(),
-        num_cpus::get().to_string(),
+        num_jobs,
         // progress updates
         "--verbose".to_string(),
     ];
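
For reference, the pattern this patch introduces can be reproduced standalone. A minimal sketch, assuming the `num_cpus`, `tracing`, and `tracing-subscriber` crates are available; the `main` wrapper and the trimmed-down argument list are illustrative only, not part of `fast_import.rs`:

```rust
use tracing::info;

fn main() {
    // Plain stdout subscriber so the info! line is visible when run standalone.
    tracing_subscriber::fmt::init();

    // Compute the concurrency once and log it, so the log line reflects
    // exactly the value that is later passed via --jobs.
    let num_jobs = num_cpus::get().to_string();
    info!("using {num_jobs} jobs for dump/restore");

    // Illustrative argument list in the style of run_dump_restore():
    // the same value is handed to both pg_dump and pg_restore.
    let common_args = [
        "--format".to_string(),
        "directory".to_string(),
        "--jobs".to_string(),
        num_jobs,
        "--verbose".to_string(),
    ];
    println!("{common_args:?}");
}
```

If I understand the `num_cpus` crate correctly, `num_cpus::get()` counts the logical CPUs available to the process (respecting cgroup limits on Linux), which is why logging it on the import VM should be enough to test the hypothesis above.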