From 552249607da32a4c1c25a65339ce3b4533f6a728 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Arpad=20M=C3=BCller?=
Date: Tue, 24 Jun 2025 12:12:42 +0200
Subject: [PATCH] apply clippy fixes for 1.88.0 beta (#12331)

The 1.88.0 stable release is near (this Thursday). We'd like to fix most
warnings beforehand so that the compiler upgrade doesn't require approval
from too many teams. This is therefore a preparation PR (like similar PRs
before it).

There are a lot of changes for this release, mostly because the
`uninlined_format_args` lint has been added to the `style` lint group. One
can read more about the lint
[here](https://rust-lang.github.io/rust-clippy/master/#/uninlined_format_args).

The PR is the result of `cargo +beta clippy --fix` and `cargo fmt`. One
remaining warning is left for the proxy team.
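To make the mechanical nature of the change obvious, here is a minimal
sketch of the rewrite the lint asks for (a hypothetical snippet, not taken
from this diff):

```rust
fn main() {
    let tenant = "ab01";
    let port = 5432;

    // Before: `clippy::uninlined_format_args` fires on these positional
    // arguments, because both are plain identifiers.
    let before = format!("tenant={} port={}", tenant, port);

    // After `cargo clippy --fix`: the identifiers are inlined into the
    // format string. Field accesses and other expressions (e.g. `cfg.port`)
    // are not inlined; the lint only touches bare identifiers.
    let after = format!("tenant={tenant} port={port}");

    assert_eq!(before, after);
}
```

The warning left for the proxy team would typically be deferred with an
`allow(clippy::uninlined_format_args)` at the appropriate scope; that is an
assumption about the follow-up, not a confirmed detail of this patch.

---------

Co-authored-by: Conrad Ludgate

---
 compute_tools/src/bin/fast_import.rs | 6 +- compute_tools/src/bin/fast_import/s3_uri.rs | 2 +- compute_tools/src/catalog.rs | 4 +- compute_tools/src/compute.rs | 21 ++-- compute_tools/src/config.rs | 18 ++-- compute_tools/src/extension_server.rs | 12 +-- compute_tools/src/http/routes/configure.rs | 2 +- compute_tools/src/installed_extensions.rs | 4 +- compute_tools/src/lsn_lease.rs | 2 +- compute_tools/src/pg_helpers.rs | 10 +- compute_tools/src/spec.rs | 8 +- compute_tools/src/spec_apply.rs | 95 +++++++++---------- compute_tools/src/sync_sk.rs | 2 +- control_plane/src/bin/neon_local.rs | 20 ++-- control_plane/src/endpoint.rs | 11 +-- control_plane/src/local_env.rs | 6 +- control_plane/src/pageserver.rs | 2 +- control_plane/src/safekeeper.rs | 2 +- control_plane/src/storage_controller.rs | 16 ++-- control_plane/storcon_cli/src/main.rs | 28 +++--- endpoint_storage/src/app.rs | 2 +- libs/desim/src/executor.rs | 2 +- libs/desim/src/proto.rs | 4 +- libs/http-utils/src/endpoint.rs | 8 +- libs/neon-shmem/src/lib.rs | 4 +- libs/pageserver_api/src/controller_api.rs | 3 +- libs/pageserver_api/src/keyspace.rs | 5 +- libs/pageserver_api/src/models.rs | 5 +- libs/postgres_backend/src/lib.rs | 2 +- libs/postgres_backend/tests/simple_select.rs | 4 +- libs/postgres_connection/src/lib.rs | 6 +- libs/postgres_ffi/src/waldecoder_handler.rs | 8 +- libs/postgres_ffi/src/walrecord.rs | 8 +- .../wal_craft/src/xlog_utils_test.rs | 2 +- libs/postgres_initdb/src/lib.rs | 6 +- libs/posthog_client_lite/src/lib.rs | 45 +++------ libs/pq_proto/src/lib.rs | 8 +- .../src/authentication/sasl.rs | 10 +- .../postgres-protocol2/src/message/backend.rs | 4 +- libs/proxy/postgres-types2/src/lib.rs | 2 +- libs/proxy/tokio-postgres2/src/error/mod.rs | 12 +-- libs/proxy/tokio-postgres2/src/row.rs | 4 +- libs/remote_storage/src/local_fs.rs | 14 +-- libs/safekeeper_api/src/membership.rs | 4 +- libs/utils/src/error.rs | 2 +- libs/utils/src/generation.rs | 2 +- libs/utils/src/id.rs | 2 +- libs/utils/src/postgres_client.rs | 2 +- libs/utils/src/shard.rs | 4 +- libs/utils/src/signals.rs | 2 +- libs/vm_monitor/src/dispatcher.rs | 3 +- libs/vm_monitor/src/filecache.rs | 2 +- .../benches/bench_interpret_wal.rs | 4 +- libs/wal_decoder/build.rs | 2 +- libs/wal_decoder/src/models/record.rs | 2 +- libs/walproposer/src/api_bindings.rs | 4 +- libs/walproposer/src/walproposer.rs | 13 +-- pageserver/client/src/mgmt_api.rs | 4 +- pageserver/compaction/src/simulator/draw.rs | 6 +- pageserver/ctl/src/draw_timeline_dir.rs | 10 +- pageserver/ctl/src/key.rs | 2 +- pageserver/ctl/src/layer_map_analyzer.rs | 3 +- pageserver/page_api/src/client.rs | 2 +- pageserver/pagebench/src/cmd/aux_files.rs | 6 +-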
pageserver/src/basebackup.rs | 12 +-- pageserver/src/http/routes.rs | 10 +- pageserver/src/metrics.rs | 7 +- pageserver/src/page_service.rs | 9 +- pageserver/src/pgdatadir_mapping.rs | 5 +- pageserver/src/tenant.rs | 55 +++++------ pageserver/src/tenant/checks.rs | 3 +- pageserver/src/tenant/metadata.rs | 3 +- pageserver/src/tenant/secondary/downloader.rs | 2 +- .../src/tenant/storage_layer/delta_layer.rs | 4 +- .../src/tenant/storage_layer/image_layer.rs | 7 +- .../tenant/storage_layer/inmemory_layer.rs | 2 +- .../inmemory_layer/vectored_dio_read.rs | 2 +- pageserver/src/tenant/storage_layer/layer.rs | 2 +- pageserver/src/tenant/timeline.rs | 18 ++-- pageserver/src/tenant/timeline/compaction.rs | 10 +- .../walreceiver/walreceiver_connection.rs | 3 +- .../owned_buffers_io/aligned_buffer/buffer.rs | 11 +-- .../aligned_buffer/buffer_mut.rs | 5 +- pageserver/src/walingest.rs | 12 +-- pageserver/src/walredo/apply_neon.rs | 50 +++------- proxy/src/lib.rs | 4 + safekeeper/src/control_file.rs | 10 +- safekeeper/src/handler.rs | 16 ++-- safekeeper/src/safekeeper.rs | 26 +++-- safekeeper/src/timeline_eviction.rs | 7 +- safekeeper/src/timeline_manager.rs | 2 +- safekeeper/src/timelines_global_map.rs | 4 +- safekeeper/src/wal_backup_partial.rs | 3 +- safekeeper/src/wal_storage.rs | 2 +- safekeeper/tests/walproposer_sim/log.rs | 2 +- .../tests/walproposer_sim/safekeeper.rs | 2 +- .../tests/walproposer_sim/simulation.rs | 2 +- .../tests/walproposer_sim/walproposer_api.rs | 7 +- storage_broker/benches/rps.rs | 2 +- storage_broker/build.rs | 2 +- storage_broker/src/lib.rs | 10 +- storage_controller/src/compute_hook.rs | 2 +- storage_controller/src/drain_utils.rs | 6 +- storage_controller/src/http.rs | 6 +- storage_controller/src/persistence.rs | 23 ++--- storage_controller/src/scheduler.rs | 6 +- storage_controller/src/service.rs | 31 +++--- storage_controller/src/tenant_shard.rs | 9 +- storage_controller/src/timeline_import.rs | 2 +- storage_scrubber/src/checks.rs | 2 +- storage_scrubber/src/lib.rs | 2 +- .../src/scan_safekeeper_metadata.rs | 4 +- 112 files changed, 404 insertions(+), 542 deletions(-) diff --git a/compute_tools/src/bin/fast_import.rs b/compute_tools/src/bin/fast_import.rs index e65c210b23..682525f6df 100644 --- a/compute_tools/src/bin/fast_import.rs +++ b/compute_tools/src/bin/fast_import.rs @@ -486,10 +486,8 @@ async fn cmd_pgdata( }; let superuser = "cloud_admin"; - let destination_connstring = format!( - "host=localhost port={} user={} dbname=neondb", - pg_port, superuser - ); + let destination_connstring = + format!("host=localhost port={pg_port} user={superuser} dbname=neondb"); let pgdata_dir = workdir.join("pgdata"); let mut proc = PostgresProcess::new(pgdata_dir.clone(), pg_bin_dir.clone(), pg_lib_dir.clone()); diff --git a/compute_tools/src/bin/fast_import/s3_uri.rs b/compute_tools/src/bin/fast_import/s3_uri.rs index cf4dab7c02..e1a85c73e7 100644 --- a/compute_tools/src/bin/fast_import/s3_uri.rs +++ b/compute_tools/src/bin/fast_import/s3_uri.rs @@ -69,7 +69,7 @@ impl clap::builder::TypedValueParser for S3Uri { S3Uri::from_str(value_str).map_err(|e| { clap::Error::raw( clap::error::ErrorKind::InvalidValue, - format!("Failed to parse S3 URI: {}", e), + format!("Failed to parse S3 URI: {e}"), ) }) } diff --git a/compute_tools/src/catalog.rs b/compute_tools/src/catalog.rs index 082ba62b8e..bc9f64075a 100644 --- a/compute_tools/src/catalog.rs +++ b/compute_tools/src/catalog.rs @@ -22,7 +22,7 @@ pub async fn get_dbs_and_roles(compute: &Arc) -> anyhow::Result { let mut lines = 
stderr_reader.lines(); if let Some(line) = lines.next_line().await? { - if line.contains(&format!("FATAL: database \"{}\" does not exist", dbname)) { + if line.contains(&format!("FATAL: database \"{dbname}\" does not exist")) { return Err(SchemaDumpError::DatabaseDoesNotExist); } warn!("pg_dump stderr: {}", line) diff --git a/compute_tools/src/compute.rs b/compute_tools/src/compute.rs index 684d841897..f84a5f0841 100644 --- a/compute_tools/src/compute.rs +++ b/compute_tools/src/compute.rs @@ -250,8 +250,7 @@ impl ParsedSpec { // duplicate entry? if current == previous { return Err(format!( - "duplicate entry in safekeeper_connstrings: {}!", - current, + "duplicate entry in safekeeper_connstrings: {current}!", )); } @@ -410,7 +409,7 @@ impl ComputeNode { let options = match conn_conf.get_options() { // Allow the control plane to override any options set by the // compute - Some(options) => format!("{} {}", EXTRA_OPTIONS, options), + Some(options) => format!("{EXTRA_OPTIONS} {options}"), None => EXTRA_OPTIONS.to_string(), }; conn_conf.options(&options); @@ -1127,7 +1126,7 @@ impl ComputeNode { let sk_configs = sk_connstrs.into_iter().map(|connstr| { // Format connstr let id = connstr.clone(); - let connstr = format!("postgresql://no_user@{}", connstr); + let connstr = format!("postgresql://no_user@{connstr}"); let options = format!( "-c timeline_id={} tenant_id={}", pspec.timeline_id, pspec.tenant_id @@ -1490,7 +1489,7 @@ impl ComputeNode { let (mut client, connection) = conf.connect(NoTls).await?; tokio::spawn(async move { if let Err(e) = connection.await { - eprintln!("connection error: {}", e); + eprintln!("connection error: {e}"); } }); @@ -1633,7 +1632,7 @@ impl ComputeNode { Ok((mut client, connection)) => { tokio::spawn(async move { if let Err(e) = connection.await { - eprintln!("connection error: {}", e); + eprintln!("connection error: {e}"); } }); if let Err(e) = handle_migrations(&mut client).await { @@ -1937,7 +1936,7 @@ impl ComputeNode { let (client, connection) = connect_result.unwrap(); tokio::spawn(async move { if let Err(e) = connection.await { - eprintln!("connection error: {}", e); + eprintln!("connection error: {e}"); } }); let result = client @@ -2106,7 +2105,7 @@ LIMIT 100", db_client .simple_query(&query) .await - .with_context(|| format!("Failed to execute query: {}", query))?; + .with_context(|| format!("Failed to execute query: {query}"))?; } Ok(()) @@ -2133,7 +2132,7 @@ LIMIT 100", let version: Option = db_client .query_opt(version_query, &[&ext_name]) .await - .with_context(|| format!("Failed to execute query: {}", version_query))? + .with_context(|| format!("Failed to execute query: {version_query}"))? .map(|row| row.get(0)); // sanitize the inputs as postgres idents. @@ -2148,14 +2147,14 @@ LIMIT 100", db_client .simple_query(&query) .await - .with_context(|| format!("Failed to execute query: {}", query))?; + .with_context(|| format!("Failed to execute query: {query}"))?; } else { let query = format!("CREATE EXTENSION IF NOT EXISTS {ext_name} WITH VERSION {quoted_version}"); db_client .simple_query(&query) .await - .with_context(|| format!("Failed to execute query: {}", query))?; + .with_context(|| format!("Failed to execute query: {query}"))?; } Ok(ext_version) diff --git a/compute_tools/src/config.rs b/compute_tools/src/config.rs index 933b30134f..169de5c963 100644 --- a/compute_tools/src/config.rs +++ b/compute_tools/src/config.rs @@ -51,7 +51,7 @@ pub fn write_postgres_conf( // Write the postgresql.conf content from the spec file as is. 
if let Some(conf) = &spec.cluster.postgresql_conf { - writeln!(file, "{}", conf)?; + writeln!(file, "{conf}")?; } // Add options for connecting to storage @@ -70,7 +70,7 @@ pub fn write_postgres_conf( ); // If generation is given, prepend sk list with g#number: if let Some(generation) = spec.safekeepers_generation { - write!(neon_safekeepers_value, "g#{}:", generation)?; + write!(neon_safekeepers_value, "g#{generation}:")?; } neon_safekeepers_value.push_str(&spec.safekeeper_connstrings.join(",")); writeln!( @@ -109,8 +109,8 @@ pub fn write_postgres_conf( tls::update_key_path_blocking(pgdata_path, tls_config); // these are the default, but good to be explicit. - writeln!(file, "ssl_cert_file = '{}'", SERVER_CRT)?; - writeln!(file, "ssl_key_file = '{}'", SERVER_KEY)?; + writeln!(file, "ssl_cert_file = '{SERVER_CRT}'")?; + writeln!(file, "ssl_key_file = '{SERVER_KEY}'")?; } // Locales @@ -191,8 +191,7 @@ pub fn write_postgres_conf( } writeln!( file, - "shared_preload_libraries='{}{}'", - libs, extra_shared_preload_libraries + "shared_preload_libraries='{libs}{extra_shared_preload_libraries}'" )?; } else { // Typically, this should be unreacheable, @@ -244,8 +243,7 @@ pub fn write_postgres_conf( } writeln!( file, - "shared_preload_libraries='{}{}'", - libs, extra_shared_preload_libraries + "shared_preload_libraries='{libs}{extra_shared_preload_libraries}'" )?; } else { // Typically, this should be unreacheable, @@ -263,7 +261,7 @@ pub fn write_postgres_conf( } } - writeln!(file, "neon.extension_server_port={}", extension_server_port)?; + writeln!(file, "neon.extension_server_port={extension_server_port}")?; if spec.drop_subscriptions_before_start { writeln!(file, "neon.disable_logical_replication_subscribers=true")?; @@ -291,7 +289,7 @@ where { let path = pgdata_path.join("compute_ctl_temp_override.conf"); let mut file = File::create(path)?; - write!(file, "{}", options)?; + write!(file, "{options}")?; let res = exec(); diff --git a/compute_tools/src/extension_server.rs b/compute_tools/src/extension_server.rs index 3764bc1525..d8d5de34a5 100644 --- a/compute_tools/src/extension_server.rs +++ b/compute_tools/src/extension_server.rs @@ -310,10 +310,7 @@ async fn download_extension_tar(remote_ext_base_url: &Url, ext_path: &str) -> Re async fn do_extension_server_request(uri: Url) -> Result { let resp = reqwest::get(uri).await.map_err(|e| { ( - format!( - "could not perform remote extensions server request: {:?}", - e - ), + format!("could not perform remote extensions server request: {e:?}"), UNKNOWN_HTTP_STATUS.to_string(), ) })?; @@ -323,7 +320,7 @@ async fn do_extension_server_request(uri: Url) -> Result match resp.bytes().await { Ok(resp) => Ok(resp), Err(e) => Err(( - format!("could not read remote extensions server response: {:?}", e), + format!("could not read remote extensions server response: {e:?}"), // It's fine to return and report error with status as 200 OK, // because we still failed to read the response. 
status.to_string(), @@ -334,10 +331,7 @@ async fn do_extension_server_request(uri: Url) -> Result Err(( - format!( - "unexpected remote extensions server response status code: {}", - status - ), + format!("unexpected remote extensions server response status code: {status}"), status.to_string(), )), } diff --git a/compute_tools/src/http/routes/configure.rs b/compute_tools/src/http/routes/configure.rs index c29e3a97da..b7325d283f 100644 --- a/compute_tools/src/http/routes/configure.rs +++ b/compute_tools/src/http/routes/configure.rs @@ -65,7 +65,7 @@ pub(in crate::http) async fn configure( if state.status == ComputeStatus::Failed { let err = state.error.as_ref().map_or("unknown error", |x| x); - let msg = format!("compute configuration failed: {:?}", err); + let msg = format!("compute configuration failed: {err:?}"); return Err(msg); } } diff --git a/compute_tools/src/installed_extensions.rs b/compute_tools/src/installed_extensions.rs index d95c168a99..411e03b7ec 100644 --- a/compute_tools/src/installed_extensions.rs +++ b/compute_tools/src/installed_extensions.rs @@ -43,7 +43,7 @@ pub async fn get_installed_extensions(mut conf: Config) -> Result Result Result> { let mut client = config.connect(NoTls)?; - let cmd = format!("lease lsn {} {} {} ", tenant_shard_id, timeline_id, lsn); + let cmd = format!("lease lsn {tenant_shard_id} {timeline_id} {lsn} "); let res = client.simple_query(&cmd)?; let msg = match res.first() { Some(msg) => msg, diff --git a/compute_tools/src/pg_helpers.rs b/compute_tools/src/pg_helpers.rs index 94467a0d2f..0a3ceed2fa 100644 --- a/compute_tools/src/pg_helpers.rs +++ b/compute_tools/src/pg_helpers.rs @@ -36,9 +36,9 @@ pub fn escape_literal(s: &str) -> String { let res = s.replace('\'', "''").replace('\\', "\\\\"); if res.contains('\\') { - format!("E'{}'", res) + format!("E'{res}'") } else { - format!("'{}'", res) + format!("'{res}'") } } @@ -46,7 +46,7 @@ pub fn escape_literal(s: &str) -> String { /// with `'{}'` is not required, as it returns a ready-to-use config string. 
pub fn escape_conf_value(s: &str) -> String { let res = s.replace('\'', "''").replace('\\', "\\\\"); - format!("'{}'", res) + format!("'{res}'") } pub trait GenericOptionExt { @@ -446,7 +446,7 @@ pub async fn tune_pgbouncer( let mut pgbouncer_connstr = "host=localhost port=6432 dbname=pgbouncer user=postgres sslmode=disable".to_string(); if let Ok(pass) = std::env::var("PGBOUNCER_PASSWORD") { - pgbouncer_connstr.push_str(format!(" password={}", pass).as_str()); + pgbouncer_connstr.push_str(format!(" password={pass}").as_str()); } pgbouncer_connstr }; @@ -464,7 +464,7 @@ pub async fn tune_pgbouncer( Ok((client, connection)) => { tokio::spawn(async move { if let Err(e) = connection.await { - eprintln!("connection error: {}", e); + eprintln!("connection error: {e}"); } }); break client; diff --git a/compute_tools/src/spec.rs b/compute_tools/src/spec.rs index 4b38e6e29c..43cfbb48f7 100644 --- a/compute_tools/src/spec.rs +++ b/compute_tools/src/spec.rs @@ -23,12 +23,12 @@ fn do_control_plane_request( ) -> Result { let resp = reqwest::blocking::Client::new() .get(uri) - .header("Authorization", format!("Bearer {}", jwt)) + .header("Authorization", format!("Bearer {jwt}")) .send() .map_err(|e| { ( true, - format!("could not perform request to control plane: {:?}", e), + format!("could not perform request to control plane: {e:?}"), UNKNOWN_HTTP_STATUS.to_string(), ) })?; @@ -39,7 +39,7 @@ fn do_control_plane_request( Ok(spec_resp) => Ok(spec_resp), Err(e) => Err(( true, - format!("could not deserialize control plane response: {:?}", e), + format!("could not deserialize control plane response: {e:?}"), status.to_string(), )), }, @@ -62,7 +62,7 @@ fn do_control_plane_request( // or some internal failure happened. Doesn't make much sense to retry in this case. 
_ => Err(( false, - format!("unexpected control plane response status code: {}", status), + format!("unexpected control plane response status code: {status}"), status.to_string(), )), } diff --git a/compute_tools/src/spec_apply.rs b/compute_tools/src/spec_apply.rs index 0d1389dbad..fcd072263a 100644 --- a/compute_tools/src/spec_apply.rs +++ b/compute_tools/src/spec_apply.rs @@ -933,56 +933,53 @@ async fn get_operations<'a>( PerDatabasePhase::DeleteDBRoleReferences => { let ctx = ctx.read().await; - let operations = - spec.delta_operations - .iter() - .flatten() - .filter(|op| op.action == "delete_role") - .filter_map(move |op| { - if db.is_owned_by(&op.name) { - return None; - } - if !ctx.roles.contains_key(&op.name) { - return None; - } - let quoted = op.name.pg_quote(); - let new_owner = match &db { - DB::SystemDB => PgIdent::from("cloud_admin").pg_quote(), - DB::UserDB(db) => db.owner.pg_quote(), - }; - let (escaped_role, outer_tag) = op.name.pg_quote_dollar(); + let operations = spec + .delta_operations + .iter() + .flatten() + .filter(|op| op.action == "delete_role") + .filter_map(move |op| { + if db.is_owned_by(&op.name) { + return None; + } + if !ctx.roles.contains_key(&op.name) { + return None; + } + let quoted = op.name.pg_quote(); + let new_owner = match &db { + DB::SystemDB => PgIdent::from("cloud_admin").pg_quote(), + DB::UserDB(db) => db.owner.pg_quote(), + }; + let (escaped_role, outer_tag) = op.name.pg_quote_dollar(); - Some(vec![ - // This will reassign all dependent objects to the db owner - Operation { - query: format!( - "REASSIGN OWNED BY {} TO {}", - quoted, new_owner, - ), - comment: None, - }, - // Revoke some potentially blocking privileges (Neon-specific currently) - Operation { - query: format!( - include_str!("sql/pre_drop_role_revoke_privileges.sql"), - // N.B. this has to be properly dollar-escaped with `pg_quote_dollar()` - role_name = escaped_role, - outer_tag = outer_tag, - ), - comment: None, - }, - // This now will only drop privileges of the role - // TODO: this is obviously not 100% true because of the above case, - // there could be still some privileges that are not revoked. Maybe this - // only drops privileges that were granted *by this* role, not *to this* role, - // but this has to be checked. - Operation { - query: format!("DROP OWNED BY {}", quoted), - comment: None, - }, - ]) - }) - .flatten(); + Some(vec![ + // This will reassign all dependent objects to the db owner + Operation { + query: format!("REASSIGN OWNED BY {quoted} TO {new_owner}",), + comment: None, + }, + // Revoke some potentially blocking privileges (Neon-specific currently) + Operation { + query: format!( + include_str!("sql/pre_drop_role_revoke_privileges.sql"), + // N.B. this has to be properly dollar-escaped with `pg_quote_dollar()` + role_name = escaped_role, + outer_tag = outer_tag, + ), + comment: None, + }, + // This now will only drop privileges of the role + // TODO: this is obviously not 100% true because of the above case, + // there could be still some privileges that are not revoked. Maybe this + // only drops privileges that were granted *by this* role, not *to this* role, + // but this has to be checked. 
+ Operation { + query: format!("DROP OWNED BY {quoted}"), + comment: None, + }, + ]) + }) + .flatten(); Ok(Box::new(operations)) } diff --git a/compute_tools/src/sync_sk.rs b/compute_tools/src/sync_sk.rs index 22b7027b93..6c348644b2 100644 --- a/compute_tools/src/sync_sk.rs +++ b/compute_tools/src/sync_sk.rs @@ -27,7 +27,7 @@ pub async fn ping_safekeeper( let (client, conn) = config.connect(tokio_postgres::NoTls).await?; tokio::spawn(async move { if let Err(e) = conn.await { - eprintln!("connection error: {}", e); + eprintln!("connection error: {e}"); } }); diff --git a/control_plane/src/bin/neon_local.rs b/control_plane/src/bin/neon_local.rs index 21f55336aa..b2dd1a7077 100644 --- a/control_plane/src/bin/neon_local.rs +++ b/control_plane/src/bin/neon_local.rs @@ -919,7 +919,7 @@ fn print_timeline( br_sym = "┗━"; } - print!("{} @{}: ", br_sym, ancestor_lsn); + print!("{br_sym} @{ancestor_lsn}: "); } // Finally print a timeline id and name with new line @@ -1742,7 +1742,7 @@ async fn handle_pageserver(subcmd: &PageserverCmd, env: &local_env::LocalEnv) -> StopMode::Immediate => true, }; if let Err(e) = get_pageserver(env, args.pageserver_id)?.stop(immediate) { - eprintln!("pageserver stop failed: {}", e); + eprintln!("pageserver stop failed: {e}"); exit(1); } } @@ -1751,7 +1751,7 @@ async fn handle_pageserver(subcmd: &PageserverCmd, env: &local_env::LocalEnv) -> let pageserver = get_pageserver(env, args.pageserver_id)?; //TODO what shutdown strategy should we use here? if let Err(e) = pageserver.stop(false) { - eprintln!("pageserver stop failed: {}", e); + eprintln!("pageserver stop failed: {e}"); exit(1); } @@ -1768,7 +1768,7 @@ async fn handle_pageserver(subcmd: &PageserverCmd, env: &local_env::LocalEnv) -> { Ok(_) => println!("Page server is up and running"), Err(err) => { - eprintln!("Page server is not available: {}", err); + eprintln!("Page server is not available: {err}"); exit(1); } } @@ -1805,7 +1805,7 @@ async fn handle_storage_controller( }, }; if let Err(e) = svc.stop(stop_args).await { - eprintln!("stop failed: {}", e); + eprintln!("stop failed: {e}"); exit(1); } } @@ -1827,7 +1827,7 @@ async fn handle_safekeeper(subcmd: &SafekeeperCmd, env: &local_env::LocalEnv) -> let safekeeper = get_safekeeper(env, args.id)?; if let Err(e) = safekeeper.start(&args.extra_opt, &args.start_timeout).await { - eprintln!("safekeeper start failed: {}", e); + eprintln!("safekeeper start failed: {e}"); exit(1); } } @@ -1839,7 +1839,7 @@ async fn handle_safekeeper(subcmd: &SafekeeperCmd, env: &local_env::LocalEnv) -> StopMode::Immediate => true, }; if let Err(e) = safekeeper.stop(immediate) { - eprintln!("safekeeper stop failed: {}", e); + eprintln!("safekeeper stop failed: {e}"); exit(1); } } @@ -1852,12 +1852,12 @@ async fn handle_safekeeper(subcmd: &SafekeeperCmd, env: &local_env::LocalEnv) -> }; if let Err(e) = safekeeper.stop(immediate) { - eprintln!("safekeeper stop failed: {}", e); + eprintln!("safekeeper stop failed: {e}"); exit(1); } if let Err(e) = safekeeper.start(&args.extra_opt, &args.start_timeout).await { - eprintln!("safekeeper start failed: {}", e); + eprintln!("safekeeper start failed: {e}"); exit(1); } } @@ -2113,7 +2113,7 @@ async fn try_stop_all(env: &local_env::LocalEnv, immediate: bool) { let storage = EndpointStorage::from_env(env); if let Err(e) = storage.stop(immediate) { - eprintln!("endpoint_storage stop failed: {:#}", e); + eprintln!("endpoint_storage stop failed: {e:#}"); } for ps_conf in &env.pageservers { diff --git a/control_plane/src/endpoint.rs 
b/control_plane/src/endpoint.rs index ae81e7abbe..dab53b0f27 100644 --- a/control_plane/src/endpoint.rs +++ b/control_plane/src/endpoint.rs @@ -846,10 +846,10 @@ impl Endpoint { // Launch compute_ctl let conn_str = self.connstr("cloud_admin", "postgres"); - println!("Starting postgres node at '{}'", conn_str); + println!("Starting postgres node at '{conn_str}'"); if create_test_user { let conn_str = self.connstr("test", "neondb"); - println!("Also at '{}'", conn_str); + println!("Also at '{conn_str}'"); } let mut cmd = Command::new(self.env.neon_distrib_dir.join("compute_ctl")); cmd.args([ @@ -948,8 +948,7 @@ impl Endpoint { Err(e) => { if Instant::now().duration_since(start_at) > start_timeout { return Err(e).context(format!( - "timed out {:?} waiting to connect to compute_ctl HTTP", - start_timeout, + "timed out {start_timeout:?} waiting to connect to compute_ctl HTTP", )); } } @@ -988,7 +987,7 @@ impl Endpoint { // reqwest does not export its error construction utility functions, so let's craft the message ourselves let url = response.url().to_owned(); let msg = match response.text().await { - Ok(err_body) => format!("Error: {}", err_body), + Ok(err_body) => format!("Error: {err_body}"), Err(_) => format!("Http error ({}) at {}.", status.as_u16(), url), }; Err(anyhow::anyhow!(msg)) @@ -1054,7 +1053,7 @@ impl Endpoint { } else { let url = response.url().to_owned(); let msg = match response.text().await { - Ok(err_body) => format!("Error: {}", err_body), + Ok(err_body) => format!("Error: {err_body}"), Err(_) => format!("Http error ({}) at {}.", status.as_u16(), url), }; Err(anyhow::anyhow!(msg)) diff --git a/control_plane/src/local_env.rs b/control_plane/src/local_env.rs index e8abde4901..34465b4d5d 100644 --- a/control_plane/src/local_env.rs +++ b/control_plane/src/local_env.rs @@ -260,7 +260,7 @@ impl Default for EndpointStorageConf { impl NeonBroker { pub fn client_url(&self) -> Url { let url = if let Some(addr) = self.listen_https_addr { - format!("https://{}", addr) + format!("https://{addr}") } else { format!( "http://{}", @@ -733,7 +733,7 @@ impl LocalEnv { let config_toml_path = dentry.path().join("pageserver.toml"); let config_toml: PageserverConfigTomlSubset = toml_edit::de::from_str( &std::fs::read_to_string(&config_toml_path) - .with_context(|| format!("read {:?}", config_toml_path))?, + .with_context(|| format!("read {config_toml_path:?}"))?, ) .context("parse pageserver.toml")?; let identity_toml_path = dentry.path().join("identity.toml"); @@ -743,7 +743,7 @@ impl LocalEnv { } let identity_toml: IdentityTomlSubset = toml_edit::de::from_str( &std::fs::read_to_string(&identity_toml_path) - .with_context(|| format!("read {:?}", identity_toml_path))?, + .with_context(|| format!("read {identity_toml_path:?}"))?, ) .context("parse identity.toml")?; let PageserverConfigTomlSubset { diff --git a/control_plane/src/pageserver.rs b/control_plane/src/pageserver.rs index a683d2daec..7fa00a6730 100644 --- a/control_plane/src/pageserver.rs +++ b/control_plane/src/pageserver.rs @@ -121,7 +121,7 @@ impl PageServerNode { .env .generate_auth_token(&Claims::new(None, Scope::GenerationsApi)) .unwrap(); - overrides.push(format!("control_plane_api_token='{}'", jwt_token)); + overrides.push(format!("control_plane_api_token='{jwt_token}'")); } if !conf.other.contains_key("remote_storage") { diff --git a/control_plane/src/safekeeper.rs b/control_plane/src/safekeeper.rs index 28d369a315..da9dafd8e9 100644 --- a/control_plane/src/safekeeper.rs +++ b/control_plane/src/safekeeper.rs @@ -143,7 +143,7 @@ 
impl SafekeeperNode { let id_string = id.to_string(); // TODO: add availability_zone to the config. // Right now we just specify any value here and use it to check metrics in tests. - let availability_zone = format!("sk-{}", id_string); + let availability_zone = format!("sk-{id_string}"); let mut args = vec![ "-D".to_owned(), diff --git a/control_plane/src/storage_controller.rs b/control_plane/src/storage_controller.rs index 334949924c..f2ac5bb2dd 100644 --- a/control_plane/src/storage_controller.rs +++ b/control_plane/src/storage_controller.rs @@ -167,7 +167,7 @@ impl StorageController { fn storage_controller_instance_dir(&self, instance_id: u8) -> PathBuf { self.env .base_data_dir - .join(format!("storage_controller_{}", instance_id)) + .join(format!("storage_controller_{instance_id}")) } fn pid_file(&self, instance_id: u8) -> Utf8PathBuf { @@ -220,7 +220,7 @@ impl StorageController { "-d", DB_NAME, "-p", - &format!("{}", postgres_port), + &format!("{postgres_port}"), ]; let pg_lib_dir = self.get_pg_lib_dir().await.unwrap(); let envs = [ @@ -263,7 +263,7 @@ impl StorageController { "-h", "localhost", "-p", - &format!("{}", postgres_port), + &format!("{postgres_port}"), "-U", &username(), "-O", @@ -425,7 +425,7 @@ impl StorageController { // from `LocalEnv`'s config file (`.neon/config`). tokio::fs::write( &pg_data_path.join("postgresql.conf"), - format!("port = {}\nfsync=off\n", postgres_port), + format!("port = {postgres_port}\nfsync=off\n"), ) .await?; @@ -477,7 +477,7 @@ impl StorageController { self.setup_database(postgres_port).await?; } - let database_url = format!("postgresql://localhost:{}/{DB_NAME}", postgres_port); + let database_url = format!("postgresql://localhost:{postgres_port}/{DB_NAME}"); // We support running a startup SQL script to fiddle with the database before we launch storcon. // This is used by the test suite. @@ -508,7 +508,7 @@ impl StorageController { drop(client); conn.await??; - let addr = format!("{}:{}", host, listen_port); + let addr = format!("{host}:{listen_port}"); let address_for_peers = Uri::builder() .scheme(scheme) .authority(addr.clone()) @@ -810,9 +810,9 @@ impl StorageController { builder = builder.json(&body) } if let Some(private_key) = &self.private_key { - println!("Getting claims for path {}", path); + println!("Getting claims for path {path}"); if let Some(required_claims) = Self::get_claims_for_path(&path)? 
{ - println!("Got claims {:?} for path {}", required_claims, path); + println!("Got claims {required_claims:?} for path {path}"); let jwt_token = encode_from_key_file(&required_claims, private_key)?; builder = builder.header( reqwest::header::AUTHORIZATION, diff --git a/control_plane/storcon_cli/src/main.rs b/control_plane/storcon_cli/src/main.rs index 507190b1e0..0036b7d0f6 100644 --- a/control_plane/storcon_cli/src/main.rs +++ b/control_plane/storcon_cli/src/main.rs @@ -649,7 +649,7 @@ async fn main() -> anyhow::Result<()> { response .new_shards .iter() - .map(|s| format!("{:?}", s)) + .map(|s| format!("{s:?}")) .collect::<Vec<_>>() .join(",") ); @@ -771,8 +771,8 @@ async fn main() -> anyhow::Result<()> { println!("Tenant {tenant_id}"); let mut table = comfy_table::Table::new(); - table.add_row(["Policy", &format!("{:?}", policy)]); - table.add_row(["Stripe size", &format!("{:?}", stripe_size)]); + table.add_row(["Policy", &format!("{policy:?}")]); + table.add_row(["Stripe size", &format!("{stripe_size:?}")]); table.add_row(["Config", &serde_json::to_string_pretty(&config).unwrap()]); println!("{table}"); println!("Shards:"); @@ -789,7 +789,7 @@ async fn main() -> anyhow::Result<()> { let secondary = shard .node_secondary .iter() - .map(|n| format!("{}", n)) + .map(|n| format!("{n}")) .collect::<Vec<_>>() .join(","); @@ -863,7 +863,7 @@ async fn main() -> anyhow::Result<()> { } } else { // Make it obvious to the user that since they've omitted an AZ, we're clearing it - eprintln!("Clearing preferred AZ for tenant {}", tenant_id); + eprintln!("Clearing preferred AZ for tenant {tenant_id}"); } // Construct a request that modifies all the tenant's shards @@ -1134,8 +1134,7 @@ async fn main() -> anyhow::Result<()> { Err((tenant_shard_id, from, to, error)) => { failure += 1; println!( - "Failed to migrate {} from node {} to node {}: {}", - tenant_shard_id, from, to, error + "Failed to migrate {tenant_shard_id} from node {from} to node {to}: {error}" ); } } @@ -1277,8 +1276,7 @@ async fn main() -> anyhow::Result<()> { concurrency, } => { let mut path = format!( - "/v1/tenant/{}/timeline/{}/download_heatmap_layers", - tenant_shard_id, timeline_id, + "/v1/tenant/{tenant_shard_id}/timeline/{timeline_id}/download_heatmap_layers", ); if let Some(c) = concurrency { @@ -1303,8 +1301,7 @@ async fn watch_tenant_shard( ) -> anyhow::Result<()> { if let Some(until_migrated_to) = until_migrated_to { println!( - "Waiting for tenant shard {} to be migrated to node {}", - tenant_shard_id, until_migrated_to + "Waiting for tenant shard {tenant_shard_id} to be migrated to node {until_migrated_to}" ); } @@ -1327,7 +1324,7 @@ async fn watch_tenant_shard( "attached: {} secondary: {} {}", shard .node_attached - .map(|n| format!("{}", n)) + .map(|n| format!("{n}")) .unwrap_or("none".to_string()), shard .node_secondary @@ -1341,15 +1338,12 @@ async fn watch_tenant_shard( "(reconciler idle)" } ); - println!("{}", summary); + println!("{summary}"); // Maybe drop out if we finished migration if let Some(until_migrated_to) = until_migrated_to { if shard.node_attached == Some(until_migrated_to) && !shard.is_reconciling { - println!( - "Tenant shard {} is now on node {}", - tenant_shard_id, until_migrated_to - ); + println!("Tenant shard {tenant_shard_id} is now on node {until_migrated_to}"); break; } } diff --git a/endpoint_storage/src/app.rs b/endpoint_storage/src/app.rs index f44efe6d7a..42431c0066 100644 --- a/endpoint_storage/src/app.rs +++ b/endpoint_storage/src/app.rs @@ -374,7 +374,7 @@
MC4CAQAwBQYDK2VwBCIEID/Drmc1AA6U/znNRWpF3zEGegOATQxfkdWxitcOMsIH let request = Request::builder() .uri(format!("/{tenant}/{timeline}/{endpoint}/sub/path/key")) .method(method) - .header("Authorization", format!("Bearer {}", token)) + .header("Authorization", format!("Bearer {token}")) .body(Body::empty()) .unwrap(); let status = ServiceExt::ready(&mut app) diff --git a/libs/desim/src/executor.rs b/libs/desim/src/executor.rs index 51b11ff97e..bdb9c6cd4b 100644 --- a/libs/desim/src/executor.rs +++ b/libs/desim/src/executor.rs @@ -71,7 +71,7 @@ impl Runtime { debug!("thread panicked: {:?}", e); let mut result = ctx.result.lock(); if result.0 == -1 { - *result = (256, format!("thread panicked: {:?}", e)); + *result = (256, format!("thread panicked: {e:?}")); } }); } diff --git a/libs/desim/src/proto.rs b/libs/desim/src/proto.rs index 31bc29e6a6..7c3de4ff4b 100644 --- a/libs/desim/src/proto.rs +++ b/libs/desim/src/proto.rs @@ -47,8 +47,8 @@ impl Debug for AnyMessage { match self { AnyMessage::None => write!(f, "None"), AnyMessage::InternalConnect => write!(f, "InternalConnect"), - AnyMessage::Just32(v) => write!(f, "Just32({})", v), - AnyMessage::ReplCell(v) => write!(f, "ReplCell({:?})", v), + AnyMessage::Just32(v) => write!(f, "Just32({v})"), + AnyMessage::ReplCell(v) => write!(f, "ReplCell({v:?})"), AnyMessage::Bytes(v) => write!(f, "Bytes({})", hex::encode(v)), AnyMessage::LSN(v) => write!(f, "LSN({})", Lsn(*v)), } diff --git a/libs/http-utils/src/endpoint.rs b/libs/http-utils/src/endpoint.rs index 64147f2dd0..f32ced1180 100644 --- a/libs/http-utils/src/endpoint.rs +++ b/libs/http-utils/src/endpoint.rs @@ -582,14 +582,14 @@ pub fn attach_openapi_ui( deepLinking: true, showExtensions: true, showCommonExtensions: true, - url: "{}", + url: "{spec_mount_path}", }}) window.ui = ui; }}; - "#, spec_mount_path))).unwrap()) + "#))).unwrap()) }) ) } @@ -696,7 +696,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::from_str("127.0.0.1").unwrap(), 80); let mut service = builder.build(remote_addr); if let Err(e) = poll_fn(|ctx| service.poll_ready(ctx)).await { - panic!("request service is not ready: {:?}", e); + panic!("request service is not ready: {e:?}"); } let mut req: Request = Request::default(); @@ -716,7 +716,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::from_str("127.0.0.1").unwrap(), 80); let mut service = builder.build(remote_addr); if let Err(e) = poll_fn(|ctx| service.poll_ready(ctx)).await { - panic!("request service is not ready: {:?}", e); + panic!("request service is not ready: {e:?}"); } let req: Request = Request::default(); diff --git a/libs/neon-shmem/src/lib.rs b/libs/neon-shmem/src/lib.rs index e1b14b1371..c689959b68 100644 --- a/libs/neon-shmem/src/lib.rs +++ b/libs/neon-shmem/src/lib.rs @@ -86,7 +86,7 @@ impl ShmemHandle { // somewhat smaller than that, because with anything close to that, you'll run out of // memory anyway. 
if max_size >= 1 << 48 { - panic!("max size {} too large", max_size); + panic!("max size {max_size} too large"); } if initial_size > max_size { panic!("initial size {initial_size} larger than max size {max_size}"); @@ -279,7 +279,7 @@ mod tests { fn assert_range(ptr: *const u8, expected: u8, range: Range) { for i in range { let b = unsafe { *(ptr.add(i)) }; - assert_eq!(expected, b, "unexpected byte at offset {}", i); + assert_eq!(expected, b, "unexpected byte at offset {i}"); } } diff --git a/libs/pageserver_api/src/controller_api.rs b/libs/pageserver_api/src/controller_api.rs index 55495dd68e..ff18d40bfe 100644 --- a/libs/pageserver_api/src/controller_api.rs +++ b/libs/pageserver_api/src/controller_api.rs @@ -577,8 +577,7 @@ mod test { let err = serde_json::from_value::(create_request).unwrap_err(); assert!( err.to_string().contains("unknown field `unknown_field`"), - "expect unknown field `unknown_field` error, got: {}", - err + "expect unknown field `unknown_field` error, got: {err}" ); } diff --git a/libs/pageserver_api/src/keyspace.rs b/libs/pageserver_api/src/keyspace.rs index 1b48d3c462..10a242e13b 100644 --- a/libs/pageserver_api/src/keyspace.rs +++ b/libs/pageserver_api/src/keyspace.rs @@ -334,8 +334,7 @@ impl KeySpace { std::cmp::max(range.start, prev.start) < std::cmp::min(range.end, prev.end); assert!( !overlap, - "Attempt to merge ovelapping keyspaces: {:?} overlaps {:?}", - prev, range + "Attempt to merge ovelapping keyspaces: {prev:?} overlaps {range:?}" ); } @@ -1104,7 +1103,7 @@ mod tests { // total range contains at least one shard-local page let all_nonzero = fragments.iter().all(|f| f.0 > 0); if !all_nonzero { - eprintln!("Found a zero-length fragment: {:?}", fragments); + eprintln!("Found a zero-length fragment: {fragments:?}"); } assert!(all_nonzero); } else { diff --git a/libs/pageserver_api/src/models.rs b/libs/pageserver_api/src/models.rs index 2fc32c8f49..ee6725efbe 100644 --- a/libs/pageserver_api/src/models.rs +++ b/libs/pageserver_api/src/models.rs @@ -1182,7 +1182,7 @@ impl Display for ImageCompressionAlgorithm { ImageCompressionAlgorithm::Disabled => write!(f, "disabled"), ImageCompressionAlgorithm::Zstd { level } => { if let Some(level) = level { - write!(f, "zstd({})", level) + write!(f, "zstd({level})") } else { write!(f, "zstd") } @@ -2011,8 +2011,7 @@ mod tests { let err = serde_json::from_value::(config_request).unwrap_err(); assert!( err.to_string().contains("unknown field `unknown_field`"), - "expect unknown field `unknown_field` error, got: {}", - err + "expect unknown field `unknown_field` error, got: {err}" ); } diff --git a/libs/postgres_backend/src/lib.rs b/libs/postgres_backend/src/lib.rs index 714d8ac403..091299f842 100644 --- a/libs/postgres_backend/src/lib.rs +++ b/libs/postgres_backend/src/lib.rs @@ -939,7 +939,7 @@ impl PostgresBackendReader { FeMessage::CopyFail => Err(CopyStreamHandlerEnd::CopyFail), FeMessage::Terminate => Err(CopyStreamHandlerEnd::Terminate), _ => Err(CopyStreamHandlerEnd::from(ConnectionError::Protocol( - ProtocolError::Protocol(format!("unexpected message in COPY stream {:?}", msg)), + ProtocolError::Protocol(format!("unexpected message in COPY stream {msg:?}")), ))), }, None => Err(CopyStreamHandlerEnd::EOF), diff --git a/libs/postgres_backend/tests/simple_select.rs b/libs/postgres_backend/tests/simple_select.rs index 75ca123014..23e17799bd 100644 --- a/libs/postgres_backend/tests/simple_select.rs +++ b/libs/postgres_backend/tests/simple_select.rs @@ -61,7 +61,7 @@ async fn simple_select() { // so spawn it off to 
run on its own. tokio::spawn(async move { if let Err(e) = connection.await { - eprintln!("connection error: {}", e); + eprintln!("connection error: {e}"); } }); @@ -137,7 +137,7 @@ async fn simple_select_ssl() { // so spawn it off to run on its own. tokio::spawn(async move { if let Err(e) = connection.await { - eprintln!("connection error: {}", e); + eprintln!("connection error: {e}"); } }); diff --git a/libs/postgres_connection/src/lib.rs b/libs/postgres_connection/src/lib.rs index cd981b3729..2388303329 100644 --- a/libs/postgres_connection/src/lib.rs +++ b/libs/postgres_connection/src/lib.rs @@ -223,7 +223,7 @@ mod tests_pg_connection_config { assert_eq!(cfg.port(), 123); assert_eq!(cfg.raw_address(), "stub.host.example:123"); assert_eq!( - format!("{:?}", cfg), + format!("{cfg:?}"), "PgConnectionConfig { host: Domain(\"stub.host.example\"), port: 123, password: None }" ); } @@ -239,7 +239,7 @@ mod tests_pg_connection_config { assert_eq!(cfg.port(), 123); assert_eq!(cfg.raw_address(), "[::1]:123"); assert_eq!( - format!("{:?}", cfg), + format!("{cfg:?}"), "PgConnectionConfig { host: Ipv6(::1), port: 123, password: None }" ); } @@ -252,7 +252,7 @@ mod tests_pg_connection_config { assert_eq!(cfg.port(), 123); assert_eq!(cfg.raw_address(), "stub.host.example:123"); assert_eq!( - format!("{:?}", cfg), + format!("{cfg:?}"), "PgConnectionConfig { host: Domain(\"stub.host.example\"), port: 123, password: Some(REDACTED-STRING) }" ); } diff --git a/libs/postgres_ffi/src/waldecoder_handler.rs b/libs/postgres_ffi/src/waldecoder_handler.rs index b4d50375bd..9cd40645ec 100644 --- a/libs/postgres_ffi/src/waldecoder_handler.rs +++ b/libs/postgres_ffi/src/waldecoder_handler.rs @@ -114,7 +114,7 @@ impl WalStreamDecoderHandler for WalStreamDecoder { let hdr = XLogLongPageHeaderData::from_bytes(&mut self.inputbuf).map_err( |e| WalDecodeError { - msg: format!("long header deserialization failed {}", e), + msg: format!("long header deserialization failed {e}"), lsn: self.lsn, }, )?; @@ -130,7 +130,7 @@ impl WalStreamDecoderHandler for WalStreamDecoder { let hdr = XLogPageHeaderData::from_bytes(&mut self.inputbuf).map_err(|e| { WalDecodeError { - msg: format!("header deserialization failed {}", e), + msg: format!("header deserialization failed {e}"), lsn: self.lsn, } })?; @@ -155,7 +155,7 @@ impl WalStreamDecoderHandler for WalStreamDecoder { let xl_tot_len = (&self.inputbuf[0..4]).get_u32_le(); if (xl_tot_len as usize) < XLOG_SIZE_OF_XLOG_RECORD { return Err(WalDecodeError { - msg: format!("invalid xl_tot_len {}", xl_tot_len), + msg: format!("invalid xl_tot_len {xl_tot_len}"), lsn: self.lsn, }); } @@ -218,7 +218,7 @@ impl WalStreamDecoderHandler for WalStreamDecoder { let xlogrec = XLogRecord::from_slice(&recordbuf[0..XLOG_SIZE_OF_XLOG_RECORD]).map_err(|e| { WalDecodeError { - msg: format!("xlog record deserialization failed {}", e), + msg: format!("xlog record deserialization failed {e}"), lsn: self.lsn, } })?; diff --git a/libs/postgres_ffi/src/walrecord.rs b/libs/postgres_ffi/src/walrecord.rs index 1ccf4590a9..c0ae88363e 100644 --- a/libs/postgres_ffi/src/walrecord.rs +++ b/libs/postgres_ffi/src/walrecord.rs @@ -1199,7 +1199,7 @@ pub fn describe_postgres_wal_record(record: &Bytes) -> Result "HEAP2 MULTI_INSERT", pg_constants::XLOG_HEAP2_VISIBLE => "HEAP2 VISIBLE", _ => { - unknown_str = format!("HEAP2 UNKNOWN_0x{:02x}", info); + unknown_str = format!("HEAP2 UNKNOWN_0x{info:02x}"); &unknown_str } } @@ -1212,7 +1212,7 @@ pub fn describe_postgres_wal_record(record: &Bytes) -> Result "HEAP UPDATE", 
pg_constants::XLOG_HEAP_HOT_UPDATE => "HEAP HOT_UPDATE", _ => { - unknown_str = format!("HEAP2 UNKNOWN_0x{:02x}", info); + unknown_str = format!("HEAP2 UNKNOWN_0x{info:02x}"); &unknown_str } } @@ -1223,7 +1223,7 @@ pub fn describe_postgres_wal_record(record: &Bytes) -> Result "XLOG FPI", pg_constants::XLOG_FPI_FOR_HINT => "XLOG FPI_FOR_HINT", _ => { - unknown_str = format!("XLOG UNKNOWN_0x{:02x}", info); + unknown_str = format!("XLOG UNKNOWN_0x{info:02x}"); &unknown_str } } @@ -1231,7 +1231,7 @@ pub fn describe_postgres_wal_record(record: &Bytes) -> Result { let info = xlogrec.xl_info & pg_constants::XLR_RMGR_INFO_MASK; - unknown_str = format!("UNKNOWN_RM_{} INFO_0x{:02x}", rmid, info); + unknown_str = format!("UNKNOWN_RM_{rmid} INFO_0x{info:02x}"); &unknown_str } }; diff --git a/libs/postgres_ffi/wal_craft/src/xlog_utils_test.rs b/libs/postgres_ffi/wal_craft/src/xlog_utils_test.rs index 4a33dbe25b..94371a35b5 100644 --- a/libs/postgres_ffi/wal_craft/src/xlog_utils_test.rs +++ b/libs/postgres_ffi/wal_craft/src/xlog_utils_test.rs @@ -34,7 +34,7 @@ fn test_end_of_wal(test_name: &str) { let cfg = Conf { pg_version, pg_distrib_dir: top_path.join("pg_install"), - datadir: top_path.join(format!("test_output/{}-{PG_MAJORVERSION}", test_name)), + datadir: top_path.join(format!("test_output/{test_name}-{PG_MAJORVERSION}")), }; if cfg.datadir.exists() { fs::remove_dir_all(&cfg.datadir).unwrap(); diff --git a/libs/postgres_initdb/src/lib.rs b/libs/postgres_initdb/src/lib.rs index ed54696861..4b4a597f73 100644 --- a/libs/postgres_initdb/src/lib.rs +++ b/libs/postgres_initdb/src/lib.rs @@ -31,15 +31,15 @@ pub enum Error { impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - Error::Spawn(e) => write!(f, "Error spawning command: {:?}", e), + Error::Spawn(e) => write!(f, "Error spawning command: {e:?}"), Error::Failed { status, stderr } => write!( f, "Command failed with status {:?}: {}", status, String::from_utf8_lossy(stderr) ), - Error::WaitOutput(e) => write!(f, "Error waiting for command output: {:?}", e), - Error::Other(e) => write!(f, "Error: {:?}", e), + Error::WaitOutput(e) => write!(f, "Error waiting for command output: {e:?}"), + Error::Other(e) => write!(f, "Error: {e:?}"), } } } diff --git a/libs/posthog_client_lite/src/lib.rs b/libs/posthog_client_lite/src/lib.rs index 730878fb58..f21047bcfc 100644 --- a/libs/posthog_client_lite/src/lib.rs +++ b/libs/posthog_client_lite/src/lib.rs @@ -168,15 +168,13 @@ impl FeatureStore { let PostHogFlagFilterPropertyValue::String(provided) = provided else { // Left should be a string return Err(PostHogEvaluationError::Internal(format!( - "The left side of the condition is not a string: {:?}", - provided + "The left side of the condition is not a string: {provided:?}" ))); }; let PostHogFlagFilterPropertyValue::List(requested) = requested else { // Right should be a list of string return Err(PostHogEvaluationError::Internal(format!( - "The right side of the condition is not a list: {:?}", - requested + "The right side of the condition is not a list: {requested:?}" ))); }; Ok(requested.contains(provided)) @@ -185,14 +183,12 @@ impl FeatureStore { let PostHogFlagFilterPropertyValue::String(requested) = requested else { // Right should be a string return Err(PostHogEvaluationError::Internal(format!( - "The right side of the condition is not a string: {:?}", - requested + "The right side of the condition is not a string: {requested:?}" ))); }; let Ok(requested) = requested.parse::() else { return 
Err(PostHogEvaluationError::Internal(format!( - "Can not parse the right side of the condition as a number: {:?}", - requested + "Can not parse the right side of the condition as a number: {requested:?}" ))); }; // Left can either be a number or a string @@ -201,16 +197,14 @@ impl FeatureStore { PostHogFlagFilterPropertyValue::String(provided) => { let Ok(provided) = provided.parse::() else { return Err(PostHogEvaluationError::Internal(format!( - "Can not parse the left side of the condition as a number: {:?}", - provided + "Can not parse the left side of the condition as a number: {provided:?}" ))); }; provided } _ => { return Err(PostHogEvaluationError::Internal(format!( - "The left side of the condition is not a number or a string: {:?}", - provided + "The left side of the condition is not a number or a string: {provided:?}" ))); } }; @@ -218,14 +212,12 @@ impl FeatureStore { "lt" => Ok(provided < requested), "gt" => Ok(provided > requested), op => Err(PostHogEvaluationError::Internal(format!( - "Unsupported operator: {}", - op + "Unsupported operator: {op}" ))), } } _ => Err(PostHogEvaluationError::Internal(format!( - "Unsupported operator: {}", - operator + "Unsupported operator: {operator}" ))), } } @@ -373,8 +365,7 @@ impl FeatureStore { if let Some(flag_config) = self.flags.get(flag_key) { if !flag_config.active { return Err(PostHogEvaluationError::NotAvailable(format!( - "The feature flag is not active: {}", - flag_key + "The feature flag is not active: {flag_key}" ))); } let Some(ref multivariate) = flag_config.filters.multivariate else { @@ -401,8 +392,7 @@ impl FeatureStore { // This should not happen because the rollout percentage always adds up to 100, but just in case that PostHog // returned invalid spec, we return an error. return Err(PostHogEvaluationError::Internal(format!( - "Rollout percentage does not add up to 100: {}", - flag_key + "Rollout percentage does not add up to 100: {flag_key}" ))); } GroupEvaluationResult::Unmatched => continue, @@ -413,8 +403,7 @@ impl FeatureStore { } else { // The feature flag is not available yet Err(PostHogEvaluationError::NotAvailable(format!( - "Not found in the local evaluation spec: {}", - flag_key + "Not found in the local evaluation spec: {flag_key}" ))) } } @@ -440,8 +429,7 @@ impl FeatureStore { if let Some(flag_config) = self.flags.get(flag_key) { if !flag_config.active { return Err(PostHogEvaluationError::NotAvailable(format!( - "The feature flag is not active: {}", - flag_key + "The feature flag is not active: {flag_key}" ))); } if flag_config.filters.multivariate.is_some() { @@ -456,8 +444,7 @@ impl FeatureStore { match self.evaluate_group(group, hash_on_global_rollout_percentage, properties)? 
{ GroupEvaluationResult::MatchedAndOverride(_) => { return Err(PostHogEvaluationError::Internal(format!( - "Boolean flag cannot have overrides: {}", - flag_key + "Boolean flag cannot have overrides: {flag_key}" ))); } GroupEvaluationResult::MatchedAndEvaluate => { @@ -471,8 +458,7 @@ impl FeatureStore { } else { // The feature flag is not available yet Err(PostHogEvaluationError::NotAvailable(format!( - "Not found in the local evaluation spec: {}", - flag_key + "Not found in the local evaluation spec: {flag_key}" ))) } } @@ -483,8 +469,7 @@ impl FeatureStore { Ok(flag_config.filters.multivariate.is_none()) } else { Err(PostHogEvaluationError::NotAvailable(format!( - "Not found in the local evaluation spec: {}", - flag_key + "Not found in the local evaluation spec: {flag_key}" ))) } } diff --git a/libs/pq_proto/src/lib.rs b/libs/pq_proto/src/lib.rs index e7afc64564..482dd9a298 100644 --- a/libs/pq_proto/src/lib.rs +++ b/libs/pq_proto/src/lib.rs @@ -198,7 +198,7 @@ impl fmt::Display for CancelKeyData { // This format is more compact and might work better for logs. f.debug_tuple("CancelKeyData") - .field(&format_args!("{:x}", id)) + .field(&format_args!("{id:x}")) .finish() } } @@ -291,8 +291,7 @@ impl FeMessage { let len = (&buf[1..5]).read_u32::().unwrap(); if len < 4 { return Err(ProtocolError::Protocol(format!( - "invalid message length {}", - len + "invalid message length {len}" ))); } @@ -367,8 +366,7 @@ impl FeStartupPacket { #[allow(clippy::manual_range_contains)] if len < 8 || len > MAX_STARTUP_PACKET_LENGTH { return Err(ProtocolError::Protocol(format!( - "invalid startup packet message length {}", - len + "invalid startup packet message length {len}" ))); } diff --git a/libs/proxy/postgres-protocol2/src/authentication/sasl.rs b/libs/proxy/postgres-protocol2/src/authentication/sasl.rs index a7bf3da20a..b8304f9d8d 100644 --- a/libs/proxy/postgres-protocol2/src/authentication/sasl.rs +++ b/libs/proxy/postgres-protocol2/src/authentication/sasl.rs @@ -308,7 +308,7 @@ impl ScramSha256 { let verifier = match parsed { ServerFinalMessage::Error(e) => { - return Err(io::Error::other(format!("SCRAM error: {}", e))); + return Err(io::Error::other(format!("SCRAM error: {e}"))); } ServerFinalMessage::Verifier(verifier) => verifier, }; @@ -343,10 +343,8 @@ impl<'a> Parser<'a> { match self.it.next() { Some((_, c)) if c == target => Ok(()), Some((i, c)) => { - let m = format!( - "unexpected character at byte {}: expected `{}` but got `{}", - i, target, c - ); + let m = + format!("unexpected character at byte {i}: expected `{target}` but got `{c}"); Err(io::Error::new(io::ErrorKind::InvalidInput, m)) } None => Err(io::Error::new( @@ -412,7 +410,7 @@ impl<'a> Parser<'a> { match self.it.peek() { Some(&(i, _)) => Err(io::Error::new( io::ErrorKind::InvalidInput, - format!("unexpected trailing data at byte {}", i), + format!("unexpected trailing data at byte {i}"), )), None => Ok(()), } diff --git a/libs/proxy/postgres-protocol2/src/message/backend.rs b/libs/proxy/postgres-protocol2/src/message/backend.rs index d7eaef9509..3fc9a9335c 100644 --- a/libs/proxy/postgres-protocol2/src/message/backend.rs +++ b/libs/proxy/postgres-protocol2/src/message/backend.rs @@ -211,7 +211,7 @@ impl Message { tag => { return Err(io::Error::new( io::ErrorKind::InvalidInput, - format!("unknown authentication tag `{}`", tag), + format!("unknown authentication tag `{tag}`"), )); } }, @@ -238,7 +238,7 @@ impl Message { tag => { return Err(io::Error::new( io::ErrorKind::InvalidInput, - format!("unknown message tag `{}`", tag), + 
format!("unknown message tag `{tag}`"), )); } }; diff --git a/libs/proxy/postgres-types2/src/lib.rs b/libs/proxy/postgres-types2/src/lib.rs index 7c9874bda3..c98c45636b 100644 --- a/libs/proxy/postgres-types2/src/lib.rs +++ b/libs/proxy/postgres-types2/src/lib.rs @@ -46,7 +46,7 @@ impl fmt::Display for Type { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { match self.schema() { "public" | "pg_catalog" => {} - schema => write!(fmt, "{}.", schema)?, + schema => write!(fmt, "{schema}.")?, } fmt.write_str(self.name()) } diff --git a/libs/proxy/tokio-postgres2/src/error/mod.rs b/libs/proxy/tokio-postgres2/src/error/mod.rs index 8149bceeb9..5309bce17e 100644 --- a/libs/proxy/tokio-postgres2/src/error/mod.rs +++ b/libs/proxy/tokio-postgres2/src/error/mod.rs @@ -332,10 +332,10 @@ impl fmt::Display for DbError { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { write!(fmt, "{}: {}", self.severity, self.message)?; if let Some(detail) = &self.detail { - write!(fmt, "\nDETAIL: {}", detail)?; + write!(fmt, "\nDETAIL: {detail}")?; } if let Some(hint) = &self.hint { - write!(fmt, "\nHINT: {}", hint)?; + write!(fmt, "\nHINT: {hint}")?; } Ok(()) } @@ -398,9 +398,9 @@ impl fmt::Display for Error { Kind::Io => fmt.write_str("error communicating with the server")?, Kind::UnexpectedMessage => fmt.write_str("unexpected message from server")?, Kind::Tls => fmt.write_str("error performing TLS handshake")?, - Kind::ToSql(idx) => write!(fmt, "error serializing parameter {}", idx)?, - Kind::FromSql(idx) => write!(fmt, "error deserializing column {}", idx)?, - Kind::Column(column) => write!(fmt, "invalid column `{}`", column)?, + Kind::ToSql(idx) => write!(fmt, "error serializing parameter {idx}")?, + Kind::FromSql(idx) => write!(fmt, "error deserializing column {idx}")?, + Kind::Column(column) => write!(fmt, "invalid column `{column}`")?, Kind::Closed => fmt.write_str("connection closed")?, Kind::Db => fmt.write_str("db error")?, Kind::Parse => fmt.write_str("error parsing response from server")?, @@ -411,7 +411,7 @@ impl fmt::Display for Error { Kind::Timeout => fmt.write_str("timeout waiting for server")?, }; if let Some(ref cause) = self.0.cause { - write!(fmt, ": {}", cause)?; + write!(fmt, ": {cause}")?; } Ok(()) } diff --git a/libs/proxy/tokio-postgres2/src/row.rs b/libs/proxy/tokio-postgres2/src/row.rs index 5fc955eef4..36d578558f 100644 --- a/libs/proxy/tokio-postgres2/src/row.rs +++ b/libs/proxy/tokio-postgres2/src/row.rs @@ -156,7 +156,7 @@ impl Row { { match self.get_inner(&idx) { Ok(ok) => ok, - Err(err) => panic!("error retrieving column {}: {}", idx, err), + Err(err) => panic!("error retrieving column {idx}: {err}"), } } @@ -274,7 +274,7 @@ impl SimpleQueryRow { { match self.get_inner(&idx) { Ok(ok) => ok, - Err(err) => panic!("error retrieving column {}: {}", idx, err), + Err(err) => panic!("error retrieving column {idx}: {err}"), } } diff --git a/libs/remote_storage/src/local_fs.rs b/libs/remote_storage/src/local_fs.rs index 8320d7afdc..30690b1bdb 100644 --- a/libs/remote_storage/src/local_fs.rs +++ b/libs/remote_storage/src/local_fs.rs @@ -400,7 +400,7 @@ impl RemoteStorage for LocalFs { key }; - let relative_key = format!("{}", relative_key); + let relative_key = format!("{relative_key}"); if relative_key.contains(REMOTE_STORAGE_PREFIX_SEPARATOR) { let first_part = relative_key .split(REMOTE_STORAGE_PREFIX_SEPARATOR) @@ -594,13 +594,9 @@ impl RemoteStorage for LocalFs { let from_path = from.with_base(&self.storage_root); let to_path = to.with_base(&self.storage_root); 
create_target_directory(&to_path).await?; - fs::copy(&from_path, &to_path).await.with_context(|| { - format!( - "Failed to copy file from '{from_path}' to '{to_path}'", - from_path = from_path, - to_path = to_path - ) - })?; + fs::copy(&from_path, &to_path) + .await + .with_context(|| format!("Failed to copy file from '{from_path}' to '{to_path}'"))?; Ok(()) } @@ -1183,7 +1179,7 @@ mod fs_tests { .write(true) .create_new(true) .open(path)?; - write!(file_for_writing, "{}", contents)?; + write!(file_for_writing, "{contents}")?; drop(file_for_writing); let file_size = path.metadata()?.len() as usize; Ok(( diff --git a/libs/safekeeper_api/src/membership.rs b/libs/safekeeper_api/src/membership.rs index 3d4d17096e..1751c54f6a 100644 --- a/libs/safekeeper_api/src/membership.rs +++ b/libs/safekeeper_api/src/membership.rs @@ -193,10 +193,10 @@ mod tests { }) .unwrap(); - println!("members: {}", members); + println!("members: {members}"); let j = serde_json::to_string(&members).expect("failed to serialize"); - println!("members json: {}", j); + println!("members json: {j}"); assert_eq!( j, r#"[{"id":42,"host":"lala.org","pg_port":5432},{"id":43,"host":"bubu.org","pg_port":5432}]"# diff --git a/libs/utils/src/error.rs b/libs/utils/src/error.rs index 7ce203e918..6fa86916c1 100644 --- a/libs/utils/src/error.rs +++ b/libs/utils/src/error.rs @@ -41,7 +41,7 @@ pub fn report_compact_sources(e: &E) -> impl std::fmt::Dis // why is E a generic parameter here? hope that rustc will see through a default // Error::source implementation and leave the following out if there cannot be any // sources: - Sources(self.0.source()).try_for_each(|src| write!(f, ": {}", src)) + Sources(self.0.source()).try_for_each(|src| write!(f, ": {src}")) } } diff --git a/libs/utils/src/generation.rs b/libs/utils/src/generation.rs index b5e4a4644a..8a3bef914a 100644 --- a/libs/utils/src/generation.rs +++ b/libs/utils/src/generation.rs @@ -135,7 +135,7 @@ impl Debug for Generation { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::Valid(v) => { - write!(f, "{:08x}", v) + write!(f, "{v:08x}") } Self::None => { write!(f, "") diff --git a/libs/utils/src/id.rs b/libs/utils/src/id.rs index 68cb1f0209..e3037aec21 100644 --- a/libs/utils/src/id.rs +++ b/libs/utils/src/id.rs @@ -280,7 +280,7 @@ impl TryFrom<Option<&str>> for TimelineId { value .unwrap_or_default() .parse::<TimelineId>() - .with_context(|| format!("Could not parse timeline id from {:?}", value)) + .with_context(|| format!("Could not parse timeline id from {value:?}")) } } diff --git a/libs/utils/src/postgres_client.rs b/libs/utils/src/postgres_client.rs index 4167839e28..7596fefe38 100644 --- a/libs/utils/src/postgres_client.rs +++ b/libs/utils/src/postgres_client.rs @@ -89,7 +89,7 @@ pub fn wal_stream_connection_config( .set_password(args.auth_token.map(|s| s.to_owned())); if let Some(availability_zone) = args.availability_zone { - connstr = connstr.extend_options([format!("availability_zone={}", availability_zone)]); + connstr = connstr.extend_options([format!("availability_zone={availability_zone}")]); } Ok(connstr) diff --git a/libs/utils/src/shard.rs b/libs/utils/src/shard.rs index c8c410a725..f2b81373e2 100644 --- a/libs/utils/src/shard.rs +++ b/libs/utils/src/shard.rs @@ -196,7 +196,7 @@ impl std::fmt::Display for TenantShardId { impl std::fmt::Debug for TenantShardId { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // Debug is the same as Display: the compact hex representation - write!(f, "{}", self) + write!(f, "{self}") } } @@
-284,7 +284,7 @@ impl std::fmt::Display for ShardIndex impl std::fmt::Debug for ShardIndex { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // Debug is the same as Display: the compact hex representation - write!(f, "{}", self) + write!(f, "{self}") } } diff --git a/libs/utils/src/signals.rs b/libs/utils/src/signals.rs index 426bb65916..bdaa3cb665 100644 --- a/libs/utils/src/signals.rs +++ b/libs/utils/src/signals.rs @@ -29,7 +29,7 @@ impl ShutdownSignals { SIGINT => Signal::Interrupt, SIGTERM => Signal::Terminate, SIGQUIT => Signal::Quit, - other => panic!("unknown signal: {}", other), + other => panic!("unknown signal: {other}"), }; handler(signal)?; diff --git a/libs/vm_monitor/src/dispatcher.rs b/libs/vm_monitor/src/dispatcher.rs index 7b7201ab77..7bd6adc2f8 100644 --- a/libs/vm_monitor/src/dispatcher.rs +++ b/libs/vm_monitor/src/dispatcher.rs @@ -90,8 +90,7 @@ impl Dispatcher { Err(e) => { sink.send(Message::Text(Utf8Bytes::from( serde_json::to_string(&ProtocolResponse::Error(format!( - "Received protocol version range {} which does not overlap with {}", - agent_range, monitor_range + "Received protocol version range {agent_range} which does not overlap with {monitor_range}" ))) .unwrap(), ))) diff --git a/libs/vm_monitor/src/filecache.rs b/libs/vm_monitor/src/filecache.rs index bc42347e5a..55bbdea169 100644 --- a/libs/vm_monitor/src/filecache.rs +++ b/libs/vm_monitor/src/filecache.rs @@ -285,7 +285,7 @@ impl FileCacheState { // why we're constructing the query here. self.client .query( - &format!("ALTER SYSTEM SET neon.file_cache_size_limit = {};", num_mb), + &format!("ALTER SYSTEM SET neon.file_cache_size_limit = {num_mb};"), &[], ) .await diff --git a/libs/wal_decoder/benches/bench_interpret_wal.rs b/libs/wal_decoder/benches/bench_interpret_wal.rs index ed6ba4d267..ff860a92e2 100644 --- a/libs/wal_decoder/benches/bench_interpret_wal.rs +++ b/libs/wal_decoder/benches/bench_interpret_wal.rs @@ -64,7 +64,7 @@ async fn download_bench_data( let temp_dir_parent: Utf8PathBuf = env::current_dir().unwrap().try_into()?; let temp_dir = camino_tempfile::tempdir_in(temp_dir_parent)?; - eprintln!("Downloading benchmark data to {:?}", temp_dir); + eprintln!("Downloading benchmark data to {temp_dir:?}"); let listing = client .list(None, ListingMode::NoDelimiter, None, cancel) .await?; @@ -120,7 +120,7 @@ struct BenchmarkMetadata { } async fn load_bench_data(path: &Utf8Path, input_size: usize) -> anyhow::Result { - eprintln!("Loading benchmark data from {:?}", path); + eprintln!("Loading benchmark data from {path:?}"); let mut entries = tokio::fs::read_dir(path).await?; let mut ordered_segment_paths = Vec::new(); diff --git a/libs/wal_decoder/build.rs b/libs/wal_decoder/build.rs index d5b7ad02ad..e8acb52256 100644 --- a/libs/wal_decoder/build.rs +++ b/libs/wal_decoder/build.rs @@ -6,6 +6,6 @@ fn main() -> Result<(), Box<dyn std::error::Error>> { // the build then. Anyway, per cargo docs build script shouldn't output to // anywhere but $OUT_DIR.
tonic_build::compile_protos("proto/interpreted_wal.proto") - .unwrap_or_else(|e| panic!("failed to compile protos {:?}", e)); + .unwrap_or_else(|e| panic!("failed to compile protos {e:?}")); Ok(()) } diff --git a/libs/wal_decoder/src/models/record.rs b/libs/wal_decoder/src/models/record.rs index 73516c5220..51659ed904 100644 --- a/libs/wal_decoder/src/models/record.rs +++ b/libs/wal_decoder/src/models/record.rs @@ -128,6 +128,6 @@ pub fn describe_wal_record(rec: &NeonWalRecord) -> Result Ok(format!("{:?}", rec)), + _ => Ok(format!("{rec:?}")), } } diff --git a/libs/walproposer/src/api_bindings.rs b/libs/walproposer/src/api_bindings.rs index b89f1877fd..7c6abf252e 100644 --- a/libs/walproposer/src/api_bindings.rs +++ b/libs/walproposer/src/api_bindings.rs @@ -376,7 +376,7 @@ impl Level { FATAL => Level::Fatal, PANIC => Level::Panic, WPEVENT => Level::WPEvent, - _ => panic!("unknown log level {}", elevel), + _ => panic!("unknown log level {elevel}"), } } } @@ -446,7 +446,7 @@ pub fn empty_shmem() -> crate::bindings::WalproposerShmemState { impl std::fmt::Display for Level { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{:?}", self) + write!(f, "{self:?}") } } diff --git a/libs/walproposer/src/walproposer.rs b/libs/walproposer/src/walproposer.rs index c853658ddf..93bb0d5eb0 100644 --- a/libs/walproposer/src/walproposer.rs +++ b/libs/walproposer/src/walproposer.rs @@ -380,7 +380,7 @@ mod tests { } fn conn_send_query(&self, _: &mut crate::bindings::Safekeeper, query: &str) -> bool { - println!("conn_send_query: {}", query); + println!("conn_send_query: {query}"); true } @@ -399,13 +399,13 @@ mod tests { ) -> crate::bindings::PGAsyncReadResult { println!("conn_async_read"); let reply = self.next_safekeeper_reply(); - println!("conn_async_read result: {:?}", reply); + println!("conn_async_read result: {reply:?}"); vec.extend_from_slice(reply); crate::bindings::PGAsyncReadResult_PG_ASYNC_READ_SUCCESS } fn conn_blocking_write(&self, _: &mut crate::bindings::Safekeeper, buf: &[u8]) -> bool { - println!("conn_blocking_write: {:?}", buf); + println!("conn_blocking_write: {buf:?}"); self.check_walproposer_msg(buf); true } @@ -456,10 +456,7 @@ mod tests { timeout_millis: i64, ) -> super::WaitResult { let data = self.wait_events.get(); - println!( - "wait_event_set, timeout_millis={}, res={:?}", - timeout_millis, data - ); + println!("wait_event_set, timeout_millis={timeout_millis}, res={data:?}"); super::WaitResult::Network(data.sk, data.event_mask) } @@ -475,7 +472,7 @@ mod tests { } fn log_internal(&self, _wp: &mut crate::bindings::WalProposer, level: Level, msg: &str) { - println!("wp_log[{}] {}", level, msg); + println!("wp_log[{level}] {msg}"); } fn after_election(&self, _wp: &mut crate::bindings::WalProposer) { diff --git a/pageserver/client/src/mgmt_api.rs b/pageserver/client/src/mgmt_api.rs index 219e63c9d4..8b091684eb 100644 --- a/pageserver/client/src/mgmt_api.rs +++ b/pageserver/client/src/mgmt_api.rs @@ -508,11 +508,11 @@ impl Client { .expect("Cannot build URL"); path.query_pairs_mut() - .append_pair("recurse", &format!("{}", recurse)); + .append_pair("recurse", &format!("{recurse}")); if let Some(concurrency) = concurrency { path.query_pairs_mut() - .append_pair("concurrency", &format!("{}", concurrency)); + .append_pair("concurrency", &format!("{concurrency}")); } self.request(Method::POST, path, ()).await.map(|_| ()) diff --git a/pageserver/compaction/src/simulator/draw.rs b/pageserver/compaction/src/simulator/draw.rs index 3d35d1b91e..8322fe7d6d 100644 
--- a/pageserver/compaction/src/simulator/draw.rs +++ b/pageserver/compaction/src/simulator/draw.rs @@ -152,7 +152,7 @@ pub fn draw_history(history: &[LayerTraceEvent], mut output: let key_diff = key_end - key_start; if key_start >= key_end { - panic!("Invalid key range {}-{}", key_start, key_end); + panic!("Invalid key range {key_start}-{key_end}"); } let lsn_start = lsn_map.map(f.lsn_range.start); @@ -212,12 +212,12 @@ pub fn draw_history(history: &[LayerTraceEvent], mut output: )?; writeln!(svg, "")?; } - Ordering::Greater => panic!("Invalid lsn range {}-{}", lsn_start, lsn_end), + Ordering::Greater => panic!("Invalid lsn range {lsn_start}-{lsn_end}"), } files_seen.insert(f); } - writeln!(svg, "{}", EndSvg)?; + writeln!(svg, "{EndSvg}")?; let mut layer_events_str = String::new(); let mut first = true; diff --git a/pageserver/ctl/src/draw_timeline_dir.rs b/pageserver/ctl/src/draw_timeline_dir.rs index 881ebd49a7..2135d302c1 100644 --- a/pageserver/ctl/src/draw_timeline_dir.rs +++ b/pageserver/ctl/src/draw_timeline_dir.rs @@ -228,7 +228,7 @@ pub fn main() -> Result<()> { let lsn_max = lsn_map.len(); if key_start >= key_end { - panic!("Invalid key range {}-{}", key_start, key_end); + panic!("Invalid key range {key_start}-{key_end}"); } let lsn_start = *lsn_map.get(&lsnr.start).unwrap(); @@ -250,7 +250,7 @@ pub fn main() -> Result<()> { ymargin = 0.05; fill = Fill::Color(rgb(0, 0, 0)); } - Ordering::Greater => panic!("Invalid lsn range {}-{}", lsn_start, lsn_end), + Ordering::Greater => panic!("Invalid lsn range {lsn_start}-{lsn_end}"), } println!( @@ -287,10 +287,10 @@ pub fn main() -> Result<()> { ); } - println!("{}", EndSvg); + println!("{EndSvg}"); - eprintln!("num_images: {}", num_images); - eprintln!("num_deltas: {}", num_deltas); + eprintln!("num_images: {num_images}"); + eprintln!("num_deltas: {num_deltas}"); Ok(()) } diff --git a/pageserver/ctl/src/key.rs b/pageserver/ctl/src/key.rs index 600f7c412e..c4daafdfd0 100644 --- a/pageserver/ctl/src/key.rs +++ b/pageserver/ctl/src/key.rs @@ -372,7 +372,7 @@ impl std::fmt::Debug for RelTagish { f.write_char('/')?; } first = false; - write!(f, "{}", x) + write!(f, "{x}") }) } } diff --git a/pageserver/ctl/src/layer_map_analyzer.rs b/pageserver/ctl/src/layer_map_analyzer.rs index c49c8b58df..ef844fbd0f 100644 --- a/pageserver/ctl/src/layer_map_analyzer.rs +++ b/pageserver/ctl/src/layer_map_analyzer.rs @@ -224,8 +224,7 @@ pub(crate) async fn main(cmd: &AnalyzeLayerMapCmd) -> Result<()> { } } println!( - "Total delta layers {} image layers {} excess layers {}", - total_delta_layers, total_image_layers, total_excess_layers + "Total delta layers {total_delta_layers} image layers {total_image_layers} excess layers {total_excess_layers}" ); Ok(()) } diff --git a/pageserver/page_api/src/client.rs b/pageserver/page_api/src/client.rs index aa4774c056..3977ce7c23 100644 --- a/pageserver/page_api/src/client.rs +++ b/pageserver/page_api/src/client.rs @@ -131,7 +131,7 @@ impl Client { let domain_stream = response_stream.map(|chunk_res| { chunk_res.and_then(|proto_chunk| { proto_chunk.try_into().map_err(|e| { - tonic::Status::internal(format!("Failed to convert response chunk: {}", e)) + tonic::Status::internal(format!("Failed to convert response chunk: {e}")) }) }) }); diff --git a/pageserver/pagebench/src/cmd/aux_files.rs b/pageserver/pagebench/src/cmd/aux_files.rs index 6441c047c2..43d7a73399 100644 --- a/pageserver/pagebench/src/cmd/aux_files.rs +++ b/pageserver/pagebench/src/cmd/aux_files.rs @@ -62,7 +62,7 @@ async fn main_impl(args: Args) -> 
anyhow::Result<()> { let tenant_shard_id = TenantShardId::unsharded(timeline.tenant_id); let timeline_id = timeline.timeline_id; - println!("operating on timeline {}", timeline); + println!("operating on timeline {timeline}"); mgmt_api_client .set_tenant_config(&TenantConfigRequest { @@ -75,8 +75,8 @@ async fn main_impl(args: Args) -> anyhow::Result<()> { let items = (0..100) .map(|id| { ( - format!("pg_logical/mappings/{:03}.{:03}", batch, id), - format!("{:08}", id), + format!("pg_logical/mappings/{batch:03}.{id:03}"), + format!("{id:08}"), ) }) .collect::<HashMap<_, _>>(); diff --git a/pageserver/src/basebackup.rs b/pageserver/src/basebackup.rs index fe136b8bbd..c4aaff58a1 100644 --- a/pageserver/src/basebackup.rs +++ b/pageserver/src/basebackup.rs @@ -669,7 +669,7 @@ where } // Append dir path for each database - let path = format!("base/{}", dbnode); + let path = format!("base/{dbnode}"); let header = new_tar_header_dir(&path)?; self.ar .append(&header, io::empty()) .await .map_err(|e| BasebackupError::Client(e, "add_dbdir,base"))?; if let Some(img) = relmap_img { - let dst_path = format!("base/{}/PG_VERSION", dbnode); + let dst_path = format!("base/{dbnode}/PG_VERSION"); let pg_version_str = match self.timeline.pg_version { 14 | 15 => self.timeline.pg_version.to_string(), @@ -689,7 +689,7 @@ where .await .map_err(|e| BasebackupError::Client(e, "add_dbdir,base/PG_VERSION"))?; - let relmap_path = format!("base/{}/pg_filenode.map", dbnode); + let relmap_path = format!("base/{dbnode}/pg_filenode.map"); let header = new_tar_header(&relmap_path, img.len() as u64)?; self.ar .append(&header, &img[..]) @@ -714,9 +714,9 @@ where let crc = crc32c::crc32c(&img[..]); buf.put_u32_le(crc); let path = if self.timeline.pg_version < 17 { - format!("pg_twophase/{:>08X}", xid) + format!("pg_twophase/{xid:>08X}") } else { - format!("pg_twophase/{:>016X}", xid) + format!("pg_twophase/{xid:>016X}") }; let header = new_tar_header(&path, buf.len() as u64)?; self.ar @@ -768,7 +768,7 @@ where //send wal segment let segno = self.lsn.segment_number(WAL_SEGMENT_SIZE); let wal_file_name = XLogFileName(PG_TLI, segno, WAL_SEGMENT_SIZE); - let wal_file_path = format!("pg_wal/{}", wal_file_name); + let wal_file_path = format!("pg_wal/{wal_file_name}"); let header = new_tar_header(&wal_file_path, WAL_SEGMENT_SIZE as u64)?; let wal_seg = postgres_ffi::generate_wal_segment( diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index 2e9cd3ad70..349bc6dba6 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -287,11 +287,11 @@ impl From<GetActiveTenantError> for ApiError { GetActiveTenantError::WillNotBecomeActive(TenantState::Stopping { .. }) => { ApiError::ShuttingDown } - GetActiveTenantError::WillNotBecomeActive(_) => ApiError::Conflict(format!("{}", e)), + GetActiveTenantError::WillNotBecomeActive(_) => ApiError::Conflict(format!("{e}")), GetActiveTenantError::Cancelled => ApiError::ShuttingDown, GetActiveTenantError::NotFound(gte) => gte.into(), GetActiveTenantError::WaitForActiveTimeout { ..
} => { - ApiError::ResourceUnavailable(format!("{}", e).into()) + ApiError::ResourceUnavailable(format!("{e}").into()) } GetActiveTenantError::SwitchedTenant => { // in our HTTP handlers, this error doesn't happen @@ -1015,7 +1015,7 @@ async fn get_lsn_by_timestamp_handler( let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?; let timestamp_raw = must_get_query_param(&request, "timestamp")?; let timestamp = humantime::parse_rfc3339(&timestamp_raw) - .with_context(|| format!("Invalid time: {:?}", timestamp_raw)) + .with_context(|| format!("Invalid time: {timestamp_raw:?}")) .map_err(ApiError::BadRequest)?; let timestamp_pg = postgres_ffi::to_pg_timestamp(timestamp); @@ -1110,7 +1110,7 @@ async fn get_timestamp_of_lsn_handler( json_response(StatusCode::OK, time) } None => Err(ApiError::PreconditionFailed( - format!("Timestamp for lsn {} not found", lsn).into(), + format!("Timestamp for lsn {lsn} not found").into(), )), } } @@ -2421,7 +2421,7 @@ async fn timeline_offload_handler( } if let (false, reason) = timeline.can_offload() { return Err(ApiError::PreconditionFailed( - format!("Timeline::can_offload() check failed: {}", reason) .into(), + format!("Timeline::can_offload() check failed: {reason}") .into(), )); } offload_timeline(&tenant, &timeline) diff --git a/pageserver/src/metrics.rs b/pageserver/src/metrics.rs index 8d6d342cf9..7929b094b4 100644 --- a/pageserver/src/metrics.rs +++ b/pageserver/src/metrics.rs @@ -1727,12 +1727,7 @@ impl Drop for SmgrOpTimer { impl SmgrOpFlushInProgress { /// The caller must guarantee that `socket_fd`` outlives this function. - pub(crate) async fn measure<Fut, O>( - self, - started_at: Instant, - mut fut: Fut, - socket_fd: RawFd, - ) -> O + pub(crate) async fn measure<Fut, O>(self, started_at: Instant, fut: Fut, socket_fd: RawFd) -> O where Fut: std::future::Future<Output = O>, { diff --git a/pageserver/src/page_service.rs b/pageserver/src/page_service.rs index 032db34983..d3a1ca681e 100644 --- a/pageserver/src/page_service.rs +++ b/pageserver/src/page_service.rs @@ -392,16 +392,14 @@ async fn page_service_conn_main( } else { let tenant_id = conn_handler.timeline_handles.as_ref().unwrap().tenant_id(); Err(io_error).context(format!( - "Postgres connection error for tenant_id={:?} client at peer_addr={}", - tenant_id, peer_addr + "Postgres connection error for tenant_id={tenant_id:?} client at peer_addr={peer_addr}" )) } } other => { let tenant_id = conn_handler.timeline_handles.as_ref().unwrap().tenant_id(); other.context(format!( - "Postgres query error for tenant_id={:?} client peer_addr={}", - tenant_id, peer_addr + "Postgres query error for tenant_id={tenant_id:?} client peer_addr={peer_addr}" )) } } @@ -2140,8 +2138,7 @@ impl PageServerHandler { if request_lsn < not_modified_since { return Err(PageStreamError::BadRequest( format!( - "invalid request with request LSN {} and not_modified_since {}", - request_lsn, not_modified_since, + "invalid request with request LSN {request_lsn} and not_modified_since {not_modified_since}", ) .into(), )); diff --git a/pageserver/src/pgdatadir_mapping.rs b/pageserver/src/pgdatadir_mapping.rs index 58af2548ee..180a5b76e8 100644 --- a/pageserver/src/pgdatadir_mapping.rs +++ b/pageserver/src/pgdatadir_mapping.rs @@ -1185,7 +1185,7 @@ impl Timeline { } let origin_id = k.field6 as RepOriginId; let origin_lsn = Lsn::des(&v) - .with_context(|| format!("decode replorigin value for {}: {v:?}", origin_id))?; + .with_context(|| format!("decode replorigin value for {origin_id}: {v:?}"))?; if origin_lsn != Lsn::INVALID {
result.insert(origin_id, origin_lsn); } @@ -2440,8 +2440,7 @@ impl DatadirModification<'_> { if path == p { assert!( modifying_file.is_none(), - "duplicated entries found for {}", - path + "duplicated entries found for {path}" ); modifying_file = Some(content); } else { diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index d2c2fdef93..a48bf15246 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -3449,7 +3449,7 @@ impl TenantShard { use pageserver_api::models::ActivatingFrom; match &*current_state { TenantState::Activating(_) | TenantState::Active | TenantState::Broken { .. } | TenantState::Stopping { .. } => { - panic!("caller is responsible for calling activate() only on Loading / Attaching tenants, got {state:?}", state = current_state); + panic!("caller is responsible for calling activate() only on Loading / Attaching tenants, got {current_state:?}"); } TenantState::Attaching => { *current_state = TenantState::Activating(ActivatingFrom::Attaching); @@ -6616,7 +6616,7 @@ mod tests { .put( *TEST_KEY, lsn, - &Value::Image(test_img(&format!("foo at {}", lsn))), + &Value::Image(test_img(&format!("foo at {lsn}"))), ctx, ) .await?; @@ -6626,7 +6626,7 @@ mod tests { .put( *TEST_KEY, lsn, - &Value::Image(test_img(&format!("foo at {}", lsn))), + &Value::Image(test_img(&format!("foo at {lsn}"))), ctx, ) .await?; @@ -6640,7 +6640,7 @@ mod tests { .put( *TEST_KEY, lsn, - &Value::Image(test_img(&format!("foo at {}", lsn))), + &Value::Image(test_img(&format!("foo at {lsn}"))), ctx, ) .await?; @@ -6650,7 +6650,7 @@ mod tests { .put( *TEST_KEY, lsn, - &Value::Image(test_img(&format!("foo at {}", lsn))), + &Value::Image(test_img(&format!("foo at {lsn}"))), ctx, ) .await?; @@ -7149,7 +7149,7 @@ mod tests { .put( test_key, lsn, - &Value::Image(test_img(&format!("{} at {}", blknum, lsn))), + &Value::Image(test_img(&format!("{blknum} at {lsn}"))), ctx, ) .await?; @@ -7437,7 +7437,7 @@ mod tests { .put( gap_at_key, current_lsn, - &Value::Image(test_img(&format!("{} at {}", gap_at_key, current_lsn))), + &Value::Image(test_img(&format!("{gap_at_key} at {current_lsn}"))), &ctx, ) .await?; @@ -7476,7 +7476,7 @@ mod tests { .put( current_key, current_lsn, - &Value::Image(test_img(&format!("{} at {}", current_key, current_lsn))), + &Value::Image(test_img(&format!("{current_key} at {current_lsn}"))), &ctx, ) .await?; @@ -7584,7 +7584,7 @@ mod tests { while key < end_key { current_lsn += 0x10; - let image_value = format!("{} at {}", child_gap_at_key, current_lsn); + let image_value = format!("{child_gap_at_key} at {current_lsn}"); let mut writer = parent_timeline.writer().await; writer @@ -7627,7 +7627,7 @@ mod tests { .put( key, current_lsn, - &Value::Image(test_img(&format!("{} at {}", key, current_lsn))), + &Value::Image(test_img(&format!("{key} at {current_lsn}"))), &ctx, ) .await?; @@ -7748,7 +7748,7 @@ mod tests { .put( test_key, lsn, - &Value::Image(test_img(&format!("{} at {}", blknum, lsn))), + &Value::Image(test_img(&format!("{blknum} at {lsn}"))), &ctx, ) .await?; @@ -7769,7 +7769,7 @@ mod tests { .put( test_key, lsn, - &Value::Image(test_img(&format!("{} at {}", blknum, lsn))), + &Value::Image(test_img(&format!("{blknum} at {lsn}"))), &ctx, ) .await?; @@ -7783,7 +7783,7 @@ mod tests { test_key.field6 = blknum as u32; assert_eq!( tline.get(test_key, lsn, &ctx).await?, - test_img(&format!("{} at {}", blknum, last_lsn)) + test_img(&format!("{blknum} at {last_lsn}")) ); } @@ -7829,7 +7829,7 @@ mod tests { .put( test_key, lsn, - &Value::Image(test_img(&format!("{} at {}", 
blknum, lsn))), + &Value::Image(test_img(&format!("{blknum} at {lsn}"))), &ctx, ) .await?; @@ -7858,11 +7858,11 @@ mod tests { .put( test_key, lsn, - &Value::Image(test_img(&format!("{} at {}", blknum, lsn))), + &Value::Image(test_img(&format!("{blknum} at {lsn}"))), &ctx, ) .await?; - println!("updating {} at {}", blknum, lsn); + println!("updating {blknum} at {lsn}"); writer.finish_write(lsn); drop(writer); updated[blknum] = lsn; @@ -7873,7 +7873,7 @@ mod tests { test_key.field6 = blknum as u32; assert_eq!( tline.get(test_key, lsn, &ctx).await?, - test_img(&format!("{} at {}", blknum, last_lsn)) + test_img(&format!("{blknum} at {last_lsn}")) ); } @@ -7926,11 +7926,11 @@ mod tests { .put( test_key, lsn, - &Value::Image(test_img(&format!("{} {} at {}", idx, blknum, lsn))), + &Value::Image(test_img(&format!("{idx} {blknum} at {lsn}"))), &ctx, ) .await?; - println!("updating [{}][{}] at {}", idx, blknum, lsn); + println!("updating [{idx}][{blknum}] at {lsn}"); writer.finish_write(lsn); drop(writer); updated[idx][blknum] = lsn; @@ -8136,7 +8136,7 @@ mod tests { .put( test_key, lsn, - &Value::Image(test_img(&format!("{} at {}", blknum, lsn))), + &Value::Image(test_img(&format!("{blknum} at {lsn}"))), &ctx, ) .await?; @@ -8153,7 +8153,7 @@ mod tests { test_key.field6 = (blknum * STEP) as u32; assert_eq!( tline.get(test_key, lsn, &ctx).await?, - test_img(&format!("{} at {}", blknum, last_lsn)) + test_img(&format!("{blknum} at {last_lsn}")) ); } @@ -8190,7 +8190,7 @@ mod tests { .put( test_key, lsn, - &Value::Image(test_img(&format!("{} at {}", blknum, lsn))), + &Value::Image(test_img(&format!("{blknum} at {lsn}"))), &ctx, ) .await?; @@ -8443,7 +8443,7 @@ mod tests { .put( test_key, lsn, - &Value::Image(test_img(&format!("{} at {}", blknum, lsn))), + &Value::Image(test_img(&format!("{blknum} at {lsn}"))), &ctx, ) .await?; @@ -8463,7 +8463,7 @@ mod tests { .put( test_key, lsn, - &Value::Image(test_img(&format!("{} at {}", blknum, lsn))), + &Value::Image(test_img(&format!("{blknum} at {lsn}"))), &ctx, ) .await?; @@ -9384,12 +9384,7 @@ mod tests { let end_lsn = Lsn(0x100); let image_layers = (0x20..=0x90) .step_by(0x10) - .map(|n| { - ( - Lsn(n), - vec![(key, test_img(&format!("data key at {:x}", n)))], - ) - }) + .map(|n| (Lsn(n), vec![(key, test_img(&format!("data key at {n:x}")))])) .collect(); let timeline = tenant diff --git a/pageserver/src/tenant/checks.rs b/pageserver/src/tenant/checks.rs index d5b979ab2a..83d54f09de 100644 --- a/pageserver/src/tenant/checks.rs +++ b/pageserver/src/tenant/checks.rs @@ -63,8 +63,7 @@ pub fn check_valid_layermap(metadata: &[LayerName]) -> Option { && overlaps_with(&layer.key_range, &other_layer.key_range) { let err = format!( - "layer violates the layer map LSN split assumption: layer {} intersects with layer {}", - layer, other_layer + "layer violates the layer map LSN split assumption: layer {layer} intersects with layer {other_layer}" ); return Some(err); } diff --git a/pageserver/src/tenant/metadata.rs b/pageserver/src/tenant/metadata.rs index bea3128265..5081d7f5a4 100644 --- a/pageserver/src/tenant/metadata.rs +++ b/pageserver/src/tenant/metadata.rs @@ -550,8 +550,7 @@ mod tests { assert_eq!( deserialized_metadata.body, expected_metadata.body, - "Metadata of the old version {} should be upgraded to the latest version {}", - METADATA_OLD_FORMAT_VERSION, METADATA_FORMAT_VERSION + "Metadata of the old version {METADATA_OLD_FORMAT_VERSION} should be upgraded to the latest version {METADATA_FORMAT_VERSION}" ); } diff --git 
a/pageserver/src/tenant/secondary/downloader.rs b/pageserver/src/tenant/secondary/downloader.rs index dd49c843f3..6b315dc4bc 100644 --- a/pageserver/src/tenant/secondary/downloader.rs +++ b/pageserver/src/tenant/secondary/downloader.rs @@ -1427,7 +1427,7 @@ async fn init_timeline_state( let local_meta = dentry .metadata() .await - .fatal_err(&format!("Read metadata on {}", file_path)); + .fatal_err(&format!("Read metadata on {file_path}")); let file_name = file_path.file_name().expect("created it from the dentry"); if crate::is_temporary(&file_path) diff --git a/pageserver/src/tenant/storage_layer/delta_layer.rs b/pageserver/src/tenant/storage_layer/delta_layer.rs index ba763d4c3f..0f31318f0c 100644 --- a/pageserver/src/tenant/storage_layer/delta_layer.rs +++ b/pageserver/src/tenant/storage_layer/delta_layer.rs @@ -783,7 +783,7 @@ impl DeltaLayer { ctx, ) .await - .with_context(|| format!("Failed to open file '{}'", path))?; + .with_context(|| format!("Failed to open file '{path}'"))?; let file_id = page_cache::next_file_id(); let block_reader = FileBlockReader::new(&file, file_id); let summary_blk = block_reader.read_blk(0, ctx).await?; @@ -1401,7 +1401,7 @@ impl DeltaLayerInner { match val { Value::Image(img) => { let checkpoint = CheckPoint::decode(&img)?; - println!(" CHECKPOINT: {:?}", checkpoint); + println!(" CHECKPOINT: {checkpoint:?}"); } Value::WalRecord(_rec) => { println!(" unexpected walrecord value for checkpoint key"); diff --git a/pageserver/src/tenant/storage_layer/image_layer.rs b/pageserver/src/tenant/storage_layer/image_layer.rs index d6f5f48a6e..9f76f697d3 100644 --- a/pageserver/src/tenant/storage_layer/image_layer.rs +++ b/pageserver/src/tenant/storage_layer/image_layer.rs @@ -272,8 +272,7 @@ impl ImageLayer { conf.timeline_path(&tenant_shard_id, &timeline_id) .join(format!( - "{fname}.{:x}.{TEMP_FILE_SUFFIX}", - filename_disambiguator + "{fname}.{filename_disambiguator:x}.{TEMP_FILE_SUFFIX}" )) } @@ -370,7 +369,7 @@ impl ImageLayer { ctx, ) .await - .with_context(|| format!("Failed to open file '{}'", path))?; + .with_context(|| format!("Failed to open file '{path}'"))?; let file_id = page_cache::next_file_id(); let block_reader = FileBlockReader::new(&file, file_id); let summary_blk = block_reader.read_blk(0, ctx).await?; @@ -1475,7 +1474,7 @@ mod test { assert_eq!(l1, expect_lsn); assert_eq!(&i1, i2); } - (o1, o2) => panic!("iterators length mismatch: {:?}, {:?}", o1, o2), + (o1, o2) => panic!("iterators length mismatch: {o1:?}, {o2:?}"), } } } diff --git a/pageserver/src/tenant/storage_layer/inmemory_layer.rs b/pageserver/src/tenant/storage_layer/inmemory_layer.rs index 8e5b0ba648..c4d53c6405 100644 --- a/pageserver/src/tenant/storage_layer/inmemory_layer.rs +++ b/pageserver/src/tenant/storage_layer/inmemory_layer.rs @@ -511,7 +511,7 @@ fn inmem_layer_log_display( start_lsn: Lsn, end_lsn: Lsn, ) -> std::fmt::Result { - write!(f, "timeline {} in-memory ", timeline)?; + write!(f, "timeline {timeline} in-memory ")?; inmem_layer_display(f, start_lsn, end_lsn) } diff --git a/pageserver/src/tenant/storage_layer/inmemory_layer/vectored_dio_read.rs b/pageserver/src/tenant/storage_layer/inmemory_layer/vectored_dio_read.rs index ea354fc716..27fbc6f5fb 100644 --- a/pageserver/src/tenant/storage_layer/inmemory_layer/vectored_dio_read.rs +++ b/pageserver/src/tenant/storage_layer/inmemory_layer/vectored_dio_read.rs @@ -380,7 +380,7 @@ impl std::fmt::Debug for LogicalReadState { write!(f, "Ongoing({:?})", BufferDebug::from(b as &dyn Buffer)) } LogicalReadState::Ok(b) => 
write!(f, "Ok({:?})", BufferDebug::from(b as &dyn Buffer)), - LogicalReadState::Error(e) => write!(f, "Error({:?})", e), + LogicalReadState::Error(e) => write!(f, "Error({e:?})"), LogicalReadState::Undefined => write!(f, "Undefined"), } } diff --git a/pageserver/src/tenant/storage_layer/layer.rs b/pageserver/src/tenant/storage_layer/layer.rs index 3d55972017..0be13e67a8 100644 --- a/pageserver/src/tenant/storage_layer/layer.rs +++ b/pageserver/src/tenant/storage_layer/layer.rs @@ -105,7 +105,7 @@ impl std::fmt::Display for Layer { impl std::fmt::Debug for Layer { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self) + write!(f, "{self}") } } diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 1bb17af146..81f2646e5a 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -178,7 +178,7 @@ pub enum LastImageLayerCreationStatus { impl std::fmt::Display for ImageLayerCreationMode { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self) + write!(f, "{self:?}") } } @@ -632,7 +632,7 @@ pub enum ReadPathLayerId { impl std::fmt::Display for ReadPathLayerId { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - ReadPathLayerId::PersistentLayer(key) => write!(f, "{}", key), + ReadPathLayerId::PersistentLayer(key) => write!(f, "{key}"), ReadPathLayerId::InMemoryLayer(range) => { write!(f, "in-mem {}..{}", range.start, range.end) } @@ -708,7 +708,7 @@ impl MissingKeyError { impl std::fmt::Debug for MissingKeyError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self) + write!(f, "{self}") } } @@ -721,19 +721,19 @@ impl std::fmt::Display for MissingKeyError { )?; if let Some(ref ancestor_lsn) = self.ancestor_lsn { - write!(f, ", ancestor {}", ancestor_lsn)?; + write!(f, ", ancestor {ancestor_lsn}")?; } if let Some(ref query) = self.query { - write!(f, ", query {}", query)?; + write!(f, ", query {query}")?; } if let Some(ref read_path) = self.read_path { - write!(f, "\n{}", read_path)?; + write!(f, "\n{read_path}")?; } if let Some(ref backtrace) = self.backtrace { - write!(f, "\n{}", backtrace)?; + write!(f, "\n{backtrace}")?; } Ok(()) @@ -7179,9 +7179,7 @@ impl Timeline { if let Some(end) = layer_end_lsn { assert!( end <= last_record_lsn, - "advance last record lsn before inserting a layer, end_lsn={}, last_record_lsn={}", - end, - last_record_lsn, + "advance last record lsn before inserting a layer, end_lsn={end}, last_record_lsn={last_record_lsn}", ); } diff --git a/pageserver/src/tenant/timeline/compaction.rs b/pageserver/src/tenant/timeline/compaction.rs index 6039c002f7..1b8e5f4b9c 100644 --- a/pageserver/src/tenant/timeline/compaction.rs +++ b/pageserver/src/tenant/timeline/compaction.rs @@ -977,7 +977,7 @@ impl KeyHistoryRetention { tline .reconstruct_value(key, lsn, data, RedoAttemptType::GcCompaction) .await - .with_context(|| format!("verification failed for key {} at lsn {}", key, lsn))?; + .with_context(|| format!("verification failed for key {key} at lsn {lsn}"))?; Ok(()) } @@ -2647,15 +2647,15 @@ impl Timeline { use std::fmt::Write; let mut output = String::new(); if let Some((key, _, _)) = replay_history.first() { - write!(output, "key={} ", key).unwrap(); + write!(output, "key={key} ").unwrap(); let mut cnt = 0; for (_, lsn, val) in replay_history { if val.is_image() { - write!(output, "i@{} ", lsn).unwrap(); + write!(output, "i@{lsn} ").unwrap(); } else if 
val.will_init() { - write!(output, "di@{} ", lsn).unwrap(); + write!(output, "di@{lsn} ").unwrap(); } else { - write!(output, "d@{} ", lsn).unwrap(); + write!(output, "d@{lsn} ").unwrap(); } cnt += 1; if cnt >= 128 { diff --git a/pageserver/src/tenant/timeline/walreceiver/walreceiver_connection.rs b/pageserver/src/tenant/timeline/walreceiver/walreceiver_connection.rs index 343e04f5f0..e91bd5d43a 100644 --- a/pageserver/src/tenant/timeline/walreceiver/walreceiver_connection.rs +++ b/pageserver/src/tenant/timeline/walreceiver/walreceiver_connection.rs @@ -360,8 +360,7 @@ pub(super) async fn handle_walreceiver_connection( match raw_wal_start_lsn.cmp(&expected_wal_start) { std::cmp::Ordering::Greater => { let msg = format!( - "Gap in streamed WAL: [{}, {})", - expected_wal_start, raw_wal_start_lsn + "Gap in streamed WAL: [{expected_wal_start}, {raw_wal_start_lsn})" ); critical!("{msg}"); return Err(WalReceiverError::Other(anyhow!(msg))); diff --git a/pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/buffer.rs b/pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/buffer.rs index 090d2ece85..85ea5c4d80 100644 --- a/pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/buffer.rs +++ b/pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/buffer.rs @@ -68,16 +68,9 @@ impl AlignedBuffer { assert!( begin <= end, - "range start must not be greater than end: {:?} <= {:?}", - begin, - end, - ); - assert!( - end <= len, - "range end out of bounds: {:?} <= {:?}", - end, - len, + "range start must not be greater than end: {begin:?} <= {end:?}", ); + assert!(end <= len, "range end out of bounds: {end:?} <= {len:?}",); let begin = self.range.start + begin; let end = self.range.start + end; diff --git a/pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/buffer_mut.rs b/pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/buffer_mut.rs index 07f949b89e..93116ea85e 100644 --- a/pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/buffer_mut.rs +++ b/pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/buffer_mut.rs @@ -242,10 +242,7 @@ unsafe impl bytes::BufMut for AlignedBufferMut { /// Panic with a nice error message. #[cold] fn panic_advance(idx: usize, len: usize) -> ! 
{ - panic!( - "advance out of bounds: the len is {} but advancing by {}", - len, idx - ); + panic!("advance out of bounds: the len is {len} but advancing by {idx}"); } /// Safety: [`AlignedBufferMut`] has exclusive ownership of the io buffer, diff --git a/pageserver/src/walingest.rs b/pageserver/src/walingest.rs index ebffaf70e2..c452f48e40 100644 --- a/pageserver/src/walingest.rs +++ b/pageserver/src/walingest.rs @@ -2108,7 +2108,7 @@ mod tests { // Check relation content for blkno in 0..relsize { let lsn = Lsn(0x20); - let data = format!("foo blk {} at {}", blkno, lsn); + let data = format!("foo blk {blkno} at {lsn}"); assert_eq!( tline .get_rel_page_at_lsn( @@ -2142,7 +2142,7 @@ mod tests { for blkno in 0..1 { let lsn = Lsn(0x20); - let data = format!("foo blk {} at {}", blkno, lsn); + let data = format!("foo blk {blkno} at {lsn}"); assert_eq!( tline .get_rel_page_at_lsn( @@ -2167,7 +2167,7 @@ mod tests { ); for blkno in 0..relsize { let lsn = Lsn(0x20); - let data = format!("foo blk {} at {}", blkno, lsn); + let data = format!("foo blk {blkno} at {lsn}"); assert_eq!( tline .get_rel_page_at_lsn( @@ -2188,7 +2188,7 @@ mod tests { let lsn = Lsn(0x80); let mut m = tline.begin_modification(lsn); for blkno in 0..relsize { - let data = format!("foo blk {} at {}", blkno, lsn); + let data = format!("foo blk {blkno} at {lsn}"); walingest .put_rel_page_image(&mut m, TESTREL_A, blkno, test_img(&data), &ctx) .await?; @@ -2210,7 +2210,7 @@ mod tests { // Check relation content for blkno in 0..relsize { let lsn = Lsn(0x80); - let data = format!("foo blk {} at {}", blkno, lsn); + let data = format!("foo blk {blkno} at {lsn}"); assert_eq!( tline .get_rel_page_at_lsn( @@ -2414,6 +2414,6 @@ mod tests { } let duration = started_at.elapsed(); - println!("done in {:?}", duration); + println!("done in {duration:?}"); } } diff --git a/pageserver/src/walredo/apply_neon.rs b/pageserver/src/walredo/apply_neon.rs index 0783c77622..a525579082 100644 --- a/pageserver/src/walredo/apply_neon.rs +++ b/pageserver/src/walredo/apply_neon.rs @@ -52,8 +52,7 @@ pub(crate) fn apply_in_neon( let (rel, _) = key.to_rel_block().context("invalid record")?; assert!( rel.forknum == VISIBILITYMAP_FORKNUM, - "TruncateVisibilityMap record on unexpected rel {}", - rel + "TruncateVisibilityMap record on unexpected rel {rel}" ); let map = &mut page[pg_constants::MAXALIGN_SIZE_OF_PAGE_HEADER_DATA..]; map[*trunc_byte + 1..].fill(0u8); @@ -78,8 +77,7 @@ pub(crate) fn apply_in_neon( let (rel, blknum) = key.to_rel_block().context("invalid record")?; assert!( rel.forknum == VISIBILITYMAP_FORKNUM, - "ClearVisibilityMapFlags record on unexpected rel {}", - rel + "ClearVisibilityMapFlags record on unexpected rel {rel}" ); if let Some(heap_blkno) = *new_heap_blkno { // Calculate the VM block and offset that corresponds to the heap block. @@ -124,8 +122,7 @@ pub(crate) fn apply_in_neon( assert_eq!( slru_kind, SlruKind::Clog, - "ClogSetCommitted record with unexpected key {}", - key + "ClogSetCommitted record with unexpected key {key}" ); for &xid in xids { let pageno = xid / pg_constants::CLOG_XACTS_PER_PAGE; @@ -135,15 +132,11 @@ pub(crate) fn apply_in_neon( // Check that we're modifying the correct CLOG block. 
assert!( segno == expected_segno, - "ClogSetCommitted record for XID {} with unexpected key {}", - xid, - key + "ClogSetCommitted record for XID {xid} with unexpected key {key}" ); assert!( blknum == expected_blknum, - "ClogSetCommitted record for XID {} with unexpected key {}", - xid, - key + "ClogSetCommitted record for XID {xid} with unexpected key {key}" ); transaction_id_set_status(xid, pg_constants::TRANSACTION_STATUS_COMMITTED, page); @@ -169,8 +162,7 @@ pub(crate) fn apply_in_neon( assert_eq!( slru_kind, SlruKind::Clog, - "ClogSetAborted record with unexpected key {}", - key + "ClogSetAborted record with unexpected key {key}" ); for &xid in xids { let pageno = xid / pg_constants::CLOG_XACTS_PER_PAGE; @@ -180,15 +172,11 @@ pub(crate) fn apply_in_neon( // Check that we're modifying the correct CLOG block. assert!( segno == expected_segno, - "ClogSetAborted record for XID {} with unexpected key {}", - xid, - key + "ClogSetAborted record for XID {xid} with unexpected key {key}" ); assert!( blknum == expected_blknum, - "ClogSetAborted record for XID {} with unexpected key {}", - xid, - key + "ClogSetAborted record for XID {xid} with unexpected key {key}" ); transaction_id_set_status(xid, pg_constants::TRANSACTION_STATUS_ABORTED, page); @@ -199,8 +187,7 @@ pub(crate) fn apply_in_neon( assert_eq!( slru_kind, SlruKind::MultiXactOffsets, - "MultixactOffsetCreate record with unexpected key {}", - key + "MultixactOffsetCreate record with unexpected key {key}" ); // Compute the block and offset to modify. // See RecordNewMultiXact in PostgreSQL sources. @@ -213,15 +200,11 @@ pub(crate) fn apply_in_neon( let expected_blknum = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT; assert!( segno == expected_segno, - "MultiXactOffsetsCreate record for multi-xid {} with unexpected key {}", - mid, - key + "MultiXactOffsetsCreate record for multi-xid {mid} with unexpected key {key}" ); assert!( blknum == expected_blknum, - "MultiXactOffsetsCreate record for multi-xid {} with unexpected key {}", - mid, - key + "MultiXactOffsetsCreate record for multi-xid {mid} with unexpected key {key}" ); LittleEndian::write_u32(&mut page[offset..offset + 4], *moff); @@ -231,8 +214,7 @@ pub(crate) fn apply_in_neon( assert_eq!( slru_kind, SlruKind::MultiXactMembers, - "MultixactMembersCreate record with unexpected key {}", - key + "MultixactMembersCreate record with unexpected key {key}" ); for (i, member) in members.iter().enumerate() { let offset = moff + i as u32; @@ -249,15 +231,11 @@ pub(crate) fn apply_in_neon( let expected_blknum = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT; assert!( segno == expected_segno, - "MultiXactMembersCreate record for offset {} with unexpected key {}", - moff, - key + "MultiXactMembersCreate record for offset {moff} with unexpected key {key}" ); assert!( blknum == expected_blknum, - "MultiXactMembersCreate record for offset {} with unexpected key {}", - moff, - key + "MultiXactMembersCreate record for offset {moff} with unexpected key {key}" ); let mut flagsval = LittleEndian::read_u32(&page[flagsoff..flagsoff + 4]); diff --git a/proxy/src/lib.rs b/proxy/src/lib.rs index d96f582fad..263d784e78 100644 --- a/proxy/src/lib.rs +++ b/proxy/src/lib.rs @@ -61,6 +61,10 @@ clippy::too_many_lines, clippy::unused_self )] +#![allow( + clippy::unsafe_derive_deserialize, + reason = "false positive: https://github.com/rust-lang/rust-clippy/issues/15120" +)] #![cfg_attr( any(test, feature = "testing"), allow( diff --git a/safekeeper/src/control_file.rs b/safekeeper/src/control_file.rs index 
1bf3e4cac1..4fc62fb229 100644 --- a/safekeeper/src/control_file.rs +++ b/safekeeper/src/control_file.rs @@ -206,16 +206,10 @@ impl Storage for FileStorage { let buf: Vec<u8> = s.write_to_buf()?; control_partial.write_all(&buf).await.with_context(|| { - format!( - "failed to write safekeeper state into control file at: {}", - control_partial_path - ) + format!("failed to write safekeeper state into control file at: {control_partial_path}") })?; control_partial.flush().await.with_context(|| { - format!( - "failed to flush safekeeper state into control file at: {}", - control_partial_path - ) + format!("failed to flush safekeeper state into control file at: {control_partial_path}") })?; let control_path = self.timeline_dir.join(CONTROL_FILE_NAME); diff --git a/safekeeper/src/handler.rs b/safekeeper/src/handler.rs index b54bee8bfb..5e7f1d8758 100644 --- a/safekeeper/src/handler.rs +++ b/safekeeper/src/handler.rs @@ -73,7 +73,7 @@ fn parse_cmd(cmd: &str) -> anyhow::Result<SafekeeperPostgresCommand> { let re = Regex::new(r"START_WAL_PUSH(\s+?\((.*)\))?").unwrap(); let caps = re .captures(cmd) - .context(format!("failed to parse START_WAL_PUSH command {}", cmd))?; + .context(format!("failed to parse START_WAL_PUSH command {cmd}"))?; // capture () content let options = caps.get(2).map(|m| m.as_str()).unwrap_or(""); // default values @@ -85,24 +85,20 @@ fn parse_cmd(cmd: &str) -> anyhow::Result<SafekeeperPostgresCommand> { } let mut kvit = kvstr.split_whitespace(); let key = kvit.next().context(format!( - "failed to parse key in kv {} in command {}", - kvstr, cmd + "failed to parse key in kv {kvstr} in command {cmd}" ))?; let value = kvit.next().context(format!( - "failed to parse value in kv {} in command {}", - kvstr, cmd + "failed to parse value in kv {kvstr} in command {cmd}" ))?; let value_trimmed = value.trim_matches('\''); if key == "proto_version" { proto_version = value_trimmed.parse::<u32>().context(format!( - "failed to parse proto_version value {} in command {}", - value, cmd + "failed to parse proto_version value {value} in command {cmd}" ))?; } if key == "allow_timeline_creation" { allow_timeline_creation = value_trimmed.parse::<bool>().context(format!( - "failed to parse allow_timeline_creation value {} in command {}", - value, cmd + "failed to parse allow_timeline_creation value {value} in command {cmd}" ))?; } } @@ -118,7 +114,7 @@ fn parse_cmd(cmd: &str) -> anyhow::Result<SafekeeperPostgresCommand> { .unwrap(); let caps = re .captures(cmd) - .context(format!("failed to parse START_REPLICATION command {}", cmd))?; + .context(format!("failed to parse START_REPLICATION command {cmd}"))?; let start_lsn = Lsn::from_str(&caps[1]).context("parse start LSN from START_REPLICATION command")?; let term = if let Some(m) = caps.get(2) { diff --git a/safekeeper/src/safekeeper.rs b/safekeeper/src/safekeeper.rs index 886cac869d..aa19b6d283 100644 --- a/safekeeper/src/safekeeper.rs +++ b/safekeeper/src/safekeeper.rs @@ -64,10 +64,10 @@ impl TermHistory { for i in 0..n_entries { let term = bytes .get_u64_f() - .with_context(|| format!("TermHistory pos {} misses term", i))?; + .with_context(|| format!("TermHistory pos {i} misses term"))?; let lsn = bytes .get_u64_f() - .with_context(|| format!("TermHistory pos {} misses lsn", i))? + .with_context(|| format!("TermHistory pos {i} misses lsn"))?
.into(); res.push(TermLsn { term, lsn }) } @@ -121,9 +121,7 @@ impl TermHistory { if let Some(sk_th_last) = sk_th.last() { assert!( sk_th_last.lsn <= sk_wal_end, - "safekeeper term history end {:?} LSN is higher than WAL end {:?}", - sk_th_last, - sk_wal_end + "safekeeper term history end {sk_th_last:?} LSN is higher than WAL end {sk_wal_end:?}" ); } @@ -438,11 +436,11 @@ impl ProposerAcceptorMessage { for i in 0..members_len { let id = buf .get_u64_f() - .with_context(|| format!("reading member {} node_id", i))?; - let host = Self::get_cstr(buf).with_context(|| format!("reading member {} host", i))?; + .with_context(|| format!("reading member {i} node_id"))?; + let host = Self::get_cstr(buf).with_context(|| format!("reading member {i} host"))?; let pg_port = buf .get_u16_f() - .with_context(|| format!("reading member {} port", i))?; + .with_context(|| format!("reading member {i} port"))?; let sk = SafekeeperId { id: NodeId(id), host, @@ -463,12 +461,12 @@ impl ProposerAcceptorMessage { for i in 0..new_members_len { let id = buf .get_u64_f() - .with_context(|| format!("reading new member {} node_id", i))?; - let host = Self::get_cstr(buf) - .with_context(|| format!("reading new member {} host", i))?; + .with_context(|| format!("reading new member {i} node_id"))?; + let host = + Self::get_cstr(buf).with_context(|| format!("reading new member {i} host"))?; let pg_port = buf .get_u16_f() - .with_context(|| format!("reading new member {} port", i))?; + .with_context(|| format!("reading new member {i} port"))?; let sk = SafekeeperId { id: NodeId(id), host, @@ -1508,7 +1506,7 @@ mod tests { let mut vote_resp = sk.process_msg(&vote_request).await; match vote_resp.unwrap() { Some(AcceptorProposerMessage::VoteResponse(resp)) => assert!(resp.vote_given), - r => panic!("unexpected response: {:?}", r), + r => panic!("unexpected response: {r:?}"), } // reboot... 
@@ -1523,7 +1521,7 @@ mod tests { vote_resp = sk.process_msg(&vote_request).await; match vote_resp.unwrap() { Some(AcceptorProposerMessage::VoteResponse(resp)) => assert!(!resp.vote_given), - r => panic!("unexpected response: {:?}", r), + r => panic!("unexpected response: {r:?}"), } } diff --git a/safekeeper/src/timeline_eviction.rs b/safekeeper/src/timeline_eviction.rs index e817dbf6f9..47b65a579a 100644 --- a/safekeeper/src/timeline_eviction.rs +++ b/safekeeper/src/timeline_eviction.rs @@ -342,7 +342,7 @@ where let bytes_read1 = reader1 .read(&mut buffer1[..bytes_to_read]) .await - .with_context(|| format!("failed to read from reader1 at offset {}", offset))?; + .with_context(|| format!("failed to read from reader1 at offset {offset}"))?; if bytes_read1 == 0 { anyhow::bail!("unexpected EOF from reader1 at offset {}", offset); } @@ -351,10 +351,7 @@ where .read_exact(&mut buffer2[..bytes_read1]) .await .with_context(|| { - format!( - "failed to read {} bytes from reader2 at offset {}", - bytes_read1, offset - ) + format!("failed to read {bytes_read1} bytes from reader2 at offset {offset}") })?; assert!(bytes_read2 == bytes_read1); diff --git a/safekeeper/src/timeline_manager.rs b/safekeeper/src/timeline_manager.rs index 48eda92fed..a68752bfdd 100644 --- a/safekeeper/src/timeline_manager.rs +++ b/safekeeper/src/timeline_manager.rs @@ -108,7 +108,7 @@ impl std::fmt::Debug for ManagerCtlMessage { match self { ManagerCtlMessage::GuardRequest(_) => write!(f, "GuardRequest"), ManagerCtlMessage::TryGuardRequest(_) => write!(f, "TryGuardRequest"), - ManagerCtlMessage::GuardDrop(id) => write!(f, "GuardDrop({:?})", id), + ManagerCtlMessage::GuardDrop(id) => write!(f, "GuardDrop({id:?})"), ManagerCtlMessage::BackupPartialReset(_) => write!(f, "BackupPartialReset"), } } diff --git a/safekeeper/src/timelines_global_map.rs b/safekeeper/src/timelines_global_map.rs index 6e41ada1b3..a81a7298a9 100644 --- a/safekeeper/src/timelines_global_map.rs +++ b/safekeeper/src/timelines_global_map.rs @@ -147,7 +147,7 @@ impl GlobalTimelines { }; let mut tenant_count = 0; for tenants_dir_entry in std::fs::read_dir(&tenants_dir) - .with_context(|| format!("failed to list tenants dir {}", tenants_dir))? + .with_context(|| format!("failed to list tenants dir {tenants_dir}"))? { match &tenants_dir_entry { Ok(tenants_dir_entry) => { @@ -188,7 +188,7 @@ impl GlobalTimelines { let timelines_dir = get_tenant_dir(&conf, &tenant_id); for timelines_dir_entry in std::fs::read_dir(&timelines_dir) - .with_context(|| format!("failed to list timelines dir {}", timelines_dir))? + .with_context(|| format!("failed to list timelines dir {timelines_dir}"))? { match &timelines_dir_entry { Ok(timeline_dir_entry) => { diff --git a/safekeeper/src/wal_backup_partial.rs b/safekeeper/src/wal_backup_partial.rs index fe0f1b3607..cdf68262dd 100644 --- a/safekeeper/src/wal_backup_partial.rs +++ b/safekeeper/src/wal_backup_partial.rs @@ -364,8 +364,7 @@ impl PartialBackup { // there should always be zero or one uploaded segment assert!( new_segments.is_empty(), - "too many uploaded segments: {:?}", - new_segments + "too many uploaded segments: {new_segments:?}" ); } diff --git a/safekeeper/src/wal_storage.rs b/safekeeper/src/wal_storage.rs index 8ba3e7cc47..e68c9f3a99 100644 --- a/safekeeper/src/wal_storage.rs +++ b/safekeeper/src/wal_storage.rs @@ -841,7 +841,7 @@ pub(crate) async fn open_wal_file( // If that failed, try it without the .partial extension. 
let pf = tokio::fs::File::open(&wal_file_path) .await - .with_context(|| format!("failed to open WAL file {:#}", wal_file_path)) + .with_context(|| format!("failed to open WAL file {wal_file_path:#}")) .map_err(|e| { warn!("{}", e); e diff --git a/safekeeper/tests/walproposer_sim/log.rs b/safekeeper/tests/walproposer_sim/log.rs index e2ba3282ca..cecbc859e6 100644 --- a/safekeeper/tests/walproposer_sim/log.rs +++ b/safekeeper/tests/walproposer_sim/log.rs @@ -33,7 +33,7 @@ impl FormatTime for SimClock { if let Some(clock) = clock.as_ref() { let now = clock.now(); - write!(w, "[{}]", now) + write!(w, "[{now}]") } else { write!(w, "[?]") } diff --git a/safekeeper/tests/walproposer_sim/safekeeper.rs b/safekeeper/tests/walproposer_sim/safekeeper.rs index 5fb29683f2..1fdf8e4949 100644 --- a/safekeeper/tests/walproposer_sim/safekeeper.rs +++ b/safekeeper/tests/walproposer_sim/safekeeper.rs @@ -257,7 +257,7 @@ pub fn run_server(os: NodeOs, disk: Arc<SafekeeperDisk>) -> Result<()> { let estr = e.to_string(); if !estr.contains("finished processing START_REPLICATION") { warn!("conn {:?} error: {:?}", connection_id, e); - panic!("unexpected error at safekeeper: {:#}", e); + panic!("unexpected error at safekeeper: {e:#}"); } conns.remove(&connection_id); break; diff --git a/safekeeper/tests/walproposer_sim/simulation.rs b/safekeeper/tests/walproposer_sim/simulation.rs index 70fecfbe22..edd3bf2d9e 100644 --- a/safekeeper/tests/walproposer_sim/simulation.rs +++ b/safekeeper/tests/walproposer_sim/simulation.rs @@ -217,7 +217,7 @@ impl TestConfig { ]; let server_ids = [servers[0].id, servers[1].id, servers[2].id]; - let safekeepers_addrs = server_ids.map(|id| format!("node:{}", id)).to_vec(); + let safekeepers_addrs = server_ids.map(|id| format!("node:{id}")).to_vec(); let ttid = TenantTimelineId::generate(); diff --git a/safekeeper/tests/walproposer_sim/walproposer_api.rs b/safekeeper/tests/walproposer_sim/walproposer_api.rs index c2604c4bdc..29b361db7e 100644 --- a/safekeeper/tests/walproposer_sim/walproposer_api.rs +++ b/safekeeper/tests/walproposer_sim/walproposer_api.rs @@ -523,7 +523,7 @@ impl ApiImpl for SimulationApi { // Voting bug when safekeeper disconnects after voting executor::exit(1, msg.to_owned()); } - panic!("unknown FATAL error from walproposer: {}", msg); + panic!("unknown FATAL error from walproposer: {msg}"); } } @@ -544,10 +544,7 @@ impl ApiImpl for SimulationApi { } } - let msg = format!( - "prop_elected;{};{};{};{}", - prop_lsn, prop_term, prev_lsn, prev_term - ); + let msg = format!("prop_elected;{prop_lsn};{prop_term};{prev_lsn};{prev_term}"); debug!(msg); self.os.log_event(msg); diff --git a/storage_broker/benches/rps.rs b/storage_broker/benches/rps.rs index 9953ccfa91..5f3e594687 100644 --- a/storage_broker/benches/rps.rs +++ b/storage_broker/benches/rps.rs @@ -161,7 +161,7 @@ async fn publish(client: Option<BrokerClientChannel>, n_keys: u64) { } }; let response = client.publish_safekeeper_info(Request::new(outbound)).await; - println!("pub response is {:?}", response); + println!("pub response is {response:?}"); } #[tokio::main] diff --git a/storage_broker/build.rs b/storage_broker/build.rs index 08dadeacd5..77c441dddd 100644 --- a/storage_broker/build.rs +++ b/storage_broker/build.rs @@ -6,6 +6,6 @@ fn main() -> Result<(), Box<dyn std::error::Error>> { // the build then. Anyway, per cargo docs build script shouldn't output to // anywhere but $OUT_DIR.
tonic_build::compile_protos("proto/broker.proto") - .unwrap_or_else(|e| panic!("failed to compile protos {:?}", e)); + .unwrap_or_else(|e| panic!("failed to compile protos {e:?}")); Ok(()) } diff --git a/storage_broker/src/lib.rs b/storage_broker/src/lib.rs index 149656a191..7d8b57380f 100644 --- a/storage_broker/src/lib.rs +++ b/storage_broker/src/lib.rs @@ -86,13 +86,9 @@ impl BrokerClientChannel { #[allow(clippy::result_large_err, reason = "TODO")] pub fn parse_proto_ttid(proto_ttid: &ProtoTenantTimelineId) -> Result { let tenant_id = TenantId::from_slice(&proto_ttid.tenant_id) - .map_err(|e| Status::new(Code::InvalidArgument, format!("malformed tenant_id: {}", e)))?; - let timeline_id = TimelineId::from_slice(&proto_ttid.timeline_id).map_err(|e| { - Status::new( - Code::InvalidArgument, - format!("malformed timeline_id: {}", e), - ) - })?; + .map_err(|e| Status::new(Code::InvalidArgument, format!("malformed tenant_id: {e}")))?; + let timeline_id = TimelineId::from_slice(&proto_ttid.timeline_id) + .map_err(|e| Status::new(Code::InvalidArgument, format!("malformed timeline_id: {e}")))?; Ok(TenantTimelineId { tenant_id, timeline_id, diff --git a/storage_controller/src/compute_hook.rs b/storage_controller/src/compute_hook.rs index a4482a4dac..0b5569b3d6 100644 --- a/storage_controller/src/compute_hook.rs +++ b/storage_controller/src/compute_hook.rs @@ -370,7 +370,7 @@ impl ComputeHook { let authorization_header = config .control_plane_jwt_token .clone() - .map(|jwt| format!("Bearer {}", jwt)); + .map(|jwt| format!("Bearer {jwt}")); let mut client = reqwest::ClientBuilder::new().timeout(NOTIFY_REQUEST_TIMEOUT); for cert in &config.ssl_ca_certs { diff --git a/storage_controller/src/drain_utils.rs b/storage_controller/src/drain_utils.rs index bd4b8ba38f..0dae7b8147 100644 --- a/storage_controller/src/drain_utils.rs +++ b/storage_controller/src/drain_utils.rs @@ -62,7 +62,7 @@ pub(crate) fn validate_node_state( nodes: Arc>, ) -> Result<(), OperationError> { let node = nodes.get(node_id).ok_or(OperationError::NodeStateChanged( - format!("node {} was removed", node_id).into(), + format!("node {node_id} was removed").into(), ))?; let current_policy = node.get_scheduling(); @@ -70,7 +70,7 @@ pub(crate) fn validate_node_state( // TODO(vlad): maybe cancel pending reconciles before erroring out. need to think // about it return Err(OperationError::NodeStateChanged( - format!("node {} changed state to {:?}", node_id, current_policy).into(), + format!("node {node_id} changed state to {current_policy:?}").into(), )); } @@ -145,7 +145,7 @@ impl TenantShardDrain { if !nodes.contains_key(&destination) { return Err(OperationError::NodeStateChanged( - format!("node {} was removed", destination).into(), + format!("node {destination} was removed").into(), )); } diff --git a/storage_controller/src/http.rs b/storage_controller/src/http.rs index 346595aa11..a7e86b5224 100644 --- a/storage_controller/src/http.rs +++ b/storage_controller/src/http.rs @@ -721,9 +721,9 @@ async fn handle_tenant_timeline_passthrough( // Callers will always pass an unsharded tenant ID. Before proxying, we must // rewrite this to a shard-aware shard zero ID. 
-    let path = format!("{}", path);
+    let path = format!("{path}");
     let tenant_str = tenant_or_shard_id.tenant_id.to_string();
-    let tenant_shard_str = format!("{}", tenant_shard_id);
+    let tenant_shard_str = format!("{tenant_shard_id}");
     let path = path.replace(&tenant_str, &tenant_shard_str);
 
     let latency = &METRICS_REGISTRY
@@ -1539,7 +1539,7 @@ async fn handle_ready(req: Request<Body>) -> Result<Response<Body>, ApiError> {
 
 impl From<ReconcileError> for ApiError {
     fn from(value: ReconcileError) -> Self {
-        ApiError::Conflict(format!("Reconciliation error: {}", value))
+        ApiError::Conflict(format!("Reconciliation error: {value}"))
     }
 }
 
diff --git a/storage_controller/src/persistence.rs b/storage_controller/src/persistence.rs
index 4300dd32a9..2948e9019f 100644
--- a/storage_controller/src/persistence.rs
+++ b/storage_controller/src/persistence.rs
@@ -500,15 +500,13 @@ impl Persistence {
         if let Some(np) = node_to_delete {
             let lc = NodeLifecycle::from_str(&np.lifecycle).map_err(|e| {
                 DatabaseError::Logical(format!(
-                    "Node {} has invalid lifecycle: {}",
-                    del_node_id, e
+                    "Node {del_node_id} has invalid lifecycle: {e}"
                 ))
             })?;
 
             if lc != NodeLifecycle::Deleted {
                 return Err(DatabaseError::Logical(format!(
-                    "Node {} was not soft deleted before, cannot hard delete it",
-                    del_node_id
+                    "Node {del_node_id} was not soft deleted before, cannot hard delete it"
                 )));
             }
 
@@ -642,8 +640,7 @@ impl Persistence {
                 .await?;
             if deleted_node > 0 {
                 return Err(DatabaseError::Logical(format!(
-                    "Node {} is marked as deleted, re-attach is not allowed",
-                    input_node_id
+                    "Node {input_node_id} is marked as deleted, re-attach is not allowed"
                 )));
             }
 
@@ -1003,7 +1000,7 @@ impl Persistence {
                 .execute(conn).await?;
             if u8::try_from(updated)
                 .map_err(|_| DatabaseError::Logical(
-                    format!("Overflow existing shard count {} while splitting", updated))
+                    format!("Overflow existing shard count {updated} while splitting"))
                 )? != old_shard_count.count() {
                 // Perhaps a deletion or another split raced with this attempt to split, mutating
                 // the parent shards that we intend to split. In this case the split request should fail.
@@ -1343,8 +1340,7 @@ impl Persistence {
 
             if inserted_updated != 1 {
                 return Err(DatabaseError::Logical(format!(
-                    "unexpected number of rows ({})",
-                    inserted_updated
+                    "unexpected number of rows ({inserted_updated})"
                 )));
             }
 
@@ -1406,8 +1402,7 @@ impl Persistence {
                 0 => Ok(false),
                 1 => Ok(true),
                 _ => Err(DatabaseError::Logical(format!(
-                    "unexpected number of rows ({})",
-                    inserted_updated
+                    "unexpected number of rows ({inserted_updated})"
                 ))),
             }
         })
@@ -1476,8 +1471,7 @@ impl Persistence {
                 0 => Ok(()),
                 1 => Ok(()),
                 _ => Err(DatabaseError::Logical(format!(
-                    "unexpected number of rows ({})",
-                    updated
+                    "unexpected number of rows ({updated})"
                 ))),
             }
         })
@@ -1570,8 +1564,7 @@ impl Persistence {
                 0 => Ok(false),
                 1 => Ok(true),
                 _ => Err(DatabaseError::Logical(format!(
-                    "unexpected number of rows ({})",
-                    inserted_updated
+                    "unexpected number of rows ({inserted_updated})"
                 ))),
             }
         })
diff --git a/storage_controller/src/scheduler.rs b/storage_controller/src/scheduler.rs
index b3656c33d4..b86b4dfab1 100644
--- a/storage_controller/src/scheduler.rs
+++ b/storage_controller/src/scheduler.rs
@@ -23,7 +23,7 @@ pub enum ScheduleError {
 
 impl From<ScheduleError> for ApiError {
     fn from(value: ScheduleError) -> Self {
-        ApiError::Conflict(format!("Scheduling error: {}", value))
+        ApiError::Conflict(format!("Scheduling error: {value}"))
     }
 }
 
@@ -903,7 +903,7 @@ impl Scheduler {
     /// rigorously updating them on every change.
     pub(crate) fn update_metrics(&self) {
         for (node_id, node) in &self.nodes {
-            let node_id_str = format!("{}", node_id);
+            let node_id_str = format!("{node_id}");
             let label_group = NodeLabelGroup {
                 az: &node.az.0,
                 node_id: &node_id_str,
@@ -1326,7 +1326,7 @@ mod tests {
            .map(|(node_id, node)| (node_id, node.home_shard_count))
            .collect::<Vec<_>>();
        node_home_counts.sort_by_key(|i| i.0);
-        eprintln!("Selected {}, vs nodes {:?}", preferred_az, node_home_counts);
+        eprintln!("Selected {preferred_az}, vs nodes {node_home_counts:?}");
 
        let tenant_shard_id = TenantShardId {
            tenant_id: TenantId::generate(),
diff --git a/storage_controller/src/service.rs b/storage_controller/src/service.rs
index 0eb87ffbe3..8424c27cf8 100644
--- a/storage_controller/src/service.rs
+++ b/storage_controller/src/service.rs
@@ -260,7 +260,7 @@ fn passthrough_api_error(node: &Node, e: mgmt_api::Error) -> ApiError {
             // Presume errors receiving body are connectivity/availability issues except for decoding errors
             let src_str = err.source().map(|e| e.to_string()).unwrap_or_default();
             ApiError::ResourceUnavailable(
-                format!("{node} error receiving error body: {err} {}", src_str).into(),
+                format!("{node} error receiving error body: {err} {src_str}").into(),
             )
         }
         mgmt_api::Error::ApiError(StatusCode::NOT_FOUND, msg) => {
@@ -671,7 +671,7 @@ impl std::fmt::Display for StopReconciliationsReason {
             Self::ShuttingDown => "Shutting down",
             Self::SteppingDown => "Stepping down",
         };
-        write!(writer, "{}", s)
+        write!(writer, "{s}")
     }
 }
 
@@ -5278,7 +5278,7 @@ impl Service {
             shard_params,
             result
                 .iter()
-                .map(|s| format!("{:?}", s))
+                .map(|s| format!("{s:?}"))
                 .collect::<Vec<_>>()
                 .join(",")
         );
@@ -6201,7 +6201,7 @@ impl Service {
                 },
             )
             .await
-            .map_err(|e| ApiError::Conflict(format!("Failed to split {}: {}", parent_id, e)))?;
+            .map_err(|e| ApiError::Conflict(format!("Failed to split {parent_id}: {e}")))?;
 
         fail::fail_point!("shard-split-post-remote", |_| Err(ApiError::Conflict(
             "failpoint".to_string()
@@ -6218,7 +6218,7 @@ impl Service {
             response
                 .new_shards
                 .iter()
-                .map(|s| format!("{:?}", s))
+                .map(|s| format!("{s:?}"))
                 .collect::<Vec<_>>()
                 .join(",")
         );
@@ -7117,8 +7117,7 @@ impl Service {
             Ok(())
         } else {
             Err(ApiError::Conflict(format!(
-                "Node {} is in use, consider using tombstone API first",
-                node_id
+                "Node {node_id} is in use, consider using tombstone API first"
             )))
         }
     }
@@ -7668,7 +7667,7 @@ impl Service {
 
         if let Some(ongoing) = ongoing_op {
             return Err(ApiError::PreconditionFailed(
-                format!("Background operation already ongoing for node: {}", ongoing).into(),
+                format!("Background operation already ongoing for node: {ongoing}").into(),
             ));
         }
 
@@ -7799,7 +7798,7 @@ impl Service {
 
         if let Some(ongoing) = ongoing_op {
             return Err(ApiError::PreconditionFailed(
-                format!("Background operation already ongoing for node: {}", ongoing).into(),
+                format!("Background operation already ongoing for node: {ongoing}").into(),
             ));
         }
 
@@ -8870,7 +8869,7 @@ impl Service {
         let nodes = self.inner.read().unwrap().nodes.clone();
         let node = nodes.get(secondary).ok_or(mgmt_api::Error::ApiError(
             StatusCode::NOT_FOUND,
-            format!("Node with id {} not found", secondary),
+            format!("Node with id {secondary} not found"),
         ))?;
 
         match node
@@ -8949,8 +8948,7 @@ impl Service {
             Err(err) => {
                 return Err(OperationError::FinalizeError(
                     format!(
-                        "Failed to finalise drain cancel of {} by setting scheduling policy to Active: {}",
-                        node_id, err
+                        "Failed to finalise drain cancel of {node_id} by setting scheduling policy to Active: {err}"
                     )
                     .into(),
                 ));
@@ -9054,8 +9052,7 @@ impl Service {
             Err(err) => {
                 return Err(OperationError::FinalizeError(
                     format!(
-                        "Failed to finalise drain cancel of {} by setting scheduling policy to Active: {}",
-                        node_id, err
+                        "Failed to finalise drain cancel of {node_id} by setting scheduling policy to Active: {err}"
                     )
                     .into(),
                 ));
@@ -9265,8 +9262,7 @@ impl Service {
             Err(err) => {
                 return Err(OperationError::FinalizeError(
                     format!(
-                        "Failed to finalise drain cancel of {} by setting scheduling policy to Active: {}",
-                        node_id, err
+                        "Failed to finalise drain cancel of {node_id} by setting scheduling policy to Active: {err}"
                     )
                     .into(),
                 ));
@@ -9348,8 +9344,7 @@ impl Service {
             Err(err) => {
                 return Err(OperationError::FinalizeError(
                     format!(
-                        "Failed to finalise drain cancel of {} by setting scheduling policy to Active: {}",
-                        node_id, err
+                        "Failed to finalise drain cancel of {node_id} by setting scheduling policy to Active: {err}"
                     )
                     .into(),
                 ));
diff --git a/storage_controller/src/tenant_shard.rs b/storage_controller/src/tenant_shard.rs
index 789327bfaf..359921ecbf 100644
--- a/storage_controller/src/tenant_shard.rs
+++ b/storage_controller/src/tenant_shard.rs
@@ -3008,21 +3008,18 @@ pub(crate) mod tests {
 
         if attachments_in_wrong_az > 0 {
             violations.push(format!(
-                "{} attachments scheduled to the incorrect AZ",
-                attachments_in_wrong_az
+                "{attachments_in_wrong_az} attachments scheduled to the incorrect AZ"
             ));
         }
 
         if secondaries_in_wrong_az > 0 {
             violations.push(format!(
-                "{} secondaries scheduled to the incorrect AZ",
-                secondaries_in_wrong_az
+                "{secondaries_in_wrong_az} secondaries scheduled to the incorrect AZ"
             ));
         }
 
         eprintln!(
-            "attachments_in_wrong_az={} secondaries_in_wrong_az={}",
-            attachments_in_wrong_az, secondaries_in_wrong_az
+            "attachments_in_wrong_az={attachments_in_wrong_az} secondaries_in_wrong_az={secondaries_in_wrong_az}"
         );
 
         for (node_id, stats) in &node_stats {
diff --git a/storage_controller/src/timeline_import.rs b/storage_controller/src/timeline_import.rs
index eb50819d02..e88bce4c82 100644
--- a/storage_controller/src/timeline_import.rs
+++ b/storage_controller/src/timeline_import.rs
@@ -195,7 +195,7 @@ impl UpcallClient {
         let authorization_header = config
             .control_plane_jwt_token
             .clone()
-            .map(|jwt| format!("Bearer {}", jwt));
+            .map(|jwt| format!("Bearer {jwt}"));
 
         let client = reqwest::ClientBuilder::new()
             .timeout(IMPORT_COMPLETE_REQUEST_TIMEOUT)
diff --git a/storage_scrubber/src/checks.rs b/storage_scrubber/src/checks.rs
index 865f0908f9..774418f237 100644
--- a/storage_scrubber/src/checks.rs
+++ b/storage_scrubber/src/checks.rs
@@ -146,7 +146,7 @@ pub(crate) async fn branch_cleanup_and_check_errors(
         for (layer, metadata) in index_part.layer_metadata {
             if metadata.file_size == 0 {
                 result.errors.push(format!(
-                    "index_part.json contains a layer {} that has 0 size in its layer metadata", layer,
+                    "index_part.json contains a layer {layer} that has 0 size in its layer metadata",
                 ))
             }
 
diff --git a/storage_scrubber/src/lib.rs b/storage_scrubber/src/lib.rs
index 25a157f108..d3ed5a8357 100644
--- a/storage_scrubber/src/lib.rs
+++ b/storage_scrubber/src/lib.rs
@@ -123,7 +123,7 @@ impl S3Target {
     pub fn with_sub_segment(&self, new_segment: &str) -> Self {
         let mut new_self = self.clone();
         if new_self.prefix_in_bucket.is_empty() {
-            new_self.prefix_in_bucket = format!("/{}/", new_segment);
+            new_self.prefix_in_bucket = format!("/{new_segment}/");
         } else {
             if new_self.prefix_in_bucket.ends_with('/') {
                 new_self.prefix_in_bucket.pop();
diff --git a/storage_scrubber/src/scan_safekeeper_metadata.rs b/storage_scrubber/src/scan_safekeeper_metadata.rs
index f10d758097..cf0a3d19e9 100644
--- a/storage_scrubber/src/scan_safekeeper_metadata.rs
+++ b/storage_scrubber/src/scan_safekeeper_metadata.rs
@@ -265,7 +265,7 @@ async fn load_timelines_from_db(
     // so spawn it off to run on its own.
     tokio::spawn(async move {
         if let Err(e) = connection.await {
-            eprintln!("connection error: {}", e);
+            eprintln!("connection error: {e}");
        }
    });
 
@@ -274,7 +274,7 @@ async fn load_timelines_from_db(
            "and tenant_id in ({})",
            tenant_ids
                .iter()
-                .map(|t| format!("'{}'", t))
+                .map(|t| format!("'{t}'"))
                .collect::<Vec<_>>()
                .join(", ")
        )
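
Note for reviewers unfamiliar with the lint: below is a minimal, self-contained
sketch of the rewrite that `uninlined_format_args` drives throughout this patch.
It is illustrative only and not part of the diff; `node_id` and `err` are
hypothetical locals, not code from this repository.

    fn main() {
        let node_id = 42;
        let err = "connection reset";

        // Before: positional arguments trail the format string.
        let before = format!("node {} failed: {:?}", node_id, err);

        // After: plain identifiers are inlined into the braces; format specs
        // such as `:?` and `:#` move inside the braces with them.
        let after = format!("node {node_id} failed: {err:?}");

        // The two forms produce identical output. Only simple identifiers can
        // be inlined; expressions (e.g. `ids.len()`) must remain as trailing
        // arguments, which is why some `{}` placeholders survive in this patch.
        assert_eq!(before, after);
    }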