diff --git a/compute_tools/src/compute.rs b/compute_tools/src/compute.rs
index 08d915b331..0cda36a6e2 100644
--- a/compute_tools/src/compute.rs
+++ b/compute_tools/src/compute.rs
@@ -329,11 +329,39 @@ struct StartVmMonitorResult {
 impl ComputeNode {
     pub fn new(params: ComputeNodeParams, config: ComputeConfig) -> Result<Self> {
         let connstr = params.connstr.as_str();
-        let conn_conf = postgres::config::Config::from_str(connstr)
+        let mut conn_conf = postgres::config::Config::from_str(connstr)
             .context("cannot build postgres config from connstr")?;
-        let tokio_conn_conf = tokio_postgres::config::Config::from_str(connstr)
+        let mut tokio_conn_conf = tokio_postgres::config::Config::from_str(connstr)
             .context("cannot build tokio postgres config from connstr")?;
 
+        // Users can set some configuration parameters per database with
+        //   ALTER DATABASE ... SET ...
+        //
+        // There are at least these parameters:
+        //
+        //   - role=some_other_role
+        //   - default_transaction_read_only=on
+        //   - statement_timeout=1, i.e. 1ms, which will cause most of the queries to fail
+        //   - search_path=non_public_schema; this should actually be safe because
+        //     we don't call any functions in user databases, but it's better to always
+        //     reset it to public
+        //
+        // that can affect `compute_ctl` and prevent it from properly configuring the database schema.
+        // Unset them via connection string options before connecting to the database.
+        // N.B. keep this in sync with `ZENITH_OPTIONS` in `get_maintenance_client()`.
+        //
+        // TODO(ololobus): we currently pass `-c default_transaction_read_only=off` from the control plane
+        // as well. After rolling out this code, we can remove this parameter from the control plane.
+        // In the meantime, double-passing is fine; the last value is applied.
+        // See:
+        const EXTRA_OPTIONS: &str = "-c role=cloud_admin -c default_transaction_read_only=off -c search_path=public -c statement_timeout=0";
+        let options = match conn_conf.get_options() {
+            Some(options) => format!("{} {}", options, EXTRA_OPTIONS),
+            None => EXTRA_OPTIONS.to_string(),
+        };
+        conn_conf.options(&options);
+        tokio_conn_conf.options(&options);
+
         let mut new_state = ComputeState::new();
         if let Some(spec) = config.spec {
             let pspec = ParsedSpec::try_from(spec).map_err(|msg| anyhow::anyhow!(msg))?;
@@ -1449,15 +1477,20 @@ impl ComputeNode {
             Err(e) => match e.code() {
                 Some(&SqlState::INVALID_PASSWORD)
                 | Some(&SqlState::INVALID_AUTHORIZATION_SPECIFICATION) => {
-                    // Connect with zenith_admin if cloud_admin could not authenticate
+                    // Connect with `zenith_admin` if `cloud_admin` could not authenticate
                     info!(
-                        "cannot connect to postgres: {}, retrying with `zenith_admin` username",
+                        "cannot connect to Postgres: {}, retrying with 'zenith_admin' username",
                         e
                     );
                     let mut zenith_admin_conf = postgres::config::Config::from(conf.clone());
                     zenith_admin_conf.application_name("compute_ctl:apply_config");
                     zenith_admin_conf.user("zenith_admin");
 
+                    // It doesn't matter what the options were before; here we just want
+                    // to connect and create a new superuser role.
+                    const ZENITH_OPTIONS: &str = "-c role=zenith_admin -c default_transaction_read_only=off -c search_path=public -c statement_timeout=0";
+                    zenith_admin_conf.options(ZENITH_OPTIONS);
+
                     let mut client =
                         zenith_admin_conf.connect(NoTls)
                             .context("broken cloud_admin credential: tried connecting with cloud_admin but could not authenticate, and zenith_admin does not work either")?;
@@ -1623,9 +1656,7 @@ impl ComputeNode {
         self.pg_reload_conf()?;
 
         if spec.mode == ComputeMode::Primary {
-            let mut conf =
-                tokio_postgres::Config::from_str(self.params.connstr.as_str()).unwrap();
-            conf.application_name("apply_config");
+            let conf = self.get_tokio_conn_conf(Some("compute_ctl:reconfigure"));
             let conf = Arc::new(conf);
 
             let spec = Arc::new(spec.clone());
diff --git a/test_runner/regress/test_compute_catalog.py b/test_runner/regress/test_compute_catalog.py
index 37208c9fff..b66b326360 100644
--- a/test_runner/regress/test_compute_catalog.py
+++ b/test_runner/regress/test_compute_catalog.py
@@ -544,3 +544,69 @@ def test_drop_role_with_table_privileges_from_non_neon_superuser(neon_simple_env
         )
         role = cursor.fetchone()
         assert role is None
+
+
+def test_db_with_custom_settings(neon_simple_env: NeonEnv):
+    """
+    Test that compute_ctl can work with databases that have some custom settings.
+    For example, role=some_other_role, default_transaction_read_only=on,
+    search_path=non_public_schema, statement_timeout=1 (1ms).
+    """
+    env = neon_simple_env
+
+    endpoint = env.endpoints.create_start("main")
+
+    TEST_ROLE = "some_other_role"
+    TEST_DB = "db_with_custom_settings"
+    TEST_SCHEMA = "non_public_schema"
+
+    endpoint.respec_deep(
+        **{
+            "spec": {
+                "skip_pg_catalog_updates": False,
+                "cluster": {
+                    "databases": [
+                        {
+                            "name": TEST_DB,
+                            "owner": TEST_ROLE,
+                        }
+                    ],
+                    "roles": [
+                        {
+                            "name": TEST_ROLE,
+                        }
+                    ],
+                },
+            }
+        }
+    )
+
+    endpoint.reconfigure()
+
+    with endpoint.cursor(dbname=TEST_DB) as cursor:
+        cursor.execute(f"CREATE SCHEMA {TEST_SCHEMA}")
+        cursor.execute(f"ALTER DATABASE {TEST_DB} SET role = {TEST_ROLE}")
+        cursor.execute(f"ALTER DATABASE {TEST_DB} SET default_transaction_read_only = on")
+        cursor.execute(f"ALTER DATABASE {TEST_DB} SET search_path = {TEST_SCHEMA}")
+        cursor.execute(f"ALTER DATABASE {TEST_DB} SET statement_timeout = 1")
+
+    with endpoint.cursor(dbname=TEST_DB) as cursor:
+        cursor.execute("SELECT current_role")
+        role = cursor.fetchone()
+        assert role is not None
+        assert role[0] == TEST_ROLE
+
+        cursor.execute("SHOW default_transaction_read_only")
+        default_transaction_read_only = cursor.fetchone()
+        assert default_transaction_read_only is not None
+        assert default_transaction_read_only[0] == "on"
+
+        cursor.execute("SHOW search_path")
+        search_path = cursor.fetchone()
+        assert search_path is not None
+        assert search_path[0] == TEST_SCHEMA
+
+        # Do not check statement_timeout, because we force it to 2min
+        # in `endpoint.cursor()` fixture.
+
+    endpoint.reconfigure()
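For reviewers: a minimal standalone sketch (not part of the patch) of the option-merging approach used in `ComputeNode::new` above. Per-database defaults set via `ALTER DATABASE ... SET ...` are overridden by `-c` flags passed in the startup options, and when the same GUC is passed twice the last value wins. The connection string below is a placeholder, not a real compute endpoint.

// Sketch of how compute_ctl appends its GUC overrides to whatever options
// were already present in the connection string. Placeholder connstr only.
use std::str::FromStr;

fn main() -> anyhow::Result<()> {
    // In compute_ctl the real connstr comes from ComputeNodeParams; this one
    // is a stand-in for illustration.
    let connstr = "host=localhost port=5432 user=cloud_admin dbname=postgres";
    let mut conf = tokio_postgres::config::Config::from_str(connstr)?;

    // Same GUC overrides as EXTRA_OPTIONS in the patch.
    const EXTRA_OPTIONS: &str = "-c role=cloud_admin -c default_transaction_read_only=off -c search_path=public -c statement_timeout=0";

    // Keep any options already present in the connection string and append
    // ours, so ours are applied last and take precedence.
    let options = match conf.get_options() {
        Some(existing) => format!("{} {}", existing, EXTRA_OPTIONS),
        None => EXTRA_OPTIONS.to_string(),
    };
    conf.options(&options);

    println!("effective startup options: {:?}", conf.get_options());
    Ok(())
}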