Mirror of https://github.com/neondatabase/neon.git (synced 2026-01-06 21:12:55 +00:00)
Setup compute_ctl pgaudit and rsyslog (#10615)
Set up the pgaudit and pgauditlogtofile extensions in compute_ctl when ComputeAuditLogLevel is set to 'hipaa'; see cloud PR https://github.com/neondatabase/cloud/pull/24568. Also add rsyslog setup for compute_ctl: spin up an rsyslog server in the compute VM and configure it to send logs to the endpoint specified in the AUDIT_LOGGING_ENDPOINT environment variable.
Committed by GitHub · parent 9cdc8c0e6c · commit d94fc75cfc
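As a quick orientation, here is a minimal sketch of the gating the commit message describes, using illustrative names (AuditLevel, audit_forwarding_endpoint) rather than the actual compute_ctl types; the real logic is in the compute.rs hunk further below. Forwarding is only configured for the 'hipaa' audit level, and a missing AUDIT_LOGGING_ENDPOINT is treated as a startup error.

// Illustrative sketch only; not the compute_ctl implementation.
use anyhow::{Result, bail};

#[allow(dead_code)]
#[derive(Debug, PartialEq)]
enum AuditLevel {
    Disabled,
    Log,
    Hipaa,
}

/// Returns the syslog endpoint to forward audit logs to, or None when
/// audit log forwarding is not required for this compute.
fn audit_forwarding_endpoint(level: &AuditLevel) -> Result<Option<String>> {
    if *level != AuditLevel::Hipaa {
        return Ok(None);
    }
    let endpoint = std::env::var("AUDIT_LOGGING_ENDPOINT").unwrap_or_default();
    if endpoint.is_empty() {
        bail!("AUDIT_LOGGING_ENDPOINT is empty");
    }
    Ok(Some(endpoint))
}

fn main() -> Result<()> {
    // With the default level nothing is forwarded and no endpoint is needed.
    assert_eq!(audit_forwarding_endpoint(&AuditLevel::Disabled)?, None);
    Ok(())
}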
@@ -1933,6 +1933,7 @@ RUN apt update && \
     locales \
     procps \
     ca-certificates \
+    rsyslog \
     $VERSION_INSTALLS && \
     apt clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
     localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8
@@ -1978,6 +1979,15 @@ COPY --from=sql_exporter_preprocessor --chmod=0644 /home/nonroot/compute/etc/neo
 # Make the libraries we built available
 RUN echo '/usr/local/lib' >> /etc/ld.so.conf && /sbin/ldconfig
 
+# rsyslog config permissions
+RUN chown postgres:postgres /etc/rsyslog.conf && \
+    touch /etc/compute_rsyslog.conf && \
+    chown -R postgres:postgres /etc/compute_rsyslog.conf && \
+    # directory for rsyslogd pid file
+    mkdir /var/run/rsyslogd && \
+    chown -R postgres:postgres /var/run/rsyslogd
+
+
 ENV LANG=en_US.utf8
 USER postgres
 ENTRYPOINT ["/usr/local/bin/compute_ctl"]
@@ -54,7 +54,7 @@ files:
       # regardless of hostname (ALL)
       #
       # Also allow it to shut down the VM. The fast_import job does that when it's finished.
-      postgres ALL=(root) NOPASSWD: /neonvm/bin/resize-swap, /neonvm/bin/set-disk-quota, /neonvm/bin/poweroff
+      postgres ALL=(root) NOPASSWD: /neonvm/bin/resize-swap, /neonvm/bin/set-disk-quota, /neonvm/bin/poweroff, /usr/sbin/rsyslogd
   - filename: cgconfig.conf
     content: |
       # Configuration for cgroups in VM compute nodes
@@ -54,7 +54,7 @@ files:
      # regardless of hostname (ALL)
      #
      # Also allow it to shut down the VM. The fast_import job does that when it's finished.
-      postgres ALL=(root) NOPASSWD: /neonvm/bin/resize-swap, /neonvm/bin/set-disk-quota, /neonvm/bin/poweroff
+      postgres ALL=(root) NOPASSWD: /neonvm/bin/resize-swap, /neonvm/bin/set-disk-quota, /neonvm/bin/poweroff, /usr/sbin/rsyslogd
   - filename: cgconfig.conf
     content: |
       # Configuration for cgroups in VM compute nodes
@@ -12,7 +12,9 @@ use anyhow::{Context, Result};
 use chrono::{DateTime, Utc};
 use compute_api::privilege::Privilege;
 use compute_api::responses::{ComputeCtlConfig, ComputeMetrics, ComputeStatus};
-use compute_api::spec::{ComputeFeature, ComputeMode, ComputeSpec, ExtVersion, PgIdent};
+use compute_api::spec::{
+    ComputeAudit, ComputeFeature, ComputeMode, ComputeSpec, ExtVersion, PgIdent,
+};
 use futures::StreamExt;
 use futures::future::join_all;
 use futures::stream::FuturesUnordered;
@@ -35,6 +37,7 @@ use crate::logger::startup_context_from_env;
 use crate::lsn_lease::launch_lsn_lease_bg_task_for_static;
 use crate::monitor::launch_monitor;
 use crate::pg_helpers::*;
+use crate::rsyslog::configure_and_start_rsyslog;
 use crate::spec::*;
 use crate::swap::resize_swap;
 use crate::sync_sk::{check_if_synced, ping_safekeeper};
@@ -676,6 +679,23 @@ impl ComputeNode {
             });
         }
 
+        // Configure and start rsyslog if necessary
+        if let ComputeAudit::Hipaa = pspec.spec.audit_log_level {
+            let remote_endpoint = std::env::var("AUDIT_LOGGING_ENDPOINT").unwrap_or("".to_string());
+            if remote_endpoint.is_empty() {
+                anyhow::bail!("AUDIT_LOGGING_ENDPOINT is empty");
+            }
+
+            let log_directory_path = Path::new(&self.params.pgdata).join("log");
+            // TODO: make this more robust
+            // now rsyslog starts once and there is no monitoring or restart if it fails
+            configure_and_start_rsyslog(
+                log_directory_path.to_str().unwrap(),
+                "hipaa",
+                &remote_endpoint,
+            )?;
+        }
+
         // Launch remaining service threads
         let _monitor_handle = launch_monitor(self);
         let _configurator_handle = launch_configurator(self);
@@ -1,3 +1,4 @@
+use anyhow::Result;
 use std::fmt::Write as FmtWrite;
 use std::fs::{File, OpenOptions};
 use std::io;
@@ -5,10 +6,11 @@ use std::io::Write;
 use std::io::prelude::*;
 use std::path::Path;
 
-use anyhow::Result;
-use compute_api::spec::{ComputeMode, ComputeSpec, GenericOption};
+use compute_api::spec::{ComputeAudit, ComputeMode, ComputeSpec, GenericOption};
 
-use crate::pg_helpers::{GenericOptionExt, PgOptionsSerialize, escape_conf_value};
+use crate::pg_helpers::{
+    GenericOptionExt, GenericOptionsSearch, PgOptionsSerialize, escape_conf_value,
+};
 
 /// Check that `line` is inside a text file and put it there if it is not.
 /// Create file if it doesn't exist.
@@ -138,6 +140,54 @@ pub fn write_postgres_conf(
         writeln!(file, "# Managed by compute_ctl: end")?;
     }
 
+    // If audit logging is enabled, configure pgaudit.
+    //
+    // Note that this is called after the settings from the spec are written.
+    // This way we always override the settings from the spec
+    // and don't allow the user or the control plane admin to change them.
+    if let ComputeAudit::Hipaa = spec.audit_log_level {
+        writeln!(file, "# Managed by compute_ctl audit settings: begin")?;
+        // This log level is very verbose,
+        // but this is necessary for HIPAA compliance.
+        writeln!(file, "pgaudit.log='all'")?;
+        writeln!(file, "pgaudit.log_parameter=on")?;
+        // Disable logging of catalog queries.
+        // The catalog doesn't contain sensitive data, so we don't need to audit it.
+        writeln!(file, "pgaudit.log_catalog=off")?;
+        // Set log rotation to 5 minutes.
+        // TODO: tune this after performance testing.
+        writeln!(file, "pgaudit.log_rotation_age=5")?;
+
+        // Add audit shared_preload_libraries, if they are not present.
+        //
+        // The caller who sets the flag is responsible for ensuring that the necessary
+        // shared_preload_libraries are present in the compute image,
+        // otherwise the compute start will fail.
+        if let Some(libs) = spec.cluster.settings.find("shared_preload_libraries") {
+            let mut extra_shared_preload_libraries = String::new();
+            if !libs.contains("pgaudit") {
+                extra_shared_preload_libraries.push_str(",pgaudit");
+            }
+            if !libs.contains("pgauditlogtofile") {
+                extra_shared_preload_libraries.push_str(",pgauditlogtofile");
+            }
+            writeln!(
+                file,
+                "shared_preload_libraries='{}{}'",
+                libs, extra_shared_preload_libraries
+            )?;
+        } else {
+            // Typically, this should be unreachable,
+            // because we always set at least some shared_preload_libraries in the spec,
+            // but let's handle it explicitly anyway.
+            writeln!(
+                file,
+                "shared_preload_libraries='neon,pgaudit,pgauditlogtofile'"
+            )?;
+        }
+        writeln!(file, "# Managed by compute_ctl audit settings: end")?;
+    }
+
     writeln!(file, "neon.extension_server_port={}", extension_server_port)?;
 
     if spec.drop_subscriptions_before_start {
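As an aside, the shared_preload_libraries merge above can be read as a small pure function. The following is a standalone sketch of that rule, not the compute_ctl code; the function name merged_preload_libraries is made up here. Note that contains() is a substring check, so a value that already names "pgauditlogtofile" also satisfies the "pgaudit" check, exactly as in the diff above.

// Sketch of the merge rule: keep whatever the spec already set and append
// the audit libraries only if they are missing.
fn merged_preload_libraries(existing: Option<&str>) -> String {
    match existing {
        Some(libs) => {
            let mut extra = String::new();
            if !libs.contains("pgaudit") {
                extra.push_str(",pgaudit");
            }
            if !libs.contains("pgauditlogtofile") {
                extra.push_str(",pgauditlogtofile");
            }
            format!("{libs}{extra}")
        }
        // No shared_preload_libraries in the spec: fall back to a fixed list.
        None => "neon,pgaudit,pgauditlogtofile".to_string(),
    }
}

fn main() {
    assert_eq!(
        merged_preload_libraries(Some("neon")),
        "neon,pgaudit,pgauditlogtofile"
    );
    assert_eq!(
        merged_preload_libraries(Some("neon,pgaudit,pgauditlogtofile")),
        "neon,pgaudit,pgauditlogtofile"
    );
    assert_eq!(merged_preload_libraries(None), "neon,pgaudit,pgauditlogtofile");
}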
@@ -0,0 +1,10 @@
+# Load imfile module to read log files
+module(load="imfile")
+
+# Input configuration for log files in the specified directory
+# Replace {log_directory} with the directory containing the log files
+input(type="imfile" File="{log_directory}/*.log" Tag="{tag}" Severity="info" Facility="local0")
+global(workDirectory="/var/log")
+
+# Forward logs to remote syslog server
+*.* @@{remote_endpoint}
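For illustration, this is how the placeholders in the template get filled in: compute_ctl pulls the file in with include_str!() and passes the three values as named format! arguments (see rsyslog.rs below). The sketch uses a trimmed copy of the template and made-up values for the pgdata path and the endpoint.

fn main() {
    // Trimmed copy of the template; the real code formats the full file
    // loaded via include_str!("config_template/compute_rsyslog_template.conf").
    let rendered = format!(
        r#"input(type="imfile" File="{log_directory}/*.log" Tag="{tag}" Severity="info" Facility="local0")
*.* @@{remote_endpoint}
"#,
        log_directory = "/var/db/postgres/compute/pgdata/log", // hypothetical pgdata log dir
        tag = "hipaa",
        remote_endpoint = "audit-collector.local:5140", // hypothetical endpoint
    );
    print!("{rendered}");
}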
@@ -21,6 +21,7 @@ mod migration;
 pub mod monitor;
 pub mod params;
 pub mod pg_helpers;
+pub mod rsyslog;
 pub mod spec;
 mod spec_apply;
 pub mod swap;
compute_tools/src/rsyslog.rs (new file, 80 lines)
@@ -0,0 +1,80 @@
+use std::process::Command;
+use std::{fs::OpenOptions, io::Write};
+
+use anyhow::{Context, Result};
+use tracing::info;
+
+fn get_rsyslog_pid() -> Option<String> {
+    let output = Command::new("pgrep")
+        .arg("rsyslogd")
+        .output()
+        .expect("Failed to execute pgrep");
+
+    if !output.stdout.is_empty() {
+        let pid = std::str::from_utf8(&output.stdout)
+            .expect("Invalid UTF-8 in process output")
+            .trim()
+            .to_string();
+        Some(pid)
+    } else {
+        None
+    }
+}
+
+// Start rsyslogd with the specified configuration file.
+// If it is already running, do nothing.
+fn start_rsyslog(rsyslog_conf_path: &str) -> Result<()> {
+    let pid = get_rsyslog_pid();
+    if let Some(pid) = pid {
+        info!("rsyslogd is already running with pid: {}", pid);
+        return Ok(());
+    }
+
+    let _ = Command::new("/usr/sbin/rsyslogd")
+        .arg("-f")
+        .arg(rsyslog_conf_path)
+        .arg("-i")
+        .arg("/var/run/rsyslogd/rsyslogd.pid")
+        .output()
+        .context("Failed to start rsyslogd")?;
+
+    // Check that rsyslogd is running
+    if let Some(pid) = get_rsyslog_pid() {
+        info!("rsyslogd started successfully with pid: {}", pid);
+    } else {
+        return Err(anyhow::anyhow!("Failed to start rsyslogd"));
+    }
+
+    Ok(())
+}
+
+pub fn configure_and_start_rsyslog(
+    log_directory: &str,
+    tag: &str,
+    remote_endpoint: &str,
+) -> Result<()> {
+    let config_content: String = format!(
+        include_str!("config_template/compute_rsyslog_template.conf"),
+        log_directory = log_directory,
+        tag = tag,
+        remote_endpoint = remote_endpoint
+    );
+
+    info!("rsyslog config_content: {}", config_content);
+
+    let rsyslog_conf_path = "/etc/compute_rsyslog.conf";
+    let mut file = OpenOptions::new()
+        .create(true)
+        .write(true)
+        .truncate(true)
+        .open(rsyslog_conf_path)?;
+
+    file.write_all(config_content.as_bytes())?;
+
+    info!("rsyslog configuration added successfully. Starting rsyslogd");
+
+    // start the service, using the configuration
+    start_rsyslog(rsyslog_conf_path)?;
+
+    Ok(())
+}
@@ -6,7 +6,7 @@ use std::sync::Arc;
 
 use anyhow::{Context, Result};
 use compute_api::responses::ComputeStatus;
-use compute_api::spec::{ComputeFeature, ComputeSpec, Database, PgIdent, Role};
+use compute_api::spec::{ComputeAudit, ComputeFeature, ComputeSpec, Database, PgIdent, Role};
 use futures::future::join_all;
 use tokio::sync::RwLock;
 use tokio_postgres::Client;
@@ -19,10 +19,10 @@ use crate::pg_helpers::{
     get_existing_roles_async,
 };
 use crate::spec_apply::ApplySpecPhase::{
-    CreateAndAlterDatabases, CreateAndAlterRoles, CreateAvailabilityCheck, CreateSchemaNeon,
-    CreateSuperUser, DropInvalidDatabases, DropRoles, FinalizeDropLogicalSubscriptions,
-    HandleNeonExtension, HandleOtherExtensions, RenameAndDeleteDatabases, RenameRoles,
-    RunInEachDatabase,
+    CreateAndAlterDatabases, CreateAndAlterRoles, CreateAvailabilityCheck, CreatePgauditExtension,
+    CreatePgauditlogtofileExtension, CreateSchemaNeon, CreateSuperUser, DisablePostgresDBPgAudit,
+    DropInvalidDatabases, DropRoles, FinalizeDropLogicalSubscriptions, HandleNeonExtension,
+    HandleOtherExtensions, RenameAndDeleteDatabases, RenameRoles, RunInEachDatabase,
 };
 use crate::spec_apply::PerDatabasePhase::{
     ChangeSchemaPerms, DeleteDBRoleReferences, DropLogicalSubscriptions, HandleAnonExtension,
@@ -277,6 +277,19 @@ impl ComputeNode {
             phases.push(FinalizeDropLogicalSubscriptions);
         }
 
+        // Keep the DisablePostgresDBPgAudit phase at the end,
+        // so that all config operations are audit logged.
+        match spec.audit_log_level {
+            ComputeAudit::Hipaa => {
+                phases.push(CreatePgauditExtension);
+                phases.push(CreatePgauditlogtofileExtension);
+                phases.push(DisablePostgresDBPgAudit);
+            }
+            ComputeAudit::Log => { /* not implemented yet */ }
+            ComputeAudit::Disabled => {}
+        }
+
         for phase in phases {
             debug!("Applying phase {:?}", &phase);
             apply_operations(
@@ -463,6 +476,9 @@ pub enum ApplySpecPhase {
     CreateAndAlterDatabases,
     CreateSchemaNeon,
     RunInEachDatabase { db: DB, subphase: PerDatabasePhase },
+    CreatePgauditExtension,
+    CreatePgauditlogtofileExtension,
+    DisablePostgresDBPgAudit,
     HandleOtherExtensions,
     HandleNeonExtension,
     CreateAvailabilityCheck,
@@ -1098,6 +1114,25 @@ async fn get_operations<'a>(
             }
             Ok(Box::new(empty()))
         }
+        ApplySpecPhase::CreatePgauditExtension => Ok(Box::new(once(Operation {
+            query: String::from("CREATE EXTENSION IF NOT EXISTS pgaudit"),
+            comment: Some(String::from("create pgaudit extensions")),
+        }))),
+        ApplySpecPhase::CreatePgauditlogtofileExtension => Ok(Box::new(once(Operation {
+            query: String::from("CREATE EXTENSION IF NOT EXISTS pgauditlogtofile"),
+            comment: Some(String::from("create pgauditlogtofile extensions")),
+        }))),
+        // Disable pgaudit logging for the postgres database.
+        // postgres is the Neon system database used by monitors
+        // and compute_ctl tuning functions, and thus generates a lot of noise.
+        // We do not consider data stored in this database as sensitive.
+        ApplySpecPhase::DisablePostgresDBPgAudit => {
+            let query = "ALTER DATABASE postgres SET pgaudit.log to 'none'";
+            Ok(Box::new(once(Operation {
+                query: query.to_string(),
+                comment: Some(query.to_string()),
+            })))
+        }
         ApplySpecPhase::HandleNeonExtension => {
             let operations = vec![
                 Operation {
@@ -48,7 +48,8 @@ use anyhow::{Context, Result, anyhow, bail};
 use compute_api::requests::ConfigurationRequest;
 use compute_api::responses::{ComputeCtlConfig, ComputeStatus, ComputeStatusResponse};
 use compute_api::spec::{
-    Cluster, ComputeFeature, ComputeMode, ComputeSpec, Database, PgIdent, RemoteExtSpec, Role,
+    Cluster, ComputeAudit, ComputeFeature, ComputeMode, ComputeSpec, Database, PgIdent,
+    RemoteExtSpec, Role,
 };
 use nix::sys::signal::{Signal, kill};
 use pageserver_api::shard::ShardStripeSize;
@@ -668,6 +669,7 @@ impl Endpoint {
             local_proxy_config: None,
             reconfigure_concurrency: self.reconfigure_concurrency,
             drop_subscriptions_before_start: self.drop_subscriptions_before_start,
+            audit_log_level: ComputeAudit::Disabled,
         };
 
         // this strange code is needed to support respec() in tests
@@ -155,6 +155,16 @@ pub struct ComputeSpec {
     /// over the same replication content from publisher.
     #[serde(default)] // Default false
     pub drop_subscriptions_before_start: bool,
+
+    /// Log level for audit logging:
+    ///
+    /// Disabled - no audit logging. This is the default.
+    /// log - log masked statements to the postgres log using the pgaudit extension
+    /// hipaa - log unmasked statements to a file using the pgaudit and pgauditlogtofile extensions
+    ///
+    /// The extensions must be present in shared_preload_libraries.
+    #[serde(default)]
+    pub audit_log_level: ComputeAudit,
 }
 
 /// Feature flag to signal `compute_ctl` to enable certain experimental functionality.
@@ -262,6 +272,17 @@ pub enum ComputeMode {
     Replica,
 }
 
+/// Log level for audit logging
+/// Disabled, log, hipaa
+/// Default is Disabled
+#[derive(Clone, Debug, Default, Eq, PartialEq, Deserialize, Serialize)]
+pub enum ComputeAudit {
+    #[default]
+    Disabled,
+    Log,
+    Hipaa,
+}
+
 #[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq, Eq)]
 pub struct Cluster {
     pub cluster_id: Option<String>,
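Because the new ComputeSpec field is marked #[serde(default)] and the enum has a #[default] variant, older specs that omit audit_log_level keep deserializing. The sketch below re-declares a look-alike enum and a minimal SpecFragment struct to demonstrate that behavior; it assumes serde (with derive) and serde_json are available. Note the enum as declared above has no serde rename attribute, so the variant name is matched verbatim here; the spelling the control plane actually sends (e.g. 'hipaa') may be mapped differently on the cloud side.

use serde::{Deserialize, Serialize};

// Look-alike of the ComputeAudit enum above, for demonstration only.
#[derive(Clone, Debug, Default, Eq, PartialEq, Deserialize, Serialize)]
enum ComputeAudit {
    #[default]
    Disabled,
    Log,
    Hipaa,
}

#[derive(Deserialize)]
struct SpecFragment {
    #[serde(default)]
    audit_log_level: ComputeAudit,
}

fn main() {
    // A spec without the field deserializes to the default (Disabled)...
    let old: SpecFragment = serde_json::from_str("{}").unwrap();
    assert_eq!(old.audit_log_level, ComputeAudit::Disabled);

    // ...and an explicit value selects the variant by name.
    let hipaa: SpecFragment = serde_json::from_str(r#"{"audit_log_level": "Hipaa"}"#).unwrap();
    assert_eq!(hipaa.audit_log_level, ComputeAudit::Hipaa);
}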