diff --git a/Cargo.lock b/Cargo.lock
index 22079976ae..ca6ae469d8 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1196,10 +1196,6 @@ dependencies = [
  "scopeguard",
  "serde",
  "serde_json",
- "slog",
- "slog-scope",
- "slog-stdlog",
- "slog-term",
  "tar",
  "thiserror",
  "tokio",
@@ -2333,11 +2329,6 @@ dependencies = [
  "regex",
  "rust-s3",
  "serde",
- "slog",
- "slog-async",
- "slog-scope",
- "slog-stdlog",
- "slog-term",
  "tokio",
  "tokio-stream",
  "walkdir",
@@ -2586,6 +2577,11 @@ dependencies = [
  "rustls-split",
  "serde",
  "serde_json",
+ "slog",
+ "slog-async",
+ "slog-scope",
+ "slog-stdlog",
+ "slog-term",
  "thiserror",
  "tokio",
  "webpki",
diff --git a/pageserver/Cargo.toml b/pageserver/Cargo.toml
index 87459448eb..936c41ff24 100644
--- a/pageserver/Cargo.toml
+++ b/pageserver/Cargo.toml
@@ -16,10 +16,6 @@ byteorder = "1.4.3"
 futures = "0.3.13"
 hyper = "0.14"
 lazy_static = "1.4.0"
-slog-stdlog = "4.1.0"
-slog-scope = "4.4.0"
-slog-term = "2.8.0"
-slog = "2.7.0"
 log = "0.4.14"
 clap = "2.33.0"
 daemonize = "0.4.1"
diff --git a/pageserver/src/bin/pageserver.rs b/pageserver/src/bin/pageserver.rs
index 1e6948daf1..2da1e677fb 100644
--- a/pageserver/src/bin/pageserver.rs
+++ b/pageserver/src/bin/pageserver.rs
@@ -13,13 +13,13 @@ use std::{
     thread,
     time::Duration,
 };
-use zenith_utils::{auth::JwtAuth, postgres_backend::AuthType};
+use zenith_utils::{auth::JwtAuth, logging, postgres_backend::AuthType};
 
 use anyhow::{ensure, Result};
 use clap::{App, Arg, ArgMatches};
 use daemonize::Daemonize;
 
-use pageserver::{branches, http, logger, page_service, tenant_mgr, PageServerConf};
+use pageserver::{branches, http, page_service, tenant_mgr, PageServerConf, LOG_FILE_NAME};
 use zenith_utils::http::endpoint;
 
 const DEFAULT_LISTEN_ADDR: &str = "127.0.0.1:64000";
@@ -273,11 +273,7 @@ fn main() -> Result<()> {
 
 fn start_pageserver(conf: &'static PageServerConf) -> Result<()> {
     // Initialize logger
-    let (_scope_guard, log_file) = logger::init_logging(conf, "pageserver.log")?;
-    let _log_guard = slog_stdlog::init()?;
-
-    // Note: this `info!(...)` macro comes from `log` crate
-    info!("standard logging redirected to slog");
+    let (_scope_guard, log_file) = logging::init(LOG_FILE_NAME, conf.daemonize)?;
 
     // TODO: Check that it looks like a valid repository before going further
 
diff --git a/pageserver/src/branches.rs b/pageserver/src/branches.rs
index 958e03fb29..40b2233e77 100644
--- a/pageserver/src/branches.rs
+++ b/pageserver/src/branches.rs
@@ -17,13 +17,13 @@ use std::{
 use zenith_utils::zid::{ZTenantId, ZTimelineId};
 
 use log::*;
+use zenith_utils::logging;
 use zenith_utils::lsn::Lsn;
 
-use crate::logger;
-use crate::restore_local_repo;
 use crate::tenant_mgr;
 use crate::walredo::WalRedoManager;
 use crate::{repository::Repository, PageServerConf};
+use crate::{restore_local_repo, LOG_FILE_NAME};
 
 #[derive(Serialize, Deserialize, Clone)]
 pub struct BranchInfo {
@@ -98,8 +98,8 @@ pub struct PointInTime {
 
 pub fn init_pageserver(conf: &'static PageServerConf, create_tenant: Option<&str>) -> Result<()> {
     // Initialize logger
-    let (_scope_guard, _log_file) = logger::init_logging(conf, "pageserver.log")?;
-    let _log_guard = slog_stdlog::init()?;
+    // Use `true` as the daemonize parameter: otherwise we pollute the zenith cli output with several pages of info messages
+    let (_scope_guard, _log_file) = logging::init(LOG_FILE_NAME, true)?;
 
     // We don't use the real WAL redo manager, because we don't want to spawn the WAL redo
     // process during repository initialization.
@@ -138,9 +138,6 @@ pub fn create_repo(
     fs::create_dir_all(&repo_dir)
         .with_context(|| format!("could not create directory {}", repo_dir.display()))?;
 
-    // Note: this `info!(...)` macro comes from `log` crate
-    info!("standard logging redirected to slog");
-
     fs::create_dir(conf.timelines_path(&tenantid))?;
     fs::create_dir_all(conf.branches_path(&tenantid))?;
     fs::create_dir_all(conf.tags_path(&tenantid))?;
diff --git a/pageserver/src/lib.rs b/pageserver/src/lib.rs
index 314e6c1e33..2b52f17009 100644
--- a/pageserver/src/lib.rs
+++ b/pageserver/src/lib.rs
@@ -11,7 +11,6 @@ pub mod basebackup;
 pub mod branches;
 pub mod http;
 pub mod layered_repository;
-pub mod logger;
 pub mod page_service;
 pub mod relish;
 pub mod repository;
@@ -30,6 +29,8 @@ lazy_static! {
     .expect("failed to define a metric");
 }
 
+pub const LOG_FILE_NAME: &str = "pageserver.log";
+
 #[derive(Debug, Clone)]
 pub struct PageServerConf {
     pub daemonize: bool,
diff --git a/pageserver/src/logger.rs b/pageserver/src/logger.rs
deleted file mode 100644
index 56006a54af..0000000000
--- a/pageserver/src/logger.rs
+++ /dev/null
@@ -1,45 +0,0 @@
-use crate::PageServerConf;
-
-use anyhow::{Context, Result};
-use slog::{Drain, FnValue};
-use std::fs::{File, OpenOptions};
-
-pub fn init_logging(
-    _conf: &PageServerConf,
-    log_filename: &str,
-) -> Result<(slog_scope::GlobalLoggerGuard, File)> {
-    // Don't open the same file for output multiple times;
-    // the different fds could overwrite each other's output.
-    let log_file = OpenOptions::new()
-        .create(true)
-        .append(true)
-        .open(&log_filename)
-        .with_context(|| format!("failed to open {:?}", &log_filename))?;
-
-    let logger_file = log_file.try_clone().unwrap();
-
-    let decorator = slog_term::PlainSyncDecorator::new(logger_file);
-    let drain = slog_term::FullFormat::new(decorator).build();
-    let drain = slog::Filter::new(drain, |record: &slog::Record| {
-        if record.level().is_at_least(slog::Level::Info) {
-            return true;
-        }
-        false
-    });
-    let drain = std::sync::Mutex::new(drain).fuse();
-    let logger = slog::Logger::root(
-        drain,
-        slog::o!(
-            "location" =>
-            FnValue(move |record| {
-                format!("{}, {}:{}",
-                    record.module(),
-                    record.file(),
-                    record.line()
-                )
-            }
-            )
-        ),
-    );
-    Ok((slog_scope::set_global_logger(logger), log_file))
-}
diff --git a/walkeeper/Cargo.toml b/walkeeper/Cargo.toml
index 4c83f1c4ba..20f967ec1b 100644
--- a/walkeeper/Cargo.toml
+++ b/walkeeper/Cargo.toml
@@ -13,11 +13,6 @@ bytes = "1.0.1"
 byteorder = "1.4.3"
 fs2 = "0.4.3"
 lazy_static = "1.4.0"
-slog-stdlog = "4.1.0"
-slog-async = "2.6.0"
-slog-scope = "4.4.0"
-slog-term = "2.8.0"
-slog = "2.7.0"
 log = "0.4.14"
 clap = "2.33.0"
 daemonize = "0.4.1"
diff --git a/walkeeper/src/bin/wal_acceptor.rs b/walkeeper/src/bin/wal_acceptor.rs
index cf7cea19cc..d8a0ab6737 100644
--- a/walkeeper/src/bin/wal_acceptor.rs
+++ b/walkeeper/src/bin/wal_acceptor.rs
@@ -1,15 +1,14 @@
 //
 // Main entry point for the wal_acceptor executable
 //
-use anyhow::{Context, Result};
+use anyhow::Result;
 use clap::{App, Arg};
 use daemonize::Daemonize;
 use log::*;
-use slog::Drain;
+use std::env;
 use std::path::{Path, PathBuf};
 use std::thread;
-use std::{env, io};
-use std::{fs::File, fs::OpenOptions};
+use zenith_utils::logging;
 
 use walkeeper::s3_offload;
 use walkeeper::wal_service;
@@ -113,20 +112,7 @@ fn main() -> Result<()> {
 
 fn start_wal_acceptor(conf: WalAcceptorConf) -> Result<()> {
     let log_filename = conf.data_dir.join("wal_acceptor.log");
-    // Don't open the same file for output multiple times;
-    // the different fds could overwrite each other's output.
-    let log_file = OpenOptions::new()
-        .create(true)
-        .append(true)
-        .open(&log_filename)
-        .with_context(|| format!("failed to open {:?}", &log_filename))?;
-
-    // Initialize logger
-    let logger_file = log_file.try_clone().unwrap();
-    let _scope_guard = init_logging(&conf, logger_file)?;
-    let _log_guard = slog_stdlog::init().unwrap();
-    // Note: this `info!(...)` macro comes from `log` crate
-    info!("standard logging redirected to slog");
+    let (_scope_guard, log_file) = logging::init(log_filename, conf.daemonize)?;
 
     if conf.daemonize {
         info!("daemonizing...");
@@ -179,29 +165,3 @@ fn start_wal_acceptor(conf: WalAcceptorConf) -> Result<()> {
     }
     Ok(())
 }
-
-fn init_logging(
-    conf: &WalAcceptorConf,
-    log_file: File,
-) -> Result<slog_scope::GlobalLoggerGuard> {
-    if conf.daemonize {
-        let decorator = slog_term::PlainSyncDecorator::new(log_file);
-        let drain = slog_term::CompactFormat::new(decorator).build();
-        let drain = slog::Filter::new(drain, |record: &slog::Record| {
-            record.level().is_at_least(slog::Level::Info)
-        });
-        let drain = std::sync::Mutex::new(drain).fuse();
-        let logger = slog::Logger::root(drain, slog::o!());
-        Ok(slog_scope::set_global_logger(logger))
-    } else {
-        let decorator = slog_term::TermDecorator::new().build();
-        let drain = slog_term::FullFormat::new(decorator).build().fuse();
-        let drain = slog::Filter::new(drain, |record: &slog::Record| {
-            record.level().is_at_least(slog::Level::Info)
-        })
-        .fuse();
-        let drain = slog_async::Async::new(drain).chan_size(1000).build().fuse();
-        let logger = slog::Logger::root(drain, slog::o!());
-        Ok(slog_scope::set_global_logger(logger))
-    }
-}
diff --git a/zenith_utils/Cargo.toml b/zenith_utils/Cargo.toml
index 731e2d45a2..f6f2dfbc98 100644
--- a/zenith_utils/Cargo.toml
+++ b/zenith_utils/Cargo.toml
@@ -19,6 +19,12 @@ serde_json = "1"
 thiserror = "1.0"
 tokio = { version = "1.5.0", features = ["full"] }
 
+slog-async = "2.6.0"
+slog-stdlog = "4.1.0"
+slog-scope = "4.4.0"
+slog-term = "2.8.0"
+slog = "2.7.0"
+
 zenith_metrics = { path = "../zenith_metrics" }
 workspace_hack = { path = "../workspace_hack" }
 rand = "0.8.3"
diff --git a/zenith_utils/src/lib.rs b/zenith_utils/src/lib.rs
index ba0ac7ce23..302069494c 100644
--- a/zenith_utils/src/lib.rs
+++ b/zenith_utils/src/lib.rs
@@ -28,3 +28,6 @@ pub mod http;
 
 // socket splitting utils
 pub mod sock_split;
+
+// common log initialisation routine
+pub mod logging;
diff --git a/zenith_utils/src/logging.rs b/zenith_utils/src/logging.rs
new file mode 100644
index 0000000000..c6ed35cbf4
--- /dev/null
+++ b/zenith_utils/src/logging.rs
@@ -0,0 +1,47 @@
+use slog::{Drain, Level};
+use std::{
+    fs::{File, OpenOptions},
+    path::Path,
+};
+
+use anyhow::{Context, Result};
+
+pub fn init(
+    log_filename: impl AsRef<Path>,
+    daemonize: bool,
+) -> Result<(slog_scope::GlobalLoggerGuard, File)> {
+    // Don't open the same file for output multiple times;
+    // the different fds could overwrite each other's output.
+    let log_file = OpenOptions::new()
+        .create(true)
+        .append(true)
+        .open(&log_filename)
+        .with_context(|| format!("failed to open {:?}", log_filename.as_ref()))?;
+
+    // We clone and return the log file so that daemonized stdout and stderr can be redirected to it.
+    // If we do not daemonize (e.g. in docker), it is better to log to stdout directly,
+    // to stay in line with the `docker logs` command, which expects logs coming from stdout.
+    let guard = if daemonize {
+        let decorator = slog_term::PlainSyncDecorator::new(log_file.try_clone()?);
+        let drain = slog_term::FullFormat::new(decorator)
+            .build()
+            .filter_level(Level::Info)
+            .fuse();
+        let logger = slog::Logger::root(drain, slog::o!());
+        slog_scope::set_global_logger(logger)
+    } else {
+        let decorator = slog_term::TermDecorator::new().build();
+        let drain = slog_term::FullFormat::new(decorator)
+            .build()
+            .filter_level(Level::Info)
+            .fuse();
+        let drain = slog_async::Async::new(drain).chan_size(1000).build().fuse();
+        let logger = slog::Logger::root(drain, slog::o!());
+        slog_scope::set_global_logger(logger)
+    };
+
+    // initialise forwarding of standard `log` crate calls to slog
+    slog_stdlog::init()?;
+
+    Ok((guard, log_file))
+}
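For reference, a minimal usage sketch (not part of the patch) of how a binary is expected to call the new zenith_utils::logging::init, modelled on the call sites above. The run function, the "service.log" file name and the exact Daemonize builder chain are illustrative assumptions rather than code from this change; the returned File is what lets the daemonized process point its stdout/stderr at the same log file.

// Illustrative sketch only: names and daemonize wiring are assumptions, not taken from the patch.
use anyhow::Result;
use daemonize::Daemonize;
use log::*;
use zenith_utils::logging;

fn run(daemonize: bool) -> Result<()> {
    // Keep the guard alive for the lifetime of the process: dropping it
    // tears down the global slog logger installed by logging::init.
    let (_scope_guard, log_file) = logging::init("service.log", daemonize)?;

    if daemonize {
        // Redirect the daemon's stdout/stderr into the same file the logger writes to;
        // this is why init returns the opened File alongside the guard.
        Daemonize::new()
            .stdout(log_file.try_clone()?)
            .stderr(log_file)
            .start()
            .map_err(|err| anyhow::anyhow!("could not daemonize: {}", err))?;
    }

    // Plain `log` macros now reach slog via the slog_stdlog forwarding set up in init.
    info!("service started");
    Ok(())
}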