diff --git a/Cargo.lock b/Cargo.lock index 4a78b2e504..d850d3bd89 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -495,8 +495,8 @@ name = "control_plane" version = "0.1.0" dependencies = [ "anyhow", - "lazy_static", "nix", + "once_cell", "pageserver", "postgres", "regex", @@ -1591,8 +1591,8 @@ dependencies = [ name = "metrics" version = "0.1.0" dependencies = [ - "lazy_static", "libc", + "once_cell", "prometheus", "workspace_hack", ] @@ -1870,7 +1870,6 @@ dependencies = [ "humantime-serde", "hyper", "itertools", - "lazy_static", "metrics", "nix", "once_cell", @@ -2116,9 +2115,9 @@ dependencies = [ "crc32c", "env_logger", "hex", - "lazy_static", "log", "memoffset", + "once_cell", "postgres", "rand", "regex", @@ -2278,9 +2277,9 @@ dependencies = [ "hex", "hmac 0.12.1", "hyper", - "lazy_static", "md5", "metrics", + "once_cell", "parking_lot 0.12.1", "pin-project-lite", "rand", @@ -2754,7 +2753,6 @@ dependencies = [ "hex", "humantime", "hyper", - "lazy_static", "metrics", "once_cell", "postgres", @@ -3671,9 +3669,9 @@ dependencies = [ "hex-literal", "hyper", "jsonwebtoken", - "lazy_static", "metrics", "nix", + "once_cell", "pin-project-lite", "postgres", "postgres-protocol", diff --git a/control_plane/Cargo.toml b/control_plane/Cargo.toml index 26bb577636..425eb332c3 100644 --- a/control_plane/Cargo.toml +++ b/control_plane/Cargo.toml @@ -9,7 +9,7 @@ postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8 serde = { version = "1.0", features = ["derive"] } serde_with = "1.12.0" toml = "0.5" -lazy_static = "1.4" +once_cell = "1.13.0" regex = "1" anyhow = "1.0" thiserror = "1" diff --git a/control_plane/src/postgresql_conf.rs b/control_plane/src/postgresql_conf.rs index 83765b2c95..a71108da01 100644 --- a/control_plane/src/postgresql_conf.rs +++ b/control_plane/src/postgresql_conf.rs @@ -5,7 +5,7 @@ /// enough to extract a few settings we need in Zenith, assuming you don't do /// funny stuff like include-directives or funny escaping. use anyhow::{bail, Context, Result}; -use lazy_static::lazy_static; +use once_cell::sync::Lazy; use regex::Regex; use std::collections::HashMap; use std::fmt; @@ -19,9 +19,7 @@ pub struct PostgresConf { hash: HashMap, } -lazy_static! { - static ref CONF_LINE_RE: Regex = Regex::new(r"^((?:\w|\.)+)\s*=\s*(\S+)$").unwrap(); -} +static CONF_LINE_RE: Lazy = Lazy::new(|| Regex::new(r"^((?:\w|\.)+)\s*=\s*(\S+)$").unwrap()); impl PostgresConf { pub fn new() -> PostgresConf { @@ -139,10 +137,10 @@ fn escape_str(s: &str) -> String { // // This regex is a bit more conservative than the rules in guc-file.l, so we quote some // strings that PostgreSQL would accept without quoting, but that's OK. - lazy_static! 
{ - static ref UNQUOTED_RE: Regex = - Regex::new(r"(^[-+]?[0-9]+[a-zA-Z]*$)|(^[a-zA-Z][a-zA-Z0-9]*$)").unwrap(); - } + + static UNQUOTED_RE: Lazy = + Lazy::new(|| Regex::new(r"(^[-+]?[0-9]+[a-zA-Z]*$)|(^[a-zA-Z][a-zA-Z0-9]*$)").unwrap()); + if UNQUOTED_RE.is_match(s) { s.to_string() } else { diff --git a/libs/etcd_broker/Cargo.toml b/libs/etcd_broker/Cargo.toml index 49be7ad207..f7bfbad4ba 100644 --- a/libs/etcd_broker/Cargo.toml +++ b/libs/etcd_broker/Cargo.toml @@ -9,7 +9,7 @@ serde = { version = "1.0", features = ["derive"] } serde_json = "1" serde_with = "1.12.0" - once_cell = "1.8.0" + once_cell = "1.13.0" utils = { path = "../utils" } workspace_hack = { version = "0.1", path = "../../workspace_hack" } diff --git a/libs/metrics/Cargo.toml b/libs/metrics/Cargo.toml index 2879dfed81..d0cd46d2a9 100644 --- a/libs/metrics/Cargo.toml +++ b/libs/metrics/Cargo.toml @@ -6,5 +6,5 @@ edition = "2021" [dependencies] prometheus = {version = "0.13", default_features=false, features = ["process"]} # removes protobuf dependency libc = "0.2" -lazy_static = "1.4" +once_cell = "1.13.0" workspace_hack = { version = "0.1", path = "../../workspace_hack" } diff --git a/libs/metrics/src/lib.rs b/libs/metrics/src/lib.rs index ea24b3fe7e..920d3fd17e 100644 --- a/libs/metrics/src/lib.rs +++ b/libs/metrics/src/lib.rs @@ -2,7 +2,7 @@ //! make sure that we use the same dep version everywhere. //! Otherwise, we might not see all metrics registered via //! a default registry. -use lazy_static::lazy_static; +use once_cell::sync::Lazy; use prometheus::core::{AtomicU64, GenericGauge, GenericGaugeVec}; pub use prometheus::opts; pub use prometheus::register; @@ -41,19 +41,22 @@ pub fn gather() -> Vec { prometheus::gather() } -lazy_static! { - static ref DISK_IO_BYTES: IntGaugeVec = register_int_gauge_vec!( +static DISK_IO_BYTES: Lazy = Lazy::new(|| { + register_int_gauge_vec!( "libmetrics_disk_io_bytes_total", "Bytes written and read from disk, grouped by the operation (read|write)", &["io_operation"] ) - .expect("Failed to register disk i/o bytes int gauge vec"); - static ref MAXRSS_KB: IntGauge = register_int_gauge!( + .expect("Failed to register disk i/o bytes int gauge vec") +}); + +static MAXRSS_KB: Lazy = Lazy::new(|| { + register_int_gauge!( "libmetrics_maxrss_kb", "Memory usage (Maximum Resident Set Size)" ) - .expect("Failed to register maxrss_kb int gauge"); -} + .expect("Failed to register maxrss_kb int gauge") +}); pub const DISK_WRITE_SECONDS_BUCKETS: &[f64] = &[ 0.000_050, 0.000_100, 0.000_500, 0.001, 0.003, 0.005, 0.01, 0.05, 0.1, 0.3, 0.5, diff --git a/libs/metrics/src/wrappers.rs b/libs/metrics/src/wrappers.rs index de334add99..1bf1ea0753 100644 --- a/libs/metrics/src/wrappers.rs +++ b/libs/metrics/src/wrappers.rs @@ -10,13 +10,13 @@ use std::io::{Read, Result, Write}; /// # use std::io::{Result, Read}; /// # use metrics::{register_int_counter, IntCounter}; /// # use metrics::CountedReader; +/// # use once_cell::sync::Lazy; /// # -/// # lazy_static::lazy_static! { -/// # static ref INT_COUNTER: IntCounter = register_int_counter!( +/// # static INT_COUNTER: Lazy = Lazy::new( || { register_int_counter!( /// # "int_counter", /// # "let's count something!" 
-/// # ).unwrap(); -/// # } +/// # ).unwrap() +/// # }); /// # /// fn do_some_reads(stream: impl Read, count: usize) -> Result> { /// let mut reader = CountedReader::new(stream, |cnt| { @@ -85,13 +85,13 @@ impl Read for CountedReader<'_, T> { /// # use std::io::{Result, Write}; /// # use metrics::{register_int_counter, IntCounter}; /// # use metrics::CountedWriter; +/// # use once_cell::sync::Lazy; /// # -/// # lazy_static::lazy_static! { -/// # static ref INT_COUNTER: IntCounter = register_int_counter!( +/// # static INT_COUNTER: Lazy = Lazy::new( || { register_int_counter!( /// # "int_counter", /// # "let's count something!" -/// # ).unwrap(); -/// # } +/// # ).unwrap() +/// # }); /// # /// fn do_some_writes(stream: impl Write, payload: &[u8]) -> Result<()> { /// let mut writer = CountedWriter::new(stream, |cnt| { diff --git a/libs/postgres_ffi/Cargo.toml b/libs/postgres_ffi/Cargo.toml index c9cc858ab9..0118701a7e 100644 --- a/libs/postgres_ffi/Cargo.toml +++ b/libs/postgres_ffi/Cargo.toml @@ -12,7 +12,7 @@ byteorder = "1.4.3" anyhow = "1.0" crc32c = "0.6.0" hex = "0.4.3" -lazy_static = "1.4" +once_cell = "1.13.0" log = "0.4.14" memoffset = "0.6.2" thiserror = "1.0" diff --git a/libs/postgres_ffi/src/relfile_utils.rs b/libs/postgres_ffi/src/relfile_utils.rs index 97c8f0afea..94498ee9a9 100644 --- a/libs/postgres_ffi/src/relfile_utils.rs +++ b/libs/postgres_ffi/src/relfile_utils.rs @@ -2,7 +2,7 @@ //! Common utilities for dealing with PostgreSQL relation files. //! use crate::pg_constants; -use lazy_static::lazy_static; +use once_cell::sync::OnceCell; use regex::Regex; #[derive(Debug, Clone, thiserror::Error, PartialEq)] @@ -54,11 +54,14 @@ pub fn forknumber_to_name(forknum: u8) -> Option<&'static str> { /// See functions relpath() and _mdfd_segpath() in PostgreSQL sources. /// pub fn parse_relfilename(fname: &str) -> Result<(u32, u8, u32), FilePathError> { - lazy_static! 
{ - static ref RELFILE_RE: Regex = - Regex::new(r"^(?P\d+)(_(?P[a-z]+))?(\.(?P\d+))?$").unwrap(); - } + static RELFILE_RE: OnceCell = OnceCell::new(); + RELFILE_RE.get_or_init(|| { + Regex::new(r"^(?P\d+)(_(?P[a-z]+))?(\.(?P\d+))?$").unwrap() + }); + let caps = RELFILE_RE + .get() + .unwrap() .captures(fname) .ok_or(FilePathError::InvalidFileName)?; diff --git a/libs/postgres_ffi/wal_craft/Cargo.toml b/libs/postgres_ffi/wal_craft/Cargo.toml index 374c8e2e55..114f08113b 100644 --- a/libs/postgres_ffi/wal_craft/Cargo.toml +++ b/libs/postgres_ffi/wal_craft/Cargo.toml @@ -10,7 +10,7 @@ anyhow = "1.0" clap = "3.0" env_logger = "0.9" log = "0.4" -once_cell = "1.8.0" +once_cell = "1.13.0" postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } postgres_ffi = { path = "../" } tempfile = "3.2" diff --git a/libs/remote_storage/Cargo.toml b/libs/remote_storage/Cargo.toml index b11b3cf371..b3485f274a 100644 --- a/libs/remote_storage/Cargo.toml +++ b/libs/remote_storage/Cargo.toml @@ -7,7 +7,7 @@ edition = "2021" anyhow = { version = "1.0", features = ["backtrace"] } async-trait = "0.1" metrics = { version = "0.1", path = "../metrics" } -once_cell = "1.8.0" +once_cell = "1.13.0" rusoto_core = "0.48" rusoto_s3 = "0.48" serde = { version = "1.0", features = ["derive"] } diff --git a/libs/utils/Cargo.toml b/libs/utils/Cargo.toml index d83b02d7ae..e3e78ec68f 100644 --- a/libs/utils/Cargo.toml +++ b/libs/utils/Cargo.toml @@ -8,7 +8,6 @@ anyhow = "1.0" bincode = "1.3" bytes = "1.0.1" hyper = { version = "0.14.7", features = ["full"] } -lazy_static = "1.4.0" pin-project-lite = "0.2.7" postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } postgres-protocol = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } @@ -28,6 +27,8 @@ rustls = "0.20.2" rustls-split = "0.3.0" git-version = "0.3.5" serde_with = "1.12.0" +once_cell = "1.13.0" + metrics = { path = "../metrics" } workspace_hack = { version = "0.1", path = "../../workspace_hack" } diff --git a/libs/utils/src/http/endpoint.rs b/libs/utils/src/http/endpoint.rs index 51bff5f6eb..69bf5ef87a 100644 --- a/libs/utils/src/http/endpoint.rs +++ b/libs/utils/src/http/endpoint.rs @@ -4,8 +4,8 @@ use crate::zid::ZTenantId; use anyhow::anyhow; use hyper::header::AUTHORIZATION; use hyper::{header::CONTENT_TYPE, Body, Request, Response, Server}; -use lazy_static::lazy_static; use metrics::{register_int_counter, Encoder, IntCounter, TextEncoder}; +use once_cell::sync::Lazy; use routerify::ext::RequestExt; use routerify::RequestInfo; use routerify::{Middleware, Router, RouterBuilder, RouterService}; @@ -16,13 +16,13 @@ use std::net::TcpListener; use super::error::ApiError; -lazy_static! 
{ - static ref SERVE_METRICS_COUNT: IntCounter = register_int_counter!( +static SERVE_METRICS_COUNT: Lazy = Lazy::new(|| { + register_int_counter!( "libmetrics_metric_handler_requests_total", "Number of metric requests made" ) - .expect("failed to define a metric"); -} + .expect("failed to define a metric") +}); async fn logger(res: Response, info: RequestInfo) -> Result, ApiError> { info!("{} {} {}", info.method(), info.uri().path(), res.status(),); diff --git a/libs/utils/tests/ssl_test.rs b/libs/utils/tests/ssl_test.rs index 002361667b..907ef98aec 100644 --- a/libs/utils/tests/ssl_test.rs +++ b/libs/utils/tests/ssl_test.rs @@ -7,7 +7,7 @@ use std::{ use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; use bytes::{Buf, BufMut, Bytes, BytesMut}; -use lazy_static::lazy_static; +use once_cell::sync::Lazy; use utils::postgres_backend::{AuthType, Handler, PostgresBackend}; @@ -19,16 +19,15 @@ fn make_tcp_pair() -> (TcpStream, TcpStream) { (server_stream, client_stream) } -lazy_static! { - static ref KEY: rustls::PrivateKey = { - let mut cursor = Cursor::new(include_bytes!("key.pem")); - rustls::PrivateKey(rustls_pemfile::rsa_private_keys(&mut cursor).unwrap()[0].clone()) - }; - static ref CERT: rustls::Certificate = { - let mut cursor = Cursor::new(include_bytes!("cert.pem")); - rustls::Certificate(rustls_pemfile::certs(&mut cursor).unwrap()[0].clone()) - }; -} +static KEY: Lazy = Lazy::new(|| { + let mut cursor = Cursor::new(include_bytes!("key.pem")); + rustls::PrivateKey(rustls_pemfile::rsa_private_keys(&mut cursor).unwrap()[0].clone()) +}); + +static CERT: Lazy = Lazy::new(|| { + let mut cursor = Cursor::new(include_bytes!("cert.pem")); + rustls::Certificate(rustls_pemfile::certs(&mut cursor).unwrap()[0].clone()) +}); #[test] fn ssl() { diff --git a/pageserver/Cargo.toml b/pageserver/Cargo.toml index 215fa151a0..63a2263ae0 100644 --- a/pageserver/Cargo.toml +++ b/pageserver/Cargo.toml @@ -21,7 +21,6 @@ futures = "0.3.13" hex = "0.4.3" hyper = "0.14" itertools = "0.10.3" -lazy_static = "1.4.0" clap = "3.0" daemonize = "0.4.1" tokio = { version = "1.17", features = ["process", "sync", "macros", "fs", "rt", "io-util", "time"] } @@ -48,7 +47,7 @@ tracing = "0.1.27" signal-hook = "0.3.10" url = "2" nix = "0.23" -once_cell = "1.8.0" +once_cell = "1.13.0" crossbeam-utils = "0.8.5" fail = "0.5.0" git-version = "0.3.5" diff --git a/pageserver/src/layered_repository/block_io.rs b/pageserver/src/layered_repository/block_io.rs index d027b2f0e7..bc3bc082a0 100644 --- a/pageserver/src/layered_repository/block_io.rs +++ b/pageserver/src/layered_repository/block_io.rs @@ -5,7 +5,7 @@ use crate::page_cache; use crate::page_cache::{ReadBufResult, PAGE_SZ}; use bytes::Bytes; -use lazy_static::lazy_static; +use once_cell::sync::Lazy; use std::ops::{Deref, DerefMut}; use std::os::unix::fs::FileExt; use std::sync::atomic::AtomicU64; @@ -117,9 +117,7 @@ where } } -lazy_static! { - static ref NEXT_ID: AtomicU64 = AtomicU64::new(1); -} +static NEXT_ID: Lazy = Lazy::new(|| AtomicU64::new(1)); /// An adapter for reading a (virtual) file using the page cache. 
/// diff --git a/pageserver/src/layered_repository/ephemeral_file.rs b/pageserver/src/layered_repository/ephemeral_file.rs index 299bb4e873..1776946e7a 100644 --- a/pageserver/src/layered_repository/ephemeral_file.rs +++ b/pageserver/src/layered_repository/ephemeral_file.rs @@ -8,7 +8,7 @@ use crate::page_cache; use crate::page_cache::PAGE_SZ; use crate::page_cache::{ReadBufResult, WriteBufResult}; use crate::virtual_file::VirtualFile; -use lazy_static::lazy_static; +use once_cell::sync::Lazy; use std::cmp::min; use std::collections::HashMap; use std::fs::OpenOptions; @@ -21,15 +21,15 @@ use utils::zid::{ZTenantId, ZTimelineId}; use std::os::unix::fs::FileExt; -lazy_static! { - /// - /// This is the global cache of file descriptors (File objects). - /// - static ref EPHEMERAL_FILES: RwLock = RwLock::new(EphemeralFiles { +/// +/// This is the global cache of file descriptors (File objects). +/// +static EPHEMERAL_FILES: Lazy> = Lazy::new(|| { + RwLock::new(EphemeralFiles { next_file_id: 1, files: HashMap::new(), - }); -} + }) +}); pub struct EphemeralFiles { next_file_id: u64, diff --git a/pageserver/src/layered_repository/layer_map.rs b/pageserver/src/layered_repository/layer_map.rs index be590c88c2..8363d6314f 100644 --- a/pageserver/src/layered_repository/layer_map.rs +++ b/pageserver/src/layered_repository/layer_map.rs @@ -15,19 +15,18 @@ use crate::layered_repository::storage_layer::Layer; use crate::layered_repository::storage_layer::{range_eq, range_overlaps}; use crate::repository::Key; use anyhow::Result; -use lazy_static::lazy_static; use metrics::{register_int_gauge, IntGauge}; +use once_cell::sync::Lazy; use std::collections::VecDeque; use std::ops::Range; use std::sync::Arc; use tracing::*; use utils::lsn::Lsn; -lazy_static! { - static ref NUM_ONDISK_LAYERS: IntGauge = - register_int_gauge!("pageserver_ondisk_layers", "Number of layers on-disk") - .expect("failed to define a metric"); -} +static NUM_ONDISK_LAYERS: Lazy = Lazy::new(|| { + register_int_gauge!("pageserver_ondisk_layers", "Number of layers on-disk") + .expect("failed to define a metric") +}); /// /// LayerMap tracks what layers exist on a timeline. diff --git a/pageserver/src/layered_repository/timeline.rs b/pageserver/src/layered_repository/timeline.rs index 095f3d3861..181adc2bcc 100644 --- a/pageserver/src/layered_repository/timeline.rs +++ b/pageserver/src/layered_repository/timeline.rs @@ -4,7 +4,7 @@ use anyhow::{anyhow, bail, ensure, Context, Result}; use bytes::Bytes; use fail::fail_point; use itertools::Itertools; -use lazy_static::lazy_static; +use once_cell::sync::Lazy; use tracing::*; use std::cmp::{max, min, Ordering}; @@ -61,75 +61,81 @@ use crate::CheckpointConfig; use crate::{page_cache, storage_sync}; // Metrics collected on operations on the storage repository. -lazy_static! { - pub static ref STORAGE_TIME: HistogramVec = register_histogram_vec!( +pub static STORAGE_TIME: Lazy = Lazy::new(|| { + register_histogram_vec!( "pageserver_storage_operations_seconds", "Time spent on storage operations", &["operation", "tenant_id", "timeline_id"] ) - .expect("failed to define a metric"); -} + .expect("failed to define a metric") +}); // Metrics collected on operations on the storage repository. -lazy_static! 
{ - static ref RECONSTRUCT_TIME: HistogramVec = register_histogram_vec!( +static RECONSTRUCT_TIME: Lazy = Lazy::new(|| { + register_histogram_vec!( "pageserver_getpage_reconstruct_seconds", "Time spent in reconstruct_value", &["tenant_id", "timeline_id"] ) - .expect("failed to define a metric"); -} + .expect("failed to define a metric") +}); -lazy_static! { - static ref MATERIALIZED_PAGE_CACHE_HIT: IntCounterVec = register_int_counter_vec!( +static MATERIALIZED_PAGE_CACHE_HIT: Lazy = Lazy::new(|| { + register_int_counter_vec!( "pageserver_materialized_cache_hits_total", "Number of cache hits from materialized page cache", &["tenant_id", "timeline_id"] ) - .expect("failed to define a metric"); - static ref WAIT_LSN_TIME: HistogramVec = register_histogram_vec!( + .expect("failed to define a metric") +}); + +static WAIT_LSN_TIME: Lazy = Lazy::new(|| { + register_histogram_vec!( "pageserver_wait_lsn_seconds", "Time spent waiting for WAL to arrive", &["tenant_id", "timeline_id"] ) - .expect("failed to define a metric"); -} + .expect("failed to define a metric") +}); -lazy_static! { - static ref LAST_RECORD_LSN: IntGaugeVec = register_int_gauge_vec!( +static LAST_RECORD_LSN: Lazy = Lazy::new(|| { + register_int_gauge_vec!( "pageserver_last_record_lsn", "Last record LSN grouped by timeline", &["tenant_id", "timeline_id"] ) - .expect("failed to define a metric"); -} + .expect("failed to define a metric") +}); // Metrics for determining timeline's physical size. // A layered timeline's physical is defined as the total size of // (delta/image) layer files on disk. -lazy_static! { - static ref CURRENT_PHYSICAL_SIZE: UIntGaugeVec = register_uint_gauge_vec!( +static CURRENT_PHYSICAL_SIZE: Lazy = Lazy::new(|| { + register_uint_gauge_vec!( "pageserver_current_physical_size", "Current physical size grouped by timeline", &["tenant_id", "timeline_id"] ) - .expect("failed to define a metric"); -} + .expect("failed to define a metric") +}); // Metrics for cloud upload. These metrics reflect data uploaded to cloud storage, // or in testing they estimate how much we would upload if we did. -lazy_static! { - static ref NUM_PERSISTENT_FILES_CREATED: IntCounter = register_int_counter!( +static NUM_PERSISTENT_FILES_CREATED: Lazy = Lazy::new(|| { + register_int_counter!( "pageserver_created_persistent_files_total", "Number of files created that are meant to be uploaded to cloud storage", ) - .expect("failed to define a metric"); - static ref PERSISTENT_BYTES_WRITTEN: IntCounter = register_int_counter!( + .expect("failed to define a metric") +}); + +static PERSISTENT_BYTES_WRITTEN: Lazy = Lazy::new(|| { + register_int_counter!( "pageserver_written_persistent_bytes_total", "Total bytes written that are meant to be uploaded to cloud storage", ) - .expect("failed to define a metric"); -} + .expect("failed to define a metric") +}); #[derive(Clone)] pub enum LayeredTimelineEntry { diff --git a/pageserver/src/lib.rs b/pageserver/src/lib.rs index 4ecb181553..ba912a3702 100644 --- a/pageserver/src/lib.rs +++ b/pageserver/src/lib.rs @@ -22,7 +22,7 @@ pub mod walreceiver; pub mod walrecord; pub mod walredo; -use lazy_static::lazy_static; +use once_cell::sync::Lazy; use tracing::info; use crate::thread_mgr::ThreadKind; @@ -42,14 +42,14 @@ pub const STORAGE_FORMAT_VERSION: u16 = 3; pub const IMAGE_FILE_MAGIC: u16 = 0x5A60; pub const DELTA_FILE_MAGIC: u16 = 0x5A61; -lazy_static! 
{ - static ref LIVE_CONNECTIONS_COUNT: IntGaugeVec = register_int_gauge_vec!( +static LIVE_CONNECTIONS_COUNT: Lazy = Lazy::new(|| { + register_int_gauge_vec!( "pageserver_live_connections", "Number of live network connections", &["pageserver_connection_kind"] ) - .expect("failed to define a metric"); -} + .expect("failed to define a metric") +}); pub const LOG_FILE_NAME: &str = "pageserver.log"; diff --git a/pageserver/src/page_service.rs b/pageserver/src/page_service.rs index c8aa4b35e8..75df744014 100644 --- a/pageserver/src/page_service.rs +++ b/pageserver/src/page_service.rs @@ -11,7 +11,7 @@ use anyhow::{bail, ensure, Context, Result}; use bytes::{Buf, BufMut, Bytes, BytesMut}; -use lazy_static::lazy_static; +use once_cell::sync::Lazy; use regex::Regex; use std::io::{self, Read}; use std::net::TcpListener; @@ -434,15 +434,15 @@ const TIME_BUCKETS: &[f64] = &[ 0.1, // 1/10 s ]; -lazy_static! { - static ref SMGR_QUERY_TIME: HistogramVec = register_histogram_vec!( +static SMGR_QUERY_TIME: Lazy = Lazy::new(|| { + register_histogram_vec!( "pageserver_smgr_query_seconds", "Time spent on smgr query handling", &["smgr_query_type", "tenant_id", "timeline_id"], TIME_BUCKETS.into() ) - .expect("failed to define a metric"); -} + .expect("failed to define a metric") +}); impl PageServerHandler { pub fn new(conf: &'static PageServerConf, auth: Option>) -> Self { diff --git a/pageserver/src/repository.rs b/pageserver/src/repository.rs index 6467231e08..3fae0184f9 100644 --- a/pageserver/src/repository.rs +++ b/pageserver/src/repository.rs @@ -408,7 +408,7 @@ pub trait TimelineWriter<'a> { #[cfg(test)] pub mod repo_harness { use bytes::BytesMut; - use lazy_static::lazy_static; + use once_cell::sync::Lazy; use std::sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard}; use std::{fs, path::PathBuf}; @@ -439,9 +439,7 @@ pub mod repo_harness { buf.freeze() } - lazy_static! { - static ref LOCK: RwLock<()> = RwLock::new(()); - } + static LOCK: Lazy> = Lazy::new(|| RwLock::new(())); impl From for TenantConfOpt { fn from(tenant_conf: TenantConf) -> Self { @@ -589,11 +587,10 @@ mod tests { //use std::sync::Arc; use bytes::BytesMut; use hex_literal::hex; - use lazy_static::lazy_static; + use once_cell::sync::Lazy; - lazy_static! { - static ref TEST_KEY: Key = Key::from_slice(&hex!("112222222233333333444444445500000001")); - } + static TEST_KEY: Lazy = + Lazy::new(|| Key::from_slice(&hex!("112222222233333333444444445500000001"))); #[test] fn test_basic() -> Result<()> { diff --git a/pageserver/src/storage_sync.rs b/pageserver/src/storage_sync.rs index c60d3dccc0..222a406c81 100644 --- a/pageserver/src/storage_sync.rs +++ b/pageserver/src/storage_sync.rs @@ -155,8 +155,7 @@ use std::{ use anyhow::{anyhow, bail, Context}; use futures::stream::{FuturesUnordered, StreamExt}; -use lazy_static::lazy_static; -use once_cell::sync::OnceCell; +use once_cell::sync::{Lazy, OnceCell}; use remote_storage::{GenericRemoteStorage, RemoteStorage}; use tokio::{ fs, @@ -184,8 +183,8 @@ use crate::{ }; use metrics::{ - register_histogram_vec, register_int_counter, register_int_counter_vec, register_int_gauge, - HistogramVec, IntCounter, IntCounterVec, IntGauge, + register_histogram_vec, register_int_counter_vec, register_int_gauge, HistogramVec, + IntCounterVec, IntGauge, }; use utils::zid::{ZTenantId, ZTenantTimelineId, ZTimelineId}; @@ -193,32 +192,33 @@ use self::download::download_index_parts; pub use self::download::gather_tenant_timelines_index_parts; pub use self::download::TEMP_DOWNLOAD_EXTENSION; -lazy_static! 
{ - static ref REMAINING_SYNC_ITEMS: IntGauge = register_int_gauge!( +static REMAINING_SYNC_ITEMS: Lazy = Lazy::new(|| { + register_int_gauge!( "pageserver_remote_storage_remaining_sync_items", "Number of storage sync items left in the queue" ) - .expect("failed to register pageserver remote storage remaining sync items int gauge"); - static ref FATAL_TASK_FAILURES: IntCounter = register_int_counter!( - "pageserver_remote_storage_fatal_task_failures_total", - "Number of critically failed tasks" - ) - .expect("failed to register pageserver remote storage remaining sync items int gauge"); - static ref IMAGE_SYNC_TIME: HistogramVec = register_histogram_vec!( + .expect("failed to register pageserver remote storage remaining sync items int gauge") +}); + +static IMAGE_SYNC_TIME: Lazy = Lazy::new(|| { + register_histogram_vec!( "pageserver_remote_storage_image_sync_seconds", "Time took to synchronize (download or upload) a whole pageserver image. \ Grouped by tenant and timeline ids, `operation_kind` (upload|download) and `status` (success|failure)", &["tenant_id", "timeline_id", "operation_kind", "status"], vec![0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 3.0, 10.0, 20.0] ) - .expect("failed to register pageserver image sync time histogram vec"); - static ref REMOTE_INDEX_UPLOAD: IntCounterVec = register_int_counter_vec!( + .expect("failed to register pageserver image sync time histogram vec") +}); + +static REMOTE_INDEX_UPLOAD: Lazy = Lazy::new(|| { + register_int_counter_vec!( "pageserver_remote_storage_remote_index_uploads_total", "Number of remote index uploads", &["tenant_id", "timeline_id"], ) - .expect("failed to register pageserver remote index upload vec"); -} + .expect("failed to register pageserver remote index upload vec") +}); static SYNC_QUEUE: OnceCell = OnceCell::new(); diff --git a/pageserver/src/storage_sync/upload.rs b/pageserver/src/storage_sync/upload.rs index f9ab3b7471..671ea45202 100644 --- a/pageserver/src/storage_sync/upload.rs +++ b/pageserver/src/storage_sync/upload.rs @@ -4,7 +4,7 @@ use std::{fmt::Debug, path::PathBuf}; use anyhow::Context; use futures::stream::{FuturesUnordered, StreamExt}; -use lazy_static::lazy_static; +use once_cell::sync::Lazy; use remote_storage::RemoteStorage; use tokio::fs; use tracing::{debug, error, info, warn}; @@ -20,14 +20,14 @@ use crate::{ }; use metrics::{register_int_counter_vec, IntCounterVec}; -lazy_static! { - static ref NO_LAYERS_UPLOAD: IntCounterVec = register_int_counter_vec!( +static NO_LAYERS_UPLOAD: Lazy = Lazy::new(|| { + register_int_counter_vec!( "pageserver_remote_storage_no_layers_uploads_total", "Number of skipped uploads due to no layers", &["tenant_id", "timeline_id"], ) - .expect("failed to register pageserver no layers upload vec"); -} + .expect("failed to register pageserver no layers upload vec") +}); /// Serializes and uploads the given index part data to the remote storage. pub(super) async fn upload_index_part( diff --git a/pageserver/src/tenant_mgr.rs b/pageserver/src/tenant_mgr.rs index dfdbc4c318..5a5cea9a4b 100644 --- a/pageserver/src/tenant_mgr.rs +++ b/pageserver/src/tenant_mgr.rs @@ -27,23 +27,25 @@ use utils::zid::{ZTenantId, ZTenantTimelineId, ZTimelineId}; mod tenants_state { use anyhow::ensure; + use once_cell::sync::Lazy; use std::{ collections::HashMap, sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}, }; use tokio::sync::mpsc; use tracing::{debug, error}; - use utils::zid::ZTenantId; use crate::tenant_mgr::{LocalTimelineUpdate, Tenant}; - lazy_static::lazy_static! 
{ - static ref TENANTS: RwLock> = RwLock::new(HashMap::new()); - /// Sends updates to the local timelines (creation and deletion) to the WAL receiver, - /// so that it can enable/disable corresponding processes. - static ref TIMELINE_UPDATE_SENDER: RwLock>> = RwLock::new(None); - } + static TENANTS: Lazy>> = + Lazy::new(|| RwLock::new(HashMap::new())); + + /// Sends updates to the local timelines (creation and deletion) to the WAL receiver, + /// so that it can enable/disable corresponding processes. + static TIMELINE_UPDATE_SENDER: Lazy< + RwLock>>, + > = Lazy::new(|| RwLock::new(None)); pub(super) fn read_tenants() -> RwLockReadGuard<'static, HashMap> { TENANTS diff --git a/pageserver/src/thread_mgr.rs b/pageserver/src/thread_mgr.rs index 6dd2e4b00b..cdd38febbc 100644 --- a/pageserver/src/thread_mgr.rs +++ b/pageserver/src/thread_mgr.rs @@ -45,21 +45,20 @@ use tokio::sync::watch; use tracing::{debug, error, info, warn}; -use lazy_static::lazy_static; +use once_cell::sync::Lazy; use utils::zid::{ZTenantId, ZTimelineId}; use crate::shutdown_pageserver; -lazy_static! { - /// Each thread that we track is associated with a "thread ID". It's just - /// an increasing number that we assign, not related to any system thread - /// id. - static ref NEXT_THREAD_ID: AtomicU64 = AtomicU64::new(1); +/// Each thread that we track is associated with a "thread ID". It's just +/// an increasing number that we assign, not related to any system thread +/// id. +static NEXT_THREAD_ID: Lazy = Lazy::new(|| AtomicU64::new(1)); - /// Global registry of threads - static ref THREADS: Mutex>> = Mutex::new(HashMap::new()); -} +/// Global registry of threads +static THREADS: Lazy>>> = + Lazy::new(|| Mutex::new(HashMap::new())); // There is a Tokio watch channel for each thread, which can be used to signal the // thread that it needs to shut down. This thread local variable holds the receiving diff --git a/pageserver/src/virtual_file.rs b/pageserver/src/virtual_file.rs index a16e772238..5b24b848ad 100644 --- a/pageserver/src/virtual_file.rs +++ b/pageserver/src/virtual_file.rs @@ -10,7 +10,7 @@ //! This is similar to PostgreSQL's virtual file descriptor facility in //! src/backend/storage/file/fd.c //! -use lazy_static::lazy_static; +use once_cell::sync::Lazy; use once_cell::sync::OnceCell; use std::fs::{File, OpenOptions}; use std::io::{Error, ErrorKind, Read, Seek, SeekFrom, Write}; @@ -32,23 +32,24 @@ const STORAGE_IO_TIME_BUCKETS: &[f64] = &[ 1.0, // 1 sec ]; -lazy_static! { - static ref STORAGE_IO_TIME: HistogramVec = register_histogram_vec!( +static STORAGE_IO_TIME: Lazy = Lazy::new(|| { + register_histogram_vec!( "pageserver_io_operations_seconds", "Time spent in IO operations", &["operation", "tenant_id", "timeline_id"], STORAGE_IO_TIME_BUCKETS.into() ) - .expect("failed to define a metric"); -} -lazy_static! { - static ref STORAGE_IO_SIZE: IntGaugeVec = register_int_gauge_vec!( + .expect("failed to define a metric") +}); + +static STORAGE_IO_SIZE: Lazy = Lazy::new(|| { + register_int_gauge_vec!( "pageserver_io_operations_bytes_total", "Total amount of bytes read/written in IO operations", &["operation", "tenant_id", "timeline_id"] ) - .expect("failed to define a metric"); -} + .expect("failed to define a metric") +}); /// /// A virtual file descriptor. You can use this just like std::fs::File, but internally diff --git a/pageserver/src/walredo.rs b/pageserver/src/walredo.rs index db4620417c..85f970a941 100644 --- a/pageserver/src/walredo.rs +++ b/pageserver/src/walredo.rs @@ -20,8 +20,8 @@ //! 
use byteorder::{ByteOrder, LittleEndian}; use bytes::{BufMut, Bytes, BytesMut}; -use lazy_static::lazy_static; use nix::poll::*; +use once_cell::sync::Lazy; use serde::Serialize; use std::fs; use std::fs::OpenOptions; @@ -105,21 +105,27 @@ impl crate::walredo::WalRedoManager for DummyRedoManager { // We collect the time spent in actual WAL redo ('redo'), and time waiting // for access to the postgres process ('wait') since there is only one for // each tenant. -lazy_static! { - static ref WAL_REDO_TIME: Histogram = - register_histogram!("pageserver_wal_redo_seconds", "Time spent on WAL redo") - .expect("failed to define a metric"); - static ref WAL_REDO_WAIT_TIME: Histogram = register_histogram!( + +static WAL_REDO_TIME: Lazy = Lazy::new(|| { + register_histogram!("pageserver_wal_redo_seconds", "Time spent on WAL redo") + .expect("failed to define a metric") +}); + +static WAL_REDO_WAIT_TIME: Lazy = Lazy::new(|| { + register_histogram!( "pageserver_wal_redo_wait_seconds", "Time spent waiting for access to the WAL redo process" ) - .expect("failed to define a metric"); - static ref WAL_REDO_RECORD_COUNTER: IntCounter = register_int_counter!( + .expect("failed to define a metric") +}); + +static WAL_REDO_RECORD_COUNTER: Lazy = Lazy::new(|| { + register_int_counter!( "pageserver_replayed_wal_records_total", "Number of WAL records replayed in WAL redo process" ) - .unwrap(); -} + .unwrap() +}); /// /// This is the real implementation that uses a Postgres process to diff --git a/proxy/Cargo.toml b/proxy/Cargo.toml index 8c6036f87d..d9d43c3325 100644 --- a/proxy/Cargo.toml +++ b/proxy/Cargo.toml @@ -14,7 +14,7 @@ hashbrown = "0.11.2" hex = "0.4.3" hmac = "0.12.1" hyper = "0.14" -lazy_static = "1.4.0" +once_cell = "1.13.0" md5 = "0.7.0" parking_lot = "0.12" pin-project-lite = "0.2.7" diff --git a/proxy/src/auth/backend.rs b/proxy/src/auth/backend.rs index 5e87059c86..a67865e08c 100644 --- a/proxy/src/auth/backend.rs +++ b/proxy/src/auth/backend.rs @@ -12,13 +12,12 @@ use crate::{ stream::PqStream, waiters::{self, Waiter, Waiters}, }; -use lazy_static::lazy_static; + +use once_cell::sync::Lazy; use serde::{Deserialize, Serialize}; use tokio::io::{AsyncRead, AsyncWrite}; -lazy_static! { - static ref CPLANE_WAITERS: Waiters = Default::default(); -} +static CPLANE_WAITERS: Lazy> = Lazy::new(Default::default); /// Give caller an opportunity to wait for the cloud's reply. pub async fn with_waiter( diff --git a/proxy/src/proxy.rs b/proxy/src/proxy.rs index f202782109..29be79c886 100644 --- a/proxy/src/proxy.rs +++ b/proxy/src/proxy.rs @@ -4,8 +4,8 @@ use crate::config::{ProxyConfig, TlsConfig}; use crate::stream::{MetricsStream, PqStream, Stream}; use anyhow::{bail, Context}; use futures::TryFutureExt; -use lazy_static::lazy_static; use metrics::{register_int_counter, IntCounter}; +use once_cell::sync::Lazy; use std::sync::Arc; use tokio::io::{AsyncRead, AsyncWrite}; use utils::pq_proto::{BeMessage as Be, *}; @@ -13,23 +13,29 @@ use utils::pq_proto::{BeMessage as Be, *}; const ERR_INSECURE_CONNECTION: &str = "connection is insecure (try using `sslmode=require`)"; const ERR_PROTO_VIOLATION: &str = "protocol violation"; -lazy_static! { - static ref NUM_CONNECTIONS_ACCEPTED_COUNTER: IntCounter = register_int_counter!( +static NUM_CONNECTIONS_ACCEPTED_COUNTER: Lazy = Lazy::new(|| { + register_int_counter!( "proxy_accepted_connections_total", "Number of TCP client connections accepted." 
) - .unwrap(); - static ref NUM_CONNECTIONS_CLOSED_COUNTER: IntCounter = register_int_counter!( + .unwrap() +}); + +static NUM_CONNECTIONS_CLOSED_COUNTER: Lazy = Lazy::new(|| { + register_int_counter!( "proxy_closed_connections_total", "Number of TCP client connections closed." ) - .unwrap(); - static ref NUM_BYTES_PROXIED_COUNTER: IntCounter = register_int_counter!( + .unwrap() +}); + +static NUM_BYTES_PROXIED_COUNTER: Lazy = Lazy::new(|| { + register_int_counter!( "proxy_io_bytes_total", "Number of bytes sent/received between any client and backend." ) - .unwrap(); -} + .unwrap() +}); /// A small combinator for pluggable error logging. async fn log_error(future: F) -> F::Output diff --git a/safekeeper/Cargo.toml b/safekeeper/Cargo.toml index f6ae9e75d7..4ed30413e2 100644 --- a/safekeeper/Cargo.toml +++ b/safekeeper/Cargo.toml @@ -9,7 +9,6 @@ bytes = "1.0.1" byteorder = "1.4.3" hyper = "0.14" fs2 = "0.4.3" -lazy_static = "1.4.0" serde_json = "1" tracing = "0.1.27" clap = "3.0" @@ -29,7 +28,7 @@ const_format = "0.2.21" tokio-postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } git-version = "0.3.5" async-trait = "0.1" -once_cell = "1.10.0" +once_cell = "1.13.0" toml_edit = { version = "0.13", features = ["easy"] } postgres_ffi = { path = "../libs/postgres_ffi" } diff --git a/safekeeper/src/control_file.rs b/safekeeper/src/control_file.rs index c49b4c058a..7fc75246e1 100644 --- a/safekeeper/src/control_file.rs +++ b/safekeeper/src/control_file.rs @@ -2,7 +2,7 @@ use anyhow::{bail, ensure, Context, Result}; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; -use lazy_static::lazy_static; +use once_cell::sync::Lazy; use std::fs::{self, File, OpenOptions}; use std::io::{Read, Write}; @@ -26,15 +26,15 @@ const CONTROL_FILE_NAME: &str = "safekeeper.control"; const CONTROL_FILE_NAME_PARTIAL: &str = "safekeeper.control.partial"; pub const CHECKSUM_SIZE: usize = std::mem::size_of::(); -lazy_static! { - static ref PERSIST_CONTROL_FILE_SECONDS: HistogramVec = register_histogram_vec!( +static PERSIST_CONTROL_FILE_SECONDS: Lazy = Lazy::new(|| { + register_histogram_vec!( "safekeeper_persist_control_file_seconds", "Seconds to persist and sync control file, grouped by timeline", &["tenant_id", "timeline_id"], DISK_WRITE_SECONDS_BUCKETS.to_vec() ) - .expect("Failed to register safekeeper_persist_control_file_seconds histogram vec"); -} + .expect("Failed to register safekeeper_persist_control_file_seconds histogram vec") +}); /// Storage should keep actual state inside of it. It should implement Deref /// trait to access state fields and have persist method for updating that state. diff --git a/safekeeper/src/timeline.rs b/safekeeper/src/timeline.rs index bed6e447d7..ee642408f2 100644 --- a/safekeeper/src/timeline.rs +++ b/safekeeper/src/timeline.rs @@ -4,7 +4,7 @@ use anyhow::{bail, Context, Result}; use etcd_broker::subscription_value::SkTimelineInfo; -use lazy_static::lazy_static; +use once_cell::sync::Lazy; use postgres_ffi::xlog_utils::XLogSegNo; use serde::Serialize; @@ -559,12 +559,12 @@ struct GlobalTimelinesState { wal_backup_launcher_tx: Option>, } -lazy_static! 
{ - static ref TIMELINES_STATE: Mutex = Mutex::new(GlobalTimelinesState { +static TIMELINES_STATE: Lazy> = Lazy::new(|| { + Mutex::new(GlobalTimelinesState { timelines: HashMap::new(), wal_backup_launcher_tx: None, - }); -} + }) +}); #[derive(Clone, Copy, Serialize)] pub struct TimelineDeleteForceResult { diff --git a/safekeeper/src/wal_storage.rs b/safekeeper/src/wal_storage.rs index 9b23e2189c..2a36d5c04c 100644 --- a/safekeeper/src/wal_storage.rs +++ b/safekeeper/src/wal_storage.rs @@ -12,7 +12,7 @@ use std::io::{self, Seek, SeekFrom}; use std::pin::Pin; use tokio::io::AsyncRead; -use lazy_static::lazy_static; +use once_cell::sync::Lazy; use postgres_ffi::xlog_utils::{ find_end_of_wal, IsPartialXLogFileName, IsXLogFileName, XLogFromFileName, XLogSegNo, PG_TLI, }; @@ -38,31 +38,44 @@ use metrics::{register_histogram_vec, Histogram, HistogramVec, DISK_WRITE_SECOND use tokio::io::{AsyncReadExt, AsyncSeekExt}; -lazy_static! { - // The prometheus crate does not support u64 yet, i64 only (see `IntGauge`). - // i64 is faster than f64, so update to u64 when available. - static ref WRITE_WAL_BYTES: HistogramVec = register_histogram_vec!( +// The prometheus crate does not support u64 yet, i64 only (see `IntGauge`). +// i64 is faster than f64, so update to u64 when available. +static WRITE_WAL_BYTES: Lazy = Lazy::new(|| { + register_histogram_vec!( "safekeeper_write_wal_bytes", "Bytes written to WAL in a single request, grouped by timeline", &["tenant_id", "timeline_id"], - vec![1.0, 10.0, 100.0, 1024.0, 8192.0, 128.0 * 1024.0, 1024.0 * 1024.0, 10.0 * 1024.0 * 1024.0] + vec![ + 1.0, + 10.0, + 100.0, + 1024.0, + 8192.0, + 128.0 * 1024.0, + 1024.0 * 1024.0, + 10.0 * 1024.0 * 1024.0 + ] ) - .expect("Failed to register safekeeper_write_wal_bytes histogram vec"); - static ref WRITE_WAL_SECONDS: HistogramVec = register_histogram_vec!( + .expect("Failed to register safekeeper_write_wal_bytes histogram vec") +}); +static WRITE_WAL_SECONDS: Lazy = Lazy::new(|| { + register_histogram_vec!( "safekeeper_write_wal_seconds", "Seconds spent writing and syncing WAL to a disk in a single request, grouped by timeline", &["tenant_id", "timeline_id"], DISK_WRITE_SECONDS_BUCKETS.to_vec() ) - .expect("Failed to register safekeeper_write_wal_seconds histogram vec"); - static ref FLUSH_WAL_SECONDS: HistogramVec = register_histogram_vec!( + .expect("Failed to register safekeeper_write_wal_seconds histogram vec") +}); +static FLUSH_WAL_SECONDS: Lazy = Lazy::new(|| { + register_histogram_vec!( "safekeeper_flush_wal_seconds", "Seconds spent syncing WAL to a disk, grouped by timeline", &["tenant_id", "timeline_id"], DISK_WRITE_SECONDS_BUCKETS.to_vec() ) - .expect("Failed to register safekeeper_flush_wal_seconds histogram vec"); -} + .expect("Failed to register safekeeper_flush_wal_seconds histogram vec") +}); struct WalStorageMetrics { write_wal_bytes: Histogram,
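For reviewers unfamiliar with once_cell, the idioms this patch standardizes on look roughly like the sketch below. It is illustrative only and not part of the diff: the static names are hypothetical, it assumes the once_cell, regex, and prometheus crates are available, and it calls the prometheus macros directly where the repository routes them through its local metrics crate.

use once_cell::sync::{Lazy, OnceCell};
use prometheus::{register_int_counter, IntCounter};
use regex::Regex;

// 1. Direct replacement for `lazy_static!`: the initializer moves into a
//    closure, and what used to end with `;` becomes the closure's return value.
static EXAMPLE_RE: Lazy<Regex> = Lazy::new(|| Regex::new(r"^\d+$").unwrap());

// 2. The metrics pattern used throughout the patch: the register_* macro call
//    runs once, on first access to the static.
static EXAMPLE_COUNTER: Lazy<IntCounter> = Lazy::new(|| {
    register_int_counter!("example_requests_total", "Number of example requests")
        .expect("failed to define a metric")
});

// 3. `OnceCell`: initialization happens explicitly at a call site via
//    `get_or_init`, after which the value can be fetched with `get`.
static EXAMPLE_CELL: OnceCell<Regex> = OnceCell::new();

fn is_decimal(s: &str) -> bool {
    let re = EXAMPLE_CELL.get_or_init(|| Regex::new(r"^\d+$").unwrap());
    EXAMPLE_COUNTER.inc();
    re.is_match(s) && EXAMPLE_RE.is_match(s)
}

fn main() {
    assert!(is_decimal("12345"));
    assert!(!is_decimal("12a45"));
}

Lazy is the drop-in replacement used for nearly every converted block in this diff; relfile_utils.rs is the one place that opts for OnceCell with an explicit get_or_init at the call site instead.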