From 2450f82de5eecb1bfb30185ef8f5b21d0bfd4be5 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Mon, 16 Aug 2021 10:06:48 +0300 Subject: [PATCH 01/24] Introduce a new "layered" repository implementation. This replaces the RocksDB-based implementation with an approach using "snapshot files" on disk, and in-memory btreemaps to hold the recent changes. This makes the repository implementation a configuration option. You can choose 'layered' or 'rocksdb' with "zenith init --repository-format=" The unit tests have been refactored to exercise both implementations. 'layered' is now the default. Push/pull is not implemented. The 'test_history_inmemory' test has been commented out accordingly. It's not clear how we will implement that functionality; probably by copying the snapshot files directly. --- Cargo.lock | 99 +- control_plane/src/local_env.rs | 6 + control_plane/src/storage.rs | 6 +- pageserver/Cargo.toml | 1 + pageserver/src/bin/pageserver.rs | 26 +- pageserver/src/branches.rs | 30 +- pageserver/src/layered_repository.rs | 1212 +++++++++++++++++ pageserver/src/layered_repository/README.md | 298 ++++ .../src/layered_repository/inmemory_layer.rs | 534 ++++++++ .../src/layered_repository/layer_map.rs | 132 ++ .../src/layered_repository/snapshot_layer.rs | 631 +++++++++ .../src/layered_repository/storage_layer.rs | 123 ++ pageserver/src/lib.rs | 9 + pageserver/src/page_cache.rs | 32 +- pageserver/src/page_service.rs | 49 + pageserver/src/relish.rs | 11 +- pageserver/src/repository.rs | 147 +- pageserver/src/restore_local_repo.rs | 15 +- pageserver/src/walreceiver.rs | 8 +- test_runner/batch_others/test_gc.py | 3 +- test_runner/batch_others/test_snapfiles_gc.py | 122 ++ vendor/postgres | 2 +- zenith/src/main.rs | 12 +- zenith_utils/src/zid.rs | 2 +- 24 files changed, 3435 insertions(+), 75 deletions(-) create mode 100644 pageserver/src/layered_repository.rs create mode 100644 pageserver/src/layered_repository/README.md create mode 100644 pageserver/src/layered_repository/inmemory_layer.rs create mode 100644 pageserver/src/layered_repository/layer_map.rs create mode 100644 pageserver/src/layered_repository/snapshot_layer.rs create mode 100644 pageserver/src/layered_repository/storage_layer.rs create mode 100644 test_runner/batch_others/test_snapfiles_gc.py diff --git a/Cargo.lock b/Cargo.lock index 67c00293b0..9bf6f0b7fd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,7 +1,5 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-version = 3 - [[package]] name = "ahash" version = "0.4.7" @@ -82,6 +80,30 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" +[[package]] +name = "aversion" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41992ab8cfcc3026ef9abceffe0c2b0479c043183fc23825e30d22baab6df334" +dependencies = [ + "aversion-macros", + "byteorder", + "serde", + "serde_cbor", + "thiserror", +] + +[[package]] +name = "aversion-macros" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ba5785f953985aa0caca927ba4005880f3b4f53de87f134e810ae3549f744d2" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "aws-creds" version = "0.26.0" @@ -166,6 +188,18 @@ dependencies = [ "generic-array", ] +[[package]] +name = "bookfile" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "efa3e2086414e1bbecbc10730f265e5b079ab4ea0b830e7219a70dab6471e753" +dependencies = [ + "aversion", + "byteorder", + "serde", + "thiserror", +] + [[package]] name = "boxfnonce" version = "0.1.1" @@ -646,6 +680,12 @@ dependencies = [ "tracing", ] +[[package]] +name = "half" +version = "1.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62aca2aba2d62b4a7f5b33f3712cb1b0692779a56fb510499d5c0aa594daeaf3" + [[package]] name = "hashbrown" version = "0.9.1" @@ -1139,6 +1179,7 @@ name = "pageserver" version = "0.1.0" dependencies = [ "anyhow", + "bookfile", "byteorder", "bytes", "chrono", @@ -1276,24 +1317,6 @@ dependencies = [ "tokio-postgres 0.7.1", ] -[[package]] -name = "postgres-protocol" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff3e0f70d32e20923cabf2df02913be7c1842d4c772db8065c00fcfdd1d1bff3" -dependencies = [ - "base64 0.13.0", - "byteorder", - "bytes", - "fallible-iterator", - "hmac", - "md-5", - "memchr", - "rand", - "sha2", - "stringprep", -] - [[package]] name = "postgres-protocol" version = "0.6.1" @@ -1313,14 +1336,21 @@ dependencies = [ ] [[package]] -name = "postgres-types" -version = "0.2.1" +name = "postgres-protocol" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "430f4131e1b7657b0cd9a2b0c3408d77c9a43a042d300b8c77f981dffcc43a2f" +checksum = "ff3e0f70d32e20923cabf2df02913be7c1842d4c772db8065c00fcfdd1d1bff3" dependencies = [ + "base64 0.13.0", + "byteorder", "bytes", "fallible-iterator", - "postgres-protocol 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "hmac", + "md-5", + "memchr", + "rand", + "sha2", + "stringprep", ] [[package]] @@ -1333,6 +1363,17 @@ dependencies = [ "postgres-protocol 0.6.1 (git+https://github.com/zenithdb/rust-postgres.git?rev=9eb0dbfbeb6a6c1b79099b9f7ae4a8c021877858)", ] +[[package]] +name = "postgres-types" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "430f4131e1b7657b0cd9a2b0c3408d77c9a43a042d300b8c77f981dffcc43a2f" +dependencies = [ + "bytes", + "fallible-iterator", + "postgres-protocol 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "postgres_ffi" version = "0.1.0" @@ -1735,6 +1776,16 @@ dependencies = [ "xml-rs", ] +[[package]] +name = "serde_cbor" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1e18acfa2f90e8b735b2836ab8d538de304cbb6729a7360729ea5a895d15a622" +dependencies = [ + "half", + "serde", +] + [[package]] name = "serde_derive" version = "1.0.126" diff --git a/control_plane/src/local_env.rs b/control_plane/src/local_env.rs index 084285cb16..10e70c1485 100644 --- a/control_plane/src/local_env.rs +++ b/control_plane/src/local_env.rs @@ -42,6 +42,9 @@ pub struct LocalEnv { #[serde(with = "hex")] pub tenantid: ZTenantId, + // Repository format, 'rocksdb' or 'layered' or None for default + pub repository_format: Option, + // jwt auth token used for communication with pageserver pub auth_token: String, @@ -101,6 +104,7 @@ pub fn init( remote_pageserver: Option<&str>, tenantid: ZTenantId, auth_type: AuthType, + repository_format: Option<&str>, ) -> Result<()> { // check if config already exists let base_path = base_path(); @@ -176,6 +180,7 @@ pub fn init( base_data_dir: base_path, remotes: BTreeMap::default(), tenantid, + repository_format: repository_format.map(|x| x.into()), auth_token, auth_type, private_key_path, @@ -194,6 +199,7 @@ pub fn init( base_data_dir: base_path, remotes: BTreeMap::default(), tenantid, + repository_format: repository_format.map(|x| x.into()), auth_token, auth_type, private_key_path, diff --git a/control_plane/src/storage.rs b/control_plane/src/storage.rs index cc576b1c45..5da3334e4a 100644 --- a/control_plane/src/storage.rs +++ b/control_plane/src/storage.rs @@ -50,7 +50,7 @@ impl PageServerNode { .unwrap() } - pub fn init(&self, create_tenant: Option<&str>, enable_auth: bool) -> Result<()> { + pub fn init(&self, create_tenant: Option<&str>, enable_auth: bool, repository_format: Option<&str>) -> Result<()> { let mut cmd = Command::new(self.env.pageserver_bin()?); let mut args = vec![ "--init", @@ -65,6 +65,10 @@ impl PageServerNode { args.extend(&["--auth-type", "ZenithJWT"]); } + if let Some(repo_format) = repository_format { + args.extend(&["--repository-format", repo_format]); + } + create_tenant.map(|tenantid| args.extend(&["--create-tenant", tenantid])); let status = cmd .args(args) diff --git a/pageserver/Cargo.toml b/pageserver/Cargo.toml index a58c35756b..539c9dccd0 100644 --- a/pageserver/Cargo.toml +++ b/pageserver/Cargo.toml @@ -7,6 +7,7 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +bookfile = "^0.3" chrono = "0.4.19" rand = "0.8.3" regex = "1.4.5" diff --git a/pageserver/src/bin/pageserver.rs b/pageserver/src/bin/pageserver.rs index 914c8858ca..ecc004f7d1 100644 --- a/pageserver/src/bin/pageserver.rs +++ b/pageserver/src/bin/pageserver.rs @@ -20,14 +20,14 @@ use anyhow::{ensure, Result}; use clap::{App, Arg, ArgMatches}; use daemonize::Daemonize; -use pageserver::{branches, logger, page_cache, page_service, PageServerConf}; +use pageserver::{branches, logger, page_cache, page_service, PageServerConf, RepositoryFormat}; use zenith_utils::http_endpoint; const DEFAULT_LISTEN_ADDR: &str = "127.0.0.1:64000"; const DEFAULT_HTTP_ENDPOINT_ADDR: &str = "127.0.0.1:9898"; const DEFAULT_GC_HORIZON: u64 = 64 * 1024 * 1024; -const DEFAULT_GC_PERIOD: Duration = Duration::from_secs(100); +const DEFAULT_GC_PERIOD: Duration = Duration::from_secs(10); const DEFAULT_SUPERUSER: &str = "zenith_admin"; @@ -41,6 +41,7 @@ struct CfgFileParams { pg_distrib_dir: Option, auth_validation_public_key_path: Option, auth_type: Option, + repository_format: Option, } impl CfgFileParams { @@ -58,6 +59,7 @@ impl CfgFileParams { pg_distrib_dir: get_arg("postgres-distrib"), 
auth_validation_public_key_path: get_arg("auth-validation-public-key-path"), auth_type: get_arg("auth-type"), + repository_format: get_arg("repository-format"), } } @@ -74,6 +76,7 @@ impl CfgFileParams { .auth_validation_public_key_path .or(other.auth_validation_public_key_path), auth_type: self.auth_type.or(other.auth_type), + repository_format: self.repository_format.or(other.repository_format), } } @@ -133,6 +136,16 @@ impl CfgFileParams { ); } + let repository_format = match self.repository_format.as_ref() { + Some(repo_format_str) if repo_format_str == "rocksdb" => RepositoryFormat::RocksDb, + Some(repo_format_str) if repo_format_str == "layered" => RepositoryFormat::Layered, + Some(repo_format_str) => anyhow::bail!( + "invalid --repository-format '{}', must be 'rocksdb' or 'layered'", + repo_format_str + ), + None => RepositoryFormat::Layered, // default + }; + Ok(PageServerConf { daemonize: false, @@ -148,8 +161,9 @@ impl CfgFileParams { pg_distrib_dir, auth_validation_public_key_path, - auth_type, + + auth_type, + + repository_format, }) } } @@ -221,6 +235,12 @@ fn main() -> Result<()> { .takes_value(true) .help("Authentication scheme type. One of: Trust, MD5, ZenithJWT"), ) + .arg( + Arg::with_name("repository-format") + .long("repository-format") + .takes_value(true) + .help("Which repository implementation to use, 'rocksdb' or 'layered'"), + ) .get_matches(); let workdir = Path::new(arg_matches.value_of("workdir").unwrap_or(".zenith")); diff --git a/pageserver/src/branches.rs b/pageserver/src/branches.rs index 2739312326..c62074cd04 100644 --- a/pageserver/src/branches.rs +++ b/pageserver/src/branches.rs @@ -24,7 +24,7 @@ use crate::object_repository::ObjectRepository; use crate::page_cache; use crate::restore_local_repo; use crate::walredo::WalRedoManager; -use crate::{repository::Repository, PageServerConf}; +use crate::{repository::Repository, PageServerConf, RepositoryFormat}; #[derive(Serialize, Deserialize, Clone)] pub struct BranchInfo { @@ -65,8 +65,8 @@ pub fn init_pageserver(conf: &'static PageServerConf, create_tenant: Option<&str pub fn create_repo( conf: &'static PageServerConf, tenantid: ZTenantId, - wal_redo_manager: Arc<dyn WalRedoManager>, -) -> Result<ObjectRepository> { + wal_redo_manager: Arc<dyn WalRedoManager>, +) -> Result<Arc<dyn Repository>> { let repo_dir = conf.tenant_path(&tenantid); if repo_dir.exists() { bail!("repo for {} already exists", tenantid) @@ -96,19 +96,27 @@ pub fn create_repo( // and we failed to run initdb again in the same directory. This has been solved for the // rapid init+start case now, but the general race condition remains if you restart the // server quickly. 
- let storage = crate::rocksdb_storage::RocksObjectStore::create(conf, &tenantid)?; + let repo: Arc<dyn Repository> = + match conf.repository_format { + RepositoryFormat::Layered => Arc::new( + crate::layered_repository::LayeredRepository::new(conf, wal_redo_manager, tenantid), + ), + RepositoryFormat::RocksDb => { + let obj_store = crate::rocksdb_storage::RocksObjectStore::create(conf, &tenantid)?; - let repo = crate::object_repository::ObjectRepository::new( - conf, - std::sync::Arc::new(storage), - wal_redo_manager, - tenantid, - ); + Arc::new(ObjectRepository::new( + conf, + Arc::new(obj_store), + wal_redo_manager, + tenantid, + )) + } + }; // Load data into pageserver // TODO To implement zenith import we need to // move data loading out of create_repo() - bootstrap_timeline(conf, tenantid, tli, &repo)?; + bootstrap_timeline(conf, tenantid, tli, &*repo)?; Ok(repo) } diff --git a/pageserver/src/layered_repository.rs b/pageserver/src/layered_repository.rs new file mode 100644 index 0000000000..495d2be6ab --- /dev/null +++ b/pageserver/src/layered_repository.rs @@ -0,0 +1,1212 @@ +//! +//! Zenith repository implementation that keeps old data in "snapshot files", and +//! the recent changes in memory. See layered_repository/snapshot_layer.rs and +//! layered_repository/inmemory_layer.rs, respectively. The functions here are +//! responsible for locating the correct layer for the get/put call, tracing +//! timeline branching history as needed. +//! +//! The snapshot files are stored in the .zenith/tenants/<tenantid>/timelines/<timelineid> +//! directory. See layered_repository/README for how the files are managed. +//! In addition to the snapshot files, there is a metadata file in the same +//! directory that contains information about the timeline, in particular its +//! parent timeline, and the last LSN that has been written to disk. +//! + +use anyhow::{bail, Context, Result}; +use bytes::Bytes; +use lazy_static::lazy_static; +use log::*; +use serde::{Deserialize, Serialize}; + +use std::collections::HashMap; +use std::collections::{BTreeSet, HashSet}; +use std::fs; +use std::fs::File; +use std::io::Write; +use std::ops::Bound::Included; +use std::sync::{Arc, Mutex}; +use std::time::{Duration, Instant}; + +use crate::relish::*; +use crate::repository::{GcResult, History, Repository, Timeline, WALRecord}; +use crate::restore_local_repo::import_timeline_wal; +use crate::walredo::WalRedoManager; +use crate::PageServerConf; +use crate::{ZTenantId, ZTimelineId}; + +use zenith_metrics::{register_histogram_vec, HistogramVec}; +use zenith_utils::bin_ser::BeSer; +use zenith_utils::lsn::{AtomicLsn, Lsn}; +use zenith_utils::seqwait::SeqWait; + +mod inmemory_layer; +mod layer_map; +mod snapshot_layer; +mod storage_layer; + +use inmemory_layer::InMemoryLayer; +use layer_map::LayerMap; +use snapshot_layer::SnapshotLayer; +use storage_layer::Layer; + +// Timeout when waiting for WAL receiver to catch up to an LSN given in a GetPage@LSN call. +static TIMEOUT: Duration = Duration::from_secs(60); + +// Perform a checkpoint in the GC thread, when the LSN has advanced this much since +// last checkpoint. This puts a backstop on how much WAL needs to be re-digested if +// the page server is restarted. +// +// FIXME: This current value is very low. I would imagine something like 1 GB or 10 GB +// would be more appropriate. But a low value forces the code to be exercised more, +// which is good for now to trigger bugs. +static CHECKPOINT_INTERVAL: u64 = 16 * 1024 * 1024; + +// Metrics collected on operations on the storage repository. 
+lazy_static! { + static ref STORAGE_TIME: HistogramVec = register_histogram_vec!( + "pageserver_storage_time", + "Time spent on storage operations", + &["operation"] + ) + .expect("failed to define a metric"); +} + +/// +/// Repository consists of multiple timelines. Keep them in a hash table. +/// +pub struct LayeredRepository { + conf: &'static PageServerConf, + tenantid: ZTenantId, + timelines: Mutex<HashMap<ZTimelineId, Arc<LayeredTimeline>>>, + + walredo_mgr: Arc<dyn WalRedoManager>, +} + +/// Public interface +impl Repository for LayeredRepository { + fn get_timeline(&self, timelineid: ZTimelineId) -> Result<Arc<dyn Timeline>> { + let mut timelines = self.timelines.lock().unwrap(); + + Ok(self.get_timeline_locked(timelineid, &mut timelines)?) + } + + fn create_empty_timeline( + &self, + timelineid: ZTimelineId, + start_lsn: Lsn, + ) -> Result<Arc<dyn Timeline>> { + let mut timelines = self.timelines.lock().unwrap(); + + // Create the timeline directory, and write initial metadata to file. + std::fs::create_dir_all(self.conf.timeline_path(&timelineid, &self.tenantid))?; + + let metadata = TimelineMetadata { + last_valid_lsn: start_lsn, + last_record_lsn: start_lsn, + prev_record_lsn: Lsn(0), + ancestor_timeline: None, + ancestor_lsn: start_lsn, + }; + Self::save_metadata(self.conf, timelineid, self.tenantid, &metadata)?; + + let timeline = LayeredTimeline::new( + self.conf, + metadata, + None, + timelineid, + self.tenantid, + self.walredo_mgr.clone(), + )?; + + let timeline_rc = Arc::new(timeline); + let r = timelines.insert(timelineid, timeline_rc.clone()); + assert!(r.is_none()); + Ok(timeline_rc) + } + + /// Branch a timeline + fn branch_timeline(&self, src: ZTimelineId, dst: ZTimelineId, start_lsn: Lsn) -> Result<()> { + let src_timeline = self.get_timeline(src)?; + + // Create the metadata file, noting the ancestor of the new timeline. + // There is initially no data in it, but all the read-calls know to look + // into the ancestor. + let metadata = TimelineMetadata { + last_valid_lsn: start_lsn, + last_record_lsn: start_lsn, + prev_record_lsn: src_timeline.get_prev_record_lsn(), + ancestor_timeline: Some(src), + ancestor_lsn: start_lsn, + }; + std::fs::create_dir_all(self.conf.timeline_path(&dst, &self.tenantid))?; + Self::save_metadata(self.conf, dst, self.tenantid, &metadata)?; + + info!("branched timeline {} from {} at {}", dst, src, start_lsn); + + Ok(()) + } + + /// Public entry point to GC. All the logic is in the private + /// gc_iteration_internal function, this public facade just wraps it for + /// metrics collection. + fn gc_iteration( + &self, + target_timelineid: Option<ZTimelineId>, + horizon: u64, + compact: bool, + ) -> Result<GcResult> { + STORAGE_TIME + .with_label_values(&["gc"]) + .observe_closure_duration(|| self.gc_iteration_internal( + target_timelineid, + horizon, + compact, + )) + } +} + +/// Private functions +impl LayeredRepository { + // Implementation of the public `get_timeline` function. This differs from the public + // interface in that the caller must already hold the mutex on the 'timelines' hashmap. + fn get_timeline_locked( + &self, + timelineid: ZTimelineId, + timelines: &mut HashMap<ZTimelineId, Arc<LayeredTimeline>>, + ) -> Result<Arc<LayeredTimeline>> { + match timelines.get(&timelineid) { + Some(timeline) => Ok(timeline.clone()), + None => { + let metadata = Self::load_metadata(self.conf, timelineid, self.tenantid)?; + + // Recurse to look up the ancestor timeline. + // + // TODO: If you have a very deep timeline history, this could become + // expensive. Perhaps delay this until we need to look up a page in + // ancestor. 
+ let ancestor = if let Some(ancestor_timelineid) = metadata.ancestor_timeline { + Some(self.get_timeline_locked(ancestor_timelineid, timelines)?) + } else { + None + }; + + let timeline = LayeredTimeline::new( + self.conf, + metadata, + ancestor, + timelineid, + self.tenantid, + self.walredo_mgr.clone(), + )?; + + // List the snapshot layers on disk, and load them into the layer map + timeline.load_layer_map()?; + + // Load any new WAL after the last checkpoint into memory. + info!( + "Loading WAL for timeline {} starting at {}", + timelineid, + timeline.get_last_record_lsn() + ); + let wal_dir = self + .conf + .timeline_path(&timelineid, &self.tenantid) + .join("wal"); + import_timeline_wal(&wal_dir, &timeline, timeline.get_last_record_lsn())?; + + let timeline_rc = Arc::new(timeline); + timelines.insert(timelineid, timeline_rc.clone()); + Ok(timeline_rc) + } + } + } + + pub fn new( + conf: &'static PageServerConf, + walredo_mgr: Arc<dyn WalRedoManager>, + tenantid: ZTenantId, + ) -> LayeredRepository { + LayeredRepository { + tenantid: tenantid, + conf: conf, + timelines: Mutex::new(HashMap::new()), + walredo_mgr, + } + } + + /// + /// Launch the checkpointer thread in given repository. + /// + pub fn launch_checkpointer_thread(conf: &'static PageServerConf, rc: Arc<LayeredRepository>) { + let _thread = std::thread::Builder::new() + .name("Checkpointer thread".into()) + .spawn(move || { + // FIXME: relaunch it? Panic is not good. + rc.checkpoint_loop(conf).expect("Checkpointer thread died"); + }) + .unwrap(); + } + + /// + /// Checkpointer thread's main loop + /// + fn checkpoint_loop(&self, conf: &'static PageServerConf) -> Result<()> { + loop { + std::thread::sleep(conf.gc_period); + + info!("checkpointer thread for tenant {} waking up", self.tenantid); + + // checkpoint timelines that have accumulated more than CHECKPOINT_INTERVAL + // bytes of WAL since last checkpoint. + { + let timelines = self.timelines.lock().unwrap(); + for (_timelineid, timeline) in timelines.iter() { + let distance = u64::from(timeline.last_valid_lsn.load()) + - u64::from(timeline.last_checkpoint_lsn.load()); + if distance > CHECKPOINT_INTERVAL { + timeline.checkpoint()?; + } + } + // release lock on 'timelines' + } + + // Garbage collect old files that are not needed for PITR anymore + if conf.gc_horizon > 0 { + self.gc_iteration(None, conf.gc_horizon, false).unwrap(); + } + } + } + + /// Save timeline metadata to file + fn save_metadata( + conf: &'static PageServerConf, + timelineid: ZTimelineId, + tenantid: ZTenantId, + data: &TimelineMetadata, + ) -> Result<()> { + let path = conf.timeline_path(&timelineid, &tenantid).join("metadata"); + let mut file = File::create(&path)?; + + info!("saving metadata {}", path.display()); + + file.write_all(&TimelineMetadata::ser(data)?)?; + + Ok(()) + } + + fn load_metadata( + conf: &'static PageServerConf, + timelineid: ZTimelineId, + tenantid: ZTenantId, + ) -> Result<TimelineMetadata> { + let path = conf.timeline_path(&timelineid, &tenantid).join("metadata"); + let data = std::fs::read(&path)?; + + Ok(TimelineMetadata::des(&data)?) + } + + // + // How garbage collection works: + // + // +--bar-------------> + // / + // +----+-----foo----------------> + // / + // ----main--+--------------------------> + // \ + // +-----baz--------> + // + // + // 1. Grab a mutex to prevent new timelines from being created + // 2. Scan all timelines, and on each timeline, make note of + // all the points where other timelines have been branched off. + // We will refrain from removing page versions at those LSNs. + // 3. 
For each timeline, scan all snapshot files on the timeline. + // Remove all files for which a newer file exists and which + // don't cover any branch point LSNs. + // + // TODO: + // - if a relation has been modified on a child branch, then we + // don't need to keep that in the parent anymore. But currently + // we do. + fn gc_iteration_internal( + &self, + target_timelineid: Option<ZTimelineId>, + horizon: u64, + _compact: bool, + ) -> Result<GcResult> { + let mut totals: GcResult = Default::default(); + let now = Instant::now(); + + // grab mutex to prevent new timelines from being created here. + // TODO: We will hold it for a long time + let mut timelines = self.timelines.lock().unwrap(); + + // Scan all timelines. For each timeline, remember the timeline ID and + // the branch point where it was created. + // + // We scan the directory, not the in-memory hash table, because the hash + // table only contains entries for timelines that have been accessed. We + // need to take all timelines into account, not only the active ones. + let mut timelineids: Vec<ZTimelineId> = Vec::new(); + let mut all_branchpoints: BTreeSet<(ZTimelineId, Lsn)> = BTreeSet::new(); + let timelines_path = self.conf.timelines_path(&self.tenantid); + for direntry in fs::read_dir(timelines_path)? { + let direntry = direntry?; + if let Some(fname) = direntry.file_name().to_str() { + if let Ok(timelineid) = fname.parse::<ZTimelineId>() { + timelineids.push(timelineid); + + // Read the metadata of this timeline to get its parent timeline. + // + // We read the ancestor information directly from the file, instead + // of calling get_timeline(). We don't want to load the timeline + // into memory just for GC. + // + // FIXME: we open the timeline in the loop below with + // get_timeline_locked() anyway, so maybe we should just do it + // here, too. + let metadata = Self::load_metadata(self.conf, timelineid, self.tenantid)?; + if let Some(ancestor_timeline) = metadata.ancestor_timeline { + all_branchpoints.insert((ancestor_timeline, metadata.ancestor_lsn)); + } + } + } + } + + // Ok, we now know all the branch points. Iterate through them. + for timelineid in timelineids { + // If a target timeline was specified, leave the other timelines alone. + // This is a bit inefficient, because we still collect the information for + // all the timelines above. + if let Some(x) = target_timelineid { + if x != timelineid { + continue; + } + } + + let branchpoints: Vec<Lsn> = all_branchpoints + .range(( + Included((timelineid, Lsn(0))), + Included((timelineid, Lsn(u64::MAX))), + )) + .map(|&x| x.1) + .collect(); + + let timeline = self.get_timeline_locked(timelineid, &mut *timelines)?; + let last_lsn = timeline.get_last_valid_lsn(); + + if let Some(cutoff) = last_lsn.checked_sub(horizon) { + let result = timeline.gc_timeline(branchpoints, cutoff)?; + + totals += result; + } + } + + totals.elapsed = now.elapsed(); + Ok(totals) + } +} + +/// Metadata stored on disk for each timeline +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TimelineMetadata { + last_valid_lsn: Lsn, + last_record_lsn: Lsn, + prev_record_lsn: Lsn, + ancestor_timeline: Option<ZTimelineId>, + ancestor_lsn: Lsn, +} + +pub struct LayeredTimeline { + conf: &'static PageServerConf, + + tenantid: ZTenantId, + timelineid: ZTimelineId, + + layers: Mutex<LayerMap>, + + // WAL redo manager + walredo_mgr: Arc<dyn WalRedoManager>, + + // What page versions do we hold in the repository? If we get a + // request > last_valid_lsn, we need to wait until we receive all + // the WAL up to the request. The SeqWait provides functions for + // that. 
TODO: If we get a request for an old LSN, such that the + // versions have already been garbage collected away, we should + // throw an error, but we don't track that currently. + // + // last_record_lsn points to the end of last processed WAL record. + // It can lag behind last_valid_lsn, if the WAL receiver has + // received some WAL after the end of last record, but not the + // whole next record yet. In the page cache, we care about + // last_valid_lsn, but if the WAL receiver needs to restart the + // streaming, it needs to restart at the end of last record, so we + // track them separately. last_record_lsn should perhaps be in + // walreceiver.rs instead of here, but it seems convenient to keep + // all three values together. + // + // We also remember the starting point of the previous record in + // 'prev_record_lsn'. It's used to set the xl_prev pointer of the + // first WAL record when the node is started up. But here, we just + // keep track of it. FIXME: last_record_lsn and prev_record_lsn + // should be updated atomically together. + // + last_valid_lsn: SeqWait<Lsn>, + last_record_lsn: AtomicLsn, + prev_record_lsn: AtomicLsn, + + last_checkpoint_lsn: AtomicLsn, + + // Parent timeline that this timeline was branched from, and the LSN + // of the branch point. + ancestor_timeline: Option<Arc<LayeredTimeline>>, + ancestor_lsn: Lsn, +} + +/// Public interface functions +impl Timeline for LayeredTimeline { + /// Look up given page in the cache. + fn get_page_at_lsn(&self, rel: RelishTag, blknum: u32, lsn: Lsn) -> Result<Bytes> { + if !rel.is_blocky() && blknum != 0 { + bail!( + "invalid request for block {} for non-blocky relish {}", + blknum, + rel + ); + } + let lsn = self.wait_lsn(lsn)?; + + if let Some((layer, lsn)) = self.get_layer_for_read(rel, lsn)? { + layer.get_page_at_lsn(&*self.walredo_mgr, blknum, lsn) + } else { + bail!("relish {} not found at {}", rel, lsn); + } + } + + fn get_page_at_lsn_nowait(&self, rel: RelishTag, blknum: u32, lsn: Lsn) -> Result<Bytes> { + if !rel.is_blocky() && blknum != 0 { + bail!( + "invalid request for block {} for non-blocky relish {}", + blknum, + rel + ); + } + + if let Some((layer, lsn)) = self.get_layer_for_read(rel, lsn)? { + layer.get_page_at_lsn(&*self.walredo_mgr, blknum, lsn) + } else { + bail!("relish {} not found at {}", rel, lsn); + } + } + + fn get_relish_size(&self, rel: RelishTag, lsn: Lsn) -> Result<Option<u32>> { + if !rel.is_blocky() { + bail!("invalid get_relish_size request for non-blocky relish {}", rel); + } + + let lsn = self.wait_lsn(lsn)?; + + if let Some((layer, lsn)) = self.get_layer_for_read(rel, lsn)? { + let result = layer.get_relish_size(lsn); + trace!( + "get_relish_size: rel {} at {}/{} -> {:?}", + rel, + self.timelineid, + lsn, + result + ); + result + } else { + Ok(None) + } + } + + fn get_rel_exists(&self, rel: RelishTag, lsn: Lsn) -> Result<bool> { + let lsn = self.wait_lsn(lsn)?; + + let result; + if let Some((layer, lsn)) = self.get_layer_for_read(rel, lsn)? { + result = layer.get_rel_exists(lsn)?; + } else { + result = false; + } + + trace!("get_rel_exists: {} at {} -> {}", rel, lsn, result); + Ok(result) + } + + fn list_rels(&self, spcnode: u32, dbnode: u32, lsn: Lsn) -> Result<HashSet<RelTag>> { + trace!("list_rels called at {}", lsn); + + // List all rels in this timeline, and all its ancestors. 
+ let mut all_rels = HashSet::new(); + let mut timeline = self; + loop { + let rels = timeline.layers.lock().unwrap().list_rels(spcnode, dbnode)?; + + all_rels.extend(rels.iter()); + + if let Some(ancestor) = timeline.ancestor_timeline.as_ref() { + timeline = ancestor; + continue; + } else { + break; + } + } + + // Now we have a list of all rels that appeared anywhere in the history. Filter + // out relations that were dropped. + // + // FIXME: We should pass the LSN argument to the calls above, and avoid scanning + // dropped relations in the first place. + let mut res: Result<()> = Ok(()); + all_rels.retain(|reltag| + match self.get_rel_exists(RelishTag::Relation(*reltag), lsn) { + Ok(exists) => { info!("retain: {} -> {}", *reltag, exists); exists }, + Err(err) => { res = Err(err); false } + } + ); + res?; + + Ok(all_rels) + } + + fn list_nonrels(&self, lsn: Lsn) -> Result<HashSet<RelishTag>> { + info!("list_nonrels called at {}", lsn); + + // List all nonrels in this timeline, and all its ancestors. + let mut all_rels = HashSet::new(); + let mut timeline = self; + loop { + let rels = timeline.layers.lock().unwrap().list_nonrels(lsn)?; + + all_rels.extend(rels.iter()); + + if let Some(ancestor) = timeline.ancestor_timeline.as_ref() { + timeline = ancestor; + continue; + } else { + break; + } + } + + // Now we have a list of all nonrels that appeared anywhere in the history. Filter + // out dropped ones. + // + // FIXME: We should pass the LSN argument to the calls above, and avoid scanning + // dropped relations in the first place. + let mut res: Result<()> = Ok(()); + all_rels.retain(|tag| + match self.get_rel_exists(*tag, lsn) { + Ok(exists) => { info!("retain: {} -> {}", *tag, exists); exists }, + Err(err) => { res = Err(err); false } + } + ); + res?; + + Ok(all_rels) + } + + fn history<'a>(&'a self) -> Result<Box<dyn History + 'a>> { + // This is needed by the push/pull functionality. Not implemented yet. + todo!(); + } + + fn put_wal_record(&self, rel: RelishTag, blknum: u32, rec: WALRecord) -> Result<()> { + if !rel.is_blocky() && blknum != 0 { + bail!( + "invalid request for block {} for non-blocky relish {}", + blknum, + rel + ); + } + let layer = self.get_layer_for_write(rel, rec.lsn)?; + layer.put_wal_record(blknum, rec) + } + + fn put_truncation(&self, rel: RelishTag, lsn: Lsn, relsize: u32) -> anyhow::Result<()> { + if !rel.is_blocky() { + bail!("invalid truncation for non-blocky relish {}", rel); + } + + debug!("put_truncation: {} to {} blocks at {}", rel, relsize, lsn); + + let layer = self.get_layer_for_write(rel, lsn)?; + layer.put_truncation(lsn, relsize) + } + + fn put_page_image( + &self, + rel: RelishTag, + blknum: u32, + lsn: Lsn, + img: Bytes, + _update_meta: bool, + ) -> Result<()> { + if !rel.is_blocky() && blknum != 0 { + bail!( + "invalid request for block {} for non-blocky relish {}", + blknum, + rel + ); + } + + let layer = self.get_layer_for_write(rel, lsn)?; + layer.put_page_image(blknum, lsn, img) + } + + fn put_unlink(&self, rel: RelishTag, lsn: Lsn) -> Result<()> { + trace!("put_unlink: {} at {}", rel, lsn); + + let layer = self.get_layer_for_write(rel, lsn)?; + layer.put_unlink(lsn) + } + + fn put_raw_data( + &self, + _tag: crate::object_key::ObjectTag, + _lsn: Lsn, + _data: &[u8], + ) -> Result<()> { + // FIXME: This doesn't make much sense for the layered storage format, + // it's pretty tightly coupled with the way the object store stores + // things. + bail!("put_raw_data not implemented"); + } + + /// Public entry point for checkpoint(). 
All the logic is in the private + /// checkpoint_internal function, this public facade just wraps it for + /// metrics collection. + fn checkpoint(&self) -> Result<()> { + STORAGE_TIME + .with_label_values(&["checkpoint"]) + .observe_closure_duration(|| self.checkpoint_internal()) + } + + /// Remember that WAL has been received and added to the page cache up to the given LSN + fn advance_last_valid_lsn(&self, lsn: Lsn) { + let old = self.last_valid_lsn.advance(lsn); + + // The last valid LSN cannot move backwards, but when WAL + // receiver is restarted after having only partially processed + // a record, it can call this with an lsn older than previous + // last valid LSN, when it restarts processing that record. + if lsn < old { + // Should never be called with an LSN older than the last + // record LSN, though. + let last_record_lsn = self.last_record_lsn.load(); + if lsn < last_record_lsn { + warn!( + "attempted to move last valid LSN backwards beyond last record LSN (last record {}, new {})", + last_record_lsn, lsn + ); + } + } + } + + fn init_valid_lsn(&self, lsn: Lsn) { + let old = self.last_valid_lsn.advance(lsn); + assert!(old == Lsn(0)); + let old = self.last_record_lsn.fetch_max(lsn); + assert!(old == Lsn(0)); + self.prev_record_lsn.store(Lsn(0)); + } + + fn get_last_valid_lsn(&self) -> Lsn { + self.last_valid_lsn.load() + } + + /// + /// Remember the (end of) last valid WAL record remembered in the page cache. + /// + /// NOTE: this updates last_valid_lsn as well. + /// + fn advance_last_record_lsn(&self, lsn: Lsn) { + // Can't move backwards. + let old = self.last_record_lsn.fetch_max(lsn); + assert!(old <= lsn); + + // Use old value of last_record_lsn as prev_record_lsn + self.prev_record_lsn.fetch_max(old); + + // Also advance last_valid_lsn + let old = self.last_valid_lsn.advance(lsn); + // Can't move backwards. + if lsn < old { + warn!( + "attempted to move last record LSN backwards (was {}, new {})", + old, lsn + ); + } + } + + fn get_last_record_lsn(&self) -> Lsn { + self.last_record_lsn.load() + } + + fn get_prev_record_lsn(&self) -> Lsn { + self.prev_record_lsn.load() + } +} + +impl LayeredTimeline { + /// Open a Timeline handle. + /// + /// Loads the metadata for the timeline into memory, but not the layer map. 
+ fn new( + conf: &'static PageServerConf, + metadata: TimelineMetadata, + ancestor: Option<Arc<LayeredTimeline>>, + timelineid: ZTimelineId, + tenantid: ZTenantId, + walredo_mgr: Arc<dyn WalRedoManager>, + ) -> Result<LayeredTimeline> { + let timeline = LayeredTimeline { + conf, + timelineid, + tenantid, + layers: Mutex::new(LayerMap::default()), + + walredo_mgr, + + last_valid_lsn: SeqWait::new(metadata.last_valid_lsn), + last_record_lsn: AtomicLsn::new(metadata.last_record_lsn.0), + prev_record_lsn: AtomicLsn::new(metadata.prev_record_lsn.0), + last_checkpoint_lsn: AtomicLsn::new(metadata.last_valid_lsn.0), + + ancestor_timeline: ancestor, + ancestor_lsn: metadata.ancestor_lsn, + }; + Ok(timeline) + } + + /// + /// Load the list of snapshot files from disk, populating the layer map + /// + fn load_layer_map(&self) -> anyhow::Result<()> { + info!( + "loading layer map for timeline {} into memory", + self.timelineid + ); + let mut layers = self.layers.lock().unwrap(); + let snapfiles = + SnapshotLayer::list_snapshot_files(self.conf, self.timelineid, self.tenantid)?; + + for layer_rc in snapfiles.iter() { + info!( + "found layer {} {}-{} {} on timeline {}", + layer_rc.get_relish_tag(), + layer_rc.get_start_lsn(), + layer_rc.get_end_lsn(), + layer_rc.is_dropped(), + self.timelineid + ); + layers.insert(Arc::clone(layer_rc)); + } + + Ok(()) + } + + /// + /// Get a handle to a Layer for reading. + /// + /// The returned SnapshotFile might be from an ancestor timeline, if the + /// relation hasn't been updated on this timeline yet. + /// + fn get_layer_for_read( + &self, + rel: RelishTag, + lsn: Lsn, + ) -> Result<Option<(Arc<dyn Layer>, Lsn)>> { + trace!( + "get_layer_for_read called for {} at {}/{}", + rel, + self.timelineid, + lsn + ); + + // If you requested a page at an older LSN, before the branch point, dig into + // the right ancestor timeline. This can only happen if you launch a read-only + // node with an old LSN, a primary always uses a recent LSN in its requests. + let mut timeline = self; + let mut lsn = lsn; + + while lsn < timeline.ancestor_lsn { + trace!("going into ancestor {} ", timeline.ancestor_lsn); + timeline = &timeline.ancestor_timeline.as_ref().unwrap(); + } + + // Now we have the right starting timeline for our search. + loop { + let layers = timeline.layers.lock().unwrap(); + // + // FIXME: If the relation has been dropped, does this return the right + // thing? The compute node should not normally request dropped relations, + // but if OID wraparound happens the same relfilenode might get reused + // for an unrelated relation. + // + + // Do we have a layer on this timeline? + if let Some(layer) = layers.get(rel, lsn) { + trace!( + "found layer in cache: {} {}-{}", + timeline.timelineid, + layer.get_start_lsn(), + layer.get_end_lsn() + ); + + assert!(layer.get_start_lsn() <= lsn); + + return Ok(Some((layer.clone(), lsn))); + } + + // If not, check if there's a layer on the ancestor timeline + if let Some(ancestor) = &timeline.ancestor_timeline { + lsn = timeline.ancestor_lsn; + timeline = &ancestor.as_ref(); + trace!("recursing into ancestor at {}/{}", timeline.timelineid, lsn); + continue; + } + return Ok(None); + } + } + + /// + /// Get a handle to the latest layer for appending. + /// + fn get_layer_for_write(&self, rel: RelishTag, lsn: Lsn) -> Result<Arc<dyn Layer>> { + if lsn < self.last_valid_lsn.load() { + bail!("cannot modify relation after advancing last_valid_lsn"); + } + + // Look up the correct layer. + let layers = self.layers.lock().unwrap(); + if let Some(layer) = layers.get(rel, lsn) { + + // If it's writeable, good, return it. 
+ if !layer.is_frozen() { + return Ok(Arc::clone(&layer)); + } + } + + // No (writeable) layer for this relation yet. Create one. + // + // Is this a completely new relation? Or the first modification after branching? + // + + // FIXME: race condition, if another thread creates the layer while + // we're busy looking up the previous one. We should hold the mutex throughout + // this operation, but for that we'll need a version of get_layer_for_read() + // that doesn't try to also grab the mutex. + drop(layers); + + let layer; + if let Some((prev_layer, _prev_lsn)) = self.get_layer_for_read(rel, lsn)? { + // Create new entry after the previous one. + let lsn; + if prev_layer.get_timeline_id() != self.timelineid { + // First modification on this timeline + lsn = self.ancestor_lsn; + trace!( + "creating file for write for {} at branch point {}/{}", + rel, + self.timelineid, + lsn + ); + } else { + lsn = prev_layer.get_end_lsn(); + trace!( + "creating file for write for {} after previous layer {}/{}", + rel, + self.timelineid, + lsn + ); + } + trace!( + "prev layer is at {}/{} - {}", + prev_layer.get_timeline_id(), + prev_layer.get_start_lsn(), + prev_layer.get_end_lsn() + ); + layer = InMemoryLayer::copy_snapshot( + self.conf, + &*self.walredo_mgr, + &*prev_layer, + self.timelineid, + self.tenantid, + lsn, + )?; + } else { + // New relation. + trace!( + "creating layer for write for new rel {} at {}/{}", + rel, + self.timelineid, + lsn + ); + + layer = InMemoryLayer::create(self.conf, self.timelineid, self.tenantid, rel, lsn)?; + } + + let mut layers = self.layers.lock().unwrap(); + let layer_rc: Arc<dyn Layer> = Arc::new(layer); + layers.insert(Arc::clone(&layer_rc)); + + Ok(layer_rc) + } + + /// + /// Wait until WAL has been received up to the given LSN. + /// + fn wait_lsn(&self, mut lsn: Lsn) -> anyhow::Result<Lsn> { + // When invalid LSN is requested, it means "don't wait, return latest version of the page" + // This is necessary for bootstrap. + if lsn == Lsn(0) { + let last_valid_lsn = self.last_valid_lsn.load(); + trace!( + "walreceiver doesn't work yet last_valid_lsn {}, requested {}", + last_valid_lsn, + lsn + ); + lsn = last_valid_lsn; + } + + self.last_valid_lsn + .wait_for_timeout(lsn, TIMEOUT) + .with_context(|| { + format!( + "Timed out while waiting for WAL record at LSN {} to arrive", + lsn + ) + })?; + + Ok(lsn) + } + + /// + /// Flush to disk all data that was written with the put_* functions + /// + /// NOTE: This has nothing to do with checkpoint in PostgreSQL. We don't + /// know anything about them here in the repository. + fn checkpoint_internal(&self) -> Result<()> { + let last_valid_lsn = self.last_valid_lsn.load(); + let last_record_lsn = self.last_record_lsn.load(); + let prev_record_lsn = self.prev_record_lsn.load(); + trace!( + "checkpointing timeline {} at {}", + self.timelineid, + last_valid_lsn + ); + + // Grab lock on the layer map. + // + // TODO: We hold it locked throughout the checkpoint operation. That's bad, + // the checkpointing could take many seconds, and any incoming get_page_at_lsn() + // requests will block. + let mut layers = self.layers.lock().unwrap(); + + // Walk through each in-memory layer, and write any dirty data to disk, + // as snapshot files. + // + // We currently write a new snapshot file for every relation + // that was modified, if there have been any changes at all. + // It would be smarter to only flush out in-memory layers that + // have accumulated a fair amount of changes. 
Note that the + // start and end LSNs of snapshot files belonging to different + // relations don't have to line up, although currently they do + // because of the way this works. So you could have a snapshot + // file covering LSN range 100-200 for one relation, and a + // snapshot file covering 150-250 for another relation. The + // read functions should even cope with snapshot files + // covering overlapping ranges for the same relation, although + // that situation never arises currently. + // + // Note: We aggressively freeze and unload all the layer + // structs. Even if a layer is actively being used. This + // keeps memory usage in check, but is probably too + // aggressive. Some kind of LRU policy would be appropriate. + // + + // It is not possible to modify a BTreeMap while you're iterating + // it. So we have to make a temporary copy, and iterate through that, + // while we modify the original. + let old_layers = layers.inner.clone(); + + // Call freeze() on any unfrozen layers (that is, layers that haven't + // been written to disk yet). + // Call unload() on all frozen layers, to release memory. + for layer in old_layers.values() { + if !layer.is_frozen() { + let new_layers = layer.freeze(last_valid_lsn, &*self.walredo_mgr)?; + + // replace this layer with the new layers that 'freeze' returned + layers.remove(&**layer); + for new_layer in new_layers { + trace!( + "freeze returned layer {} {}-{}", + new_layer.get_relish_tag(), + new_layer.get_start_lsn(), + new_layer.get_end_lsn() + ); + layers.insert(Arc::clone(&new_layer)); + } + } else { + layer.unload()?; + } + } + + // Also save the metadata, with updated last_valid_lsn and last_record_lsn, to a + // file in the timeline dir. The metadata reflects the last_valid_lsn as it was + // when we *started* the checkpoint, so that after crash, the WAL receiver knows + // to restart the streaming from that WAL position. + let ancestor_timelineid = if let Some(x) = &self.ancestor_timeline { + Some(x.timelineid) + } else { + None + }; + let metadata = TimelineMetadata { + last_valid_lsn: last_valid_lsn, + last_record_lsn: last_record_lsn, + prev_record_lsn: prev_record_lsn, + ancestor_timeline: ancestor_timelineid, + ancestor_lsn: self.ancestor_lsn, + }; + LayeredRepository::save_metadata(self.conf, self.timelineid, self.tenantid, &metadata)?; + + self.last_checkpoint_lsn.store(last_valid_lsn); + + Ok(()) + } + + /// + /// Garbage collect snapshot files on a timeline that are no longer needed. + /// + /// The caller specifies how much history is needed with the two arguments: + /// + /// retain_lsns: keep a version of each page at these LSNs + /// cutoff: also keep everything newer than this LSN + /// + /// The 'retain_lsns' list is currently used to prevent removing files that + /// are needed by child timelines. In the future, the user might be able to + /// name additional points in time to retain. The caller is responsible for + /// collecting that information. + /// + /// The 'cutoff' point is used to retain recent versions that might still be + /// needed by read-only nodes. (As of this writing, the caller just passes + /// the latest LSN subtracted by a constant, and doesn't do anything smart + /// to figure out what read-only nodes might actually need.) + /// + /// Currently, we don't make any attempt at removing unneeded page versions + /// within a snapshot file. We can only remove the whole file if it's fully + /// obsolete. 
+ /// + pub fn gc_timeline(&self, retain_lsns: Vec<Lsn>, cutoff: Lsn) -> Result<GcResult> { + let now = Instant::now(); + let mut result: GcResult = Default::default(); + + // Scan all snapshot files in the directory. For each file, if a newer file + // exists, we can remove the old one. + self.checkpoint()?; + + let mut layers = self.layers.lock().unwrap(); + + info!( + "running GC on timeline {}, cutoff {}", + self.timelineid, cutoff + ); + + let mut layers_to_remove: Vec<Arc<dyn Layer>> = Vec::new(); + + // Determine for each file if it needs to be retained + 'outer: for ((rel, _lsn), l) in layers.inner.iter() { + if rel.is_relation() { + result.snapshot_relfiles_total += 1; + } else { + result.snapshot_nonrelfiles_total += 1; + } + + // Is it newer than cutoff point? + if l.get_end_lsn() > cutoff { + info!( + "keeping {} {}-{} because it's newer than cutoff {}", + rel, + l.get_start_lsn(), + l.get_end_lsn(), + cutoff + ); + if rel.is_relation() { + result.snapshot_relfiles_needed_by_cutoff += 1; + } else { + result.snapshot_nonrelfiles_needed_by_cutoff += 1; + } + continue 'outer; + } + + // Is it needed by a child branch? + for retain_lsn in &retain_lsns { + // FIXME: are the bounds inclusive or exclusive? + if l.get_start_lsn() <= *retain_lsn && *retain_lsn <= l.get_end_lsn() { + info!( + "keeping {} {}-{} because it's needed by branch point {}", + rel, + l.get_start_lsn(), + l.get_end_lsn(), + *retain_lsn + ); + if rel.is_relation() { + result.snapshot_relfiles_needed_by_branches += 1; + } else { + result.snapshot_nonrelfiles_needed_by_branches += 1; + } + continue 'outer; + } + } + + // Unless the relation was dropped, is there a later snapshot file for this relation? + if !l.is_dropped() && !layers.newer_layer_exists(l.get_relish_tag(), l.get_end_lsn()) { + if rel.is_relation() { + result.snapshot_relfiles_not_updated += 1; + } else { + result.snapshot_nonrelfiles_not_updated += 1; + } + continue 'outer; + } + + // We didn't find any reason to keep this file, so remove it. + info!( + "garbage collecting {} {}-{} {}", + l.get_relish_tag(), + l.get_start_lsn(), + l.get_end_lsn(), + l.is_dropped() + ); + layers_to_remove.push(Arc::clone(l)); + } + + // Actually delete the layers from disk and remove them from the map. + // (couldn't do this in the loop above, because you cannot modify a collection + // while iterating it. BTreeMap::retain() would be another option) + for doomed_layer in layers_to_remove { + doomed_layer.delete()?; + layers.remove(&*doomed_layer); + + if doomed_layer.is_dropped() { + if doomed_layer.get_relish_tag().is_relation() { + result.snapshot_relfiles_dropped += 1; + } else { + result.snapshot_nonrelfiles_dropped += 1; + } + } else { + if doomed_layer.get_relish_tag().is_relation() { + result.snapshot_relfiles_removed += 1; + } else { + result.snapshot_nonrelfiles_removed += 1; + } + } + } + + result.elapsed = now.elapsed(); + Ok(result) + } +} diff --git a/pageserver/src/layered_repository/README.md b/pageserver/src/layered_repository/README.md new file mode 100644 index 0000000000..db3d7feb79 --- /dev/null +++ b/pageserver/src/layered_repository/README.md @@ -0,0 +1,298 @@ +# Overview + +The on-disk format is based on immutable files. The page server +receives a stream of incoming WAL, parses the WAL records to determine +which pages they apply to, and accumulates the incoming changes in +memory. Every now and then, the accumulated changes are written out to +new files. + +The files are called "snapshot files". Each snapshot file corresponds 
The snapshot files for each timeline +are stored in the timeline's subdirectory under +.zenith/tenants//timelines. + +The files are named like this: + + rel______ + +For example: + + rel_1663_13990_2609_0_000000000169C348_0000000001702000 + +Some non-relation files are also stored in repository. For example, +a CLOG segment would be named like this: + + pg_xact_0000_00000000198B06B0_00000000198C2550 + +There is no difference in how the relation and non-relation files are +managed, except that the first part of file names is different. +Internally, the relations and non-relation files that are managed in +the versioned store are together called "relishes". + +Each snapshot file contains a full snapshot, that is, full copy of all +pages in the relation, as of the "start LSN". It also contains all WAL +records applicable to the relation between the start and end +LSNs. With this information, the page server can reconstruct any page +version of the relation in the LSN range. + +If a file has been dropped, the last snapshot file for it is created +with the _DROPPED suffix, e.g. + + rel_1663_13990_2609_0_000000000169C348_0000000001702000_DROPPED + +In addition to the relations, with "rel_*" prefix, we use the same +format for storing various smaller files from the PostgreSQL data +directory. They will use different suffixes and the naming scheme +up to the LSN range varies. The Zenith source code uses the term +"relish" to mean "a relation, or other file that's treated like a +relation in the storage" + +## Notation used in this document + +The full path of a snapshot file looks like this: + + .zenith/tenants/941ddc8604413b88b3d208bddf90396c/timelines/4af489b06af8eed9e27a841775616962/rel_1663_13990_2609_0_000000000169C348_0000000001702000 + +For simplicity, the examples below use a simplified notation for the +paths. The tenant ID is left out, the timeline ID is replaced with +the human-readable branch name, and spcnode+dbnode+relnode+forkum with +a human-readable table name. The LSNs are also shorter. For example, a +snapshot file for 'orders' table on 'main' branch, with LSN range +100-200 would be: + + main/orders_100_200 + + +# Creating snapshot files + +Let's start with a simple example with a system that contains one +branch called 'main' and two tables, 'orders' and 'customers'. The end +of WAL is currently at LSN 250. In this starting situation, you would +have two files on disk: + + main/orders_100_200 + main/customers_100_200 + +In addition to those files, the recent changes between LSN 200 and the +end of WAL at 250 are kept in memory. If the page server crashes, the +latest records between 200-250 need to be re-read from the WAL. + +Whenever enough WAL has been accumulated in memory, the page server +writes out the changes in memory into new snapshot files. This process +is called "checkpointing" (not to be confused with the PostgreSQL +checkpoints, that's a different thing). The page server only creates +snapshot files for relations that have been modified since the last +checkpoint. For example, if the current end of WAL is at LSN 450, and +the last checkpoint happened at LSN 400 but there hasn't been any +recent changes to 'customers' table, you would have these files on +disk: + + main/orders_100_200 + main/orders_200_300 + main/orders_300_400 + main/customers_100_200 + +If the customers table is modified later, a new file is created for it +at the next checkpoint. 
The new file will cover the "gap" from the +last snapshot file, so the LSN ranges are always contiguous: + + main/orders_100_200 + main/orders_200_300 + main/orders_300_400 + main/customers_100_200 + main/customers_200_500 + +## Reading page versions + +Whenever a GetPage@LSN request comes in from the compute node, the +page server needs to reconstruct the requested page, as it was at the +requested LSN. To do that, the page server first checks the recent +in-memory layer; if the requested page version is found there, it can +be returned immediately without looking at the files on +disk. Otherwise the page server needs to locate the snapshot file that +contains the requested page version. + +For example, if a request comes in for table 'orders' at LSN 250, the +page server would load the 'main/orders_200_300' file into memory, and +reconstruct and return the requested page from it, as it was at +LSN 250. Because the snapshot file consists of a full image of the +relation at the start LSN and the WAL, reconstructing the page +involves replaying any WAL records applicable to the page between LSNs +200-250, starting from the base image at LSN 200. + +A request at a file boundary can be satisfied using either file. For +example, if there are two files on disk: + + main/orders_100_200 + main/orders_200_300 + +and a request comes with LSN 200, either file can be used for it. It +is better to use the later file, however, because it contains an +already materialized version of all the pages at LSN 200. Using the +first file, you would need to apply any WAL records between 100 and +200 to reconstruct the requested page. + +# Multiple branches + +Imagine that a child branch is created at LSN 250: + + @250 + ----main--+--------------------------> + \ + +---child--------------> + + +Then, the 'orders' table is updated differently on the 'main' and +'child' branches. You now have this situation on disk: + + main/orders_100_200 + main/orders_200_300 + main/orders_300_400 + main/customers_100_200 + child/orders_250_300 + child/orders_300_400 + +Because the 'customers' table hasn't been modified on the child +branch, there is no file for it there. If you request a page for it on +the 'child' branch, the page server will not find any snapshot file +for it in the 'child' directory, so it will recurse to look into the +parent 'main' branch instead. + +From the 'child' branch's point of view, the history for each relation +is linear, and the request's LSN identifies unambiguously which file +you need to look at. For example, the history for the 'orders' table +on the 'main' branch consists of these files: + + main/orders_100_200 + main/orders_200_300 + main/orders_300_400 + +And from the 'child' branch's point of view, it consists of these +files: + + main/orders_100_200 + main/orders_200_300 + child/orders_250_300 + child/orders_300_400 + +The branch metadata includes the point where the child branch was +created, LSN 250. If a page request comes with LSN 275, we read the +page version from the 'child/orders_250_300' file. If the request LSN +is 225, we read it from the 'main/orders_200_300' file instead. The +page versions between 250-300 in the 'main/orders_200_300' file are +ignored when operating on the child branch. + +Note: It doesn't make any difference if the child branch is created +when the end of the main branch was at LSN 250, or later when the tip of +the main branch had already moved on. The latter case, creating a +branch at a historic LSN, is how we support PITR in Zenith. 
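+
+The lookup this implies can be sketched in Rust. This is illustrative
+pseudocode, not the actual page server API: the `ancestor`,
+`ancestor_lsn` and `find_snapshot_file` names are assumptions made for
+the example (the real logic lives in `get_layer_for_read`):
+
+    fn resolve(timeline: &Arc<Timeline>, rel: RelishTag, mut lsn: Lsn) -> Option<SnapshotFile> {
+        let mut timeline = timeline;
+        // If the request is older than the branch point, the data lives on
+        // an ancestor: descend until we find the timeline that covers the LSN.
+        while lsn < timeline.ancestor_lsn {
+            timeline = timeline.ancestor.as_ref()?;
+        }
+        loop {
+            // A snapshot file on this timeline whose LSN range contains
+            // the requested LSN satisfies the request.
+            if let Some(file) = timeline.find_snapshot_file(rel, lsn) {
+                return Some(file);
+            }
+            // The relation hasn't been modified on this timeline yet:
+            // continue the search on the parent, clamped to the branch point.
+            lsn = timeline.ancestor_lsn;
+            timeline = timeline.ancestor.as_ref()?;
+        }
+    }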
+ + +# Garbage collection + +In this scheme, we keep creating new snapshot files over time. We also +need a mechanism to remove old files that are no longer needed, +because disk space isn't infinite. + +What files are still needed? Currently, the page server supports PITR +and branching from any branch at any LSN that is "recent enough" from +the tip of the branch. "Recent enough" is defined as an LSN horizon, +which by default is 64 MB (see DEFAULT_GC_HORIZON). For this +example, let's assume that the LSN horizon is 150 units. + +Let's look at the single branch scenario again. Imagine that the end +of the branch is LSN 525, so that the GC horizon is currently at +525-150 = 375 + + main/orders_100_200 + main/orders_200_300 + main/orders_300_400 + main/orders_400_500 + main/customers_100_200 + +We can remove files 'main/orders_100_200' and 'main/orders_200_300', +because the end LSNs of those files are older than GC horizon 375, and +there are more recent snapshot files for the table. 'main/orders_300_400' +and 'main/orders_400_500' are still within the horizon, so they must be +retained. 'main/customers_100_200' is old enough, but it cannot be +removed because there is no newer snapshot file for the table. + +Things get slightly more complicated with multiple branches. All of +the above still holds, but in addition to recent files we must also +retain older snapshot files that are still needed by child branches. +For example, if a child branch is created at LSN 150, and the 'customers' +table is updated on the branch, you would have these files: + + main/orders_100_200 + main/orders_200_300 + main/orders_300_400 + main/orders_400_500 + main/customers_100_200 + child/customers_150_300 + +In this situation, the 'main/orders_100_200' file cannot be removed, +even though it is older than the GC horizon, because it is still +needed by the child branch. 'main/orders_200_300' can still be +removed. So after garbage collection, these files would remain: + + main/orders_100_200 + + main/orders_300_400 + main/orders_400_500 + main/customers_100_200 + child/customers_150_300 + +If 'orders' is modified later on the 'child' branch, we will create a +snapshot file for it on the child: + + main/orders_100_200 + + main/orders_300_400 + main/orders_400_500 + main/customers_100_200 + child/customers_150_300 + child/orders_150_400 + +After this, the 'main/orders_100_200' file can be removed. It is no +longer needed by the child branch, because there is a newer snapshot +file there. TODO: This optimization hasn't been implemented! The GC +algorithm will currently keep the file on the 'main' branch anyway, for +as long as the child branch exists. + + +# TODO: On LSN ranges + +In principle, each relation can be checkpointed separately, i.e. the +LSN ranges of the files don't need to line up. So this would be legal: + + main/orders_100_200 + main/orders_200_300 + main/orders_300_400 + main/customers_150_250 + main/customers_250_500 + +However, the code currently always checkpoints all relations together. +So that situation doesn't arise in practice. + +It would also be OK to have overlapping LSN ranges for the same relation: + + main/orders_100_200 + main/orders_200_300 + main/orders_250_350 + main/orders_300_400 + +The code that reads the snapshot files should cope with this, but this +situation doesn't arise either, because the checkpointing code never +does that. It could be useful, however, as a transient state when +garbage collecting around branch points, or explicit recovery +points. 
+For example, if we start with this:
+
+    main/orders_100_200
+    main/orders_200_300
+    main/orders_300_400
+
+And there is a branch or explicit recovery point at LSN 150, we could
+replace 'main/orders_100_200' with 'main/orders_150_150' to keep a
+snapshot only at that exact point that's still needed, removing the
+other page versions around it. But such compaction has not been
+implemented yet.
diff --git a/pageserver/src/layered_repository/inmemory_layer.rs b/pageserver/src/layered_repository/inmemory_layer.rs
new file mode 100644
index 0000000000..f9ac9178bb
--- /dev/null
+++ b/pageserver/src/layered_repository/inmemory_layer.rs
@@ -0,0 +1,534 @@
+//!
+//! An in-memory layer stores recently received page versions in memory. The page versions
+//! are held in a BTreeMap, and there's another BTreeMap to track the size of the relation.
+//!
+
+use crate::layered_repository::storage_layer::Layer;
+use crate::layered_repository::storage_layer::PageVersion;
+use crate::layered_repository::SnapshotLayer;
+use crate::relish::*;
+use crate::repository::WALRecord;
+use crate::walredo::WalRedoManager;
+use crate::PageServerConf;
+use crate::{ZTenantId, ZTimelineId};
+use anyhow::{bail, Result};
+use bytes::Bytes;
+use log::*;
+use std::collections::BTreeMap;
+use std::ops::Bound::Included;
+use std::sync::{Arc, Mutex};
+
+use zenith_utils::lsn::Lsn;
+
+static ZERO_PAGE: Bytes = Bytes::from_static(&[0u8; 8192]);
+
+pub struct InMemoryLayer {
+    conf: &'static PageServerConf,
+    tenantid: ZTenantId,
+    timelineid: ZTimelineId,
+    rel: RelishTag,
+
+    ///
+    /// This layer contains all the changes from 'start_lsn'. The
+    /// start is inclusive. There is no end LSN; we only use an in-memory
+    /// layer at the end of a timeline.
+    ///
+    start_lsn: Lsn,
+
+    /// The above fields never change. The parts that do change are in 'inner',
+    /// and protected by a mutex.
+    inner: Mutex<InMemoryLayerInner>,
+}
+
+pub struct InMemoryLayerInner {
+    /// If this relation was dropped, remember when that happened.
+    drop_lsn: Option<Lsn>,
+
+    ///
+    /// All versions of all pages in the layer are kept here.
+    /// Indexed by block number and LSN.
+    ///
+    page_versions: BTreeMap<(u32, Lsn), PageVersion>,
+
+    ///
+    /// `relsizes` tracks the size of the relation at different points in time.
+    ///
+    relsizes: BTreeMap<Lsn, u32>,
+}
+
+impl Layer for InMemoryLayer {
+    fn is_frozen(&self) -> bool {
+        return false;
+    }
+
+    fn get_timeline_id(&self) -> ZTimelineId {
+        return self.timelineid;
+    }
+
+    fn get_relish_tag(&self) -> RelishTag {
+        return self.rel;
+    }
+
+    fn get_start_lsn(&self) -> Lsn {
+        return self.start_lsn;
+    }
+
+    fn get_end_lsn(&self) -> Lsn {
+        return Lsn(u64::MAX);
+    }
+
+    fn is_dropped(&self) -> bool {
+        let inner = self.inner.lock().unwrap();
+        inner.drop_lsn.is_some()
+    }
+
+    /// Look up given page in the cache.
+    fn get_page_at_lsn(
+        &self,
+        walredo_mgr: &dyn WalRedoManager,
+        blknum: u32,
+        lsn: Lsn,
+    ) -> Result<Bytes> {
+        // Scan the BTreeMap backwards, starting from the given entry.
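+        // For example, with blknum = 7 and lsn = 250, and these entries
+        // in 'page_versions':
+        //
+        //   (7, 210) -> WAL record (will_init = false)
+        //   (7, 230) -> full page image
+        //   (7, 240) -> WAL record (will_init = false)
+        //
+        // the range (7, 0) ..= (7, 250) is walked from the back: the
+        // record at 240 is collected, and the image at 230 terminates
+        // the scan (the record at 210 is never needed). After reversing
+        // the collected records, WAL redo applies the record at 240 on
+        // top of the image at 230 to produce the page as of LSN 250.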
+ let mut records: Vec = Vec::new(); + let mut page_img: Option = None; + let mut need_base_image_lsn: Option = Some(lsn); + + { + let inner = self.inner.lock().unwrap(); + let minkey = (blknum, Lsn(0)); + let maxkey = (blknum, lsn); + let mut iter = inner + .page_versions + .range((Included(&minkey), Included(&maxkey))); + while let Some(((_blknum, entry_lsn), entry)) = iter.next_back() { + if let Some(img) = &entry.page_image { + page_img = Some(img.clone()); + need_base_image_lsn = None; + break; + } else if let Some(rec) = &entry.record { + records.push(rec.clone()); + if rec.will_init { + // This WAL record initializes the page, so no need to go further back + need_base_image_lsn = None; + break; + } else { + need_base_image_lsn = Some(*entry_lsn); + } + } else { + // No base image, and no WAL record. Huh? + bail!("no page image or WAL record for requested page"); + } + } + + // release lock on 'page_versions' + } + records.reverse(); + + // If we needed a base image to apply the WAL records against, we should have found it in memory. + if let Some(lsn) = need_base_image_lsn { + if records.is_empty() { + // no records, and no base image. This can happen if PostgreSQL extends a relation + // but never writes the page. + // + // Would be nice to detect that situation better. + warn!("Page {} blk {} at {} not found", self.rel, blknum, lsn); + return Ok(ZERO_PAGE.clone()); + } + bail!( + "No base image found for page {} blk {} at {}/{}", + self.rel, + blknum, + self.timelineid, + lsn + ); + } + + // If we have a page image, and no WAL, we're all set + if records.is_empty() { + if let Some(img) = page_img { + trace!( + "found page image for blk {} in {} at {}/{}, no WAL redo required", + blknum, + self.rel, + self.timelineid, + lsn + ); + Ok(img) + } else { + // FIXME: this ought to be an error? + warn!("Page {} blk {} at {} not found", self.rel, blknum, lsn); + Ok(ZERO_PAGE.clone()) + } + } else { + // We need to do WAL redo. + // + // If we don't have a base image, then the oldest WAL record better initialize + // the page + if page_img.is_none() && !records.first().unwrap().will_init { + // FIXME: this ought to be an error? + warn!( + "Base image for page {}/{} at {} not found, but got {} WAL records", + self.rel, + blknum, + lsn, + records.len() + ); + Ok(ZERO_PAGE.clone()) + } else { + if page_img.is_some() { + trace!("found {} WAL records and a base image for blk {} in {} at {}/{}, performing WAL redo", records.len(), blknum, self.rel, self.timelineid, lsn); + } else { + trace!("found {} WAL records that will init the page for blk {} in {} at {}/{}, performing WAL redo", records.len(), blknum, self.rel, self.timelineid, lsn); + } + let img = walredo_mgr.request_redo(self.rel, blknum, lsn, page_img, records)?; + + self.put_page_image(blknum, lsn, img.clone())?; + + Ok(img) + } + } + } + + /// Get size of the relation at given LSN + fn get_relish_size(&self, lsn: Lsn) -> Result> { + // Scan the BTreeMap backwards, starting from the given entry. + let inner = self.inner.lock().unwrap(); + let mut iter = inner.relsizes.range((Included(&Lsn(0)), Included(&lsn))); + + if let Some((_entry_lsn, entry)) = iter.next_back() { + let result = *entry; + drop(inner); + trace!("get_relish_size: {} at {} -> {}", self.rel, lsn, result); + Ok(Some(result)) + } else { + Ok(None) + } + } + + /// Does this relation exist at given LSN? + fn get_rel_exists(&self, lsn: Lsn) -> Result { + let inner = self.inner.lock().unwrap(); + + // Is the requested LSN after the rel was dropped? 
+ if let Some(drop_lsn) = inner.drop_lsn { + if lsn >= drop_lsn { + return Ok(false); + } + } + + // Otherwise, it exists + Ok(true) + } + + // Write operations + + /// Common subroutine of the public put_wal_record() and put_page_image() functions. + /// Adds the page version to the in-memory tree + fn put_page_version(&self, blknum: u32, lsn: Lsn, pv: PageVersion) -> Result<()> { + trace!( + "put_page_version blk {} of {} at {}/{}", + blknum, + self.rel, + self.timelineid, + lsn + ); + let mut inner = self.inner.lock().unwrap(); + + let old = inner.page_versions.insert((blknum, lsn), pv); + + if old.is_some() { + // We already had an entry for this LSN. That's odd.. + warn!( + "Page version of rel {:?} blk {} at {} already exists", + self.rel, blknum, lsn + ); + } + + // Also update the relation size, if this extended the relation. + if self.rel.is_blocky() { + let mut iter = inner.relsizes.range((Included(&Lsn(0)), Included(&lsn))); + + let oldsize; + if let Some((_entry_lsn, entry)) = iter.next_back() { + oldsize = *entry; + } else { + oldsize = 0; + //bail!("No old size found for {} at {}", self.tag, lsn); + } + if blknum >= oldsize { + trace!( + "enlarging relation {} from {} to {} blocks at {}", + self.rel, + oldsize, + blknum + 1, + lsn + ); + inner.relsizes.insert(lsn, blknum + 1); + } + } + + Ok(()) + } + + /// Remember that the relation was truncated at given LSN + fn put_truncation(&self, lsn: Lsn, relsize: u32) -> anyhow::Result<()> { + let mut inner = self.inner.lock().unwrap(); + let old = inner.relsizes.insert(lsn, relsize); + + if old.is_some() { + // We already had an entry for this LSN. That's odd.. + warn!("Inserting truncation, but had an entry for the LSN already"); + } + + Ok(()) + } + + /// Remember that the relation was dropped at given LSN + fn put_unlink(&self, lsn: Lsn) -> anyhow::Result<()> { + let mut inner = self.inner.lock().unwrap(); + + assert!(inner.drop_lsn.is_none()); + inner.drop_lsn = Some(lsn); + + info!("dropped relation {} at {}", self.rel, lsn); + + Ok(()) + } + + /// + /// Write the this in-memory layer to disk, as a snapshot layer. + /// + /// The cutoff point for the layer that's written to disk is 'end_lsn'. + /// + /// Returns new layers that replace this one. Always returns a + /// SnapshotLayer containing the page versions that were written to disk, + /// but if there were page versions newer than 'end_lsn', also return a new + /// in-memory layer containing those page versions. The caller replaces + /// this layer with the returned layers in the layer map. + /// + fn freeze( + &self, + cutoff_lsn: Lsn, + walredo_mgr: &dyn WalRedoManager, + ) -> Result>> { + info!( + "freezing in memory layer for {} on timeline {} at {}", + self.rel, self.timelineid, cutoff_lsn + ); + + let inner = self.inner.lock().unwrap(); + + // Normally, use the cutoff LSN as the end of the frozen layer. + // But if the relation was dropped, we know that there are no + // more changes coming in for it, and in particular we know that + // there are no changes "in flight" for the LSN anymore, so we use + // the drop LSN instead. The drop-LSN could be ahead of the + // caller-specified LSN! + let dropped = inner.drop_lsn.is_some(); + let end_lsn = + if dropped { + inner.drop_lsn.unwrap() + } else { + cutoff_lsn + }; + + // Divide all the page versions into old and new at the 'end_lsn' cutoff point. 
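+        // For example, if end_lsn is 300 and there are page versions at
+        // LSNs 240, 290 and 310, the versions at 240 and 290 go into the
+        // snapshot layer that is written to disk, while the version at
+        // 310 is carried over to a new in-memory layer that starts at
+        // LSN 300.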
+ let mut before_page_versions; + let mut before_relsizes; + let mut after_page_versions; + let mut after_relsizes; + if !dropped { + before_relsizes = BTreeMap::new(); + after_relsizes = BTreeMap::new(); + for (lsn, size) in inner.relsizes.iter() { + if *lsn > end_lsn { + after_relsizes.insert(*lsn, *size); + } else { + before_relsizes.insert(*lsn, *size); + } + } + + before_page_versions = BTreeMap::new(); + after_page_versions = BTreeMap::new(); + for ((blknum, lsn), pv) in inner.page_versions.iter() { + if *lsn > end_lsn { + after_page_versions.insert((*blknum, *lsn), pv.clone()); + } else { + before_page_versions.insert((*blknum, *lsn), pv.clone()); + } + } + } else { + before_page_versions = inner.page_versions.clone(); + before_relsizes = inner.relsizes.clone(); + after_relsizes = BTreeMap::new(); + after_page_versions = BTreeMap::new(); + } + + // we can release the lock now. + drop(inner); + + // Write the page versions before the cutoff to disk. + let snapfile = SnapshotLayer::create( + self.conf, + self.timelineid, + self.tenantid, + self.rel, + self.start_lsn, + end_lsn, + dropped, + before_page_versions, + before_relsizes, + )?; + let mut result: Vec> = Vec::new(); + + // If there were any page versions after the cutoff, initialize a new in-memory layer + // to hold them + if !after_relsizes.is_empty() || !after_page_versions.is_empty() { + info!("created new in-mem layer for {} {}-", self.rel, end_lsn); + + let new_layer = Self::copy_snapshot( + self.conf, + walredo_mgr, + &snapfile, + self.timelineid, + self.tenantid, + end_lsn, + )?; + let mut new_inner = new_layer.inner.lock().unwrap(); + new_inner.page_versions.append(&mut after_page_versions); + new_inner.relsizes.append(&mut after_relsizes); + drop(new_inner); + + result.push(Arc::new(new_layer)); + } + result.push(Arc::new(snapfile)); + + Ok(result) + } + + fn delete(&self) -> Result<()> { + // Nothing to do. When the reference is dropped, the memory is released. + Ok(()) + } + + fn unload(&self) -> Result<()> { + // cannot unload in-memory layer. Freeze instead + Ok(()) + } +} + +impl InMemoryLayer { + /// + /// Create a new, empty, in-memory layer + /// + pub fn create( + conf: &'static PageServerConf, + timelineid: ZTimelineId, + tenantid: ZTenantId, + rel: RelishTag, + start_lsn: Lsn, + ) -> Result { + trace!( + "initializing new empty InMemoryLayer for writing {} on timeline {} at {}", + rel, + timelineid, + start_lsn + ); + + Ok(InMemoryLayer { + conf, + timelineid, + tenantid, + rel, + start_lsn, + inner: Mutex::new(InMemoryLayerInner { + drop_lsn: None, + page_versions: BTreeMap::new(), + relsizes: BTreeMap::new(), + }), + }) + } + + /// + /// Initialize a new InMemoryLayer for, by copying the state at the given + /// point in time from given existing layer. + /// + pub fn copy_snapshot( + conf: &'static PageServerConf, + walredo_mgr: &dyn WalRedoManager, + src: &dyn Layer, + timelineid: ZTimelineId, + tenantid: ZTenantId, + lsn: Lsn, + ) -> Result { + trace!( + "initializing new InMemoryLayer for writing {} on timeline {} at {}", + src.get_relish_tag(), + timelineid, + lsn + ); + let mut page_versions = BTreeMap::new(); + let mut relsizes = BTreeMap::new(); + + let size; + if src.get_relish_tag().is_blocky() { + if let Some(sz) = src.get_relish_size(lsn)? 
{
+                relsizes.insert(lsn, sz);
+                size = sz;
+            } else {
+                bail!("no size found for {} at {}", src.get_relish_tag(), lsn);
+            }
+        } else {
+            size = 1;
+        }
+
+        for blknum in 0..size {
+            let img = src.get_page_at_lsn(walredo_mgr, blknum, lsn)?;
+            let pv = PageVersion {
+                page_image: Some(img),
+                record: None,
+            };
+            page_versions.insert((blknum, lsn), pv);
+        }
+
+        Ok(InMemoryLayer {
+            conf,
+            timelineid,
+            tenantid,
+            rel: src.get_relish_tag(),
+            start_lsn: lsn,
+            inner: Mutex::new(InMemoryLayerInner {
+                drop_lsn: None,
+                page_versions,
+                relsizes,
+            }),
+        })
+    }
+
+    /// debugging function to print out the contents of the layer
+    #[allow(unused)]
+    pub fn dump(&self) -> String {
+        let mut result = format!(
+            "----- inmemory layer for {} {}-> ----\n",
+            self.rel, self.start_lsn
+        );
+
+        let inner = self.inner.lock().unwrap();
+
+        for (k, v) in inner.relsizes.iter() {
+            result += &format!("{}: {}\n", k, v);
+        }
+        for (k, v) in inner.page_versions.iter() {
+            result += &format!(
+                "blk {} at {}: {}/{}\n",
+                k.0,
+                k.1,
+                v.page_image.is_some(),
+                v.record.is_some()
+            );
+        }
+
+        result
+    }
+}
diff --git a/pageserver/src/layered_repository/layer_map.rs b/pageserver/src/layered_repository/layer_map.rs
new file mode 100644
index 0000000000..f0a91bd08b
--- /dev/null
+++ b/pageserver/src/layered_repository/layer_map.rs
@@ -0,0 +1,132 @@
+//!
+//! The layer map tracks what layers exist for all the relations in a timeline.
+//!
+//! When the timeline is first accessed, the server lists all the snapshot files
+//! in the timelines/ directory, and populates this map with
+//! SnapshotLayers corresponding to each file. When new WAL is received,
+//! we create InMemoryLayers to hold the incoming records. Now and then,
+//! in the checkpoint() function, the in-memory layers are frozen, forming
+//! new snapshot layers, and the corresponding files are written to disk.
+//!
+
+use crate::layered_repository::storage_layer::Layer;
+use crate::relish::*;
+use anyhow::Result;
+use log::*;
+use std::collections::BTreeMap;
+use std::collections::HashSet;
+use std::ops::Bound::Included;
+use std::sync::Arc;
+use zenith_utils::lsn::Lsn;
+
+/// LayerMap is a BTreeMap keyed by RelishTag and the layer's start LSN.
+/// It provides a couple of convenience functions over a plain BTreeMap.
+pub struct LayerMap {
+    pub inner: BTreeMap<(RelishTag, Lsn), Arc<dyn Layer>>,
+}
+
+impl LayerMap {
+    ///
+    /// Look up using the given rel tag and LSN. This differs from a plain
+    /// key-value lookup in that if there is any layer that covers the
+    /// given LSN, or precedes the given LSN, it is returned. In other words,
+    /// you don't need to know the exact start LSN of the layer.
+    ///
+    pub fn get(&self, tag: RelishTag, lsn: Lsn) -> Option<Arc<dyn Layer>> {
+        let startkey = (tag, Lsn(0));
+        let endkey = (tag, lsn);
+
+        if let Some((_k, v)) = self
+            .inner
+            .range((Included(startkey), Included(endkey)))
+            .next_back()
+        {
+            Some(Arc::clone(v))
+        } else {
+            None
+        }
+    }
+
+    pub fn insert(&mut self, layer: Arc<dyn Layer>) {
+        let rel = layer.get_relish_tag();
+        let start_lsn = layer.get_start_lsn();
+
+        self.inner.insert((rel, start_lsn), Arc::clone(&layer));
+    }
+
+    pub fn remove(&mut self, layer: &dyn Layer) {
+        let rel = layer.get_relish_tag();
+        let start_lsn = layer.get_start_lsn();
+
+        self.inner.remove(&(rel, start_lsn));
+    }
+
+    pub fn list_rels(&self, spcnode: u32, dbnode: u32) -> Result<HashSet<RelTag>> {
+        let mut rels: HashSet<RelTag> = HashSet::new();
+
+        // Scan the layer map to get all the rels in this timeline.
+        for ((rel, _lsn), _l) in self.inner.iter() {
+            if let RelishTag::Relation(reltag) = rel {
+                // FIXME: skip if it was dropped before the requested LSN. But there is no
+                // LSN argument
+
+                if (spcnode == 0 || reltag.spcnode == spcnode)
+                    && (dbnode == 0 || reltag.dbnode == dbnode)
+                {
+                    rels.insert(*reltag);
+                }
+            }
+        }
+        Ok(rels)
+    }
+
+    pub fn list_nonrels(&self, _lsn: Lsn) -> Result<HashSet<RelishTag>> {
+        let mut rels: HashSet<RelishTag> = HashSet::new();
+
+        // Scan the layer map to get all the non-relation relishes in this timeline.
+        for ((rel, _lsn), _l) in self.inner.iter() {
+            // FIXME: skip if it was dropped before the requested LSN.
+
+            if let RelishTag::Relation(_) = rel {
+            } else {
+                rels.insert(*rel);
+            }
+        }
+        Ok(rels)
+    }
+
+    /// Is there a newer layer for the given relation?
+    pub fn newer_layer_exists(&self, rel: RelishTag, lsn: Lsn) -> bool {
+        let startkey = (rel, lsn);
+        let endkey = (rel, Lsn(u64::MAX));
+
+        for ((_rel, newer_lsn), layer) in self.inner.range((Included(startkey), Included(endkey))) {
+            if layer.get_end_lsn() > lsn {
+                trace!(
+                    "found later layer for rel {}, {} {}-{}",
+                    rel,
+                    lsn,
+                    newer_lsn,
+                    layer.get_end_lsn()
+                );
+                return true;
+            } else {
+                trace!(
+                    "found singleton layer for rel {}, {} {}",
+                    rel, lsn, newer_lsn
+                );
+                continue;
+            }
+        }
+        trace!("no later layer found for rel {}, {}", rel, lsn);
+        false
+    }
+}
+
+impl Default for LayerMap {
+    fn default() -> Self {
+        LayerMap {
+            inner: BTreeMap::new(),
+        }
+    }
+}
diff --git a/pageserver/src/layered_repository/snapshot_layer.rs b/pageserver/src/layered_repository/snapshot_layer.rs
new file mode 100644
index 0000000000..e2936ffd31
--- /dev/null
+++ b/pageserver/src/layered_repository/snapshot_layer.rs
@@ -0,0 +1,631 @@
+//!
+//! A SnapshotLayer represents one snapshot file on disk. One file holds all page
+//! versions and size information of one relation, in a range of LSNs.
+//! The name "snapshot file" is a bit of a misnomer, because a snapshot file doesn't
+//! contain a snapshot at a specific LSN, but rather all the page versions in a range
+//! of LSNs.
+//!
+//! Currently, a snapshot file contains the full information needed to reconstruct any
+//! page version in the LSN range, without consulting any other snapshot files. When
+//! a new snapshot file is created for writing, the full contents of the relation are
+//! materialized as they are at the beginning of the LSN range. That can be very
+//! expensive, and we should find a way to store differential files. But this keeps the
+//! read side of things simple. You can find the correct snapshot file based on RelishTag
+//! and timeline+LSN, and once you've located it, you have all the data you need in that
+//! file.
+//!
+//! When a snapshot file needs to be accessed, we slurp the whole file into memory, into
+//! the SnapshotLayer struct. See the load() and unload() functions.
+//!
+//! On disk, the snapshot files are stored in the timelines/ directory.
+//! Currently, there are no subdirectories, and each snapshot file is named like this:
+//!
+//!     <spcnode>_<dbnode>_<relnode>_<forknum>_<start LSN>_<end LSN>
+//!
+//! For example:
+//!
+//!     1663_13990_2609_0_000000000169C348_000000000169C349
+//!
+//! If a relation is dropped, we add a '_DROPPED' suffix to the end of the filename to
+//! indicate that. So the above example would become:
+//!
+//!     1663_13990_2609_0_000000000169C348_000000000169C349_DROPPED
+//!
+//! In that case the end LSN indicates when the relation was dropped; we don't store it
+//! in the file contents in any way.
+//!
+//! A snapshot file is constructed using the 'bookfile' crate. Each file consists of two
+//!
parts: the page versions and the relation sizes. They are stored as separate chapters. +//! +use crate::layered_repository::storage_layer::Layer; +use crate::layered_repository::storage_layer::PageVersion; +use crate::layered_repository::storage_layer::ZERO_PAGE; +use crate::relish::*; +use crate::repository::WALRecord; +use crate::walredo::WalRedoManager; +use crate::PageServerConf; +use crate::{ZTenantId, ZTimelineId}; +use anyhow::{bail, Result}; +use bytes::Bytes; +use log::*; +use std::collections::BTreeMap; +use std::fmt; +use std::fs; +use std::fs::File; +use std::io::Write; +use std::ops::Bound::Included; +use std::path::PathBuf; +use std::sync::{Arc, Mutex, MutexGuard}; + +use bookfile::{Book, BookWriter}; + +use zenith_utils::bin_ser::BeSer; +use zenith_utils::lsn::Lsn; + +// Magic constant to identify a Zenith snapshot file +static SNAPSHOT_FILE_MAGIC: u32 = 0x5A616E01; + +static PAGE_VERSIONS_CHAPTER: u64 = 1; +static REL_SIZES_CHAPTER: u64 = 2; + +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)] +struct SnapshotFileName { + rel: RelishTag, + start_lsn: Lsn, + end_lsn: Lsn, + dropped: bool, +} + +impl SnapshotFileName { + fn from_str(fname: &str) -> Option { + // Split the filename into parts + // + // _____ + // + // or if it was dropped: + // + // ______DROPPED + // + let rel; + let mut parts; + if let Some(rest) = fname.strip_prefix("rel_") { + parts = rest.split('_'); + rel = RelishTag::Relation(RelTag { + spcnode: parts.next()?.parse::().ok()?, + dbnode: parts.next()?.parse::().ok()?, + relnode: parts.next()?.parse::().ok()?, + forknum: parts.next()?.parse::().ok()?, + }); + } else if let Some(rest) = fname.strip_prefix("pg_xact_") { + parts = rest.split('_'); + rel = RelishTag::Slru { + slru: SlruKind::Clog, + segno: u32::from_str_radix(parts.next()?, 16).ok()?, + }; + } else if let Some(rest) = fname.strip_prefix("pg_multixact_members_") { + parts = rest.split('_'); + rel = RelishTag::Slru { + slru: SlruKind::MultiXactMembers, + segno: u32::from_str_radix(parts.next()?, 16).ok()?, + }; + } else if let Some(rest) = fname.strip_prefix("pg_multixact_offsets_") { + parts = rest.split('_'); + rel = RelishTag::Slru { + slru: SlruKind::MultiXactOffsets, + segno: u32::from_str_radix(parts.next()?, 16).ok()?, + }; + } else if let Some(rest) = fname.strip_prefix("pg_filenodemap_") { + parts = rest.split('_'); + rel = RelishTag::FileNodeMap { + spcnode: parts.next()?.parse::().ok()?, + dbnode: parts.next()?.parse::().ok()?, + }; + } else if let Some(rest) = fname.strip_prefix("pg_twophase_") { + parts = rest.split('_'); + rel = RelishTag::TwoPhase { + xid: parts.next()?.parse::().ok()?, + }; + } else if let Some(rest) = fname.strip_prefix("pg_control_checkpoint_") { + parts = rest.split('_'); + rel = RelishTag::Checkpoint; + } else if let Some(rest) = fname.strip_prefix("pg_control_") { + parts = rest.split('_'); + rel = RelishTag::ControlFile; + } else { + return None; + } + + let start_lsn = Lsn::from_hex(parts.next()?).ok()?; + let end_lsn = Lsn::from_hex(parts.next()?).ok()?; + + let mut dropped = false; + if let Some(suffix) = parts.next() { + if suffix == "DROPPED" { + dropped = true; + } else { + warn!("unrecognized filename in timeline dir: {}", fname); + return None; + } + } + if parts.next().is_some() { + warn!("unrecognized filename in timeline dir: {}", fname); + return None; + } + + Some(SnapshotFileName { + rel, + start_lsn, + end_lsn, + dropped, + }) + } + + fn to_string(&self) -> String { + let basename = match self.rel { + RelishTag::Relation(reltag) => 
format!( + "rel_{}_{}_{}_{}", + reltag.spcnode, reltag.dbnode, reltag.relnode, reltag.forknum + ), + RelishTag::Slru { + slru: SlruKind::Clog, + segno, + } => format!("pg_xact_{:04X}", segno), + RelishTag::Slru { + slru: SlruKind::MultiXactMembers, + segno, + } => format!("pg_multixact_members_{:04X}", segno), + RelishTag::Slru { + slru: SlruKind::MultiXactOffsets, + segno, + } => format!("pg_multixact_offsets_{:04X}", segno), + RelishTag::FileNodeMap { spcnode, dbnode } => { + format!("pg_filenodemap_{}_{}", spcnode, dbnode) + } + RelishTag::TwoPhase { xid } => format!("pg_twophase_{}", xid), + RelishTag::Checkpoint => format!("pg_control_checkpoint"), + RelishTag::ControlFile => format!("pg_control"), + }; + + format!( + "{}_{:016X}_{:016X}{}", + basename, + u64::from(self.start_lsn), + u64::from(self.end_lsn), + if self.dropped { "_DROPPED" } else { "" } + ) + } +} + +impl fmt::Display for SnapshotFileName { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.to_string()) + } +} + +/// +/// SnapshotLayer is the in-memory data structure associated with an +/// on-disk snapshot file. We keep a SnapshotLayer in memory for each +/// file, in the LayerMap. If a layer is in "loaded" state, we have a +/// copy of the file in memory, in 'inner'. Otherwise the struct is +/// just a placeholder for a file that exists on disk, and it needs to +/// be loaded before using it in queries. +/// +pub struct SnapshotLayer { + conf: &'static PageServerConf, + pub tenantid: ZTenantId, + pub timelineid: ZTimelineId, + pub rel: RelishTag, + + // + // This entry contains all the changes from 'start_lsn' to 'end_lsn'. The + // start is inclusive, and end is exclusive. + pub start_lsn: Lsn, + pub end_lsn: Lsn, + + dropped: bool, + + inner: Mutex, +} + +pub struct SnapshotLayerInner { + /// If false, the 'page_versions' and 'relsizes' have not been + /// loaded into memory yet. + loaded: bool, + + /// All versions of all pages in the file are are kept here. + /// Indexed by block number and LSN. + page_versions: BTreeMap<(u32, Lsn), PageVersion>, + + /// `relsizes` tracks the size of the relation at different points in time. + relsizes: BTreeMap, +} + +impl Layer for SnapshotLayer { + fn is_frozen(&self) -> bool { + return true; + } + + fn get_timeline_id(&self) -> ZTimelineId { + return self.timelineid; + } + + fn get_relish_tag(&self) -> RelishTag { + return self.rel; + } + + fn is_dropped(&self) -> bool { + return self.dropped; + } + + fn get_start_lsn(&self) -> Lsn { + return self.start_lsn; + } + + fn get_end_lsn(&self) -> Lsn { + return self.end_lsn; + } + + /// Look up given page in the cache. + fn get_page_at_lsn( + &self, + walredo_mgr: &dyn WalRedoManager, + blknum: u32, + lsn: Lsn, + ) -> Result { + // Scan the BTreeMap backwards, starting from the given entry. 
+ let mut records: Vec = Vec::new(); + let mut page_img: Option = None; + let mut need_base_image_lsn: Option = Some(lsn); + { + let inner = self.load()?; + let minkey = (blknum, Lsn(0)); + let maxkey = (blknum, lsn); + let mut iter = inner + .page_versions + .range((Included(&minkey), Included(&maxkey))); + while let Some(((_blknum, entry_lsn), entry)) = iter.next_back() { + if let Some(img) = &entry.page_image { + page_img = Some(img.clone()); + need_base_image_lsn = None; + break; + } else if let Some(rec) = &entry.record { + records.push(rec.clone()); + if rec.will_init { + // This WAL record initializes the page, so no need to go further back + need_base_image_lsn = None; + break; + } else { + need_base_image_lsn = Some(*entry_lsn); + } + } else { + // No base image, and no WAL record. Huh? + bail!("no page image or WAL record for requested page"); + } + } + + // release lock on 'inner' + } + records.reverse(); + + // If we needed a base image to apply the WAL records against, we should have found it in memory. + if let Some(lsn) = need_base_image_lsn { + if records.is_empty() { + // no records, and no base image. This can happen if PostgreSQL extends a relation + // but never writes the page. + // + // Would be nice to detect that situation better. + warn!("Page {} blk {} at {} not found", self.rel, blknum, lsn); + return Ok(ZERO_PAGE.clone()); + } + bail!( + "No base image found for page {} blk {} at {}/{}", + self.rel, + blknum, + self.timelineid, + lsn + ); + } + + // If we have a page image, and no WAL, we're all set + if records.is_empty() { + if let Some(img) = page_img { + trace!( + "found page image for blk {} in {} at {}/{}, no WAL redo required", + blknum, + self.rel, + self.timelineid, + lsn + ); + Ok(img) + } else { + // FIXME: this ought to be an error? + warn!("Page {} blk {} at {} not found", self.rel, blknum, lsn); + Ok(ZERO_PAGE.clone()) + } + } else { + // We need to do WAL redo. + // + // If we don't have a base image, then the oldest WAL record better initialize + // the page + if page_img.is_none() && !records.first().unwrap().will_init { + // FIXME: this ought to be an error? + warn!( + "Base image for page {} blk {} at {} not found, but got {} WAL records", + self.rel, + blknum, + lsn, + records.len() + ); + Ok(ZERO_PAGE.clone()) + } else { + if page_img.is_some() { + trace!("found {} WAL records and a base image for blk {} in {} at {}/{}, performing WAL redo", records.len(), blknum, self.rel, self.timelineid, lsn); + } else { + trace!("found {} WAL records that will init the page for blk {} in {} at {}/{}, performing WAL redo", records.len(), blknum, self.rel, self.timelineid, lsn); + } + let img = walredo_mgr.request_redo(self.rel, blknum, lsn, page_img, records)?; + + // FIXME: Should we memoize the page image in memory, so that + // we wouldn't need to reconstruct it again, if it's requested again? + //self.put_page_image(blknum, lsn, img.clone())?; + + Ok(img) + } + } + } + + /// Get size of the relation at given LSN + fn get_relish_size(&self, lsn: Lsn) -> Result> { + // Scan the BTreeMap backwards, starting from the given entry. + let inner = self.load()?; + let mut iter = inner.relsizes.range((Included(&Lsn(0)), Included(&lsn))); + + if let Some((_entry_lsn, entry)) = iter.next_back() { + let result = *entry; + drop(inner); + trace!("get_relsize: {} at {} -> {}", self.rel, lsn, result); + Ok(Some(result)) + } else { + Ok(None) + } + } + + /// Does this relation exist at given LSN? 
+ fn get_rel_exists(&self, lsn: Lsn) -> Result { + // Is the requested LSN after the rel was dropped? + if self.dropped && lsn >= self.end_lsn { + return Ok(false); + } + + // Otherwise, it exists. + Ok(true) + } + + // Unsupported write operations + fn put_page_version(&self, blknum: u32, lsn: Lsn, _pv: PageVersion) -> Result<()> { + panic!( + "cannot modify historical snapshot layer, rel {} blk {} at {}/{}, {}-{}", + self.rel, blknum, self.timelineid, lsn, self.start_lsn, self.end_lsn + ); + } + fn put_truncation(&self, _lsn: Lsn, _relsize: u32) -> anyhow::Result<()> { + bail!("cannot modify historical snapshot layer"); + } + + fn put_unlink(&self, _lsn: Lsn) -> anyhow::Result<()> { + bail!("cannot modify historical snapshot layer"); + } + + fn freeze( + &self, + _end_lsn: Lsn, + _walredo_mgr: &dyn WalRedoManager, + ) -> Result>> { + bail!("cannot freeze historical snapshot layer"); + } + + fn delete(&self) -> Result<()> { + // delete underlying file + fs::remove_file(self.path())?; + Ok(()) + } + + /// + /// Release most of the memory used by this layer. If it's accessed again later, + /// it will need to be loaded back. + /// + fn unload(&self) -> Result<()> { + let mut inner = self.inner.lock().unwrap(); + inner.page_versions = BTreeMap::new(); + inner.relsizes = BTreeMap::new(); + inner.loaded = false; + Ok(()) + } +} + +impl SnapshotLayer { + fn path(&self) -> PathBuf { + Self::path_for( + self.conf, + self.timelineid, + self.tenantid, + &SnapshotFileName { + rel: self.rel, + start_lsn: self.start_lsn, + end_lsn: self.end_lsn, + dropped: self.dropped, + }, + ) + } + + fn path_for( + conf: &'static PageServerConf, + timelineid: ZTimelineId, + tenantid: ZTenantId, + fname: &SnapshotFileName, + ) -> PathBuf { + conf.timeline_path(&timelineid, &tenantid) + .join(fname.to_string()) + } + + /// Create a new snapshot file, using the given btreemaps containing the page versions and + /// relsizes. + /// + /// This is used to write the in-memory layer to disk. The in-memory layer uses the same + /// data structure with two btreemaps as we do, so passing the btreemaps is currently + /// expedient. + pub fn create( + conf: &'static PageServerConf, + timelineid: ZTimelineId, + tenantid: ZTenantId, + rel: RelishTag, + start_lsn: Lsn, + end_lsn: Lsn, + dropped: bool, + page_versions: BTreeMap<(u32, Lsn), PageVersion>, + relsizes: BTreeMap, + ) -> Result { + let snapfile = SnapshotLayer { + conf: conf, + timelineid: timelineid, + tenantid: tenantid, + rel: rel, + start_lsn: start_lsn, + end_lsn, + dropped, + inner: Mutex::new(SnapshotLayerInner { + loaded: true, + page_versions: page_versions, + relsizes: relsizes, + }), + }; + let inner = snapfile.inner.lock().unwrap(); + + // Write the in-memory btreemaps into a file + let path = snapfile.path(); + + // Note: This overwrites any existing file. There shouldn't be any. + // FIXME: throw an error instead? 
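+        // The logical contents of the file, as read back by load():
+        //
+        //   book header, with magic number SNAPSHOT_FILE_MAGIC
+        //   chapter PAGE_VERSIONS_CHAPTER: serialized BTreeMap<(blknum, Lsn), PageVersion>
+        //   chapter REL_SIZES_CHAPTER:     serialized BTreeMap<Lsn, u32>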
+        let file = File::create(&path)?;
+        let book = BookWriter::new(file, SNAPSHOT_FILE_MAGIC)?;
+
+        // Write out page versions
+        let mut chapter = book.new_chapter(PAGE_VERSIONS_CHAPTER);
+        let buf = BTreeMap::ser(&inner.page_versions)?;
+        chapter.write_all(&buf)?;
+        let book = chapter.close()?;
+
+        // and relsizes to a separate chapter
+        let mut chapter = book.new_chapter(REL_SIZES_CHAPTER);
+        let buf = BTreeMap::ser(&inner.relsizes)?;
+        chapter.write_all(&buf)?;
+        let book = chapter.close()?;
+
+        book.close()?;
+
+        trace!("saved {}", &path.display());
+
+        drop(inner);
+
+        Ok(snapfile)
+    }
+
+    ///
+    /// Load the contents of the file into memory
+    ///
+    fn load(&self) -> Result<MutexGuard<SnapshotLayerInner>> {
+        // quick exit if already loaded
+        let mut inner = self.inner.lock().unwrap();
+
+        if inner.loaded {
+            return Ok(inner);
+        }
+
+        let path = Self::path_for(
+            self.conf,
+            self.timelineid,
+            self.tenantid,
+            &SnapshotFileName {
+                rel: self.rel,
+                start_lsn: self.start_lsn,
+                end_lsn: self.end_lsn,
+                dropped: self.dropped,
+            },
+        );
+
+        let file = File::open(&path)?;
+        let book = Book::new(file)?;
+
+        let chapter = book.read_chapter(PAGE_VERSIONS_CHAPTER)?;
+        let page_versions = BTreeMap::des(&chapter)?;
+
+        let chapter = book.read_chapter(REL_SIZES_CHAPTER)?;
+        let relsizes = BTreeMap::des(&chapter)?;
+
+        debug!("loaded from {}", &path.display());
+
+        *inner = SnapshotLayerInner {
+            loaded: true,
+            page_versions,
+            relsizes,
+        };
+
+        Ok(inner)
+    }
+
+    /// Create SnapshotLayers representing all files on disk
+    ///
+    // TODO: returning an Iterator would be more idiomatic
+    pub fn list_snapshot_files(
+        conf: &'static PageServerConf,
+        timelineid: ZTimelineId,
+        tenantid: ZTenantId,
+    ) -> Result<Vec<Arc<SnapshotLayer>>> {
+        let path = conf.timeline_path(&timelineid, &tenantid);
+
+        let mut snapfiles: Vec<Arc<SnapshotLayer>> = Vec::new();
+        for direntry in fs::read_dir(path)? {
+            let fname = direntry?.file_name();
+            let fname = fname.to_str().unwrap();
+
+            if let Some(snapfilename) = SnapshotFileName::from_str(fname) {
+                let snapfile = SnapshotLayer {
+                    conf,
+                    timelineid,
+                    tenantid,
+                    rel: snapfilename.rel,
+                    start_lsn: snapfilename.start_lsn,
+                    end_lsn: snapfilename.end_lsn,
+                    dropped: snapfilename.dropped,
+                    inner: Mutex::new(SnapshotLayerInner {
+                        loaded: false,
+                        page_versions: BTreeMap::new(),
+                        relsizes: BTreeMap::new(),
+                    }),
+                };
+
+                snapfiles.push(Arc::new(snapfile));
+            }
+        }
+        return Ok(snapfiles);
+    }
+
+    /// debugging function to print out the contents of the layer
+    #[allow(unused)]
+    pub fn dump(&self) -> String {
+        let mut result = format!(
+            "----- snapshot layer for {} {}-{} ----\n",
+            self.rel, self.start_lsn, self.end_lsn
+        );
+
+        let inner = self.inner.lock().unwrap();
+        for (k, v) in inner.relsizes.iter() {
+            result += &format!("{}: {}\n", k, v);
+        }
+        //for (k, v) in inner.page_versions.iter() {
+        //    result += &format!("blk {} at {}: {}/{}\n", k.0, k.1, v.page_image.is_some(), v.record.is_some());
+        //}
+
+        result
+    }
+}
diff --git a/pageserver/src/layered_repository/storage_layer.rs b/pageserver/src/layered_repository/storage_layer.rs
new file mode 100644
index 0000000000..7ba5769e2d
--- /dev/null
+++ b/pageserver/src/layered_repository/storage_layer.rs
@@ -0,0 +1,123 @@
+use crate::relish::RelishTag;
+use crate::repository::WALRecord;
+use crate::walredo::WalRedoManager;
+use crate::ZTimelineId;
+use anyhow::Result;
+use bytes::Bytes;
+use serde::{Deserialize, Serialize};
+use std::sync::Arc;
+
+use zenith_utils::lsn::Lsn;
+
+pub static ZERO_PAGE: Bytes = Bytes::from_static(&[0u8; 8192]);
+
+///
+/// Represents a version of a page at a specific LSN. The LSN is the key of the
+/// entry in the 'page_versions' map, so it is not duplicated here.
+///
+/// A page version can be stored as a full page image, or as a WAL record that needs
+/// to be applied over the previous page version to reconstruct this version.
+///
+/// It's also possible to have both a WAL record and a page image in the same
+/// PageVersion. That happens if a page version is originally stored as a WAL record,
+/// but is later reconstructed by a GetPage@LSN request by performing WAL
+/// redo. The get_page_at_lsn() code will store the reconstructed page image next to
+/// the WAL record in that case. TODO: That's pretty accidental, not the result
+/// of any grand design. If we want to keep reconstructed page versions around, we
+/// probably should have a separate buffer cache so that we could control the
+/// replacement policy globally. Or if we keep a reconstructed page image, we
+/// could throw away the WAL record.
+///
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct PageVersion {
+    /// an 8kb page image
+    pub page_image: Option<Bytes>,
+    /// WAL record to get from the previous page version to this one.
+    pub record: Option<WALRecord>,
+}
+
+///
+/// A Layer holds all page versions for one relish, in a range of LSNs.
+/// There are two kinds of layers, in-memory and snapshot layers. In-memory
+/// layers are used to ingest incoming WAL, and provide fast access
+/// to the recent page versions. Snapshot layers are stored on disk, and
+/// are immutable.
+///
+/// Each layer contains a full snapshot of the relish at the start
+/// LSN. In addition to that, it contains WAL (or more page images)
+/// needed to reconstruct any page version up to the end LSN.
+///
+pub trait Layer: Send + Sync {
+    // These functions identify the relish and the LSN range that this Layer
+    // holds.
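+    //
+    // For example, a single relation might be covered by this stack of
+    // layers, from oldest to newest:
+    //
+    //   SnapshotLayer    100..200    (frozen, on disk)
+    //   SnapshotLayer    200..300    (frozen, on disk)
+    //   InMemoryLayer    300..       (unfrozen, still accepting WAL)
+    //
+    // get_start_lsn() and get_end_lsn() return those bounds; an
+    // in-memory layer has no real end LSN yet, so it reports
+    // Lsn(u64::MAX).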
+    fn get_timeline_id(&self) -> ZTimelineId;
+    fn get_relish_tag(&self) -> RelishTag;
+    fn get_start_lsn(&self) -> Lsn;
+    fn get_end_lsn(&self) -> Lsn;
+    fn is_dropped(&self) -> bool;
+
+    /// Frozen layers are stored on disk and cannot accept new WAL
+    /// records, whereas an unfrozen layer can still be modified, but is not
+    /// durable in case of a crash. Snapshot layers are always frozen, and
+    /// in-memory layers are always unfrozen.
+    fn is_frozen(&self) -> bool;
+
+    // Functions that correspond to the Timeline trait functions.
+    fn get_page_at_lsn(
+        &self,
+        walredo_mgr: &dyn WalRedoManager,
+        blknum: u32,
+        lsn: Lsn,
+    ) -> Result<Bytes>;
+
+    fn get_relish_size(&self, lsn: Lsn) -> Result<Option<u32>>;
+
+    fn get_rel_exists(&self, lsn: Lsn) -> Result<bool>;
+
+    fn put_page_version(&self, blknum: u32, lsn: Lsn, pv: PageVersion) -> Result<()>;
+
+    fn put_truncation(&self, lsn: Lsn, relsize: u32) -> anyhow::Result<()>;
+
+    fn put_unlink(&self, lsn: Lsn) -> anyhow::Result<()>;
+
+    /// Remember new page version, as a WAL record over the previous version
+    fn put_wal_record(&self, blknum: u32, rec: WALRecord) -> Result<()> {
+        self.put_page_version(
+            blknum,
+            rec.lsn,
+            PageVersion {
+                page_image: None,
+                record: Some(rec),
+            },
+        )
+    }
+
+    /// Remember new page version, as a full page image
+    fn put_page_image(&self, blknum: u32, lsn: Lsn, img: Bytes) -> Result<()> {
+        self.put_page_version(
+            blknum,
+            lsn,
+            PageVersion {
+                page_image: Some(img),
+                record: None,
+            },
+        )
+    }
+
+    ///
+    /// Split off an immutable layer from an existing layer.
+    ///
+    /// Returns new layers that replace this one.
+    ///
+    fn freeze(&self, end_lsn: Lsn, walredo_mgr: &dyn WalRedoManager)
+        -> Result<Vec<Arc<dyn Layer>>>;
+
+    /// Permanently delete this layer
+    fn delete(&self) -> Result<()>;
+
+    /// Try to release memory used by this layer. This is currently
+    /// only used by snapshot layers, to free the copy of the file
+    /// from memory. (TODO: a smarter, more granular caching scheme
+    /// would be nice)
+    fn unload(&self) -> Result<()>;
+}
diff --git a/pageserver/src/lib.rs b/pageserver/src/lib.rs
index 8354426c22..8bc8d62fc2 100644
--- a/pageserver/src/lib.rs
+++ b/pageserver/src/lib.rs
@@ -9,6 +9,7 @@ use zenith_metrics::{register_int_gauge_vec, IntGaugeVec};
 pub mod basebackup;
 pub mod branches;
+pub mod layered_repository;
 pub mod logger;
 pub mod object_key;
 pub mod object_repository;
@@ -54,6 +55,14 @@ pub struct PageServerConf {
     pub auth_type: AuthType,
 
     pub auth_validation_public_key_path: Option<PathBuf>,
+
+    pub repository_format: RepositoryFormat,
+}
+
+#[derive(Debug, Clone, PartialEq)]
+pub enum RepositoryFormat {
+    Layered,
+    RocksDb,
 }
 
 impl PageServerConf {
diff --git a/pageserver/src/page_cache.rs b/pageserver/src/page_cache.rs
index db24df861f..b4990eaba1 100644
--- a/pageserver/src/page_cache.rs
+++ b/pageserver/src/page_cache.rs
@@ -2,11 +2,12 @@
 //! page server.
use crate::branches; +use crate::layered_repository::LayeredRepository; use crate::object_repository::ObjectRepository; use crate::repository::Repository; use crate::rocksdb_storage::RocksObjectStore; use crate::walredo::PostgresRedoManager; -use crate::PageServerConf; +use crate::{PageServerConf, RepositoryFormat}; use anyhow::{anyhow, bail, Result}; use lazy_static::lazy_static; use log::info; @@ -27,16 +28,35 @@ pub fn init(conf: &'static PageServerConf) { for dir_entry in fs::read_dir(conf.tenants_path()).unwrap() { let tenantid = ZTenantId::from_str(dir_entry.unwrap().file_name().to_str().unwrap()).unwrap(); - let obj_store = RocksObjectStore::open(conf, &tenantid).unwrap(); // Set up a WAL redo manager, for applying WAL records. let walredo_mgr = PostgresRedoManager::new(conf, tenantid); // Set up an object repository, for actual data storage. - let repo = - ObjectRepository::new(conf, Arc::new(obj_store), Arc::new(walredo_mgr), tenantid); + let repo: Arc = match conf.repository_format { + RepositoryFormat::Layered => { + let repo = Arc::new(LayeredRepository::new( + conf, + Arc::new(walredo_mgr), + tenantid, + )); + LayeredRepository::launch_checkpointer_thread(conf, repo.clone()); + repo + } + RepositoryFormat::RocksDb => { + let obj_store = RocksObjectStore::open(conf, &tenantid).unwrap(); + + Arc::new(ObjectRepository::new( + conf, + Arc::new(obj_store), + Arc::new(walredo_mgr), + tenantid, + )) + } + }; + info!("initialized storage for tenant: {}", &tenantid); - m.insert(tenantid, Arc::new(repo)); + m.insert(tenantid, repo); } } @@ -53,7 +73,7 @@ pub fn create_repository_for_tenant( let wal_redo_manager = Arc::new(PostgresRedoManager::new(conf, tenantid)); let repo = branches::create_repo(conf, tenantid, wal_redo_manager)?; - m.insert(tenantid, Arc::new(repo)); + m.insert(tenantid, repo); Ok(()) } diff --git a/pageserver/src/page_service.rs b/pageserver/src/page_service.rs index 2caffb1eb2..ce36dce505 100644 --- a/pageserver/src/page_service.rs +++ b/pageserver/src/page_service.rs @@ -693,6 +693,18 @@ impl postgres_backend::Handler for PageServerHandler { RowDescriptor::int8_col(b"control_deleted"), RowDescriptor::int8_col(b"filenodemap_deleted"), RowDescriptor::int8_col(b"dropped"), + RowDescriptor::int8_col(b"snapshot_relfiles_total"), + RowDescriptor::int8_col(b"snapshot_relfiles_needed_by_cutoff"), + RowDescriptor::int8_col(b"snapshot_relfiles_needed_by_branches"), + RowDescriptor::int8_col(b"snapshot_relfiles_not_updated"), + RowDescriptor::int8_col(b"snapshot_relfiles_removed"), + RowDescriptor::int8_col(b"snapshot_relfiles_dropped"), + RowDescriptor::int8_col(b"snapshot_nonrelfiles_total"), + RowDescriptor::int8_col(b"snapshot_nonrelfiles_needed_by_cutoff"), + RowDescriptor::int8_col(b"snapshot_nonrelfiles_needed_by_branches"), + RowDescriptor::int8_col(b"snapshot_nonrelfiles_not_updated"), + RowDescriptor::int8_col(b"snapshot_nonrelfiles_removed"), + RowDescriptor::int8_col(b"snapshot_nonrelfiles_dropped"), RowDescriptor::int8_col(b"elapsed"), ]))? 
.write_message_noflush(&BeMessage::DataRow(&[ @@ -705,6 +717,43 @@ impl postgres_backend::Handler for PageServerHandler { Some(&result.control_deleted.to_string().as_bytes()), Some(&result.filenodemap_deleted.to_string().as_bytes()), Some(&result.dropped.to_string().as_bytes()), + Some(&result.snapshot_relfiles_total.to_string().as_bytes()), + Some( + &result + .snapshot_relfiles_needed_by_cutoff + .to_string() + .as_bytes(), + ), + Some( + &result + .snapshot_relfiles_needed_by_branches + .to_string() + .as_bytes(), + ), + Some(&result.snapshot_relfiles_not_updated.to_string().as_bytes()), + Some(&result.snapshot_relfiles_removed.to_string().as_bytes()), + Some(&result.snapshot_relfiles_dropped.to_string().as_bytes()), + Some(&result.snapshot_nonrelfiles_total.to_string().as_bytes()), + Some( + &result + .snapshot_nonrelfiles_needed_by_cutoff + .to_string() + .as_bytes(), + ), + Some( + &result + .snapshot_nonrelfiles_needed_by_branches + .to_string() + .as_bytes(), + ), + Some( + &result + .snapshot_nonrelfiles_not_updated + .to_string() + .as_bytes(), + ), + Some(&result.snapshot_nonrelfiles_removed.to_string().as_bytes()), + Some(&result.snapshot_nonrelfiles_dropped.to_string().as_bytes()), Some(&result.elapsed.as_millis().to_string().as_bytes()), ]))? .write_message(&BeMessage::CommandComplete(b"SELECT 1"))?; diff --git a/pageserver/src/relish.rs b/pageserver/src/relish.rs index 7484e0848f..4c050e4617 100644 --- a/pageserver/src/relish.rs +++ b/pageserver/src/relish.rs @@ -120,7 +120,16 @@ impl RelishTag { // and these don't | RelishTag::ControlFile - | RelishTag::Checkpoint => false, + | RelishTag::Checkpoint => false, + } + } + + // convenience function to check if this relish is a normal relation. + pub const fn is_relation(&self) -> bool { + if let RelishTag::Relation(_) = self { + true + } else { + false } } } diff --git a/pageserver/src/repository.rs b/pageserver/src/repository.rs index bb6388a532..501b7a6254 100644 --- a/pageserver/src/repository.rs +++ b/pageserver/src/repository.rs @@ -5,6 +5,7 @@ use bytes::{Buf, BufMut, Bytes, BytesMut}; use serde::{Deserialize, Serialize}; use std::collections::HashSet; use std::iter::Iterator; +use std::ops::AddAssign; use std::sync::Arc; use std::time::Duration; use zenith_utils::lsn::Lsn; @@ -56,6 +57,8 @@ pub trait Repository: Send + Sync { /// #[derive(Default)] pub struct GcResult { + // FIXME: These counters make sense for the ObjectRepository. They are not used + // by the LayeredRepository. pub n_relations: u64, pub inspected: u64, pub truncated: u64, @@ -66,9 +69,51 @@ pub struct GcResult { pub control_deleted: u64, // RelishTag::ControlFile pub filenodemap_deleted: u64, // RelishTag::FileNodeMap pub dropped: u64, + + // These are used for the LayeredRepository instead + pub snapshot_relfiles_total: u64, + pub snapshot_relfiles_needed_by_cutoff: u64, + pub snapshot_relfiles_needed_by_branches: u64, + pub snapshot_relfiles_not_updated: u64, + pub snapshot_relfiles_removed: u64, // # of snapshot files removed because they have been made obsolete by newer snapshot files. + pub snapshot_relfiles_dropped: u64, // # of snapshot files removed because the relation was dropped + + pub snapshot_nonrelfiles_total: u64, + pub snapshot_nonrelfiles_needed_by_cutoff: u64, + pub snapshot_nonrelfiles_needed_by_branches: u64, + pub snapshot_nonrelfiles_not_updated: u64, + pub snapshot_nonrelfiles_removed: u64, // # of snapshot files removed because they have been made obsolete by newer snapshot files. 
+ pub snapshot_nonrelfiles_dropped: u64, // # of snapshot files removed because the relation was dropped + pub elapsed: Duration, } +impl AddAssign for GcResult { + fn add_assign(&mut self, other: Self) { + self.n_relations += other.n_relations; + self.truncated += other.truncated; + self.deleted += other.deleted; + self.dropped += other.dropped; + + self.snapshot_relfiles_total += other.snapshot_relfiles_total; + self.snapshot_relfiles_needed_by_cutoff += other.snapshot_relfiles_needed_by_cutoff; + self.snapshot_relfiles_needed_by_branches += other.snapshot_relfiles_needed_by_branches; + self.snapshot_relfiles_not_updated += other.snapshot_relfiles_not_updated; + self.snapshot_relfiles_removed += other.snapshot_relfiles_removed; + self.snapshot_relfiles_dropped += other.snapshot_relfiles_dropped; + + self.snapshot_nonrelfiles_total += other.snapshot_nonrelfiles_total; + self.snapshot_nonrelfiles_needed_by_cutoff += other.snapshot_nonrelfiles_needed_by_cutoff; + self.snapshot_nonrelfiles_needed_by_branches += + other.snapshot_nonrelfiles_needed_by_branches; + self.snapshot_nonrelfiles_not_updated += other.snapshot_nonrelfiles_not_updated; + self.snapshot_nonrelfiles_removed += other.snapshot_nonrelfiles_removed; + self.snapshot_nonrelfiles_dropped += other.snapshot_nonrelfiles_dropped; + + self.elapsed += other.elapsed; + } +} + pub trait Timeline: Send + Sync { //------------------------------------------------------------------------------ // Public GET functions @@ -234,11 +279,12 @@ impl WALRecord { #[cfg(test)] mod tests { use super::*; + use crate::layered_repository::LayeredRepository; use crate::object_repository::ObjectRepository; use crate::object_repository::{ObjectValue, PageEntry, RelationSizeEntry}; use crate::rocksdb_storage::RocksObjectStore; use crate::walredo::{WalRedoError, WalRedoManager}; - use crate::PageServerConf; + use crate::{PageServerConf, RepositoryFormat}; use postgres_ffi::pg_constants; use std::fs; use std::path::PathBuf; @@ -272,10 +318,16 @@ mod tests { buf.freeze() } - fn get_test_repo(test_name: &str) -> Result> { + static ZERO_PAGE: Bytes = Bytes::from_static(&[0u8; 8192]); + + fn get_test_repo( + test_name: &str, + repository_format: RepositoryFormat, + ) -> Result> { let repo_dir = PathBuf::from(format!("../tmp_check/test_{}", test_name)); let _ = fs::remove_dir_all(&repo_dir); - fs::create_dir_all(&repo_dir).unwrap(); + fs::create_dir_all(&repo_dir)?; + fs::create_dir_all(&repo_dir.join("timelines"))?; let conf = PageServerConf { daemonize: false, @@ -288,6 +340,7 @@ mod tests { pg_distrib_dir: "".into(), auth_type: AuthType::Trust, auth_validation_public_key_path: None, + repository_format, }; // Make a static copy of the config. This can never be free'd, but that's // OK in a test. @@ -295,24 +348,47 @@ mod tests { let tenantid = ZTenantId::generate(); fs::create_dir_all(conf.tenant_path(&tenantid)).unwrap(); - let obj_store = RocksObjectStore::create(conf, &tenantid)?; - let walredo_mgr = TestRedoManager {}; - let repo = - ObjectRepository::new(conf, Arc::new(obj_store), Arc::new(walredo_mgr), tenantid); + let repo: Box = match conf.repository_format { + RepositoryFormat::Layered => Box::new(LayeredRepository::new( + conf, + Arc::new(walredo_mgr), + tenantid, + )), + RepositoryFormat::RocksDb => { + let obj_store = RocksObjectStore::create(conf, &tenantid)?; - Ok(Box::new(repo)) + Box::new(ObjectRepository::new( + conf, + Arc::new(obj_store), + Arc::new(walredo_mgr), + tenantid, + )) + } + }; + + Ok(repo) } /// Test get_relsize() and truncation. 
#[test] - fn test_relsize() -> Result<()> { + fn test_relsize_rocksdb() -> Result<()> { + let repo = get_test_repo("test_relsize_rocksdb", RepositoryFormat::RocksDb)?; + test_relsize(&*repo) + } + + #[test] + fn test_relsize_layered() -> Result<()> { + let repo = get_test_repo("test_relsize_layered", RepositoryFormat::Layered)?; + test_relsize(&*repo) + } + + fn test_relsize(repo: &dyn Repository) -> Result<()> { // get_timeline() with non-existent timeline id should fail //repo.get_timeline("11223344556677881122334455667788"); // Create timeline to work on - let repo = get_test_repo("test_relsize")?; let timelineid = ZTimelineId::from_str("11223344556677881122334455667788").unwrap(); let tline = repo.create_empty_timeline(timelineid, Lsn(0))?; @@ -397,14 +473,24 @@ mod tests { /// This isn't very interesting with the RocksDb implementation, as we don't pay /// any attention to Postgres segment boundaries there. #[test] - fn test_large_rel() -> Result<()> { - let repo = get_test_repo("test_large_rel")?; + fn test_large_rel_rocksdb() -> Result<()> { + let repo = get_test_repo("test_large_rel_rocksdb", RepositoryFormat::RocksDb)?; + test_large_rel(&*repo) + } + + #[test] + fn test_large_rel_layered() -> Result<()> { + let repo = get_test_repo("test_large_rel_layered", RepositoryFormat::Layered)?; + test_large_rel(&*repo) + } + + fn test_large_rel(repo: &dyn Repository) -> Result<()> { let timelineid = ZTimelineId::from_str("11223344556677881122334455667788").unwrap(); let tline = repo.create_empty_timeline(timelineid, Lsn(0))?; tline.init_valid_lsn(Lsn(1)); - let mut lsn = 0; + let mut lsn = 1; for blknum in 0..pg_constants::RELSEG_SIZE + 1 { let img = TEST_IMG(&format!("foo blk {} at {}", blknum, Lsn(lsn))); lsn += 1; @@ -450,15 +536,29 @@ mod tests { })) } + #[test] + fn test_branch_rocksdb() -> Result<()> { + let repo = get_test_repo("test_branch_rocksdb", RepositoryFormat::RocksDb)?; + test_branch(&*repo) + } + + #[test] + fn test_branch_layered() -> Result<()> { + let repo = get_test_repo("test_branch_layered", RepositoryFormat::Layered)?; + test_branch(&*repo) + } + /// /// Test branch creation /// - #[test] - fn test_branch() -> Result<()> { - let repo = get_test_repo("test_branch")?; + fn test_branch(repo: &dyn Repository) -> Result<()> { let timelineid = ZTimelineId::from_str("11223344556677881122334455667788").unwrap(); let tline = repo.create_empty_timeline(timelineid, Lsn(0))?; + // Import initial dummy checkpoint record, otherwise the get_timeline() call + // after branching fails below + tline.put_page_image(RelishTag::Checkpoint, 0, Lsn(1), ZERO_PAGE.clone(), false)?; + // Create a relation on the timeline tline.init_valid_lsn(Lsn(1)); tline.put_page_image(TESTREL_A, 0, Lsn(2), TEST_IMG("foo blk 0 at 2"), true)?; @@ -500,8 +600,19 @@ mod tests { } #[test] - fn test_history() -> Result<()> { - let repo = get_test_repo("test_snapshot")?; + fn test_history_rocksdb() -> Result<()> { + let repo = get_test_repo("test_history_rocksdb", RepositoryFormat::RocksDb)?; + test_history(&*repo) + } + #[test] + // TODO: This doesn't work with the layered storage, the functions needed for push/pull + // functionality haven't been implemented yet. 
+ #[ignore] + fn test_history_layered() -> Result<()> { + let repo = get_test_repo("test_history_layered", RepositoryFormat::Layered)?; + test_history(&*repo) + } + fn test_history(repo: &dyn Repository) -> Result<()> { let timelineid = ZTimelineId::from_str("11223344556677881122334455667788").unwrap(); let tline = repo.create_empty_timeline(timelineid, Lsn(0))?; diff --git a/pageserver/src/restore_local_repo.rs b/pageserver/src/restore_local_repo.rs index b3523c3bbb..c7f84d7bad 100644 --- a/pageserver/src/restore_local_repo.rs +++ b/pageserver/src/restore_local_repo.rs @@ -132,6 +132,7 @@ pub fn import_timeline_from_postgres_datadir( } // TODO: Scan pg_tblspc + timeline.advance_last_valid_lsn(lsn); timeline.checkpoint()?; Ok(()) @@ -425,12 +426,12 @@ pub fn save_decoded_record( save_xact_record(timeline, lsn, &parsed_xact, decoded)?; // Remove twophase file. see RemoveTwoPhaseFile() in postgres code info!( - "unlink twophaseFile for xid {} parsed_xact.xid {} here", - decoded.xl_xid, parsed_xact.xid + "unlink twophaseFile for xid {} parsed_xact.xid {} here at {}", + decoded.xl_xid, parsed_xact.xid, lsn ); timeline.put_unlink( RelishTag::TwoPhase { - xid: decoded.xl_xid, + xid: parsed_xact.xid, }, lsn, )?; @@ -795,7 +796,13 @@ fn save_clog_truncate_record( // Iterate via SLRU CLOG segments and unlink segments that we're ready to truncate // TODO This implementation is very inefficient - // it scans all non-rels only to find Clog - for obj in timeline.list_nonrels(lsn)? { + // + // We cannot pass 'lsn' to the Timeline.list_nonrels(), or it + // will block waiting for the last valid LSN to advance up to + // it. So we use the previous record's LSN in the get calls + // instead. + let req_lsn = min(timeline.get_last_record_lsn(), lsn); + for obj in timeline.list_nonrels(req_lsn)? { match obj { RelishTag::Slru { slru, segno } => { if slru == SlruKind::Clog { diff --git a/pageserver/src/walreceiver.rs b/pageserver/src/walreceiver.rs index 8ae690a251..069f84e2ff 100644 --- a/pageserver/src/walreceiver.rs +++ b/pageserver/src/walreceiver.rs @@ -8,7 +8,7 @@ use crate::page_cache; use crate::relish::*; use crate::restore_local_repo; use crate::waldecoder::*; -use crate::PageServerConf; +use crate::{PageServerConf, RepositoryFormat}; use anyhow::{Error, Result}; use lazy_static::lazy_static; use log::*; @@ -264,7 +264,11 @@ fn walreceiver_main( )?; if newest_segno - oldest_segno >= 10 { - timeline.checkpoint()?; + // FIXME: The layered repository performs checkpointing in a separate thread, so this + // isn't needed anymore. Remove 'checkpoint' from the Timeline trait altogether? + if conf.repository_format == RepositoryFormat::RocksDb { + timeline.checkpoint()?; + } // TODO: This is where we could remove WAL older than last_rec_lsn. //remove_wal_files(timelineid, pg_constants::WAL_SEGMENT_SIZE, last_rec_lsn)?; diff --git a/test_runner/batch_others/test_gc.py b/test_runner/batch_others/test_gc.py index 9a3f7f3d25..2e58b5096d 100644 --- a/test_runner/batch_others/test_gc.py +++ b/test_runner/batch_others/test_gc.py @@ -14,7 +14,8 @@ pytest_plugins = ("fixtures.zenith_fixtures") # @pytest.mark.skip(reason="""" Current GC test is flaky and overly strict. Since we are migrating to the layered repo format - with different GC implementation let's just silence this test for now. + with different GC implementation let's just silence this test for now. This test only + works with the RocksDB implementation. 
""") def test_gc(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFactory, pg_bin): zenith_cli.run(["branch", "test_gc", "empty"]) diff --git a/test_runner/batch_others/test_snapfiles_gc.py b/test_runner/batch_others/test_snapfiles_gc.py new file mode 100644 index 0000000000..761dc95b31 --- /dev/null +++ b/test_runner/batch_others/test_snapfiles_gc.py @@ -0,0 +1,122 @@ +from contextlib import closing +import psycopg2.extras +import time; + +pytest_plugins = ("fixtures.zenith_fixtures") + +def print_gc_result(row): + print("GC duration {elapsed} ms".format_map(row)); + print(" REL total: {snapshot_relfiles_total}, needed_by_cutoff {snapshot_relfiles_needed_by_cutoff}, needed_by_branches: {snapshot_relfiles_needed_by_branches}, not_updated: {snapshot_relfiles_not_updated}, removed: {snapshot_relfiles_removed}, dropped: {snapshot_relfiles_dropped}".format_map(row)) + print(" NONREL total: {snapshot_nonrelfiles_total}, needed_by_cutoff {snapshot_nonrelfiles_needed_by_cutoff}, needed_by_branches: {snapshot_nonrelfiles_needed_by_branches}, not_updated: {snapshot_nonrelfiles_not_updated}, removed: {snapshot_nonrelfiles_removed}, dropped: {snapshot_nonrelfiles_dropped}".format_map(row)) + + +# +# Test Garbage Collection of old snapshot files +# +# This test is pretty tightly coupled with the current implementation of layered +# storage, in layered_repository.rs. +# +def test_snapfiles_gc(zenith_cli, pageserver, postgres, pg_bin): + zenith_cli.run(["branch", "test_snapfiles_gc", "empty"]) + pg = postgres.create_start('test_snapfiles_gc') + + with closing(pg.connect()) as conn: + with conn.cursor() as cur: + with closing(pageserver.connect()) as psconn: + with psconn.cursor(cursor_factory = psycopg2.extras.DictCursor) as pscur: + + # Get the timeline ID of our branch. We need it for the 'do_gc' command + cur.execute("SHOW zenith.zenith_timeline") + timeline = cur.fetchone()[0] + + # Create a test table + cur.execute("CREATE TABLE foo(x integer)") + + print("Inserting two more rows and running GC") + cur.execute("select relfilenode from pg_class where oid = 'foo'::regclass"); + row = cur.fetchone(); + print("relfilenode is {}", row[0]); + + # Run GC, to clear out any garbage left behind in the catalogs by + # the CREATE TABLE command. We want to have a clean slate with no garbage + # before running the actual tests below, otherwise the counts won't match + # what we expect. + # + # Also run vacuum first to make it less likely that autovacuum or pruning + # kicks in and confuses our numbers. + cur.execute("VACUUM") + + print("Running GC before test") + pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0") + row = pscur.fetchone() + print_gc_result(row); + # remember the number of files + snapshot_relfiles_remain = row['snapshot_relfiles_total'] - row['snapshot_relfiles_removed'] + assert snapshot_relfiles_remain > 0 + + # Insert a row. The first insert will also create a metadata entry for the + # relation, with size == 1 block. Hence, bump up the expected relation count. + snapshot_relfiles_remain += 1; + print("Inserting one row and running GC") + cur.execute("INSERT INTO foo VALUES (1)") + pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0") + row = pscur.fetchone() + print_gc_result(row); + assert row['snapshot_relfiles_total'] == snapshot_relfiles_remain + assert row['snapshot_relfiles_removed'] == 0 + assert row['snapshot_relfiles_dropped'] == 0 + + # Insert two more rows and run GC. 
+                    # This should create a new snapshot file with the new contents, and
+                    # remove the old one.
+                    print("Inserting two more rows and running GC")
+                    cur.execute("INSERT INTO foo VALUES (2)")
+                    cur.execute("INSERT INTO foo VALUES (3)")
+
+                    pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")
+                    row = pscur.fetchone()
+                    print_gc_result(row)
+                    assert row['snapshot_relfiles_total'] == snapshot_relfiles_remain + 1
+                    assert row['snapshot_relfiles_removed'] == 1
+                    assert row['snapshot_relfiles_dropped'] == 0
+
+                    # Do it again. Should again create a new snapshot file and remove the old one.
+                    print("Inserting two more rows and running GC")
+                    cur.execute("INSERT INTO foo VALUES (2)")
+                    cur.execute("INSERT INTO foo VALUES (3)")
+
+                    pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")
+                    row = pscur.fetchone()
+                    print_gc_result(row)
+                    assert row['snapshot_relfiles_total'] == snapshot_relfiles_remain + 1
+                    assert row['snapshot_relfiles_removed'] == 1
+                    assert row['snapshot_relfiles_dropped'] == 0
+
+                    # Run GC again, with no changes in the database. Should not remove anything.
+                    print("Run GC again, with nothing to do")
+                    pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")
+                    row = pscur.fetchone()
+                    print_gc_result(row)
+                    assert row['snapshot_relfiles_total'] == snapshot_relfiles_remain
+                    assert row['snapshot_relfiles_removed'] == 0
+                    assert row['snapshot_relfiles_dropped'] == 0
+
+                    #
+                    # Drop the table, and check that GC drops the relation's data and metadata.
+                    #
+                    print("Drop table and run GC again")
+                    cur.execute("DROP TABLE foo")
+
+                    pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")
+                    row = pscur.fetchone()
+                    print_gc_result(row)
+
+                    # Each relation fork is counted separately, hence 3.
+                    assert row['snapshot_relfiles_dropped'] == 3
+
+                    # The catalog updates also create new snapshot files of the catalogs, which
+                    # are counted as 'removed'.
+                    assert row['snapshot_relfiles_removed'] > 0
+
+                    # TODO: perhaps we should count catalog and user relations separately,
+                    # to make this kind of testing more robust
diff --git a/vendor/postgres b/vendor/postgres
index 04cfa326a5..e3175fe60a 160000
--- a/vendor/postgres
+++ b/vendor/postgres
@@ -1 +1 @@
-Subproject commit 04cfa326a543171967c16954306f5a9dd8a470ea
+Subproject commit e3175fe60a996dfb54568855ba93e6134e62f052
diff --git a/zenith/src/main.rs b/zenith/src/main.rs
index e3f6464ef0..abaa972cc1 100644
--- a/zenith/src/main.rs
+++ b/zenith/src/main.rs
@@ -61,6 +61,13 @@ fn main() -> Result<()> {
                         .long("enable-auth")
                         .takes_value(false)
                         .help("Enable authentication using ZenithJWT")
+                )
+                .arg(
+                    Arg::with_name("repository-format")
+                        .long("repository-format")
+                        .takes_value(true)
+                        .value_name("repository-format")
+                        .help("Choose repository format, 'layered' or 'rocksdb'")
                 ),
         )
         .subcommand(
@@ -131,8 +138,8 @@ fn main() -> Result<()> {
             } else {
                 AuthType::Trust
             };
-
-            local_env::init(pageserver_uri, tenantid, auth_type)
+            let repository_format = init_match.value_of("repository-format");
+            local_env::init(pageserver_uri, tenantid, auth_type, repository_format)
                 .with_context(|| "Failed to create config file")?;
         }
@@ -151,6 +158,7 @@ fn main() -> Result<()> {
             if let Err(e) = pageserver.init(
                 Some(&env.tenantid.to_string()),
                 init_match.is_present("enable-auth"),
+                init_match.value_of("repository-format"),
             ) {
                 eprintln!("pageserver init failed: {}", e);
                 exit(1);
diff --git a/zenith_utils/src/zid.rs b/zenith_utils/src/zid.rs
index c5b4128527..a1cc60ca9e 100644
--- a/zenith_utils/src/zid.rs
+++
b/zenith_utils/src/zid.rs @@ -126,7 +126,7 @@ macro_rules! zid_newtype { /// is separate from PostgreSQL timelines, and doesn't have those /// limitations. A zenith timeline is identified by a 128-bit ID, which /// is usually printed out as a hex string. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Ord, PartialOrd, Serialize, Deserialize)] pub struct ZTimelineId(ZId); zid_newtype!(ZTimelineId); From 0c4ab80eac9dd8f8daa803d6c9017840d2fa7e52 Mon Sep 17 00:00:00 2001 From: Dmitry Rodionov Date: Fri, 13 Aug 2021 12:47:07 +0300 Subject: [PATCH 02/24] try to be more intelligent in WalAcceptor.start, added a bunch of typing sugar to wal acceptor fixtures --- test_runner/batch_others/test_wal_acceptor.py | 4 +- test_runner/fixtures/zenith_fixtures.py | 87 +++++++++++-------- 2 files changed, 53 insertions(+), 38 deletions(-) diff --git a/test_runner/batch_others/test_wal_acceptor.py b/test_runner/batch_others/test_wal_acceptor.py index 371a740f0f..fb45aa18b9 100644 --- a/test_runner/batch_others/test_wal_acceptor.py +++ b/test_runner/batch_others/test_wal_acceptor.py @@ -4,7 +4,7 @@ import time from contextlib import closing from multiprocessing import Process, Value -from fixtures.zenith_fixtures import ZenithPageserver, PostgresFactory +from fixtures.zenith_fixtures import WalAcceptorFactory, ZenithPageserver, PostgresFactory pytest_plugins = ("fixtures.zenith_fixtures") @@ -61,7 +61,7 @@ def test_many_timelines(zenith_cli, pageserver: ZenithPageserver, postgres: Post # Check that dead minority doesn't prevent the commits: execute insert n_inserts # times, with fault_probability chance of getting a wal acceptor down or up # along the way. 2 of 3 are always alive, so the work keeps going. -def test_restarts(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFactory, wa_factory): +def test_restarts(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFactory, wa_factory: WalAcceptorFactory): fault_probability = 0.01 n_inserts = 1000 n_acceptors = 3 diff --git a/test_runner/fixtures/zenith_fixtures.py b/test_runner/fixtures/zenith_fixtures.py index e76debcea1..f4813d2230 100644 --- a/test_runner/fixtures/zenith_fixtures.py +++ b/test_runner/fixtures/zenith_fixtures.py @@ -1,4 +1,3 @@ -import getpass import os import pathlib import uuid @@ -7,9 +6,11 @@ import pytest import shutil import signal import subprocess +import time from contextlib import closing from pathlib import Path +from dataclasses import dataclass # Type-related stuff from psycopg2.extensions import connection as PgConnection @@ -511,26 +512,27 @@ def pg_bin(test_output_dir: str, pg_distrib_dir: str) -> PgBin: return PgBin(test_output_dir, pg_distrib_dir) -def read_pid(path): +def read_pid(path: Path): """ Read content of file into number """ - return int(Path(path).read_text()) + return int(path.read_text()) +@dataclass class WalAcceptor: """ An object representing a running wal acceptor daemon. 
""" - def __init__(self, wa_binpath, data_dir, port, num, auth_token: Optional[str] = None): - self.wa_binpath = wa_binpath - self.data_dir = data_dir - self.port = port - self.num = num # identifier for logging - self.auth_token = auth_token + bin_path: Path + data_dir: Path + port: int + num: int # identifier for logging + auth_token: Optional[str] = None def start(self) -> 'WalAcceptor': # create data directory if not exists - Path(self.data_dir).mkdir(parents=True, exist_ok=True) + self.data_dir.mkdir(parents=True, exist_ok=True) + self.pidfile.unlink(missing_ok=True) - cmd = [self.wa_binpath] - cmd.extend(["-D", self.data_dir]) + cmd = [str(self.bin_path)] + cmd.extend(["-D", str(self.data_dir)]) cmd.extend(["-l", "localhost:{}".format(self.port)]) cmd.append("--daemonize") cmd.append("--no-sync") @@ -541,38 +543,51 @@ class WalAcceptor: env = {'PAGESERVER_AUTH_TOKEN': self.auth_token} if self.auth_token else None subprocess.run(cmd, check=True, env=env) - return self + # wait for wal acceptor start by checkking that pid is readable + for _ in range(3): + pid = self.get_pid() + if pid is not None: + return self + time.sleep(0.5) + + raise RuntimeError("cannot get wal acceptor pid") + + @property + def pidfile(self) -> Path: + return self.data_dir / "wal_acceptor.pid" + + def get_pid(self) -> Optional[int]: + if not self.pidfile.exists(): + return None + + try: + pid = read_pid(self.pidfile) + except ValueError: + return None + + return pid def stop(self) -> 'WalAcceptor': print('Stopping wal acceptor {}'.format(self.num)) - pidfile_path = os.path.join(self.data_dir, "wal_acceptor.pid") - try: - pid = read_pid(pidfile_path) - try: - os.kill(pid, signal.SIGTERM) - except Exception: - pass # pidfile might be obsolete - # TODO: cleanup pid file on exit in wal acceptor - return self - # for _ in range(5): - # print('waiting wal acceptor {} (pid {}) to stop...', self.num, pid) - # try: - # read_pid(pidfile_path) - # except FileNotFoundError: - # return # done - # time.sleep(1) - # raise Exception('Failed to wait for wal acceptor {} shutdown'.format(self.num)) - except FileNotFoundError: + pid = self.get_pid() + if pid is None: print("Wal acceptor {} is not running".format(self.num)) return self + try: + os.kill(pid, signal.SIGTERM) + except Exception: + # TODO: cleanup pid file on exit in wal acceptor + pass # pidfile might be obsolete + return self + class WalAcceptorFactory: """ An object representing multiple running wal acceptors. """ - def __init__(self, zenith_binpath, data_dir): - self.wa_binpath = os.path.join(zenith_binpath, 'wal_acceptor') + def __init__(self, zenith_binpath: Path, data_dir: Path): + self.wa_binpath = zenith_binpath / 'wal_acceptor' self.data_dir = data_dir - self.instances = [] + self.instances: List[WalAcceptor] = [] self.initial_port = 54321 def start_new(self, auth_token: Optional[str] = None) -> WalAcceptor: @@ -583,7 +598,7 @@ class WalAcceptorFactory: wa_num = len(self.instances) wa = WalAcceptor( self.wa_binpath, - os.path.join(self.data_dir, "wal_acceptor_{}".format(wa_num)), + self.data_dir / "wal_acceptor_{}".format(wa_num), self.initial_port + wa_num, wa_num, auth_token, @@ -613,7 +628,7 @@ class WalAcceptorFactory: @zenfixture def wa_factory(zenith_binpath: str, repo_dir: str) -> Iterator[WalAcceptorFactory]: """ Gives WalAcceptorFactory providing wal acceptors. 
""" - wafactory = WalAcceptorFactory(zenith_binpath, os.path.join(repo_dir, "wal_acceptors")) + wafactory = WalAcceptorFactory(Path(zenith_binpath), Path(repo_dir) / "wal_acceptors") yield wafactory # After the yield comes any cleanup code we need. print('Starting wal acceptors cleanup') From 047a05efb2d599d8af0d493718f1feeff1ccac72 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Mon, 16 Aug 2021 15:48:59 +0300 Subject: [PATCH 03/24] Minor formatting and comment fixes. --- pageserver/src/basebackup.rs | 4 +++- pageserver/src/object_repository.rs | 8 +++----- pageserver/src/walredo.rs | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/pageserver/src/basebackup.rs b/pageserver/src/basebackup.rs index a133f7c68b..434b6af79f 100644 --- a/pageserver/src/basebackup.rs +++ b/pageserver/src/basebackup.rs @@ -179,7 +179,9 @@ impl<'a> Basebackup<'a> { // Extract twophase state files // fn add_twophase_file(&mut self, xid: TransactionId) -> anyhow::Result<()> { - if let Ok(img) = self.timeline.get_page_at_lsn_nowait(RelishTag::TwoPhase { xid }, 0, self.lsn) + if let Ok(img) = + self.timeline + .get_page_at_lsn_nowait(RelishTag::TwoPhase { xid }, 0, self.lsn) { let mut buf = BytesMut::new(); buf.extend_from_slice(&img[..]); diff --git a/pageserver/src/object_repository.rs b/pageserver/src/object_repository.rs index 31b8c40d4e..b5bcdd490d 100644 --- a/pageserver/src/object_repository.rs +++ b/pageserver/src/object_repository.rs @@ -299,15 +299,13 @@ impl Timeline for ObjectTimeline { // move this check out of the funciton. // match rel { - RelishTag::Slru { .. } | - RelishTag::TwoPhase{ .. } => - { + RelishTag::Slru { .. } | RelishTag::TwoPhase { .. } => { if !self.get_rel_exists(rel, req_lsn).unwrap_or(false) { trace!("{:?} at {} doesn't exist", rel, req_lsn); return Err(anyhow!("non-rel relish doesn't exist")); } - }, - _ => () + } + _ => (), }; const ZERO_PAGE: [u8; 8192] = [0u8; 8192]; diff --git a/pageserver/src/walredo.rs b/pageserver/src/walredo.rs index 9e77fb8df4..3d6a1d4e0c 100644 --- a/pageserver/src/walredo.rs +++ b/pageserver/src/walredo.rs @@ -197,12 +197,10 @@ fn mx_offset_to_member_offset(xid: MultiXactId) -> usize { } impl PostgresRedoManager { - /// /// Create a new PostgresRedoManager. /// pub fn new(conf: &'static PageServerConf, tenantid: ZTenantId) -> PostgresRedoManager { - // We block on waiting for requests on the walredo request channel, but // use async I/O to communicate with the child process. Initialize the // runtime for the async part. @@ -244,6 +242,8 @@ impl PostgresRedoManager { let buf_tag = BufferTag { rel, blknum }; apply_result = process.apply_wal_records(buf_tag, base_img, records).await; } else { + // Non-relational WAL records are handled here, with custom code that has the + // same effects as the corresponding Postgres WAL redo function. const ZERO_PAGE: [u8; 8192] = [0u8; 8192]; let mut page = BytesMut::new(); if let Some(fpi) = base_img { @@ -378,7 +378,7 @@ impl PostgresRedoManager { panic!(); } } else if xlogrec.xl_rmid == pg_constants::RM_RELMAP_ID { - // Ralation map file has size 512 bytes + // Relation map file has size 512 bytes page.clear(); page.extend_from_slice(&buf[12..]); // skip xl_relmap_update assert!(page.len() == 512); // size of pg_filenode.map From 7ee8de3725588467b828f08785b14c3cf1da6976 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Mon, 16 Aug 2021 15:49:17 +0300 Subject: [PATCH 04/24] Add metrics to WAL redo. 
Track the time spent on replaying WAL records by the special
Postgres process, the time spent waiting for access to the Postgres
process (since there is only one per tenant), and the number of
records replayed.
---
 pageserver/src/walredo.rs | 61 +++++++++++++++++++++++++++++++++------
 1 file changed, 52 insertions(+), 9 deletions(-)

diff --git a/pageserver/src/walredo.rs b/pageserver/src/walredo.rs
index 3d6a1d4e0c..41183ec4cf 100644
--- a/pageserver/src/walredo.rs
+++ b/pageserver/src/walredo.rs
@@ -20,6 +20,7 @@
 //!
 use byteorder::{ByteOrder, LittleEndian};
 use bytes::{Buf, BufMut, Bytes, BytesMut};
+use lazy_static::lazy_static;
 use log::*;
 use serde::{Deserialize, Serialize};
 use std::cell::RefCell;
@@ -36,6 +37,7 @@ use tokio::io::AsyncBufReadExt;
 use tokio::io::{AsyncReadExt, AsyncWriteExt};
 use tokio::process::{ChildStdin, ChildStdout, Command};
 use tokio::time::timeout;
+use zenith_metrics::{register_histogram, register_int_counter, Histogram, IntCounter};
 use zenith_utils::bin_ser::BeSer;
 use zenith_utils::lsn::Lsn;
 use zenith_utils::zid::ZTenantId;
@@ -103,6 +105,27 @@ impl crate::walredo::WalRedoManager for DummyRedoManager {
 
 static TIMEOUT: Duration = Duration::from_secs(20);
 
+// Metrics collected on WAL redo operations
+//
+// We collect the time spent in actual WAL redo ('redo'), and time waiting
+// for access to the postgres process ('wait') since there is only one for
+// each tenant.
+lazy_static! {
+    static ref WAL_REDO_TIME: Histogram =
+        register_histogram!("pageserver_wal_redo_time", "Time spent on WAL redo")
+            .expect("failed to define a metric");
+    static ref WAL_REDO_WAIT_TIME: Histogram = register_histogram!(
+        "pageserver_wal_redo_wait_time",
+        "Time spent waiting for access to the WAL redo process"
+    )
+    .expect("failed to define a metric");
+    static ref WAL_REDO_RECORD_COUNTER: IntCounter = register_int_counter!(
+        "pageserver_wal_records_replayed",
+        "Number of WAL records replayed"
+    )
+    .unwrap();
+}
+
 ///
 /// This is the real implementation that uses a Postgres process to
 /// perform WAL replay. Only one thread can use the process at a time,
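(Aside, not part of the patch: the locking/timing pattern that the next hunk
applies to request_redo(), distilled into a standalone sketch. The helper name
is hypothetical; Histogram and observe() are the zenith_metrics items imported
above.)

    use std::sync::Mutex;
    use std::time::Instant;
    use zenith_metrics::Histogram;

    // Hypothetical helper: measure how long we wait for the mutex, and
    // separately how long the work under it takes, reporting both.
    fn observe_locked<T, R>(
        wait_hist: &Histogram,
        work_hist: &Histogram,
        lock: &Mutex<T>,
        work: impl FnOnce(&mut T) -> R,
    ) -> R {
        let start = Instant::now();
        let mut guard = lock.lock().unwrap();
        let acquired = Instant::now();
        let result = work(&mut guard);
        wait_hist.observe(acquired.duration_since(start).as_secs_f64());
        work_hist.observe(acquired.elapsed().as_secs_f64());
        result
    }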
@@ -156,6 +179,9 @@ impl WalRedoManager for PostgresRedoManager {
         base_img: Option<Bytes>,
         records: Vec<WALRecord>,
     ) -> Result<Bytes, WalRedoError> {
+        let start_time;
+        let lock_time;
+        let end_time;
 
         let request = WalRedoRequest {
             rel,
@@ -165,16 +191,29 @@ impl WalRedoManager for PostgresRedoManager {
             records,
         };
 
-        // launch the WAL redo process on first use
-        let mut process_guard = self.process.lock().unwrap();
-        if process_guard.is_none() {
-            let p = self.runtime
-                .block_on(PostgresRedoProcess::launch(self.conf, &self.tenantid))?;
-            *process_guard = Some(p);
-        }
-        let process = (*process_guard).as_ref().unwrap();
+        start_time = Instant::now();
+        let result = {
+            let mut process_guard = self.process.lock().unwrap();
+            lock_time = Instant::now();
 
-        self.runtime.block_on(self.handle_apply_request(&process, &request))
+            // launch the WAL redo process on first use
+            if process_guard.is_none() {
+                let p = self
+                    .runtime
+                    .block_on(PostgresRedoProcess::launch(self.conf, &self.tenantid))?;
+                *process_guard = Some(p);
+            }
+            let process = (*process_guard).as_ref().unwrap();
+
+            self.runtime
+                .block_on(self.handle_apply_request(&process, &request))
+        };
+        end_time = Instant::now();
+
+        WAL_REDO_WAIT_TIME.observe(lock_time.duration_since(start_time).as_secs_f64());
+        WAL_REDO_TIME.observe(end_time.duration_since(lock_time).as_secs_f64());
+
+        result
     }
 }
 
@@ -257,6 +296,8 @@ impl PostgresRedoManager {
         for record in records {
             let mut buf = record.rec.clone();
 
+            WAL_REDO_RECORD_COUNTER.inc();
+
             // 1. Parse XLogRecord struct
             // FIXME: refactor to avoid code duplication.
             let xlogrec = XLogRecord::from_bytes(&mut buf);
@@ -557,6 +598,8 @@ impl PostgresRedoProcess {
         for rec in records.iter() {
             let r = rec.clone();
 
+            WAL_REDO_RECORD_COUNTER.inc();
+
             stdin
                 .write_all(&build_apply_record_msg(r.lsn, r.rec))
                 .await?;

From f37cb213051c1054e6ea85b7d42a099e5213dea7 Mon Sep 17 00:00:00 2001
From: Heikki Linnakangas
Date: Mon, 16 Aug 2021 10:33:48 +0300
Subject: [PATCH 05/24] Update Cargo.lock for addition of 'bincode'

Commit 5eb1738e8b added a dependency on the 'bincode' crate.
'cargo build' adds it to Cargo.lock automatically, so let's
remember it.
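(For reference, not part of the patch: 'bincode' serializes any serde type to a
compact binary format. A minimal sketch with a made-up struct, assuming serde's
"derive" feature is enabled; the real uses live in the walkeeper crate.)

    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize, Debug, PartialEq)]
    struct Example {
        magic: u32,
        flush_lsn: u64,
    }

    fn roundtrip() -> bincode::Result<()> {
        let orig = Example { magic: 0xCAFE, flush_lsn: 0x169C348 };
        let bytes: Vec<u8> = bincode::serialize(&orig)?;
        let copy: Example = bincode::deserialize(&bytes)?;
        assert_eq!(orig, copy);
        Ok(())
    }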
--- Cargo.lock | 1 + 1 file changed, 1 insertion(+) diff --git a/Cargo.lock b/Cargo.lock index 9bf6f0b7fd..adc0b26428 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2334,6 +2334,7 @@ name = "walkeeper" version = "0.1.0" dependencies = [ "anyhow", + "bincode", "byteorder", "bytes", "clap", From 921ec390bc943d065a372f877cd2dc40753eef9f Mon Sep 17 00:00:00 2001 From: anastasia Date: Mon, 16 Aug 2021 19:40:48 +0300 Subject: [PATCH 06/24] cargo fmt --- control_plane/src/storage.rs | 7 ++- pageserver/src/layered_repository.rs | 46 +++++++++++-------- .../src/layered_repository/inmemory_layer.rs | 11 ++--- .../src/layered_repository/layer_map.rs | 4 +- pageserver/src/relish.rs | 3 +- 5 files changed, 43 insertions(+), 28 deletions(-) diff --git a/control_plane/src/storage.rs b/control_plane/src/storage.rs index 5da3334e4a..644cd3a643 100644 --- a/control_plane/src/storage.rs +++ b/control_plane/src/storage.rs @@ -50,7 +50,12 @@ impl PageServerNode { .unwrap() } - pub fn init(&self, create_tenant: Option<&str>, enable_auth: bool, repository_format: Option<&str>) -> Result<()> { + pub fn init( + &self, + create_tenant: Option<&str>, + enable_auth: bool, + repository_format: Option<&str>, + ) -> Result<()> { let mut cmd = Command::new(self.env.pageserver_bin()?); let mut args = vec![ "--init", diff --git a/pageserver/src/layered_repository.rs b/pageserver/src/layered_repository.rs index 495d2be6ab..52974bc8f2 100644 --- a/pageserver/src/layered_repository.rs +++ b/pageserver/src/layered_repository.rs @@ -157,11 +157,9 @@ impl Repository for LayeredRepository { ) -> Result { STORAGE_TIME .with_label_values(&["gc"]) - .observe_closure_duration(|| self.gc_iteration_internal( - target_timelineid, - horizon, - compact, - )) + .observe_closure_duration(|| { + self.gc_iteration_internal(target_timelineid, horizon, compact) + }) } } @@ -502,7 +500,10 @@ impl Timeline for LayeredTimeline { fn get_relish_size(&self, rel: RelishTag, lsn: Lsn) -> Result> { if !rel.is_blocky() { - bail!("invalid get_relish_size request for non-blocky relish {}", rel); + bail!( + "invalid get_relish_size request for non-blocky relish {}", + rel + ); } let lsn = self.wait_lsn(lsn)?; @@ -561,11 +562,17 @@ impl Timeline for LayeredTimeline { // FIXME: We should pass the LSN argument to the calls above, and avoid scanning // dropped relations in the first place. let mut res: Result<()> = Ok(()); - all_rels.retain(|reltag| - match self.get_rel_exists(RelishTag::Relation(*reltag), lsn) { - Ok(exists) => { info!("retain: {} -> {}", *reltag, exists); exists }, - Err(err) => { res = Err(err); false } - } + all_rels.retain( + |reltag| match self.get_rel_exists(RelishTag::Relation(*reltag), lsn) { + Ok(exists) => { + info!("retain: {} -> {}", *reltag, exists); + exists + } + Err(err) => { + res = Err(err); + false + } + }, ); res?; @@ -597,12 +604,16 @@ impl Timeline for LayeredTimeline { // FIXME: We should pass the LSN argument to the calls above, and avoid scanning // dropped relations in the first place. let mut res: Result<()> = Ok(()); - all_rels.retain(|tag| - match self.get_rel_exists(*tag, lsn) { - Ok(exists) => { info!("retain: {} -> {}", *tag, exists); exists }, - Err(err) => { res = Err(err); false } - } - ); + all_rels.retain(|tag| match self.get_rel_exists(*tag, lsn) { + Ok(exists) => { + info!("retain: {} -> {}", *tag, exists); + exists + } + Err(err) => { + res = Err(err); + false + } + }); res?; Ok(all_rels) @@ -883,7 +894,6 @@ impl LayeredTimeline { // Look up the correct layer. 
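+        // (An in-memory layer accepts appends only until it is frozen; hence the
+        // is_frozen() check below.)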
let layers = self.layers.lock().unwrap(); if let Some(layer) = layers.get(rel, lsn) { - // If it's writeable, good, return it. if !layer.is_frozen() { return Ok(Arc::clone(&layer)); diff --git a/pageserver/src/layered_repository/inmemory_layer.rs b/pageserver/src/layered_repository/inmemory_layer.rs index f9ac9178bb..34be5a1740 100644 --- a/pageserver/src/layered_repository/inmemory_layer.rs +++ b/pageserver/src/layered_repository/inmemory_layer.rs @@ -326,12 +326,11 @@ impl Layer for InMemoryLayer { // the drop LSN instead. The drop-LSN could be ahead of the // caller-specified LSN! let dropped = inner.drop_lsn.is_some(); - let end_lsn = - if dropped { - inner.drop_lsn.unwrap() - } else { - cutoff_lsn - }; + let end_lsn = if dropped { + inner.drop_lsn.unwrap() + } else { + cutoff_lsn + }; // Divide all the page versions into old and new at the 'end_lsn' cutoff point. let mut before_page_versions; diff --git a/pageserver/src/layered_repository/layer_map.rs b/pageserver/src/layered_repository/layer_map.rs index f0a91bd08b..528216d5e3 100644 --- a/pageserver/src/layered_repository/layer_map.rs +++ b/pageserver/src/layered_repository/layer_map.rs @@ -113,7 +113,9 @@ impl LayerMap { } else { trace!( "found singleton layer for rel {}, {} {}", - rel, lsn, newer_lsn + rel, + lsn, + newer_lsn ); continue; } diff --git a/pageserver/src/relish.rs b/pageserver/src/relish.rs index 4c050e4617..5db6540265 100644 --- a/pageserver/src/relish.rs +++ b/pageserver/src/relish.rs @@ -119,8 +119,7 @@ impl RelishTag { | RelishTag::TwoPhase { .. } => true, // and these don't - | RelishTag::ControlFile - | RelishTag::Checkpoint => false, + RelishTag::ControlFile | RelishTag::Checkpoint => false, } } From cbeb67067c7f5e3c6688bfead646b136af4589c9 Mon Sep 17 00:00:00 2001 From: anastasia Date: Fri, 13 Aug 2021 20:18:44 +0300 Subject: [PATCH 07/24] Issue #367. Change CLI so that we always create node from scratch at 'pg start'. This operation preserve previously existing config Add new flag '--config-only' to 'pg create'. If this flag is passed, don't perform basebackup, just fill initial postgresql.conf for the node. --- Cargo.lock | 44 +++---- control_plane/src/compute.rs | 155 +++++++++++++++--------- test_runner/fixtures/zenith_fixtures.py | 58 ++++++++- zenith/src/main.rs | 21 +++- 4 files changed, 191 insertions(+), 87 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index adc0b26428..a36f8aee0d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,5 +1,7 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
+version = 3 + [[package]] name = "ahash" version = "0.4.7" @@ -1317,24 +1319,6 @@ dependencies = [ "tokio-postgres 0.7.1", ] -[[package]] -name = "postgres-protocol" -version = "0.6.1" -source = "git+https://github.com/zenithdb/rust-postgres.git?rev=9eb0dbfbeb6a6c1b79099b9f7ae4a8c021877858#9eb0dbfbeb6a6c1b79099b9f7ae4a8c021877858" -dependencies = [ - "base64 0.13.0", - "byteorder", - "bytes", - "fallible-iterator", - "hmac", - "lazy_static", - "md-5", - "memchr", - "rand", - "sha2", - "stringprep", -] - [[package]] name = "postgres-protocol" version = "0.6.1" @@ -1354,13 +1338,21 @@ dependencies = [ ] [[package]] -name = "postgres-types" -version = "0.2.1" +name = "postgres-protocol" +version = "0.6.1" source = "git+https://github.com/zenithdb/rust-postgres.git?rev=9eb0dbfbeb6a6c1b79099b9f7ae4a8c021877858#9eb0dbfbeb6a6c1b79099b9f7ae4a8c021877858" dependencies = [ + "base64 0.13.0", + "byteorder", "bytes", "fallible-iterator", - "postgres-protocol 0.6.1 (git+https://github.com/zenithdb/rust-postgres.git?rev=9eb0dbfbeb6a6c1b79099b9f7ae4a8c021877858)", + "hmac", + "lazy_static", + "md-5", + "memchr", + "rand", + "sha2", + "stringprep", ] [[package]] @@ -1374,6 +1366,16 @@ dependencies = [ "postgres-protocol 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "postgres-types" +version = "0.2.1" +source = "git+https://github.com/zenithdb/rust-postgres.git?rev=9eb0dbfbeb6a6c1b79099b9f7ae4a8c021877858#9eb0dbfbeb6a6c1b79099b9f7ae4a8c021877858" +dependencies = [ + "bytes", + "fallible-iterator", + "postgres-protocol 0.6.1 (git+https://github.com/zenithdb/rust-postgres.git?rev=9eb0dbfbeb6a6c1b79099b9f7ae4a8c021877858)", +] + [[package]] name = "postgres_ffi" version = "0.1.0" diff --git a/control_plane/src/compute.rs b/control_plane/src/compute.rs index 434f4f167c..a03dc15b2e 100644 --- a/control_plane/src/compute.rs +++ b/control_plane/src/compute.rs @@ -7,7 +7,7 @@ use std::sync::Arc; use std::time::Duration; use std::{collections::BTreeMap, path::PathBuf}; use std::{ - fs::{self, OpenOptions}, + fs::{self, File, OpenOptions}, io::Read, }; @@ -85,48 +85,36 @@ impl ComputeControlPlane { } } - /// Connect to a page server, get base backup, and untar it to initialize a - /// new data directory - pub fn new_from_page_server( - &mut self, - is_test: bool, - timelineid: ZTimelineId, - name: &str, - tenantid: ZTenantId, - ) -> Result> { - let node = Arc::new(PostgresNode { - name: name.to_owned(), - address: SocketAddr::new("127.0.0.1".parse().unwrap(), self.get_port()), - env: self.env.clone(), - pageserver: Arc::clone(&self.pageserver), - is_test, - timelineid, - tenantid, - }); - - node.init_from_page_server(self.env.auth_type)?; - self.nodes - .insert((tenantid, node.name.clone()), Arc::clone(&node)); - - Ok(node) - } - pub fn new_node( &mut self, tenantid: ZTenantId, branch_name: &str, + config_only: bool, ) -> Result> { let timeline_id = self .pageserver .branch_get_by_name(&tenantid, branch_name)? 
.timeline_id; - let node = self.new_from_page_server(false, timeline_id, branch_name, tenantid)?; + + let node = Arc::new(PostgresNode { + name: branch_name.to_owned(), + address: SocketAddr::new("127.0.0.1".parse().unwrap(), self.get_port()), + env: self.env.clone(), + pageserver: Arc::clone(&self.pageserver), + is_test: false, + timelineid: timeline_id, + tenantid, + }); + + node.init_from_page_server(self.env.auth_type, config_only)?; + self.nodes + .insert((tenantid, node.name.clone()), Arc::clone(&node)); + // Configure the node to stream WAL directly to the pageserver node.append_conf( "postgresql.conf", format!( concat!( - "shared_preload_libraries = zenith\n", "synchronous_standby_names = 'pageserver'\n", // TODO: add a new function arg? "zenith.callmemaybe_connstring = '{}'\n", // FIXME escaping ), @@ -246,39 +234,15 @@ impl PostgresNode { }) } - // Connect to a page server, get base backup, and untar it to initialize a - // new data directory - pub fn init_from_page_server(&self, auth_type: AuthType) -> Result<()> { + pub fn do_basebackup(&self) -> Result<()> { let pgdata = self.pgdata(); - println!( - "Extracting base backup to create postgres instance: path={} port={}", - pgdata.display(), - self.address.port() - ); - - // initialize data directory - if self.is_test { - fs::remove_dir_all(&pgdata).ok(); - } - let sql = format!("basebackup {} {}", self.tenantid, self.timelineid); let mut client = self .pageserver .page_server_psql_client() .with_context(|| "connecting to page server failed")?; - fs::create_dir_all(&pgdata) - .with_context(|| format!("could not create data directory {}", pgdata.display()))?; - fs::set_permissions(pgdata.as_path(), fs::Permissions::from_mode(0o700)).with_context( - || { - format!( - "could not set permissions in data directory {}", - pgdata.display() - ) - }, - )?; - let mut copyreader = client .copy_out(sql.as_str()) .with_context(|| "page server 'basebackup' command failed")?; @@ -294,6 +258,45 @@ impl PostgresNode { ar.unpack(&pgdata) .with_context(|| "extracting page backup failed")?; + Ok(()) + } + + // Connect to a page server, get base backup, and untar it to initialize a + // new data directory + pub fn init_from_page_server(&self, auth_type: AuthType, config_only: bool) -> Result<()> { + let pgdata = self.pgdata(); + + println!( + "Extracting base backup to create postgres instance: path={} port={}", + pgdata.display(), + self.address.port() + ); + + // initialize data directory + if self.is_test { + fs::remove_dir_all(&pgdata).ok(); + } + + fs::create_dir_all(&pgdata) + .with_context(|| format!("could not create data directory {}", pgdata.display()))?; + fs::set_permissions(pgdata.as_path(), fs::Permissions::from_mode(0o700)).with_context( + || { + format!( + "could not set permissions in data directory {}", + pgdata.display() + ) + }, + )?; + + if config_only { + //Just create an empty config file + File::create(self.pgdata().join("postgresql.conf").to_str().unwrap())?; + } else { + self.do_basebackup()?; + fs::create_dir_all(self.pgdata().join("pg_wal"))?; + fs::create_dir_all(self.pgdata().join("pg_wal").join("archive_status"))?; + } + // wal_log_hints is mandatory when running against pageserver (see gh issue#192) // TODO: is it possible to check wal_log_hints at pageserver side via XLOG_PARAMETER_CHANGE? self.append_conf( @@ -321,8 +324,6 @@ impl PostgresNode { // page server yet. (gh issue #349) self.append_conf("postgresql.conf", "wal_keep_size='10TB'\n")?; - // Connect it to the page server. 
- // set up authentication let password = if let AuthType::ZenithJWT = auth_type { "$ZENITH_AUTH_TOKEN" @@ -348,8 +349,6 @@ impl PostgresNode { .as_str(), )?; - fs::create_dir_all(self.pgdata().join("pg_wal"))?; - fs::create_dir_all(self.pgdata().join("pg_wal").join("archive_status"))?; Ok(()) } @@ -410,6 +409,46 @@ impl PostgresNode { } pub fn start(&self, auth_token: &Option) -> Result<()> { + // Bail if the node already running. + if self.status() == "running" { + anyhow::bail!("The node is already running"); + } + + // 1. We always start compute node from scratch, so + // if old dir exists, preserve config files and drop the directory + + // XXX Now we only use 'postgresql.conf'. + // If we will need 'pg_hba.conf', support it here too + + let postgresql_conf_path = self.pgdata().join("postgresql.conf"); + let postgresql_conf = fs::read(postgresql_conf_path.clone()).with_context(|| { + format!( + "failed to read config file in {}", + postgresql_conf_path.to_str().unwrap() + ) + })?; + + println!( + "Destroying postgres data directory '{}'", + self.pgdata().to_str().unwrap() + ); + fs::remove_dir_all(&self.pgdata())?; + + // 2. Create new node + self.init_from_page_server(self.env.auth_type, false)?; + + // 3. Bring back config files + + if let Ok(mut file) = OpenOptions::new() + .append(false) + .write(true) + .open(&postgresql_conf_path) + { + file.write_all(&postgresql_conf)?; + file.sync_all()?; + } + + // 4. Finally start the compute node postgres println!("Starting postgres node at '{}'", self.connstr()); self.pg_ctl(&["start"], auth_token) } diff --git a/test_runner/fixtures/zenith_fixtures.py b/test_runner/fixtures/zenith_fixtures.py index f4813d2230..2a1081af8f 100644 --- a/test_runner/fixtures/zenith_fixtures.py +++ b/test_runner/fixtures/zenith_fixtures.py @@ -267,6 +267,7 @@ class Postgres(PgProtocol): branch: str, wal_acceptors: Optional[str] = None, config_lines: Optional[List[str]] = None, + config_only: bool = False, ) -> 'Postgres': """ Create the pg data directory. @@ -278,7 +279,10 @@ class Postgres(PgProtocol): if not config_lines: config_lines = [] - self.zenith_cli.run(['pg', 'create', branch, f'--tenantid={self.tenant_id}']) + if config_only: + self.zenith_cli.run(['pg', 'create', '--config-only', branch, f'--tenantid={self.tenant_id}']) + else: + self.zenith_cli.run(['pg', 'create', branch, f'--tenantid={self.tenant_id}']) self.branch = branch if wal_acceptors is not None: self.adjust_for_wal_acceptors(wal_acceptors) @@ -377,7 +381,8 @@ class Postgres(PgProtocol): config_lines: Optional[List[str]] = None, ) -> 'Postgres': """ - Create a Postgres instance, then start it. + Create a Postgres instance, apply config + and then start it. Returns self. 
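+        (The instance is first created with --config-only; start() then re-creates
+        the data directory from a basebackup, preserving the config written here.)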
""" @@ -385,6 +390,7 @@ class Postgres(PgProtocol): branch=branch, wal_acceptors=wal_acceptors, config_lines=config_lines, + config_only=True, ).start() return self @@ -430,6 +436,54 @@ class PostgresFactory: config_lines=config_lines, ) + def create( + self, + branch: str = "main", + tenant_id: Optional[str] = None, + wal_acceptors: Optional[str] = None, + config_lines: Optional[List[str]] = None + ) -> Postgres: + + pg = Postgres( + zenith_cli=self.zenith_cli, + repo_dir=self.repo_dir, + tenant_id=tenant_id or self.initial_tenant, + port=self.base_port + self.num_instances + 1, + ) + + self.num_instances += 1 + self.instances.append(pg) + + return pg.create( + branch=branch, + wal_acceptors=wal_acceptors, + config_lines=config_lines, + ) + + def config( + self, + branch: str = "main", + tenant_id: Optional[str] = None, + wal_acceptors: Optional[str] = None, + config_lines: Optional[List[str]] = None + ) -> Postgres: + + pg = Postgres( + zenith_cli=self.zenith_cli, + repo_dir=self.repo_dir, + tenant_id=tenant_id or self.initial_tenant, + port=self.base_port + self.num_instances + 1, + ) + + self.num_instances += 1 + self.instances.append(pg) + + return pg.config( + branch=branch, + wal_acceptors=wal_acceptors, + config_lines=config_lines, + ) + def stop_all(self) -> 'PostgresFactory': for pg in self.instances: pg.stop() diff --git a/zenith/src/main.rs b/zenith/src/main.rs index abaa972cc1..82aa39c46f 100644 --- a/zenith/src/main.rs +++ b/zenith/src/main.rs @@ -92,8 +92,18 @@ fn main() -> Result<()> { .setting(AppSettings::ArgRequiredElseHelp) .about("Manage postgres instances") .subcommand(SubCommand::with_name("list").arg(tenantid_arg.clone())) - .subcommand(SubCommand::with_name("create").arg(timeline_arg.clone()).arg(tenantid_arg.clone())) - .subcommand(SubCommand::with_name("start").arg(timeline_arg.clone()).arg(tenantid_arg.clone())) + .subcommand(SubCommand::with_name("create") + .about("Create a postgres compute node") + .arg(timeline_arg.clone()).arg(tenantid_arg.clone()) + .arg( + Arg::with_name("config-only") + .help("Don't do basebackup, create compute node with only config files") + .long("config-only") + .required(false) + )) + .subcommand(SubCommand::with_name("start") + .about("Start a postrges compute node.\n This command actually creates new node from scrath, but preserves existing config files") + .arg(timeline_arg.clone()).arg(tenantid_arg.clone())) .subcommand( SubCommand::with_name("stop") .arg(timeline_arg.clone()) @@ -459,10 +469,9 @@ fn handle_pg(pg_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> { .value_of("tenantid") .map_or(Ok(env.tenantid), |value| value.parse())?; let timeline_name = create_match.value_of("timeline").unwrap_or("main"); - // check is that timeline doesnt already exist - // this check here is because it + let config_only = create_match.is_present("config-only"); - cplane.new_node(tenantid, timeline_name)?; + cplane.new_node(tenantid, timeline_name, config_only)?; } ("start", Some(start_match)) => { let tenantid: ZTenantId = start_match @@ -483,7 +492,7 @@ fn handle_pg(pg_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> { if let Some(node) = node { node.start(&auth_token)?; } else { - let node = cplane.new_node(tenantid, timeline_name)?; + let node = cplane.new_node(tenantid, timeline_name, false)?; node.start(&auth_token)?; } } From 91f72fabc9336d1bd20a4b971ec11ee81e8baf27 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Tue, 17 Aug 2021 18:54:41 +0300 Subject: [PATCH 08/24] Work with smaller segments. 
Split each relish into fixed-sized 10 MB segments. Separate layers are created for each segment. This reduces the write amplification if you have a large relation and update only parts of it; the downside is that you have a lot more files. The 10 MB is just a guess, we should do some modeling and testing in the future to figure out the optimal size. Each segment tracks the size of the segment separately. To figure out the total size of a relish, you need to loop through the segment to find the highest segment that's in use. That's a bit inefficient, but will do for now. We might want to add a cache or something later. --- pageserver/src/layered_repository.rs | 207 +++++++++++++----- pageserver/src/layered_repository/README.md | 22 +- .../src/layered_repository/inmemory_layer.rs | 153 +++++++------ .../src/layered_repository/layer_map.rs | 55 +++-- .../src/layered_repository/snapshot_layer.rs | 78 ++++--- .../src/layered_repository/storage_layer.rs | 55 ++++- pageserver/src/repository.rs | 15 ++ test_runner/batch_others/test_snapfiles_gc.py | 14 +- 8 files changed, 388 insertions(+), 211 deletions(-) diff --git a/pageserver/src/layered_repository.rs b/pageserver/src/layered_repository.rs index 52974bc8f2..44bcb60936 100644 --- a/pageserver/src/layered_repository.rs +++ b/pageserver/src/layered_repository.rs @@ -12,7 +12,7 @@ //! parent timeline, and the last LSN that has been written to disk. //! -use anyhow::{bail, Context, Result}; +use anyhow::{anyhow, bail, Context, Result}; use bytes::Bytes; use lazy_static::lazy_static; use log::*; @@ -47,7 +47,7 @@ mod storage_layer; use inmemory_layer::InMemoryLayer; use layer_map::LayerMap; use snapshot_layer::SnapshotLayer; -use storage_layer::Layer; +use storage_layer::{Layer, SegmentTag, RELISH_SEG_SIZE}; // Timeout when waiting for WAL receiver to catch up to an LSN given in a GetPage@LSN call. static TIMEOUT: Duration = Duration::from_secs(60); @@ -475,7 +475,9 @@ impl Timeline for LayeredTimeline { } let lsn = self.wait_lsn(lsn)?; - if let Some((layer, lsn)) = self.get_layer_for_read(rel, lsn)? { + let seg = SegmentTag::from_blknum(rel, blknum); + + if let Some((layer, lsn)) = self.get_layer_for_read(seg, lsn)? { layer.get_page_at_lsn(&*self.walredo_mgr, blknum, lsn) } else { bail!("relish {} not found at {}", rel, lsn); @@ -491,7 +493,9 @@ impl Timeline for LayeredTimeline { ); } - if let Some((layer, lsn)) = self.get_layer_for_read(rel, lsn)? { + let seg = SegmentTag::from_blknum(rel, blknum); + + if let Some((layer, lsn)) = self.get_layer_for_read(seg, lsn)? { layer.get_page_at_lsn(&*self.walredo_mgr, blknum, lsn) } else { bail!("relish {} not found at {}", rel, lsn); @@ -508,27 +512,43 @@ impl Timeline for LayeredTimeline { let lsn = self.wait_lsn(lsn)?; - if let Some((layer, lsn)) = self.get_layer_for_read(rel, lsn)? { - let result = layer.get_relish_size(lsn); - trace!( - "get_relish_size: rel {} at {}/{} -> {:?}", - rel, - self.timelineid, - lsn, - result - ); - result - } else { - Ok(None) + let mut segno = 0; + loop { + let seg = SegmentTag { rel, segno }; + + let segsize; + if let Some((layer, lsn)) = self.get_layer_for_read(seg, lsn)? 
{ + segsize = layer.get_seg_size(lsn)?; + trace!( + "get_seg_size: {} at {}/{} -> {}", + seg, + self.timelineid, + lsn, + segsize + ); + } else { + if segno == 0 { + return Ok(None); + } + segsize = 0; + } + + if segsize != RELISH_SEG_SIZE { + let result = segno * RELISH_SEG_SIZE + segsize; + return Ok(Some(result)); + } + segno += 1; } } fn get_rel_exists(&self, rel: RelishTag, lsn: Lsn) -> Result { let lsn = self.wait_lsn(lsn)?; + let seg = SegmentTag { rel, segno: 0 }; + let result; - if let Some((layer, lsn)) = self.get_layer_for_read(rel, lsn)? { - result = layer.get_rel_exists(lsn)?; + if let Some((layer, lsn)) = self.get_layer_for_read(seg, lsn)? { + result = layer.get_seg_exists(lsn)?; } else { result = false; } @@ -632,7 +652,10 @@ impl Timeline for LayeredTimeline { rel ); } - let layer = self.get_layer_for_write(rel, rec.lsn)?; + + let seg = SegmentTag::from_blknum(rel, blknum); + + let layer = self.get_layer_for_write(seg, rec.lsn)?; layer.put_wal_record(blknum, rec) } @@ -643,8 +666,84 @@ impl Timeline for LayeredTimeline { debug!("put_truncation: {} to {} blocks at {}", rel, relsize, lsn); - let layer = self.get_layer_for_write(rel, lsn)?; - layer.put_truncation(lsn, relsize) + let oldsize = self + .get_relish_size(rel, self.last_valid_lsn.load())? + .ok_or_else(|| { + anyhow!( + "attempted to truncate non-existent relish {} at {}", + rel, + lsn + ) + })?; + + if oldsize <= relsize { + return Ok(()); + } + let old_last_seg = (oldsize - 1) / RELISH_SEG_SIZE; + + let last_remain_seg = if relsize == 0 { + 0 + } else { + (relsize - 1) / RELISH_SEG_SIZE + }; + + // Unlink segments beyond the last remaining segment. + for remove_segno in (last_remain_seg + 1)..=old_last_seg { + let seg = SegmentTag { + rel, + segno: remove_segno, + }; + let layer = self.get_layer_for_write(seg, lsn)?; + layer.put_unlink(lsn)?; + } + + // Truncate the last remaining segment to the specified size + if relsize == 0 || relsize % RELISH_SEG_SIZE != 0 { + let seg = SegmentTag { + rel, + segno: last_remain_seg, + }; + let layer = self.get_layer_for_write(seg, lsn)?; + layer.put_truncation(lsn, relsize % RELISH_SEG_SIZE)?; + } + + Ok(()) + } + + fn put_unlink(&self, rel: RelishTag, lsn: Lsn) -> Result<()> { + trace!("put_unlink: {} at {}", rel, lsn); + + if rel.is_blocky() { + let oldsize_opt = self.get_relish_size(rel, self.last_valid_lsn.load())?; + if let Some(oldsize) = oldsize_opt { + let old_last_seg = if oldsize == 0 { + 0 + } else { + (oldsize - 1) / RELISH_SEG_SIZE + }; + + // Unlink all segments + for remove_segno in 0..=old_last_seg { + let seg = SegmentTag { + rel, + segno: remove_segno, + }; + let layer = self.get_layer_for_write(seg, lsn)?; + layer.put_unlink(lsn)?; + } + } else { + warn!( + "put_unlink called on non-existent relish {} at {}", + rel, lsn + ); + } + } else { + let seg = SegmentTag::from_blknum(rel, 0); + let layer = self.get_layer_for_write(seg, lsn)?; + layer.put_unlink(lsn)?; + } + + Ok(()) } fn put_page_image( @@ -663,17 +762,12 @@ impl Timeline for LayeredTimeline { ); } - let layer = self.get_layer_for_write(rel, lsn)?; + let seg = SegmentTag::from_blknum(rel, blknum); + + let layer = self.get_layer_for_write(seg, lsn)?; layer.put_page_image(blknum, lsn, img) } - fn put_unlink(&self, rel: RelishTag, lsn: Lsn) -> Result<()> { - trace!("put_unlink: {} at {}", rel, lsn); - - let layer = self.get_layer_for_write(rel, lsn)?; - layer.put_unlink(lsn) - } - fn put_raw_data( &self, _tag: crate::object_key::ObjectTag, @@ -807,7 +901,7 @@ impl LayeredTimeline { for layer_rc in 
snapfiles.iter() { info!( "found layer {} {}-{} {} on timeline {}", - layer_rc.get_relish_tag(), + layer_rc.get_seg_tag(), layer_rc.get_start_lsn(), layer_rc.get_end_lsn(), layer_rc.is_dropped(), @@ -822,17 +916,17 @@ impl LayeredTimeline { /// /// Get a handle to a Layer for reading. /// - /// The returned SnapshotFile might be from an ancestor timeline, if the - /// relation hasn't been updated on this timeline yet. + /// The returned Layer might be from an ancestor timeline, if the + /// segment hasn't been updated on this timeline yet. /// fn get_layer_for_read( &self, - rel: RelishTag, + seg: SegmentTag, lsn: Lsn, ) -> Result, Lsn)>> { trace!( "get_layer_for_read called for {} at {}/{}", - rel, + seg, self.timelineid, lsn ); @@ -859,7 +953,7 @@ impl LayeredTimeline { // // Do we have a layer on this timeline? - if let Some(layer) = layers.get(rel, lsn) { + if let Some(layer) = layers.get(seg, lsn) { trace!( "found layer in cache: {} {}-{}", timeline.timelineid, @@ -869,6 +963,11 @@ impl LayeredTimeline { assert!(layer.get_start_lsn() <= lsn); + if layer.is_dropped() && layer.get_end_lsn() <= lsn { + // The segment was unlinked + return Ok(None); + } + return Ok(Some((layer.clone(), lsn))); } @@ -886,14 +985,14 @@ impl LayeredTimeline { /// /// Get a handle to the latest layer for appending. /// - fn get_layer_for_write(&self, rel: RelishTag, lsn: Lsn) -> Result> { + fn get_layer_for_write(&self, seg: SegmentTag, lsn: Lsn) -> Result> { if lsn < self.last_valid_lsn.load() { bail!("cannot modify relation after advancing last_valid_lsn"); } // Look up the correct layer. let layers = self.layers.lock().unwrap(); - if let Some(layer) = layers.get(rel, lsn) { + if let Some(layer) = layers.get(seg, lsn) { // If it's writeable, good, return it. if !layer.is_frozen() { return Ok(Arc::clone(&layer)); @@ -912,7 +1011,7 @@ impl LayeredTimeline { drop(layers); let layer; - if let Some((prev_layer, _prev_lsn)) = self.get_layer_for_read(rel, lsn)? { + if let Some((prev_layer, _prev_lsn)) = self.get_layer_for_read(seg, lsn)? { // Create new entry after the previous one. let lsn; if prev_layer.get_timeline_id() != self.timelineid { @@ -920,7 +1019,7 @@ impl LayeredTimeline { lsn = self.ancestor_lsn; trace!( "creating file for write for {} at branch point {}/{}", - rel, + seg, self.timelineid, lsn ); @@ -928,7 +1027,7 @@ impl LayeredTimeline { lsn = prev_layer.get_end_lsn(); trace!( "creating file for write for {} after previous layer {}/{}", - rel, + seg, self.timelineid, lsn ); @@ -951,12 +1050,12 @@ impl LayeredTimeline { // New relation. 
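+        // (Neither this timeline nor its ancestors have a layer for this segment
+        // yet, so start a brand new in-memory layer.)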
trace!( "creating layer for write for new rel {} at {}/{}", - rel, + seg, self.timelineid, lsn ); - layer = InMemoryLayer::create(self.conf, self.timelineid, self.tenantid, rel, lsn)?; + layer = InMemoryLayer::create(self.conf, self.timelineid, self.tenantid, seg, lsn)?; } let mut layers = self.layers.lock().unwrap(); @@ -1055,7 +1154,7 @@ impl LayeredTimeline { for new_layer in new_layers { trace!( "freeze returned layer {} {}-{}", - new_layer.get_relish_tag(), + new_layer.get_seg_tag(), new_layer.get_start_lsn(), new_layer.get_end_lsn() ); @@ -1129,8 +1228,8 @@ impl LayeredTimeline { let mut layers_to_remove: Vec> = Vec::new(); // Determine for each file if it needs to be retained - 'outer: for ((rel, _lsn), l) in layers.inner.iter() { - if rel.is_relation() { + 'outer: for ((seg, _lsn), l) in layers.inner.iter() { + if seg.rel.is_relation() { result.snapshot_relfiles_total += 1; } else { result.snapshot_nonrelfiles_total += 1; @@ -1140,12 +1239,12 @@ impl LayeredTimeline { if l.get_end_lsn() > cutoff { info!( "keeping {} {}-{} because it's newer than cutoff {}", - rel, + seg, l.get_start_lsn(), l.get_end_lsn(), cutoff ); - if rel.is_relation() { + if seg.rel.is_relation() { result.snapshot_relfiles_needed_by_cutoff += 1; } else { result.snapshot_nonrelfiles_needed_by_cutoff += 1; @@ -1159,12 +1258,12 @@ impl LayeredTimeline { if l.get_start_lsn() <= *retain_lsn && *retain_lsn <= l.get_end_lsn() { info!( "keeping {} {}-{} because it's needed by branch point {}", - rel, + seg, l.get_start_lsn(), l.get_end_lsn(), *retain_lsn ); - if rel.is_relation() { + if seg.rel.is_relation() { result.snapshot_relfiles_needed_by_branches += 1; } else { result.snapshot_nonrelfiles_needed_by_branches += 1; @@ -1174,8 +1273,8 @@ impl LayeredTimeline { } // Unless the relation was dropped, is there a later snapshot file for this relation? - if !l.is_dropped() && !layers.newer_layer_exists(l.get_relish_tag(), l.get_end_lsn()) { - if rel.is_relation() { + if !l.is_dropped() && !layers.newer_layer_exists(l.get_seg_tag(), l.get_end_lsn()) { + if seg.rel.is_relation() { result.snapshot_relfiles_not_updated += 1; } else { result.snapshot_nonrelfiles_not_updated += 1; @@ -1186,7 +1285,7 @@ impl LayeredTimeline { // We didn't find any reason to keep this file, so remove it. info!( "garbage collecting {} {}-{} {}", - l.get_relish_tag(), + l.get_seg_tag(), l.get_start_lsn(), l.get_end_lsn(), l.is_dropped() @@ -1202,13 +1301,13 @@ impl LayeredTimeline { layers.remove(&*doomed_layer); if doomed_layer.is_dropped() { - if doomed_layer.get_relish_tag().is_relation() { + if doomed_layer.get_seg_tag().rel.is_relation() { result.snapshot_relfiles_dropped += 1; } else { result.snapshot_nonrelfiles_dropped += 1; } } else { - if doomed_layer.get_relish_tag().is_relation() { + if doomed_layer.get_seg_tag().rel.is_relation() { result.snapshot_relfiles_removed += 1; } else { result.snapshot_nonrelfiles_removed += 1; diff --git a/pageserver/src/layered_repository/README.md b/pageserver/src/layered_repository/README.md index db3d7feb79..96e89b6f24 100644 --- a/pageserver/src/layered_repository/README.md +++ b/pageserver/src/layered_repository/README.md @@ -7,22 +7,22 @@ memory. Every now and then, the accumulated changes are written out to new files. The files are called "snapshot files". Each snapshot file corresponds -to one PostgreSQL relation fork. The snapshot files for each timeline -are stored in the timeline's subdirectory under +to one 10 MB slice of a PostgreSQL relation fork. 
The snapshot files +for each timeline are stored in the timeline's subdirectory under .zenith/tenants//timelines. The files are named like this: - rel______ + rel_______ For example: - rel_1663_13990_2609_0_000000000169C348_0000000001702000 + rel_1663_13990_2609_0_10_000000000169C348_0000000001702000 Some non-relation files are also stored in repository. For example, a CLOG segment would be named like this: - pg_xact_0000_00000000198B06B0_00000000198C2550 + pg_xact_0000_0_00000000198B06B0_00000000198C2550 There is no difference in how the relation and non-relation files are managed, except that the first part of file names is different. @@ -38,7 +38,7 @@ version of the relation in the LSN range. If a file has been dropped, the last snapshot file for it is created with the _DROPPED suffix, e.g. - rel_1663_13990_2609_0_000000000169C348_0000000001702000_DROPPED + rel_1663_13990_2609_0_10_000000000169C348_0000000001702000_DROPPED In addition to the relations, with "rel_*" prefix, we use the same format for storing various smaller files from the PostgreSQL data @@ -51,14 +51,14 @@ relation in the storage" The full path of a snapshot file looks like this: - .zenith/tenants/941ddc8604413b88b3d208bddf90396c/timelines/4af489b06af8eed9e27a841775616962/rel_1663_13990_2609_0_000000000169C348_0000000001702000 + .zenith/tenants/941ddc8604413b88b3d208bddf90396c/timelines/4af489b06af8eed9e27a841775616962/rel_1663_13990_2609_0_10_000000000169C348_0000000001702000 For simplicity, the examples below use a simplified notation for the paths. The tenant ID is left out, the timeline ID is replaced with -the human-readable branch name, and spcnode+dbnode+relnode+forkum with -a human-readable table name. The LSNs are also shorter. For example, a -snapshot file for 'orders' table on 'main' branch, with LSN range -100-200 would be: +the human-readable branch name, and spcnode+dbnode+relnode+forkum+segno +with a human-readable table name. The LSNs are also shorter. For +example, a snapshot file for 'orders' table on 'main' branch, with LSN +range 100-200 would be: main/orders_100_200 diff --git a/pageserver/src/layered_repository/inmemory_layer.rs b/pageserver/src/layered_repository/inmemory_layer.rs index 34be5a1740..d59a82b8cd 100644 --- a/pageserver/src/layered_repository/inmemory_layer.rs +++ b/pageserver/src/layered_repository/inmemory_layer.rs @@ -3,10 +3,8 @@ //! are held in a BTreeMap, and there's another BTreeMap to track the size of the relation. //! -use crate::layered_repository::storage_layer::Layer; -use crate::layered_repository::storage_layer::PageVersion; +use crate::layered_repository::storage_layer::{Layer, PageVersion, SegmentTag, RELISH_SEG_SIZE}; use crate::layered_repository::SnapshotLayer; -use crate::relish::*; use crate::repository::WALRecord; use crate::walredo::WalRedoManager; use crate::PageServerConf; @@ -26,7 +24,7 @@ pub struct InMemoryLayer { conf: &'static PageServerConf, tenantid: ZTenantId, timelineid: ZTimelineId, - rel: RelishTag, + seg: SegmentTag, /// /// This layer contains all the changes from 'start_lsn'. The @@ -51,9 +49,9 @@ pub struct InMemoryLayerInner { page_versions: BTreeMap<(u32, Lsn), PageVersion>, /// - /// `relsizes` tracks the size of the relation at different points in time. + /// `segsizes` tracks the size of the segment at different points in time. 
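+    /// Like `page_versions`, this is keyed by LSN: a size lookup takes the
+    /// newest entry at or before the LSN of interest.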
/// - relsizes: BTreeMap, + segsizes: BTreeMap, } impl Layer for InMemoryLayer { @@ -65,8 +63,8 @@ impl Layer for InMemoryLayer { return self.timelineid; } - fn get_relish_tag(&self) -> RelishTag { - return self.rel; + fn get_seg_tag(&self) -> SegmentTag { + return self.seg; } fn get_start_lsn(&self) -> Lsn { @@ -74,7 +72,13 @@ impl Layer for InMemoryLayer { } fn get_end_lsn(&self) -> Lsn { - return Lsn(u64::MAX); + let inner = self.inner.lock().unwrap(); + + if let Some(drop_lsn) = inner.drop_lsn { + drop_lsn + } else { + Lsn(u64::MAX) + } } fn is_dropped(&self) -> bool { @@ -94,6 +98,8 @@ impl Layer for InMemoryLayer { let mut page_img: Option = None; let mut need_base_image_lsn: Option = Some(lsn); + assert!(self.seg.blknum_in_seg(blknum)); + { let inner = self.inner.lock().unwrap(); let minkey = (blknum, Lsn(0)); @@ -132,12 +138,12 @@ impl Layer for InMemoryLayer { // but never writes the page. // // Would be nice to detect that situation better. - warn!("Page {} blk {} at {} not found", self.rel, blknum, lsn); + warn!("Page {} blk {} at {} not found", self.seg.rel, blknum, lsn); return Ok(ZERO_PAGE.clone()); } bail!( "No base image found for page {} blk {} at {}/{}", - self.rel, + self.seg.rel, blknum, self.timelineid, lsn @@ -150,14 +156,14 @@ impl Layer for InMemoryLayer { trace!( "found page image for blk {} in {} at {}/{}, no WAL redo required", blknum, - self.rel, + self.seg.rel, self.timelineid, lsn ); Ok(img) } else { // FIXME: this ought to be an error? - warn!("Page {} blk {} at {} not found", self.rel, blknum, lsn); + warn!("Page {} blk {} at {} not found", self.seg.rel, blknum, lsn); Ok(ZERO_PAGE.clone()) } } else { @@ -169,7 +175,7 @@ impl Layer for InMemoryLayer { // FIXME: this ought to be an error? warn!( "Base image for page {}/{} at {} not found, but got {} WAL records", - self.rel, + self.seg.rel, blknum, lsn, records.len() @@ -177,11 +183,11 @@ impl Layer for InMemoryLayer { Ok(ZERO_PAGE.clone()) } else { if page_img.is_some() { - trace!("found {} WAL records and a base image for blk {} in {} at {}/{}, performing WAL redo", records.len(), blknum, self.rel, self.timelineid, lsn); + trace!("found {} WAL records and a base image for blk {} in {} at {}/{}, performing WAL redo", records.len(), blknum, self.seg.rel, self.timelineid, lsn); } else { - trace!("found {} WAL records that will init the page for blk {} in {} at {}/{}, performing WAL redo", records.len(), blknum, self.rel, self.timelineid, lsn); + trace!("found {} WAL records that will init the page for blk {} in {} at {}/{}, performing WAL redo", records.len(), blknum, self.seg.rel, self.timelineid, lsn); } - let img = walredo_mgr.request_redo(self.rel, blknum, lsn, page_img, records)?; + let img = walredo_mgr.request_redo(self.seg.rel, blknum, lsn, page_img, records)?; self.put_page_image(blknum, lsn, img.clone())?; @@ -191,26 +197,26 @@ impl Layer for InMemoryLayer { } /// Get size of the relation at given LSN - fn get_relish_size(&self, lsn: Lsn) -> Result> { + fn get_seg_size(&self, lsn: Lsn) -> Result { // Scan the BTreeMap backwards, starting from the given entry. 
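+        // (The newest size recorded at or before 'lsn' is the one still in
+        // effect at 'lsn'.)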
let inner = self.inner.lock().unwrap(); - let mut iter = inner.relsizes.range((Included(&Lsn(0)), Included(&lsn))); + let mut iter = inner.segsizes.range((Included(&Lsn(0)), Included(&lsn))); if let Some((_entry_lsn, entry)) = iter.next_back() { let result = *entry; drop(inner); - trace!("get_relish_size: {} at {} -> {}", self.rel, lsn, result); - Ok(Some(result)) + trace!("get_seg_size: {} at {} -> {}", self.seg, lsn, result); + Ok(result) } else { - Ok(None) + bail!("No size found for {} at {} in memory", self.seg, lsn); } } - /// Does this relation exist at given LSN? - fn get_rel_exists(&self, lsn: Lsn) -> Result { + /// Does this segment exist at given LSN? + fn get_seg_exists(&self, lsn: Lsn) -> Result { let inner = self.inner.lock().unwrap(); - // Is the requested LSN after the rel was dropped? + // Is the requested LSN after the segment was dropped? if let Some(drop_lsn) = inner.drop_lsn { if lsn >= drop_lsn { return Ok(false); @@ -226,10 +232,12 @@ impl Layer for InMemoryLayer { /// Common subroutine of the public put_wal_record() and put_page_image() functions. /// Adds the page version to the in-memory tree fn put_page_version(&self, blknum: u32, lsn: Lsn, pv: PageVersion) -> Result<()> { + assert!(self.seg.blknum_in_seg(blknum)); + trace!( "put_page_version blk {} of {} at {}/{}", blknum, - self.rel, + self.seg.rel, self.timelineid, lsn ); @@ -240,14 +248,16 @@ impl Layer for InMemoryLayer { if old.is_some() { // We already had an entry for this LSN. That's odd.. warn!( - "Page version of rel {:?} blk {} at {} already exists", - self.rel, blknum, lsn + "Page version of rel {} blk {} at {} already exists", + self.seg.rel, blknum, lsn ); } // Also update the relation size, if this extended the relation. - if self.rel.is_blocky() { - let mut iter = inner.relsizes.range((Included(&Lsn(0)), Included(&lsn))); + if self.seg.rel.is_blocky() { + let newsize = blknum - self.seg.segno * RELISH_SEG_SIZE + 1; + + let mut iter = inner.segsizes.range((Included(&Lsn(0)), Included(&lsn))); let oldsize; if let Some((_entry_lsn, entry)) = iter.next_back() { @@ -256,15 +266,15 @@ impl Layer for InMemoryLayer { oldsize = 0; //bail!("No old size found for {} at {}", self.tag, lsn); } - if blknum >= oldsize { + if newsize > oldsize { trace!( - "enlarging relation {} from {} to {} blocks at {}", - self.rel, + "enlarging segment {} from {} to {} blocks at {}", + self.seg, oldsize, - blknum + 1, + newsize, lsn ); - inner.relsizes.insert(lsn, blknum + 1); + inner.segsizes.insert(lsn, newsize); } } @@ -272,9 +282,9 @@ impl Layer for InMemoryLayer { } /// Remember that the relation was truncated at given LSN - fn put_truncation(&self, lsn: Lsn, relsize: u32) -> anyhow::Result<()> { + fn put_truncation(&self, lsn: Lsn, segsize: u32) -> anyhow::Result<()> { let mut inner = self.inner.lock().unwrap(); - let old = inner.relsizes.insert(lsn, relsize); + let old = inner.segsizes.insert(lsn, segsize); if old.is_some() { // We already had an entry for this LSN. That's odd.. 
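
The size arithmetic in put_page_version() above is segment-relative: writing relish block 'blknum' into segment 'segno' implies a segment size of at least blknum - segno * RELISH_SEG_SIZE + 1 blocks. A small standalone sketch of that calculation, for illustration only:

    // RELISH_SEG_SIZE is 10 MB of 8 KB pages, i.e. 1280 blocks per segment.
    const RELISH_SEG_SIZE: u32 = 10 * 1024 * 1024 / 8192;

    fn implied_seg_size(blknum: u32, segno: u32) -> u32 {
        blknum - segno * RELISH_SEG_SIZE + 1
    }

    fn main() {
        // Block 2560 is the first block of segment 2 (2 * 1280 = 2560),
        // so writing it implies a segment size of one block.
        assert_eq!(implied_seg_size(2560, 2), 1);
        // Block 2999 sits at offset 439 within segment 2, implying 440 blocks.
        assert_eq!(implied_seg_size(2999, 2), 440);
    }
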
@@ -291,7 +301,7 @@ impl Layer for InMemoryLayer { assert!(inner.drop_lsn.is_none()); inner.drop_lsn = Some(lsn); - info!("dropped relation {} at {}", self.rel, lsn); + info!("dropped segment {} at {}", self.seg, lsn); Ok(()) } @@ -314,7 +324,7 @@ impl Layer for InMemoryLayer { ) -> Result>> { info!( "freezing in memory layer for {} on timeline {} at {}", - self.rel, self.timelineid, cutoff_lsn + self.seg, self.timelineid, cutoff_lsn ); let inner = self.inner.lock().unwrap(); @@ -334,17 +344,17 @@ impl Layer for InMemoryLayer { // Divide all the page versions into old and new at the 'end_lsn' cutoff point. let mut before_page_versions; - let mut before_relsizes; + let mut before_segsizes; let mut after_page_versions; - let mut after_relsizes; + let mut after_segsizes; if !dropped { - before_relsizes = BTreeMap::new(); - after_relsizes = BTreeMap::new(); - for (lsn, size) in inner.relsizes.iter() { + before_segsizes = BTreeMap::new(); + after_segsizes = BTreeMap::new(); + for (lsn, size) in inner.segsizes.iter() { if *lsn > end_lsn { - after_relsizes.insert(*lsn, *size); + after_segsizes.insert(*lsn, *size); } else { - before_relsizes.insert(*lsn, *size); + before_segsizes.insert(*lsn, *size); } } @@ -359,8 +369,8 @@ impl Layer for InMemoryLayer { } } else { before_page_versions = inner.page_versions.clone(); - before_relsizes = inner.relsizes.clone(); - after_relsizes = BTreeMap::new(); + before_segsizes = inner.segsizes.clone(); + after_segsizes = BTreeMap::new(); after_page_versions = BTreeMap::new(); } @@ -372,19 +382,19 @@ impl Layer for InMemoryLayer { self.conf, self.timelineid, self.tenantid, - self.rel, + self.seg, self.start_lsn, end_lsn, dropped, before_page_versions, - before_relsizes, + before_segsizes, )?; let mut result: Vec> = Vec::new(); // If there were any page versions after the cutoff, initialize a new in-memory layer // to hold them - if !after_relsizes.is_empty() || !after_page_versions.is_empty() { - info!("created new in-mem layer for {} {}-", self.rel, end_lsn); + if !after_segsizes.is_empty() || !after_page_versions.is_empty() { + info!("created new in-mem layer for {} {}-", self.seg, end_lsn); let new_layer = Self::copy_snapshot( self.conf, @@ -396,7 +406,7 @@ impl Layer for InMemoryLayer { )?; let mut new_inner = new_layer.inner.lock().unwrap(); new_inner.page_versions.append(&mut after_page_versions); - new_inner.relsizes.append(&mut after_relsizes); + new_inner.segsizes.append(&mut after_segsizes); drop(new_inner); result.push(Arc::new(new_layer)); @@ -425,12 +435,12 @@ impl InMemoryLayer { conf: &'static PageServerConf, timelineid: ZTimelineId, tenantid: ZTenantId, - rel: RelishTag, + seg: SegmentTag, start_lsn: Lsn, ) -> Result { trace!( "initializing new empty InMemoryLayer for writing {} on timeline {} at {}", - rel, + seg, timelineid, start_lsn ); @@ -439,12 +449,12 @@ impl InMemoryLayer { conf, timelineid, tenantid, - rel, + seg, start_lsn, inner: Mutex::new(InMemoryLayerInner { drop_lsn: None, page_versions: BTreeMap::new(), - relsizes: BTreeMap::new(), + segsizes: BTreeMap::new(), }), }) } @@ -463,26 +473,27 @@ impl InMemoryLayer { ) -> Result { trace!( "initializing new InMemoryLayer for writing {} on timeline {} at {}", - src.get_relish_tag(), + src.get_seg_tag(), timelineid, lsn ); let mut page_versions = BTreeMap::new(); - let mut relsizes = BTreeMap::new(); + let mut segsizes = BTreeMap::new(); + let seg = src.get_seg_tag(); + + let startblk; let size; - if src.get_relish_tag().is_blocky() { - if let Some(sz) = src.get_relish_size(lsn)? 
{ - relsizes.insert(lsn, sz); - size = sz; - } else { - bail!("no size found or {} at {}", src.get_relish_tag(), lsn); - } + if seg.rel.is_blocky() { + size = src.get_seg_size(lsn)?; + segsizes.insert(lsn, size); + startblk = seg.segno * RELISH_SEG_SIZE; } else { size = 1; + startblk = 0; } - for blknum in 0..size { + for blknum in startblk..(startblk + size) { let img = src.get_page_at_lsn(walredo_mgr, blknum, lsn)?; let pv = PageVersion { page_image: Some(img), @@ -495,12 +506,12 @@ impl InMemoryLayer { conf, timelineid, tenantid, - rel: src.get_relish_tag(), + seg: src.get_seg_tag(), start_lsn: lsn, inner: Mutex::new(InMemoryLayerInner { drop_lsn: None, page_versions: page_versions, - relsizes: relsizes, + segsizes: segsizes, }), }) } @@ -510,12 +521,12 @@ impl InMemoryLayer { pub fn dump(&self) -> String { let mut result = format!( "----- inmemory layer for {} {}-> ----\n", - self.rel, self.start_lsn + self.seg, self.start_lsn ); let inner = self.inner.lock().unwrap(); - for (k, v) in inner.relsizes.iter() { + for (k, v) in inner.segsizes.iter() { result += &format!("{}: {}\n", k, v); } for (k, v) in inner.page_versions.iter() { diff --git a/pageserver/src/layered_repository/layer_map.rs b/pageserver/src/layered_repository/layer_map.rs index 528216d5e3..37d2bea8f5 100644 --- a/pageserver/src/layered_repository/layer_map.rs +++ b/pageserver/src/layered_repository/layer_map.rs @@ -9,7 +9,7 @@ //! new snapshot layers and corresponding files are written to disk. //! -use crate::layered_repository::storage_layer::Layer; +use crate::layered_repository::storage_layer::{Layer, SegmentTag}; use crate::relish::*; use anyhow::Result; use log::*; @@ -19,20 +19,20 @@ use std::ops::Bound::Included; use std::sync::Arc; use zenith_utils::lsn::Lsn; -/// LayerMap is a BTreeMap keyed by RelishTag and the layer's start LSN. +/// LayerMap is a BTreeMap keyed by SegmentTag and the layer's start LSN. /// It provides a couple of convenience functions over a plain BTreeMap pub struct LayerMap { - pub inner: BTreeMap<(RelishTag, Lsn), Arc>, + pub inner: BTreeMap<(SegmentTag, Lsn), Arc>, } impl LayerMap { /// - /// Look up using the given rel tag and LSN. This differs from a plain + /// Look up using the given segment tag and LSN. This differs from a plain /// key-value lookup in that if there is any layer that covers the /// given LSN, or precedes the given LSN, it is returned. In other words, /// you don't need to know the exact start LSN of the layer. /// - pub fn get(&self, tag: RelishTag, lsn: Lsn) -> Option> { + pub fn get(&self, tag: SegmentTag, lsn: Lsn) -> Option> { let startkey = (tag, Lsn(0)); let endkey = (tag, lsn); @@ -48,32 +48,32 @@ impl LayerMap { } pub fn insert(&mut self, layer: Arc) { - let rel = layer.get_relish_tag(); + let seg = layer.get_seg_tag(); let start_lsn = layer.get_start_lsn(); - self.inner.insert((rel, start_lsn), Arc::clone(&layer)); + self.inner.insert((seg, start_lsn), Arc::clone(&layer)); } pub fn remove(&mut self, layer: &dyn Layer) { - let rel = layer.get_relish_tag(); + let seg = layer.get_seg_tag(); let start_lsn = layer.get_start_lsn(); - self.inner.remove(&(rel, start_lsn)); + self.inner.remove(&(seg, start_lsn)); } pub fn list_rels(&self, spcnode: u32, dbnode: u32) -> Result> { let mut rels: HashSet = HashSet::new(); // Scan the timeline directory to get all rels in this timeline. 
- for ((rel, _lsn), _l) in self.inner.iter() { - if let RelishTag::Relation(reltag) = rel { + for ((seg, _lsn), _l) in self.inner.iter() { + if let RelishTag::Relation(reltag) = seg.rel { // FIXME: skip if it was dropped before the requested LSN. But there is no // LSN argument if (spcnode == 0 || reltag.spcnode == spcnode) && (dbnode == 0 || reltag.dbnode == dbnode) { - rels.insert(*reltag); + rels.insert(reltag); } } } @@ -84,43 +84,40 @@ impl LayerMap { let mut rels: HashSet = HashSet::new(); // Scan the timeline directory to get all rels in this timeline. - for ((rel, _lsn), _l) in self.inner.iter() { + for ((seg, _lsn), _l) in self.inner.iter() { // FIXME: skip if it was dropped before the requested LSN. - if let RelishTag::Relation(_) = rel { + if let RelishTag::Relation(_) = seg.rel { } else { - rels.insert(*rel); + rels.insert(seg.rel); } } Ok(rels) } - /// Is there a newer layer for given relation? - pub fn newer_layer_exists(&self, rel: RelishTag, lsn: Lsn) -> bool { - let startkey = (rel, lsn); - let endkey = (rel, Lsn(u64::MAX)); + /// Is there a newer layer for given segment? + pub fn newer_layer_exists(&self, seg: SegmentTag, lsn: Lsn) -> bool { + let startkey = (seg, lsn); + let endkey = (seg, Lsn(u64::MAX)); - for ((_rel, newer_lsn), layer) in self.inner.range((Included(startkey), Included(endkey))) { + for ((_newer_seg, newer_lsn), layer) in + self.inner.range((Included(startkey), Included(endkey))) + { if layer.get_end_lsn() > lsn { trace!( - "found later layer for rel {}, {} {}-{}", - rel, + "found later layer for {}, {} {}-{}", + seg, lsn, newer_lsn, layer.get_end_lsn() ); return true; } else { - trace!( - "found singleton layer for rel {}, {} {}", - rel, - lsn, - newer_lsn - ); + trace!("found singleton layer for {}, {} {}", seg, lsn, newer_lsn); continue; } } - trace!("no later layer found for rel {}, {}", rel, lsn); + trace!("no later layer found for {}, {}", seg, lsn); false } } diff --git a/pageserver/src/layered_repository/snapshot_layer.rs b/pageserver/src/layered_repository/snapshot_layer.rs index e2936ffd31..44695aa28b 100644 --- a/pageserver/src/layered_repository/snapshot_layer.rs +++ b/pageserver/src/layered_repository/snapshot_layer.rs @@ -37,9 +37,8 @@ //! A snapshot file is constructed using the 'bookfile' crate. Each file consists of two //! parts: the page versions and the relation sizes. They are stored as separate chapters. //! 
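
Before the SnapshotFileName code below, a rough standalone sketch of the parsing that from_str performs may help. It handles only the 'rel_' prefix from the naming scheme described earlier (the real code also recognizes 'pg_xact_' and the other non-relation prefixes), and error handling is reduced to Option:

    // rel_<spcnode>_<dbnode>_<relnode>_<forknum>_<segno>_<startLSN>_<endLSN>[_DROPPED]
    // where the LSNs are zero-padded hex.
    fn parse_rel_snapshot_name(
        fname: &str,
    ) -> Option<(u32, u32, u32, u32, u32, u64, u64, bool)> {
        let mut parts = fname.strip_prefix("rel_")?.split('_');
        let spcnode = parts.next()?.parse().ok()?;
        let dbnode = parts.next()?.parse().ok()?;
        let relnode = parts.next()?.parse().ok()?;
        let forknum = parts.next()?.parse().ok()?;
        let segno = parts.next()?.parse().ok()?;
        let start_lsn = u64::from_str_radix(parts.next()?, 16).ok()?;
        let end_lsn = u64::from_str_radix(parts.next()?, 16).ok()?;
        let dropped = match parts.next() {
            Some("DROPPED") => true,
            Some(_) => return None,
            None => false,
        };
        Some((spcnode, dbnode, relnode, forknum, segno, start_lsn, end_lsn, dropped))
    }

    fn main() {
        let name = "rel_1663_13990_2609_0_10_000000000169C348_0000000001702000";
        let (spcnode, _db, _rel, _fork, segno, start_lsn, end_lsn, dropped) =
            parse_rel_snapshot_name(name).unwrap();
        assert_eq!((spcnode, segno, dropped), (1663, 10, false));
        assert!(start_lsn < end_lsn);
    }
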
-use crate::layered_repository::storage_layer::Layer; -use crate::layered_repository::storage_layer::PageVersion; use crate::layered_repository::storage_layer::ZERO_PAGE; +use crate::layered_repository::storage_layer::{Layer, PageVersion, SegmentTag}; use crate::relish::*; use crate::repository::WALRecord; use crate::walredo::WalRedoManager; @@ -70,7 +69,7 @@ static REL_SIZES_CHAPTER: u64 = 2; #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)] struct SnapshotFileName { - rel: RelishTag, + seg: SegmentTag, start_lsn: Lsn, end_lsn: Lsn, dropped: bool, @@ -80,11 +79,11 @@ impl SnapshotFileName { fn from_str(fname: &str) -> Option { // Split the filename into parts // - // _____ + // ______ // // or if it was dropped: // - // ______DROPPED + // _______DROPPED // let rel; let mut parts; @@ -135,6 +134,10 @@ impl SnapshotFileName { return None; } + let segno = parts.next()?.parse::().ok()?; + + let seg = SegmentTag { rel, segno }; + let start_lsn = Lsn::from_hex(parts.next()?).ok()?; let end_lsn = Lsn::from_hex(parts.next()?).ok()?; @@ -153,7 +156,7 @@ impl SnapshotFileName { } Some(SnapshotFileName { - rel, + seg, start_lsn, end_lsn, dropped, @@ -161,7 +164,7 @@ impl SnapshotFileName { } fn to_string(&self) -> String { - let basename = match self.rel { + let basename = match self.seg.rel { RelishTag::Relation(reltag) => format!( "rel_{}_{}_{}_{}", reltag.spcnode, reltag.dbnode, reltag.relnode, reltag.forknum @@ -187,8 +190,9 @@ impl SnapshotFileName { }; format!( - "{}_{:016X}_{:016X}{}", + "{}_{}_{:016X}_{:016X}{}", basename, + self.seg.segno, u64::from(self.start_lsn), u64::from(self.end_lsn), if self.dropped { "_DROPPED" } else { "" } @@ -214,7 +218,7 @@ pub struct SnapshotLayer { conf: &'static PageServerConf, pub tenantid: ZTenantId, pub timelineid: ZTimelineId, - pub rel: RelishTag, + pub seg: SegmentTag, // // This entry contains all the changes from 'start_lsn' to 'end_lsn'. The @@ -249,8 +253,8 @@ impl Layer for SnapshotLayer { return self.timelineid; } - fn get_relish_tag(&self) -> RelishTag { - return self.rel; + fn get_seg_tag(&self) -> SegmentTag { + return self.seg; } fn is_dropped(&self) -> bool { @@ -314,12 +318,12 @@ impl Layer for SnapshotLayer { // but never writes the page. // // Would be nice to detect that situation better. - warn!("Page {} blk {} at {} not found", self.rel, blknum, lsn); + warn!("Page {} blk {} at {} not found", self.seg.rel, blknum, lsn); return Ok(ZERO_PAGE.clone()); } bail!( "No base image found for page {} blk {} at {}/{}", - self.rel, + self.seg.rel, blknum, self.timelineid, lsn @@ -332,14 +336,14 @@ impl Layer for SnapshotLayer { trace!( "found page image for blk {} in {} at {}/{}, no WAL redo required", blknum, - self.rel, + self.seg.rel, self.timelineid, lsn ); Ok(img) } else { // FIXME: this ought to be an error? - warn!("Page {} blk {} at {} not found", self.rel, blknum, lsn); + warn!("Page {} blk {} at {} not found", self.seg.rel, blknum, lsn); Ok(ZERO_PAGE.clone()) } } else { @@ -351,7 +355,7 @@ impl Layer for SnapshotLayer { // FIXME: this ought to be an error? 
warn!( "Base image for page {} blk {} at {} not found, but got {} WAL records", - self.rel, + self.seg.rel, blknum, lsn, records.len() @@ -359,11 +363,11 @@ impl Layer for SnapshotLayer { Ok(ZERO_PAGE.clone()) } else { if page_img.is_some() { - trace!("found {} WAL records and a base image for blk {} in {} at {}/{}, performing WAL redo", records.len(), blknum, self.rel, self.timelineid, lsn); + trace!("found {} WAL records and a base image for blk {} in {} at {}/{}, performing WAL redo", records.len(), blknum, self.seg.rel, self.timelineid, lsn); } else { - trace!("found {} WAL records that will init the page for blk {} in {} at {}/{}, performing WAL redo", records.len(), blknum, self.rel, self.timelineid, lsn); + trace!("found {} WAL records that will init the page for blk {} in {} at {}/{}, performing WAL redo", records.len(), blknum, self.seg.rel, self.timelineid, lsn); } - let img = walredo_mgr.request_redo(self.rel, blknum, lsn, page_img, records)?; + let img = walredo_mgr.request_redo(self.seg.rel, blknum, lsn, page_img, records)?; // FIXME: Should we memoize the page image in memory, so that // we wouldn't need to reconstruct it again, if it's requested again? @@ -375,7 +379,7 @@ impl Layer for SnapshotLayer { } /// Get size of the relation at given LSN - fn get_relish_size(&self, lsn: Lsn) -> Result> { + fn get_seg_size(&self, lsn: Lsn) -> Result { // Scan the BTreeMap backwards, starting from the given entry. let inner = self.load()?; let mut iter = inner.relsizes.range((Included(&Lsn(0)), Included(&lsn))); @@ -383,15 +387,23 @@ impl Layer for SnapshotLayer { if let Some((_entry_lsn, entry)) = iter.next_back() { let result = *entry; drop(inner); - trace!("get_relsize: {} at {} -> {}", self.rel, lsn, result); - Ok(Some(result)) + trace!("get_seg_size: {} at {} -> {}", self.seg, lsn, result); + Ok(result) } else { - Ok(None) + error!( + "No size found for {} at {} in snapshot layer {} {}-{}", + self.seg, lsn, self.seg, self.start_lsn, self.end_lsn + ); + bail!( + "No size found for {} at {} in snapshot layer", + self.seg, + lsn + ); } } - /// Does this relation exist at given LSN? - fn get_rel_exists(&self, lsn: Lsn) -> Result { + /// Does this segment exist at given LSN? + fn get_seg_exists(&self, lsn: Lsn) -> Result { // Is the requested LSN after the rel was dropped? 
if self.dropped && lsn >= self.end_lsn { return Ok(false); @@ -404,8 +416,8 @@ impl Layer for SnapshotLayer { // Unsupported write operations fn put_page_version(&self, blknum: u32, lsn: Lsn, _pv: PageVersion) -> Result<()> { panic!( - "cannot modify historical snapshot layer, rel {} blk {} at {}/{}, {}-{}", - self.rel, blknum, self.timelineid, lsn, self.start_lsn, self.end_lsn + "cannot modify historical snapshot layer, {} blk {} at {}/{}, {}-{}", + self.seg, blknum, self.timelineid, lsn, self.start_lsn, self.end_lsn ); } fn put_truncation(&self, _lsn: Lsn, _relsize: u32) -> anyhow::Result<()> { @@ -450,7 +462,7 @@ impl SnapshotLayer { self.timelineid, self.tenantid, &SnapshotFileName { - rel: self.rel, + seg: self.seg, start_lsn: self.start_lsn, end_lsn: self.end_lsn, dropped: self.dropped, @@ -478,7 +490,7 @@ impl SnapshotLayer { conf: &'static PageServerConf, timelineid: ZTimelineId, tenantid: ZTenantId, - rel: RelishTag, + seg: SegmentTag, start_lsn: Lsn, end_lsn: Lsn, dropped: bool, @@ -489,7 +501,7 @@ impl SnapshotLayer { conf: conf, timelineid: timelineid, tenantid: tenantid, - rel: rel, + seg: seg, start_lsn: start_lsn, end_lsn, dropped, @@ -546,7 +558,7 @@ impl SnapshotLayer { self.timelineid, self.tenantid, &SnapshotFileName { - rel: self.rel, + seg: self.seg, start_lsn: self.start_lsn, end_lsn: self.end_lsn, dropped: self.dropped, @@ -593,7 +605,7 @@ impl SnapshotLayer { conf, timelineid, tenantid, - rel: snapfilename.rel, + seg: snapfilename.seg, start_lsn: snapfilename.start_lsn, end_lsn: snapfilename.end_lsn, dropped: snapfilename.dropped, @@ -615,7 +627,7 @@ impl SnapshotLayer { pub fn dump(&self) -> String { let mut result = format!( "----- snapshot layer for {} {}-{} ----\n", - self.rel, self.start_lsn, self.end_lsn + self.seg, self.start_lsn, self.end_lsn ); let inner = self.inner.lock().unwrap(); diff --git a/pageserver/src/layered_repository/storage_layer.rs b/pageserver/src/layered_repository/storage_layer.rs index 7ba5769e2d..461d3cdd25 100644 --- a/pageserver/src/layered_repository/storage_layer.rs +++ b/pageserver/src/layered_repository/storage_layer.rs @@ -1,3 +1,7 @@ +//! +//! Common traits and structs for layers +//! + use crate::relish::RelishTag; use crate::repository::WALRecord; use crate::walredo::WalRedoManager; @@ -5,12 +9,45 @@ use crate::ZTimelineId; use anyhow::Result; use bytes::Bytes; use serde::{Deserialize, Serialize}; +use std::fmt; use std::sync::Arc; use zenith_utils::lsn::Lsn; pub static ZERO_PAGE: Bytes = Bytes::from_static(&[0u8; 8192]); +// Size of one segment in pages (10 MB) +pub const RELISH_SEG_SIZE: u32 = 10 * 1024 * 1024 / 8192; + +/// +/// Each relish stored in the repository is divided into fixed-sized "segments", +/// with 10 MB of key-space, or 1280 8k pages each. +/// +#[derive(Debug, PartialEq, Eq, PartialOrd, Hash, Ord, Clone, Copy)] +pub struct SegmentTag { + pub rel: RelishTag, + pub segno: u32, +} + +impl fmt::Display for SegmentTag { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}.{}", self.rel, self.segno) + } +} + +impl SegmentTag { + pub const fn from_blknum(rel: RelishTag, blknum: u32) -> SegmentTag { + SegmentTag { + rel, + segno: blknum / RELISH_SEG_SIZE, + } + } + + pub fn blknum_in_seg(&self, blknum: u32) -> bool { + blknum / RELISH_SEG_SIZE == self.segno + } +} + /// /// Represents a version of a page at a specific LSN. The LSN is the key of the /// entry in the 'page_versions' hash, it is not duplicated here. 
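
A worked example of the block-to-segment mapping that SegmentTag::from_blknum() and blknum_in_seg() implement above, with RELISH_SEG_SIZE = 10 MB / 8 KB = 1280 pages (standalone, illustration only):

    const RELISH_SEG_SIZE: u32 = 10 * 1024 * 1024 / 8192;

    fn main() {
        assert_eq!(RELISH_SEG_SIZE, 1280);

        // from_blknum(): relish-relative block 5000 belongs to segment 3,
        let blknum = 5000u32;
        let segno = blknum / RELISH_SEG_SIZE;
        assert_eq!(segno, 3);

        // which covers relish-relative blocks 3840..5120.
        assert_eq!(segno * RELISH_SEG_SIZE, 3840);
        assert_eq!((segno + 1) * RELISH_SEG_SIZE, 5120);

        // blknum_in_seg(): block 5200 is outside segment 3.
        assert!(5200 / RELISH_SEG_SIZE != segno);
    }
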
@@ -37,21 +74,21 @@ pub struct PageVersion { } /// -/// A Layer holds all page versions for one relish, in a range of LSNs. +/// A Layer holds all page versions for one segment of a relish, in a range of LSNs. /// There are two kinds of layers, in-memory and snapshot layers. In-memory /// layers are used to ingest incoming WAL, and provide fast access /// to the recent page versions. Snaphot layers are stored on disk, and /// are immutable. /// -/// Each layer contains a full snapshot of the relish at the start +/// Each layer contains a full snapshot of the segment at the start /// LSN. In addition to that, it contains WAL (or more page images) /// needed to recontruct any page version up to the end LSN. /// pub trait Layer: Send + Sync { - // These functions identify the relish and the LSN range that this Layer - // holds. + // These functions identify the relish segment and the LSN range + // that this Layer holds. fn get_timeline_id(&self) -> ZTimelineId; - fn get_relish_tag(&self) -> RelishTag; + fn get_seg_tag(&self) -> SegmentTag; fn get_start_lsn(&self) -> Lsn; fn get_end_lsn(&self) -> Lsn; fn is_dropped(&self) -> bool; @@ -63,6 +100,10 @@ pub trait Layer: Send + Sync { fn is_frozen(&self) -> bool; // Functions that correspond to the Timeline trait functions. + + // Note that the 'blknum' is the offset of the page from the beginning + // of the *relish*, not the beginning of the segment. The requested + // 'blknum' must be covered by this segment. fn get_page_at_lsn( &self, walredo_mgr: &dyn WalRedoManager, @@ -70,9 +111,9 @@ pub trait Layer: Send + Sync { lsn: Lsn, ) -> Result; - fn get_relish_size(&self, lsn: Lsn) -> Result>; + fn get_seg_size(&self, lsn: Lsn) -> Result; - fn get_rel_exists(&self, lsn: Lsn) -> Result; + fn get_seg_exists(&self, lsn: Lsn) -> Result; fn put_page_version(&self, blknum: u32, lsn: Lsn, pv: PageVersion) -> Result<()>; diff --git a/pageserver/src/repository.rs b/pageserver/src/repository.rs index 501b7a6254..81ee5029cd 100644 --- a/pageserver/src/repository.rs +++ b/pageserver/src/repository.rs @@ -521,6 +521,21 @@ mod tests { pg_constants::RELSEG_SIZE - 1 ); + // Truncate to 1500, and then truncate all the way down to 0, one block at a time + // This tests the behavior at segment boundaries + let mut size: i32 = 3000; + while size >= 0 { + lsn += 1; + tline.put_truncation(TESTREL_A, Lsn(lsn), size as u32)?; + tline.advance_last_valid_lsn(Lsn(lsn)); + assert_eq!( + tline.get_relish_size(TESTREL_A, Lsn(lsn))?.unwrap(), + size as u32 + ); + + size -= 1; + } + Ok(()) } diff --git a/test_runner/batch_others/test_snapfiles_gc.py b/test_runner/batch_others/test_snapfiles_gc.py index 761dc95b31..99e4b2747d 100644 --- a/test_runner/batch_others/test_snapfiles_gc.py +++ b/test_runner/batch_others/test_snapfiles_gc.py @@ -31,8 +31,8 @@ def test_snapfiles_gc(zenith_cli, pageserver, postgres, pg_bin): # Create a test table cur.execute("CREATE TABLE foo(x integer)") + cur.execute("INSERT INTO foo VALUES (1)") - print("Inserting two more rows and running GC") cur.execute("select relfilenode from pg_class where oid = 'foo'::regclass"); row = cur.fetchone(); print("relfilenode is {}", row[0]); @@ -46,6 +46,10 @@ def test_snapfiles_gc(zenith_cli, pageserver, postgres, pg_bin): # kicks in and confuses our numbers. cur.execute("VACUUM") + # delete the row, to update the Visibility Map. We don't want the VM + # update to confuse our numbers either. 
+    cur.execute("DELETE FROM foo")
+
     print("Running GC before test")
     pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")
     row = pscur.fetchone()
@@ -54,16 +58,14 @@ def test_snapfiles_gc(zenith_cli, pageserver, postgres, pg_bin):
     snapshot_relfiles_remain = row['snapshot_relfiles_total'] - row['snapshot_relfiles_removed']
     assert snapshot_relfiles_remain > 0

-    # Insert a row. The first insert will also create a metadata entry for the
-    # relation, with size == 1 block. Hence, bump up the expected relation count.
-    snapshot_relfiles_remain += 1;
+    # Insert a row.
     print("Inserting one row and running GC")
     cur.execute("INSERT INTO foo VALUES (1)")
     pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")
     row = pscur.fetchone()
     print_gc_result(row);
-    assert row['snapshot_relfiles_total'] == snapshot_relfiles_remain
-    assert row['snapshot_relfiles_removed'] == 0
+    assert row['snapshot_relfiles_total'] == snapshot_relfiles_remain + 1
+    assert row['snapshot_relfiles_removed'] == 1
     assert row['snapshot_relfiles_dropped'] == 0

     # Insert two more rows and run GC.

From 48f4a7b8860320a6acce8fc73d20ce7a7e8c899c Mon Sep 17 00:00:00 2001
From: Heikki Linnakangas
Date: Tue, 17 Aug 2021 18:54:48 +0300
Subject: [PATCH 09/24] Refactor get_page_at_lsn() logic to layered_repository.rs

There was a lot of duplicated code between the get_page_at_lsn()
implementations in InMemoryLayer and SnapshotLayer.

Move the code for requesting WAL redo from the Layer trait into
LayeredTimeline. The get function in Layer now just returns the WAL
records and base image to the caller, and the caller is responsible
for performing the WAL redo on them.
---
 pageserver/src/layered_repository.rs          | 125 +++++++++++++---
 .../src/layered_repository/inmemory_layer.rs  |  98 +++-----------
 .../src/layered_repository/snapshot_layer.rs  |  96 ++------------
 .../src/layered_repository/storage_layer.rs   |  46 +++++--
 4 files changed, 167 insertions(+), 198 deletions(-)

diff --git a/pageserver/src/layered_repository.rs b/pageserver/src/layered_repository.rs
index 44bcb60936..bc99b9bd56 100644
--- a/pageserver/src/layered_repository.rs
+++ b/pageserver/src/layered_repository.rs
@@ -47,7 +47,9 @@ mod storage_layer;
 use inmemory_layer::InMemoryLayer;
 use layer_map::LayerMap;
 use snapshot_layer::SnapshotLayer;
-use storage_layer::{Layer, SegmentTag, RELISH_SEG_SIZE};
+use storage_layer::{Layer, PageReconstructData, SegmentTag, RELISH_SEG_SIZE};
+
+static ZERO_PAGE: Bytes = Bytes::from_static(&[0u8; 8192]);

 // Timeout when waiting for WAL receiver to catch up to an LSN given in a GetPage@LSN call.
 static TIMEOUT: Duration = Duration::from_secs(60);
@@ -466,22 +468,9 @@ pub struct LayeredTimeline {
 impl Timeline for LayeredTimeline {
     /// Look up given page in the cache.
     fn get_page_at_lsn(&self, rel: RelishTag, blknum: u32, lsn: Lsn) -> Result {
-        if !rel.is_blocky() && blknum != 0 {
-            bail!(
-                "invalid request for block {} for non-blocky relish {}",
-                blknum,
-                rel
-            );
-        }
         let lsn = self.wait_lsn(lsn)?;

-        let seg = SegmentTag::from_blknum(rel, blknum);
-
-        if let Some((layer, lsn)) = self.get_layer_for_read(seg, lsn)? {
-            layer.get_page_at_lsn(&*self.walredo_mgr, blknum, lsn)
-        } else {
-            bail!("relish {} not found at {}", rel, lsn);
-        }
+        self.get_page_at_lsn_nowait(rel, blknum, lsn)
     }

     fn get_page_at_lsn_nowait(&self, rel: RelishTag, blknum: u32, lsn: Lsn) -> Result {
@@ -496,7 +485,7 @@ impl Timeline for LayeredTimeline {
         let seg = SegmentTag::from_blknum(rel, blknum);

         if let Some((layer, lsn)) = self.get_layer_for_read(seg, lsn)?
{ - layer.get_page_at_lsn(&*self.walredo_mgr, blknum, lsn) + self.materialize_page(seg, blknum, lsn, &*layer) } else { bail!("relish {} not found at {}", rel, lsn); } @@ -1040,7 +1029,7 @@ impl LayeredTimeline { ); layer = InMemoryLayer::copy_snapshot( self.conf, - &*self.walredo_mgr, + &self, &*prev_layer, self.timelineid, self.tenantid, @@ -1147,7 +1136,7 @@ impl LayeredTimeline { // Call unload() on all frozen layers, to release memory. for layer in old_layers.values() { if !layer.is_frozen() { - let new_layers = layer.freeze(last_valid_lsn, &*self.walredo_mgr)?; + let new_layers = layer.freeze(last_valid_lsn, &self)?; // replace this layer with the new layers that 'freeze' returned layers.remove(&**layer); @@ -1318,4 +1307,104 @@ impl LayeredTimeline { result.elapsed = now.elapsed(); Ok(result) } + + /// + /// Reconstruct a page version from given Layer + /// + fn materialize_page( + &self, + seg: SegmentTag, + blknum: u32, + lsn: Lsn, + layer: &dyn Layer, + ) -> Result { + let mut data = PageReconstructData { + records: Vec::new(), + page_img: None, + }; + + if let Some(_cont_lsn) = layer.get_page_reconstruct_data(blknum, lsn, &mut data)? { + // The layers are currently fully self-contained, so we should have found all + // the data we need to reconstruct the page in the layer. + if data.records.is_empty() { + // no records, and no base image. This can happen if PostgreSQL extends a relation + // but never writes the page. + // + // Would be nice to detect that situation better. + warn!("Page {} blk {} at {} not found", seg.rel, blknum, lsn); + return Ok(ZERO_PAGE.clone()); + } + bail!( + "No base image found for page {} blk {} at {}/{}", + seg.rel, + blknum, + self.timelineid, + lsn, + ); + } + self.reconstruct_page(seg.rel, blknum, lsn, data) + } + + /// + /// Reconstruct a page version, using the given base image and WAL records in 'data'. + /// + fn reconstruct_page( + &self, + rel: RelishTag, + blknum: u32, + request_lsn: Lsn, + mut data: PageReconstructData, + ) -> Result { + // Perform WAL redo if needed + data.records.reverse(); + + // If we have a page image, and no WAL, we're all set + if data.records.is_empty() { + if let Some(img) = &data.page_img { + trace!( + "found page image for blk {} in {} at {}/{}, no WAL redo required", + blknum, + rel, + self.timelineid, + request_lsn + ); + Ok(img.clone()) + } else { + // FIXME: this ought to be an error? + warn!("Page {} blk {} at {} not found", rel, blknum, request_lsn); + Ok(ZERO_PAGE.clone()) + } + } else { + // We need to do WAL redo. + // + // If we don't have a base image, then the oldest WAL record better initialize + // the page + if data.page_img.is_none() && !data.records.first().unwrap().will_init { + // FIXME: this ought to be an error? 
+ warn!( + "Base image for page {}/{} at {} not found, but got {} WAL records", + rel, + blknum, + request_lsn, + data.records.len() + ); + Ok(ZERO_PAGE.clone()) + } else { + if data.page_img.is_some() { + trace!("found {} WAL records and a base image for blk {} in {} at {}/{}, performing WAL redo", data.records.len(), blknum, rel, self.timelineid, request_lsn); + } else { + trace!("found {} WAL records that will init the page for blk {} in {} at {}/{}, performing WAL redo", data.records.len(), blknum, rel, self.timelineid, request_lsn); + } + let img = self.walredo_mgr.request_redo( + rel, + blknum, + request_lsn, + data.page_img.clone(), + data.records, + )?; + + Ok(img) + } + } + } } diff --git a/pageserver/src/layered_repository/inmemory_layer.rs b/pageserver/src/layered_repository/inmemory_layer.rs index d59a82b8cd..3c0024bca6 100644 --- a/pageserver/src/layered_repository/inmemory_layer.rs +++ b/pageserver/src/layered_repository/inmemory_layer.rs @@ -3,14 +3,14 @@ //! are held in a BTreeMap, and there's another BTreeMap to track the size of the relation. //! -use crate::layered_repository::storage_layer::{Layer, PageVersion, SegmentTag, RELISH_SEG_SIZE}; +use crate::layered_repository::storage_layer::{ + Layer, PageReconstructData, PageVersion, SegmentTag, RELISH_SEG_SIZE, +}; +use crate::layered_repository::LayeredTimeline; use crate::layered_repository::SnapshotLayer; -use crate::repository::WALRecord; -use crate::walredo::WalRedoManager; use crate::PageServerConf; use crate::{ZTenantId, ZTimelineId}; use anyhow::{bail, Result}; -use bytes::Bytes; use log::*; use std::collections::BTreeMap; use std::ops::Bound::Included; @@ -18,8 +18,6 @@ use std::sync::{Arc, Mutex}; use zenith_utils::lsn::Lsn; -static ZERO_PAGE: Bytes = Bytes::from_static(&[0u8; 8192]); - pub struct InMemoryLayer { conf: &'static PageServerConf, tenantid: ZTenantId, @@ -87,15 +85,13 @@ impl Layer for InMemoryLayer { } /// Look up given page in the cache. - fn get_page_at_lsn( + fn get_page_reconstruct_data( &self, - walredo_mgr: &dyn WalRedoManager, blknum: u32, lsn: Lsn, - ) -> Result { - // Scan the BTreeMap backwards, starting from the given entry. - let mut records: Vec = Vec::new(); - let mut page_img: Option = None; + reconstruct_data: &mut PageReconstructData, + ) -> Result> { + // Scan the BTreeMap backwards, starting from reconstruct_data.lsn. let mut need_base_image_lsn: Option = Some(lsn); assert!(self.seg.blknum_in_seg(blknum)); @@ -109,11 +105,11 @@ impl Layer for InMemoryLayer { .range((Included(&minkey), Included(&maxkey))); while let Some(((_blknum, entry_lsn), entry)) = iter.next_back() { if let Some(img) = &entry.page_image { - page_img = Some(img.clone()); + reconstruct_data.page_img = Some(img.clone()); need_base_image_lsn = None; break; } else if let Some(rec) = &entry.record { - records.push(rec.clone()); + reconstruct_data.records.push(rec.clone()); if rec.will_init { // This WAL record initializes the page, so no need to go further back need_base_image_lsn = None; @@ -129,71 +125,8 @@ impl Layer for InMemoryLayer { // release lock on 'page_versions' } - records.reverse(); - // If we needed a base image to apply the WAL records against, we should have found it in memory. - if let Some(lsn) = need_base_image_lsn { - if records.is_empty() { - // no records, and no base image. This can happen if PostgreSQL extends a relation - // but never writes the page. - // - // Would be nice to detect that situation better. 
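
In outline, the new division of labor is: the layer only collects a base image and the WAL records that apply on top of it (newest first, scanning backwards), and the timeline reverses the records and performs the redo. A standalone sketch with simplified stand-in types, not the actual pageserver code:

    struct WalRecord {
        will_init: bool,
    }

    struct PageReconstructData {
        records: Vec<WalRecord>,   // collected newest-first by the layer
        page_img: Option<Vec<u8>>, // base image, if one was found
    }

    // Roughly what reconstruct_page() does with the collected data.
    fn reconstruct(mut data: PageReconstructData) -> Vec<u8> {
        // Records were gathered scanning backwards; put them in apply order.
        data.records.reverse();

        let mut page = match data.page_img {
            Some(img) => img,
            None => {
                // With no base image, the oldest record must initialize the page.
                assert!(data.records.first().map_or(false, |r| r.will_init));
                vec![0u8; 8192]
            }
        };
        for rec in &data.records {
            apply_wal_record(&mut page, rec); // stand-in for the WAL redo manager
        }
        page
    }

    fn apply_wal_record(_page: &mut [u8], _rec: &WalRecord) {
        // Real redo is delegated to a separate WAL redo process; elided here.
    }

    fn main() {
        let data = PageReconstructData {
            records: vec![WalRecord { will_init: true }],
            page_img: None,
        };
        assert_eq!(reconstruct(data).len(), 8192);
    }
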
- warn!("Page {} blk {} at {} not found", self.seg.rel, blknum, lsn); - return Ok(ZERO_PAGE.clone()); - } - bail!( - "No base image found for page {} blk {} at {}/{}", - self.seg.rel, - blknum, - self.timelineid, - lsn - ); - } - - // If we have a page image, and no WAL, we're all set - if records.is_empty() { - if let Some(img) = page_img { - trace!( - "found page image for blk {} in {} at {}/{}, no WAL redo required", - blknum, - self.seg.rel, - self.timelineid, - lsn - ); - Ok(img) - } else { - // FIXME: this ought to be an error? - warn!("Page {} blk {} at {} not found", self.seg.rel, blknum, lsn); - Ok(ZERO_PAGE.clone()) - } - } else { - // We need to do WAL redo. - // - // If we don't have a base image, then the oldest WAL record better initialize - // the page - if page_img.is_none() && !records.first().unwrap().will_init { - // FIXME: this ought to be an error? - warn!( - "Base image for page {}/{} at {} not found, but got {} WAL records", - self.seg.rel, - blknum, - lsn, - records.len() - ); - Ok(ZERO_PAGE.clone()) - } else { - if page_img.is_some() { - trace!("found {} WAL records and a base image for blk {} in {} at {}/{}, performing WAL redo", records.len(), blknum, self.seg.rel, self.timelineid, lsn); - } else { - trace!("found {} WAL records that will init the page for blk {} in {} at {}/{}, performing WAL redo", records.len(), blknum, self.seg.rel, self.timelineid, lsn); - } - let img = walredo_mgr.request_redo(self.seg.rel, blknum, lsn, page_img, records)?; - - self.put_page_image(blknum, lsn, img.clone())?; - - Ok(img) - } - } + Ok(need_base_image_lsn) } /// Get size of the relation at given LSN @@ -320,7 +253,8 @@ impl Layer for InMemoryLayer { fn freeze( &self, cutoff_lsn: Lsn, - walredo_mgr: &dyn WalRedoManager, + // This is needed just to call materialize_page() + timeline: &LayeredTimeline, ) -> Result>> { info!( "freezing in memory layer for {} on timeline {} at {}", @@ -398,7 +332,7 @@ impl Layer for InMemoryLayer { let new_layer = Self::copy_snapshot( self.conf, - walredo_mgr, + timeline, &snapfile, self.timelineid, self.tenantid, @@ -465,7 +399,7 @@ impl InMemoryLayer { /// pub fn copy_snapshot( conf: &'static PageServerConf, - walredo_mgr: &dyn WalRedoManager, + timeline: &LayeredTimeline, src: &dyn Layer, timelineid: ZTimelineId, tenantid: ZTenantId, @@ -494,7 +428,7 @@ impl InMemoryLayer { } for blknum in startblk..(startblk + size) { - let img = src.get_page_at_lsn(walredo_mgr, blknum, lsn)?; + let img = timeline.materialize_page(seg, blknum, lsn, src)?; let pv = PageVersion { page_image: Some(img), record: None, diff --git a/pageserver/src/layered_repository/snapshot_layer.rs b/pageserver/src/layered_repository/snapshot_layer.rs index 44695aa28b..83733d6cdb 100644 --- a/pageserver/src/layered_repository/snapshot_layer.rs +++ b/pageserver/src/layered_repository/snapshot_layer.rs @@ -37,15 +37,14 @@ //! A snapshot file is constructed using the 'bookfile' crate. Each file consists of two //! parts: the page versions and the relation sizes. They are stored as separate chapters. //! 
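
The chapter layout itself comes from the bookfile crate. The sketch below only illustrates the idea with naive length-prefixed sections; it is not the bookfile format or API. REL_SIZES_CHAPTER is 2 in this module, and the page-versions chapter id of 1 is assumed here purely for illustration:

    use std::io::Write;

    // One "chapter": an id, a payload length, then the payload itself.
    fn write_chapter(out: &mut Vec<u8>, chapter_id: u64, payload: &[u8]) -> std::io::Result<()> {
        out.write_all(&chapter_id.to_be_bytes())?;
        out.write_all(&(payload.len() as u64).to_be_bytes())?;
        out.write_all(payload)
    }

    fn main() -> std::io::Result<()> {
        let mut file = Vec::new();
        // Chapter ids: 1 = page versions (assumed), 2 = REL_SIZES_CHAPTER.
        write_chapter(&mut file, 1, b"serialized page versions")?;
        write_chapter(&mut file, 2, b"serialized relation sizes")?;
        assert_eq!(file.len(), (8 + 8 + 24) + (8 + 8 + 25));
        Ok(())
    }
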
-use crate::layered_repository::storage_layer::ZERO_PAGE; -use crate::layered_repository::storage_layer::{Layer, PageVersion, SegmentTag}; +use crate::layered_repository::storage_layer::{ + Layer, PageReconstructData, PageVersion, SegmentTag, +}; +use crate::layered_repository::LayeredTimeline; use crate::relish::*; -use crate::repository::WALRecord; -use crate::walredo::WalRedoManager; use crate::PageServerConf; use crate::{ZTenantId, ZTimelineId}; use anyhow::{bail, Result}; -use bytes::Bytes; use log::*; use std::collections::BTreeMap; use std::fmt; @@ -270,15 +269,13 @@ impl Layer for SnapshotLayer { } /// Look up given page in the cache. - fn get_page_at_lsn( + fn get_page_reconstruct_data( &self, - walredo_mgr: &dyn WalRedoManager, blknum: u32, lsn: Lsn, - ) -> Result { + reconstruct_data: &mut PageReconstructData, + ) -> Result> { // Scan the BTreeMap backwards, starting from the given entry. - let mut records: Vec = Vec::new(); - let mut page_img: Option = None; let mut need_base_image_lsn: Option = Some(lsn); { let inner = self.load()?; @@ -289,11 +286,11 @@ impl Layer for SnapshotLayer { .range((Included(&minkey), Included(&maxkey))); while let Some(((_blknum, entry_lsn), entry)) = iter.next_back() { if let Some(img) = &entry.page_image { - page_img = Some(img.clone()); + reconstruct_data.page_img = Some(img.clone()); need_base_image_lsn = None; break; } else if let Some(rec) = &entry.record { - records.push(rec.clone()); + reconstruct_data.records.push(rec.clone()); if rec.will_init { // This WAL record initializes the page, so no need to go further back need_base_image_lsn = None; @@ -309,73 +306,8 @@ impl Layer for SnapshotLayer { // release lock on 'inner' } - records.reverse(); - // If we needed a base image to apply the WAL records against, we should have found it in memory. - if let Some(lsn) = need_base_image_lsn { - if records.is_empty() { - // no records, and no base image. This can happen if PostgreSQL extends a relation - // but never writes the page. - // - // Would be nice to detect that situation better. - warn!("Page {} blk {} at {} not found", self.seg.rel, blknum, lsn); - return Ok(ZERO_PAGE.clone()); - } - bail!( - "No base image found for page {} blk {} at {}/{}", - self.seg.rel, - blknum, - self.timelineid, - lsn - ); - } - - // If we have a page image, and no WAL, we're all set - if records.is_empty() { - if let Some(img) = page_img { - trace!( - "found page image for blk {} in {} at {}/{}, no WAL redo required", - blknum, - self.seg.rel, - self.timelineid, - lsn - ); - Ok(img) - } else { - // FIXME: this ought to be an error? - warn!("Page {} blk {} at {} not found", self.seg.rel, blknum, lsn); - Ok(ZERO_PAGE.clone()) - } - } else { - // We need to do WAL redo. - // - // If we don't have a base image, then the oldest WAL record better initialize - // the page - if page_img.is_none() && !records.first().unwrap().will_init { - // FIXME: this ought to be an error? 
- warn!( - "Base image for page {} blk {} at {} not found, but got {} WAL records", - self.seg.rel, - blknum, - lsn, - records.len() - ); - Ok(ZERO_PAGE.clone()) - } else { - if page_img.is_some() { - trace!("found {} WAL records and a base image for blk {} in {} at {}/{}, performing WAL redo", records.len(), blknum, self.seg.rel, self.timelineid, lsn); - } else { - trace!("found {} WAL records that will init the page for blk {} in {} at {}/{}, performing WAL redo", records.len(), blknum, self.seg.rel, self.timelineid, lsn); - } - let img = walredo_mgr.request_redo(self.seg.rel, blknum, lsn, page_img, records)?; - - // FIXME: Should we memoize the page image in memory, so that - // we wouldn't need to reconstruct it again, if it's requested again? - //self.put_page_image(blknum, lsn, img.clone())?; - - Ok(img) - } - } + Ok(need_base_image_lsn) } /// Get size of the relation at given LSN @@ -428,11 +360,7 @@ impl Layer for SnapshotLayer { bail!("cannot modify historical snapshot layer"); } - fn freeze( - &self, - _end_lsn: Lsn, - _walredo_mgr: &dyn WalRedoManager, - ) -> Result>> { + fn freeze(&self, _end_lsn: Lsn, _timeline: &LayeredTimeline) -> Result>> { bail!("cannot freeze historical snapshot layer"); } @@ -585,7 +513,7 @@ impl SnapshotLayer { Ok(inner) } - /// Create SnapshotLayers representing all files on dik + /// Create SnapshotLayers representing all files on disk /// // TODO: returning an Iterator would be more idiomatic pub fn list_snapshot_files( diff --git a/pageserver/src/layered_repository/storage_layer.rs b/pageserver/src/layered_repository/storage_layer.rs index 461d3cdd25..0d0ac6164a 100644 --- a/pageserver/src/layered_repository/storage_layer.rs +++ b/pageserver/src/layered_repository/storage_layer.rs @@ -2,9 +2,9 @@ //! Common traits and structs for layers //! +use crate::layered_repository::LayeredTimeline; use crate::relish::RelishTag; use crate::repository::WALRecord; -use crate::walredo::WalRedoManager; use crate::ZTimelineId; use anyhow::Result; use bytes::Bytes; @@ -14,8 +14,6 @@ use std::sync::Arc; use zenith_utils::lsn::Lsn; -pub static ZERO_PAGE: Bytes = Bytes::from_static(&[0u8; 8192]); - // Size of one segment in pages (10 MB) pub const RELISH_SEG_SIZE: u32 = 10 * 1024 * 1024 / 8192; @@ -73,12 +71,25 @@ pub struct PageVersion { pub record: Option, } +/// +/// Data needed to reconstruct a page version +/// +/// 'page_img' is the old base image of the page to start the WAL replay with. +/// It can be None, if the first WAL record initializes the page (will_init) +/// 'records' contains the records to apply over the base image. +/// +pub struct PageReconstructData { + pub records: Vec, + pub page_img: Option, +} + /// /// A Layer holds all page versions for one segment of a relish, in a range of LSNs. /// There are two kinds of layers, in-memory and snapshot layers. In-memory /// layers are used to ingest incoming WAL, and provide fast access /// to the recent page versions. Snaphot layers are stored on disk, and -/// are immutable. +/// are immutable. This trait presents the common functionality of +/// in-memory and snapshot layers. /// /// Each layer contains a full snapshot of the segment at the start /// LSN. In addition to that, it contains WAL (or more page images) @@ -99,18 +110,26 @@ pub trait Layer: Send + Sync { /// in-memory layers are always unfrozen. fn is_frozen(&self) -> bool; - // Functions that correspond to the Timeline trait functions. 
-
-    // Note that the 'blknum' is the offset of the page from the beginning
-    // of the *relish*, not the beginning of the segment. The requested
-    // 'blknum' must be covered by this segment.
-    fn get_page_at_lsn(
+    ///
+    /// Return data needed to reconstruct given page at LSN.
+    ///
+    /// It is up to the caller to collect more data from previous layer and
+    /// perform WAL redo, if necessary.
+    ///
+    /// If returns Some, the returned data is not complete. The caller needs
+    /// to continue with the returned 'lsn'.
+    ///
+    /// Note that the 'blknum' is the offset of the page from the beginning
+    /// of the *relish*, not the beginning of the segment. The requested
+    /// 'blknum' must be covered by this segment.
+    fn get_page_reconstruct_data(
         &self,
-        walredo_mgr: &dyn WalRedoManager,
         blknum: u32,
         lsn: Lsn,
-    ) -> Result;
+        reconstruct_data: &mut PageReconstructData,
+    ) -> Result>;

+    // Functions that correspond to the Timeline trait functions.
     fn get_seg_size(&self, lsn: Lsn) -> Result;

     fn get_seg_exists(&self, lsn: Lsn) -> Result;
@@ -150,8 +169,7 @@ pub trait Layer: Send + Sync {
     ///
     /// Returns new layers that replace this one.
     ///
-    fn freeze(&self, end_lsn: Lsn, walredo_mgr: &dyn WalRedoManager)
-        -> Result>>;
+    fn freeze(&self, end_lsn: Lsn, walredo_mgr: &LayeredTimeline) -> Result>>;

     /// Permanently delete this layer
     fn delete(&self) -> Result<()>;

From 45f641cabba30e91656786aa61decac3d1bee4de Mon Sep 17 00:00:00 2001
From: Heikki Linnakangas
Date: Tue, 17 Aug 2021 18:54:51 +0300
Subject: [PATCH 10/24] Handle last "open" layer specially in LayerMap.

There can be only one "open" layer for each segment. That's the last
one, implemented by InMemoryLayer, and it is the only one to which new
records can be appended.

Much of the code needed to distinguish between the last open layer and
other layers anyway, so make the distinction explicit in LayerMap.
---
 pageserver/src/layered_repository.rs          |  75 +++---
 .../src/layered_repository/inmemory_layer.rs  | 251 +++++++++---------
 .../src/layered_repository/layer_map.rs       | 228 +++++++++++++---
 .../src/layered_repository/snapshot_layer.rs  |  64 ++---
 .../src/layered_repository/storage_layer.rs   |  54 ----
 5 files changed, 380 insertions(+), 292 deletions(-)

diff --git a/pageserver/src/layered_repository.rs b/pageserver/src/layered_repository.rs
index bc99b9bd56..cc78086468 100644
--- a/pageserver/src/layered_repository.rs
+++ b/pageserver/src/layered_repository.rs
@@ -896,7 +896,7 @@ impl LayeredTimeline {
                 layer_rc.is_dropped(),
                 self.timelineid
             );
-            layers.insert(Arc::clone(layer_rc));
+            layers.insert_historic(Arc::clone(layer_rc));
         }

         Ok(())
@@ -942,7 +942,7 @@ impl LayeredTimeline {
         //
         // Do we have a layer on this timeline?
-        if let Some(layer) = layers.get(seg, lsn) {
+        if let Some(layer) = layers.get(&seg, lsn) {
             trace!(
                 "found layer in cache: {} {}-{}",
                 timeline.timelineid,
@@ -974,18 +974,19 @@ impl LayeredTimeline {
     ///
     /// Get a handle to the latest layer for appending.
     ///
-    fn get_layer_for_write(&self, seg: SegmentTag, lsn: Lsn) -> Result> {
+    fn get_layer_for_write(&self, seg: SegmentTag, lsn: Lsn) -> Result> {
+        let layers = self.layers.lock().unwrap();
+
         if lsn < self.last_valid_lsn.load() {
             bail!("cannot modify relation after advancing last_valid_lsn");
         }

-        // Look up the correct layer.
-        let layers = self.layers.lock().unwrap();
-        if let Some(layer) = layers.get(seg, lsn) {
-            // If it's writeable, good, return it.
-            if !layer.is_frozen() {
-                return Ok(Arc::clone(&layer));
+        // Do we have a layer open for writing already?
+ if let Some(layer) = layers.get_open(&seg) { + if layer.get_start_lsn() > lsn { + bail!("unexpected open layer in the future"); } + return Ok(layer); } // No (writeable) layer for this relation yet. Create one. @@ -1048,8 +1049,8 @@ impl LayeredTimeline { } let mut layers = self.layers.lock().unwrap(); - let layer_rc: Arc = Arc::new(layer); - layers.insert(Arc::clone(&layer_rc)); + let layer_rc: Arc = Arc::new(layer); + layers.insert_open(Arc::clone(&layer_rc)); Ok(layer_rc) } @@ -1126,34 +1127,32 @@ impl LayeredTimeline { // aggressive. Some kind of LRU policy would be appropriate. // - // It is not possible to modify a BTreeMap while you're iterating - // it. So we have to make a temporary copy, and iterate through that, - // while we modify the original. - let old_layers = layers.inner.clone(); - // Call freeze() on any unfrozen layers (that is, layers that haven't // been written to disk yet). // Call unload() on all frozen layers, to release memory. - for layer in old_layers.values() { - if !layer.is_frozen() { - let new_layers = layer.freeze(last_valid_lsn, &self)?; + let mut iter = layers.iter_open_layers(); + while let Some(layer) = iter.next() { + let (new_historic, new_open) = layer.freeze(last_valid_lsn, &self)?; - // replace this layer with the new layers that 'freeze' returned - layers.remove(&**layer); - for new_layer in new_layers { - trace!( - "freeze returned layer {} {}-{}", - new_layer.get_seg_tag(), - new_layer.get_start_lsn(), - new_layer.get_end_lsn() - ); - layers.insert(Arc::clone(&new_layer)); - } - } else { - layer.unload()?; + // replace this layer with the new layers that 'freeze' returned + // (removes it if new_open is None) + iter.replace(new_open); + + if let Some(historic) = new_historic { + trace!( + "freeze returned layer {} {}-{}", + historic.get_seg_tag(), + historic.get_start_lsn(), + historic.get_end_lsn() + ); + iter.insert_historic(historic); } } + for layer in layers.iter_historic_layers() { + layer.unload()?; + } + // Also save the metadata, with updated last_valid_lsn and last_record_lsn, to a // file in the timeline dir. The metadata reflects the last_valid_lsn as it was // when we *started* the checkpoint, so that after crash, the WAL receiver knows @@ -1214,10 +1213,14 @@ impl LayeredTimeline { self.timelineid, cutoff ); - let mut layers_to_remove: Vec> = Vec::new(); + let mut layers_to_remove: Vec> = Vec::new(); // Determine for each file if it needs to be retained - 'outer: for ((seg, _lsn), l) in layers.inner.iter() { + // FIXME: also scan open in-memory layers. Normally we cannot remove the + // latest layer of any seg, but if it was unlinked it's possible + 'outer: for l in layers.iter_historic_layers() { + let seg = l.get_seg_tag(); + if seg.rel.is_relation() { result.snapshot_relfiles_total += 1; } else { @@ -1279,7 +1282,7 @@ impl LayeredTimeline { l.get_end_lsn(), l.is_dropped() ); - layers_to_remove.push(Arc::clone(l)); + layers_to_remove.push(Arc::clone(&l)); } // Actually delete the layers from disk and remove them from the map. @@ -1287,7 +1290,7 @@ impl LayeredTimeline { // while iterating it. 
BTreeMap::retain() would be another option) for doomed_layer in layers_to_remove { doomed_layer.delete()?; - layers.remove(&*doomed_layer); + layers.remove_historic(&*doomed_layer); if doomed_layer.is_dropped() { if doomed_layer.get_seg_tag().rel.is_relation() { diff --git a/pageserver/src/layered_repository/inmemory_layer.rs b/pageserver/src/layered_repository/inmemory_layer.rs index 3c0024bca6..70a7b7216e 100644 --- a/pageserver/src/layered_repository/inmemory_layer.rs +++ b/pageserver/src/layered_repository/inmemory_layer.rs @@ -2,15 +2,16 @@ //! An in-memory layer stores recently received page versions in memory. The page versions //! are held in a BTreeMap, and there's another BTreeMap to track the size of the relation. //! - use crate::layered_repository::storage_layer::{ Layer, PageReconstructData, PageVersion, SegmentTag, RELISH_SEG_SIZE, }; use crate::layered_repository::LayeredTimeline; use crate::layered_repository::SnapshotLayer; +use crate::repository::WALRecord; use crate::PageServerConf; use crate::{ZTenantId, ZTimelineId}; use anyhow::{bail, Result}; +use bytes::Bytes; use log::*; use std::collections::BTreeMap; use std::ops::Bound::Included; @@ -53,10 +54,6 @@ pub struct InMemoryLayerInner { } impl Layer for InMemoryLayer { - fn is_frozen(&self) -> bool { - return false; - } - fn get_timeline_id(&self) -> ZTimelineId { return self.timelineid; } @@ -159,12 +156,69 @@ impl Layer for InMemoryLayer { // Otherwise, it exists Ok(true) } +} + +impl InMemoryLayer { + /// + /// Create a new, empty, in-memory layer + /// + pub fn create( + conf: &'static PageServerConf, + timelineid: ZTimelineId, + tenantid: ZTenantId, + seg: SegmentTag, + start_lsn: Lsn, + ) -> Result { + trace!( + "initializing new empty InMemoryLayer for writing {} on timeline {} at {}", + seg, + timelineid, + start_lsn + ); + + Ok(InMemoryLayer { + conf, + timelineid, + tenantid, + seg, + start_lsn, + inner: Mutex::new(InMemoryLayerInner { + drop_lsn: None, + page_versions: BTreeMap::new(), + segsizes: BTreeMap::new(), + }), + }) + } // Write operations + /// Remember new page version, as a WAL record over previous version + pub fn put_wal_record(&self, blknum: u32, rec: WALRecord) -> Result<()> { + self.put_page_version( + blknum, + rec.lsn, + PageVersion { + page_image: None, + record: Some(rec), + }, + ) + } + + /// Remember new page version, as a full page image + pub fn put_page_image(&self, blknum: u32, lsn: Lsn, img: Bytes) -> Result<()> { + self.put_page_version( + blknum, + lsn, + PageVersion { + page_image: Some(img), + record: None, + }, + ) + } + /// Common subroutine of the public put_wal_record() and put_page_image() functions. 
/// Adds the page version to the in-memory tree - fn put_page_version(&self, blknum: u32, lsn: Lsn, pv: PageVersion) -> Result<()> { + pub fn put_page_version(&self, blknum: u32, lsn: Lsn, pv: PageVersion) -> Result<()> { assert!(self.seg.blknum_in_seg(blknum)); trace!( @@ -215,7 +269,7 @@ impl Layer for InMemoryLayer { } /// Remember that the relation was truncated at given LSN - fn put_truncation(&self, lsn: Lsn, segsize: u32) -> anyhow::Result<()> { + pub fn put_truncation(&self, lsn: Lsn, segsize: u32) -> anyhow::Result<()> { let mut inner = self.inner.lock().unwrap(); let old = inner.segsizes.insert(lsn, segsize); @@ -227,8 +281,8 @@ impl Layer for InMemoryLayer { Ok(()) } - /// Remember that the relation was dropped at given LSN - fn put_unlink(&self, lsn: Lsn) -> anyhow::Result<()> { + /// Remember that the segment was dropped at given LSN + pub fn put_unlink(&self, lsn: Lsn) -> anyhow::Result<()> { let mut inner = self.inner.lock().unwrap(); assert!(inner.drop_lsn.is_none()); @@ -239,6 +293,63 @@ impl Layer for InMemoryLayer { Ok(()) } + /// + /// Initialize a new InMemoryLayer for, by copying the state at the given + /// point in time from given existing layer. + /// + pub fn copy_snapshot( + conf: &'static PageServerConf, + timeline: &LayeredTimeline, + src: &dyn Layer, + timelineid: ZTimelineId, + tenantid: ZTenantId, + lsn: Lsn, + ) -> Result { + trace!( + "initializing new InMemoryLayer for writing {} on timeline {} at {}", + src.get_seg_tag(), + timelineid, + lsn + ); + let mut page_versions = BTreeMap::new(); + let mut segsizes = BTreeMap::new(); + + let seg = src.get_seg_tag(); + + let startblk; + let size; + if seg.rel.is_blocky() { + size = src.get_seg_size(lsn)?; + segsizes.insert(lsn, size); + startblk = seg.segno * RELISH_SEG_SIZE; + } else { + size = 1; + startblk = 0; + } + + for blknum in startblk..(startblk + size) { + let img = timeline.materialize_page(seg, blknum, lsn, src)?; + let pv = PageVersion { + page_image: Some(img), + record: None, + }; + page_versions.insert((blknum, lsn), pv); + } + + Ok(InMemoryLayer { + conf, + timelineid, + tenantid, + seg: src.get_seg_tag(), + start_lsn: lsn, + inner: Mutex::new(InMemoryLayerInner { + drop_lsn: None, + page_versions: page_versions, + segsizes: segsizes, + }), + }) + } + /// /// Write the this in-memory layer to disk, as a snapshot layer. /// @@ -250,12 +361,12 @@ impl Layer for InMemoryLayer { /// in-memory layer containing those page versions. The caller replaces /// this layer with the returned layers in the layer map. 
/// - fn freeze( + pub fn freeze( &self, cutoff_lsn: Lsn, // This is needed just to call materialize_page() timeline: &LayeredTimeline, - ) -> Result>> { + ) -> Result<(Option>, Option>)> { info!( "freezing in memory layer for {} on timeline {} at {}", self.seg, self.timelineid, cutoff_lsn @@ -323,14 +434,13 @@ impl Layer for InMemoryLayer { before_page_versions, before_segsizes, )?; - let mut result: Vec> = Vec::new(); - // If there were any page versions after the cutoff, initialize a new in-memory layer - // to hold them - if !after_segsizes.is_empty() || !after_page_versions.is_empty() { + // If there were any "new" page versions, initialize a new in-memory layer to hold + // them + let new_open = if !after_segsizes.is_empty() || !after_page_versions.is_empty() { info!("created new in-mem layer for {} {}-", self.seg, end_lsn); - let new_layer = Self::copy_snapshot( + let new_open = Self::copy_snapshot( self.conf, timeline, &snapfile, @@ -338,116 +448,19 @@ impl Layer for InMemoryLayer { self.tenantid, end_lsn, )?; - let mut new_inner = new_layer.inner.lock().unwrap(); + let mut new_inner = new_open.inner.lock().unwrap(); new_inner.page_versions.append(&mut after_page_versions); new_inner.segsizes.append(&mut after_segsizes); drop(new_inner); - result.push(Arc::new(new_layer)); - } - result.push(Arc::new(snapfile)); - - Ok(result) - } - - fn delete(&self) -> Result<()> { - // Nothing to do. When the reference is dropped, the memory is released. - Ok(()) - } - - fn unload(&self) -> Result<()> { - // cannot unload in-memory layer. Freeze instead - Ok(()) - } -} - -impl InMemoryLayer { - /// - /// Create a new, empty, in-memory layer - /// - pub fn create( - conf: &'static PageServerConf, - timelineid: ZTimelineId, - tenantid: ZTenantId, - seg: SegmentTag, - start_lsn: Lsn, - ) -> Result { - trace!( - "initializing new empty InMemoryLayer for writing {} on timeline {} at {}", - seg, - timelineid, - start_lsn - ); - - Ok(InMemoryLayer { - conf, - timelineid, - tenantid, - seg, - start_lsn, - inner: Mutex::new(InMemoryLayerInner { - drop_lsn: None, - page_versions: BTreeMap::new(), - segsizes: BTreeMap::new(), - }), - }) - } - - /// - /// Initialize a new InMemoryLayer for, by copying the state at the given - /// point in time from given existing layer. 
- /// - pub fn copy_snapshot( - conf: &'static PageServerConf, - timeline: &LayeredTimeline, - src: &dyn Layer, - timelineid: ZTimelineId, - tenantid: ZTenantId, - lsn: Lsn, - ) -> Result { - trace!( - "initializing new InMemoryLayer for writing {} on timeline {} at {}", - src.get_seg_tag(), - timelineid, - lsn - ); - let mut page_versions = BTreeMap::new(); - let mut segsizes = BTreeMap::new(); - - let seg = src.get_seg_tag(); - - let startblk; - let size; - if seg.rel.is_blocky() { - size = src.get_seg_size(lsn)?; - segsizes.insert(lsn, size); - startblk = seg.segno * RELISH_SEG_SIZE; + Some(Arc::new(new_open)) } else { - size = 1; - startblk = 0; - } + None + }; - for blknum in startblk..(startblk + size) { - let img = timeline.materialize_page(seg, blknum, lsn, src)?; - let pv = PageVersion { - page_image: Some(img), - record: None, - }; - page_versions.insert((blknum, lsn), pv); - } + let new_historic = Some(Arc::new(snapfile)); - Ok(InMemoryLayer { - conf, - timelineid, - tenantid, - seg: src.get_seg_tag(), - start_lsn: lsn, - inner: Mutex::new(InMemoryLayerInner { - drop_lsn: None, - page_versions: page_versions, - segsizes: segsizes, - }), - }) + Ok((new_historic, new_open)) } /// debugging function to print out the contents of the layer diff --git a/pageserver/src/layered_repository/layer_map.rs b/pageserver/src/layered_repository/layer_map.rs index 37d2bea8f5..0cf9f93419 100644 --- a/pageserver/src/layered_repository/layer_map.rs +++ b/pageserver/src/layered_repository/layer_map.rs @@ -10,19 +10,29 @@ //! use crate::layered_repository::storage_layer::{Layer, SegmentTag}; +use crate::layered_repository::{InMemoryLayer, SnapshotLayer}; use crate::relish::*; use anyhow::Result; use log::*; -use std::collections::BTreeMap; use std::collections::HashSet; +use std::collections::{BTreeMap, HashMap}; use std::ops::Bound::Included; use std::sync::Arc; use zenith_utils::lsn::Lsn; -/// LayerMap is a BTreeMap keyed by SegmentTag and the layer's start LSN. -/// It provides a couple of convenience functions over a plain BTreeMap +/// +/// LayerMap tracks what layers exist or a timeline. The last layer that is +/// open for writes is always an InMemoryLayer, and is tracked separately +/// because there can be only one for each segment. The older layers, +/// stored on disk, are kept in a BTreeMap keyed by the layer's start LSN. +/// pub struct LayerMap { - pub inner: BTreeMap<(SegmentTag, Lsn), Arc>, + segs: HashMap, +} + +struct SegEntry { + pub open: Option>, + pub historic: BTreeMap>, } impl LayerMap { @@ -32,40 +42,101 @@ impl LayerMap { /// given LSN, or precedes the given LSN, it is returned. In other words, /// you don't need to know the exact start LSN of the layer. /// - pub fn get(&self, tag: SegmentTag, lsn: Lsn) -> Option> { - let startkey = (tag, Lsn(0)); - let endkey = (tag, lsn); + pub fn get(&self, tag: &SegmentTag, lsn: Lsn) -> Option> { + let segentry = self.segs.get(tag)?; - if let Some((_k, v)) = self - .inner - .range((Included(startkey), Included(endkey))) + if let Some(open) = &segentry.open { + if open.get_start_lsn() <= lsn { + let x: Arc = Arc::clone(&open) as _; + return Some(x); + } + } + + if let Some((_k, v)) = segentry + .historic + .range((Included(Lsn(0)), Included(lsn))) .next_back() { - Some(Arc::clone(v)) + let x: Arc = Arc::clone(&v) as _; + Some(x) } else { None } } - pub fn insert(&mut self, layer: Arc) { - let seg = layer.get_seg_tag(); - let start_lsn = layer.get_start_lsn(); + /// + /// Get the open layer for given segment for writing. 
Or None if no open + /// layer exists. + /// + pub fn get_open(&self, tag: &SegmentTag) -> Option> { + let segentry = self.segs.get(tag)?; - self.inner.insert((seg, start_lsn), Arc::clone(&layer)); + if let Some(open) = &segentry.open { + Some(Arc::clone(open)) + } else { + None + } } - pub fn remove(&mut self, layer: &dyn Layer) { - let seg = layer.get_seg_tag(); + /// + /// Insert an open in-memory layer + /// + pub fn insert_open(&mut self, layer: Arc) { + let tag = layer.get_seg_tag(); + + if let Some(segentry) = self.segs.get_mut(&tag) { + if let Some(_old) = &segentry.open { + // FIXME: shouldn't exist, but check + } + segentry.open = Some(layer); + } else { + let segentry = SegEntry { + open: Some(layer), + historic: BTreeMap::new(), + }; + self.segs.insert(tag, segentry); + } + } + + /// + /// Insert an on-disk layer + /// + pub fn insert_historic(&mut self, layer: Arc) { + let tag = layer.get_seg_tag(); let start_lsn = layer.get_start_lsn(); - self.inner.remove(&(seg, start_lsn)); + if let Some(segentry) = self.segs.get_mut(&tag) { + segentry.historic.insert(start_lsn, layer); + } else { + let mut historic = BTreeMap::new(); + historic.insert(start_lsn, layer); + + let segentry = SegEntry { + open: None, + historic, + }; + self.segs.insert(tag, segentry); + } + } + + /// + /// Remove an on-disk layer from the map. + /// + /// This should be called when the corresponding file on disk has been deleted. + /// + pub fn remove_historic(&mut self, layer: &SnapshotLayer) { + let tag = layer.get_seg_tag(); + let start_lsn = layer.get_start_lsn(); + + if let Some(segentry) = self.segs.get_mut(&tag) { + segentry.historic.remove(&start_lsn); + } } pub fn list_rels(&self, spcnode: u32, dbnode: u32) -> Result> { let mut rels: HashSet = HashSet::new(); - // Scan the timeline directory to get all rels in this timeline. - for ((seg, _lsn), _l) in self.inner.iter() { + for (seg, _entry) in self.segs.iter() { if let RelishTag::Relation(reltag) = seg.rel { // FIXME: skip if it was dropped before the requested LSN. But there is no // LSN argument @@ -84,7 +155,7 @@ impl LayerMap { let mut rels: HashSet = HashSet::new(); // Scan the timeline directory to get all rels in this timeline. - for ((seg, _lsn), _l) in self.inner.iter() { + for (seg, _entry) in self.segs.iter() { // FIXME: skip if it was dropped before the requested LSN. if let RelishTag::Relation(_) = seg.rel { @@ -97,35 +168,114 @@ impl LayerMap { /// Is there a newer layer for given segment? 
pub fn newer_layer_exists(&self, seg: SegmentTag, lsn: Lsn) -> bool { - let startkey = (seg, lsn); - let endkey = (seg, Lsn(u64::MAX)); - - for ((_newer_seg, newer_lsn), layer) in - self.inner.range((Included(startkey), Included(endkey))) - { - if layer.get_end_lsn() > lsn { - trace!( - "found later layer for {}, {} {}-{}", - seg, - lsn, - newer_lsn, - layer.get_end_lsn() - ); + if let Some(segentry) = self.segs.get(&seg) { + if let Some(_open) = &segentry.open { return true; - } else { - trace!("found singleton layer for {}, {} {}", seg, lsn, newer_lsn); - continue; + } + + for (newer_lsn, layer) in segentry + .historic + .range((Included(lsn), Included(Lsn(u64::MAX)))) + { + if layer.get_end_lsn() > lsn { + trace!( + "found later layer for {}, {} {}-{}", + seg, + lsn, + newer_lsn, + layer.get_end_lsn() + ); + return true; + } else { + trace!("found singleton layer for {}, {} {}", seg, lsn, newer_lsn); + continue; + } } } trace!("no later layer found for {}, {}", seg, lsn); false } + + pub fn iter_open_layers(&mut self) -> OpenLayerIter { + OpenLayerIter { + last: None, + segiter: self.segs.iter_mut(), + } + } + + pub fn iter_historic_layers(&self) -> HistoricLayerIter { + HistoricLayerIter { + segiter: self.segs.iter(), + iter: None, + } + } } impl Default for LayerMap { fn default() -> Self { LayerMap { - inner: BTreeMap::new(), + segs: HashMap::new(), + } + } +} + +pub struct OpenLayerIter<'a> { + last: Option<&'a mut SegEntry>, + + segiter: std::collections::hash_map::IterMut<'a, SegmentTag, SegEntry>, +} + +impl<'a> OpenLayerIter<'a> { + pub fn replace(&mut self, replacement: Option>) { + let segentry = self.last.as_mut().unwrap(); + segentry.open = replacement; + } + + pub fn insert_historic(&mut self, new_layer: Arc) { + let start_lsn = new_layer.get_start_lsn(); + + let segentry = self.last.as_mut().unwrap(); + segentry.historic.insert(start_lsn, new_layer); + } +} + +impl<'a> Iterator for OpenLayerIter<'a> { + type Item = Arc; + + fn next(&mut self) -> std::option::Option<::Item> { + while let Some((_seg, entry)) = self.segiter.next() { + if let Some(open) = &entry.open { + let op = Arc::clone(&open); + self.last = Some(entry); + return Some(op); + } + } + self.last = None; + None + } +} + +pub struct HistoricLayerIter<'a> { + segiter: std::collections::hash_map::Iter<'a, SegmentTag, SegEntry>, + iter: Option>>, +} + +impl<'a> Iterator for HistoricLayerIter<'a> { + type Item = Arc; + + fn next(&mut self) -> std::option::Option<::Item> { + loop { + if let Some(x) = &mut self.iter { + if let Some(x) = x.next() { + return Some(Arc::clone(&*x.1)); + } + } + if let Some(seg) = self.segiter.next() { + self.iter = Some(seg.1.historic.iter()); + continue; + } else { + return None; + } } } } diff --git a/pageserver/src/layered_repository/snapshot_layer.rs b/pageserver/src/layered_repository/snapshot_layer.rs index 83733d6cdb..e0f4e77995 100644 --- a/pageserver/src/layered_repository/snapshot_layer.rs +++ b/pageserver/src/layered_repository/snapshot_layer.rs @@ -40,7 +40,6 @@ use crate::layered_repository::storage_layer::{ Layer, PageReconstructData, PageVersion, SegmentTag, }; -use crate::layered_repository::LayeredTimeline; use crate::relish::*; use crate::PageServerConf; use crate::{ZTenantId, ZTimelineId}; @@ -244,10 +243,6 @@ pub struct SnapshotLayerInner { } impl Layer for SnapshotLayer { - fn is_frozen(&self) -> bool { - return true; - } - fn get_timeline_id(&self) -> ZTimelineId { return self.timelineid; } @@ -344,43 +339,6 @@ impl Layer for SnapshotLayer { // Otherwise, it exists. 
Ok(true) } - - // Unsupported write operations - fn put_page_version(&self, blknum: u32, lsn: Lsn, _pv: PageVersion) -> Result<()> { - panic!( - "cannot modify historical snapshot layer, {} blk {} at {}/{}, {}-{}", - self.seg, blknum, self.timelineid, lsn, self.start_lsn, self.end_lsn - ); - } - fn put_truncation(&self, _lsn: Lsn, _relsize: u32) -> anyhow::Result<()> { - bail!("cannot modify historical snapshot layer"); - } - - fn put_unlink(&self, _lsn: Lsn) -> anyhow::Result<()> { - bail!("cannot modify historical snapshot layer"); - } - - fn freeze(&self, _end_lsn: Lsn, _timeline: &LayeredTimeline) -> Result>> { - bail!("cannot freeze historical snapshot layer"); - } - - fn delete(&self) -> Result<()> { - // delete underlying file - fs::remove_file(self.path())?; - Ok(()) - } - - /// - /// Release most of the memory used by this layer. If it's accessed again later, - /// it will need to be loaded back. - /// - fn unload(&self) -> Result<()> { - let mut inner = self.inner.lock().unwrap(); - inner.page_versions = BTreeMap::new(); - inner.relsizes = BTreeMap::new(); - inner.loaded = false; - Ok(()) - } } impl SnapshotLayer { @@ -520,10 +478,10 @@ impl SnapshotLayer { conf: &'static PageServerConf, timelineid: ZTimelineId, tenantid: ZTenantId, - ) -> Result>> { + ) -> Result>> { let path = conf.timeline_path(&timelineid, &tenantid); - let mut snapfiles: Vec> = Vec::new(); + let mut snapfiles: Vec> = Vec::new(); for direntry in fs::read_dir(path)? { let fname = direntry?.file_name(); let fname = fname.to_str().unwrap(); @@ -550,6 +508,24 @@ impl SnapshotLayer { return Ok(snapfiles); } + pub fn delete(&self) -> Result<()> { + // delete underlying file + fs::remove_file(self.path())?; + Ok(()) + } + + /// + /// Release most of the memory used by this layer. If it's accessed again later, + /// it will need to be loaded back. + /// + pub fn unload(&self) -> Result<()> { + let mut inner = self.inner.lock().unwrap(); + inner.page_versions = BTreeMap::new(); + inner.relsizes = BTreeMap::new(); + inner.loaded = false; + Ok(()) + } + /// debugging function to print out the contents of the layer #[allow(unused)] pub fn dump(&self) -> String { diff --git a/pageserver/src/layered_repository/storage_layer.rs b/pageserver/src/layered_repository/storage_layer.rs index 0d0ac6164a..0a181a1aac 100644 --- a/pageserver/src/layered_repository/storage_layer.rs +++ b/pageserver/src/layered_repository/storage_layer.rs @@ -2,7 +2,6 @@ //! Common traits and structs for layers //! -use crate::layered_repository::LayeredTimeline; use crate::relish::RelishTag; use crate::repository::WALRecord; use crate::ZTimelineId; @@ -10,7 +9,6 @@ use anyhow::Result; use bytes::Bytes; use serde::{Deserialize, Serialize}; use std::fmt; -use std::sync::Arc; use zenith_utils::lsn::Lsn; @@ -104,12 +102,6 @@ pub trait Layer: Send + Sync { fn get_end_lsn(&self) -> Lsn; fn is_dropped(&self) -> bool; - /// Frozen layers are stored on disk, an cannot accept cannot accept new WAL - /// records, whereas an unfrozen layer can still be modified, but is not - /// durable in case of a crash. Snapshot layers are always frozen, and - /// in-memory layers are always unfrozen. - fn is_frozen(&self) -> bool; - /// /// Return data needed to reconstruct given page at LSN. 
/// @@ -133,50 +125,4 @@ pub trait Layer: Send + Sync { fn get_seg_size(&self, lsn: Lsn) -> Result; fn get_seg_exists(&self, lsn: Lsn) -> Result; - - fn put_page_version(&self, blknum: u32, lsn: Lsn, pv: PageVersion) -> Result<()>; - - fn put_truncation(&self, lsn: Lsn, relsize: u32) -> anyhow::Result<()>; - - fn put_unlink(&self, lsn: Lsn) -> anyhow::Result<()>; - - /// Remember new page version, as a WAL record over previous version - fn put_wal_record(&self, blknum: u32, rec: WALRecord) -> Result<()> { - self.put_page_version( - blknum, - rec.lsn, - PageVersion { - page_image: None, - record: Some(rec), - }, - ) - } - - /// Remember new page version, as a full page image - fn put_page_image(&self, blknum: u32, lsn: Lsn, img: Bytes) -> Result<()> { - self.put_page_version( - blknum, - lsn, - PageVersion { - page_image: Some(img), - record: None, - }, - ) - } - - /// - /// Split off an immutable layer from existing layer. - /// - /// Returns new layers that replace this one. - /// - fn freeze(&self, end_lsn: Lsn, walredo_mgr: &LayeredTimeline) -> Result>>; - - /// Permanently delete this layer - fn delete(&self) -> Result<()>; - - /// Try to release memory used by this layer. This is currently - /// only used by snapshot layers, to free the copy of the file - /// from memory. (TODO: a smarter, more granular caching scheme - /// would be nice) - fn unload(&self) -> Result<()>; } From e35a5aa550c8520e7ba4b66e5f8ae979502cf87a Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Mon, 16 Aug 2021 13:38:22 +0300 Subject: [PATCH 11/24] WIP: track mem usage --- .../src/layered_repository/inmemory_layer.rs | 14 ++++++++++++ .../src/layered_repository/storage_layer.rs | 22 +++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/pageserver/src/layered_repository/inmemory_layer.rs b/pageserver/src/layered_repository/inmemory_layer.rs index 70a7b7216e..cc356efaed 100644 --- a/pageserver/src/layered_repository/inmemory_layer.rs +++ b/pageserver/src/layered_repository/inmemory_layer.rs @@ -51,6 +51,11 @@ pub struct InMemoryLayerInner { /// `segsizes` tracks the size of the segment at different points in time. /// segsizes: BTreeMap, + + /// + /// Memory usage + /// + mem_used: usize, } impl Layer for InMemoryLayer { @@ -186,6 +191,7 @@ impl InMemoryLayer { drop_lsn: None, page_versions: BTreeMap::new(), segsizes: BTreeMap::new(), + mem_used: 0, }), }) } @@ -228,6 +234,9 @@ impl InMemoryLayer { self.timelineid, lsn ); + + let mem_size = pv.get_mem_size(); + let mut inner = self.inner.lock().unwrap(); let old = inner.page_versions.insert((blknum, lsn), pv); @@ -238,6 +247,8 @@ impl InMemoryLayer { "Page version of rel {} blk {} at {} already exists", self.seg.rel, blknum, lsn ); + } else { + inner.mem_used += mem_size; } // Also update the relation size, if this extended the relation. 
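The accounting above follows one rule: estimate the size before inserting, and only charge it to mem_used when the (blknum, lsn) slot was previously empty, so a duplicate page version does not inflate the counter. A hedged sketch of that pattern in isolation (types simplified; the real code uses PageVersion::get_mem_size(), defined later in this patch):

    use std::collections::BTreeMap;

    fn charge(
        page_versions: &mut BTreeMap<(u32, u64), Vec<u8>>,
        mem_used: &mut usize,
        blknum: u32,
        lsn: u64,
        pv: Vec<u8>,
    ) {
        // size must be captured before the map takes ownership of pv
        let mem_size = pv.len();
        if page_versions.insert((blknum, lsn), pv).is_none() {
            *mem_used += mem_size;
        }
        // on a duplicate, the old version is replaced but the counter is left
        // alone, matching the warn-and-continue behavior above
    }

    fn main() {
        let mut versions = BTreeMap::new();
        let mut mem_used = 0;
        charge(&mut versions, &mut mem_used, 1, 100, vec![0u8; 8192]);
        charge(&mut versions, &mut mem_used, 1, 100, vec![0u8; 8192]); // duplicate
        assert_eq!(mem_used, 8192); // counted once
    }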
@@ -313,6 +324,7 @@ impl InMemoryLayer { ); let mut page_versions = BTreeMap::new(); let mut segsizes = BTreeMap::new(); + let mut mem_used = 0; let seg = src.get_seg_tag(); @@ -333,6 +345,7 @@ impl InMemoryLayer { page_image: Some(img), record: None, }; + mem_used += pv.get_mem_size(); page_versions.insert((blknum, lsn), pv); } @@ -346,6 +359,7 @@ impl InMemoryLayer { drop_lsn: None, page_versions: page_versions, segsizes: segsizes, + mem_used: mem_used, }), }) } diff --git a/pageserver/src/layered_repository/storage_layer.rs b/pageserver/src/layered_repository/storage_layer.rs index 0a181a1aac..142bf76b96 100644 --- a/pageserver/src/layered_repository/storage_layer.rs +++ b/pageserver/src/layered_repository/storage_layer.rs @@ -69,6 +69,28 @@ pub struct PageVersion { pub record: Option, } +impl PageVersion { + pub fn get_mem_size(&self) -> usize { + let mut sz = 0; + + // every page version has some fixed overhead. + sz += 16; + + if let Some(img) = &self.page_image { + sz += img.len(); + } + + if let Some(rec) = &self.record { + sz += rec.rec.len(); + + // Some per-record overhead. Not very accurate, but close enough + sz += 32; + } + + sz + } +} + /// /// Data needed to reconstruct a page version /// From a389c2ed7f189075f93671673637e372195ad00a Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Mon, 16 Aug 2021 14:54:20 +0300 Subject: [PATCH 12/24] WIP: Track oldest open layer --- pageserver/src/layered_repository.rs | 79 ++++++++------- .../src/layered_repository/inmemory_layer.rs | 63 +++++++----- .../src/layered_repository/layer_map.rs | 98 ++++++++++--------- 3 files changed, 137 insertions(+), 103 deletions(-) diff --git a/pageserver/src/layered_repository.rs b/pageserver/src/layered_repository.rs index cc78086468..e6ff4ec32c 100644 --- a/pageserver/src/layered_repository.rs +++ b/pageserver/src/layered_repository.rs @@ -54,14 +54,14 @@ static ZERO_PAGE: Bytes = Bytes::from_static(&[0u8; 8192]); // Timeout when waiting for WAL receiver to catch up to an LSN given in a GetPage@LSN call. static TIMEOUT: Duration = Duration::from_secs(60); -// Perform a checkpoint in the GC thread, when the LSN has advanced this much since -// last checkpoint. This puts a backstop on how much WAL needs to be re-digested if -// the page server is restarted. +// Flush out an inmemory layer, if it's holding WAL older than +// this. This puts a backstop on how much WAL needs to be re-digested +// if the page server is restarted. // // FIXME: This current value is very low. I would imagine something like 1 GB or 10 GB // would be more appropriate. But a low value forces the code to be exercised more, // which is good for now to trigger bugs. -static CHECKPOINT_INTERVAL: u64 = 16 * 1024 * 1024; +static OLDEST_INMEM_DISTANCE: u64 = 16 * 1024 * 1024; // Metrics collected on operations on the storage repository. lazy_static! { @@ -261,11 +261,11 @@ impl LayeredRepository { { let timelines = self.timelines.lock().unwrap(); for (_timelineid, timeline) in timelines.iter() { - let distance = u64::from(timeline.last_valid_lsn.load()) - - u64::from(timeline.last_checkpoint_lsn.load()); - if distance > CHECKPOINT_INTERVAL { - timeline.checkpoint()?; - } + STORAGE_TIME + .with_label_values(&["checkpoint_timed"]) + .observe_closure_duration( + || timeline.checkpoint_internal(false) + )? 
} // release lock on 'timelines' } @@ -456,7 +456,7 @@ pub struct LayeredTimeline { last_record_lsn: AtomicLsn, prev_record_lsn: AtomicLsn, - last_checkpoint_lsn: AtomicLsn, + oldest_pending_lsn: AtomicLsn, // Parent timeline that this timeline was branched from, and the LSN // of the branch point. @@ -774,8 +774,8 @@ impl Timeline for LayeredTimeline { /// metrics collection. fn checkpoint(&self) -> Result<()> { STORAGE_TIME - .with_label_values(&["checkpoint"]) - .observe_closure_duration(|| self.checkpoint_internal()) + .with_label_values(&["checkpoint_force"]) + .observe_closure_duration(|| self.checkpoint_internal(true)) } /// Remember that WAL has been received and added to the page cache up to the given LSN @@ -867,7 +867,7 @@ impl LayeredTimeline { last_valid_lsn: SeqWait::new(metadata.last_valid_lsn), last_record_lsn: AtomicLsn::new(metadata.last_record_lsn.0), prev_record_lsn: AtomicLsn::new(metadata.prev_record_lsn.0), - last_checkpoint_lsn: AtomicLsn::new(metadata.last_valid_lsn.0), + oldest_pending_lsn: AtomicLsn::new(metadata.last_valid_lsn.0), ancestor_timeline: ancestor, ancestor_lsn: metadata.ancestor_lsn, @@ -1003,23 +1003,23 @@ impl LayeredTimeline { let layer; if let Some((prev_layer, _prev_lsn)) = self.get_layer_for_read(seg, lsn)? { // Create new entry after the previous one. - let lsn; + let start_lsn; if prev_layer.get_timeline_id() != self.timelineid { // First modification on this timeline - lsn = self.ancestor_lsn; + start_lsn = self.ancestor_lsn; trace!( "creating file for write for {} at branch point {}/{}", seg, self.timelineid, - lsn + start_lsn ); } else { - lsn = prev_layer.get_end_lsn(); + start_lsn = prev_layer.get_end_lsn(); trace!( "creating file for write for {} after previous layer {}/{}", seg, self.timelineid, - lsn + start_lsn ); } trace!( @@ -1034,6 +1034,7 @@ impl LayeredTimeline { &*prev_layer, self.timelineid, self.tenantid, + start_lsn, lsn, )?; } else { @@ -1045,7 +1046,7 @@ impl LayeredTimeline { lsn ); - layer = InMemoryLayer::create(self.conf, self.timelineid, self.tenantid, seg, lsn)?; + layer = InMemoryLayer::create(self.conf, self.timelineid, self.tenantid, seg, lsn, lsn)?; } let mut layers = self.layers.lock().unwrap(); @@ -1088,7 +1089,7 @@ impl LayeredTimeline { /// /// NOTE: This has nothing to do with checkpoint in PostgreSQL. We don't /// know anything about them here in the repository. - fn checkpoint_internal(&self) -> Result<()> { + fn checkpoint_internal(&self, force: bool) -> Result<()> { let last_valid_lsn = self.last_valid_lsn.load(); let last_record_lsn = self.last_record_lsn.load(); let prev_record_lsn = self.prev_record_lsn.load(); @@ -1130,14 +1131,26 @@ impl LayeredTimeline { // Call freeze() on any unfrozen layers (that is, layers that haven't // been written to disk yet). // Call unload() on all frozen layers, to release memory. 
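The loop below freezes open layers oldest-first until the oldest remaining one is within OLDEST_INMEM_DISTANCE of last_valid_lsn (or freezes everything when force is set). A small self-contained illustration of the threshold arithmetic, with made-up LSN values:

    const OLDEST_INMEM_DISTANCE: u64 = 16 * 1024 * 1024;

    fn needs_flush(last_valid_lsn: u64, oldest_pending_lsn: u64, force: bool) -> bool {
        force || last_valid_lsn - oldest_pending_lsn >= OLDEST_INMEM_DISTANCE
    }

    fn main() {
        // 24 MiB of un-checkpointed WAL in the layer: past the backstop, flush it
        assert!(needs_flush(0x0200_0000, 0x0080_0000, false));
        // only 8 MiB behind last_valid_lsn: leave it in memory
        assert!(!needs_flush(0x0200_0000, 0x0180_0000, false));
    }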
- let mut iter = layers.iter_open_layers(); - while let Some(layer) = iter.next() { - let (new_historic, new_open) = layer.freeze(last_valid_lsn, &self)?; + + let mut oldest_pending_lsn = last_valid_lsn; + + while let Some(oldest_layer) = layers.get_oldest_open_layer() { + + oldest_pending_lsn = oldest_layer.get_oldest_pending_lsn(); + let distance = last_valid_lsn.0 - oldest_pending_lsn.0; + if !force && distance < OLDEST_INMEM_DISTANCE { + info!("the oldest layer is now {} which is {} bytes behind last_valid_lsn", + oldest_layer.get_seg_tag(), distance); + break; + } + + let (new_historic, new_open) = oldest_layer.freeze(last_valid_lsn, &self)?; // replace this layer with the new layers that 'freeze' returned - // (removes it if new_open is None) - iter.replace(new_open); - + layers.pop_oldest(); + if let Some(n) = new_open { + layers.insert_open(n); + } if let Some(historic) = new_historic { trace!( "freeze returned layer {} {}-{}", @@ -1145,7 +1158,7 @@ impl LayeredTimeline { historic.get_start_lsn(), historic.get_end_lsn() ); - iter.insert_historic(historic); + layers.insert_historic(historic); } } @@ -1171,7 +1184,7 @@ impl LayeredTimeline { }; LayeredRepository::save_metadata(self.conf, self.timelineid, self.tenantid, &metadata)?; - self.last_checkpoint_lsn.store(last_valid_lsn); + self.oldest_pending_lsn.store(oldest_pending_lsn); Ok(()) } @@ -1202,12 +1215,6 @@ impl LayeredTimeline { let now = Instant::now(); let mut result: GcResult = Default::default(); - // Scan all snapshot files in the directory. For each file, if a newer file - // exists, we can remove the old one. - self.checkpoint()?; - - let mut layers = self.layers.lock().unwrap(); - info!( "running GC on timeline {}, cutoff {}", self.timelineid, cutoff @@ -1215,9 +1222,13 @@ impl LayeredTimeline { let mut layers_to_remove: Vec> = Vec::new(); + // Scan all snapshot files in the directory. For each file, if a newer file + // exists, we can remove the old one. + // // Determine for each file if it needs to be retained // FIXME: also scan open in-memory layers. Normally we cannot remove the // latest layer of any seg, but if it was unlinked it's possible + let mut layers = self.layers.lock().unwrap(); 'outer: for l in layers.iter_historic_layers() { let seg = l.get_seg_tag(); diff --git a/pageserver/src/layered_repository/inmemory_layer.rs b/pageserver/src/layered_repository/inmemory_layer.rs index cc356efaed..7f7492f4a5 100644 --- a/pageserver/src/layered_repository/inmemory_layer.rs +++ b/pageserver/src/layered_repository/inmemory_layer.rs @@ -32,6 +32,8 @@ pub struct InMemoryLayer { /// start_lsn: Lsn, + oldest_pending_lsn: Lsn, + /// The above fields never change. The parts that do change are in 'inner', /// and protected by mutex. 
inner: Mutex, @@ -164,6 +166,11 @@ impl Layer for InMemoryLayer { } impl InMemoryLayer { + + pub fn get_oldest_pending_lsn(&self) -> Lsn { + self.oldest_pending_lsn + } + /// /// Create a new, empty, in-memory layer /// @@ -173,6 +180,7 @@ impl InMemoryLayer { tenantid: ZTenantId, seg: SegmentTag, start_lsn: Lsn, + oldest_pending_lsn: Lsn, ) -> Result { trace!( "initializing new empty InMemoryLayer for writing {} on timeline {} at {}", @@ -187,6 +195,7 @@ impl InMemoryLayer { tenantid, seg, start_lsn, + oldest_pending_lsn, inner: Mutex::new(InMemoryLayerInner { drop_lsn: None, page_versions: BTreeMap::new(), @@ -314,13 +323,14 @@ impl InMemoryLayer { src: &dyn Layer, timelineid: ZTimelineId, tenantid: ZTenantId, - lsn: Lsn, + start_lsn: Lsn, + oldest_pending_lsn: Lsn, ) -> Result { trace!( "initializing new InMemoryLayer for writing {} on timeline {} at {}", src.get_seg_tag(), timelineid, - lsn + start_lsn ); let mut page_versions = BTreeMap::new(); let mut segsizes = BTreeMap::new(); @@ -331,8 +341,8 @@ impl InMemoryLayer { let startblk; let size; if seg.rel.is_blocky() { - size = src.get_seg_size(lsn)?; - segsizes.insert(lsn, size); + size = src.get_seg_size(start_lsn)?; + segsizes.insert(start_lsn, size); startblk = seg.segno * RELISH_SEG_SIZE; } else { size = 1; @@ -340,13 +350,13 @@ impl InMemoryLayer { } for blknum in startblk..(startblk + size) { - let img = timeline.materialize_page(seg, blknum, lsn, src)?; + let img = timeline.materialize_page(seg, blknum, start_lsn, src)?; let pv = PageVersion { page_image: Some(img), record: None, }; mem_used += pv.get_mem_size(); - page_versions.insert((blknum, lsn), pv); + page_versions.insert((blknum, start_lsn), pv); } Ok(InMemoryLayer { @@ -354,7 +364,8 @@ impl InMemoryLayer { timelineid, tenantid, seg: src.get_seg_tag(), - start_lsn: lsn, + start_lsn, + oldest_pending_lsn, inner: Mutex::new(InMemoryLayerInner { drop_lsn: None, page_versions: page_versions, @@ -451,26 +462,28 @@ impl InMemoryLayer { // If there were any "new" page versions, initialize a new in-memory layer to hold // them - let new_open = if !after_segsizes.is_empty() || !after_page_versions.is_empty() { - info!("created new in-mem layer for {} {}-", self.seg, end_lsn); + let new_open = + if !after_segsizes.is_empty() || !after_page_versions.is_empty() { + info!("created new in-mem layer for {} {}-", self.seg, end_lsn); - let new_open = Self::copy_snapshot( - self.conf, - timeline, - &snapfile, - self.timelineid, - self.tenantid, - end_lsn, - )?; - let mut new_inner = new_open.inner.lock().unwrap(); - new_inner.page_versions.append(&mut after_page_versions); - new_inner.segsizes.append(&mut after_segsizes); - drop(new_inner); + let new_open = Self::copy_snapshot( + self.conf, + timeline, + &snapfile, + self.timelineid, + self.tenantid, + end_lsn, + end_lsn, + )?; + let mut new_inner = new_open.inner.lock().unwrap(); + new_inner.page_versions.append(&mut after_page_versions); + new_inner.segsizes.append(&mut after_segsizes); + drop(new_inner); - Some(Arc::new(new_open)) - } else { - None - }; + Some(Arc::new(new_open)) + } else { + None + }; let new_historic = Some(Arc::new(snapfile)); diff --git a/pageserver/src/layered_repository/layer_map.rs b/pageserver/src/layered_repository/layer_map.rs index 0cf9f93419..3cca2f68ca 100644 --- a/pageserver/src/layered_repository/layer_map.rs +++ b/pageserver/src/layered_repository/layer_map.rs @@ -15,19 +15,23 @@ use crate::relish::*; use anyhow::Result; use log::*; use std::collections::HashSet; -use std::collections::{BTreeMap, 
HashMap}; +use std::collections::{BinaryHeap, BTreeMap, HashMap}; use std::ops::Bound::Included; +use std::cmp::Ordering; use std::sync::Arc; use zenith_utils::lsn::Lsn; /// -/// LayerMap tracks what layers exist or a timeline. The last layer that is +/// LayerMap tracks what layers exist on a timeline. The last layer that is /// open for writes is always an InMemoryLayer, and is tracked separately /// because there can be only one for each segment. The older layers, /// stored on disk, are kept in a BTreeMap keyed by the layer's start LSN. /// pub struct LayerMap { segs: HashMap, + + // FIXME: explain this + open_segs: BinaryHeap, } struct SegEntry { @@ -35,6 +39,31 @@ struct SegEntry { pub historic: BTreeMap>, } +struct OpenSegEntry { + pub oldest_pending_lsn: Lsn, + pub layer: Arc, +} +impl Ord for OpenSegEntry { + fn cmp(&self, other: &Self) -> Ordering { + // BinaryHeap is a max-heap, and we want a min-heap. Reverse the ordering here + // to get that. + other.oldest_pending_lsn.cmp(&self.oldest_pending_lsn) + } +} +impl PartialOrd for OpenSegEntry { + fn partial_cmp(&self, other: &Self) -> Option { + // BinaryHeap is a max-heap, and we want a min-heap. Reverse the ordering here + // to get that. + other.oldest_pending_lsn.partial_cmp(&self.oldest_pending_lsn) + } +} +impl PartialEq for OpenSegEntry { + fn eq(&self, other: &Self) -> bool { + self.oldest_pending_lsn.eq(&other.oldest_pending_lsn) + } +} +impl Eq for OpenSegEntry {} + impl LayerMap { /// /// Look up using the given segment tag and LSN. This differs from a plain @@ -88,14 +117,29 @@ impl LayerMap { if let Some(_old) = &segentry.open { // FIXME: shouldn't exist, but check } - segentry.open = Some(layer); + segentry.open = Some(Arc::clone(&layer)); } else { let segentry = SegEntry { - open: Some(layer), + open: Some(Arc::clone(&layer)), historic: BTreeMap::new(), }; self.segs.insert(tag, segentry); } + + let opensegentry = OpenSegEntry { + oldest_pending_lsn: layer.get_oldest_pending_lsn(), + layer: layer, + }; + self.open_segs.push(opensegentry); + } + + // replace given open layer with other layers. 
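OpenSegEntry inverts cmp() because std's BinaryHeap is a max-heap, while pop_oldest() below must yield the entry with the smallest oldest_pending_lsn. The same min-heap effect can be had with the std::cmp::Reverse wrapper; a quick sketch on bare u64 LSNs:

    use std::cmp::Reverse;
    use std::collections::BinaryHeap;

    fn main() {
        let mut heap = BinaryHeap::new();
        heap.push(Reverse(0x3000u64));
        heap.push(Reverse(0x1000u64));
        heap.push(Reverse(0x2000u64));
        // the smallest LSN comes out first, as pop_oldest() needs
        assert_eq!(heap.pop(), Some(Reverse(0x1000)));
    }

The hand-written impls in the patch keep the ordering logic next to the struct instead; either way, Ord and PartialOrd must agree, or the heap's ordering is unspecified.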
+ pub fn pop_oldest(&mut self) { + let opensegentry = self.open_segs.pop().unwrap(); + let segtag = opensegentry.layer.get_seg_tag(); + + let mut segentry = self.segs.get_mut(&segtag).unwrap(); + segentry.open = None; } /// @@ -196,10 +240,11 @@ impl LayerMap { false } - pub fn iter_open_layers(&mut self) -> OpenLayerIter { - OpenLayerIter { - last: None, - segiter: self.segs.iter_mut(), + pub fn get_oldest_open_layer(&mut self) -> Option> { + if let Some(opensegentry) = self.open_segs.peek() { + Some(Arc::clone(&opensegentry.layer)) + } else { + None } } @@ -215,46 +260,11 @@ impl Default for LayerMap { fn default() -> Self { LayerMap { segs: HashMap::new(), + open_segs: BinaryHeap::new(), } } } -pub struct OpenLayerIter<'a> { - last: Option<&'a mut SegEntry>, - - segiter: std::collections::hash_map::IterMut<'a, SegmentTag, SegEntry>, -} - -impl<'a> OpenLayerIter<'a> { - pub fn replace(&mut self, replacement: Option>) { - let segentry = self.last.as_mut().unwrap(); - segentry.open = replacement; - } - - pub fn insert_historic(&mut self, new_layer: Arc) { - let start_lsn = new_layer.get_start_lsn(); - - let segentry = self.last.as_mut().unwrap(); - segentry.historic.insert(start_lsn, new_layer); - } -} - -impl<'a> Iterator for OpenLayerIter<'a> { - type Item = Arc; - - fn next(&mut self) -> std::option::Option<::Item> { - while let Some((_seg, entry)) = self.segiter.next() { - if let Some(open) = &entry.open { - let op = Arc::clone(&open); - self.last = Some(entry); - return Some(op); - } - } - self.last = None; - None - } -} - pub struct HistoricLayerIter<'a> { segiter: std::collections::hash_map::Iter<'a, SegmentTag, SegEntry>, iter: Option>>, From 11ebcb531f47af10bc8827d00c118fa31163d99a Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Mon, 16 Aug 2021 17:26:34 +0300 Subject: [PATCH 13/24] Add Gauge for # of layers --- pageserver/src/layered_repository/layer_map.rs | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/pageserver/src/layered_repository/layer_map.rs b/pageserver/src/layered_repository/layer_map.rs index 3cca2f68ca..c052816e9b 100644 --- a/pageserver/src/layered_repository/layer_map.rs +++ b/pageserver/src/layered_repository/layer_map.rs @@ -13,14 +13,26 @@ use crate::layered_repository::storage_layer::{Layer, SegmentTag}; use crate::layered_repository::{InMemoryLayer, SnapshotLayer}; use crate::relish::*; use anyhow::Result; +use lazy_static::lazy_static; use log::*; use std::collections::HashSet; use std::collections::{BinaryHeap, BTreeMap, HashMap}; use std::ops::Bound::Included; use std::cmp::Ordering; use std::sync::Arc; +use zenith_metrics::{register_int_gauge, IntGauge}; use zenith_utils::lsn::Lsn; +lazy_static! { + static ref NUM_INMEMORY_LAYERS: IntGauge = + register_int_gauge!("pageserver_inmemory_layers", "Number of layers in memory") + .expect("failed to define a metric"); + + static ref NUM_ONDISK_LAYERS: IntGauge = + register_int_gauge!("pageserver_ondisk_layers", "Number of layers on-disk") + .expect("failed to define a metric"); +} + /// /// LayerMap tracks what layers exist on a timeline. The last layer that is /// open for writes is always an InMemoryLayer, and is tracked separately @@ -131,6 +143,8 @@ impl LayerMap { layer: layer, }; self.open_segs.push(opensegentry); + + NUM_INMEMORY_LAYERS.inc(); } // replace given open layer with other layers. 
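These gauges stay truthful only if every mutation path is paired: insert_open() with the dec() added to pop_oldest() below for NUM_INMEMORY_LAYERS, and insert_historic() with remove_historic() for NUM_ONDISK_LAYERS. A hedged micro-example of the pairing discipline, using a plain counter in place of the Prometheus gauge:

    struct Gauge(i64);
    impl Gauge {
        fn inc(&mut self) { self.0 += 1 }
        fn dec(&mut self) { self.0 -= 1 }
    }

    fn main() {
        let mut inmem = Gauge(0);
        inmem.inc(); // insert_open()
        inmem.inc(); // insert_open()
        inmem.dec(); // pop_oldest() after a freeze
        assert_eq!(inmem.0, 1); // one open layer left; gauge agrees
    }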
@@ -140,6 +154,7 @@ impl LayerMap { let mut segentry = self.segs.get_mut(&segtag).unwrap(); segentry.open = None; + NUM_INMEMORY_LAYERS.dec(); } /// @@ -161,6 +176,7 @@ impl LayerMap { }; self.segs.insert(tag, segentry); } + NUM_ONDISK_LAYERS.inc(); } /// @@ -175,6 +191,7 @@ impl LayerMap { if let Some(segentry) = self.segs.get_mut(&tag) { segentry.historic.remove(&start_lsn); } + NUM_ONDISK_LAYERS.dec(); } pub fn list_rels(&self, spcnode: u32, dbnode: u32) -> Result> { From 8d2b61f4d125d03ae102382a1c21bad38ea15e86 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Mon, 16 Aug 2021 19:24:01 +0300 Subject: [PATCH 14/24] Move code to handle snapshot filenames --- pageserver/src/layered_repository.rs | 19 +- pageserver/src/layered_repository/filename.rs | 175 ++++++++++++++++ .../src/layered_repository/snapshot_layer.rs | 190 ++---------------- 3 files changed, 206 insertions(+), 178 deletions(-) create mode 100644 pageserver/src/layered_repository/filename.rs diff --git a/pageserver/src/layered_repository.rs b/pageserver/src/layered_repository.rs index e6ff4ec32c..5170ab61a2 100644 --- a/pageserver/src/layered_repository.rs +++ b/pageserver/src/layered_repository.rs @@ -39,6 +39,7 @@ use zenith_utils::bin_ser::BeSer; use zenith_utils::lsn::{AtomicLsn, Lsn}; use zenith_utils::seqwait::SeqWait; +mod filename; mod inmemory_layer; mod layer_map; mod snapshot_layer; @@ -884,19 +885,21 @@ impl LayeredTimeline { self.timelineid ); let mut layers = self.layers.lock().unwrap(); - let snapfiles = - SnapshotLayer::list_snapshot_files(self.conf, self.timelineid, self.tenantid)?; + let snapfilenames = + filename::list_snapshot_files(self.conf, self.timelineid, self.tenantid)?; + + for filename in snapfilenames.iter() { + let layer = SnapshotLayer::load_snapshot_layer(self.conf, self.timelineid, self.tenantid, filename)?; - for layer_rc in snapfiles.iter() { info!( "found layer {} {}-{} {} on timeline {}", - layer_rc.get_seg_tag(), - layer_rc.get_start_lsn(), - layer_rc.get_end_lsn(), - layer_rc.is_dropped(), + layer.get_seg_tag(), + layer.get_start_lsn(), + layer.get_end_lsn(), + layer.is_dropped(), self.timelineid ); - layers.insert_historic(Arc::clone(layer_rc)); + layers.insert_historic(Arc::new(layer)); } Ok(()) diff --git a/pageserver/src/layered_repository/filename.rs b/pageserver/src/layered_repository/filename.rs new file mode 100644 index 0000000000..948affe547 --- /dev/null +++ b/pageserver/src/layered_repository/filename.rs @@ -0,0 +1,175 @@ +use crate::layered_repository::storage_layer::{SegmentTag}; +use crate::relish::*; +use crate::PageServerConf; +use crate::{ZTenantId, ZTimelineId}; +use std::fmt; +use std::fs; + +use anyhow::{Result}; +use log::*; +use zenith_utils::lsn::Lsn; + +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)] +pub struct SnapshotFileName { + pub seg: SegmentTag, + pub start_lsn: Lsn, + pub end_lsn: Lsn, + pub dropped: bool, +} + +impl SnapshotFileName { + fn from_str(fname: &str) -> Option { + // Split the filename into parts + // + // ______ + // + // or if it was dropped: + // + // _______DROPPED + // + let rel; + let mut parts; + if let Some(rest) = fname.strip_prefix("rel_") { + parts = rest.split('_'); + rel = RelishTag::Relation(RelTag { + spcnode: parts.next()?.parse::().ok()?, + dbnode: parts.next()?.parse::().ok()?, + relnode: parts.next()?.parse::().ok()?, + forknum: parts.next()?.parse::().ok()?, + }); + } else if let Some(rest) = fname.strip_prefix("pg_xact_") { + parts = rest.split('_'); + rel = RelishTag::Slru { + slru: SlruKind::Clog, + 
segno: u32::from_str_radix(parts.next()?, 16).ok()?, + }; + } else if let Some(rest) = fname.strip_prefix("pg_multixact_members_") { + parts = rest.split('_'); + rel = RelishTag::Slru { + slru: SlruKind::MultiXactMembers, + segno: u32::from_str_radix(parts.next()?, 16).ok()?, + }; + } else if let Some(rest) = fname.strip_prefix("pg_multixact_offsets_") { + parts = rest.split('_'); + rel = RelishTag::Slru { + slru: SlruKind::MultiXactOffsets, + segno: u32::from_str_radix(parts.next()?, 16).ok()?, + }; + } else if let Some(rest) = fname.strip_prefix("pg_filenodemap_") { + parts = rest.split('_'); + rel = RelishTag::FileNodeMap { + spcnode: parts.next()?.parse::().ok()?, + dbnode: parts.next()?.parse::().ok()?, + }; + } else if let Some(rest) = fname.strip_prefix("pg_twophase_") { + parts = rest.split('_'); + rel = RelishTag::TwoPhase { + xid: parts.next()?.parse::().ok()?, + }; + } else if let Some(rest) = fname.strip_prefix("pg_control_checkpoint_") { + parts = rest.split('_'); + rel = RelishTag::Checkpoint; + } else if let Some(rest) = fname.strip_prefix("pg_control_") { + parts = rest.split('_'); + rel = RelishTag::ControlFile; + } else { + return None; + } + + let segno = parts.next()?.parse::().ok()?; + + let seg = SegmentTag { + rel, + segno + }; + + let start_lsn = Lsn::from_hex(parts.next()?).ok()?; + let end_lsn = Lsn::from_hex(parts.next()?).ok()?; + + let mut dropped = false; + if let Some(suffix) = parts.next() { + if suffix == "DROPPED" { + dropped = true; + } else { + warn!("unrecognized filename in timeline dir: {}", fname); + return None; + } + } + if parts.next().is_some() { + warn!("unrecognized filename in timeline dir: {}", fname); + return None; + } + + Some(SnapshotFileName { + seg, + start_lsn, + end_lsn, + dropped, + }) + } + + fn to_string(&self) -> String { + let basename = match self.seg.rel { + RelishTag::Relation(reltag) => format!( + "rel_{}_{}_{}_{}", + reltag.spcnode, reltag.dbnode, reltag.relnode, reltag.forknum + ), + RelishTag::Slru { + slru: SlruKind::Clog, + segno, + } => format!("pg_xact_{:04X}", segno), + RelishTag::Slru { + slru: SlruKind::MultiXactMembers, + segno, + } => format!("pg_multixact_members_{:04X}", segno), + RelishTag::Slru { + slru: SlruKind::MultiXactOffsets, + segno, + } => format!("pg_multixact_offsets_{:04X}", segno), + RelishTag::FileNodeMap { spcnode, dbnode } => { + format!("pg_filenodemap_{}_{}", spcnode, dbnode) + } + RelishTag::TwoPhase { xid } => format!("pg_twophase_{}", xid), + RelishTag::Checkpoint => format!("pg_control_checkpoint"), + RelishTag::ControlFile => format!("pg_control"), + }; + + format!( + "{}_{}_{:016X}_{:016X}{}", + basename, + self.seg.segno, + u64::from(self.start_lsn), + u64::from(self.end_lsn), + if self.dropped { "_DROPPED" } else { "" } + ) + } +} + +impl fmt::Display for SnapshotFileName { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.to_string()) + } +} + + +/// Create SnapshotLayers representing all files on disk +/// +// TODO: returning an Iterator would be more idiomatic +pub fn list_snapshot_files( + conf: &'static PageServerConf, + timelineid: ZTimelineId, + tenantid: ZTenantId, +) -> Result> { + let path = conf.timeline_path(&timelineid, &tenantid); + + let mut snapfiles: Vec = Vec::new(); + for direntry in fs::read_dir(path)? 
{ + let fname = direntry?.file_name(); + let fname = fname.to_str().unwrap(); + + if let Some(snapfilename) = SnapshotFileName::from_str(fname) { + snapfiles.push(snapfilename); + } + } + return Ok(snapfiles); +} diff --git a/pageserver/src/layered_repository/snapshot_layer.rs b/pageserver/src/layered_repository/snapshot_layer.rs index e0f4e77995..084d023cbe 100644 --- a/pageserver/src/layered_repository/snapshot_layer.rs +++ b/pageserver/src/layered_repository/snapshot_layer.rs @@ -40,19 +40,18 @@ use crate::layered_repository::storage_layer::{ Layer, PageReconstructData, PageVersion, SegmentTag, }; -use crate::relish::*; +use crate::layered_repository::filename::{SnapshotFileName}; use crate::PageServerConf; use crate::{ZTenantId, ZTimelineId}; use anyhow::{bail, Result}; use log::*; use std::collections::BTreeMap; -use std::fmt; use std::fs; use std::fs::File; use std::io::Write; use std::ops::Bound::Included; use std::path::PathBuf; -use std::sync::{Arc, Mutex, MutexGuard}; +use std::sync::{Mutex, MutexGuard}; use bookfile::{Book, BookWriter}; @@ -65,145 +64,6 @@ static SNAPSHOT_FILE_MAGIC: u32 = 0x5A616E01; static PAGE_VERSIONS_CHAPTER: u64 = 1; static REL_SIZES_CHAPTER: u64 = 2; -#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)] -struct SnapshotFileName { - seg: SegmentTag, - start_lsn: Lsn, - end_lsn: Lsn, - dropped: bool, -} - -impl SnapshotFileName { - fn from_str(fname: &str) -> Option { - // Split the filename into parts - // - // ______ - // - // or if it was dropped: - // - // _______DROPPED - // - let rel; - let mut parts; - if let Some(rest) = fname.strip_prefix("rel_") { - parts = rest.split('_'); - rel = RelishTag::Relation(RelTag { - spcnode: parts.next()?.parse::().ok()?, - dbnode: parts.next()?.parse::().ok()?, - relnode: parts.next()?.parse::().ok()?, - forknum: parts.next()?.parse::().ok()?, - }); - } else if let Some(rest) = fname.strip_prefix("pg_xact_") { - parts = rest.split('_'); - rel = RelishTag::Slru { - slru: SlruKind::Clog, - segno: u32::from_str_radix(parts.next()?, 16).ok()?, - }; - } else if let Some(rest) = fname.strip_prefix("pg_multixact_members_") { - parts = rest.split('_'); - rel = RelishTag::Slru { - slru: SlruKind::MultiXactMembers, - segno: u32::from_str_radix(parts.next()?, 16).ok()?, - }; - } else if let Some(rest) = fname.strip_prefix("pg_multixact_offsets_") { - parts = rest.split('_'); - rel = RelishTag::Slru { - slru: SlruKind::MultiXactOffsets, - segno: u32::from_str_radix(parts.next()?, 16).ok()?, - }; - } else if let Some(rest) = fname.strip_prefix("pg_filenodemap_") { - parts = rest.split('_'); - rel = RelishTag::FileNodeMap { - spcnode: parts.next()?.parse::().ok()?, - dbnode: parts.next()?.parse::().ok()?, - }; - } else if let Some(rest) = fname.strip_prefix("pg_twophase_") { - parts = rest.split('_'); - rel = RelishTag::TwoPhase { - xid: parts.next()?.parse::().ok()?, - }; - } else if let Some(rest) = fname.strip_prefix("pg_control_checkpoint_") { - parts = rest.split('_'); - rel = RelishTag::Checkpoint; - } else if let Some(rest) = fname.strip_prefix("pg_control_") { - parts = rest.split('_'); - rel = RelishTag::ControlFile; - } else { - return None; - } - - let segno = parts.next()?.parse::().ok()?; - - let seg = SegmentTag { rel, segno }; - - let start_lsn = Lsn::from_hex(parts.next()?).ok()?; - let end_lsn = Lsn::from_hex(parts.next()?).ok()?; - - let mut dropped = false; - if let Some(suffix) = parts.next() { - if suffix == "DROPPED" { - dropped = true; - } else { - warn!("unrecognized filename in timeline dir: {}", 
fname); - return None; - } - } - if parts.next().is_some() { - warn!("unrecognized filename in timeline dir: {}", fname); - return None; - } - - Some(SnapshotFileName { - seg, - start_lsn, - end_lsn, - dropped, - }) - } - - fn to_string(&self) -> String { - let basename = match self.seg.rel { - RelishTag::Relation(reltag) => format!( - "rel_{}_{}_{}_{}", - reltag.spcnode, reltag.dbnode, reltag.relnode, reltag.forknum - ), - RelishTag::Slru { - slru: SlruKind::Clog, - segno, - } => format!("pg_xact_{:04X}", segno), - RelishTag::Slru { - slru: SlruKind::MultiXactMembers, - segno, - } => format!("pg_multixact_members_{:04X}", segno), - RelishTag::Slru { - slru: SlruKind::MultiXactOffsets, - segno, - } => format!("pg_multixact_offsets_{:04X}", segno), - RelishTag::FileNodeMap { spcnode, dbnode } => { - format!("pg_filenodemap_{}_{}", spcnode, dbnode) - } - RelishTag::TwoPhase { xid } => format!("pg_twophase_{}", xid), - RelishTag::Checkpoint => format!("pg_control_checkpoint"), - RelishTag::ControlFile => format!("pg_control"), - }; - - format!( - "{}_{}_{:016X}_{:016X}{}", - basename, - self.seg.segno, - u64::from(self.start_lsn), - u64::from(self.end_lsn), - if self.dropped { "_DROPPED" } else { "" } - ) - } -} - -impl fmt::Display for SnapshotFileName { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.to_string()) - } -} - /// /// SnapshotLayer is the in-memory data structure associated with an /// on-disk snapshot file. We keep a SnapshotLayer in memory for each @@ -474,38 +334,28 @@ impl SnapshotLayer { /// Create SnapshotLayers representing all files on disk /// // TODO: returning an Iterator would be more idiomatic - pub fn list_snapshot_files( + pub fn load_snapshot_layer( conf: &'static PageServerConf, timelineid: ZTimelineId, tenantid: ZTenantId, - ) -> Result>> { - let path = conf.timeline_path(&timelineid, &tenantid); + filename: &SnapshotFileName, + ) -> Result { + let snapfile = SnapshotLayer { + conf, + timelineid, + tenantid, + seg: filename.seg, + start_lsn: filename.start_lsn, + end_lsn: filename.end_lsn, + dropped: filename.dropped, + inner: Mutex::new(SnapshotLayerInner { + loaded: false, + page_versions: BTreeMap::new(), + relsizes: BTreeMap::new(), + }), + }; - let mut snapfiles: Vec> = Vec::new(); - for direntry in fs::read_dir(path)? 
{ - let fname = direntry?.file_name(); - let fname = fname.to_str().unwrap(); - - if let Some(snapfilename) = SnapshotFileName::from_str(fname) { - let snapfile = SnapshotLayer { - conf, - timelineid, - tenantid, - seg: snapfilename.seg, - start_lsn: snapfilename.start_lsn, - end_lsn: snapfilename.end_lsn, - dropped: snapfilename.dropped, - inner: Mutex::new(SnapshotLayerInner { - loaded: false, - page_versions: BTreeMap::new(), - relsizes: BTreeMap::new(), - }), - }; - - snapfiles.push(Arc::new(snapfile)); - } - } - return Ok(snapfiles); + Ok(snapfile) } pub fn delete(&self) -> Result<()> { From 882f549236e9b1cb5b3623cb6ab3ce8fb6f8f68f Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Mon, 16 Aug 2021 22:04:41 +0300 Subject: [PATCH 15/24] WIP: store base images separately --- .../src/layered_repository/inmemory_layer.rs | 67 ++++++++++++------- .../src/layered_repository/snapshot_layer.rs | 61 ++++++++++++----- 2 files changed, 87 insertions(+), 41 deletions(-) diff --git a/pageserver/src/layered_repository/inmemory_layer.rs b/pageserver/src/layered_repository/inmemory_layer.rs index 7f7492f4a5..7266879a43 100644 --- a/pageserver/src/layered_repository/inmemory_layer.rs +++ b/pageserver/src/layered_repository/inmemory_layer.rs @@ -43,6 +43,8 @@ pub struct InMemoryLayerInner { /// If this relation was dropped, remember when that happened. drop_lsn: Option, + base_images: Vec, + /// /// All versions of all pages in the layer are are kept here. /// Indexed by block number and LSN. @@ -127,7 +129,18 @@ impl Layer for InMemoryLayer { } } - // release lock on 'page_versions' + // Use the base image, if needed + if need_base_image_lsn.is_some() { + let base_blknum: usize = (blknum % RELISH_SEG_SIZE) as usize; + if let Some(img) = inner.base_images.get(base_blknum) { + reconstruct_data.page_img = Some(img.clone()); + need_base_image_lsn = None; + } else { + bail!("inmem: no base img found for {} at blk {} at LSN {}", self.seg, base_blknum, lsn); + } + } + + // release lock on 'inner' } Ok(need_base_image_lsn) @@ -135,18 +148,20 @@ impl Layer for InMemoryLayer { /// Get size of the relation at given LSN fn get_seg_size(&self, lsn: Lsn) -> Result { + assert!(lsn >= self.start_lsn); + // Scan the BTreeMap backwards, starting from the given entry. let inner = self.inner.lock().unwrap(); let mut iter = inner.segsizes.range((Included(&Lsn(0)), Included(&lsn))); + let result; if let Some((_entry_lsn, entry)) = iter.next_back() { - let result = *entry; - drop(inner); - trace!("get_seg_size: {} at {} -> {}", self.seg, lsn, result); - Ok(result) + result = *entry; } else { - bail!("No size found for {} at {} in memory", self.seg, lsn); + result = inner.base_images.len() as u32; } + trace!("get_seg_size: {} at {} -> {}", self.seg, lsn, result); + Ok(result) } /// Does this segment exist at given LSN? 
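With base images stored in the layer, a fresh layer no longer seeds segsizes with an entry at start_lsn: when no size change has been recorded since the layer was created, the length of the materialized base_images vector is the segment size. A small sketch of that lookup rule with simplified types (the real code also asserts lsn >= start_lsn first):

    use std::collections::BTreeMap;

    fn seg_size(segsizes: &BTreeMap<u64, u32>, base_images_len: usize, lsn: u64) -> u32 {
        segsizes
            .range(..=lsn)
            .next_back()
            .map(|(_, sz)| *sz)
            .unwrap_or(base_images_len as u32)
    }

    fn main() {
        let mut sizes = BTreeMap::new();
        // nothing recorded yet: the base images define the size
        assert_eq!(seg_size(&sizes, 10, 100), 10);
        sizes.insert(50, 12); // relation extended at LSN 50
        assert_eq!(seg_size(&sizes, 10, 100), 12);
    }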
@@ -198,6 +213,7 @@ impl InMemoryLayer { oldest_pending_lsn, inner: Mutex::new(InMemoryLayerInner { drop_lsn: None, + base_images: Vec::new(), page_versions: BTreeMap::new(), segsizes: BTreeMap::new(), mem_used: 0, @@ -270,7 +286,7 @@ impl InMemoryLayer { if let Some((_entry_lsn, entry)) = iter.next_back() { oldsize = *entry; } else { - oldsize = 0; + oldsize = inner.base_images.len() as u32; //bail!("No old size found for {} at {}", self.tag, lsn); } if newsize > oldsize { @@ -326,14 +342,6 @@ impl InMemoryLayer { start_lsn: Lsn, oldest_pending_lsn: Lsn, ) -> Result { - trace!( - "initializing new InMemoryLayer for writing {} on timeline {} at {}", - src.get_seg_tag(), - timelineid, - start_lsn - ); - let mut page_versions = BTreeMap::new(); - let mut segsizes = BTreeMap::new(); let mut mem_used = 0; let seg = src.get_seg_tag(); @@ -342,21 +350,27 @@ impl InMemoryLayer { let size; if seg.rel.is_blocky() { size = src.get_seg_size(start_lsn)?; - segsizes.insert(start_lsn, size); startblk = seg.segno * RELISH_SEG_SIZE; } else { size = 1; startblk = 0; } - for blknum in startblk..(startblk + size) { + trace!( + "initializing new InMemoryLayer for writing {} on timeline {} at {}, size {}", + src.get_seg_tag(), + timelineid, + start_lsn, + size, + ); + + let mut base_images: Vec = Vec::new(); + for blknum in startblk..(startblk+size) { let img = timeline.materialize_page(seg, blknum, start_lsn, src)?; - let pv = PageVersion { - page_image: Some(img), - record: None, - }; - mem_used += pv.get_mem_size(); - page_versions.insert((blknum, start_lsn), pv); + + mem_used += img.len(); + + base_images.push(img); } Ok(InMemoryLayer { @@ -368,8 +382,9 @@ impl InMemoryLayer { oldest_pending_lsn, inner: Mutex::new(InMemoryLayerInner { drop_lsn: None, - page_versions: page_versions, - segsizes: segsizes, + base_images: base_images, + page_versions: BTreeMap::new(), + segsizes: BTreeMap::new(), mem_used: mem_used, }), }) @@ -413,6 +428,7 @@ impl InMemoryLayer { }; // Divide all the page versions into old and new at the 'end_lsn' cutoff point. + let before_base_images = inner.base_images.clone(); let mut before_page_versions; let mut before_segsizes; let mut after_page_versions; @@ -456,6 +472,7 @@ impl InMemoryLayer { self.start_lsn, end_lsn, dropped, + before_base_images, before_page_versions, before_segsizes, )?; diff --git a/pageserver/src/layered_repository/snapshot_layer.rs b/pageserver/src/layered_repository/snapshot_layer.rs index 084d023cbe..34f69fb93c 100644 --- a/pageserver/src/layered_repository/snapshot_layer.rs +++ b/pageserver/src/layered_repository/snapshot_layer.rs @@ -36,14 +36,17 @@ //! //! A snapshot file is constructed using the 'bookfile' crate. Each file consists of two //! parts: the page versions and the relation sizes. They are stored as separate chapters. +//! FIXME //! 
use crate::layered_repository::storage_layer::{ Layer, PageReconstructData, PageVersion, SegmentTag, }; use crate::layered_repository::filename::{SnapshotFileName}; +use crate::layered_repository::RELISH_SEG_SIZE; use crate::PageServerConf; use crate::{ZTenantId, ZTimelineId}; use anyhow::{bail, Result}; +use bytes::Bytes; use log::*; use std::collections::BTreeMap; use std::fs; @@ -61,8 +64,9 @@ use zenith_utils::lsn::Lsn; // Magic constant to identify a Zenith snapshot file static SNAPSHOT_FILE_MAGIC: u32 = 0x5A616E01; -static PAGE_VERSIONS_CHAPTER: u64 = 1; -static REL_SIZES_CHAPTER: u64 = 2; +static BASE_IMAGES_CHAPTER: u64 = 1; +static PAGE_VERSIONS_CHAPTER: u64 = 2; +static REL_SIZES_CHAPTER: u64 = 3; /// /// SnapshotLayer is the in-memory data structure associated with an @@ -94,6 +98,9 @@ pub struct SnapshotLayerInner { /// loaded into memory yet. loaded: bool, + // indexed by block number (within segment) + base_images: Vec, + /// All versions of all pages in the file are are kept here. /// Indexed by block number and LSN. page_versions: BTreeMap<(u32, Lsn), PageVersion>, @@ -159,6 +166,17 @@ impl Layer for SnapshotLayer { } } + // Use the base image, if needed + if need_base_image_lsn.is_some() { + let base_blknum: usize = (blknum % RELISH_SEG_SIZE) as usize; + if let Some(img) = inner.base_images.get(base_blknum) { + reconstruct_data.page_img = Some(img.clone()); + need_base_image_lsn = None; + } else { + bail!("no base img found for {} at blk {} at LSN {}", self.seg, base_blknum, lsn); + } + } + // release lock on 'inner' } @@ -167,26 +185,21 @@ impl Layer for SnapshotLayer { /// Get size of the relation at given LSN fn get_seg_size(&self, lsn: Lsn) -> Result { + + assert!(lsn >= self.start_lsn); + // Scan the BTreeMap backwards, starting from the given entry. let inner = self.load()?; let mut iter = inner.relsizes.range((Included(&Lsn(0)), Included(&lsn))); + let result; if let Some((_entry_lsn, entry)) = iter.next_back() { - let result = *entry; - drop(inner); - trace!("get_seg_size: {} at {} -> {}", self.seg, lsn, result); - Ok(result) + result = *entry; } else { - error!( - "No size found for {} at {} in snapshot layer {} {}-{}", - self.seg, lsn, self.seg, self.start_lsn, self.end_lsn - ); - bail!( - "No size found for {} at {} in snapshot layer", - self.seg, - lsn - ); + result = inner.base_images.len() as u32; } + info!("get_seg_size: {} at {} -> {}", self.seg, lsn, result); + Ok(result) } /// Does this segment exist at given LSN? 
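The file now carries three chapters instead of two: base images, page versions, and relation sizes. Conceptually, a bookfile is a magic number plus independently addressable chapters keyed by u64 IDs, which is what lets unload() drop the decoded maps and a later access re-read them from disk. A stand-in sketch of that model (a real bookfile is a structured on-disk format, not a HashMap):

    use std::collections::HashMap;

    const BASE_IMAGES_CHAPTER: u64 = 1;
    const PAGE_VERSIONS_CHAPTER: u64 = 2;
    const REL_SIZES_CHAPTER: u64 = 3;

    fn main() {
        // each chapter holds one independently serialized structure
        let mut book: HashMap<u64, Vec<u8>> = HashMap::new();
        book.insert(BASE_IMAGES_CHAPTER, b"Vec<Bytes> of page images".to_vec());
        book.insert(PAGE_VERSIONS_CHAPTER, b"BTreeMap of (blk, lsn) -> version".to_vec());
        book.insert(REL_SIZES_CHAPTER, b"BTreeMap of lsn -> size".to_vec());

        // load() fetches a chapter by its ID and deserializes it
        assert!(book.get(&PAGE_VERSIONS_CHAPTER).is_some());
    }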
@@ -240,9 +253,11 @@ impl SnapshotLayer { start_lsn: Lsn, end_lsn: Lsn, dropped: bool, + base_images: Vec, page_versions: BTreeMap<(u32, Lsn), PageVersion>, relsizes: BTreeMap, ) -> Result { + let snapfile = SnapshotLayer { conf: conf, timelineid: timelineid, @@ -253,6 +268,7 @@ impl SnapshotLayer { dropped, inner: Mutex::new(SnapshotLayerInner { loaded: true, + base_images: base_images, page_versions: page_versions, relsizes: relsizes, }), @@ -267,7 +283,14 @@ impl SnapshotLayer { let file = File::create(&path)?; let book = BookWriter::new(file, SNAPSHOT_FILE_MAGIC)?; - // Write out page versions + // Write out the base images + let mut chapter = book.new_chapter(BASE_IMAGES_CHAPTER); + let buf = Vec::ser(&inner.base_images)?; + + chapter.write_all(&buf)?; + let book = chapter.close()?; + + // Write out the other page versions let mut chapter = book.new_chapter(PAGE_VERSIONS_CHAPTER); let buf = BTreeMap::ser(&inner.page_versions)?; chapter.write_all(&buf)?; @@ -314,6 +337,9 @@ impl SnapshotLayer { let file = File::open(&path)?; let book = Book::new(file)?; + let chapter = book.read_chapter(BASE_IMAGES_CHAPTER)?; + let base_images = Vec::des(&chapter)?; + let chapter = book.read_chapter(PAGE_VERSIONS_CHAPTER)?; let page_versions = BTreeMap::des(&chapter)?; @@ -324,6 +350,7 @@ impl SnapshotLayer { *inner = SnapshotLayerInner { loaded: true, + base_images, page_versions, relsizes, }; @@ -350,6 +377,7 @@ impl SnapshotLayer { dropped: filename.dropped, inner: Mutex::new(SnapshotLayerInner { loaded: false, + base_images: Vec::new(), page_versions: BTreeMap::new(), relsizes: BTreeMap::new(), }), @@ -370,6 +398,7 @@ impl SnapshotLayer { /// pub fn unload(&self) -> Result<()> { let mut inner = self.inner.lock().unwrap(); + inner.base_images = Vec::new(); inner.page_versions = BTreeMap::new(); inner.relsizes = BTreeMap::new(); inner.loaded = false; From ddb7155bbecc3abcb1d5fedb79511673700dca1d Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Mon, 16 Aug 2021 23:22:55 +0300 Subject: [PATCH 16/24] WIP Store base images in separate ImageLayers --- pageserver/src/layered_repository.rs | 43 +- pageserver/src/layered_repository/filename.rs | 135 +++++- .../src/layered_repository/image_layer.rs | 384 ++++++++++++++++++ .../src/layered_repository/inmemory_layer.rs | 110 ++--- .../src/layered_repository/layer_map.rs | 22 +- .../src/layered_repository/snapshot_layer.rs | 89 ++-- .../src/layered_repository/storage_layer.rs | 5 + 7 files changed, 666 insertions(+), 122 deletions(-) create mode 100644 pageserver/src/layered_repository/image_layer.rs diff --git a/pageserver/src/layered_repository.rs b/pageserver/src/layered_repository.rs index 5170ab61a2..c81625088d 100644 --- a/pageserver/src/layered_repository.rs +++ b/pageserver/src/layered_repository.rs @@ -35,16 +35,19 @@ use crate::PageServerConf; use crate::{ZTenantId, ZTimelineId}; use zenith_metrics::{register_histogram_vec, HistogramVec}; +use zenith_metrics::{register_histogram, Histogram}; use zenith_utils::bin_ser::BeSer; use zenith_utils::lsn::{AtomicLsn, Lsn}; use zenith_utils::seqwait::SeqWait; mod filename; +mod image_layer; mod inmemory_layer; mod layer_map; mod snapshot_layer; mod storage_layer; +use image_layer::ImageLayer; use inmemory_layer::InMemoryLayer; use layer_map::LayerMap; use snapshot_layer::SnapshotLayer; @@ -74,6 +77,16 @@ lazy_static! { .expect("failed to define a metric"); } + +// Metrics collected on operations on the storage repository. +lazy_static! 
{ + static ref RECONSTRUCT_TIME: Histogram = register_histogram!( + "pageserver_getpage_reconstruct_time", + "FIXME Time spent on storage operations" + ) + .expect("failed to define a metric"); +} + /// /// Repository consists of multiple timelines. Keep them in a hash table. /// @@ -486,7 +499,10 @@ impl Timeline for LayeredTimeline { let seg = SegmentTag::from_blknum(rel, blknum); if let Some((layer, lsn)) = self.get_layer_for_read(seg, lsn)? { - self.materialize_page(seg, blknum, lsn, &*layer) + RECONSTRUCT_TIME + .observe_closure_duration(|| { + self.materialize_page(seg, blknum, lsn, &*layer) + }) } else { bail!("relish {} not found at {}", rel, lsn); } @@ -885,9 +901,21 @@ impl LayeredTimeline { self.timelineid ); let mut layers = self.layers.lock().unwrap(); - let snapfilenames = + let (snapfilenames, imgfilenames) = filename::list_snapshot_files(self.conf, self.timelineid, self.tenantid)?; + for filename in imgfilenames.iter() { + let layer = ImageLayer::load_image_layer(self.conf, self.timelineid, self.tenantid, filename)?; + + info!( + "found layer {} {} on timeline {}", + layer.get_seg_tag(), + layer.get_start_lsn(), + self.timelineid + ); + layers.insert_historic(Arc::new(layer)); + } + for filename in snapfilenames.iter() { let layer = SnapshotLayer::load_snapshot_layer(self.conf, self.timelineid, self.tenantid, filename)?; @@ -1031,10 +1059,9 @@ impl LayeredTimeline { prev_layer.get_start_lsn(), prev_layer.get_end_lsn() ); - layer = InMemoryLayer::copy_snapshot( + layer = InMemoryLayer::create_successor_layer( self.conf, - &self, - &*prev_layer, + prev_layer, self.timelineid, self.tenantid, start_lsn, @@ -1147,14 +1174,14 @@ impl LayeredTimeline { break; } - let (new_historic, new_open) = oldest_layer.freeze(last_valid_lsn, &self)?; + let (new_historics, new_open) = oldest_layer.freeze(last_valid_lsn, &self)?; // replace this layer with the new layers that 'freeze' returned layers.pop_oldest(); if let Some(n) = new_open { layers.insert_open(n); } - if let Some(historic) = new_historic { + for historic in new_historics { trace!( "freeze returned layer {} {}-{}", historic.get_seg_tag(), @@ -1223,7 +1250,7 @@ impl LayeredTimeline { self.timelineid, cutoff ); - let mut layers_to_remove: Vec> = Vec::new(); + let mut layers_to_remove: Vec> = Vec::new(); // Scan all snapshot files in the directory. For each file, if a newer file // exists, we can remove the old one. 
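The new RECONSTRUCT_TIME histogram above times page materialization through observe_closure_duration. A sketch of the same pattern in isolation, reusing the zenith_metrics macros that this patch already uses; the metric name and function here are invented for illustration:

    use lazy_static::lazy_static;
    use zenith_metrics::{register_histogram, Histogram};

    lazy_static! {
        static ref EXAMPLE_TIME: Histogram = register_histogram!(
            "pageserver_example_seconds",
            "Time spent in the example closure"
        )
        .expect("failed to define a metric");
    }

    fn timed_work() -> u32 {
        // The closure's runtime is recorded in the histogram; its
        // return value is passed through unchanged.
        EXAMPLE_TIME.observe_closure_duration(|| 2 + 2)
    }
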
diff --git a/pageserver/src/layered_repository/filename.rs b/pageserver/src/layered_repository/filename.rs index 948affe547..f807aa599a 100644 --- a/pageserver/src/layered_repository/filename.rs +++ b/pageserver/src/layered_repository/filename.rs @@ -151,6 +151,132 @@ impl fmt::Display for SnapshotFileName { } } +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)] +pub struct ImageFileName { + pub seg: SegmentTag, + pub lsn: Lsn, +} + +impl ImageFileName { + fn from_str(fname: &str) -> Option { + // Split the filename into parts + // + // ______ + // + // or if it was dropped: + // + // _______DROPPED + // + let rel; + let mut parts; + if let Some(rest) = fname.strip_prefix("rel_") { + parts = rest.split('_'); + rel = RelishTag::Relation(RelTag { + spcnode: parts.next()?.parse::().ok()?, + dbnode: parts.next()?.parse::().ok()?, + relnode: parts.next()?.parse::().ok()?, + forknum: parts.next()?.parse::().ok()?, + }); + } else if let Some(rest) = fname.strip_prefix("pg_xact_") { + parts = rest.split('_'); + rel = RelishTag::Slru { + slru: SlruKind::Clog, + segno: u32::from_str_radix(parts.next()?, 16).ok()?, + }; + } else if let Some(rest) = fname.strip_prefix("pg_multixact_members_") { + parts = rest.split('_'); + rel = RelishTag::Slru { + slru: SlruKind::MultiXactMembers, + segno: u32::from_str_radix(parts.next()?, 16).ok()?, + }; + } else if let Some(rest) = fname.strip_prefix("pg_multixact_offsets_") { + parts = rest.split('_'); + rel = RelishTag::Slru { + slru: SlruKind::MultiXactOffsets, + segno: u32::from_str_radix(parts.next()?, 16).ok()?, + }; + } else if let Some(rest) = fname.strip_prefix("pg_filenodemap_") { + parts = rest.split('_'); + rel = RelishTag::FileNodeMap { + spcnode: parts.next()?.parse::().ok()?, + dbnode: parts.next()?.parse::().ok()?, + }; + } else if let Some(rest) = fname.strip_prefix("pg_twophase_") { + parts = rest.split('_'); + rel = RelishTag::TwoPhase { + xid: parts.next()?.parse::().ok()?, + }; + } else if let Some(rest) = fname.strip_prefix("pg_control_checkpoint_") { + parts = rest.split('_'); + rel = RelishTag::Checkpoint; + } else if let Some(rest) = fname.strip_prefix("pg_control_") { + parts = rest.split('_'); + rel = RelishTag::ControlFile; + } else { + return None; + } + + let segno = parts.next()?.parse::().ok()?; + + let seg = SegmentTag { + rel, + segno + }; + + let lsn = Lsn::from_hex(parts.next()?).ok()?; + + if parts.next().is_some() { + warn!("unrecognized filename in timeline dir: {}", fname); + return None; + } + + Some(ImageFileName { + seg, + lsn, + }) + } + + fn to_string(&self) -> String { + let basename = match self.seg.rel { + RelishTag::Relation(reltag) => format!( + "rel_{}_{}_{}_{}", + reltag.spcnode, reltag.dbnode, reltag.relnode, reltag.forknum + ), + RelishTag::Slru { + slru: SlruKind::Clog, + segno, + } => format!("pg_xact_{:04X}", segno), + RelishTag::Slru { + slru: SlruKind::MultiXactMembers, + segno, + } => format!("pg_multixact_members_{:04X}", segno), + RelishTag::Slru { + slru: SlruKind::MultiXactOffsets, + segno, + } => format!("pg_multixact_offsets_{:04X}", segno), + RelishTag::FileNodeMap { spcnode, dbnode } => { + format!("pg_filenodemap_{}_{}", spcnode, dbnode) + } + RelishTag::TwoPhase { xid } => format!("pg_twophase_{}", xid), + RelishTag::Checkpoint => format!("pg_control_checkpoint"), + RelishTag::ControlFile => format!("pg_control"), + }; + + format!( + "{}_{}_{:016X}", + basename, + self.seg.segno, + u64::from(self.lsn), + ) + } +} + +impl fmt::Display for ImageFileName { + fn fmt(&self, f: &mut 
fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.to_string()) + } +} + /// Create SnapshotLayers representing all files on disk /// @@ -159,10 +285,11 @@ pub fn list_snapshot_files( conf: &'static PageServerConf, timelineid: ZTimelineId, tenantid: ZTenantId, -) -> Result> { +) -> Result<(Vec, Vec)> { let path = conf.timeline_path(&timelineid, &tenantid); let mut snapfiles: Vec = Vec::new(); + let mut imgfiles: Vec = Vec::new(); for direntry in fs::read_dir(path)? { let fname = direntry?.file_name(); let fname = fname.to_str().unwrap(); @@ -170,6 +297,10 @@ pub fn list_snapshot_files( if let Some(snapfilename) = SnapshotFileName::from_str(fname) { snapfiles.push(snapfilename); } + + if let Some(imgfilename) = ImageFileName::from_str(fname) { + imgfiles.push(imgfilename); + } } - return Ok(snapfiles); + return Ok((snapfiles, imgfiles)); } diff --git a/pageserver/src/layered_repository/image_layer.rs b/pageserver/src/layered_repository/image_layer.rs new file mode 100644 index 0000000000..debbeb6ae1 --- /dev/null +++ b/pageserver/src/layered_repository/image_layer.rs @@ -0,0 +1,384 @@ +//! FIXME +//! A SnapshotLayer represents one snapshot file on disk. One file holds all page +//! version and size information of one relation, in a range of LSN. +//! The name "snapshot file" is a bit of a misnomer because a snapshot file doesn't +//! contain a snapshot at a specific LSN, but rather all the page versions in a range +//! of LSNs. +//! +//! Currently, a snapshot file contains full information needed to reconstruct any +//! page version in the LSN range, without consulting any other snapshot files. When +//! a new snapshot file is created for writing, the full contents of relation are +//! materialized as it is at the beginning of the LSN range. That can be very expensive, +//! we should find a way to store differential files. But this keeps the read-side +//! of things simple. You can find the correct snapshot file based on RelishTag and +//! timeline+LSN, and once you've located it, you have all the data you need to in that +//! file. +//! +//! When a snapshot file needs to be accessed, we slurp the whole file into memory, into +//! the SnapshotLayer struct. See load() and unload() functions. +//! +//! On disk, the snapshot files are stored in timelines/ directory. +//! Currently, there are no subdirectories, and each snapshot file is named like this: +//! +//! _____ +//! +//! For example: +//! +//! 1663_13990_2609_0_000000000169C348_000000000169C349 +//! +//! If a relation is dropped, we add a '_DROPPED' to the end of the filename to indicate that. +//! So the above example would become: +//! +//! 1663_13990_2609_0_000000000169C348_000000000169C349_DROPPED +//! +//! The end LSN indicates when it was dropped in that case, we don't store it in the +//! file contents in any way. +//! +//! A snapshot file is constructed using the 'bookfile' crate. Each file consists of two +//! parts: the page versions and the relation sizes. They are stored as separate chapters. +//! FIXME +//! 
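// For orientation: ImageFileName::to_string in filename.rs above produces
// names of the form <basename>_<segno>_<lsn as 16 hex digits>. As a worked
// example with arbitrary values, a relation segment with spcnode 1663,
// dbnode 13990, relnode 2609, forknum 0 and segno 0, imaged at LSN
// 0x169C348, is stored as:
//
//     rel_1663_13990_2609_0_0_000000000169C348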
+use crate::layered_repository::storage_layer::{Layer, PageReconstructData, SegmentTag};
+use crate::layered_repository::LayeredTimeline;
+use crate::layered_repository::filename::{ImageFileName};
+use crate::layered_repository::RELISH_SEG_SIZE;
+use crate::PageServerConf;
+use crate::{ZTenantId, ZTimelineId};
+use anyhow::{bail, Result};
+use bytes::Bytes;
+use lazy_static::lazy_static;
+use log::*;
+use std::fs;
+use std::fs::File;
+use std::io::Write;
+use std::path::PathBuf;
+use std::sync::{Mutex, MutexGuard};
+
+use bookfile::{Book, BookWriter};
+
+use zenith_metrics::{register_histogram, Histogram};
+use zenith_utils::bin_ser::BeSer;
+use zenith_utils::lsn::Lsn;
+
+// Magic constant to identify a Zenith segment image file
+static IMAGE_FILE_MAGIC: u32 = 0x5A616E01 + 1;
+
+static BASE_IMAGES_CHAPTER: u64 = 1;
+
+
+// Metrics collected on operations on the storage repository.
+lazy_static! {
+    static ref RECONSTRUCT_TIME: Histogram = register_histogram!(
+        "pageserver_image_reconstruct_time",
+        "FIXME Time spent on storage operations"
+    )
+    .expect("failed to define a metric");
+}
+
+///
+/// ImageLayer is the in-memory data structure associated with an
+/// on-disk image file. We keep an ImageLayer in memory for each
+/// file, in the LayerMap. If a layer is in "loaded" state, we have a
+/// copy of the file in memory, in 'inner'. Otherwise the struct is
+/// just a placeholder for a file that exists on disk, and it needs to
+/// be loaded before using it in queries.
+///
+pub struct ImageLayer {
+    conf: &'static PageServerConf,
+    pub tenantid: ZTenantId,
+    pub timelineid: ZTimelineId,
+    pub seg: SegmentTag,
+
+    // This entry contains an image of all pages as of this LSN
+    pub lsn: Lsn,
+
+    inner: Mutex<ImageLayerInner>,
+}
+
+pub struct ImageLayerInner {
+    /// If false, the 'base_images' have not been
+    /// loaded into memory yet.
+    loaded: bool,
+
+    // indexed by block number (within segment)
+    base_images: Vec<Bytes>,
+}
+
+impl Layer for ImageLayer {
+    fn get_timeline_id(&self) -> ZTimelineId {
+        return self.timelineid;
+    }
+
+    fn get_seg_tag(&self) -> SegmentTag {
+        return self.seg;
+    }
+
+    fn is_dropped(&self) -> bool {
+        return false;
+    }
+
+    fn get_start_lsn(&self) -> Lsn {
+        return self.lsn;
+    }
+
+    fn get_end_lsn(&self) -> Lsn {
+        return self.lsn;
+    }
+
+    /// Look up given page in the cache.
+    fn get_page_reconstruct_data(
+        &self,
+        blknum: u32,
+        lsn: Lsn,
+        reconstruct_data: &mut PageReconstructData,
+    ) -> Result<Option<Lsn>> {
+        let need_base_image_lsn: Option<Lsn>;
+
+        assert!(lsn >= self.lsn);
+
+        {
+            let inner = self.load()?;
+
+            let base_blknum: usize = (blknum % RELISH_SEG_SIZE) as usize;
+            if let Some(img) = inner.base_images.get(base_blknum) {
+                reconstruct_data.page_img = Some(img.clone());
+                need_base_image_lsn = None;
+            } else {
+                bail!("no base img found for {} at blk {} at LSN {}", self.seg, base_blknum, lsn);
+            }
+            // release lock on 'inner'
+        }
+
+        Ok(need_base_image_lsn)
+    }
+
+    /// Get size of the relation at given LSN
+    fn get_seg_size(&self, _lsn: Lsn) -> Result<u32> {
+
+        let inner = self.load()?;
+        let result = inner.base_images.len() as u32;
+
+        Ok(result)
+    }
+
+    /// Does this segment exist at given LSN?
+    fn get_seg_exists(&self, _lsn: Lsn) -> Result<bool> {
+        Ok(true)
+    }
+
+
+    ///
+    /// Release most of the memory used by this layer. If it's accessed again later,
+    /// it will need to be loaded back.
+ /// + fn unload(&self) -> Result<()> { + let mut inner = self.inner.lock().unwrap(); + inner.base_images = Vec::new(); + inner.loaded = false; + Ok(()) + } + + fn delete(&self) -> Result<()> { + // delete underlying file + fs::remove_file(self.path())?; + Ok(()) + } + + fn is_incremental(&self) -> bool { + false + } +} + +impl ImageLayer { + fn path(&self) -> PathBuf { + Self::path_for( + self.conf, + self.timelineid, + self.tenantid, + &ImageFileName { + seg: self.seg, + lsn: self.lsn, + }, + ) + } + + fn path_for( + conf: &'static PageServerConf, + timelineid: ZTimelineId, + tenantid: ZTenantId, + fname: &ImageFileName, + ) -> PathBuf { + conf.timeline_path(&timelineid, &tenantid) + .join(fname.to_string()) + } + + /// Create a new snapshot file, using the given btreemaps containing the page versions and + /// relsizes. + /// FIXME comment + /// This is used to write the in-memory layer to disk. The in-memory layer uses the same + /// data structure with two btreemaps as we do, so passing the btreemaps is currently + /// expedient. + pub fn create( + conf: &'static PageServerConf, + timelineid: ZTimelineId, + tenantid: ZTenantId, + seg: SegmentTag, + lsn: Lsn, + base_images: Vec, + ) -> Result { + + let layer = ImageLayer { + conf: conf, + timelineid: timelineid, + tenantid: tenantid, + seg: seg, + lsn: lsn, + inner: Mutex::new(ImageLayerInner { + loaded: true, + base_images: base_images, + }), + }; + let inner = layer.inner.lock().unwrap(); + + // Write the images into a file + let path = layer.path(); + + // Note: This overwrites any existing file. There shouldn't be any. + // FIXME: throw an error instead? + let file = File::create(&path)?; + let book = BookWriter::new(file, IMAGE_FILE_MAGIC)?; + + // Write out the base images + let mut chapter = book.new_chapter(BASE_IMAGES_CHAPTER); + let buf = Vec::ser(&inner.base_images)?; + + chapter.write_all(&buf)?; + let book = chapter.close()?; + + book.close()?; + + trace!("saved {}", &path.display()); + + drop(inner); + + Ok(layer) + } + + pub fn create_from_src( + conf: &'static PageServerConf, + timeline: &LayeredTimeline, + src: &dyn Layer, + lsn: Lsn, + ) -> Result { + let seg = src.get_seg_tag(); + let timelineid = timeline.timelineid; + + let startblk; + let size; + if seg.rel.is_blocky() { + size = src.get_seg_size(lsn)?; + startblk = seg.segno * RELISH_SEG_SIZE; + } else { + size = 1; + startblk = 0; + } + + trace!( + "creating new ImageLayer for {} on timeline {} at {}", + seg, + timelineid, + lsn, + ); + + let mut base_images: Vec = Vec::new(); + for blknum in startblk..(startblk+size) { + let img = + RECONSTRUCT_TIME + .observe_closure_duration(|| { + timeline.materialize_page(seg, blknum, lsn, &*src) + })?; + + base_images.push(img); + } + + Self::create(conf, timelineid, timeline.tenantid, seg, lsn, + base_images) + } + + + /// + /// Load the contents of the file into memory + /// + fn load(&self) -> Result> { + // quick exit if already loaded + let mut inner = self.inner.lock().unwrap(); + + if inner.loaded { + return Ok(inner); + } + + let path = Self::path_for( + self.conf, + self.timelineid, + self.tenantid, + &ImageFileName { + seg: self.seg, + lsn: self.lsn, + }, + ); + + let file = File::open(&path)?; + let book = Book::new(file)?; + + let chapter = book.read_chapter(BASE_IMAGES_CHAPTER)?; + let base_images = Vec::des(&chapter)?; + + debug!("loaded from {}", &path.display()); + + *inner = ImageLayerInner { + loaded: true, + base_images, + }; + + Ok(inner) + } + + /// Create an ImageLayer represent a file on disk + pub 
fn load_image_layer( + conf: &'static PageServerConf, + timelineid: ZTimelineId, + tenantid: ZTenantId, + filename: &ImageFileName, + ) -> Result { + let layer = ImageLayer { + conf, + timelineid, + tenantid, + seg: filename.seg, + lsn: filename.lsn, + inner: Mutex::new(ImageLayerInner { + loaded: false, + base_images: Vec::new(), + }), + }; + + Ok(layer) + } + + /// debugging function to print out the contents of the layer + #[allow(unused)] + pub fn dump(&self) -> String { + let mut result = format!( + "----- image layer for {} at {} ----\n", + self.seg, self.lsn, + ); + + //let inner = self.inner.lock().unwrap(); + + //for (k, v) in inner.page_versions.iter() { + // result += &format!("blk {} at {}: {}/{}\n", k.0, k.1, v.page_image.is_some(), v.record.is_some()); + //} + + result + } +} diff --git a/pageserver/src/layered_repository/inmemory_layer.rs b/pageserver/src/layered_repository/inmemory_layer.rs index 7266879a43..8eba41af5e 100644 --- a/pageserver/src/layered_repository/inmemory_layer.rs +++ b/pageserver/src/layered_repository/inmemory_layer.rs @@ -6,8 +6,7 @@ use crate::layered_repository::storage_layer::{ Layer, PageReconstructData, PageVersion, SegmentTag, RELISH_SEG_SIZE, }; use crate::layered_repository::LayeredTimeline; -use crate::layered_repository::SnapshotLayer; -use crate::repository::WALRecord; +use crate::layered_repository::{ImageLayer, SnapshotLayer}; use crate::PageServerConf; use crate::{ZTenantId, ZTimelineId}; use anyhow::{bail, Result}; @@ -37,14 +36,14 @@ pub struct InMemoryLayer { /// The above fields never change. The parts that do change are in 'inner', /// and protected by mutex. inner: Mutex, + + img_layer: Option>, } pub struct InMemoryLayerInner { /// If this relation was dropped, remember when that happened. drop_lsn: Option, - base_images: Vec, - /// /// All versions of all pages in the layer are are kept here. /// Indexed by block number and LSN. @@ -130,13 +129,11 @@ impl Layer for InMemoryLayer { } // Use the base image, if needed - if need_base_image_lsn.is_some() { - let base_blknum: usize = (blknum % RELISH_SEG_SIZE) as usize; - if let Some(img) = inner.base_images.get(base_blknum) { - reconstruct_data.page_img = Some(img.clone()); - need_base_image_lsn = None; + if let Some(need_lsn) = need_base_image_lsn { + if let Some(img_layer) = &self.img_layer { + need_base_image_lsn = img_layer.get_page_reconstruct_data(blknum, need_lsn, reconstruct_data)?; } else { - bail!("inmem: no base img found for {} at blk {} at LSN {}", self.seg, base_blknum, lsn); + bail!("no base img found for {} at blk {} at LSN {}", self.seg, blknum, lsn); } } @@ -157,8 +154,11 @@ impl Layer for InMemoryLayer { let result; if let Some((_entry_lsn, entry)) = iter.next_back() { result = *entry; + // Use the base image if needed + } else if let Some(img_layer) = &self.img_layer { + result = img_layer.get_seg_size(lsn)?; } else { - result = inner.base_images.len() as u32; + result = 0; } trace!("get_seg_size: {} at {} -> {}", self.seg, lsn, result); Ok(result) @@ -178,6 +178,23 @@ impl Layer for InMemoryLayer { // Otherwise, it exists Ok(true) } + + + /// + /// Release most of the memory used by this layer. If it's accessed again later, + /// it will need to be loaded back. 
+ /// + fn unload(&self) -> Result<()> { + Ok(()) + } + + fn delete(&self) -> Result<()> { + Ok(()) + } + + fn is_incremental(&self) -> bool { + true + } } impl InMemoryLayer { @@ -213,11 +230,11 @@ impl InMemoryLayer { oldest_pending_lsn, inner: Mutex::new(InMemoryLayerInner { drop_lsn: None, - base_images: Vec::new(), page_versions: BTreeMap::new(), segsizes: BTreeMap::new(), mem_used: 0, }), + img_layer: None, }) } @@ -285,8 +302,10 @@ impl InMemoryLayer { let oldsize; if let Some((_entry_lsn, entry)) = iter.next_back() { oldsize = *entry; + } else if let Some(img_layer) = &self.img_layer { + oldsize = img_layer.get_seg_size(lsn)?; } else { - oldsize = inner.base_images.len() as u32; + oldsize = 0; //bail!("No old size found for {} at {}", self.tag, lsn); } if newsize > oldsize { @@ -333,60 +352,37 @@ impl InMemoryLayer { /// Initialize a new InMemoryLayer for, by copying the state at the given /// point in time from given existing layer. /// - pub fn copy_snapshot( + pub fn create_successor_layer( conf: &'static PageServerConf, - timeline: &LayeredTimeline, - src: &dyn Layer, + src: Arc, timelineid: ZTimelineId, tenantid: ZTenantId, start_lsn: Lsn, oldest_pending_lsn: Lsn, ) -> Result { - let mut mem_used = 0; - let seg = src.get_seg_tag(); - let startblk; - let size; - if seg.rel.is_blocky() { - size = src.get_seg_size(start_lsn)?; - startblk = seg.segno * RELISH_SEG_SIZE; - } else { - size = 1; - startblk = 0; - } - trace!( - "initializing new InMemoryLayer for writing {} on timeline {} at {}, size {}", - src.get_seg_tag(), + "initializing new InMemoryLayer for writing {} on timeline {} at {}", + seg, timelineid, start_lsn, - size, ); - let mut base_images: Vec = Vec::new(); - for blknum in startblk..(startblk+size) { - let img = timeline.materialize_page(seg, blknum, start_lsn, src)?; - - mem_used += img.len(); - - base_images.push(img); - } - Ok(InMemoryLayer { conf, timelineid, tenantid, - seg: src.get_seg_tag(), + seg, start_lsn, oldest_pending_lsn, inner: Mutex::new(InMemoryLayerInner { drop_lsn: None, - base_images: base_images, page_versions: BTreeMap::new(), segsizes: BTreeMap::new(), - mem_used: mem_used, + mem_used: 0, }), + img_layer: Some(src), }) } @@ -406,7 +402,7 @@ impl InMemoryLayer { cutoff_lsn: Lsn, // This is needed just to call materialize_page() timeline: &LayeredTimeline, - ) -> Result<(Option>, Option>)> { + ) -> Result<(Vec>, Option>)> { info!( "freezing in memory layer for {} on timeline {} at {}", self.seg, self.timelineid, cutoff_lsn @@ -428,7 +424,6 @@ impl InMemoryLayer { }; // Divide all the page versions into old and new at the 'end_lsn' cutoff point. - let before_base_images = inner.base_images.clone(); let mut before_page_versions; let mut before_segsizes; let mut after_page_versions; @@ -463,6 +458,18 @@ impl InMemoryLayer { // we can release the lock now. drop(inner); + let mut historics: Vec> = Vec::new(); + + // write a new base image layer at the cutoff point + let imgfile = ImageLayer::create_from_src( + self.conf, + timeline, + self, + end_lsn, + )?; + let imgfile_rc: Arc = Arc::new(imgfile); + historics.push(Arc::clone(&imgfile_rc)); + // Write the page versions before the cutoff to disk. 
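        // At this point the page versions have been divided at 'end_lsn'. The
        // new ImageLayer above holds a full image of the segment at the cutoff
        // and will serve as the base for any successor in-memory layer; the
        // SnapshotLayer written next stores only the pre-cutoff deltas, keeping
        // a reference to this layer's own base image through 'img_layer'.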
let snapfile = SnapshotLayer::create( self.conf, @@ -472,10 +479,12 @@ impl InMemoryLayer { self.start_lsn, end_lsn, dropped, - before_base_images, + self.img_layer.clone(), before_page_versions, before_segsizes, )?; + let snapfile_rc: Arc = Arc::new(snapfile); + historics.push(snapfile_rc); // If there were any "new" page versions, initialize a new in-memory layer to hold // them @@ -483,10 +492,9 @@ impl InMemoryLayer { if !after_segsizes.is_empty() || !after_page_versions.is_empty() { info!("created new in-mem layer for {} {}-", self.seg, end_lsn); - let new_open = Self::copy_snapshot( + let new_open = Self::create_successor_layer( self.conf, - timeline, - &snapfile, + imgfile_rc, self.timelineid, self.tenantid, end_lsn, @@ -502,9 +510,7 @@ impl InMemoryLayer { None }; - let new_historic = Some(Arc::new(snapfile)); - - Ok((new_historic, new_open)) + Ok((historics, new_open)) } /// debugging function to print out the contents of the layer diff --git a/pageserver/src/layered_repository/layer_map.rs b/pageserver/src/layered_repository/layer_map.rs index c052816e9b..78d6da3312 100644 --- a/pageserver/src/layered_repository/layer_map.rs +++ b/pageserver/src/layered_repository/layer_map.rs @@ -3,14 +3,14 @@ //! //! When the timeline is first accessed, the server lists of all snapshot files //! in the timelines/ directory, and populates this map with -//! SnapshotLayers corresponding to each file. When new WAL is received, +//! SnapshotLayers corresponding to each file. When new WAL is received, FIXME //! we create InMemoryLayers to hold the incoming records. Now and then, //! in the checkpoint() function, the in-memory layers are frozen, forming //! new snapshot layers and corresponding files are written to disk. //! use crate::layered_repository::storage_layer::{Layer, SegmentTag}; -use crate::layered_repository::{InMemoryLayer, SnapshotLayer}; +use crate::layered_repository::{InMemoryLayer}; use crate::relish::*; use anyhow::Result; use lazy_static::lazy_static; @@ -48,7 +48,7 @@ pub struct LayerMap { struct SegEntry { pub open: Option>, - pub historic: BTreeMap>, + pub historic: BTreeMap>, } struct OpenSegEntry { @@ -160,7 +160,7 @@ impl LayerMap { /// /// Insert an on-disk layer /// - pub fn insert_historic(&mut self, layer: Arc) { + pub fn insert_historic(&mut self, layer: Arc) { let tag = layer.get_seg_tag(); let start_lsn = layer.get_start_lsn(); @@ -184,7 +184,7 @@ impl LayerMap { /// /// This should be called when the corresponding file on disk has been deleted. /// - pub fn remove_historic(&mut self, layer: &SnapshotLayer) { + pub fn remove_historic(&mut self, layer: &dyn Layer) { let tag = layer.get_seg_tag(); let start_lsn = layer.get_start_lsn(); @@ -230,14 +230,16 @@ impl LayerMap { /// Is there a newer layer for given segment? 
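    /// The open in-memory layer never counts here, as it is always
    /// incremental, and incremental historic layers are skipped as well:
    /// they still depend on older data, so they cannot justify removing it.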
pub fn newer_layer_exists(&self, seg: SegmentTag, lsn: Lsn) -> bool { if let Some(segentry) = self.segs.get(&seg) { - if let Some(_open) = &segentry.open { - return true; - } + // open layer is always incremental so it doesn't count for (newer_lsn, layer) in segentry .historic .range((Included(lsn), Included(Lsn(u64::MAX)))) { + // FIXME: incremental layers don't count + if layer.is_incremental() { + continue; + } if layer.get_end_lsn() > lsn { trace!( "found later layer for {}, {} {}-{}", @@ -284,11 +286,11 @@ impl Default for LayerMap { pub struct HistoricLayerIter<'a> { segiter: std::collections::hash_map::Iter<'a, SegmentTag, SegEntry>, - iter: Option>>, + iter: Option>>, } impl<'a> Iterator for HistoricLayerIter<'a> { - type Item = Arc; + type Item = Arc; fn next(&mut self) -> std::option::Option<::Item> { loop { diff --git a/pageserver/src/layered_repository/snapshot_layer.rs b/pageserver/src/layered_repository/snapshot_layer.rs index 34f69fb93c..bc97be9835 100644 --- a/pageserver/src/layered_repository/snapshot_layer.rs +++ b/pageserver/src/layered_repository/snapshot_layer.rs @@ -42,11 +42,9 @@ use crate::layered_repository::storage_layer::{ Layer, PageReconstructData, PageVersion, SegmentTag, }; use crate::layered_repository::filename::{SnapshotFileName}; -use crate::layered_repository::RELISH_SEG_SIZE; use crate::PageServerConf; use crate::{ZTenantId, ZTimelineId}; use anyhow::{bail, Result}; -use bytes::Bytes; use log::*; use std::collections::BTreeMap; use std::fs; @@ -54,7 +52,7 @@ use std::fs::File; use std::io::Write; use std::ops::Bound::Included; use std::path::PathBuf; -use std::sync::{Mutex, MutexGuard}; +use std::sync::{Arc, Mutex, MutexGuard}; use bookfile::{Book, BookWriter}; @@ -64,9 +62,8 @@ use zenith_utils::lsn::Lsn; // Magic constant to identify a Zenith snapshot file static SNAPSHOT_FILE_MAGIC: u32 = 0x5A616E01; -static BASE_IMAGES_CHAPTER: u64 = 1; -static PAGE_VERSIONS_CHAPTER: u64 = 2; -static REL_SIZES_CHAPTER: u64 = 3; +static PAGE_VERSIONS_CHAPTER: u64 = 1; +static REL_SIZES_CHAPTER: u64 = 2; /// /// SnapshotLayer is the in-memory data structure associated with an @@ -91,6 +88,8 @@ pub struct SnapshotLayer { dropped: bool, inner: Mutex, + + img_layer: Option>, } pub struct SnapshotLayerInner { @@ -98,9 +97,6 @@ pub struct SnapshotLayerInner { /// loaded into memory yet. loaded: bool, - // indexed by block number (within segment) - base_images: Vec, - /// All versions of all pages in the file are are kept here. /// Indexed by block number and LSN. 
page_versions: BTreeMap<(u32, Lsn), PageVersion>, @@ -167,13 +163,11 @@ impl Layer for SnapshotLayer { } // Use the base image, if needed - if need_base_image_lsn.is_some() { - let base_blknum: usize = (blknum % RELISH_SEG_SIZE) as usize; - if let Some(img) = inner.base_images.get(base_blknum) { - reconstruct_data.page_img = Some(img.clone()); - need_base_image_lsn = None; + if let Some(need_lsn) = need_base_image_lsn { + if let Some(img_layer) = &self.img_layer { + need_base_image_lsn = img_layer.get_page_reconstruct_data(blknum, need_lsn, reconstruct_data)?; } else { - bail!("no base img found for {} at blk {} at LSN {}", self.seg, base_blknum, lsn); + bail!("no base img found for {} at blk {} at LSN {}", self.seg, blknum, lsn); } } @@ -195,10 +189,12 @@ impl Layer for SnapshotLayer { let result; if let Some((_entry_lsn, entry)) = iter.next_back() { result = *entry; + // Use the base image if needed + } else if let Some(img_layer) = &self.img_layer { + result = img_layer.get_seg_size(lsn)?; } else { - result = inner.base_images.len() as u32; + result = 0; } - info!("get_seg_size: {} at {} -> {}", self.seg, lsn, result); Ok(result) } @@ -212,6 +208,28 @@ impl Layer for SnapshotLayer { // Otherwise, it exists. Ok(true) } + + /// + /// Release most of the memory used by this layer. If it's accessed again later, + /// it will need to be loaded back. + /// + fn unload(&self) -> Result<()> { + let mut inner = self.inner.lock().unwrap(); + inner.page_versions = BTreeMap::new(); + inner.relsizes = BTreeMap::new(); + inner.loaded = false; + Ok(()) + } + + fn delete(&self) -> Result<()> { + // delete underlying file + fs::remove_file(self.path())?; + Ok(()) + } + + fn is_incremental(&self) -> bool { + true + } } impl SnapshotLayer { @@ -253,7 +271,7 @@ impl SnapshotLayer { start_lsn: Lsn, end_lsn: Lsn, dropped: bool, - base_images: Vec, + img_layer: Option>, page_versions: BTreeMap<(u32, Lsn), PageVersion>, relsizes: BTreeMap, ) -> Result { @@ -268,10 +286,10 @@ impl SnapshotLayer { dropped, inner: Mutex::new(SnapshotLayerInner { loaded: true, - base_images: base_images, page_versions: page_versions, relsizes: relsizes, }), + img_layer, }; let inner = snapfile.inner.lock().unwrap(); @@ -283,13 +301,6 @@ impl SnapshotLayer { let file = File::create(&path)?; let book = BookWriter::new(file, SNAPSHOT_FILE_MAGIC)?; - // Write out the base images - let mut chapter = book.new_chapter(BASE_IMAGES_CHAPTER); - let buf = Vec::ser(&inner.base_images)?; - - chapter.write_all(&buf)?; - let book = chapter.close()?; - // Write out the other page versions let mut chapter = book.new_chapter(PAGE_VERSIONS_CHAPTER); let buf = BTreeMap::ser(&inner.page_versions)?; @@ -337,9 +348,6 @@ impl SnapshotLayer { let file = File::open(&path)?; let book = Book::new(file)?; - let chapter = book.read_chapter(BASE_IMAGES_CHAPTER)?; - let base_images = Vec::des(&chapter)?; - let chapter = book.read_chapter(PAGE_VERSIONS_CHAPTER)?; let page_versions = BTreeMap::des(&chapter)?; @@ -350,7 +358,6 @@ impl SnapshotLayer { *inner = SnapshotLayerInner { loaded: true, - base_images, page_versions, relsizes, }; @@ -377,34 +384,16 @@ impl SnapshotLayer { dropped: filename.dropped, inner: Mutex::new(SnapshotLayerInner { loaded: false, - base_images: Vec::new(), page_versions: BTreeMap::new(), relsizes: BTreeMap::new(), }), + // FIXME: This doesn't work across restarts. 
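            // The file name records only the segment and the LSN range, not
            // which image layer the deltas were based on, so the link cannot
            // be re-established when loading from disk; reads that need a
            // base image from this layer will fail until that is fixed.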
+ img_layer: None, }; Ok(snapfile) } - pub fn delete(&self) -> Result<()> { - // delete underlying file - fs::remove_file(self.path())?; - Ok(()) - } - - /// - /// Release most of the memory used by this layer. If it's accessed again later, - /// it will need to be loaded back. - /// - pub fn unload(&self) -> Result<()> { - let mut inner = self.inner.lock().unwrap(); - inner.base_images = Vec::new(); - inner.page_versions = BTreeMap::new(); - inner.relsizes = BTreeMap::new(); - inner.loaded = false; - Ok(()) - } - /// debugging function to print out the contents of the layer #[allow(unused)] pub fn dump(&self) -> String { diff --git a/pageserver/src/layered_repository/storage_layer.rs b/pageserver/src/layered_repository/storage_layer.rs index 142bf76b96..167aaaecde 100644 --- a/pageserver/src/layered_repository/storage_layer.rs +++ b/pageserver/src/layered_repository/storage_layer.rs @@ -147,4 +147,9 @@ pub trait Layer: Send + Sync { fn get_seg_size(&self, lsn: Lsn) -> Result; fn get_seg_exists(&self, lsn: Lsn) -> Result; + + fn is_incremental(&self) -> bool; + + fn unload(&self) -> Result<()>; + fn delete(&self) -> Result<()>; } From 3319befc3052be3347b1346c56c5baabf1791dba Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Tue, 17 Aug 2021 19:20:27 +0300 Subject: [PATCH 17/24] Revert a bunch of commits that I pushed by accident This reverts commits: e35a5aa550c8520e7ba4b66e5f8ae979502cf87a a389c2ed7f189075f93671673637e372195ad00a 11ebcb531f47af10bc8827d00c118fa31163d99a 8d2b61f4d125d03ae102382a1c21bad38ea15e86 882f549236e9b1cb5b3623cb6ab3ce8fb6f8f68f ddb7155bbecc3abcb1d5fedb79511673700dca1d Those were follow-up work on top of PR https://github.com/zenithdb/zenith/pull/430, but they were still very much not ready. --- pageserver/src/layered_repository.rs | 137 +++---- pageserver/src/layered_repository/filename.rs | 306 -------------- .../src/layered_repository/image_layer.rs | 384 ------------------ .../src/layered_repository/inmemory_layer.rs | 178 +++----- .../src/layered_repository/layer_map.rs | 137 +++---- .../src/layered_repository/snapshot_layer.rs | 270 ++++++++---- .../src/layered_repository/storage_layer.rs | 27 -- 7 files changed, 367 insertions(+), 1072 deletions(-) delete mode 100644 pageserver/src/layered_repository/filename.rs delete mode 100644 pageserver/src/layered_repository/image_layer.rs diff --git a/pageserver/src/layered_repository.rs b/pageserver/src/layered_repository.rs index c81625088d..cc78086468 100644 --- a/pageserver/src/layered_repository.rs +++ b/pageserver/src/layered_repository.rs @@ -35,19 +35,15 @@ use crate::PageServerConf; use crate::{ZTenantId, ZTimelineId}; use zenith_metrics::{register_histogram_vec, HistogramVec}; -use zenith_metrics::{register_histogram, Histogram}; use zenith_utils::bin_ser::BeSer; use zenith_utils::lsn::{AtomicLsn, Lsn}; use zenith_utils::seqwait::SeqWait; -mod filename; -mod image_layer; mod inmemory_layer; mod layer_map; mod snapshot_layer; mod storage_layer; -use image_layer::ImageLayer; use inmemory_layer::InMemoryLayer; use layer_map::LayerMap; use snapshot_layer::SnapshotLayer; @@ -58,14 +54,14 @@ static ZERO_PAGE: Bytes = Bytes::from_static(&[0u8; 8192]); // Timeout when waiting for WAL receiver to catch up to an LSN given in a GetPage@LSN call. static TIMEOUT: Duration = Duration::from_secs(60); -// Flush out an inmemory layer, if it's holding WAL older than -// this. This puts a backstop on how much WAL needs to be re-digested -// if the page server is restarted. 
+// Perform a checkpoint in the GC thread, when the LSN has advanced this much since +// last checkpoint. This puts a backstop on how much WAL needs to be re-digested if +// the page server is restarted. // // FIXME: This current value is very low. I would imagine something like 1 GB or 10 GB // would be more appropriate. But a low value forces the code to be exercised more, // which is good for now to trigger bugs. -static OLDEST_INMEM_DISTANCE: u64 = 16 * 1024 * 1024; +static CHECKPOINT_INTERVAL: u64 = 16 * 1024 * 1024; // Metrics collected on operations on the storage repository. lazy_static! { @@ -77,16 +73,6 @@ lazy_static! { .expect("failed to define a metric"); } - -// Metrics collected on operations on the storage repository. -lazy_static! { - static ref RECONSTRUCT_TIME: Histogram = register_histogram!( - "pageserver_getpage_reconstruct_time", - "FIXME Time spent on storage operations" - ) - .expect("failed to define a metric"); -} - /// /// Repository consists of multiple timelines. Keep them in a hash table. /// @@ -275,11 +261,11 @@ impl LayeredRepository { { let timelines = self.timelines.lock().unwrap(); for (_timelineid, timeline) in timelines.iter() { - STORAGE_TIME - .with_label_values(&["checkpoint_timed"]) - .observe_closure_duration( - || timeline.checkpoint_internal(false) - )? + let distance = u64::from(timeline.last_valid_lsn.load()) + - u64::from(timeline.last_checkpoint_lsn.load()); + if distance > CHECKPOINT_INTERVAL { + timeline.checkpoint()?; + } } // release lock on 'timelines' } @@ -470,7 +456,7 @@ pub struct LayeredTimeline { last_record_lsn: AtomicLsn, prev_record_lsn: AtomicLsn, - oldest_pending_lsn: AtomicLsn, + last_checkpoint_lsn: AtomicLsn, // Parent timeline that this timeline was branched from, and the LSN // of the branch point. @@ -499,10 +485,7 @@ impl Timeline for LayeredTimeline { let seg = SegmentTag::from_blknum(rel, blknum); if let Some((layer, lsn)) = self.get_layer_for_read(seg, lsn)? { - RECONSTRUCT_TIME - .observe_closure_duration(|| { - self.materialize_page(seg, blknum, lsn, &*layer) - }) + self.materialize_page(seg, blknum, lsn, &*layer) } else { bail!("relish {} not found at {}", rel, lsn); } @@ -791,8 +774,8 @@ impl Timeline for LayeredTimeline { /// metrics collection. 
fn checkpoint(&self) -> Result<()> { STORAGE_TIME - .with_label_values(&["checkpoint_force"]) - .observe_closure_duration(|| self.checkpoint_internal(true)) + .with_label_values(&["checkpoint"]) + .observe_closure_duration(|| self.checkpoint_internal()) } /// Remember that WAL has been received and added to the page cache up to the given LSN @@ -884,7 +867,7 @@ impl LayeredTimeline { last_valid_lsn: SeqWait::new(metadata.last_valid_lsn), last_record_lsn: AtomicLsn::new(metadata.last_record_lsn.0), prev_record_lsn: AtomicLsn::new(metadata.prev_record_lsn.0), - oldest_pending_lsn: AtomicLsn::new(metadata.last_valid_lsn.0), + last_checkpoint_lsn: AtomicLsn::new(metadata.last_valid_lsn.0), ancestor_timeline: ancestor, ancestor_lsn: metadata.ancestor_lsn, @@ -901,33 +884,19 @@ impl LayeredTimeline { self.timelineid ); let mut layers = self.layers.lock().unwrap(); - let (snapfilenames, imgfilenames) = - filename::list_snapshot_files(self.conf, self.timelineid, self.tenantid)?; - - for filename in imgfilenames.iter() { - let layer = ImageLayer::load_image_layer(self.conf, self.timelineid, self.tenantid, filename)?; - - info!( - "found layer {} {} on timeline {}", - layer.get_seg_tag(), - layer.get_start_lsn(), - self.timelineid - ); - layers.insert_historic(Arc::new(layer)); - } - - for filename in snapfilenames.iter() { - let layer = SnapshotLayer::load_snapshot_layer(self.conf, self.timelineid, self.tenantid, filename)?; + let snapfiles = + SnapshotLayer::list_snapshot_files(self.conf, self.timelineid, self.tenantid)?; + for layer_rc in snapfiles.iter() { info!( "found layer {} {}-{} {} on timeline {}", - layer.get_seg_tag(), - layer.get_start_lsn(), - layer.get_end_lsn(), - layer.is_dropped(), + layer_rc.get_seg_tag(), + layer_rc.get_start_lsn(), + layer_rc.get_end_lsn(), + layer_rc.is_dropped(), self.timelineid ); - layers.insert_historic(Arc::new(layer)); + layers.insert_historic(Arc::clone(layer_rc)); } Ok(()) @@ -1034,23 +1003,23 @@ impl LayeredTimeline { let layer; if let Some((prev_layer, _prev_lsn)) = self.get_layer_for_read(seg, lsn)? { // Create new entry after the previous one. - let start_lsn; + let lsn; if prev_layer.get_timeline_id() != self.timelineid { // First modification on this timeline - start_lsn = self.ancestor_lsn; + lsn = self.ancestor_lsn; trace!( "creating file for write for {} at branch point {}/{}", seg, self.timelineid, - start_lsn + lsn ); } else { - start_lsn = prev_layer.get_end_lsn(); + lsn = prev_layer.get_end_lsn(); trace!( "creating file for write for {} after previous layer {}/{}", seg, self.timelineid, - start_lsn + lsn ); } trace!( @@ -1059,12 +1028,12 @@ impl LayeredTimeline { prev_layer.get_start_lsn(), prev_layer.get_end_lsn() ); - layer = InMemoryLayer::create_successor_layer( + layer = InMemoryLayer::copy_snapshot( self.conf, - prev_layer, + &self, + &*prev_layer, self.timelineid, self.tenantid, - start_lsn, lsn, )?; } else { @@ -1076,7 +1045,7 @@ impl LayeredTimeline { lsn ); - layer = InMemoryLayer::create(self.conf, self.timelineid, self.tenantid, seg, lsn, lsn)?; + layer = InMemoryLayer::create(self.conf, self.timelineid, self.tenantid, seg, lsn)?; } let mut layers = self.layers.lock().unwrap(); @@ -1119,7 +1088,7 @@ impl LayeredTimeline { /// /// NOTE: This has nothing to do with checkpoint in PostgreSQL. We don't /// know anything about them here in the repository. 
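// The revert also restores the old checkpoint trigger: checkpoint once the
// WAL distance since the last checkpoint exceeds CHECKPOINT_INTERVAL. The
// test reduces to the following, shown as a hypothetical helper with Lsn
// simplified to u64:

    // True when enough WAL has accumulated since the last checkpoint.
    fn needs_checkpoint(last_valid_lsn: u64, last_checkpoint_lsn: u64, interval: u64) -> bool {
        last_valid_lsn - last_checkpoint_lsn > interval
    }
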
- fn checkpoint_internal(&self, force: bool) -> Result<()> { + fn checkpoint_internal(&self) -> Result<()> { let last_valid_lsn = self.last_valid_lsn.load(); let last_record_lsn = self.last_record_lsn.load(); let prev_record_lsn = self.prev_record_lsn.load(); @@ -1161,34 +1130,22 @@ impl LayeredTimeline { // Call freeze() on any unfrozen layers (that is, layers that haven't // been written to disk yet). // Call unload() on all frozen layers, to release memory. - - let mut oldest_pending_lsn = last_valid_lsn; - - while let Some(oldest_layer) = layers.get_oldest_open_layer() { - - oldest_pending_lsn = oldest_layer.get_oldest_pending_lsn(); - let distance = last_valid_lsn.0 - oldest_pending_lsn.0; - if !force && distance < OLDEST_INMEM_DISTANCE { - info!("the oldest layer is now {} which is {} bytes behind last_valid_lsn", - oldest_layer.get_seg_tag(), distance); - break; - } - - let (new_historics, new_open) = oldest_layer.freeze(last_valid_lsn, &self)?; + let mut iter = layers.iter_open_layers(); + while let Some(layer) = iter.next() { + let (new_historic, new_open) = layer.freeze(last_valid_lsn, &self)?; // replace this layer with the new layers that 'freeze' returned - layers.pop_oldest(); - if let Some(n) = new_open { - layers.insert_open(n); - } - for historic in new_historics { + // (removes it if new_open is None) + iter.replace(new_open); + + if let Some(historic) = new_historic { trace!( "freeze returned layer {} {}-{}", historic.get_seg_tag(), historic.get_start_lsn(), historic.get_end_lsn() ); - layers.insert_historic(historic); + iter.insert_historic(historic); } } @@ -1214,7 +1171,7 @@ impl LayeredTimeline { }; LayeredRepository::save_metadata(self.conf, self.timelineid, self.tenantid, &metadata)?; - self.oldest_pending_lsn.store(oldest_pending_lsn); + self.last_checkpoint_lsn.store(last_valid_lsn); Ok(()) } @@ -1245,20 +1202,22 @@ impl LayeredTimeline { let now = Instant::now(); let mut result: GcResult = Default::default(); + // Scan all snapshot files in the directory. For each file, if a newer file + // exists, we can remove the old one. + self.checkpoint()?; + + let mut layers = self.layers.lock().unwrap(); + info!( "running GC on timeline {}, cutoff {}", self.timelineid, cutoff ); - let mut layers_to_remove: Vec> = Vec::new(); + let mut layers_to_remove: Vec> = Vec::new(); - // Scan all snapshot files in the directory. For each file, if a newer file - // exists, we can remove the old one. - // // Determine for each file if it needs to be retained // FIXME: also scan open in-memory layers. 
Normally we cannot remove the // latest layer of any seg, but if it was unlinked it's possible - let mut layers = self.layers.lock().unwrap(); 'outer: for l in layers.iter_historic_layers() { let seg = l.get_seg_tag(); diff --git a/pageserver/src/layered_repository/filename.rs b/pageserver/src/layered_repository/filename.rs deleted file mode 100644 index f807aa599a..0000000000 --- a/pageserver/src/layered_repository/filename.rs +++ /dev/null @@ -1,306 +0,0 @@ -use crate::layered_repository::storage_layer::{SegmentTag}; -use crate::relish::*; -use crate::PageServerConf; -use crate::{ZTenantId, ZTimelineId}; -use std::fmt; -use std::fs; - -use anyhow::{Result}; -use log::*; -use zenith_utils::lsn::Lsn; - -#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)] -pub struct SnapshotFileName { - pub seg: SegmentTag, - pub start_lsn: Lsn, - pub end_lsn: Lsn, - pub dropped: bool, -} - -impl SnapshotFileName { - fn from_str(fname: &str) -> Option { - // Split the filename into parts - // - // ______ - // - // or if it was dropped: - // - // _______DROPPED - // - let rel; - let mut parts; - if let Some(rest) = fname.strip_prefix("rel_") { - parts = rest.split('_'); - rel = RelishTag::Relation(RelTag { - spcnode: parts.next()?.parse::().ok()?, - dbnode: parts.next()?.parse::().ok()?, - relnode: parts.next()?.parse::().ok()?, - forknum: parts.next()?.parse::().ok()?, - }); - } else if let Some(rest) = fname.strip_prefix("pg_xact_") { - parts = rest.split('_'); - rel = RelishTag::Slru { - slru: SlruKind::Clog, - segno: u32::from_str_radix(parts.next()?, 16).ok()?, - }; - } else if let Some(rest) = fname.strip_prefix("pg_multixact_members_") { - parts = rest.split('_'); - rel = RelishTag::Slru { - slru: SlruKind::MultiXactMembers, - segno: u32::from_str_radix(parts.next()?, 16).ok()?, - }; - } else if let Some(rest) = fname.strip_prefix("pg_multixact_offsets_") { - parts = rest.split('_'); - rel = RelishTag::Slru { - slru: SlruKind::MultiXactOffsets, - segno: u32::from_str_radix(parts.next()?, 16).ok()?, - }; - } else if let Some(rest) = fname.strip_prefix("pg_filenodemap_") { - parts = rest.split('_'); - rel = RelishTag::FileNodeMap { - spcnode: parts.next()?.parse::().ok()?, - dbnode: parts.next()?.parse::().ok()?, - }; - } else if let Some(rest) = fname.strip_prefix("pg_twophase_") { - parts = rest.split('_'); - rel = RelishTag::TwoPhase { - xid: parts.next()?.parse::().ok()?, - }; - } else if let Some(rest) = fname.strip_prefix("pg_control_checkpoint_") { - parts = rest.split('_'); - rel = RelishTag::Checkpoint; - } else if let Some(rest) = fname.strip_prefix("pg_control_") { - parts = rest.split('_'); - rel = RelishTag::ControlFile; - } else { - return None; - } - - let segno = parts.next()?.parse::().ok()?; - - let seg = SegmentTag { - rel, - segno - }; - - let start_lsn = Lsn::from_hex(parts.next()?).ok()?; - let end_lsn = Lsn::from_hex(parts.next()?).ok()?; - - let mut dropped = false; - if let Some(suffix) = parts.next() { - if suffix == "DROPPED" { - dropped = true; - } else { - warn!("unrecognized filename in timeline dir: {}", fname); - return None; - } - } - if parts.next().is_some() { - warn!("unrecognized filename in timeline dir: {}", fname); - return None; - } - - Some(SnapshotFileName { - seg, - start_lsn, - end_lsn, - dropped, - }) - } - - fn to_string(&self) -> String { - let basename = match self.seg.rel { - RelishTag::Relation(reltag) => format!( - "rel_{}_{}_{}_{}", - reltag.spcnode, reltag.dbnode, reltag.relnode, reltag.forknum - ), - RelishTag::Slru { - slru: SlruKind::Clog, - 
segno, - } => format!("pg_xact_{:04X}", segno), - RelishTag::Slru { - slru: SlruKind::MultiXactMembers, - segno, - } => format!("pg_multixact_members_{:04X}", segno), - RelishTag::Slru { - slru: SlruKind::MultiXactOffsets, - segno, - } => format!("pg_multixact_offsets_{:04X}", segno), - RelishTag::FileNodeMap { spcnode, dbnode } => { - format!("pg_filenodemap_{}_{}", spcnode, dbnode) - } - RelishTag::TwoPhase { xid } => format!("pg_twophase_{}", xid), - RelishTag::Checkpoint => format!("pg_control_checkpoint"), - RelishTag::ControlFile => format!("pg_control"), - }; - - format!( - "{}_{}_{:016X}_{:016X}{}", - basename, - self.seg.segno, - u64::from(self.start_lsn), - u64::from(self.end_lsn), - if self.dropped { "_DROPPED" } else { "" } - ) - } -} - -impl fmt::Display for SnapshotFileName { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.to_string()) - } -} - -#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)] -pub struct ImageFileName { - pub seg: SegmentTag, - pub lsn: Lsn, -} - -impl ImageFileName { - fn from_str(fname: &str) -> Option { - // Split the filename into parts - // - // ______ - // - // or if it was dropped: - // - // _______DROPPED - // - let rel; - let mut parts; - if let Some(rest) = fname.strip_prefix("rel_") { - parts = rest.split('_'); - rel = RelishTag::Relation(RelTag { - spcnode: parts.next()?.parse::().ok()?, - dbnode: parts.next()?.parse::().ok()?, - relnode: parts.next()?.parse::().ok()?, - forknum: parts.next()?.parse::().ok()?, - }); - } else if let Some(rest) = fname.strip_prefix("pg_xact_") { - parts = rest.split('_'); - rel = RelishTag::Slru { - slru: SlruKind::Clog, - segno: u32::from_str_radix(parts.next()?, 16).ok()?, - }; - } else if let Some(rest) = fname.strip_prefix("pg_multixact_members_") { - parts = rest.split('_'); - rel = RelishTag::Slru { - slru: SlruKind::MultiXactMembers, - segno: u32::from_str_radix(parts.next()?, 16).ok()?, - }; - } else if let Some(rest) = fname.strip_prefix("pg_multixact_offsets_") { - parts = rest.split('_'); - rel = RelishTag::Slru { - slru: SlruKind::MultiXactOffsets, - segno: u32::from_str_radix(parts.next()?, 16).ok()?, - }; - } else if let Some(rest) = fname.strip_prefix("pg_filenodemap_") { - parts = rest.split('_'); - rel = RelishTag::FileNodeMap { - spcnode: parts.next()?.parse::().ok()?, - dbnode: parts.next()?.parse::().ok()?, - }; - } else if let Some(rest) = fname.strip_prefix("pg_twophase_") { - parts = rest.split('_'); - rel = RelishTag::TwoPhase { - xid: parts.next()?.parse::().ok()?, - }; - } else if let Some(rest) = fname.strip_prefix("pg_control_checkpoint_") { - parts = rest.split('_'); - rel = RelishTag::Checkpoint; - } else if let Some(rest) = fname.strip_prefix("pg_control_") { - parts = rest.split('_'); - rel = RelishTag::ControlFile; - } else { - return None; - } - - let segno = parts.next()?.parse::().ok()?; - - let seg = SegmentTag { - rel, - segno - }; - - let lsn = Lsn::from_hex(parts.next()?).ok()?; - - if parts.next().is_some() { - warn!("unrecognized filename in timeline dir: {}", fname); - return None; - } - - Some(ImageFileName { - seg, - lsn, - }) - } - - fn to_string(&self) -> String { - let basename = match self.seg.rel { - RelishTag::Relation(reltag) => format!( - "rel_{}_{}_{}_{}", - reltag.spcnode, reltag.dbnode, reltag.relnode, reltag.forknum - ), - RelishTag::Slru { - slru: SlruKind::Clog, - segno, - } => format!("pg_xact_{:04X}", segno), - RelishTag::Slru { - slru: SlruKind::MultiXactMembers, - segno, - } => 
format!("pg_multixact_members_{:04X}", segno), - RelishTag::Slru { - slru: SlruKind::MultiXactOffsets, - segno, - } => format!("pg_multixact_offsets_{:04X}", segno), - RelishTag::FileNodeMap { spcnode, dbnode } => { - format!("pg_filenodemap_{}_{}", spcnode, dbnode) - } - RelishTag::TwoPhase { xid } => format!("pg_twophase_{}", xid), - RelishTag::Checkpoint => format!("pg_control_checkpoint"), - RelishTag::ControlFile => format!("pg_control"), - }; - - format!( - "{}_{}_{:016X}", - basename, - self.seg.segno, - u64::from(self.lsn), - ) - } -} - -impl fmt::Display for ImageFileName { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.to_string()) - } -} - - -/// Create SnapshotLayers representing all files on disk -/// -// TODO: returning an Iterator would be more idiomatic -pub fn list_snapshot_files( - conf: &'static PageServerConf, - timelineid: ZTimelineId, - tenantid: ZTenantId, -) -> Result<(Vec, Vec)> { - let path = conf.timeline_path(&timelineid, &tenantid); - - let mut snapfiles: Vec = Vec::new(); - let mut imgfiles: Vec = Vec::new(); - for direntry in fs::read_dir(path)? { - let fname = direntry?.file_name(); - let fname = fname.to_str().unwrap(); - - if let Some(snapfilename) = SnapshotFileName::from_str(fname) { - snapfiles.push(snapfilename); - } - - if let Some(imgfilename) = ImageFileName::from_str(fname) { - imgfiles.push(imgfilename); - } - } - return Ok((snapfiles, imgfiles)); -} diff --git a/pageserver/src/layered_repository/image_layer.rs b/pageserver/src/layered_repository/image_layer.rs deleted file mode 100644 index debbeb6ae1..0000000000 --- a/pageserver/src/layered_repository/image_layer.rs +++ /dev/null @@ -1,384 +0,0 @@ -//! FIXME -//! A SnapshotLayer represents one snapshot file on disk. One file holds all page -//! version and size information of one relation, in a range of LSN. -//! The name "snapshot file" is a bit of a misnomer because a snapshot file doesn't -//! contain a snapshot at a specific LSN, but rather all the page versions in a range -//! of LSNs. -//! -//! Currently, a snapshot file contains full information needed to reconstruct any -//! page version in the LSN range, without consulting any other snapshot files. When -//! a new snapshot file is created for writing, the full contents of relation are -//! materialized as it is at the beginning of the LSN range. That can be very expensive, -//! we should find a way to store differential files. But this keeps the read-side -//! of things simple. You can find the correct snapshot file based on RelishTag and -//! timeline+LSN, and once you've located it, you have all the data you need to in that -//! file. -//! -//! When a snapshot file needs to be accessed, we slurp the whole file into memory, into -//! the SnapshotLayer struct. See load() and unload() functions. -//! -//! On disk, the snapshot files are stored in timelines/ directory. -//! Currently, there are no subdirectories, and each snapshot file is named like this: -//! -//! _____ -//! -//! For example: -//! -//! 1663_13990_2609_0_000000000169C348_000000000169C349 -//! -//! If a relation is dropped, we add a '_DROPPED' to the end of the filename to indicate that. -//! So the above example would become: -//! -//! 1663_13990_2609_0_000000000169C348_000000000169C349_DROPPED -//! -//! The end LSN indicates when it was dropped in that case, we don't store it in the -//! file contents in any way. -//! -//! A snapshot file is constructed using the 'bookfile' crate. Each file consists of two -//! 
parts: the page versions and the relation sizes. They are stored as separate chapters. -//! FIXME -//! -use crate::layered_repository::storage_layer::{Layer, PageReconstructData, SegmentTag}; -use crate::layered_repository::LayeredTimeline; -use crate::layered_repository::filename::{ImageFileName}; -use crate::layered_repository::RELISH_SEG_SIZE; -use crate::PageServerConf; -use crate::{ZTenantId, ZTimelineId}; -use anyhow::{bail, Result}; -use bytes::Bytes; -use lazy_static::lazy_static; -use log::*; -use std::fs; -use std::fs::File; -use std::io::Write; -use std::path::PathBuf; -use std::sync::{Mutex, MutexGuard}; - -use bookfile::{Book, BookWriter}; - -use zenith_metrics::{register_histogram, Histogram}; -use zenith_utils::bin_ser::BeSer; -use zenith_utils::lsn::Lsn; - -// Magic constant to identify a Zenith segment image file -static IMAGE_FILE_MAGIC: u32 = 0x5A616E01 + 1; - -static BASE_IMAGES_CHAPTER: u64 = 1; - - -// Metrics collected on operations on the storage repository. -lazy_static! { - static ref RECONSTRUCT_TIME: Histogram = register_histogram!( - "pageserver_image_reconstruct_time", - "FIXME Time spent on storage operations" - ) - .expect("failed to define a metric"); -} - -/// -/// SnapshotLayer is the in-memory data structure associated with an -/// on-disk snapshot file. We keep a SnapshotLayer in memory for each -/// file, in the LayerMap. If a layer is in "loaded" state, we have a -/// copy of the file in memory, in 'inner'. Otherwise the struct is -/// just a placeholder for a file that exists on disk, and it needs to -/// be loaded before using it in queries. -/// -pub struct ImageLayer { - conf: &'static PageServerConf, - pub tenantid: ZTenantId, - pub timelineid: ZTimelineId, - pub seg: SegmentTag, - - // This entry contains an image of all pages as of this LSN - pub lsn: Lsn, - - inner: Mutex, -} - -pub struct ImageLayerInner { - /// If false, the 'page_versions' and 'relsizes' have not been - /// loaded into memory yet. - loaded: bool, - - // indexed by block number (within segment) - base_images: Vec, -} - -impl Layer for ImageLayer { - fn get_timeline_id(&self) -> ZTimelineId { - return self.timelineid; - } - - fn get_seg_tag(&self) -> SegmentTag { - return self.seg; - } - - fn is_dropped(&self) -> bool { - return false; - } - - fn get_start_lsn(&self) -> Lsn { - return self.lsn; - } - - fn get_end_lsn(&self) -> Lsn { - return self.lsn; - } - - /// Look up given page in the cache. - fn get_page_reconstruct_data( - &self, - blknum: u32, - lsn: Lsn, - reconstruct_data: &mut PageReconstructData, - ) -> Result> { - let need_base_image_lsn: Option; - - assert!(lsn >= self.lsn); - - { - let inner = self.load()?; - - let base_blknum: usize = (blknum % RELISH_SEG_SIZE) as usize; - if let Some(img) = inner.base_images.get(base_blknum) { - reconstruct_data.page_img = Some(img.clone()); - need_base_image_lsn = None; - } else { - bail!("no base img found for {} at blk {} at LSN {}", self.seg, base_blknum, lsn); - } - // release lock on 'inner' - } - - Ok(need_base_image_lsn) - } - - /// Get size of the relation at given LSN - fn get_seg_size(&self, _lsn: Lsn) -> Result { - - let inner = self.load()?; - let result = inner.base_images.len() as u32; - - Ok(result) - } - - /// Does this segment exist at given LSN? - fn get_seg_exists(&self, _lsn: Lsn) -> Result { - Ok(true) - } - - - /// - /// Release most of the memory used by this layer. If it's accessed again later, - /// it will need to be loaded back. 
- /// - fn unload(&self) -> Result<()> { - let mut inner = self.inner.lock().unwrap(); - inner.base_images = Vec::new(); - inner.loaded = false; - Ok(()) - } - - fn delete(&self) -> Result<()> { - // delete underlying file - fs::remove_file(self.path())?; - Ok(()) - } - - fn is_incremental(&self) -> bool { - false - } -} - -impl ImageLayer { - fn path(&self) -> PathBuf { - Self::path_for( - self.conf, - self.timelineid, - self.tenantid, - &ImageFileName { - seg: self.seg, - lsn: self.lsn, - }, - ) - } - - fn path_for( - conf: &'static PageServerConf, - timelineid: ZTimelineId, - tenantid: ZTenantId, - fname: &ImageFileName, - ) -> PathBuf { - conf.timeline_path(&timelineid, &tenantid) - .join(fname.to_string()) - } - - /// Create a new snapshot file, using the given btreemaps containing the page versions and - /// relsizes. - /// FIXME comment - /// This is used to write the in-memory layer to disk. The in-memory layer uses the same - /// data structure with two btreemaps as we do, so passing the btreemaps is currently - /// expedient. - pub fn create( - conf: &'static PageServerConf, - timelineid: ZTimelineId, - tenantid: ZTenantId, - seg: SegmentTag, - lsn: Lsn, - base_images: Vec, - ) -> Result { - - let layer = ImageLayer { - conf: conf, - timelineid: timelineid, - tenantid: tenantid, - seg: seg, - lsn: lsn, - inner: Mutex::new(ImageLayerInner { - loaded: true, - base_images: base_images, - }), - }; - let inner = layer.inner.lock().unwrap(); - - // Write the images into a file - let path = layer.path(); - - // Note: This overwrites any existing file. There shouldn't be any. - // FIXME: throw an error instead? - let file = File::create(&path)?; - let book = BookWriter::new(file, IMAGE_FILE_MAGIC)?; - - // Write out the base images - let mut chapter = book.new_chapter(BASE_IMAGES_CHAPTER); - let buf = Vec::ser(&inner.base_images)?; - - chapter.write_all(&buf)?; - let book = chapter.close()?; - - book.close()?; - - trace!("saved {}", &path.display()); - - drop(inner); - - Ok(layer) - } - - pub fn create_from_src( - conf: &'static PageServerConf, - timeline: &LayeredTimeline, - src: &dyn Layer, - lsn: Lsn, - ) -> Result { - let seg = src.get_seg_tag(); - let timelineid = timeline.timelineid; - - let startblk; - let size; - if seg.rel.is_blocky() { - size = src.get_seg_size(lsn)?; - startblk = seg.segno * RELISH_SEG_SIZE; - } else { - size = 1; - startblk = 0; - } - - trace!( - "creating new ImageLayer for {} on timeline {} at {}", - seg, - timelineid, - lsn, - ); - - let mut base_images: Vec = Vec::new(); - for blknum in startblk..(startblk+size) { - let img = - RECONSTRUCT_TIME - .observe_closure_duration(|| { - timeline.materialize_page(seg, blknum, lsn, &*src) - })?; - - base_images.push(img); - } - - Self::create(conf, timelineid, timeline.tenantid, seg, lsn, - base_images) - } - - - /// - /// Load the contents of the file into memory - /// - fn load(&self) -> Result> { - // quick exit if already loaded - let mut inner = self.inner.lock().unwrap(); - - if inner.loaded { - return Ok(inner); - } - - let path = Self::path_for( - self.conf, - self.timelineid, - self.tenantid, - &ImageFileName { - seg: self.seg, - lsn: self.lsn, - }, - ); - - let file = File::open(&path)?; - let book = Book::new(file)?; - - let chapter = book.read_chapter(BASE_IMAGES_CHAPTER)?; - let base_images = Vec::des(&chapter)?; - - debug!("loaded from {}", &path.display()); - - *inner = ImageLayerInner { - loaded: true, - base_images, - }; - - Ok(inner) - } - - /// Create an ImageLayer represent a file on disk - pub 
fn load_image_layer( - conf: &'static PageServerConf, - timelineid: ZTimelineId, - tenantid: ZTenantId, - filename: &ImageFileName, - ) -> Result { - let layer = ImageLayer { - conf, - timelineid, - tenantid, - seg: filename.seg, - lsn: filename.lsn, - inner: Mutex::new(ImageLayerInner { - loaded: false, - base_images: Vec::new(), - }), - }; - - Ok(layer) - } - - /// debugging function to print out the contents of the layer - #[allow(unused)] - pub fn dump(&self) -> String { - let mut result = format!( - "----- image layer for {} at {} ----\n", - self.seg, self.lsn, - ); - - //let inner = self.inner.lock().unwrap(); - - //for (k, v) in inner.page_versions.iter() { - // result += &format!("blk {} at {}: {}/{}\n", k.0, k.1, v.page_image.is_some(), v.record.is_some()); - //} - - result - } -} diff --git a/pageserver/src/layered_repository/inmemory_layer.rs b/pageserver/src/layered_repository/inmemory_layer.rs index 8eba41af5e..70a7b7216e 100644 --- a/pageserver/src/layered_repository/inmemory_layer.rs +++ b/pageserver/src/layered_repository/inmemory_layer.rs @@ -6,7 +6,8 @@ use crate::layered_repository::storage_layer::{ Layer, PageReconstructData, PageVersion, SegmentTag, RELISH_SEG_SIZE, }; use crate::layered_repository::LayeredTimeline; -use crate::layered_repository::{ImageLayer, SnapshotLayer}; +use crate::layered_repository::SnapshotLayer; +use crate::repository::WALRecord; use crate::PageServerConf; use crate::{ZTenantId, ZTimelineId}; use anyhow::{bail, Result}; @@ -31,13 +32,9 @@ pub struct InMemoryLayer { /// start_lsn: Lsn, - oldest_pending_lsn: Lsn, - /// The above fields never change. The parts that do change are in 'inner', /// and protected by mutex. inner: Mutex, - - img_layer: Option>, } pub struct InMemoryLayerInner { @@ -54,11 +51,6 @@ pub struct InMemoryLayerInner { /// `segsizes` tracks the size of the segment at different points in time. /// segsizes: BTreeMap, - - /// - /// Memory usage - /// - mem_used: usize, } impl Layer for InMemoryLayer { @@ -128,16 +120,7 @@ impl Layer for InMemoryLayer { } } - // Use the base image, if needed - if let Some(need_lsn) = need_base_image_lsn { - if let Some(img_layer) = &self.img_layer { - need_base_image_lsn = img_layer.get_page_reconstruct_data(blknum, need_lsn, reconstruct_data)?; - } else { - bail!("no base img found for {} at blk {} at LSN {}", self.seg, blknum, lsn); - } - } - - // release lock on 'inner' + // release lock on 'page_versions' } Ok(need_base_image_lsn) @@ -145,23 +128,18 @@ impl Layer for InMemoryLayer { /// Get size of the relation at given LSN fn get_seg_size(&self, lsn: Lsn) -> Result { - assert!(lsn >= self.start_lsn); - // Scan the BTreeMap backwards, starting from the given entry. let inner = self.inner.lock().unwrap(); let mut iter = inner.segsizes.range((Included(&Lsn(0)), Included(&lsn))); - let result; if let Some((_entry_lsn, entry)) = iter.next_back() { - result = *entry; - // Use the base image if needed - } else if let Some(img_layer) = &self.img_layer { - result = img_layer.get_seg_size(lsn)?; + let result = *entry; + drop(inner); + trace!("get_seg_size: {} at {} -> {}", self.seg, lsn, result); + Ok(result) } else { - result = 0; + bail!("No size found for {} at {} in memory", self.seg, lsn); } - trace!("get_seg_size: {} at {} -> {}", self.seg, lsn, result); - Ok(result) } /// Does this segment exist at given LSN? @@ -178,31 +156,9 @@ impl Layer for InMemoryLayer { // Otherwise, it exists Ok(true) } - - - /// - /// Release most of the memory used by this layer. 
If it's accessed again later,
-    /// it will need to be loaded back.
-    ///
-    fn unload(&self) -> Result<()> {
-        Ok(())
-    }
-
-    fn delete(&self) -> Result<()> {
-        Ok(())
-    }
-
-    fn is_incremental(&self) -> bool {
-        true
-    }
 }
 
 impl InMemoryLayer {
-
-    pub fn get_oldest_pending_lsn(&self) -> Lsn {
-        self.oldest_pending_lsn
-    }
-
     ///
     /// Create a new, empty, in-memory layer
     ///
@@ -212,7 +168,6 @@ impl InMemoryLayer {
         tenantid: ZTenantId,
         seg: SegmentTag,
         start_lsn: Lsn,
-        oldest_pending_lsn: Lsn,
     ) -> Result<InMemoryLayer> {
         trace!(
             "initializing new empty InMemoryLayer for writing {} on timeline {} at {}",
@@ -227,14 +182,11 @@ impl InMemoryLayer {
             tenantid,
             seg,
             start_lsn,
-            oldest_pending_lsn,
             inner: Mutex::new(InMemoryLayerInner {
                 drop_lsn: None,
                 page_versions: BTreeMap::new(),
                 segsizes: BTreeMap::new(),
-                mem_used: 0,
             }),
-            img_layer: None,
         })
     }
 
@@ -276,9 +228,6 @@ impl InMemoryLayer {
             self.timelineid,
             lsn
         );
-
-        let mem_size = pv.get_mem_size();
-
         let mut inner = self.inner.lock().unwrap();
 
         let old = inner.page_versions.insert((blknum, lsn), pv);
@@ -289,8 +238,6 @@
                 "Page version of rel {} blk {} at {} already exists",
                 self.seg.rel, blknum, lsn
             );
-        } else {
-            inner.mem_used += mem_size;
         }
 
         // Also update the relation size, if this extended the relation.
@@ -302,8 +249,6 @@
             let oldsize;
             if let Some((_entry_lsn, entry)) = iter.next_back() {
                 oldsize = *entry;
-            } else if let Some(img_layer) = &self.img_layer {
-                oldsize = img_layer.get_seg_size(lsn)?;
             } else {
                 oldsize = 0;
                 //bail!("No old size found for {} at {}", self.tag, lsn);
@@ -352,37 +297,56 @@ impl InMemoryLayer {
     /// Initialize a new InMemoryLayer by copying the state at the given
     /// point in time from the given existing layer.
     ///
-    pub fn create_successor_layer(
+    pub fn copy_snapshot(
         conf: &'static PageServerConf,
-        src: Arc<ImageLayer>,
+        timeline: &LayeredTimeline,
+        src: &dyn Layer,
         timelineid: ZTimelineId,
         tenantid: ZTenantId,
-        start_lsn: Lsn,
-        oldest_pending_lsn: Lsn,
+        lsn: Lsn,
     ) -> Result<InMemoryLayer> {
-        let seg = src.get_seg_tag();
-
         trace!(
             "initializing new InMemoryLayer for writing {} on timeline {} at {}",
-            seg,
+            src.get_seg_tag(),
             timelineid,
-            start_lsn,
+            lsn
         );
 
+        let mut page_versions = BTreeMap::new();
+        let mut segsizes = BTreeMap::new();
+
+        let seg = src.get_seg_tag();
+
+        let startblk;
+        let size;
+        if seg.rel.is_blocky() {
+            size = src.get_seg_size(lsn)?;
+            segsizes.insert(lsn, size);
+            startblk = seg.segno * RELISH_SEG_SIZE;
+        } else {
+            size = 1;
+            startblk = 0;
+        }
+
+        for blknum in startblk..(startblk + size) {
+            let img = timeline.materialize_page(seg, blknum, lsn, src)?;
+            let pv = PageVersion {
+                page_image: Some(img),
+                record: None,
+            };
+            page_versions.insert((blknum, lsn), pv);
+        }
 
         Ok(InMemoryLayer {
             conf,
             timelineid,
             tenantid,
-            seg,
-            start_lsn,
-            oldest_pending_lsn,
+            seg: src.get_seg_tag(),
+            start_lsn: lsn,
             inner: Mutex::new(InMemoryLayerInner {
                 drop_lsn: None,
-                page_versions: BTreeMap::new(),
-                segsizes: BTreeMap::new(),
-                mem_used: 0,
+                page_versions,
+                segsizes,
             }),
-            img_layer: Some(src),
         })
     }
 
@@ -402,7 +366,7 @@ impl InMemoryLayer {
         cutoff_lsn: Lsn,
         // This is needed just to call materialize_page()
         timeline: &LayeredTimeline,
-    ) -> Result<(Vec<Arc<dyn Layer>>, Option<Arc<InMemoryLayer>>)> {
+    ) -> Result<(Option<Arc<SnapshotLayer>>, Option<Arc<InMemoryLayer>>)> {
         info!(
             "freezing in memory layer for {} on timeline {} at {}",
             self.seg, self.timelineid, cutoff_lsn
         );
@@ -458,18 +422,6 @@ impl InMemoryLayer {
         // we can release the lock now.
drop(inner); - let mut historics: Vec> = Vec::new(); - - // write a new base image layer at the cutoff point - let imgfile = ImageLayer::create_from_src( - self.conf, - timeline, - self, - end_lsn, - )?; - let imgfile_rc: Arc = Arc::new(imgfile); - historics.push(Arc::clone(&imgfile_rc)); - // Write the page versions before the cutoff to disk. let snapfile = SnapshotLayer::create( self.conf, @@ -479,38 +431,36 @@ impl InMemoryLayer { self.start_lsn, end_lsn, dropped, - self.img_layer.clone(), before_page_versions, before_segsizes, )?; - let snapfile_rc: Arc = Arc::new(snapfile); - historics.push(snapfile_rc); // If there were any "new" page versions, initialize a new in-memory layer to hold // them - let new_open = - if !after_segsizes.is_empty() || !after_page_versions.is_empty() { - info!("created new in-mem layer for {} {}-", self.seg, end_lsn); + let new_open = if !after_segsizes.is_empty() || !after_page_versions.is_empty() { + info!("created new in-mem layer for {} {}-", self.seg, end_lsn); - let new_open = Self::create_successor_layer( - self.conf, - imgfile_rc, - self.timelineid, - self.tenantid, - end_lsn, - end_lsn, - )?; - let mut new_inner = new_open.inner.lock().unwrap(); - new_inner.page_versions.append(&mut after_page_versions); - new_inner.segsizes.append(&mut after_segsizes); - drop(new_inner); + let new_open = Self::copy_snapshot( + self.conf, + timeline, + &snapfile, + self.timelineid, + self.tenantid, + end_lsn, + )?; + let mut new_inner = new_open.inner.lock().unwrap(); + new_inner.page_versions.append(&mut after_page_versions); + new_inner.segsizes.append(&mut after_segsizes); + drop(new_inner); - Some(Arc::new(new_open)) - } else { - None - }; + Some(Arc::new(new_open)) + } else { + None + }; - Ok((historics, new_open)) + let new_historic = Some(Arc::new(snapfile)); + + Ok((new_historic, new_open)) } /// debugging function to print out the contents of the layer diff --git a/pageserver/src/layered_repository/layer_map.rs b/pageserver/src/layered_repository/layer_map.rs index 78d6da3312..0cf9f93419 100644 --- a/pageserver/src/layered_repository/layer_map.rs +++ b/pageserver/src/layered_repository/layer_map.rs @@ -3,79 +3,38 @@ //! //! When the timeline is first accessed, the server lists of all snapshot files //! in the timelines/ directory, and populates this map with -//! SnapshotLayers corresponding to each file. When new WAL is received, FIXME +//! SnapshotLayers corresponding to each file. When new WAL is received, //! we create InMemoryLayers to hold the incoming records. Now and then, //! in the checkpoint() function, the in-memory layers are frozen, forming //! new snapshot layers and corresponding files are written to disk. //! use crate::layered_repository::storage_layer::{Layer, SegmentTag}; -use crate::layered_repository::{InMemoryLayer}; +use crate::layered_repository::{InMemoryLayer, SnapshotLayer}; use crate::relish::*; use anyhow::Result; -use lazy_static::lazy_static; use log::*; use std::collections::HashSet; -use std::collections::{BinaryHeap, BTreeMap, HashMap}; +use std::collections::{BTreeMap, HashMap}; use std::ops::Bound::Included; -use std::cmp::Ordering; use std::sync::Arc; -use zenith_metrics::{register_int_gauge, IntGauge}; use zenith_utils::lsn::Lsn; -lazy_static! 
{
-    static ref NUM_INMEMORY_LAYERS: IntGauge =
-        register_int_gauge!("pageserver_inmemory_layers", "Number of layers in memory")
-            .expect("failed to define a metric");
-
-    static ref NUM_ONDISK_LAYERS: IntGauge =
-        register_int_gauge!("pageserver_ondisk_layers", "Number of layers on-disk")
-            .expect("failed to define a metric");
-}
-
 ///
 /// LayerMap tracks what layers exist on a timeline. The last layer that is
 /// open for writes is always an InMemoryLayer, and is tracked separately
 /// because there can be only one for each segment. The older layers,
 /// stored on disk, are kept in a BTreeMap keyed by the layer's start LSN.
 ///
 pub struct LayerMap {
     segs: HashMap<SegmentTag, SegEntry>,
-
-    // FIXME: explain this
-    open_segs: BinaryHeap<OpenSegEntry>,
 }
 
 struct SegEntry {
     pub open: Option<Arc<InMemoryLayer>>,
-    pub historic: BTreeMap<Lsn, Arc<dyn Layer>>,
+    pub historic: BTreeMap<Lsn, Arc<SnapshotLayer>>,
 }
 
-struct OpenSegEntry {
-    pub oldest_pending_lsn: Lsn,
-    pub layer: Arc<InMemoryLayer>,
-}
-impl Ord for OpenSegEntry {
-    fn cmp(&self, other: &Self) -> Ordering {
-        // BinaryHeap is a max-heap, and we want a min-heap. Reverse the ordering here
-        // to get that.
-        other.oldest_pending_lsn.cmp(&self.oldest_pending_lsn)
-    }
-}
-impl PartialOrd for OpenSegEntry {
-    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
-        // BinaryHeap is a max-heap, and we want a min-heap. Reverse the ordering here
-        // to get that.
-        other.oldest_pending_lsn.partial_cmp(&self.oldest_pending_lsn)
-    }
-}
-impl PartialEq for OpenSegEntry {
-    fn eq(&self, other: &Self) -> bool {
-        self.oldest_pending_lsn.eq(&other.oldest_pending_lsn)
-    }
-}
-impl Eq for OpenSegEntry {}
-
 impl LayerMap {
     ///
     /// Look up using the given segment tag and LSN. This differs from a plain
@@ -129,38 +88,20 @@ impl LayerMap {
             if let Some(_old) = &segentry.open {
                 // FIXME: shouldn't exist, but check
             }
-            segentry.open = Some(Arc::clone(&layer));
+            segentry.open = Some(layer);
         } else {
             let segentry = SegEntry {
-                open: Some(Arc::clone(&layer)),
+                open: Some(layer),
                 historic: BTreeMap::new(),
             };
             self.segs.insert(tag, segentry);
         }
-
-        let opensegentry = OpenSegEntry {
-            oldest_pending_lsn: layer.get_oldest_pending_lsn(),
-            layer: layer,
-        };
-        self.open_segs.push(opensegentry);
-
-        NUM_INMEMORY_LAYERS.inc();
-    }
-
-    // replace given open layer with other layers.
-    pub fn pop_oldest(&mut self) {
-        let opensegentry = self.open_segs.pop().unwrap();
-        let segtag = opensegentry.layer.get_seg_tag();
-
-        let mut segentry = self.segs.get_mut(&segtag).unwrap();
-        segentry.open = None;
-        NUM_INMEMORY_LAYERS.dec();
     }
 
     ///
     /// Insert an on-disk layer
     ///
-    pub fn insert_historic(&mut self, layer: Arc<dyn Layer>) {
+    pub fn insert_historic(&mut self, layer: Arc<SnapshotLayer>) {
         let tag = layer.get_seg_tag();
         let start_lsn = layer.get_start_lsn();
 
@@ -176,7 +117,6 @@ impl LayerMap {
             };
             self.segs.insert(tag, segentry);
         }
-        NUM_ONDISK_LAYERS.inc();
     }
 
     ///
@@ -184,14 +124,13 @@ impl LayerMap {
     ///
     /// This should be called when the corresponding file on disk has been deleted.
     ///
-    pub fn remove_historic(&mut self, layer: &dyn Layer) {
+    pub fn remove_historic(&mut self, layer: &SnapshotLayer) {
         let tag = layer.get_seg_tag();
         let start_lsn = layer.get_start_lsn();
 
         if let Some(segentry) = self.segs.get_mut(&tag) {
             segentry.historic.remove(&start_lsn);
         }
-        NUM_ONDISK_LAYERS.dec();
     }
 
     pub fn list_rels(&self, spcnode: u32, dbnode: u32) -> Result<HashSet<RelTag>> {
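The `historic` map is keyed by each layer's start LSN, so "which layer covers this LSN?" is answered with a backwards range scan — the same pattern the `get_seg_size()` implementations elsewhere in this patch use. A minimal standalone sketch of that lookup (not part of the patch), with plain `u64` standing in for `Lsn` and strings standing in for layers:

```rust
use std::collections::BTreeMap;
use std::ops::Bound::Included;

/// Find the latest entry whose start LSN is <= `lsn`, mirroring how
/// SegEntry::historic is scanned. Returns None if no layer starts at or
/// before `lsn`.
fn latest_layer_at(historic: &BTreeMap<u64, &'static str>, lsn: u64) -> Option<&'static str> {
    historic
        .range((Included(&0), Included(&lsn)))
        .next_back()
        .map(|(_start_lsn, layer)| *layer)
}

fn main() {
    let mut historic = BTreeMap::new();
    historic.insert(100, "snapshot layer 100-200");
    historic.insert(200, "snapshot layer 200-300");

    // An LSN inside the second layer's range finds that layer.
    assert_eq!(latest_layer_at(&historic, 250), Some("snapshot layer 200-300"));
    // An LSN before the first layer finds nothing.
    assert_eq!(latest_layer_at(&historic, 50), None);
}
```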
@@ -230,16 +169,14 @@ impl LayerMap {
     /// Is there a newer layer for given segment?
     pub fn newer_layer_exists(&self, seg: SegmentTag, lsn: Lsn) -> bool {
         if let Some(segentry) = self.segs.get(&seg) {
-            // open layer is always incremental so it doesn't count
+            if let Some(_open) = &segentry.open {
+                return true;
+            }
 
             for (newer_lsn, layer) in segentry
                 .historic
                 .range((Included(lsn), Included(Lsn(u64::MAX))))
             {
-                // FIXME: incremental layers don't count
-                if layer.is_incremental() {
-                    continue;
-                }
                 if layer.get_end_lsn() > lsn {
                     trace!(
                         "found later layer for {}, {} {}-{}",
@@ -259,11 +196,10 @@ impl LayerMap {
         false
     }
 
-    pub fn get_oldest_open_layer(&mut self) -> Option<Arc<InMemoryLayer>> {
-        if let Some(opensegentry) = self.open_segs.peek() {
-            Some(Arc::clone(&opensegentry.layer))
-        } else {
-            None
+    pub fn iter_open_layers(&mut self) -> OpenLayerIter {
+        OpenLayerIter {
+            last: None,
+            segiter: self.segs.iter_mut(),
         }
     }
 
@@ -279,18 +215,53 @@ impl Default for LayerMap {
     fn default() -> Self {
         LayerMap {
             segs: HashMap::new(),
-            open_segs: BinaryHeap::new(),
         }
     }
 }
 
+pub struct OpenLayerIter<'a> {
+    last: Option<&'a mut SegEntry>,
+
+    segiter: std::collections::hash_map::IterMut<'a, SegmentTag, SegEntry>,
+}
+
+impl<'a> OpenLayerIter<'a> {
+    pub fn replace(&mut self, replacement: Option<Arc<InMemoryLayer>>) {
+        let segentry = self.last.as_mut().unwrap();
+        segentry.open = replacement;
+    }
+
+    pub fn insert_historic(&mut self, new_layer: Arc<SnapshotLayer>) {
+        let start_lsn = new_layer.get_start_lsn();
+
+        let segentry = self.last.as_mut().unwrap();
+        segentry.historic.insert(start_lsn, new_layer);
+    }
+}
+
+impl<'a> Iterator for OpenLayerIter<'a> {
+    type Item = Arc<InMemoryLayer>;
+
+    fn next(&mut self) -> std::option::Option<<Self as Iterator>::Item> {
+        while let Some((_seg, entry)) = self.segiter.next() {
+            if let Some(open) = &entry.open {
+                let op = Arc::clone(&open);
+                self.last = Some(entry);
+                return Some(op);
+            }
+        }
+        self.last = None;
+        None
+    }
+}
+
 pub struct HistoricLayerIter<'a> {
     segiter: std::collections::hash_map::Iter<'a, SegmentTag, SegEntry>,
-    iter: Option<std::collections::btree_map::Iter<'a, Lsn, Arc<dyn Layer>>>,
+    iter: Option<std::collections::btree_map::Iter<'a, Lsn, Arc<SnapshotLayer>>>,
 }
 
 impl<'a> Iterator for HistoricLayerIter<'a> {
-    type Item = Arc<dyn Layer>;
+    type Item = Arc<SnapshotLayer>;
 
     fn next(&mut self) -> std::option::Option<<Self as Iterator>::Item> {
         loop {
diff --git a/pageserver/src/layered_repository/snapshot_layer.rs b/pageserver/src/layered_repository/snapshot_layer.rs
index bc97be9835..e0f4e77995 100644
--- a/pageserver/src/layered_repository/snapshot_layer.rs
+++ b/pageserver/src/layered_repository/snapshot_layer.rs
@@ -36,17 +36,17 @@
 //!
 //! A snapshot file is constructed using the 'bookfile' crate. Each file consists of two
 //! parts: the page versions and the relation sizes. They are stored as separate chapters.
-//! FIXME
 //!
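To make the chapter layout concrete before diving into the code: the sketch below (not part of the patch) mirrors the bookfile call sequence that `SnapshotLayer::create()` and the load path use further down, with plain byte slices standing in for the serialized BTreeMaps. The constants are the ones defined in this file; the payload contents are illustrative only.

```rust
use std::fs::File;
use std::io::Write;

use anyhow::Result;
use bookfile::{Book, BookWriter};

const SNAPSHOT_FILE_MAGIC: u32 = 0x5A616E01;
const PAGE_VERSIONS_CHAPTER: u64 = 1;
const REL_SIZES_CHAPTER: u64 = 2;

/// Write both chapters and read one back, following the same call sequence
/// as SnapshotLayer::create() and load().
fn roundtrip(path: &str, page_versions: &[u8], rel_sizes: &[u8]) -> Result<Vec<u8>> {
    let file = File::create(path)?;
    let book = BookWriter::new(file, SNAPSHOT_FILE_MAGIC)?;

    // Each chapter is written in full and closed before the next one begins.
    let mut chapter = book.new_chapter(PAGE_VERSIONS_CHAPTER);
    chapter.write_all(page_versions)?;
    let book = chapter.close()?;

    let mut chapter = book.new_chapter(REL_SIZES_CHAPTER);
    chapter.write_all(rel_sizes)?;
    let book = chapter.close()?;
    book.close()?;

    // Chapters can later be read back independently, by chapter number.
    let book = Book::new(File::open(path)?)?;
    // read_chapter hands back the chapter's raw bytes.
    let chapter = book.read_chapter(REL_SIZES_CHAPTER)?;
    Ok(chapter.to_vec())
}
```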
use crate::layered_repository::storage_layer::{
     Layer, PageReconstructData, PageVersion, SegmentTag,
 };
-use crate::layered_repository::filename::{SnapshotFileName};
+use crate::relish::*;
 use crate::PageServerConf;
 use crate::{ZTenantId, ZTimelineId};
 use anyhow::{bail, Result};
 use log::*;
 use std::collections::BTreeMap;
+use std::fmt;
 use std::fs;
 use std::fs::File;
 use std::io::Write;
@@ -65,6 +65,145 @@
 static SNAPSHOT_FILE_MAGIC: u32 = 0x5A616E01;
 
 static PAGE_VERSIONS_CHAPTER: u64 = 1;
 static REL_SIZES_CHAPTER: u64 = 2;
 
+#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
+struct SnapshotFileName {
+    seg: SegmentTag,
+    start_lsn: Lsn,
+    end_lsn: Lsn,
+    dropped: bool,
+}
+
+impl SnapshotFileName {
+    fn from_str(fname: &str) -> Option<SnapshotFileName> {
+        // Split the filename into parts
+        //
+        //    <spcnode>_<dbnode>_<relnode>_<forknum>_<segno>_<start LSN>_<end LSN>
+        //
+        // or if it was dropped:
+        //
+        //    <spcnode>_<dbnode>_<relnode>_<forknum>_<segno>_<start LSN>_<end LSN>_DROPPED
+        //
+        let rel;
+        let mut parts;
+        if let Some(rest) = fname.strip_prefix("rel_") {
+            parts = rest.split('_');
+            rel = RelishTag::Relation(RelTag {
+                spcnode: parts.next()?.parse::<u32>().ok()?,
+                dbnode: parts.next()?.parse::<u32>().ok()?,
+                relnode: parts.next()?.parse::<u32>().ok()?,
+                forknum: parts.next()?.parse::<u8>().ok()?,
+            });
+        } else if let Some(rest) = fname.strip_prefix("pg_xact_") {
+            parts = rest.split('_');
+            rel = RelishTag::Slru {
+                slru: SlruKind::Clog,
+                segno: u32::from_str_radix(parts.next()?, 16).ok()?,
+            };
+        } else if let Some(rest) = fname.strip_prefix("pg_multixact_members_") {
+            parts = rest.split('_');
+            rel = RelishTag::Slru {
+                slru: SlruKind::MultiXactMembers,
+                segno: u32::from_str_radix(parts.next()?, 16).ok()?,
+            };
+        } else if let Some(rest) = fname.strip_prefix("pg_multixact_offsets_") {
+            parts = rest.split('_');
+            rel = RelishTag::Slru {
+                slru: SlruKind::MultiXactOffsets,
+                segno: u32::from_str_radix(parts.next()?, 16).ok()?,
+            };
+        } else if let Some(rest) = fname.strip_prefix("pg_filenodemap_") {
+            parts = rest.split('_');
+            rel = RelishTag::FileNodeMap {
+                spcnode: parts.next()?.parse::<u32>().ok()?,
+                dbnode: parts.next()?.parse::<u32>().ok()?,
+            };
+        } else if let Some(rest) = fname.strip_prefix("pg_twophase_") {
+            parts = rest.split('_');
+            rel = RelishTag::TwoPhase {
+                xid: parts.next()?.parse::<u32>().ok()?,
+            };
+        } else if let Some(rest) = fname.strip_prefix("pg_control_checkpoint_") {
+            parts = rest.split('_');
+            rel = RelishTag::Checkpoint;
+        } else if let Some(rest) = fname.strip_prefix("pg_control_") {
+            parts = rest.split('_');
+            rel = RelishTag::ControlFile;
+        } else {
+            return None;
+        }
+
+        let segno = parts.next()?.parse::<u32>().ok()?;
+
+        let seg = SegmentTag { rel, segno };
+
+        let start_lsn = Lsn::from_hex(parts.next()?).ok()?;
+        let end_lsn = Lsn::from_hex(parts.next()?).ok()?;
+
+        let mut dropped = false;
+        if let Some(suffix) = parts.next() {
+            if suffix == "DROPPED" {
+                dropped = true;
+            } else {
+                warn!("unrecognized filename in timeline dir: {}", fname);
+                return None;
+            }
+        }
+        if parts.next().is_some() {
+            warn!("unrecognized filename in timeline dir: {}", fname);
+            return None;
+        }
+
+        Some(SnapshotFileName {
+            seg,
+            start_lsn,
+            end_lsn,
+            dropped,
+        })
+    }
+
+    fn to_string(&self) -> String {
+        let basename = match self.seg.rel {
+            RelishTag::Relation(reltag) => format!(
+                "rel_{}_{}_{}_{}",
+                reltag.spcnode, reltag.dbnode, reltag.relnode, reltag.forknum
+            ),
+            RelishTag::Slru {
+                slru: SlruKind::Clog,
+                segno,
+            } => format!("pg_xact_{:04X}", segno),
+            RelishTag::Slru {
+                slru: SlruKind::MultiXactMembers,
+                segno,
+            } => format!("pg_multixact_members_{:04X}", segno),
+            RelishTag::Slru {
+                slru:
SlruKind::MultiXactOffsets, + segno, + } => format!("pg_multixact_offsets_{:04X}", segno), + RelishTag::FileNodeMap { spcnode, dbnode } => { + format!("pg_filenodemap_{}_{}", spcnode, dbnode) + } + RelishTag::TwoPhase { xid } => format!("pg_twophase_{}", xid), + RelishTag::Checkpoint => format!("pg_control_checkpoint"), + RelishTag::ControlFile => format!("pg_control"), + }; + + format!( + "{}_{}_{:016X}_{:016X}{}", + basename, + self.seg.segno, + u64::from(self.start_lsn), + u64::from(self.end_lsn), + if self.dropped { "_DROPPED" } else { "" } + ) + } +} + +impl fmt::Display for SnapshotFileName { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.to_string()) + } +} + /// /// SnapshotLayer is the in-memory data structure associated with an /// on-disk snapshot file. We keep a SnapshotLayer in memory for each @@ -88,8 +227,6 @@ pub struct SnapshotLayer { dropped: bool, inner: Mutex, - - img_layer: Option>, } pub struct SnapshotLayerInner { @@ -162,15 +299,6 @@ impl Layer for SnapshotLayer { } } - // Use the base image, if needed - if let Some(need_lsn) = need_base_image_lsn { - if let Some(img_layer) = &self.img_layer { - need_base_image_lsn = img_layer.get_page_reconstruct_data(blknum, need_lsn, reconstruct_data)?; - } else { - bail!("no base img found for {} at blk {} at LSN {}", self.seg, blknum, lsn); - } - } - // release lock on 'inner' } @@ -179,23 +307,26 @@ impl Layer for SnapshotLayer { /// Get size of the relation at given LSN fn get_seg_size(&self, lsn: Lsn) -> Result { - - assert!(lsn >= self.start_lsn); - // Scan the BTreeMap backwards, starting from the given entry. let inner = self.load()?; let mut iter = inner.relsizes.range((Included(&Lsn(0)), Included(&lsn))); - let result; if let Some((_entry_lsn, entry)) = iter.next_back() { - result = *entry; - // Use the base image if needed - } else if let Some(img_layer) = &self.img_layer { - result = img_layer.get_seg_size(lsn)?; + let result = *entry; + drop(inner); + trace!("get_seg_size: {} at {} -> {}", self.seg, lsn, result); + Ok(result) } else { - result = 0; + error!( + "No size found for {} at {} in snapshot layer {} {}-{}", + self.seg, lsn, self.seg, self.start_lsn, self.end_lsn + ); + bail!( + "No size found for {} at {} in snapshot layer", + self.seg, + lsn + ); } - Ok(result) } /// Does this segment exist at given LSN? @@ -208,28 +339,6 @@ impl Layer for SnapshotLayer { // Otherwise, it exists. Ok(true) } - - /// - /// Release most of the memory used by this layer. If it's accessed again later, - /// it will need to be loaded back. 
-    ///
-    fn unload(&self) -> Result<()> {
-        let mut inner = self.inner.lock().unwrap();
-        inner.page_versions = BTreeMap::new();
-        inner.relsizes = BTreeMap::new();
-        inner.loaded = false;
-        Ok(())
-    }
-
-    fn delete(&self) -> Result<()> {
-        // delete underlying file
-        fs::remove_file(self.path())?;
-        Ok(())
-    }
-
-    fn is_incremental(&self) -> bool {
-        true
-    }
 }
 
 impl SnapshotLayer {
@@ -271,11 +380,9 @@ impl SnapshotLayer {
         start_lsn: Lsn,
         end_lsn: Lsn,
         dropped: bool,
-        img_layer: Option<Arc<ImageLayer>>,
         page_versions: BTreeMap<(u32, Lsn), PageVersion>,
         relsizes: BTreeMap<Lsn, u32>,
     ) -> Result<SnapshotLayer> {
-
         let snapfile = SnapshotLayer {
             conf: conf,
             timelineid: timelineid,
@@ -289,7 +396,6 @@ impl SnapshotLayer {
                 page_versions: page_versions,
                 relsizes: relsizes,
             }),
-            img_layer,
         };
         let inner = snapfile.inner.lock().unwrap();
 
@@ -301,7 +407,7 @@ impl SnapshotLayer {
         let file = File::create(&path)?;
         let book = BookWriter::new(file, SNAPSHOT_FILE_MAGIC)?;
 
-        // Write out the other page versions
+        // Write out page versions
         let mut chapter = book.new_chapter(PAGE_VERSIONS_CHAPTER);
         let buf = BTreeMap::ser(&inner.page_versions)?;
         chapter.write_all(&buf)?;
@@ -368,30 +474,56 @@ impl SnapshotLayer {
     /// Create SnapshotLayers representing all files on disk
     ///
     // TODO: returning an Iterator would be more idiomatic
-    pub fn load_snapshot_layer(
+    pub fn list_snapshot_files(
         conf: &'static PageServerConf,
         timelineid: ZTimelineId,
         tenantid: ZTenantId,
-        filename: &SnapshotFileName,
-    ) -> Result<SnapshotLayer> {
-        let snapfile = SnapshotLayer {
-            conf,
-            timelineid,
-            tenantid,
-            seg: filename.seg,
-            start_lsn: filename.start_lsn,
-            end_lsn: filename.end_lsn,
-            dropped: filename.dropped,
-            inner: Mutex::new(SnapshotLayerInner {
-                loaded: false,
-                page_versions: BTreeMap::new(),
-                relsizes: BTreeMap::new(),
-            }),
-            // FIXME: This doesn't work across restarts.
-            img_layer: None,
-        };
+    ) -> Result<Vec<Arc<SnapshotLayer>>> {
+        let path = conf.timeline_path(&timelineid, &tenantid);
 
-        Ok(snapfile)
+        let mut snapfiles: Vec<Arc<SnapshotLayer>> = Vec::new();
+        for direntry in fs::read_dir(path)? {
+            let fname = direntry?.file_name();
+            let fname = fname.to_str().unwrap();
+
+            if let Some(snapfilename) = SnapshotFileName::from_str(fname) {
+                let snapfile = SnapshotLayer {
+                    conf,
+                    timelineid,
+                    tenantid,
+                    seg: snapfilename.seg,
+                    start_lsn: snapfilename.start_lsn,
+                    end_lsn: snapfilename.end_lsn,
+                    dropped: snapfilename.dropped,
+                    inner: Mutex::new(SnapshotLayerInner {
+                        loaded: false,
+                        page_versions: BTreeMap::new(),
+                        relsizes: BTreeMap::new(),
+                    }),
+                };
+
+                snapfiles.push(Arc::new(snapfile));
+            }
+        }
+        return Ok(snapfiles);
+    }
+
+    pub fn delete(&self) -> Result<()> {
+        // delete underlying file
+        fs::remove_file(self.path())?;
+        Ok(())
+    }
+
+    ///
+    /// Release most of the memory used by this layer. If it's accessed again later,
+    /// it will need to be loaded back.
+    ///
+    pub fn unload(&self) -> Result<()> {
+        let mut inner = self.inner.lock().unwrap();
+        inner.page_versions = BTreeMap::new();
+        inner.relsizes = BTreeMap::new();
+        inner.loaded = false;
+        Ok(())
+    }
 
     /// debugging function to print out the contents of the layer
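A side note on the fixed-width `{:016X}` LSN encoding used in these file names: because every LSN is zero-padded to 16 hex digits, lexicographic filename order agrees with numeric LSN order, and decoding is a plain radix-16 parse. A tiny standalone sketch of that round trip (not part of the patch), with `u64` standing in for `Lsn`, whose `from_hex()` does the equivalent of the decode below:

```rust
fn main() {
    // An arbitrary example LSN; any u64 round-trips the same way.
    let start_lsn: u64 = 0x15D_3DD8;

    // Encode the way SnapshotFileName::to_string() does.
    let encoded = format!("{:016X}", start_lsn);
    assert_eq!(encoded, "00000000015D3DD8");

    // Decode the way Lsn::from_hex() is used in from_str().
    let decoded = u64::from_str_radix(&encoded, 16).unwrap();
    assert_eq!(decoded, start_lsn);

    // Zero-padding makes string order agree with numeric order.
    assert!(format!("{:016X}", 0x9_u64) < format!("{:016X}", 0x10_u64));
}
```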
diff --git a/pageserver/src/layered_repository/storage_layer.rs b/pageserver/src/layered_repository/storage_layer.rs
index 167aaaecde..0a181a1aac 100644
--- a/pageserver/src/layered_repository/storage_layer.rs
+++ b/pageserver/src/layered_repository/storage_layer.rs
@@ -69,28 +69,6 @@ pub struct PageVersion {
     pub record: Option<WALRecord>,
 }
 
-impl PageVersion {
-    pub fn get_mem_size(&self) -> usize {
-        let mut sz = 0;
-
-        // every page version has some fixed overhead.
-        sz += 16;
-
-        if let Some(img) = &self.page_image {
-            sz += img.len();
-        }
-
-        if let Some(rec) = &self.record {
-            sz += rec.rec.len();
-
-            // Some per-record overhead. Not very accurate, but close enough
-            sz += 32;
-        }
-
-        sz
-    }
-}
-
 ///
 /// Data needed to reconstruct a page version
 ///
@@ -147,9 +125,4 @@ pub trait Layer: Send + Sync {
     fn get_seg_size(&self, lsn: Lsn) -> Result<u32>;
 
     fn get_seg_exists(&self, lsn: Lsn) -> Result<bool>;
-
-    fn is_incremental(&self) -> bool;
-
-    fn unload(&self) -> Result<()>;
-    fn delete(&self) -> Result<()>;
 }

From 4bce65ff9a7802908c5c2c48b4c467c453181c7b Mon Sep 17 00:00:00 2001
From: Dmitry Rodionov
Date: Tue, 17 Aug 2021 16:23:34 +0300
Subject: [PATCH 18/24] bump rust version in ci to 1.52.1

---
 .circleci/config.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index 3e30e1d11a..359b2d77d9 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -7,7 +7,7 @@ executors:
   zenith-build-executor:
     resource_class: xlarge
     docker:
-      - image: cimg/rust:1.51.0
+      - image: cimg/rust:1.52.1
 
 jobs:

From 9fed5c8fb7cfb43d32155c6474031c265b569d2f Mon Sep 17 00:00:00 2001
From: Heikki Linnakangas
Date: Wed, 18 Aug 2021 20:19:07 +0300
Subject: [PATCH 19/24] Add test for page server restart.

---
 .../batch_others/test_pageserver_restart.py   | 66 +++++++++++++++++++
 1 file changed, 66 insertions(+)
 create mode 100644 test_runner/batch_others/test_pageserver_restart.py

diff --git a/test_runner/batch_others/test_pageserver_restart.py b/test_runner/batch_others/test_pageserver_restart.py
new file mode 100644
index 0000000000..18b17a4efb
--- /dev/null
+++ b/test_runner/batch_others/test_pageserver_restart.py
@@ -0,0 +1,66 @@
+import pytest
+import random
+import time
+
+from contextlib import closing
+from multiprocessing import Process, Value
+from fixtures.zenith_fixtures import WalAcceptorFactory, ZenithPageserver, PostgresFactory
+
+pytest_plugins = ("fixtures.zenith_fixtures")
+
+# Test restarting the page server while a compute node is running: insert
+# enough data that reads have to go to the page server, restart it, and
+# check that the data can still be read back afterwards.
+def test_pageserver_restart(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFactory, wa_factory: WalAcceptorFactory):
+
+    # One safekeeper is enough for this test.
+    wa_factory.start_n_new(1)
+
+    zenith_cli.run(["branch", "test_pageserver_restart", "empty"])
+    pg = postgres.create_start('test_pageserver_restart',
+                               wal_acceptors=wa_factory.get_connstrs())
+
+    pg_conn = pg.connect()
+    cur = pg_conn.cursor()
+
+    # Create table, and insert some rows. Make it big enough that it doesn't fit in
+    # shared_buffers, otherwise the SELECT after restart will just return answer
+    # from shared_buffers without hitting the page server, which defeats the point
+    # of this test.
+    cur.execute('CREATE TABLE foo (t text)')
+    cur.execute('''
+        INSERT INTO foo
+            SELECT 'long string to consume some space' || g
+            FROM generate_series(1, 100000) g
+    ''')
+
+    # Verify that the table is larger than shared_buffers
+    cur.execute('''
+        select setting::int * pg_size_bytes(unit) as shared_buffers, pg_relation_size('foo') as tbl_size
+        from pg_settings where name = 'shared_buffers'
+    ''')
+    row = cur.fetchone()
+    print("shared_buffers is {}, table size {}".format(row[0], row[1]))
+    assert int(row[0]) < int(row[1])
+
+    # Stop and restart pageserver.
This is a more or less graceful shutdown, although + # the page server doesn't currently have a shutdown routine so there's no difference + # between stopping and crashing. + pageserver.stop(); + pageserver.start(); + + # Stopping the pageserver breaks the connection from the postgres backend to + # the page server, and causes the next query on the connection to fail. Start a new + # postgres connection too, to avoid that error. (Ideally, the compute node would + # handle that and retry internally, without propagating the error to the user, but + # currently it doesn't...) + pg_conn = pg.connect() + cur = pg_conn.cursor() + + cur.execute("SELECT count(*) FROM foo") + assert cur.fetchone() == (100000, ) + + # Stop the page server by force, and restart it + pageserver.stop(); + pageserver.start(); + From 20e6cd7724fbcdd70e356f5dcbdf67c236c4ccdf Mon Sep 17 00:00:00 2001 From: anastasia Date: Fri, 13 Aug 2021 14:44:52 +0300 Subject: [PATCH 20/24] Update test_twophase - check that we correctly restore files at compute node start. --- pageserver/src/restore_local_repo.rs | 2 +- test_runner/batch_others/test_twophase.py | 49 ++++++++++++++++++++--- test_runner/fixtures/zenith_fixtures.py | 7 ++++ 3 files changed, 51 insertions(+), 7 deletions(-) diff --git a/pageserver/src/restore_local_repo.rs b/pageserver/src/restore_local_repo.rs index c7f84d7bad..4cc7c7feb5 100644 --- a/pageserver/src/restore_local_repo.rs +++ b/pageserver/src/restore_local_repo.rs @@ -425,7 +425,7 @@ pub fn save_decoded_record( let parsed_xact = XlXactParsedRecord::decode(&mut buf, decoded.xl_xid, decoded.xl_info); save_xact_record(timeline, lsn, &parsed_xact, decoded)?; // Remove twophase file. see RemoveTwoPhaseFile() in postgres code - info!( + trace!( "unlink twophaseFile for xid {} parsed_xact.xid {} here at {}", decoded.xl_xid, parsed_xact.xid, lsn ); diff --git a/test_runner/batch_others/test_twophase.py b/test_runner/batch_others/test_twophase.py index ab1cdf3001..1d572c5992 100644 --- a/test_runner/batch_others/test_twophase.py +++ b/test_runner/batch_others/test_twophase.py @@ -1,3 +1,5 @@ +import os + from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver @@ -28,24 +30,59 @@ def test_twophase(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFa cur.execute("INSERT INTO foo VALUES ('two')") cur.execute("PREPARE TRANSACTION 'insert_two'") + # Prepare a transaction that will insert a row + cur.execute('BEGIN') + cur.execute("INSERT INTO foo VALUES ('three')") + cur.execute("PREPARE TRANSACTION 'insert_three'") + + # Prepare another transaction that will insert a row + cur.execute('BEGIN') + cur.execute("INSERT INTO foo VALUES ('four')") + cur.execute("PREPARE TRANSACTION 'insert_four'") + + # On checkpoint state data copied to files in + # pg_twophase directory and fsynced + cur.execute('CHECKPOINT') + + twophase_files = os.listdir(pg.pg_twophase_dir_path()) + print(twophase_files) + assert len(twophase_files) == 4 + + cur.execute("COMMIT PREPARED 'insert_three'") + cur.execute("ROLLBACK PREPARED 'insert_four'") + cur.execute('CHECKPOINT') + + twophase_files = os.listdir(pg.pg_twophase_dir_path()) + print(twophase_files) + assert len(twophase_files) == 2 + # Create a branch with the transaction in prepared state zenith_cli.run(["branch", "test_twophase_prepared", "test_twophase"]) - pg2 = postgres.create_start( + # Create compute node, but don't start. 
+    # We want to observe pgdata before postgres starts
+    pg2 = postgres.create(
         'test_twophase_prepared',
         config_lines=['max_prepared_transactions=5'],
     )
+
+    # Check that we restored only needed twophase files
+    twophase_files2 = os.listdir(pg2.pg_twophase_dir_path())
+    print(twophase_files2)
+    assert sorted(twophase_files2) == sorted(twophase_files)
+
+    pg2 = pg2.start()
     conn2 = pg2.connect()
     cur2 = conn2.cursor()
 
-    # On the new branch, commit one of the prepared transactions, abort the other one.
+    # On the new branch, commit one of the prepared transactions,
+    # abort the other one.
     cur2.execute("COMMIT PREPARED 'insert_one'")
     cur2.execute("ROLLBACK PREPARED 'insert_two'")
 
     cur2.execute('SELECT * FROM foo')
-    assert cur2.fetchall() == [('one', )]
+    assert cur2.fetchall() == [('one',), ('three',)]
 
-    # Neither insert is visible on the original branch, the transactions are still
-    # in prepared state there.
+    # Only one committed insert is visible on the original branch
     cur.execute('SELECT * FROM foo')
-    assert cur.fetchall() == []
+    assert cur.fetchall() == [('three',)]
diff --git a/test_runner/fixtures/zenith_fixtures.py b/test_runner/fixtures/zenith_fixtures.py
index 2a1081af8f..afb350bfea 100644
--- a/test_runner/fixtures/zenith_fixtures.py
+++ b/test_runner/fixtures/zenith_fixtures.py
@@ -309,6 +309,13 @@ class Postgres(PgProtocol):
         path = pathlib.Path('pgdatadirs') / 'tenants' / self.tenant_id / self.branch / 'pg_xact'
         return os.path.join(self.repo_dir, path)
 
+    def pg_twophase_dir_path(self) -> str:
+        """ Path to pg_twophase dir """
+        path = pathlib.Path('pgdatadirs') / 'tenants' / self.tenant_id / self.branch / 'pg_twophase'
+        return os.path.join(self.repo_dir, path)
+
     def config_file_path(self) -> str:
         """ Path to postgresql.conf """
         filename = pathlib.Path('pgdatadirs') / 'tenants' / self.tenant_id / self.branch / 'postgresql.conf'

From 04a309f562afc361625e68544b9771ec8563af56 Mon Sep 17 00:00:00 2001
From: Alexey Kondratov
Date: Tue, 17 Aug 2021 15:12:22 +0300
Subject: [PATCH 21/24] Build zenithdb/zenith:latest in CI (zenithdb/console#18)

---
 .circleci/config.yml |  28 +++++++++++
 Dockerfile           |  83 +++++++++++++++----------------
 Dockerfile.alpine    |  95 ++++++++++++++++++++++++++++++++++++++++
 Dockerfile.build     |  15 +++++++
 docker-entrypoint.sh |   2 +-
 5 files changed, 172 insertions(+), 51 deletions(-)
 create mode 100644 Dockerfile.alpine
 create mode 100644 Dockerfile.build

diff --git a/.circleci/config.yml b/.circleci/config.yml
index 359b2d77d9..7414317fbd 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -237,6 +237,23 @@ jobs:
       - store_test_results:
           path: /tmp/test_output
 
+  # Build zenithdb/zenith:latest image and push it to Docker hub
+  docker-image:
+    docker:
+      - image: cimg/base:2021.04
+    steps:
+      - checkout
+      - setup_remote_docker:
+          docker_layer_caching: true
+      - run:
+          name: Init postgres submodule
+          command: git submodule update --init --depth 1
+      - run:
+          name: Build and push Docker image
+          command: |
+            echo $DOCKER_PWD | docker login -u $DOCKER_LOGIN --password-stdin
+            docker build -t zenithdb/zenith:latest . && docker push zenithdb/zenith:latest
+
 workflows:
   build_and_test:
     jobs:
@@ -265,3 +282,14 @@ workflows:
           test_selection: batch_others
           requires:
             - build-zenith-<< matrix.build_type >>
+      - docker-image:
+          # Context gives an ability to login
+          context: Docker Hub
+          # Build image only for commits to main
+          filters:
+            branches:
+              only:
+                - main
+          requires:
+            - pg_regress tests release
+            - other tests release
diff --git a/Dockerfile b/Dockerfile
index a2a2fea1a4..0579059cc2 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,94 +1,77 @@
 #
 # Docker image for console integration testing.
 #
-# We may also reuse it in CI to unify installation process and as a general binaries building
-# tool for production servers.
-#
-# Dynamic linking is used for librocksdb and libstdc++ bacause librocksdb-sys calls
-# bindgen with "dynamic" feature flag. This also prevents usage of dockerhub alpine-rust
-# images which are statically linked and have guards against any dlopen. I would rather
-# prefer all static binaries so we may change the way librocksdb-sys builds or wait until
-# we will have our own storage and drop rockdb dependency.
-#
-# Cargo-chef is used to separate dependencies building from main binaries building. This
-# way `docker build` will download and install dependencies only of there are changes to
-# out Cargo.toml files.
-#
-
 #
-# build postgres separately -- this layer will be rebuilt only if one of
-# mentioned paths will get any changes
+# Build Postgres separately --- this layer will be rebuilt only if one of
+# the mentioned paths changes.
 #
-FROM alpine:3.13 as pg-build
-RUN apk add --update clang llvm compiler-rt compiler-rt-static lld musl-dev binutils \
-    make bison flex readline-dev zlib-dev perl linux-headers libseccomp-dev
-WORKDIR zenith
+FROM zenithdb/build:buster AS pg-build
+WORKDIR /zenith
 COPY ./vendor/postgres vendor/postgres
 COPY ./Makefile Makefile
-# Build using clang and lld
-RUN CC='clang' LD='lld' CFLAGS='-fuse-ld=lld --rtlib=compiler-rt' make postgres -j4
+RUN make -j $(getconf _NPROCESSORS_ONLN) -s postgres
 
 #
 # Calculate cargo dependencies.
 # This will always run, but only generate recipe.json with list of dependencies without
 # installing them.
 #
-FROM alpine:20210212 as cargo-deps-inspect
-RUN apk add --update rust cargo
-RUN cargo install cargo-chef
-WORKDIR zenith
+FROM zenithdb/build:buster AS cargo-deps-inspect
+WORKDIR /zenith
 COPY . .
-RUN cargo chef prepare --recipe-path recipe.json
+RUN cargo chef prepare --recipe-path /zenith/recipe.json
 
 #
 # Build cargo dependencies.
-# This temp cantainner would be build only if recipe.json was changed.
+# This temp container should be rebuilt only if recipe.json was changed.
 #
-FROM alpine:20210212 as deps-build
-RUN apk add --update rust cargo openssl-dev clang build-base
-# rust-rocksdb can be built against system-wide rocksdb -- that saves about
-# 10 minutes during build. Rocksdb apk package is in testing now, but use it
-# anyway. In case of any troubles we can download and build rocksdb here manually
-# (to cache it as a docker layer).
-RUN apk --no-cache --update --repository https://dl-cdn.alpinelinux.org/alpine/edge/testing add rocksdb-dev
-WORKDIR zenith
+FROM zenithdb/build:buster AS deps-build
+WORKDIR /zenith
 COPY --from=pg-build /zenith/tmp_install/include/postgresql/server tmp_install/include/postgresql/server
-COPY --from=cargo-deps-inspect /root/.cargo/bin/cargo-chef /root/.cargo/bin/
+COPY --from=cargo-deps-inspect /usr/local/cargo/bin/cargo-chef /usr/local/cargo/bin/
 COPY --from=cargo-deps-inspect /zenith/recipe.json recipe.json
 RUN ROCKSDB_LIB_DIR=/usr/lib/ cargo chef cook --release --recipe-path recipe.json
 
 #
 # Build zenith binaries
 #
-FROM alpine:20210212 as build
-RUN apk add --update rust cargo openssl-dev clang build-base
-RUN apk --no-cache --update --repository https://dl-cdn.alpinelinux.org/alpine/edge/testing add rocksdb-dev
-WORKDIR zenith
+FROM zenithdb/build:buster AS build
+WORKDIR /zenith
 COPY . .
 # Copy cached dependencies
 COPY --from=pg-build /zenith/tmp_install/include/postgresql/server tmp_install/include/postgresql/server
 COPY --from=deps-build /zenith/target target
-COPY --from=deps-build /root/.cargo /root/.cargo
+COPY --from=deps-build /usr/local/cargo/ /usr/local/cargo/
 RUN cargo build --release
 
 #
 # Copy binaries to resulting image.
-# build-base hare to provide libstdc++ (it will also bring gcc, but leave it this way until we figure
-# out how to statically link rocksdb or avoid it at all.
 #
-FROM alpine:3.13
-RUN apk add --update openssl build-base libseccomp-dev
-RUN apk --no-cache --update --repository https://dl-cdn.alpinelinux.org/alpine/edge/testing add rocksdb
+FROM debian:buster-slim
+WORKDIR /data
+
+RUN apt-get update && apt-get -yq install librocksdb-dev libseccomp-dev openssl && \
+    mkdir zenith_install
+
 COPY --from=build /zenith/target/release/pageserver /usr/local/bin
 COPY --from=build /zenith/target/release/wal_acceptor /usr/local/bin
 COPY --from=build /zenith/target/release/proxy /usr/local/bin
-COPY --from=pg-build /zenith/tmp_install /usr/local
+COPY --from=pg-build /zenith/tmp_install postgres_install
 COPY docker-entrypoint.sh /docker-entrypoint.sh
 
-RUN addgroup zenith && adduser -h /data -D -G zenith zenith
+# Remove build artifacts (~ 500 MB)
+RUN rm -rf postgres_install/build && \
+    # 'Install' Postgres binaries locally
+    cp -r postgres_install/* /usr/local/ && \
+    # Prepare an archive of Postgres binaries (should be around 11 MB)
+    # and keep it inside the container for ease of deployment.
+    cd postgres_install && tar -czf /data/postgres_install.tar.gz . && cd .. && \
+    rm -rf postgres_install
+
+RUN useradd -m -d /data zenith
+
 VOLUME ["/data"]
-WORKDIR /data
 USER zenith
 EXPOSE 6400
 ENTRYPOINT ["/docker-entrypoint.sh"]
diff --git a/Dockerfile.alpine b/Dockerfile.alpine
new file mode 100644
index 0000000000..a2a2fea1a4
--- /dev/null
+++ b/Dockerfile.alpine
@@ -0,0 +1,95 @@
+#
+# Docker image for console integration testing.
+#
+# We may also reuse it in CI to unify the installation process and as a general
+# binary-building tool for production servers.
+#
+# Dynamic linking is used for librocksdb and libstdc++ because librocksdb-sys calls
+# bindgen with "dynamic" feature flag. This also prevents usage of dockerhub alpine-rust
+# images which are statically linked and have guards against any dlopen. I would rather
+# prefer all static binaries so we may change the way librocksdb-sys builds or wait until
+# we have our own storage and drop the rocksdb dependency.
+#
+# Cargo-chef is used to separate dependencies building from main binaries building. This
+# way `docker build` will download and install dependencies only if there are changes to
+# our Cargo.toml files.
+#
+
+
+#
+# build postgres separately -- this layer will be rebuilt only if one of
+# mentioned paths will get any changes
+#
+FROM alpine:3.13 as pg-build
+RUN apk add --update clang llvm compiler-rt compiler-rt-static lld musl-dev binutils \
+    make bison flex readline-dev zlib-dev perl linux-headers libseccomp-dev
+WORKDIR zenith
+COPY ./vendor/postgres vendor/postgres
+COPY ./Makefile Makefile
+# Build using clang and lld
+RUN CC='clang' LD='lld' CFLAGS='-fuse-ld=lld --rtlib=compiler-rt' make postgres -j4
+
+#
+# Calculate cargo dependencies.
+# This will always run, but only generate recipe.json with list of dependencies without
+# installing them.
+#
+FROM alpine:20210212 as cargo-deps-inspect
+RUN apk add --update rust cargo
+RUN cargo install cargo-chef
+WORKDIR zenith
+COPY . .
+RUN cargo chef prepare --recipe-path recipe.json
+
+#
+# Build cargo dependencies.
+# This temp container would be rebuilt only if recipe.json was changed.
+#
+FROM alpine:20210212 as deps-build
+RUN apk add --update rust cargo openssl-dev clang build-base
+# rust-rocksdb can be built against system-wide rocksdb -- that saves about
+# 10 minutes during build. Rocksdb apk package is in testing now, but use it
+# anyway. In case of any troubles we can download and build rocksdb here manually
+# (to cache it as a docker layer).
+RUN apk --no-cache --update --repository https://dl-cdn.alpinelinux.org/alpine/edge/testing add rocksdb-dev
+WORKDIR zenith
+COPY --from=pg-build /zenith/tmp_install/include/postgresql/server tmp_install/include/postgresql/server
+COPY --from=cargo-deps-inspect /root/.cargo/bin/cargo-chef /root/.cargo/bin/
+COPY --from=cargo-deps-inspect /zenith/recipe.json recipe.json
+RUN ROCKSDB_LIB_DIR=/usr/lib/ cargo chef cook --release --recipe-path recipe.json
+
+#
+# Build zenith binaries
+#
+FROM alpine:20210212 as build
+RUN apk add --update rust cargo openssl-dev clang build-base
+RUN apk --no-cache --update --repository https://dl-cdn.alpinelinux.org/alpine/edge/testing add rocksdb-dev
+WORKDIR zenith
+COPY . .
+# Copy cached dependencies
+COPY --from=pg-build /zenith/tmp_install/include/postgresql/server tmp_install/include/postgresql/server
+COPY --from=deps-build /zenith/target target
+COPY --from=deps-build /root/.cargo /root/.cargo
+RUN cargo build --release
+
+#
+# Copy binaries to resulting image.
+# build-base is here to provide libstdc++ (it will also bring gcc, but leave it this way until we figure
+# out how to statically link rocksdb or avoid it at all.
+# +FROM alpine:3.13 +RUN apk add --update openssl build-base libseccomp-dev +RUN apk --no-cache --update --repository https://dl-cdn.alpinelinux.org/alpine/edge/testing add rocksdb +COPY --from=build /zenith/target/release/pageserver /usr/local/bin +COPY --from=build /zenith/target/release/wal_acceptor /usr/local/bin +COPY --from=build /zenith/target/release/proxy /usr/local/bin +COPY --from=pg-build /zenith/tmp_install /usr/local +COPY docker-entrypoint.sh /docker-entrypoint.sh + +RUN addgroup zenith && adduser -h /data -D -G zenith zenith +VOLUME ["/data"] +WORKDIR /data +USER zenith +EXPOSE 6400 +ENTRYPOINT ["/docker-entrypoint.sh"] +CMD ["pageserver"] diff --git a/Dockerfile.build b/Dockerfile.build new file mode 100644 index 0000000000..92b2c21ffd --- /dev/null +++ b/Dockerfile.build @@ -0,0 +1,15 @@ +# +# Image with all the required dependencies to build https://github.com/zenithdb/zenith +# and Postgres from https://github.com/zenithdb/postgres +# Also includes some rust development and build tools. +# +FROM rust:slim-buster +WORKDIR /zenith + +# Install postgres and zenith build dependencies +# clang is for rocksdb +RUN apt-get update && apt-get -yq install automake libtool build-essential bison flex libreadline-dev zlib1g-dev libxml2-dev \ + libseccomp-dev pkg-config libssl-dev librocksdb-dev clang + +# Install rust tools +RUN rustup component add clippy && cargo install cargo-chef cargo-audit diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh index be57b29427..dcfd502d85 100755 --- a/docker-entrypoint.sh +++ b/docker-entrypoint.sh @@ -1,6 +1,6 @@ #!/bin/sh if [ "$1" = 'pageserver' ]; then - if [ ! -d "/data/timelines" ]; then + if [ ! -d "/data/tenants" ]; then echo "Initializing pageserver data directory" pageserver --init -D /data --postgres-distrib /usr/local fi From 1c3d51ed92ca3eab80b98b9ec855aa670f756803 Mon Sep 17 00:00:00 2001 From: Alexey Kondratov Date: Wed, 18 Aug 2021 13:36:50 +0300 Subject: [PATCH 22/24] Add Docker images building doc and refactor the overall docs reference --- README.md | 5 ++--- docs/README.md | 11 +++++++++++ docs/docker.md | 38 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 51 insertions(+), 3 deletions(-) create mode 100644 docs/README.md create mode 100644 docs/docker.md diff --git a/README.md b/README.md index 07054b2e7e..96e0e94552 100644 --- a/README.md +++ b/README.md @@ -106,10 +106,9 @@ pytest ## Documentation -Now we use README files to cover design ideas and overall architecture for each module. -And rustdoc style documentation comments. +Now we use README files to cover design ideas and overall architecture for each module and `rustdoc` style documentation comments. See also [/docs/](/docs/) a top-level overview of all available markdown documentation. -To view your documentation in a browser, try running `cargo doc --no-deps --open` +To view your `rustdoc` documentation in a browser, try running `cargo doc --no-deps --open` ## Source tree layout diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 0000000000..35f66e2d6f --- /dev/null +++ b/docs/README.md @@ -0,0 +1,11 @@ +# Zenith documentation + +## Table of contents + +- [authentication.md](authentication.md) — pageserver JWT authentication. +- [docker.md](docker.md) — Docker images and building pipeline. +- [multitenancy.md](multitenancy.md) — how multitenancy is organized in the pageserver and Zenith CLI. +- [pageserver/README](/pageserver/README) — pageserver overview. +- [postgres_ffi/README](/postgres_ffi/README) — Postgres FFI overview. 
+- [test_runner/README.md](/test_runner/README.md) — tests infrastructure overview. +- [walkeeper/README](/walkeeper/README.md) — WAL service overview. diff --git a/docs/docker.md b/docs/docker.md new file mode 100644 index 0000000000..9a909ebfe3 --- /dev/null +++ b/docs/docker.md @@ -0,0 +1,38 @@ +# Docker images of Zenith + +## Images + +Currently we build two main images: + +- [zenithdb/zenith](https://hub.docker.com/repository/docker/zenithdb/zenith) — image with pre-built `pageserver`, `wal_acceptor` and `proxy` binaries and all the required runtime dependencies. Built from [/Dockerfile](/Dockerfile). +- [zenithdb/compute-node](https://hub.docker.com/repository/docker/zenithdb/compute-node) — compute node image with pre-built Postgres binaries from [zenithdb/postgres](https://github.com/zenithdb/postgres). + +And two intermediate images used either to reduce build time or to deliver some additional binary tools from other repos: + +- [zenithdb/build](https://hub.docker.com/repository/docker/zenithdb/build) — image with all the dependencies required to build Zenith and compute node images. This image is based on `rust:slim-buster`, so it also has a proper `rust` environment. Built from [/Dockerfile.build](/Dockerfile.build). +- [zenithdb/compute-tools](https://hub.docker.com/repository/docker/zenithdb/compute-tools) — compute node configuration management tools. + +## Building pipeline + +1. Image `zenithdb/compute-tools` is re-built automatically. + +2. Image `zenithdb/build` is built manually. If you want to introduce any new compile time dependencies to Zenith or compute node you have to update this image as well, build it and push to Docker Hub. + +Build: +```sh +docker build -t zenithdb/build:buster -f Dockerfile.build . +``` + +Login: +```sh +docker login +``` + +Push to Docker Hub: +```sh +docker push zenithdb/build:buster +``` + +3. Image `zenithdb/compute-node` is built independently in the [zenithdb/postgres](https://github.com/zenithdb/postgres) repo. + +4. Image `zenithdb/zenith` is built in this repo after a successful `release` tests run and pushed to Docker Hub automatically. From 82725725fdd9b3271d9206c5a51f3189c55353dc Mon Sep 17 00:00:00 2001 From: Dmitry Rodionov Date: Thu, 19 Aug 2021 13:19:13 +0300 Subject: [PATCH 23/24] update README to match required Rust version and new python package installation process --- README.md | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 96e0e94552..fab84754e5 100644 --- a/README.md +++ b/README.md @@ -12,15 +12,12 @@ apt install build-essential libtool libreadline-dev zlib1g-dev flex bison libsec libssl-dev clang ``` -[Rust] 1.48 or later is also required. +[Rust] 1.52 or later is also required. To run the `psql` client, install the `postgresql-client` package or modify `PATH` and `LD_LIBRARY_PATH` to include `tmp_install/bin` and `tmp_install/lib`, respectively. To run the integration tests (not required to use the code), install -Python (3.6 or higher), and install python3 packages with `pip` (called `pip3` on some systems): -``` -pip install pytest psycopg2 -``` +Python (3.6 or higher), and install python3 packages with `pipenv` using `pipenv install` in the project directory. 2. 
Build zenith and patched postgres ```sh From 39bb6fb19c6f8a8741de0770fd2262bec9f8c88e Mon Sep 17 00:00:00 2001 From: Max Sharnoff Date: Thu, 19 Aug 2021 08:46:18 -0700 Subject: [PATCH 24/24] Marginally improve walkeeper error visibility (#440) Adds a warning if a postgres query fails, and some additional context to errors generated inside `ReceiveWalConn::run` --- walkeeper/src/receive_wal.rs | 13 +++++++++---- zenith_utils/src/postgres_backend.rs | 3 ++- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/walkeeper/src/receive_wal.rs b/walkeeper/src/receive_wal.rs index 9483a2c6cb..0ca2d2252c 100644 --- a/walkeeper/src/receive_wal.rs +++ b/walkeeper/src/receive_wal.rs @@ -2,7 +2,7 @@ //! //! FIXME: better description needed here -use anyhow::{bail, Result}; +use anyhow::{bail, Context, Result}; use bincode::config::Options; use bytes::{Buf, Bytes}; use log::*; @@ -236,7 +236,9 @@ impl<'pg> ReceiveWalConn<'pg> { .write_message(&BeMessage::CopyBothResponse)?; // Receive information about server - let server_info = self.read_msg::()?; + let server_info = self + .read_msg::() + .context("Failed to receive server info")?; info!( "Start handshake with wal_proposer {} sysid {} timeline {} tenant {}", self.peer_addr, server_info.system_id, server_info.timeline_id, server_info.tenant_id, @@ -284,7 +286,9 @@ impl<'pg> ReceiveWalConn<'pg> { self.write_msg(&my_info)?; /* Wait for vote request */ - let prop = self.read_msg::()?; + let prop = self + .read_msg::() + .context("Failed to read vote request")?; /* This is Paxos check which should ensure that only one master can perform commits */ if prop.node_id < my_info.server.node_id { /* Send my node-id to inform proposer that it's candidate was rejected */ @@ -330,7 +334,8 @@ impl<'pg> ReceiveWalConn<'pg> { let msg_bytes = self.read_msg_bytes()?; let mut msg_reader = msg_bytes.reader(); - let req = SafeKeeperRequest::des_from(&mut msg_reader)?; + let req = SafeKeeperRequest::des_from(&mut msg_reader) + .context("Failed to get WAL message header")?; if req.sender_id != my_info.server.node_id { bail!("Sender NodeId is changed"); } diff --git a/zenith_utils/src/postgres_backend.rs b/zenith_utils/src/postgres_backend.rs index d4de084442..e730178823 100644 --- a/zenith_utils/src/postgres_backend.rs +++ b/zenith_utils/src/postgres_backend.rs @@ -301,8 +301,9 @@ impl PostgresBackend { FeMessage::Query(m) => { trace!("got query {:?}", m.body); // xxx distinguish fatal and recoverable errors? - if let Err(e) = handler.process_query(self, m.body) { + if let Err(e) = handler.process_query(self, m.body.clone()) { let errmsg = format!("{}", e); + warn!("query handler for {:?} failed: {}", m.body, errmsg); self.write_message_noflush(&BeMessage::ErrorResponse(errmsg))?; } self.write_message(&BeMessage::ReadyForQuery)?;