diff --git a/pageserver/src/basebackup.rs b/pageserver/src/basebackup.rs
index 1ec344a569..7ac1f30cf2 100644
--- a/pageserver/src/basebackup.rs
+++ b/pageserver/src/basebackup.rs
@@ -83,6 +83,7 @@ fn add_relmap_files(
     ar: &mut Builder<&mut dyn Write>,
     timeline: &Arc,
     lsn: Lsn,
+    snappath: &str,
 ) -> anyhow::Result<()> {
     for db in timeline.get_databases(lsn)?.iter() {
         let tag = BufferTag {
@@ -95,8 +96,12 @@ fn add_relmap_files(
         } else {
             // User defined tablespaces are not supported
            assert!(db.spcnode == pg_constants::DEFAULTTABLESPACE_OID);
+            let src_path = format!("{}/base/1/PG_VERSION", snappath);
+            let dst_path = format!("base/{}/PG_VERSION", db.dbnode);
+            ar.append_path_with_name(&src_path, &dst_path)?;
             format!("base/{}/pg_filenode.map", db.dbnode)
         };
+        info!("Deliver {}", path);
         assert!(img.len() == 512);
         let header = new_tar_header(&path, img.len() as u64)?;
         ar.append(&header, &img[..])?;
@@ -239,7 +244,7 @@ pub fn send_tarball_at_lsn(
         pg_constants::PG_MXACT_OFFSETS_FORKNUM,
         lsn,
     )?;
-    add_relmap_files(&mut ar, timeline, lsn)?;
+    add_relmap_files(&mut ar, timeline, lsn, &snappath)?;
     add_twophase_files(&mut ar, timeline, lsn)?;
     add_pgcontrol_file(&mut ar, timeline, lsn)?;

diff --git a/pageserver/src/repository/rocksdb.rs b/pageserver/src/repository/rocksdb.rs
index de6cce768e..2ec40156d8 100644
--- a/pageserver/src/repository/rocksdb.rs
+++ b/pageserver/src/repository/rocksdb.rs
@@ -866,34 +866,42 @@ impl Timeline for RocksTimeline {
         src_db_id: Oid,
         src_tablespace_id: Oid,
     ) -> Result<()> {
-        let key = CacheKey {
-            tag: BufferTag {
-                rel: RelTag {
-                    spcnode: src_tablespace_id,
-                    dbnode: src_db_id,
-                    relnode: 0,
-                    forknum: 0u8,
-                },
-                blknum: 0,
-            },
-            lsn: Lsn(0),
-        };
-        let mut iter = self.db.raw_iterator();
-        iter.seek(key.to_bytes());
         let mut n = 0;
-        while iter.valid() {
-            let mut key = CacheKey::from_slice(iter.key().unwrap());
-            if key.tag.rel.spcnode != src_tablespace_id || key.tag.rel.dbnode != src_db_id {
-                break;
-            }
-            key.tag.rel.spcnode = tablespace_id;
-            key.tag.rel.dbnode = db_id;
-            key.lsn = lsn;
+        for forknum in &[
+            pg_constants::MAIN_FORKNUM,
+            pg_constants::FSM_FORKNUM,
+            pg_constants::VISIBILITYMAP_FORKNUM,
+            pg_constants::INIT_FORKNUM,
+            pg_constants::PG_FILENODEMAP_FORKNUM,
+        ] {
+            let key = CacheKey {
+                tag: BufferTag {
+                    rel: RelTag {
+                        spcnode: src_tablespace_id,
+                        dbnode: src_db_id,
+                        relnode: 0,
+                        forknum: *forknum,
+                    },
+                    blknum: 0,
+                },
+                lsn: Lsn(0),
+            };
+            let mut iter = self.db.raw_iterator();
+            iter.seek(key.to_bytes());
+            while iter.valid() {
+                let mut key = CacheKey::from_slice(iter.key().unwrap());
+                if key.tag.rel.spcnode != src_tablespace_id || key.tag.rel.dbnode != src_db_id {
+                    break;
+                }
+                key.tag.rel.spcnode = tablespace_id;
+                key.tag.rel.dbnode = db_id;
+                key.lsn = lsn;

-            let v = iter.value().unwrap();
-            self.db.put(key.to_bytes(), v)?;
-            n += 1;
-            iter.next();
+                let v = iter.value().unwrap();
+                self.db.put(key.to_bytes(), v)?;
+                n += 1;
+                iter.next();
+            }
         }
         info!(
             "Create database {}/{}, copy {} entries",