diff --git a/pageserver/src/branches.rs b/pageserver/src/branches.rs
index afb32143a6..c92c711b21 100644
--- a/pageserver/src/branches.rs
+++ b/pageserver/src/branches.rs
@@ -342,30 +342,6 @@ fn parse_point_in_time(conf: &PageServerConf, s: &str) -> Result {
     bail!("could not parse point-in-time {}", s);
 }
 
-<<<<<<< HEAD
-// If control file says the cluster was shut down cleanly, modify it, to mark
-// it as crashed. That forces crash recovery when you start the cluster.
-//
-// FIXME:
-// We currently do this to the initial snapshot in "zenith init". It would
-// be more natural to do this when the snapshot is restored instead, but we
-// currently don't have any code to create new snapshots, so it doesn't matter
-// Or better yet, use a less hacky way of putting the cluster into recovery.
-// Perhaps create a backup label file in the data directory when it's restored.
-fn force_crash_recovery(datadir: &Path) -> Result<()> {
-    // Read in the control file
-    let controlfilepath = datadir.to_path_buf().join("global").join("pg_control");
-    let mut controlfile = ControlFileData::decode(&fs::read(controlfilepath.as_path())?)?;
-
-    controlfile.state = postgres_ffi::DBState_DB_IN_PRODUCTION;
-
-    fs::write(controlfilepath.as_path(), controlfile.encode())?;
-
-    Ok(())
-}
-
-=======
->>>>>>> Fix various bugs caused by switch to new storage model
 fn create_timeline(conf: &PageServerConf, ancestor: Option) -> Result {
     // Create initial timeline
     let mut tli_buf = [0u8; 16];
diff --git a/postgres_ffi/src/xlog_utils.rs b/postgres_ffi/src/xlog_utils.rs
index 7876fbad20..ea8c4ae660 100644
--- a/postgres_ffi/src/xlog_utils.rs
+++ b/postgres_ffi/src/xlog_utils.rs
@@ -43,6 +43,8 @@ pub type TimeLineID = u32;
 pub type TimestampTz = u64;
 pub type XLogSegNo = u64;
 
+const XID_CHECKPOINT_INTERVAL: u32 = 1024;
+
 #[allow(non_snake_case)]
 pub fn XLogSegmentsPerXLogId(wal_segsz_bytes: usize) -> XLogSegNo {
     (0x100000000u64 / wal_segsz_bytes as u64) as XLogSegNo
@@ -370,6 +372,7 @@ impl CheckPoint {
     // Next XID should be greater than new_xid.
     // Also take in account 32-bit wrap-around.
     pub fn update_next_xid(&mut self, xid: u32) {
+        let xid = xid.wrapping_add(XID_CHECKPOINT_INTERVAL - 1) & !(XID_CHECKPOINT_INTERVAL - 1);
         let full_xid = self.nextXid.value;
         let new_xid = std::cmp::max(xid + 1, pg_constants::FIRST_NORMAL_TRANSACTION_ID);
         let old_xid = full_xid as u32;
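
Note on the update_next_xid change: the added line rounds the incoming xid up to the next multiple of XID_CHECKPOINT_INTERVAL (1024) before it is compared against nextXid, presumably so the stored nextXid only has to advance once per 1024 XIDs rather than on every record, while still staying ahead of any XID seen in the WAL. Below is a minimal standalone sketch of that rounding, assuming XID_CHECKPOINT_INTERVAL is a power of two; round_up_xid is a hypothetical helper used for illustration, not part of the patch.

    // Hypothetical helper mirroring the rounding added in update_next_xid.
    // The mask trick relies on XID_CHECKPOINT_INTERVAL being a power of two.
    const XID_CHECKPOINT_INTERVAL: u32 = 1024;

    fn round_up_xid(xid: u32) -> u32 {
        // wrapping_add handles 32-bit XID wrap-around near u32::MAX.
        xid.wrapping_add(XID_CHECKPOINT_INTERVAL - 1) & !(XID_CHECKPOINT_INTERVAL - 1)
    }

    fn main() {
        assert_eq!(round_up_xid(1), 1024);
        assert_eq!(round_up_xid(1024), 1024);
        assert_eq!(round_up_xid(1025), 2048);
        assert_eq!(round_up_xid(u32::MAX), 0); // wraps around at the top of the range
    }

With the rounding applied, the existing std::cmp::max(xid + 1, FIRST_NORMAL_TRANSACTION_ID) line then advances nextXid past the rounded value as before.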