naming orthodoxy: always refer to leases as LSN leases

This commit is contained in:
Christian Schwarz
2025-07-25 13:32:20 +02:00
parent 2ee24900ca
commit a948054db3
6 changed files with 43 additions and 34 deletions

View File

@@ -4842,13 +4842,13 @@ impl TenantShard {
// Cull any expired leases
let now = SystemTime::now();
target.leases.retain(|_, lease| !lease.is_expired(&now));
target.lsn_leases.retain(|_, lease| !lease.is_expired(&now));
timeline.standby_horizons.cull_leases(now);
timeline
.metrics
.valid_lsn_lease_count_gauge
.set(target.leases.len() as u64);
.set(target.lsn_leases.len() as u64);
// Look up parent's PITR cutoff to update the child's knowledge of whether it is within parent's PITR
if let Some(ancestor_id) = timeline.get_ancestor_timeline_id() {
@@ -9493,7 +9493,7 @@ mod tests {
// Keeping everything <= Lsn(0x80) b/c leases:
// 0/10: initdb layer
// (0/20..=0/70).step_by(0x10): image layers added when creating the timeline.
assert_eq!(res.layers_needed_by_leases, 7);
assert_eq!(res.layers_needed_by_lsn_leases, 7);
// Keeping 0/90 b/c it is the latest layer.
assert_eq!(res.layers_not_updated, 1);
// Removed 0/80.
@@ -9778,7 +9778,7 @@ mod tests {
time: Some(Lsn(0x30)),
space: Lsn(0x30),
},
leases: Default::default(),
lsn_leases: Default::default(),
within_ancestor_pitr: false,
};
}
@@ -10333,7 +10333,7 @@ mod tests {
time: Some(Lsn(0x30)),
space: Lsn(0x30),
},
leases: Default::default(),
lsn_leases: Default::default(),
within_ancestor_pitr: false,
};
}
@@ -10582,7 +10582,7 @@ mod tests {
time: Some(Lsn(0x30)),
space: Lsn(0x30),
},
leases: Default::default(),
lsn_leases: Default::default(),
within_ancestor_pitr: false,
};
}
@@ -10831,7 +10831,7 @@ mod tests {
time: Some(Lsn(0x10)),
space: Lsn(0x10),
},
leases: Default::default(),
lsn_leases: Default::default(),
within_ancestor_pitr: false,
};
}
@@ -10851,7 +10851,7 @@ mod tests {
time: Some(Lsn(0x50)),
space: Lsn(0x50),
},
leases: Default::default(),
lsn_leases: Default::default(),
within_ancestor_pitr: false,
};
}
@@ -11572,7 +11572,7 @@ mod tests {
time: Some(Lsn(0x30)),
space: Lsn(0x30),
},
leases: Default::default(),
lsn_leases: Default::default(),
within_ancestor_pitr: false,
};
}
@@ -11961,7 +11961,7 @@ mod tests {
time: Some(Lsn(0x30)),
space: Lsn(0x30),
},
leases: Default::default(),
lsn_leases: Default::default(),
within_ancestor_pitr: false,
};
}
@@ -12213,7 +12213,7 @@ mod tests {
time: Some(Lsn(0x30)),
space: Lsn(0x30),
},
leases: Default::default(),
lsn_leases: Default::default(),
within_ancestor_pitr: false,
};
}
@@ -12539,7 +12539,7 @@ mod tests {
time: Some(Lsn(0x30)),
space: Lsn(0x30),
},
leases: Default::default(),
lsn_leases: Default::default(),
within_ancestor_pitr: false,
};
}

View File

@@ -13,7 +13,7 @@ pub struct GcResult {
pub layers_needed_by_cutoff: u64,
pub layers_needed_by_pitr: u64,
pub layers_needed_by_branches: u64,
pub layers_needed_by_leases: u64,
pub layers_needed_by_lsn_leases: u64,
pub layers_not_updated: u64,
pub layers_removed: u64, // # of layer files removed because they have been made obsolete by newer ondisk files.
@@ -43,7 +43,7 @@ impl AddAssign for GcResult {
self.layers_needed_by_pitr += other.layers_needed_by_pitr;
self.layers_needed_by_cutoff += other.layers_needed_by_cutoff;
self.layers_needed_by_branches += other.layers_needed_by_branches;
self.layers_needed_by_leases += other.layers_needed_by_leases;
self.layers_needed_by_lsn_leases += other.layers_needed_by_lsn_leases;
self.layers_not_updated += other.layers_not_updated;
self.layers_removed += other.layers_removed;

View File

@@ -139,8 +139,9 @@ pub struct TimelineInputs {
/// Cutoff point calculated from the user-supplied 'max_retention_period'
retention_param_cutoff: Option<Lsn>,
/// Lease points on the timeline
lease_points: Vec<Lsn>,
/// LSN lease points on the timeline
#[serde(rename = "lease_points")]
lsn_lease_points: Vec<Lsn>,
}
/// Gathers the inputs for the tenant sizing model.
@@ -250,8 +251,8 @@ pub(super) async fn gather_inputs(
let branch_is_invisible = timeline.is_invisible() == Some(true);
let lease_points = gc_info
.leases
let lsn_lease_points = gc_info
.lsn_leases
.keys()
.filter(|&&lsn| lsn > ancestor_lsn)
.copied()
@@ -274,8 +275,12 @@ pub(super) async fn gather_inputs(
.collect::<Vec<_>>();
if !branch_is_invisible {
// Do not count lease points for invisible branches.
lsns.extend(lease_points.iter().map(|&lsn| (lsn, LsnKind::LeasePoint)));
// Do not count lsn lease points for invisible branches.
lsns.extend(
lsn_lease_points
.iter()
.map(|&lsn| (lsn, LsnKind::LeasePoint)),
);
}
drop(gc_info);
@@ -409,7 +414,7 @@ pub(super) async fn gather_inputs(
latest_gc_cutoff: *timeline.get_applied_gc_cutoff_lsn(),
next_pitr_cutoff,
retention_param_cutoff,
lease_points,
lsn_lease_points,
});
}

View File

@@ -485,7 +485,7 @@ pub(crate) struct GcInfo {
pub(crate) cutoffs: GcCutoffs,
/// Leases granted to particular LSNs.
pub(crate) leases: BTreeMap<Lsn, LsnLease>,
pub(crate) lsn_leases: BTreeMap<Lsn, LsnLease>,
/// Whether our branch point is within our ancestor's PITR interval (for cost estimation)
pub(crate) within_ancestor_pitr: bool,
@@ -533,7 +533,7 @@ impl GcInfo {
self.remove_child_maybe_offloaded(child_id, MaybeOffloaded::Yes)
}
pub(crate) fn lsn_covered_by_lease(&self, lsn: Lsn) -> bool {
self.leases.contains_key(&lsn)
self.lsn_leases.contains_key(&lsn)
}
}
@@ -1813,7 +1813,7 @@ impl Timeline {
let valid_until = SystemTime::now() + length;
let entry = gc_info.leases.entry(lsn);
let entry = gc_info.lsn_leases.entry(lsn);
match entry {
Entry::Occupied(mut occupied) => {
@@ -6536,7 +6536,7 @@ impl Timeline {
return Err(GcError::TimelineCancelled);
}
let (space_cutoff, time_cutoff, retain_lsns, max_lsn_with_valid_lease) = {
let (space_cutoff, time_cutoff, retain_lsns, max_lsn_with_valid_lsn_lease) = {
let gc_info = self.gc_info.read().unwrap();
let space_cutoff = min(gc_info.cutoffs.space, self.get_disk_consistent_lsn());
@@ -6551,13 +6551,14 @@ impl Timeline {
//
// Caveat: `refresh_gc_info` is in charge of updating the lease map.
// Here, we do not check for stale leases again.
let max_lsn_with_valid_lease = gc_info.leases.last_key_value().map(|(lsn, _)| *lsn);
let max_lsn_with_valid_lsn_lease =
gc_info.lsn_leases.last_key_value().map(|(lsn, _)| *lsn);
(
space_cutoff,
time_cutoff,
retain_lsns,
max_lsn_with_valid_lease,
max_lsn_with_valid_lsn_lease,
)
};
@@ -6604,7 +6605,7 @@ impl Timeline {
space_cutoff,
time_cutoff,
retain_lsns,
max_lsn_with_valid_lease,
max_lsn_with_valid_lsn_lease,
new_gc_cutoff,
)
.instrument(
@@ -6623,7 +6624,7 @@ impl Timeline {
space_cutoff: Lsn,
time_cutoff: Option<Lsn>, // None if uninitialized
retain_lsns: Vec<Lsn>,
max_lsn_with_valid_lease: Option<Lsn>,
max_lsn_with_valid_lsn_lease: Option<Lsn>,
new_gc_cutoff: Lsn,
) -> Result<GcResult, GcError> {
// FIXME: if there is an ongoing detach_from_ancestor, we should just skip gc
@@ -6733,15 +6734,15 @@ impl Timeline {
}
// 4. Is there a valid lease that requires us to keep this layer?
if let Some(lsn) = &max_lsn_with_valid_lease {
if let Some(lsn) = &max_lsn_with_valid_lsn_lease {
// keep if layer start <= any of the lease
if &l.get_lsn_range().start <= lsn {
debug!(
"keeping {} because there is a valid lease preventing GC at {}",
"keeping {} because there is a valid LSN lease preventing GC at {}",
l.layer_name(),
lsn,
);
result.layers_needed_by_leases += 1;
result.layers_needed_by_lsn_leases += 1;
continue 'outer;
}
}

View File

@@ -3270,7 +3270,7 @@ impl Timeline {
retain_lsns_below_horizon.push(*lsn);
}
}
for lsn in gc_info.leases.keys() {
for lsn in gc_info.lsn_leases.keys() {
if lsn < &gc_cutoff {
retain_lsns_below_horizon.push(*lsn);
}

View File

@@ -135,7 +135,10 @@ impl Horizons {
let res = LeaseInfo {
valid_until: updated.valid_until,
};
inner.metrics.leases_count_gauge.set(inner.leases_by_id.len().into_u64());
inner
.metrics
.leases_count_gauge
.set(inner.leases_by_id.len().into_u64());
Ok(res)
}