storcon: separate scheduling context for each tenant in fill/drain

Author: Vlad Lazar
Date:   2024-06-19 18:18:00 +01:00
Parent: fd0b22f5cd
Commit: 83fdc07b20

@@ -5322,6 +5322,14 @@ impl Service {
            }
        };

        // Reset the scheduling context if we have moved over to a new tenant.
        // This is required since the affinity scores stored in the scheduling
        // context should be tenant specific. Note that we are relying on
        // [`ServiceState::tenants`] being ordered by tenant id.
        if last_inspected_shard.map(|lis| lis.tenant_id) != Some(tid.tenant_id) {
            schedule_context = ScheduleContext::default();
        }

        if tenant_shard.intent.demote_attached(scheduler, node_id) {
            match tenant_shard.schedule(scheduler, &mut schedule_context) {
                Err(e) => {
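
For readers outside the storcon codebase, the reset above relies on one ordering property: iterating [`ServiceState::tenants`] (a map keyed by tenant shard id) yields all shards of a tenant contiguously, so the per-tenant state can be discarded at each tenant boundary. Below is a minimal, runnable sketch of that pattern; `TenantId`, `TenantShardId`, and `ScheduleContext` are illustrative stand-ins, not the actual storcon definitions.

```rust
use std::collections::BTreeMap;

// Illustrative stand-ins for the storcon types; not the real definitions.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct TenantId(u32);

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct TenantShardId {
    tenant_id: TenantId,
    shard_number: u8,
}

// Stands in for the per-tenant scheduling state; in storcon this holds
// node affinity scores, but a counter is enough to show the reset.
#[derive(Default, Debug)]
struct ScheduleContext {
    shards_scheduled: u32,
}

fn main() {
    // BTreeMap keys sort by tenant_id first, so all shards of one tenant
    // are visited back-to-back -- the property the drain loop relies on.
    let mut tenants = BTreeMap::new();
    for (t, s) in [(1, 0), (1, 1), (2, 0), (3, 0), (3, 1)] {
        tenants.insert(
            TenantShardId { tenant_id: TenantId(t), shard_number: s },
            (),
        );
    }

    let mut schedule_context = ScheduleContext::default();
    let mut last_inspected_shard: Option<TenantShardId> = None;

    for (tid, _shard) in tenants.iter() {
        // Tenant boundary: discard the previous tenant's affinity state.
        if last_inspected_shard.map(|lis| lis.tenant_id) != Some(tid.tenant_id) {
            schedule_context = ScheduleContext::default();
        }

        // Stand-in for scheduling work that mutates the context.
        schedule_context.shards_scheduled += 1;
        println!("{tid:?} -> {schedule_context:?}");

        last_inspected_shard = Some(*tid);
    }
}
```
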
@@ -5492,8 +5500,17 @@ impl Service {
            ));
        }

        let mut last_inspected_tenant = None;
        while waiters.len() < MAX_RECONCILES_PER_OPERATION {
            if let Some(tid) = tids_to_promote.pop() {
                // Reset the scheduling context if we have moved over to a new tenant.
                // This is required since the affinity scores stored in the scheduling
                // context should be tenant specific. Note that we are relying on the
                // result of [`Service::fill_node_plan`] being ordered by tenant id.
                if last_inspected_tenant != Some(tid.tenant_id) {
                    schedule_context = ScheduleContext::default();
                }

                if let Some(tenant_shard) = tenants.get_mut(&tid) {
                    // If the node being filled is not a secondary anymore,
                    // skip the promotion.
@@ -5528,6 +5545,8 @@ impl Service {
                        }
                    }
                }

                last_inspected_tenant = Some(tid.tenant_id);
            } else {
                break;
            }
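
The fill path consumes a plan `Vec` instead of iterating the tenant map, but the same contiguity argument applies: because the result of [`Service::fill_node_plan`] is ordered by tenant id, popping from the back of the `Vec` still yields each tenant's shards contiguously. A sketch under that assumption, reusing the stand-in types from the previous example:

```rust
// Sketch of the fill-path bookkeeping, reusing the stand-in TenantId,
// TenantShardId and ScheduleContext types from the previous example.
fn promote_from_plan(mut tids_to_promote: Vec<TenantShardId>) {
    let mut schedule_context = ScheduleContext::default();
    let mut last_inspected_tenant: Option<TenantId> = None;

    // Assumed: the plan Vec is ordered by tenant id, so popping from the
    // back still visits each tenant's shards back-to-back.
    while let Some(tid) = tids_to_promote.pop() {
        // Tenant boundary: start affinity scoring from scratch.
        if last_inspected_tenant != Some(tid.tenant_id) {
            schedule_context = ScheduleContext::default();
        }

        // Stand-in for promoting the shard on the node being filled,
        // feeding `schedule_context` into the scheduling call.
        schedule_context.shards_scheduled += 1;

        last_inspected_tenant = Some(tid.tenant_id);
    }
}
```

Tracking only the previous tenant id (rather than a set of seen tenants) works precisely because of this contiguity guarantee; if the input were unordered, one tenant's shards could interleave with another's and the reset would fire mid-tenant, discarding affinity scores that should still apply.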