storcon: do not update observed state on node activation (#11155)

## Problem

When a node becomes active, we query its locations and update the observed state in place. This can race with the observed-state updates performed when processing reconcile results.
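
To make the race concrete, here is a minimal, self-contained sketch of the interleaving, using hypothetical stand-in types rather than the actual storcon ones: the node-activation path and the reconcile-result path both write to the same per-node observed location.

```rust
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::thread;

// Hypothetical stand-ins for storcon's observed state: a map from node id to
// the location config we believe that node holds (None = unknown/uncertain).
type NodeId = u64;
type LocationConfig = String;
type Observed = HashMap<NodeId, Option<LocationConfig>>;

fn main() {
    let observed = Arc::new(Mutex::new(Observed::new()));

    // Writer 1: node activation queries the node and writes the reported
    // locations into the observed state in place. The reported value may
    // already be stale by the time the lock is taken.
    let activation = {
        let observed = Arc::clone(&observed);
        thread::spawn(move || {
            let reported = Some("AttachedSingle@gen=7".to_string());
            observed.lock().unwrap().insert(1, reported);
        })
    };

    // Writer 2: a finished Reconciler reports the state it just applied.
    let reconcile_result = {
        let observed = Arc::clone(&observed);
        thread::spawn(move || {
            let applied = Some("AttachedSingle@gen=8".to_string());
            observed.lock().unwrap().insert(1, applied);
        })
    };

    activation.join().unwrap();
    reconcile_result.join().unwrap();

    // Depending on which writer ran last, the activation write may have
    // clobbered the fresher reconcile result with stale data.
    println!("observed = {:?}", observed.lock().unwrap().get(&1));
}
```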

## Summary of changes

Stop updating the observed state in place on node activation. Instead, compare the locations reported by the node against what we have on record and mark any mismatch as uncertain (`None`), leaving it to the background reconciliation loop; a sketch of this merge follows the list below.

The argument for the old in-place update was that it reduces the need for background reconciliations. I don't think this is actually true anymore. There are two cases.

1. Restart of a node after a drain. The node usually does not go through the offline state here, so its observed locations were not marked as `None`. In any case, there should be at most a handful of shards on the node, since we have just drained it.
2. A node comes back online after a failure or network partition. When the node is marked offline, we reschedule everything away from it. When it later becomes active, the previous observed locations are extraneous and require a reconciliation anyway.
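
As a minimal sketch of the new merge (simplified stand-in types; the real change is in the `service.rs` hunk below), activation no longer overwrites observed state; it only downgrades mismatches to `None` so the background loop re-establishes the truth:

```rust
use std::collections::HashMap;

type NodeId = u64;
type LocationConfig = String;

// Simplified stand-in for the new activation-time merge: compare what the
// node reports against what we have on record, and mark mismatches (including
// net new locations) as None ("uncertain") instead of overwriting in place.
fn merge_reported(
    observed: &mut HashMap<NodeId, Option<LocationConfig>>,
    node: NodeId,
    reported: Option<LocationConfig>,
) -> bool {
    let on_record = observed.entry(node).or_insert(None);
    if *on_record != reported {
        *on_record = None; // uncertain: the background loop will reconcile it
        true // caller counts this as a mismatched location
    } else {
        false
    }
}

fn main() {
    let mut observed = HashMap::new();
    observed.insert(1, Some("Attached@gen=7".to_string()));

    // The node reports a different config than the one on record.
    let mismatched = merge_reported(&mut observed, 1, Some("Attached@gen=8".to_string()));
    assert!(mismatched);
    assert_eq!(observed[&1], None); // marked uncertain, not overwritten
}
```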

Closes https://github.com/neondatabase/neon/issues/11148
Author: Vlad Lazar (committed by GitHub)
Date: 2025-03-12 15:31:28 +00:00
Commit: 02a83913ec (parent: c7717c85c7)
5 changed files with 48 additions and 12 deletions


@@ -36,7 +36,9 @@ use pageserver_api::config::{
 use pageserver_api::controller_api::{
     NodeAvailabilityWrapper, PlacementPolicy, TenantCreateRequest,
 };
-use pageserver_api::models::{ShardParameters, TimelineCreateRequest, TimelineInfo};
+use pageserver_api::models::{
+    ShardParameters, TenantConfigRequest, TimelineCreateRequest, TimelineInfo,
+};
 use pageserver_api::shard::{ShardCount, ShardStripeSize, TenantShardId};
 use postgres_backend::AuthType;
 use postgres_connection::parse_host_port;
@@ -1129,12 +1131,16 @@ async fn handle_tenant(subcmd: &TenantCmd, env: &mut local_env::LocalEnv) -> any
             let tenant_id = get_tenant_id(args.tenant_id, env)?;
             let tenant_conf: HashMap<_, _> =
                 args.config.iter().flat_map(|c| c.split_once(':')).collect();
+            let config = PageServerNode::parse_config(tenant_conf)?;
 
-            pageserver
-                .tenant_config(tenant_id, tenant_conf)
+            let req = TenantConfigRequest { tenant_id, config };
+
+            let storage_controller = StorageController::from_env(env);
+            storage_controller
+                .set_tenant_config(&req)
                 .await
                 .with_context(|| format!("Tenant config failed for tenant with id {tenant_id}"))?;
-            println!("tenant {tenant_id} successfully configured on the pageserver");
+            println!("tenant {tenant_id} successfully configured via storcon");
         }
     }
     Ok(())


@@ -14,7 +14,7 @@ use pageserver_api::controller_api::{
     NodeConfigureRequest, NodeDescribeResponse, NodeRegisterRequest, TenantCreateRequest,
     TenantCreateResponse, TenantLocateResponse,
 };
-use pageserver_api::models::{TimelineCreateRequest, TimelineInfo};
+use pageserver_api::models::{TenantConfigRequest, TimelineCreateRequest, TimelineInfo};
 use pageserver_api::shard::TenantShardId;
 use pageserver_client::mgmt_api::ResponseErrorMessageExt;
 use postgres_backend::AuthType;
@@ -878,4 +878,9 @@ impl StorageController {
         )
         .await
     }
+
+    pub async fn set_tenant_config(&self, req: &TenantConfigRequest) -> anyhow::Result<()> {
+        self.dispatch(Method::PUT, "v1/tenant/config".to_string(), Some(req))
+            .await
+    }
 }


@@ -2004,21 +2004,41 @@ impl Service {
         tracing::info!("Loaded {} LocationConfigs", configs.tenant_shards.len());
 
         let mut cleanup = Vec::new();
+        let mut mismatched_locations = 0;
         {
             let mut locked = self.inner.write().unwrap();
-            for (tenant_shard_id, observed_loc) in configs.tenant_shards {
+            for (tenant_shard_id, reported) in configs.tenant_shards {
                 let Some(tenant_shard) = locked.tenants.get_mut(&tenant_shard_id) else {
                     cleanup.push(tenant_shard_id);
                     continue;
                 };
 
-                tenant_shard
+                let on_record = &mut tenant_shard
                     .observed
                     .locations
-                    .insert(node.get_id(), ObservedStateLocation { conf: observed_loc });
+                    .entry(node.get_id())
+                    .or_insert_with(|| ObservedStateLocation { conf: None })
+                    .conf;
+
+                // If the location reported by the node does not match our observed state,
+                // then we mark it as uncertain and let the background reconciliation loop
+                // deal with it.
+                //
+                // Note that this also covers net new locations reported by the node.
+                if *on_record != reported {
+                    mismatched_locations += 1;
+                    *on_record = None;
+                }
             }
         }
 
+        if mismatched_locations > 0 {
+            tracing::info!(
+                "Set observed state to None for {mismatched_locations} mismatched locations"
+            );
+        }
+
         for tenant_shard_id in cleanup {
             tracing::info!("Detaching {tenant_shard_id}");
             match node
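
The point of `conf: None` is that an uncertain location always looks dirty to the background loop. A sketch of the intuition only (not storcon's actual dirty check, whose details are assumptions here):

```rust
// Sketch of the intuition only, not storcon's actual dirty check.
#[derive(PartialEq)]
struct LocationConfig {
    mode: &'static str,
}

fn needs_reconcile(intent: &LocationConfig, observed: &Option<LocationConfig>) -> bool {
    match observed {
        // Uncertain: we must talk to the node to find out, so reconcile.
        None => true,
        // Known: reconcile only if it differs from what we want.
        Some(conf) => conf != intent,
    }
}

fn main() {
    let intent = LocationConfig { mode: "Secondary" };
    assert!(needs_reconcile(&intent, &None));
    assert!(needs_reconcile(&intent, &Some(LocationConfig { mode: "AttachedSingle" })));
    assert!(!needs_reconcile(&intent, &Some(LocationConfig { mode: "Secondary" })));
    println!("ok");
}
```

This also explains the test change below: with two shards on the restarted node, marking both observed locations uncertain costs exactly one background reconciliation per shard, hence the counter rises by 2.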


@@ -1749,18 +1749,23 @@ def test_storage_controller_re_attach(neon_env_builder: NeonEnvBuilder):
     # Restart the failed pageserver
     victim_ps.start()
 
+    env.storage_controller.reconcile_until_idle()
+
     # We expect that the re-attach call correctly tipped off the pageserver that its locations
     # are all secondaries now.
     locations = victim_ps.http_client().tenant_list_locations()["tenant_shards"]
     assert len(locations) == 2
     assert all(loc[1]["mode"] == "Secondary" for loc in locations)
 
-    # We expect that this situation resulted from the re_attach call, and not any explicit
-    # Reconciler runs: assert that the reconciliation count has not gone up since we restarted.
+    # We expect that this situation resulted from background reconciliations
+    # Reconciler runs: assert that the reconciliation count has gone up by exactly
+    # one for each shard
     reconciles_after_restart = env.storage_controller.get_metric_value(
         "storage_controller_reconcile_complete_total", filter={"status": "ok"}
     )
-    assert reconciles_after_restart == reconciles_before_restart
+    assert reconciles_before_restart is not None
+    assert reconciles_after_restart == reconciles_before_restart + 2
 
 
 def test_storage_controller_shard_scheduling_policy(neon_env_builder: NeonEnvBuilder):


@@ -436,7 +436,7 @@ def test_single_branch_get_tenant_size_grows(
     # when our tenant is configured with a tiny pitr interval, dropping a table should
     # cause synthetic size to go down immediately
     tenant_config["pitr_interval"] = "0s"
-    env.pageserver.http_client().set_tenant_config(tenant_id, tenant_config)
+    env.storage_controller.pageserver_api().set_tenant_config(tenant_id, tenant_config)
     (current_lsn, size) = get_current_consistent_size(
         env, endpoint, size_debug_file, http_client, tenant_id, timeline_id
     )