feat(storage-controller): add node shards api (#8896)

For control-plane-managed tenants, the admin console has a page that lists all tenants on a
specific pageserver. For storage-controller-managed tenants, there is currently no equivalent.

## Summary of changes

Adds an API that lists all shards on a given node, reporting both the intended and the observed placement of each shard.
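
For reference, here is a sketch of the response shape, inferred from how `NodeShard` and `NodeShardResponse` are constructed in `Service::get_node_shards` further down. The authoritative definitions live in `pageserver_api::controller_api`; the derive attributes and import paths below are assumptions.

```rust
// Sketch only: field names and types are taken from how the structs are
// built in Service::get_node_shards; derives and module paths are assumed.
use pageserver_api::shard::TenantShardId;
use serde::{Deserialize, Serialize};
use utils::id::NodeId;

#[derive(Serialize, Deserialize, Debug)]
pub struct NodeShardResponse {
    pub node_id: NodeId,
    pub shards: Vec<NodeShard>,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct NodeShard {
    pub tenant_shard_id: TenantShardId,
    /// Some(false) = intended attached here, Some(true) = intended secondary here,
    /// None = the intent does not place this shard on the node at all.
    pub is_intended_secondary: Option<bool>,
    /// Same encoding, derived from the node's observed LocationConf;
    /// None = no observed location on this node.
    pub is_observed_secondary: Option<bool>,
}
```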

---------

Signed-off-by: Alex Chi Z <chi@neon.tech>
Author: Alex Chi Z. (committed by GitHub)
Date: 2024-09-07 02:14:21 +08:00
Commit: ac5815b594 (parent 30583cb626)
7 changed files with 157 additions and 10 deletions


@@ -539,6 +539,17 @@ async fn handle_node_status(req: Request<Body>) -> Result<Response<Body>, ApiErr
     json_response(StatusCode::OK, node_status)
 }
+
+async fn handle_node_shards(req: Request<Body>) -> Result<Response<Body>, ApiError> {
+    check_permissions(&req, Scope::Admin)?;
+
+    let state = get_state(&req);
+    let node_id: NodeId = parse_request_param(&req, "node_id")?;
+    let node_status = state.service.get_node_shards(node_id).await?;
+
+    json_response(StatusCode::OK, node_status)
+}
+
 async fn handle_get_leader(req: Request<Body>) -> Result<Response<Body>, ApiError> {
     check_permissions(&req, Scope::Admin)?;
@@ -1109,6 +1120,13 @@ pub fn make_router(
         .get("/control/v1/node/:node_id", |r| {
             named_request_span(r, handle_node_status, RequestName("control_v1_node_status"))
         })
+        .get("/control/v1/node/:node_id/shards", |r| {
+            named_request_span(
+                r,
+                handle_node_shards,
+                RequestName("control_v1_node_describe"),
+            )
+        })
         .get("/control/v1/leader", |r| {
             named_request_span(r, handle_get_leader, RequestName("control_v1_get_leader"))
         })


@@ -41,11 +41,11 @@ use itertools::Itertools;
 use pageserver_api::{
     controller_api::{
         MetadataHealthRecord, MetadataHealthUpdateRequest, NodeAvailability, NodeRegisterRequest,
-        NodeSchedulingPolicy, PlacementPolicy, ShardSchedulingPolicy, ShardsPreferredAzsRequest,
-        ShardsPreferredAzsResponse, TenantCreateRequest, TenantCreateResponse,
-        TenantCreateResponseShard, TenantDescribeResponse, TenantDescribeResponseShard,
-        TenantLocateResponse, TenantPolicyRequest, TenantShardMigrateRequest,
-        TenantShardMigrateResponse,
+        NodeSchedulingPolicy, NodeShard, NodeShardResponse, PlacementPolicy, ShardSchedulingPolicy,
+        ShardsPreferredAzsRequest, ShardsPreferredAzsResponse, TenantCreateRequest,
+        TenantCreateResponse, TenantCreateResponseShard, TenantDescribeResponse,
+        TenantDescribeResponseShard, TenantLocateResponse, TenantPolicyRequest,
+        TenantShardMigrateRequest, TenantShardMigrateResponse,
     },
     models::{
         SecondaryProgress, TenantConfigRequest, TimelineArchivalConfigRequest,
@@ -4924,6 +4924,45 @@ impl Service {
         ))
     }
+
+    pub(crate) async fn get_node_shards(
+        &self,
+        node_id: NodeId,
+    ) -> Result<NodeShardResponse, ApiError> {
+        let locked = self.inner.read().unwrap();
+        let mut shards = Vec::new();
+        for (tid, tenant) in locked.tenants.iter() {
+            let is_intended_secondary = match (
+                tenant.intent.get_attached() == &Some(node_id),
+                tenant.intent.get_secondary().contains(&node_id),
+            ) {
+                (true, true) => {
+                    return Err(ApiError::InternalServerError(anyhow::anyhow!(
+                        "{} attached as primary+secondary on the same node",
+                        tid
+                    )))
+                }
+                (true, false) => Some(false),
+                (false, true) => Some(true),
+                (false, false) => None,
+            };
+            let is_observed_secondary = if let Some(ObservedStateLocation { conf: Some(conf) }) =
+                tenant.observed.locations.get(&node_id)
+            {
+                Some(conf.secondary_conf.is_some())
+            } else {
+                None
+            };
+            if is_intended_secondary.is_some() || is_observed_secondary.is_some() {
+                shards.push(NodeShard {
+                    tenant_shard_id: *tid,
+                    is_intended_secondary,
+                    is_observed_secondary,
+                });
+            }
+        }
+        Ok(NodeShardResponse { node_id, shards })
+    }
+
     pub(crate) async fn get_leader(&self) -> DatabaseResult<Option<ControllerPersistence>> {
         self.persistence.get_leader().await
     }
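
To make the `Option<bool>` encoding concrete, the intent-side classification above can be restated as a standalone function over plain booleans; this is an illustration only, not code from the commit.

```rust
// Illustration only: the (attached, secondary) -> is_intended_secondary mapping
// used in get_node_shards, with the primary+secondary conflict surfaced as an error.
fn classify_intent(attached_here: bool, secondary_here: bool) -> Result<Option<bool>, &'static str> {
    match (attached_here, secondary_here) {
        (true, true) => Err("attached as primary+secondary on the same node"),
        (true, false) => Ok(Some(false)), // intended attached (primary) on this node
        (false, true) => Ok(Some(true)),  // intended secondary on this node
        (false, false) => Ok(None),       // intent does not involve this node
    }
}

fn main() {
    assert_eq!(classify_intent(true, false), Ok(Some(false)));
    assert_eq!(classify_intent(false, true), Ok(Some(true)));
    assert_eq!(classify_intent(false, false), Ok(None));
    assert!(classify_intent(true, true).is_err());
}
```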