# pageserver/storcon: add patch endpoints for tenant config metrics (#10020)
## Problem
Cplane and storage controller tenant config changes are not additive: any change overwrites the entire existing tenant config. This would be fine if both sides did client-side patching, but that is not the case.
Once this merges, cplane must be updated to use the PATCH endpoint.
## Summary of changes
### High Level
Allow patching of the tenant configuration via a `PATCH /v1/tenant/config` endpoint.
It takes the same payload shape as its PUT counterpart. For example, the payload
below updates `gc_period` and unsets `compaction_period`; all other fields keep
their current values.
```json
{
  "tenant_id": "1234",
  "gc_period": "10s",
  "compaction_period": null
}
```
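
For illustration, here is a hedged sketch of how a client could send that payload with `reqwest` (the HTTP client the storage controller code itself imports). The base URL and port are placeholders for a local deployment, no auth header is shown, and the snippet assumes `tokio`, `serde_json`, and reqwest's `json` feature; none of these details are taken from the PR itself.

```rust
use serde_json::json;

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    let client = reqwest::Client::new();

    // Placeholder address for a locally running storage controller.
    let resp = client
        .patch("http://127.0.0.1:1234/v1/tenant/config")
        .json(&json!({
            "tenant_id": "1234",
            "gc_period": "10s",
            "compaction_period": null
        }))
        .send()
        .await?;

    // On success the handler replies with 200 OK after persisting the merged
    // config and kicking off reconciliation (see handle_tenant_config_patch below).
    println!("status: {}", resp.status());
    Ok(())
}
```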
### Low Level
* The pageserver (PS) and storage controller (storcon) gain `PATCH /v1/tenant/config` endpoints. The PS endpoint is only used for cplane-managed instances.
* `storcon_cli` gains separate `set-tenant-config` and `patch-tenant-config` commands.

Related: https://github.com/neondatabase/cloud/issues/21043
```diff
@@ -18,8 +18,9 @@ use pageserver_api::controller_api::{
     ShardsPreferredAzsRequest, TenantCreateRequest,
 };
 use pageserver_api::models::{
-    TenantConfigRequest, TenantLocationConfigRequest, TenantShardSplitRequest,
-    TenantTimeTravelRequest, TimelineArchivalConfigRequest, TimelineCreateRequest,
+    TenantConfigPatchRequest, TenantConfigRequest, TenantLocationConfigRequest,
+    TenantShardSplitRequest, TenantTimeTravelRequest, TimelineArchivalConfigRequest,
+    TimelineCreateRequest,
 };
 use pageserver_api::shard::TenantShardId;
 use pageserver_client::{mgmt_api, BlockUnblock};
```
```diff
@@ -208,6 +209,27 @@ async fn handle_tenant_location_config(
     )
 }
 
+async fn handle_tenant_config_patch(
+    service: Arc<Service>,
+    req: Request<Body>,
+) -> Result<Response<Body>, ApiError> {
+    check_permissions(&req, Scope::PageServerApi)?;
+
+    let mut req = match maybe_forward(req).await {
+        ForwardOutcome::Forwarded(res) => {
+            return res;
+        }
+        ForwardOutcome::NotForwarded(req) => req,
+    };
+
+    let config_req = json_request::<TenantConfigPatchRequest>(&mut req).await?;
+
+    json_response(
+        StatusCode::OK,
+        service.tenant_config_patch(config_req).await?,
+    )
+}
+
 async fn handle_tenant_config_set(
     service: Arc<Service>,
     req: Request<Body>,
```
```diff
@@ -1863,6 +1885,13 @@ pub fn make_router(
         .delete("/v1/tenant/:tenant_id", |r| {
             tenant_service_handler(r, handle_tenant_delete, RequestName("v1_tenant"))
         })
+        .patch("/v1/tenant/config", |r| {
+            tenant_service_handler(
+                r,
+                handle_tenant_config_patch,
+                RequestName("v1_tenant_config"),
+            )
+        })
         .put("/v1/tenant/config", |r| {
             tenant_service_handler(r, handle_tenant_config_set, RequestName("v1_tenant_config"))
         })
```
```diff
@@ -52,8 +52,8 @@ use pageserver_api::{
         TenantPolicyRequest, TenantShardMigrateRequest, TenantShardMigrateResponse,
     },
     models::{
-        SecondaryProgress, TenantConfigRequest, TimelineArchivalConfigRequest,
-        TopTenantShardsRequest,
+        SecondaryProgress, TenantConfigPatchRequest, TenantConfigRequest,
+        TimelineArchivalConfigRequest, TopTenantShardsRequest,
     },
 };
 use reqwest::StatusCode;
```
```diff
@@ -139,6 +139,7 @@ enum TenantOperations {
     Create,
     LocationConfig,
     ConfigSet,
+    ConfigPatch,
     TimeTravelRemoteStorage,
     Delete,
     UpdatePolicy,
```
```diff
@@ -2602,6 +2603,55 @@ impl Service {
         Ok(result)
     }
 
+    pub(crate) async fn tenant_config_patch(
+        &self,
+        req: TenantConfigPatchRequest,
+    ) -> Result<(), ApiError> {
+        let _tenant_lock = trace_exclusive_lock(
+            &self.tenant_op_locks,
+            req.tenant_id,
+            TenantOperations::ConfigPatch,
+        )
+        .await;
+
+        let tenant_id = req.tenant_id;
+        let patch = req.config;
+
+        let base = {
+            let locked = self.inner.read().unwrap();
+            let shards = locked
+                .tenants
+                .range(TenantShardId::tenant_range(req.tenant_id));
+
+            let mut configs = shards.map(|(_sid, shard)| &shard.config).peekable();
+
+            let first = match configs.peek() {
+                Some(first) => (*first).clone(),
+                None => {
+                    return Err(ApiError::NotFound(
+                        anyhow::anyhow!("Tenant {} not found", req.tenant_id).into(),
+                    ));
+                }
+            };
+
+            if !configs.all_equal() {
+                tracing::error!("Tenant configs for {} are mismatched. ", req.tenant_id);
+                // This can't happen because we atomically update the database records
+                // of all shards to the new value in [`Self::set_tenant_config_and_reconcile`].
+                return Err(ApiError::InternalServerError(anyhow::anyhow!(
+                    "Tenant configs for {} are mismatched",
+                    req.tenant_id
+                )));
+            }
+
+            first
+        };
+
+        let updated_config = base.apply_patch(patch);
+        self.set_tenant_config_and_reconcile(tenant_id, updated_config)
+            .await
+    }
+
     pub(crate) async fn tenant_config_set(&self, req: TenantConfigRequest) -> Result<(), ApiError> {
         // We require an exclusive lock, because we are updating persistent and in-memory state
         let _tenant_lock = trace_exclusive_lock(
```
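
The method above leaves the actual merge to `base.apply_patch(patch)`, which is defined on the tenant config type in `pageserver_api` and is not part of this diff. As a rough sketch of the intended tri-state semantics (an absent field keeps its current value, an explicit `null` clears it, a value overwrites it), a simplified stand-in could look like the following; `FieldPatch`, `SimpleTenantConfig`, `SimpleTenantConfigPatch`, and the field set are invented for illustration and are not the real `pageserver_api` definitions.

```rust
use std::time::Duration;

/// Tri-state patch for one config field: keep the current value,
/// clear it, or set a new one. Purely illustrative.
#[derive(Clone, Debug, Default)]
enum FieldPatch<T> {
    #[default]
    Keep,    // field absent from the PATCH body
    Remove,  // field present but null
    Set(T),  // field present with a value
}

impl<T> FieldPatch<T> {
    fn apply(self, target: &mut Option<T>) {
        match self {
            FieldPatch::Keep => {}
            FieldPatch::Remove => *target = None,
            FieldPatch::Set(v) => *target = Some(v),
        }
    }
}

/// Simplified stand-ins for the real tenant config and its patch type.
#[derive(Clone, Debug, Default, PartialEq)]
struct SimpleTenantConfig {
    gc_period: Option<Duration>,
    compaction_period: Option<Duration>,
}

#[derive(Clone, Debug, Default)]
struct SimpleTenantConfigPatch {
    gc_period: FieldPatch<Duration>,
    compaction_period: FieldPatch<Duration>,
}

impl SimpleTenantConfig {
    /// Shaped like the `base.apply_patch(patch)` call in the diff:
    /// fold each field patch into a copy of the current config.
    fn apply_patch(mut self, patch: SimpleTenantConfigPatch) -> Self {
        patch.gc_period.apply(&mut self.gc_period);
        patch.compaction_period.apply(&mut self.compaction_period);
        self
    }
}

fn main() {
    let base = SimpleTenantConfig {
        gc_period: None,
        compaction_period: Some(Duration::from_secs(20)),
    };
    // Same intent as the JSON example above: set gc_period, unset compaction_period.
    let patch = SimpleTenantConfigPatch {
        gc_period: FieldPatch::Set(Duration::from_secs(10)),
        compaction_period: FieldPatch::Remove,
    };

    let merged = base.apply_patch(patch);
    assert_eq!(merged.gc_period, Some(Duration::from_secs(10)));
    assert_eq!(merged.compaction_period, None);
}
```

This mirrors the example payload earlier: `gc_period` is set, `compaction_period` is cleared, and untouched fields keep their values.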
```diff
@@ -2611,12 +2661,32 @@
         )
         .await;
 
-        let tenant_id = req.tenant_id;
-        let config = req.config;
-
+        let tenant_exists = {
+            let locked = self.inner.read().unwrap();
+            let mut r = locked
+                .tenants
+                .range(TenantShardId::tenant_range(req.tenant_id));
+            r.next().is_some()
+        };
+
+        if !tenant_exists {
+            return Err(ApiError::NotFound(
+                anyhow::anyhow!("Tenant {} not found", req.tenant_id).into(),
+            ));
+        }
+
+        self.set_tenant_config_and_reconcile(req.tenant_id, req.config)
+            .await
+    }
+
+    async fn set_tenant_config_and_reconcile(
+        &self,
+        tenant_id: TenantId,
+        config: TenantConfig,
+    ) -> Result<(), ApiError> {
         self.persistence
             .update_tenant_shard(
-                TenantFilter::Tenant(req.tenant_id),
+                TenantFilter::Tenant(tenant_id),
                 None,
                 Some(config.clone()),
                 None,
```