Implement archival_config timeline endpoint in the storage controller (#8680)

Implement the timeline specific `archival_config` endpoint also in the
storage controller.

It's mostly a copy-paste of the detach handler, since the task is the
same: perform the same operation on all shards.

Part of #8088.
This commit is contained in:
Arpad Müller
2024-09-02 13:51:45 +02:00
committed by GitHub
parent 516ac0591e
commit 9746b6ea31
5 changed files with 174 additions and 50 deletions

View File

@@ -17,7 +17,7 @@ use pageserver_api::controller_api::{
};
use pageserver_api::models::{
TenantConfigRequest, TenantLocationConfigRequest, TenantShardSplitRequest,
TenantTimeTravelRequest, TimelineCreateRequest,
TenantTimeTravelRequest, TimelineArchivalConfigRequest, TimelineCreateRequest,
};
use pageserver_api::shard::TenantShardId;
use pageserver_client::mgmt_api;
@@ -334,6 +334,24 @@ async fn handle_tenant_timeline_delete(
.await
}
/// HTTP handler for the timeline `archival_config` endpoint: forwards the
/// requested archival state change for `tenant_id`/`timeline_id` to the
/// storage controller service, which fans it out to all shards.
///
/// Returns `200 OK` with an empty JSON body on success; errors from parameter
/// parsing, authorization, or the service call propagate as `ApiError`.
async fn handle_tenant_timeline_archival_config(
service: Arc<Service>,
mut req: Request<Body>,
) -> Result<Response<Body>, ApiError> {
// Authorize before parsing any request parameters, so unauthenticated
// callers cannot probe parameter validation behavior.
check_permissions(&req, Scope::PageServerApi)?;
let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?;
let timeline_id: TimelineId = parse_request_param(&req, "timeline_id")?;
// The body carries the desired archival state for the timeline.
let config_req = json_request::<TimelineArchivalConfigRequest>(&mut req).await?;
service
.tenant_timeline_archival_config(tenant_id, timeline_id, config_req)
.await?;
json_response(StatusCode::OK, ())
}
async fn handle_tenant_timeline_detach_ancestor(
service: Arc<Service>,
req: Request<Body>,
@@ -1160,6 +1178,16 @@ pub fn make_router(
RequestName("v1_tenant_timeline"),
)
})
.post(
"/v1/tenant/:tenant_id/timeline/:timeline_id/archival_config",
|r| {
tenant_service_handler(
r,
handle_tenant_timeline_archival_config,
RequestName("v1_tenant_timeline_archival_config"),
)
},
)
.put(
"/v1/tenant/:tenant_id/timeline/:timeline_id/detach_ancestor",
|r| {

View File

@@ -2,8 +2,8 @@ use pageserver_api::{
models::{
detach_ancestor::AncestorDetached, LocationConfig, LocationConfigListResponse,
PageserverUtilization, SecondaryProgress, TenantScanRemoteStorageResponse,
TenantShardSplitRequest, TenantShardSplitResponse, TimelineCreateRequest, TimelineInfo,
TopTenantShardsRequest, TopTenantShardsResponse,
TenantShardSplitRequest, TenantShardSplitResponse, TimelineArchivalConfigRequest,
TimelineCreateRequest, TimelineInfo, TopTenantShardsRequest, TopTenantShardsResponse,
},
shard::TenantShardId,
};
@@ -227,6 +227,22 @@ impl PageserverClient {
)
}
/// Sets the archival configuration of a single timeline on one pageserver
/// shard by delegating to the inner management-API client, recording the
/// request latency/outcome under the `timeline_archival_config` metric label.
///
/// `req` carries the desired archival state; the call is a POST against the
/// pageserver's timeline `archival_config` endpoint (via `self.inner`).
pub(crate) async fn timeline_archival_config(
&self,
tenant_shard_id: TenantShardId,
timeline_id: TimelineId,
req: &TimelineArchivalConfigRequest,
) -> Result<()> {
// measured_request! wraps the awaited call so the metric is labeled with
// the method, endpoint name, and the target node's id label.
measured_request!(
"timeline_archival_config",
crate::metrics::Method::Post,
&self.node_id_label,
self.inner
.timeline_archival_config(tenant_shard_id, timeline_id, req)
.await
)
}
pub(crate) async fn timeline_detach_ancestor(
&self,
tenant_shard_id: TenantShardId,

View File

@@ -46,7 +46,10 @@ use pageserver_api::{
TenantDescribeResponseShard, TenantLocateResponse, TenantPolicyRequest,
TenantShardMigrateRequest, TenantShardMigrateResponse,
},
models::{SecondaryProgress, TenantConfigRequest, TopTenantShardsRequest},
models::{
SecondaryProgress, TenantConfigRequest, TimelineArchivalConfigRequest,
TopTenantShardsRequest,
},
};
use reqwest::StatusCode;
use tracing::{instrument, Instrument};
@@ -131,6 +134,7 @@ enum TenantOperations {
TimelineCreate,
TimelineDelete,
AttachHook,
TimelineArchivalConfig,
TimelineDetachAncestor,
}
@@ -2918,6 +2922,73 @@ impl Service {
.await?
}
/// Applies a timeline archival-config change across all shards of a tenant.
///
/// Takes a shared tenant operation lock, resolves the tenant's attached
/// shard/node targets via `tenant_remote_mutation`, and issues the same
/// archival-config request to every shard concurrently. Succeeds only if
/// all shards succeed; the first shard error is returned.
///
/// Errors: `ApiError::NotFound` if the tenant has no shards;
/// `ApiError::PreconditionFailed` if a pageserver rejects the request with
/// 412; other pageserver errors are passed through via
/// `passthrough_api_error`.
pub(crate) async fn tenant_timeline_archival_config(
&self,
tenant_id: TenantId,
timeline_id: TimelineId,
req: TimelineArchivalConfigRequest,
) -> Result<(), ApiError> {
tracing::info!(
"Setting archival config of timeline {tenant_id}/{timeline_id} to '{:?}'",
req.state
);
// Shared lock: concurrent archival-config calls may proceed, but this
// serializes against exclusive tenant operations (e.g. shard splits).
let _tenant_lock = trace_shared_lock(
&self.tenant_op_locks,
tenant_id,
TenantOperations::TimelineArchivalConfig,
)
.await;
self.tenant_remote_mutation(tenant_id, move |targets| async move {
// No attached shards means the tenant is unknown to us.
if targets.is_empty() {
return Err(ApiError::NotFound(
anyhow::anyhow!("Tenant not found").into(),
));
}
// Sends the archival-config request to a single shard on its node,
// translating a 412 from the pageserver into PreconditionFailed.
async fn config_one(
tenant_shard_id: TenantShardId,
timeline_id: TimelineId,
node: Node,
jwt: Option<String>,
req: TimelineArchivalConfigRequest,
) -> Result<(), ApiError> {
tracing::info!(
"Setting archival config of timeline on shard {tenant_shard_id}/{timeline_id}, attached to node {node}",
);
let client = PageserverClient::new(node.get_id(), node.base_url(), jwt.as_deref());
client
.timeline_archival_config(tenant_shard_id, timeline_id, &req)
.await
.map_err(|e| match e {
mgmt_api::Error::ApiError(StatusCode::PRECONDITION_FAILED, msg) => {
ApiError::PreconditionFailed(msg.into_boxed_str())
}
_ => passthrough_api_error(&node, e),
})
}
// no shard needs to go first/last; the operation should be idempotent
// TODO: it would be great to ensure that all shards return the same error
let results = self
.tenant_for_shards(targets, |tenant_shard_id, node| {
futures::FutureExt::boxed(config_one(
tenant_shard_id,
timeline_id,
node,
self.config.jwt_token.clone(),
req.clone(),
))
})
.await?;
// targets was non-empty above, so at least one shard must have run.
assert!(!results.is_empty(), "must have at least one result");
Ok(())
}).await?
}
pub(crate) async fn tenant_timeline_detach_ancestor(
&self,
tenant_id: TenantId,