feat: distribute truncate table in region server (#2414)
* feat: distribute truncate table
* chore: add metrics for truncate table
* test: add sqlness test
* chore: cr
* test: add multi truncate
* chore: add trace id to the header
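
At a high level, the new metasrv procedure added below (src/common/meta/src/ddl/truncate_table.rs) first checks that the table exists, then fans one truncate request out to the leader datanode of every region and waits for all of them. The following is a minimal, self-contained sketch of that fan-out pattern using only the futures crate; Datanode, RegionId, RegionRoute and truncate_region here are illustrative stand-ins, not GreptimeDB's real types or APIs.

use futures::future::join_all;

#[derive(Debug)]
struct Datanode(u64);

#[derive(Clone, Copy, Debug)]
struct RegionId(u64);

#[derive(Debug)]
struct TruncateRegionRequest {
    region_id: RegionId,
}

/// One route entry: which datanode currently leads which region.
struct RegionRoute {
    region_id: RegionId,
    leader: Datanode,
}

/// Stand-in for `requester.handle(request).await` against a datanode's region server.
async fn truncate_region(datanode: &Datanode, request: TruncateRegionRequest) -> Result<(), String> {
    println!("truncate {:?} on {:?}", request.region_id, datanode);
    Ok(())
}

/// Build one truncate request per region, addressed to that region's leader,
/// then await them all; any single failure fails the whole step.
async fn truncate_all(routes: &[RegionRoute]) -> Result<(), String> {
    let tasks = routes.iter().map(|route| {
        let request = TruncateRegionRequest {
            region_id: route.region_id,
        };
        truncate_region(&route.leader, request)
    });
    join_all(tasks).await.into_iter().collect::<Result<Vec<_>, _>>()?;
    Ok(())
}

fn main() {
    let routes = vec![
        RegionRoute { region_id: RegionId(1), leader: Datanode(1) },
        RegionRoute { region_id: RegionId(2), leader: Datanode(2) },
    ];
    futures::executor::block_on(truncate_all(&routes)).expect("truncate failed");
}

In the actual procedure this step is additionally wrapped in the procedure framework's retry handling (handle_retry_error) and a per-step metrics timer, as the diff below shows.
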
Cargo.lock (generated, 2 lines changed)

@@ -4188,7 +4188,7 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
 [[package]]
 name = "greptime-proto"
 version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=115c1080773be8a819e50b257fece9f839a0c836#115c1080773be8a819e50b257fece9f839a0c836"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=693128abe9adc70ba636010a172c9da55b206bba#693128abe9adc70ba636010a172c9da55b206bba"
 dependencies = [
  "prost",
  "serde",

@@ -78,7 +78,7 @@ datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git
 derive_builder = "0.12"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "115c1080773be8a819e50b257fece9f839a0c836" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "693128abe9adc70ba636010a172c9da55b206bba" }
 humantime-serde = "1.1"
 itertools = "0.10"
 lazy_static = "1.4"

@@ -28,6 +28,7 @@ use crate::rpc::router::RegionRoute;
 pub mod alter_table;
 pub mod create_table;
 pub mod drop_table;
+pub mod truncate_table;
 pub mod utils;

 #[derive(Debug, Default)]

@@ -199,8 +199,8 @@ impl CreateTableProcedure {
         for request in requests {
             let request = RegionRequest {
                 header: Some(RegionRequestHeader {
-                    trace_id: 0,
-                    span_id: 0,
+                    trace_id: common_telemetry::trace_id().unwrap_or_default(),
+                    ..Default::default()
                 }),
                 body: Some(request),
             };

@@ -156,8 +156,8 @@ impl DropTableProcedure {

             let request = RegionRequest {
                 header: Some(RegionRequestHeader {
-                    trace_id: 0,
-                    span_id: 0,
+                    trace_id: common_telemetry::trace_id().unwrap_or_default(),
+                    ..Default::default()
                 }),
                 body: Some(region_request::Body::Drop(PbDropRegionRequest {
                     region_id: region_id.as_u64(),

src/common/meta/src/ddl/truncate_table.rs (new file, 234 lines)

@@ -0,0 +1,234 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use api::v1::region::{
    region_request, RegionRequest, RegionRequestHeader, TruncateRequest as PbTruncateRegionRequest,
};
use async_trait::async_trait;
use common_procedure::error::{FromJsonSnafu, ToJsonSnafu};
use common_procedure::{
    Context as ProcedureContext, LockKey, Procedure, Result as ProcedureResult, Status,
};
use common_telemetry::debug;
use futures::future::join_all;
use serde::{Deserialize, Serialize};
use snafu::{ensure, ResultExt};
use store_api::storage::RegionId;
use strum::AsRefStr;
use table::engine::TableReference;
use table::metadata::{RawTableInfo, TableId};

use super::utils::handle_retry_error;
use crate::ddl::utils::handle_operate_region_error;
use crate::ddl::DdlContext;
use crate::error::{Result, TableNotFoundSnafu};
use crate::key::table_info::TableInfoValue;
use crate::key::table_name::TableNameKey;
use crate::metrics;
use crate::rpc::ddl::TruncateTableTask;
use crate::rpc::router::{find_leader_regions, find_leaders, RegionRoute};
use crate::table_name::TableName;

pub struct TruncateTableProcedure {
    context: DdlContext,
    data: TruncateTableData,
}

#[async_trait]
impl Procedure for TruncateTableProcedure {
    fn type_name(&self) -> &str {
        Self::TYPE_NAME
    }

    async fn execute(&mut self, _ctx: &ProcedureContext) -> ProcedureResult<Status> {
        let state = &self.data.state;

        let _timer = common_telemetry::timer!(
            metrics::METRIC_META_PROCEDURE_TRUNCATE_TABLE,
            &[("step", state.as_ref().to_string())]
        );

        match self.data.state {
            TruncateTableState::Prepare => self.on_prepare().await,
            TruncateTableState::DatanodeTruncateRegions => {
                self.on_datanode_truncate_regions().await
            }
        }
        .map_err(handle_retry_error)
    }

    fn dump(&self) -> ProcedureResult<String> {
        serde_json::to_string(&self.data).context(ToJsonSnafu)
    }

    fn lock_key(&self) -> LockKey {
        let table_ref = &self.data.table_ref();
        let key = common_catalog::format_full_table_name(
            table_ref.catalog,
            table_ref.schema,
            table_ref.table,
        );

        LockKey::single(key)
    }
}

impl TruncateTableProcedure {
    pub(crate) const TYPE_NAME: &'static str = "metasrv-procedure::TruncateTable";

    pub(crate) fn new(
        cluster_id: u64,
        task: TruncateTableTask,
        table_info_value: TableInfoValue,
        region_routes: Vec<RegionRoute>,
        context: DdlContext,
    ) -> Self {
        Self {
            context,
            data: TruncateTableData::new(cluster_id, task, table_info_value, region_routes),
        }
    }

    pub(crate) fn from_json(json: &str, context: DdlContext) -> ProcedureResult<Self> {
        let data = serde_json::from_str(json).context(FromJsonSnafu)?;
        Ok(Self { context, data })
    }

    // Checks whether the table exists.
    async fn on_prepare(&mut self) -> Result<Status> {
        let table_ref = &self.data.table_ref();

        let manager = &self.context.table_metadata_manager;

        let exist = manager
            .table_name_manager()
            .exists(TableNameKey::new(
                table_ref.catalog,
                table_ref.schema,
                table_ref.table,
            ))
            .await?;

        ensure!(
            exist,
            TableNotFoundSnafu {
                table_name: table_ref.to_string()
            }
        );

        self.data.state = TruncateTableState::DatanodeTruncateRegions;

        Ok(Status::executing(true))
    }

    async fn on_datanode_truncate_regions(&mut self) -> Result<Status> {
        let table_id = self.data.table_id();

        let region_routes = &self.data.region_routes;
        let leaders = find_leaders(region_routes);
        let mut truncate_region_tasks = Vec::with_capacity(leaders.len());

        for datanode in leaders {
            let requester = self.context.datanode_manager.datanode(&datanode).await;
            let regions = find_leader_regions(region_routes, &datanode);

            for region in regions {
                let region_id = RegionId::new(table_id, region);
                debug!(
                    "Truncating table {} region {} on Datanode {:?}",
                    self.data.table_ref(),
                    region_id,
                    datanode
                );

                let request = RegionRequest {
                    header: Some(RegionRequestHeader {
                        trace_id: common_telemetry::trace_id().unwrap_or_default(),
                        ..Default::default()
                    }),
                    body: Some(region_request::Body::Truncate(PbTruncateRegionRequest {
                        region_id: region_id.as_u64(),
                    })),
                };

                let datanode = datanode.clone();
                let requester = requester.clone();

                truncate_region_tasks.push(async move {
                    if let Err(err) = requester.handle(request).await {
                        return Err(handle_operate_region_error(datanode)(err));
                    }
                    Ok(())
                });
            }
        }

        join_all(truncate_region_tasks)
            .await
            .into_iter()
            .collect::<Result<Vec<_>>>()?;

        Ok(Status::Done)
    }
}

#[derive(Debug, Serialize, Deserialize)]
pub struct TruncateTableData {
    state: TruncateTableState,
    cluster_id: u64,
    task: TruncateTableTask,
    table_info_value: TableInfoValue,
    region_routes: Vec<RegionRoute>,
}

impl TruncateTableData {
    pub fn new(
        cluster_id: u64,
        task: TruncateTableTask,
        table_info_value: TableInfoValue,
        region_routes: Vec<RegionRoute>,
    ) -> Self {
        Self {
            state: TruncateTableState::Prepare,
            cluster_id,
            task,
            table_info_value,
            region_routes,
        }
    }

    pub fn table_ref(&self) -> TableReference {
        self.task.table_ref()
    }

    pub fn table_name(&self) -> TableName {
        self.task.table_name()
    }

    fn table_info(&self) -> &RawTableInfo {
        &self.table_info_value.table_info
    }

    fn table_id(&self) -> TableId {
        self.table_info().ident.table_id
    }
}

#[derive(Debug, Serialize, Deserialize, AsRefStr)]
enum TruncateTableState {
    /// Prepares to truncate the table
    Prepare,
    /// Truncates regions on Datanode
    DatanodeTruncateRegions,
}

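The procedure above is deliberately a small, serializable state machine: each execute() call handles one state (Prepare, then DatanodeTruncateRegions), and dump()/from_json() let the metasrv persist the state between steps and resume after a restart. Below is a rough, self-contained sketch of that pattern, assuming only the serde and serde_json crates and using made-up names rather than the common_procedure API.

use serde::{Deserialize, Serialize};

#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
enum State {
    Prepare,
    TruncateRegions,
    Done,
}

#[derive(Debug, Serialize, Deserialize)]
struct TruncateData {
    state: State,
    table: String,
}

impl TruncateData {
    /// Advance one step; the real procedure does async work in each arm.
    fn step(&mut self) {
        self.state = match self.state {
            State::Prepare => State::TruncateRegions, // table existence verified
            State::TruncateRegions => State::Done,    // truncate requests sent to datanodes
            State::Done => State::Done,
        };
    }
}

fn main() {
    let mut data = TruncateData {
        state: State::Prepare,
        table: "monitor".to_string(),
    };
    data.step();

    // Persist between steps; the framework does this via something like Procedure::dump.
    let dumped = serde_json::to_string(&data).unwrap();

    // After a metasrv restart, the registered loader rebuilds the procedure from JSON
    // (see the register_loader call in the DdlManager diff below) and keeps going.
    let mut restored: TruncateData = serde_json::from_str(&dumped).unwrap();
    restored.step();
    println!("{:?}", restored.state); // Done
}
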
@@ -15,7 +15,7 @@
 use std::sync::Arc;

 use common_procedure::{watcher, ProcedureId, ProcedureManagerRef, ProcedureWithId};
-use common_telemetry::{error, info};
+use common_telemetry::info;
 use snafu::{OptionExt, ResultExt};

 use crate::cache_invalidator::CacheInvalidatorRef;

@@ -23,13 +23,14 @@ use crate::datanode_manager::DatanodeManagerRef;
 use crate::ddl::alter_table::AlterTableProcedure;
 use crate::ddl::create_table::CreateTableProcedure;
 use crate::ddl::drop_table::DropTableProcedure;
+use crate::ddl::truncate_table::TruncateTableProcedure;
 use crate::ddl::{
     DdlContext, DdlTaskExecutor, ExecutorContext, TableMetadataAllocatorContext,
     TableMetadataAllocatorRef,
 };
 use crate::error::{
     self, RegisterProcedureLoaderSnafu, Result, SubmitProcedureSnafu, TableNotFoundSnafu,
-    UnsupportedSnafu, WaitProcedureSnafu,
+    WaitProcedureSnafu,
 };
 use crate::key::table_info::TableInfoValue;
 use crate::key::table_name::TableNameKey;

@@ -122,6 +123,20 @@ impl DdlManager {
             )
             .context(RegisterProcedureLoaderSnafu {
                 type_name: AlterTableProcedure::TYPE_NAME,
             })?;

+        let context = self.create_context();
+
+        self.procedure_manager
+            .register_loader(
+                TruncateTableProcedure::TYPE_NAME,
+                Box::new(move |json| {
+                    let context = context.clone();
+                    TruncateTableProcedure::from_json(json, context).map(|p| Box::new(p) as _)
+                }),
+            )
+            .context(RegisterProcedureLoaderSnafu {
+                type_name: TruncateTableProcedure::TYPE_NAME,
+            })
     }

@@ -183,15 +198,21 @@ impl DdlManager {
         &self,
         cluster_id: u64,
         truncate_table_task: TruncateTableTask,
+        table_info_value: TableInfoValue,
         region_routes: Vec<RegionRoute>,
     ) -> Result<ProcedureId> {
-        error!("Truncate table procedure is not supported, cluster_id = {}, truncate_table_task = {:?}, region_routes = {:?}",
-        cluster_id, truncate_table_task, region_routes);
+        let context = self.create_context();
+        let procedure = TruncateTableProcedure::new(
+            cluster_id,
+            truncate_table_task,
+            table_info_value,
+            region_routes,
+            context,
+        );

-        UnsupportedSnafu {
-            operation: "TRUNCATE TABLE",
-        }
-        .fail()
+        let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
+
+        self.submit_procedure(procedure_with_id).await
     }

     async fn submit_procedure(&self, procedure_with_id: ProcedureWithId) -> Result<ProcedureId> {

@@ -216,32 +237,34 @@ async fn handle_truncate_table_task(
     cluster_id: u64,
     truncate_table_task: TruncateTableTask,
 ) -> Result<SubmitDdlTaskResponse> {
-    let truncate_table = &truncate_table_task.truncate_table;
-    let table_id = truncate_table
-        .table_id
-        .as_ref()
-        .context(error::UnexpectedSnafu {
-            err_msg: "expected table id ",
-        })?
-        .id;
-
+    let table_id = truncate_table_task.table_id;
+    let table_metadata_manager = &ddl_manager.table_metadata_manager();
     let table_ref = truncate_table_task.table_ref();

-    let table_route_value = ddl_manager
-        .table_metadata_manager()
-        .table_route_manager()
-        .get(table_id)
-        .await?
-        .with_context(|| error::TableRouteNotFoundSnafu {
-            table_name: table_ref.to_string(),
-        })?;
+    let (table_info_value, table_route_value) =
+        table_metadata_manager.get_full_table_info(table_id).await?;
+
+    let table_info_value = table_info_value.with_context(|| error::TableInfoNotFoundSnafu {
+        table_name: table_ref.to_string(),
+    })?;
+
+    let table_route_value = table_route_value.with_context(|| error::TableRouteNotFoundSnafu {
+        table_name: table_ref.to_string(),
+    })?;

     let table_route = table_route_value.region_routes;

     let id = ddl_manager
-        .submit_truncate_table_task(cluster_id, truncate_table_task, table_route)
+        .submit_truncate_table_task(
+            cluster_id,
+            truncate_table_task,
+            table_info_value,
+            table_route,
+        )
         .await?;

     info!("Table: {table_id} is truncated via procedure_id {id:?}");

     Ok(SubmitDdlTaskResponse {
         key: id.to_string().into(),
         ..Default::default()

@@ -19,3 +19,4 @@ pub(crate) const METRIC_META_CREATE_SCHEMA: &str = "meta.create_schema";
 pub(crate) const METRIC_META_PROCEDURE_CREATE_TABLE: &str = "meta.procedure.create_table";
 pub(crate) const METRIC_META_PROCEDURE_DROP_TABLE: &str = "meta.procedure.drop_table";
 pub(crate) const METRIC_META_PROCEDURE_ALTER_TABLE: &str = "meta.procedure.alter_table";
+pub(crate) const METRIC_META_PROCEDURE_TRUNCATE_TABLE: &str = "meta.procedure.truncate_table";

@@ -65,8 +65,18 @@ impl DdlTask {
         DdlTask::AlterTable(AlterTableTask { alter_table })
     }

-    pub fn new_truncate_table(truncate_table: TruncateTableExpr) -> Self {
-        DdlTask::TruncateTable(TruncateTableTask { truncate_table })
+    pub fn new_truncate_table(
+        catalog: String,
+        schema: String,
+        table: String,
+        table_id: TableId,
+    ) -> Self {
+        DdlTask::TruncateTable(TruncateTableTask {
+            catalog,
+            schema,
+            table,
+            table_id,
+        })
     }
 }

@@ -112,7 +122,12 @@ impl TryFrom<SubmitDdlTaskRequest> for PbSubmitDdlTaskRequest {
                 alter_table: Some(task.alter_table),
             }),
             DdlTask::TruncateTable(task) => Task::TruncateTableTask(PbTruncateTableTask {
-                truncate_table: Some(task.truncate_table),
+                truncate_table: Some(TruncateTableExpr {
+                    catalog_name: task.catalog,
+                    schema_name: task.schema,
+                    table_name: task.table,
+                    table_id: Some(api::v1::TableId { id: task.table_id }),
+                }),
             }),
         };

@@ -358,27 +373,28 @@ impl<'de> Deserialize<'de> for AlterTableTask {
     }
 }

-#[derive(Debug, PartialEq)]
+#[derive(Debug, PartialEq, Serialize, Deserialize)]
 pub struct TruncateTableTask {
-    pub truncate_table: TruncateTableExpr,
+    pub catalog: String,
+    pub schema: String,
+    pub table: String,
+    pub table_id: TableId,
 }

 impl TruncateTableTask {
     pub fn table_ref(&self) -> TableReference {
         TableReference {
-            catalog: &self.truncate_table.catalog_name,
-            schema: &self.truncate_table.schema_name,
-            table: &self.truncate_table.table_name,
+            catalog: &self.catalog,
+            schema: &self.schema,
+            table: &self.table,
         }
     }

     pub fn table_name(&self) -> TableName {
-        let table = &self.truncate_table;
-
         TableName {
-            catalog_name: table.catalog_name.to_string(),
-            schema_name: table.schema_name.to_string(),
-            table_name: table.table_name.to_string(),
+            catalog_name: self.catalog.to_string(),
+            schema_name: self.schema.to_string(),
+            table_name: self.table.to_string(),
         }
     }
 }

@@ -388,39 +404,20 @@ impl TryFrom<PbTruncateTableTask> for TruncateTableTask {

     fn try_from(pb: PbTruncateTableTask) -> Result<Self> {
         let truncate_table = pb.truncate_table.context(error::InvalidProtoMsgSnafu {
-            err_msg: "expected truncate_table",
+            err_msg: "expected drop table",
         })?;

-        Ok(TruncateTableTask { truncate_table })
-    }
-}
-
-impl Serialize for TruncateTableTask {
-    fn serialize<S>(&self, serializer: S) -> result::Result<S::Ok, S::Error>
-    where
-        S: serde::Serializer,
-    {
-        let pb = PbTruncateTableTask {
-            truncate_table: Some(self.truncate_table.clone()),
-        };
-        let buf = pb.encode_to_vec();
-        serializer.serialize_bytes(&buf)
-    }
-}
-
-impl<'de> Deserialize<'de> for TruncateTableTask {
-    fn deserialize<D>(deserializer: D) -> result::Result<Self, D::Error>
-    where
-        D: serde::Deserializer<'de>,
-    {
-        let buf = Vec::<u8>::deserialize(deserializer)?;
-        let task: PbTruncateTableTask = PbTruncateTableTask::decode(&*buf)
-            .map_err(|err| serde::de::Error::custom(err.to_string()))?;
-
-        let task = TruncateTableTask::try_from(task)
-            .map_err(|err| serde::de::Error::custom(err.to_string()))?;
-
-        Ok(task)
+        Ok(Self {
+            catalog: truncate_table.catalog_name,
+            schema: truncate_table.schema_name,
+            table: truncate_table.table_name,
+            table_id: truncate_table
+                .table_id
+                .context(error::InvalidProtoMsgSnafu {
+                    err_msg: "expected table_id",
+                })?
+                .id,
+        })
     }
 }

@@ -16,7 +16,7 @@ use std::collections::HashMap;
 use std::sync::Arc;

 use api::helper::ColumnDataTypeWrapper;
-use api::v1::{column_def, AlterExpr, CreateTableExpr, TruncateTableExpr};
+use api::v1::{column_def, AlterExpr, CreateTableExpr};
 use catalog::CatalogManagerRef;
 use chrono::DateTime;
 use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};

@@ -169,15 +169,8 @@ impl StatementExecutor {
             .with_context(|| TableNotFoundSnafu {
                 table_name: table_name.to_string(),
             })?;
-        let table_id = table.table_info().ident.table_id;
-
-        let expr = TruncateTableExpr {
-            catalog_name: table_name.catalog_name.clone(),
-            schema_name: table_name.schema_name.clone(),
-            table_name: table_name.table_name.clone(),
-            table_id: Some(api::v1::TableId { id: table_id }),
-        };
-        self.truncate_table_procedure(&expr).await?;
+        let table_id = table.table_info().table_id();
+        self.truncate_table_procedure(&table_name, table_id).await?;

         Ok(Output::AffectedRows(0))
     }

@@ -312,10 +305,16 @@

     async fn truncate_table_procedure(
         &self,
-        truncate_table: &TruncateTableExpr,
+        table_name: &TableName,
+        table_id: TableId,
     ) -> Result<SubmitDdlTaskResponse> {
         let request = SubmitDdlTaskRequest {
-            task: DdlTask::new_truncate_table(truncate_table.clone()),
+            task: DdlTask::new_truncate_table(
+                table_name.catalog_name.to_string(),
+                table_name.schema_name.to_string(),
+                table_name.table_name.to_string(),
+                table_id,
+            ),
         };

         self.ddl_executor

@@ -116,6 +116,10 @@ impl RegionRequest {
                 compact.region_id.into(),
                 Self::Compact(RegionCompactRequest {}),
             )]),
+            region_request::Body::Truncate(truncate) => Ok(vec![(
+                truncate.region_id.into(),
+                Self::Truncate(RegionTruncateRequest {}),
+            )]),
         }
     }
 }

@@ -417,6 +421,7 @@ pub struct RegionFlushRequest {}
 #[derive(Debug)]
 pub struct RegionCompactRequest {}

+/// Truncate region request.
 #[derive(Debug)]
 pub struct RegionTruncateRequest {}

tests/cases/standalone/common/truncate/truncate.result (new file, 76 lines)

@@ -0,0 +1,76 @@
TRUNCATE TABLE not_exits_table;

Error: 4001(TableNotFound), Table not found: greptime.public.not_exits_table

CREATE TABLE monitor (host STRING, ts TIMESTAMP, cpu DOUBLE DEFAULT 0, memory DOUBLE, TIME INDEX (ts), PRIMARY KEY(host));

Affected Rows: 0

INSERT INTO monitor(ts, host, cpu, memory) VALUES
(1695217652000, 'host1', 66.6, 1024),
(1695217652000, 'host2', 66.6, 1024),
(1695217652000, 'host3', 66.6, 1024),
(1695217654000, 'host1', 77.7, 2048),
(1695217654000, 'host2', 77.7, 2048),
(1695217654000, 'host3', 77.7, 2048),
(1695217656000, 'host1', 88.8, 4096),
(1695217656000, 'host2', 88.8, 4096),
(1695217656000, 'host3', 88.8, 4096);

Affected Rows: 9

SELECT ts, host, cpu, memory FROM monitor ORDER BY ts;

+---------------------+-------+------+--------+
| ts                  | host  | cpu  | memory |
+---------------------+-------+------+--------+
| 2023-09-20T13:47:32 | host1 | 66.6 | 1024.0 |
| 2023-09-20T13:47:32 | host2 | 66.6 | 1024.0 |
| 2023-09-20T13:47:32 | host3 | 66.6 | 1024.0 |
| 2023-09-20T13:47:34 | host1 | 77.7 | 2048.0 |
| 2023-09-20T13:47:34 | host2 | 77.7 | 2048.0 |
| 2023-09-20T13:47:34 | host3 | 77.7 | 2048.0 |
| 2023-09-20T13:47:36 | host1 | 88.8 | 4096.0 |
| 2023-09-20T13:47:36 | host2 | 88.8 | 4096.0 |
| 2023-09-20T13:47:36 | host3 | 88.8 | 4096.0 |
+---------------------+-------+------+--------+

TRUNCATE monitor;

Affected Rows: 0

SELECT ts, host, cpu, memory FROM monitor ORDER BY ts;

++
++

INSERT INTO monitor(ts, host, cpu, memory) VALUES
(1695217660000, 'host1', 88.8, 4096),
(1695217662000, 'host2', 88.8, 4096),
(1695217664000, 'host3', 88.8, 4096);

Affected Rows: 3

SELECT ts, host, cpu, memory FROM monitor ORDER BY ts;

+---------------------+-------+------+--------+
| ts                  | host  | cpu  | memory |
+---------------------+-------+------+--------+
| 2023-09-20T13:47:40 | host1 | 88.8 | 4096.0 |
| 2023-09-20T13:47:42 | host2 | 88.8 | 4096.0 |
| 2023-09-20T13:47:44 | host3 | 88.8 | 4096.0 |
+---------------------+-------+------+--------+

TRUNCATE monitor;

Affected Rows: 0

SELECT ts, host, cpu, memory FROM monitor ORDER BY ts;

++
++

DROP TABLE monitor;

Affected Rows: 1

tests/cases/standalone/common/truncate/truncate.sql (new file, 33 lines)

@@ -0,0 +1,33 @@
TRUNCATE TABLE not_exits_table;

CREATE TABLE monitor (host STRING, ts TIMESTAMP, cpu DOUBLE DEFAULT 0, memory DOUBLE, TIME INDEX (ts), PRIMARY KEY(host));

INSERT INTO monitor(ts, host, cpu, memory) VALUES
(1695217652000, 'host1', 66.6, 1024),
(1695217652000, 'host2', 66.6, 1024),
(1695217652000, 'host3', 66.6, 1024),
(1695217654000, 'host1', 77.7, 2048),
(1695217654000, 'host2', 77.7, 2048),
(1695217654000, 'host3', 77.7, 2048),
(1695217656000, 'host1', 88.8, 4096),
(1695217656000, 'host2', 88.8, 4096),
(1695217656000, 'host3', 88.8, 4096);

SELECT ts, host, cpu, memory FROM monitor ORDER BY ts;

TRUNCATE monitor;

SELECT ts, host, cpu, memory FROM monitor ORDER BY ts;

INSERT INTO monitor(ts, host, cpu, memory) VALUES
(1695217660000, 'host1', 88.8, 4096),
(1695217662000, 'host2', 88.8, 4096),
(1695217664000, 'host3', 88.8, 4096);

SELECT ts, host, cpu, memory FROM monitor ORDER BY ts;

TRUNCATE monitor;

SELECT ts, host, cpu, memory FROM monitor ORDER BY ts;

DROP TABLE monitor;