feat: show create table (#1336)
* temp commit
* feat: impl Display for CreateTable statement
* feat: impl show create table for standalone
* fix: forgot show.rs
* feat: clean code
* fix: typo
* feat: impl show create table for distributed
* test: add show create table sqlness test
* fix: typo
* fix: sqlness tests
* feat: render partition rules for distributed table
* Update src/sql/src/statements.rs (Co-authored-by: Yingwen <realevenyag@gmail.com>)
* Update src/sql/src/statements.rs (Co-authored-by: Yingwen <realevenyag@gmail.com>)
* Update src/sql/src/statements.rs (Co-authored-by: Yingwen <realevenyag@gmail.com>)
* Update src/sql/src/statements/create.rs (Co-authored-by: Yingwen <realevenyag@gmail.com>)
* chore: by CR comments
* fix: compile error
* fix: missing column comments and extra table options
* test: add show create table test
* test: add show create table test
* chore: timestamp precision
* fix: test

---------

Co-authored-by: Yingwen <realevenyag@gmail.com>
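For a quick sense of the feature, a minimal usage sketch mirroring the commit's own integration test (the `demo` table is illustrative; the exact output depends on the table's schema, options, and partition rules):

create table demo(host STRING, cpu DOUBLE, memory DOUBLE, ts bigint, TIME INDEX(ts));

show create table demo;

-- Returns one row with two columns, `Table` and `Create Table`. The second column holds
-- the reconstructed statement: columns with nullability, defaults and comments, the
-- TIME INDEX and PRIMARY KEY constraints, the ENGINE, the WITH(...) table options, and,
-- for a distributed table, PARTITION BY RANGE COLUMNS rules rendered from its partition info.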
Cargo.lock (generated, 1 change)
@@ -6469,6 +6469,7 @@ dependencies = [
  "format_num",
  "futures",
  "futures-util",
+ "humantime",
  "metrics",
  "num",
  "num-traits",
@@ -104,6 +104,14 @@ impl Instance {
                 .execute(SqlRequest::DropTable(req), query_ctx)
                 .await
             }
+            Statement::ShowCreateTable(show) => {
+                let (catalog, schema, table) =
+                    table_idents_to_full_name(&show.table_name, query_ctx.clone())?;
+                let table_ref = TableReference::full(&catalog, &schema, &table);
+                let table = self.sql_handler.get_table(&table_ref).await?;
+
+                query::sql::show_create_table(table, None).context(ExecuteStatementSnafu)
+            }
             _ => NotSupportSqlSnafu {
                 msg: format!("not supported to execute {stmt:?}"),
             }
@@ -23,9 +23,8 @@ use session::context::QueryContextRef;
 use snafu::{ensure, OptionExt, ResultExt};
 use sql::ast::{ColumnOption, TableConstraint};
 use sql::statements::column_def_to_schema;
-use sql::statements::create::CreateTable;
+use sql::statements::create::{CreateTable, TIME_INDEX};
 use sql::util::to_lowercase_options_map;
-use store_api::storage::consts::TIME_INDEX_NAME;
 use table::engine::{EngineContext, TableReference};
 use table::metadata::TableId;
 use table::requests::*;
@@ -224,7 +223,7 @@ impl SqlHandler {
                 is_primary,
             } => {
                 if let Some(name) = name {
-                    if name.value == TIME_INDEX_NAME {
+                    if name.value == TIME_INDEX {
                         ts_index = *col_map.get(&columns[0].value).context(
                             KeyColumnNotFoundSnafu {
                                 name: columns[0].value.to_string(),
@@ -340,7 +339,7 @@ mod tests {
     async fn test_create_table_with_options() {
         let sql = r#"
             CREATE TABLE demo_table (
-              "timestamp" BIGINT TIME INDEX,
+              "timestamp" BIGINT TIME INDEX,
               "value" DOUBLE,
               host STRING PRIMARY KEY
             ) engine=mito with(regions=1, ttl='7days',write_buffer_size='32MB',some='other');"#;
@@ -364,7 +363,7 @@ mod tests {
         let parsed_stmt = sql_to_statement(
             r#"
             CREATE TABLE demo_table(
-              "timestamp" BIGINT TIME INDEX,
+              "timestamp" BIGINT TIME INDEX,
               "value" DOUBLE,
               host STRING PRIMARY KEY
             ) engine=mito with(regions=1);"#,

@@ -20,13 +20,12 @@ use std::collections::HashMap;
 use std::sync::Arc;

 use arrow::datatypes::{Field, Schema as ArrowSchema};
-pub use column_schema::TIME_INDEX_KEY;
 use datafusion_common::DFSchemaRef;
 use snafu::{ensure, ResultExt};

 use crate::data_type::DataType;
 use crate::error::{self, Error, Result};
-pub use crate::schema::column_schema::{ColumnSchema, Metadata};
+pub use crate::schema::column_schema::{ColumnSchema, Metadata, COMMENT_KEY, TIME_INDEX_KEY};
 pub use crate::schema::constraint::ColumnDefaultConstraint;
 pub use crate::schema::raw::RawSchema;

@@ -28,6 +28,7 @@ pub type Metadata = HashMap<String, String>;

 /// Key used to store whether the column is time index in arrow field's metadata.
 pub const TIME_INDEX_KEY: &str = "greptime:time_index";
+pub const COMMENT_KEY: &str = "greptime:storage:comment";
 /// Key used to store default constraint in arrow field's metadata.
 const DEFAULT_CONSTRAINT_KEY: &str = "greptime:default_constraint";

@@ -78,6 +79,11 @@ impl ColumnSchema {
         &self.metadata
     }

+    #[inline]
+    pub fn mut_metadata(&mut self) -> &mut Metadata {
+        &mut self.metadata
+    }
+
     pub fn with_time_index(mut self, is_time_index: bool) -> Self {
         self.is_time_index = is_time_index;
         if is_time_index {
@@ -16,6 +16,7 @@ use std::any::Any;

 use common_error::prelude::*;
 use datafusion::parquet;
+use datatypes::value::Value;
 use snafu::Location;
 use store_api::storage::RegionId;

@@ -64,6 +65,13 @@ pub enum Error {
         source: sql::error::Error,
     },

+    #[snafu(display("Failed to convert value to sql value: {}", value))]
+    ConvertSqlValue {
+        value: Value,
+        #[snafu(backtrace)]
+        source: sql::error::Error,
+    },
+
     #[snafu(display("Missing insert values"))]
     MissingInsertValues { location: Location },

@@ -173,13 +181,28 @@ pub enum Error {
         location: Location,
     },

-    #[snafu(display("Failed to find table route for table {}", table_name))]
+    #[snafu(display(
+        "Failed to find table route for table {}, source: {}",
+        table_name,
+        source
+    ))]
     FindTableRoute {
         table_name: String,
         #[snafu(backtrace)]
         source: partition::error::Error,
     },

+    #[snafu(display(
+        "Failed to find table partition rule for table {}, source: {}",
+        table_name,
+        source
+    ))]
+    FindTablePartitionRule {
+        table_name: String,
+        #[snafu(backtrace)]
+        source: partition::error::Error,
+    },
+
     #[snafu(display("Failed to create table info, source: {}", source))]
     CreateTableInfo {
         #[snafu(backtrace)]
@@ -483,7 +506,9 @@ impl ErrorExt for Error {
             Error::StartServer { source, .. } => source.status_code(),
             Error::ShutdownServer { source, .. } => source.status_code(),

-            Error::ParseSql { source } => source.status_code(),
+            Error::ConvertSqlValue { source, .. } | Error::ParseSql { source } => {
+                source.status_code()
+            }

             Error::Table { source }
             | Error::CopyTable { source, .. }
@@ -536,9 +561,9 @@ impl ErrorExt for Error {
             Error::InvokeDatanode { source } => source.status_code(),

             Error::External { source } => source.status_code(),
-            Error::DeserializePartition { source, .. } | Error::FindTableRoute { source, .. } => {
-                source.status_code()
-            }
+            Error::DeserializePartition { source, .. }
+            | Error::FindTablePartitionRule { source, .. }
+            | Error::FindTableRoute { source, .. } => source.status_code(),
             Error::UnrecognizedTableOption { .. } => StatusCode::InvalidArguments,

             Error::StartScriptManager { source } => source.status_code(),
@@ -42,15 +42,16 @@ use meta_client::rpc::{
     CompareAndPutRequest, CreateRequest as MetaCreateRequest, Partition as MetaPartition,
     RouteRequest, RouteResponse, TableName,
 };
+use partition::manager::PartitionInfo;
 use partition::partition::{PartitionBound, PartitionDef};
 use query::error::QueryExecutionSnafu;
 use query::query_engine::SqlStatementExecutor;
 use session::context::QueryContextRef;
 use snafu::{ensure, OptionExt, ResultExt};
-use sql::ast::Value as SqlValue;
-use sql::statements::create::Partitions;
-use sql::statements::sql_value_to_value;
+use sql::ast::{Ident, Value as SqlValue};
+use sql::statements::create::{PartitionEntry, Partitions};
 use sql::statements::statement::Statement;
+use sql::statements::{self, sql_value_to_value};
 use table::metadata::{RawTableInfo, RawTableMeta, TableIdent, TableType};
 use table::requests::TableOptions;
 use table::table::AlterContext;
@@ -68,6 +69,8 @@ use crate::error::{
 use crate::expr_factory;
 use crate::table::DistTable;

+const MAX_VALUE: &str = "MAXVALUE";
+
 #[derive(Clone)]
 pub(crate) struct DistInstance {
     meta_client: Arc<MetaClient>,
@@ -323,7 +326,7 @@ impl DistInstance {
                     database_name: stmt.name.to_string(),
                     create_if_not_exists: stmt.if_not_exists,
                 };
-                return self.handle_create_database(expr, query_ctx).await;
+                self.handle_create_database(expr, query_ctx).await
             }
             Statement::CreateTable(stmt) => {
                 let create_expr = &mut expr_factory::create_to_expr(&stmt, query_ctx)?;
@@ -332,7 +335,7 @@ impl DistInstance {
             }
             Statement::Alter(alter_table) => {
                 let expr = grpc::to_alter_expr(alter_table, query_ctx)?;
-                return self.handle_alter_table(expr).await;
+                self.handle_alter_table(expr).await
             }
             Statement::DropTable(stmt) => {
                 let (catalog, schema, table) =
@@ -340,7 +343,7 @@ impl DistInstance {
                     .map_err(BoxedError::new)
                     .context(error::ExternalSnafu)?;
                 let table_name = TableName::new(catalog, schema, table);
-                return self.drop_table(table_name).await;
+                self.drop_table(table_name).await
             }
             Statement::Insert(insert) => {
                 let (catalog, schema, table) =
@@ -360,18 +363,46 @@ impl DistInstance {
                     .await
                     .context(InvokeDatanodeSnafu)?;

-                return Ok(Output::AffectedRows(
+                Ok(Output::AffectedRows(
                     table.insert(insert_request).await.context(TableSnafu)?,
-                ));
+                ))
             }
-            _ => {
-                return error::NotSupportedSnafu {
-                    feat: format!("{stmt:?}"),
-                }
-                .fail()
+            Statement::ShowCreateTable(show) => {
+                let (catalog, schema, table) =
+                    table_idents_to_full_name(&show.table_name, query_ctx.clone())
+                        .map_err(BoxedError::new)
+                        .context(error::ExternalSnafu)?;
+
+                let table_ref = self
+                    .catalog_manager
+                    .table(&catalog, &schema, &table)
+                    .await
+                    .context(CatalogSnafu)?
+                    .context(TableNotFoundSnafu { table_name: &table })?;
+                let table_name = TableName::new(catalog, schema, table);
+
+                self.show_create_table(table_name, table_ref).await
+            }
+            _ => error::NotSupportedSnafu {
+                feat: format!("{stmt:?}"),
+            }
+            .fail(),
         }
         .context(error::ExecuteStatementSnafu)
     }

+    async fn show_create_table(&self, table_name: TableName, table: TableRef) -> Result<Output> {
+        let partitions = self
+            .catalog_manager
+            .partition_manager()
+            .find_table_partitions(&table_name)
+            .await
+            .context(error::FindTablePartitionRuleSnafu {
+                table_name: &table_name.table_name,
+            })?;
+
+        let partitions = create_partitions_stmt(partitions)?;
+
+        query::sql::show_create_table(table, partitions).context(error::ExecuteStatementSnafu)
+    }
+
     /// Handles distributed database creation
@@ -531,6 +562,46 @@ impl SqlStatementExecutor for DistInstance {
     }
 }

+fn create_partitions_stmt(partitions: Vec<PartitionInfo>) -> Result<Option<Partitions>> {
+    if partitions.is_empty() {
+        return Ok(None);
+    }
+
+    let column_list: Vec<Ident> = partitions[0]
+        .partition
+        .partition_columns()
+        .iter()
+        .map(|name| name[..].into())
+        .collect();
+
+    let entries = partitions
+        .into_iter()
+        .map(|info| {
+            // Generated the partition name from id
+            let name = &format!("r{}", info.id);
+            let bounds = info.partition.partition_bounds();
+            let value_list = bounds
+                .iter()
+                .map(|b| match b {
+                    PartitionBound::Value(v) => statements::value_to_sql_value(v)
+                        .with_context(|_| error::ConvertSqlValueSnafu { value: v.clone() }),
+                    PartitionBound::MaxValue => Ok(SqlValue::Number(MAX_VALUE.to_string(), false)),
+                })
+                .collect::<Result<Vec<_>>>()?;
+
+            Ok(PartitionEntry {
+                name: name[..].into(),
+                value_list,
+            })
+        })
+        .collect::<Result<Vec<_>>>()?;
+
+    Ok(Some(Partitions {
+        column_list,
+        entries,
+    }))
+}
+
 fn create_table_info(create_table: &CreateTableExpr) -> Result<RawTableInfo> {
     let mut column_schemas = Vec::with_capacity(create_table.column_defs.len());
     let mut column_name_to_index_map = HashMap::new();
@@ -651,7 +722,7 @@ fn find_partition_entries(
         // indexing is safe here because we have checked that "value_list" and "column_list" are matched in size
         let (column_name, data_type) = &column_name_and_type[i];
         let v = match v {
-            SqlValue::Number(n, _) if n == "MAXVALUE" => PartitionBound::MaxValue,
+            SqlValue::Number(n, _) if n == MAX_VALUE => PartitionBound::MaxValue,
             _ => PartitionBound::Value(
                 sql_value_to_value(column_name, data_type, v).context(ParseSqlSnafu)?,
             ),
@@ -79,6 +79,65 @@ async fn test_create_database_and_insert_query(instance: Arc<dyn MockInstance>)
     }
 }

+#[apply(both_instances_cases)]
+async fn test_show_create_table(instance: Arc<dyn MockInstance>) {
+    let frontend = instance.frontend();
+
+    let output = execute_sql(
+        &frontend,
+        r#"create table demo(
+             host STRING,
+             cpu DOUBLE,
+             memory DOUBLE,
+             ts bigint,
+             TIME INDEX(ts)
+)"#,
+    )
+    .await;
+    assert!(matches!(output, Output::AffectedRows(0)));
+
+    let output = execute_sql(&frontend, "show create table demo").await;
+
+    let expected = if instance.is_distributed_mode() {
+        "\
++-------+--------------------------------------------+
+| Table | Create Table                               |
++-------+--------------------------------------------+
+| demo  | CREATE TABLE IF NOT EXISTS demo (          |
+|       |   host STRING NULL,                        |
+|       |   cpu DOUBLE NULL,                         |
+|       |   memory DOUBLE NULL,                      |
+|       |   ts BIGINT NOT NULL,                      |
+|       |   TIME INDEX (ts)                          |
+|       | )                                          |
+|       | PARTITION BY RANGE COLUMNS (ts) (          |
+|       |   PARTITION r0 VALUES LESS THAN (MAXVALUE) |
+|       | )                                          |
+|       | ENGINE=mito                                |
+|       |                                            |
++-------+--------------------------------------------+"
+    } else {
+        "\
++-------+-----------------------------------+
+| Table | Create Table                      |
++-------+-----------------------------------+
+| demo  | CREATE TABLE IF NOT EXISTS demo ( |
+|       |   host STRING NULL,               |
+|       |   cpu DOUBLE NULL,                |
+|       |   memory DOUBLE NULL,             |
+|       |   ts BIGINT NOT NULL,             |
+|       |   TIME INDEX (ts)                 |
+|       | )                                 |
+|       | ENGINE=mito                       |
+|       | WITH(                             |
+|       |   regions = 1                     |
+|       | )                                 |
++-------+-----------------------------------+"
+    };
+
+    check_output_stream(output, expected).await;
+}
+
 #[apply(both_instances_cases)]
 async fn test_issue477_same_table_name_in_different_databases(instance: Arc<dyn MockInstance>) {
     let instance = instance.frontend();
@@ -20,7 +20,7 @@ use datafusion_expr::{BinaryExpr, Expr as DfExpr, Operator};
 use datatypes::prelude::Value;
 use meta_client::rpc::{Peer, TableName, TableRoute};
 use snafu::{ensure, OptionExt, ResultExt};
-use store_api::storage::RegionNumber;
+use store_api::storage::{RegionId, RegionNumber};
 use table::requests::InsertRequest;

 use crate::columns::RangeColumnsPartitionRule;
@@ -41,6 +41,12 @@ pub struct PartitionRuleManager {
     table_routes: Arc<TableRoutes>,
 }

+#[derive(Debug)]
+pub struct PartitionInfo {
+    pub id: RegionId,
+    pub partition: PartitionDef,
+}
+
 impl PartitionRuleManager {
     pub fn new(table_routes: Arc<TableRoutes>) -> Self {
         Self { table_routes }
@@ -68,7 +74,7 @@ impl PartitionRuleManager {
             .region_routes
             .iter()
             .find_map(|x| {
-                if x.region.id == *region as u64 {
+                if x.region.id == *region as RegionId {
                     x.leader_peer.clone()
                 } else {
                     None
@@ -86,8 +92,7 @@ impl PartitionRuleManager {
         Ok(datanodes)
     }

-    /// Get partition rule of given table.
-    pub async fn find_table_partition_rule(&self, table: &TableName) -> Result<PartitionRuleRef> {
+    pub async fn find_table_partitions(&self, table: &TableName) -> Result<Vec<PartitionInfo>> {
         let route = self.table_routes.get_route(table).await?;
         ensure!(
             !route.region_routes.is_empty(),
@@ -107,20 +112,36 @@ impl PartitionRuleManager {
                 table_name: table.to_string(),
             })?;
             let partition_def = PartitionDef::try_from(partition)?;
-            partitions.push((r.region.id, partition_def));
+
+            partitions.push(PartitionInfo {
+                id: r.region.id,
+                partition: partition_def,
+            });
         }
-        partitions.sort_by(|a, b| a.1.partition_bounds().cmp(b.1.partition_bounds()));
+        partitions.sort_by(|a, b| {
+            a.partition
+                .partition_bounds()
+                .cmp(b.partition.partition_bounds())
+        });

         ensure!(
             partitions
                 .windows(2)
-                .all(|w| w[0].1.partition_columns() == w[1].1.partition_columns()),
+                .all(|w| w[0].partition.partition_columns() == w[1].partition.partition_columns()),
             error::InvalidTableRouteDataSnafu {
                 table_name: table.to_string(),
                 err_msg: "partition columns of all regions are not the same"
            }
         );
-        let partition_columns = partitions[0].1.partition_columns();
+
+        Ok(partitions)
+    }
+
+    /// Get partition rule of given table.
+    pub async fn find_table_partition_rule(&self, table: &TableName) -> Result<PartitionRuleRef> {
+        let partitions = self.find_table_partitions(table).await?;
+
+        let partition_columns = partitions[0].partition.partition_columns();
         ensure!(
             !partition_columns.is_empty(),
             error::InvalidTableRouteDataSnafu {
@@ -131,7 +152,7 @@ impl PartitionRuleManager {

         let regions = partitions
             .iter()
-            .map(|x| x.0 as u32)
+            .map(|x| x.id as u32)
             .collect::<Vec<RegionNumber>>();

         // TODO(LFC): Serializing and deserializing partition rule is ugly, must find a much more elegant way.
@@ -140,7 +161,7 @@ impl PartitionRuleManager {
         // Omit the last "MAXVALUE".
         let bounds = partitions
             .iter()
-            .filter_map(|(_, p)| match &p.partition_bounds()[0] {
+            .filter_map(|info| match &info.partition.partition_bounds()[0] {
                 PartitionBound::Value(v) => Some(v.clone()),
                 PartitionBound::MaxValue => None,
             })
@@ -154,7 +175,7 @@ impl PartitionRuleManager {
             _ => {
                 let bounds = partitions
                     .iter()
-                    .map(|x| x.1.partition_bounds().clone())
+                    .map(|x| x.partition.partition_bounds().clone())
                     .collect::<Vec<Vec<PartitionBound>>>();
                 Arc::new(RangeColumnsPartitionRule::new(
                     partition_columns.clone(),
@@ -27,6 +27,7 @@ datafusion-sql.workspace = true
 datatypes = { path = "../datatypes" }
 futures = "0.3"
 futures-util.workspace = true
+humantime = "2.1"
 metrics.workspace = true
 once_cell = "1.10"
 promql = { path = "../promql" }
@@ -16,6 +16,8 @@ use std::any::Any;

 use common_error::prelude::*;
 use datafusion::error::DataFusionError;
+use datatypes::prelude::ConcreteDataType;
+use datatypes::value::Value;
 use snafu::{Location, Snafu};

 #[derive(Debug, Snafu)]
@@ -110,6 +112,20 @@ pub enum Error {
         table_name: String,
         location: Location,
     },
+
+    #[snafu(display("Failed to convert value to sql value: {}", value))]
+    ConvertSqlValue {
+        value: Value,
+        #[snafu(backtrace)]
+        source: sql::error::Error,
+    },
+
+    #[snafu(display("Failed to convert concrete type to sql type: {:?}", datatype))]
+    ConvertSqlType {
+        datatype: ConcreteDataType,
+        #[snafu(backtrace)]
+        source: sql::error::Error,
+    },
 }

 impl ErrorExt for Error {
@@ -134,6 +150,7 @@ impl ErrorExt for Error {
             DataFusion { .. } | MissingTimestampColumn { .. } => StatusCode::Internal,
             Sql { source } => source.status_code(),
             PlanSql { .. } => StatusCode::PlanQuery,
+            ConvertSqlType { source, .. } | ConvertSqlValue { source, .. } => source.status_code(),
         }
     }
@@ -12,6 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+mod show;
+
 use std::sync::Arc;

 use catalog::CatalogManagerRef;
@@ -24,6 +26,7 @@ use datatypes::vectors::{Helper, StringVector};
 use once_cell::sync::Lazy;
 use session::context::QueryContextRef;
 use snafu::{ensure, OptionExt, ResultExt};
+use sql::statements::create::Partitions;
 use sql::statements::show::{ShowDatabases, ShowKind, ShowTables};
 use table::TableRef;

@@ -74,6 +77,13 @@ static DESCRIBE_TABLE_OUTPUT_SCHEMA: Lazy<Arc<Schema>> = Lazy::new(|| {
     ]))
 });

+static SHOW_CREATE_TABLE_OUTPUT_SCHEMA: Lazy<Arc<Schema>> = Lazy::new(|| {
+    Arc::new(Schema::new(vec![
+        ColumnSchema::new("Table", ConcreteDataType::string_datatype(), false),
+        ColumnSchema::new("Create Table", ConcreteDataType::string_datatype(), false),
+    ]))
+});
+
 pub fn show_databases(stmt: ShowDatabases, catalog_manager: CatalogManagerRef) -> Result<Output> {
     // TODO(LFC): supports WHERE
     ensure!(
@@ -152,6 +162,22 @@ pub fn show_tables(
     Ok(Output::RecordBatches(records))
 }

+pub fn show_create_table(table: TableRef, partitions: Option<Partitions>) -> Result<Output> {
+    let table_info = table.table_info();
+    let table_name = &table_info.name;
+    let mut stmt = show::create_table_stmt(&table_info)?;
+    stmt.partitions = partitions;
+    let sql = format!("{}", stmt);
+    let columns = vec![
+        Arc::new(StringVector::from(vec![table_name.clone()])) as _,
+        Arc::new(StringVector::from(vec![sql])) as _,
+    ];
+    let records = RecordBatches::try_from_columns(SHOW_CREATE_TABLE_OUTPUT_SCHEMA.clone(), columns)
+        .context(error::CreateRecordBatchSnafu)?;
+
+    Ok(Output::RecordBatches(records))
+}
+
 pub fn describe_table(table: TableRef) -> Result<Output> {
     let table_info = table.table_info();
     let columns_schemas = table_info.meta.schema.column_schemas();
src/query/src/sql/show.rs (new file, 262 lines)
@@ -0,0 +1,262 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt::Display;

use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema, SchemaRef, COMMENT_KEY};
use humantime::format_duration;
use snafu::ResultExt;
use sql::ast::{
    ColumnDef, ColumnOption, ColumnOptionDef, Expr, ObjectName, SqlOption, TableConstraint,
    Value as SqlValue,
};
use sql::dialect::GenericDialect;
use sql::parser::ParserContext;
use sql::statements::create::{CreateTable, TIME_INDEX};
use sql::statements::{self};
use table::metadata::{TableInfoRef, TableMeta};

use crate::error::{ConvertSqlTypeSnafu, ConvertSqlValueSnafu, Result, SqlSnafu};

#[inline]
fn number_value<T: Display>(n: T) -> SqlValue {
    SqlValue::Number(format!("{}", n), false)
}

#[inline]
fn string_value(s: impl Into<String>) -> SqlValue {
    SqlValue::SingleQuotedString(s.into())
}

#[inline]
fn sql_option(name: &str, value: SqlValue) -> SqlOption {
    SqlOption {
        name: name.into(),
        value,
    }
}

fn create_sql_options(table_meta: &TableMeta) -> Vec<SqlOption> {
    let table_opts = &table_meta.options;
    let mut options = Vec::with_capacity(4 + table_opts.extra_options.len());

    if !table_meta.region_numbers.is_empty() {
        options.push(sql_option(
            "regions",
            number_value(table_meta.region_numbers.len()),
        ));
    }

    if let Some(write_buffer_size) = table_opts.write_buffer_size {
        options.push(sql_option(
            "write_buffer_size",
            string_value(write_buffer_size.to_string()),
        ));
    }
    if let Some(ttl) = table_opts.ttl {
        options.push(sql_option(
            "ttl",
            string_value(format_duration(ttl).to_string()),
        ));
    }

    if let Some(w) = table_opts.compaction_time_window {
        options.push(sql_option("compaction_time_window", number_value(w)));
    }

    for (k, v) in &table_opts.extra_options {
        options.push(sql_option(k, string_value(v)));
    }

    options
}

#[inline]
fn column_option_def(option: ColumnOption) -> ColumnOptionDef {
    ColumnOptionDef { name: None, option }
}

fn create_column_def(column_schema: &ColumnSchema) -> Result<ColumnDef> {
    let name = &column_schema.name;
    let mut options = Vec::with_capacity(2);

    if column_schema.is_nullable() {
        options.push(column_option_def(ColumnOption::Null));
    } else {
        options.push(column_option_def(ColumnOption::NotNull));
    }

    if let Some(c) = column_schema.default_constraint() {
        let expr = match c {
            ColumnDefaultConstraint::Value(v) => Expr::Value(
                statements::value_to_sql_value(v)
                    .with_context(|_| ConvertSqlValueSnafu { value: v.clone() })?,
            ),
            ColumnDefaultConstraint::Function(expr) => {
                ParserContext::parse_function(expr, &GenericDialect {}).context(SqlSnafu)?
            }
        };

        options.push(column_option_def(ColumnOption::Default(expr)));
    }

    if let Some(c) = column_schema.metadata().get(COMMENT_KEY) {
        options.push(column_option_def(ColumnOption::Comment(c.to_string())));
    }

    Ok(ColumnDef {
        name: name[..].into(),
        data_type: statements::concrete_data_type_to_sql_data_type(&column_schema.data_type)
            .with_context(|_| ConvertSqlTypeSnafu {
                datatype: column_schema.data_type.clone(),
            })?,
        collation: None,
        options,
    })
}

fn create_table_constraints(schema: &SchemaRef, table_meta: &TableMeta) -> Vec<TableConstraint> {
    let mut constraints = Vec::with_capacity(2);
    if let Some(timestamp_column) = schema.timestamp_column() {
        let column_name = &timestamp_column.name;
        constraints.push(TableConstraint::Unique {
            name: Some(TIME_INDEX.into()),
            columns: vec![column_name[..].into()],
            is_primary: false,
        });
    }
    if !table_meta.primary_key_indices.is_empty() {
        let columns = table_meta
            .row_key_column_names()
            .map(|name| name[..].into())
            .collect();
        constraints.push(TableConstraint::Unique {
            name: None,
            columns,
            is_primary: true,
        });
    }

    constraints
}

/// Create a CreateTable statement from table info.
pub fn create_table_stmt(table_info: &TableInfoRef) -> Result<CreateTable> {
    let table_meta = &table_info.meta;
    let table_name = &table_info.name;
    let schema = &table_info.meta.schema;

    let columns = schema
        .column_schemas()
        .iter()
        .map(create_column_def)
        .collect::<Result<Vec<_>>>()?;

    let constraints = create_table_constraints(schema, table_meta);

    Ok(CreateTable {
        if_not_exists: true,
        table_id: table_info.ident.table_id,
        name: ObjectName(vec![table_name[..].into()]),
        columns,
        engine: table_meta.engine.clone(),
        constraints,
        options: create_sql_options(table_meta),
        partitions: None,
    })
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use common_time::timestamp::TimeUnit;
    use datatypes::prelude::ConcreteDataType;
    use datatypes::schema::{Schema, SchemaRef};
    use table::metadata::*;

    use super::*;

    #[test]
    fn test_show_create_table_sql() {
        let schema = vec![
            ColumnSchema::new("id", ConcreteDataType::uint32_datatype(), true),
            ColumnSchema::new("host", ConcreteDataType::string_datatype(), true),
            ColumnSchema::new("cpu", ConcreteDataType::float64_datatype(), true),
            ColumnSchema::new("disk", ConcreteDataType::float32_datatype(), true),
            ColumnSchema::new(
                "ts",
                ConcreteDataType::timestamp_datatype(TimeUnit::Millisecond),
                false,
            )
            .with_default_constraint(Some(ColumnDefaultConstraint::Function(String::from(
                "current_timestamp()",
            ))))
            .unwrap()
            .with_time_index(true),
        ];
        let table_schema = SchemaRef::new(Schema::new(schema));
        let table_name = "system_metrics";
        let schema_name = "public".to_string();
        let catalog_name = "greptime".to_string();
        let regions = vec![0, 1, 2];

        let meta = TableMetaBuilder::default()
            .schema(table_schema)
            .primary_key_indices(vec![0, 1])
            .value_indices(vec![2, 3])
            .engine("mito".to_string())
            .next_column_id(0)
            .engine_options(Default::default())
            .options(Default::default())
            .created_on(Default::default())
            .region_numbers(regions)
            .build()
            .unwrap();

        let info = Arc::new(
            TableInfoBuilder::default()
                .table_id(1024)
                .table_version(0 as TableVersion)
                .name(table_name)
                .schema_name(schema_name)
                .catalog_name(catalog_name)
                .desc(None)
                .table_type(TableType::Base)
                .meta(meta)
                .build()
                .unwrap(),
        );

        let stmt = create_table_stmt(&info).unwrap();

        let sql = format!("\n{}", stmt);
        assert_eq!(
            r#"
CREATE TABLE IF NOT EXISTS system_metrics (
  id INT UNSIGNED NULL,
  host STRING NULL,
  cpu DOUBLE NULL,
  disk FLOAT NULL,
  ts TIMESTAMP(3) NOT NULL DEFAULT current_timestamp(),
  TIME INDEX (ts),
  PRIMARY KEY (id, host)
)
ENGINE=mito
WITH(
  regions = 3
)"#,
            sql
        );
    }
}
@@ -17,12 +17,12 @@ use std::any::Any;
 use common_error::prelude::*;
 use common_time::timestamp::TimeUnit;
 use common_time::Timestamp;
-use datatypes::prelude::ConcreteDataType;
+use datatypes::prelude::{ConcreteDataType, Value};
 use snafu::Location;
 use sqlparser::parser::ParserError;
 use sqlparser::tokenizer::TokenizerError;

-use crate::ast::Expr;
+use crate::ast::{Expr, Value as SqlValue};

 pub type Result<T> = std::result::Result<T, Error>;

@@ -145,6 +145,16 @@ pub enum Error {
         statement: String,
         location: Location,
     },

+    #[snafu(display("Unable to convert sql value {} to datatype {:?}", value, datatype))]
+    ConvertSqlValue {
+        value: SqlValue,
+        datatype: ConcreteDataType,
+        location: Location,
+    },
+
+    #[snafu(display("Unable to convert value {} to sql value", value))]
+    ConvertValue { value: Value, location: Location },
 }

 impl ErrorExt for Error {
@@ -175,6 +185,7 @@ impl ErrorExt for Error {
             SerializeColumnDefaultConstraint { source, .. } => source.status_code(),
             ConvertToGrpcDataType { source, .. } => source.status_code(),
             ConvertToDfStatement { .. } => StatusCode::Internal,
+            ConvertSqlValue { .. } | ConvertValue { .. } => StatusCode::Unsupported,
         }
     }

@@ -18,6 +18,7 @@ use sqlparser::keywords::Keyword;
 use sqlparser::parser::{Parser, ParserError};
 use sqlparser::tokenizer::{Token, TokenWithLocation};

+use crate::ast::{Expr, ObjectName};
 use crate::error::{self, InvalidDatabaseNameSnafu, InvalidTableNameSnafu, Result, SyntaxSnafu};
 use crate::parsers::tql_parser;
 use crate::statements::describe::DescribeTable;
@@ -64,6 +65,17 @@ impl<'a> ParserContext<'a> {
         Ok(stmts)
     }

+    pub fn parse_function(sql: &'a str, dialect: &dyn Dialect) -> Result<Expr> {
+        let mut parser = Parser::new(dialect)
+            .try_with_sql(sql)
+            .context(SyntaxSnafu { sql })?;
+
+        let function_name = parser.parse_identifier().context(SyntaxSnafu { sql })?;
+        parser
+            .parse_function(ObjectName(vec![function_name]))
+            .context(SyntaxSnafu { sql })
+    }
+
     /// Parses parser context to a set of statements.
     pub fn parse_statement(&mut self) -> Result<Statement> {
         match self.parser.peek_token().token {
@@ -174,9 +186,7 @@ impl<'a> ParserContext<'a> {
                 name: table_name.to_string(),
             }
         );
-        Ok(Statement::ShowCreateTable(ShowCreateTable {
-            table_name: table_name.to_string(),
-        }))
+        Ok(Statement::ShowCreateTable(ShowCreateTable { table_name }))
     }

     fn parse_show_tables(&mut self) -> Result<Statement> {
@@ -659,4 +669,11 @@ mod tests {
             ConcreteDataType::timestamp_millisecond_datatype(),
         );
     }
+
+    #[test]
+    fn test_parse_function() {
+        let expr =
+            ParserContext::parse_function("current_timestamp()", &GenericDialect {}).unwrap();
+        assert!(matches!(expr, Expr::Function(_)));
+    }
 }
@@ -31,18 +31,19 @@ use api::helper::ColumnDataTypeWrapper;
 use common_base::bytes::Bytes;
 use common_time::Timestamp;
 use datatypes::prelude::ConcreteDataType;
-use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema};
+use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema, COMMENT_KEY};
 use datatypes::types::TimestampType;
 use datatypes::value::Value;
 use snafu::{ensure, OptionExt, ResultExt};

 use crate::ast::{
-    ColumnDef, ColumnOption, ColumnOptionDef, DataType as SqlDataType, Expr, Value as SqlValue,
+    ColumnDef, ColumnOption, ColumnOptionDef, DataType as SqlDataType, Expr, TimezoneInfo,
+    Value as SqlValue,
 };
 use crate::error::{
-    self, ColumnTypeMismatchSnafu, ConvertToGrpcDataTypeSnafu, InvalidSqlValueSnafu,
-    ParseSqlValueSnafu, Result, SerializeColumnDefaultConstraintSnafu, TimestampOverflowSnafu,
-    UnsupportedDefaultValueSnafu,
+    self, ColumnTypeMismatchSnafu, ConvertSqlValueSnafu, ConvertToGrpcDataTypeSnafu,
+    ConvertValueSnafu, InvalidSqlValueSnafu, ParseSqlValueSnafu, Result,
+    SerializeColumnDefaultConstraintSnafu, TimestampOverflowSnafu, UnsupportedDefaultValueSnafu,
 };

 fn parse_string_to_value(
@@ -197,7 +198,38 @@ pub fn sql_value_to_value(
         }
         SqlValue::HexStringLiteral(s) => parse_hex_string(s)?,
         SqlValue::Placeholder(s) => return InvalidSqlValueSnafu { value: s }.fail(),
-        _ => todo!("Other sql value"),
+
+        // TODO(dennis): supports binary string
+        _ => {
+            return ConvertSqlValueSnafu {
+                value: sql_val.clone(),
+                datatype: data_type.clone(),
+            }
+            .fail()
+        }
     })
 }

+pub fn value_to_sql_value(val: &Value) -> Result<SqlValue> {
+    Ok(match val {
+        Value::Int8(v) => SqlValue::Number(v.to_string(), false),
+        Value::UInt8(v) => SqlValue::Number(v.to_string(), false),
+        Value::Int16(v) => SqlValue::Number(v.to_string(), false),
+        Value::UInt16(v) => SqlValue::Number(v.to_string(), false),
+        Value::Int32(v) => SqlValue::Number(v.to_string(), false),
+        Value::UInt32(v) => SqlValue::Number(v.to_string(), false),
+        Value::Int64(v) => SqlValue::Number(v.to_string(), false),
+        Value::UInt64(v) => SqlValue::Number(v.to_string(), false),
+        Value::Float32(v) => SqlValue::Number(v.to_string(), false),
+        Value::Float64(v) => SqlValue::Number(v.to_string(), false),
+        Value::Boolean(b) => SqlValue::Boolean(*b),
+        Value::Date(d) => SqlValue::SingleQuotedString(d.to_string()),
+        Value::DateTime(d) => SqlValue::SingleQuotedString(d.to_string()),
+        Value::Timestamp(ts) => SqlValue::SingleQuotedString(ts.to_iso8601_string()),
+        Value::String(s) => SqlValue::SingleQuotedString(s.as_utf8().to_string()),
+        Value::Null => SqlValue::Null,
+        // TODO(dennis): supports binary
+        _ => return ConvertValueSnafu { value: val.clone() }.fail(),
+    })
+}
+
@@ -249,12 +281,26 @@ pub fn column_def_to_schema(column_def: &ColumnDef, is_time_index: bool) -> Resu
     let default_constraint =
         parse_column_default_constraint(&name, &data_type, &column_def.options)?;

-    ColumnSchema::new(name, data_type, is_nullable)
+    let mut column_schema = ColumnSchema::new(name, data_type, is_nullable)
         .with_time_index(is_time_index)
         .with_default_constraint(default_constraint)
         .context(error::InvalidDefaultSnafu {
             column: &column_def.name.value,
-        })
+        })?;
+
+    if let Some(ColumnOption::Comment(c)) = column_def.options.iter().find_map(|o| {
+        if matches!(o.option, ColumnOption::Comment(_)) {
+            Some(&o.option)
+        } else {
+            None
+        }
+    }) {
+        column_schema
+            .mut_metadata()
+            .insert(COMMENT_KEY.to_string(), c.to_string());
+    }
+
+    Ok(column_schema)
 }

 /// Convert `ColumnDef` in sqlparser to `ColumnDef` in gRPC proto.
@@ -324,6 +370,33 @@ pub fn sql_data_type_to_concrete_data_type(data_type: &SqlDataType) -> Result<Co
     }
 }

+pub fn concrete_data_type_to_sql_data_type(data_type: &ConcreteDataType) -> Result<SqlDataType> {
+    match data_type {
+        ConcreteDataType::Int64(_) => Ok(SqlDataType::BigInt(None)),
+        ConcreteDataType::UInt64(_) => Ok(SqlDataType::UnsignedBigInt(None)),
+        ConcreteDataType::Int32(_) => Ok(SqlDataType::Int(None)),
+        ConcreteDataType::UInt32(_) => Ok(SqlDataType::UnsignedInt(None)),
+        ConcreteDataType::Int16(_) => Ok(SqlDataType::SmallInt(None)),
+        ConcreteDataType::UInt16(_) => Ok(SqlDataType::UnsignedSmallInt(None)),
+        ConcreteDataType::Int8(_) => Ok(SqlDataType::TinyInt(None)),
+        ConcreteDataType::UInt8(_) => Ok(SqlDataType::UnsignedTinyInt(None)),
+        ConcreteDataType::String(_) => Ok(SqlDataType::String),
+        ConcreteDataType::Float32(_) => Ok(SqlDataType::Float(None)),
+        ConcreteDataType::Float64(_) => Ok(SqlDataType::Double),
+        ConcreteDataType::Boolean(_) => Ok(SqlDataType::Boolean),
+        ConcreteDataType::Date(_) => Ok(SqlDataType::Date),
+        ConcreteDataType::DateTime(_) => Ok(SqlDataType::Datetime(None)),
+        ConcreteDataType::Timestamp(ts_type) => Ok(SqlDataType::Timestamp(
+            Some(ts_type.precision()),
+            TimezoneInfo::None,
+        )),
+        ConcreteDataType::Binary(_) => Ok(SqlDataType::Varbinary(None)),
+        ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
+            unreachable!()
+        }
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use std::assert_matches::assert_matches;
@@ -676,10 +749,16 @@ mod tests {
             name: "col2".into(),
             data_type: SqlDataType::String,
             collation: None,
-            options: vec![ColumnOptionDef {
-                name: None,
-                option: ColumnOption::NotNull,
-            }],
+            options: vec![
+                ColumnOptionDef {
+                    name: None,
+                    option: ColumnOption::NotNull,
+                },
+                ColumnOptionDef {
+                    name: None,
+                    option: ColumnOption::Comment("test comment".to_string()),
+                },
+            ],
         };

         let column_schema = column_def_to_schema(&column_def, false).unwrap();
@@ -688,6 +767,10 @@ mod tests {
         assert_eq!(ConcreteDataType::string_datatype(), column_schema.data_type);
         assert!(!column_schema.is_nullable());
         assert!(!column_schema.is_time_index());
+        assert_eq!(
+            column_schema.metadata().get(COMMENT_KEY),
+            Some(&"test comment".to_string())
+        );
     }

     #[test]
@@ -13,12 +13,49 @@
 // limitations under the License.

 use std::collections::HashMap;
+use std::fmt::{Display, Formatter};
+
+use itertools::Itertools;

 use crate::ast::{ColumnDef, Ident, ObjectName, SqlOption, TableConstraint, Value as SqlValue};

+const LINE_SEP: &str = ",\n";
+const COMMA_SEP: &str = ", ";
+const INDENT: usize = 2;
+
+macro_rules! format_indent {
+    ($fmt: expr, $arg: expr) => {
+        format!($fmt, format_args!("{: >1$}", "", INDENT), $arg)
+    };
+    ($arg: expr) => {
+        format_indent!("{}{}", $arg)
+    };
+}
+
+macro_rules! format_list_indent {
+    ($list: expr) => {
+        $list.iter().map(|e| format_indent!(e)).join(LINE_SEP)
+    };
+}
+
+macro_rules! format_list_comma {
+    ($list: expr) => {
+        $list.iter().map(|e| format!("{}", e)).join(COMMA_SEP)
+    };
+}
+
+/// Time index name, used in table constraints.
+pub const TIME_INDEX: &str = "__time_index";
+
+#[inline]
+pub fn is_time_index(constraint: &TableConstraint) -> bool {
+    matches!(constraint, TableConstraint::Unique {
+        name: Some(name),
+        is_primary: false,
+        ..
+    } if name.value == TIME_INDEX)
+}
+
 #[derive(Debug, PartialEq, Eq, Clone)]
 pub struct CreateTable {
     /// Create if not exists
@@ -34,6 +71,55 @@ pub struct CreateTable {
     pub partitions: Option<Partitions>,
 }

+impl CreateTable {
+    fn format_constraints(&self) -> String {
+        self.constraints
+            .iter()
+            .map(|c| {
+                if is_time_index(c) {
+                    let TableConstraint::Unique { columns, ..} = c else { unreachable!() };
+
+                    format_indent!("{}TIME INDEX ({})", format_list_comma!(columns))
+                } else {
+                    format_indent!(c)
+                }
+            })
+            .join(LINE_SEP)
+    }
+
+    #[inline]
+    fn format_partitions(&self) -> String {
+        if let Some(partitions) = &self.partitions {
+            format!("{}\n", partitions)
+        } else {
+            "".to_string()
+        }
+    }
+
+    #[inline]
+    fn format_if_not_exits(&self) -> &str {
+        if self.if_not_exists {
+            "IF NOT EXISTS"
+        } else {
+            ""
+        }
+    }
+
+    #[inline]
+    fn format_options(&self) -> String {
+        if self.options.is_empty() {
+            "".to_string()
+        } else {
+            let options = format_list_indent!(self.options);
+            format!(
+                r#"WITH(
+{options}
+)"#
+            )
+        }
+    }
+}
+
 #[derive(Debug, PartialEq, Eq, Clone)]
 pub struct Partitions {
     pub column_list: Vec<Ident>,
@@ -46,6 +132,52 @@ pub struct PartitionEntry {
     pub value_list: Vec<SqlValue>,
 }

+impl Display for PartitionEntry {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "PARTITION {} VALUES LESS THAN ({})",
+            self.name,
+            format_list_comma!(self.value_list),
+        )
+    }
+}
+
+impl Display for Partitions {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            r#"PARTITION BY RANGE COLUMNS ({}) (
+{}
+)"#,
+            format_list_comma!(self.column_list),
+            format_list_indent!(self.entries),
+        )
+    }
+}
+
+impl Display for CreateTable {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        let if_not_exists = self.format_if_not_exits();
+        let name = &self.name;
+        let columns = format_list_indent!(self.columns);
+        let constraints = self.format_constraints();
+        let partitions = self.format_partitions();
+        let engine = &self.engine;
+        let options = self.format_options();
+
+        write!(
+            f,
+            r#"CREATE TABLE {if_not_exists} {name} (
+{columns},
+{constraints}
+)
+{partitions}ENGINE={engine}
+{options}"#
+        )
+    }
+}
+
 #[derive(Debug, PartialEq, Eq, Clone)]
 pub struct CreateDatabase {
     pub name: ObjectName,
@@ -64,3 +196,66 @@ pub struct CreateExternalTable {
     /// TODO(weny): unify the key's case styling.
     pub options: HashMap<String, String>,
 }

+#[cfg(test)]
+mod tests {
+    use sqlparser::dialect::GenericDialect;
+
+    use crate::parser::ParserContext;
+    use crate::statements::statement::Statement;
+
+    #[test]
+    fn test_display_create_table() {
+        let sql = r"create table if not exists demo(
+            host string,
+            ts bigint,
+            cpu double default 0,
+            memory double,
+            TIME INDEX (ts),
+            PRIMARY KEY(ts, host)
+        )
+        PARTITION BY RANGE COLUMNS (ts) (
+            PARTITION r0 VALUES LESS THAN (5),
+            PARTITION r1 VALUES LESS THAN (9),
+            PARTITION r2 VALUES LESS THAN (MAXVALUE),
+        )
+        engine=mito
+        with(regions=1, ttl='7d');
+";
+        let result = ParserContext::create_with_dialect(sql, &GenericDialect {}).unwrap();
+        assert_eq!(1, result.len());
+
+        match &result[0] {
+            Statement::CreateTable(c) => {
+                let new_sql = format!("\n{}", c);
+                assert_eq!(
+                    r#"
+CREATE TABLE IF NOT EXISTS demo (
+  host STRING,
+  ts BIGINT,
+  cpu DOUBLE DEFAULT 0,
+  memory DOUBLE,
+  TIME INDEX (ts),
+  PRIMARY KEY (ts, host)
+)
+PARTITION BY RANGE COLUMNS (ts) (
+  PARTITION r0 VALUES LESS THAN (5),
+  PARTITION r1 VALUES LESS THAN (9),
+  PARTITION r2 VALUES LESS THAN (MAXVALUE)
+)
+ENGINE=mito
+WITH(
+  regions = 1,
+  ttl = '7d'
+)"#,
+                    &new_sql
+                );
+
+                let new_result =
+                    ParserContext::create_with_dialect(&new_sql, &GenericDialect {}).unwrap();
+                assert_eq!(result, new_result);
+            }
+            _ => unreachable!(),
+        }
+    }
+}
@@ -14,7 +14,7 @@

 use std::fmt;

-use crate::ast::{Expr, Ident};
+use crate::ast::{Expr, Ident, ObjectName};

 /// Show kind for SQL expressions like `SHOW DATABASE` or `SHOW TABLE`
 #[derive(Debug, Clone, PartialEq, Eq)]
@@ -57,7 +57,7 @@ pub struct ShowTables {
 /// SQL structure for `SHOW CREATE TABLE`.
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub struct ShowCreateTable {
-    pub table_name: String,
+    pub table_name: ObjectName,
 }

 #[cfg(test)]
@@ -124,7 +124,7 @@ mod tests {
         assert_matches!(&stmts[0], Statement::ShowCreateTable { .. });
         match &stmts[0] {
             Statement::ShowCreateTable(show) => {
-                let table_name = show.table_name.as_str();
+                let table_name = show.table_name.to_string();
                 assert_eq!(table_name, "test");
             }
             _ => {
@@ -19,7 +19,7 @@ use std::sync::Arc;

 use common_error::prelude::*;
 use datatypes::data_type::ConcreteDataType;
-use datatypes::schema::{ColumnSchema, Metadata};
+use datatypes::schema::{ColumnSchema, Metadata, COMMENT_KEY};
 use serde::{Deserialize, Serialize};
 use snafu::{ensure, Location, OptionExt};
 use store_api::storage::consts::{self, ReservedColumnId};
@@ -380,7 +380,6 @@ impl TryFrom<RawRegionMetadata> for RegionMetadata {

 const METADATA_CF_ID_KEY: &str = "greptime:storage:cf_id";
 const METADATA_COLUMN_ID_KEY: &str = "greptime:storage:column_id";
-const METADATA_COMMENT_KEY: &str = "greptime:storage:comment";

 #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
 pub struct ColumnMetadata {
@@ -416,10 +415,7 @@ impl ColumnMetadata {
         let metadata = column_schema.metadata();
         let cf_id = try_parse_int(metadata, METADATA_CF_ID_KEY, Some(consts::DEFAULT_CF_ID))?;
         let column_id = try_parse_int(metadata, METADATA_COLUMN_ID_KEY, None)?;
-        let comment = metadata
-            .get(METADATA_COMMENT_KEY)
-            .cloned()
-            .unwrap_or_default();
+        let comment = metadata.get(COMMENT_KEY).cloned().unwrap_or_default();

         let desc = ColumnDescriptorBuilder::new(
             column_id,
@@ -443,7 +439,7 @@ impl ColumnMetadata {
         }
         metadata.insert(METADATA_COLUMN_ID_KEY.to_string(), self.desc.id.to_string());
         if !self.desc.comment.is_empty() {
-            metadata.insert(METADATA_COMMENT_KEY.to_string(), self.desc.comment.clone());
+            metadata.insert(COMMENT_KEY.to_string(), self.desc.comment.clone());
         }

         metadata
@@ -81,9 +81,6 @@ pub const DEFAULT_CF_NAME: &str = "default";
 /// Name for reserved column: sequence
 pub const SEQUENCE_COLUMN_NAME: &str = "__sequence";

-/// Name for time index constraint name.
-pub const TIME_INDEX_NAME: &str = "__time_index";
-
 /// Name for reserved column: op_type
 pub const OP_TYPE_COLUMN_NAME: &str = "__op_type";

@@ -77,6 +77,7 @@ pub struct TableOptions {

 pub const WRITE_BUFFER_SIZE_KEY: &str = "write_buffer_size";
 pub const TTL_KEY: &str = "ttl";
+pub const REGIONS_KEY: &str = "regions";
 pub const COMPACTION_TIME_WINDOW_KEY: &str = "compaction_time_window";

 impl TryFrom<&HashMap<String, String>> for TableOptions {
@@ -121,7 +122,11 @@ impl TryFrom<&HashMap<String, String>> for TableOptions {
             };
         }
         options.extra_options = HashMap::from_iter(value.iter().filter_map(|(k, v)| {
-            if k != WRITE_BUFFER_SIZE_KEY && k != TTL_KEY && k != COMPACTION_TIME_WINDOW_KEY {
+            if k != WRITE_BUFFER_SIZE_KEY
+                && k != REGIONS_KEY
+                && k != TTL_KEY
+                && k != COMPACTION_TIME_WINDOW_KEY
+            {
                 Some((k.clone(), v.clone()))
             } else {
                 None
tests/cases/distributed/show/show_create.result (new file, 47 lines)
@@ -0,0 +1,47 @@
CREATE TABLE system_metrics (
    id INT UNSIGNED,
    host STRING,
    cpu DOUBLE,
    disk FLOAT,
    n INT COMMENT 'range key',
    ts TIMESTAMP NOT NULL DEFAULT current_timestamp(),
    TIME INDEX (ts),
    PRIMARY KEY (id, host)
)
PARTITION BY RANGE COLUMNS (n) (
    PARTITION r0 VALUES LESS THAN (5),
    PARTITION r1 VALUES LESS THAN (9),
    PARTITION r2 VALUES LESS THAN (MAXVALUE),
)
ENGINE=mito;

Affected Rows: 0

SHOW CREATE TABLE system_metrics;

+----------------+---------------------------------------------------------+
| Table          | Create Table                                            |
+----------------+---------------------------------------------------------+
| system_metrics | CREATE TABLE IF NOT EXISTS system_metrics (             |
|                |   id INT UNSIGNED NULL,                                 |
|                |   host STRING NULL,                                     |
|                |   cpu DOUBLE NULL,                                      |
|                |   disk FLOAT NULL,                                      |
|                |   n INT NULL,                                           |
|                |   ts TIMESTAMP(3) NOT NULL DEFAULT current_timestamp(), |
|                |   TIME INDEX (ts),                                      |
|                |   PRIMARY KEY (id, host)                                |
|                | )                                                       |
|                | PARTITION BY RANGE COLUMNS (n) (                        |
|                |   PARTITION r0 VALUES LESS THAN (5),                    |
|                |   PARTITION r1 VALUES LESS THAN (9),                    |
|                |   PARTITION r2 VALUES LESS THAN (MAXVALUE)              |
|                | )                                                       |
|                | ENGINE=mito                                             |
|                |                                                         |
+----------------+---------------------------------------------------------+

DROP TABLE system_metrics;

Affected Rows: 1

tests/cases/distributed/show/show_create.sql (new file, 21 lines)
@@ -0,0 +1,21 @@
CREATE TABLE system_metrics (
    id INT UNSIGNED,
    host STRING,
    cpu DOUBLE,
    disk FLOAT,
    n INT COMMENT 'range key',
    ts TIMESTAMP NOT NULL DEFAULT current_timestamp(),
    TIME INDEX (ts),
    PRIMARY KEY (id, host)
)
PARTITION BY RANGE COLUMNS (n) (
    PARTITION r0 VALUES LESS THAN (5),
    PARTITION r1 VALUES LESS THAN (9),
    PARTITION r2 VALUES LESS THAN (MAXVALUE),
)
ENGINE=mito;


SHOW CREATE TABLE system_metrics;

DROP TABLE system_metrics;
@@ -41,3 +41,7 @@ DROP TABLE integers;

 Affected Rows: 1

+DROP TABLE presentations;
+
+Affected Rows: 1
+
@@ -21,3 +21,5 @@ CREATE TABLE IF NOT EXISTS presentations (
 insert into presentations values (1, 'Patrick Damme', 'Analytical Query Processing Based on Continuous Compression of Intermediates', NULL, 'Modern in-memory column-stores are widely accepted as the adequate database architecture for the efficient processing of complex analytical queries over large relational data volumes. These systems keep their entire data in main memory and typically employ lightweight compression to address the bottleneck between main memory and CPU. Numerous lightweight compression algorithms have been proposed in the past years, but none of them is suitable in all cases. While lightweight compression is already well established for base data, the efficient representation of intermediate results generated during query processing has attracted insufficient attention so far, although in in-memory systems, accessing intermediates is as expensive as accessing base data. Thus, our vision is a continuous use of lightweight compression for all intermediates in a query execution plan, whereby a suitable compression algorithm should be selected for each intermediate. In this talk, I will provide an overview of our research in the context of this vision, including an experimental survey of lightweight compression algorithms, our compression-enabled processing model, and our compression-aware query optimization strategies.', 'https://zoom.us/j/7845983526');

 DROP TABLE integers;
+
+DROP TABLE presentations;
@@ -19,3 +19,7 @@ SELECT * FROM timestamp_with_precision ORDER BY ts ASC;
 | 2023-04-04T08:00:00.005200 | 1   |
 +----------------------------+-----+

+DROP TABLE timestamp_with_precision;
+
+Affected Rows: 1
+
@@ -5,3 +5,5 @@ INSERT INTO timestamp_with_precision(ts,cnt) VALUES ('2023-04-04 08:00:00.0052+0
 INSERT INTO timestamp_with_precision(ts,cnt) VALUES ('2023-04-04 08:00:00.0052+0800', 2);

 SELECT * FROM timestamp_with_precision ORDER BY ts ASC;
+
+DROP TABLE timestamp_with_precision;
@@ -409,6 +409,10 @@ DROP TABLE tpch_q1_agg;

 Affected Rows: 1

+DROP TABLE test5;
+
+Affected Rows: 1
+
 DROP TABLE test6;

 Affected Rows: 1

@@ -116,6 +116,8 @@ DROP TABLE test4;

 DROP TABLE tpch_q1_agg;

+DROP TABLE test5;
+
 DROP TABLE test6;

 DROP table test7;
tests/cases/standalone/show/show_create.result (new file, 43 lines)
@@ -0,0 +1,43 @@
CREATE TABLE system_metrics (
    id INT UNSIGNED NULL,
    host STRING NULL,
    cpu DOUBLE NULL COMMENT 'cpu',
    disk FLOAT NULL,
    ts TIMESTAMP NOT NULL DEFAULT current_timestamp(),
    TIME INDEX (ts),
    PRIMARY KEY (id, host)
)
ENGINE=mito
WITH(
    ttl = '7d',
    write_buffer_size = 1024
);

Affected Rows: 0

SHOW CREATE TABLE system_metrics;

+----------------+---------------------------------------------------------+
| Table          | Create Table                                            |
+----------------+---------------------------------------------------------+
| system_metrics | CREATE TABLE IF NOT EXISTS system_metrics (             |
|                |   id INT UNSIGNED NULL,                                 |
|                |   host STRING NULL,                                     |
|                |   cpu DOUBLE NULL COMMENT 'cpu',                        |
|                |   disk FLOAT NULL,                                      |
|                |   ts TIMESTAMP(3) NOT NULL DEFAULT current_timestamp(), |
|                |   TIME INDEX (ts),                                      |
|                |   PRIMARY KEY (id, host)                                |
|                | )                                                       |
|                | ENGINE=mito                                             |
|                | WITH(                                                   |
|                |   regions = 1,                                          |
|                |   write_buffer_size = '1.0KiB',                         |
|                |   ttl = '7days'                                         |
|                | )                                                       |
+----------------+---------------------------------------------------------+

DROP TABLE system_metrics;

Affected Rows: 1

tests/cases/standalone/show/show_create.sql (new file, 18 lines)
@@ -0,0 +1,18 @@
CREATE TABLE system_metrics (
    id INT UNSIGNED NULL,
    host STRING NULL,
    cpu DOUBLE NULL COMMENT 'cpu',
    disk FLOAT NULL,
    ts TIMESTAMP NOT NULL DEFAULT current_timestamp(),
    TIME INDEX (ts),
    PRIMARY KEY (id, host)
)
ENGINE=mito
WITH(
    ttl = '7d',
    write_buffer_size = 1024
);

SHOW CREATE TABLE system_metrics;

DROP TABLE system_metrics;