From 272f649b22329fbbd3a1f2ce43c6ccd417fad67b Mon Sep 17 00:00:00 2001
From: dennis zhuang
Date: Fri, 18 Aug 2023 18:09:11 +0800
Subject: [PATCH] fix: some TODO in sqlness cases and refactor meta-client error (#2207)

* fix: some TODO in sqlness cases and refactor meta-client error

* fix: delete tests/cases/standalone/alter/drop_col_not_null_next.output
---
 src/datanode/src/error.rs | 7 +--
 src/frontend/src/error.rs | 7 +--
 src/meta-client/src/client/ddl.rs | 2 +-
 src/meta-client/src/client/heartbeat.rs | 6 +--
 src/meta-client/src/client/lock.rs | 4 +-
 src/meta-client/src/client/router.rs | 2 +-
 src/meta-client/src/client/store.rs | 31 +++--------
 src/meta-client/src/error.rs | 38 +++++++++++---
 src/meta-srv/src/error.rs | 10 ++--
 src/meta-srv/src/procedure/alter_table.rs | 6 ++-
 src/servers/src/error.rs | 52 ++++++++++++-------
 .../distributed/alter/rename_table.result | 42 ---------------
 .../cases/distributed/alter/rename_table.sql | 16 ------
 .../{ => common}/alter/rename_table.result | 0
 .../{ => common}/alter/rename_table.sql | 0
 .../common/order/nulls_first.result | 24 +++++++--
 .../standalone/common/order/nulls_first.sql | 6 +--
 .../order/order_variable_size_payload.result | 0
 .../order/order_variable_size_payload.sql | 2 -
 19 files changed, 114 insertions(+), 141 deletions(-)
 delete mode 100644 tests/cases/distributed/alter/rename_table.result
 delete mode 100644 tests/cases/distributed/alter/rename_table.sql
 rename tests/cases/standalone/{ => common}/alter/rename_table.result (100%)
 rename tests/cases/standalone/{ => common}/alter/rename_table.sql (100%)
 rename tests/cases/standalone/{ => common}/order/order_variable_size_payload.result (100%)
 rename tests/cases/standalone/{ => common}/order/order_variable_size_payload.sql (97%)

diff --git a/src/datanode/src/error.rs b/src/datanode/src/error.rs
index 651d20d303..7cd3a6eca9 100644
--- a/src/datanode/src/error.rs
+++ b/src/datanode/src/error.rs
@@ -18,6 +18,7 @@ use common_error::ext::{BoxedError, ErrorExt};
 use common_error::status_code::StatusCode;
 use common_procedure::ProcedureId;
 use serde_json::error::Error as JsonError;
+use servers::define_into_tonic_status;
 use snafu::{Location, Snafu};
 use store_api::storage::{RegionId, RegionNumber};
 use table::error::Error as TableError;
@@ -644,11 +645,7 @@ impl ErrorExt for Error {
     }
 }
 
-impl From<Error> for tonic::Status {
-    fn from(err: Error) -> Self {
-        tonic::Status::from_error(Box::new(err))
-    }
-}
+define_into_tonic_status!(Error);
 
 #[cfg(test)]
 mod tests {
diff --git a/src/frontend/src/error.rs b/src/frontend/src/error.rs
index 07c393db32..efc3ae912a 100644
--- a/src/frontend/src/error.rs
+++ b/src/frontend/src/error.rs
@@ -20,6 +20,7 @@ use common_error::status_code::StatusCode;
 use datafusion::parquet;
 use datatypes::arrow::error::ArrowError;
 use datatypes::value::Value;
+use servers::define_into_tonic_status;
 use snafu::{Location, Snafu};
 use store_api::storage::RegionNumber;
 
@@ -734,8 +735,4 @@ impl ErrorExt for Error {
     }
 }
 
-impl From<Error> for tonic::Status {
-    fn from(err: Error) -> Self {
-        tonic::Status::new(tonic::Code::Internal, err.to_string())
-    }
-}
+define_into_tonic_status!(Error);
diff --git a/src/meta-client/src/client/ddl.rs b/src/meta-client/src/client/ddl.rs
index e006480183..0714f6252a 100644
--- a/src/meta-client/src/client/ddl.rs
+++ b/src/meta-client/src/client/ddl.rs
@@ -136,7 +136,7 @@ impl Inner {
         let res = client
             .submit_ddl_task(req.clone())
             .await
-            .context(error::TonicStatusSnafu)?;
+            .map_err(error::Error::from)?;
 
         let res = res.into_inner();
 
diff --git a/src/meta-client/src/client/heartbeat.rs b/src/meta-client/src/client/heartbeat.rs
index acc39af233..c1dabfdcf0 100644
--- a/src/meta-client/src/client/heartbeat.rs
+++ b/src/meta-client/src/client/heartbeat.rs
@@ -78,7 +78,7 @@ impl HeartbeatStream {
     /// Fetch the next message from this stream.
     #[inline]
     pub async fn message(&mut self) -> Result<Option<HeartbeatResponse>> {
-        let res = self.stream.message().await.context(error::TonicStatusSnafu);
+        let res = self.stream.message().await.map_err(error::Error::from);
         if let Ok(Some(heartbeat)) = &res {
             util::check_response_header(heartbeat.header.as_ref())
                 .context(InvalidResponseHeaderSnafu)?;
@@ -214,13 +214,13 @@ impl Inner {
         let mut stream = leader
             .heartbeat(receiver)
             .await
-            .context(error::TonicStatusSnafu)?
+            .map_err(error::Error::from)?
             .into_inner();
 
         let res = stream
             .message()
             .await
-            .context(error::TonicStatusSnafu)?
+            .map_err(error::Error::from)?
             .context(error::CreateHeartbeatStreamSnafu)?;
 
         info!("Success to create heartbeat stream to server: {:#?}", res);
diff --git a/src/meta-client/src/client/lock.rs b/src/meta-client/src/client/lock.rs
index 68098255a7..8f0912e567 100644
--- a/src/meta-client/src/client/lock.rs
+++ b/src/meta-client/src/client/lock.rs
@@ -128,7 +128,7 @@ impl Inner {
    async fn lock(&self, mut req: LockRequest) -> Result<LockResponse> {
         let mut client = self.random_client()?;
         req.set_header(self.id, self.role);
-        let res = client.lock(req).await.context(error::TonicStatusSnafu)?;
+        let res = client.lock(req).await.map_err(error::Error::from)?;
 
         Ok(res.into_inner())
     }
@@ -136,7 +136,7 @@ impl Inner {
     async fn unlock(&self, mut req: UnlockRequest) -> Result<UnlockResponse> {
         let mut client = self.random_client()?;
         req.set_header(self.id, self.role);
-        let res = client.unlock(req).await.context(error::TonicStatusSnafu)?;
+        let res = client.unlock(req).await.map_err(error::Error::from)?;
 
         Ok(res.into_inner())
     }
diff --git a/src/meta-client/src/client/router.rs b/src/meta-client/src/client/router.rs
index 66fd94f684..5f4ce1f67b 100644
--- a/src/meta-client/src/client/router.rs
+++ b/src/meta-client/src/client/router.rs
@@ -98,7 +98,7 @@ impl Inner {
     async fn route(&self, mut req: RouteRequest) -> Result<RouteResponse> {
         let mut client = self.random_client()?;
         req.set_header(self.id, self.role);
-        let res = client.route(req).await.context(error::TonicStatusSnafu)?;
+        let res = client.route(req).await.map_err(error::Error::from)?;
 
         Ok(res.into_inner())
     }
diff --git a/src/meta-client/src/client/store.rs b/src/meta-client/src/client/store.rs
index 1164f36633..15a93886eb 100644
--- a/src/meta-client/src/client/store.rs
+++ b/src/meta-client/src/client/store.rs
@@ -141,7 +141,7 @@ impl Inner {
     async fn range(&self, mut req: RangeRequest) -> Result<RangeResponse> {
         let mut client = self.random_client()?;
         req.set_header(self.id, self.role);
-        let res = client.range(req).await.context(error::TonicStatusSnafu)?;
+        let res = client.range(req).await.map_err(error::Error::from)?;
 
         Ok(res.into_inner())
     }
@@ -149,7 +149,7 @@ impl Inner {
     async fn put(&self, mut req: PutRequest) -> Result<PutResponse> {
         let mut client = self.random_client()?;
         req.set_header(self.id, self.role);
-        let res = client.put(req).await.context(error::TonicStatusSnafu)?;
+        let res = client.put(req).await.map_err(error::Error::from)?;
 
         Ok(res.into_inner())
     }
@@ -158,10 +158,7 @@ impl Inner {
         let mut client = self.random_client()?;
         req.set_header(self.id, self.role);
 
-        let res = client
-            .batch_get(req)
-            .await
-            .context(error::TonicStatusSnafu)?;
+        let res = client.batch_get(req).await.map_err(error::Error::from)?;
 
         Ok(res.into_inner())
     }
@@ -169,10 +166,7 @@ impl Inner {
     async fn batch_put(&self, mut req: BatchPutRequest) -> Result<BatchPutResponse> {
         let mut client = self.random_client()?;
         req.set_header(self.id, self.role);
-        let res = client
-            .batch_put(req)
-            .await
-            .context(error::TonicStatusSnafu)?;
+        let res = client.batch_put(req).await.map_err(error::Error::from)?;
 
         Ok(res.into_inner())
     }
@@ -180,10 +174,7 @@ impl Inner {
     async fn batch_delete(&self, mut req: BatchDeleteRequest) -> Result<BatchDeleteResponse> {
         let mut client = self.random_client()?;
         req.set_header(self.id, self.role);
-        let res = client
-            .batch_delete(req)
-            .await
-            .context(error::TonicStatusSnafu)?;
+        let res = client.batch_delete(req).await.map_err(error::Error::from)?;
 
         Ok(res.into_inner())
     }
@@ -197,7 +188,7 @@ impl Inner {
         let res = client
             .compare_and_put(req)
             .await
-            .context(error::TonicStatusSnafu)?;
+            .map_err(error::Error::from)?;
 
         Ok(res.into_inner())
     }
@@ -205,10 +196,7 @@ impl Inner {
     async fn delete_range(&self, mut req: DeleteRangeRequest) -> Result<DeleteRangeResponse> {
         let mut client = self.random_client()?;
         req.set_header(self.id, self.role);
-        let res = client
-            .delete_range(req)
-            .await
-            .context(error::TonicStatusSnafu)?;
+        let res = client.delete_range(req).await.map_err(error::Error::from)?;
 
         Ok(res.into_inner())
     }
@@ -216,10 +204,7 @@ impl Inner {
     async fn move_value(&self, mut req: MoveValueRequest) -> Result<MoveValueResponse> {
         let mut client = self.random_client()?;
         req.set_header(self.id, self.role);
-        let res = client
-            .move_value(req)
-            .await
-            .context(error::TonicStatusSnafu)?;
+        let res = client.move_value(req).await.map_err(error::Error::from)?;
 
         Ok(res.into_inner())
     }
diff --git a/src/meta-client/src/error.rs b/src/meta-client/src/error.rs
index 786f2fe601..b33496c070 100644
--- a/src/meta-client/src/error.rs
+++ b/src/meta-client/src/error.rs
@@ -14,7 +14,9 @@
 
 use common_error::ext::ErrorExt;
 use common_error::status_code::StatusCode;
+use common_error::{GREPTIME_ERROR_CODE, GREPTIME_ERROR_MSG};
 use snafu::{Location, Snafu};
+use tonic::Status;
 
 #[derive(Debug, Snafu)]
 #[snafu(visibility(pub))]
@@ -22,11 +24,8 @@ pub enum Error {
     #[snafu(display("Illegal GRPC client state: {}", err_msg))]
     IllegalGrpcClientState { err_msg: String, location: Location },
 
-    #[snafu(display("Tonic internal error, source: {}", source))]
-    TonicStatus {
-        source: tonic::Status,
-        location: Location,
-    },
+    #[snafu(display("{}", msg))]
+    MetaServer { code: StatusCode, msg: String },
 
     #[snafu(display("Failed to ask leader from all endpoints"))]
     AskLeader { location: Location },
@@ -79,7 +78,6 @@ impl ErrorExt for Error {
     fn status_code(&self) -> StatusCode {
         match self {
             Error::IllegalGrpcClientState { .. }
-            | Error::TonicStatus { .. }
             | Error::AskLeader { .. }
             | Error::NoLeader { .. }
             | Error::NotStarted { .. }
@@ -87,9 +85,37 @@ impl ErrorExt for Error {
             | Error::CreateHeartbeatStream { .. }
             | Error::CreateChannel { .. } => StatusCode::Internal,
 
+            Error::MetaServer { code, .. } => *code,
+
             Error::InvalidResponseHeader { source, .. }
             | Error::ConvertMetaRequest { source, .. }
             | Error::ConvertMetaResponse { source, .. } => source.status_code(),
         }
     }
 }
+
+// FIXME(dennis): partial duplicated with src/client/src/error.rs
+impl From<Status> for Error {
+    fn from(e: Status) -> Self {
+        fn get_metadata_value(s: &Status, key: &str) -> Option<String> {
+            s.metadata()
+                .get(key)
+                .and_then(|v| String::from_utf8(v.as_bytes().to_vec()).ok())
+        }
+
+        let code = get_metadata_value(&e, GREPTIME_ERROR_CODE)
+            .and_then(|s| {
+                if let Ok(code) = s.parse::<u32>() {
+                    StatusCode::from_u32(code)
+                } else {
+                    None
+                }
+            })
+            .unwrap_or(StatusCode::Internal);
+
+        let msg =
+            get_metadata_value(&e, GREPTIME_ERROR_MSG).unwrap_or_else(|| e.message().to_string());
+
+        Self::MetaServer { code, msg }
+    }
+}
diff --git a/src/meta-srv/src/error.rs b/src/meta-srv/src/error.rs
index 43f44cc03a..eaafae1a40 100644
--- a/src/meta-srv/src/error.rs
+++ b/src/meta-srv/src/error.rs
@@ -16,10 +16,10 @@ use common_error::ext::{BoxedError, ErrorExt};
 use common_error::status_code::StatusCode;
 use common_meta::peer::Peer;
 use common_runtime::JoinError;
+use servers::define_into_tonic_status;
 use snafu::{Location, Snafu};
 use tokio::sync::mpsc::error::SendError;
 use tonic::codegen::http;
-use tonic::Code;
 
 use crate::pubsub::Message;
 
@@ -520,11 +520,7 @@ pub enum Error {
 
 pub type Result<T> = std::result::Result<T, Error>;
 
-impl From<Error> for tonic::Status {
-    fn from(err: Error) -> Self {
-        tonic::Status::new(Code::Internal, err.to_string())
-    }
-}
+define_into_tonic_status!(Error);
 
 impl ErrorExt for Error {
     fn status_code(&self) -> StatusCode {
@@ -550,7 +546,6 @@ impl ErrorExt for Error {
             | Error::SendShutdownSignal { .. }
             | Error::ParseAddr { .. }
             | Error::SchemaAlreadyExists { .. }
-            | Error::TableAlreadyExists { .. }
             | Error::PusherNotFound { .. }
             | Error::PushMessage { .. }
             | Error::MailboxClosed { .. }
@@ -564,6 +559,7 @@ impl ErrorExt for Error {
             | Error::PublishMessage { .. }
             | Error::Join { .. }
             | Error::Unsupported { .. } => StatusCode::Internal,
+            Error::TableAlreadyExists { .. } => StatusCode::TableAlreadyExists,
             Error::EmptyKey { .. }
             | Error::MissingRequiredParameter { .. }
             | Error::MissingRequestHeader { .. }
diff --git a/src/meta-srv/src/procedure/alter_table.rs b/src/meta-srv/src/procedure/alter_table.rs
index bd891e4bbf..8e5df33181 100644
--- a/src/meta-srv/src/procedure/alter_table.rs
+++ b/src/meta-srv/src/procedure/alter_table.rs
@@ -93,7 +93,11 @@ impl AlterTableProcedure {
             ensure!(
                 !exist,
                 error::TableAlreadyExistsSnafu {
-                    table_name: request.table_ref().to_string()
+                    table_name: common_catalog::format_full_table_name(
+                        &request.catalog_name,
+                        &request.schema_name,
+                        new_table_name,
+                    ),
                 }
             )
         }
diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs
index ed930a0927..52c3842e6d 100644
--- a/src/servers/src/error.rs
+++ b/src/servers/src/error.rs
@@ -22,14 +22,11 @@ use base64::DecodeError;
 use catalog;
 use common_error::ext::{BoxedError, ErrorExt};
 use common_error::status_code::StatusCode;
-use common_error::{GREPTIME_ERROR_CODE, GREPTIME_ERROR_MSG};
 use common_telemetry::logging;
 use datatypes::prelude::ConcreteDataType;
 use query::parser::PromQuery;
 use serde_json::json;
-use snafu::{ErrorCompat, Location, Snafu};
-use tonic::codegen::http::{HeaderMap, HeaderValue};
-use tonic::metadata::MetadataMap;
+use snafu::{Location, Snafu};
 use tonic::Code;
 
 #[derive(Debug, Snafu)]
@@ -429,7 +426,7 @@ impl ErrorExt for Error {
     }
 }
 /// Returns the tonic [Code] of a [StatusCode].
-fn status_to_tonic_code(status_code: StatusCode) -> Code {
+pub fn status_to_tonic_code(status_code: StatusCode) -> Code {
     match status_code {
         StatusCode::Success => Code::Ok,
         StatusCode::Unknown => Code::Unknown,
@@ -455,24 +452,41 @@ fn status_to_tonic_code(status_code: StatusCode) -> Code {
     }
 }
 
-impl From<Error> for tonic::Status {
-    fn from(err: Error) -> Self {
-        let mut headers = HeaderMap::<HeaderValue>::with_capacity(2);
+#[macro_export]
+macro_rules! define_into_tonic_status {
+    ($Error: ty) => {
+        impl From<$Error> for tonic::Status {
+            fn from(err: $Error) -> Self {
+                use common_error::{GREPTIME_ERROR_CODE, GREPTIME_ERROR_MSG};
+                use snafu::ErrorCompat;
+                use tonic::codegen::http::{HeaderMap, HeaderValue};
+                use tonic::metadata::MetadataMap;
 
-        // If either of the status_code or error msg cannot convert to valid HTTP header value
-        // (which is a very rare case), just ignore. Client will use Tonic status code and message.
-        let status_code = err.status_code();
-        headers.insert(GREPTIME_ERROR_CODE, HeaderValue::from(status_code as u32));
-        let root_error = err.iter_chain().last().unwrap();
-        if let Ok(err_msg) = HeaderValue::from_bytes(root_error.to_string().as_bytes()) {
-            let _ = headers.insert(GREPTIME_ERROR_MSG, err_msg);
+                let mut headers = HeaderMap::<HeaderValue>::with_capacity(2);
+
+                // If either of the status_code or error msg cannot convert to valid HTTP header value
+                // (which is a very rare case), just ignore. Client will use Tonic status code and message.
+                let status_code = err.status_code();
+                headers.insert(GREPTIME_ERROR_CODE, HeaderValue::from(status_code as u32));
+                let root_error = err.iter_chain().last().unwrap();
+
+                if let Ok(err_msg) = HeaderValue::from_bytes(root_error.to_string().as_bytes()) {
+                    let _ = headers.insert(GREPTIME_ERROR_MSG, err_msg);
+                }
+
+                let metadata = MetadataMap::from_headers(headers);
+                tonic::Status::with_metadata(
+                    $crate::error::status_to_tonic_code(status_code),
+                    err.to_string(),
+                    metadata,
+                )
+            }
         }
-
-        let metadata = MetadataMap::from_headers(headers);
-        tonic::Status::with_metadata(status_to_tonic_code(status_code), err.to_string(), metadata)
-    }
+    };
 }
 
+define_into_tonic_status!(Error);
+
 impl From<std::io::Error> for Error {
     fn from(e: std::io::Error) -> Self {
         Error::InternalIo { source: e }
diff --git a/tests/cases/distributed/alter/rename_table.result b/tests/cases/distributed/alter/rename_table.result
deleted file mode 100644
index 6e703c1010..0000000000
--- a/tests/cases/distributed/alter/rename_table.result
+++ /dev/null
@@ -1,42 +0,0 @@
-CREATE TABLE t(i INTEGER, j BIGINT TIME INDEX);
-
-Affected Rows: 0
-
-DESC TABLE t;
-
-+-------+-------+------+---------+---------------+
-| Field | Type  | Null | Default | Semantic Type |
-+-------+-------+------+---------+---------------+
-| i     | Int32 | YES  |         | FIELD         |
-| j     | Int64 | NO   |         | TIME INDEX    |
-+-------+-------+------+---------+---------------+
-
-INSERT INTO TABLE t VALUES (1, 1), (3, 3), (NULL, 4);
-
-Affected Rows: 3
-
-SELECT * from t;
-
-+---+---+
-| i | j |
-+---+---+
-| 1 | 1 |
-| 3 | 3 |
-|   | 4 |
-+---+---+
-
--- TODO(LFC): Port test cases from standalone env when distribute rename table is implemented (#723).
-ALTER TABLE t RENAME new_table;
-
-Affected Rows: 0
-
-DROP TABLE t;
-
-Error: 4001(TableNotFound), Table not found: greptime.public.t
-
--- TODO: this clause should success
--- SQLNESS REPLACE details.*
-DROP TABLE new_table;
-
-Affected Rows: 1
-
diff --git a/tests/cases/distributed/alter/rename_table.sql b/tests/cases/distributed/alter/rename_table.sql
deleted file mode 100644
index da9f40bfe6..0000000000
--- a/tests/cases/distributed/alter/rename_table.sql
+++ /dev/null
@@ -1,16 +0,0 @@
-CREATE TABLE t(i INTEGER, j BIGINT TIME INDEX);
-
-DESC TABLE t;
-
-INSERT INTO TABLE t VALUES (1, 1), (3, 3), (NULL, 4);
-
-SELECT * from t;
-
--- TODO(LFC): Port test cases from standalone env when distribute rename table is implemented (#723).
-ALTER TABLE t RENAME new_table;
-
-DROP TABLE t;
-
--- TODO: this clause should success
--- SQLNESS REPLACE details.*
-DROP TABLE new_table;
diff --git a/tests/cases/standalone/alter/rename_table.result b/tests/cases/standalone/common/alter/rename_table.result
similarity index 100%
rename from tests/cases/standalone/alter/rename_table.result
rename to tests/cases/standalone/common/alter/rename_table.result
diff --git a/tests/cases/standalone/alter/rename_table.sql b/tests/cases/standalone/common/alter/rename_table.sql
similarity index 100%
rename from tests/cases/standalone/alter/rename_table.sql
rename to tests/cases/standalone/common/alter/rename_table.sql
diff --git a/tests/cases/standalone/common/order/nulls_first.result b/tests/cases/standalone/common/order/nulls_first.result
index 1bab6062f9..d2339ff09b 100644
--- a/tests/cases/standalone/common/order/nulls_first.result
+++ b/tests/cases/standalone/common/order/nulls_first.result
@@ -36,10 +36,26 @@ SELECT * FROM test ORDER BY i NULLS LAST, j NULLS FIRST;
 |   | 1 | 2 |
 +---+---+---+
 
--- TODO(ruihang): The following two SQL will fail under distributed mode with error
--- Error: 1003(Internal), status: Internal, message: "Failed to collect recordbatch, source: Failed to poll stream, source: Arrow error: Invalid argument error: batches[0] schema is different with argument schema.\n batches[0] schema: Schema { fields: [Field { name: \"i\", data_type: Int32, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, Field { name: \"j\", data_type: Int32, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, Field { name: \"t\", data_type: Int64, nullable: false, dict_id: 0, dict_is_ordered: false, metadata: {\"greptime:time_index\": \"true\"} }], metadata: {\"greptime:version\": \"0\"} },\n argument schema: Schema { fields: [Field { name: \"i\", data_type: Int32, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, Field { name: \"j\", data_type: Int32, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, Field { name: \"t\", data_type: Int64, nullable: false, dict_id: 0, dict_is_ordered: false, metadata: {\"greptime:time_index\": \"true\"} }], metadata: {} }\n ", details: [], metadata: MetadataMap { headers: {"inner_error_code": "Internal"} }
--- SELECT i, j, row_number() OVER (PARTITION BY i ORDER BY j NULLS FIRST) FROM test ORDER BY i NULLS FIRST, j NULLS FIRST;
--- SELECT i, j, row_number() OVER (PARTITION BY i ORDER BY j NULLS LAST) FROM test ORDER BY i NULLS FIRST, j NULLS FIRST;
+SELECT i, j, row_number() OVER (PARTITION BY i ORDER BY j NULLS FIRST) FROM test ORDER BY i NULLS FIRST, j NULLS FIRST;
+
++---+---+------------------------------------------------------------------------------------------------------------------------+
+| i | j | ROW_NUMBER() PARTITION BY [test.i] ORDER BY [test.j ASC NULLS FIRST] RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW |
++---+---+------------------------------------------------------------------------------------------------------------------------+
+|   | 1 | 1 |
+| 1 |   | 1 |
+| 1 | 1 | 2 |
++---+---+------------------------------------------------------------------------------------------------------------------------+
+
+SELECT i, j, row_number() OVER (PARTITION BY i ORDER BY j NULLS LAST) FROM test ORDER BY i NULLS FIRST, j NULLS FIRST;
+
++---+---+-----------------------------------------------------------------------------------------------------------------------+
+| i | j | ROW_NUMBER() PARTITION BY [test.i] ORDER BY [test.j ASC NULLS LAST] RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW |
++---+---+-----------------------------------------------------------------------------------------------------------------------+
+|   | 1 | 1 |
+| 1 |   | 2 |
+| 1 | 1 | 1 |
++---+---+-----------------------------------------------------------------------------------------------------------------------+
+
 SELECT * FROM test ORDER BY i NULLS FIRST, j NULLS LAST LIMIT 2;
 
 +---+---+---+
diff --git a/tests/cases/standalone/common/order/nulls_first.sql b/tests/cases/standalone/common/order/nulls_first.sql
index 7ddbee4b28..cd2a03283b 100644
--- a/tests/cases/standalone/common/order/nulls_first.sql
+++ b/tests/cases/standalone/common/order/nulls_first.sql
@@ -8,11 +8,9 @@ SELECT * FROM test ORDER BY i NULLS FIRST, j NULLS FIRST;
 
 SELECT * FROM test ORDER BY i NULLS LAST, j NULLS FIRST;
 
--- TODO(ruihang): The following two SQL will fail under distributed mode with error
--- Error: 1003(Internal), status: Internal, message: "Failed to collect recordbatch, source: Failed to poll stream, source: Arrow error: Invalid argument error: batches[0] schema is different with argument schema.\n batches[0] schema: Schema { fields: [Field { name: \"i\", data_type: Int32, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, Field { name: \"j\", data_type: Int32, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, Field { name: \"t\", data_type: Int64, nullable: false, dict_id: 0, dict_is_ordered: false, metadata: {\"greptime:time_index\": \"true\"} }], metadata: {\"greptime:version\": \"0\"} },\n argument schema: Schema { fields: [Field { name: \"i\", data_type: Int32, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, Field { name: \"j\", data_type: Int32, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, Field { name: \"t\", data_type: Int64, nullable: false, dict_id: 0, dict_is_ordered: false, metadata: {\"greptime:time_index\": \"true\"} }], metadata: {} }\n ", details: [], metadata: MetadataMap { headers: {"inner_error_code": "Internal"} }
--- SELECT i, j, row_number() OVER (PARTITION BY i ORDER BY j NULLS FIRST) FROM test ORDER BY i NULLS FIRST, j NULLS FIRST;
+SELECT i, j, row_number() OVER (PARTITION BY i ORDER BY j NULLS FIRST) FROM test ORDER BY i NULLS FIRST, j NULLS FIRST;
 
--- SELECT i, j, row_number() OVER (PARTITION BY i ORDER BY j NULLS LAST) FROM test ORDER BY i NULLS FIRST, j NULLS FIRST;
+SELECT i, j, row_number() OVER (PARTITION BY i ORDER BY j NULLS LAST) FROM test ORDER BY i NULLS FIRST, j NULLS FIRST;
 
 SELECT * FROM test ORDER BY i NULLS FIRST, j NULLS LAST LIMIT 2;
 
diff --git a/tests/cases/standalone/order/order_variable_size_payload.result b/tests/cases/standalone/common/order/order_variable_size_payload.result
similarity index 100%
rename from tests/cases/standalone/order/order_variable_size_payload.result
rename to tests/cases/standalone/common/order/order_variable_size_payload.result
diff --git a/tests/cases/standalone/order/order_variable_size_payload.sql b/tests/cases/standalone/common/order/order_variable_size_payload.sql
similarity index 97%
rename from tests/cases/standalone/order/order_variable_size_payload.sql
rename to tests/cases/standalone/common/order/order_variable_size_payload.sql
index 7e7942cdbe..fef4deeae8 100644
--- a/tests/cases/standalone/order/order_variable_size_payload.sql
+++ b/tests/cases/standalone/common/order/order_variable_size_payload.sql
@@ -125,5 +125,3 @@ DROP table test7;
 DROP table test8;
 
 DROP TABLE DirectReports;
-
--- TODO(LFC): Seems creating distributed table has some column schema related issues, look into "order_variable_size_payload" test case.
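Note on the error-code round-trip introduced above: the server side (`define_into_tonic_status!`) writes the GreptimeDB `StatusCode` and the root error message into the `tonic::Status` metadata under `GREPTIME_ERROR_CODE` / `GREPTIME_ERROR_MSG`, and the meta-client side (`impl From<Status> for Error`) reads them back, falling back to `StatusCode::Internal` when the metadata is absent. The snippet below is a minimal, self-contained sketch of that metadata mechanism, using only the tonic/http calls that appear in the patch. It is not part of the patch: the literal header key "x-greptime-err-code" is an illustrative stand-in for the `GREPTIME_ERROR_CODE` constant, and 4001 is used only because the deleted test output above shows it as the `TableNotFound` code.

    use tonic::codegen::http::{HeaderMap, HeaderValue};
    use tonic::metadata::MetadataMap;
    use tonic::{Code, Status};

    fn main() {
        // "Server" side: attach the original status code as gRPC metadata,
        // in the way define_into_tonic_status! does with GREPTIME_ERROR_CODE.
        let mut headers = HeaderMap::new();
        headers.insert("x-greptime-err-code", HeaderValue::from(4001u32));
        let status = Status::with_metadata(
            Code::NotFound,
            "Table not found: greptime.public.t",
            MetadataMap::from_headers(headers),
        );

        // "Client" side: recover the code from the metadata, in the way the new
        // From<Status> impl for the meta-client Error does.
        let recovered = status
            .metadata()
            .get("x-greptime-err-code")
            .and_then(|v| std::str::from_utf8(v.as_bytes()).ok())
            .and_then(|s| s.parse::<u32>().ok());
        assert_eq!(recovered, Some(4001));
    }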