From 2a3445c72c6822631ae7267bc91cf732abb791f2 Mon Sep 17 00:00:00 2001 From: Yingwen Date: Mon, 16 Jun 2025 20:15:38 +0800 Subject: [PATCH] fix: ignore missing columns and tables in PromQL (#6285) * fix: handle table/column not found in or Signed-off-by: evenyag * test: update result Signed-off-by: evenyag * test: drop table after test Signed-off-by: evenyag * test: fix test cases Signed-off-by: evenyag * fix: do not return table not found error in series_query Signed-off-by: evenyag --------- Signed-off-by: evenyag --- src/promql/src/extension_plan/empty_metric.rs | 6 +- src/query/src/promql/planner.rs | 144 ++++++++++++++---- src/servers/src/http/prometheus.rs | 86 +++++------ tests-integration/src/tests/promql_test.rs | 9 +- tests-integration/tests/http.rs | 13 +- .../common/promql/set_operation.result | 124 ++++++++++++++- .../common/promql/set_operation.sql | 62 +++++++- .../cases/standalone/common/tql/basic.result | 10 +- .../common/tql/case_sensitive.result | 6 +- 9 files changed, 369 insertions(+), 91 deletions(-) diff --git a/src/promql/src/extension_plan/empty_metric.rs b/src/promql/src/extension_plan/empty_metric.rs index 8aa10a251d..fa0a4e3f9a 100644 --- a/src/promql/src/extension_plan/empty_metric.rs +++ b/src/promql/src/extension_plan/empty_metric.rs @@ -259,7 +259,11 @@ impl ExecutionPlan for EmptyMetricExec { } fn statistics(&self) -> DataFusionResult { - let estimated_row_num = (self.end - self.start) as f64 / self.interval as f64; + let estimated_row_num = if self.end > self.start { + (self.end - self.start) as f64 / self.interval as f64 + } else { + 0.0 + }; let total_byte_size = estimated_row_num * std::mem::size_of::() as f64; Ok(Statistics { diff --git a/src/query/src/promql/planner.rs b/src/query/src/promql/planner.rs index f20e218ba6..66e0eacd4d 100644 --- a/src/query/src/promql/planner.rs +++ b/src/query/src/promql/planner.rs @@ -19,6 +19,8 @@ use std::time::UNIX_EPOCH; use arrow::datatypes::IntervalDayTime; use async_recursion::async_recursion; use catalog::table_source::DfTableSourceProvider; +use common_error::ext::ErrorExt; +use common_error::status_code::StatusCode; use common_query::prelude::GREPTIME_VALUE; use datafusion::common::DFSchemaRef; use datafusion::datasource::DefaultTableSource; @@ -216,6 +218,7 @@ impl PromPlanner { PromExpr::Call(expr) => self.prom_call_expr_to_plan(session_state, expr).await?, PromExpr::Extension(expr) => self.prom_ext_expr_to_plan(session_state, expr).await?, }; + Ok(res) } @@ -274,6 +277,7 @@ impl PromPlanner { } = aggr_expr; let input = self.prom_expr_to_plan(expr, session_state).await?; + match (*op).id() { token::T_TOPK | token::T_BOTTOMK => { self.prom_topk_bottomk_to_plan(aggr_expr, input).await @@ -677,7 +681,9 @@ impl PromPlanner { at: _, } = vector_selector; let matchers = self.preprocess_label_matchers(matchers, name)?; - self.setup_context().await?; + if let Some(empty_plan) = self.setup_context().await? { + return Ok(empty_plan); + } let normalize = self .selector_to_series_normalize_plan(offset, matchers, false) .await?; @@ -710,7 +716,9 @@ impl PromPlanner { .. } = vs; let matchers = self.preprocess_label_matchers(matchers, name)?; - self.setup_context().await?; + if let Some(empty_plan) = self.setup_context().await? 
{ + return Ok(empty_plan); + } ensure!(!range.is_zero(), ZeroRangeSelectorSnafu); let range_ms = range.as_millis() as _; @@ -1089,24 +1097,26 @@ impl PromPlanner { match modifier { None => { if update_ctx { - self.ctx.tag_columns = vec![]; + self.ctx.tag_columns.clear(); } Ok(vec![self.create_time_index_column_expr()?]) } Some(LabelModifier::Include(labels)) => { + if update_ctx { + self.ctx.tag_columns.clear(); + } let mut exprs = Vec::with_capacity(labels.labels.len()); for label in &labels.labels { // nonexistence label will be ignored if let Ok(field) = input_schema.field_with_unqualified_name(label) { exprs.push(DfExpr::Column(Column::from(field.name()))); + + if update_ctx { + // update the tag columns in context + self.ctx.tag_columns.push(label.clone()); + } } } - - if update_ctx { - // change the tag columns in context - self.ctx.tag_columns.clone_from(&labels.labels); - } - // add timestamp column exprs.push(self.create_time_index_column_expr()?); @@ -1337,13 +1347,18 @@ impl PromPlanner { } /// Setup [PromPlannerContext]'s state fields. - async fn setup_context(&mut self) -> Result<()> { + /// + /// Returns a logical plan for an empty metric. + async fn setup_context(&mut self) -> Result> { let table_ref = self.table_ref()?; - let table = self - .table_provider - .resolve_table(table_ref.clone()) - .await - .context(CatalogSnafu)? + let table = match self.table_provider.resolve_table(table_ref.clone()).await { + Err(e) if e.status_code() == StatusCode::TableNotFound => { + let plan = self.setup_context_for_empty_metric()?; + return Ok(Some(plan)); + } + res => res.context(CatalogSnafu)?, + }; + let table = table .as_any() .downcast_ref::() .context(UnknownTableSnafu)? @@ -1386,7 +1401,32 @@ impl PromPlanner { .collect(); self.ctx.tag_columns = tags; - Ok(()) + Ok(None) + } + + /// Setup [PromPlannerContext]'s state fields for a non existent table + /// without any rows. + fn setup_context_for_empty_metric(&mut self) -> Result { + self.ctx.time_index_column = Some(SPECIAL_TIME_FUNCTION.to_string()); + self.ctx.reset_table_name_and_schema(); + self.ctx.tag_columns = vec![]; + self.ctx.field_columns = vec![DEFAULT_FIELD_COLUMN.to_string()]; + + // The table doesn't have any data, so we set start to 0 and end to -1. + let plan = LogicalPlan::Extension(Extension { + node: Arc::new( + EmptyMetric::new( + 0, + -1, + self.ctx.interval, + SPECIAL_TIME_FUNCTION.to_string(), + DEFAULT_FIELD_COLUMN.to_string(), + Some(DfExpr::Literal(ScalarValue::Float64(Some(0.0)))), + ) + .context(DataFusionPlanningSnafu)?, + ), + }); + Ok(plan) } // TODO(ruihang): insert column expr @@ -2767,11 +2807,12 @@ impl PromPlanner { col: right_field_col.to_string(), })? 
.cloned(); + // `skip(1)` to skip the time index column let right_proj_exprs_without_time_index = all_columns.iter().skip(1).map(|col| { // expr if col == left_field_col && left_field_col != right_field_col { - // alias field in right side if necessary to handle different field name + // qualify field in right side if necessary to handle different field name DfExpr::Column(Column::new( right_qualifier_for_field.clone(), right_field_col, @@ -3828,10 +3869,10 @@ mod test { }; let case = r#" - sum(rate(sysstat{tenant_name=~"tenant1",cluster_name=~"cluster1"}[120s])) by (cluster_name,tenant_name) / + sum(rate(sysstat{tenant_name=~"tenant1",cluster_name=~"cluster1"}[120s])) by (cluster_name,tenant_name) / (sum(sysstat{tenant_name=~"tenant1",cluster_name=~"cluster1"}) by (cluster_name,tenant_name) * 100) or - 200 * sum(sysstat{tenant_name=~"tenant1",cluster_name=~"cluster1"}) by (cluster_name,tenant_name) / + 200 * sum(sysstat{tenant_name=~"tenant1",cluster_name=~"cluster1"}) by (cluster_name,tenant_name) / sum(sysstat{tenant_name=~"tenant1",cluster_name=~"cluster1"}) by (cluster_name,tenant_name)"#; let table_provider = build_test_table_provider_with_fields( @@ -3844,15 +3885,15 @@ mod test { .await .unwrap(); - let case = r#"sum(delta(sysstat{tenant_name=~"sys",cluster_name=~"cluster1"}[2m])/120) by (cluster_name,tenant_name) / + let case = r#"sum(delta(sysstat{tenant_name=~"sys",cluster_name=~"cluster1"}[2m])/120) by (cluster_name,tenant_name) / (sum(delta(sysstat{tenant_name=~"sys",cluster_name=~"cluster1"}[2m])/120) by (cluster_name,tenant_name) *1000) + - sum(delta(sysstat{tenant_name=~"sys",cluster_name=~"cluster1"}[2m])/120) by (cluster_name,tenant_name) / - (sum(delta(sysstat{tenant_name=~"sys",cluster_name=~"cluster1"}[2m])/120) by (cluster_name,tenant_name) *1000) >= 0 - or - sum(delta(sysstat{tenant_name=~"sys",cluster_name=~"cluster1"}[2m])/120) by (cluster_name,tenant_name) / - (sum(delta(sysstat{tenant_name=~"sys",cluster_name=~"cluster1"}[2m])/120) by (cluster_name,tenant_name) *1000) >= 0 - or - sum(delta(sysstat{tenant_name=~"sys",cluster_name=~"cluster1"}[2m])/120) by (cluster_name,tenant_name) / + sum(delta(sysstat{tenant_name=~"sys",cluster_name=~"cluster1"}[2m])/120) by (cluster_name,tenant_name) / + (sum(delta(sysstat{tenant_name=~"sys",cluster_name=~"cluster1"}[2m])/120) by (cluster_name,tenant_name) *1000) >= 0 + or + sum(delta(sysstat{tenant_name=~"sys",cluster_name=~"cluster1"}[2m])/120) by (cluster_name,tenant_name) / + (sum(delta(sysstat{tenant_name=~"sys",cluster_name=~"cluster1"}[2m])/120) by (cluster_name,tenant_name) *1000) >= 0 + or + sum(delta(sysstat{tenant_name=~"sys",cluster_name=~"cluster1"}[2m])/120) by (cluster_name,tenant_name) / (sum(delta(sysstat{tenant_name=~"sys",cluster_name=~"cluster1"}[2m])/120) by (cluster_name,tenant_name) *1000) >= 0"#; let table_provider = build_test_table_provider_with_fields( &[(DEFAULT_SCHEMA_NAME.to_string(), "sysstat".to_string())], @@ -3864,9 +3905,9 @@ mod test { .await .unwrap(); - let case = r#"(sum(background_waitevent_cnt{tenant_name=~"sys",cluster_name=~"cluster1"}) by (cluster_name,tenant_name) + - sum(foreground_waitevent_cnt{tenant_name=~"sys",cluster_name=~"cluster1"}) by (cluster_name,tenant_name)) or - (sum(background_waitevent_cnt{tenant_name=~"sys",cluster_name=~"cluster1"}) by (cluster_name,tenant_name)) or + let case = r#"(sum(background_waitevent_cnt{tenant_name=~"sys",cluster_name=~"cluster1"}) by (cluster_name,tenant_name) + + sum(foreground_waitevent_cnt{tenant_name=~"sys",cluster_name=~"cluster1"}) 
by (cluster_name,tenant_name)) or + (sum(background_waitevent_cnt{tenant_name=~"sys",cluster_name=~"cluster1"}) by (cluster_name,tenant_name)) or (sum(foreground_waitevent_cnt{tenant_name=~"sys",cluster_name=~"cluster1"}) by (cluster_name,tenant_name))"#; let table_provider = build_test_table_provider_with_fields( &[ @@ -4519,4 +4560,47 @@ mod test { assert_eq!(plan.display_indent_schema().to_string(), expected); } + + #[tokio::test] + async fn test_or_not_exists_table_label() { + let mut eval_stmt = EvalStmt { + expr: PromExpr::NumberLiteral(NumberLiteral { val: 1.0 }), + start: UNIX_EPOCH, + end: UNIX_EPOCH + .checked_add(Duration::from_secs(100_000)) + .unwrap(), + interval: Duration::from_secs(5), + lookback_delta: Duration::from_secs(1), + }; + let case = r#"sum by (job, tag0, tag2) (metric_exists) or sum by (job, tag0, tag2) (metric_not_exists)"#; + + let prom_expr = parser::parse(case).unwrap(); + eval_stmt.expr = prom_expr; + let table_provider = build_test_table_provider_with_fields( + &[(DEFAULT_SCHEMA_NAME.to_string(), "metric_exists".to_string())], + &["job"], + ) + .await; + + let plan = PromPlanner::stmt_to_plan(table_provider, &eval_stmt, &build_session_state()) + .await + .unwrap(); + let expected = "UnionDistinctOn: on col=[[\"job\"]], ts_col=[greptime_timestamp] [greptime_timestamp:Timestamp(Millisecond, None), job:Utf8, sum(metric_exists.greptime_value):Float64;N]\ + \n SubqueryAlias: metric_exists [greptime_timestamp:Timestamp(Millisecond, None), job:Utf8, sum(metric_exists.greptime_value):Float64;N]\ + \n Projection: metric_exists.greptime_timestamp, metric_exists.job, sum(metric_exists.greptime_value) [greptime_timestamp:Timestamp(Millisecond, None), job:Utf8, sum(metric_exists.greptime_value):Float64;N]\ + \n Sort: metric_exists.job ASC NULLS LAST, metric_exists.greptime_timestamp ASC NULLS LAST [job:Utf8, greptime_timestamp:Timestamp(Millisecond, None), sum(metric_exists.greptime_value):Float64;N]\ + \n Aggregate: groupBy=[[metric_exists.job, metric_exists.greptime_timestamp]], aggr=[[sum(metric_exists.greptime_value)]] [job:Utf8, greptime_timestamp:Timestamp(Millisecond, None), sum(metric_exists.greptime_value):Float64;N]\ + \n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[greptime_timestamp] [job:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N]\ + \n PromSeriesDivide: tags=[\"job\"] [job:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N]\ + \n Sort: metric_exists.job ASC NULLS FIRST, metric_exists.greptime_timestamp ASC NULLS FIRST [job:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N]\ + \n Filter: metric_exists.greptime_timestamp >= TimestampMillisecond(-1000, None) AND metric_exists.greptime_timestamp <= TimestampMillisecond(100001000, None) [job:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N]\ + \n TableScan: metric_exists [job:Utf8, greptime_timestamp:Timestamp(Millisecond, None), greptime_value:Float64;N]\ + \n SubqueryAlias: [greptime_timestamp:Timestamp(Millisecond, None), job:Utf8;N, sum(.value):Float64;N]\ + \n Projection: .time AS greptime_timestamp, Utf8(NULL) AS job, sum(.value) [greptime_timestamp:Timestamp(Millisecond, None), job:Utf8;N, sum(.value):Float64;N]\ + \n Sort: .time ASC NULLS LAST [time:Timestamp(Millisecond, None), sum(.value):Float64;N]\ + \n Aggregate: groupBy=[[.time]], aggr=[[sum(.value)]] [time:Timestamp(Millisecond, None), sum(.value):Float64;N]\ + \n 
EmptyMetric: range=[0..-1], interval=[5000] [time:Timestamp(Millisecond, None), value:Float64;N]"; + + assert_eq!(plan.display_indent_schema().to_string(), expected); + } } diff --git a/src/servers/src/http/prometheus.rs b/src/servers/src/http/prometheus.rs index fc823f8e76..bad6b4b150 100644 --- a/src/servers/src/http/prometheus.rs +++ b/src/servers/src/http/prometheus.rs @@ -478,6 +478,30 @@ impl<'de> Deserialize<'de> for Matches { } } +/// Handles schema errors, transforming a Result into an Option. +/// - If the input is `Ok(v)`, returns `Some(v)` +/// - If the input is `Err(err)` and the error status code is `TableNotFound` or +/// `TableColumnNotFound`, returns `None` (ignoring these specific errors) +/// - If the input is `Err(err)` with any other error code, directly returns a +/// `PrometheusJsonResponse::error`. +macro_rules! handle_schema_err { + ($result:expr) => { + match $result { + Ok(v) => Some(v), + Err(err) => { + if err.status_code() == StatusCode::TableNotFound + || err.status_code() == StatusCode::TableColumnNotFound + { + // Prometheus won't report error if querying nonexist label and metric + None + } else { + return PrometheusJsonResponse::error(err.status_code(), err.output_msg()); + } + } + } + }; +} + #[axum_macros::debug_handler] #[tracing::instrument( skip_all, @@ -546,17 +570,10 @@ pub async fn labels_query( }; let result = handler.do_query(&prom_query, query_ctx.clone()).await; - if let Err(err) = + handle_schema_err!( retrieve_labels_name_from_query_result(result, &mut fetched_labels, &mut merge_map) .await - { - // Prometheus won't report error if querying nonexist label and metric - if err.status_code() != StatusCode::TableNotFound - && err.status_code() != StatusCode::TableColumnNotFound - { - return PrometheusJsonResponse::error(err.status_code(), err.output_msg()); - } - } + ); } // intersect `fetched_labels` with `labels` to filter out non-tag columns @@ -971,15 +988,10 @@ pub async fn label_values_query( table_names.sort_unstable(); return PrometheusJsonResponse::success(PrometheusResponse::LabelValues(table_names)); } else if label_name == FIELD_NAME_LABEL { - let field_columns = - match retrieve_field_names(&query_ctx, handler.catalog_manager(), params.matches.0) - .await - { - Ok(table_names) => table_names, - Err(e) => { - return PrometheusJsonResponse::error(e.status_code(), e.output_msg()); - } - }; + let field_columns = handle_schema_err!( + retrieve_field_names(&query_ctx, handler.catalog_manager(), params.matches.0).await + ) + .unwrap_or_default(); let mut field_columns = field_columns.into_iter().collect::>(); field_columns.sort_unstable(); return PrometheusJsonResponse::success(PrometheusResponse::LabelValues(field_columns)); @@ -1029,19 +1041,8 @@ pub async fn label_values_query( &query_ctx, ) .await; - - match result { - Ok(result) => { - label_values.extend(result.into_iter()); - } - Err(err) => { - // Prometheus won't report error if querying nonexist label and metric - if err.status_code() != StatusCode::TableNotFound - && err.status_code() != StatusCode::TableColumnNotFound - { - return PrometheusJsonResponse::error(err.status_code(), err.output_msg()); - } - } + if let Some(result) = handle_schema_err!(result) { + label_values.extend(result.into_iter()); } } @@ -1229,18 +1230,17 @@ pub async fn series_query( }; let result = handler.do_query(&prom_query, query_ctx.clone()).await; - if let Err(err) = retrieve_series_from_query_result( - result, - &mut series, - &query_ctx, - &table_name, - &handler.catalog_manager(), - &mut merge_map, 
- ) - .await - { - return PrometheusJsonResponse::error(err.status_code(), err.output_msg()); - } + handle_schema_err!( + retrieve_series_from_query_result( + result, + &mut series, + &query_ctx, + &table_name, + &handler.catalog_manager(), + &mut merge_map, + ) + .await + ); } let merge_map = merge_map .into_iter() diff --git a/tests-integration/src/tests/promql_test.rs b/tests-integration/src/tests/promql_test.rs index 4b8ac8a691..c8d1f6597a 100644 --- a/tests-integration/src/tests/promql_test.rs +++ b/tests-integration/src/tests/promql_test.rs @@ -638,8 +638,13 @@ async fn cross_schema_query(instance: Arc) { interval, lookback_delta, ) - .await; - assert!(query_output.is_err()); + .await + .unwrap(); + let empty_result = r#"+------+-------+ +| time | value | ++------+-------+ ++------+-------+"#; + check_unordered_output_stream(query_output, empty_result).await; let query_output = promql_query( ins.clone(), diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs index 0dfbcc7dcb..a42496d7d2 100644 --- a/tests-integration/tests/http.rs +++ b/tests-integration/tests/http.rs @@ -666,15 +666,10 @@ pub async fn test_prom_http_api(store_type: StorageType) { .header("Content-Type", "application/x-www-form-urlencoded") .send() .await; - assert_eq!(res.status(), StatusCode::BAD_REQUEST); - let prom_resp = res.json::().await; - assert_eq!(prom_resp.status, "error"); - assert!(prom_resp - .error_type - .is_some_and(|err| err.eq_ignore_ascii_case("TableNotFound"))); - assert!(prom_resp - .error - .is_some_and(|err| err.eq_ignore_ascii_case("Table not found: greptime.public.up"))); + assert_eq!(res.status(), StatusCode::OK); + // An empty array will be deserialized into PrometheusResponse::Labels. + // So here we compare the text directly. 
+ assert_eq!(res.text().await, r#"{"status":"success","data":[]}"#); // label values // should return error if there is no match[] diff --git a/tests/cases/standalone/common/promql/set_operation.result b/tests/cases/standalone/common/promql/set_operation.result index d5f6765978..71c9f4c791 100644 --- a/tests/cases/standalone/common/promql/set_operation.result +++ b/tests/cases/standalone/common/promql/set_operation.result @@ -12,7 +12,7 @@ create table http_requests ( Affected Rows: 0 -insert into http_requests values +insert into http_requests values (3000000, "api", "0", "production", 100), (3000000, "api", "1", "production", 200), (3000000, "api", "0", "canary", 300), @@ -718,3 +718,125 @@ drop table cache_miss_with_null_label; Affected Rows: 0 +-- Physical table for test +CREATE TABLE test_physical (greptime_timestamp timestamp time index, greptime_value double) engine=metric with ("physical_metric_table" = ""); + +Affected Rows: 0 + +CREATE TABLE IF NOT EXISTS node_network_transmit_bytes_total ( + cloud STRING NULL, + dest_port STRING NULL, + dest STRING NULL, + greptime_timestamp TIMESTAMP(3) NOT NULL, + greptime_value DOUBLE NULL, + host STRING NULL, + job STRING NULL, + node STRING NULL, + region STRING NULL, + src_port STRING NULL, + src STRING NULL, + src_namespace STRING NULL, + src_node STRING NULL, + src_pod STRING NULL, + az STRING NULL, + TIME INDEX (greptime_timestamp), + PRIMARY KEY (cloud, dest_port, dest, host, job, node, region, src_port, src, src_namespace, src_node, src_pod, az) +) +ENGINE=metric +WITH( + on_physical_table = 'test_physical' +); + +Affected Rows: 0 + +INSERT INTO node_network_transmit_bytes_total ( + cloud, dest_port, dest, greptime_timestamp, greptime_value, host, job, node, region, src_port, src, src_namespace, src_node, src_pod, az +) VALUES + ('cloud-1', '443', '10.0.0.2', 1000000, 1000, 'host-1', 'greptimedb', 'node-a', 'us-west', '8080', '10.0.0.1', 'namespace-1', 'node-1', 'pod-1', 'us-west-6'), + ('cloud-1', '443', '10.0.0.2', 1200000, 2000, 'host-1', 'greptimedb', 'node-a', 'us-west', '8080', '10.0.0.1', 'namespace-1', 'node-1', 'pod-1', 'us-west-6'), + ('cloud-1', '443', '10.0.0.3', 1000000, 1500, 'host-2', 'greptimedb', 'node-b', 'us-west', '8080', '10.0.0.1', 'namespace-1', 'node-2', 'pod-2', 'us-west-6'), + ('cloud-1', '443', '10.0.0.3', 1200000, 2500, 'host-2', 'greptimedb', 'node-b', 'us-west', '8080', '10.0.0.1', 'namespace-1', 'node-2', 'pod-2', 'us-west-6'), + ('cloud-2', '80', '10.0.0.5', 1000000, 800, 'host-3', 'greptimedb', 'node-c', 'us-west', '9000', '10.0.0.4', 'namespace-2', 'node-3', 'pod-3', 'us-west-6'), + ('cloud-2', '80', '10.0.0.5', 1200000, 1800, 'host-3', 'greptimedb', 'node-c', 'us-west', '9000', '10.0.0.4', 'namespace-2', 'node-3', 'pod-3', 'us-west-6'); + +Affected Rows: 6 + +-- Or with unknown label and metric. 
+-- SQLNESS SORT_RESULT 3 1 +tql eval(1000, 2000, '300s') unknown_metric or node_network_transmit_bytes_total; + ++---------------------+-----------+---------+----------+-----------+--------+------------+--------+---------+----------+---------------+----------+---------+----------+--------+ +| time | az | cloud | dest | dest_port | host | job | node | region | src | src_namespace | src_node | src_pod | src_port | value | ++---------------------+-----------+---------+----------+-----------+--------+------------+--------+---------+----------+---------------+----------+---------+----------+--------+ +| 1970-01-01T00:16:40 | us-west-6 | cloud-1 | 10.0.0.2 | 443 | host-1 | greptimedb | node-a | us-west | 10.0.0.1 | namespace-1 | node-1 | pod-1 | 8080 | 1000.0 | +| 1970-01-01T00:16:40 | us-west-6 | cloud-1 | 10.0.0.3 | 443 | host-2 | greptimedb | node-b | us-west | 10.0.0.1 | namespace-1 | node-2 | pod-2 | 8080 | 1500.0 | +| 1970-01-01T00:16:40 | us-west-6 | cloud-2 | 10.0.0.5 | 80 | host-3 | greptimedb | node-c | us-west | 10.0.0.4 | namespace-2 | node-3 | pod-3 | 9000 | 800.0 | +| 1970-01-01T00:21:40 | us-west-6 | cloud-1 | 10.0.0.2 | 443 | host-1 | greptimedb | node-a | us-west | 10.0.0.1 | namespace-1 | node-1 | pod-1 | 8080 | 2000.0 | +| 1970-01-01T00:21:40 | us-west-6 | cloud-1 | 10.0.0.3 | 443 | host-2 | greptimedb | node-b | us-west | 10.0.0.1 | namespace-1 | node-2 | pod-2 | 8080 | 2500.0 | +| 1970-01-01T00:21:40 | us-west-6 | cloud-2 | 10.0.0.5 | 80 | host-3 | greptimedb | node-c | us-west | 10.0.0.4 | namespace-2 | node-3 | pod-3 | 9000 | 1800.0 | ++---------------------+-----------+---------+----------+-----------+--------+------------+--------+---------+----------+---------------+----------+---------+----------+--------+ + +-- Or with unknown label and metric. +-- SQLNESS SORT_RESULT 3 1 +tql eval(1000, 2000, '300s') sum by (cloud, tag0, tag1) (node_network_transmit_bytes_total) or unknown_metric; + ++---------------------+---------+-------------------------------------------------------+ +| greptime_timestamp | cloud | sum(node_network_transmit_bytes_total.greptime_value) | ++---------------------+---------+-------------------------------------------------------+ +| 1970-01-01T00:16:40 | cloud-1 | 2500.0 | +| 1970-01-01T00:16:40 | cloud-2 | 800.0 | +| 1970-01-01T00:21:40 | cloud-1 | 4500.0 | +| 1970-01-01T00:21:40 | cloud-2 | 1800.0 | ++---------------------+---------+-------------------------------------------------------+ + +-- Or with unknown label and metric. 
+-- SQLNESS SORT_RESULT 3 1 +tql eval(1000, 2000, '300s') unknown_metric or unknown_metric1 or sum by (cloud, tag0, tag1) (node_network_transmit_bytes_total); + ++---------------------+---------+--------+ +| time | cloud | value | ++---------------------+---------+--------+ +| 1970-01-01T00:16:40 | cloud-1 | 2500.0 | +| 1970-01-01T00:16:40 | cloud-2 | 800.0 | +| 1970-01-01T00:21:40 | cloud-1 | 4500.0 | +| 1970-01-01T00:21:40 | cloud-2 | 1800.0 | ++---------------------+---------+--------+ + +-- SQLNESS SORT_RESULT 3 1 +tql eval(1000, 2000, '300s') sum by (cloud, tag0, tag1) (node_network_transmit_bytes_total) or sum by (cloud, tag0, tag1) (unknown_metric); + ++---------------------+---------+-------------------------------------------------------+ +| greptime_timestamp | cloud | sum(node_network_transmit_bytes_total.greptime_value) | ++---------------------+---------+-------------------------------------------------------+ +| 1970-01-01T00:16:40 | cloud-1 | 2500.0 | +| 1970-01-01T00:16:40 | cloud-2 | 800.0 | +| 1970-01-01T00:21:40 | cloud-1 | 4500.0 | +| 1970-01-01T00:21:40 | cloud-2 | 1800.0 | ++---------------------+---------+-------------------------------------------------------+ + +-- Or with unknown label dst_namespace. +-- SQLNESS SORT_RESULT 3 1 +tql eval(1000, 2000, '300s') sum by (src, src_pod, src_namespace, src_node, dest, dst_pod, dst_namespace, dst_node, cloud, region, az) (increase(node_network_transmit_bytes_total{src_node!="", job=~"greptimedb", region=~"us-west", az=~"us-west-6"}[900s])>0) or sum by (src, src_pod, src_namespace, src_node, dest, dst_pod, dst_namespace, dst_node, cloud, region, az) (increase(node_network_transmit_bytes_total{dst_node!="", job=~"greptimedb", region=~"us-west", az=~"us-west-6"}[900s])>0); + ++---------------------+-----------+---------+----------+---------+----------+---------------+----------+---------+----------------------------------------------------------------------------------------------+ +| greptime_timestamp | az | cloud | dest | region | src | src_namespace | src_node | src_pod | sum(prom_increase(greptime_timestamp_range,greptime_value,greptime_timestamp,Int64(900000))) | ++---------------------+-----------+---------+----------+---------+----------+---------------+----------+---------+----------------------------------------------------------------------------------------------+ +| 1970-01-01T00:21:40 | us-west-6 | cloud-1 | 10.0.0.2 | us-west | 10.0.0.1 | namespace-1 | node-1 | pod-1 | 2500.0 | +| 1970-01-01T00:21:40 | us-west-6 | cloud-1 | 10.0.0.3 | us-west | 10.0.0.1 | namespace-1 | node-2 | pod-2 | 2000.0 | +| 1970-01-01T00:21:40 | us-west-6 | cloud-2 | 10.0.0.5 | us-west | 10.0.0.4 | namespace-2 | node-3 | pod-3 | 2300.0 | +| 1970-01-01T00:26:40 | us-west-6 | cloud-1 | 10.0.0.2 | us-west | 10.0.0.1 | namespace-1 | node-1 | pod-1 | 2500.0 | +| 1970-01-01T00:26:40 | us-west-6 | cloud-1 | 10.0.0.3 | us-west | 10.0.0.1 | namespace-1 | node-2 | pod-2 | 2000.0 | +| 1970-01-01T00:26:40 | us-west-6 | cloud-2 | 10.0.0.5 | us-west | 10.0.0.4 | namespace-2 | node-3 | pod-3 | 2300.0 | +| 1970-01-01T00:31:40 | us-west-6 | cloud-1 | 10.0.0.2 | us-west | 10.0.0.1 | namespace-1 | node-1 | pod-1 | 1500.0 | +| 1970-01-01T00:31:40 | us-west-6 | cloud-1 | 10.0.0.3 | us-west | 10.0.0.1 | namespace-1 | node-2 | pod-2 | 1500.0 | +| 1970-01-01T00:31:40 | us-west-6 | cloud-2 | 10.0.0.5 | us-west | 10.0.0.4 | namespace-2 | node-3 | pod-3 | 1500.0 | 
++---------------------+-----------+---------+----------+---------+----------+---------------+----------+---------+----------------------------------------------------------------------------------------------+ + +DROP TABLE node_network_transmit_bytes_total; + +Affected Rows: 0 + +DROP TABLE test_physical; + +Affected Rows: 0 + diff --git a/tests/cases/standalone/common/promql/set_operation.sql b/tests/cases/standalone/common/promql/set_operation.sql index 856aa3ea13..c17109fb75 100644 --- a/tests/cases/standalone/common/promql/set_operation.sql +++ b/tests/cases/standalone/common/promql/set_operation.sql @@ -11,7 +11,7 @@ create table http_requests ( primary key (job, instance, g) ); -insert into http_requests values +insert into http_requests values (3000000, "api", "0", "production", 100), (3000000, "api", "1", "production", 200), (3000000, "api", "0", "canary", 300), @@ -336,3 +336,63 @@ tql eval (3, 4, '1s') cache_hit_with_null_label / on(job) (cache_miss_with_null_ drop table cache_hit_with_null_label; drop table cache_miss_with_null_label; + +-- Physical table for test +CREATE TABLE test_physical (greptime_timestamp timestamp time index, greptime_value double) engine=metric with ("physical_metric_table" = ""); + +CREATE TABLE IF NOT EXISTS node_network_transmit_bytes_total ( + cloud STRING NULL, + dest_port STRING NULL, + dest STRING NULL, + greptime_timestamp TIMESTAMP(3) NOT NULL, + greptime_value DOUBLE NULL, + host STRING NULL, + job STRING NULL, + node STRING NULL, + region STRING NULL, + src_port STRING NULL, + src STRING NULL, + src_namespace STRING NULL, + src_node STRING NULL, + src_pod STRING NULL, + az STRING NULL, + TIME INDEX (greptime_timestamp), + PRIMARY KEY (cloud, dest_port, dest, host, job, node, region, src_port, src, src_namespace, src_node, src_pod, az) +) +ENGINE=metric +WITH( + on_physical_table = 'test_physical' +); + +INSERT INTO node_network_transmit_bytes_total ( + cloud, dest_port, dest, greptime_timestamp, greptime_value, host, job, node, region, src_port, src, src_namespace, src_node, src_pod, az +) VALUES + ('cloud-1', '443', '10.0.0.2', 1000000, 1000, 'host-1', 'greptimedb', 'node-a', 'us-west', '8080', '10.0.0.1', 'namespace-1', 'node-1', 'pod-1', 'us-west-6'), + ('cloud-1', '443', '10.0.0.2', 1200000, 2000, 'host-1', 'greptimedb', 'node-a', 'us-west', '8080', '10.0.0.1', 'namespace-1', 'node-1', 'pod-1', 'us-west-6'), + ('cloud-1', '443', '10.0.0.3', 1000000, 1500, 'host-2', 'greptimedb', 'node-b', 'us-west', '8080', '10.0.0.1', 'namespace-1', 'node-2', 'pod-2', 'us-west-6'), + ('cloud-1', '443', '10.0.0.3', 1200000, 2500, 'host-2', 'greptimedb', 'node-b', 'us-west', '8080', '10.0.0.1', 'namespace-1', 'node-2', 'pod-2', 'us-west-6'), + ('cloud-2', '80', '10.0.0.5', 1000000, 800, 'host-3', 'greptimedb', 'node-c', 'us-west', '9000', '10.0.0.4', 'namespace-2', 'node-3', 'pod-3', 'us-west-6'), + ('cloud-2', '80', '10.0.0.5', 1200000, 1800, 'host-3', 'greptimedb', 'node-c', 'us-west', '9000', '10.0.0.4', 'namespace-2', 'node-3', 'pod-3', 'us-west-6'); + +-- Or with unknown label and metric. +-- SQLNESS SORT_RESULT 3 1 +tql eval(1000, 2000, '300s') unknown_metric or node_network_transmit_bytes_total; + +-- Or with unknown label and metric. +-- SQLNESS SORT_RESULT 3 1 +tql eval(1000, 2000, '300s') sum by (cloud, tag0, tag1) (node_network_transmit_bytes_total) or unknown_metric; + +-- Or with unknown label and metric. 
+-- SQLNESS SORT_RESULT 3 1 +tql eval(1000, 2000, '300s') unknown_metric or unknown_metric1 or sum by (cloud, tag0, tag1) (node_network_transmit_bytes_total); + +-- SQLNESS SORT_RESULT 3 1 +tql eval(1000, 2000, '300s') sum by (cloud, tag0, tag1) (node_network_transmit_bytes_total) or sum by (cloud, tag0, tag1) (unknown_metric); + +-- Or with unknown label dst_namespace. +-- SQLNESS SORT_RESULT 3 1 +tql eval(1000, 2000, '300s') sum by (src, src_pod, src_namespace, src_node, dest, dst_pod, dst_namespace, dst_node, cloud, region, az) (increase(node_network_transmit_bytes_total{src_node!="", job=~"greptimedb", region=~"us-west", az=~"us-west-6"}[900s])>0) or sum by (src, src_pod, src_namespace, src_node, dest, dst_pod, dst_namespace, dst_node, cloud, region, az) (increase(node_network_transmit_bytes_total{dst_node!="", job=~"greptimedb", region=~"us-west", az=~"us-west-6"}[900s])>0); + +DROP TABLE node_network_transmit_bytes_total; + +DROP TABLE test_physical; diff --git a/tests/cases/standalone/common/tql/basic.result b/tests/cases/standalone/common/tql/basic.result index 70d5ad40db..8dbb3230e8 100644 --- a/tests/cases/standalone/common/tql/basic.result +++ b/tests/cases/standalone/common/tql/basic.result @@ -47,7 +47,10 @@ TQL EVAL (0, 10, '5s') test{__schema__="public"}; -- SQLNESS SORT_RESULT 2 1 TQL EVAL (0, 10, '5s') test{__schema__="greptime_private"}; -Error: 4001(TableNotFound), Table not found: greptime.greptime_private.test ++------+-------+ +| time | value | ++------+-------+ ++------+-------+ -- SQLNESS SORT_RESULT 2 1 TQL EVAL (0, 10, '5s') test{__database__="public"}; @@ -64,7 +67,10 @@ TQL EVAL (0, 10, '5s') test{__database__="public"}; -- SQLNESS SORT_RESULT 2 1 TQL EVAL (0, 10, '5s') test{__database__="greptime_private"}; -Error: 4001(TableNotFound), Table not found: greptime.greptime_private.test ++------+-------+ +| time | value | ++------+-------+ ++------+-------+ -- SQLNESS SORT_RESULT 2 1 TQL EVAL (0, 10, '5s') {__name__="test", __field__="i"}; diff --git a/tests/cases/standalone/common/tql/case_sensitive.result b/tests/cases/standalone/common/tql/case_sensitive.result index 608140fafe..de7fb9a566 100644 --- a/tests/cases/standalone/common/tql/case_sensitive.result +++ b/tests/cases/standalone/common/tql/case_sensitive.result @@ -60,12 +60,14 @@ Affected Rows: 0 tql eval (0,10,'5s') sum(MemAvailable / 4) + sum(MemTotal / 4); -Error: 4001(TableNotFound), Table not found: greptime.public.MemTotal +++ +++ -- Cross schema is not supported tql eval (0,10,'5s') sum(MemAvailable / 4) + sum({__name__="AnotherSchema.MemTotal"} / 4); -Error: 4001(TableNotFound), Table not found: greptime.public.AnotherSchema.MemTotal +++ +++ drop table "MemAvailable";