pub mod builder;
mod dashboard;
mod grpc;
mod influxdb;
mod jaeger;
mod log_handler;
mod logs;
mod opentsdb;
mod otlp;
pub mod prom_store;
mod promql;
mod region_query;
pub mod standalone;

use std::pin::Pin;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, atomic};
use std::time::{Duration, SystemTime};

use async_stream::stream;
use async_trait::async_trait;
use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq};
use catalog::CatalogManagerRef;
use catalog::process_manager::{
    ProcessManagerRef, QueryStatement as CatalogQueryStatement, SlowQueryTimer,
};
use client::OutputData;
use common_base::Plugins;
use common_base::cancellation::CancellableFuture;
use common_error::ext::{BoxedError, ErrorExt};
use common_event_recorder::EventRecorderRef;
use common_meta::cache_invalidator::CacheInvalidatorRef;
use common_meta::key::TableMetadataManagerRef;
use common_meta::key::table_name::TableNameKey;
use common_meta::node_manager::NodeManagerRef;
use common_meta::procedure_executor::ProcedureExecutorRef;
use common_query::Output;
use common_recordbatch::RecordBatchStreamWrapper;
use common_recordbatch::error::StreamTimeoutSnafu;
use common_telemetry::logging::SlowQueryOptions;
use common_telemetry::{debug, error, tracing};
use dashmap::DashMap;
use datafusion_expr::LogicalPlan;
use futures::{Stream, StreamExt};
use lazy_static::lazy_static;
use operator::delete::DeleterRef;
use operator::insert::InserterRef;
use operator::statement::{StatementExecutor, StatementExecutorRef};
use partition::manager::PartitionRuleManagerRef;
use pipeline::pipeline_operator::PipelineOperator;
use prometheus::HistogramTimer;
use promql_parser::label::Matcher;
use query::QueryEngineRef;
use query::metrics::OnDone;
use query::parser::{PromQuery, QueryLanguageParser, QueryStatement};
use query::query_engine::DescribeResult;
use query::query_engine::options::{QueryOptions, validate_catalog_and_schema};
use servers::error::{
    self as server_error, AuthSnafu, CommonMetaSnafu, ExecuteQuerySnafu,
    OtlpMetricModeIncompatibleSnafu, ParsePromQLSnafu, UnexpectedResultSnafu,
};
use servers::interceptor::{
    PromQueryInterceptor, PromQueryInterceptorRef, SqlQueryInterceptor, SqlQueryInterceptorRef,
};
use servers::otlp::metrics::legacy_normalize_otlp_name;
use servers::prometheus_handler::PrometheusHandler;
use servers::query_handler::sql::SqlQueryHandler;
use session::context::{Channel, QueryContextRef};
use session::table_name::table_idents_to_full_name;
use snafu::prelude::*;
use sql::ast::ObjectNamePartExt;
use sql::dialect::Dialect;
use sql::parser::{ParseOptions, ParserContext};
use sql::statements::comment::CommentObject;
use sql::statements::copy::{CopyDatabase, CopyTable};
use sql::statements::statement::Statement;
use sql::statements::tql::Tql;
use sqlparser::ast::ObjectName;
pub use standalone::StandaloneDatanodeManager;
use table::requests::{OTLP_METRIC_COMPAT_KEY, OTLP_METRIC_COMPAT_PROM};
use tracing::Span;

use crate::error::{
    self, Error, ExecLogicalPlanSnafu, ExecutePromqlSnafu, ExternalSnafu, InvalidSqlSnafu,
    ParseSqlSnafu, PermissionSnafu, PlanStatementSnafu, Result, SqlExecInterceptedSnafu,
    StatementTimeoutSnafu, TableOperationSnafu,
};
use crate::stream_wrapper::CancellableStreamWrapper;

lazy_static! {
    static ref OTLP_LEGACY_DEFAULT_VALUE: String = "legacy".to_string();
}

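/// The frontend [`Instance`] ties the catalog, query engine, statement executor and
/// the protocol handlers (SQL, PromQL, OTLP, ...) together, and is shared by all
/// frontend servers.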
#[derive(Clone)]
pub struct Instance {
    catalog_manager: CatalogManagerRef,
    pipeline_operator: Arc<PipelineOperator>,
    statement_executor: Arc<StatementExecutor>,
    query_engine: QueryEngineRef,
    plugins: Plugins,
    inserter: InserterRef,
    deleter: DeleterRef,
    table_metadata_manager: TableMetadataManagerRef,
    event_recorder: Option<EventRecorderRef>,
    process_manager: ProcessManagerRef,
    slow_query_options: SlowQueryOptions,
    suspend: Arc<AtomicBool>,

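    /// Per-database cache recording whether an OTLP metrics table uses the legacy
    /// naming mode: db string -> (table name -> is legacy).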
    otlp_metrics_table_legacy_cache: DashMap<String, DashMap<String, bool>>,
}

impl Instance {
    pub fn catalog_manager(&self) -> &CatalogManagerRef {
        &self.catalog_manager
    }

    pub fn query_engine(&self) -> &QueryEngineRef {
        &self.query_engine
    }

    pub fn plugins(&self) -> &Plugins {
        &self.plugins
    }

    pub fn statement_executor(&self) -> &StatementExecutorRef {
        &self.statement_executor
    }

    pub fn table_metadata_manager(&self) -> &TableMetadataManagerRef {
        &self.table_metadata_manager
    }

    pub fn inserter(&self) -> &InserterRef {
        &self.inserter
    }

    pub fn process_manager(&self) -> &ProcessManagerRef {
        &self.process_manager
    }

    pub fn node_manager(&self) -> &NodeManagerRef {
        self.inserter.node_manager()
    }

    pub fn partition_manager(&self) -> &PartitionRuleManagerRef {
        self.inserter.partition_manager()
    }

    pub fn cache_invalidator(&self) -> &CacheInvalidatorRef {
        self.statement_executor.cache_invalidator()
    }

    pub fn procedure_executor(&self) -> &ProcedureExecutorRef {
        self.statement_executor.procedure_executor()
    }

    pub fn suspend_state(&self) -> Arc<AtomicBool> {
        self.suspend.clone()
    }

    pub(crate) fn is_suspended(&self) -> bool {
        self.suspend.load(atomic::Ordering::Relaxed)
    }
}

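/// Parses a raw SQL string into a list of [`Statement`]s using the given dialect.
///
/// A minimal usage sketch, assuming the GreptimeDB dialect as used in the tests below:
///
/// ```ignore
/// use sql::dialect::GreptimeDbDialect;
///
/// let stmts = parse_stmt("SELECT 1; SELECT 2;", &GreptimeDbDialect {})?;
/// assert_eq!(stmts.len(), 2);
/// ```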
fn parse_stmt(sql: &str, dialect: &(dyn Dialect + Send + Sync)) -> Result<Vec<Statement>> {
    ParserContext::create_with_dialect(sql, dialect, ParseOptions::default()).context(ParseSqlSnafu)
}

impl Instance {
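    /// Executes a single SQL statement after checking permissions. For queries and
    /// other read-only statements it also registers the query with the process manager
    /// (making it listable and cancellable) and optionally starts a slow-query timer,
    /// then runs the statement under its derived timeout.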
    async fn query_statement(&self, stmt: Statement, query_ctx: QueryContextRef) -> Result<Output> {
        check_permission(self.plugins.clone(), &stmt, &query_ctx)?;

        let query_interceptor = self.plugins.get::<SqlQueryInterceptorRef<Error>>();
        let query_interceptor = query_interceptor.as_ref();

        if should_capture_statement(Some(&stmt)) {
            let slow_query_timer = self
                .slow_query_options
                .enable
                .then(|| self.event_recorder.clone())
                .flatten()
                .map(|event_recorder| {
                    SlowQueryTimer::new(
                        CatalogQueryStatement::Sql(stmt.clone()),
                        self.slow_query_options.threshold,
                        self.slow_query_options.sample_ratio,
                        self.slow_query_options.record_type,
                        event_recorder,
                    )
                });

            let ticket = self.process_manager.register_query(
                query_ctx.current_catalog().to_string(),
                vec![query_ctx.current_schema()],
                stmt.to_string(),
                query_ctx.conn_info().to_string(),
                Some(query_ctx.process_id()),
                slow_query_timer,
            );

            let query_fut = self.exec_statement_with_timeout(stmt, query_ctx, query_interceptor);

            CancellableFuture::new(query_fut, ticket.cancellation_handle.clone())
                .await
                .map_err(|_| error::CancelledSnafu.build())?
                .map(|output| {
                    let Output { meta, data } = output;

                    let data = match data {
                        OutputData::Stream(stream) => OutputData::Stream(Box::pin(
                            CancellableStreamWrapper::new(stream, ticket),
                        )),
                        other => other,
                    };
                    Output { data, meta }
                })
        } else {
            self.exec_statement_with_timeout(stmt, query_ctx, query_interceptor)
                .await
        }
    }

    async fn exec_statement_with_timeout(
        &self,
        stmt: Statement,
        query_ctx: QueryContextRef,
        query_interceptor: Option<&SqlQueryInterceptorRef<Error>>,
    ) -> Result<Output> {
        let timeout = derive_timeout(&stmt, &query_ctx);
        match timeout {
            Some(timeout) => {
                let start = tokio::time::Instant::now();
                let output = tokio::time::timeout(
                    timeout,
                    self.exec_statement(stmt, query_ctx, query_interceptor),
                )
                .await
                .map_err(|_| StatementTimeoutSnafu.build())??;
                let remaining_timeout = timeout.checked_sub(start.elapsed()).unwrap_or_default();
                attach_timeout(output, remaining_timeout)
            }
            None => {
                self.exec_statement(stmt, query_ctx, query_interceptor)
                    .await
            }
        }
    }

    async fn exec_statement(
        &self,
        stmt: Statement,
        query_ctx: QueryContextRef,
        query_interceptor: Option<&SqlQueryInterceptorRef<Error>>,
    ) -> Result<Output> {
        match stmt {
            Statement::Query(_) | Statement::Explain(_) | Statement::Delete(_) => {
                if let Statement::Explain(explain) = &stmt
                    && let Some(format) = explain.format()
                {
                    query_ctx.set_explain_format(format.to_string());
                }

                self.plan_and_exec_sql(stmt, &query_ctx, query_interceptor)
                    .await
            }
            Statement::Tql(tql) => {
                self.plan_and_exec_tql(&query_ctx, query_interceptor, tql)
                    .await
            }
            _ => {
                query_interceptor.pre_execute(&stmt, None, query_ctx.clone())?;
                self.statement_executor
                    .execute_sql(stmt, query_ctx)
                    .await
                    .context(TableOperationSnafu)
            }
        }
    }

    async fn plan_and_exec_sql(
        &self,
        stmt: Statement,
        query_ctx: &QueryContextRef,
        query_interceptor: Option<&SqlQueryInterceptorRef<Error>>,
    ) -> Result<Output> {
        let stmt = QueryStatement::Sql(stmt);
        let plan = self
            .statement_executor
            .plan(&stmt, query_ctx.clone())
            .await?;
        let QueryStatement::Sql(stmt) = stmt else {
            unreachable!()
        };
        query_interceptor.pre_execute(&stmt, Some(&plan), query_ctx.clone())?;

        self.statement_executor
            .exec_plan(plan, query_ctx.clone())
            .await
            .context(TableOperationSnafu)
    }

    async fn plan_and_exec_tql(
        &self,
        query_ctx: &QueryContextRef,
        query_interceptor: Option<&SqlQueryInterceptorRef<Error>>,
        tql: Tql,
    ) -> Result<Output> {
        let plan = self
            .statement_executor
            .plan_tql(tql.clone(), query_ctx)
            .await?;
        query_interceptor.pre_execute(&Statement::Tql(tql), Some(&plan), query_ctx.clone())?;
        self.statement_executor
            .exec_plan(plan, query_ctx.clone())
            .await
            .context(TableOperationSnafu)
    }

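    /// Determines whether the OTLP metric tables referenced by `names` were created in
    /// the legacy naming mode. Results are cached per database; mixing legacy and
    /// Prometheus-compatible tables in one request is rejected as incompatible.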
    async fn check_otlp_legacy(
        &self,
        names: &[&String],
        ctx: QueryContextRef,
    ) -> server_error::Result<bool> {
        let db_string = ctx.get_db_string();
        let cache = self
            .otlp_metrics_table_legacy_cache
            .entry(db_string.clone())
            .or_default();
        if let Some(flag) = fast_legacy_check(&cache, names)? {
            return Ok(flag);
        }
        drop(cache);

        let catalog = ctx.current_catalog();
        let schema = ctx.current_schema();

        let normalized_names = names
            .iter()
            .map(|n| legacy_normalize_otlp_name(n))
            .collect::<Vec<_>>();
        let table_names = normalized_names
            .iter()
            .map(|n| TableNameKey::new(catalog, &schema, n))
            .collect::<Vec<_>>();
        let table_values = self
            .table_metadata_manager()
            .table_name_manager()
            .batch_get(table_names)
            .await
            .context(CommonMetaSnafu)?;
        let table_ids = table_values
            .into_iter()
            .filter_map(|v| v.map(|vi| vi.table_id()))
            .collect::<Vec<_>>();

        if table_ids.is_empty() {
            let cache = self
                .otlp_metrics_table_legacy_cache
                .entry(db_string)
                .or_default();
            names.iter().for_each(|name| {
                cache.insert((*name).clone(), false);
            });
            return Ok(false);
        }

        let table_infos = self
            .table_metadata_manager()
            .table_info_manager()
            .batch_get(&table_ids)
            .await
            .context(CommonMetaSnafu)?;
        let options = table_infos
            .values()
            .map(|info| {
                info.table_info
                    .meta
                    .options
                    .extra_options
                    .get(OTLP_METRIC_COMPAT_KEY)
                    .unwrap_or(&OTLP_LEGACY_DEFAULT_VALUE)
            })
            .collect::<Vec<_>>();
        let cache = self
            .otlp_metrics_table_legacy_cache
            .entry(db_string)
            .or_default();
        if !options.is_empty() {
            let has_prom = options.iter().any(|opt| *opt == OTLP_METRIC_COMPAT_PROM);
            let has_legacy = options
                .iter()
                .any(|opt| *opt == OTLP_LEGACY_DEFAULT_VALUE.as_str());
            ensure!(!(has_prom && has_legacy), OtlpMetricModeIncompatibleSnafu);
            let flag = has_legacy;
            names.iter().for_each(|name| {
                cache.insert((*name).clone(), flag);
            });
            Ok(flag)
        } else {
            names.iter().for_each(|name| {
                cache.insert((*name).clone(), false);
            });
            Ok(false)
        }
    }
}

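/// Fast path for [`Instance::check_otlp_legacy`] that answers from the per-database
/// cache alone. Returns `Ok(Some(flag))` on a cache hit (back-filling missing names
/// with the same flag), `Ok(None)` when nothing is cached, and an error when the
/// cached entries disagree on the mode.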
fn fast_legacy_check(
    cache: &DashMap<String, bool>,
    names: &[&String],
) -> server_error::Result<Option<bool>> {
    let hit_cache = names
        .iter()
        .filter_map(|name| cache.get(*name))
        .collect::<Vec<_>>();
    if !hit_cache.is_empty() {
        let hit_legacy = hit_cache.iter().any(|en| *en.value());
        let hit_prom = hit_cache.iter().any(|en| !*en.value());

        ensure!(!(hit_legacy && hit_prom), OtlpMetricModeIncompatibleSnafu);

        let flag = hit_legacy;
        drop(hit_cache);

        names.iter().for_each(|name| {
            if !cache.contains_key(*name) {
                cache.insert((*name).clone(), flag);
            }
        });
        Ok(Some(flag))
    } else {
        Ok(None)
    }
}

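/// Derives the effective timeout for a statement from the session's `query_timeout`.
/// Read-only statements on the MySQL channel and all statements on the Postgres
/// channel are bounded; other channels, or a zero timeout, mean no limit.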
fn derive_timeout(stmt: &Statement, query_ctx: &QueryContextRef) -> Option<Duration> {
    let query_timeout = query_ctx.query_timeout()?;
    if query_timeout.is_zero() {
        return None;
    }
    match query_ctx.channel() {
        Channel::Mysql if stmt.is_readonly() => Some(query_timeout),
        Channel::Postgres => Some(query_timeout),
        _ => None,
    }
}

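/// Like [`derive_timeout`], but for plans that may not carry an originating statement;
/// without a statement only the Postgres channel honors the session timeout.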
fn derive_timeout_for_plan(
    stmt: Option<&Statement>,
    query_ctx: &QueryContextRef,
) -> Option<Duration> {
    match stmt {
        Some(s) => derive_timeout(s, query_ctx),
        None => {
            let query_timeout = query_ctx.query_timeout()?;
            if query_timeout.is_zero() {
                return None;
            }
            match query_ctx.channel() {
                Channel::Postgres => Some(query_timeout),
                _ => None,
            }
        }
    }
}

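/// Applies the remaining time budget to an output. Streaming outputs are wrapped so
/// that each `next()` is bounded by the time left; non-streaming outputs are returned
/// unchanged. Fails immediately if the budget is already exhausted.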
fn attach_timeout(output: Output, mut timeout: Duration) -> Result<Output> {
    if timeout.is_zero() {
        return StatementTimeoutSnafu.fail();
    }

    let output = match output.data {
        OutputData::AffectedRows(_) | OutputData::RecordBatches(_) => output,
        OutputData::Stream(mut stream) => {
            let schema = stream.schema();
            let s = Box::pin(stream! {
                let mut start = tokio::time::Instant::now();
                while let Some(item) = tokio::time::timeout(timeout, stream.next()).await.map_err(|_| StreamTimeoutSnafu.build())? {
                    yield item;

                    let now = tokio::time::Instant::now();
                    timeout = timeout.checked_sub(now - start).unwrap_or(Duration::ZERO);
                    start = now;
                    if timeout.is_zero() {
                        StreamTimeoutSnafu.fail()?;
                    }
                }
            }) as Pin<Box<dyn Stream<Item = _> + Send>>;
            let stream = RecordBatchStreamWrapper {
                schema,
                stream: s,
                output_ordering: None,
                metrics: Default::default(),
                span: Span::current(),
            };
            Output::new(OutputData::Stream(Box::pin(stream)), output.meta)
        }
    };

    Ok(output)
}

impl Instance {
    #[tracing::instrument(skip_all, name = "SqlQueryHandler::do_query")]
    async fn do_query_inner(&self, query: &str, query_ctx: QueryContextRef) -> Vec<Result<Output>> {
        if self.is_suspended() {
            return vec![error::SuspendedSnafu {}.fail()];
        }

        let query_interceptor_opt = self.plugins.get::<SqlQueryInterceptorRef<Error>>();
        let query_interceptor = query_interceptor_opt.as_ref();
        let query = match query_interceptor.pre_parsing(query, query_ctx.clone()) {
            Ok(q) => q,
            Err(e) => return vec![Err(e)],
        };

        let checker_ref = self.plugins.get::<PermissionCheckerRef>();
        let checker = checker_ref.as_ref();

        match parse_stmt(query.as_ref(), query_ctx.sql_dialect())
            .and_then(|stmts| query_interceptor.post_parsing(stmts, query_ctx.clone()))
        {
            Ok(stmts) => {
                if stmts.is_empty() {
                    return vec![
                        InvalidSqlSnafu {
                            err_msg: "empty statements",
                        }
                        .fail(),
                    ];
                }

                let mut results = Vec::with_capacity(stmts.len());
                for stmt in stmts {
                    if let Err(e) = checker
                        .check_permission(
                            query_ctx.current_user(),
                            PermissionReq::SqlStatement(&stmt),
                        )
                        .context(PermissionSnafu)
                    {
                        results.push(Err(e));
                        break;
                    }

                    match self.query_statement(stmt.clone(), query_ctx.clone()).await {
                        Ok(output) => {
                            let output_result =
                                query_interceptor.post_execute(output, query_ctx.clone());
                            results.push(output_result);
                        }
                        Err(e) => {
                            if e.status_code().should_log_error() {
                                error!(e; "Failed to execute query: {stmt}");
                            } else {
                                debug!("Failed to execute query: {stmt}, {e}");
                            }
                            results.push(Err(e));
                            break;
                        }
                    }
                }
                results
            }
            Err(e) => {
                vec![Err(e)]
            }
        }
    }

    async fn exec_plan(&self, plan: LogicalPlan, query_ctx: QueryContextRef) -> Result<Output> {
        self.query_engine
            .execute(plan, query_ctx)
            .await
            .context(ExecLogicalPlanSnafu)
    }

    async fn exec_plan_with_timeout(
        &self,
        stmt: Option<Statement>,
        plan: LogicalPlan,
        query_ctx: QueryContextRef,
    ) -> Result<Output> {
        let timeout = derive_timeout_for_plan(stmt.as_ref(), &query_ctx);
        match timeout {
            Some(timeout) => {
                let start = tokio::time::Instant::now();
                let output = tokio::time::timeout(timeout, self.exec_plan(plan, query_ctx))
                    .await
                    .map_err(|_| StatementTimeoutSnafu.build())??;
                let remaining_timeout = timeout.checked_sub(start.elapsed()).unwrap_or_default();
                attach_timeout(output, remaining_timeout)
            }
            None => self.exec_plan(plan, query_ctx).await,
        }
    }

    async fn do_exec_plan_inner(
        &self,
        stmt: Option<Statement>,
        plan: LogicalPlan,
        query_ctx: QueryContextRef,
    ) -> Result<Output> {
        ensure!(!self.is_suspended(), error::SuspendedSnafu);

        if should_capture_statement(stmt.as_ref()) {
            let stmt = stmt.unwrap();
            let query = stmt.to_string();
            let slow_query_timer = self
                .slow_query_options
                .enable
                .then(|| self.event_recorder.clone())
                .flatten()
                .map(|event_recorder| {
                    SlowQueryTimer::new(
                        CatalogQueryStatement::Sql(stmt.clone()),
                        self.slow_query_options.threshold,
                        self.slow_query_options.sample_ratio,
                        self.slow_query_options.record_type,
                        event_recorder,
                    )
                });

            let ticket = self.process_manager.register_query(
                query_ctx.current_catalog().to_string(),
                vec![query_ctx.current_schema()],
                query,
                query_ctx.conn_info().to_string(),
                Some(query_ctx.process_id()),
                slow_query_timer,
            );

            let query_fut = self.exec_plan_with_timeout(Some(stmt), plan, query_ctx);

            CancellableFuture::new(query_fut, ticket.cancellation_handle.clone())
                .await
                .map_err(|_| error::CancelledSnafu.build())?
                .map(|output| {
                    let Output { meta, data } = output;

                    let data = match data {
                        OutputData::Stream(stream) => OutputData::Stream(Box::pin(
                            CancellableStreamWrapper::new(stream, ticket),
                        )),
                        other => other,
                    };
                    Output { data, meta }
                })
        } else {
            self.exec_plan_with_timeout(stmt, plan, query_ctx).await
        }
    }

    #[tracing::instrument(skip_all, name = "SqlQueryHandler::do_promql_query")]
    async fn do_promql_query_inner(
        &self,
        query: &PromQuery,
        query_ctx: QueryContextRef,
    ) -> Vec<Result<Output>> {
        if self.is_suspended() {
            return vec![error::SuspendedSnafu {}.fail()];
        }

        let result = PrometheusHandler::do_query(self, query, query_ctx)
            .await
            .with_context(|_| ExecutePromqlSnafu {
                query: format!("{query:?}"),
            });
        vec![result]
    }

    async fn do_describe_inner(
        &self,
        stmt: Statement,
        query_ctx: QueryContextRef,
    ) -> Result<Option<DescribeResult>> {
        ensure!(!self.is_suspended(), error::SuspendedSnafu);

        if matches!(
            stmt,
            Statement::Insert(_) | Statement::Query(_) | Statement::Delete(_)
        ) {
            self.plugins
                .get::<PermissionCheckerRef>()
                .as_ref()
                .check_permission(query_ctx.current_user(), PermissionReq::SqlStatement(&stmt))
                .context(PermissionSnafu)?;

            let plan = self
                .query_engine
                .planner()
                .plan(&QueryStatement::Sql(stmt), query_ctx.clone())
                .await
                .context(PlanStatementSnafu)?;
            self.query_engine
                .describe(plan, query_ctx)
                .await
                .map(Some)
                .context(error::DescribeStatementSnafu)
        } else {
            Ok(None)
        }
    }

    async fn is_valid_schema_inner(&self, catalog: &str, schema: &str) -> Result<bool> {
        self.catalog_manager
            .schema_exists(catalog, schema, None)
            .await
            .context(error::CatalogSnafu)
    }
}

#[async_trait]
impl SqlQueryHandler for Instance {
    async fn do_query(
        &self,
        query: &str,
        query_ctx: QueryContextRef,
    ) -> Vec<server_error::Result<Output>> {
        self.do_query_inner(query, query_ctx)
            .await
            .into_iter()
            .map(|result| result.map_err(BoxedError::new).context(ExecuteQuerySnafu))
            .collect()
    }

    async fn do_exec_plan(
        &self,
        stmt: Option<Statement>,
        plan: LogicalPlan,
        query_ctx: QueryContextRef,
    ) -> server_error::Result<Output> {
        self.do_exec_plan_inner(stmt, plan, query_ctx)
            .await
            .map_err(BoxedError::new)
            .context(server_error::ExecutePlanSnafu)
    }

    async fn do_promql_query(
        &self,
        query: &PromQuery,
        query_ctx: QueryContextRef,
    ) -> Vec<server_error::Result<Output>> {
        self.do_promql_query_inner(query, query_ctx)
            .await
            .into_iter()
            .map(|result| result.map_err(BoxedError::new).context(ExecuteQuerySnafu))
            .collect()
    }

    async fn do_describe(
        &self,
        stmt: Statement,
        query_ctx: QueryContextRef,
    ) -> server_error::Result<Option<DescribeResult>> {
        self.do_describe_inner(stmt, query_ctx)
            .await
            .map_err(BoxedError::new)
            .context(server_error::DescribeStatementSnafu)
    }

    async fn is_valid_schema(&self, catalog: &str, schema: &str) -> server_error::Result<bool> {
        self.is_valid_schema_inner(catalog, schema)
            .await
            .map_err(BoxedError::new)
            .context(server_error::CheckDatabaseValiditySnafu)
    }
}

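/// Attaches a Prometheus [`HistogramTimer`] to an output: for streaming outputs the
/// timer is observed when the stream finishes; other outputs leave the timer to record
/// its duration when it is dropped.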
pub fn attach_timer(output: Output, timer: HistogramTimer) -> Output {
    match output.data {
        OutputData::AffectedRows(_) | OutputData::RecordBatches(_) => output,
        OutputData::Stream(stream) => {
            let stream = OnDone::new(stream, move || {
                timer.observe_duration();
            });
            Output::new(OutputData::Stream(Box::pin(stream)), output.meta)
        }
    }
}

#[async_trait]
impl PrometheusHandler for Instance {
    #[tracing::instrument(skip_all)]
    async fn do_query(
        &self,
        query: &PromQuery,
        query_ctx: QueryContextRef,
    ) -> server_error::Result<Output> {
        let interceptor = self
            .plugins
            .get::<PromQueryInterceptorRef<server_error::Error>>();

        self.plugins
            .get::<PermissionCheckerRef>()
            .as_ref()
            .check_permission(query_ctx.current_user(), PermissionReq::PromQuery)
            .context(AuthSnafu)?;

        let stmt = QueryLanguageParser::parse_promql(query, &query_ctx).with_context(|_| {
            ParsePromQLSnafu {
                query: query.clone(),
            }
        })?;

        let plan = self
            .statement_executor
            .plan(&stmt, query_ctx.clone())
            .await
            .map_err(BoxedError::new)
            .context(ExecuteQuerySnafu)?;

        interceptor.pre_execute(query, Some(&plan), query_ctx.clone())?;

        let query_statement = if let QueryStatement::Promql(eval_stmt, alias) = stmt {
            CatalogQueryStatement::Promql(eval_stmt, alias)
        } else {
            return UnexpectedResultSnafu {
                reason: "The query should always be promql.".to_string(),
            }
            .fail();
        };
        let query = query_statement.to_string();

        let slow_query_timer = self
            .slow_query_options
            .enable
            .then(|| self.event_recorder.clone())
            .flatten()
            .map(|event_recorder| {
                SlowQueryTimer::new(
                    query_statement,
                    self.slow_query_options.threshold,
                    self.slow_query_options.sample_ratio,
                    self.slow_query_options.record_type,
                    event_recorder,
                )
            });

        let ticket = self.process_manager.register_query(
            query_ctx.current_catalog().to_string(),
            vec![query_ctx.current_schema()],
            query,
            query_ctx.conn_info().to_string(),
            Some(query_ctx.process_id()),
            slow_query_timer,
        );

        let query_fut = self.statement_executor.exec_plan(plan, query_ctx.clone());

        let output = CancellableFuture::new(query_fut, ticket.cancellation_handle.clone())
            .await
            .map_err(|_| servers::error::CancelledSnafu.build())?
            .map(|output| {
                let Output { meta, data } = output;
                let data = match data {
                    OutputData::Stream(stream) => {
                        OutputData::Stream(Box::pin(CancellableStreamWrapper::new(stream, ticket)))
                    }
                    other => other,
                };
                Output { data, meta }
            })
            .map_err(BoxedError::new)
            .context(ExecuteQuerySnafu)?;

        Ok(interceptor.post_execute(output, query_ctx)?)
    }

    async fn query_metric_names(
        &self,
        matchers: Vec<Matcher>,
        ctx: &QueryContextRef,
    ) -> server_error::Result<Vec<String>> {
        self.handle_query_metric_names(matchers, ctx)
            .await
            .map_err(BoxedError::new)
            .context(ExecuteQuerySnafu)
    }

    async fn query_label_values(
        &self,
        metric: String,
        label_name: String,
        matchers: Vec<Matcher>,
        start: SystemTime,
        end: SystemTime,
        ctx: &QueryContextRef,
    ) -> server_error::Result<Vec<String>> {
        self.handle_query_label_values(metric, label_name, matchers, start, end, ctx)
            .await
            .map_err(BoxedError::new)
            .context(ExecuteQuerySnafu)
    }

    fn catalog_manager(&self) -> CatalogManagerRef {
        self.catalog_manager.clone()
    }
}

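/// Validates the optional `database` referenced by a SHOW-style statement against the
/// current catalog and schema restrictions before the statement is allowed to run.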
macro_rules! validate_db_permission {
    ($stmt: expr, $query_ctx: expr) => {
        if let Some(database) = &$stmt.database {
            validate_catalog_and_schema($query_ctx.current_catalog(), database, $query_ctx)
                .map_err(BoxedError::new)
                .context(SqlExecInterceptedSnafu)?;
        }
    };
}

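/// Checks whether `stmt` may be executed under `query_ctx` when cross-catalog queries
/// are disallowed (`QueryOptions::disallow_cross_catalog_query`). Statements that name
/// a table, flow or database are validated against the current catalog; purely
/// session-local statements pass through unchanged.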
pub fn check_permission(
    plugins: Plugins,
    stmt: &Statement,
    query_ctx: &QueryContextRef,
) -> Result<()> {
    let need_validate = plugins
        .get::<QueryOptions>()
        .map(|opts| opts.disallow_cross_catalog_query)
        .unwrap_or_default();

    if !need_validate {
        return Ok(());
    }

    match stmt {
        Statement::Admin(_) => {}
        Statement::Query(_)
        | Statement::Explain(_)
        | Statement::Tql(_)
        | Statement::Delete(_)
        | Statement::DeclareCursor(_)
        | Statement::Copy(sql::statements::copy::Copy::CopyQueryTo(_)) => {}
        Statement::CreateDatabase(_)
        | Statement::ShowDatabases(_)
        | Statement::DropDatabase(_)
        | Statement::AlterDatabase(_)
        | Statement::DropFlow(_)
        | Statement::Use(_) => {}
        #[cfg(feature = "enterprise")]
        Statement::DropTrigger(_) => {}
        Statement::ShowCreateDatabase(stmt) => {
            validate_database(&stmt.database_name, query_ctx)?;
        }
        Statement::ShowCreateTable(stmt) => {
            validate_param(&stmt.table_name, query_ctx)?;
        }
        Statement::ShowCreateFlow(stmt) => {
            validate_flow(&stmt.flow_name, query_ctx)?;
        }
        #[cfg(feature = "enterprise")]
        Statement::ShowCreateTrigger(stmt) => {
            validate_param(&stmt.trigger_name, query_ctx)?;
        }
        Statement::ShowCreateView(stmt) => {
            validate_param(&stmt.view_name, query_ctx)?;
        }
        Statement::CreateExternalTable(stmt) => {
            validate_param(&stmt.name, query_ctx)?;
        }
        Statement::CreateFlow(stmt) => {
            validate_param(&stmt.sink_table_name, query_ctx)?;
        }
        #[cfg(feature = "enterprise")]
        Statement::CreateTrigger(stmt) => {
            validate_param(&stmt.trigger_name, query_ctx)?;
        }
        Statement::CreateView(stmt) => {
            validate_param(&stmt.name, query_ctx)?;
        }
        Statement::AlterTable(stmt) => {
            validate_param(stmt.table_name(), query_ctx)?;
        }
        #[cfg(feature = "enterprise")]
        Statement::AlterTrigger(_) => {}
        Statement::SetVariables(_) | Statement::ShowVariables(_) => {}
        Statement::ShowCharset(_) | Statement::ShowCollation(_) => {}

        Statement::Comment(comment) => match &comment.object {
            CommentObject::Table(table) => validate_param(table, query_ctx)?,
            CommentObject::Column { table, .. } => validate_param(table, query_ctx)?,
            CommentObject::Flow(flow) => validate_flow(flow, query_ctx)?,
        },

        Statement::Insert(insert) => {
            let name = insert.table_name().context(ParseSqlSnafu)?;
            validate_param(name, query_ctx)?;
        }
        Statement::CreateTable(stmt) => {
            validate_param(&stmt.name, query_ctx)?;
        }
        Statement::CreateTableLike(stmt) => {
            validate_param(&stmt.table_name, query_ctx)?;
            validate_param(&stmt.source_name, query_ctx)?;
        }
        Statement::DropTable(drop_stmt) => {
            for table_name in drop_stmt.table_names() {
                validate_param(table_name, query_ctx)?;
            }
        }
        Statement::DropView(stmt) => {
            validate_param(&stmt.view_name, query_ctx)?;
        }
        Statement::ShowTables(stmt) => {
            validate_db_permission!(stmt, query_ctx);
        }
        Statement::ShowTableStatus(stmt) => {
            validate_db_permission!(stmt, query_ctx);
        }
        Statement::ShowColumns(stmt) => {
            validate_db_permission!(stmt, query_ctx);
        }
        Statement::ShowIndex(stmt) => {
            validate_db_permission!(stmt, query_ctx);
        }
        Statement::ShowRegion(stmt) => {
            validate_db_permission!(stmt, query_ctx);
        }
        Statement::ShowViews(stmt) => {
            validate_db_permission!(stmt, query_ctx);
        }
        Statement::ShowFlows(stmt) => {
            validate_db_permission!(stmt, query_ctx);
        }
        #[cfg(feature = "enterprise")]
        Statement::ShowTriggers(_stmt) => {}
        Statement::ShowStatus(_stmt) => {}
        Statement::ShowSearchPath(_stmt) => {}
        Statement::DescribeTable(stmt) => {
            validate_param(stmt.name(), query_ctx)?;
        }
        Statement::Copy(sql::statements::copy::Copy::CopyTable(stmt)) => match stmt {
            CopyTable::To(copy_table_to) => validate_param(&copy_table_to.table_name, query_ctx)?,
            CopyTable::From(copy_table_from) => {
                validate_param(&copy_table_from.table_name, query_ctx)?
            }
        },
        Statement::Copy(sql::statements::copy::Copy::CopyDatabase(copy_database)) => {
            match copy_database {
                CopyDatabase::To(stmt) => validate_database(&stmt.database_name, query_ctx)?,
                CopyDatabase::From(stmt) => validate_database(&stmt.database_name, query_ctx)?,
            }
        }
        Statement::TruncateTable(stmt) => {
            validate_param(stmt.table_name(), query_ctx)?;
        }
        Statement::FetchCursor(_) | Statement::CloseCursor(_) => {}
        Statement::Kill(_) => {}
        Statement::ShowProcesslist(_) => {}
    }
    Ok(())
}

fn validate_param(name: &ObjectName, query_ctx: &QueryContextRef) -> Result<()> {
    let (catalog, schema, _) = table_idents_to_full_name(name, query_ctx)
        .map_err(BoxedError::new)
        .context(ExternalSnafu)?;

    validate_catalog_and_schema(&catalog, &schema, query_ctx)
        .map_err(BoxedError::new)
        .context(SqlExecInterceptedSnafu)
}

fn validate_flow(name: &ObjectName, query_ctx: &QueryContextRef) -> Result<()> {
    let catalog = match &name.0[..] {
        [_flow] => query_ctx.current_catalog().to_string(),
        [catalog, _flow] => catalog.to_string_unquoted(),
        _ => {
            return InvalidSqlSnafu {
                err_msg: format!(
                    "expect flow name to be <catalog>.<flow_name> or <flow_name>, actual: {name}",
                ),
            }
            .fail();
        }
    };

    let schema = query_ctx.current_schema();

    validate_catalog_and_schema(&catalog, &schema, query_ctx)
        .map_err(BoxedError::new)
        .context(SqlExecInterceptedSnafu)
}

fn validate_database(name: &ObjectName, query_ctx: &QueryContextRef) -> Result<()> {
    let (catalog, schema) = match &name.0[..] {
        [schema] => (
            query_ctx.current_catalog().to_string(),
            schema.to_string_unquoted(),
        ),
        [catalog, schema] => (catalog.to_string_unquoted(), schema.to_string_unquoted()),
        _ => InvalidSqlSnafu {
            err_msg: format!(
                "expect database name to be <catalog>.<schema> or <schema>, actual: {name}",
            ),
        }
        .fail()?,
    };

    validate_catalog_and_schema(&catalog, &schema, query_ctx)
        .map_err(BoxedError::new)
        .context(SqlExecInterceptedSnafu)
}

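/// Returns true when the statement should be registered with the process manager and
/// measured by the slow-query timer, i.e. queries and other read-only statements.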
fn should_capture_statement(stmt: Option<&Statement>) -> bool {
    if let Some(stmt) = stmt {
        matches!(stmt, Statement::Query(_)) || stmt.is_readonly()
    } else {
        false
    }
}

#[cfg(test)]
mod tests {
    use std::collections::HashMap;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::{Arc, Barrier};
    use std::thread;
    use std::time::{Duration, Instant};

    use common_base::Plugins;
    use query::query_engine::options::QueryOptions;
    use session::context::QueryContext;
    use sql::dialect::GreptimeDbDialect;
    use strfmt::Format;

    use super::*;

    #[test]
    fn test_fast_legacy_check_deadlock_prevention() {
        let cache = DashMap::new();

        cache.insert("metric1".to_string(), true);
        cache.insert("metric2".to_string(), false);
        cache.insert("metric3".to_string(), true);

        let metric1 = "metric1".to_string();
        let metric4 = "metric4".to_string();
        let names1 = vec![&metric1, &metric4];
        let result = fast_legacy_check(&cache, &names1);
        assert!(result.is_ok());
        assert_eq!(result.unwrap(), Some(true));
        assert!(cache.contains_key("metric4"));
        assert!(*cache.get("metric4").unwrap().value());

        let metric5 = "metric5".to_string();
        let metric6 = "metric6".to_string();
        let names2 = vec![&metric5, &metric6];
        let result = fast_legacy_check(&cache, &names2);
        assert!(result.is_ok());
        assert_eq!(result.unwrap(), None);

        let cache_incompatible = DashMap::new();
        cache_incompatible.insert("metric1".to_string(), true);
        cache_incompatible.insert("metric2".to_string(), false);
        let metric1_test = "metric1".to_string();
        let metric2_test = "metric2".to_string();
        let names3 = vec![&metric1_test, &metric2_test];
        let result = fast_legacy_check(&cache_incompatible, &names3);
        assert!(result.is_err());

        let cache_concurrent = Arc::new(DashMap::new());
        cache_concurrent.insert("shared_metric".to_string(), true);

        let num_threads = 8;
        let operations_per_thread = 100;
        let barrier = Arc::new(Barrier::new(num_threads));
        let success_flag = Arc::new(AtomicBool::new(true));

        let handles: Vec<_> = (0..num_threads)
            .map(|thread_id| {
                let cache_clone = Arc::clone(&cache_concurrent);
                let barrier_clone = Arc::clone(&barrier);
                let success_flag_clone = Arc::clone(&success_flag);

                thread::spawn(move || {
                    barrier_clone.wait();

                    let start_time = Instant::now();
                    for i in 0..operations_per_thread {
                        let shared_metric = "shared_metric".to_string();
                        let new_metric = format!("thread_{}_metric_{}", thread_id, i);
                        let names = vec![&shared_metric, &new_metric];

                        match fast_legacy_check(&cache_clone, &names) {
                            Ok(_) => {}
                            Err(_) => {
                                success_flag_clone.store(false, Ordering::Relaxed);
                                return;
                            }
                        }

                        if start_time.elapsed() > Duration::from_secs(10) {
                            success_flag_clone.store(false, Ordering::Relaxed);
                            return;
                        }
                    }
                })
            })
            .collect();

        let start_time = Instant::now();
        for (i, handle) in handles.into_iter().enumerate() {
            let join_result = handle.join();

            if start_time.elapsed() > Duration::from_secs(30) {
                panic!("Test timed out - possible deadlock detected!");
            }

            if join_result.is_err() {
                panic!("Thread {} panicked during execution", i);
            }
        }

        assert!(
            success_flag.load(Ordering::Relaxed),
            "Some operations failed"
        );

        let final_count = cache_concurrent.len();
        assert!(
            final_count > 1 + num_threads * operations_per_thread / 2,
            "Expected more cache entries, got {}",
            final_count
        );
    }

    #[test]
    fn test_exec_validation() {
        let query_ctx = QueryContext::arc();
        let plugins: Plugins = Plugins::new();
        plugins.insert(QueryOptions {
            disallow_cross_catalog_query: true,
        });

        let sql = r#"
            SELECT * FROM demo;
            EXPLAIN SELECT * FROM demo;
            CREATE DATABASE test_database;
            SHOW DATABASES;
        "#;
        let stmts = parse_stmt(sql, &GreptimeDbDialect {}).unwrap();
        assert_eq!(stmts.len(), 4);
        for stmt in stmts {
            let re = check_permission(plugins.clone(), &stmt, &query_ctx);
            re.unwrap();
        }

        let sql = r#"
            SHOW CREATE TABLE demo;
            ALTER TABLE demo ADD COLUMN new_col INT;
        "#;
        let stmts = parse_stmt(sql, &GreptimeDbDialect {}).unwrap();
        assert_eq!(stmts.len(), 2);
        for stmt in stmts {
            let re = check_permission(plugins.clone(), &stmt, &query_ctx);
            re.unwrap();
        }

        fn replace_test(template_sql: &str, plugins: Plugins, query_ctx: &QueryContextRef) {
            let right = vec![("", ""), ("", "public."), ("greptime.", "public.")];
            for (catalog, schema) in right {
                let sql = do_fmt(template_sql, catalog, schema);
                do_test(&sql, plugins.clone(), query_ctx, true);
            }

            let wrong = vec![
                ("wrongcatalog.", "public."),
                ("wrongcatalog.", "wrongschema."),
            ];
            for (catalog, schema) in wrong {
                let sql = do_fmt(template_sql, catalog, schema);
                do_test(&sql, plugins.clone(), query_ctx, false);
            }
        }

        fn do_fmt(template: &str, catalog: &str, schema: &str) -> String {
            let vars = HashMap::from([
                ("catalog".to_string(), catalog),
                ("schema".to_string(), schema),
            ]);
            template.format(&vars).unwrap()
        }

        fn do_test(sql: &str, plugins: Plugins, query_ctx: &QueryContextRef, is_ok: bool) {
            let stmt = &parse_stmt(sql, &GreptimeDbDialect {}).unwrap()[0];
            let re = check_permission(plugins, stmt, query_ctx);
            if is_ok {
                re.unwrap();
            } else {
                assert!(re.is_err());
            }
        }

        let sql = "INSERT INTO {catalog}{schema}monitor(host) VALUES ('host1');";
        replace_test(sql, plugins.clone(), &query_ctx);

        let sql = r#"CREATE TABLE {catalog}{schema}demo(
                            host STRING,
                            ts TIMESTAMP,
                            TIME INDEX (ts),
                            PRIMARY KEY(host)
                        ) engine=mito;"#;
        replace_test(sql, plugins.clone(), &query_ctx);

        let sql = "DROP TABLE {catalog}{schema}demo;";
        replace_test(sql, plugins.clone(), &query_ctx);

        let sql = "SHOW TABLES FROM public";
        let stmt = parse_stmt(sql, &GreptimeDbDialect {}).unwrap();
        check_permission(plugins.clone(), &stmt[0], &query_ctx).unwrap();

        let sql = "SHOW TABLES FROM private";
        let stmt = parse_stmt(sql, &GreptimeDbDialect {}).unwrap();
        let re = check_permission(plugins.clone(), &stmt[0], &query_ctx);
        assert!(re.is_ok());

        let sql = "DESC TABLE {catalog}{schema}demo;";
        replace_test(sql, plugins.clone(), &query_ctx);

        let comment_flow_cases = [
            ("COMMENT ON FLOW my_flow IS 'comment';", true),
            ("COMMENT ON FLOW greptime.my_flow IS 'comment';", true),
            ("COMMENT ON FLOW wrongcatalog.my_flow IS 'comment';", false),
        ];
        for (sql, is_ok) in comment_flow_cases {
            let stmt = &parse_stmt(sql, &GreptimeDbDialect {}).unwrap()[0];
            let result = check_permission(plugins.clone(), stmt, &query_ctx);
            assert_eq!(result.is_ok(), is_ok);
        }

        let show_flow_cases = [
            ("SHOW CREATE FLOW my_flow;", true),
            ("SHOW CREATE FLOW greptime.my_flow;", true),
            ("SHOW CREATE FLOW wrongcatalog.my_flow;", false),
        ];
        for (sql, is_ok) in show_flow_cases {
            let stmt = &parse_stmt(sql, &GreptimeDbDialect {}).unwrap()[0];
            let result = check_permission(plugins.clone(), stmt, &query_ctx);
            assert_eq!(result.is_ok(), is_ok);
        }
    }
}