pub mod builder;
mod dashboard;
mod grpc;
mod influxdb;
mod jaeger;
mod log_handler;
mod logs;
mod opentsdb;
mod otlp;
pub mod prom_store;
mod promql;
mod region_query;
pub mod standalone;

use std::pin::Pin;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, atomic};
use std::time::{Duration, SystemTime};

use async_stream::stream;
use async_trait::async_trait;
use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq};
use catalog::CatalogManagerRef;
use catalog::process_manager::{
    ProcessManagerRef, QueryStatement as CatalogQueryStatement, SlowQueryTimer,
};
use client::OutputData;
use common_base::Plugins;
use common_base::cancellation::CancellableFuture;
use common_error::ext::{BoxedError, ErrorExt};
use common_event_recorder::EventRecorderRef;
use common_meta::cache_invalidator::CacheInvalidatorRef;
use common_meta::key::TableMetadataManagerRef;
use common_meta::key::table_name::TableNameKey;
use common_meta::node_manager::NodeManagerRef;
use common_meta::procedure_executor::ProcedureExecutorRef;
use common_query::Output;
use common_recordbatch::RecordBatchStreamWrapper;
use common_recordbatch::error::StreamTimeoutSnafu;
use common_telemetry::logging::SlowQueryOptions;
use common_telemetry::{debug, error, tracing};
use dashmap::DashMap;
use datafusion_expr::LogicalPlan;
use futures::{Stream, StreamExt};
use lazy_static::lazy_static;
use operator::delete::DeleterRef;
use operator::insert::InserterRef;
use operator::statement::{StatementExecutor, StatementExecutorRef};
use partition::manager::PartitionRuleManagerRef;
use pipeline::pipeline_operator::PipelineOperator;
use prometheus::HistogramTimer;
use promql_parser::label::Matcher;
use query::QueryEngineRef;
use query::metrics::OnDone;
use query::parser::{PromQuery, QueryLanguageParser, QueryStatement};
use query::query_engine::DescribeResult;
use query::query_engine::options::{QueryOptions, validate_catalog_and_schema};
use servers::error::{
    self as server_error, AuthSnafu, CommonMetaSnafu, ExecuteQuerySnafu,
    OtlpMetricModeIncompatibleSnafu, ParsePromQLSnafu, UnexpectedResultSnafu,
};
use servers::interceptor::{
    PromQueryInterceptor, PromQueryInterceptorRef, SqlQueryInterceptor, SqlQueryInterceptorRef,
};
use servers::otlp::metrics::legacy_normalize_otlp_name;
use servers::prometheus_handler::PrometheusHandler;
use servers::query_handler::sql::SqlQueryHandler;
use session::context::{Channel, QueryContextRef};
use session::table_name::table_idents_to_full_name;
use snafu::prelude::*;
use sql::ast::ObjectNamePartExt;
use sql::dialect::Dialect;
use sql::parser::{ParseOptions, ParserContext};
use sql::statements::comment::CommentObject;
use sql::statements::copy::{CopyDatabase, CopyTable};
use sql::statements::statement::Statement;
use sql::statements::tql::Tql;
use sqlparser::ast::ObjectName;
pub use standalone::StandaloneDatanodeManager;
use table::requests::{OTLP_METRIC_COMPAT_KEY, OTLP_METRIC_COMPAT_PROM};
use tracing::Span;

use crate::error::{
    self, Error, ExecLogicalPlanSnafu, ExecutePromqlSnafu, ExternalSnafu, InvalidSqlSnafu,
    ParseSqlSnafu, PermissionSnafu, PlanStatementSnafu, Result, SqlExecInterceptedSnafu,
    StatementTimeoutSnafu, TableOperationSnafu,
};
use crate::stream_wrapper::CancellableStreamWrapper;

lazy_static! {
    static ref OTLP_LEGACY_DEFAULT_VALUE: String = "legacy".to_string();
}

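/// The frontend [`Instance`] bundles the components needed to serve queries:
/// catalog access, statement execution, the query engine, inserters/deleters,
/// process management, and the protocol handlers built on top of them.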
#[derive(Clone)]
pub struct Instance {
    catalog_manager: CatalogManagerRef,
    pipeline_operator: Arc<PipelineOperator>,
    statement_executor: Arc<StatementExecutor>,
    query_engine: QueryEngineRef,
    plugins: Plugins,
    inserter: InserterRef,
    deleter: DeleterRef,
    table_metadata_manager: TableMetadataManagerRef,
    event_recorder: Option<EventRecorderRef>,
    process_manager: ProcessManagerRef,
    slow_query_options: SlowQueryOptions,
    suspend: Arc<AtomicBool>,

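    /// Per-database cache recording, for each OTLP metric table name, whether it
    /// uses the legacy naming/compat mode (`true`) or the Prometheus-compatible
    /// mode (`false`).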
    otlp_metrics_table_legacy_cache: DashMap<String, DashMap<String, bool>>,
}

impl Instance {
    pub fn catalog_manager(&self) -> &CatalogManagerRef {
        &self.catalog_manager
    }

    pub fn query_engine(&self) -> &QueryEngineRef {
        &self.query_engine
    }

    pub fn plugins(&self) -> &Plugins {
        &self.plugins
    }

    pub fn statement_executor(&self) -> &StatementExecutorRef {
        &self.statement_executor
    }

    pub fn table_metadata_manager(&self) -> &TableMetadataManagerRef {
        &self.table_metadata_manager
    }

    pub fn inserter(&self) -> &InserterRef {
        &self.inserter
    }

    pub fn process_manager(&self) -> &ProcessManagerRef {
        &self.process_manager
    }

    pub fn node_manager(&self) -> &NodeManagerRef {
        self.inserter.node_manager()
    }

    pub fn partition_manager(&self) -> &PartitionRuleManagerRef {
        self.inserter.partition_manager()
    }

    pub fn cache_invalidator(&self) -> &CacheInvalidatorRef {
        self.statement_executor.cache_invalidator()
    }

    pub fn procedure_executor(&self) -> &ProcedureExecutorRef {
        self.statement_executor.procedure_executor()
    }

    pub fn suspend_state(&self) -> Arc<AtomicBool> {
        self.suspend.clone()
    }

    pub(crate) fn is_suspended(&self) -> bool {
        self.suspend.load(atomic::Ordering::Relaxed)
    }
}

fn parse_stmt(sql: &str, dialect: &(dyn Dialect + Send + Sync)) -> Result<Vec<Statement>> {
    ParserContext::create_with_dialect(sql, dialect, ParseOptions::default()).context(ParseSqlSnafu)
}

impl Instance {
    async fn query_statement(&self, stmt: Statement, query_ctx: QueryContextRef) -> Result<Output> {
        check_permission(self.plugins.clone(), &stmt, &query_ctx)?;

        let query_interceptor = self.plugins.get::<SqlQueryInterceptorRef<Error>>();
        let query_interceptor = query_interceptor.as_ref();

        if stmt.is_readonly() {
            let slow_query_timer = self
                .slow_query_options
                .enable
                .then(|| self.event_recorder.clone())
                .flatten()
                .map(|event_recorder| {
                    SlowQueryTimer::new(
                        CatalogQueryStatement::Sql(stmt.clone()),
                        self.slow_query_options.threshold,
                        self.slow_query_options.sample_ratio,
                        self.slow_query_options.record_type,
                        event_recorder,
                    )
                });

            let ticket = self.process_manager.register_query(
                query_ctx.current_catalog().to_string(),
                vec![query_ctx.current_schema()],
                stmt.to_string(),
                query_ctx.conn_info().to_string(),
                Some(query_ctx.process_id()),
                slow_query_timer,
            );

            let query_fut = self.exec_statement_with_timeout(stmt, query_ctx, query_interceptor);

            CancellableFuture::new(query_fut, ticket.cancellation_handle.clone())
                .await
                .map_err(|_| error::CancelledSnafu.build())?
                .map(|output| {
                    let Output { meta, data } = output;

                    let data = match data {
                        OutputData::Stream(stream) => OutputData::Stream(Box::pin(
                            CancellableStreamWrapper::new(stream, ticket),
                        )),
                        other => other,
                    };
                    Output { data, meta }
                })
        } else {
            self.exec_statement_with_timeout(stmt, query_ctx, query_interceptor)
                .await
        }
    }

    async fn exec_statement_with_timeout(
        &self,
        stmt: Statement,
        query_ctx: QueryContextRef,
        query_interceptor: Option<&SqlQueryInterceptorRef<Error>>,
    ) -> Result<Output> {
        let timeout = derive_timeout(&stmt, &query_ctx);
        match timeout {
            Some(timeout) => {
                let start = tokio::time::Instant::now();
                let output = tokio::time::timeout(
                    timeout,
                    self.exec_statement(stmt, query_ctx, query_interceptor),
                )
                .await
                .map_err(|_| StatementTimeoutSnafu.build())??;
                let remaining_timeout = timeout.checked_sub(start.elapsed()).unwrap_or_default();
                attach_timeout(output, remaining_timeout)
            }
            None => {
                self.exec_statement(stmt, query_ctx, query_interceptor)
                    .await
            }
        }
    }

    async fn exec_statement(
        &self,
        stmt: Statement,
        query_ctx: QueryContextRef,
        query_interceptor: Option<&SqlQueryInterceptorRef<Error>>,
    ) -> Result<Output> {
        match stmt {
            Statement::Query(_) | Statement::Explain(_) | Statement::Delete(_) => {
                if let Statement::Explain(explain) = &stmt
                    && let Some(format) = explain.format()
                {
                    query_ctx.set_explain_format(format.to_string());
                }

                self.plan_and_exec_sql(stmt, &query_ctx, query_interceptor)
                    .await
            }
            Statement::Tql(tql) => {
                self.plan_and_exec_tql(&query_ctx, query_interceptor, tql)
                    .await
            }
            _ => {
                query_interceptor.pre_execute(&stmt, None, query_ctx.clone())?;
                self.statement_executor
                    .execute_sql(stmt, query_ctx)
                    .await
                    .context(TableOperationSnafu)
            }
        }
    }

    async fn plan_and_exec_sql(
        &self,
        stmt: Statement,
        query_ctx: &QueryContextRef,
        query_interceptor: Option<&SqlQueryInterceptorRef<Error>>,
    ) -> Result<Output> {
        let stmt = QueryStatement::Sql(stmt);
        let plan = self
            .statement_executor
            .plan(&stmt, query_ctx.clone())
            .await?;
        let QueryStatement::Sql(stmt) = stmt else {
            unreachable!()
        };
        query_interceptor.pre_execute(&stmt, Some(&plan), query_ctx.clone())?;

        self.statement_executor
            .exec_plan(plan, query_ctx.clone())
            .await
            .context(TableOperationSnafu)
    }

    async fn plan_and_exec_tql(
        &self,
        query_ctx: &QueryContextRef,
        query_interceptor: Option<&SqlQueryInterceptorRef<Error>>,
        tql: Tql,
    ) -> Result<Output> {
        let plan = self
            .statement_executor
            .plan_tql(tql.clone(), query_ctx)
            .await?;
        query_interceptor.pre_execute(&Statement::Tql(tql), Some(&plan), query_ctx.clone())?;
        self.statement_executor
            .exec_plan(plan, query_ctx.clone())
            .await
            .context(TableOperationSnafu)
    }

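    /// Determines whether the given OTLP metric names should be handled with the
    /// legacy naming/compat mode for the current database. The per-database cache
    /// is consulted first; on a miss the tables' `OTLP_METRIC_COMPAT_KEY` option
    /// is resolved from table metadata and the answer is cached.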
    async fn check_otlp_legacy(
        &self,
        names: &[&String],
        ctx: QueryContextRef,
    ) -> server_error::Result<bool> {
        let db_string = ctx.get_db_string();
        let cache = self
            .otlp_metrics_table_legacy_cache
            .entry(db_string.clone())
            .or_default();
        if let Some(flag) = fast_legacy_check(&cache, names)? {
            return Ok(flag);
        }
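        // Release the entry guard before the async metadata lookups below; the
        // cache is re-acquired once the table options have been resolved.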
        drop(cache);

        let catalog = ctx.current_catalog();
        let schema = ctx.current_schema();

        let normalized_names = names
            .iter()
            .map(|n| legacy_normalize_otlp_name(n))
            .collect::<Vec<_>>();
        let table_names = normalized_names
            .iter()
            .map(|n| TableNameKey::new(catalog, &schema, n))
            .collect::<Vec<_>>();
        let table_values = self
            .table_metadata_manager()
            .table_name_manager()
            .batch_get(table_names)
            .await
            .context(CommonMetaSnafu)?;
        let table_ids = table_values
            .into_iter()
            .filter_map(|v| v.map(|vi| vi.table_id()))
            .collect::<Vec<_>>();

        if table_ids.is_empty() {
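            // None of the tables exist yet, so there is no legacy mode to honor;
            // cache that and default to the non-legacy mode.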
            let cache = self
                .otlp_metrics_table_legacy_cache
                .entry(db_string)
                .or_default();
            names.iter().for_each(|name| {
                cache.insert((*name).clone(), false);
            });
            return Ok(false);
        }

        let table_infos = self
            .table_metadata_manager()
            .table_info_manager()
            .batch_get(&table_ids)
            .await
            .context(CommonMetaSnafu)?;
        let options = table_infos
            .values()
            .map(|info| {
                info.table_info
                    .meta
                    .options
                    .extra_options
                    .get(OTLP_METRIC_COMPAT_KEY)
                    .unwrap_or(&OTLP_LEGACY_DEFAULT_VALUE)
            })
            .collect::<Vec<_>>();
        let cache = self
            .otlp_metrics_table_legacy_cache
            .entry(db_string)
            .or_default();
        if !options.is_empty() {
            let has_prom = options.iter().any(|opt| *opt == OTLP_METRIC_COMPAT_PROM);
            let has_legacy = options
                .iter()
                .any(|opt| *opt == OTLP_LEGACY_DEFAULT_VALUE.as_str());
            ensure!(!(has_prom && has_legacy), OtlpMetricModeIncompatibleSnafu);
            let flag = has_legacy;
            names.iter().for_each(|name| {
                cache.insert((*name).clone(), flag);
            });
            Ok(flag)
        } else {
            names.iter().for_each(|name| {
                cache.insert((*name).clone(), false);
            });
            Ok(false)
        }
    }
}

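/// Answers the legacy-mode question purely from the per-database cache.
/// Returns `Ok(Some(flag))` when at least one name is cached (backfilling the
/// uncached names with the same flag), `Ok(None)` when nothing is cached, and an
/// error when the cached entries disagree about the mode.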
fn fast_legacy_check(
    cache: &DashMap<String, bool>,
    names: &[&String],
) -> server_error::Result<Option<bool>> {
    let hit_cache = names
        .iter()
        .filter_map(|name| cache.get(*name))
        .collect::<Vec<_>>();
    if !hit_cache.is_empty() {
        let hit_legacy = hit_cache.iter().any(|en| *en.value());
        let hit_prom = hit_cache.iter().any(|en| !*en.value());

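        // A single request must not mix legacy-mode and prom-mode tables.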
        ensure!(!(hit_legacy && hit_prom), OtlpMetricModeIncompatibleSnafu);

        let flag = hit_legacy;
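        // Drop the read guards before inserting below: writing while still
        // holding `Ref`s into the same DashMap can deadlock on a shard lock.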
        drop(hit_cache);

        names.iter().for_each(|name| {
            if !cache.contains_key(*name) {
                cache.insert((*name).clone(), flag);
            }
        });
        Ok(Some(flag))
    } else {
        Ok(None)
    }
}

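/// Derives the effective timeout for a statement from the session's
/// `query_timeout`: zero disables it, and it only applies to read-only MySQL
/// statements and to all Postgres statements.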
fn derive_timeout(stmt: &Statement, query_ctx: &QueryContextRef) -> Option<Duration> {
    let query_timeout = query_ctx.query_timeout()?;
    if query_timeout.is_zero() {
        return None;
    }
    match query_ctx.channel() {
        Channel::Mysql if stmt.is_readonly() => Some(query_timeout),
        Channel::Postgres => Some(query_timeout),
        _ => None,
    }
}

fn derive_timeout_for_plan(plan: &LogicalPlan, query_ctx: &QueryContextRef) -> Option<Duration> {
    let query_timeout = query_ctx.query_timeout()?;
    if query_timeout.is_zero() {
        return None;
    }
    match query_ctx.channel() {
        Channel::Mysql if is_readonly_plan(plan) => Some(query_timeout),
        Channel::Postgres => Some(query_timeout),
        _ => None,
    }
}

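/// Enforces the remaining timeout budget on a streaming output: every batch must
/// arrive within what is left of the timeout, otherwise the stream fails with a
/// timeout error. Non-streaming outputs are returned unchanged.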
fn attach_timeout(output: Output, mut timeout: Duration) -> Result<Output> {
    if timeout.is_zero() {
        return StatementTimeoutSnafu.fail();
    }

    let output = match output.data {
        OutputData::AffectedRows(_) | OutputData::RecordBatches(_) => output,
        OutputData::Stream(mut stream) => {
            let schema = stream.schema();
            let s = Box::pin(stream! {
                let mut start = tokio::time::Instant::now();
                while let Some(item) = tokio::time::timeout(timeout, stream.next()).await.map_err(|_| StreamTimeoutSnafu.build())? {
                    yield item;

                    let now = tokio::time::Instant::now();
                    timeout = timeout.checked_sub(now - start).unwrap_or(Duration::ZERO);
                    start = now;
                    if timeout.is_zero() {
                        StreamTimeoutSnafu.fail()?;
                    }
                }
            }) as Pin<Box<dyn Stream<Item = _> + Send>>;
            let stream = RecordBatchStreamWrapper {
                schema,
                stream: s,
                output_ordering: None,
                metrics: Default::default(),
                span: Span::current(),
            };
            Output::new(OutputData::Stream(Box::pin(stream)), output.meta)
        }
    };

    Ok(output)
}

impl Instance {
    #[tracing::instrument(skip_all, name = "SqlQueryHandler::do_query")]
    async fn do_query_inner(&self, query: &str, query_ctx: QueryContextRef) -> Vec<Result<Output>> {
        if self.is_suspended() {
            return vec![error::SuspendedSnafu {}.fail()];
        }

        let query_interceptor_opt = self.plugins.get::<SqlQueryInterceptorRef<Error>>();
        let query_interceptor = query_interceptor_opt.as_ref();
        let query = match query_interceptor.pre_parsing(query, query_ctx.clone()) {
            Ok(q) => q,
            Err(e) => return vec![Err(e)],
        };

        let checker_ref = self.plugins.get::<PermissionCheckerRef>();
        let checker = checker_ref.as_ref();

        match parse_stmt(query.as_ref(), query_ctx.sql_dialect())
            .and_then(|stmts| query_interceptor.post_parsing(stmts, query_ctx.clone()))
        {
            Ok(stmts) => {
                if stmts.is_empty() {
                    return vec![
                        InvalidSqlSnafu {
                            err_msg: "empty statements",
                        }
                        .fail(),
                    ];
                }

                let mut results = Vec::with_capacity(stmts.len());
                for stmt in stmts {
                    if let Err(e) = checker
                        .check_permission(
                            query_ctx.current_user(),
                            PermissionReq::SqlStatement(&stmt),
                        )
                        .context(PermissionSnafu)
                    {
                        results.push(Err(e));
                        break;
                    }

                    match self.query_statement(stmt.clone(), query_ctx.clone()).await {
                        Ok(output) => {
                            let output_result =
                                query_interceptor.post_execute(output, query_ctx.clone());
                            results.push(output_result);
                        }
                        Err(e) => {
                            if e.status_code().should_log_error() {
                                error!(e; "Failed to execute query: {stmt}");
                            } else {
                                debug!("Failed to execute query: {stmt}, {e}");
                            }
                            results.push(Err(e));
                            break;
                        }
                    }
                }
                results
            }
            Err(e) => {
                vec![Err(e)]
            }
        }
    }

    async fn exec_plan(&self, plan: LogicalPlan, query_ctx: QueryContextRef) -> Result<Output> {
        self.query_engine
            .execute(plan, query_ctx)
            .await
            .context(ExecLogicalPlanSnafu)
    }

    async fn exec_plan_with_timeout(
        &self,
        plan: LogicalPlan,
        query_ctx: QueryContextRef,
    ) -> Result<Output> {
        let timeout = derive_timeout_for_plan(&plan, &query_ctx);
        match timeout {
            Some(timeout) => {
                let start = tokio::time::Instant::now();
                let output = tokio::time::timeout(timeout, self.exec_plan(plan, query_ctx))
                    .await
                    .map_err(|_| StatementTimeoutSnafu.build())??;
                let remaining_timeout = timeout.checked_sub(start.elapsed()).unwrap_or_default();
                attach_timeout(output, remaining_timeout)
            }
            None => self.exec_plan(plan, query_ctx).await,
        }
    }

    async fn do_exec_plan_inner(
        &self,
        plan: LogicalPlan,
        query: String,
        query_ctx: QueryContextRef,
    ) -> Result<Output> {
        ensure!(!self.is_suspended(), error::SuspendedSnafu);

        if is_readonly_plan(&plan) {
            let slow_query_timer = self
                .slow_query_options
                .enable
                .then(|| self.event_recorder.clone())
                .flatten()
                .map(|event_recorder| {
                    SlowQueryTimer::new(
                        CatalogQueryStatement::Plan(query.clone()),
                        self.slow_query_options.threshold,
                        self.slow_query_options.sample_ratio,
                        self.slow_query_options.record_type,
                        event_recorder,
                    )
                });

            let ticket = self.process_manager.register_query(
                query_ctx.current_catalog().to_string(),
                vec![query_ctx.current_schema()],
                query,
                query_ctx.conn_info().to_string(),
                Some(query_ctx.process_id()),
                slow_query_timer,
            );

            let query_fut = self.exec_plan_with_timeout(plan, query_ctx);

            CancellableFuture::new(query_fut, ticket.cancellation_handle.clone())
                .await
                .map_err(|_| error::CancelledSnafu.build())?
                .map(|output| {
                    let Output { meta, data } = output;

                    let data = match data {
                        OutputData::Stream(stream) => OutputData::Stream(Box::pin(
                            CancellableStreamWrapper::new(stream, ticket),
                        )),
                        other => other,
                    };
                    Output { data, meta }
                })
        } else {
            self.exec_plan_with_timeout(plan, query_ctx).await
        }
    }

    #[tracing::instrument(skip_all, name = "SqlQueryHandler::do_promql_query")]
    async fn do_promql_query_inner(
        &self,
        query: &PromQuery,
        query_ctx: QueryContextRef,
    ) -> Vec<Result<Output>> {
        if self.is_suspended() {
            return vec![error::SuspendedSnafu {}.fail()];
        }

        let result = PrometheusHandler::do_query(self, query, query_ctx)
            .await
            .with_context(|_| ExecutePromqlSnafu {
                query: format!("{query:?}"),
            });
        vec![result]
    }

    async fn do_describe_inner(
        &self,
        stmt: Statement,
        query_ctx: QueryContextRef,
    ) -> Result<Option<DescribeResult>> {
        ensure!(!self.is_suspended(), error::SuspendedSnafu);

        let is_inner_plannable = |s: &Statement| {
            matches!(
                s,
                Statement::Insert(_) | Statement::Query(_) | Statement::Delete(_)
            )
        };
        let plannable = is_inner_plannable(&stmt)
            || matches!(&stmt, Statement::Explain(explain) if is_inner_plannable(explain.statement.as_ref()));

        if plannable {
            self.plugins
                .get::<PermissionCheckerRef>()
                .as_ref()
                .check_permission(query_ctx.current_user(), PermissionReq::SqlStatement(&stmt))
                .context(PermissionSnafu)?;

            let plan = self
                .query_engine
                .planner()
                .plan(&QueryStatement::Sql(stmt), query_ctx.clone())
                .await
                .context(PlanStatementSnafu)?;
            self.query_engine
                .describe(plan, query_ctx)
                .await
                .map(Some)
                .context(error::DescribeStatementSnafu)
        } else {
            Ok(None)
        }
    }

    async fn is_valid_schema_inner(&self, catalog: &str, schema: &str) -> Result<bool> {
        self.catalog_manager
            .schema_exists(catalog, schema, None)
            .await
            .context(error::CatalogSnafu)
    }
}

#[async_trait]
impl SqlQueryHandler for Instance {
    async fn do_query(
        &self,
        query: &str,
        query_ctx: QueryContextRef,
    ) -> Vec<server_error::Result<Output>> {
        self.do_query_inner(query, query_ctx)
            .await
            .into_iter()
            .map(|result| result.map_err(BoxedError::new).context(ExecuteQuerySnafu))
            .collect()
    }

    async fn do_exec_plan(
        &self,
        plan: LogicalPlan,
        query: String,
        query_ctx: QueryContextRef,
    ) -> server_error::Result<Output> {
        self.do_exec_plan_inner(plan, query, query_ctx)
            .await
            .map_err(BoxedError::new)
            .context(server_error::ExecutePlanSnafu)
    }

    async fn do_promql_query(
        &self,
        query: &PromQuery,
        query_ctx: QueryContextRef,
    ) -> Vec<server_error::Result<Output>> {
        self.do_promql_query_inner(query, query_ctx)
            .await
            .into_iter()
            .map(|result| result.map_err(BoxedError::new).context(ExecuteQuerySnafu))
            .collect()
    }

    async fn do_describe(
        &self,
        stmt: Statement,
        query_ctx: QueryContextRef,
    ) -> server_error::Result<Option<DescribeResult>> {
        self.do_describe_inner(stmt, query_ctx)
            .await
            .map_err(BoxedError::new)
            .context(server_error::DescribeStatementSnafu)
    }

    async fn is_valid_schema(&self, catalog: &str, schema: &str) -> server_error::Result<bool> {
        self.is_valid_schema_inner(catalog, schema)
            .await
            .map_err(BoxedError::new)
            .context(server_error::CheckDatabaseValiditySnafu)
    }
}

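/// Attaches a histogram timer to the output; for streaming results the timer is
/// only observed once the stream has been fully consumed.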
pub fn attach_timer(output: Output, timer: HistogramTimer) -> Output {
    match output.data {
        OutputData::AffectedRows(_) | OutputData::RecordBatches(_) => output,
        OutputData::Stream(stream) => {
            let stream = OnDone::new(stream, move || {
                timer.observe_duration();
            });
            Output::new(OutputData::Stream(Box::pin(stream)), output.meta)
        }
    }
}

#[async_trait]
impl PrometheusHandler for Instance {
    #[tracing::instrument(skip_all)]
    async fn do_query(
        &self,
        query: &PromQuery,
        query_ctx: QueryContextRef,
    ) -> server_error::Result<Output> {
        let interceptor = self
            .plugins
            .get::<PromQueryInterceptorRef<server_error::Error>>();

        self.plugins
            .get::<PermissionCheckerRef>()
            .as_ref()
            .check_permission(query_ctx.current_user(), PermissionReq::PromQuery)
            .context(AuthSnafu)?;

        let stmt = QueryLanguageParser::parse_promql(query, &query_ctx).with_context(|_| {
            ParsePromQLSnafu {
                query: query.clone(),
            }
        })?;

        let plan = self
            .statement_executor
            .plan(&stmt, query_ctx.clone())
            .await
            .map_err(BoxedError::new)
            .context(ExecuteQuerySnafu)?;

        interceptor.pre_execute(query, Some(&plan), query_ctx.clone())?;

        let query_statement = if let QueryStatement::Promql(eval_stmt, alias) = stmt {
            CatalogQueryStatement::Promql(eval_stmt, alias)
        } else {
            return UnexpectedResultSnafu {
                reason: "The query should always be promql.".to_string(),
            }
            .fail();
        };
        let query = query_statement.to_string();

        let slow_query_timer = self
            .slow_query_options
            .enable
            .then(|| self.event_recorder.clone())
            .flatten()
            .map(|event_recorder| {
                SlowQueryTimer::new(
                    query_statement,
                    self.slow_query_options.threshold,
                    self.slow_query_options.sample_ratio,
                    self.slow_query_options.record_type,
                    event_recorder,
                )
            });

        let ticket = self.process_manager.register_query(
            query_ctx.current_catalog().to_string(),
            vec![query_ctx.current_schema()],
            query,
            query_ctx.conn_info().to_string(),
            Some(query_ctx.process_id()),
            slow_query_timer,
        );

        let query_fut = self.statement_executor.exec_plan(plan, query_ctx.clone());

        let output = CancellableFuture::new(query_fut, ticket.cancellation_handle.clone())
            .await
            .map_err(|_| servers::error::CancelledSnafu.build())?
            .map(|output| {
                let Output { meta, data } = output;
                let data = match data {
                    OutputData::Stream(stream) => {
                        OutputData::Stream(Box::pin(CancellableStreamWrapper::new(stream, ticket)))
                    }
                    other => other,
                };
                Output { data, meta }
            })
            .map_err(BoxedError::new)
            .context(ExecuteQuerySnafu)?;

        Ok(interceptor.post_execute(output, query_ctx)?)
    }

    async fn query_metric_names(
        &self,
        matchers: Vec<Matcher>,
        ctx: &QueryContextRef,
    ) -> server_error::Result<Vec<String>> {
        self.handle_query_metric_names(matchers, ctx)
            .await
            .map_err(BoxedError::new)
            .context(ExecuteQuerySnafu)
    }

    async fn query_label_values(
        &self,
        metric: String,
        label_name: String,
        matchers: Vec<Matcher>,
        start: SystemTime,
        end: SystemTime,
        ctx: &QueryContextRef,
    ) -> server_error::Result<Vec<String>> {
        self.handle_query_label_values(metric, label_name, matchers, start, end, ctx)
            .await
            .map_err(BoxedError::new)
            .context(ExecuteQuerySnafu)
    }

    fn catalog_manager(&self) -> CatalogManagerRef {
        self.catalog_manager.clone()
    }
}

macro_rules! validate_db_permission {
    ($stmt: expr, $query_ctx: expr) => {
        if let Some(database) = &$stmt.database {
            validate_catalog_and_schema($query_ctx.current_catalog(), database, $query_ctx)
                .map_err(BoxedError::new)
                .context(SqlExecInterceptedSnafu)?;
        }
    };
}

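/// Rejects statements that reference a catalog or schema other than the one in
/// the current session when `disallow_cross_catalog_query` is enabled; statements
/// that cannot cross catalogs are allowed through unchecked.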
pub fn check_permission(
    plugins: Plugins,
    stmt: &Statement,
    query_ctx: &QueryContextRef,
) -> Result<()> {
    let need_validate = plugins
        .get::<QueryOptions>()
        .map(|opts| opts.disallow_cross_catalog_query)
        .unwrap_or_default();

    if !need_validate {
        return Ok(());
    }

    match stmt {
        Statement::Admin(_) => {}
        Statement::Query(_)
        | Statement::Explain(_)
        | Statement::Tql(_)
        | Statement::Delete(_)
        | Statement::DeclareCursor(_)
        | Statement::Copy(sql::statements::copy::Copy::CopyQueryTo(_)) => {}
        Statement::CreateDatabase(_)
        | Statement::ShowDatabases(_)
        | Statement::DropDatabase(_)
        | Statement::AlterDatabase(_)
        | Statement::DropFlow(_)
        | Statement::Use(_) => {}
        #[cfg(feature = "enterprise")]
        Statement::DropTrigger(_) => {}
        Statement::ShowCreateDatabase(stmt) => {
            validate_database(&stmt.database_name, query_ctx)?;
        }
        Statement::ShowCreateTable(stmt) => {
            validate_param(&stmt.table_name, query_ctx)?;
        }
        Statement::ShowCreateFlow(stmt) => {
            validate_flow(&stmt.flow_name, query_ctx)?;
        }
        #[cfg(feature = "enterprise")]
        Statement::ShowCreateTrigger(stmt) => {
            validate_param(&stmt.trigger_name, query_ctx)?;
        }
        Statement::ShowCreateView(stmt) => {
            validate_param(&stmt.view_name, query_ctx)?;
        }
        Statement::CreateExternalTable(stmt) => {
            validate_param(&stmt.name, query_ctx)?;
        }
        Statement::CreateFlow(stmt) => {
            validate_param(&stmt.sink_table_name, query_ctx)?;
        }
        #[cfg(feature = "enterprise")]
        Statement::CreateTrigger(stmt) => {
            validate_param(&stmt.trigger_name, query_ctx)?;
        }
        Statement::CreateView(stmt) => {
            validate_param(&stmt.name, query_ctx)?;
        }
        Statement::AlterTable(stmt) => {
            validate_param(stmt.table_name(), query_ctx)?;
        }
        #[cfg(feature = "enterprise")]
        Statement::AlterTrigger(_) => {}
        Statement::SetVariables(_) | Statement::ShowVariables(_) => {}
        Statement::ShowCharset(_) | Statement::ShowCollation(_) => {}

        Statement::Comment(comment) => match &comment.object {
            CommentObject::Table(table) => validate_param(table, query_ctx)?,
            CommentObject::Column { table, .. } => validate_param(table, query_ctx)?,
            CommentObject::Flow(flow) => validate_flow(flow, query_ctx)?,
        },

        Statement::Insert(insert) => {
            let name = insert.table_name().context(ParseSqlSnafu)?;
            validate_param(name, query_ctx)?;
        }
        Statement::CreateTable(stmt) => {
            validate_param(&stmt.name, query_ctx)?;
        }
        Statement::CreateTableLike(stmt) => {
            validate_param(&stmt.table_name, query_ctx)?;
            validate_param(&stmt.source_name, query_ctx)?;
        }
        Statement::DropTable(drop_stmt) => {
            for table_name in drop_stmt.table_names() {
                validate_param(table_name, query_ctx)?;
            }
        }
        Statement::DropView(stmt) => {
            validate_param(&stmt.view_name, query_ctx)?;
        }
        Statement::ShowTables(stmt) => {
            validate_db_permission!(stmt, query_ctx);
        }
        Statement::ShowTableStatus(stmt) => {
            validate_db_permission!(stmt, query_ctx);
        }
        Statement::ShowColumns(stmt) => {
            validate_db_permission!(stmt, query_ctx);
        }
        Statement::ShowIndex(stmt) => {
            validate_db_permission!(stmt, query_ctx);
        }
        Statement::ShowRegion(stmt) => {
            validate_db_permission!(stmt, query_ctx);
        }
        Statement::ShowViews(stmt) => {
            validate_db_permission!(stmt, query_ctx);
        }
        Statement::ShowFlows(stmt) => {
            validate_db_permission!(stmt, query_ctx);
        }
        #[cfg(feature = "enterprise")]
        Statement::ShowTriggers(_stmt) => {}
        Statement::ShowStatus(_stmt) => {}
        Statement::ShowSearchPath(_stmt) => {}
        Statement::DescribeTable(stmt) => {
            validate_param(stmt.name(), query_ctx)?;
        }
        Statement::Copy(sql::statements::copy::Copy::CopyTable(stmt)) => match stmt {
            CopyTable::To(copy_table_to) => validate_param(&copy_table_to.table_name, query_ctx)?,
            CopyTable::From(copy_table_from) => {
                validate_param(&copy_table_from.table_name, query_ctx)?
            }
        },
        Statement::Copy(sql::statements::copy::Copy::CopyDatabase(copy_database)) => {
            match copy_database {
                CopyDatabase::To(stmt) => validate_database(&stmt.database_name, query_ctx)?,
                CopyDatabase::From(stmt) => validate_database(&stmt.database_name, query_ctx)?,
            }
        }
        Statement::TruncateTable(stmt) => {
            validate_param(stmt.table_name(), query_ctx)?;
        }
        Statement::FetchCursor(_) | Statement::CloseCursor(_) => {}
        Statement::Kill(_) => {}
        Statement::ShowProcesslist(_) => {}
    }
    Ok(())
}

fn validate_param(name: &ObjectName, query_ctx: &QueryContextRef) -> Result<()> {
    let (catalog, schema, _) = table_idents_to_full_name(name, query_ctx)
        .map_err(BoxedError::new)
        .context(ExternalSnafu)?;

    validate_catalog_and_schema(&catalog, &schema, query_ctx)
        .map_err(BoxedError::new)
        .context(SqlExecInterceptedSnafu)
}

fn validate_flow(name: &ObjectName, query_ctx: &QueryContextRef) -> Result<()> {
    let catalog = match &name.0[..] {
        [_flow] => query_ctx.current_catalog().to_string(),
        [catalog, _flow] => catalog.to_string_unquoted(),
        _ => {
            return InvalidSqlSnafu {
                err_msg: format!(
                    "expect flow name to be <catalog>.<flow_name> or <flow_name>, actual: {name}",
                ),
            }
            .fail();
        }
    };

    let schema = query_ctx.current_schema();

    validate_catalog_and_schema(&catalog, &schema, query_ctx)
        .map_err(BoxedError::new)
        .context(SqlExecInterceptedSnafu)
}

fn validate_database(name: &ObjectName, query_ctx: &QueryContextRef) -> Result<()> {
    let (catalog, schema) = match &name.0[..] {
        [schema] => (
            query_ctx.current_catalog().to_string(),
            schema.to_string_unquoted(),
        ),
        [catalog, schema] => (catalog.to_string_unquoted(), schema.to_string_unquoted()),
        _ => InvalidSqlSnafu {
            err_msg: format!(
                "expect database name to be <catalog>.<schema> or <schema>, actual: {name}",
            ),
        }
        .fail()?,
    };

    validate_catalog_and_schema(&catalog, &schema, query_ctx)
        .map_err(BoxedError::new)
        .context(SqlExecInterceptedSnafu)
}

fn is_readonly_plan(plan: &LogicalPlan) -> bool {
    !matches!(plan, LogicalPlan::Dml(_) | LogicalPlan::Ddl(_))
}

#[cfg(test)]
mod tests {
    use std::collections::HashMap;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::{Arc, Barrier};
    use std::thread;
    use std::time::{Duration, Instant};

    use common_base::Plugins;
    use query::query_engine::options::QueryOptions;
    use session::context::QueryContext;
    use sql::dialect::GreptimeDbDialect;
    use strfmt::Format;

    use super::*;

    #[test]
    fn test_fast_legacy_check_deadlock_prevention() {
        let cache = DashMap::new();

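        // Seed the cache: metric1 and metric3 are legacy-mode, metric2 is prom-mode.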
        cache.insert("metric1".to_string(), true);
        cache.insert("metric2".to_string(), false);
        cache.insert("metric3".to_string(), true);

        let metric1 = "metric1".to_string();
        let metric4 = "metric4".to_string();
        let names1 = vec![&metric1, &metric4];
        let result = fast_legacy_check(&cache, &names1);
        assert!(result.is_ok());
        assert_eq!(result.unwrap(), Some(true));
        assert!(cache.contains_key("metric4"));
        assert!(*cache.get("metric4").unwrap().value());

        let metric5 = "metric5".to_string();
        let metric6 = "metric6".to_string();
        let names2 = vec![&metric5, &metric6];
        let result = fast_legacy_check(&cache, &names2);
        assert!(result.is_ok());
        assert_eq!(result.unwrap(), None);

        let cache_incompatible = DashMap::new();
        cache_incompatible.insert("metric1".to_string(), true);
        cache_incompatible.insert("metric2".to_string(), false);
        let metric1_test = "metric1".to_string();
        let metric2_test = "metric2".to_string();
        let names3 = vec![&metric1_test, &metric2_test];
        let result = fast_legacy_check(&cache_incompatible, &names3);
        assert!(result.is_err());

        let cache_concurrent = Arc::new(DashMap::new());
        cache_concurrent.insert("shared_metric".to_string(), true);

        let num_threads = 8;
        let operations_per_thread = 100;
        let barrier = Arc::new(Barrier::new(num_threads));
        let success_flag = Arc::new(AtomicBool::new(true));

        let handles: Vec<_> = (0..num_threads)
            .map(|thread_id| {
                let cache_clone = Arc::clone(&cache_concurrent);
                let barrier_clone = Arc::clone(&barrier);
                let success_flag_clone = Arc::clone(&success_flag);

                thread::spawn(move || {
                    barrier_clone.wait();

                    let start_time = Instant::now();
                    for i in 0..operations_per_thread {
                        let shared_metric = "shared_metric".to_string();
                        let new_metric = format!("thread_{}_metric_{}", thread_id, i);
                        let names = vec![&shared_metric, &new_metric];

                        match fast_legacy_check(&cache_clone, &names) {
                            Ok(_) => {}
                            Err(_) => {
                                success_flag_clone.store(false, Ordering::Relaxed);
                                return;
                            }
                        }

                        if start_time.elapsed() > Duration::from_secs(10) {
                            success_flag_clone.store(false, Ordering::Relaxed);
                            return;
                        }
                    }
                })
            })
            .collect();

        let start_time = Instant::now();
        for (i, handle) in handles.into_iter().enumerate() {
            let join_result = handle.join();

            if start_time.elapsed() > Duration::from_secs(30) {
                panic!("Test timed out - possible deadlock detected!");
            }

            if join_result.is_err() {
                panic!("Thread {} panicked during execution", i);
            }
        }

        assert!(
            success_flag.load(Ordering::Relaxed),
            "Some operations failed"
        );

        let final_count = cache_concurrent.len();
        assert!(
            final_count > 1 + num_threads * operations_per_thread / 2,
            "Expected more cache entries, got {}",
            final_count
        );
    }

    #[test]
    fn test_exec_validation() {
        let query_ctx = QueryContext::arc();
        let plugins: Plugins = Plugins::new();
        plugins.insert(QueryOptions {
            disallow_cross_catalog_query: true,
        });

        let sql = r#"
            SELECT * FROM demo;
            EXPLAIN SELECT * FROM demo;
            CREATE DATABASE test_database;
            SHOW DATABASES;
        "#;
        let stmts = parse_stmt(sql, &GreptimeDbDialect {}).unwrap();
        assert_eq!(stmts.len(), 4);
        for stmt in stmts {
            let re = check_permission(plugins.clone(), &stmt, &query_ctx);
            re.unwrap();
        }

        let sql = r#"
            SHOW CREATE TABLE demo;
            ALTER TABLE demo ADD COLUMN new_col INT;
        "#;
        let stmts = parse_stmt(sql, &GreptimeDbDialect {}).unwrap();
        assert_eq!(stmts.len(), 2);
        for stmt in stmts {
            let re = check_permission(plugins.clone(), &stmt, &query_ctx);
            re.unwrap();
        }

        fn replace_test(template_sql: &str, plugins: Plugins, query_ctx: &QueryContextRef) {
            let right = vec![("", ""), ("", "public."), ("greptime.", "public.")];
            for (catalog, schema) in right {
                let sql = do_fmt(template_sql, catalog, schema);
                do_test(&sql, plugins.clone(), query_ctx, true);
            }

            let wrong = vec![
                ("wrongcatalog.", "public."),
                ("wrongcatalog.", "wrongschema."),
            ];
            for (catalog, schema) in wrong {
                let sql = do_fmt(template_sql, catalog, schema);
                do_test(&sql, plugins.clone(), query_ctx, false);
            }
        }

        fn do_fmt(template: &str, catalog: &str, schema: &str) -> String {
            let vars = HashMap::from([
                ("catalog".to_string(), catalog),
                ("schema".to_string(), schema),
            ]);
            template.format(&vars).unwrap()
        }

        fn do_test(sql: &str, plugins: Plugins, query_ctx: &QueryContextRef, is_ok: bool) {
            let stmt = &parse_stmt(sql, &GreptimeDbDialect {}).unwrap()[0];
            let re = check_permission(plugins, stmt, query_ctx);
            if is_ok {
                re.unwrap();
            } else {
                assert!(re.is_err());
            }
        }

        let sql = "INSERT INTO {catalog}{schema}monitor(host) VALUES ('host1');";
        replace_test(sql, plugins.clone(), &query_ctx);

        let sql = r#"CREATE TABLE {catalog}{schema}demo(
                            host STRING,
                            ts TIMESTAMP,
                            TIME INDEX (ts),
                            PRIMARY KEY(host)
                        ) engine=mito;"#;
        replace_test(sql, plugins.clone(), &query_ctx);

        let sql = "DROP TABLE {catalog}{schema}demo;";
        replace_test(sql, plugins.clone(), &query_ctx);

        let sql = "SHOW TABLES FROM public";
        let stmt = parse_stmt(sql, &GreptimeDbDialect {}).unwrap();
        check_permission(plugins.clone(), &stmt[0], &query_ctx).unwrap();

        let sql = "SHOW TABLES FROM private";
        let stmt = parse_stmt(sql, &GreptimeDbDialect {}).unwrap();
        let re = check_permission(plugins.clone(), &stmt[0], &query_ctx);
        assert!(re.is_ok());

        let sql = "DESC TABLE {catalog}{schema}demo;";
        replace_test(sql, plugins.clone(), &query_ctx);

        let comment_flow_cases = [
            ("COMMENT ON FLOW my_flow IS 'comment';", true),
            ("COMMENT ON FLOW greptime.my_flow IS 'comment';", true),
            ("COMMENT ON FLOW wrongcatalog.my_flow IS 'comment';", false),
        ];
        for (sql, is_ok) in comment_flow_cases {
            let stmt = &parse_stmt(sql, &GreptimeDbDialect {}).unwrap()[0];
            let result = check_permission(plugins.clone(), stmt, &query_ctx);
            assert_eq!(result.is_ok(), is_ok);
        }

        let show_flow_cases = [
            ("SHOW CREATE FLOW my_flow;", true),
            ("SHOW CREATE FLOW greptime.my_flow;", true),
            ("SHOW CREATE FLOW wrongcatalog.my_flow;", false),
        ];
        for (sql, is_ok) in show_flow_cases {
            let stmt = &parse_stmt(sql, &GreptimeDbDialect {}).unwrap()[0];
            let result = check_permission(plugins.clone(), stmt, &query_ctx);
            assert_eq!(result.is_ok(), is_ok);
        }
    }
}