frontend/instance/grpc.rs

1// Copyright 2023 Greptime Team
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7//     http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15use std::pin::Pin;
16use std::sync::Arc;
17use std::time::Instant;
18
19use api::helper::from_pb_time_ranges;
20use api::v1::ddl_request::{Expr as DdlExpr, Expr};
21use api::v1::greptime_request::Request;
22use api::v1::query_request::Query;
23use api::v1::{
24    DeleteRequests, DropFlowExpr, InsertIntoPlan, InsertRequests, RowDeleteRequests,
25    RowInsertRequests,
26};
27use async_stream::try_stream;
28use async_trait::async_trait;
29use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq};
30use common_base::AffectedRows;
31use common_error::ext::BoxedError;
32use common_grpc::flight::do_put::DoPutResponse;
33use common_query::Output;
34use common_query::logical_plan::add_insert_to_logical_plan;
35use common_telemetry::tracing::{self};
36use datafusion::datasource::DefaultTableSource;
37use futures::Stream;
38use futures::stream::StreamExt;
39use query::parser::PromQuery;
40use servers::interceptor::{GrpcQueryInterceptor, GrpcQueryInterceptorRef};
41use servers::query_handler::grpc::GrpcQueryHandler;
42use servers::query_handler::sql::SqlQueryHandler;
43use session::context::QueryContextRef;
44use snafu::{OptionExt, ResultExt, ensure};
45use table::TableRef;
46use table::table::adapter::DfTableProviderAdapter;
47use table::table_name::TableName;
48
49use crate::error::{
50    CatalogSnafu, DataFusionSnafu, Error, ExternalSnafu, IncompleteGrpcRequestSnafu,
51    NotSupportedSnafu, PermissionSnafu, PlanStatementSnafu, Result,
52    SubstraitDecodeLogicalPlanSnafu, TableNotFoundSnafu, TableOperationSnafu,
53};
54use crate::instance::{Instance, attach_timer};
55use crate::metrics::{
56    GRPC_HANDLE_PLAN_ELAPSED, GRPC_HANDLE_PROMQL_ELAPSED, GRPC_HANDLE_SQL_ELAPSED,
57};
58
#[async_trait]
impl GrpcQueryHandler for Instance {
    type Error = Error;

    /// Handles a unary gRPC `GreptimeRequest`.
    ///
    /// Pipeline: interceptor `pre_execute` -> permission check -> optional
    /// request limiter -> dispatch on the request variant (inserts, deletes,
    /// queries, DDL) -> interceptor `post_execute` on the resulting output.
    async fn do_query(&self, request: Request, ctx: QueryContextRef) -> Result<Output> {
        let interceptor_ref = self.plugins.get::<GrpcQueryInterceptorRef<Error>>();
        let interceptor = interceptor_ref.as_ref();
        interceptor.pre_execute(&request, ctx.clone())?;

        self.plugins
            .get::<PermissionCheckerRef>()
            .as_ref()
            .check_permission(ctx.current_user(), PermissionReq::GrpcRequest(&request))
            .context(PermissionSnafu)?;

        // Hold the limiter permit (when a limiter is configured) for the
        // whole request; dropping `_guard` at the end of this fn releases it.
        let _guard = if let Some(limiter) = &self.limiter {
            Some(limiter.limit_request(&request).await?)
        } else {
            None
        };

        let output = match request {
            Request::Inserts(requests) => self.handle_inserts(requests, ctx.clone()).await?,
            Request::RowInserts(requests) => {
                self.handle_row_inserts(requests, ctx.clone(), false, false)
                    .await?
            }
            Request::Deletes(requests) => self.handle_deletes(requests, ctx.clone()).await?,
            Request::RowDeletes(requests) => self.handle_row_deletes(requests, ctx.clone()).await?,
            Request::Query(query_request) => {
                let query = query_request.query.context(IncompleteGrpcRequestSnafu {
                    err_msg: "Missing field 'QueryRequest.query'",
                })?;
                match query {
                    Query::Sql(sql) => {
                        let timer = GRPC_HANDLE_SQL_ELAPSED.start_timer();
                        let mut result = SqlQueryHandler::do_query(self, &sql, ctx.clone()).await;
                        // The gRPC interface supports exactly one statement
                        // per SQL query string.
                        ensure!(
                            result.len() == 1,
                            NotSupportedSnafu {
                                feat: "execute multiple statements in SQL query string through GRPC interface"
                            }
                        );
                        let output = result.remove(0)?;
                        attach_timer(output, timer)
                    }
                    Query::LogicalPlan(plan) => {
                        // this path is useful internally when flownode needs to execute a logical plan through gRPC interface
                        let timer = GRPC_HANDLE_PLAN_ELAPSED.start_timer();

                        // use dummy catalog to provide table
                        let plan_decoder = self
                            .query_engine()
                            .engine_context(ctx.clone())
                            .new_plan_decoder()
                            .context(PlanStatementSnafu)?;

                        let dummy_catalog_list =
                            Arc::new(catalog::table_source::dummy_catalog::DummyCatalogList::new(
                                self.catalog_manager().clone(),
                            ));

                        // `true`: decode and optimize the plan in one step,
                        // since nothing else is appended to it (contrast with
                        // the InsertIntoPlan path below).
                        let logical_plan = plan_decoder
                            .decode(bytes::Bytes::from(plan), dummy_catalog_list, true)
                            .await
                            .context(SubstraitDecodeLogicalPlanSnafu)?;
                        let output =
                            SqlQueryHandler::do_exec_plan(self, None, logical_plan, ctx.clone())
                                .await?;

                        attach_timer(output, timer)
                    }
                    Query::InsertIntoPlan(insert) => {
                        self.handle_insert_plan(insert, ctx.clone()).await?
                    }
                    Query::PromRangeQuery(promql) => {
                        let timer = GRPC_HANDLE_PROMQL_ELAPSED.start_timer();
                        let prom_query = PromQuery {
                            query: promql.query,
                            start: promql.start,
                            end: promql.end,
                            step: promql.step,
                            lookback: promql.lookback,
                            alias: None,
                        };
                        let mut result =
                            SqlQueryHandler::do_promql_query(self, &prom_query, ctx.clone()).await;
                        // Same single-statement restriction as the SQL path.
                        ensure!(
                            result.len() == 1,
                            NotSupportedSnafu {
                                feat: "execute multiple statements in PromQL query string through GRPC interface"
                            }
                        );
                        let output = result.remove(0)?;
                        attach_timer(output, timer)
                    }
                }
            }
            Request::Ddl(request) => {
                let mut expr = request.expr.context(IncompleteGrpcRequestSnafu {
                    err_msg: "'expr' is absent in DDL request",
                })?;

                // Requests may omit catalog/schema; default them from the
                // session context before dispatching.
                fill_catalog_and_schema_from_context(&mut expr, &ctx);

                match expr {
                    DdlExpr::CreateTable(mut expr) => {
                        let _ = self
                            .statement_executor
                            .create_table_inner(&mut expr, None, ctx.clone())
                            .await?;
                        Output::new_with_affected_rows(0)
                    }
                    DdlExpr::AlterDatabase(expr) => {
                        let _ = self
                            .statement_executor
                            .alter_database_inner(expr, ctx.clone())
                            .await?;
                        Output::new_with_affected_rows(0)
                    }
                    DdlExpr::AlterTable(expr) => {
                        self.statement_executor
                            .alter_table_inner(expr, ctx.clone())
                            .await?
                    }
                    DdlExpr::CreateDatabase(expr) => {
                        self.statement_executor
                            .create_database(
                                &expr.schema_name,
                                expr.create_if_not_exists,
                                expr.options,
                                ctx.clone(),
                            )
                            .await?
                    }
                    DdlExpr::DropTable(expr) => {
                        let table_name =
                            TableName::new(&expr.catalog_name, &expr.schema_name, &expr.table_name);
                        self.statement_executor
                            .drop_table(table_name, expr.drop_if_exists, ctx.clone())
                            .await?
                    }
                    DdlExpr::TruncateTable(expr) => {
                        let table_name =
                            TableName::new(&expr.catalog_name, &expr.schema_name, &expr.table_name);
                        // Missing time ranges mean "truncate everything".
                        let time_ranges = from_pb_time_ranges(expr.time_ranges.unwrap_or_default())
                            .map_err(BoxedError::new)
                            .context(ExternalSnafu)?;
                        self.statement_executor
                            .truncate_table(table_name, time_ranges, ctx.clone())
                            .await?
                    }
                    DdlExpr::CreateFlow(expr) => {
                        self.statement_executor
                            .create_flow_inner(expr, ctx.clone())
                            .await?
                    }
                    DdlExpr::DropFlow(DropFlowExpr {
                        catalog_name,
                        flow_name,
                        drop_if_exists,
                        ..
                    }) => {
                        self.statement_executor
                            .drop_flow(catalog_name, flow_name, drop_if_exists, ctx.clone())
                            .await?
                    }
                    DdlExpr::CreateView(expr) => {
                        let _ = self
                            .statement_executor
                            .create_view_by_expr(expr, ctx.clone())
                            .await?;

                        Output::new_with_affected_rows(0)
                    }
                    DdlExpr::DropView(_) => {
                        // NOTE: DropView over gRPC is not implemented yet;
                        // reaching this arm panics via `todo!`.
                        todo!("implemented in the following PR")
                    }
                    DdlExpr::CommentOn(expr) => {
                        self.statement_executor
                            .comment_by_expr(expr, ctx.clone())
                            .await?
                    }
                }
            }
        };

        let output = interceptor.post_execute(output, ctx)?;
        Ok(output)
    }

    /// Handles one bulk-insert (Flight `DoPut`) record batch.
    ///
    /// `table_ref` is a caller-owned cache: the table is resolved from the
    /// catalog on the first call and stored back into it, so later calls on
    /// the same stream skip the catalog lookup.
    async fn put_record_batch(
        &self,
        request: servers::grpc::flight::PutRecordBatchRequest,
        table_ref: &mut Option<TableRef>,
        ctx: QueryContextRef,
    ) -> Result<AffectedRows> {
        let table = if let Some(table) = table_ref {
            table.clone()
        } else {
            let table = self
                .catalog_manager()
                .table(
                    &request.table_name.catalog_name,
                    &request.table_name.schema_name,
                    &request.table_name.table_name,
                    None,
                )
                .await
                .context(CatalogSnafu)?
                .with_context(|| TableNotFoundSnafu {
                    table_name: request.table_name.to_string(),
                })?;
            *table_ref = Some(table.clone());
            table
        };

        let interceptor_ref = self.plugins.get::<GrpcQueryInterceptorRef<Error>>();
        let interceptor = interceptor_ref.as_ref();
        interceptor.pre_bulk_insert(table.clone(), ctx.clone())?;

        self.plugins
            .get::<PermissionCheckerRef>()
            .as_ref()
            .check_permission(ctx.current_user(), PermissionReq::BulkInsert)
            .context(PermissionSnafu)?;

        // do we check limit for bulk insert?

        self.inserter
            .handle_bulk_insert(
                table,
                request.flight_data,
                request.record_batch,
                request.schema_bytes,
            )
            .await
            .context(TableOperationSnafu)
    }

    /// Handles a whole `DoPut` stream of record batch requests.
    ///
    /// Returns a stream yielding one [`DoPutResponse`] (request id, affected
    /// rows, elapsed seconds) per incoming request. Permission and
    /// interceptor checks, plus the table lookup, run once on the first
    /// request; the resolved table is reused for the rest of the stream.
    fn handle_put_record_batch_stream(
        &self,
        mut stream: servers::grpc::flight::PutRecordBatchRequestStream,
        ctx: QueryContextRef,
    ) -> Pin<Box<dyn Stream<Item = Result<DoPutResponse>> + Send>> {
        // Clone all necessary data to make it 'static
        let catalog_manager = self.catalog_manager().clone();
        let plugins = self.plugins.clone();
        let inserter = self.inserter.clone();
        // NOTE(review): `ctx` is already owned here; this clone looks
        // redundant (rebinding alone would move it into the stream).
        let ctx = ctx.clone();
        let mut table_ref: Option<TableRef> = None;
        let mut table_checked = false;

        Box::pin(try_stream! {
            // Process each request in the stream
            while let Some(request_result) = stream.next().await {
                let request = request_result.map_err(|e| {
                    let error_msg = format!("Stream error: {:?}", e);
                    IncompleteGrpcRequestSnafu { err_msg: error_msg }.build()
                })?;

                // Resolve table and check permissions on first RecordBatch (after schema is received)
                if !table_checked {
                    let table_name = &request.table_name;

                    plugins
                        .get::<PermissionCheckerRef>()
                        .as_ref()
                        .check_permission(ctx.current_user(), PermissionReq::BulkInsert)
                        .context(PermissionSnafu)?;

                    // Resolve table reference
                    table_ref = Some(
                        catalog_manager
                            .table(
                                &table_name.catalog_name,
                                &table_name.schema_name,
                                &table_name.table_name,
                                None,
                            )
                            .await
                            .context(CatalogSnafu)?
                            .with_context(|| TableNotFoundSnafu {
                                table_name: table_name.to_string(),
                            })?,
                    );

                    // Check permissions for the table
                    let interceptor_ref = plugins.get::<GrpcQueryInterceptorRef<Error>>();
                    let interceptor = interceptor_ref.as_ref();
                    interceptor.pre_bulk_insert(table_ref.clone().unwrap(), ctx.clone())?;

                    table_checked = true;
                }

                let request_id = request.request_id;
                let start = Instant::now();
                let rows = inserter
                    .handle_bulk_insert(
                        table_ref.clone().unwrap(),
                        request.flight_data,
                        request.record_batch,
                        request.schema_bytes,
                    )
                    .await
                    .context(TableOperationSnafu)?;
                let elapsed_secs = start.elapsed().as_secs_f64();
                yield DoPutResponse::new(request_id, rows, elapsed_secs);
            }
        })
    }
}
371
372fn fill_catalog_and_schema_from_context(ddl_expr: &mut DdlExpr, ctx: &QueryContextRef) {
373    let catalog = ctx.current_catalog();
374    let schema = ctx.current_schema();
375
376    macro_rules! check_and_fill {
377        ($expr:ident) => {
378            if $expr.catalog_name.is_empty() {
379                $expr.catalog_name = catalog.to_string();
380            }
381            if $expr.schema_name.is_empty() {
382                $expr.schema_name = schema.to_string();
383            }
384        };
385    }
386
387    match ddl_expr {
388        Expr::CreateDatabase(_) | Expr::AlterDatabase(_) => { /* do nothing*/ }
389        Expr::CreateTable(expr) => {
390            check_and_fill!(expr);
391        }
392        Expr::AlterTable(expr) => {
393            check_and_fill!(expr);
394        }
395        Expr::DropTable(expr) => {
396            check_and_fill!(expr);
397        }
398        Expr::TruncateTable(expr) => {
399            check_and_fill!(expr);
400        }
401        Expr::CreateFlow(expr) => {
402            if expr.catalog_name.is_empty() {
403                expr.catalog_name = catalog.to_string();
404            }
405        }
406        Expr::DropFlow(expr) => {
407            if expr.catalog_name.is_empty() {
408                expr.catalog_name = catalog.to_string();
409            }
410        }
411        Expr::CreateView(expr) => {
412            check_and_fill!(expr);
413        }
414        Expr::DropView(expr) => {
415            check_and_fill!(expr);
416        }
417        Expr::CommentOn(expr) => {
418            check_and_fill!(expr);
419        }
420    }
421}
422
impl Instance {
    /// Executes an `InsertIntoPlan`: decodes the encoded source plan, wraps
    /// it in an `INSERT INTO` logical plan targeting the requested table,
    /// then analyzes, optimizes and executes the result.
    async fn handle_insert_plan(
        &self,
        insert: InsertIntoPlan,
        ctx: QueryContextRef,
    ) -> Result<Output> {
        let timer = GRPC_HANDLE_PLAN_ELAPSED.start_timer();
        let table_name = insert.table_name.context(IncompleteGrpcRequestSnafu {
            err_msg: "'table_name' is absent in InsertIntoPlan",
        })?;

        // use dummy catalog to provide table
        let plan_decoder = self
            .query_engine()
            .engine_context(ctx.clone())
            .new_plan_decoder()
            .context(PlanStatementSnafu)?;

        let dummy_catalog_list =
            Arc::new(catalog::table_source::dummy_catalog::DummyCatalogList::new(
                self.catalog_manager().clone(),
            ));

        // no optimize yet since we still need to add stuff
        let logical_plan = plan_decoder
            .decode(
                bytes::Bytes::from(insert.logical_plan),
                dummy_catalog_list,
                false,
            )
            .await
            .context(SubstraitDecodeLogicalPlanSnafu)?;

        // Resolve the insert target through the real catalog manager so a
        // missing table surfaces as TableNotFound rather than a plan error.
        let table = self
            .catalog_manager()
            .table(
                &table_name.catalog_name,
                &table_name.schema_name,
                &table_name.table_name,
                None,
            )
            .await
            .context(CatalogSnafu)?
            .with_context(|| TableNotFoundSnafu {
                table_name: [
                    table_name.catalog_name.clone(),
                    table_name.schema_name.clone(),
                    table_name.table_name.clone(),
                ]
                .join("."),
            })?;
        let table_provider = Arc::new(DfTableProviderAdapter::new(table));
        let table_source = Arc::new(DefaultTableSource::new(table_provider));

        // Wrap the decoded source plan into an INSERT INTO on the target.
        let insert_into = add_insert_to_logical_plan(table_name, table_source, logical_plan)
            .context(SubstraitDecodeLogicalPlanSnafu)?;

        let engine_ctx = self.query_engine().engine_context(ctx.clone());
        let state = engine_ctx.state();
        // Analyze the plan
        let analyzed_plan = state
            .analyzer()
            .execute_and_check(insert_into, state.config_options(), |_, _| {})
            .context(DataFusionSnafu)?;

        // Optimize the plan
        let optimized_plan = state.optimize(&analyzed_plan).context(DataFusionSnafu)?;

        let output = SqlQueryHandler::do_exec_plan(self, None, optimized_plan, ctx.clone()).await?;

        Ok(attach_timer(output, timer))
    }

    /// Handles column-format gRPC insert requests.
    #[tracing::instrument(skip_all)]
    pub async fn handle_inserts(
        &self,
        requests: InsertRequests,
        ctx: QueryContextRef,
    ) -> Result<Output> {
        self.inserter
            .handle_column_inserts(requests, ctx, self.statement_executor.as_ref())
            .await
            .context(TableOperationSnafu)
    }

    /// Handles row-format gRPC insert requests.
    #[tracing::instrument(skip_all)]
    pub async fn handle_row_inserts(
        &self,
        requests: RowInsertRequests,
        ctx: QueryContextRef,
        accommodate_existing_schema: bool,
        is_single_value: bool,
    ) -> Result<Output> {
        self.inserter
            .handle_row_inserts(
                requests,
                ctx,
                self.statement_executor.as_ref(),
                accommodate_existing_schema,
                is_single_value,
            )
            .await
            .context(TableOperationSnafu)
    }

    /// Handles row inserts from the InfluxDB line protocol, using
    /// last-non-null merge semantics.
    #[tracing::instrument(skip_all)]
    pub async fn handle_influx_row_inserts(
        &self,
        requests: RowInsertRequests,
        ctx: QueryContextRef,
    ) -> Result<Output> {
        self.inserter
            .handle_last_non_null_inserts(
                requests,
                ctx,
                self.statement_executor.as_ref(),
                true,
                // Influx protocol may write multiple fields (values).
                false,
            )
            .await
            .context(TableOperationSnafu)
    }

    /// Handles metric row inserts routed through the given physical table.
    #[tracing::instrument(skip_all)]
    pub async fn handle_metric_row_inserts(
        &self,
        requests: RowInsertRequests,
        ctx: QueryContextRef,
        physical_table: String,
    ) -> Result<Output> {
        self.inserter
            .handle_metric_row_inserts(requests, ctx, &self.statement_executor, physical_table)
            .await
            .context(TableOperationSnafu)
    }

    /// Handles column-format gRPC delete requests.
    #[tracing::instrument(skip_all)]
    pub async fn handle_deletes(
        &self,
        requests: DeleteRequests,
        ctx: QueryContextRef,
    ) -> Result<Output> {
        self.deleter
            .handle_column_deletes(requests, ctx)
            .await
            .context(TableOperationSnafu)
    }

    /// Handles row-format gRPC delete requests.
    #[tracing::instrument(skip_all)]
    pub async fn handle_row_deletes(
        &self,
        requests: RowDeleteRequests,
        ctx: QueryContextRef,
    ) -> Result<Output> {
        self.deleter
            .handle_row_deletes(requests, ctx)
            .await
            .context(TableOperationSnafu)
    }
}