// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! All query handler traits for various request protocols, like SQL or GRPC.
//!
//! An instance that wishes to support a certain request protocol just implements the
//! corresponding trait; the Server will handle the codec for you.
//!
//! Note:
//! Query handlers are not confined to only handle read requests; they are expected to handle
//! write requests too. So the word "query" here might seem ambiguous. However, "query" has been
//! used as a kind of convention — it's the "Q" in "SQL" — so we'd better stick to the
//! word "query".
pub mod grpc;
pub mod sql;

use std::collections::HashMap;
use std::sync::Arc;

use api::prom_store::remote::ReadRequest;
use api::v1::RowInsertRequests;
use async_trait::async_trait;
use catalog::CatalogManager;
use common_query::Output;
use datatypes::timestamp::TimestampNanosecond;
use headers::HeaderValue;
use log_query::LogQuery;
use opentelemetry_proto::tonic::collector::logs::v1::ExportLogsServiceRequest;
use opentelemetry_proto::tonic::collector::trace::v1::ExportTraceServiceRequest;
use otel_arrow_rust::proto::opentelemetry::collector::metrics::v1::ExportMetricsServiceRequest;
use pipeline::{GreptimePipelineParams, Pipeline, PipelineInfo, PipelineVersion, PipelineWay};
use serde_json::Value;
use session::context::{QueryContext, QueryContextRef};
/// A stored dashboard, as managed by [`DashboardHandler`].
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct DashboardDefinition {
    // Name used to identify the dashboard (save/delete look dashboards up by name).
    pub name: String,
    // The dashboard content as a string — presumably serialized JSON; confirm against callers.
    pub definition: String,
}
52
use crate::error::Result;
use crate::http::jaeger::QueryTraceParams;
use crate::influxdb::InfluxdbRequest;
use crate::opentsdb::codec::DataPoint;
use crate::prom_store::Metrics;

// Shared, thread-safe handles to the protocol/query handler traits defined below.
pub type OpentsdbProtocolHandlerRef = Arc<dyn OpentsdbProtocolHandler + Send + Sync>;
pub type InfluxdbLineProtocolHandlerRef = Arc<dyn InfluxdbLineProtocolHandler + Send + Sync>;
pub type PromStoreProtocolHandlerRef = Arc<dyn PromStoreProtocolHandler + Send + Sync>;
pub type OpenTelemetryProtocolHandlerRef = Arc<dyn OpenTelemetryProtocolHandler + Send + Sync>;
pub type PipelineHandlerRef = Arc<dyn PipelineHandler + Send + Sync>;
pub type LogQueryHandlerRef = Arc<dyn LogQueryHandler + Send + Sync>;
pub type JaegerQueryHandlerRef = Arc<dyn JaegerQueryHandler + Send + Sync>;
65
/// Handles write requests in the InfluxDB line protocol.
#[async_trait]
pub trait InfluxdbLineProtocolHandler {
    /// A successful request will not return a response.
    /// Only on error will the socket return a line of data.
    async fn exec(&self, request: InfluxdbRequest, ctx: QueryContextRef) -> Result<Output>;
}
72
/// Handles data points ingested via the OpenTSDB protocol.
#[async_trait]
pub trait OpentsdbProtocolHandler {
    /// A successful request will not return a response.
    /// Only on error will the socket return a line of data.
    ///
    /// The returned `usize` is presumably the number of data points handled —
    /// TODO(review): confirm against implementations.
    async fn exec(&self, data_points: Vec<DataPoint>, ctx: QueryContextRef) -> Result<usize>;
}
79
/// Response payload for a Prometheus remote storage request.
pub struct PromStoreResponse {
    // Value for the HTTP `Content-Type` header of the response.
    pub content_type: HeaderValue,
    // Value for the HTTP `Content-Encoding` header of the response.
    pub content_encoding: HeaderValue,
    // Additional per-response metrics — NOTE(review): key semantics not visible here.
    pub resp_metrics: HashMap<String, Value>,
    // Encoded response body bytes.
    pub body: Vec<u8>,
}
86
/// Handles Prometheus remote storage (remote write/read) and push gateway requests.
#[async_trait]
pub trait PromStoreProtocolHandler {
    /// Runs pre-write checks/hooks for prometheus remote write requests.
    ///
    /// The default implementation is a no-op that always succeeds.
    async fn pre_write(&self, _request: &RowInsertRequests, _ctx: QueryContextRef) -> Result<()> {
        Ok(())
    }

    /// Handling prometheus remote write requests
    ///
    /// `with_metric_engine` toggles metric-engine-specific handling —
    /// NOTE(review): the exact effect is implementation-defined, not visible here.
    async fn write(
        &self,
        request: RowInsertRequests,
        ctx: QueryContextRef,
        with_metric_engine: bool,
    ) -> Result<Output>;

    /// Handling prometheus remote read requests
    async fn read(&self, request: ReadRequest, ctx: QueryContextRef) -> Result<PromStoreResponse>;

    /// Handling push gateway requests
    async fn ingest_metrics(&self, metrics: Metrics) -> Result<()>;
}
107
/// Handles OpenTelemetry (OTLP) export requests for metrics, traces and logs.
///
/// Traces and logs are routed through a [`PipelineHandler`] so that pipeline
/// transformations can be applied before storage.
#[async_trait]
pub trait OpenTelemetryProtocolHandler: PipelineHandler {
    /// Handling opentelemetry metrics request
    async fn metrics(
        &self,
        request: ExportMetricsServiceRequest,
        ctx: QueryContextRef,
    ) -> Result<Output>;

    /// Handling opentelemetry traces request
    ///
    /// `table_name` is the destination table; `pipeline`/`pipeline_params`
    /// select and configure the transformation applied to the spans.
    async fn traces(
        &self,
        pipeline_handler: PipelineHandlerRef,
        request: ExportTraceServiceRequest,
        pipeline: PipelineWay,
        pipeline_params: GreptimePipelineParams,
        table_name: String,
        ctx: QueryContextRef,
    ) -> Result<Output>;

    /// Handling opentelemetry logs request
    ///
    /// Returns one [`Output`] per resulting write — NOTE(review): exact
    /// granularity of the `Vec` is implementation-defined.
    async fn logs(
        &self,
        pipeline_handler: PipelineHandlerRef,
        request: ExportLogsServiceRequest,
        pipeline: PipelineWay,
        pipeline_params: GreptimePipelineParams,
        table_name: String,
        ctx: QueryContextRef,
    ) -> Result<Vec<Output>>;
}
138
/// PipelineHandler is responsible for handling pipeline related requests.
///
/// The "Pipeline" is a series of transformations that can be applied to unstructured
/// data like logs. This handler is responsible to manage pipelines and accept data for
/// processing.
///
/// The pipeline is stored in the database and can be retrieved by its name.
#[async_trait]
pub trait PipelineHandler {
    /// Insert row data (e.g. pipeline output) into storage.
    async fn insert(&self, input: RowInsertRequests, ctx: QueryContextRef) -> Result<Output>;

    /// Get a compiled pipeline by name and version.
    async fn get_pipeline(
        &self,
        name: &str,
        version: PipelineVersion,
        query_ctx: QueryContextRef,
    ) -> Result<Arc<Pipeline>>;

    /// Create or update a pipeline from its textual definition.
    async fn insert_pipeline(
        &self,
        name: &str,
        content_type: &str,
        pipeline: &str,
        query_ctx: QueryContextRef,
    ) -> Result<PipelineInfo>;

    /// Delete a pipeline by name and version.
    ///
    /// `Ok(None)` presumably signals "nothing was deleted" — TODO(review): confirm.
    async fn delete_pipeline(
        &self,
        name: &str,
        version: PipelineVersion,
        query_ctx: QueryContextRef,
    ) -> Result<Option<()>>;

    /// Look up a table by name within the given query context.
    async fn get_table(
        &self,
        table: &str,
        query_ctx: &QueryContext,
    ) -> std::result::Result<Option<Arc<table::Table>>, catalog::error::Error>;

    /// Build a pipeline from a string.
    fn build_pipeline(&self, pipeline: &str) -> Result<Pipeline>;

    /// Get an original pipeline (its raw text and creation timestamp) by name.
    async fn get_pipeline_str(
        &self,
        name: &str,
        version: PipelineVersion,
        query_ctx: QueryContextRef,
    ) -> Result<(String, TimestampNanosecond)>;
}
189
/// Handling dashboard as code CRUD
pub type DashboardHandlerRef = Arc<dyn DashboardHandler + Send + Sync>;

/// Save/list/delete operations for stored dashboards.
#[async_trait]
pub trait DashboardHandler {
    /// Save a dashboard `definition` under `name` — presumably create-or-overwrite;
    /// TODO(review): confirm overwrite semantics against implementations.
    async fn save(&self, name: &str, definition: &str, ctx: QueryContextRef) -> Result<()>;

    /// List all stored dashboard definitions.
    async fn list(&self, ctx: QueryContextRef) -> Result<Vec<DashboardDefinition>>;

    /// Delete the dashboard with the given `name`.
    async fn delete(&self, name: &str, ctx: QueryContextRef) -> Result<()>;
}
201
/// Handle log query requests.
#[async_trait]
pub trait LogQueryHandler {
    /// Execute a log query.
    async fn query(&self, query: LogQuery, ctx: QueryContextRef) -> Result<Output>;

    /// Get catalog manager for the given query context.
    fn catalog_manager(&self, ctx: &QueryContext) -> Result<&dyn CatalogManager>;
}
211
/// Handle Jaeger query requests.
#[async_trait]
pub trait JaegerQueryHandler {
    /// Get trace services. It's used for `/api/services` API.
    async fn get_services(&self, ctx: QueryContextRef) -> Result<Output>;

    /// Get Jaeger operations. It's used for `/api/operations` and `/api/services/{service_name}/operations` API.
    ///
    /// `span_kind` optionally filters operations by span kind.
    async fn get_operations(
        &self,
        ctx: QueryContextRef,
        service_name: &str,
        span_kind: Option<&str>,
    ) -> Result<Output>;

    /// Retrieves a trace by its unique identifier.
    ///
    /// This method is used to handle requests to the `/api/traces/{trace_id}` endpoint.
    /// It accepts optional `start_time` and `end_time` parameters in nanoseconds to filter the trace data within a specific time range.
    /// `limit` optionally caps the number of returned rows — TODO(review): confirm unit (spans vs. rows).
    async fn get_trace(
        &self,
        ctx: QueryContextRef,
        trace_id: &str,
        start_time: Option<i64>,
        end_time: Option<i64>,
        limit: Option<usize>,
    ) -> Result<Output>;

    /// Find traces by query params. It's used for `/api/traces` API.
    async fn find_traces(
        &self,
        ctx: QueryContextRef,
        query_params: QueryTraceParams,
    ) -> Result<Output>;
}
245}