1use std::collections::HashMap;
16use std::convert::Infallible;
17use std::fmt::Display;
18use std::net::SocketAddr;
19use std::sync::Mutex as StdMutex;
20use std::time::Duration;
21
22use async_trait::async_trait;
23use auth::UserProviderRef;
24use axum::extract::{DefaultBodyLimit, Request};
25use axum::http::StatusCode as HttpStatusCode;
26use axum::response::{IntoResponse, Response};
27use axum::routing::Route;
28use axum::serve::ListenerExt;
29use axum::{Router, middleware, routing};
30use common_base::Plugins;
31use common_base::readable_size::ReadableSize;
32use common_recordbatch::RecordBatch;
33use common_telemetry::{error, info};
34use common_time::Timestamp;
35use common_time::timestamp::TimeUnit;
36use datatypes::data_type::DataType;
37use datatypes::schema::SchemaRef;
38use event::{LogState, LogValidatorRef};
39use futures::FutureExt;
40use http::{HeaderValue, Method};
41use serde::{Deserialize, Serialize};
42use serde_json::Value;
43use snafu::{ResultExt, ensure};
44use tokio::sync::Mutex;
45use tokio::sync::oneshot::{self, Sender};
46use tonic::codegen::Service;
47use tower::{Layer, ServiceBuilder};
48use tower_http::compression::CompressionLayer;
49use tower_http::cors::{AllowOrigin, Any, CorsLayer};
50use tower_http::decompression::RequestDecompressionLayer;
51use tower_http::trace::TraceLayer;
52
53use self::authorize::AuthState;
54use self::result::table_result::TableResponse;
55use crate::configurator::HttpConfiguratorRef;
56use crate::elasticsearch;
57use crate::error::{
58 AddressBindSnafu, AlreadyStartedSnafu, Error, InternalIoSnafu, InvalidHeaderValueSnafu,
59 OtherSnafu, Result,
60};
61use crate::http::influxdb::{influxdb_health, influxdb_ping, influxdb_write_v1, influxdb_write_v2};
62use crate::http::otlp::OtlpState;
63use crate::http::prom_store::PromStoreState;
64use crate::http::prometheus::{
65 build_info_query, format_query, instant_query, label_values_query, labels_query, parse_query,
66 range_query, series_query,
67};
68use crate::http::result::arrow_result::ArrowResponse;
69use crate::http::result::csv_result::CsvResponse;
70use crate::http::result::error_result::ErrorResponse;
71use crate::http::result::greptime_result_v1::GreptimedbV1Response;
72use crate::http::result::influxdb_result_v1::InfluxdbV1Response;
73use crate::http::result::json_result::JsonResponse;
74use crate::http::result::null_result::NullResponse;
75use crate::interceptor::LogIngestInterceptorRef;
76use crate::metrics::http_metrics_layer;
77use crate::metrics_handler::MetricsHandler;
78use crate::prometheus_handler::PrometheusHandlerRef;
79use crate::query_handler::sql::ServerSqlQueryHandlerRef;
80use crate::query_handler::{
81 DashboardHandlerRef, InfluxdbLineProtocolHandlerRef, JaegerQueryHandlerRef, LogQueryHandlerRef,
82 OpenTelemetryProtocolHandlerRef, OpentsdbProtocolHandlerRef, PipelineHandlerRef,
83 PromStoreProtocolHandlerRef,
84};
85use crate::request_memory_limiter::ServerMemoryLimiter;
86use crate::server::Server;
87
88pub mod authorize;
89#[cfg(feature = "dashboard")]
90mod dashboard;
91pub mod dyn_log;
92pub mod dyn_trace;
93pub mod event;
94pub mod extractor;
95pub mod handler;
96pub mod header;
97pub mod influxdb;
98pub mod jaeger;
99pub mod logs;
100pub mod loki;
101pub mod mem_prof;
102mod memory_limit;
103pub mod opentsdb;
104pub mod otlp;
105pub mod pprof;
106pub mod prom_store;
107pub mod prometheus;
108pub mod result;
109mod timeout;
110pub mod utils;
111
112use result::HttpOutputWriter;
113pub(crate) use timeout::DynamicTimeoutLayer;
114
115use crate::prom_remote_write::validation::PromValidationMode;
116
117mod hints;
118mod read_preference;
119#[cfg(any(test, feature = "testing"))]
120pub mod test_helpers;
121
/// Version segment used in versioned HTTP API paths (e.g. `/v1/sql`).
pub const HTTP_API_VERSION: &str = "v1";
/// Path prefix shared by all versioned HTTP API routes.
pub const HTTP_API_PREFIX: &str = "/v1/";
/// Default cap on the HTTP request body size (64 MiB). A configured
/// `body_limit` of zero disables the limit (see `HttpServer::build`).
const DEFAULT_BODY_LIMIT: ReadableSize = ReadableSize::mb(64);

/// Custom header carrying GreptimeDB authentication credentials.
pub const AUTHORIZATION_HEADER: &str = "x-greptime-auth";

/// Endpoints intended to be reachable without authentication
/// (consumed by the `authorize` middleware — NOTE(review): verify usage there).
pub static PUBLIC_APIS: [&str; 3] = ["/v1/influxdb/ping", "/v1/influxdb/health", "/v1/health"];
132
/// The HTTP server.
///
/// Constructed via [`HttpServerBuilder`]; routes accumulated there are
/// finalized by `make_app`/`build` when [`Server::start`] runs.
#[derive(Default)]
pub struct HttpServer {
    // Router assembled by the builder; cloned under the lock in `make_app`.
    router: StdMutex<Router>,
    // One-shot sender that triggers graceful shutdown; `Some` while running.
    shutdown_tx: Mutex<Option<Sender<()>>>,
    // Optional authentication provider; `None` means auth checks pass everyone through.
    user_provider: Option<UserProviderRef>,
    // Request memory limiter wired into the `memory_limit` middleware.
    memory_limiter: ServerMemoryLimiter,

    // Plugin registry; consulted at startup for `HttpConfiguratorRef`.
    plugins: Plugins,

    // Server configuration.
    options: HttpOptions,
    // Actual bound socket address, recorded after a successful `start`.
    bind_addr: Option<SocketAddr>,
}
147
/// Configuration options for the HTTP server.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(default)]
pub struct HttpOptions {
    /// Address to listen on, e.g. `127.0.0.1:4000`.
    pub addr: String,

    /// Per-request timeout; a zero duration disables the timeout layer
    /// (see `HttpServer::build`).
    #[serde(with = "humantime_serde")]
    pub timeout: Duration,

    /// When true, the `/dashboard` routes are not registered.
    /// Never read from configuration files (`#[serde(skip)]`).
    #[serde(skip)]
    pub disable_dashboard: bool,

    /// Maximum accepted request body size; zero disables the limit.
    pub body_limit: ReadableSize,

    /// Validation strictness applied to Prometheus remote-write payloads.
    pub prom_validation_mode: PromValidationMode,

    /// Origins allowed by the CORS layer; an empty list allows any origin.
    pub cors_allowed_origins: Vec<String>,

    /// Whether the CORS layer is installed at all.
    pub enable_cors: bool,
}
168
169impl Default for HttpOptions {
170 fn default() -> Self {
171 Self {
172 addr: "127.0.0.1:4000".to_string(),
173 timeout: Duration::from_secs(0),
174 disable_dashboard: false,
175 body_limit: DEFAULT_BODY_LIMIT,
176 cors_allowed_origins: Vec::new(),
177 enable_cors: true,
178 prom_validation_mode: PromValidationMode::Strict,
179 }
180 }
181}
182
183#[derive(Debug, Serialize, Deserialize, Eq, PartialEq)]
184pub struct ColumnSchema {
185 name: String,
186 data_type: String,
187}
188
189impl ColumnSchema {
190 pub fn new(name: String, data_type: String) -> ColumnSchema {
191 ColumnSchema { name, data_type }
192 }
193}
194
195#[derive(Debug, Serialize, Deserialize, Eq, PartialEq)]
196pub struct OutputSchema {
197 column_schemas: Vec<ColumnSchema>,
198}
199
200impl OutputSchema {
201 pub fn new(columns: Vec<ColumnSchema>) -> OutputSchema {
202 OutputSchema {
203 column_schemas: columns,
204 }
205 }
206}
207
208impl From<SchemaRef> for OutputSchema {
209 fn from(schema: SchemaRef) -> OutputSchema {
210 OutputSchema {
211 column_schemas: schema
212 .column_schemas()
213 .iter()
214 .map(|cs| ColumnSchema {
215 name: cs.name.clone(),
216 data_type: cs.data_type.name(),
217 })
218 .collect(),
219 }
220 }
221}
222
/// Row-oriented result payload returned by the HTTP query endpoints.
#[derive(Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct HttpRecordsOutput {
    // Column schema describing each row's fields.
    schema: OutputSchema,
    // Row data; inner vec is one row, ordered per `schema`.
    rows: Vec<Vec<Value>>,
    // Total number of rows in this output; kept in sync with `rows`
    // (and lowered by `process_with_limit` when rows are truncated).
    #[serde(default)]
    total_rows: usize,

    // Optional per-query metrics; omitted from JSON when empty.
    #[serde(skip_serializing_if = "HashMap::is_empty")]
    #[serde(default)]
    metrics: HashMap<String, Value>,
}
237
238impl HttpRecordsOutput {
239 pub fn num_rows(&self) -> usize {
240 self.rows.len()
241 }
242
243 pub fn num_cols(&self) -> usize {
244 self.schema.column_schemas.len()
245 }
246
247 pub fn schema(&self) -> &OutputSchema {
248 &self.schema
249 }
250
251 pub fn rows(&self) -> &Vec<Vec<Value>> {
252 &self.rows
253 }
254}
255
256impl HttpRecordsOutput {
257 pub fn try_new(
258 schema: SchemaRef,
259 recordbatches: Vec<RecordBatch>,
260 ) -> std::result::Result<HttpRecordsOutput, Error> {
261 if recordbatches.is_empty() {
262 Ok(HttpRecordsOutput {
263 schema: OutputSchema::from(schema),
264 rows: vec![],
265 total_rows: 0,
266 metrics: Default::default(),
267 })
268 } else {
269 let num_rows = recordbatches.iter().map(|r| r.num_rows()).sum::<usize>();
270 let mut rows = Vec::with_capacity(num_rows);
271
272 for recordbatch in recordbatches {
273 let mut writer = HttpOutputWriter::new(schema.num_columns(), None);
274 writer.write(recordbatch, &mut rows)?;
275 }
276
277 Ok(HttpRecordsOutput {
278 schema: OutputSchema::from(schema),
279 total_rows: rows.len(),
280 rows,
281 metrics: Default::default(),
282 })
283 }
284 }
285}
286
/// One query's outcome: either a row-count for write/DDL statements or a
/// record set for reads. Serialized with lowercase variant tags.
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum GreptimeQueryOutput {
    AffectedRows(usize),
    Records(HttpRecordsOutput),
}
293
/// Output format requested for query results.
///
/// The two booleans on `Csv` are `(with_names, with_types)`: whether to
/// emit a header row of column names, and additionally one of column
/// types (see `parse` for the accepted spellings).
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq)]
pub enum ResponseFormat {
    Arrow,
    Csv(bool, bool),
    Table,
    #[default]
    GreptimedbV1,
    InfluxdbV1,
    Json,
    Null,
}

impl ResponseFormat {
    /// Parses a format name (as supplied by the client); `None` for
    /// unrecognized names.
    pub fn parse(s: &str) -> Option<Self> {
        let format = match s {
            "arrow" => Self::Arrow,
            "csv" => Self::Csv(false, false),
            "csvwithnames" => Self::Csv(true, false),
            "csvwithnamesandtypes" => Self::Csv(true, true),
            "table" => Self::Table,
            "greptimedb_v1" => Self::GreptimedbV1,
            "influxdb_v1" => Self::InfluxdbV1,
            "json" => Self::Json,
            "null" => Self::Null,
            _ => return None,
        };
        Some(format)
    }

    /// Canonical short name of the format. Both CSV header variants
    /// collapse to plain `"csv"`.
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Arrow => "arrow",
            Self::Csv(_, _) => "csv",
            Self::Table => "table",
            Self::GreptimedbV1 => "greptimedb_v1",
            Self::InfluxdbV1 => "influxdb_v1",
            Self::Json => "json",
            Self::Null => "null",
        }
    }
}
336
337#[derive(Debug, Clone, Copy, PartialEq, Eq)]
338pub enum Epoch {
339 Nanosecond,
340 Microsecond,
341 Millisecond,
342 Second,
343}
344
345impl Epoch {
346 pub fn parse(s: &str) -> Option<Epoch> {
347 match s {
352 "ns" => Some(Epoch::Nanosecond),
353 "u" | "µ" => Some(Epoch::Microsecond),
354 "ms" => Some(Epoch::Millisecond),
355 "s" => Some(Epoch::Second),
356 _ => None, }
358 }
359
360 pub fn convert_timestamp(&self, ts: Timestamp) -> Option<Timestamp> {
361 match self {
362 Epoch::Nanosecond => ts.convert_to(TimeUnit::Nanosecond),
363 Epoch::Microsecond => ts.convert_to(TimeUnit::Microsecond),
364 Epoch::Millisecond => ts.convert_to(TimeUnit::Millisecond),
365 Epoch::Second => ts.convert_to(TimeUnit::Second),
366 }
367 }
368}
369
370impl Display for Epoch {
371 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
372 match self {
373 Epoch::Nanosecond => write!(f, "Epoch::Nanosecond"),
374 Epoch::Microsecond => write!(f, "Epoch::Microsecond"),
375 Epoch::Millisecond => write!(f, "Epoch::Millisecond"),
376 Epoch::Second => write!(f, "Epoch::Second"),
377 }
378 }
379}
380
/// Wrapper over every concrete HTTP response body this server can emit;
/// each variant delegates serialization and `IntoResponse` to its inner type.
#[derive(Serialize, Deserialize, Debug)]
pub enum HttpResponse {
    Arrow(ArrowResponse),
    Csv(CsvResponse),
    Table(TableResponse),
    Error(ErrorResponse),
    GreptimedbV1(GreptimedbV1Response),
    InfluxdbV1(InfluxdbV1Response),
    Json(JsonResponse),
    Null(NullResponse),
}
392
393impl HttpResponse {
394 pub fn with_execution_time(self, execution_time: u64) -> Self {
395 match self {
396 HttpResponse::Arrow(resp) => resp.with_execution_time(execution_time).into(),
397 HttpResponse::Csv(resp) => resp.with_execution_time(execution_time).into(),
398 HttpResponse::Table(resp) => resp.with_execution_time(execution_time).into(),
399 HttpResponse::GreptimedbV1(resp) => resp.with_execution_time(execution_time).into(),
400 HttpResponse::InfluxdbV1(resp) => resp.with_execution_time(execution_time).into(),
401 HttpResponse::Json(resp) => resp.with_execution_time(execution_time).into(),
402 HttpResponse::Null(resp) => resp.with_execution_time(execution_time).into(),
403 HttpResponse::Error(resp) => resp.with_execution_time(execution_time).into(),
404 }
405 }
406
407 pub fn with_limit(self, limit: usize) -> Self {
408 match self {
409 HttpResponse::Csv(resp) => resp.with_limit(limit).into(),
410 HttpResponse::Table(resp) => resp.with_limit(limit).into(),
411 HttpResponse::GreptimedbV1(resp) => resp.with_limit(limit).into(),
412 HttpResponse::Json(resp) => resp.with_limit(limit).into(),
413 _ => self,
414 }
415 }
416}
417
418pub fn process_with_limit(
419 mut outputs: Vec<GreptimeQueryOutput>,
420 limit: usize,
421) -> Vec<GreptimeQueryOutput> {
422 outputs
423 .drain(..)
424 .map(|data| match data {
425 GreptimeQueryOutput::Records(mut records) => {
426 if records.rows.len() > limit {
427 records.rows.truncate(limit);
428 records.total_rows = limit;
429 }
430 GreptimeQueryOutput::Records(records)
431 }
432 _ => data,
433 })
434 .collect()
435}
436
437impl IntoResponse for HttpResponse {
438 fn into_response(self) -> Response {
439 match self {
440 HttpResponse::Arrow(resp) => resp.into_response(),
441 HttpResponse::Csv(resp) => resp.into_response(),
442 HttpResponse::Table(resp) => resp.into_response(),
443 HttpResponse::GreptimedbV1(resp) => resp.into_response(),
444 HttpResponse::InfluxdbV1(resp) => resp.into_response(),
445 HttpResponse::Json(resp) => resp.into_response(),
446 HttpResponse::Null(resp) => resp.into_response(),
447 HttpResponse::Error(resp) => resp.into_response(),
448 }
449 }
450}
451
// Mechanical conversions from each concrete response type into the
// `HttpResponse` wrapper; these back the `.into()` calls in
// `with_execution_time` / `with_limit` above.
impl From<ArrowResponse> for HttpResponse {
    fn from(value: ArrowResponse) -> Self {
        HttpResponse::Arrow(value)
    }
}

impl From<CsvResponse> for HttpResponse {
    fn from(value: CsvResponse) -> Self {
        HttpResponse::Csv(value)
    }
}

impl From<TableResponse> for HttpResponse {
    fn from(value: TableResponse) -> Self {
        HttpResponse::Table(value)
    }
}

impl From<ErrorResponse> for HttpResponse {
    fn from(value: ErrorResponse) -> Self {
        HttpResponse::Error(value)
    }
}

impl From<GreptimedbV1Response> for HttpResponse {
    fn from(value: GreptimedbV1Response) -> Self {
        HttpResponse::GreptimedbV1(value)
    }
}

impl From<InfluxdbV1Response> for HttpResponse {
    fn from(value: InfluxdbV1Response) -> Self {
        HttpResponse::InfluxdbV1(value)
    }
}

impl From<JsonResponse> for HttpResponse {
    fn from(value: JsonResponse) -> Self {
        HttpResponse::Json(value)
    }
}

impl From<NullResponse> for HttpResponse {
    fn from(value: NullResponse) -> Self {
        HttpResponse::Null(value)
    }
}
499
/// Axum state for the SQL/PromQL query routes (see `route_sql`).
#[derive(Clone)]
pub struct ApiState {
    pub sql_handler: ServerSqlQueryHandlerRef,
}

/// Axum state for the `/config` route: the serialized server configuration.
#[derive(Clone)]
pub struct GreptimeOptionsConfigState {
    pub greptime_config_options: String,
}

/// Axum state for the dashboard CRUD routes (see `route_dashboard`).
#[derive(Clone)]
pub struct DashboardState {
    pub handler: DashboardHandlerRef,
}
514
/// Builder that accumulates routes, handlers, and plugins before
/// producing an [`HttpServer`] via [`HttpServerBuilder::build`].
pub struct HttpServerBuilder {
    // Server configuration carried into the built server.
    options: HttpOptions,
    // Plugin registry handed to the built server.
    plugins: Plugins,
    // Optional authentication provider.
    user_provider: Option<UserProviderRef>,
    // Router being assembled by the `with_*` methods.
    router: Router,
    // Request memory limiter carried into the built server.
    memory_limiter: ServerMemoryLimiter,
}
522
impl HttpServerBuilder {
    /// Starts a builder with the given options, no handlers, and an empty router.
    pub fn new(options: HttpOptions) -> Self {
        Self {
            options,
            plugins: Plugins::default(),
            user_provider: None,
            router: Router::new(),
            memory_limiter: ServerMemoryLimiter::default(),
        }
    }

    /// Replaces the default request memory limiter.
    pub fn with_memory_limiter(mut self, limiter: ServerMemoryLimiter) -> Self {
        self.memory_limiter = limiter;
        self
    }

    /// Mounts the SQL/PromQL query routes under `/v1`.
    pub fn with_sql_handler(self, sql_handler: ServerSqlQueryHandlerRef) -> Self {
        let sql_router = HttpServer::route_sql(ApiState { sql_handler });

        Self {
            router: self
                .router
                .nest(&format!("/{HTTP_API_VERSION}"), sql_router),
            ..self
        }
    }

    /// Mounts the log query route under `/v1`.
    pub fn with_logs_handler(self, logs_handler: LogQueryHandlerRef) -> Self {
        let logs_router = HttpServer::route_logs(logs_handler);

        Self {
            router: self
                .router
                .nest(&format!("/{HTTP_API_VERSION}"), logs_router),
            ..self
        }
    }

    /// Mounts the OpenTSDB write route under `/v1/opentsdb`.
    pub fn with_opentsdb_handler(self, handler: OpentsdbProtocolHandlerRef) -> Self {
        Self {
            router: self.router.nest(
                &format!("/{HTTP_API_VERSION}/opentsdb"),
                HttpServer::route_opentsdb(handler),
            ),
            ..self
        }
    }

    /// Mounts the InfluxDB line-protocol routes under `/v1/influxdb`.
    pub fn with_influxdb_handler(self, handler: InfluxdbLineProtocolHandlerRef) -> Self {
        Self {
            router: self.router.nest(
                &format!("/{HTTP_API_VERSION}/influxdb"),
                HttpServer::route_influxdb(handler),
            ),
            ..self
        }
    }

    /// Mounts the Prometheus remote read/write routes under `/v1/prometheus`.
    pub fn with_prom_handler(
        self,
        handler: PromStoreProtocolHandlerRef,
        pipeline_handler: Option<PipelineHandlerRef>,
        prom_store_with_metric_engine: bool,
        prom_validation_mode: PromValidationMode,
    ) -> Self {
        let state = PromStoreState {
            prom_store_handler: handler,
            pipeline_handler,
            prom_store_with_metric_engine,
            prom_validation_mode,
        };

        Self {
            router: self.router.nest(
                &format!("/{HTTP_API_VERSION}/prometheus"),
                HttpServer::route_prom(state),
            ),
            ..self
        }
    }

    /// Mounts the Prometheus HTTP query API under `/v1/prometheus/api/v1`.
    pub fn with_prometheus_handler(self, handler: PrometheusHandlerRef) -> Self {
        Self {
            router: self.router.nest(
                &format!("/{HTTP_API_VERSION}/prometheus/api/v1"),
                HttpServer::route_prometheus(handler),
            ),
            ..self
        }
    }

    /// Mounts the OTLP ingestion routes (metrics/traces/logs) under `/v1/otlp`.
    pub fn with_otlp_handler(
        self,
        handler: OpenTelemetryProtocolHandlerRef,
        with_metric_engine: bool,
    ) -> Self {
        Self {
            router: self.router.nest(
                &format!("/{HTTP_API_VERSION}/otlp"),
                HttpServer::route_otlp(handler, with_metric_engine),
            ),
            ..self
        }
    }

    /// Enables authentication via the given provider.
    pub fn with_user_provider(self, user_provider: UserProviderRef) -> Self {
        Self {
            user_provider: Some(user_provider),
            ..self
        }
    }

    /// Merges the `/metrics` scrape endpoint into the router.
    pub fn with_metrics_handler(self, handler: MetricsHandler) -> Self {
        Self {
            router: self.router.merge(HttpServer::route_metrics(handler)),
            ..self
        }
    }

    /// Mounts every log-ingestion surface sharing one `LogState`:
    /// pipelines under `/v1`, the deprecated `/v1/events` routes, Loki
    /// under `/v1/loki`, and the Elasticsearch-compatible endpoints.
    pub fn with_log_ingest_handler(
        self,
        handler: PipelineHandlerRef,
        validator: Option<LogValidatorRef>,
        ingest_interceptor: Option<LogIngestInterceptorRef<Error>>,
    ) -> Self {
        let log_state = LogState {
            log_handler: handler,
            log_validator: validator,
            ingest_interceptor,
        };

        let router = self.router.nest(
            &format!("/{HTTP_API_VERSION}"),
            HttpServer::route_pipelines(log_state.clone()),
        );
        // Deprecated alias kept for backward compatibility.
        let router = router.nest(
            &format!("/{HTTP_API_VERSION}/events"),
            #[allow(deprecated)]
            HttpServer::route_log_deprecated(log_state.clone()),
        );

        let router = router.nest(
            &format!("/{HTTP_API_VERSION}/loki"),
            HttpServer::route_loki(log_state.clone()),
        );

        let router = router.nest(
            &format!("/{HTTP_API_VERSION}/elasticsearch"),
            HttpServer::route_elasticsearch(log_state.clone()),
        );

        // Trailing-slash variant of the Elasticsearch root version probe.
        let router = router.nest(
            &format!("/{HTTP_API_VERSION}/elasticsearch/"),
            Router::new()
                .route("/", routing::get(elasticsearch::handle_get_version))
                .with_state(log_state),
        );

        Self { router, ..self }
    }

    /// Replaces the plugin registry.
    pub fn with_plugins(self, plugins: Plugins) -> Self {
        Self { plugins, ..self }
    }

    /// Exposes the serialized server configuration at `/config`.
    pub fn with_greptime_config_options(self, opts: String) -> Self {
        let config_router = HttpServer::route_config(GreptimeOptionsConfigState {
            greptime_config_options: opts,
        });

        Self {
            router: self.router.merge(config_router),
            ..self
        }
    }

    /// Mounts the Jaeger query-compatible API under `/v1/jaeger`.
    pub fn with_jaeger_handler(self, handler: JaegerQueryHandlerRef) -> Self {
        Self {
            router: self.router.nest(
                &format!("/{HTTP_API_VERSION}/jaeger"),
                HttpServer::route_jaeger(handler),
            ),
            ..self
        }
    }

    /// Mounts the dashboard CRUD routes under `/v1/dashboards`.
    pub fn with_dashboard_handler(self, handler: DashboardHandlerRef) -> Self {
        Self {
            router: self.router.nest(
                &format!("/{HTTP_API_VERSION}/dashboards"),
                HttpServer::route_dashboard(handler),
            ),
            ..self
        }
    }

    /// Merges an arbitrary caller-supplied router.
    pub fn with_extra_router(self, router: Router) -> Self {
        Self {
            router: self.router.merge(router),
            ..self
        }
    }

    /// Wraps the whole router in an additional tower layer.
    pub fn add_layer<L>(self, layer: L) -> Self
    where
        L: Layer<Route> + Clone + Send + Sync + 'static,
        L::Service: Service<Request> + Clone + Send + Sync + 'static,
        <L::Service as Service<Request>>::Response: IntoResponse + 'static,
        <L::Service as Service<Request>>::Error: Into<Infallible> + 'static,
        <L::Service as Service<Request>>::Future: Send + 'static,
    {
        Self {
            router: self.router.layer(layer),
            ..self
        }
    }

    /// Finalizes the builder into an (unstarted) `HttpServer`.
    pub fn build(self) -> HttpServer {
        HttpServer {
            options: self.options,
            user_provider: self.user_provider,
            shutdown_tx: Mutex::new(None),
            plugins: self.plugins,
            router: StdMutex::new(self.router),
            bind_addr: None,
            memory_limiter: self.memory_limiter,
        }
    }
}
754
impl HttpServer {
    /// Clones the accumulated router and adds the always-on routes:
    /// health/ready probes, `/status`, the dashboard (when the feature is
    /// enabled and not disabled by options), and the HTTP metrics layer.
    pub fn make_app(&self) -> Router {
        // Clone under the lock so the stored router stays reusable.
        let mut router = {
            let router = self.router.lock().unwrap();
            router.clone()
        };

        router = router
            .route(
                "/health",
                routing::get(handler::health).post(handler::health),
            )
            .route(
                &format!("/{HTTP_API_VERSION}/health"),
                routing::get(handler::health).post(handler::health),
            )
            .route(
                "/ready",
                routing::get(handler::health).post(handler::health),
            );

        router = router.route("/status", routing::get(handler::status));

        #[cfg(feature = "dashboard")]
        {
            if !self.options.disable_dashboard {
                info!("Enable dashboard service at '/dashboard'");
                // Redirect `/dashboard` to `/dashboard/`, preserving the query string.
                router = router.route(
                    "/dashboard",
                    routing::get(|uri: axum::http::uri::Uri| async move {
                        let path = uri.path();
                        let query = uri.query().map(|q| format!("?{}", q)).unwrap_or_default();

                        let new_uri = format!("{}/{}", path, query);
                        axum::response::Redirect::permanent(&new_uri)
                    }),
                );

                // Serve static assets for `/dashboard/` and everything below it.
                router = router
                    .route(
                        "/dashboard/",
                        routing::get(dashboard::static_handler).post(dashboard::static_handler),
                    )
                    .route(
                        "/dashboard/{*x}",
                        routing::get(dashboard::static_handler).post(dashboard::static_handler),
                    );
            }
        }

        // Record HTTP metrics for every route registered so far.
        router = router.route_layer(middleware::from_fn(http_metrics_layer));

        router
    }

    /// Wraps the app router with the cross-cutting middleware stack
    /// (tracing, optional CORS/timeout/body-limit, memory limiting, auth,
    /// hints, read preference) and mounts the `/debug` tooling routes.
    ///
    /// Fails if a configured CORS origin is not a valid header value.
    pub fn build(&self, router: Router) -> Result<Router> {
        // A zero timeout disables the timeout layer entirely.
        let timeout_layer = if self.options.timeout != Duration::default() {
            Some(ServiceBuilder::new().layer(DynamicTimeoutLayer::new(self.options.timeout)))
        } else {
            info!("HTTP server timeout is disabled");
            None
        };
        // A zero body limit disables the body-size cap.
        let body_limit_layer = if self.options.body_limit != ReadableSize(0) {
            Some(
                ServiceBuilder::new()
                    .layer(DefaultBodyLimit::max(self.options.body_limit.0 as usize)),
            )
        } else {
            info!("HTTP server body limit is disabled");
            None
        };
        let cors_layer = if self.options.enable_cors {
            Some(
                CorsLayer::new()
                    .allow_methods([
                        Method::GET,
                        Method::POST,
                        Method::PUT,
                        Method::DELETE,
                        Method::HEAD,
                    ])
                    // Empty origin list means "allow any origin".
                    .allow_origin(if self.options.cors_allowed_origins.is_empty() {
                        AllowOrigin::from(Any)
                    } else {
                        AllowOrigin::from(
                            self.options
                                .cors_allowed_origins
                                .iter()
                                .map(|s| {
                                    HeaderValue::from_str(s.as_str())
                                        .context(InvalidHeaderValueSnafu)
                                })
                                .collect::<Result<Vec<HeaderValue>>>()?,
                        )
                    })
                    .allow_headers(Any),
            )
        } else {
            info!("HTTP server cross-origin is disabled");
            None
        };

        Ok(router
            .layer(
                // NOTE: layer order matters — requests pass through these
                // from top to bottom as written here.
                ServiceBuilder::new()
                    .layer(TraceLayer::new_for_http().on_failure(()))
                    .option_layer(cors_layer)
                    .option_layer(timeout_layer)
                    .option_layer(body_limit_layer)
                    .layer(middleware::from_fn_with_state(
                        self.memory_limiter.clone(),
                        memory_limit::memory_limit_middleware,
                    ))
                    .layer(middleware::from_fn_with_state(
                        AuthState::new(self.user_provider.clone()),
                        authorize::check_http_auth,
                    ))
                    .layer(middleware::from_fn(hints::extract_hints))
                    .layer(middleware::from_fn(
                        read_preference::extract_read_preference,
                    )),
            )
            // Runtime debugging endpoints: log level, tracing, profilers.
            .nest(
                "/debug",
                Router::new()
                    .route("/log_level", routing::post(dyn_log::dyn_log_handler))
                    .route("/enable_trace", routing::post(dyn_trace::dyn_trace_handler))
                    .nest(
                        "/prof",
                        Router::new()
                            .route("/cpu", routing::post(pprof::pprof_handler))
                            .route("/mem", routing::post(mem_prof::mem_prof_handler))
                            .route("/mem/symbol", routing::post(mem_prof::symbolicate_handler))
                            .route(
                                "/mem/activate",
                                routing::post(mem_prof::activate_heap_prof_handler),
                            )
                            .route(
                                "/mem/deactivate",
                                routing::post(mem_prof::deactivate_heap_prof_handler),
                            )
                            .route(
                                "/mem/status",
                                routing::get(mem_prof::heap_prof_status_handler),
                            )
                            .route(
                                "/mem/gdump",
                                routing::get(mem_prof::gdump_status_handler)
                                    .post(mem_prof::gdump_toggle_handler),
                            ),
                    ),
            ))
    }

    /// `/metrics` scrape endpoint.
    fn route_metrics<S>(metrics_handler: MetricsHandler) -> Router<S> {
        Router::new()
            .route("/metrics", routing::get(handler::metrics))
            .with_state(metrics_handler)
    }

    /// Loki push API; decompresses request bodies, passing through
    /// unsupported encodings untouched.
    fn route_loki<S>(log_state: LogState) -> Router<S> {
        Router::new()
            .route("/api/v1/push", routing::post(loki::loki_ingest))
            .layer(
                ServiceBuilder::new()
                    .layer(RequestDecompressionLayer::new().pass_through_unaccepted(true)),
            )
            .with_state(log_state)
    }

    /// Elasticsearch-compatible surface: version/license probes, `_bulk`
    /// ingestion, and catch-all stubs (empty JSON + ES headers) for client
    /// housekeeping endpoints (ILM, templates, ingest, nodes, logstash).
    fn route_elasticsearch<S>(log_state: LogState) -> Router<S> {
        Router::new()
            // HEAD `/` is the connectivity probe some clients issue first.
            .route(
                "/",
                routing::head((HttpStatusCode::OK, elasticsearch::elasticsearch_headers())),
            )
            .route("/", routing::get(elasticsearch::handle_get_version))
            .route("/_license", routing::get(elasticsearch::handle_get_license))
            .route("/_bulk", routing::post(elasticsearch::handle_bulk_api))
            .route(
                "/{index}/_bulk",
                routing::post(elasticsearch::handle_bulk_api_with_index),
            )
            .route(
                "/_ilm/policy/{*path}",
                routing::any((
                    HttpStatusCode::OK,
                    elasticsearch::elasticsearch_headers(),
                    axum::Json(serde_json::json!({})),
                )),
            )
            .route(
                "/_index_template/{*path}",
                routing::any((
                    HttpStatusCode::OK,
                    elasticsearch::elasticsearch_headers(),
                    axum::Json(serde_json::json!({})),
                )),
            )
            .route(
                "/_ingest/{*path}",
                routing::any((
                    HttpStatusCode::OK,
                    elasticsearch::elasticsearch_headers(),
                    axum::Json(serde_json::json!({})),
                )),
            )
            .route(
                "/_nodes/{*path}",
                routing::any((
                    HttpStatusCode::OK,
                    elasticsearch::elasticsearch_headers(),
                    axum::Json(serde_json::json!({})),
                )),
            )
            .route(
                "/logstash/{*path}",
                routing::any((
                    HttpStatusCode::OK,
                    elasticsearch::elasticsearch_headers(),
                    axum::Json(serde_json::json!({})),
                )),
            )
            .route(
                "/_logstash/{*path}",
                routing::any((
                    HttpStatusCode::OK,
                    elasticsearch::elasticsearch_headers(),
                    axum::Json(serde_json::json!({})),
                )),
            )
            .layer(ServiceBuilder::new().layer(RequestDecompressionLayer::new()))
            .with_state(log_state)
    }

    /// Legacy `/events`-style log/pipeline routes; superseded by
    /// `route_pipelines`.
    #[deprecated(since = "0.11.0", note = "Use `route_pipelines()` instead.")]
    fn route_log_deprecated<S>(log_state: LogState) -> Router<S> {
        Router::new()
            .route("/logs", routing::post(event::log_ingester))
            .route(
                "/pipelines/{pipeline_name}",
                routing::get(event::query_pipeline),
            )
            .route(
                "/pipelines/{pipeline_name}",
                routing::post(event::add_pipeline),
            )
            .route(
                "/pipelines/{pipeline_name}",
                routing::delete(event::delete_pipeline),
            )
            .route("/pipelines/dryrun", routing::post(event::pipeline_dryrun))
            .layer(
                ServiceBuilder::new()
                    .layer(RequestDecompressionLayer::new().pass_through_unaccepted(true)),
            )
            .with_state(log_state)
    }

    /// Current log ingestion and pipeline management routes.
    fn route_pipelines<S>(log_state: LogState) -> Router<S> {
        Router::new()
            .route("/ingest", routing::post(event::log_ingester))
            .route(
                "/pipelines/{pipeline_name}",
                routing::get(event::query_pipeline),
            )
            .route(
                "/pipelines/{pipeline_name}/ddl",
                routing::get(event::query_pipeline_ddl),
            )
            .route(
                "/pipelines/{pipeline_name}",
                routing::post(event::add_pipeline),
            )
            .route(
                "/pipelines/{pipeline_name}",
                routing::delete(event::delete_pipeline),
            )
            .route("/pipelines/_dryrun", routing::post(event::pipeline_dryrun))
            .layer(
                ServiceBuilder::new()
                    .layer(RequestDecompressionLayer::new().pass_through_unaccepted(true)),
            )
            .with_state(log_state)
    }

    /// SQL/PromQL query routes plus SQL parse/format helpers.
    fn route_sql<S>(api_state: ApiState) -> Router<S> {
        Router::new()
            .route("/sql", routing::get(handler::sql).post(handler::sql))
            .route(
                "/sql/parse",
                routing::get(handler::sql_parse).post(handler::sql_parse),
            )
            .route(
                "/sql/format",
                routing::get(handler::sql_format).post(handler::sql_format),
            )
            .route(
                "/promql",
                routing::get(handler::promql).post(handler::promql),
            )
            .with_state(api_state)
    }

    /// Log query route.
    fn route_logs<S>(log_handler: LogQueryHandlerRef) -> Router<S> {
        Router::new()
            .route("/logs", routing::get(logs::logs).post(logs::logs))
            .with_state(log_handler)
    }

    /// Prometheus HTTP query API (instant/range/labels/series/etc.),
    /// with response compression enabled.
    pub fn route_prometheus<S>(prometheus_handler: PrometheusHandlerRef) -> Router<S> {
        Router::new()
            .route(
                "/format_query",
                routing::post(format_query).get(format_query),
            )
            .route("/status/buildinfo", routing::get(build_info_query))
            .route("/query", routing::post(instant_query).get(instant_query))
            .route("/query_range", routing::post(range_query).get(range_query))
            .route("/labels", routing::post(labels_query).get(labels_query))
            .route("/series", routing::post(series_query).get(series_query))
            .route("/parse_query", routing::post(parse_query).get(parse_query))
            .route(
                "/label/{label_name}/values",
                routing::get(label_values_query),
            )
            .layer(ServiceBuilder::new().layer(CompressionLayer::new()))
            .with_state(prometheus_handler)
    }

    /// Prometheus remote-read/remote-write routes.
    fn route_prom<S>(state: PromStoreState) -> Router<S> {
        Router::new()
            .route("/read", routing::post(prom_store::remote_read))
            .route("/write", routing::post(prom_store::remote_write))
            .with_state(state)
    }

    /// InfluxDB line-protocol routes. Only the write endpoints get the
    /// decompression layer; ping/health are added after it.
    fn route_influxdb<S>(influxdb_handler: InfluxdbLineProtocolHandlerRef) -> Router<S> {
        Router::new()
            .route("/write", routing::post(influxdb_write_v1))
            .route("/api/v2/write", routing::post(influxdb_write_v2))
            .layer(
                ServiceBuilder::new()
                    .layer(RequestDecompressionLayer::new().pass_through_unaccepted(true)),
            )
            .route("/ping", routing::get(influxdb_ping))
            .route("/health", routing::get(influxdb_health))
            .with_state(influxdb_handler)
    }

    /// OpenTSDB put route.
    fn route_opentsdb<S>(opentsdb_handler: OpentsdbProtocolHandlerRef) -> Router<S> {
        Router::new()
            .route("/api/put", routing::post(opentsdb::put))
            .with_state(opentsdb_handler)
    }

    /// OTLP/HTTP ingestion routes for metrics, traces, and logs.
    fn route_otlp<S>(
        otlp_handler: OpenTelemetryProtocolHandlerRef,
        with_metric_engine: bool,
    ) -> Router<S> {
        Router::new()
            .route("/v1/metrics", routing::post(otlp::metrics))
            .route("/v1/traces", routing::post(otlp::traces))
            .route("/v1/logs", routing::post(otlp::logs))
            .layer(
                ServiceBuilder::new()
                    .layer(RequestDecompressionLayer::new().pass_through_unaccepted(true)),
            )
            .with_state(OtlpState {
                with_metric_engine,
                handler: otlp_handler,
            })
    }

    /// `/config` route exposing the serialized server configuration.
    fn route_config<S>(state: GreptimeOptionsConfigState) -> Router<S> {
        Router::new()
            .route("/config", routing::get(handler::config))
            .with_state(state)
    }

    /// Jaeger query-compatible routes (services, operations, traces).
    fn route_jaeger<S>(handler: JaegerQueryHandlerRef) -> Router<S> {
        Router::new()
            .route("/api/services", routing::get(jaeger::handle_get_services))
            .route(
                "/api/services/{service_name}/operations",
                routing::get(jaeger::handle_get_operations_by_service),
            )
            .route(
                "/api/operations",
                routing::get(jaeger::handle_get_operations),
            )
            .route("/api/traces", routing::get(jaeger::handle_find_traces))
            .route(
                "/api/traces/{trace_id}",
                routing::get(jaeger::handle_get_trace),
            )
            .with_state(handler)
    }

    /// Dashboard CRUD routes (list/add/delete) with request decompression.
    #[cfg(feature = "dashboard")]
    fn route_dashboard<S>(handler: DashboardHandlerRef) -> Router<S> {
        use crate::http::dashboard::{add_dashboard, delete_dashboard, list_dashboards};

        Router::new()
            .route("/", routing::get(list_dashboards))
            .route("/{dashboard_name}", routing::post(add_dashboard))
            .route("/{dashboard_name}", routing::delete(delete_dashboard))
            .layer(
                ServiceBuilder::new()
                    .layer(RequestDecompressionLayer::new().pass_through_unaccepted(true)),
            )
            .with_state(DashboardState { handler })
    }

    /// No-op router when the dashboard feature is compiled out.
    #[cfg(not(feature = "dashboard"))]
    fn route_dashboard<S>(handler: DashboardHandlerRef) -> Router<S> {
        Router::new().with_state(DashboardState { handler })
    }
}
1208
/// Identifier of the HTTP server; returned by [`Server::name`] for [`HttpServer`].
pub const HTTP_SERVER: &str = "HTTP_SERVER";
1210
#[async_trait]
impl Server for HttpServer {
    /// Signals the serving task to shut down gracefully. Idempotent: the
    /// sender is taken out of the slot, so repeated calls are no-ops.
    async fn shutdown(&self) -> Result<()> {
        let mut shutdown_tx = self.shutdown_tx.lock().await;
        if let Some(tx) = shutdown_tx.take()
            && tx.send(()).is_err()
        {
            // `send` only fails when the receiver was dropped, i.e. the
            // serving task has already finished on its own.
            info!("Receiver dropped, the HTTP server has already exited");
        }
        info!("Shutdown HTTP server");

        Ok(())
    }

    /// Builds the router, binds the TCP listener, and spawns the serving
    /// task on the global runtime. Fails if the server is already started
    /// or the address cannot be bound.
    async fn start(&mut self, listening: SocketAddr) -> Result<()> {
        // Channel used by `shutdown` to trigger graceful shutdown below.
        let (tx, rx) = oneshot::channel();
        let serve = {
            // Hold the lock for the whole setup so two concurrent `start`
            // calls cannot both pass the "not yet started" check.
            let mut shutdown_tx = self.shutdown_tx.lock().await;
            ensure!(
                shutdown_tx.is_none(),
                AlreadyStartedSnafu { server: "HTTP" }
            );

            let mut app = self.make_app();
            // Give a plugin-provided configurator a chance to rewrite the router.
            if let Some(configurator) = self.plugins.get::<HttpConfiguratorRef<()>>() {
                app = configurator
                    .configure_http(app, ())
                    .await
                    .context(OtherSnafu)?;
            }
            let app = self.build(app)?;
            let listener = tokio::net::TcpListener::bind(listening)
                .await
                .context(AddressBindSnafu { addr: listening })?
                .tap_io(|tcp_stream| {
                    // Best effort: log and keep serving if TCP_NODELAY fails.
                    if let Err(e) = tcp_stream.set_nodelay(true) {
                        error!(e; "Failed to set TCP_NODELAY on incoming connection");
                    }
                });
            let serve = axum::serve(listener, app.into_make_service());

            // Store the shutdown sender only after binding succeeded, so a
            // failed `start` leaves the slot empty and can be retried.
            *shutdown_tx = Some(tx);

            serve
        };
        // Read back the actually bound address (useful when port 0 was requested).
        let listening = serve.local_addr().context(InternalIoSnafu)?;
        info!("HTTP server is bound to {}", listening);

        common_runtime::spawn_global(async move {
            // `rx` resolves when `shutdown` fires (or drops) the sender,
            // which starts axum's graceful shutdown.
            if let Err(e) = serve
                .with_graceful_shutdown(rx.map(drop))
                .await
                .context(InternalIoSnafu)
            {
                error!(e; "Failed to shutdown http server");
            }
        });

        self.bind_addr = Some(listening);
        Ok(())
    }

    fn name(&self) -> &str {
        HTTP_SERVER
    }

    fn bind_addr(&self) -> Option<SocketAddr> {
        self.bind_addr
    }

    fn as_any(&self) -> &dyn std::any::Any {
        self
    }
}
1301
1302#[cfg(test)]
1303mod test {
1304 use std::future::pending;
1305 use std::io::Cursor;
1306 use std::sync::Arc;
1307
1308 use arrow_ipc::reader::FileReader;
1309 use arrow_schema::DataType;
1310 use axum::handler::Handler;
1311 use axum::http::StatusCode;
1312 use axum::routing::get;
1313 use common_query::Output;
1314 use common_recordbatch::RecordBatches;
1315 use datafusion_expr::LogicalPlan;
1316 use datatypes::prelude::*;
1317 use datatypes::schema::{ColumnSchema, Schema};
1318 use datatypes::vectors::{StringVector, UInt32Vector};
1319 use header::constants::GREPTIME_DB_HEADER_TIMEOUT;
1320 use query::parser::PromQuery;
1321 use query::query_engine::DescribeResult;
1322 use session::context::QueryContextRef;
1323 use sql::statements::statement::Statement;
1324 use tokio::sync::mpsc;
1325 use tokio::time::Instant;
1326
1327 use super::*;
1328 use crate::http::test_helpers::TestClient;
1329 use crate::prom_remote_write::validation::validate_label_name;
1330 use crate::query_handler::sql::SqlQueryHandler;
1331
    /// Minimal SQL-handler backend used to construct an `HttpServer` in tests.
    struct DummyInstance {
        // Kept only so the sender side of the test channel stays alive.
        _tx: mpsc::Sender<(String, Vec<u8>)>,
    }
1335
    /// Stub `SqlQueryHandler`: only `is_valid_schema` returns a real value;
    /// the query entry points are not exercised by the tests in this module
    /// and panic if reached.
    #[async_trait]
    impl SqlQueryHandler for DummyInstance {
        async fn do_query(&self, _: &str, _: QueryContextRef) -> Vec<Result<Output>> {
            unimplemented!()
        }

        async fn do_promql_query(&self, _: &PromQuery, _: QueryContextRef) -> Vec<Result<Output>> {
            unimplemented!()
        }

        async fn do_exec_plan(
            &self,
            _stmt: Option<Statement>,
            _plan: LogicalPlan,
            _query_ctx: QueryContextRef,
        ) -> Result<Output> {
            unimplemented!()
        }

        async fn do_describe(
            &self,
            _stmt: sql::statements::statement::Statement,
            _query_ctx: QueryContextRef,
        ) -> Result<Option<DescribeResult>> {
            unimplemented!()
        }

        // Always reports the schema as valid.
        async fn is_valid_schema(&self, _catalog: &str, _schema: &str) -> Result<bool> {
            Ok(true)
        }
    }
1367
    /// Timeout layer applied to the `/test/timeout` route: 10 ms default.
    fn timeout() -> DynamicTimeoutLayer {
        DynamicTimeoutLayer::new(Duration::from_millis(10))
    }
1371
    /// Handler that never completes; used to force request timeouts.
    async fn forever() {
        pending().await
    }
1375
    /// Builds the test app with default [`HttpOptions`].
    fn make_test_app(tx: mpsc::Sender<(String, Vec<u8>)>) -> Router {
        make_test_app_custom(tx, HttpOptions::default())
    }
1379
1380 fn make_test_app_custom(tx: mpsc::Sender<(String, Vec<u8>)>, options: HttpOptions) -> Router {
1381 let instance = Arc::new(DummyInstance { _tx: tx });
1382 let server = HttpServerBuilder::new(options)
1383 .with_sql_handler(instance.clone())
1384 .build();
1385 server.build(server.make_app()).unwrap().route(
1386 "/test/timeout",
1387 get(forever.layer(ServiceBuilder::new().layer(timeout()))),
1388 )
1389 }
1390
1391 #[tokio::test]
1392 pub async fn test_cors() {
1393 let (tx, _rx) = mpsc::channel(100);
1395 let app = make_test_app(tx);
1396 let client = TestClient::new(app).await;
1397
1398 let res = client.get("/health").send().await;
1399
1400 assert_eq!(res.status(), StatusCode::OK);
1401 assert_eq!(
1402 res.headers()
1403 .get(http::header::ACCESS_CONTROL_ALLOW_ORIGIN)
1404 .expect("expect cors header origin"),
1405 "*"
1406 );
1407
1408 let res = client.get("/v1/health").send().await;
1409
1410 assert_eq!(res.status(), StatusCode::OK);
1411 assert_eq!(
1412 res.headers()
1413 .get(http::header::ACCESS_CONTROL_ALLOW_ORIGIN)
1414 .expect("expect cors header origin"),
1415 "*"
1416 );
1417
1418 let res = client
1419 .options("/health")
1420 .header("Access-Control-Request-Headers", "x-greptime-auth")
1421 .header("Access-Control-Request-Method", "DELETE")
1422 .header("Origin", "https://example.com")
1423 .send()
1424 .await;
1425 assert_eq!(res.status(), StatusCode::OK);
1426 assert_eq!(
1427 res.headers()
1428 .get(http::header::ACCESS_CONTROL_ALLOW_ORIGIN)
1429 .expect("expect cors header origin"),
1430 "*"
1431 );
1432 assert_eq!(
1433 res.headers()
1434 .get(http::header::ACCESS_CONTROL_ALLOW_HEADERS)
1435 .expect("expect cors header headers"),
1436 "*"
1437 );
1438 assert_eq!(
1439 res.headers()
1440 .get(http::header::ACCESS_CONTROL_ALLOW_METHODS)
1441 .expect("expect cors header methods"),
1442 "GET,POST,PUT,DELETE,HEAD"
1443 );
1444 }
1445
1446 #[tokio::test]
1447 pub async fn test_cors_custom_origins() {
1448 let (tx, _rx) = mpsc::channel(100);
1450 let origin = "https://example.com";
1451
1452 let options = HttpOptions {
1453 cors_allowed_origins: vec![origin.to_string()],
1454 ..Default::default()
1455 };
1456
1457 let app = make_test_app_custom(tx, options);
1458 let client = TestClient::new(app).await;
1459
1460 let res = client.get("/health").header("Origin", origin).send().await;
1461
1462 assert_eq!(res.status(), StatusCode::OK);
1463 assert_eq!(
1464 res.headers()
1465 .get(http::header::ACCESS_CONTROL_ALLOW_ORIGIN)
1466 .expect("expect cors header origin"),
1467 origin
1468 );
1469
1470 let res = client
1471 .get("/health")
1472 .header("Origin", "https://notallowed.com")
1473 .send()
1474 .await;
1475
1476 assert_eq!(res.status(), StatusCode::OK);
1477 assert!(
1478 !res.headers()
1479 .contains_key(http::header::ACCESS_CONTROL_ALLOW_ORIGIN)
1480 );
1481 }
1482
1483 #[tokio::test]
1484 pub async fn test_cors_disabled() {
1485 let (tx, _rx) = mpsc::channel(100);
1487
1488 let options = HttpOptions {
1489 enable_cors: false,
1490 ..Default::default()
1491 };
1492
1493 let app = make_test_app_custom(tx, options);
1494 let client = TestClient::new(app).await;
1495
1496 let res = client.get("/health").send().await;
1497
1498 assert_eq!(res.status(), StatusCode::OK);
1499 assert!(
1500 !res.headers()
1501 .contains_key(http::header::ACCESS_CONTROL_ALLOW_ORIGIN)
1502 );
1503 }
1504
1505 #[test]
1506 fn test_http_options_default() {
1507 let default = HttpOptions::default();
1508 assert_eq!("127.0.0.1:4000".to_string(), default.addr);
1509 assert_eq!(Duration::from_secs(0), default.timeout)
1510 }
1511
    /// Exercises the dynamic per-request timeout on the `/test/timeout`
    /// route, whose handler never completes (see `forever`/`timeout` above).
    #[tokio::test]
    async fn test_http_server_request_timeout() {
        common_telemetry::init_default_ut_logging();

        let (tx, _rx) = mpsc::channel(100);
        let app = make_test_app(tx);
        let client = TestClient::new(app).await;
        // Without a header, the route's default 10 ms timeout fires -> 408.
        let res = client.get("/test/timeout").send().await;
        assert_eq!(res.status(), StatusCode::REQUEST_TIMEOUT);

        // The timeout header raises the deadline to 20 ms: the request still
        // times out, but only after at least ~15 ms have elapsed.
        let now = Instant::now();
        let res = client
            .get("/test/timeout")
            .header(GREPTIME_DB_HEADER_TIMEOUT, "20ms")
            .send()
            .await;
        assert_eq!(res.status(), StatusCode::REQUEST_TIMEOUT);
        let elapsed = now.elapsed();
        assert!(elapsed > Duration::from_millis(15));

        // A "0s" timeout apparently disables the deadline entirely: the
        // request never completes, so the outer 15 ms `tokio::time::timeout`
        // expires first (hence `unwrap_err`).
        tokio::time::timeout(
            Duration::from_millis(15),
            client
                .get("/test/timeout")
                .header(GREPTIME_DB_HEADER_TIMEOUT, "0s")
                .send(),
        )
        .await
        .unwrap_err();

        // Same as above, with the zero duration rendered by `humantime`.
        tokio::time::timeout(
            Duration::from_millis(15),
            client
                .get("/test/timeout")
                .header(
                    GREPTIME_DB_HEADER_TIMEOUT,
                    humantime::format_duration(Duration::default()).to_string(),
                )
                .send(),
        )
        .await
        .unwrap_err();
    }
1555
1556 #[tokio::test]
1557 async fn test_schema_for_empty_response() {
1558 let column_schemas = vec![
1559 ColumnSchema::new("numbers", ConcreteDataType::uint32_datatype(), false),
1560 ColumnSchema::new("strings", ConcreteDataType::string_datatype(), true),
1561 ];
1562 let schema = Arc::new(Schema::new(column_schemas));
1563
1564 let recordbatches = RecordBatches::try_new(schema.clone(), vec![]).unwrap();
1565 let outputs = vec![Ok(Output::new_with_record_batches(recordbatches))];
1566
1567 let json_resp = GreptimedbV1Response::from_output(outputs).await;
1568 if let HttpResponse::GreptimedbV1(json_resp) = json_resp {
1569 let json_output = &json_resp.output[0];
1570 if let GreptimeQueryOutput::Records(r) = json_output {
1571 assert_eq!(r.num_rows(), 0);
1572 assert_eq!(r.num_cols(), 2);
1573 assert_eq!(r.schema.column_schemas[0].name, "numbers");
1574 assert_eq!(r.schema.column_schemas[0].data_type, "UInt32");
1575 } else {
1576 panic!("invalid output type");
1577 }
1578 } else {
1579 panic!("invalid format")
1580 }
1581 }
1582
1583 #[tokio::test]
1584 async fn test_recordbatches_conversion() {
1585 let column_schemas = vec![
1586 ColumnSchema::new("numbers", ConcreteDataType::uint32_datatype(), false),
1587 ColumnSchema::new("strings", ConcreteDataType::string_datatype(), true),
1588 ];
1589 let schema = Arc::new(Schema::new(column_schemas));
1590 let columns: Vec<VectorRef> = vec![
1591 Arc::new(UInt32Vector::from_slice(vec![1, 2, 3, 4])),
1592 Arc::new(StringVector::from(vec![
1593 None,
1594 Some("hello"),
1595 Some("greptime"),
1596 None,
1597 ])),
1598 ];
1599 let recordbatch = RecordBatch::new(schema.clone(), columns).unwrap();
1600
1601 for format in [
1602 ResponseFormat::GreptimedbV1,
1603 ResponseFormat::InfluxdbV1,
1604 ResponseFormat::Csv(true, true),
1605 ResponseFormat::Table,
1606 ResponseFormat::Arrow,
1607 ResponseFormat::Json,
1608 ResponseFormat::Null,
1609 ] {
1610 let recordbatches =
1611 RecordBatches::try_new(schema.clone(), vec![recordbatch.clone()]).unwrap();
1612 let outputs = vec![Ok(Output::new_with_record_batches(recordbatches))];
1613 let json_resp = match format {
1614 ResponseFormat::Arrow => ArrowResponse::from_output(outputs, None).await,
1615 ResponseFormat::Csv(with_names, with_types) => {
1616 CsvResponse::from_output(outputs, with_names, with_types).await
1617 }
1618 ResponseFormat::Table => TableResponse::from_output(outputs).await,
1619 ResponseFormat::GreptimedbV1 => GreptimedbV1Response::from_output(outputs).await,
1620 ResponseFormat::InfluxdbV1 => InfluxdbV1Response::from_output(outputs, None).await,
1621 ResponseFormat::Json => JsonResponse::from_output(outputs).await,
1622 ResponseFormat::Null => NullResponse::from_output(outputs).await,
1623 };
1624
1625 match json_resp {
1626 HttpResponse::GreptimedbV1(resp) => {
1627 let json_output = &resp.output[0];
1628 if let GreptimeQueryOutput::Records(r) = json_output {
1629 assert_eq!(r.num_rows(), 4);
1630 assert_eq!(r.num_cols(), 2);
1631 assert_eq!(r.schema.column_schemas[0].name, "numbers");
1632 assert_eq!(r.schema.column_schemas[0].data_type, "UInt32");
1633 assert_eq!(r.rows[0][0], serde_json::Value::from(1));
1634 assert_eq!(r.rows[0][1], serde_json::Value::Null);
1635 } else {
1636 panic!("invalid output type");
1637 }
1638 }
1639 HttpResponse::InfluxdbV1(resp) => {
1640 let json_output = &resp.results()[0];
1641 assert_eq!(json_output.num_rows(), 4);
1642 assert_eq!(json_output.num_cols(), 2);
1643 assert_eq!(json_output.series[0].columns.clone()[0], "numbers");
1644 assert_eq!(
1645 json_output.series[0].values[0][0],
1646 serde_json::Value::from(1)
1647 );
1648 assert_eq!(json_output.series[0].values[0][1], serde_json::Value::Null);
1649 }
1650 HttpResponse::Csv(resp) => {
1651 let output = &resp.output()[0];
1652 if let GreptimeQueryOutput::Records(r) = output {
1653 assert_eq!(r.num_rows(), 4);
1654 assert_eq!(r.num_cols(), 2);
1655 assert_eq!(r.schema.column_schemas[0].name, "numbers");
1656 assert_eq!(r.schema.column_schemas[0].data_type, "UInt32");
1657 assert_eq!(r.rows[0][0], serde_json::Value::from(1));
1658 assert_eq!(r.rows[0][1], serde_json::Value::Null);
1659 } else {
1660 panic!("invalid output type");
1661 }
1662 }
1663
1664 HttpResponse::Table(resp) => {
1665 let output = &resp.output()[0];
1666 if let GreptimeQueryOutput::Records(r) = output {
1667 assert_eq!(r.num_rows(), 4);
1668 assert_eq!(r.num_cols(), 2);
1669 assert_eq!(r.schema.column_schemas[0].name, "numbers");
1670 assert_eq!(r.schema.column_schemas[0].data_type, "UInt32");
1671 assert_eq!(r.rows[0][0], serde_json::Value::from(1));
1672 assert_eq!(r.rows[0][1], serde_json::Value::Null);
1673 } else {
1674 panic!("invalid output type");
1675 }
1676 }
1677
1678 HttpResponse::Arrow(resp) => {
1679 let output = resp.data;
1680 let mut reader =
1681 FileReader::try_new(Cursor::new(output), None).expect("Arrow reader error");
1682 let schema = reader.schema();
1683 assert_eq!(schema.fields[0].name(), "numbers");
1684 assert_eq!(schema.fields[0].data_type(), &DataType::UInt32);
1685 assert_eq!(schema.fields[1].name(), "strings");
1686 assert_eq!(schema.fields[1].data_type(), &DataType::Utf8);
1687
1688 let rb = reader.next().unwrap().expect("read record batch failed");
1689 assert_eq!(rb.num_columns(), 2);
1690 assert_eq!(rb.num_rows(), 4);
1691 }
1692
1693 HttpResponse::Json(resp) => {
1694 let output = &resp.output()[0];
1695 if let GreptimeQueryOutput::Records(r) = output {
1696 assert_eq!(r.num_rows(), 4);
1697 assert_eq!(r.num_cols(), 2);
1698 assert_eq!(r.schema.column_schemas[0].name, "numbers");
1699 assert_eq!(r.schema.column_schemas[0].data_type, "UInt32");
1700 assert_eq!(r.rows[0][0], serde_json::Value::from(1));
1701 assert_eq!(r.rows[0][1], serde_json::Value::Null);
1702 } else {
1703 panic!("invalid output type");
1704 }
1705 }
1706
1707 HttpResponse::Null(resp) => {
1708 assert_eq!(resp.rows(), 4);
1709 }
1710
1711 HttpResponse::Error(err) => unreachable!("{err:?}"),
1712 }
1713 }
1714 }
1715
1716 #[test]
1717 fn test_response_format_misc() {
1718 assert_eq!(ResponseFormat::default(), ResponseFormat::GreptimedbV1);
1719 assert_eq!(ResponseFormat::parse("arrow"), Some(ResponseFormat::Arrow));
1720 assert_eq!(
1721 ResponseFormat::parse("csv"),
1722 Some(ResponseFormat::Csv(false, false))
1723 );
1724 assert_eq!(
1725 ResponseFormat::parse("csvwithnames"),
1726 Some(ResponseFormat::Csv(true, false))
1727 );
1728 assert_eq!(
1729 ResponseFormat::parse("csvwithnamesandtypes"),
1730 Some(ResponseFormat::Csv(true, true))
1731 );
1732 assert_eq!(ResponseFormat::parse("table"), Some(ResponseFormat::Table));
1733 assert_eq!(
1734 ResponseFormat::parse("greptimedb_v1"),
1735 Some(ResponseFormat::GreptimedbV1)
1736 );
1737 assert_eq!(
1738 ResponseFormat::parse("influxdb_v1"),
1739 Some(ResponseFormat::InfluxdbV1)
1740 );
1741 assert_eq!(ResponseFormat::parse("json"), Some(ResponseFormat::Json));
1742 assert_eq!(ResponseFormat::parse("null"), Some(ResponseFormat::Null));
1743
1744 assert_eq!(ResponseFormat::parse("invalid"), None);
1746 assert_eq!(ResponseFormat::parse(""), None);
1747 assert_eq!(ResponseFormat::parse("CSV"), None); assert_eq!(ResponseFormat::Arrow.as_str(), "arrow");
1751 assert_eq!(ResponseFormat::Csv(false, false).as_str(), "csv");
1752 assert_eq!(ResponseFormat::Csv(true, true).as_str(), "csv");
1753 assert_eq!(ResponseFormat::Table.as_str(), "table");
1754 assert_eq!(ResponseFormat::GreptimedbV1.as_str(), "greptimedb_v1");
1755 assert_eq!(ResponseFormat::InfluxdbV1.as_str(), "influxdb_v1");
1756 assert_eq!(ResponseFormat::Json.as_str(), "json");
1757 assert_eq!(ResponseFormat::Null.as_str(), "null");
1758 assert_eq!(ResponseFormat::default().as_str(), "greptimedb_v1");
1759 }
1760
1761 #[test]
1762 fn test_decode_label_name_strict() {
1763 let strict = PromValidationMode::Strict;
1764
1765 assert!(strict.decode_label_name(b"__name__").is_ok());
1767 assert!(strict.decode_label_name(b"job").is_ok());
1768 assert!(strict.decode_label_name(b"instance").is_ok());
1769 assert!(strict.decode_label_name(b"_private").is_ok());
1770 assert!(strict.decode_label_name(b"label_with_underscores").is_ok());
1771 assert!(strict.decode_label_name(b"abc123").is_ok());
1772 assert!(strict.decode_label_name(b"A").is_ok());
1773 assert!(strict.decode_label_name(b"_").is_ok());
1774
1775 assert!(strict.decode_label_name(b"0abc").is_err());
1777 assert!(strict.decode_label_name(b"123").is_err());
1778
1779 assert!(strict.decode_label_name(b"label-name").is_err());
1781 assert!(strict.decode_label_name(b"label.name").is_err());
1782 assert!(strict.decode_label_name(b"label name").is_err());
1783 assert!(strict.decode_label_name(b"label/name").is_err());
1784
1785 assert!(strict.decode_label_name(b"").is_err());
1787
1788 assert!(strict.decode_label_name("ラベル".as_bytes()).is_err());
1790
1791 assert!(strict.decode_label_name(&[0xff, 0xfe]).is_err());
1793 }
1794
1795 #[test]
1796 fn test_decode_label_name_lossy() {
1797 let lossy = PromValidationMode::Lossy;
1798
1799 assert!(lossy.decode_label_name(b"__name__").is_ok());
1801 assert!(lossy.decode_label_name(b"label-name").is_err());
1802 assert!(lossy.decode_label_name(b"0abc").is_err());
1803
1804 assert!(lossy.decode_label_name(&[0xff, 0xfe]).is_err());
1806 }
1807
    /// Even the unchecked mode enforces the label-name grammar.
    #[test]
    fn test_decode_label_name_unchecked() {
        let unchecked = PromValidationMode::Unchecked;

        assert!(unchecked.decode_label_name(b"__name__").is_ok());
        // Invalid first character / punctuation are still rejected.
        assert!(unchecked.decode_label_name(b"label-name").is_err());
        assert!(unchecked.decode_label_name(b"0abc").is_err());
    }
1817
1818 #[test]
1819 fn test_is_valid_prom_label_name_bytes() {
1820 assert!(validate_label_name(b"__name__"));
1821 assert!(validate_label_name(b"job"));
1822 assert!(validate_label_name(b"_"));
1823 assert!(validate_label_name(b"A"));
1824 assert!(validate_label_name(b"abc123"));
1825 assert!(validate_label_name(b"_leading_underscore"));
1826
1827 assert!(!validate_label_name(b""));
1828 assert!(!validate_label_name(b"0starts_with_digit"));
1829 assert!(!validate_label_name(b"has-dash"));
1830 assert!(!validate_label_name(b"has.dot"));
1831 assert!(!validate_label_name(b"has space"));
1832 assert!(!validate_label_name(&[0xff, 0xfe]));
1833 }
1834}