diff --git a/config/config.md b/config/config.md
index 58c491b4ad..3aa22a5cc6 100644
--- a/config/config.md
+++ b/config/config.md
@@ -210,14 +210,6 @@
| `slow_query.record_type` | String | Unset | The record type of slow queries. It can be `system_table` or `log`. |
| `slow_query.threshold` | String | Unset | The threshold of slow query. |
| `slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
-| `export_metrics` | -- | -- | The standalone can export its metrics and send to Prometheus compatible service (e.g. `greptimedb`) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
-| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
-| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
-| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself<br/>You must create the database before enabling it. |
-| `export_metrics.self_import.db` | String | Unset | -- |
-| `export_metrics.remote_write` | -- | -- | -- |
-| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
-| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
@@ -335,12 +327,6 @@
| `slow_query.threshold` | String | `30s` | The threshold of slow query. It can be human readable time string, for example: `10s`, `100ms`, `1s`. |
| `slow_query.sample_ratio` | Float | `1.0` | The sampling ratio of slow query log. The value should be in the range of (0, 1]. For example, `0.1` means 10% of the slow queries will be logged and `1.0` means all slow queries will be logged. |
| `slow_query.ttl` | String | `90d` | The TTL of the `slow_queries` system table. Default is `90d` when `record_type` is `system_table`. |
-| `export_metrics` | -- | -- | The frontend can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
-| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
-| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
-| `export_metrics.remote_write` | -- | -- | -- |
-| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
-| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
@@ -430,12 +416,6 @@
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
-| `export_metrics` | -- | -- | The metasrv can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
-| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
-| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
-| `export_metrics.remote_write` | -- | -- | -- |
-| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
-| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
@@ -608,12 +588,6 @@
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
-| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
-| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
-| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
-| `export_metrics.remote_write` | -- | -- | -- |
-| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
-| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
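With `export_metrics` gone from the reference docs, the supported collection path is the pull model: every component keeps serving the Prometheus text format from its HTTP server. A minimal sketch of checking that endpoint, assuming the default standalone HTTP address `127.0.0.1:4000` and the usual `/metrics` path (both illustrative here, not part of this patch):

```rust
// Illustrative: verify a node still exposes metrics for Prometheus to scrape
// after `export_metrics` is removed. Address and path assume the defaults.
#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    let body = reqwest::get("http://127.0.0.1:4000/metrics")
        .await?
        .text()
        .await?;
    // The text exposition format prefixes each family with # HELP / # TYPE.
    assert!(body.contains("# TYPE"));
    Ok(())
}
```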
diff --git a/config/datanode.example.toml b/config/datanode.example.toml
index dda926e1cb..8db6bf3d1c 100644
--- a/config/datanode.example.toml
+++ b/config/datanode.example.toml
@@ -712,21 +712,6 @@ otlp_export_protocol = "http"
[logging.tracing_sample_ratio]
default_ratio = 1.0
-## The datanode can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
-## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
-[export_metrics]
-## whether enable export metrics.
-enable = false
-## The interval of export metrics.
-write_interval = "30s"
-
-[export_metrics.remote_write]
-## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
-url = ""
-
-## HTTP headers of Prometheus remote-write carry.
-headers = { }
-
## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.
diff --git a/config/frontend.example.toml b/config/frontend.example.toml
index 04d763c18f..ecac6cff01 100644
--- a/config/frontend.example.toml
+++ b/config/frontend.example.toml
@@ -329,21 +329,6 @@ sample_ratio = 1.0
## The TTL of the `slow_queries` system table. Default is `90d` when `record_type` is `system_table`.
ttl = "90d"
-## The frontend can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
-## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
-[export_metrics]
-## whether enable export metrics.
-enable = false
-## The interval of export metrics.
-write_interval = "30s"
-
-[export_metrics.remote_write]
-## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
-url = ""
-
-## HTTP headers of Prometheus remote-write carry.
-headers = { }
-
## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.
diff --git a/config/metasrv.example.toml b/config/metasrv.example.toml
index d7d5ace99c..38810362a8 100644
--- a/config/metasrv.example.toml
+++ b/config/metasrv.example.toml
@@ -323,21 +323,6 @@ otlp_export_protocol = "http"
[logging.tracing_sample_ratio]
default_ratio = 1.0
-## The metasrv can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
-## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
-[export_metrics]
-## whether enable export metrics.
-enable = false
-## The interval of export metrics.
-write_interval = "30s"
-
-[export_metrics.remote_write]
-## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
-url = ""
-
-## HTTP headers of Prometheus remote-write carry.
-headers = { }
-
## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.
diff --git a/config/standalone.example.toml b/config/standalone.example.toml
index 70e6e0888f..661067d2a1 100644
--- a/config/standalone.example.toml
+++ b/config/standalone.example.toml
@@ -820,27 +820,6 @@ default_ratio = 1.0
## @toml2docs:none-default
#+ sample_ratio = 1.0
-## The standalone can export its metrics and send to Prometheus compatible service (e.g. `greptimedb`) from remote-write API.
-## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
-[export_metrics]
-## whether enable export metrics.
-enable = false
-## The interval of export metrics.
-write_interval = "30s"
-
-## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
-## You must create the database before enabling it.
-[export_metrics.self_import]
-## @toml2docs:none-default
-db = "greptime_metrics"
-
-[export_metrics.remote_write]
-## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
-url = ""
-
-## HTTP headers of Prometheus remote-write carry.
-headers = { }
-
## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.
diff --git a/src/catalog/src/system_schema/information_schema.rs b/src/catalog/src/system_schema/information_schema.rs
index 44609ade34..02005c74c0 100644
--- a/src/catalog/src/system_schema/information_schema.rs
+++ b/src/catalog/src/system_schema/information_schema.rs
@@ -22,7 +22,6 @@ mod procedure_info;
pub mod process_list;
pub mod region_peers;
mod region_statistics;
-mod runtime_metrics;
pub mod schemata;
mod ssts;
mod table_constraints;
@@ -65,7 +64,6 @@ use crate::system_schema::information_schema::information_memory_table::get_sche
use crate::system_schema::information_schema::key_column_usage::InformationSchemaKeyColumnUsage;
use crate::system_schema::information_schema::partitions::InformationSchemaPartitions;
use crate::system_schema::information_schema::region_peers::InformationSchemaRegionPeers;
-use crate::system_schema::information_schema::runtime_metrics::InformationSchemaMetrics;
use crate::system_schema::information_schema::schemata::InformationSchemaSchemata;
use crate::system_schema::information_schema::ssts::{
InformationSchemaSstsIndexMeta, InformationSchemaSstsManifest, InformationSchemaSstsStorage,
@@ -216,7 +214,6 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
self.catalog_name.clone(),
self.catalog_manager.clone(),
)) as _),
- RUNTIME_METRICS => Some(Arc::new(InformationSchemaMetrics::new())),
PARTITIONS => Some(Arc::new(InformationSchemaPartitions::new(
self.catalog_name.clone(),
self.catalog_manager.clone(),
@@ -311,10 +308,6 @@ impl InformationSchemaProvider {
// authentication details, and other critical information.
// Only put these tables under `greptime` catalog to prevent info leak.
if self.catalog_name == DEFAULT_CATALOG_NAME {
- tables.insert(
- RUNTIME_METRICS.to_string(),
- self.build_table(RUNTIME_METRICS).unwrap(),
- );
tables.insert(
BUILD_INFO.to_string(),
self.build_table(BUILD_INFO).unwrap(),
diff --git a/src/catalog/src/system_schema/information_schema/runtime_metrics.rs b/src/catalog/src/system_schema/information_schema/runtime_metrics.rs
deleted file mode 100644
index 5ccb871321..0000000000
--- a/src/catalog/src/system_schema/information_schema/runtime_metrics.rs
+++ /dev/null
@@ -1,265 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::sync::Arc;
-
-use arrow_schema::SchemaRef as ArrowSchemaRef;
-use common_catalog::consts::INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID;
-use common_error::ext::BoxedError;
-use common_recordbatch::adapter::RecordBatchStreamAdapter;
-use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
-use common_time::util::current_time_millis;
-use datafusion::execution::TaskContext;
-use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
-use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
-use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
-use datatypes::prelude::{ConcreteDataType, MutableVector};
-use datatypes::scalars::ScalarVectorBuilder;
-use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
-use datatypes::vectors::{
- ConstantVector, Float64VectorBuilder, StringVectorBuilder, TimestampMillisecondVector,
- VectorRef,
-};
-use itertools::Itertools;
-use snafu::ResultExt;
-use store_api::storage::{ScanRequest, TableId};
-
-use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
-use crate::system_schema::information_schema::{InformationTable, RUNTIME_METRICS};
-
-#[derive(Debug)]
-pub(super) struct InformationSchemaMetrics {
- schema: SchemaRef,
-}
-
-const METRIC_NAME: &str = "metric_name";
-const METRIC_VALUE: &str = "value";
-const METRIC_LABELS: &str = "labels";
-const PEER_ADDR: &str = "peer_addr";
-const PEER_TYPE: &str = "peer_type";
-const TIMESTAMP: &str = "timestamp";
-
-/// The `information_schema.runtime_metrics` virtual table.
-/// It provides the GreptimeDB runtime metrics for the users by SQL.
-impl InformationSchemaMetrics {
- pub(super) fn new() -> Self {
- Self {
- schema: Self::schema(),
- }
- }
-
- fn schema() -> SchemaRef {
- Arc::new(Schema::new(vec![
- ColumnSchema::new(METRIC_NAME, ConcreteDataType::string_datatype(), false),
- ColumnSchema::new(METRIC_VALUE, ConcreteDataType::float64_datatype(), false),
- ColumnSchema::new(METRIC_LABELS, ConcreteDataType::string_datatype(), true),
- ColumnSchema::new(PEER_ADDR, ConcreteDataType::string_datatype(), true),
- ColumnSchema::new(PEER_TYPE, ConcreteDataType::string_datatype(), false),
- ColumnSchema::new(
- TIMESTAMP,
- ConcreteDataType::timestamp_millisecond_datatype(),
- false,
- ),
- ]))
- }
-
- fn builder(&self) -> InformationSchemaMetricsBuilder {
- InformationSchemaMetricsBuilder::new(self.schema.clone())
- }
-}
-
-impl InformationTable for InformationSchemaMetrics {
- fn table_id(&self) -> TableId {
- INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID
- }
-
- fn table_name(&self) -> &'static str {
- RUNTIME_METRICS
- }
-
- fn schema(&self) -> SchemaRef {
- self.schema.clone()
- }
-
- fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
- let schema = self.schema.arrow_schema().clone();
- let mut builder = self.builder();
- let stream = Box::pin(DfRecordBatchStreamAdapter::new(
- schema,
- futures::stream::once(async move {
- builder
- .make_metrics(Some(request))
- .await
- .map(|x| x.into_df_record_batch())
- .map_err(Into::into)
- }),
- ));
-
- Ok(Box::pin(
- RecordBatchStreamAdapter::try_new(stream)
- .map_err(BoxedError::new)
- .context(InternalSnafu)?,
- ))
- }
-}
-
-struct InformationSchemaMetricsBuilder {
- schema: SchemaRef,
-
- metric_names: StringVectorBuilder,
- metric_values: Float64VectorBuilder,
- metric_labels: StringVectorBuilder,
- peer_addrs: StringVectorBuilder,
- peer_types: StringVectorBuilder,
-}
-
-impl InformationSchemaMetricsBuilder {
- fn new(schema: SchemaRef) -> Self {
- Self {
- schema,
- metric_names: StringVectorBuilder::with_capacity(42),
- metric_values: Float64VectorBuilder::with_capacity(42),
- metric_labels: StringVectorBuilder::with_capacity(42),
- peer_addrs: StringVectorBuilder::with_capacity(42),
- peer_types: StringVectorBuilder::with_capacity(42),
- }
- }
-
- fn add_metric(
- &mut self,
- metric_name: &str,
- labels: String,
- metric_value: f64,
- peer: Option<&str>,
- peer_type: &str,
- ) {
- self.metric_names.push(Some(metric_name));
- self.metric_values.push(Some(metric_value));
- self.metric_labels.push(Some(&labels));
- self.peer_addrs.push(peer);
- self.peer_types.push(Some(peer_type));
- }
-
- async fn make_metrics(&mut self, _request: Option<ScanRequest>) -> Result<RecordBatch> {
- let metric_families = prometheus::gather();
-
- let write_request =
- common_telemetry::metric::convert_metric_to_write_request(metric_families, None, 0);
-
- for ts in write_request.timeseries {
- //Safety: always has `__name__` label
- let metric_name = ts
- .labels
- .iter()
- .find_map(|label| {
- if label.name == "__name__" {
- Some(label.value.clone())
- } else {
- None
- }
- })
- .unwrap();
-
- self.add_metric(
- &metric_name,
- ts.labels
- .into_iter()
- .filter_map(|label| {
- if label.name == "__name__" {
- None
- } else {
- Some(format!("{}={}", label.name, label.value))
- }
- })
- .join(", "),
- // Safety: always has a sample
- ts.samples[0].value,
- // The peer column is always `None` for standalone
- None,
- "STANDALONE",
- );
- }
-
- // FIXME(dennis): fetching other peers metrics
- self.finish()
- }
-
- fn finish(&mut self) -> Result<RecordBatch> {
- let rows_num = self.metric_names.len();
-
- let timestamps = Arc::new(ConstantVector::new(
- Arc::new(TimestampMillisecondVector::from_slice([
- current_time_millis(),
- ])),
- rows_num,
- ));
-
- let columns: Vec<VectorRef> = vec![
- Arc::new(self.metric_names.finish()),
- Arc::new(self.metric_values.finish()),
- Arc::new(self.metric_labels.finish()),
- Arc::new(self.peer_addrs.finish()),
- Arc::new(self.peer_types.finish()),
- timestamps,
- ];
-
- RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
- }
-}
-
-impl DfPartitionStream for InformationSchemaMetrics {
- fn schema(&self) -> &ArrowSchemaRef {
- self.schema.arrow_schema()
- }
-
- fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
- let schema = self.schema.arrow_schema().clone();
- let mut builder = self.builder();
- Box::pin(DfRecordBatchStreamAdapter::new(
- schema,
- futures::stream::once(async move {
- builder
- .make_metrics(None)
- .await
- .map(|x| x.into_df_record_batch())
- .map_err(Into::into)
- }),
- ))
- }
-}
-
-#[cfg(test)]
-mod tests {
- use common_recordbatch::RecordBatches;
-
- use super::*;
-
- #[tokio::test]
- async fn test_make_metrics() {
- let metrics = InformationSchemaMetrics::new();
-
- let stream = metrics.to_stream(ScanRequest::default()).unwrap();
-
- let batches = RecordBatches::try_collect(stream).await.unwrap();
-
- let result_literal = batches.pretty_print().unwrap();
-
- assert!(result_literal.contains(METRIC_NAME));
- assert!(result_literal.contains(METRIC_VALUE));
- assert!(result_literal.contains(METRIC_LABELS));
- assert!(result_literal.contains(PEER_ADDR));
- assert!(result_literal.contains(PEER_TYPE));
- assert!(result_literal.contains(TIMESTAMP));
- }
-}
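The deleted table was a thin SQL view over `prometheus::gather()`; the underlying registry is untouched and remains reachable through the text exposition served by the HTTP `/metrics` handler. A minimal sketch of rendering it directly, with a helper name of our own choosing:

```rust
use prometheus::{Encoder, TextEncoder};

// Render the process-wide registry as Prometheus text exposition format —
// the same data `information_schema.runtime_metrics` used to surface via SQL.
fn render_process_metrics() -> String {
    let metric_families = prometheus::gather();
    let mut buffer = Vec::new();
    TextEncoder::new()
        .encode(&metric_families, &mut buffer)
        .expect("encoding gathered metrics to text should not fail");
    String::from_utf8(buffer).expect("text exposition format is UTF-8")
}
```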
diff --git a/src/catalog/src/system_schema/information_schema/table_names.rs b/src/catalog/src/system_schema/information_schema/table_names.rs
index 23791425dc..2a3329fece 100644
--- a/src/catalog/src/system_schema/information_schema/table_names.rs
+++ b/src/catalog/src/system_schema/information_schema/table_names.rs
@@ -38,7 +38,6 @@ pub const TABLE_PRIVILEGES: &str = "table_privileges";
pub const TRIGGERS: &str = "triggers";
pub const GLOBAL_STATUS: &str = "global_status";
pub const SESSION_STATUS: &str = "session_status";
-pub const RUNTIME_METRICS: &str = "runtime_metrics";
pub const PARTITIONS: &str = "partitions";
pub const REGION_PEERS: &str = "region_peers";
pub const TABLE_CONSTRAINTS: &str = "table_constraints";
diff --git a/src/cmd/src/error.rs b/src/cmd/src/error.rs
index 0b77dec341..fbff2d42e0 100644
--- a/src/cmd/src/error.rs
+++ b/src/cmd/src/error.rs
@@ -99,13 +99,6 @@ pub enum Error {
source: flow::Error,
},
- #[snafu(display("Servers error"))]
- Servers {
- #[snafu(implicit)]
- location: Location,
- source: servers::error::Error,
- },
-
#[snafu(display("Failed to start frontend"))]
StartFrontend {
#[snafu(implicit)]
@@ -336,7 +329,6 @@ impl ErrorExt for Error {
Error::ShutdownFrontend { source, .. } => source.status_code(),
Error::StartMetaServer { source, .. } => source.status_code(),
Error::ShutdownMetaServer { source, .. } => source.status_code(),
- Error::Servers { source, .. } => source.status_code(),
Error::BuildMetaServer { source, .. } => source.status_code(),
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
Error::BuildCli { source, .. } => source.status_code(),
diff --git a/src/cmd/src/frontend.rs b/src/cmd/src/frontend.rs
index 93dac1e922..c5162e5ac0 100644
--- a/src/cmd/src/frontend.rs
+++ b/src/cmd/src/frontend.rs
@@ -43,7 +43,6 @@ use frontend::instance::builder::FrontendBuilder;
use frontend::server::Services;
use meta_client::{MetaClientOptions, MetaClientType};
use servers::addrs;
-use servers::export_metrics::ExportMetricsTask;
use servers::grpc::GrpcOptions;
use servers::tls::{TlsMode, TlsOption};
use snafu::{OptionExt, ResultExt};
@@ -458,9 +457,6 @@ impl StartCommand {
.context(error::StartFrontendSnafu)?;
let instance = Arc::new(instance);
- let export_metrics_task = ExportMetricsTask::try_new(&opts.export_metrics, Some(&plugins))
- .context(error::ServersSnafu)?;
-
let servers = Services::new(opts, instance.clone(), plugins)
.build()
.context(error::StartFrontendSnafu)?;
@@ -469,7 +465,6 @@ impl StartCommand {
instance,
servers,
heartbeat_task,
- export_metrics_task,
};
Ok(Instance::new(frontend, guard))
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index 8d33fc5193..8d3741111f 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -57,7 +57,6 @@ use frontend::instance::StandaloneDatanodeManager;
use frontend::instance::builder::FrontendBuilder;
use frontend::server::Services;
use meta_srv::metasrv::{FLOW_ID_SEQ, TABLE_ID_SEQ};
-use servers::export_metrics::ExportMetricsTask;
use servers::tls::{TlsMode, TlsOption};
use snafu::ResultExt;
use standalone::StandaloneInformationExtension;
@@ -565,9 +564,6 @@ impl StartCommand {
.context(StartFlownodeSnafu)?;
flow_streaming_engine.set_frontend_invoker(invoker).await;
- let export_metrics_task = ExportMetricsTask::try_new(&opts.export_metrics, Some(&plugins))
- .context(error::ServersSnafu)?;
-
let servers = Services::new(opts, fe_instance.clone(), plugins.clone())
.build()
.context(error::StartFrontendSnafu)?;
@@ -576,7 +572,6 @@ impl StartCommand {
instance: fe_instance,
servers,
heartbeat_task: None,
- export_metrics_task,
};
#[cfg(feature = "enterprise")]
diff --git a/src/cmd/tests/load_config_test.rs b/src/cmd/tests/load_config_test.rs
index 222012bfd8..56a6caa71b 100644
--- a/src/cmd/tests/load_config_test.rs
+++ b/src/cmd/tests/load_config_test.rs
@@ -31,7 +31,6 @@ use meta_srv::selector::SelectorType;
use metric_engine::config::EngineConfig as MetricEngineConfig;
use mito2::config::MitoConfig;
use query::options::QueryOptions;
-use servers::export_metrics::ExportMetricsOption;
use servers::grpc::GrpcOptions;
use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
@@ -95,11 +94,6 @@ fn test_load_datanode_example_config() {
tracing_sample_ratio: Some(Default::default()),
..Default::default()
},
- export_metrics: ExportMetricsOption {
- self_import: None,
- remote_write: Some(Default::default()),
- ..Default::default()
- },
grpc: GrpcOptions::default()
.with_bind_addr("127.0.0.1:3001")
.with_server_addr("127.0.0.1:3001"),
@@ -146,11 +140,6 @@ fn test_load_frontend_example_config() {
..Default::default()
},
},
- export_metrics: ExportMetricsOption {
- self_import: None,
- remote_write: Some(Default::default()),
- ..Default::default()
- },
grpc: GrpcOptions {
bind_addr: "127.0.0.1:4001".to_string(),
server_addr: "127.0.0.1:4001".to_string(),
@@ -201,11 +190,6 @@ fn test_load_metasrv_example_config() {
tcp_nodelay: true,
},
},
- export_metrics: ExportMetricsOption {
- self_import: None,
- remote_write: Some(Default::default()),
- ..Default::default()
- },
backend_tls: Some(TlsOption {
mode: TlsMode::Prefer,
cert_path: String::new(),
@@ -317,11 +301,6 @@ fn test_load_standalone_example_config() {
tracing_sample_ratio: Some(Default::default()),
..Default::default()
},
- export_metrics: ExportMetricsOption {
- self_import: Some(Default::default()),
- remote_write: Some(Default::default()),
- ..Default::default()
- },
http: HttpOptions {
cors_allowed_origins: vec!["https://example.com".to_string()],
..Default::default()
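The expected option structs above simply drop their `export_metrics` fields. Assuming the options types keep deriving `Deserialize` with `#[serde(default)]` and without `deny_unknown_fields` (an assumption worth verifying, not something this patch asserts), a stale `[export_metrics]` block left in an operator's config file still parses and is silently ignored. A toy sketch of that serde behavior:

```rust
use serde::Deserialize;

#[derive(Debug, Default, Deserialize)]
#[serde(default)]
struct MiniOptions {
    enable_telemetry: bool,
}

fn main() {
    // serde skips unknown keys and tables by default, so configs that still
    // carry an [export_metrics] section keep parsing after the removal.
    let toml_src = "enable_telemetry = true\n\n[export_metrics]\nenable = true\n";
    let opts: MiniOptions = toml::from_str(toml_src).expect("stale section is ignored");
    assert!(opts.enable_telemetry);
}
```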
diff --git a/src/common/catalog/src/consts.rs b/src/common/catalog/src/consts.rs
index 8a59a15cc6..b594bd89a6 100644
--- a/src/common/catalog/src/consts.rs
+++ b/src/common/catalog/src/consts.rs
@@ -86,8 +86,6 @@ pub const INFORMATION_SCHEMA_TRIGGERS_TABLE_ID: u32 = 24;
pub const INFORMATION_SCHEMA_GLOBAL_STATUS_TABLE_ID: u32 = 25;
/// id for information_schema.SESSION_STATUS
pub const INFORMATION_SCHEMA_SESSION_STATUS_TABLE_ID: u32 = 26;
-/// id for information_schema.RUNTIME_METRICS
-pub const INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID: u32 = 27;
/// id for information_schema.PARTITIONS
pub const INFORMATION_SCHEMA_PARTITIONS_TABLE_ID: u32 = 28;
/// id for information_schema.REGION_PEERS
diff --git a/src/datanode/src/config.rs b/src/datanode/src/config.rs
index e40a52bd6b..19b4647b8e 100644
--- a/src/datanode/src/config.rs
+++ b/src/datanode/src/config.rs
@@ -28,7 +28,6 @@ use mito2::config::MitoConfig;
pub(crate) use object_store::config::ObjectStoreConfig;
use query::options::QueryOptions;
use serde::{Deserialize, Serialize};
-use servers::export_metrics::ExportMetricsOption;
use servers::grpc::GrpcOptions;
use servers::heartbeat_options::HeartbeatOptions;
use servers::http::HttpOptions;
@@ -82,7 +81,6 @@ pub struct DatanodeOptions {
pub region_engine: Vec<RegionEngineConfig>,
pub logging: LoggingOptions,
pub enable_telemetry: bool,
- pub export_metrics: ExportMetricsOption,
pub tracing: TracingOptions,
pub query: QueryOptions,
pub memory: MemoryOptions,
@@ -138,7 +136,6 @@ impl Default for DatanodeOptions {
logging: LoggingOptions::default(),
heartbeat: HeartbeatOptions::datanode_default(),
enable_telemetry: true,
- export_metrics: ExportMetricsOption::default(),
tracing: TracingOptions::default(),
query: QueryOptions::default(),
memory: MemoryOptions::default(),
diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs
index 6b370c7eb6..5a1279db9b 100644
--- a/src/datanode/src/datanode.rs
+++ b/src/datanode/src/datanode.rs
@@ -48,7 +48,6 @@ use object_store::manager::{ObjectStoreManager, ObjectStoreManagerRef};
use object_store::util::normalize_dir;
use query::QueryEngineFactory;
use query::dummy_catalog::{DummyCatalogManager, TableProviderFactoryRef};
-use servers::export_metrics::ExportMetricsTask;
use servers::server::ServerHandlers;
use snafu::{OptionExt, ResultExt, ensure};
use store_api::path_utils::WAL_DIR;
@@ -84,7 +83,6 @@ pub struct Datanode {
greptimedb_telemetry_task: Arc<GreptimeDBTelemetryTask>,
leases_notifier: Option<Arc<Notify>>,
plugins: Plugins,
- export_metrics_task: Option<ExportMetricsTask>,
}
impl Datanode {
@@ -96,10 +94,6 @@ impl Datanode {
self.start_telemetry();
- if let Some(t) = self.export_metrics_task.as_ref() {
- t.start(None).context(StartServerSnafu)?
- }
-
self.services.start_all().await.context(StartServerSnafu)
}
@@ -319,10 +313,6 @@ impl DatanodeBuilder {
None
};
- let export_metrics_task =
- ExportMetricsTask::try_new(&self.opts.export_metrics, Some(&self.plugins))
- .context(StartServerSnafu)?;
-
Ok(Datanode {
services: ServerHandlers::default(),
heartbeat_task,
@@ -331,7 +321,6 @@ impl DatanodeBuilder {
region_event_receiver,
leases_notifier,
plugins: self.plugins.clone(),
- export_metrics_task,
})
}
diff --git a/src/frontend/src/frontend.rs b/src/frontend/src/frontend.rs
index dce9ffd158..027f8a4254 100644
--- a/src/frontend/src/frontend.rs
+++ b/src/frontend/src/frontend.rs
@@ -23,7 +23,6 @@ use common_telemetry::logging::{LoggingOptions, SlowQueryOptions, TracingOptions
use meta_client::MetaClientOptions;
use query::options::QueryOptions;
use serde::{Deserialize, Serialize};
-use servers::export_metrics::{ExportMetricsOption, ExportMetricsTask};
use servers::grpc::GrpcOptions;
use servers::heartbeat_options::HeartbeatOptions;
use servers::http::HttpOptions;
@@ -34,7 +33,6 @@ use crate::error;
use crate::error::Result;
use crate::heartbeat::HeartbeatTask;
use crate::instance::Instance;
-use crate::instance::prom_store::ExportMetricHandler;
use crate::service_config::{
InfluxdbOptions, JaegerOptions, MysqlOptions, OpentsdbOptions, OtlpOptions, PostgresOptions,
PromStoreOptions,
@@ -63,7 +61,6 @@ pub struct FrontendOptions {
pub logging: LoggingOptions,
pub datanode: DatanodeClientOptions,
pub user_provider: Option<String>,
- pub export_metrics: ExportMetricsOption,
pub tracing: TracingOptions,
pub query: QueryOptions,
pub max_in_flight_write_bytes: Option<ReadableSize>,
@@ -94,7 +91,6 @@ impl Default for FrontendOptions {
logging: LoggingOptions::default(),
datanode: DatanodeClientOptions::default(),
user_provider: None,
- export_metrics: ExportMetricsOption::default(),
tracing: TracingOptions::default(),
query: QueryOptions::default(),
max_in_flight_write_bytes: None,
@@ -117,7 +113,6 @@ pub struct Frontend {
pub instance: Arc,
pub servers: ServerHandlers,
pub heartbeat_task: Option,
- pub export_metrics_task: Option<ExportMetricsTask>,
}
impl Frontend {
@@ -126,17 +121,6 @@ impl Frontend {
t.start().await?;
}
- if let Some(t) = self.export_metrics_task.as_ref() {
- if t.send_by_handler {
- let inserter = self.instance.inserter().clone();
- let statement_executor = self.instance.statement_executor().clone();
- let handler = ExportMetricHandler::new_handler(inserter, statement_executor);
- t.start(Some(handler)).context(error::StartServerSnafu)?
- } else {
- t.start(None).context(error::StartServerSnafu)?;
- }
- }
-
self.servers
.start_all()
.await
diff --git a/src/meta-srv/src/bootstrap.rs b/src/meta-srv/src/bootstrap.rs
index 20e5810a90..9647d9a3ef 100644
--- a/src/meta-srv/src/bootstrap.rs
+++ b/src/meta-srv/src/bootstrap.rs
@@ -30,7 +30,6 @@ use common_meta::kv_backend::{KvBackendRef, ResettableKvBackendRef};
use common_telemetry::info;
use either::Either;
use servers::configurator::ConfiguratorRef;
-use servers::export_metrics::ExportMetricsTask;
use servers::http::{HttpServer, HttpServerBuilder};
use servers::metrics_handler::MetricsHandler;
use servers::server::Server;
@@ -70,8 +69,6 @@ pub struct MetasrvInstance {
plugins: Plugins,
- export_metrics_task: Option<ExportMetricsTask>,
-
/// gRPC serving state receiver. Only present if the gRPC server is started.
serve_state: Arc<Mutex<Option<Receiver<Result<()>>>>>,
@@ -95,15 +92,12 @@ impl MetasrvInstance {
// put metasrv into plugins for later use
plugins.insert::<Arc<Metasrv>>(metasrv.clone());
- let export_metrics_task = ExportMetricsTask::try_new(&opts.export_metrics, Some(&plugins))
- .context(error::InitExportMetricsTaskSnafu)?;
Ok(MetasrvInstance {
metasrv,
http_server: Either::Left(Some(builder)),
opts,
signal_sender: None,
plugins,
- export_metrics_task,
serve_state: Default::default(),
bind_addr: None,
})
@@ -131,10 +125,6 @@ impl MetasrvInstance {
self.metasrv.try_start().await?;
- if let Some(t) = self.export_metrics_task.as_ref() {
- t.start(None).context(error::InitExportMetricsTaskSnafu)?
- }
-
let (tx, rx) = mpsc::channel::<()>(1);
self.signal_sender = Some(tx);
diff --git a/src/meta-srv/src/error.rs b/src/meta-srv/src/error.rs
index 2f4756c2ae..9a0e669b13 100644
--- a/src/meta-srv/src/error.rs
+++ b/src/meta-srv/src/error.rs
@@ -304,13 +304,6 @@ pub enum Error {
source: servers::error::Error,
},
- #[snafu(display("Failed to init export metrics task"))]
- InitExportMetricsTask {
- #[snafu(implicit)]
- location: Location,
- source: servers::error::Error,
- },
-
#[snafu(display("Failed to parse address {}", addr))]
ParseAddr {
addr: String,
@@ -1061,7 +1054,6 @@ impl ErrorExt for Error {
| Error::ParseAddr { .. }
| Error::UnsupportedSelectorType { .. }
| Error::InvalidArguments { .. }
- | Error::InitExportMetricsTask { .. }
| Error::ProcedureNotFound { .. }
| Error::TooManyPartitions { .. }
| Error::TomlFormat { .. }
diff --git a/src/meta-srv/src/metasrv.rs b/src/meta-srv/src/metasrv.rs
index 2e40e46150..39fbf66ccb 100644
--- a/src/meta-srv/src/metasrv.rs
+++ b/src/meta-srv/src/metasrv.rs
@@ -52,7 +52,6 @@ use common_telemetry::{error, info, warn};
use common_time::util::DefaultSystemTimer;
use common_wal::config::MetasrvWalConfig;
use serde::{Deserialize, Serialize};
-use servers::export_metrics::ExportMetricsOption;
use servers::grpc::GrpcOptions;
use servers::http::HttpOptions;
use servers::tls::TlsOption;
@@ -169,8 +168,6 @@ pub struct MetasrvOptions {
pub data_home: String,
/// The WAL options.
pub wal: MetasrvWalConfig,
- /// The metrics export options.
- pub export_metrics: ExportMetricsOption,
/// The store key prefix. If it is not empty, all keys in the store will be prefixed with it.
/// This is useful when multiple metasrv clusters share the same store.
pub store_key_prefix: String,
@@ -234,7 +231,6 @@ impl fmt::Debug for MetasrvOptions {
.field("enable_telemetry", &self.enable_telemetry)
.field("data_home", &self.data_home)
.field("wal", &self.wal)
- .field("export_metrics", &self.export_metrics)
.field("store_key_prefix", &self.store_key_prefix)
.field("max_txn_ops", &self.max_txn_ops)
.field("flush_stats_factor", &self.flush_stats_factor)
@@ -292,7 +288,6 @@ impl Default for MetasrvOptions {
enable_telemetry: true,
data_home: DEFAULT_DATA_HOME.to_string(),
wal: MetasrvWalConfig::default(),
- export_metrics: ExportMetricsOption::default(),
store_key_prefix: String::new(),
max_txn_ops: 128,
flush_stats_factor: 3,
diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs
index c7e5c5d07a..87702078f9 100644
--- a/src/servers/src/error.rs
+++ b/src/servers/src/error.rs
@@ -269,21 +269,6 @@ pub enum Error {
error: std::io::Error,
},
- #[snafu(display("Failed to send prometheus remote request"))]
- SendPromRemoteRequest {
- #[snafu(implicit)]
- location: Location,
- #[snafu(source)]
- error: reqwest::Error,
- },
-
- #[snafu(display("Invalid export metrics config, msg: {}", msg))]
- InvalidExportMetricsConfig {
- msg: String,
- #[snafu(implicit)]
- location: Location,
- },
-
#[snafu(display("Failed to compress prometheus remote request"))]
CompressPromRemoteRequest {
#[snafu(implicit)]
@@ -661,7 +646,6 @@ impl ErrorExt for Error {
| StartHttp { .. }
| StartGrpc { .. }
| TcpBind { .. }
- | SendPromRemoteRequest { .. }
| BuildHttpResponse { .. }
| Arrow { .. }
| FileWatch { .. } => StatusCode::Internal,
@@ -698,7 +682,6 @@ impl ErrorExt for Error {
| DecompressSnappyPromRemoteRequest { .. }
| DecompressZstdPromRemoteRequest { .. }
| InvalidPromRemoteRequest { .. }
- | InvalidExportMetricsConfig { .. }
| InvalidFlightTicket { .. }
| InvalidPrepareStatement { .. }
| DataFrame { .. }
diff --git a/src/servers/src/export_metrics.rs b/src/servers/src/export_metrics.rs
deleted file mode 100644
index aac7e8dda4..0000000000
--- a/src/servers/src/export_metrics.rs
+++ /dev/null
@@ -1,369 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::collections::HashMap;
-use std::sync::Arc;
-use std::time::Duration;
-
-use common_base::Plugins;
-use common_telemetry::metric::{MetricFilter, convert_metric_to_write_request};
-use common_telemetry::{error, info};
-use common_time::Timestamp;
-use prost::Message;
-use reqwest::header::{HeaderMap, HeaderName, HeaderValue};
-use serde::{Deserialize, Serialize};
-use session::context::QueryContextBuilder;
-use snafu::{ResultExt, ensure};
-use tokio::time::{self, Interval};
-
-use crate::error::{InvalidExportMetricsConfigSnafu, Result, SendPromRemoteRequestSnafu};
-use crate::prom_store::{snappy_compress, to_grpc_row_insert_requests};
-use crate::query_handler::PromStoreProtocolHandlerRef;
-
-/// Use to export the metrics generated by greptimedb.
-///
-/// Encoded to Prometheus [RemoteWrite format](https://prometheus.io/docs/concepts/remote_write_spec/),
-/// and send to Prometheus remote-write compatible receiver (e.g. send to `greptimedb` itself)
-#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
-#[serde(default)]
-pub struct ExportMetricsOption {
- pub enable: bool,
- #[serde(with = "humantime_serde")]
- pub write_interval: Duration,
- pub self_import: Option,
- pub remote_write: Option,
-}
-
-#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Default)]
-#[serde(default)]
-pub struct RemoteWriteOption {
- pub url: String,
- pub headers: HashMap<String, String>,
-}
-
-#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
-#[serde(default)]
-pub struct SelfImportOption {
- pub db: String,
-}
-
-impl Default for SelfImportOption {
- fn default() -> Self {
- Self {
- db: "greptime_metrics".to_string(),
- }
- }
-}
-
-impl Default for ExportMetricsOption {
- fn default() -> Self {
- Self {
- enable: false,
- write_interval: Duration::from_secs(30),
- self_import: None,
- remote_write: None,
- }
- }
-}
-
-#[derive(Default, Clone)]
-pub struct ExportMetricsTask {
- config: ExportMetricsOption,
- filter: Option,
- headers: HeaderMap,
- pub send_by_handler: bool,
-}
-
-impl ExportMetricsTask {
- pub fn try_new(
- config: &ExportMetricsOption,
- plugins: Option<&Plugins>,
- ) -> Result<Option<Self>> {
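For the record, the core of the task being deleted was a timer loop that gathered the process registry, converted it with `convert_metric_to_write_request`, and POSTed the result to the configured remote-write URL (or routed it through the frontend's `ExportMetricHandler` in `self_import` mode). A condensed, hypothetical sketch of one tick using the same helpers this file imported; the real code also applied `snappy_compress` and the user-configured headers:

```rust
use common_telemetry::metric::convert_metric_to_write_request;
use prost::Message;

// One export tick, condensed from the deleted task; illustrative only.
async fn push_metrics_once(client: &reqwest::Client, url: &str) -> Result<(), reqwest::Error> {
    let families = prometheus::gather();
    let request = convert_metric_to_write_request(families, None, 0);
    client
        .post(url)
        .header("X-Prometheus-Remote-Write-Version", "0.1.0")
        .body(request.encode_to_vec())
        .send()
        .await?
        .error_for_status()?;
    Ok(())
}
```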