feat: make logging to stdout configurable (#3003)
* feat: make logging to stdout configurable
* fix sqlness
* fix: resolve PR comments
@@ -218,6 +218,8 @@ parallel_scan_channel_size = 32
 # otlp_endpoint = "localhost:4317"
 # The percentage of tracing will be sampled and exported. Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1. ratio > 1 are treated as 1. Fractions < 0 are treated as 0
 # tracing_sample_ratio = 1.0
+# Whether to append logs to stdout. Defaults to true.
+# append_stdout = true

 # Standalone export the metrics generated by itself
 # encoded to Prometheus remote-write format
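The new knob sits in the logging section of the example config and ships commented out, so the default (true) applies unless a user opts out. A minimal sketch of how such a flag round-trips from TOML; the struct below is a stand-in, and the serde derives on the real LoggingOptions are an assumption:

// Stand-in for the logging options; only the field name comes from the diff.
use serde::Deserialize;

#[derive(Debug, Deserialize)]
#[serde(default)]
struct LoggingSketch {
    append_stdout: bool,
}

impl Default for LoggingSketch {
    fn default() -> Self {
        Self { append_stdout: true }
    }
}

fn main() {
    // Uncommenting the key and setting it to false turns stdout logging off;
    // leaving it out keeps the default of true.
    let explicit: LoggingSketch = toml::from_str("append_stdout = false").unwrap();
    let defaulted: LoggingSketch = toml::from_str("").unwrap();
    assert!(!explicit.append_stdout);
    assert!(defaulted.append_stdout);
}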
@@ -43,6 +43,7 @@ pub struct LoggingOptions {
     pub enable_otlp_tracing: bool,
     pub otlp_endpoint: Option<String>,
     pub tracing_sample_ratio: Option<f64>,
+    pub append_stdout: bool,
 }
 
 impl PartialEq for LoggingOptions {
@@ -52,6 +53,7 @@ impl PartialEq for LoggingOptions {
             && self.enable_otlp_tracing == other.enable_otlp_tracing
             && self.otlp_endpoint == other.otlp_endpoint
             && self.tracing_sample_ratio == other.tracing_sample_ratio
+            && self.append_stdout == other.append_stdout
     }
 }
 
@@ -65,6 +67,7 @@ impl Default for LoggingOptions {
             enable_otlp_tracing: false,
             otlp_endpoint: None,
             tracing_sample_ratio: None,
+            append_stdout: true,
         }
     }
 }
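For context, a small sketch of how a caller can flip the new field while keeping the remaining defaults via struct-update syntax. The struct is a stand-in with the same shape; the real type in GreptimeDB's telemetry crate carries more fields:

// Stand-in options struct mirroring the fields touched by this diff.
#[derive(Debug, PartialEq)]
struct LoggingOptions {
    enable_otlp_tracing: bool,
    append_stdout: bool,
}

impl Default for LoggingOptions {
    fn default() -> Self {
        Self {
            enable_otlp_tracing: false,
            append_stdout: true,
        }
    }
}

fn main() {
    // Disable stdout logging, keep everything else at its default.
    let quiet = LoggingOptions {
        append_stdout: false,
        ..Default::default()
    };
    assert!(!quiet.append_stdout);
    assert_ne!(quiet, LoggingOptions::default());
}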
@@ -129,10 +132,14 @@ pub fn init_global_logging(
     // Enable log compatible layer to convert log record to tracing span.
     LogTracer::init().expect("log tracer must be valid");
 
-    // Stdout layer.
-    let (stdout_writer, stdout_guard) = tracing_appender::non_blocking(std::io::stdout());
-    let stdout_logging_layer = Layer::new().with_writer(stdout_writer);
-    guards.push(stdout_guard);
+    let stdout_logging_layer = if opts.append_stdout {
+        let (stdout_writer, stdout_guard) = tracing_appender::non_blocking(std::io::stdout());
+        guards.push(stdout_guard);
+
+        Some(Layer::new().with_writer(stdout_writer))
+    } else {
+        None
+    };
 
     // JSON log layer.
     let rolling_appender = RollingFileAppender::new(Rotation::HOURLY, dir, app_name);
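The conditional layer works because tracing-subscriber implements Layer for Option<L>, so a None contributes nothing to the registry. A rough standalone sketch of the same idea; the APPEND_STDOUT environment variable is invented for the example, whereas the real code is driven by opts.append_stdout:

use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;

fn main() {
    // Invented switch for the sketch; GreptimeDB reads this from LoggingOptions.
    let append_stdout = std::env::var("APPEND_STDOUT").map_or(true, |v| v != "false");

    // `Option<Layer>` is itself a Layer, and `None` is a no-op.
    let stdout_layer = append_stdout.then(|| tracing_subscriber::fmt::layer());

    tracing_subscriber::registry().with(stdout_layer).init();

    tracing::info!("visible only when the stdout layer is installed");
}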
@@ -184,7 +191,7 @@ pub fn init_global_logging(
         None
     };
 
-    let stdout_logging_layer = stdout_logging_layer.with_filter(filter.clone());
+    let stdout_logging_layer = stdout_logging_layer.map(|x| x.with_filter(filter.clone()));
 
     let file_logging_layer = file_logging_layer.with_filter(filter);
 
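Because the stdout layer is now optional, its per-layer filter has to be attached through Option::map; a None stays None and still satisfies the Layer bound downstream. A self-contained sketch of that combinator, with the filter level chosen arbitrarily here:

use tracing_subscriber::filter::LevelFilter;
use tracing_subscriber::layer::{Layer, SubscriberExt};
use tracing_subscriber::util::SubscriberInitExt;

fn main() {
    let stdout_layer = Some(tracing_subscriber::fmt::layer());

    // Mirror of the `.map(|x| x.with_filter(...))` change: the filter is applied
    // only if the layer exists.
    let stdout_layer = stdout_layer.map(|layer| layer.with_filter(LevelFilter::INFO));

    tracing_subscriber::registry().with(stdout_layer).init();

    tracing::debug!("dropped by the INFO filter");
    tracing::info!("printed");
}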
@@ -483,6 +483,12 @@ pub enum Error {
         location: Location,
         source: query::error::Error,
     },
+
+    #[snafu(display("Invalid table name: {}", table_name))]
+    InvalidTableName {
+        table_name: String,
+        location: Location,
+    },
 }
 
 pub type Result<T> = std::result::Result<T, Error>;
@@ -507,7 +513,8 @@ impl ErrorExt for Error {
             | Error::InvalidPartitionColumns { .. }
             | Error::PrepareFileTable { .. }
             | Error::InferFileTableSchema { .. }
-            | Error::SchemaIncompatible { .. } => StatusCode::InvalidArguments,
+            | Error::SchemaIncompatible { .. }
+            | Error::InvalidTableName { .. } => StatusCode::InvalidArguments,
 
             Error::TableAlreadyExists { .. } => StatusCode::TableAlreadyExists,
 
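The new variant follows the crate's snafu conventions: the display string is what surfaces to the client, and the ErrorExt::status_code mapping classifies it as InvalidArguments. A simplified sketch of that pairing; the Location field is dropped and the StatusCode enum below is a stand-in for the real one:

use snafu::Snafu;

#[derive(Debug, PartialEq)]
enum StatusCode {
    InvalidArguments,
}

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Invalid table name: {}", table_name))]
    InvalidTableName { table_name: String },
}

// Stand-in for the ErrorExt::status_code mapping shown in the diff.
fn status_code(err: &Error) -> StatusCode {
    match err {
        Error::InvalidTableName { .. } => StatusCode::InvalidArguments,
    }
}

fn main() {
    let err = InvalidTableNameSnafu { table_name: "N.~" }.build();
    assert_eq!(err.to_string(), "Invalid table name: N.~");
    assert_eq!(status_code(&err), StatusCode::InvalidArguments);
}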
@@ -50,8 +50,8 @@ use table::TableRef;
 use super::StatementExecutor;
 use crate::error::{
     self, AlterExprToRequestSnafu, CatalogSnafu, ColumnDataTypeSnafu, ColumnNotFoundSnafu,
-    DeserializePartitionSnafu, InvalidPartitionColumnsSnafu, ParseSqlSnafu, Result,
-    SchemaNotFoundSnafu, TableMetadataManagerSnafu, TableNotFoundSnafu,
+    DeserializePartitionSnafu, InvalidPartitionColumnsSnafu, InvalidTableNameSnafu, ParseSqlSnafu,
+    Result, SchemaNotFoundSnafu, TableMetadataManagerSnafu, TableNotFoundSnafu,
     UnrecognizedTableOptionSnafu,
 };
 use crate::expr_factory;
@@ -131,8 +131,8 @@ impl StatementExecutor {
 
         ensure!(
             NAME_PATTERN_REG.is_match(&create_table.table_name),
-            error::UnexpectedSnafu {
-                violated: format!("Invalid table name: {}", create_table.table_name)
+            InvalidTableNameSnafu {
+                table_name: create_table.table_name.clone(),
             }
         );
 
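ensure! with a context selector keeps the validation a one-liner: when the regex rejects the name, the macro returns the snafu error built from the supplied fields. A self-contained sketch, where the pattern below is only a rough stand-in for NAME_PATTERN_REG:

use regex::Regex;
use snafu::{ensure, Snafu};

#[derive(Debug, Snafu)]
#[snafu(display("Invalid table name: {}", table_name))]
struct InvalidTableNameError {
    table_name: String,
}

fn validate(table_name: &str) -> Result<(), InvalidTableNameError> {
    // Rough stand-in for NAME_PATTERN_REG: plain identifiers only.
    let name_pattern = Regex::new(r"^[a-zA-Z_][a-zA-Z0-9_]*$").unwrap();
    ensure!(
        name_pattern.is_match(table_name),
        // Context selector generated by snafu for InvalidTableNameError.
        InvalidTableNameSnafu { table_name }
    );
    Ok(())
}

fn main() {
    assert!(validate("integers").is_ok());
    let err = validate("N.~").unwrap_err();
    assert_eq!(err.to_string(), "Invalid table name: N.~");
}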
@@ -24,7 +24,7 @@ use catalog;
 use common_error::ext::{BoxedError, ErrorExt};
 use common_error::status_code::StatusCode;
 use common_macro::stack_trace_debug;
-use common_telemetry::logging;
+use common_telemetry::{debug, error};
 use datatypes::prelude::ConcreteDataType;
 use query::parser::PromQuery;
 use serde_json::json;
@@ -620,7 +620,11 @@ impl IntoResponse for Error {
             | Error::InvalidQuery { .. }
             | Error::TimePrecision { .. } => HttpStatusCode::BAD_REQUEST,
             _ => {
-                logging::error!(self; "Failed to handle HTTP request");
+                if self.status_code().should_log_error() {
+                    error!(self; "Failed to handle HTTP request: ");
+                } else {
+                    debug!("Failed to handle HTTP request: {self}");
+                }
 
                 HttpStatusCode::INTERNAL_SERVER_ERROR
             }
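The HTTP handler now picks the log level from the error's status code instead of always logging at error level, so client-caused failures no longer flood the server log. A stand-in sketch of that split; the should_log_error helper below is illustrative, while the real method lives on GreptimeDB's StatusCode:

use tracing::{debug, error};

#[derive(Debug)]
enum StatusCode {
    InvalidArguments,
    Internal,
}

impl StatusCode {
    // Stand-in for the status-code classification referenced in the diff.
    fn should_log_error(&self) -> bool {
        matches!(self, StatusCode::Internal)
    }
}

fn log_http_failure(code: StatusCode, err: &str) {
    if code.should_log_error() {
        error!("Failed to handle HTTP request: {err}");
    } else {
        debug!("Failed to handle HTTP request: {err}");
    }
}

fn main() {
    tracing_subscriber::fmt().init();
    log_http_failure(StatusCode::Internal, "catalog unavailable");
    log_http_failure(StatusCode::InvalidArguments, "invalid table name");
}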
@@ -747,6 +747,7 @@ enable = true
 
 [frontend.logging]
 enable_otlp_tracing = false
+append_stdout = true
 
 [frontend.datanode.client]
 timeout = "10s"
@@ -815,6 +816,7 @@ parallel_scan_channel_size = 32
 
 [datanode.logging]
 enable_otlp_tracing = false
+append_stdout = true
 
 [datanode.export_metrics]
 enable = false
@@ -825,6 +827,7 @@ write_interval = "30s"
 
 [logging]
 enable_otlp_tracing = false
+append_stdout = true
 
 [wal_meta]
 provider = "raft_engine""#,
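These expected strings appear to come from a test that compares a TOML rendering of the parsed options, so a newly defaulted field shows up in the dump. A small sketch of the mechanism, with a stand-in struct in place of the full options tree:

use serde::Serialize;

#[derive(Serialize)]
struct LoggingSection {
    enable_otlp_tracing: bool,
    append_stdout: bool,
}

fn main() {
    let section = LoggingSection {
        enable_otlp_tracing: false,
        append_stdout: true,
    };
    // A field with a default still serializes, which is why the expected strings
    // above gain an `append_stdout = true` line in each logging section.
    let rendered = toml::to_string(&section).unwrap();
    assert!(rendered.contains("append_stdout = true"));
}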
@@ -52,7 +52,7 @@ Error: 4000(TableAlreadyExists), Table already exists: `greptime.public.test2`
 
 CREATE TABLE 'N.~' (i TIMESTAMP TIME INDEX);
 
-Error: 1002(Unexpected), Unexpected, violated: Invalid table name: N.~
+Error: 1004(InvalidArguments), Invalid table name: N.~
 
 DESC TABLE integers;
 
@@ -190,7 +190,7 @@ impl Env {
             "start".to_string(),
             "-c".to_string(),
             self.generate_config_file(subcommand, db_ctx),
-            "--http-addr=127.0.0.1:5001".to_string(),
+            "--http-addr=127.0.0.1:5002".to_string(),
         ];
         (args, SERVER_ADDR.to_string())
     }
@@ -213,7 +213,7 @@ impl Env {
             "true".to_string(),
             "--enable-region-failover".to_string(),
             "false".to_string(),
-            "--http-addr=127.0.0.1:5001".to_string(),
+            "--http-addr=127.0.0.1:5002".to_string(),
         ];
         (args, METASRV_ADDR.to_string())
     }