From 8ea2aa73cf025ba560beb9fae28bae26357a2332 Mon Sep 17 00:00:00 2001
From: evenyag
Date: Tue, 23 Aug 2022 17:35:24 +0800
Subject: [PATCH] refactor: Use `error!(e; xxx)` pattern to log error (#195)

Use `error!(e; xxx)` pattern so we could get backtrace in error log.
Also use BoxedError as error source of ExecuteQuery instead of String,
so we could carry backtrace and other info in it.
---
 src/datanode/src/instance.rs     | 16 +++++-----------
 src/log-store/src/fs/file.rs     | 10 +++++-----
 src/log-store/src/fs/log.rs      |  2 +-
 src/servers/src/error.rs         | 12 ++++++++----
 src/servers/src/mysql/server.rs  | 10 +++++++---
 src/storage/src/region/writer.rs |  6 +-----
 6 files changed, 27 insertions(+), 29 deletions(-)

diff --git a/src/datanode/src/instance.rs b/src/datanode/src/instance.rs
index dc0b20bec6..a5f4e4ed75 100644
--- a/src/datanode/src/instance.rs
+++ b/src/datanode/src/instance.rs
@@ -3,8 +3,9 @@ use std::{fs, path, sync::Arc};
 use api::v1::{object_expr, select_expr, InsertExpr, ObjectExpr, ObjectResult, SelectExpr};
 use async_trait::async_trait;
 use catalog::{CatalogManagerRef, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+use common_error::prelude::BoxedError;
 use common_error::status_code::StatusCode;
-use common_telemetry::logging::info;
+use common_telemetry::logging::{error, info};
 use common_telemetry::timer;
 use log_store::fs::{config::LogConfig, log::LocalFileLogStore};
 use object_store::{backend::fs::Backend, util, ObjectStore};
@@ -229,18 +230,11 @@ impl SqlQueryHandler for Instance {
         let _timer = timer!(metric::METRIC_HANDLE_SQL_ELAPSED);
         self.execute_sql(query)
             .await
-            // TODO(LFC): use snafu's `context` to include source error and backtrace.
-            // Ideally we should define a snafu in servers::error to wrap the error thrown
-            // by `execute_sql`. However, we cannot do that because that would introduce a circular
-            // dependency.
             .map_err(|e| {
-                servers::error::ExecuteQuerySnafu {
-                    query,
-                    err_msg: format!("{}", e),
-                }
-                .fail::()
-                .unwrap_err()
+                error!(e; "Instance failed to execute sql");
+                BoxedError::new(e)
             })
+            .context(servers::error::ExecuteQuerySnafu { query })
     }
 }
 
diff --git a/src/log-store/src/fs/file.rs b/src/log-store/src/fs/file.rs
index ff62134613..fb6cfc7931 100644
--- a/src/log-store/src/fs/file.rs
+++ b/src/log-store/src/fs/file.rs
@@ -257,7 +257,7 @@ impl LogFile {
                         batch.into_iter().for_each(AppendRequest::complete);
                     }
                     Err(e) => {
-                        error!("Failed to flush log file: {}", e);
+                        error!(e; "Failed to flush log file");
                         batch.into_iter().for_each(|r| r.fail());
                         state
                             .write_offset
@@ -265,7 +265,7 @@
                     }
                 },
                 Err(e) => {
-                    error!("Failed to write append requests, error: {}", e);
+                    error!(e; "Failed to write append requests");
                     batch.into_iter().for_each(|r| r.fail());
                     state
                         .write_offset
@@ -333,7 +333,7 @@
                     }
                 }
                 Err(e) => {
-                    error!("Error while replay log {} {:?}", log_name, e);
+                    error!(e; "Error while replay log {}", log_name);
                     break;
                 }
             }
@@ -568,7 +568,7 @@
                     continue;
                 }
                 Err(e) => {
-                    error!("Failed to read file chunk, error: {}", &e);
+                    error!(e; "Failed to read file chunk");
                     // we're going to break any way so just forget the join result.
                     let _ = tx.blocking_send(Err(e));
                     break;
@@ -600,7 +600,7 @@ fn file_chunk_stream_sync(
                     continue;
                 }
                 Err(e) => {
-                    error!("Failed to read file chunk, error: {}", &e);
+                    error!(e; "Failed to read file chunk");
                     yield Err(e);
                     break;
                 }
diff --git a/src/log-store/src/fs/log.rs b/src/log-store/src/fs/log.rs
index 3cadb10f32..8540c273dc 100644
--- a/src/log-store/src/fs/log.rs
+++ b/src/log-store/src/fs/log.rs
@@ -199,7 +199,7 @@ impl LogStore for LocalFileLogStore {
                     continue;
                 }
                 _ => {
-                    error!("Failed to roll to next log file, error:{}", e);
+                    error!(e; "Failed to roll to next log file");
                     return Err(e);
                 }
             },
diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs
index 85c83225ab..22df75682c 100644
--- a/src/servers/src/error.rs
+++ b/src/servers/src/error.rs
@@ -42,8 +42,12 @@ pub enum Error {
         source: std::io::Error,
     },
 
-    #[snafu(display("Failed to execute query: {}, error: {}", query, err_msg))]
-    ExecuteQuery { query: String, err_msg: String },
+    #[snafu(display("Failed to execute query: {}, source: {}", query, source))]
+    ExecuteQuery {
+        query: String,
+        #[snafu(backtrace)]
+        source: BoxedError,
+    },
 
     #[snafu(display("Not supported: {}", feat))]
     NotSupported { feat: String },
@@ -61,8 +65,8 @@ impl ErrorExt for Error {
             | Error::CollectRecordbatch { .. }
             | Error::StartHttp { .. }
             | Error::StartGrpc { .. }
-            | Error::TcpBind { .. }
-            | Error::ExecuteQuery { .. } => StatusCode::Internal,
+            | Error::TcpBind { .. } => StatusCode::Internal,
+            Error::ExecuteQuery { source, .. } => source.status_code(),
             Error::NotSupported { .. } => StatusCode::InvalidArguments,
         }
     }
diff --git a/src/servers/src/mysql/server.rs b/src/servers/src/mysql/server.rs
index ce844de5e2..3f282c0aec 100644
--- a/src/servers/src/mysql/server.rs
+++ b/src/servers/src/mysql/server.rs
@@ -72,10 +72,10 @@ impl MysqlServer {
             let query_handler = query_handler.clone();
             async move {
                 match tcp_stream {
-                    Err(error) => error!("Broken pipe: {}", error),
+                    Err(error) => error!("Broken pipe: {}", error), // IoError doesn't impl ErrorExt.
                     Ok(io_stream) => {
                         if let Err(error) = Self::handle(io_stream, io_runtime, query_handler) {
-                            error!("Unexpected error when handling TcpStream: {:?}", error);
+                            error!(error; "Unexpected error when handling TcpStream");
                         };
                     }
                 };
@@ -104,7 +104,11 @@ impl Server for MysqlServer {
         self.abort_handle.abort();
 
         if let Err(error) = join_handle.await {
-            error!("Unexpected error during shutdown MySQL server: {}", error);
+            // Couldn't use `error!(e; xxx)` as JoinError doesn't implement ErrorExt.
+            error!(
+                "Unexpected error during shutdown MySQL server, error: {}",
+                error
+            );
         } else {
             info!("MySQL server is shutdown.")
         }
diff --git a/src/storage/src/region/writer.rs b/src/storage/src/region/writer.rs
index aac1d1031f..69ed3d5490 100644
--- a/src/storage/src/region/writer.rs
+++ b/src/storage/src/region/writer.rs
@@ -359,11 +359,7 @@ impl WriterInner {
             // TODO(yingwen): We should release the write lock during waiting flush done, which
             // needs something like async condvar.
             flush_handle.join().await.map_err(|e| {
-                logging::error!(
-                    "Previous flush job failed, region: {}, err: {}",
-                    shared.name,
-                    e
-                );
+                logging::error!(e; "Previous flush job failed, region: {}", shared.name);
                 e
             })?;
         }
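
For reference, a minimal sketch of the pattern this patch settles on, shown outside the diff. Only `BoxedError`, the `error!(e; ...)` form, `#[snafu(backtrace)]`, and `.context(...)` come from the hunks above; `my_error`, `do_query`, and `run` are hypothetical stand-ins, and `do_query` is assumed to return an error type that already implements `ErrorExt` (which `error!(e; ...)` expects, per the comments added in mysql/server.rs).

// Illustrative sketch only; not part of the patch. Hypothetical names noted above.
mod my_error {
    use common_error::prelude::BoxedError;
    use snafu::Snafu;

    #[derive(Debug, Snafu)]
    #[snafu(visibility(pub))]
    pub enum Error {
        // Keep the concrete error as a boxed `source` (carrying its backtrace)
        // instead of flattening it into an `err_msg: String`.
        #[snafu(display("Failed to execute query: {}, source: {}", query, source))]
        ExecuteQuery {
            query: String,
            #[snafu(backtrace)]
            source: BoxedError,
        },
    }
}

async fn run(query: &str) -> Result<(), my_error::Error> {
    use common_error::prelude::BoxedError;
    use common_telemetry::logging::error;
    use snafu::ResultExt;

    // `do_query` is a hypothetical fallible call, assumed to return
    // `Result<(), E>` where `E: ErrorExt`.
    do_query(query)
        .await
        .map_err(|e| {
            // Log the message together with the error and its backtrace, then
            // box the error so the wrapper variant can keep it as its source.
            error!(e; "Failed to execute query");
            BoxedError::new(e)
        })
        .context(my_error::ExecuteQuerySnafu { query })
}

Boxing the source is what lets the wrapping crate delegate `status_code()` to the underlying error (as in the second error.rs hunk) without taking a direct dependency on the callee's error type, which is the circular-dependency problem described by the TODO removed from instance.rs.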