mirror of
https://github.com/GreptimeTeam/greptimedb.git
synced 2026-01-14 09:12:57 +00:00
refactor: Use error!(e; xxx) pattern to log error (#195)
Use the `error!(e; xxx)` pattern so that we can get a backtrace in the error log. Also use `BoxedError` instead of `String` as the error source of `ExecuteQuery`, so we can carry a backtrace and other info in it.
This commit is contained in:
@@ -3,8 +3,9 @@ use std::{fs, path, sync::Arc};
|
||||
use api::v1::{object_expr, select_expr, InsertExpr, ObjectExpr, ObjectResult, SelectExpr};
|
||||
use async_trait::async_trait;
|
||||
use catalog::{CatalogManagerRef, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_error::prelude::BoxedError;
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_telemetry::logging::info;
|
||||
use common_telemetry::logging::{error, info};
|
||||
use common_telemetry::timer;
|
||||
use log_store::fs::{config::LogConfig, log::LocalFileLogStore};
|
||||
use object_store::{backend::fs::Backend, util, ObjectStore};
|
||||
@@ -229,18 +230,11 @@ impl SqlQueryHandler for Instance {
|
||||
let _timer = timer!(metric::METRIC_HANDLE_SQL_ELAPSED);
|
||||
self.execute_sql(query)
|
||||
.await
|
||||
// TODO(LFC): use snafu's `context` to include source error and backtrace.
|
||||
// Ideally we should define a snafu in servers::error to wrap the error thrown
|
||||
// by `execute_sql`. However, we cannot do that because that would introduce a circular
|
||||
// dependency.
|
||||
.map_err(|e| {
|
||||
servers::error::ExecuteQuerySnafu {
|
||||
query,
|
||||
err_msg: format!("{}", e),
|
||||
}
|
||||
.fail::<servers::error::Error>()
|
||||
.unwrap_err()
|
||||
error!(e; "Instance failed to execute sql");
|
||||
BoxedError::new(e)
|
||||
})
|
||||
.context(servers::error::ExecuteQuerySnafu { query })
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -257,7 +257,7 @@ impl LogFile {
|
||||
batch.into_iter().for_each(AppendRequest::complete);
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to flush log file: {}", e);
|
||||
error!(e; "Failed to flush log file");
|
||||
batch.into_iter().for_each(|r| r.fail());
|
||||
state
|
||||
.write_offset
|
||||
@@ -265,7 +265,7 @@ impl LogFile {
|
||||
}
|
||||
},
|
||||
Err(e) => {
|
||||
error!("Failed to write append requests, error: {}", e);
|
||||
error!(e; "Failed to write append requests");
|
||||
batch.into_iter().for_each(|r| r.fail());
|
||||
state
|
||||
.write_offset
|
||||
@@ -333,7 +333,7 @@ impl LogFile {
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Error while replay log {} {:?}", log_name, e);
|
||||
error!(e; "Error while replay log {}", log_name);
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -568,7 +568,7 @@ fn file_chunk_stream(
|
||||
continue;
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to read file chunk, error: {}", &e);
|
||||
error!(e; "Failed to read file chunk");
|
||||
// we're going to break any way so just forget the join result.
|
||||
let _ = tx.blocking_send(Err(e));
|
||||
break;
|
||||
@@ -600,7 +600,7 @@ fn file_chunk_stream_sync(
|
||||
continue;
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to read file chunk, error: {}", &e);
|
||||
error!(e; "Failed to read file chunk");
|
||||
yield Err(e);
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -199,7 +199,7 @@ impl LogStore for LocalFileLogStore {
|
||||
continue;
|
||||
}
|
||||
_ => {
|
||||
error!("Failed to roll to next log file, error:{}", e);
|
||||
error!(e; "Failed to roll to next log file");
|
||||
return Err(e);
|
||||
}
|
||||
},
|
||||
|
||||
@@ -42,8 +42,12 @@ pub enum Error {
|
||||
source: std::io::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to execute query: {}, error: {}", query, err_msg))]
|
||||
ExecuteQuery { query: String, err_msg: String },
|
||||
#[snafu(display("Failed to execute query: {}, source: {}", query, source))]
|
||||
ExecuteQuery {
|
||||
query: String,
|
||||
#[snafu(backtrace)]
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Not supported: {}", feat))]
|
||||
NotSupported { feat: String },
|
||||
@@ -61,8 +65,8 @@ impl ErrorExt for Error {
|
||||
| Error::CollectRecordbatch { .. }
|
||||
| Error::StartHttp { .. }
|
||||
| Error::StartGrpc { .. }
|
||||
| Error::TcpBind { .. }
|
||||
| Error::ExecuteQuery { .. } => StatusCode::Internal,
|
||||
| Error::TcpBind { .. } => StatusCode::Internal,
|
||||
Error::ExecuteQuery { source, .. } => source.status_code(),
|
||||
Error::NotSupported { .. } => StatusCode::InvalidArguments,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -72,10 +72,10 @@ impl MysqlServer {
|
||||
let query_handler = query_handler.clone();
|
||||
async move {
|
||||
match tcp_stream {
|
||||
Err(error) => error!("Broken pipe: {}", error),
|
||||
Err(error) => error!("Broken pipe: {}", error), // IoError doesn't impl ErrorExt.
|
||||
Ok(io_stream) => {
|
||||
if let Err(error) = Self::handle(io_stream, io_runtime, query_handler) {
|
||||
error!("Unexpected error when handling TcpStream: {:?}", error);
|
||||
error!(error; "Unexpected error when handling TcpStream");
|
||||
};
|
||||
}
|
||||
};
|
||||
@@ -104,7 +104,11 @@ impl Server for MysqlServer {
|
||||
self.abort_handle.abort();
|
||||
|
||||
if let Err(error) = join_handle.await {
|
||||
error!("Unexpected error during shutdown MySQL server: {}", error);
|
||||
// Couldn't use `error!(e; xxx)` as JoinError doesn't implement ErrorExt.
|
||||
error!(
|
||||
"Unexpected error during shutdown MySQL server, error: {}",
|
||||
error
|
||||
);
|
||||
} else {
|
||||
info!("MySQL server is shutdown.")
|
||||
}
|
||||
|
||||
@@ -359,11 +359,7 @@ impl WriterInner {
|
||||
// TODO(yingwen): We should release the write lock during waiting flush done, which
|
||||
// needs something like async condvar.
|
||||
flush_handle.join().await.map_err(|e| {
|
||||
logging::error!(
|
||||
"Previous flush job failed, region: {}, err: {}",
|
||||
shared.name,
|
||||
e
|
||||
);
|
||||
logging::error!(e; "Previous flush job failed, region: {}", shared.name);
|
||||
e
|
||||
})?;
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user