chore: upgrade Rust to nightly 2022-12-20 (#772)

* chore: upgrade Rust to nightly 2022-12-20

* chore: upgrade Rust to nightly 2022-12-20

Co-authored-by: luofucong <luofucong@greptime.com>
Authored by LFC, committed by GitHub on 2022-12-21 19:32:30 +08:00
parent d0ebcc3b5a
commit ea9af42091
145 changed files with 366 additions and 515 deletions
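Note: the bulk of this diff is mechanical. The newer nightly's clippy adds the `uninlined_format_args` lint, which pushes plain-identifier format arguments into the format string itself. A minimal sketch of the before/after pattern, using names borrowed from the hunks below:

```rust
fn main() {
    let path = "trips.parquet";
    let total_rpc_elapsed_ms = 42;
    // Before: positional arguments trailing the format string.
    println!("file {:?} done in {}ms", path, total_rpc_elapsed_ms);
    // After: identifiers captured directly inside the braces,
    // as the upgraded toolchain's clippy suggests.
    println!("file {path:?} done in {total_rpc_elapsed_ms}ms");
}
```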

View File

@@ -24,7 +24,7 @@ on:
name: Code coverage
env:
RUST_TOOLCHAIN: nightly-2022-07-14
RUST_TOOLCHAIN: nightly-2022-12-20
jobs:
coverage:

View File

@@ -23,7 +23,7 @@ on:
name: CI
env:
RUST_TOOLCHAIN: nightly-2022-07-14
RUST_TOOLCHAIN: nightly-2022-12-20
jobs:
typos:

View File

@@ -10,7 +10,7 @@ on:
name: Release
env:
RUST_TOOLCHAIN: nightly-2022-07-14
RUST_TOOLCHAIN: nightly-2022-12-20
# FIXME(zyy17): Would be better to use `gh release list -L 1 | cut -f 3` to get the latest release version tag, but for a long time, we will stay at 'v0.1.0-alpha-*'.
SCHEDULED_BUILD_VERSION_PREFIX: v0.1.0-alpha

View File

@@ -15,7 +15,6 @@
//! Use the taxi trip records from New York City dataset to bench. You can download the dataset from
//! [here](https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page).
#![feature(once_cell)]
#![allow(clippy::print_stdout)]
use std::collections::HashMap;
@@ -94,7 +93,7 @@ async fn write_data(
.unwrap();
let progress_bar = mpb.add(ProgressBar::new(row_num as _));
progress_bar.set_style(pb_style);
progress_bar.set_message(format!("{:?}", path));
progress_bar.set_message(format!("{path:?}"));
let mut total_rpc_elapsed_ms = 0;
@@ -115,10 +114,7 @@ async fn write_data(
progress_bar.inc(row_count as _);
}
progress_bar.finish_with_message(format!(
"file {:?} done in {}ms",
path, total_rpc_elapsed_ms
));
progress_bar.finish_with_message(format!("file {path:?} done in {total_rpc_elapsed_ms}ms",));
total_rpc_elapsed_ms
}
@@ -355,12 +351,12 @@ fn query_set() -> HashMap<String, String> {
ret.insert(
"count_all".to_string(),
format!("SELECT COUNT(*) FROM {};", TABLE_NAME),
format!("SELECT COUNT(*) FROM {TABLE_NAME};"),
);
ret.insert(
"fare_amt_by_passenger".to_string(),
format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {} GROUP BY passenger_count",TABLE_NAME)
format!("SELECT passenger_count, MIN(fare_amount), MAX(fare_amount), SUM(fare_amount) FROM {TABLE_NAME} GROUP BY passenger_count")
);
ret
@@ -373,7 +369,7 @@ async fn do_write(args: &Args, client: &Client) {
let mut write_jobs = JoinSet::new();
let create_table_result = admin.create(create_table_expr()).await;
println!("Create table result: {:?}", create_table_result);
println!("Create table result: {create_table_result:?}");
let progress_bar_style = ProgressStyle::with_template(
"[{elapsed_precise}] {bar:60.cyan/blue} {pos:>7}/{len:7} {msg}",
@@ -406,7 +402,7 @@ async fn do_write(args: &Args, client: &Client) {
async fn do_query(num_iter: usize, db: &Database) {
for (query_name, query) in query_set() {
println!("Running query: {}", query);
println!("Running query: {query}");
for i in 0..num_iter {
let now = Instant::now();
let _res = db.select(Select::Sql(query.clone())).await.unwrap();

View File

@@ -1 +1 @@
nightly-2022-07-14
nightly-2022-12-20
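Note: this bare `rust-toolchain` file is what pins every build of the workspace to the named nightly. For reference only, an equivalent `rust-toolchain.toml` form (an assumed alternative, not what the repo uses) would be:

```toml
# Illustrative equivalent of the one-line rust-toolchain file above.
[toolchain]
channel = "nightly-2022-12-20"
```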

View File

@@ -33,48 +33,38 @@ const ALPHANUMERICS_NAME_PATTERN: &str = "[a-zA-Z_][a-zA-Z0-9_]*";
lazy_static! {
static ref CATALOG_KEY_PATTERN: Regex = Regex::new(&format!(
"^{}-({})$",
CATALOG_KEY_PREFIX, ALPHANUMERICS_NAME_PATTERN
"^{CATALOG_KEY_PREFIX}-({ALPHANUMERICS_NAME_PATTERN})$"
))
.unwrap();
}
lazy_static! {
static ref SCHEMA_KEY_PATTERN: Regex = Regex::new(&format!(
"^{}-({})-({})$",
SCHEMA_KEY_PREFIX, ALPHANUMERICS_NAME_PATTERN, ALPHANUMERICS_NAME_PATTERN
"^{SCHEMA_KEY_PREFIX}-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})$"
))
.unwrap();
}
lazy_static! {
static ref TABLE_GLOBAL_KEY_PATTERN: Regex = Regex::new(&format!(
"^{}-({})-({})-({})$",
TABLE_GLOBAL_KEY_PREFIX,
ALPHANUMERICS_NAME_PATTERN,
ALPHANUMERICS_NAME_PATTERN,
ALPHANUMERICS_NAME_PATTERN
"^{TABLE_GLOBAL_KEY_PREFIX}-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})$"
))
.unwrap();
}
lazy_static! {
static ref TABLE_REGIONAL_KEY_PATTERN: Regex = Regex::new(&format!(
"^{}-({})-({})-({})-([0-9]+)$",
TABLE_REGIONAL_KEY_PREFIX,
ALPHANUMERICS_NAME_PATTERN,
ALPHANUMERICS_NAME_PATTERN,
ALPHANUMERICS_NAME_PATTERN
"^{TABLE_REGIONAL_KEY_PREFIX}-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})-({ALPHANUMERICS_NAME_PATTERN})-([0-9]+)$"
))
.unwrap();
}
pub fn build_catalog_prefix() -> String {
format!("{}-", CATALOG_KEY_PREFIX)
format!("{CATALOG_KEY_PREFIX}-")
}
pub fn build_schema_prefix(catalog_name: impl AsRef<str>) -> String {
format!("{}-{}-", SCHEMA_KEY_PREFIX, catalog_name.as_ref())
format!("{SCHEMA_KEY_PREFIX}-{}-", catalog_name.as_ref())
}
pub fn build_table_global_prefix(
@@ -82,8 +72,7 @@ pub fn build_table_global_prefix(
schema_name: impl AsRef<str>,
) -> String {
format!(
"{}-{}-{}-",
TABLE_GLOBAL_KEY_PREFIX,
"{TABLE_GLOBAL_KEY_PREFIX}-{}-{}-",
catalog_name.as_ref(),
schema_name.as_ref()
)
@@ -378,7 +367,7 @@ mod tests {
table_info,
};
let serialized = serde_json::to_string(&value).unwrap();
let deserialized = TableGlobalValue::parse(&serialized).unwrap();
let deserialized = TableGlobalValue::parse(serialized).unwrap();
assert_eq!(value, deserialized);
}
}
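Note: inlining only works for plain identifiers, which is why the hunks above mix styles — constants like `SCHEMA_KEY_PREFIX` move into the braces while expressions such as `catalog_name.as_ref()` stay positional. A runnable sketch with a stand-in prefix value:

```rust
// Stand-in value; the real SCHEMA_KEY_PREFIX is defined in this module.
const SCHEMA_KEY_PREFIX: &str = "__schema";

fn build_schema_prefix(catalog_name: impl AsRef<str>) -> String {
    // The const is captured inline; the method call must remain positional.
    format!("{SCHEMA_KEY_PREFIX}-{}-", catalog_name.as_ref())
}

fn main() {
    assert_eq!(build_schema_prefix("greptime"), "__schema-greptime-");
}
```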

View File

@@ -157,7 +157,7 @@ pub struct RegisterSchemaRequest {
/// Formats table fully-qualified name
pub fn format_full_table_name(catalog: &str, schema: &str, table: &str) -> String {
format!("{}.{}.{}", catalog, schema, table)
format!("{catalog}.{schema}.{table}")
}
pub trait CatalogProviderFactory {
@@ -187,8 +187,7 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
.await
.with_context(|_| CreateTableSnafu {
table_info: format!(
"{}.{}.{}, id: {}",
catalog_name, schema_name, table_name, table_id,
"{catalog_name}.{schema_name}.{table_name}, id: {table_id}",
),
})?;
manager
@@ -200,7 +199,7 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
table: table.clone(),
})
.await?;
info!("Created and registered system table: {}", table_name);
info!("Created and registered system table: {table_name}");
table
};
if let Some(hook) = req.open_hook {

View File

@@ -338,7 +338,7 @@ impl CatalogManager for LocalCatalogManager {
let schema = catalog
.schema(schema_name)?
.with_context(|| SchemaNotFoundSnafu {
schema_info: format!("{}.{}", catalog_name, schema_name),
schema_info: format!("{catalog_name}.{schema_name}"),
})?;
{
@@ -452,7 +452,7 @@ impl CatalogManager for LocalCatalogManager {
let schema = catalog
.schema(schema_name)?
.with_context(|| SchemaNotFoundSnafu {
schema_info: format!("{}.{}", catalog_name, schema_name),
schema_info: format!("{catalog_name}.{schema_name}"),
})?;
schema.table(table_name)
}

View File

@@ -331,10 +331,7 @@ impl RemoteCatalogManager {
.open_table(&context, request)
.await
.with_context(|_| OpenTableSnafu {
table_info: format!(
"{}.{}.{}, id:{}",
catalog_name, schema_name, table_name, table_id
),
table_info: format!("{catalog_name}.{schema_name}.{table_name}, id:{table_id}"),
})? {
Some(table) => {
info!(
@@ -355,7 +352,7 @@ impl RemoteCatalogManager {
.clone()
.try_into()
.context(InvalidTableSchemaSnafu {
table_info: format!("{}.{}.{}", catalog_name, schema_name, table_name,),
table_info: format!("{catalog_name}.{schema_name}.{table_name}"),
schema: meta.schema.clone(),
})?;
let req = CreateTableRequest {
@@ -477,7 +474,7 @@ impl CatalogManager for RemoteCatalogManager {
let schema = catalog
.schema(schema_name)?
.with_context(|| SchemaNotFoundSnafu {
schema_info: format!("{}.{}", catalog_name, schema_name),
schema_info: format!("{catalog_name}.{schema_name}"),
})?;
schema.table(table_name)
}

View File

@@ -197,7 +197,7 @@ pub fn build_table_insert_request(full_table_name: String, table_id: TableId) ->
}
pub fn build_schema_insert_request(catalog_name: String, schema_name: String) -> InsertRequest {
let full_schema_name = format!("{}.{}", catalog_name, schema_name);
let full_schema_name = format!("{catalog_name}.{schema_name}");
build_insert_request(
EntryType::Schema,
full_schema_name.as_bytes(),
@@ -390,7 +390,7 @@ mod tests {
if let Entry::Catalog(e) = entry {
assert_eq!("some_catalog", e.catalog_name);
} else {
panic!("Unexpected type: {:?}", entry);
panic!("Unexpected type: {entry:?}");
}
}
@@ -407,7 +407,7 @@ mod tests {
assert_eq!("some_catalog", e.catalog_name);
assert_eq!("some_schema", e.schema_name);
} else {
panic!("Unexpected type: {:?}", entry);
panic!("Unexpected type: {entry:?}");
}
}
@@ -426,7 +426,7 @@ mod tests {
assert_eq!("some_table", e.table_name);
assert_eq!(42, e.table_id);
} else {
panic!("Unexpected type: {:?}", entry);
panic!("Unexpected type: {entry:?}");
}
}

View File

@@ -69,8 +69,7 @@ mod tests {
assert!(
err.to_string()
.contains("Table `greptime.public.test_table` already exists"),
"Actual error message: {}",
err
"Actual error message: {err}",
);
}

View File

@@ -189,10 +189,10 @@ impl TableEngine for MockTableEngine {
unimplemented!()
}
fn get_table<'a>(
fn get_table(
&self,
_ctx: &EngineContext,
table_ref: &'a TableReference,
table_ref: &TableReference,
) -> table::Result<Option<TableRef>> {
futures::executor::block_on(async {
Ok(self
@@ -204,7 +204,7 @@ impl TableEngine for MockTableEngine {
})
}
fn table_exists<'a>(&self, _ctx: &EngineContext, table_ref: &'a TableReference) -> bool {
fn table_exists(&self, _ctx: &EngineContext, table_ref: &TableReference) -> bool {
futures::executor::block_on(async {
self.tables
.read()
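Note: the `get_table`/`table_exists` signature changes above drop lifetime parameters that name a lifetime used in only one place; the newer clippy flags these as `needless_lifetimes`. A minimal sketch with a stand-in type:

```rust
struct TableReference;

// Before: an explicitly named lifetime that elision already covers.
fn table_exists_old<'a>(_table_ref: &'a TableReference) -> bool {
    true
}

// After: the lifetime parameter is elided entirely.
fn table_exists(_table_ref: &TableReference) -> bool {
    true
}

fn main() {
    let t = TableReference;
    assert!(table_exists_old(&t) && table_exists(&t));
}
```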

View File

@@ -221,8 +221,7 @@ impl TryFrom<StartCommand> for FrontendOptions {
if addr == datanode_grpc_addr {
return IllegalConfigSnafu {
msg: format!(
"gRPC listen address conflicts with datanode reserved gRPC addr: {}",
datanode_grpc_addr
"gRPC listen address conflicts with datanode reserved gRPC addr: {datanode_grpc_addr}",
),
}
.fail();

View File

@@ -131,7 +131,7 @@ mod tests {
assert!(ErrorCompat::backtrace(&err).is_some());
let msg = format!("{:?}", err);
let msg = format!("{err:?}");
assert!(msg.contains("\nBacktrace:\n"));
let fmt_msg = format!("{:?}", DebugFormat::new(&err));
assert_eq!(msg, fmt_msg);
@@ -151,7 +151,7 @@ mod tests {
assert!(err.as_any().downcast_ref::<MockError>().is_some());
assert!(err.source().is_some());
let msg = format!("{:?}", err);
let msg = format!("{err:?}");
assert!(msg.contains("\nBacktrace:\n"));
assert!(msg.contains("Caused by"));

View File

@@ -31,11 +31,11 @@ impl<'a, E: ErrorExt + ?Sized> fmt::Debug for DebugFormat<'a, E> {
write!(f, "{}.", self.0)?;
if let Some(source) = self.0.source() {
// Source error use debug format for more verbose info.
write!(f, " Caused by: {:?}", source)?;
write!(f, " Caused by: {source:?}")?;
}
if let Some(backtrace) = self.0.backtrace_opt() {
// Add a newline to separate causes and backtrace.
write!(f, "\nBacktrace:\n{}", backtrace)?;
write!(f, "\nBacktrace:\n{backtrace}")?;
}
Ok(())

View File

@@ -87,7 +87,7 @@ impl StatusCode {
impl fmt::Display for StatusCode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// The current debug format is suitable to display.
write!(f, "{:?}", self)
write!(f, "{self:?}")
}
}
@@ -96,7 +96,7 @@ mod tests {
use super::*;
fn assert_status_code_display(code: StatusCode, msg: &str) {
let code_msg = format!("{}", code);
let code_msg = format!("{code}");
assert_eq!(msg, code_msg);
}

View File

@@ -343,7 +343,7 @@ mod tests {
Arc::new(Int64Vector::from_vec(fp.clone())),
];
let vector = interp(&args).unwrap();
assert!(matches!(vector.get(0), Value::Float64(v) if v==x[0] as f64));
assert!(matches!(vector.get(0), Value::Float64(v) if v == x[0]));
// x=None output:Null
let input = vec![None, Some(0.0), Some(0.3)];

View File

@@ -127,12 +127,7 @@ mod tests {
assert_eq!(4, vec.len());
for i in 0..4 {
assert_eq!(
i == 0 || i == 3,
vec.get_data(i).unwrap(),
"failed at {}",
i
)
assert_eq!(i == 0 || i == 3, vec.get_data(i).unwrap(), "Failed at {i}",)
}
}
_ => unreachable!(),

View File

@@ -106,7 +106,7 @@ pub fn create_table_schema(expr: &CreateTableExpr) -> Result<SchemaRef> {
.iter()
.any(|column| column.name == expr.time_index),
MissingTimestampColumnSnafu {
msg: format!("CreateExpr: {:?}", expr)
msg: format!("CreateExpr: {expr:?}")
}
);

View File

@@ -154,7 +154,7 @@ fn collect_column_values(column_datatype: ColumnDataType, values: &Values) -> Ve
collect_values!(values.i32_values, |v| ValueRef::from(*v))
}
ColumnDataType::Int64 => {
collect_values!(values.i64_values, |v| ValueRef::from(*v as i64))
collect_values!(values.i64_values, |v| ValueRef::from(*v))
}
ColumnDataType::Uint8 => {
collect_values!(values.u8_values, |v| ValueRef::from(*v as u8))
@@ -166,7 +166,7 @@ fn collect_column_values(column_datatype: ColumnDataType, values: &Values) -> Ve
collect_values!(values.u32_values, |v| ValueRef::from(*v))
}
ColumnDataType::Uint64 => {
collect_values!(values.u64_values, |v| ValueRef::from(*v as u64))
collect_values!(values.u64_values, |v| ValueRef::from(*v))
}
ColumnDataType::Float32 => collect_values!(values.f32_values, |v| ValueRef::from(*v)),
ColumnDataType::Float64 => collect_values!(values.f64_values, |v| ValueRef::from(*v)),

View File

@@ -1,4 +1,3 @@
#![feature(assert_matches)]
// Copyright 2022 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");

View File

@@ -26,7 +26,7 @@ async fn do_bench_channel_manager() {
let join = tokio::spawn(async move {
for _ in 0..10000 {
let idx = rand::random::<usize>() % 100;
let ret = m_clone.get(format!("{}", idx));
let ret = m_clone.get(format!("{idx}"));
assert!(ret.is_ok());
}
});

View File

@@ -120,7 +120,7 @@ impl ChannelManager {
fn build_endpoint(&self, addr: &str) -> Result<Endpoint> {
let mut endpoint =
Endpoint::new(format!("http://{}", addr)).context(error::CreateChannelSnafu)?;
Endpoint::new(format!("http://{addr}")).context(error::CreateChannelSnafu)?;
if let Some(dur) = self.config.timeout {
endpoint = endpoint.timeout(dur);

View File

@@ -161,12 +161,7 @@ mod tests {
assert_eq!(4, vec.len());
for i in 0..4 {
assert_eq!(
i == 0 || i == 3,
vec.get_data(i).unwrap(),
"failed at {}",
i
)
assert_eq!(i == 0 || i == 3, vec.get_data(i).unwrap(), "Failed at {i}")
}
}
_ => unreachable!(),

View File

@@ -131,7 +131,7 @@ impl DfAccumulator for DfAccumulatorAdaptor {
let state_types = self.creator.state_types()?;
if state_values.len() != state_types.len() {
return error::BadAccumulatorImplSnafu {
err_msg: format!("Accumulator {:?} returned state values size do not match its state types size.", self),
err_msg: format!("Accumulator {self:?} returned state values size do not match its state types size."),
}
.fail()?;
}

View File

@@ -231,8 +231,7 @@ mod tests {
assert_eq!(
result.unwrap_err().to_string(),
format!(
"Failed to create RecordBatches, reason: expect RecordBatch schema equals {:?}, actual: {:?}",
schema1, schema2
"Failed to create RecordBatches, reason: expect RecordBatch schema equals {schema1:?}, actual: {schema2:?}",
)
);

View File

@@ -61,7 +61,7 @@ pub(crate) fn to_df_expr(
| RexType::Cast(_)
| RexType::Subquery(_)
| RexType::Enum(_) => UnsupportedExprSnafu {
name: format!("substrait expression {:?}", expr_rex_type),
name: format!("substrait expression {expr_rex_type:?}"),
}
.fail()?,
}
@@ -109,7 +109,7 @@ pub fn convert_scalar_function(
let fn_name = ctx
.find_scalar_fn(anchor)
.with_context(|| InvalidParametersSnafu {
reason: format!("Unregistered scalar function reference: {}", anchor),
reason: format!("Unregistered scalar function reference: {anchor}"),
})?;
// convenient util
@@ -435,7 +435,7 @@ pub fn convert_scalar_function(
// skip Wildcard, unimplemented.
// end other direct expr
_ => UnsupportedExprSnafu {
name: format!("scalar function {}", fn_name),
name: format!("scalar function {fn_name}"),
}
.fail()?,
};
@@ -595,8 +595,8 @@ pub fn convert_column(column: &Column, schema: &Schema) -> Result<FieldReference
schema
.column_index_by_name(column_name)
.with_context(|| MissingFieldSnafu {
field: format!("{:?}", column),
plan: format!("schema: {:?}", schema),
field: format!("{column:?}"),
plan: format!("schema: {schema:?}"),
})?;
Ok(FieldReference {

View File

@@ -236,7 +236,7 @@ impl DFLogicalSubstraitConvertor {
.map_err(BoxedError::new)
.context(InternalSnafu)?
.context(TableNotFoundSnafu {
name: format!("{}.{}.{}", catalog_name, schema_name, table_name),
name: format!("{catalog_name}.{schema_name}.{table_name}"),
})?;
let adapter = Arc::new(DefaultTableSource::new(Arc::new(
DfTableProviderAdapter::new(table_ref),
@@ -281,7 +281,7 @@ impl DFLogicalSubstraitConvertor {
// TODO(ruihang): Support limit(fetch)
Ok(LogicalPlan::TableScan(TableScan {
table_name: format!("{}.{}.{}", catalog_name, schema_name, table_name),
table_name: format!("{catalog_name}.{schema_name}.{table_name}"),
source: adapter,
projection,
projected_schema,
@@ -397,8 +397,7 @@ impl DFLogicalSubstraitConvertor {
| LogicalPlan::Analyze(_)
| LogicalPlan::Extension(_) => InvalidParametersSnafu {
reason: format!(
"Trying to convert DDL/DML plan to substrait proto, plan: {:?}",
plan
"Trying to convert DDL/DML plan to substrait proto, plan: {plan:?}",
),
}
.fail()?,
@@ -572,7 +571,7 @@ mod test {
let proto = convertor.encode(plan.clone()).unwrap();
let tripped_plan = convertor.decode(proto, catalog).unwrap();
assert_eq!(format!("{:?}", plan), format!("{:?}", tripped_plan));
assert_eq!(format!("{plan:?}"), format!("{tripped_plan:?}"));
}
#[tokio::test]
@@ -606,8 +605,7 @@ mod test {
let table_scan_plan = LogicalPlan::TableScan(TableScan {
table_name: format!(
"{}.{}.{}",
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, DEFAULT_TABLE_NAME
"{DEFAULT_CATALOG_NAME}.{DEFAULT_SCHEMA_NAME}.{DEFAULT_TABLE_NAME}",
),
source: adapter,
projection: Some(projection),

View File

@@ -87,7 +87,7 @@ pub fn to_concrete_type(ty: &SType) -> Result<(ConcreteDataType, bool)> {
| Kind::List(_)
| Kind::Map(_)
| Kind::UserDefinedTypeReference(_) => UnsupportedSubstraitTypeSnafu {
ty: format!("{:?}", kind),
ty: format!("{kind:?}"),
}
.fail(),
}
@@ -154,7 +154,7 @@ pub(crate) fn scalar_value_as_literal_type(v: &ScalarValue) -> Result<LiteralTyp
// TODO(LFC): Implement other conversions: ScalarValue => LiteralType
_ => {
return error::UnsupportedExprSnafu {
name: format!("{:?}", v),
name: format!("{v:?}"),
}
.fail()
}
@@ -177,7 +177,7 @@ pub(crate) fn literal_type_to_scalar_value(t: LiteralType) -> Result<ScalarValue
// TODO(LFC): Implement other conversions: Kind => ScalarValue
_ => {
return error::UnsupportedSubstraitTypeSnafu {
ty: format!("{:?}", kind),
ty: format!("{kind:?}"),
}
.fail()
}
@@ -194,7 +194,7 @@ pub(crate) fn literal_type_to_scalar_value(t: LiteralType) -> Result<ScalarValue
// TODO(LFC): Implement other conversions: LiteralType => ScalarValue
_ => {
return error::UnsupportedSubstraitTypeSnafu {
ty: format!("{:?}", t),
ty: format!("{t:?}"),
}
.fail()
}

View File

@@ -28,7 +28,7 @@ pub fn set_panic_hook() {
let default_hook = panic::take_hook();
panic::set_hook(Box::new(move |panic| {
let backtrace = Backtrace::new();
let backtrace = format!("{:?}", backtrace);
let backtrace = format!("{backtrace:?}");
if let Some(location) = panic.location() {
tracing::error!(
message = %panic,

View File

@@ -155,7 +155,7 @@ pub enum Error {
#[snafu(display("Failed to init backend, config: {:#?}, source: {}", config, source))]
InitBackend {
config: ObjectStoreConfig,
config: Box<ObjectStoreConfig>,
source: object_store::Error,
backtrace: Backtrace,
},
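Note: boxing the `config` field shrinks the error enum as a whole, since an enum is as large as its largest variant — the pattern clippy's `large_enum_variant` lint points at. A sketch with stand-in types:

```rust
// Stand-in for ObjectStoreConfig; assume it is a large struct.
#[derive(Debug)]
struct ConfigStub([u8; 256]);

#[derive(Debug)]
enum Error {
    // Box keeps this variant pointer-sized instead of 256+ bytes.
    InitBackend { config: Box<ConfigStub> },
    Eof,
}

fn main() {
    let e = Error::InitBackend { config: Box::new(ConfigStub([0; 256])) };
    println!("{e:?} / {:?}", Error::Eof);
    println!("enum size: {} bytes", std::mem::size_of::<Error>());
}
```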

View File

@@ -233,7 +233,7 @@ pub(crate) async fn new_fs_object_store(data_dir: &str) -> Result<ObjectStore> {
.context(error::CreateDirSnafu { dir: &data_dir })?;
info!("The file storage directory is: {}", &data_dir);
let atomic_write_dir = format!("{}/.tmp/", data_dir);
let atomic_write_dir = format!("{data_dir}/.tmp/");
let accessor = FsBuilder::default()
.root(&data_dir)

View File

@@ -117,7 +117,7 @@ impl Instance {
}
Some(select_expr::Expr::LogicalPlan(plan)) => self.execute_logical(plan).await,
_ => UnsupportedExprSnafu {
name: format!("{:?}", expr),
name: format!("{expr:?}"),
}
.fail(),
}
@@ -175,7 +175,7 @@ impl GrpcQueryHandler for Instance {
Some(object_expr::Expr::Select(select_expr)) => self.handle_select(select_expr).await,
other => {
return servers::error::NotSupportedSnafu {
feat: format!("{:?}", other),
feat: format!("{other:?}"),
}
.fail();
}
@@ -200,7 +200,7 @@ impl GrpcAdminHandler for Instance {
}
other => {
return servers::error::NotSupportedSnafu {
feat: format!("{:?}", other),
feat: format!("{other:?}"),
}
.fail();
}

View File

@@ -187,8 +187,7 @@ fn table_idents_to_full_name(
)),
_ => error::InvalidSqlSnafu {
msg: format!(
"expect table name to be <catalog>.<schema>.<table>, <schema>.<table> or <table>, actual: {}",
obj_name
"expect table name to be <catalog>.<schema>.<table>, <schema>.<table> or <table>, actual: {obj_name}",
),
}.fail(),
}

View File

@@ -40,7 +40,7 @@ impl Services {
pub async fn try_new(instance: InstanceRef, opts: &DatanodeOptions) -> Result<Self> {
let grpc_runtime = Arc::new(
RuntimeBuilder::default()
.worker_threads(opts.rpc_runtime_size as usize)
.worker_threads(opts.rpc_runtime_size)
.thread_name("grpc-io-handlers")
.build()
.context(RuntimeResourceSnafu)?,
@@ -54,7 +54,7 @@ impl Services {
Mode::Distributed => {
let mysql_io_runtime = Arc::new(
RuntimeBuilder::default()
.worker_threads(opts.mysql_runtime_size as usize)
.worker_threads(opts.mysql_runtime_size)
.thread_name("mysql-io-handlers")
.build()
.context(RuntimeResourceSnafu)?,

View File

@@ -202,8 +202,7 @@ mod tests {
let err_msg = result.unwrap_err().to_string();
assert!(
err_msg.contains("Missing timestamp column"),
"actual: {}",
err_msg
"actual: {err_msg}",
);
}

View File

@@ -96,7 +96,7 @@ impl SqlHandler {
result
}
pub(crate) fn get_table<'a>(&self, table_ref: &'a TableReference) -> Result<TableRef> {
pub(crate) fn get_table(&self, table_ref: &TableReference) -> Result<TableRef> {
self.table_engine
.get_table(&EngineContext::default(), table_ref)
.with_context(|_| GetTableSnafu {

View File

@@ -61,7 +61,7 @@ impl SqlHandler {
let alter_kind = match alter_table.alter_operation() {
AlterTableOperation::AddConstraint(table_constraint) => {
return error::InvalidSqlSnafu {
msg: format!("unsupported table constraint {}", table_constraint),
msg: format!("unsupported table constraint {table_constraint}"),
}
.fail()
}

View File

@@ -143,7 +143,7 @@ impl SqlHandler {
)?;
} else {
return error::InvalidSqlSnafu {
msg: format!("Cannot recognize named UNIQUE constraint: {}", name),
msg: format!("Cannot recognize named UNIQUE constraint: {name}"),
}
.fail();
}
@@ -158,8 +158,7 @@ impl SqlHandler {
} else {
return error::InvalidSqlSnafu {
msg: format!(
"Unrecognized non-primary unnamed UNIQUE constraint: {:?}",
name
"Unrecognized non-primary unnamed UNIQUE constraint: {name:?}",
),
}
.fail();
@@ -167,7 +166,7 @@ impl SqlHandler {
}
_ => {
return ConstraintNotSupportedSnafu {
constraint: format!("{:?}", c),
constraint: format!("{c:?}"),
}
.fail();
}

View File

@@ -487,12 +487,11 @@ async fn test_insert_with_default_value_for_type(type_name: &str) {
let create_sql = format!(
r#"create table test_table(
host string,
ts {} DEFAULT CURRENT_TIMESTAMP,
ts {type_name} DEFAULT CURRENT_TIMESTAMP,
cpu double default 0,
TIME INDEX (ts),
PRIMARY KEY(host)
) engine=mito with(regions=1);"#,
type_name
);
let output = execute_sql(&instance, &create_sql).await;
assert!(matches!(output, Output::AffectedRows(1)));

View File

@@ -40,8 +40,8 @@ pub struct TestGuard {
}
pub fn create_tmp_dir_and_datanode_opts(name: &str) -> (DatanodeOptions, TestGuard) {
let wal_tmp_dir = TempDir::new(&format!("gt_wal_{}", name)).unwrap();
let data_tmp_dir = TempDir::new(&format!("gt_data_{}", name)).unwrap();
let wal_tmp_dir = TempDir::new(&format!("gt_wal_{name}")).unwrap();
let data_tmp_dir = TempDir::new(&format!("gt_data_{name}")).unwrap();
let opts = DatanodeOptions {
wal_dir: wal_tmp_dir.path().to_str().unwrap().to_string(),
storage: ObjectStoreConfig::File {

View File

@@ -139,7 +139,7 @@ mod tests {
map.insert(false, 2);
let result = serde_json::to_string(&map).context(SerializeSnafu);
assert!(result.is_err(), "serialize result is: {:?}", result);
assert!(result.is_err(), "serialize result is: {result:?}");
let err = serde_json::to_string(&map)
.context(SerializeSnafu)
.err()

View File

@@ -12,9 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#![feature(generic_associated_types)]
#![feature(assert_matches)]
pub mod arrow_array;
pub mod data_type;
pub mod error;
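Note: the `generic_associated_types` gate can go because GATs stabilized in Rust 1.65, well before this nightly; the `assert_matches` gate presumably goes because the crate no longer uses the still-unstable macro. A minimal sketch showing a GAT compiling with no feature attribute (types are illustrative):

```rust
trait Lender {
    // A generic associated type: no feature gate needed from Rust 1.65 onward.
    type Item<'a>
    where
        Self: 'a;

    fn lend(&self) -> Self::Item<'_>;
}

struct Words(String);

impl Lender for Words {
    type Item<'a> = &'a str;

    fn lend(&self) -> Self::Item<'_> {
        self.0.as_str()
    }
}

fn main() {
    let w = Words("hello".to_owned());
    println!("{}", w.lend());
}
```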

View File

@@ -57,8 +57,8 @@ impl TryFrom<ColumnDefaultConstraint> for Vec<u8> {
impl Display for ColumnDefaultConstraint {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
ColumnDefaultConstraint::Function(expr) => write!(f, "{}", expr),
ColumnDefaultConstraint::Value(v) => write!(f, "{}", v),
ColumnDefaultConstraint::Function(expr) => write!(f, "{expr}"),
ColumnDefaultConstraint::Value(v) => write!(f, "{v}"),
}
}
}
@@ -172,10 +172,7 @@ fn create_current_timestamp_vector(
std::iter::repeat(util::current_time_millis()).take(num_rows),
))),
_ => error::DefaultValueTypeSnafu {
reason: format!(
"Not support to assign current timestamp to {:?} type",
data_type
),
reason: format!("Not support to assign current timestamp to {data_type:?} type",),
}
.fail(),
}
@@ -301,6 +298,6 @@ mod tests {
let err = constraint
.create_default_vector(&data_type, false, 4)
.unwrap_err();
assert!(matches!(err, Error::DefaultValueType { .. }), "{:?}", err);
assert!(matches!(err, Error::DefaultValueType { .. }), "{err:?}");
}
}

View File

@@ -83,7 +83,7 @@ impl LogicalPrimitiveType for DateType {
ValueRef::Null => Ok(None),
ValueRef::Date(v) => Ok(Some(v)),
other => error::CastTypeSnafu {
msg: format!("Failed to cast value {:?} to Date", other,),
msg: format!("Failed to cast value {other:?} to Date"),
}
.fail(),
}

View File

@@ -84,7 +84,7 @@ impl LogicalPrimitiveType for DateTimeType {
ValueRef::Null => Ok(None),
ValueRef::DateTime(v) => Ok(Some(v)),
other => error::CastTypeSnafu {
msg: format!("Failed to cast value {:?} to DateTime", other,),
msg: format!("Failed to cast value {other:?} to DateTime"),
}
.fail(),
}

View File

@@ -49,7 +49,7 @@ impl DataType for NullType {
}
fn create_mutable_vector(&self, _capacity: usize) -> Box<dyn MutableVector> {
Box::new(NullVectorBuilder::default())
Box::<NullVectorBuilder>::default()
}
fn is_timestamp_compatible(&self) -> bool {
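Note: `Box::<T>::default()` allocates the default value directly, which is what clippy's `box_default` lint asks for in place of `Box::new(T::default())`. Sketch with a stand-in builder type:

```rust
#[derive(Default, Debug)]
struct BuilderStub; // stand-in for NullVectorBuilder

fn main() {
    // The form clippy's `box_default` warns about:
    let old: Box<BuilderStub> = Box::new(BuilderStub::default());
    // The preferred turbofish form from the hunk above:
    let new: Box<BuilderStub> = Box::<BuilderStub>::default();
    println!("{old:?} {new:?}");
}
```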

View File

@@ -70,31 +70,31 @@ impl Display for Value {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
Value::Null => write!(f, "{}", self.data_type().name()),
Value::Boolean(v) => write!(f, "{}", v),
Value::UInt8(v) => write!(f, "{}", v),
Value::UInt16(v) => write!(f, "{}", v),
Value::UInt32(v) => write!(f, "{}", v),
Value::UInt64(v) => write!(f, "{}", v),
Value::Int8(v) => write!(f, "{}", v),
Value::Int16(v) => write!(f, "{}", v),
Value::Int32(v) => write!(f, "{}", v),
Value::Int64(v) => write!(f, "{}", v),
Value::Float32(v) => write!(f, "{}", v),
Value::Float64(v) => write!(f, "{}", v),
Value::Boolean(v) => write!(f, "{v}"),
Value::UInt8(v) => write!(f, "{v}"),
Value::UInt16(v) => write!(f, "{v}"),
Value::UInt32(v) => write!(f, "{v}"),
Value::UInt64(v) => write!(f, "{v}"),
Value::Int8(v) => write!(f, "{v}"),
Value::Int16(v) => write!(f, "{v}"),
Value::Int32(v) => write!(f, "{v}"),
Value::Int64(v) => write!(f, "{v}"),
Value::Float32(v) => write!(f, "{v}"),
Value::Float64(v) => write!(f, "{v}"),
Value::String(v) => write!(f, "{}", v.as_utf8()),
Value::Binary(v) => {
let hex = v
.iter()
.map(|b| format!("{:02x}", b))
.map(|b| format!("{b:02x}"))
.collect::<Vec<String>>()
.join("");
write!(f, "{}", hex)
write!(f, "{hex}")
}
Value::Date(v) => write!(f, "{}", v),
Value::DateTime(v) => write!(f, "{}", v),
Value::Date(v) => write!(f, "{v}"),
Value::DateTime(v) => write!(f, "{v}"),
Value::Timestamp(v) => write!(f, "{}", v.to_iso8601_string()),
Value::List(v) => {
let default = Box::new(vec![]);
let default = Box::<Vec<Value>>::default();
let items = v.items().as_ref().unwrap_or(&default);
let items = items
.iter()
@@ -146,7 +146,7 @@ impl Value {
Value::Null => Ok(None),
Value::List(v) => Ok(Some(v)),
other => error::CastTypeSnafu {
msg: format!("Failed to cast {:?} to list value", other),
msg: format!("Failed to cast {other:?} to list value"),
}
.fail(),
}
@@ -214,8 +214,7 @@ impl Value {
output_type_id == value_type_id || self.is_null(),
error::ToScalarValueSnafu {
reason: format!(
"expect value to return output_type {:?}, actual: {:?}",
output_type_id, value_type_id,
"expect value to return output_type {output_type_id:?}, actual: {value_type_id:?}",
),
}
);
@@ -1345,7 +1344,7 @@ mod tests {
);
assert_eq!(
Value::List(ListValue::new(
Some(Box::new(vec![])),
Some(Box::default()),
ConcreteDataType::timestamp_second_datatype(),
))
.to_string(),
@@ -1353,7 +1352,7 @@ mod tests {
);
assert_eq!(
Value::List(ListValue::new(
Some(Box::new(vec![])),
Some(Box::default()),
ConcreteDataType::timestamp_millisecond_datatype(),
))
.to_string(),
@@ -1361,7 +1360,7 @@ mod tests {
);
assert_eq!(
Value::List(ListValue::new(
Some(Box::new(vec![])),
Some(Box::default()),
ConcreteDataType::timestamp_microsecond_datatype(),
))
.to_string(),
@@ -1369,7 +1368,7 @@ mod tests {
);
assert_eq!(
Value::List(ListValue::new(
Some(Box::new(vec![])),
Some(Box::default()),
ConcreteDataType::timestamp_nanosecond_datatype(),
))
.to_string(),

View File

@@ -252,7 +252,7 @@ mod tests {
#[test]
fn test_serialize_binary_vector_to_json() {
let vector = BinaryVector::from(BinaryArray::from_iter_values(&[
let vector = BinaryVector::from(BinaryArray::from_iter_values([
vec![1, 2, 3],
vec![1, 2, 3],
]));
@@ -281,7 +281,7 @@ mod tests {
#[test]
fn test_from_arrow_array() {
let arrow_array = BinaryArray::from_iter_values(&[vec![1, 2, 3], vec![1, 2, 3]]);
let arrow_array = BinaryArray::from_iter_values([vec![1, 2, 3], vec![1, 2, 3]]);
let original = BinaryArray::from(arrow_array.data().clone());
let vector = BinaryVector::from(arrow_array);
assert_eq!(original, vector.array);

View File

@@ -296,7 +296,7 @@ mod tests {
let vec = BooleanVector::from(input.clone());
assert_eq!(4, vec.len());
for (i, v) in input.into_iter().enumerate() {
assert_eq!(Some(v), vec.get_data(i), "failed at {}", i)
assert_eq!(Some(v), vec.get_data(i), "Failed at {i}")
}
}
@@ -306,7 +306,7 @@ mod tests {
let vec = input.iter().collect::<BooleanVector>();
assert_eq!(4, vec.len());
for (i, v) in input.into_iter().enumerate() {
assert_eq!(v, vec.get_data(i), "failed at {}", i)
assert_eq!(v, vec.get_data(i), "Failed at {i}")
}
}
@@ -316,7 +316,7 @@ mod tests {
let vec = BooleanVector::from(input.clone());
assert_eq!(4, vec.len());
for (i, v) in input.into_iter().enumerate() {
assert_eq!(v, vec.get_data(i), "failed at {}", i)
assert_eq!(v, vec.get_data(i), "failed at {i}")
}
}

View File

@@ -203,7 +203,7 @@ mod tests {
let a = Int32Vector::from_slice(vec![1]);
let c = ConstantVector::new(Arc::new(a), 10);
let s = format!("{:?}", c);
let s = format!("{c:?}");
assert_eq!(s, "ConstantVector([Int32(1); 10])");
}

View File

@@ -37,7 +37,7 @@ mod tests {
#[test]
fn test_datetime_vector() {
let v = DateTimeVector::new(PrimitiveArray::from_slice(&[1, 2, 3]));
let v = DateTimeVector::new(PrimitiveArray::from_slice([1, 2, 3]));
assert_eq!(ConcreteDataType::datetime_datatype(), v.data_type());
assert_eq!(3, v.len());
assert_eq!("DateTimeVector", v.vector_type_name());

View File

@@ -205,7 +205,7 @@ impl Helper {
| ScalarValue::Time64Microsecond(_)
| ScalarValue::Time64Nanosecond(_) => {
return error::ConversionSnafu {
from: format!("Unsupported scalar value: {}", value),
from: format!("Unsupported scalar value: {value}"),
}
.fail()
}

View File

@@ -157,10 +157,7 @@ impl From<ListArray> for ListVector {
fn from(array: ListArray) -> Self {
let item_type = ConcreteDataType::from_arrow_type(match array.data_type() {
ArrowDataType::List(field) => field.data_type(),
other => panic!(
"Try to create ListVector from an arrow array with type {:?}",
other
),
other => panic!("Try to create ListVector from an arrow array with type {other:?}"),
});
Self { array, item_type }
}

View File

@@ -167,7 +167,7 @@ impl MutableVector for NullVectorBuilder {
ensure!(
value.is_null(),
error::CastTypeSnafu {
msg: format!("Failed to cast value ref {:?} to null", value),
msg: format!("Failed to cast value ref {value:?} to null"),
}
);
@@ -243,7 +243,7 @@ mod tests {
#[test]
fn test_debug_null_vector() {
let array = NullVector::new(1024 * 1024);
assert_eq!(format!("{:?}", array), "NullVector(1048576)");
assert_eq!(format!("{array:?}"), "NullVector(1048576)");
}
#[test]

View File

@@ -45,11 +45,11 @@ mod tests {
};
fn check_filter_primitive(expect: &[i32], input: &[i32], filter: &[bool]) {
let v = Int32Vector::from_slice(&input);
let v = Int32Vector::from_slice(input);
let filter = BooleanVector::from_slice(filter);
let out = v.filter(&filter).unwrap();
let expect: VectorRef = Arc::new(Int32Vector::from_slice(&expect));
let expect: VectorRef = Arc::new(Int32Vector::from_slice(expect));
assert_eq!(expect, out);
}

View File

@@ -365,7 +365,7 @@ pub(crate) fn replicate_primitive<T: LogicalPrimitiveType>(
return vector.get_slice(0, 0);
}
let mut builder = PrimitiveVectorBuilder::<T>::with_capacity(*offsets.last().unwrap() as usize);
let mut builder = PrimitiveVectorBuilder::<T>::with_capacity(*offsets.last().unwrap());
let mut previous_offset = 0;

View File

@@ -291,7 +291,7 @@ impl SchemaProvider for FrontendSchemaProvider {
}
Some(r) => r,
};
let val = TableGlobalValue::from_bytes(&res.1).context(InvalidCatalogValueSnafu)?;
let val = TableGlobalValue::from_bytes(res.1).context(InvalidCatalogValueSnafu)?;
let table = Arc::new(DistTable::new(
table_name,

View File

@@ -643,7 +643,7 @@ impl GrpcQueryHandler for Instance {
.await
.map_err(BoxedError::new)
.with_context(|_| server_error::ExecuteQuerySnafu {
query: format!("{:?}", insert_expr),
query: format!("{insert_expr:?}"),
})?;
let object_result = match output {
Output::AffectedRows(rows) => ObjectResultBuilder::default()

View File

@@ -241,12 +241,12 @@ impl DistInstance {
.schema(schema_name)
.context(CatalogSnafu)?
.context(SchemaNotFoundSnafu {
schema_info: format!("{}.{}", catalog_name, schema_name),
schema_info: format!("{catalog_name}.{schema_name}"),
})?
.table(table_name)
.context(CatalogSnafu)?
.context(TableNotFoundSnafu {
table_name: format!("{}.{}.{}", catalog_name, schema_name, table_name),
table_name: format!("{catalog_name}.{schema_name}.{table_name}"),
})?;
let dist_table = table
@@ -392,7 +392,7 @@ impl GrpcAdminHandler for DistInstance {
}
.map_err(BoxedError::new)
.context(server_error::ExecuteQuerySnafu {
query: format!("{:?}", query),
query: format!("{query:?}"),
})
}
}

View File

@@ -51,7 +51,7 @@ impl Instance {
.await
.map_err(BoxedError::new)
.with_context(|_| server_error::ExecuteQuerySnafu {
query: format!("{:?}", data_point),
query: format!("{data_point:?}"),
})?;
Ok(())
}

View File

@@ -54,8 +54,7 @@ fn negotiate_response_type(accepted_response_types: &[i32]) -> ServerResult<Resp
.find(|t| is_supported(**t))
.with_context(|| error::NotSupportedSnafu {
feat: format!(
"server does not support any of the requested response types: {:?}",
accepted_response_types
"server does not support any of the requested response types: {accepted_response_types:?}",
),
})?;
@@ -131,7 +130,7 @@ impl PrometheusProtocolHandler for Instance {
.await
.map_err(BoxedError::new)
.with_context(|_| error::ExecuteInsertSnafu {
msg: format!("{:?}", request),
msg: format!("{request:?}"),
})?;
}
Mode::Distributed => {
@@ -139,7 +138,7 @@ impl PrometheusProtocolHandler for Instance {
.await
.map_err(BoxedError::new)
.with_context(|_| error::ExecuteInsertSnafu {
msg: format!("{:?}", request),
msg: format!("{request:?}"),
})?;
}
}

View File

@@ -66,7 +66,7 @@ impl WriteSpliter {
{
Ok(region_id) => region_id,
Err(e) => {
let reason = format!("{:?}", e);
let reason = format!("{e:?}");
return FindRegionSnafu { reason }.fail();
}
};

View File

@@ -162,9 +162,9 @@ impl DistTable {
filters: &[Expr],
) -> Result<Vec<RegionNumber>> {
let regions = if let Some((first, rest)) = filters.split_first() {
let mut target = self.find_regions0(partition_rule.clone(), first)?;
let mut target = Self::find_regions0(partition_rule.clone(), first)?;
for filter in rest {
let regions = self.find_regions0(partition_rule.clone(), filter)?;
let regions = Self::find_regions0(partition_rule.clone(), filter)?;
// When all filters are provided as a collection, it often implicitly states that
// "all filters must be satisfied". So we join all the results here.
@@ -193,7 +193,6 @@ impl DistTable {
// - expr with arithmetic like "a + 1 < 10" (should have been optimized in logic plan?)
// - not comparison or neither "AND" nor "OR" operations, for example, "a LIKE x"
fn find_regions0(
&self,
partition_rule: PartitionRuleRef<Error>,
filter: &Expr,
) -> Result<HashSet<RegionNumber>> {
@@ -222,9 +221,9 @@ impl DistTable {
if matches!(op, Operator::And | Operator::Or) =>
{
let left_regions =
self.find_regions0(partition_rule.clone(), &(*left.clone()).into())?;
Self::find_regions0(partition_rule.clone(), &(*left.clone()).into())?;
let right_regions =
self.find_regions0(partition_rule.clone(), &(*right.clone()).into())?;
Self::find_regions0(partition_rule.clone(), &(*right.clone()).into())?;
let regions = match op {
Operator::And => left_regions
.intersection(&right_regions)
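Note: `find_regions0` never touched `self`, so it becomes an associated function and call sites switch from `self.find_regions0(..)` to `Self::find_regions0(..)` — the fix clippy's `unused_self` lint suggests. A reduced sketch:

```rust
use std::collections::HashSet;

struct DistTable;

impl DistTable {
    // No `&self` parameter: the body only needs its arguments.
    fn find_regions0(filter: &str) -> HashSet<u32> {
        HashSet::from([filter.len() as u32])
    }
}

fn main() {
    let regions = DistTable::find_regions0("a > 1");
    println!("{regions:?}");
}
```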

View File

@@ -44,7 +44,7 @@ impl TableRoutes {
.await
.map_err(|e| {
error::GetCacheSnafu {
err_msg: format!("{:?}", e),
err_msg: format!("{e:?}"),
}
.build()
})

View File

@@ -82,7 +82,7 @@ impl DatanodeInstance {
let table_provider = Arc::new(DfTableProviderAdapter::new(self.table.clone()));
let mut builder = LogicalPlanBuilder::scan_with_filters(
&table_scan.table_name.to_string(),
table_scan.table_name.to_string(),
Arc::new(DefaultTableSource::new(table_provider)),
table_scan.projection.clone(),
table_scan

View File

@@ -59,8 +59,8 @@ pub(crate) async fn create_frontend_instance(test_name: &str) -> (Arc<Instance>,
}
fn create_tmp_dir_and_datanode_opts(name: &str) -> (DatanodeOptions, TestGuard) {
let wal_tmp_dir = TempDir::new(&format!("gt_wal_{}", name)).unwrap();
let data_tmp_dir = TempDir::new(&format!("gt_data_{}", name)).unwrap();
let wal_tmp_dir = TempDir::new(&format!("gt_wal_{name}")).unwrap();
let data_tmp_dir = TempDir::new(&format!("gt_data_{name}")).unwrap();
let opts = DatanodeOptions {
wal_dir: wal_tmp_dir.path().to_str().unwrap().to_string(),
storage: ObjectStoreConfig::File {
@@ -138,8 +138,8 @@ async fn create_dist_datanode_instance(
meta_srv: MockInfo,
) -> Arc<DatanodeInstance> {
let current = common_time::util::current_time_millis();
let wal_tmp_dir = TempDir::new_in("/tmp", &format!("dist_datanode-wal-{}", current)).unwrap();
let data_tmp_dir = TempDir::new_in("/tmp", &format!("dist_datanode-data-{}", current)).unwrap();
let wal_tmp_dir = TempDir::new_in("/tmp", &format!("dist_datanode-wal-{current}")).unwrap();
let data_tmp_dir = TempDir::new_in("/tmp", &format!("dist_datanode-data-{current}")).unwrap();
let opts = DatanodeOptions {
node_id: Some(datanode_id),
wal_dir: wal_tmp_dir.path().to_str().unwrap().to_string(),

View File

@@ -145,7 +145,7 @@ impl Buffer for ChunkList {
}
left -= actual;
} else {
panic!("Advance step [{}] exceeds max readable bytes", by);
panic!("Advance step [{by}] exceeds max readable bytes");
}
}
}

View File

@@ -653,7 +653,7 @@ fn read_at(file: &Arc<File>, offset: usize, file_length: usize) -> Result<Chunk>
if offset > file_length {
return Err(Eof);
}
let size = CHUNK_SIZE.min((file_length - offset) as usize);
let size = CHUNK_SIZE.min(file_length - offset);
let mut data = Box::new([0u8; CHUNK_SIZE]);
crate::fs::io::pread_exact(file.as_ref(), &mut data[0..size], offset as u64)?;
Ok(Chunk::new(data, size))
@@ -684,7 +684,7 @@ mod tests {
let mut file = LogFile::open(path.clone(), &config)
.await
.unwrap_or_else(|_| panic!("Failed to open file: {}", path));
.unwrap_or_else(|_| panic!("Failed to open file: {path}"));
file.start().await.expect("Failed to start log file");
assert_eq!(
@@ -873,7 +873,7 @@ mod tests {
let mut file = LogFile::open(path.clone(), &config)
.await
.unwrap_or_else(|_| panic!("Failed to open file: {}", path));
.unwrap_or_else(|_| panic!("Failed to open file: {path}"));
let state = file.state.clone();
file.start().await.unwrap();

View File

@@ -97,13 +97,13 @@ mod tests {
#[test]
pub fn test_padding_file_name() {
let id = u64::MIN;
assert_eq!("00000000000000000000", format!("{:020}", id));
assert_eq!("00000000000000000000", format!("{id:020}"));
let id = 123u64;
assert_eq!("00000000000000000123", format!("{:020}", id));
assert_eq!("00000000000000000123", format!("{id:020}"));
let id = 123123123123u64;
assert_eq!("00000000123123123123", format!("{:020}", id));
assert_eq!("00000000123123123123", format!("{id:020}"));
let id = u64::MAX;
assert_eq!(u64::MAX.to_string(), format!("{:020}", id));
assert_eq!(u64::MAX.to_string(), format!("{id:020}"));
}
#[test]

View File

@@ -20,9 +20,9 @@ use snafu::ResultExt;
use crate::error::{Error, IoSnafu};
pub fn pread_exact(file: &File, buf: &mut [u8], offset: u64) -> Result<(), Error> {
file.read_exact_at(buf, offset as u64).context(IoSnafu)
file.read_exact_at(buf, offset).context(IoSnafu)
}
pub fn pwrite_all(file: &File, buf: &[u8], offset: u64) -> Result<(), Error> {
file.write_all_at(buf, offset as u64).context(IoSnafu)
file.write_all_at(buf, offset).context(IoSnafu)
}
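Note: `offset` is already a `u64` here, so `offset as u64` is a no-op cast the newer clippy rejects (`unnecessary_cast`); the same fix appears above for `rpc_runtime_size as usize` and `CHUNK_SIZE.min(...)`. A one-line illustration:

```rust
fn next_offset(offset: u64) -> u64 {
    // Writing `offset as u64` here would trip clippy::unnecessary_cast.
    offset + 1
}

fn main() {
    assert_eq!(next_offset(127), 128);
}
```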

View File

@@ -93,8 +93,7 @@ impl LocalFileLogStore {
Arc::get_mut(active_file)
.with_context(|| InternalSnafu {
msg: format!(
"Concurrent modification on log store {} start is not allowed",
active_file_name
"Concurrent modification on log store {active_file_name} start is not allowed"
),
})?
.start()
@@ -143,9 +142,9 @@ impl LocalFileLogStore {
let file = LogFile::open(path, config).await?;
info!("Load log store file {}: {:?}", start_id, file);
if map.contains_key(&start_id) {
error!("Log file with start entry id: {} already exists", start_id);
error!("Log file with start entry id: {start_id} already exists");
return DuplicateFileSnafu {
msg: format!("File with start id: {} duplicates on start", start_id),
msg: format!("File with start id: {start_id} duplicates on start"),
}
.fail();
}

View File

@@ -326,7 +326,7 @@ mod tests {
async fn gen_data(&self) {
for i in 0..10 {
let req = PutRequest::new()
.with_key(self.key(&format!("key-{}", i)))
.with_key(self.key(&format!("key-{i}")))
.with_value(format!("{}-{}", "value", i).into_bytes())
.with_prev_kv();
let res = self.client.put(req).await;
@@ -547,7 +547,7 @@ mod tests {
let kvs = res.unwrap().take_kvs();
assert_eq!(10, kvs.len());
for (i, mut kv) in kvs.into_iter().enumerate() {
assert_eq!(tc.key(&format!("key-{}", i)), kv.take_key());
assert_eq!(tc.key(&format!("key-{i}")), kv.take_key());
assert_eq!(format!("{}-{}", "value", i).into_bytes(), kv.take_value());
}
}

View File

@@ -31,7 +31,7 @@ pub(crate) const TABLE_ROUTE_PREFIX: &str = "__meta_table_route";
lazy_static! {
static ref DATANODE_KEY_PATTERN: Regex =
Regex::new(&format!("^{}-([0-9]+)-([0-9]+)$", DN_LEASE_PREFIX)).unwrap();
Regex::new(&format!("^{DN_LEASE_PREFIX}-([0-9]+)-([0-9]+)$")).unwrap();
}
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct LeaseKey {
@@ -52,10 +52,10 @@ impl FromStr for LeaseKey {
let cluster_id = caps[1].to_string();
let node_id = caps[2].to_string();
let cluster_id: u64 = cluster_id.parse().context(error::ParseNumSnafu {
err_msg: format!("invalid cluster_id: {}", cluster_id),
err_msg: format!("invalid cluster_id: {cluster_id}"),
})?;
let node_id: u64 = node_id.parse().context(error::ParseNumSnafu {
err_msg: format!("invalid node_id: {}", node_id),
err_msg: format!("invalid node_id: {node_id}"),
})?;
Ok(Self {
@@ -118,7 +118,7 @@ impl TryFrom<LeaseValue> for Vec<u8> {
fn try_from(dn_value: LeaseValue) -> Result<Self> {
Ok(serde_json::to_string(&dn_value)
.context(error::SerializeToJsonSnafu {
input: format!("{:?}", dn_value),
input: format!("{dn_value:?}"),
})?
.into_bytes())
}

View File

@@ -53,5 +53,5 @@ where
#[inline]
pub fn get_lease_prefix(cluster_id: u64) -> Vec<u8> {
format!("{}-{}", DN_LEASE_PREFIX, cluster_id).into_bytes()
format!("{DN_LEASE_PREFIX}-{cluster_id}").into_bytes()
}

View File

@@ -100,7 +100,7 @@ impl heartbeat_server::Heartbeat for MetaSrv {
pusher_key.as_ref().unwrap_or(&"unknow".to_string())
);
if let Some(key) = pusher_key {
let _ = handler_group.unregister(&key);
let _ = handler_group.unregister(&key).await;
}
});
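Note: the added `.await` matters — `unregister` returns a future, and binding it to `_` without awaiting drops it unpolled, so the unregistration never ran. The newer toolchain's clippy catches this pattern (`let_underscore_future`). A sketch driving the future with the `futures` executor already used elsewhere in this diff:

```rust
async fn unregister(key: &str) -> bool {
    println!("unregistered {key}");
    true
}

fn main() {
    // Buggy: creates the future and silently drops it without running it.
    let _ = unregister("pusher-1");
    // Fixed: actually drive the future to completion.
    let _ = futures::executor::block_on(unregister("pusher-1"));
}
```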

View File

@@ -164,7 +164,7 @@ async fn handle_delete(req: DeleteRequest, ctx: Context) -> Result<RouteResponse
let tgv = get_table_global_value(&ctx.kv_store, &tgk)
.await?
.with_context(|| error::TableNotFoundSnafu {
name: format!("{}", tgk),
name: format!("{tgk}"),
})?;
let trk = TableRouteKey::with_table_global_key(tgv.table_id() as u64, &tgk);
let (_, trv) = remove_table_route_value(&ctx.kv_store, &trk).await?;
@@ -272,11 +272,11 @@ async fn get_table_global_value(
kv_store: &KvStoreRef,
key: &TableGlobalKey,
) -> Result<Option<TableGlobalValue>> {
let tg_key = format!("{}", key).into_bytes();
let tg_key = format!("{key}").into_bytes();
let tv = get_from_store(kv_store, tg_key).await?;
match tv {
Some(tv) => {
let tv = TableGlobalValue::from_bytes(&tv).context(error::InvalidCatalogValueSnafu)?;
let tv = TableGlobalValue::from_bytes(tv).context(error::InvalidCatalogValueSnafu)?;
Ok(Some(tv))
}
None => Ok(None),

View File

@@ -49,7 +49,7 @@ const INIT_TABLE_VERSION: TableVersion = 0;
/// Generate region name in the form of "{TABLE_ID}_{REGION_NUMBER}"
#[inline]
fn region_name(table_id: TableId, n: u32) -> String {
format!("{}_{:010}", table_id, n)
format!("{table_id}_{n:010}")
}
#[inline]
@@ -59,7 +59,7 @@ fn region_id(table_id: TableId, n: u32) -> RegionId {
#[inline]
fn table_dir(schema_name: &str, table_id: TableId) -> String {
format!("{}/{}/", schema_name, table_id)
format!("{schema_name}/{table_id}/")
}
/// [TableEngine] implementation.
@@ -109,15 +109,15 @@ impl<S: StorageEngine> TableEngine for MitoEngine<S> {
Ok(self.inner.alter_table(ctx, req).await?)
}
fn get_table<'a>(
fn get_table(
&self,
_ctx: &EngineContext,
table_ref: &'a TableReference,
table_ref: &TableReference,
) -> TableResult<Option<TableRef>> {
Ok(self.inner.get_table(table_ref))
}
fn table_exists<'a>(&self, _ctx: &EngineContext, table_ref: &'a TableReference) -> bool {
fn table_exists(&self, _ctx: &EngineContext, table_ref: &TableReference) -> bool {
self.inner.get_table(table_ref).is_some()
}
@@ -292,7 +292,7 @@ impl<S: StorageEngine> MitoEngineInner<S> {
return Ok(table);
} else {
return TableExistsSnafu {
table_name: format!("{}.{}.{}", catalog_name, schema_name, table_name),
table_name: format!("{catalog_name}.{schema_name}.{table_name}"),
}
.fail();
}
@@ -459,7 +459,7 @@ impl<S: StorageEngine> MitoEngineInner<S> {
Ok(table)
}
fn get_table<'a>(&self, table_ref: &'a TableReference) -> Option<TableRef> {
fn get_table(&self, table_ref: &TableReference) -> Option<TableRef> {
self.tables
.read()
.unwrap()
@@ -876,7 +876,7 @@ mod tests {
let result = table_engine.create_table(&ctx, request).await;
assert!(result.is_err());
assert!(matches!(result, Err(e) if format!("{:?}", e).contains("Table already exists")));
assert!(matches!(result, Err(e) if format!("{e:?}").contains("Table already exists")));
}
#[tokio::test]

View File

@@ -56,7 +56,7 @@ use crate::manifest::TableManifest;
#[inline]
fn table_manifest_dir(table_dir: &str) -> String {
format!("{}/manifest/", table_dir)
format!("{table_dir}/manifest/")
}
/// [Table] implementation.
@@ -284,7 +284,7 @@ impl Stream for ChunkStream {
#[inline]
fn column_qualified_name(table_name: &str, region_name: &str, column_name: &str) -> String {
format!("{}.{}.{}", table_name, region_name, column_name)
format!("{table_name}.{region_name}.{column_name}")
}
impl<R: Region> MitoTable<R> {

View File

@@ -281,7 +281,7 @@ mod tests {
// TODO(sunng87): do not rely on to_string for compare
assert_eq!(
format!("{:?}", plan),
format!("{plan:?}"),
r#"DfPlan(Limit: skip=0, fetch=20
Projection: SUM(numbers.number)
Aggregate: groupBy=[[]], aggr=[[SUM(numbers.number)]]

View File

@@ -34,11 +34,7 @@ use datatypes::arrow::datatypes::DataType;
pub struct TypeConversionRule;
impl OptimizerRule for TypeConversionRule {
fn optimize(
&self,
plan: &LogicalPlan,
optimizer_config: &mut OptimizerConfig,
) -> Result<LogicalPlan> {
fn optimize(&self, plan: &LogicalPlan, _config: &mut OptimizerConfig) -> Result<LogicalPlan> {
let mut converter = TypeConverter {
schemas: plan.all_schemas(),
};
@@ -46,7 +42,7 @@ impl OptimizerRule for TypeConversionRule {
match plan {
LogicalPlan::Filter(filter) => Ok(LogicalPlan::Filter(Filter::try_new(
filter.predicate().clone().rewrite(&mut converter)?,
Arc::new(self.optimize(filter.input(), optimizer_config)?),
Arc::new(self.optimize(filter.input(), _config)?),
)?)),
LogicalPlan::TableScan(TableScan {
table_name,
@@ -92,7 +88,7 @@ impl OptimizerRule for TypeConversionRule {
let inputs = plan.inputs();
let new_inputs = inputs
.iter()
.map(|plan| self.optimize(plan, optimizer_config))
.map(|plan| self.optimize(plan, _config))
.collect::<Result<Vec<_>>>()?;
let expr = plan
@@ -175,8 +171,7 @@ impl<'a> TypeConverter<'a> {
let casted_right = Self::cast_scalar_value(value, left_type)?;
if casted_right.is_null() {
return Err(DataFusionError::Plan(format!(
"column:{:?} value:{:?} is invalid",
col, value
"column:{col:?} value:{value:?} is invalid",
)));
}
if reverse {

View File

@@ -82,10 +82,7 @@ async fn execute_argmax<'a>(
table_name: &'a str,
engine: Arc<dyn QueryEngine>,
) -> RecordResult<Vec<RecordBatch>> {
let sql = format!(
"select ARGMAX({}) as argmax from {}",
column_name, table_name
);
let sql = format!("select ARGMAX({column_name}) as argmax from {table_name}");
let plan = engine
.sql_to_plan(&sql, Arc::new(QueryContext::new()))
.unwrap();

View File

@@ -82,10 +82,7 @@ async fn execute_argmin<'a>(
table_name: &'a str,
engine: Arc<dyn QueryEngine>,
) -> RecordResult<Vec<RecordBatch>> {
let sql = format!(
"select argmin({}) as argmin from {}",
column_name, table_name
);
let sql = format!("select argmin({column_name}) as argmin from {table_name}");
let plan = engine
.sql_to_plan(&sql, Arc::new(QueryContext::new()))
.unwrap();

View File

@@ -81,7 +81,7 @@ pub async fn get_numbers_from_table<'s, T>(
where
T: WrapperType,
{
let sql = format!("SELECT {} FROM {}", column_name, table_name);
let sql = format!("SELECT {column_name} FROM {table_name}");
let plan = engine
.sql_to_plan(&sql, Arc::new(QueryContext::new()))
.unwrap();

View File

@@ -78,7 +78,7 @@ async fn execute_mean<'a>(
table_name: &'a str,
engine: Arc<dyn QueryEngine>,
) -> RecordResult<Vec<RecordBatch>> {
let sql = format!("select MEAN({}) as mean from {}", column_name, table_name);
let sql = format!("select MEAN({column_name}) as mean from {table_name}");
let plan = engine
.sql_to_plan(&sql, Arc::new(QueryContext::new()))
.unwrap();

View File

@@ -217,10 +217,7 @@ where
Arc::new(|| Arc::new(MySumAccumulatorCreator::default())),
)));
let sql = format!(
"select MY_SUM({}) as my_sum from {}",
column_name, table_name
);
let sql = format!("select MY_SUM({column_name}) as my_sum from {table_name}");
let plan = engine.sql_to_plan(&sql, Arc::new(QueryContext::new()))?;
let output = engine.execute(&plan).await?;

View File

@@ -85,7 +85,7 @@ where
let expected_value = numbers.iter().map(|&n| n.as_()).collect::<Vec<f64>>();
let expected_value: inc_stats::Percentiles<f64> = expected_value.iter().cloned().collect();
let expected_value = expected_value.percentile(&0.5).unwrap();
let expected_value = expected_value.percentile(0.5).unwrap();
assert_eq!(value, expected_value.into());
Ok(())
}
@@ -95,10 +95,7 @@ async fn execute_percentile<'a>(
table_name: &'a str,
engine: Arc<dyn QueryEngine>,
) -> RecordResult<Vec<RecordBatch>> {
let sql = format!(
"select PERCENTILE({},50.0) as percentile from {}",
column_name, table_name
);
let sql = format!("select PERCENTILE({column_name},50.0) as percentile from {table_name}");
let plan = engine
.sql_to_plan(&sql, Arc::new(QueryContext::new()))
.unwrap();

View File

@@ -78,10 +78,7 @@ async fn execute_polyval<'a>(
table_name: &'a str,
engine: Arc<dyn QueryEngine>,
) -> RecordResult<Vec<RecordBatch>> {
let sql = format!(
"select POLYVAL({}, 0) as polyval from {}",
column_name, table_name
);
let sql = format!("select POLYVAL({column_name}, 0) as polyval from {table_name}");
let plan = engine
.sql_to_plan(&sql, Arc::new(QueryContext::new()))
.unwrap();

View File

@@ -76,8 +76,7 @@ async fn execute_scipy_stats_norm_cdf<'a>(
engine: Arc<dyn QueryEngine>,
) -> RecordResult<Vec<RecordBatch>> {
let sql = format!(
"select SCIPYSTATSNORMCDF({},2.0) as scipy_stats_norm_cdf from {}",
column_name, table_name
"select SCIPYSTATSNORMCDF({column_name},2.0) as scipy_stats_norm_cdf from {table_name}",
);
let plan = engine
.sql_to_plan(&sql, Arc::new(QueryContext::new()))

View File

@@ -76,8 +76,7 @@ async fn execute_scipy_stats_norm_pdf<'a>(
engine: Arc<dyn QueryEngine>,
) -> RecordResult<Vec<RecordBatch>> {
let sql = format!(
"select SCIPYSTATSNORMPDF({},2.0) as scipy_stats_norm_pdf from {}",
column_name, table_name
"select SCIPYSTATSNORMPDF({column_name},2.0) as scipy_stats_norm_pdf from {table_name}"
);
let plan = engine
.sql_to_plan(&sql, Arc::new(QueryContext::new()))

View File

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#![feature(iterator_try_reduce)]
pub mod engine;
pub mod error;
#[cfg(feature = "python")]

View File

@@ -145,7 +145,7 @@ fn try_into_py_obj(col: DFColValue, vm: &VirtualMachine) -> PyResult<PyObjectRef
DFColValue::Array(arr) => {
let ret = PyVector::from(
HelperVec::try_into_vector(arr)
.map_err(|err| vm.new_type_error(format!("Unsupported type: {:#?}", err)))?,
.map_err(|err| vm.new_type_error(format!("Unsupported type: {err:#?}")))?,
)
.into_pyobject(vm);
Ok(ret)
@@ -319,13 +319,11 @@ pub(crate) mod greptime_builtin {
let func: Option<FunctionRef> = FUNCTION_REGISTRY.get_function(name);
let res = match func {
Some(f) => f.eval(Default::default(), &v),
None => return Err(vm.new_type_error(format!("Can't find function {}", name))),
None => return Err(vm.new_type_error(format!("Can't find function {name}"))),
};
match res {
Ok(v) => Ok(v.into()),
Err(err) => {
Err(vm.new_runtime_error(format!("Fail to evaluate the function,: {}", err)))
}
Err(err) => Err(vm.new_runtime_error(format!("Fail to evaluate the function,: {err}"))),
}
}
@@ -338,26 +336,24 @@ pub(crate) mod greptime_builtin {
let func = FUNCTION_REGISTRY.get_aggr_function(name);
let f = match func {
Some(f) => f.create().creator(),
None => return Err(vm.new_type_error(format!("Can't find function {}", name))),
None => return Err(vm.new_type_error(format!("Can't find function {name}"))),
};
let types: Vec<_> = v.iter().map(|v| v.data_type()).collect();
let acc = f(&types);
let mut acc = match acc {
Ok(acc) => acc,
Err(err) => {
return Err(vm.new_runtime_error(format!("Failed to create accumulator: {}", err)))
return Err(vm.new_runtime_error(format!("Failed to create accumulator: {err}")))
}
};
match acc.update_batch(&v) {
Ok(_) => (),
Err(err) => {
return Err(vm.new_runtime_error(format!("Failed to update batch: {}", err)))
}
Err(err) => return Err(vm.new_runtime_error(format!("Failed to update batch: {err}"))),
};
let res = match acc.evaluate() {
Ok(r) => r,
Err(err) => {
return Err(vm.new_runtime_error(format!("Failed to evaluate accumulator: {}", err)))
return Err(vm.new_runtime_error(format!("Failed to evaluate accumulator: {err}")))
}
};
let res = val_to_pyobj(res, vm);
@@ -792,7 +788,7 @@ pub(crate) mod greptime_builtin {
ConstantVector::new(Arc::new(Int64Vector::from_vec(vec![pow])) as _, len_base);
Arc::new(ret) as _
} else {
return Err(vm.new_type_error(format!("Unsupported type({:#?}) for pow()", pow)));
return Err(vm.new_type_error(format!("Unsupported type({pow:#?}) for pow()")));
};
// pyfunction can return PyResult<...>, args can be like PyObjectRef or anything
// impl IntoPyNativeFunc, see rustpython-vm function for more details
@@ -837,8 +833,7 @@ pub(crate) mod greptime_builtin {
let ret = cur.slice(0, 0);
let ret = Helper::try_into_vector(ret.clone()).map_err(|e| {
vm.new_type_error(format!(
"Can't cast result into vector, result: {:?}, err: {:?}",
ret, e
"Can't cast result into vector, result: {ret:?}, err: {e:?}",
))
})?;
return Ok(ret.into());
@@ -850,8 +845,7 @@ pub(crate) mod greptime_builtin {
})?;
let ret = Helper::try_into_vector(ret.clone()).map_err(|e| {
vm.new_type_error(format!(
"Can't cast result into vector, result: {:?}, err: {:?}",
ret, e
"Can't cast result into vector, result: {ret:?}, err: {e:?}",
))
})?;
Ok(ret.into())
@@ -864,8 +858,7 @@ pub(crate) mod greptime_builtin {
let ret = cur.slice(0, 0);
let ret = Helper::try_into_vector(ret.clone()).map_err(|e| {
vm.new_type_error(format!(
"Can't cast result into vector, result: {:?}, err: {:?}",
ret, e
"Can't cast result into vector, result: {ret:?}, err: {e:?}",
))
})?;
return Ok(ret.into());
@@ -877,8 +870,7 @@ pub(crate) mod greptime_builtin {
})?;
let ret = Helper::try_into_vector(ret.clone()).map_err(|e| {
vm.new_type_error(format!(
"Can't cast result into vector, result: {:?}, err: {:?}",
ret, e
"Can't cast result into vector, result: {ret:?}, err: {e:?}",
))
})?;
Ok(ret.into())
@@ -929,7 +921,7 @@ pub(crate) mod greptime_builtin {
.as_any()
.downcast_ref::<Int64Array>()
.ok_or_else(|| {
vm.new_type_error(format!("ts must be int64, found: {:?}", ts_array_ref))
vm.new_type_error(format!("ts must be int64, found: {ts_array_ref:?}"))
})?;
let slices = {
let oldest = aggregate::min(ts)
@@ -975,7 +967,7 @@ pub(crate) mod greptime_builtin {
},
Err(err) => Err(vm
.new_runtime_error(
format!("expect `interval()`'s `func` return a PyVector(`vector`) or int/float/bool, found return to be {:?}, error msg: {err}", obj)
format!("expect `interval()`'s `func` return a PyVector(`vector`) or int/float/bool, found return to be {obj:?}, error msg: {err}")
)
)
}
@@ -1019,8 +1011,7 @@ pub(crate) mod greptime_builtin {
};
let ret = Helper::try_into_vector(ret.clone()).map_err(|e| {
vm.new_type_error(format!(
"Can't cast result into vector, result: {:?}, err: {:?}",
ret, e
"Can't cast result into vector, result: {ret:?}, err: {e:?}",
))
})?;
Ok(ret.into())
@@ -1036,8 +1027,7 @@ pub(crate) mod greptime_builtin {
};
let ret = Helper::try_into_vector(ret.clone()).map_err(|e| {
vm.new_type_error(format!(
"Can't cast result into vector, result: {:?}, err: {:?}",
ret, e
"Can't cast result into vector, result: {ret:?}, err: {e:?}",
))
})?;
Ok(ret.into())
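
Most hunks in this commit apply one mechanical change: positional `format!`-style arguments are inlined as identifiers captured directly in the format string (stable since Rust 1.58, and reported by clippy's `uninlined_format_args` lint on the newer toolchain). A minimal before/after sketch:

fn main() {
    let name = "pow";
    let err = "type mismatch";
    // Before: arguments passed positionally after the format string.
    println!("Can't find function {}", name);
    println!("Unsupported type: {:#?}", err);
    // After: bare identifiers are captured from the enclosing scope;
    // format specs such as `:?` and `:#?` move after the name.
    println!("Can't find function {name}");
    println!("Unsupported type: {err:#?}");
}

Note that only plain identifiers can be captured; field accesses and method calls still require the trailing-argument form.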

View File

@@ -88,7 +88,7 @@ fn convert_scalar_to_py_obj_and_back() {
let col = try_into_columnar_value(list_obj, vm);
if let Err(err) = col {
let reason = format_py_error(err, vm);
assert!(format!("{}", reason).contains(
assert!(format!("{reason}").contains(
"TypeError: All elements in a list should be same type to cast to Datafusion list!"
));
}
@@ -353,7 +353,7 @@ fn run_builtin_fn_testcases() {
},
Err(err) => {
if !err_res.contains(&err){
panic!("Error message not containing, expect {err_res}, found {}", err)
panic!("Error message not containing, expect {err_res}, found {err}")
}
}
}
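
The same capture syntax works in `assert!` and `panic!` messages on the 2021 edition, which is all these test hunks change. A tiny example:

fn main() {
    let result: Result<i32, &str> = Err("unbound name: a");
    // Panic-family macros format their message on the 2021 edition,
    // so captured identifiers work here too.
    assert!(result.is_err(), "{result:#?}\nExpect Err(...), actual Ok(...)");
}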

View File

@@ -185,9 +185,9 @@ fn try_into_columns(
col_len: usize,
) -> Result<Vec<VectorRef>> {
if is_instance::<PyTuple>(obj, vm) {
let tuple = obj.payload::<PyTuple>().with_context(|| {
ret_other_error_with(format!("can't cast obj {:?} to PyTuple)", obj))
})?;
let tuple = obj
.payload::<PyTuple>()
.with_context(|| ret_other_error_with(format!("can't cast obj {obj:?} to PyTuple)")))?;
let cols = tuple
.iter()
.map(|obj| py_vec_obj_to_array(obj, vm, col_len))
@@ -206,7 +206,7 @@ fn select_from_rb(rb: &RecordBatch, fetch_names: &[String]) -> Result<Vec<PyVect
.iter()
.map(|name| {
let vector = rb.column_by_name(name).with_context(|| OtherSnafu {
reason: format!("Can't find field name {}", name),
reason: format!("Can't find field name {name}"),
})?;
Ok(PyVector::from(vector.clone()))
})
@@ -227,7 +227,7 @@ fn check_args_anno_real_type(
ensure!(
anno_ty
.to_owned()
.map(|v| v.datatype == None // like a vector[_]
.map(|v| v.datatype.is_none() // like a vector[_]
|| v.datatype == Some(real_ty.to_owned()) && v.is_nullable == is_nullable)
.unwrap_or(true),
OtherSnafu {
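
One non-format change sits in `check_args_anno_real_type`: `v.datatype == None` becomes `v.datatype.is_none()`, matching clippy's `partialeq_to_none` lint. Beyond readability, `is_none()` also drops the `PartialEq` requirement on the inner type, as this small sketch shows:

#[derive(Debug)]
struct Opaque; // deliberately no `PartialEq` impl

fn has_no_annotation(datatype: &Option<Opaque>) -> bool {
    // `*datatype == None` would not compile, since `Opaque` lacks `PartialEq`;
    // `is_none()` works for any `Option<T>`.
    datatype.is_none()
}

fn main() {
    assert!(has_no_annotation(&None));
    assert!(!has_no_annotation(&Some(Opaque)));
}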

View File

@@ -117,10 +117,7 @@ pub fn compile_script(name: &str, deco_args: &DecoratorArgs, script: &str) -> Re
// It's safe to unwrap loc, it is always exists.
stmts.push(gen_call(name, deco_args, &loc.unwrap()));
} else {
return fail_parse_error!(
format!("Expect statement in script, found: {:?}", top),
None,
);
return fail_parse_error!(format!("Expect statement in script, found: {top:?}"), None);
}
// use `compile::Mode::BlockExpr` so it return the result of statement
compile_top(

View File

@@ -99,7 +99,7 @@ fn try_into_datatype(ty: &str, loc: &Location) -> Result<Option<DataType>> {
"_" => Ok(None),
// note the different between "_" and _
_ => fail_parse_error!(
format!("Unknown datatype: {ty} at {:?}", loc),
format!("Unknown datatype: {ty} at {loc:?}"),
Some(loc.to_owned())
),
}
@@ -209,10 +209,7 @@ fn check_annotation_ret_slice(sub: &ast::Expr<()>) -> Result<&ast::Expr<()>> {
ensure!(
id == "vector",
ret_parse_error(
format!(
"Wrong type annotation, expect `vector[...]`, found `{}`",
id
),
format!("Wrong type annotation, expect `vector[...]`, found `{id}`"),
Some(value.location)
)
);

View File

@@ -221,7 +221,7 @@ pub fn get_error_reason_loc(err: &Error) -> (String, Option<Location>) {
Error::PyRuntime { msg, .. } => (msg.clone(), None),
Error::PyParse { source, .. } => (source.error.to_string(), Some(source.location)),
Error::PyCompile { source, .. } => (source.error.to_string(), Some(source.location)),
_ => (format!("Unknown error: {:?}", err), None),
_ => (format!("Unknown error: {err:?}"), None),
}
}

View File

@@ -153,7 +153,7 @@ fn run_ron_testcases() {
} => {
let rb = create_sample_recordbatch();
let res = coprocessor::exec_coprocessor(&testcase.code, &rb);
assert!(res.is_err(), "{:#?}\nExpect Err(...), actual Ok(...)", res);
assert!(res.is_err(), "{res:#?}\nExpect Err(...), actual Ok(...)");
if let Err(res) = res {
error!(
"{}",

View File

@@ -40,7 +40,7 @@ pub fn format_py_error(excep: PyBaseExceptionRef, vm: &VirtualMachine) -> error:
let mut msg = String::new();
if let Err(e) = vm.write_exception(&mut msg, &excep) {
return error::Error::PyRuntime {
msg: format!("Failed to write exception msg, err: {}", e),
msg: format!("Failed to write exception msg, err: {e}"),
backtrace: Backtrace::generate(),
};
}
@@ -59,9 +59,9 @@ pub fn py_vec_obj_to_array(
) -> Result<VectorRef, error::Error> {
// It's ugly, but we can't find a better way right now.
if is_instance::<PyVector>(obj, vm) {
let pyv = obj.payload::<PyVector>().with_context(|| {
ret_other_error_with(format!("can't cast obj {:?} to PyVector", obj))
})?;
let pyv = obj
.payload::<PyVector>()
.with_context(|| ret_other_error_with(format!("can't cast obj {obj:?} to PyVector")))?;
Ok(pyv.as_vector_ref())
} else if is_instance::<PyInt>(obj, vm) {
let val = obj
@@ -110,6 +110,6 @@ pub fn py_vec_obj_to_array(
_ => unreachable!(),
}
} else {
ret_other_error_with(format!("Expect a vector or a constant, found {:?}", obj)).fail()
ret_other_error_with(format!("Expect a vector or a constant, found {obj:?}")).fail()
}
}
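
The reformatted `payload::<PyVector>()` chains above all share one shape: an `Option` produced by a runtime downcast is turned into a `Result` with a lazily built, descriptive error (the closure runs only on the failure path, as with snafu's `with_context`). A dependency-free sketch of that shape, names hypothetical:

use std::any::Any;

#[derive(Debug)]
struct OtherError {
    reason: String,
}

// Downcast a dynamically typed value; the error message is formatted
// only when the downcast actually misses.
fn payload_of<T: 'static>(obj: &dyn Any) -> Result<&T, OtherError> {
    obj.downcast_ref::<T>().ok_or_else(|| OtherError {
        reason: format!("can't cast obj to {}", std::any::type_name::<T>()),
    })
}

fn main() {
    let obj: Box<dyn Any> = Box::new(42i64);
    assert!(payload_of::<i64>(obj.as_ref()).is_ok());
    assert!(payload_of::<String>(obj.as_ref()).is_err());
}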

View File

@@ -59,28 +59,26 @@ fn emit_cast_error(
dst_ty: &ArrowDataType,
) -> PyBaseExceptionRef {
vm.new_type_error(format!(
"Can't cast source operand of type {:?} into target type of {:?}",
src_ty, dst_ty
"Can't cast source operand of type {src_ty:?} into target type of {dst_ty:?}",
))
}
/// Performs `val - arr`.
fn arrow_rsub(arr: &dyn Array, val: &dyn Array, vm: &VirtualMachine) -> PyResult<ArrayRef> {
arithmetic::subtract_dyn(val, arr).map_err(|e| vm.new_type_error(format!("rsub error: {}", e)))
arithmetic::subtract_dyn(val, arr).map_err(|e| vm.new_type_error(format!("rsub error: {e}")))
}
/// Performs `val / arr`
fn arrow_rtruediv(arr: &dyn Array, val: &dyn Array, vm: &VirtualMachine) -> PyResult<ArrayRef> {
arithmetic::divide_dyn(val, arr)
.map_err(|e| vm.new_type_error(format!("rtruediv error: {}", e)))
arithmetic::divide_dyn(val, arr).map_err(|e| vm.new_type_error(format!("rtruediv error: {e}")))
}
/// Performs `val / arr`, but cast to i64.
fn arrow_rfloordiv(arr: &dyn Array, val: &dyn Array, vm: &VirtualMachine) -> PyResult<ArrayRef> {
let array = arithmetic::divide_dyn(val, arr)
.map_err(|e| vm.new_type_error(format!("rtruediv divide error: {}", e)))?;
.map_err(|e| vm.new_type_error(format!("rtruediv divide error: {e}")))?;
compute::cast(&array, &ArrowDataType::Int64)
.map_err(|e| vm.new_type_error(format!("rtruediv cast error: {}", e)))
.map_err(|e| vm.new_type_error(format!("rtruediv cast error: {e}")))
}
fn wrap_result<F>(f: F) -> impl Fn(&dyn Array, &dyn Array, &VirtualMachine) -> PyResult<ArrayRef>
@@ -88,7 +86,7 @@ where
F: Fn(&dyn Array, &dyn Array) -> ArrowResult<ArrayRef>,
{
move |left, right, vm| {
f(left, right).map_err(|e| vm.new_type_error(format!("arithmetic error {}", e)))
f(left, right).map_err(|e| vm.new_type_error(format!("arithmetic error {e}")))
}
}
@@ -154,8 +152,7 @@ impl PyVector {
v
} else {
return Err(vm.new_type_error(format!(
"Can't cast pyobject {:?} into concrete type {:?}",
obj, datatype
"Can't cast pyobject {obj:?} into concrete type {datatype:?}",
)));
};
// Safety: `pyobj_try_to_typed_val()` has checked the data type.
@@ -262,8 +259,7 @@ impl PyVector {
Ok(Helper::try_into_vector(result.clone())
.map_err(|e| {
vm.new_type_error(format!(
"Can't cast result into vector, result: {:?}, err: {:?}",
result, e
"Can't cast result into vector, result: {result:?}, err: {e:?}",
))
})?
.into())
@@ -305,13 +301,12 @@ impl PyVector {
let right = cast(right, &target_type, vm)?;
let result = op(left.as_ref(), right.as_ref())
.map_err(|e| vm.new_type_error(format!("Can't compute op, error: {}", e)))?;
.map_err(|e| vm.new_type_error(format!("Can't compute op, error: {e}")))?;
Ok(Helper::try_into_vector(result.clone())
.map_err(|e| {
vm.new_type_error(format!(
"Can't cast result into vector, result: {:?}, err: {:?}",
result, e
"Can't cast result into vector, result: {result:?}, err: {e:?}",
))
})?
.into())
@@ -549,8 +544,7 @@ impl PyVector {
res.map_err(|err| vm.new_runtime_error(format!("Arrow Error: {err:#?}")))?;
let ret = Helper::try_into_vector(res.clone()).map_err(|e| {
vm.new_type_error(format!(
"Can't cast result into vector, result: {:?}, err: {:?}",
res, e
"Can't cast result into vector, result: {res:?}, err: {e:?}",
))
})?;
Ok(ret.into())
@@ -580,7 +574,7 @@ impl PyVector {
let res = compute::filter(self.to_arrow_array().as_ref(), mask)
.map_err(|err| vm.new_runtime_error(format!("Arrow Error: {err:#?}")))?;
let ret = Helper::try_into_vector(res.clone()).map_err(|e| {
vm.new_type_error(format!("Can't cast result into vector, err: {:?}", e))
vm.new_type_error(format!("Can't cast result into vector, err: {e:?}"))
})?;
Ok(Self::from(ret).into_pyobject(vm))
} else {
@@ -683,7 +677,7 @@ fn get_arrow_scalar_op(
move |a: &dyn Array, b: &dyn Array, vm| -> PyResult<ArrayRef> {
let array =
op_bool_arr(a, b).map_err(|e| vm.new_type_error(format!("scalar op error: {}", e)))?;
op_bool_arr(a, b).map_err(|e| vm.new_type_error(format!("scalar op error: {e}")))?;
Ok(Arc::new(array))
}
}
@@ -932,7 +926,7 @@ fn get_concrete_type(obj: &PyObjectRef, vm: &VirtualMachine) -> PyResult<Concret
} else if is_instance::<PyStr>(obj, vm) {
Ok(ConcreteDataType::string_datatype())
} else {
Err(vm.new_type_error(format!("Unsupported pyobject type: {:?}", obj)))
Err(vm.new_type_error(format!("Unsupported pyobject type: {obj:?}")))
}
}
@@ -1205,7 +1199,7 @@ pub mod tests {
}
}
} else {
panic!("{code}: {:?}", result)
panic!("{code}: {result:?}")
}
}
}
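
The `wrap_result` adapter in this last file is worth a note: it lifts a fallible Arrow kernel into a closure whose errors are rendered as messages for the Python VM. A simplified sketch, with `String` standing in for the VM error type (an assumption made to keep it self-contained):

// Adapt a fallible binary operation so failures become formatted messages,
// mirroring the shape of the `wrap_result` helper above.
fn wrap_result<T, E, F>(f: F) -> impl Fn(i64, i64) -> Result<T, String>
where
    E: std::fmt::Display,
    F: Fn(i64, i64) -> Result<T, E>,
{
    move |left, right| f(left, right).map_err(|e| format!("arithmetic error {e}"))
}

fn main() {
    let div = wrap_result(|l: i64, r: i64| l.checked_div(r).ok_or("division by zero"));
    assert_eq!(div(10, 2), Ok(5));
    assert_eq!(div(1, 0), Err("arithmetic error division by zero".to_string()));
}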

Some files were not shown because too many files have changed in this diff