diff --git a/Cargo.toml b/Cargo.toml index da64abd1da..92f320b267 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -74,7 +74,7 @@ resolver = "2" [workspace.package] version = "0.18.0" -edition = "2021" +edition = "2024" license = "Apache-2.0" [workspace.lints] diff --git a/src/api/src/error.rs b/src/api/src/error.rs index bc37363060..0b35bbc934 100644 --- a/src/api/src/error.rs +++ b/src/api/src/error.rs @@ -19,8 +19,8 @@ use common_error::status_code::StatusCode; use common_macro::stack_trace_debug; use common_time::timestamp::TimeUnit; use datatypes::prelude::ConcreteDataType; -use snafu::prelude::*; use snafu::Location; +use snafu::prelude::*; pub type Result<T> = std::result::Result<T, Error>; diff --git a/src/api/src/helper.rs b/src/api/src/helper.rs index ae51802de9..88b34381f0 100644 --- a/src/api/src/helper.rs +++ b/src/api/src/helper.rs @@ -16,15 +16,15 @@ use std::collections::HashSet; use std::sync::Arc; use common_base::BitVec; -use common_decimal::decimal128::{DECIMAL128_DEFAULT_SCALE, DECIMAL128_MAX_PRECISION}; use common_decimal::Decimal128; +use common_decimal::decimal128::{DECIMAL128_DEFAULT_SCALE, DECIMAL128_MAX_PRECISION}; use common_time::time::Time; use common_time::timestamp::TimeUnit; use common_time::{Date, IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth, Timestamp}; use datatypes::prelude::{ConcreteDataType, ValueRef}; use datatypes::scalars::ScalarVector; use datatypes::types::{ - Int16Type, Int8Type, IntervalType, TimeType, TimestampType, UInt16Type, UInt8Type, + Int8Type, Int16Type, IntervalType, TimeType, TimestampType, UInt8Type, UInt16Type, }; use datatypes::value::{OrderedF32, OrderedF64, Value}; use datatypes::vectors::{ @@ -295,7 +295,7 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper { | ConcreteDataType::Struct(_) | ConcreteDataType::Dictionary(_) | ConcreteDataType::Duration(_) => { - return error::IntoColumnDataTypeSnafu { from: datatype }.fail() + return error::IntoColumnDataTypeSnafu { from: datatype }.fail(); } }; let datatype_extension = match column_datatype { diff --git a/src/api/src/v1/column_def.rs b/src/api/src/v1/column_def.rs index 3c3d37aa3a..ec3d7da3bf 100644 --- a/src/api/src/v1/column_def.rs +++ b/src/api/src/v1/column_def.rs @@ -15,9 +15,9 @@ use std::collections::HashMap; use datatypes::schema::{ - ColumnDefaultConstraint, ColumnSchema, FulltextAnalyzer, FulltextBackend, FulltextOptions, - SkippingIndexOptions, SkippingIndexType, COMMENT_KEY, FULLTEXT_KEY, INVERTED_INDEX_KEY, - SKIPPING_INDEX_KEY, + COMMENT_KEY, ColumnDefaultConstraint, ColumnSchema, FULLTEXT_KEY, FulltextAnalyzer, + FulltextBackend, FulltextOptions, INVERTED_INDEX_KEY, SKIPPING_INDEX_KEY, SkippingIndexOptions, + SkippingIndexType, }; use greptime_proto::v1::{ Analyzer, FulltextBackend as PbFulltextBackend, SkippingIndexType as PbSkippingIndexType, diff --git a/src/auth/src/common.rs b/src/auth/src/common.rs index 3c48f527e4..96b8432580 100644 --- a/src/auth/src/common.rs +++ b/src/auth/src/common.rs @@ -17,13 +17,13 @@ use std::sync::Arc; use common_base::secrets::SecretString; use digest::Digest; use sha1::Sha1; -use snafu::{ensure, OptionExt}; +use snafu::{OptionExt, ensure}; use crate::error::{IllegalParamSnafu, InvalidConfigSnafu, Result, UserPasswordMismatchSnafu}; use crate::user_info::DefaultUserInfo; -use crate::user_provider::static_user_provider::{StaticUserProvider, STATIC_USER_PROVIDER}; +use crate::user_provider::static_user_provider::{STATIC_USER_PROVIDER, StaticUserProvider}; use crate::user_provider::watch_file_user_provider::{ - WatchFileUserProvider, 
WATCH_FILE_USER_PROVIDER, + WATCH_FILE_USER_PROVIDER, WatchFileUserProvider, }; use crate::{UserInfoRef, UserProviderRef}; diff --git a/src/auth/src/lib.rs b/src/auth/src/lib.rs index 140a9cf7be..54397cef36 100644 --- a/src/auth/src/lib.rs +++ b/src/auth/src/lib.rs @@ -22,13 +22,13 @@ mod user_provider; pub mod tests; pub use common::{ - auth_mysql, static_user_provider_from_option, user_provider_from_option, userinfo_by_name, - HashedPassword, Identity, Password, + HashedPassword, Identity, Password, auth_mysql, static_user_provider_from_option, + user_provider_from_option, userinfo_by_name, }; pub use permission::{PermissionChecker, PermissionReq, PermissionResp}; pub use user_info::UserInfo; -pub use user_provider::static_user_provider::StaticUserProvider; pub use user_provider::UserProvider; +pub use user_provider::static_user_provider::StaticUserProvider; /// pub type alias pub type UserInfoRef = std::sync::Arc<dyn UserInfo>; diff --git a/src/auth/src/tests.rs b/src/auth/src/tests.rs index ef5bf9a6b5..1e3e7399a6 100644 --- a/src/auth/src/tests.rs +++ b/src/auth/src/tests.rs @@ -21,7 +21,7 @@ use crate::error::{ UserPasswordMismatchSnafu, }; use crate::user_info::DefaultUserInfo; -use crate::{auth_mysql, Identity, Password, UserInfoRef, UserProvider}; +use crate::{Identity, Password, UserInfoRef, UserProvider, auth_mysql}; pub struct DatabaseAuthInfo<'a> { pub catalog: &'a str, diff --git a/src/auth/src/user_provider.rs b/src/auth/src/user_provider.rs index 526e72b775..099f48e437 100644 --- a/src/auth/src/user_provider.rs +++ b/src/auth/src/user_provider.rs @@ -22,7 +22,7 @@ use std::io::BufRead; use std::path::Path; use common_base::secrets::ExposeSecret; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use crate::common::{Identity, Password}; use crate::error::{ @@ -30,7 +30,7 @@ use crate::error::{ UserNotFoundSnafu, UserPasswordMismatchSnafu, }; use crate::user_info::DefaultUserInfo; -use crate::{auth_mysql, UserInfoRef}; +use crate::{UserInfoRef, auth_mysql}; #[async_trait::async_trait] pub trait UserProvider: Send + Sync { diff --git a/src/auth/src/user_provider/static_user_provider.rs b/src/auth/src/user_provider/static_user_provider.rs index 59a4791f7a..74ece0652b 100644 --- a/src/auth/src/user_provider/static_user_provider.rs +++ b/src/auth/src/user_provider/static_user_provider.rs @@ -102,10 +102,10 @@ pub mod test { use common_test_util::temp_dir::create_temp_dir; + use crate::UserProvider; use crate::user_info::DefaultUserInfo; use crate::user_provider::static_user_provider::StaticUserProvider; use crate::user_provider::{Identity, Password}; - use crate::UserProvider; async fn test_authenticate(provider: &dyn UserProvider, username: &str, password: &str) { let re = provider @@ -143,12 +143,13 @@ pub mod test { let file = File::create(&file_path); let file = file.unwrap(); let mut lw = LineWriter::new(file); - assert!(lw - .write_all( + assert!( + lw.write_all( b"root=123456 admin=654321", ) - .is_ok()); + .is_ok() + ); lw.flush().unwrap(); } diff --git a/src/auth/src/user_provider/watch_file_user_provider.rs b/src/auth/src/user_provider/watch_file_user_provider.rs index 4a654f2f31..c28413d917 100644 --- a/src/auth/src/user_provider/watch_file_user_provider.rs +++ b/src/auth/src/user_provider/watch_file_user_provider.rs @@ -20,7 +20,7 @@ use std::sync::{Arc, Mutex}; use async_trait::async_trait; use common_telemetry::{info, warn}; use notify::{EventKind, RecursiveMode, Watcher}; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use 
crate::error::{FileWatchSnafu, InvalidConfigSnafu, Result}; use crate::user_info::DefaultUserInfo; @@ -133,9 +133,9 @@ pub mod test { use common_test_util::temp_dir::create_temp_dir; use tokio::time::sleep; + use crate::UserProvider; use crate::user_provider::watch_file_user_provider::WatchFileUserProvider; use crate::user_provider::{Identity, Password}; - use crate::UserProvider; async fn test_authenticate( provider: &dyn UserProvider, diff --git a/src/cache/src/lib.rs b/src/cache/src/lib.rs index f267a74346..59d2490f7c 100644 --- a/src/cache/src/lib.rs +++ b/src/cache/src/lib.rs @@ -19,9 +19,9 @@ use std::time::Duration; use catalog::kvbackend::new_table_cache; use common_meta::cache::{ - new_schema_cache, new_table_flownode_set_cache, new_table_info_cache, new_table_name_cache, - new_table_route_cache, new_table_schema_cache, new_view_info_cache, CacheRegistry, - CacheRegistryBuilder, LayeredCacheRegistryBuilder, + CacheRegistry, CacheRegistryBuilder, LayeredCacheRegistryBuilder, new_schema_cache, + new_table_flownode_set_cache, new_table_info_cache, new_table_name_cache, + new_table_route_cache, new_table_schema_cache, new_view_info_cache, }; use common_meta::kv_backend::KvBackendRef; use moka::future::CacheBuilder; diff --git a/src/catalog/src/kvbackend.rs b/src/catalog/src/kvbackend.rs index 3f61894a30..d7f32fc66d 100644 --- a/src/catalog/src/kvbackend.rs +++ b/src/catalog/src/kvbackend.rs @@ -21,4 +21,4 @@ mod table_cache; pub use builder::KvBackendCatalogManagerBuilder; pub use manager::KvBackendCatalogManager; -pub use table_cache::{new_table_cache, TableCache, TableCacheRef}; +pub use table_cache::{TableCache, TableCacheRef, new_table_cache}; diff --git a/src/catalog/src/kvbackend/builder.rs b/src/catalog/src/kvbackend/builder.rs index 2834673baa..51a6154948 100644 --- a/src/catalog/src/kvbackend/builder.rs +++ b/src/catalog/src/kvbackend/builder.rs @@ -16,8 +16,8 @@ use std::sync::Arc; use common_catalog::consts::DEFAULT_CATALOG_NAME; use common_meta::cache::LayeredCacheRegistryRef; -use common_meta::key::flow::FlowMetadataManager; use common_meta::key::TableMetadataManager; +use common_meta::key::flow::FlowMetadataManager; use common_meta::kv_backend::KvBackendRef; use common_procedure::ProcedureManagerRef; use moka::sync::Cache; @@ -26,8 +26,8 @@ use partition::manager::PartitionRuleManager; #[cfg(feature = "enterprise")] use crate::information_schema::InformationSchemaTableFactoryRef; use crate::information_schema::{InformationExtensionRef, InformationSchemaProvider}; -use crate::kvbackend::manager::{SystemCatalog, CATALOG_CACHE_MAX_CAPACITY}; use crate::kvbackend::KvBackendCatalogManager; +use crate::kvbackend::manager::{CATALOG_CACHE_MAX_CAPACITY, SystemCatalog}; use crate::process_manager::ProcessManagerRef; use crate::system_schema::pg_catalog::PGCatalogProvider; diff --git a/src/catalog/src/kvbackend/client.rs b/src/catalog/src/kvbackend/client.rs index 8818e74a48..f74509217f 100644 --- a/src/catalog/src/kvbackend/client.rs +++ b/src/catalog/src/kvbackend/client.rs @@ -24,12 +24,12 @@ use common_meta::error::Error::CacheNotGet; use common_meta::error::{CacheNotGetSnafu, Error, ExternalSnafu, GetKvCacheSnafu, Result}; use common_meta::kv_backend::txn::{Txn, TxnResponse}; use common_meta::kv_backend::{KvBackend, KvBackendRef, TxnService}; +use common_meta::rpc::KeyValue; use common_meta::rpc::store::{ BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest, BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, 
DeleteRangeRequest, DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse, }; -use common_meta::rpc::KeyValue; use common_telemetry::debug; use meta_client::client::MetaClient; use moka::future::{Cache, CacheBuilder}; @@ -461,17 +461,17 @@ impl KvBackend for MetaKvBackend { #[cfg(test)] mod tests { use std::any::Any; - use std::sync::atomic::{AtomicU32, Ordering}; use std::sync::Arc; + use std::sync::atomic::{AtomicU32, Ordering}; use async_trait::async_trait; use common_meta::kv_backend::{KvBackend, TxnService}; + use common_meta::rpc::KeyValue; use common_meta::rpc::store::{ BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest, BatchPutResponse, DeleteRangeRequest, DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse, }; - use common_meta::rpc::KeyValue; use dashmap::DashMap; use super::CachedKvBackend; diff --git a/src/catalog/src/kvbackend/manager.rs b/src/catalog/src/kvbackend/manager.rs index 4008fe38f9..902f15c09e 100644 --- a/src/catalog/src/kvbackend/manager.rs +++ b/src/catalog/src/kvbackend/manager.rs @@ -26,12 +26,12 @@ use common_meta::cache::{ LayeredCacheRegistryRef, TableInfoCacheRef, TableNameCacheRef, TableRoute, TableRouteCacheRef, ViewInfoCacheRef, }; +use common_meta::key::TableMetadataManagerRef; use common_meta::key::catalog_name::CatalogNameKey; use common_meta::key::flow::FlowMetadataManager; use common_meta::key::schema_name::SchemaNameKey; use common_meta::key::table_info::{TableInfoManager, TableInfoValue}; use common_meta::key::table_name::TableNameKey; -use common_meta::key::TableMetadataManagerRef; use common_meta::kv_backend::KvBackendRef; use common_procedure::ProcedureManagerRef; use futures_util::stream::BoxStream; @@ -41,15 +41,16 @@ use partition::manager::PartitionRuleManagerRef; use session::context::{Channel, QueryContext}; use snafu::prelude::*; use store_api::metric_engine_consts::METRIC_ENGINE_NAME; +use table::TableRef; use table::dist_table::DistTable; use table::metadata::{TableId, TableInfoRef}; -use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME}; use table::table::PartitionRules; +use table::table::numbers::{NUMBERS_TABLE_NAME, NumbersTable}; use table::table_name::TableName; -use table::TableRef; use tokio::sync::Semaphore; use tokio_stream::wrappers::ReceiverStream; +use crate::CatalogManager; use crate::error::{ CacheNotFoundSnafu, GetTableCacheSnafu, InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu, ListSchemasSnafu, ListTablesSnafu, Result, TableMetadataManagerSnafu, @@ -59,9 +60,8 @@ use crate::information_schema::InformationSchemaTableFactoryRef; use crate::information_schema::{InformationExtensionRef, InformationSchemaProvider}; use crate::kvbackend::TableCacheRef; use crate::process_manager::ProcessManagerRef; -use crate::system_schema::pg_catalog::PGCatalogProvider; use crate::system_schema::SystemSchemaProvider; -use crate::CatalogManager; +use crate::system_schema::pg_catalog::PGCatalogProvider; /// Access all existing catalog, schema and tables. 
/// diff --git a/src/catalog/src/kvbackend/table_cache.rs b/src/catalog/src/kvbackend/table_cache.rs index c890960bd6..ea328c3e17 100644 --- a/src/catalog/src/kvbackend/table_cache.rs +++ b/src/catalog/src/kvbackend/table_cache.rs @@ -20,9 +20,9 @@ use common_meta::instruction::CacheIdent; use futures::future::BoxFuture; use moka::future::Cache; use snafu::OptionExt; +use table::TableRef; use table::dist_table::DistTable; use table::table_name::TableName; -use table::TableRef; pub type TableCacheRef = Arc<TableCache>; diff --git a/src/catalog/src/lib.rs b/src/catalog/src/lib.rs index b664c08b60..694594d052 100644 --- a/src/catalog/src/lib.rs +++ b/src/catalog/src/lib.rs @@ -25,8 +25,8 @@ use common_catalog::consts::{INFORMATION_SCHEMA_NAME, PG_CATALOG_NAME}; use futures::future::BoxFuture; use futures_util::stream::BoxStream; use session::context::QueryContext; -use table::metadata::{TableId, TableInfoRef}; use table::TableRef; +use table::metadata::{TableId, TableInfoRef}; use crate::error::Result; diff --git a/src/catalog/src/memory.rs b/src/catalog/src/memory.rs index f1216e80af..2d5452d616 100644 --- a/src/catalog/src/memory.rs +++ b/src/catalog/src/memory.rs @@ -14,4 +14,4 @@ pub mod manager; -pub use manager::{new_memory_catalog_manager, MemoryCatalogManager}; +pub use manager::{MemoryCatalogManager, new_memory_catalog_manager}; diff --git a/src/catalog/src/memory/manager.rs b/src/catalog/src/memory/manager.rs index 4c3fbd4c23..666fa8d5b9 100644 --- a/src/catalog/src/memory/manager.rs +++ b/src/catalog/src/memory/manager.rs @@ -28,8 +28,8 @@ use common_meta::kv_backend::memory::MemoryKvBackend; use futures_util::stream::BoxStream; use session::context::QueryContext; use snafu::OptionExt; -use table::metadata::{TableId, TableInfoRef}; use table::TableRef; +use table::metadata::{TableId, TableInfoRef}; use crate::error::{CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu}; use crate::information_schema::InformationSchemaProvider; @@ -419,7 +419,7 @@ pub fn new_memory_catalog_manager() -> Result<Arc<MemoryCatalogManager>> { mod tests { use common_catalog::consts::*; use futures_util::TryStreamExt; - use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME}; + use table::table::numbers::{NUMBERS_TABLE_NAME, NumbersTable}; use super::*; @@ -454,16 +454,18 @@ mod tests { tables[0].table_info().table_id() ); - assert!(catalog_list - .table( - DEFAULT_CATALOG_NAME, - DEFAULT_SCHEMA_NAME, - "not_exists", - None - ) - .await - .unwrap() - .is_none()); + assert!( + catalog_list + .table( + DEFAULT_CATALOG_NAME, + DEFAULT_SCHEMA_NAME, + "not_exists", + None + ) + .await + .unwrap() + .is_none() + ); } #[test] @@ -486,11 +488,13 @@ mod tests { table: NumbersTable::table(2333), }; catalog.register_table_sync(register_table_req).unwrap(); - assert!(catalog - .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name, None) - .await - .unwrap() - .is_some()); + assert!( + catalog + .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name, None) + .await + .unwrap() + .is_some() + ); let deregister_table_req = DeregisterTableRequest { catalog: DEFAULT_CATALOG_NAME.to_string(), @@ -498,10 +502,12 @@ mod tests { table_name: table_name.to_string(), }; catalog.deregister_table_sync(deregister_table_req).unwrap(); - assert!(catalog - .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name, None) - .await - .unwrap() - .is_none()); + assert!( + catalog + .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name, None) + .await + .unwrap() + .is_none() + ); } } diff --git a/src/catalog/src/process_manager.rs 
b/src/catalog/src/process_manager.rs index 4fe89b59e8..353ec01714 100644 --- a/src/catalog/src/process_manager.rs +++ b/src/catalog/src/process_manager.rs @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::collections::hash_map::Entry; use std::collections::HashMap; +use std::collections::hash_map::Entry; use std::fmt::{Debug, Display, Formatter}; use std::sync::atomic::{AtomicU32, Ordering}; use std::sync::{Arc, RwLock}; @@ -30,7 +30,7 @@ use common_time::util::current_time_millis; use meta_client::MetaClientRef; use promql_parser::parser::EvalStmt; use rand::random; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use sql::statements::statement::Statement; use crate::error; diff --git a/src/catalog/src/system_schema/information_schema.rs b/src/catalog/src/system_schema/information_schema.rs index 82e3b8e68e..724859c904 100644 --- a/src/catalog/src/system_schema/information_schema.rs +++ b/src/catalog/src/system_schema/information_schema.rs @@ -36,8 +36,8 @@ use common_catalog::consts::{self, DEFAULT_CATALOG_NAME, INFORMATION_SCHEMA_NAME use common_error::ext::ErrorExt; use common_meta::cluster::NodeInfo; use common_meta::datanode::RegionStat; -use common_meta::key::flow::flow_state::FlowStat; use common_meta::key::flow::FlowMetadataManager; +use common_meta::key::flow::flow_state::FlowStat; use common_meta::kv_backend::KvBackendRef; use common_procedure::ProcedureInfo; use common_recordbatch::SendableRecordBatchStream; @@ -46,12 +46,13 @@ use lazy_static::lazy_static; use paste::paste; use process_list::InformationSchemaProcessList; use store_api::storage::{ScanRequest, TableId}; -use table::metadata::TableType; use table::TableRef; +use table::metadata::TableType; pub use table_names::*; use views::InformationSchemaViews; use self::columns::InformationSchemaColumns; +use crate::CatalogManager; use crate::error::{Error, Result}; use crate::process_manager::ProcessManagerRef; use crate::system_schema::information_schema::cluster_info::InformationSchemaClusterInfo; @@ -69,7 +70,6 @@ pub(crate) use crate::system_schema::predicate::Predicates; use crate::system_schema::{ SystemSchemaProvider, SystemSchemaProviderInner, SystemTable, SystemTableRef, }; -use crate::CatalogManager; lazy_static! { // Memory tables in `information_schema`. 
diff --git a/src/catalog/src/system_schema/information_schema/cluster_info.rs b/src/catalog/src/system_schema/information_schema/cluster_info.rs index 7672791df6..8b95d85f4c 100644 --- a/src/catalog/src/system_schema/information_schema/cluster_info.rs +++ b/src/catalog/src/system_schema/information_schema/cluster_info.rs @@ -23,9 +23,9 @@ use common_recordbatch::adapter::RecordBatchStreamAdapter; use common_recordbatch::{RecordBatch, SendableRecordBatchStream}; use common_time::timestamp::Timestamp; use datafusion::execution::TaskContext; +use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream; use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter; use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream; -use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream; use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef}; use datatypes::schema::{ColumnSchema, Schema, SchemaRef}; use datatypes::timestamp::TimestampMillisecond; @@ -36,10 +36,10 @@ use datatypes::vectors::{ use snafu::ResultExt; use store_api::storage::{ScanRequest, TableId}; -use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result}; -use crate::system_schema::information_schema::{InformationTable, Predicates, CLUSTER_INFO}; -use crate::system_schema::utils; use crate::CatalogManager; +use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result}; +use crate::system_schema::information_schema::{CLUSTER_INFO, InformationTable, Predicates}; +use crate::system_schema::utils; const PEER_ID: &str = "peer_id"; const PEER_TYPE: &str = "peer_type"; diff --git a/src/catalog/src/system_schema/information_schema/columns.rs b/src/catalog/src/system_schema/information_schema/columns.rs index 0c866f4841..df39e75e98 100644 --- a/src/catalog/src/system_schema/information_schema/columns.rs +++ b/src/catalog/src/system_schema/information_schema/columns.rs @@ -23,9 +23,9 @@ use common_error::ext::BoxedError; use common_recordbatch::adapter::RecordBatchStreamAdapter; use common_recordbatch::{RecordBatch, SendableRecordBatchStream}; use datafusion::execution::TaskContext; +use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream; use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter; use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream; -use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream; use datatypes::prelude::{ConcreteDataType, DataType, MutableVector}; use datatypes::scalars::ScalarVectorBuilder; use datatypes::schema::{ColumnSchema, Schema, SchemaRef}; @@ -38,12 +38,12 @@ use snafu::{OptionExt, ResultExt}; use sql::statements; use store_api::storage::{ScanRequest, TableId}; +use crate::CatalogManager; use crate::error::{ CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu, }; use crate::information_schema::Predicates; -use crate::system_schema::information_schema::{InformationTable, COLUMNS}; -use crate::CatalogManager; +use crate::system_schema::information_schema::{COLUMNS, InformationTable}; #[derive(Debug)] pub(super) struct InformationSchemaColumns { diff --git a/src/catalog/src/system_schema/information_schema/flows.rs b/src/catalog/src/system_schema/information_schema/flows.rs index 24b75e62e5..838c41ab4e 100644 --- a/src/catalog/src/system_schema/information_schema/flows.rs +++ 
b/src/catalog/src/system_schema/information_schema/flows.rs @@ -16,10 +16,10 @@ use std::sync::{Arc, Weak}; use common_catalog::consts::INFORMATION_SCHEMA_FLOW_TABLE_ID; use common_error::ext::BoxedError; +use common_meta::key::FlowId; +use common_meta::key::flow::FlowMetadataManager; use common_meta::key::flow::flow_info::FlowInfoValue; use common_meta::key::flow::flow_state::FlowStat; -use common_meta::key::flow::FlowMetadataManager; -use common_meta::key::FlowId; use common_recordbatch::adapter::RecordBatchStreamAdapter; use common_recordbatch::{DfSendableRecordBatchStream, RecordBatch, SendableRecordBatchStream}; use datafusion::execution::TaskContext; @@ -38,14 +38,14 @@ use futures::TryStreamExt; use snafu::{OptionExt, ResultExt}; use store_api::storage::{ScanRequest, TableId}; +use crate::CatalogManager; use crate::error::{ CreateRecordBatchSnafu, FlowInfoNotFoundSnafu, InternalSnafu, JsonSnafu, ListFlowsSnafu, Result, UpgradeWeakCatalogManagerRefSnafu, }; -use crate::information_schema::{Predicates, FLOWS}; +use crate::information_schema::{FLOWS, Predicates}; use crate::system_schema::information_schema::InformationTable; use crate::system_schema::utils; -use crate::CatalogManager; const INIT_CAPACITY: usize = 42; diff --git a/src/catalog/src/system_schema/information_schema/information_memory_table.rs b/src/catalog/src/system_schema/information_schema/information_memory_table.rs index 20d14a4ba5..03fbd16e13 100644 --- a/src/catalog/src/system_schema/information_schema/information_memory_table.rs +++ b/src/catalog/src/system_schema/information_schema/information_memory_table.rs @@ -89,9 +89,9 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) { vec![ Arc::new(StringVector::from(vec![build_info.branch.to_string()])), Arc::new(StringVector::from(vec![build_info.commit.to_string()])), - Arc::new(StringVector::from(vec![build_info - .commit_short - .to_string()])), + Arc::new(StringVector::from(vec![ + build_info.commit_short.to_string(), + ])), Arc::new(StringVector::from(vec![build_info.clean.to_string()])), Arc::new(StringVector::from(vec![build_info.version.to_string()])), ], @@ -369,17 +369,9 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) { TRIGGERS => ( vec![ string_column("TRIGGER_NAME"), - ColumnSchema::new( - "trigger_id", - ConcreteDataType::uint64_datatype(), - false, - ), + ColumnSchema::new("trigger_id", ConcreteDataType::uint64_datatype(), false), string_column("TRIGGER_DEFINITION"), - ColumnSchema::new( - "flownode_id", - ConcreteDataType::uint64_datatype(), - true, - ), + ColumnSchema::new("flownode_id", ConcreteDataType::uint64_datatype(), true), ], vec![], ), diff --git a/src/catalog/src/system_schema/information_schema/key_column_usage.rs b/src/catalog/src/system_schema/information_schema/key_column_usage.rs index ffcd5eaaa5..01ca90556f 100644 --- a/src/catalog/src/system_schema/information_schema/key_column_usage.rs +++ b/src/catalog/src/system_schema/information_schema/key_column_usage.rs @@ -20,9 +20,9 @@ use common_error::ext::BoxedError; use common_recordbatch::adapter::RecordBatchStreamAdapter; use common_recordbatch::{RecordBatch, SendableRecordBatchStream}; use datafusion::execution::TaskContext; +use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream; use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter; use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream; -use datafusion::physical_plan::SendableRecordBatchStream as 
DfSendableRecordBatchStream; use datatypes::prelude::{ConcreteDataType, MutableVector, ScalarVectorBuilder, VectorRef}; use datatypes::schema::{ColumnSchema, FulltextBackend, Schema, SchemaRef}; use datatypes::value::Value; @@ -31,11 +31,11 @@ use futures_util::TryStreamExt; use snafu::{OptionExt, ResultExt}; use store_api::storage::{ScanRequest, TableId}; +use crate::CatalogManager; use crate::error::{ CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu, }; -use crate::system_schema::information_schema::{InformationTable, Predicates, KEY_COLUMN_USAGE}; -use crate::CatalogManager; +use crate::system_schema::information_schema::{InformationTable, KEY_COLUMN_USAGE, Predicates}; pub const CONSTRAINT_SCHEMA: &str = "constraint_schema"; pub const CONSTRAINT_NAME: &str = "constraint_name"; @@ -277,15 +277,15 @@ impl InformationSchemaKeyColumnUsageBuilder { constraints.push(CONSTRAINT_NAME_INVERTED_INDEX); greptime_index_type.push(INDEX_TYPE_INVERTED_INDEX); } - if let Ok(Some(options)) = column.fulltext_options() { - if options.enable { - constraints.push(CONSTRAINT_NAME_FULLTEXT_INDEX); - let index_type = match options.backend { - FulltextBackend::Bloom => INDEX_TYPE_FULLTEXT_BLOOM, - FulltextBackend::Tantivy => INDEX_TYPE_FULLTEXT_TANTIVY, - }; - greptime_index_type.push(index_type); - } + if let Ok(Some(options)) = column.fulltext_options() + && options.enable + { + constraints.push(CONSTRAINT_NAME_FULLTEXT_INDEX); + let index_type = match options.backend { + FulltextBackend::Bloom => INDEX_TYPE_FULLTEXT_BLOOM, + FulltextBackend::Tantivy => INDEX_TYPE_FULLTEXT_TANTIVY, + }; + greptime_index_type.push(index_type); } if column.is_skipping_indexed() { constraints.push(CONSTRAINT_NAME_SKIPPING_INDEX); diff --git a/src/catalog/src/system_schema/information_schema/partitions.rs b/src/catalog/src/system_schema/information_schema/partitions.rs index 7fed20ec3c..3550648b43 100644 --- a/src/catalog/src/system_schema/information_schema/partitions.rs +++ b/src/catalog/src/system_schema/information_schema/partitions.rs @@ -21,9 +21,9 @@ use common_error::ext::BoxedError; use common_recordbatch::adapter::RecordBatchStreamAdapter; use common_recordbatch::{RecordBatch, SendableRecordBatchStream}; use datafusion::execution::TaskContext; +use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream; use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter; use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream; -use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream; use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef}; use datatypes::schema::{ColumnSchema, Schema, SchemaRef}; use datatypes::timestamp::TimestampMicrosecond; @@ -39,13 +39,13 @@ use snafu::{OptionExt, ResultExt}; use store_api::storage::{ScanRequest, TableId}; use table::metadata::{TableInfo, TableType}; +use crate::CatalogManager; use crate::error::{ CreateRecordBatchSnafu, FindPartitionsSnafu, InternalSnafu, PartitionManagerNotFoundSnafu, Result, UpgradeWeakCatalogManagerRefSnafu, }; use crate::kvbackend::KvBackendCatalogManager; -use crate::system_schema::information_schema::{InformationTable, Predicates, PARTITIONS}; -use crate::CatalogManager; +use crate::system_schema::information_schema::{InformationTable, PARTITIONS, Predicates}; const TABLE_CATALOG: &str = "table_catalog"; const TABLE_SCHEMA: &str = "table_schema"; diff --git 
a/src/catalog/src/system_schema/information_schema/procedure_info.rs b/src/catalog/src/system_schema/information_schema/procedure_info.rs index 3ae237fdf1..b9a892d2ac 100644 --- a/src/catalog/src/system_schema/information_schema/procedure_info.rs +++ b/src/catalog/src/system_schema/information_schema/procedure_info.rs @@ -22,9 +22,9 @@ use common_recordbatch::adapter::RecordBatchStreamAdapter; use common_recordbatch::{RecordBatch, SendableRecordBatchStream}; use common_time::timestamp::Timestamp; use datafusion::execution::TaskContext; +use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream; use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter; use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream; -use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream; use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef}; use datatypes::schema::{ColumnSchema, Schema, SchemaRef}; use datatypes::timestamp::TimestampMillisecond; @@ -33,10 +33,10 @@ use datatypes::vectors::{StringVectorBuilder, TimestampMillisecondVectorBuilder} use snafu::ResultExt; use store_api::storage::{ScanRequest, TableId}; -use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result}; -use crate::system_schema::information_schema::{InformationTable, Predicates, PROCEDURE_INFO}; -use crate::system_schema::utils; use crate::CatalogManager; +use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result}; +use crate::system_schema::information_schema::{InformationTable, PROCEDURE_INFO, Predicates}; +use crate::system_schema::utils; const PROCEDURE_ID: &str = "procedure_id"; const PROCEDURE_TYPE: &str = "procedure_type"; diff --git a/src/catalog/src/system_schema/information_schema/region_peers.rs b/src/catalog/src/system_schema/information_schema/region_peers.rs index 1d46a9db01..111dc15cc5 100644 --- a/src/catalog/src/system_schema/information_schema/region_peers.rs +++ b/src/catalog/src/system_schema/information_schema/region_peers.rs @@ -23,9 +23,9 @@ use common_recordbatch::adapter::RecordBatchStreamAdapter; use common_recordbatch::{RecordBatch, SendableRecordBatchStream}; use datafusion::common::HashMap; use datafusion::execution::TaskContext; +use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream; use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter; use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream; -use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream; use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef}; use datatypes::schema::{ColumnSchema, Schema, SchemaRef}; use datatypes::value::Value; @@ -35,13 +35,13 @@ use snafu::{OptionExt, ResultExt}; use store_api::storage::{RegionId, ScanRequest, TableId}; use table::metadata::TableType; +use crate::CatalogManager; use crate::error::{ CreateRecordBatchSnafu, FindRegionRoutesSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu, }; use crate::kvbackend::KvBackendCatalogManager; use crate::system_schema::information_schema::{InformationTable, Predicates, REGION_PEERS}; -use crate::CatalogManager; pub const TABLE_CATALOG: &str = "table_catalog"; pub const TABLE_SCHEMA: &str = "table_schema"; diff --git a/src/catalog/src/system_schema/information_schema/region_statistics.rs b/src/catalog/src/system_schema/information_schema/region_statistics.rs index 
6199aef3aa..7d90810f05 100644 --- a/src/catalog/src/system_schema/information_schema/region_statistics.rs +++ b/src/catalog/src/system_schema/information_schema/region_statistics.rs @@ -30,11 +30,11 @@ use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, UInt64VectorB use snafu::ResultExt; use store_api::storage::{ScanRequest, TableId}; +use crate::CatalogManager; use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result}; use crate::information_schema::Predicates; use crate::system_schema::information_schema::{InformationTable, REGION_STATISTICS}; use crate::system_schema::utils; -use crate::CatalogManager; const REGION_ID: &str = "region_id"; const TABLE_ID: &str = "table_id"; diff --git a/src/catalog/src/system_schema/information_schema/runtime_metrics.rs b/src/catalog/src/system_schema/information_schema/runtime_metrics.rs index 2af178c7a9..5ccb871321 100644 --- a/src/catalog/src/system_schema/information_schema/runtime_metrics.rs +++ b/src/catalog/src/system_schema/information_schema/runtime_metrics.rs @@ -21,9 +21,9 @@ use common_recordbatch::adapter::RecordBatchStreamAdapter; use common_recordbatch::{RecordBatch, SendableRecordBatchStream}; use common_time::util::current_time_millis; use datafusion::execution::TaskContext; +use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream; use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter; use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream; -use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream; use datatypes::prelude::{ConcreteDataType, MutableVector}; use datatypes::scalars::ScalarVectorBuilder; use datatypes::schema::{ColumnSchema, Schema, SchemaRef}; diff --git a/src/catalog/src/system_schema/information_schema/schemata.rs b/src/catalog/src/system_schema/information_schema/schemata.rs index 6738267d3f..c8756057cd 100644 --- a/src/catalog/src/system_schema/information_schema/schemata.rs +++ b/src/catalog/src/system_schema/information_schema/schemata.rs @@ -21,9 +21,9 @@ use common_meta::key::schema_name::SchemaNameKey; use common_recordbatch::adapter::RecordBatchStreamAdapter; use common_recordbatch::{RecordBatch, SendableRecordBatchStream}; use datafusion::execution::TaskContext; +use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream; use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter; use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream; -use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream; use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef}; use datatypes::schema::{ColumnSchema, Schema, SchemaRef}; use datatypes::value::Value; @@ -31,13 +31,13 @@ use datatypes::vectors::StringVectorBuilder; use snafu::{OptionExt, ResultExt}; use store_api::storage::{ScanRequest, TableId}; +use crate::CatalogManager; use crate::error::{ CreateRecordBatchSnafu, InternalSnafu, Result, TableMetadataManagerSnafu, UpgradeWeakCatalogManagerRefSnafu, }; use crate::system_schema::information_schema::{InformationTable, Predicates, SCHEMATA}; use crate::system_schema::utils; -use crate::CatalogManager; pub const CATALOG_NAME: &str = "catalog_name"; pub const SCHEMA_NAME: &str = "schema_name"; diff --git a/src/catalog/src/system_schema/information_schema/table_constraints.rs b/src/catalog/src/system_schema/information_schema/table_constraints.rs 
index 77ac93632f..8c975207f2 100644 --- a/src/catalog/src/system_schema/information_schema/table_constraints.rs +++ b/src/catalog/src/system_schema/information_schema/table_constraints.rs @@ -20,9 +20,9 @@ use common_error::ext::BoxedError; use common_recordbatch::adapter::RecordBatchStreamAdapter; use common_recordbatch::{RecordBatch, SendableRecordBatchStream}; use datafusion::execution::TaskContext; +use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream; use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter; use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream; -use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream; use datatypes::prelude::{ConcreteDataType, MutableVector}; use datatypes::scalars::ScalarVectorBuilder; use datatypes::schema::{ColumnSchema, Schema, SchemaRef}; @@ -32,15 +32,15 @@ use futures::TryStreamExt; use snafu::{OptionExt, ResultExt}; use store_api::storage::{ScanRequest, TableId}; +use crate::CatalogManager; use crate::error::{ CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu, }; +use crate::information_schema::Predicates; use crate::information_schema::key_column_usage::{ CONSTRAINT_NAME_PRI, CONSTRAINT_NAME_TIME_INDEX, }; -use crate::information_schema::Predicates; use crate::system_schema::information_schema::{InformationTable, TABLE_CONSTRAINTS}; -use crate::CatalogManager; /// The `TABLE_CONSTRAINTS` table describes which tables have constraints. #[derive(Debug)] diff --git a/src/catalog/src/system_schema/information_schema/tables.rs b/src/catalog/src/system_schema/information_schema/tables.rs index f96d47b16d..01f6f0d0bd 100644 --- a/src/catalog/src/system_schema/information_schema/tables.rs +++ b/src/catalog/src/system_schema/information_schema/tables.rs @@ -23,9 +23,9 @@ use common_recordbatch::adapter::RecordBatchStreamAdapter; use common_recordbatch::{RecordBatch, SendableRecordBatchStream}; use common_telemetry::error; use datafusion::execution::TaskContext; +use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream; use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter; use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream; -use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream; use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef}; use datatypes::schema::{ColumnSchema, Schema, SchemaRef}; use datatypes::value::Value; @@ -37,12 +37,12 @@ use snafu::{OptionExt, ResultExt}; use store_api::storage::{RegionId, ScanRequest, TableId}; use table::metadata::{TableInfo, TableType}; +use crate::CatalogManager; use crate::error::{ CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu, }; use crate::system_schema::information_schema::{InformationTable, Predicates, TABLES}; use crate::system_schema::utils; -use crate::CatalogManager; pub const TABLE_CATALOG: &str = "table_catalog"; pub const TABLE_SCHEMA: &str = "table_schema"; diff --git a/src/catalog/src/system_schema/information_schema/views.rs b/src/catalog/src/system_schema/information_schema/views.rs index d0424f528d..d1eb3b80ac 100644 --- a/src/catalog/src/system_schema/information_schema/views.rs +++ b/src/catalog/src/system_schema/information_schema/views.rs @@ -20,9 +20,9 @@ use common_error::ext::BoxedError; use common_recordbatch::adapter::RecordBatchStreamAdapter; 
use common_recordbatch::{RecordBatch, SendableRecordBatchStream}; use datafusion::execution::TaskContext; +use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream; use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter; use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream; -use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream; use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef}; use datatypes::schema::{ColumnSchema, Schema, SchemaRef}; use datatypes::value::Value; @@ -32,13 +32,13 @@ use snafu::{OptionExt, ResultExt}; use store_api::storage::{ScanRequest, TableId}; use table::metadata::TableType; +use crate::CatalogManager; use crate::error::{ CastManagerSnafu, CreateRecordBatchSnafu, GetViewCacheSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu, ViewInfoNotFoundSnafu, }; use crate::kvbackend::KvBackendCatalogManager; use crate::system_schema::information_schema::{InformationTable, Predicates, VIEWS}; -use crate::CatalogManager; const INIT_CAPACITY: usize = 42; pub const TABLE_CATALOG: &str = "table_catalog"; diff --git a/src/catalog/src/system_schema/memory_table.rs b/src/catalog/src/system_schema/memory_table.rs index a6ea55ca96..de5ff120b2 100644 --- a/src/catalog/src/system_schema/memory_table.rs +++ b/src/catalog/src/system_schema/memory_table.rs @@ -21,9 +21,9 @@ use common_error::ext::BoxedError; use common_recordbatch::adapter::RecordBatchStreamAdapter; use common_recordbatch::{RecordBatch, SendableRecordBatchStream}; use datafusion::execution::TaskContext; +use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream; use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter; use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream; -use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream; use datatypes::schema::SchemaRef; use datatypes::vectors::VectorRef; use snafu::ResultExt; diff --git a/src/catalog/src/system_schema/pg_catalog.rs b/src/catalog/src/system_schema/pg_catalog.rs index 61dd47f13e..ed6b00943f 100644 --- a/src/catalog/src/system_schema/pg_catalog.rs +++ b/src/catalog/src/system_schema/pg_catalog.rs @@ -34,10 +34,10 @@ use table::TableRef; pub use table_names::*; use self::pg_namespace::oid_map::{PGNamespaceOidMap, PGNamespaceOidMapRef}; +use crate::CatalogManager; use crate::system_schema::memory_table::MemoryTable; use crate::system_schema::utils::tables::u32_column; use crate::system_schema::{SystemSchemaProvider, SystemSchemaProviderInner, SystemTableRef}; -use crate::CatalogManager; lazy_static! 
{ static ref MEMORY_TABLES: &'static [&'static str] = &[table_names::PG_TYPE]; diff --git a/src/catalog/src/system_schema/pg_catalog/pg_class.rs b/src/catalog/src/system_schema/pg_catalog/pg_class.rs index d34a6b4c6b..e66bccec3f 100644 --- a/src/catalog/src/system_schema/pg_catalog/pg_class.rs +++ b/src/catalog/src/system_schema/pg_catalog/pg_class.rs @@ -32,15 +32,15 @@ use snafu::{OptionExt, ResultExt}; use store_api::storage::ScanRequest; use table::metadata::TableType; +use crate::CatalogManager; use crate::error::{ CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu, }; use crate::information_schema::Predicates; -use crate::system_schema::pg_catalog::pg_namespace::oid_map::PGNamespaceOidMapRef; -use crate::system_schema::pg_catalog::{query_ctx, OID_COLUMN_NAME, PG_CLASS}; -use crate::system_schema::utils::tables::{string_column, u32_column}; use crate::system_schema::SystemTable; -use crate::CatalogManager; +use crate::system_schema::pg_catalog::pg_namespace::oid_map::PGNamespaceOidMapRef; +use crate::system_schema::pg_catalog::{OID_COLUMN_NAME, PG_CLASS, query_ctx}; +use crate::system_schema::utils::tables::{string_column, u32_column}; // === column name === pub const RELNAME: &str = "relname"; diff --git a/src/catalog/src/system_schema/pg_catalog/pg_database.rs b/src/catalog/src/system_schema/pg_catalog/pg_database.rs index 8a788a25da..0f0dbdcfbd 100644 --- a/src/catalog/src/system_schema/pg_catalog/pg_database.rs +++ b/src/catalog/src/system_schema/pg_catalog/pg_database.rs @@ -29,15 +29,15 @@ use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, VectorRef}; use snafu::{OptionExt, ResultExt}; use store_api::storage::ScanRequest; +use crate::CatalogManager; use crate::error::{ CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu, }; use crate::information_schema::Predicates; -use crate::system_schema::pg_catalog::pg_namespace::oid_map::PGNamespaceOidMapRef; -use crate::system_schema::pg_catalog::{query_ctx, OID_COLUMN_NAME, PG_DATABASE}; -use crate::system_schema::utils::tables::{string_column, u32_column}; use crate::system_schema::SystemTable; -use crate::CatalogManager; +use crate::system_schema::pg_catalog::pg_namespace::oid_map::PGNamespaceOidMapRef; +use crate::system_schema::pg_catalog::{OID_COLUMN_NAME, PG_DATABASE, query_ctx}; +use crate::system_schema::utils::tables::{string_column, u32_column}; // === column name === pub const DATNAME: &str = "datname"; diff --git a/src/catalog/src/system_schema/pg_catalog/pg_namespace.rs b/src/catalog/src/system_schema/pg_catalog/pg_namespace.rs index f8fc3e09c4..07e25b9d41 100644 --- a/src/catalog/src/system_schema/pg_catalog/pg_namespace.rs +++ b/src/catalog/src/system_schema/pg_catalog/pg_namespace.rs @@ -35,16 +35,16 @@ use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder, VectorRef}; use snafu::{OptionExt, ResultExt}; use store_api::storage::ScanRequest; +use crate::CatalogManager; use crate::error::{ CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu, }; use crate::information_schema::Predicates; +use crate::system_schema::SystemTable; use crate::system_schema::pg_catalog::{ - query_ctx, PGNamespaceOidMapRef, OID_COLUMN_NAME, PG_NAMESPACE, + OID_COLUMN_NAME, PG_NAMESPACE, PGNamespaceOidMapRef, query_ctx, }; use crate::system_schema::utils::tables::{string_column, u32_column}; -use crate::system_schema::SystemTable; -use crate::CatalogManager; const NSPNAME: &str = "nspname"; const INIT_CAPACITY: usize = 42; diff --git 
a/src/catalog/src/system_schema/predicate.rs b/src/catalog/src/system_schema/predicate.rs index 9704f7cf4e..7957ba454d 100644 --- a/src/catalog/src/system_schema/predicate.rs +++ b/src/catalog/src/system_schema/predicate.rs @@ -339,18 +339,22 @@ mod tests { assert!(!p.eval(&wrong_row).unwrap()); assert!(p.eval(&[]).is_none()); assert!(p.eval(&[("c", &a_value)]).is_none()); - assert!(!p - .eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &a_value)]) - .unwrap()); - assert!(!p - .eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &b_value)]) - .unwrap()); - assert!(p - .eval(&[(a_col.as_ref(), &a_value), ("c", &a_value)]) - .is_none()); - assert!(!p - .eval(&[(a_col.as_ref(), &b_value), ("c", &a_value)]) - .unwrap()); + assert!( + !p.eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &a_value)]) + .unwrap() + ); + assert!( + !p.eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &b_value)]) + .unwrap() + ); + assert!( + p.eval(&[(a_col.as_ref(), &a_value), ("c", &a_value)]) + .is_none() + ); + assert!( + !p.eval(&[(a_col.as_ref(), &b_value), ("c", &a_value)]) + .unwrap() + ); //Predicate::Or let p = Predicate::Or(Box::new(p1), Box::new(p2)); @@ -358,18 +362,22 @@ mod tests { assert!(p.eval(&wrong_row).unwrap()); assert!(p.eval(&[]).is_none()); assert!(p.eval(&[("c", &a_value)]).is_none()); - assert!(!p - .eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &a_value)]) - .unwrap()); - assert!(p - .eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &b_value)]) - .unwrap()); - assert!(p - .eval(&[(a_col.as_ref(), &a_value), ("c", &a_value)]) - .unwrap()); - assert!(p - .eval(&[(a_col.as_ref(), &b_value), ("c", &a_value)]) - .is_none()); + assert!( + !p.eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &a_value)]) + .unwrap() + ); + assert!( + p.eval(&[(a_col.as_str(), &b_value), (b_col.as_str(), &b_value)]) + .unwrap() + ); + assert!( + p.eval(&[(a_col.as_ref(), &a_value), ("c", &a_value)]) + .unwrap() + ); + assert!( + p.eval(&[(a_col.as_ref(), &b_value), ("c", &a_value)]) + .is_none() + ); } #[test] diff --git a/src/catalog/src/system_schema/utils.rs b/src/catalog/src/system_schema/utils.rs index 8d8af84bf0..9231a48e98 100644 --- a/src/catalog/src/system_schema/utils.rs +++ b/src/catalog/src/system_schema/utils.rs @@ -17,10 +17,10 @@ use std::sync::Weak; use common_meta::key::TableMetadataManagerRef; use snafu::OptionExt; +use crate::CatalogManager; use crate::error::{GetInformationExtensionSnafu, Result, UpgradeWeakCatalogManagerRefSnafu}; use crate::information_schema::InformationExtensionRef; use crate::kvbackend::KvBackendCatalogManager; -use crate::CatalogManager; pub mod tables; diff --git a/src/catalog/src/table_source.rs b/src/catalog/src/table_source.rs index 3a80b5da3c..2bfc777798 100644 --- a/src/catalog/src/table_source.rs +++ b/src/catalog/src/table_source.rs @@ -17,27 +17,27 @@ use std::sync::Arc; use bytes::Bytes; use common_catalog::format_full_table_name; -use common_query::logical_plan::{rename_logical_plan_columns, SubstraitPlanDecoderRef}; +use common_query::logical_plan::{SubstraitPlanDecoderRef, rename_logical_plan_columns}; use datafusion::common::{ResolvedTableReference, TableReference}; use datafusion::datasource::view::ViewTable; -use datafusion::datasource::{provider_as_source, TableProvider}; +use datafusion::datasource::{TableProvider, provider_as_source}; use datafusion::logical_expr::TableSource; use itertools::Itertools; use session::context::QueryContextRef; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use 
table::metadata::TableType; use table::table::adapter::DfTableProviderAdapter; pub mod dummy_catalog; use dummy_catalog::DummyCatalogList; use table::TableRef; +use crate::CatalogManagerRef; use crate::error::{ CastManagerSnafu, DecodePlanSnafu, GetViewCacheSnafu, ProjectViewColumnsSnafu, QueryAccessDeniedSnafu, Result, TableNotExistSnafu, ViewInfoNotFoundSnafu, ViewPlanColumnsChangedSnafu, }; use crate::kvbackend::KvBackendCatalogManager; -use crate::CatalogManagerRef; pub struct DfTableSourceProvider { catalog_manager: CatalogManagerRef, @@ -272,7 +272,7 @@ mod tests { use common_query::logical_plan::SubstraitPlanDecoder; use datafusion::catalog::CatalogProviderList; use datafusion::logical_expr::builder::LogicalTableSource; - use datafusion::logical_expr::{col, lit, LogicalPlan, LogicalPlanBuilder}; + use datafusion::logical_expr::{LogicalPlan, LogicalPlanBuilder, col, lit}; use crate::information_schema::NoopInformationExtension; diff --git a/src/catalog/src/table_source/dummy_catalog.rs b/src/catalog/src/table_source/dummy_catalog.rs index 376f77a861..db49db0eed 100644 --- a/src/catalog/src/table_source/dummy_catalog.rs +++ b/src/catalog/src/table_source/dummy_catalog.rs @@ -25,8 +25,8 @@ use datafusion::datasource::TableProvider; use snafu::OptionExt; use table::table::adapter::DfTableProviderAdapter; -use crate::error::TableNotExistSnafu; use crate::CatalogManagerRef; +use crate::error::TableNotExistSnafu; /// Delegate the resolving requests to the `[CatalogManager]` unconditionally. #[derive(Clone)] diff --git a/src/cli/src/bench/metadata.rs b/src/cli/src/bench/metadata.rs index 68dfb5441a..afbaa5b7dd 100644 --- a/src/cli/src/bench/metadata.rs +++ b/src/cli/src/bench/metadata.rs @@ -14,8 +14,8 @@ use std::time::Instant; -use common_meta::key::table_route::TableRouteValue; use common_meta::key::TableMetadataManagerRef; +use common_meta::key::table_route::TableRouteValue; use table::table_name::TableName; use crate::bench::{ diff --git a/src/cli/src/data.rs b/src/cli/src/data.rs index a74823b756..bac7f3e308 100644 --- a/src/cli/src/data.rs +++ b/src/cli/src/data.rs @@ -18,9 +18,9 @@ mod import; use clap::Subcommand; use common_error::ext::BoxedError; +use crate::Tool; use crate::data::export::ExportCommand; use crate::data::import::ImportCommand; -use crate::Tool; /// Command for data operations including exporting data from and importing data into GreptimeDB. 
#[derive(Subcommand)] diff --git a/src/cli/src/data/export.rs b/src/cli/src/data/export.rs index 4d5f817949..a9f68bf9c9 100644 --- a/src/cli/src/data/export.rs +++ b/src/cli/src/data/export.rs @@ -24,18 +24,18 @@ use common_error::ext::BoxedError; use common_telemetry::{debug, error, info}; use object_store::layers::LoggingLayer; use object_store::services::Oss; -use object_store::{services, ObjectStore}; +use object_store::{ObjectStore, services}; use serde_json::Value; use snafu::{OptionExt, ResultExt}; use tokio::sync::Semaphore; use tokio::time::Instant; -use crate::database::{parse_proxy_opts, DatabaseClient}; +use crate::database::{DatabaseClient, parse_proxy_opts}; use crate::error::{ EmptyResultSnafu, Error, OpenDalSnafu, OutputDirNotSetSnafu, Result, S3ConfigNotSetSnafu, SchemaNotFoundSnafu, }; -use crate::{database, Tool}; +use crate::{Tool, database}; type TableReference = (String, String, String); diff --git a/src/cli/src/data/import.rs b/src/cli/src/data/import.rs index 39d45f1061..102de8ac91 100644 --- a/src/cli/src/data/import.rs +++ b/src/cli/src/data/import.rs @@ -25,9 +25,9 @@ use snafu::{OptionExt, ResultExt}; use tokio::sync::Semaphore; use tokio::time::Instant; -use crate::database::{parse_proxy_opts, DatabaseClient}; +use crate::database::{DatabaseClient, parse_proxy_opts}; use crate::error::{Error, FileIoSnafu, Result, SchemaNotFoundSnafu}; -use crate::{database, Tool}; +use crate::{Tool, database}; #[derive(Debug, Default, Clone, ValueEnum)] enum ImportTarget { diff --git a/src/cli/src/database.rs b/src/cli/src/database.rs index 24c4514fbc..db98c38e38 100644 --- a/src/cli/src/database.rs +++ b/src/cli/src/database.rs @@ -14,15 +14,15 @@ use std::time::Duration; -use base64::engine::general_purpose; use base64::Engine; +use base64::engine::general_purpose; use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; use common_error::ext::BoxedError; use humantime::format_duration; use serde_json::Value; +use servers::http::GreptimeQueryOutput; use servers::http::header::constants::GREPTIME_DB_HEADER_TIMEOUT; use servers::http::result::greptime_result_v1::GreptimedbV1Response; -use servers::http::GreptimeQueryOutput; use snafu::ResultExt; use crate::error::{ diff --git a/src/cli/src/metadata.rs b/src/cli/src/metadata.rs index 80486214da..aea06bc9c3 100644 --- a/src/cli/src/metadata.rs +++ b/src/cli/src/metadata.rs @@ -21,10 +21,10 @@ mod utils; use clap::Subcommand; use common_error::ext::BoxedError; +use crate::Tool; use crate::metadata::control::{DelCommand, GetCommand}; use crate::metadata::repair::RepairLogicalTablesCommand; use crate::metadata::snapshot::SnapshotCommand; -use crate::Tool; /// Command for managing metadata operations, /// including saving and restoring metadata snapshots, diff --git a/src/cli/src/metadata/common.rs b/src/cli/src/metadata/common.rs index 2a9b2d1c4d..4019ed3ef8 100644 --- a/src/cli/src/metadata/common.rs +++ b/src/cli/src/metadata/common.rs @@ -16,9 +16,9 @@ use std::sync::Arc; use clap::Parser; use common_error::ext::BoxedError; +use common_meta::kv_backend::KvBackendRef; use common_meta::kv_backend::chroot::ChrootKvBackend; use common_meta::kv_backend::etcd::EtcdStore; -use common_meta::kv_backend::KvBackendRef; use meta_srv::bootstrap::create_etcd_client_with_tls; use meta_srv::metasrv::BackendImpl; use servers::tls::{TlsMode, TlsOption}; diff --git a/src/cli/src/metadata/control/del.rs b/src/cli/src/metadata/control/del.rs index 6afe472607..70ce59c1cd 100644 --- a/src/cli/src/metadata/control/del.rs +++ 
b/src/cli/src/metadata/control/del.rs @@ -18,9 +18,9 @@ mod table; use clap::Subcommand; use common_error::ext::BoxedError; +use crate::Tool; use crate::metadata::control::del::key::DelKeyCommand; use crate::metadata::control::del::table::DelTableCommand; -use crate::Tool; /// The prefix of the tombstone keys. pub(crate) const CLI_TOMBSTONE_PREFIX: &str = "__cli_tombstone/"; diff --git a/src/cli/src/metadata/control/del/key.rs b/src/cli/src/metadata/control/del/key.rs index 6b765835de..ccf3b1089c 100644 --- a/src/cli/src/metadata/control/del/key.rs +++ b/src/cli/src/metadata/control/del/key.rs @@ -19,9 +19,9 @@ use common_meta::key::tombstone::TombstoneManager; use common_meta::kv_backend::KvBackendRef; use common_meta::rpc::store::RangeRequest; +use crate::Tool; use crate::metadata::common::StoreConfig; use crate::metadata::control::del::CLI_TOMBSTONE_PREFIX; -use crate::Tool; /// Delete key-value pairs logically from the metadata store. #[derive(Debug, Default, Parser)] @@ -102,8 +102,8 @@ mod tests { use common_meta::kv_backend::{KvBackend, KvBackendRef}; use common_meta::rpc::store::RangeRequest; - use crate::metadata::control::del::key::KeyDeleter; use crate::metadata::control::del::CLI_TOMBSTONE_PREFIX; + use crate::metadata::control::del::key::KeyDeleter; use crate::metadata::control::test_utils::put_key; #[tokio::test] diff --git a/src/cli/src/metadata/control/del/table.rs b/src/cli/src/metadata/control/del/table.rs index d62ab1ed31..f030a4d952 100644 --- a/src/cli/src/metadata/control/del/table.rs +++ b/src/cli/src/metadata/control/del/table.rs @@ -18,16 +18,16 @@ use client::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; use common_catalog::format_full_table_name; use common_error::ext::BoxedError; use common_meta::ddl::utils::get_region_wal_options; -use common_meta::key::table_name::TableNameManager; use common_meta::key::TableMetadataManager; +use common_meta::key::table_name::TableNameManager; use common_meta::kv_backend::KvBackendRef; use store_api::storage::TableId; +use crate::Tool; use crate::error::{InvalidArgumentsSnafu, TableNotFoundSnafu}; use crate::metadata::common::StoreConfig; use crate::metadata::control::del::CLI_TOMBSTONE_PREFIX; use crate::metadata::control::utils::get_table_id_by_name; -use crate::Tool; /// Delete table metadata logically from the metadata store. 
#[derive(Debug, Default, Parser)] @@ -183,15 +183,15 @@ mod tests { use common_error::ext::ErrorExt; use common_error::status_code::StatusCode; - use common_meta::key::table_route::TableRouteValue; use common_meta::key::TableMetadataManager; + use common_meta::key::table_route::TableRouteValue; use common_meta::kv_backend::chroot::ChrootKvBackend; use common_meta::kv_backend::memory::MemoryKvBackend; use common_meta::kv_backend::{KvBackend, KvBackendRef}; use common_meta::rpc::store::RangeRequest; - use crate::metadata::control::del::table::TableMetadataDeleter; use crate::metadata::control::del::CLI_TOMBSTONE_PREFIX; + use crate::metadata::control::del::table::TableMetadataDeleter; use crate::metadata::control::test_utils::prepare_physical_table_metadata; #[tokio::test] diff --git a/src/cli/src/metadata/control/get.rs b/src/cli/src/metadata/control/get.rs index 38a488b53d..4f1f8e7edc 100644 --- a/src/cli/src/metadata/control/get.rs +++ b/src/cli/src/metadata/control/get.rs @@ -19,18 +19,18 @@ use clap::{Parser, Subcommand}; use client::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; use common_catalog::format_full_table_name; use common_error::ext::BoxedError; +use common_meta::key::TableMetadataManager; use common_meta::key::table_info::TableInfoKey; use common_meta::key::table_route::TableRouteKey; -use common_meta::key::TableMetadataManager; use common_meta::kv_backend::KvBackendRef; -use common_meta::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE}; +use common_meta::range_stream::{DEFAULT_PAGE_SIZE, PaginationStream}; use common_meta::rpc::store::RangeRequest; use futures::TryStreamExt; +use crate::Tool; use crate::error::InvalidArgumentsSnafu; use crate::metadata::common::StoreConfig; use crate::metadata::control::utils::{decode_key_value, get_table_id_by_name, json_fromatter}; -use crate::Tool; /// Getting metadata from metadata store. #[derive(Subcommand)] diff --git a/src/cli/src/metadata/repair.rs b/src/cli/src/metadata/repair.rs index 98b2f8fee9..af279e3e38 100644 --- a/src/cli/src/metadata/repair.rs +++ b/src/cli/src/metadata/repair.rs @@ -31,18 +31,18 @@ use common_meta::key::TableMetadataManager; use common_meta::kv_backend::KvBackendRef; use common_meta::node_manager::NodeManagerRef; use common_meta::peer::Peer; -use common_meta::rpc::router::{find_leaders, RegionRoute}; +use common_meta::rpc::router::{RegionRoute, find_leaders}; use common_telemetry::{error, info, warn}; use futures::TryStreamExt; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use store_api::storage::TableId; +use crate::Tool; use crate::error::{ InvalidArgumentsSnafu, Result, SendRequestToDatanodeSnafu, TableMetadataSnafu, UnexpectedSnafu, }; use crate::metadata::common::StoreConfig; use crate::metadata::utils::{FullTableMetadata, IteratorInput, TableMetadataIterator}; -use crate::Tool; /// Repair metadata of logical tables. 
#[derive(Debug, Default, Parser)] @@ -301,7 +301,10 @@ impl RepairTool { warn!( "Sending alter table requests to datanodes for table: {} failed for the datanodes: {:?}", full_table_name, - failed_peers.iter().map(|(peer, _)| peer.id).collect::<Vec<_>>() + failed_peers + .iter() + .map(|(peer, _)| peer.id) + .collect::<Vec<_>>() ); let create_table_expr = @@ -320,8 +323,7 @@ impl RepairTool { } info!( "Region not found for table: {}, datanode: {}, trying to create the logical table on that datanode", - full_table_name, - peer.id + full_table_name, peer.id ); // If the alter table request fails for any datanode, we attempt to create the table on that datanode diff --git a/src/cli/src/metadata/repair/alter_table.rs b/src/cli/src/metadata/repair/alter_table.rs index 53827d0b42..c9d196d2d1 100644 --- a/src/cli/src/metadata/repair/alter_table.rs +++ b/src/cli/src/metadata/repair/alter_table.rs @@ -13,11 +13,11 @@ // limitations under the License. use client::api::v1::alter_table_expr::Kind; -use client::api::v1::region::{region_request, AlterRequests, RegionRequest, RegionRequestHeader}; +use client::api::v1::region::{AlterRequests, RegionRequest, RegionRequestHeader, region_request}; use client::api::v1::{AddColumn, AddColumns, AlterTableExpr}; use common_meta::ddl::alter_logical_tables::make_alter_region_request; use common_meta::peer::Peer; -use common_meta::rpc::router::{find_leader_regions, RegionRoute}; +use common_meta::rpc::router::{RegionRoute, find_leader_regions}; use operator::expr_helper::column_schemas_to_defs; use snafu::ResultExt; use store_api::storage::{RegionId, TableId}; diff --git a/src/cli/src/metadata/repair/create_table.rs b/src/cli/src/metadata/repair/create_table.rs index e37c823c7e..444f3558d2 100644 --- a/src/cli/src/metadata/repair/create_table.rs +++ b/src/cli/src/metadata/repair/create_table.rs @@ -14,12 +14,12 @@ use std::collections::HashMap; -use client::api::v1::region::{region_request, CreateRequests, RegionRequest, RegionRequestHeader}; use client::api::v1::CreateTableExpr; +use client::api::v1::region::{CreateRequests, RegionRequest, RegionRequestHeader, region_request}; use common_meta::ddl::create_logical_tables::create_region_request_builder; use common_meta::ddl::utils::region_storage_path; use common_meta::peer::Peer; -use common_meta::rpc::router::{find_leader_regions, RegionRoute}; +use common_meta::rpc::router::{RegionRoute, find_leader_regions}; use operator::expr_helper::column_schemas_to_defs; use snafu::ResultExt; use store_api::storage::{RegionId, TableId}; diff --git a/src/cli/src/metadata/snapshot.rs b/src/cli/src/metadata/snapshot.rs index 30110e9881..4084f90653 100644 --- a/src/cli/src/metadata/snapshot.rs +++ b/src/cli/src/metadata/snapshot.rs @@ -19,13 +19,13 @@ use clap::{Parser, Subcommand}; use common_base::secrets::{ExposeSecret, SecretString}; use common_error::ext::BoxedError; use common_meta::snapshot::MetadataSnapshotManager; -use object_store::services::{Fs, S3}; use object_store::ObjectStore; +use object_store::services::{Fs, S3}; use snafu::{OptionExt, ResultExt}; +use crate::Tool; use crate::error::{InvalidFilePathSnafu, OpenDalSnafu, S3ConfigNotSetSnafu}; use crate::metadata::common::StoreConfig; -use crate::Tool; /// Subcommand for metadata snapshot operations, including saving snapshots, restoring from snapshots, and viewing snapshot information.
#[derive(Subcommand)] @@ -258,11 +258,13 @@ impl Tool for MetaRestoreTool { Ok(()) } else if !self.force { common_telemetry::warn!( - "The target source is not clean, if you want to restore the metadata snapshot forcefully, please use --force option." - ); + "The target source is not clean, if you want to restore the metadata snapshot forcefully, please use --force option." + ); Ok(()) } else { - common_telemetry::info!("The target source is not clean, We will restore the metadata snapshot with --force."); + common_telemetry::info!( + "The target source is not clean, We will restore the metadata snapshot with --force." + ); self.inner .restore(&self.source_file) .await diff --git a/src/cli/src/metadata/utils.rs b/src/cli/src/metadata/utils.rs index e4c89b2342..47c6e6cb1e 100644 --- a/src/cli/src/metadata/utils.rs +++ b/src/cli/src/metadata/utils.rs @@ -17,9 +17,9 @@ use std::collections::VecDeque; use async_stream::try_stream; use common_catalog::consts::METRIC_ENGINE; use common_catalog::format_full_table_name; +use common_meta::key::TableMetadataManager; use common_meta::key::table_name::TableNameKey; use common_meta::key::table_route::TableRouteValue; -use common_meta::key::TableMetadataManager; use common_meta::kv_backend::KvBackendRef; use futures::Stream; use snafu::{OptionExt, ResultExt}; diff --git a/src/client/src/client.rs b/src/client/src/client.rs index 81c0810ba2..1506ac5208 100644 --- a/src/client/src/client.rs +++ b/src/client/src/client.rs @@ -14,11 +14,11 @@ use std::sync::Arc; +use api::v1::HealthCheckRequest; use api::v1::flow::flow_client::FlowClient as PbFlowClient; use api::v1::health_check_client::HealthCheckClient; use api::v1::prometheus_gateway_client::PrometheusGatewayClient; use api::v1::region::region_client::RegionClient as PbRegionClient; -use api::v1::HealthCheckRequest; use arrow_flight::flight_service_client::FlightServiceClient; use common_grpc::channel_manager::{ChannelConfig, ChannelManager, ClientTlsOption}; use parking_lot::RwLock; @@ -27,7 +27,7 @@ use tonic::codec::CompressionEncoding; use tonic::transport::Channel; use crate::load_balance::{LoadBalance, Loadbalancer}; -use crate::{error, Result}; +use crate::{Result, error}; pub struct FlightClient { addr: String, diff --git a/src/client/src/client_manager.rs b/src/client/src/client_manager.rs index e9b804a499..80afd2fb32 100644 --- a/src/client/src/client_manager.rs +++ b/src/client/src/client_manager.rs @@ -21,9 +21,9 @@ use common_meta::node_manager::{DatanodeManager, DatanodeRef, FlownodeManager, F use common_meta::peer::Peer; use moka::future::{Cache, CacheBuilder}; +use crate::Client; use crate::flow::FlowRequester; use crate::region::RegionRequester; -use crate::Client; pub struct NodeClients { channel_manager: ChannelManager, diff --git a/src/client/src/database.rs b/src/client/src/database.rs index 2b608d0010..0646c3e2a3 100644 --- a/src/client/src/database.rs +++ b/src/client/src/database.rs @@ -27,8 +27,8 @@ use api::v1::{ }; use arrow_flight::{FlightData, Ticket}; use async_stream::stream; -use base64::prelude::BASE64_STANDARD; use base64::Engine; +use base64::prelude::BASE64_STANDARD; use common_catalog::build_db_string; use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; use common_error::ext::BoxedError; @@ -42,7 +42,7 @@ use common_telemetry::{error, warn}; use futures::future; use futures_util::{Stream, StreamExt, TryStreamExt}; use prost::Message; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use 
tonic::metadata::{AsciiMetadataKey, AsciiMetadataValue, MetadataMap, MetadataValue}; use tonic::transport::Channel; @@ -50,7 +50,7 @@ use crate::error::{ ConvertFlightDataSnafu, Error, FlightGetSnafu, IllegalFlightMessagesSnafu, InvalidTonicMetadataValueSnafu, }; -use crate::{error, from_grpc_response, Client, Result}; +use crate::{Client, Result, error, from_grpc_response}; type FlightDataStream = Pin + Send>>; @@ -379,11 +379,10 @@ impl Database { tonic_code, e ); - let error = Err(BoxedError::new(e)).with_context(|_| FlightGetSnafu { + Err(BoxedError::new(e)).with_context(|_| FlightGetSnafu { addr: client.addr().to_string(), tonic_code, - }); - error + }) })?; let flight_data_stream = response.into_inner(); diff --git a/src/client/src/error.rs b/src/client/src/error.rs index e25f66bc9e..0e70296c26 100644 --- a/src/client/src/error.rs +++ b/src/client/src/error.rs @@ -18,9 +18,9 @@ use common_error::define_from_tonic_status; use common_error::ext::{BoxedError, ErrorExt}; use common_error::status_code::StatusCode; use common_macro::stack_trace_debug; -use snafu::{location, Location, Snafu}; -use tonic::metadata::errors::InvalidMetadataValue; +use snafu::{Location, Snafu, location}; use tonic::Code; +use tonic::metadata::errors::InvalidMetadataValue; #[derive(Snafu)] #[snafu(visibility(pub))] diff --git a/src/client/src/flow.rs b/src/client/src/flow.rs index 7c3479d412..2f633dc498 100644 --- a/src/client/src/flow.rs +++ b/src/client/src/flow.rs @@ -18,8 +18,8 @@ use common_error::ext::BoxedError; use common_meta::node_manager::Flownode; use snafu::ResultExt; -use crate::error::{FlowServerSnafu, Result}; use crate::Client; +use crate::error::{FlowServerSnafu, Result}; #[derive(Debug)] pub struct FlowRequester { diff --git a/src/client/src/region.rs b/src/client/src/region.rs index 2472aee2b2..6e5a286083 100644 --- a/src/client/src/region.rs +++ b/src/client/src/region.rs @@ -15,8 +15,8 @@ use std::sync::Arc; use api::region::RegionResponse; -use api::v1::region::RegionRequest; use api::v1::ResponseHeader; +use api::v1::region::RegionRequest; use arc_swap::ArcSwapOption; use arrow_flight::Ticket; use async_stream::stream; @@ -33,7 +33,7 @@ use common_telemetry::error; use common_telemetry::tracing_context::TracingContext; use prost::Message; use query::query_engine::DefaultSerializer; -use snafu::{location, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, location}; use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan}; use tokio_stream::StreamExt; @@ -41,7 +41,7 @@ use crate::error::{ self, ConvertFlightDataSnafu, FlightGetSnafu, IllegalDatabaseResponseSnafu, IllegalFlightMessagesSnafu, MissingFieldSnafu, Result, ServerSnafu, }; -use crate::{metrics, Client, Error}; +use crate::{Client, Error, metrics}; #[derive(Debug)] pub struct RegionRequester { @@ -115,11 +115,10 @@ impl RegionRequester { flight_client.addr(), tonic_code ); - let error = Err(BoxedError::new(e)).with_context(|_| FlightGetSnafu { + Err(BoxedError::new(e)).with_context(|_| FlightGetSnafu { addr: flight_client.addr().to_string(), tonic_code, - }); - error + }) })?; let flight_data_stream = response.into_inner(); diff --git a/src/cmd/src/bin/greptime.rs b/src/cmd/src/bin/greptime.rs index 9aed5b7c37..cf72b3d32f 100644 --- a/src/cmd/src/bin/greptime.rs +++ b/src/cmd/src/bin/greptime.rs @@ -18,7 +18,7 @@ use clap::{Parser, Subcommand}; use cmd::datanode::builder::InstanceBuilder; use cmd::error::{InitTlsProviderSnafu, Result}; use cmd::options::GlobalOptions; -use cmd::{cli, datanode, flownode, frontend, 
metasrv, standalone, App}; +use cmd::{App, cli, datanode, flownode, frontend, metasrv, standalone}; use common_base::Plugins; use common_version::{verbose_version, version}; use servers::install_ring_crypto_provider; @@ -143,8 +143,10 @@ async fn start(cli: Command) -> Result<()> { } fn setup_human_panic() { - human_panic::setup_panic!(human_panic::Metadata::new("GreptimeDB", version()) - .homepage("https://github.com/GreptimeTeam/greptimedb/discussions")); + human_panic::setup_panic!( + human_panic::Metadata::new("GreptimeDB", version()) + .homepage("https://github.com/GreptimeTeam/greptimedb/discussions") + ); common_telemetry::set_panic_hook(); } diff --git a/src/cmd/src/cli.rs b/src/cmd/src/cli.rs index 0c879c38a3..84e797c291 100644 --- a/src/cmd/src/cli.rs +++ b/src/cmd/src/cli.rs @@ -20,7 +20,7 @@ use snafu::ResultExt; use tracing_appender::non_blocking::WorkerGuard; use crate::options::GlobalOptions; -use crate::{error, App, Result}; +use crate::{App, Result, error}; pub const APP_NAME: &str = "greptime-cli"; use async_trait::async_trait; @@ -109,7 +109,7 @@ mod tests { use crate::error::Result as CmdResult; use crate::options::GlobalOptions; - use crate::{cli, standalone, App}; + use crate::{App, cli, standalone}; #[tokio::test(flavor = "multi_thread")] async fn test_export_create_table_with_quoted_names() -> CmdResult<()> { diff --git a/src/cmd/src/datanode.rs b/src/cmd/src/datanode.rs index a99a531ec8..641d3fc5fd 100644 --- a/src/cmd/src/datanode.rs +++ b/src/cmd/src/datanode.rs @@ -20,20 +20,20 @@ use std::time::Duration; use async_trait::async_trait; use clap::Parser; use common_config::Configurable; -use common_telemetry::logging::{TracingOptions, DEFAULT_LOGGING_DIR}; +use common_telemetry::logging::{DEFAULT_LOGGING_DIR, TracingOptions}; use common_telemetry::{info, warn}; use common_wal::config::DatanodeWalConfig; use datanode::datanode::Datanode; use meta_client::MetaClientOptions; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use tracing_appender::non_blocking::WorkerGuard; +use crate::App; use crate::datanode::builder::InstanceBuilder; use crate::error::{ LoadLayeredConfigSnafu, MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu, }; use crate::options::{GlobalOptions, GreptimeOptions}; -use crate::App; pub const APP_NAME: &str = "greptime-datanode"; @@ -187,29 +187,39 @@ impl StartCommand { if let Some(addr) = &self.rpc_bind_addr { opts.grpc.bind_addr.clone_from(addr); } else if let Some(addr) = &opts.rpc_addr { - warn!("Use the deprecated attribute `DatanodeOptions.rpc_addr`, please use `grpc.addr` instead."); + warn!( + "Use the deprecated attribute `DatanodeOptions.rpc_addr`, please use `grpc.addr` instead." + ); opts.grpc.bind_addr.clone_from(addr); } if let Some(server_addr) = &self.rpc_server_addr { opts.grpc.server_addr.clone_from(server_addr); } else if let Some(server_addr) = &opts.rpc_hostname { - warn!("Use the deprecated attribute `DatanodeOptions.rpc_hostname`, please use `grpc.hostname` instead."); + warn!( + "Use the deprecated attribute `DatanodeOptions.rpc_hostname`, please use `grpc.hostname` instead." + ); opts.grpc.server_addr.clone_from(server_addr); } if let Some(runtime_size) = opts.rpc_runtime_size { - warn!("Use the deprecated attribute `DatanodeOptions.rpc_runtime_size`, please use `grpc.runtime_size` instead."); + warn!( + "Use the deprecated attribute `DatanodeOptions.rpc_runtime_size`, please use `grpc.runtime_size` instead." 
+ ); opts.grpc.runtime_size = runtime_size; } if let Some(max_recv_message_size) = opts.rpc_max_recv_message_size { - warn!("Use the deprecated attribute `DatanodeOptions.rpc_max_recv_message_size`, please use `grpc.max_recv_message_size` instead."); + warn!( + "Use the deprecated attribute `DatanodeOptions.rpc_max_recv_message_size`, please use `grpc.max_recv_message_size` instead." + ); opts.grpc.max_recv_message_size = max_recv_message_size; } if let Some(max_send_message_size) = opts.rpc_max_send_message_size { - warn!("Use the deprecated attribute `DatanodeOptions.rpc_max_send_message_size`, please use `grpc.max_send_message_size` instead."); + warn!( + "Use the deprecated attribute `DatanodeOptions.rpc_max_send_message_size`, please use `grpc.max_send_message_size` instead." + ); opts.grpc.max_send_message_size = max_send_message_size; } @@ -430,20 +440,24 @@ mod tests { #[test] fn test_try_from_cmd() { - assert!((StartCommand { - metasrv_addrs: Some(vec!["127.0.0.1:3002".to_string()]), - ..Default::default() - }) - .load_options(&GlobalOptions::default()) - .is_err()); + assert!( + (StartCommand { + metasrv_addrs: Some(vec!["127.0.0.1:3002".to_string()]), + ..Default::default() + }) + .load_options(&GlobalOptions::default()) + .is_err() + ); // Providing node_id but leave metasrv_addr absent is ok since metasrv_addr has default value - assert!((StartCommand { - node_id: Some(42), - ..Default::default() - }) - .load_options(&GlobalOptions::default()) - .is_ok()); + assert!( + (StartCommand { + node_id: Some(42), + ..Default::default() + }) + .load_options(&GlobalOptions::default()) + .is_ok() + ); } #[test] diff --git a/src/cmd/src/datanode/builder.rs b/src/cmd/src/datanode/builder.rs index e4a5b69864..cfde0c349a 100644 --- a/src/cmd/src/datanode/builder.rs +++ b/src/cmd/src/datanode/builder.rs @@ -26,7 +26,7 @@ use meta_client::MetaClientType; use snafu::{OptionExt, ResultExt}; use tracing_appender::non_blocking::WorkerGuard; -use crate::datanode::{DatanodeOptions, Instance, APP_NAME}; +use crate::datanode::{APP_NAME, DatanodeOptions, Instance}; use crate::error::{MetaClientInitSnafu, MissingConfigSnafu, Result, StartDatanodeSnafu}; use crate::{create_resource_limit_metrics, log_versions, maybe_activate_heap_profile}; diff --git a/src/cmd/src/flownode.rs b/src/cmd/src/flownode.rs index 5488a2e175..f62fb7919e 100644 --- a/src/cmd/src/flownode.rs +++ b/src/cmd/src/flownode.rs @@ -25,20 +25,20 @@ use common_base::Plugins; use common_config::{Configurable, DEFAULT_DATA_HOME}; use common_grpc::channel_manager::ChannelConfig; use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder}; +use common_meta::heartbeat::handler::HandlerGroupExecutor; use common_meta::heartbeat::handler::invalidate_table_cache::InvalidateCacheHandler; use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler; -use common_meta::heartbeat::handler::HandlerGroupExecutor; -use common_meta::key::flow::FlowMetadataManager; use common_meta::key::TableMetadataManager; +use common_meta::key::flow::FlowMetadataManager; use common_telemetry::info; -use common_telemetry::logging::{TracingOptions, DEFAULT_LOGGING_DIR}; +use common_telemetry::logging::{DEFAULT_LOGGING_DIR, TracingOptions}; use common_version::{short_version, verbose_version}; use flow::{ - get_flow_auth_options, FlownodeBuilder, FlownodeInstance, FlownodeServiceBuilder, - FrontendClient, FrontendInvoker, + FlownodeBuilder, FlownodeInstance, FlownodeServiceBuilder, FrontendClient, FrontendInvoker, + 
get_flow_auth_options, }; use meta_client::{MetaClientOptions, MetaClientType}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use tracing_appender::non_blocking::WorkerGuard; use crate::error::{ @@ -46,7 +46,7 @@ use crate::error::{ MissingConfigSnafu, Result, ShutdownFlownodeSnafu, StartFlownodeSnafu, }; use crate::options::{GlobalOptions, GreptimeOptions}; -use crate::{create_resource_limit_metrics, log_versions, maybe_activate_heap_profile, App}; +use crate::{App, create_resource_limit_metrics, log_versions, maybe_activate_heap_profile}; pub const APP_NAME: &str = "greptime-flownode"; diff --git a/src/cmd/src/frontend.rs b/src/cmd/src/frontend.rs index fb04045ea8..cab9912203 100644 --- a/src/cmd/src/frontend.rs +++ b/src/cmd/src/frontend.rs @@ -27,11 +27,11 @@ use common_base::Plugins; use common_config::{Configurable, DEFAULT_DATA_HOME}; use common_grpc::channel_manager::ChannelConfig; use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder}; +use common_meta::heartbeat::handler::HandlerGroupExecutor; use common_meta::heartbeat::handler::invalidate_table_cache::InvalidateCacheHandler; use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler; -use common_meta::heartbeat::handler::HandlerGroupExecutor; use common_telemetry::info; -use common_telemetry::logging::{TracingOptions, DEFAULT_LOGGING_DIR}; +use common_telemetry::logging::{DEFAULT_LOGGING_DIR, TracingOptions}; use common_time::timezone::set_default_timezone; use common_version::{short_version, verbose_version}; use frontend::frontend::Frontend; @@ -48,7 +48,7 @@ use tracing_appender::non_blocking::WorkerGuard; use crate::error::{self, Result}; use crate::options::{GlobalOptions, GreptimeOptions}; -use crate::{create_resource_limit_metrics, log_versions, maybe_activate_heap_profile, App}; +use crate::{App, create_resource_limit_metrics, log_versions, maybe_activate_heap_profile}; type FrontendOptions = GreptimeOptions; diff --git a/src/cmd/src/lib.rs b/src/cmd/src/lib.rs index 6d63e21276..78baacd13d 100644 --- a/src/cmd/src/lib.rs +++ b/src/cmd/src/lib.rs @@ -46,7 +46,7 @@ lazy_static::lazy_static! { /// wait for the close signal, for unix platform it's SIGINT or SIGTERM #[cfg(unix)] async fn start_wait_for_close_signal() -> std::io::Result<()> { - use tokio::signal::unix::{signal, SignalKind}; + use tokio::signal::unix::{SignalKind, signal}; let mut sigint = signal(SignalKind::interrupt())?; let mut sigterm = signal(SignalKind::terminate())?; @@ -93,13 +93,13 @@ pub trait App: Send { self.start().await?; - if self.wait_signal() { - if let Err(e) = start_wait_for_close_signal().await { - error!(e; "Failed to listen for close signal"); - // It's unusual to fail to listen for close signal, maybe there's something unexpected in - // the underlying system. So we stop the app instead of running nonetheless to let people - // investigate the issue. - } + if self.wait_signal() + && let Err(e) = start_wait_for_close_signal().await + { + error!(e; "Failed to listen for close signal"); + // It's unusual to fail to listen for close signal, maybe there's something unexpected in + // the underlying system. So we stop the app instead of running nonetheless to let people + // investigate the issue. 
} self.stop().await?; diff --git a/src/cmd/src/metasrv.rs b/src/cmd/src/metasrv.rs index 8e59559bf1..addaba71a4 100644 --- a/src/cmd/src/metasrv.rs +++ b/src/cmd/src/metasrv.rs @@ -21,7 +21,7 @@ use clap::Parser; use common_base::Plugins; use common_config::Configurable; use common_telemetry::info; -use common_telemetry::logging::{TracingOptions, DEFAULT_LOGGING_DIR}; +use common_telemetry::logging::{DEFAULT_LOGGING_DIR, TracingOptions}; use common_version::{short_version, verbose_version}; use meta_srv::bootstrap::MetasrvInstance; use meta_srv::metasrv::BackendImpl; @@ -30,7 +30,7 @@ use tracing_appender::non_blocking::WorkerGuard; use crate::error::{self, LoadLayeredConfigSnafu, Result, StartMetaServerSnafu}; use crate::options::{GlobalOptions, GreptimeOptions}; -use crate::{create_resource_limit_metrics, log_versions, maybe_activate_heap_profile, App}; +use crate::{App, create_resource_limit_metrics, log_versions, maybe_activate_heap_profile}; type MetasrvOptions = GreptimeOptions; diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs index ced13e048c..bda5c4b689 100644 --- a/src/cmd/src/standalone.rs +++ b/src/cmd/src/standalone.rs @@ -24,10 +24,10 @@ use catalog::kvbackend::KvBackendCatalogManagerBuilder; use catalog::process_manager::ProcessManager; use clap::Parser; use client::api::v1::meta::RegionRole; -use common_base::readable_size::ReadableSize; use common_base::Plugins; +use common_base::readable_size::ReadableSize; use common_catalog::consts::{MIN_USER_FLOW_ID, MIN_USER_TABLE_ID}; -use common_config::{metadata_store_dir, Configurable, KvBackendConfig}; +use common_config::{Configurable, KvBackendConfig, metadata_store_dir}; use common_error::ext::BoxedError; use common_meta::cache::LayeredCacheRegistryBuilder; use common_meta::cluster::{NodeInfo, NodeStatus}; @@ -36,8 +36,8 @@ use common_meta::ddl::flow_meta::FlowMetadataAllocator; use common_meta::ddl::table_meta::TableMetadataAllocator; use common_meta::ddl::{DdlContext, NoopRegionFailureDetectorControl}; use common_meta::ddl_manager::DdlManager; -use common_meta::key::flow::flow_state::FlowStat; use common_meta::key::flow::FlowMetadataManager; +use common_meta::key::flow::flow_state::FlowStat; use common_meta::key::{TableMetadataManager, TableMetadataManagerRef}; use common_meta::kv_backend::KvBackendRef; use common_meta::peer::Peer; @@ -45,12 +45,12 @@ use common_meta::procedure_executor::LocalProcedureExecutor; use common_meta::region_keeper::MemoryRegionKeeper; use common_meta::region_registry::LeaderRegionRegistry; use common_meta::sequence::SequenceBuilder; -use common_meta::wal_options_allocator::{build_wal_options_allocator, WalOptionsAllocatorRef}; +use common_meta::wal_options_allocator::{WalOptionsAllocatorRef, build_wal_options_allocator}; use common_options::memory::MemoryOptions; use common_procedure::{ProcedureInfo, ProcedureManagerRef}; use common_telemetry::info; use common_telemetry::logging::{ - LoggingOptions, SlowQueryOptions, TracingOptions, DEFAULT_LOGGING_DIR, + DEFAULT_LOGGING_DIR, LoggingOptions, SlowQueryOptions, TracingOptions, }; use common_time::timezone::set_default_timezone; use common_version::{short_version, verbose_version}; @@ -85,7 +85,7 @@ use tracing_appender::non_blocking::WorkerGuard; use crate::error::{Result, StartFlownodeSnafu}; use crate::options::{GlobalOptions, GreptimeOptions}; -use crate::{create_resource_limit_metrics, error, log_versions, maybe_activate_heap_profile, App}; +use crate::{App, create_resource_limit_metrics, error, log_versions, 
maybe_activate_heap_profile}; pub const APP_NAME: &str = "greptime-standalone"; diff --git a/src/cmd/tests/load_config_test.rs b/src/cmd/tests/load_config_test.rs index 9058a579b0..da2dfd456c 100644 --- a/src/cmd/tests/load_config_test.rs +++ b/src/cmd/tests/load_config_test.rs @@ -18,9 +18,9 @@ use cmd::options::GreptimeOptions; use cmd::standalone::StandaloneOptions; use common_config::{Configurable, DEFAULT_DATA_HOME}; use common_options::datanode::{ClientOptions, DatanodeClientOptions}; -use common_telemetry::logging::{LoggingOptions, DEFAULT_LOGGING_DIR, DEFAULT_OTLP_HTTP_ENDPOINT}; -use common_wal::config::raft_engine::RaftEngineConfig; +use common_telemetry::logging::{DEFAULT_LOGGING_DIR, DEFAULT_OTLP_HTTP_ENDPOINT, LoggingOptions}; use common_wal::config::DatanodeWalConfig; +use common_wal::config::raft_engine::RaftEngineConfig; use datanode::config::{DatanodeOptions, RegionEngineConfig, StorageConfig}; use file_engine::config::EngineConfig as FileEngineConfig; use flow::FlownodeOptions; diff --git a/src/common/base/src/cancellation.rs b/src/common/base/src/cancellation.rs index 155b964158..94aa872834 100644 --- a/src/common/base/src/cancellation.rs +++ b/src/common/base/src/cancellation.rs @@ -19,8 +19,8 @@ use std::fmt::{Debug, Display, Formatter}; use std::future::Future; use std::pin::Pin; -use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicBool, Ordering}; use std::task::{Context, Poll}; use futures::task::AtomicWaker; diff --git a/src/common/base/src/range_read.rs b/src/common/base/src/range_read.rs index 5fabc8cacb..ff92bd03e4 100644 --- a/src/common/base/src/range_read.rs +++ b/src/common/base/src/range_read.rs @@ -17,8 +17,8 @@ use std::io; use std::ops::Range; use std::path::Path; use std::pin::Pin; -use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicU64, Ordering}; use std::task::{Context, Poll}; use async_trait::async_trait; diff --git a/src/common/base/src/readable_size.rs b/src/common/base/src/readable_size.rs index 4298989291..da5eae9bd9 100644 --- a/src/common/base/src/readable_size.rs +++ b/src/common/base/src/readable_size.rs @@ -7,7 +7,7 @@ use std::ops::{Div, Mul}; use std::str::FromStr; use serde::de::{Unexpected, Visitor}; -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; +use serde::{Deserialize, Deserializer, Serialize, Serializer, de}; const UNIT: u64 = 1; diff --git a/src/common/base/src/secrets.rs b/src/common/base/src/secrets.rs index b3f4347578..b31275136a 100644 --- a/src/common/base/src/secrets.rs +++ b/src/common/base/src/secrets.rs @@ -34,7 +34,7 @@ use std::fmt::Debug; use std::{any, fmt}; -use serde::{de, ser, Deserialize, Serialize}; +use serde::{Deserialize, Serialize, de, ser}; use zeroize::{Zeroize, ZeroizeOnDrop}; /// Wrapper type for strings that contains secrets. See also [SecretBox]. diff --git a/src/common/config/src/config.rs b/src/common/config/src/config.rs index 5ef7358969..07299acce6 100644 --- a/src/common/config/src/config.rs +++ b/src/common/config/src/config.rs @@ -13,8 +13,8 @@ // limitations under the License. 
use config::{Environment, File, FileFormat}; -use serde::de::DeserializeOwned; use serde::Serialize; +use serde::de::DeserializeOwned; use snafu::ResultExt; use crate::error::{LoadLayeredConfigSnafu, Result, SerdeJsonSnafu, TomlFormatSnafu}; diff --git a/src/common/datasource/src/buffered_writer.rs b/src/common/datasource/src/buffered_writer.rs index 8ce34070cc..19fb9ee5fc 100644 --- a/src/common/datasource/src/buffered_writer.rs +++ b/src/common/datasource/src/buffered_writer.rs @@ -45,11 +45,11 @@ pub trait ArrowWriterCloser { } impl< - T: AsyncWrite + Send + Unpin, - U: DfRecordBatchEncoder + ArrowWriterCloser, - F: Fn(String) -> Fut, - Fut: Future>, - > LazyBufferedWriter + T: AsyncWrite + Send + Unpin, + U: DfRecordBatchEncoder + ArrowWriterCloser, + F: Fn(String) -> Fut, + Fut: Future>, +> LazyBufferedWriter { /// Closes `LazyBufferedWriter` and optionally flushes all data to underlying storage /// if any row's been written. @@ -67,11 +67,11 @@ impl< } impl< - T: AsyncWrite + Send + Unpin, - U: DfRecordBatchEncoder, - F: Fn(String) -> Fut, - Fut: Future>, - > LazyBufferedWriter + T: AsyncWrite + Send + Unpin, + U: DfRecordBatchEncoder, + F: Fn(String) -> Fut, + Fut: Future>, +> LazyBufferedWriter { /// Closes the writer and flushes the buffer data. pub async fn close_inner_writer(&mut self) -> Result<()> { diff --git a/src/common/datasource/src/file_format.rs b/src/common/datasource/src/file_format.rs index 1991ff825e..fad03da2ee 100644 --- a/src/common/datasource/src/file_format.rs +++ b/src/common/datasource/src/file_format.rs @@ -42,11 +42,11 @@ use self::csv::CsvFormat; use self::json::JsonFormat; use self::orc::OrcFormat; use self::parquet::ParquetFormat; +use crate::DEFAULT_WRITE_BUFFER_SIZE; use crate::buffered_writer::{DfRecordBatchEncoder, LazyBufferedWriter}; use crate::compression::CompressionType; use crate::error::{self, Result}; use crate::share_buffer::SharedBuffer; -use crate::DEFAULT_WRITE_BUFFER_SIZE; pub const FORMAT_COMPRESSION_TYPE: &str = "compression_type"; pub const FORMAT_DELIMITER: &str = "delimiter"; @@ -158,10 +158,10 @@ pub fn open_with_decoder DataFusionResult>( let stream = futures::stream::poll_fn(move |cx| { loop { - if buffered.is_empty() { - if let Some(result) = futures::ready!(upstream.poll_next_unpin(cx)) { - buffered = result?; - }; + if buffered.is_empty() + && let Some(result) = futures::ready!(upstream.poll_next_unpin(cx)) + { + buffered = result?; } let decoded = decoder.decode(buffered.as_ref())?; diff --git a/src/common/datasource/src/file_format/csv.rs b/src/common/datasource/src/file_format/csv.rs index 757d207e89..9b817d35e3 100644 --- a/src/common/datasource/src/file_format/csv.rs +++ b/src/common/datasource/src/file_format/csv.rs @@ -30,7 +30,7 @@ use tokio_util::io::SyncIoBridge; use crate::buffered_writer::DfRecordBatchEncoder; use crate::compression::CompressionType; use crate::error::{self, Result}; -use crate::file_format::{self, stream_to_file, FileFormat}; +use crate::file_format::{self, FileFormat, stream_to_file}; use crate::share_buffer::SharedBuffer; #[derive(Debug, Clone, Copy, PartialEq, Eq)] @@ -160,8 +160,8 @@ mod tests { use super::*; use crate::file_format::{ - FileFormat, FORMAT_COMPRESSION_TYPE, FORMAT_DELIMITER, FORMAT_HAS_HEADER, - FORMAT_SCHEMA_INFER_MAX_RECORD, + FORMAT_COMPRESSION_TYPE, FORMAT_DELIMITER, FORMAT_HAS_HEADER, + FORMAT_SCHEMA_INFER_MAX_RECORD, FileFormat, }; use crate::test_util::{format_schema, test_store}; diff --git a/src/common/datasource/src/file_format/json.rs 
b/src/common/datasource/src/file_format/json.rs index 099313af55..c234eec846 100644 --- a/src/common/datasource/src/file_format/json.rs +++ b/src/common/datasource/src/file_format/json.rs @@ -17,7 +17,7 @@ use std::io::BufReader; use std::str::FromStr; use arrow::json; -use arrow::json::reader::{infer_json_schema_from_iterator, ValueIter}; +use arrow::json::reader::{ValueIter, infer_json_schema_from_iterator}; use arrow::json::writer::LineDelimited; use arrow::record_batch::RecordBatch; use arrow_schema::Schema; @@ -32,7 +32,7 @@ use tokio_util::io::SyncIoBridge; use crate::buffered_writer::DfRecordBatchEncoder; use crate::compression::CompressionType; use crate::error::{self, Result}; -use crate::file_format::{self, stream_to_file, FileFormat}; +use crate::file_format::{self, FileFormat, stream_to_file}; use crate::share_buffer::SharedBuffer; #[derive(Debug, Clone, Copy, PartialEq, Eq)] @@ -133,7 +133,7 @@ mod tests { use common_test_util::find_workspace_path; use super::*; - use crate::file_format::{FileFormat, FORMAT_COMPRESSION_TYPE, FORMAT_SCHEMA_INFER_MAX_RECORD}; + use crate::file_format::{FORMAT_COMPRESSION_TYPE, FORMAT_SCHEMA_INFER_MAX_RECORD, FileFormat}; use crate::test_util::{format_schema, test_store}; fn test_data_root() -> String { diff --git a/src/common/datasource/src/file_format/orc.rs b/src/common/datasource/src/file_format/orc.rs index 27a285b2e9..6b00b41036 100644 --- a/src/common/datasource/src/file_format/orc.rs +++ b/src/common/datasource/src/file_format/orc.rs @@ -15,8 +15,8 @@ use arrow_schema::Schema; use async_trait::async_trait; use bytes::Bytes; -use futures::future::BoxFuture; use futures::FutureExt; +use futures::future::BoxFuture; use object_store::ObjectStore; use orc_rust::arrow_reader::ArrowReaderBuilder; use orc_rust::async_arrow_reader::ArrowStreamReader; diff --git a/src/common/datasource/src/file_format/parquet.rs b/src/common/datasource/src/file_format/parquet.rs index 6bbb1a6d54..d89a8f030a 100644 --- a/src/common/datasource/src/file_format/parquet.rs +++ b/src/common/datasource/src/file_format/parquet.rs @@ -21,29 +21,29 @@ use async_trait::async_trait; use datafusion::datasource::physical_plan::{FileMeta, ParquetFileReaderFactory}; use datafusion::error::Result as DatafusionResult; use datafusion::parquet::arrow::async_reader::AsyncFileReader; -use datafusion::parquet::arrow::{parquet_to_arrow_schema, ArrowWriter}; +use datafusion::parquet::arrow::{ArrowWriter, parquet_to_arrow_schema}; use datafusion::parquet::errors::{ParquetError, Result as ParquetResult}; use datafusion::parquet::file::metadata::ParquetMetaData; use datafusion::parquet::format::FileMetaData; -use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet; use datafusion::physical_plan::SendableRecordBatchStream; +use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet; use datatypes::schema::SchemaRef; -use futures::future::BoxFuture; use futures::StreamExt; +use futures::future::BoxFuture; use object_store::{FuturesAsyncReader, ObjectStore}; -use parquet::arrow::arrow_reader::ArrowReaderOptions; use parquet::arrow::AsyncArrowWriter; +use parquet::arrow::arrow_reader::ArrowReaderOptions; use parquet::basic::{Compression, Encoding, ZstdLevel}; use parquet::file::properties::{WriterProperties, WriterPropertiesBuilder}; use parquet::schema::types::ColumnPath; use snafu::ResultExt; use tokio_util::compat::{Compat, FuturesAsyncReadCompatExt, FuturesAsyncWriteCompatExt}; +use crate::DEFAULT_WRITE_BUFFER_SIZE; use crate::buffered_writer::{ArrowWriterCloser, 
DfRecordBatchEncoder}; use crate::error::{self, Result, WriteObjectSnafu, WriteParquetSnafu}; use crate::file_format::FileFormat; use crate::share_buffer::SharedBuffer; -use crate::DEFAULT_WRITE_BUFFER_SIZE; #[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] pub struct ParquetFormat {} diff --git a/src/common/datasource/src/file_format/tests.rs b/src/common/datasource/src/file_format/tests.rs index 9f5e7f3f7c..d44f0f70fa 100644 --- a/src/common/datasource/src/file_format/tests.rs +++ b/src/common/datasource/src/file_format/tests.rs @@ -24,8 +24,8 @@ use datafusion::datasource::physical_plan::{ }; use datafusion::datasource::source::DataSourceExec; use datafusion::execution::context::TaskContext; -use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet; use datafusion::physical_plan::ExecutionPlan; +use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet; use datafusion::prelude::SessionContext; use datafusion_orc::OrcSource; use futures::StreamExt; @@ -204,15 +204,15 @@ async fn test_orc_opener() { config: scan_config(schema.clone(), None, path, file_source.clone()), file_source: file_source.clone(), expected: vec![ - "+----------+-----+-------+------------+-----+-----+-------+--------------------+------------------------+-----------+---------------+------------+----------------+---------------+-------------------+--------------+---------------+---------------+----------------------------+-------------+", - "| double_a | a | b | str_direct | d | e | f | int_short_repeated | int_neg_short_repeated | int_delta | int_neg_delta | int_direct | int_neg_direct | bigint_direct | bigint_neg_direct | bigint_other | utf8_increase | utf8_decrease | timestamp_simple | date_simple |", - "+----------+-----+-------+------------+-----+-----+-------+--------------------+------------------------+-----------+---------------+------------+----------------+---------------+-------------------+--------------+---------------+---------------+----------------------------+-------------+", - "| 1.0 | 1.0 | true | a | a | ddd | aaaaa | 5 | -5 | 1 | 5 | 1 | -1 | 1 | -1 | 5 | a | eeeee | 2023-04-01T20:15:30.002 | 2023-04-01 |", - "| 2.0 | 2.0 | false | cccccc | bb | cc | bbbbb | 5 | -5 | 2 | 4 | 6 | -6 | 6 | -6 | -5 | bb | dddd | 2021-08-22T07:26:44.525777 | 2023-03-01 |", - "| 3.0 | | | | | | | | | | | | | | | 1 | ccc | ccc | 2023-01-01T00:00:00 | 2023-01-01 |", - "| 4.0 | 4.0 | true | ddd | ccc | bb | ccccc | 5 | -5 | 4 | 2 | 3 | -3 | 3 | -3 | 5 | dddd | bb | 2023-02-01T00:00:00 | 2023-02-01 |", - "| 5.0 | 5.0 | false | ee | ddd | a | ddddd | 5 | -5 | 5 | 1 | 2 | -2 | 2 | -2 | 5 | eeeee | a | 2023-03-01T00:00:00 | 2023-03-01 |", - "+----------+-----+-------+------------+-----+-----+-------+--------------------+------------------------+-----------+---------------+------------+----------------+---------------+-------------------+--------------+---------------+---------------+----------------------------+-------------+", + "+----------+-----+-------+------------+-----+-----+-------+--------------------+------------------------+-----------+---------------+------------+----------------+---------------+-------------------+--------------+---------------+---------------+----------------------------+-------------+", + "| double_a | a | b | str_direct | d | e | f | int_short_repeated | int_neg_short_repeated | int_delta | int_neg_delta | int_direct | int_neg_direct | bigint_direct | bigint_neg_direct | bigint_other | utf8_increase | utf8_decrease | timestamp_simple | date_simple |", + 
"+----------+-----+-------+------------+-----+-----+-------+--------------------+------------------------+-----------+---------------+------------+----------------+---------------+-------------------+--------------+---------------+---------------+----------------------------+-------------+", + "| 1.0 | 1.0 | true | a | a | ddd | aaaaa | 5 | -5 | 1 | 5 | 1 | -1 | 1 | -1 | 5 | a | eeeee | 2023-04-01T20:15:30.002 | 2023-04-01 |", + "| 2.0 | 2.0 | false | cccccc | bb | cc | bbbbb | 5 | -5 | 2 | 4 | 6 | -6 | 6 | -6 | -5 | bb | dddd | 2021-08-22T07:26:44.525777 | 2023-03-01 |", + "| 3.0 | | | | | | | | | | | | | | | 1 | ccc | ccc | 2023-01-01T00:00:00 | 2023-01-01 |", + "| 4.0 | 4.0 | true | ddd | ccc | bb | ccccc | 5 | -5 | 4 | 2 | 3 | -3 | 3 | -3 | 5 | dddd | bb | 2023-02-01T00:00:00 | 2023-02-01 |", + "| 5.0 | 5.0 | false | ee | ddd | a | ddddd | 5 | -5 | 5 | 1 | 2 | -2 | 2 | -2 | 5 | eeeee | a | 2023-03-01T00:00:00 | 2023-03-01 |", + "+----------+-----+-------+------------+-----+-----+-------+--------------------+------------------------+-----------+---------------+------------+----------------+---------------+-------------------+--------------+---------------+---------------+----------------------------+-------------+", ], }, Test { diff --git a/src/common/datasource/src/lister.rs b/src/common/datasource/src/lister.rs index 6134469396..97bbadfe12 100644 --- a/src/common/datasource/src/lister.rs +++ b/src/common/datasource/src/lister.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use futures::{future, TryStreamExt}; +use futures::{TryStreamExt, future}; use object_store::{Entry, ObjectStore}; use regex::Regex; use snafu::ResultExt; diff --git a/src/common/datasource/src/object_store/fs.rs b/src/common/datasource/src/object_store/fs.rs index 5ffbbfa314..95e5a69144 100644 --- a/src/common/datasource/src/object_store/fs.rs +++ b/src/common/datasource/src/object_store/fs.rs @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+use object_store::ObjectStore; use object_store::services::Fs; use object_store::util::DefaultLoggingInterceptor; -use object_store::ObjectStore; use snafu::ResultExt; use crate::error::{BuildBackendSnafu, Result}; diff --git a/src/common/datasource/src/object_store/oss.rs b/src/common/datasource/src/object_store/oss.rs index c7e8eb5dd7..47cd9b489f 100644 --- a/src/common/datasource/src/object_store/oss.rs +++ b/src/common/datasource/src/object_store/oss.rs @@ -14,8 +14,8 @@ use std::collections::HashMap; -use object_store::services::Oss; use object_store::ObjectStore; +use object_store::services::Oss; use snafu::ResultExt; use crate::error::{self, Result}; diff --git a/src/common/datasource/src/object_store/s3.rs b/src/common/datasource/src/object_store/s3.rs index 8f2e6464dd..356b6fcf75 100644 --- a/src/common/datasource/src/object_store/s3.rs +++ b/src/common/datasource/src/object_store/s3.rs @@ -14,9 +14,9 @@ use std::collections::HashMap; +use object_store::ObjectStore; use object_store::services::S3; use object_store::util::DefaultLoggingInterceptor; -use object_store::ObjectStore; use snafu::ResultExt; use crate::error::{self, Result}; diff --git a/src/common/datasource/src/test_util.rs b/src/common/datasource/src/test_util.rs index 0f2c6918dc..5f9bffe5a1 100644 --- a/src/common/datasource/src/test_util.rs +++ b/src/common/datasource/src/test_util.rs @@ -15,7 +15,7 @@ use std::sync::Arc; use arrow_schema::{DataType, Field, Schema, SchemaRef}; -use common_test_util::temp_dir::{create_temp_dir, TempDir}; +use common_test_util::temp_dir::{TempDir, create_temp_dir}; use datafusion::datasource::file_format::file_compression_type::FileCompressionType; use datafusion::datasource::listing::PartitionedFile; use datafusion::datasource::object_store::ObjectStoreUrl; @@ -24,8 +24,8 @@ use datafusion::datasource::physical_plan::{ JsonOpener, JsonSource, }; use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet; -use object_store::services::Fs; use object_store::ObjectStore; +use object_store::services::Fs; use crate::file_format::csv::stream_to_csv; use crate::file_format::json::stream_to_json; @@ -111,15 +111,17 @@ pub async fn setup_stream_to_json_test(origin_path: &str, threshold: impl Fn(usi let output_path = format!("{}/{}", dir.path().display(), "output"); - assert!(stream_to_json( - Box::pin(stream), - tmp_store.clone(), - &output_path, - threshold(size), - 8 - ) - .await - .is_ok()); + assert!( + stream_to_json( + Box::pin(stream), + tmp_store.clone(), + &output_path, + threshold(size), + 8 + ) + .await + .is_ok() + ); let written = tmp_store.read(&output_path).await.unwrap(); let origin = store.read(origin_path).await.unwrap(); @@ -148,15 +150,17 @@ pub async fn setup_stream_to_csv_test(origin_path: &str, threshold: impl Fn(usiz let output_path = format!("{}/{}", dir.path().display(), "output"); - assert!(stream_to_csv( - Box::pin(stream), - tmp_store.clone(), - &output_path, - threshold(size), - 8 - ) - .await - .is_ok()); + assert!( + stream_to_csv( + Box::pin(stream), + tmp_store.clone(), + &output_path, + threshold(size), + 8 + ) + .await + .is_ok() + ); let written = tmp_store.read(&output_path).await.unwrap(); let origin = store.read(origin_path).await.unwrap(); diff --git a/src/common/event-recorder/src/recorder.rs b/src/common/event-recorder/src/recorder.rs index 62d0733387..ddf0bcdae0 100644 --- a/src/common/event-recorder/src/recorder.rs +++ b/src/common/event-recorder/src/recorder.rs @@ -32,7 +32,7 @@ use humantime::format_duration; use itertools::Itertools; use 
serde::{Deserialize, Serialize}; use store_api::mito_engine_options::{APPEND_MODE_KEY, TTL_KEY}; -use tokio::sync::mpsc::{channel, Receiver, Sender}; +use tokio::sync::mpsc::{Receiver, Sender, channel}; use tokio::task::JoinHandle; use tokio::time::sleep; use tokio_util::sync::CancellationToken; diff --git a/src/common/frontend/src/selector.rs b/src/common/frontend/src/selector.rs index e70f622fa0..4e6cc9566c 100644 --- a/src/common/frontend/src/selector.rs +++ b/src/common/frontend/src/selector.rs @@ -18,8 +18,8 @@ use std::time::Duration; use common_grpc::channel_manager::{ChannelConfig, ChannelManager}; use common_meta::cluster::{ClusterInfo, NodeInfo, Role}; use greptime_proto::v1::frontend::{ - frontend_client, KillProcessRequest, KillProcessResponse, ListProcessRequest, - ListProcessResponse, + KillProcessRequest, KillProcessResponse, ListProcessRequest, ListProcessResponse, + frontend_client, }; use meta_client::MetaClientRef; use snafu::ResultExt; diff --git a/src/common/frontend/src/slow_query_event.rs b/src/common/frontend/src/slow_query_event.rs index 3845d59414..0e65443acb 100644 --- a/src/common/frontend/src/slow_query_event.rs +++ b/src/common/frontend/src/slow_query_event.rs @@ -16,8 +16,8 @@ use std::any::Any; use api::v1::value::ValueData; use api::v1::{ColumnDataType, ColumnSchema, Row, SemanticType}; -use common_event_recorder::error::Result; use common_event_recorder::Event; +use common_event_recorder::error::Result; use serde::Serialize; pub const SLOW_QUERY_TABLE_NAME: &str = "slow_queries"; diff --git a/src/common/function/src/admin/flush_compact_table.rs b/src/common/function/src/admin/flush_compact_table.rs index 7ae56f3f05..378b4181cd 100644 --- a/src/common/function/src/admin/flush_compact_table.rs +++ b/src/common/function/src/admin/flush_compact_table.rs @@ -14,7 +14,7 @@ use std::str::FromStr; -use api::v1::region::{compact_request, StrictWindow}; +use api::v1::region::{StrictWindow, compact_request}; use arrow::datatypes::DataType as ArrowDataType; use common_error::ext::BoxedError; use common_macro::admin_fn; @@ -27,7 +27,7 @@ use datafusion_expr::{Signature, Volatility}; use datatypes::prelude::*; use session::context::QueryContextRef; use session::table_name::table_name_to_full_name; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use table::requests::{CompactTableRequest, FlushTableRequest}; use crate::handlers::TableMutationHandlerRef; @@ -133,13 +133,19 @@ fn parse_compact_params( table_name, compact_request::Options::Regular(Default::default()), ), - [ValueRef::String(table_name), ValueRef::String(compact_ty_str)] => { + [ + ValueRef::String(table_name), + ValueRef::String(compact_ty_str), + ] => { let compact_type = parse_compact_type(compact_ty_str, None)?; (table_name, compact_type) } - [ValueRef::String(table_name), ValueRef::String(compact_ty_str), ValueRef::String(options_str)] => - { + [ + ValueRef::String(table_name), + ValueRef::String(compact_ty_str), + ValueRef::String(options_str), + ] => { let compact_type = parse_compact_type(compact_ty_str, Some(options_str))?; (table_name, compact_type) } @@ -148,7 +154,7 @@ fn parse_compact_params( function: "compact_table", datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(), } - .fail() + .fail(); } }; @@ -384,22 +390,26 @@ mod tests { ), ]); - assert!(parse_compact_params( - &["table", "strict_window", "abc"] - .into_iter() - .map(ValueRef::String) - .collect::<Vec<_>>(), - &QueryContext::arc(), - ) - .is_err()); + assert!( + parse_compact_params( + &["table", "strict_window",
"abc"] + .into_iter() + .map(ValueRef::String) + .collect::>(), + &QueryContext::arc(), + ) + .is_err() + ); - assert!(parse_compact_params( - &["a.b.table", "strict_window", "abc"] - .into_iter() - .map(ValueRef::String) - .collect::>(), - &QueryContext::arc(), - ) - .is_err()); + assert!( + parse_compact_params( + &["a.b.table", "strict_window", "abc"] + .into_iter() + .map(ValueRef::String) + .collect::>(), + &QueryContext::arc(), + ) + .is_err() + ); } } diff --git a/src/common/function/src/admin/reconcile_table.rs b/src/common/function/src/admin/reconcile_table.rs index 10d2c5fdbf..0c82c2ddc9 100644 --- a/src/common/function/src/admin/reconcile_table.rs +++ b/src/common/function/src/admin/reconcile_table.rs @@ -54,15 +54,16 @@ pub(crate) async fn reconcile_table( ) -> Result { let (table_name, resolve_strategy) = match params { [ValueRef::String(table_name)] => (table_name, ResolveStrategy::UseLatest), - [ValueRef::String(table_name), ValueRef::String(resolve_strategy)] => { - (table_name, parse_resolve_strategy(resolve_strategy)?) - } + [ + ValueRef::String(table_name), + ValueRef::String(resolve_strategy), + ] => (table_name, parse_resolve_strategy(resolve_strategy)?), _ => { return UnsupportedInputDataTypeSnafu { function: FN_NAME, datatypes: params.iter().map(|v| v.data_type()).collect::>(), } - .fail() + .fail(); } }; let (catalog_name, schema_name, table_name) = table_name_to_full_name(table_name, query_ctx) diff --git a/src/common/function/src/aggrs/aggr_wrapper.rs b/src/common/function/src/aggrs/aggr_wrapper.rs index e41927281f..2d2a73ad3b 100644 --- a/src/common/function/src/aggrs/aggr_wrapper.rs +++ b/src/common/function/src/aggrs/aggr_wrapper.rs @@ -28,8 +28,8 @@ use arrow::array::StructArray; use arrow_schema::{FieldRef, Fields}; use common_telemetry::debug; use datafusion::functions_aggregate::all_default_aggregate_functions; -use datafusion::optimizer::analyzer::type_coercion::TypeCoercion; use datafusion::optimizer::AnalyzerRule; +use datafusion::optimizer::analyzer::type_coercion::TypeCoercion; use datafusion::physical_planner::create_aggregate_expr_and_maybe_filter; use datafusion_common::{Column, ScalarValue}; use datafusion_expr::expr::{AggregateFunction, AggregateFunctionParams}; @@ -207,8 +207,9 @@ impl StateMergeHelper { let upper_plan = LogicalPlan::Aggregate(upper_check).recompute_schema()?; if *upper_plan.schema() != *aggr_plan.schema() { return Err(datafusion_common::DataFusionError::Internal(format!( - "Upper aggregate plan's schema is not the same as the original aggregate plan's schema: \n[transformed]:{}\n[original]:{}", - upper_plan.schema(), aggr_plan.schema() + "Upper aggregate plan's schema is not the same as the original aggregate plan's schema: \n[transformed]:{}\n[original]:{}", + upper_plan.schema(), + aggr_plan.schema() ))); } diff --git a/src/common/function/src/aggrs/aggr_wrapper/tests.rs b/src/common/function/src/aggrs/aggr_wrapper/tests.rs index 805b45b3c8..3f82a8fa9b 100644 --- a/src/common/function/src/aggrs/aggr_wrapper/tests.rs +++ b/src/common/function/src/aggrs/aggr_wrapper/tests.rs @@ -25,8 +25,8 @@ use datafusion::datasource::DefaultTableSource; use datafusion::execution::{RecordBatchStream, SendableRecordBatchStream, TaskContext}; use datafusion::functions_aggregate::average::avg_udaf; use datafusion::functions_aggregate::sum::sum_udaf; -use datafusion::optimizer::analyzer::type_coercion::TypeCoercion; use datafusion::optimizer::AnalyzerRule; +use datafusion::optimizer::analyzer::type_coercion::TypeCoercion; use 
datafusion::physical_plan::aggregates::AggregateExec; use datafusion::physical_plan::execution_plan::{Boundedness, EmissionType}; use datafusion::physical_plan::{DisplayAs, DisplayFormatType, ExecutionPlan, PlanProperties}; @@ -35,7 +35,7 @@ use datafusion::prelude::SessionContext; use datafusion_common::{Column, TableReference}; use datafusion_expr::expr::AggregateFunction; use datafusion_expr::sqlparser::ast::NullTreatment; -use datafusion_expr::{lit, Aggregate, Expr, LogicalPlan, SortExpr, TableScan}; +use datafusion_expr::{Aggregate, Expr, LogicalPlan, SortExpr, TableScan, lit}; use datafusion_physical_expr::aggregate::AggregateExprBuilder; use datafusion_physical_expr::{EquivalenceProperties, Partitioning}; use datatypes::arrow_array::StringArray; @@ -264,36 +264,38 @@ async fn test_sum_udaf() { Aggregate::try_new( Arc::new(expected_lower_plan), vec![], - vec![Expr::AggregateFunction(AggregateFunction::new_udf( - Arc::new( - MergeWrapper::new( - sum.clone(), - Arc::new( - AggregateExprBuilder::new( - Arc::new(sum.clone()), - vec![Arc::new( - datafusion::physical_expr::expressions::Column::new( - "number", 0, - ), - )], - ) - .schema(Arc::new(dummy_table_scan().schema().as_arrow().clone())) - .alias("sum(number)") - .build() - .unwrap(), - ), - vec![DataType::Int64], - ) - .unwrap() - .into(), - ), - vec![Expr::Column(Column::new_unqualified("__sum_state(number)"))], - false, - None, - vec![], - None, - )) - .alias("sum(number)")], + vec![ + Expr::AggregateFunction(AggregateFunction::new_udf( + Arc::new( + MergeWrapper::new( + sum.clone(), + Arc::new( + AggregateExprBuilder::new( + Arc::new(sum.clone()), + vec![Arc::new( + datafusion::physical_expr::expressions::Column::new( + "number", 0, + ), + )], + ) + .schema(Arc::new(dummy_table_scan().schema().as_arrow().clone())) + .alias("sum(number)") + .build() + .unwrap(), + ), + vec![DataType::Int64], + ) + .unwrap() + .into(), + ), + vec![Expr::Column(Column::new_unqualified("__sum_state(number)"))], + false, + None, + vec![], + None, + )) + .alias("sum(number)"), + ], ) .unwrap(), ); @@ -444,15 +446,17 @@ async fn test_avg_udaf() { Aggregate::try_new( Arc::new(coerced_aggr_state_plan.clone()), vec![], - vec![Expr::AggregateFunction(AggregateFunction::new_udf( - Arc::new(expected_merge_fn.into()), - vec![Expr::Column(Column::new_unqualified("__avg_state(number)"))], - false, - None, - vec![], - None, - )) - .alias("avg(number)")], + vec![ + Expr::AggregateFunction(AggregateFunction::new_udf( + Arc::new(expected_merge_fn.into()), + vec![Expr::Column(Column::new_unqualified("__avg_state(number)"))], + false, + None, + vec![], + None, + )) + .alias("avg(number)"), + ], ) .unwrap(), ); diff --git a/src/common/function/src/aggrs/approximate/hll.rs b/src/common/function/src/aggrs/approximate/hll.rs index de9de12d59..30596bea3b 100644 --- a/src/common/function/src/aggrs/approximate/hll.rs +++ b/src/common/function/src/aggrs/approximate/hll.rs @@ -105,10 +105,10 @@ impl HllState { } fn merge(&mut self, raw: &[u8]) { - if let Ok(serialized) = bincode::deserialize::(raw) { - if let Ok(()) = self.hll.merge(&serialized) { - return; - } + if let Ok(serialized) = bincode::deserialize::(raw) + && let Ok(()) = self.hll.merge(&serialized) + { + return; } trace!("Warning: Failed to merge HyperLogLog from {:?}", raw); } @@ -153,7 +153,7 @@ impl DfAccumulator for HllState { return not_impl_err!( "HLL functions do not support data type: {}", array.data_type() - ) + ); } } diff --git a/src/common/function/src/aggrs/approximate/uddsketch.rs 
b/src/common/function/src/aggrs/approximate/uddsketch.rs index 92c952b7b3..24b5c5455c 100644 --- a/src/common/function/src/aggrs/approximate/uddsketch.rs +++ b/src/common/function/src/aggrs/approximate/uddsketch.rs @@ -98,11 +98,11 @@ impl UddSketchState { { return Err(DataFusionError::Plan(format!( "Merging UDDSketch with different parameters: arguments={:?} vs actual input={:?}", + (self.uddsketch.max_allowed_buckets(), self.error_rate), ( - self.uddsketch.max_allowed_buckets(), - self.error_rate - ), - (uddsketch.uddsketch.max_allowed_buckets(), uddsketch.error_rate) + uddsketch.uddsketch.max_allowed_buckets(), + uddsketch.error_rate + ) ))); } self.uddsketch.merge_sketch(&uddsketch.uddsketch); @@ -130,7 +130,7 @@ fn downcast_accumulator_args(args: AccumulatorArgs) -> DfResult<(u64, f64)> { "{} not supported for bucket size: {}", UDDSKETCH_STATE_NAME, &args.exprs[0] - ) + ); } }; @@ -145,7 +145,7 @@ fn downcast_accumulator_args(args: AccumulatorArgs) -> DfResult<(u64, f64)> { "{} not supported for error rate: {}", UDDSKETCH_STATE_NAME, &args.exprs[1] - ) + ); } }; @@ -168,7 +168,7 @@ impl DfAccumulator for UddSketchState { return not_impl_err!( "UDDSketch functions do not support data type: {}", array.data_type() - ) + ); } } diff --git a/src/common/function/src/aggrs/count_hash.rs b/src/common/function/src/aggrs/count_hash.rs index 2e1a2bb8a6..ded88107e6 100644 --- a/src/common/function/src/aggrs/count_hash.rs +++ b/src/common/function/src/aggrs/count_hash.rs @@ -28,9 +28,9 @@ use datafusion_common::cast::as_list_array; use datafusion_common::error::Result; use datafusion_common::hash_utils::create_hashes; use datafusion_common::utils::SingleRowListArrayBuilder; -use datafusion_common::{internal_err, not_impl_err, ScalarValue}; +use datafusion_common::{ScalarValue, internal_err, not_impl_err}; use datafusion_expr::function::{AccumulatorArgs, StateFieldsArgs}; -use datafusion_expr::utils::{format_state_name, AggregateOrderSensitivity}; +use datafusion_expr::utils::{AggregateOrderSensitivity, format_state_name}; use datafusion_expr::{ Accumulator, AggregateUDF, AggregateUDFImpl, EmitTo, GroupsAccumulator, ReversedUDAF, SetMonotonicity, Signature, TypeSignature, Volatility, diff --git a/src/common/function/src/aggrs/geo/encoding.rs b/src/common/function/src/aggrs/geo/encoding.rs index d868246d7f..ef07f15344 100644 --- a/src/common/function/src/aggrs/geo/encoding.rs +++ b/src/common/function/src/aggrs/geo/encoding.rs @@ -20,8 +20,8 @@ use datafusion::common::cast::as_primitive_array; use datafusion::error::{DataFusionError, Result as DfResult}; use datafusion::logical_expr::{Accumulator as DfAccumulator, AggregateUDF, Volatility}; use datafusion::prelude::create_udaf; -use datafusion_common::cast::{as_list_array, as_struct_array}; use datafusion_common::ScalarValue; +use datafusion_common::cast::{as_list_array, as_struct_array}; use datatypes::arrow::array::{Float64Array, Int64Array, ListArray, StructArray}; use datatypes::arrow::datatypes::{ DataType, Field, Float64Type, Int64Type, TimeUnit, TimestampNanosecondType, diff --git a/src/common/function/src/aggrs/geo/geo_path.rs b/src/common/function/src/aggrs/geo/geo_path.rs index 08abe0c731..b09745eaeb 100644 --- a/src/common/function/src/aggrs/geo/geo_path.rs +++ b/src/common/function/src/aggrs/geo/geo_path.rs @@ -19,9 +19,9 @@ use datafusion::common::cast::as_primitive_array; use datafusion::error::{DataFusionError, Result as DfResult}; use datafusion::logical_expr::{Accumulator as DfAccumulator, AggregateUDF, Volatility}; use 
datafusion::prelude::create_udaf; +use datafusion_common::ScalarValue; use datafusion_common::cast::{as_list_array, as_struct_array}; use datafusion_common::utils::SingleRowListArrayBuilder; -use datafusion_common::ScalarValue; use datatypes::arrow::array::{Float64Array, Int64Array, ListArray, StructArray}; use datatypes::arrow::datatypes::{ DataType, Field, Float64Type, Int64Type, TimeUnit, TimestampNanosecondType, diff --git a/src/common/function/src/aggrs/vector/product.rs b/src/common/function/src/aggrs/vector/product.rs index 51404a5c0a..d22ea96f45 100644 --- a/src/common/function/src/aggrs/vector/product.rs +++ b/src/common/function/src/aggrs/vector/product.rs @@ -101,7 +101,7 @@ impl VectorProduct { return Err(datafusion_common::DataFusionError::NotImplemented(format!( "unsupported data type {} for `VEC_PRODUCT`", values[0].data_type() - ))) + ))); } }; if vectors.len() != values[0].len() { @@ -208,11 +208,13 @@ mod tests { // test update with constant vector let mut vec_product = VectorProduct::default(); - let v: Vec = vec![Arc::new(ConstantVector::new( - Arc::new(StringVector::from_vec(vec!["[1.0,2.0,3.0]".to_string()])), - 4, - )) - .to_arrow_array()]; + let v: Vec = vec![ + Arc::new(ConstantVector::new( + Arc::new(StringVector::from_vec(vec!["[1.0,2.0,3.0]".to_string()])), + 4, + )) + .to_arrow_array(), + ]; vec_product.update_batch(&v).unwrap(); diff --git a/src/common/function/src/aggrs/vector/sum.rs b/src/common/function/src/aggrs/vector/sum.rs index 9532db8654..b6ff942791 100644 --- a/src/common/function/src/aggrs/vector/sum.rs +++ b/src/common/function/src/aggrs/vector/sum.rs @@ -117,7 +117,7 @@ impl VectorSum { return Err(datafusion_common::DataFusionError::NotImplemented(format!( "unsupported data type {} for `VEC_SUM`", values[0].data_type() - ))) + ))); } } Ok(()) @@ -212,11 +212,13 @@ mod tests { // test update with constant vector let mut vec_sum = VectorSum::default(); - let v: Vec = vec![Arc::new(ConstantVector::new( - Arc::new(StringVector::from_vec(vec!["[1.0,2.0,3.0]".to_string()])), - 4, - )) - .to_arrow_array()]; + let v: Vec = vec![ + Arc::new(ConstantVector::new( + Arc::new(StringVector::from_vec(vec!["[1.0,2.0,3.0]".to_string()])), + 4, + )) + .to_arrow_array(), + ]; vec_sum.update_batch(&v).unwrap(); assert_eq!( ScalarValue::Binary(Some(veclit_to_binlit(&[4.0, 8.0, 12.0]))), diff --git a/src/common/function/src/flush_flow.rs b/src/common/function/src/flush_flow.rs index 68fcb3dd14..c4ea554585 100644 --- a/src/common/function/src/flush_flow.rs +++ b/src/common/function/src/flush_flow.rs @@ -22,7 +22,7 @@ use common_query::error::{ use datafusion_expr::{Signature, Volatility}; use datatypes::value::{Value, ValueRef}; use session::context::QueryContextRef; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use sql::ast::ObjectNamePartExt; use sql::parser::ParserContext; @@ -91,7 +91,7 @@ fn parse_flush_flow( obj_name ), } - .fail() + .fail(); } }; Ok((catalog_name, flow_name)) diff --git a/src/common/function/src/handlers.rs b/src/common/function/src/handlers.rs index 7289de6763..c5ef9ad220 100644 --- a/src/common/function/src/handlers.rs +++ b/src/common/function/src/handlers.rs @@ -22,8 +22,8 @@ use common_meta::rpc::procedure::{ AddRegionFollowerRequest, MigrateRegionRequest, ProcedureStateResponse, RemoveRegionFollowerRequest, }; -use common_query::error::Result; use common_query::Output; +use common_query::error::Result; use session::context::QueryContextRef; use store_api::storage::RegionId; use table::requests::{CompactTableRequest, 
DeleteRequest, FlushTableRequest, InsertRequest}; @@ -39,7 +39,7 @@ pub trait TableMutationHandler: Send + Sync { /// Trigger a flush task for table. async fn flush(&self, request: FlushTableRequest, ctx: QueryContextRef) - -> Result; + -> Result; /// Trigger a compaction task for table. async fn compact( @@ -50,7 +50,7 @@ pub trait TableMutationHandler: Send + Sync { /// Trigger a flush task for a table region. async fn flush_region(&self, region_id: RegionId, ctx: QueryContextRef) - -> Result; + -> Result; /// Trigger a compaction task for a table region. async fn compact_region( diff --git a/src/common/function/src/scalars/date/date_add.rs b/src/common/function/src/scalars/date/date_add.rs index d42fb3d931..682bbab7ce 100644 --- a/src/common/function/src/scalars/date/date_add.rs +++ b/src/common/function/src/scalars/date/date_add.rs @@ -20,7 +20,7 @@ use datatypes::arrow::compute::kernels::numeric; use datatypes::arrow::datatypes::{DataType, IntervalUnit, TimeUnit}; use datatypes::prelude::ConcreteDataType; use datatypes::vectors::{Helper, VectorRef}; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use crate::function::{Function, FunctionContext}; use crate::helper; diff --git a/src/common/function/src/scalars/date/date_format.rs b/src/common/function/src/scalars/date/date_format.rs index 5efea2adc7..95104c473d 100644 --- a/src/common/function/src/scalars/date/date_format.rs +++ b/src/common/function/src/scalars/date/date_format.rs @@ -20,7 +20,7 @@ use datafusion_expr::Signature; use datatypes::arrow::datatypes::{DataType, TimeUnit}; use datatypes::prelude::{ConcreteDataType, MutableVector, ScalarVectorBuilder}; use datatypes::vectors::{StringVectorBuilder, VectorRef}; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use crate::function::{Function, FunctionContext}; use crate::helper; diff --git a/src/common/function/src/scalars/date/date_sub.rs b/src/common/function/src/scalars/date/date_sub.rs index 8ad4d7f414..939ce93892 100644 --- a/src/common/function/src/scalars/date/date_sub.rs +++ b/src/common/function/src/scalars/date/date_sub.rs @@ -20,7 +20,7 @@ use datatypes::arrow::compute::kernels::numeric; use datatypes::arrow::datatypes::{DataType, IntervalUnit, TimeUnit}; use datatypes::prelude::ConcreteDataType; use datatypes::vectors::{Helper, VectorRef}; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use crate::function::{Function, FunctionContext}; use crate::helper; diff --git a/src/common/function/src/scalars/expression/ctx.rs b/src/common/function/src/scalars/expression/ctx.rs index 362997ab24..87543dc5db 100644 --- a/src/common/function/src/scalars/expression/ctx.rs +++ b/src/common/function/src/scalars/expression/ctx.rs @@ -13,8 +13,8 @@ // limitations under the License. 
use common_query::error::Error; -use common_time::timezone::get_timezone; use common_time::Timezone; +use common_time::timezone::get_timezone; pub struct EvalContext { pub timezone: Timezone, diff --git a/src/common/function/src/scalars/expression/is_null.rs b/src/common/function/src/scalars/expression/is_null.rs index 7834915711..bf8abe3c5a 100644 --- a/src/common/function/src/scalars/expression/is_null.rs +++ b/src/common/function/src/scalars/expression/is_null.rs @@ -24,7 +24,7 @@ use datafusion_expr::{Signature, Volatility}; use datatypes::data_type::ConcreteDataType; use datatypes::prelude::VectorRef; use datatypes::vectors::Helper; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use crate::function::{Function, FunctionContext}; diff --git a/src/common/function/src/scalars/geo/geohash.rs b/src/common/function/src/scalars/geo/geohash.rs index a0f1406150..8b8b00957d 100644 --- a/src/common/function/src/scalars/geo/geohash.rs +++ b/src/common/function/src/scalars/geo/geohash.rs @@ -25,7 +25,7 @@ use datatypes::scalars::{Scalar, ScalarVectorBuilder}; use datatypes::value::{ListValue, Value}; use datatypes::vectors::{ListVectorBuilder, MutableVector, StringVectorBuilder, VectorRef}; use geohash::Coord; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use crate::function::{Function, FunctionContext}; diff --git a/src/common/function/src/scalars/geo/h3.rs b/src/common/function/src/scalars/geo/h3.rs index 1063ff9c8f..43cb0058cb 100644 --- a/src/common/function/src/scalars/geo/h3.rs +++ b/src/common/function/src/scalars/geo/h3.rs @@ -26,7 +26,7 @@ use datatypes::scalars::{Scalar, ScalarVectorBuilder}; use datatypes::value::{ListValue, Value}; use datatypes::vectors::{ BooleanVectorBuilder, Float64VectorBuilder, Int32VectorBuilder, ListVectorBuilder, - MutableVector, StringVectorBuilder, UInt64VectorBuilder, UInt8VectorBuilder, VectorRef, + MutableVector, StringVectorBuilder, UInt8VectorBuilder, UInt64VectorBuilder, VectorRef, }; use derive_more::Display; use h3o::{CellIndex, LatLng, Resolution}; @@ -967,11 +967,11 @@ impl Function for H3CellContains { // get cell resolution, and find cell_this's parent at // this solution, test if cell_that equals the parent let resolution = cell_that.resolution(); - if let Some(cell_this_parent) = cell_this.parent(resolution) { - if cell_this_parent == *cell_that { - result = Some(true); - break; - } + if let Some(cell_this_parent) = cell_this.parent(resolution) + && cell_this_parent == *cell_that + { + result = Some(true); + break; } } } diff --git a/src/common/function/src/scalars/hll_count.rs b/src/common/function/src/scalars/hll_count.rs index 49dfe8e0ef..0ec0d8eec3 100644 --- a/src/common/function/src/scalars/hll_count.rs +++ b/src/common/function/src/scalars/hll_count.rs @@ -160,10 +160,12 @@ mod tests { let args: Vec = vec![]; let result = function.eval(&FunctionContext::default(), &args); assert!(result.is_err()); - assert!(result - .unwrap_err() - .to_string() - .contains("hll_count expects 1 argument")); + assert!( + result + .unwrap_err() + .to_string() + .contains("hll_count expects 1 argument") + ); // Test with invalid binary data let args: Vec = vec![Arc::new(BinaryVector::from(vec![Some(vec![1, 2, 3])]))]; // Invalid binary data diff --git a/src/common/function/src/scalars/json/json_to_string.rs b/src/common/function/src/scalars/json/json_to_string.rs index 4544ba46ed..4160f73b12 100644 --- a/src/common/function/src/scalars/json/json_to_string.rs +++ b/src/common/function/src/scalars/json/json_to_string.rs @@ 
-78,7 +78,7 @@ impl Function for JsonToStringFunction { return InvalidFuncArgsSnafu { err_msg: format!("Illegal json binary: {:?}", json), } - .fail() + .fail(); } }, _ => None, diff --git a/src/common/function/src/scalars/json/parse_json.rs b/src/common/function/src/scalars/json/parse_json.rs index 37aae8b042..71c31c93ac 100644 --- a/src/common/function/src/scalars/json/parse_json.rs +++ b/src/common/function/src/scalars/json/parse_json.rs @@ -75,7 +75,7 @@ impl Function for ParseJsonFunction { json_string ), } - .fail() + .fail(); } }, _ => None, diff --git a/src/common/function/src/scalars/matches.rs b/src/common/function/src/scalars/matches.rs index 71b4a69764..d513427356 100644 --- a/src/common/function/src/scalars/matches.rs +++ b/src/common/function/src/scalars/matches.rs @@ -29,7 +29,7 @@ use datatypes::arrow::array::RecordBatch; use datatypes::arrow::datatypes::{DataType, Field}; use datatypes::prelude::VectorRef; use datatypes::vectors::BooleanVector; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::storage::ConcreteDataType; use crate::function::{Function, FunctionContext}; @@ -751,13 +751,13 @@ impl Tokenizer { let phase = self.consume_next_phase(true, pattern)?; tokens.push(Token::Phase(phase)); // consume a writespace (or EOF) after quotes - if let Some(ending_separator) = self.consume_next(pattern) { - if ending_separator != ' ' { - return InvalidFuncArgsSnafu { - err_msg: "Expect a space after quotes ('\"')", - } - .fail(); + if let Some(ending_separator) = self.consume_next(pattern) + && ending_separator != ' ' + { + return InvalidFuncArgsSnafu { + err_msg: "Expect a space after quotes ('\"')", } + .fail(); } } _ => { @@ -776,8 +776,7 @@ impl Tokenizer { fn consume_next(&mut self, pattern: &str) -> Option { self.cursor += 1; - let c = pattern.chars().nth(self.cursor); - c + pattern.chars().nth(self.cursor) } fn step_next(&mut self) { diff --git a/src/common/function/src/scalars/math/clamp.rs b/src/common/function/src/scalars/math/clamp.rs index 7f04faf509..81ad16bf10 100644 --- a/src/common/function/src/scalars/math/clamp.rs +++ b/src/common/function/src/scalars/math/clamp.rs @@ -18,15 +18,15 @@ use std::sync::Arc; use common_query::error::{InvalidFuncArgsSnafu, Result}; use datafusion::arrow::array::{ArrayIter, PrimitiveArray}; use datafusion::logical_expr::Volatility; -use datafusion_expr::type_coercion::aggregates::NUMERICS; use datafusion_expr::Signature; +use datafusion_expr::type_coercion::aggregates::NUMERICS; use datatypes::data_type::{ConcreteDataType, DataType}; use datatypes::prelude::VectorRef; use datatypes::types::LogicalPrimitiveType; use datatypes::value::TryAsPrimitive; use datatypes::vectors::PrimitiveVector; use datatypes::with_match_primitive_type_id; -use snafu::{ensure, OptionExt}; +use snafu::{OptionExt, ensure}; use crate::function::{Function, FunctionContext}; diff --git a/src/common/function/src/scalars/math/modulo.rs b/src/common/function/src/scalars/math/modulo.rs index 8dfacce1f4..a317edcb21 100644 --- a/src/common/function/src/scalars/math/modulo.rs +++ b/src/common/function/src/scalars/math/modulo.rs @@ -24,7 +24,7 @@ use datatypes::arrow::compute::kernels::numeric; use datatypes::arrow::datatypes::DataType as ArrowDataType; use datatypes::prelude::ConcreteDataType; use datatypes::vectors::{Helper, VectorRef}; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use crate::function::{Function, FunctionContext}; diff --git a/src/common/function/src/scalars/test.rs 
b/src/common/function/src/scalars/test.rs index 081be9d033..8c1911405e 100644 --- a/src/common/function/src/scalars/test.rs +++ b/src/common/function/src/scalars/test.rs @@ -22,7 +22,7 @@ use datatypes::data_type::ConcreteDataType; use datatypes::prelude::VectorRef; use crate::function::{Function, FunctionContext}; -use crate::scalars::expression::{scalar_binary_op, EvalContext}; +use crate::scalars::expression::{EvalContext, scalar_binary_op}; #[derive(Clone, Default)] pub(crate) struct TestAndFunction; diff --git a/src/common/function/src/scalars/uddsketch_calc.rs b/src/common/function/src/scalars/uddsketch_calc.rs index 5c9ec8e883..0e36839f21 100644 --- a/src/common/function/src/scalars/uddsketch_calc.rs +++ b/src/common/function/src/scalars/uddsketch_calc.rs @@ -198,10 +198,12 @@ mod tests { let args: Vec = vec![Arc::new(Float64Vector::from_vec(vec![0.95]))]; let result = function.eval(&FunctionContext::default(), &args); assert!(result.is_err()); - assert!(result - .unwrap_err() - .to_string() - .contains("uddsketch_calc expects 2 arguments")); + assert!( + result + .unwrap_err() + .to_string() + .contains("uddsketch_calc expects 2 arguments") + ); // Test with invalid binary data let args: Vec = vec![ diff --git a/src/common/function/src/scalars/vector/convert/parse_vector.rs b/src/common/function/src/scalars/vector/convert/parse_vector.rs index ac0af4ab06..8cdc55324e 100644 --- a/src/common/function/src/scalars/vector/convert/parse_vector.rs +++ b/src/common/function/src/scalars/vector/convert/parse_vector.rs @@ -20,7 +20,7 @@ use datatypes::prelude::ConcreteDataType; use datatypes::scalars::ScalarVectorBuilder; use datatypes::types::parse_string_to_vector_type_value; use datatypes::vectors::{BinaryVectorBuilder, MutableVector, VectorRef}; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use crate::function::{Function, FunctionContext}; diff --git a/src/common/function/src/scalars/vector/distance/cos.rs b/src/common/function/src/scalars/vector/distance/cos.rs index b9d972b7b0..7bec4bd99f 100644 --- a/src/common/function/src/scalars/vector/distance/cos.rs +++ b/src/common/function/src/scalars/vector/distance/cos.rs @@ -33,11 +33,7 @@ pub fn cos(lhs: &[f32], rhs: &[f32]) -> f32 { let cos_similar = dot_product / (lhs_norm * rhs_norm); let res = 1.0 - cos_similar; - if res.abs() < f32::EPSILON { - 0.0 - } else { - res - } + if res.abs() < f32::EPSILON { 0.0 } else { res } } #[cfg(test)] diff --git a/src/common/function/src/state.rs b/src/common/function/src/state.rs index 510bb613a6..ea331fd57b 100644 --- a/src/common/function/src/state.rs +++ b/src/common/function/src/state.rs @@ -40,8 +40,8 @@ impl FunctionState { AddRegionFollowerRequest, MigrateRegionRequest, ProcedureStateResponse, RemoveRegionFollowerRequest, }; - use common_query::error::Result; use common_query::Output; + use common_query::error::Result; use session::context::QueryContextRef; use store_api::storage::RegionId; use table::requests::{ diff --git a/src/common/function/src/system/pg_catalog/pg_get_userbyid.rs b/src/common/function/src/system/pg_catalog/pg_get_userbyid.rs index 5d178f787f..c6bf1ee1ab 100644 --- a/src/common/function/src/system/pg_catalog/pg_get_userbyid.rs +++ b/src/common/function/src/system/pg_catalog/pg_get_userbyid.rs @@ -23,7 +23,7 @@ use datatypes::with_match_primitive_type_id; use num_traits::AsPrimitive; use crate::function::{Function, FunctionContext}; -use crate::scalars::expression::{scalar_unary_op, EvalContext}; +use crate::scalars::expression::{EvalContext, 
scalar_unary_op}; #[derive(Clone, Debug, Default)] pub struct PGGetUserByIdFunction; diff --git a/src/common/function/src/system/pg_catalog/table_is_visible.rs b/src/common/function/src/system/pg_catalog/table_is_visible.rs index 9bd8341b9c..2c1d0e9bda 100644 --- a/src/common/function/src/system/pg_catalog/table_is_visible.rs +++ b/src/common/function/src/system/pg_catalog/table_is_visible.rs @@ -23,7 +23,7 @@ use datatypes::with_match_primitive_type_id; use num_traits::AsPrimitive; use crate::function::{Function, FunctionContext}; -use crate::scalars::expression::{scalar_unary_op, EvalContext}; +use crate::scalars::expression::{EvalContext, scalar_unary_op}; #[derive(Clone, Debug, Default)] pub struct PGTableIsVisibleFunction; diff --git a/src/common/greptimedb-telemetry/src/lib.rs b/src/common/greptimedb-telemetry/src/lib.rs index c0b58ba2e7..13f80a5473 100644 --- a/src/common/greptimedb-telemetry/src/lib.rs +++ b/src/common/greptimedb-telemetry/src/lib.rs @@ -170,9 +170,12 @@ pub trait Collector { } fn print_anonymous_usage_data_disclaimer() { - info!("Attention: GreptimeDB now collects anonymous usage data to help improve its roadmap and prioritize features."); info!( - "To learn more about this anonymous program and how to deactivate it if you don't want to participate, please visit the following URL: "); + "Attention: GreptimeDB now collects anonymous usage data to help improve its roadmap and prioritize features." + ); + info!( + "To learn more about this anonymous program and how to deactivate it if you don't want to participate, please visit the following URL: " + ); info!("https://docs.greptime.com/reference/telemetry"); } @@ -308,19 +311,19 @@ impl GreptimeDBTelemetry { mod tests { use std::convert::Infallible; use std::env; - use std::sync::atomic::{AtomicBool, AtomicUsize}; use std::sync::Arc; + use std::sync::atomic::{AtomicBool, AtomicUsize}; use std::time::Duration; use common_test_util::ports; use common_version::build_info; - use hyper::service::{make_service_fn, service_fn}; use hyper::Server; + use hyper::service::{make_service_fn, service_fn}; use reqwest::{Client, Response}; use tokio::spawn; use crate::{ - default_get_uuid, format_uptime, Collector, GreptimeDBTelemetry, Mode, StatisticData, + Collector, GreptimeDBTelemetry, Mode, StatisticData, default_get_uuid, format_uptime, }; static COUNT: AtomicUsize = std::sync::atomic::AtomicUsize::new(0); diff --git a/src/common/grpc-expr/src/alter.rs b/src/common/grpc-expr/src/alter.rs index ca0108d8f7..cdca83663b 100644 --- a/src/common/grpc-expr/src/alter.rs +++ b/src/common/grpc-expr/src/alter.rs @@ -19,13 +19,13 @@ use api::v1::column_def::{ as_fulltext_option_analyzer, as_fulltext_option_backend, as_skipping_index_type, }; use api::v1::{ - column_def, AddColumnLocation as Location, AlterTableExpr, Analyzer, CreateTableExpr, - DropColumns, FulltextBackend as PbFulltextBackend, ModifyColumnTypes, RenameTable, - SemanticType, SkippingIndexType as PbSkippingIndexType, + AddColumnLocation as Location, AlterTableExpr, Analyzer, CreateTableExpr, DropColumns, + FulltextBackend as PbFulltextBackend, ModifyColumnTypes, RenameTable, SemanticType, + SkippingIndexType as PbSkippingIndexType, column_def, }; use common_query::AddColumnLocation; use datatypes::schema::{ColumnSchema, FulltextOptions, RawSchema, SkippingIndexOptions}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::region_request::{SetRegionOption, UnsetRegionOption}; use table::metadata::{TableId, TableMeta}; use 
table::requests::{ diff --git a/src/common/grpc-expr/src/delete.rs b/src/common/grpc-expr/src/delete.rs index ff737fcdfc..d48893a7d2 100644 --- a/src/common/grpc-expr/src/delete.rs +++ b/src/common/grpc-expr/src/delete.rs @@ -17,7 +17,7 @@ use std::collections::HashMap; use api::helper::ColumnDataTypeWrapper; use api::v1::{Column, DeleteRequest as GrpcDeleteRequest}; use datatypes::prelude::ConcreteDataType; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use table::requests::DeleteRequest; use crate::error::{ColumnDataTypeSnafu, IllegalDeleteRequestSnafu, Result}; @@ -70,8 +70,8 @@ pub fn to_table_delete_request( mod tests { use std::sync::Arc; - use api::v1::column::Values; use api::v1::ColumnDataType; + use api::v1::column::Values; use datatypes::prelude::{ScalarVector, VectorRef}; use datatypes::vectors::{Int32Vector, StringVector}; diff --git a/src/common/grpc-expr/src/insert.rs b/src/common/grpc-expr/src/insert.rs index eb70ddba94..08857faf7b 100644 --- a/src/common/grpc-expr/src/insert.rs +++ b/src/common/grpc-expr/src/insert.rs @@ -17,7 +17,7 @@ use api::v1::column::Values; use common_base::BitVec; use datatypes::data_type::{ConcreteDataType, DataType}; use datatypes::prelude::VectorRef; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use crate::error::{CreateVectorSnafu, Result, UnexpectedValuesLengthSnafu}; diff --git a/src/common/grpc-expr/src/util.rs b/src/common/grpc-expr/src/util.rs index fe32bdcde9..b807b211b6 100644 --- a/src/common/grpc-expr/src/util.rs +++ b/src/common/grpc-expr/src/util.rs @@ -21,7 +21,7 @@ use api::v1::{ ColumnOptions, ColumnSchema, CreateTableExpr, JsonTypeExtension, SemanticType, }; use datatypes::schema::Schema; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use table::metadata::TableId; use table::table_reference::TableReference; @@ -81,15 +81,15 @@ fn infer_column_datatype( let column_type = ColumnDataType::try_from(datatype).context(UnknownColumnDataTypeSnafu { datatype })?; - if matches!(&column_type, ColumnDataType::Binary) { - if let Some(ext) = datatype_extension { - let type_ext = ext - .type_ext - .as_ref() - .context(error::MissingFieldSnafu { field: "type_ext" })?; - if *type_ext == TypeExt::JsonType(JsonTypeExtension::JsonBinary.into()) { - return Ok(ColumnDataType::Json); - } + if matches!(&column_type, ColumnDataType::Binary) + && let Some(ext) = datatype_extension + { + let type_ext = ext + .type_ext + .as_ref() + .context(error::MissingFieldSnafu { field: "type_ext" })?; + if *type_ext == TypeExt::JsonType(JsonTypeExtension::JsonBinary.into()) { + return Ok(ColumnDataType::Json); } } @@ -444,9 +444,11 @@ mod tests { let schema = Arc::new(SchemaBuilder::try_from(columns).unwrap().build().unwrap()); - assert!(extract_new_columns(&schema, ColumnExpr::from_columns(&[])) - .unwrap() - .is_none()); + assert!( + extract_new_columns(&schema, ColumnExpr::from_columns(&[])) + .unwrap() + .is_none() + ); let insert_batch = mock_insert_batch(); diff --git a/src/common/grpc/benches/bench_flight_decoder.rs b/src/common/grpc/benches/bench_flight_decoder.rs index 5e72e55fce..1f90f5873d 100644 --- a/src/common/grpc/benches/bench_flight_decoder.rs +++ b/src/common/grpc/benches/bench_flight_decoder.rs @@ -18,7 +18,7 @@ use arrow_flight::FlightData; use bytes::Bytes; use common_grpc::flight::{FlightDecoder, FlightEncoder, FlightMessage}; use common_recordbatch::DfRecordBatch; -use criterion::{criterion_group, criterion_main, Criterion}; +use criterion::{Criterion, 
criterion_group, criterion_main}; use datatypes::arrow; use datatypes::arrow::array::{ArrayRef, Int64Array, StringArray, TimestampMillisecondArray}; use datatypes::arrow::datatypes::DataType; diff --git a/src/common/grpc/benches/channel_manager.rs b/src/common/grpc/benches/channel_manager.rs index 33928297e3..c37c59c325 100644 --- a/src/common/grpc/benches/channel_manager.rs +++ b/src/common/grpc/benches/channel_manager.rs @@ -13,7 +13,7 @@ // limitations under the License. use common_grpc::channel_manager::ChannelManager; -use criterion::{criterion_group, criterion_main, Criterion}; +use criterion::{Criterion, criterion_group, criterion_main}; #[tokio::main] async fn do_bench_channel_manager() { diff --git a/src/common/grpc/src/channel_manager.rs b/src/common/grpc/src/channel_manager.rs index 0ee2df822b..b3862f7e22 100644 --- a/src/common/grpc/src/channel_manager.rs +++ b/src/common/grpc/src/channel_manager.rs @@ -12,14 +12,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}; use std::time::Duration; use common_base::readable_size::ReadableSize; use common_telemetry::info; -use dashmap::mapref::entry::Entry; use dashmap::DashMap; +use dashmap::mapref::entry::Entry; use lazy_static::lazy_static; use serde::{Deserialize, Serialize}; use snafu::{OptionExt, ResultExt}; diff --git a/src/common/grpc/src/flight.rs b/src/common/grpc/src/flight.rs index a1f65e6501..ae06b02558 100644 --- a/src/common/grpc/src/flight.rs +++ b/src/common/grpc/src/flight.rs @@ -27,12 +27,12 @@ use datatypes::arrow::array::ArrayRef; use datatypes::arrow::buffer::Buffer; use datatypes::arrow::datatypes::{DataType, Schema as ArrowSchema, SchemaRef}; use datatypes::arrow::error::ArrowError; -use datatypes::arrow::ipc::{convert, reader, root_as_message, writer, MessageHeader}; +use datatypes::arrow::ipc::{MessageHeader, convert, reader, root_as_message, writer}; use flatbuffers::FlatBufferBuilder; -use prost::bytes::Bytes as ProstBytes; use prost::Message; +use prost::bytes::Bytes as ProstBytes; use snafu::{OptionExt, ResultExt}; -use vec1::{vec1, Vec1}; +use vec1::{Vec1, vec1}; use crate::error; use crate::error::{DecodeFlightDataSnafu, InvalidFlightDataSnafu, Result}; @@ -317,7 +317,7 @@ pub fn flight_messages_to_recordbatches( return InvalidFlightDataSnafu { reason: "First Flight Message must be schema!", } - .fail() + .fail(); } }; @@ -328,7 +328,7 @@ pub fn flight_messages_to_recordbatches( return InvalidFlightDataSnafu { reason: "Expect the following Flight Messages are all Recordbatches!", } - .fail() + .fail(); } } } @@ -355,7 +355,7 @@ fn build_none_flight_msg() -> Bytes { mod test { use arrow_flight::utils::batches_to_flight_data; use datatypes::arrow::array::{ - DictionaryArray, Int32Array, StringArray, UInt32Array, UInt8Array, + DictionaryArray, Int32Array, StringArray, UInt8Array, UInt32Array, }; use datatypes::arrow::datatypes::{DataType, Field, Schema}; @@ -393,10 +393,12 @@ mod test { let result = decoder.try_decode(d2); assert!(matches!(result, Err(Error::InvalidFlightData { .. 
}))); - assert!(result - .unwrap_err() - .to_string() - .contains("Should have decoded schema first!")); + assert!( + result + .unwrap_err() + .to_string() + .contains("Should have decoded schema first!") + ); let message = decoder.try_decode(d1)?.unwrap(); assert!(matches!(message, FlightMessage::Schema(_))); @@ -444,17 +446,21 @@ mod test { let result = flight_messages_to_recordbatches(vec![m2.clone(), m1.clone(), m3.clone()]); assert!(matches!(result, Err(Error::InvalidFlightData { .. }))); - assert!(result - .unwrap_err() - .to_string() - .contains("First Flight Message must be schema!")); + assert!( + result + .unwrap_err() + .to_string() + .contains("First Flight Message must be schema!") + ); let result = flight_messages_to_recordbatches(vec![m1.clone(), m2.clone(), m1.clone()]); assert!(matches!(result, Err(Error::InvalidFlightData { .. }))); - assert!(result - .unwrap_err() - .to_string() - .contains("Expect the following Flight Messages are all Recordbatches!")); + assert!( + result + .unwrap_err() + .to_string() + .contains("Expect the following Flight Messages are all Recordbatches!") + ); let actual = flight_messages_to_recordbatches(vec![m1, m2, m3]).unwrap(); assert_eq!(actual, recordbatches); diff --git a/src/common/grpc/src/precision.rs b/src/common/grpc/src/precision.rs index dd3f65c978..0bab074bc5 100644 --- a/src/common/grpc/src/precision.rs +++ b/src/common/grpc/src/precision.rs @@ -83,7 +83,7 @@ impl TryFrom for TimeUnit { _ => { return Err(Error::NotSupported { feat: format!("convert {precision} into TimeUnit"), - }) + }); } }) } diff --git a/src/common/grpc/src/select.rs b/src/common/grpc/src/select.rs index 8886c90f67..b69790d669 100644 --- a/src/common/grpc/src/select.rs +++ b/src/common/grpc/src/select.rs @@ -18,11 +18,11 @@ use common_base::BitVec; use datatypes::types::{IntervalType, TimeType, TimestampType, WrapperType}; use datatypes::vectors::{ BinaryVector, BooleanVector, DateVector, Decimal128Vector, Float32Vector, Float64Vector, - Int16Vector, Int32Vector, Int64Vector, Int8Vector, IntervalDayTimeVector, + Int8Vector, Int16Vector, Int32Vector, Int64Vector, IntervalDayTimeVector, IntervalMonthDayNanoVector, IntervalYearMonthVector, StringVector, TimeMicrosecondVector, TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector, - TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt16Vector, - UInt32Vector, UInt64Vector, UInt8Vector, VectorRef, + TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt8Vector, + UInt16Vector, UInt32Vector, UInt64Vector, VectorRef, }; use snafu::OptionExt; diff --git a/src/common/macro/src/admin_fn.rs b/src/common/macro/src/admin_fn.rs index 5e8701e4d7..443dcd3c86 100644 --- a/src/common/macro/src/admin_fn.rs +++ b/src/common/macro/src/admin_fn.rs @@ -16,8 +16,8 @@ use proc_macro::TokenStream; use quote::quote; use syn::spanned::Spanned; use syn::{ - parse_macro_input, Attribute, Ident, ItemFn, Path, Signature, Type, TypePath, TypeReference, - Visibility, + Attribute, Ident, ItemFn, Path, Signature, Type, TypePath, TypeReference, Visibility, + parse_macro_input, }; use crate::utils::extract_input_types; diff --git a/src/common/macro/src/aggr_func.rs b/src/common/macro/src/aggr_func.rs index 2e17e70b5a..45649e520b 100644 --- a/src/common/macro/src/aggr_func.rs +++ b/src/common/macro/src/aggr_func.rs @@ -16,11 +16,11 @@ use proc_macro::TokenStream; use quote::{quote, quote_spanned}; use syn::parse::Parser; use syn::spanned::Spanned; -use 
syn::{parse_macro_input, DeriveInput, ItemStruct}; +use syn::{DeriveInput, ItemStruct, parse_macro_input}; pub(crate) fn impl_aggr_func_type_store(ast: &DeriveInput) -> TokenStream { let name = &ast.ident; - let gen = quote! { + let generated = quote! { impl common_query::logical_plan::accumulator::AggrFuncTypeStore for #name { fn input_types(&self) -> std::result::Result, common_query::error::Error> { let input_types = self.input_types.load(); @@ -40,7 +40,7 @@ pub(crate) fn impl_aggr_func_type_store(ast: &DeriveInput) -> TokenStream { } } }; - gen.into() + generated.into() } pub(crate) fn impl_as_aggr_func_creator(_args: TokenStream, input: TokenStream) -> TokenStream { diff --git a/src/common/macro/src/lib.rs b/src/common/macro/src/lib.rs index d904df8e8f..5a58c4d7fb 100644 --- a/src/common/macro/src/lib.rs +++ b/src/common/macro/src/lib.rs @@ -25,7 +25,7 @@ use print_caller::process_print_caller; use proc_macro::TokenStream; use quote::quote; use range_fn::process_range_fn; -use syn::{parse_macro_input, Data, DeriveInput, Fields}; +use syn::{Data, DeriveInput, Fields, parse_macro_input}; use crate::admin_fn::process_admin_fn; use crate::row::into_row::derive_into_row_impl; @@ -175,7 +175,7 @@ pub fn derive_meta_builder(input: TokenStream) -> TokenStream { } }); - let gen = quote! { + let generated = quote! { impl From<&TableMeta> for TableMetaBuilder { fn from(meta: &TableMeta) -> Self { let mut builder = Self { @@ -188,7 +188,7 @@ pub fn derive_meta_builder(input: TokenStream) -> TokenStream { } }; - gen.into() + generated.into() } /// Derive macro to convert a struct to a row. diff --git a/src/common/macro/src/print_caller.rs b/src/common/macro/src/print_caller.rs index 7b6b012b48..0d47866f27 100644 --- a/src/common/macro/src/print_caller.rs +++ b/src/common/macro/src/print_caller.rs @@ -13,8 +13,8 @@ // limitations under the License. 
use proc_macro::TokenStream; -use quote::{quote, ToTokens}; -use syn::{parse_macro_input, ItemFn, LitInt}; +use quote::{ToTokens, quote}; +use syn::{ItemFn, LitInt, parse_macro_input}; pub(crate) fn process_print_caller(args: TokenStream, input: TokenStream) -> TokenStream { let mut depth = 1; diff --git a/src/common/macro/src/range_fn.rs b/src/common/macro/src/range_fn.rs index b995e12789..5ee21ca4b0 100644 --- a/src/common/macro/src/range_fn.rs +++ b/src/common/macro/src/range_fn.rs @@ -16,7 +16,7 @@ use proc_macro::TokenStream; use quote::quote; use syn::spanned::Spanned; use syn::{ - parse_macro_input, Attribute, Ident, ItemFn, Signature, Type, TypeReference, Visibility, + Attribute, Ident, ItemFn, Signature, Type, TypeReference, Visibility, parse_macro_input, }; use crate::utils::extract_input_types; diff --git a/src/common/macro/src/row/attribute.rs b/src/common/macro/src/row/attribute.rs index f50b6cb462..21252644a4 100644 --- a/src/common/macro/src/row/attribute.rs +++ b/src/common/macro/src/row/attribute.rs @@ -20,7 +20,7 @@ use syn::spanned::Spanned; use syn::{Attribute, LitStr, Meta, Result}; use crate::row::utils::{ - column_data_type_from_str, semantic_type_from_str, ColumnDataTypeWithExtension, SemanticType, + ColumnDataTypeWithExtension, SemanticType, column_data_type_from_str, semantic_type_from_str, }; use crate::row::{ META_KEY_COL, META_KEY_DATATYPE, META_KEY_NAME, META_KEY_SEMANTIC, META_KEY_SKIP, @@ -51,9 +51,7 @@ pub(crate) fn parse_column_attribute(attr: &Attribute) -> Result { let mut attribute = ColumnAttribute::default(); - list.parse_nested_meta(|meta| { - parse_column_attribute_field(&meta, &mut attribute) - })?; + list.parse_nested_meta(|meta| parse_column_attribute_field(&meta, &mut attribute))?; Ok(attribute) } _ => Err(syn::Error::new( diff --git a/src/common/macro/src/row/into_row.rs b/src/common/macro/src/row/into_row.rs index 58d0d7eb0e..4d3bf81e15 100644 --- a/src/common/macro/src/row/into_row.rs +++ b/src/common/macro/src/row/into_row.rs @@ -18,8 +18,8 @@ use syn::spanned::Spanned; use syn::{DeriveInput, Result}; use crate::row::utils::{ - convert_column_data_type_to_value_data_ident, extract_struct_fields, get_column_data_type, - parse_fields_from_fields_named, ParsedField, + ParsedField, convert_column_data_type_to_value_data_ident, extract_struct_fields, + get_column_data_type, parse_fields_from_fields_named, }; use crate::row::{META_KEY_COL, META_KEY_DATATYPE}; diff --git a/src/common/macro/src/row/schema.rs b/src/common/macro/src/row/schema.rs index ac4d232f99..8a033b5aad 100644 --- a/src/common/macro/src/row/schema.rs +++ b/src/common/macro/src/row/schema.rs @@ -19,8 +19,8 @@ use syn::spanned::Spanned; use syn::{DeriveInput, Result}; use crate::row::utils::{ - convert_semantic_type_to_proto_semantic_type, extract_struct_fields, get_column_data_type, - parse_fields_from_fields_named, ColumnDataTypeWithExtension, ParsedField, + ColumnDataTypeWithExtension, ParsedField, convert_semantic_type_to_proto_semantic_type, + extract_struct_fields, get_column_data_type, parse_fields_from_fields_named, }; use crate::row::{META_KEY_COL, META_KEY_DATATYPE}; diff --git a/src/common/macro/src/row/to_row.rs b/src/common/macro/src/row/to_row.rs index db62e1129f..c4f3006fd5 100644 --- a/src/common/macro/src/row/to_row.rs +++ b/src/common/macro/src/row/to_row.rs @@ -18,8 +18,8 @@ use syn::spanned::Spanned; use syn::{DeriveInput, Result}; use crate::row::utils::{ - convert_column_data_type_to_value_data_ident, extract_struct_fields, get_column_data_type, - 
parse_fields_from_fields_named, ParsedField, + ParsedField, convert_column_data_type_to_value_data_ident, extract_struct_fields, + get_column_data_type, parse_fields_from_fields_named, }; use crate::row::{META_KEY_COL, META_KEY_DATATYPE}; diff --git a/src/common/macro/src/row/utils.rs b/src/common/macro/src/row/utils.rs index eb6de00b2c..7b6547847a 100644 --- a/src/common/macro/src/row/utils.rs +++ b/src/common/macro/src/row/utils.rs @@ -23,7 +23,7 @@ use syn::{ Path, PathArguments, PathSegment, Result, Type, TypePath, TypeReference, }; -use crate::row::attribute::{find_column_attribute, parse_column_attribute, ColumnAttribute}; +use crate::row::attribute::{ColumnAttribute, find_column_attribute, parse_column_attribute}; static SEMANTIC_TYPES: Lazy> = Lazy::new(|| { HashMap::from([ @@ -149,21 +149,16 @@ fn field_type(ty: &Type) -> FieldType<'_> { segments, }, }) = ty + && leading_colon.is_none() + && segments.len() == 1 + && let Some(PathSegment { + ident, + arguments: PathArguments::AngleBracketed(AngleBracketedGenericArguments { args, .. }), + }) = segments.first() + && let (1, Some(GenericArgument::Type(t))) = (args.len(), args.first()) + && ident == "Option" { - if leading_colon.is_none() && segments.len() == 1 { - if let Some(PathSegment { - ident, - arguments: - PathArguments::AngleBracketed(AngleBracketedGenericArguments { args, .. }), - }) = segments.first() - { - if let (1, Some(GenericArgument::Type(t))) = (args.len(), args.first()) { - if ident == "Option" { - return FieldType::Optional(t); - } - } - } - } + return FieldType::Optional(t); } FieldType::Required(ty) diff --git a/src/common/macro/src/stack_trace_debug.rs b/src/common/macro/src/stack_trace_debug.rs index f82f4746d3..f7bc49e326 100644 --- a/src/common/macro/src/stack_trace_debug.rs +++ b/src/common/macro/src/stack_trace_debug.rs @@ -17,7 +17,7 @@ use proc_macro2::{Literal, Span, TokenStream as TokenStream2, TokenTree}; use quote::{quote, quote_spanned}; use syn::spanned::Spanned; -use syn::{parenthesized, Attribute, Ident, ItemEnum, Variant}; +use syn::{Attribute, Ident, ItemEnum, Variant, parenthesized}; pub fn stack_trace_style_impl(args: TokenStream2, input: TokenStream2) -> TokenStream2 { let input_cloned: TokenStream2 = input.clone(); diff --git a/src/common/mem-prof/src/jemalloc.rs b/src/common/mem-prof/src/jemalloc.rs index cabcc81127..05966b4754 100644 --- a/src/common/mem-prof/src/jemalloc.rs +++ b/src/common/mem-prof/src/jemalloc.rs @@ -14,7 +14,7 @@ mod error; -use std::ffi::{c_char, CString}; +use std::ffi::{CString, c_char}; use std::io::BufReader; use std::path::PathBuf; @@ -23,8 +23,8 @@ use error::{ OpenTempFileSnafu, ProfilingNotEnabledSnafu, ReadOptProfSnafu, ReadProfActiveSnafu, }; use jemalloc_pprof_mappings::MAPPINGS; -use jemalloc_pprof_utils::{parse_jeheap, FlamegraphOptions, StackProfile}; -use snafu::{ensure, ResultExt}; +use jemalloc_pprof_utils::{FlamegraphOptions, StackProfile, parse_jeheap}; +use snafu::{ResultExt, ensure}; use tokio::io::AsyncReadExt; use crate::error::{FlamegraphSnafu, ParseJeHeapSnafu, Result}; diff --git a/src/common/meta/src/cache.rs b/src/common/meta/src/cache.rs index 9ce996b27d..f16290937a 100644 --- a/src/common/meta/src/cache.rs +++ b/src/common/meta/src/cache.rs @@ -18,14 +18,14 @@ mod registry; mod table; pub use container::{CacheContainer, Initializer, Invalidator, TokenFilter}; -pub use flow::{new_table_flownode_set_cache, TableFlownodeSetCache, TableFlownodeSetCacheRef}; +pub use flow::{TableFlownodeSetCache, TableFlownodeSetCacheRef, 
new_table_flownode_set_cache}; pub use registry::{ CacheRegistry, CacheRegistryBuilder, CacheRegistryRef, LayeredCacheRegistry, LayeredCacheRegistryBuilder, LayeredCacheRegistryRef, }; pub use table::{ - new_schema_cache, new_table_info_cache, new_table_name_cache, new_table_route_cache, - new_table_schema_cache, new_view_info_cache, SchemaCache, SchemaCacheRef, TableInfoCache, - TableInfoCacheRef, TableNameCache, TableNameCacheRef, TableRoute, TableRouteCache, - TableRouteCacheRef, TableSchemaCache, TableSchemaCacheRef, ViewInfoCache, ViewInfoCacheRef, + SchemaCache, SchemaCacheRef, TableInfoCache, TableInfoCacheRef, TableNameCache, + TableNameCacheRef, TableRoute, TableRouteCache, TableRouteCacheRef, TableSchemaCache, + TableSchemaCacheRef, ViewInfoCache, ViewInfoCacheRef, new_schema_cache, new_table_info_cache, + new_table_name_cache, new_table_route_cache, new_table_schema_cache, new_view_info_cache, }; diff --git a/src/common/meta/src/cache/container.rs b/src/common/meta/src/cache/container.rs index 289c2c9208..0510476d15 100644 --- a/src/common/meta/src/cache/container.rs +++ b/src/common/meta/src/cache/container.rs @@ -16,7 +16,7 @@ use std::borrow::Borrow; use std::hash::Hash; use std::sync::Arc; -use futures::future::{join_all, BoxFuture}; +use futures::future::{BoxFuture, join_all}; use moka::future::Cache; use snafu::{OptionExt, ResultExt}; @@ -194,8 +194,8 @@ where #[cfg(test)] mod tests { - use std::sync::atomic::{AtomicI32, Ordering}; use std::sync::Arc; + use std::sync::atomic::{AtomicI32, Ordering}; use moka::future::{Cache, CacheBuilder}; diff --git a/src/common/meta/src/cache/flow.rs b/src/common/meta/src/cache/flow.rs index 4430e9fce3..cdaf4ed0fb 100644 --- a/src/common/meta/src/cache/flow.rs +++ b/src/common/meta/src/cache/flow.rs @@ -14,5 +14,5 @@ mod table_flownode; pub use table_flownode::{ - new_table_flownode_set_cache, TableFlownodeSetCache, TableFlownodeSetCacheRef, + TableFlownodeSetCache, TableFlownodeSetCacheRef, new_table_flownode_set_cache, }; diff --git a/src/common/meta/src/cache/flow/table_flownode.rs b/src/common/meta/src/cache/flow/table_flownode.rs index 6123b34d3d..a7777f3361 100644 --- a/src/common/meta/src/cache/flow/table_flownode.rs +++ b/src/common/meta/src/cache/flow/table_flownode.rs @@ -205,11 +205,11 @@ mod tests { use moka::future::CacheBuilder; use table::table_name::TableName; - use crate::cache::flow::table_flownode::{new_table_flownode_set_cache, FlowIdent}; + use crate::cache::flow::table_flownode::{FlowIdent, new_table_flownode_set_cache}; use crate::instruction::{CacheIdent, CreateFlow, DropFlow}; + use crate::key::flow::FlowMetadataManager; use crate::key::flow::flow_info::FlowInfoValue; use crate::key::flow::flow_route::FlowRouteValue; - use crate::key::flow::FlowMetadataManager; use crate::kv_backend::memory::MemoryKvBackend; use crate::peer::Peer; diff --git a/src/common/meta/src/cache/registry.rs b/src/common/meta/src/cache/registry.rs index 20e378cf19..d541525f98 100644 --- a/src/common/meta/src/cache/registry.rs +++ b/src/common/meta/src/cache/registry.rs @@ -142,8 +142,8 @@ impl CacheRegistry { #[cfg(test)] mod tests { - use std::sync::atomic::{AtomicBool, AtomicI32, Ordering}; use std::sync::Arc; + use std::sync::atomic::{AtomicBool, AtomicI32, Ordering}; use moka::future::{Cache, CacheBuilder}; diff --git a/src/common/meta/src/cache/table.rs b/src/common/meta/src/cache/table.rs index 87ad9a5a16..d0b2eb463e 100644 --- a/src/common/meta/src/cache/table.rs +++ b/src/common/meta/src/cache/table.rs @@ -19,9 +19,9 @@ mod 
table_route; mod table_schema; mod view_info; -pub use schema::{new_schema_cache, SchemaCache, SchemaCacheRef}; -pub use table_info::{new_table_info_cache, TableInfoCache, TableInfoCacheRef}; -pub use table_name::{new_table_name_cache, TableNameCache, TableNameCacheRef}; -pub use table_route::{new_table_route_cache, TableRoute, TableRouteCache, TableRouteCacheRef}; -pub use table_schema::{new_table_schema_cache, TableSchemaCache, TableSchemaCacheRef}; -pub use view_info::{new_view_info_cache, ViewInfoCache, ViewInfoCacheRef}; +pub use schema::{SchemaCache, SchemaCacheRef, new_schema_cache}; +pub use table_info::{TableInfoCache, TableInfoCacheRef, new_table_info_cache}; +pub use table_name::{TableNameCache, TableNameCacheRef, new_table_name_cache}; +pub use table_route::{TableRoute, TableRouteCache, TableRouteCacheRef, new_table_route_cache}; +pub use table_schema::{TableSchemaCache, TableSchemaCacheRef, new_table_schema_cache}; +pub use view_info::{ViewInfoCache, ViewInfoCacheRef, new_view_info_cache}; diff --git a/src/common/meta/src/cache/table/table_info.rs b/src/common/meta/src/cache/table/table_info.rs index c3444516a5..3b967188be 100644 --- a/src/common/meta/src/cache/table/table_info.rs +++ b/src/common/meta/src/cache/table/table_info.rs @@ -86,8 +86,8 @@ mod tests { use super::*; use crate::ddl::test_util::create_table::test_create_table_task; - use crate::key::table_route::TableRouteValue; use crate::key::TableMetadataManager; + use crate::key::table_route::TableRouteValue; use crate::kv_backend::memory::MemoryKvBackend; #[tokio::test] diff --git a/src/common/meta/src/cache/table/table_route.rs b/src/common/meta/src/cache/table/table_route.rs index f759265927..47abdaa728 100644 --- a/src/common/meta/src/cache/table/table_route.rs +++ b/src/common/meta/src/cache/table/table_route.rs @@ -116,8 +116,8 @@ mod tests { use super::*; use crate::ddl::test_util::create_table::test_create_table_task; - use crate::key::table_route::TableRouteValue; use crate::key::TableMetadataManager; + use crate::key::table_route::TableRouteValue; use crate::kv_backend::memory::MemoryKvBackend; use crate::peer::Peer; use crate::rpc::router::{Region, RegionRoute}; diff --git a/src/common/meta/src/cache_invalidator.rs b/src/common/meta/src/cache_invalidator.rs index 13ad6dff36..8a1ffc9d6f 100644 --- a/src/common/meta/src/cache_invalidator.rs +++ b/src/common/meta/src/cache_invalidator.rs @@ -17,6 +17,7 @@ use std::sync::Arc; use crate::error::Result; use crate::flow_name::FlowName; use crate::instruction::{CacheIdent, DropFlow}; +use crate::key::MetadataKey; use crate::key::flow::flow_info::FlowInfoKey; use crate::key::flow::flow_name::FlowNameKey; use crate::key::flow::flow_route::FlowRouteKey; @@ -28,7 +29,6 @@ use crate::key::table_info::TableInfoKey; use crate::key::table_name::TableNameKey; use crate::key::table_route::TableRouteKey; use crate::key::view_info::ViewInfoKey; -use crate::key::MetadataKey; /// KvBackend cache invalidator #[async_trait::async_trait] diff --git a/src/common/meta/src/cluster.rs b/src/common/meta/src/cluster.rs index f334734e7b..d17bb62496 100644 --- a/src/common/meta/src/cluster.rs +++ b/src/common/meta/src/cluster.rs @@ -20,7 +20,7 @@ use common_error::ext::ErrorExt; use lazy_static::lazy_static; use regex::Regex; use serde::{Deserialize, Serialize}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use crate::datanode::RegionStat; use crate::error::{ diff --git a/src/common/meta/src/datanode.rs b/src/common/meta/src/datanode.rs index 
f08c8060c5..2c57a09353 100644 --- a/src/common/meta/src/datanode.rs +++ b/src/common/meta/src/datanode.rs @@ -20,7 +20,7 @@ use common_time::util as time_util; use lazy_static::lazy_static; use regex::Regex; use serde::{Deserialize, Serialize}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::region_engine::{RegionRole, RegionStatistic}; use store_api::storage::RegionId; use table::metadata::TableId; diff --git a/src/common/meta/src/ddl.rs b/src/common/meta/src/ddl.rs index 0fc65419c6..e12331d4a2 100644 --- a/src/common/meta/src/ddl.rs +++ b/src/common/meta/src/ddl.rs @@ -17,16 +17,16 @@ use std::sync::Arc; use store_api::storage::{RegionId, RegionNumber, TableId}; +use crate::DatanodeId; use crate::cache_invalidator::CacheInvalidatorRef; use crate::ddl::flow_meta::FlowMetadataAllocatorRef; use crate::ddl::table_meta::TableMetadataAllocatorRef; +use crate::key::TableMetadataManagerRef; use crate::key::flow::FlowMetadataManagerRef; use crate::key::table_route::PhysicalTableRouteValue; -use crate::key::TableMetadataManagerRef; use crate::node_manager::NodeManagerRef; use crate::region_keeper::MemoryRegionKeeperRef; use crate::region_registry::LeaderRegionRegistryRef; -use crate::DatanodeId; pub mod alter_database; pub mod alter_logical_tables; @@ -36,7 +36,7 @@ pub mod create_flow; pub mod create_logical_tables; pub mod create_table; mod create_table_template; -pub(crate) use create_table_template::{build_template_from_raw_table_info, CreateRequestBuilder}; +pub(crate) use create_table_template::{CreateRequestBuilder, build_template_from_raw_table_info}; pub mod create_view; pub mod drop_database; pub mod drop_flow; diff --git a/src/common/meta/src/ddl/alter_database.rs b/src/common/meta/src/ddl/alter_database.rs index 077f31126a..6e199cb92a 100644 --- a/src/common/meta/src/ddl/alter_database.rs +++ b/src/common/meta/src/ddl/alter_database.rs @@ -17,16 +17,16 @@ use common_procedure::error::{FromJsonSnafu, Result as ProcedureResult, ToJsonSn use common_procedure::{Context as ProcedureContext, LockKey, Procedure, Status}; use common_telemetry::tracing::info; use serde::{Deserialize, Serialize}; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use strum::AsRefStr; use crate::cache_invalidator::Context; -use crate::ddl::utils::map_to_procedure_error; use crate::ddl::DdlContext; +use crate::ddl::utils::map_to_procedure_error; use crate::error::{Result, SchemaNotFoundSnafu}; use crate::instruction::CacheIdent; -use crate::key::schema_name::{SchemaName, SchemaNameKey, SchemaNameValue}; use crate::key::DeserializedValueWithBytes; +use crate::key::schema_name::{SchemaName, SchemaNameKey, SchemaNameValue}; use crate::lock_key::{CatalogLock, SchemaLock}; use crate::rpc::ddl::UnsetDatabaseOption::{self}; use crate::rpc::ddl::{AlterDatabaseKind, AlterDatabaseTask, SetDatabaseOption}; diff --git a/src/common/meta/src/ddl/alter_logical_tables.rs b/src/common/meta/src/ddl/alter_logical_tables.rs index 5e14077762..0a8b985dba 100644 --- a/src/common/meta/src/ddl/alter_logical_tables.rs +++ b/src/common/meta/src/ddl/alter_logical_tables.rs @@ -31,17 +31,17 @@ use strum::AsRefStr; use table::metadata::TableId; use crate::cache_invalidator::Context as CacheContext; +use crate::ddl::DdlContext; use crate::ddl::alter_logical_tables::executor::AlterLogicalTablesExecutor; use crate::ddl::alter_logical_tables::validator::{ - retain_unskipped, AlterLogicalTableValidator, ValidatorResult, + AlterLogicalTableValidator, ValidatorResult, 
retain_unskipped, }; use crate::ddl::utils::{extract_column_metadatas, map_to_procedure_error, sync_follower_regions}; -use crate::ddl::DdlContext; use crate::error::Result; use crate::instruction::CacheIdent; +use crate::key::DeserializedValueWithBytes; use crate::key::table_info::TableInfoValue; use crate::key::table_route::PhysicalTableRouteValue; -use crate::key::DeserializedValueWithBytes; use crate::lock_key::{CatalogLock, SchemaLock, TableLock}; use crate::metrics; use crate::rpc::ddl::AlterTableTask; @@ -173,7 +173,9 @@ impl AlterLogicalTablesProcedure { { self.data.physical_columns = column_metadatas; } else { - warn!("altering logical table result doesn't contains extension key `{ALTER_PHYSICAL_EXTENSION_KEY}`,leaving the physical table's schema unchanged"); + warn!( + "altering logical table result doesn't contains extension key `{ALTER_PHYSICAL_EXTENSION_KEY}`,leaving the physical table's schema unchanged" + ); } self.submit_sync_region_requests(results, region_routes) .await; diff --git a/src/common/meta/src/ddl/alter_logical_tables/executor.rs b/src/common/meta/src/ddl/alter_logical_tables/executor.rs index fde4a334ca..58e8796cdb 100644 --- a/src/common/meta/src/ddl/alter_logical_tables/executor.rs +++ b/src/common/meta/src/ddl/alter_logical_tables/executor.rs @@ -17,8 +17,8 @@ use std::collections::HashMap; use api::region::RegionResponse; use api::v1::alter_table_expr::Kind; use api::v1::region::{ - alter_request, region_request, AddColumn, AddColumns, AlterRequest, AlterRequests, - RegionColumnDef, RegionRequest, RegionRequestHeader, + AddColumn, AddColumns, AlterRequest, AlterRequests, RegionColumnDef, RegionRequest, + RegionRequestHeader, alter_request, region_request, }; use api::v1::{self, AlterTableExpr}; use common_telemetry::tracing_context::TracingContext; @@ -33,7 +33,7 @@ use crate::instruction::CacheIdent; use crate::key::table_info::TableInfoValue; use crate::key::{DeserializedValueWithBytes, RegionDistribution, TableMetadataManagerRef}; use crate::node_manager::NodeManagerRef; -use crate::rpc::router::{find_leaders, region_distribution, RegionRoute}; +use crate::rpc::router::{RegionRoute, find_leaders, region_distribution}; /// [AlterLogicalTablesExecutor] performs: /// - Alters logical regions on the datanodes. 
@@ -113,7 +113,9 @@ impl<'a> AlterLogicalTablesExecutor<'a> { physical_columns: &[ColumnMetadata], ) -> Result<()> { if physical_columns.is_empty() { - warn!("No physical columns found, leaving the physical table's schema unchanged when altering logical tables"); + warn!( + "No physical columns found, leaving the physical table's schema unchanged when altering logical tables" + ); return Ok(()); } diff --git a/src/common/meta/src/ddl/alter_logical_tables/update_metadata.rs b/src/common/meta/src/ddl/alter_logical_tables/update_metadata.rs index 80256f7b28..3828cc0349 100644 --- a/src/common/meta/src/ddl/alter_logical_tables/update_metadata.rs +++ b/src/common/meta/src/ddl/alter_logical_tables/update_metadata.rs @@ -16,13 +16,13 @@ use common_grpc_expr::alter_expr_to_request; use snafu::ResultExt; use table::metadata::{RawTableInfo, TableInfo}; -use crate::ddl::alter_logical_tables::executor::AlterLogicalTablesExecutor; use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure; +use crate::ddl::alter_logical_tables::executor::AlterLogicalTablesExecutor; use crate::ddl::utils::table_info::batch_update_table_info_values; use crate::error; use crate::error::{ConvertAlterTableRequestSnafu, Result}; -use crate::key::table_info::TableInfoValue; use crate::key::DeserializedValueWithBytes; +use crate::key::table_info::TableInfoValue; use crate::rpc::ddl::AlterTableTask; use crate::rpc::router::region_distribution; diff --git a/src/common/meta/src/ddl/alter_logical_tables/validator.rs b/src/common/meta/src/ddl/alter_logical_tables/validator.rs index a6407e8403..f703584c50 100644 --- a/src/common/meta/src/ddl/alter_logical_tables/validator.rs +++ b/src/common/meta/src/ddl/alter_logical_tables/validator.rs @@ -14,9 +14,9 @@ use std::collections::HashSet; -use api::v1::alter_table_expr::Kind; use api::v1::AlterTableExpr; -use snafu::{ensure, OptionExt}; +use api::v1::alter_table_expr::Kind; +use snafu::{OptionExt, ensure}; use store_api::storage::TableId; use table::table_reference::TableReference; diff --git a/src/common/meta/src/ddl/alter_table.rs b/src/common/meta/src/ddl/alter_table.rs index ac882cf9a9..2fcf9df5fb 100644 --- a/src/common/meta/src/ddl/alter_table.rs +++ b/src/common/meta/src/ddl/alter_table.rs @@ -19,8 +19,8 @@ mod region_request; use std::vec; use api::region::RegionResponse; -use api::v1::alter_table_expr::Kind; use api::v1::RenameTable; +use api::v1::alter_table_expr::Kind; use async_trait::async_trait; use common_error::ext::BoxedError; use common_procedure::error::{FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu}; @@ -30,19 +30,19 @@ use common_procedure::{ }; use common_telemetry::{error, info, warn}; use serde::{Deserialize, Serialize}; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use store_api::metadata::ColumnMetadata; use store_api::metric_engine_consts::TABLE_COLUMN_METADATA_EXTENSION_KEY; use strum::AsRefStr; use table::metadata::{RawTableInfo, TableId, TableInfo}; use table::table_reference::TableReference; +use crate::ddl::DdlContext; use crate::ddl::alter_table::executor::AlterTableExecutor; use crate::ddl::utils::{ - extract_column_metadatas, handle_multiple_results, map_to_procedure_error, - sync_follower_regions, MultipleResults, + MultipleResults, extract_column_metadatas, handle_multiple_results, map_to_procedure_error, + sync_follower_regions, }; -use crate::ddl::DdlContext; use crate::error::{AbortProcedureSnafu, NoLeaderSnafu, PutPoisonSnafu, Result, RetryLaterSnafu}; use crate::key::table_info::TableInfoValue; use 
crate::key::{DeserializedValueWithBytes, RegionDistribution}; @@ -50,7 +50,7 @@ use crate::lock_key::{CatalogLock, SchemaLock, TableLock, TableNameLock}; use crate::metrics; use crate::poison_key::table_poison_key; use crate::rpc::ddl::AlterTableTask; -use crate::rpc::router::{find_leaders, region_distribution, RegionRoute}; +use crate::rpc::router::{RegionRoute, find_leaders, region_distribution}; /// The alter table procedure pub struct AlterTableProcedure { @@ -232,7 +232,9 @@ impl AlterTableProcedure { { self.data.column_metadatas = column_metadatas; } else { - warn!("altering table result doesn't contains extension key `{TABLE_COLUMN_METADATA_EXTENSION_KEY}`,leaving the table's column metadata unchanged"); + warn!( + "altering table result doesn't contain extension key `{TABLE_COLUMN_METADATA_EXTENSION_KEY}`, leaving the table's column metadata unchanged" + ); } self.data.state = AlterTableState::UpdateMetadata; Ok(()) @@ -291,7 +293,9 @@ impl AlterTableProcedure { ) .await?; - info!("Updated table metadata for table {table_ref}, table_id: {table_id}, kind: {alter_kind:?}"); + info!( + "Updated table metadata for table {table_ref}, table_id: {table_id}, kind: {alter_kind:?}" + ); self.data.state = AlterTableState::InvalidateTableCache; Ok(Status::executing(true)) } diff --git a/src/common/meta/src/ddl/alter_table/executor.rs b/src/common/meta/src/ddl/alter_table/executor.rs index 31bf1da125..99b6a1e012 100644 --- a/src/common/meta/src/ddl/alter_table/executor.rs +++ b/src/common/meta/src/ddl/alter_table/executor.rs @@ -15,15 +15,15 @@ use std::collections::HashMap; use api::region::RegionResponse; -use api::v1::region::region_request::Body; -use api::v1::region::{alter_request, AlterRequest, RegionRequest, RegionRequestHeader}; use api::v1::AlterTableExpr; +use api::v1::region::region_request::Body; +use api::v1::region::{AlterRequest, RegionRequest, RegionRequestHeader, alter_request}; use common_catalog::format_full_table_name; use common_grpc_expr::alter_expr_to_request; use common_telemetry::tracing_context::TracingContext; use common_telemetry::{debug, info}; use futures::future; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use store_api::metadata::ColumnMetadata; use store_api::storage::{RegionId, TableId}; use table::metadata::{RawTableInfo, TableInfo}; @@ -38,7 +38,7 @@ use crate::key::table_info::TableInfoValue; use crate::key::table_name::TableNameKey; use crate::key::{DeserializedValueWithBytes, RegionDistribution, TableMetadataManagerRef}; use crate::node_manager::NodeManagerRef; -use crate::rpc::router::{find_leaders, region_distribution, RegionRoute}; +use crate::rpc::router::{RegionRoute, find_leaders, region_distribution}; /// [AlterTableExecutor] performs: /// - Alters the metadata of the table. 
diff --git a/src/common/meta/src/ddl/alter_table/region_request.rs b/src/common/meta/src/ddl/alter_table/region_request.rs index 950dd92dce..c087d11c27 100644 --- a/src/common/meta/src/ddl/alter_table/region_request.rs +++ b/src/common/meta/src/ddl/alter_table/region_request.rs @@ -16,7 +16,7 @@ use std::collections::HashSet; use api::v1::alter_table_expr::Kind; use api::v1::region::{ - alter_request, AddColumn, AddColumns, DropColumn, DropColumns, RegionColumnDef, + AddColumn, AddColumns, DropColumn, DropColumns, RegionColumnDef, alter_request, }; use snafu::OptionExt; use table::metadata::RawTableInfo; @@ -122,27 +122,27 @@ mod tests { use api::v1::add_column_location::LocationType; use api::v1::alter_table_expr::Kind; - use api::v1::region::region_request::Body; use api::v1::region::RegionColumnDef; + use api::v1::region::region_request::Body; use api::v1::{ - region, AddColumn, AddColumnLocation, AddColumns, AlterTableExpr, ColumnDataType, - ColumnDef as PbColumnDef, ModifyColumnType, ModifyColumnTypes, SemanticType, + AddColumn, AddColumnLocation, AddColumns, AlterTableExpr, ColumnDataType, + ColumnDef as PbColumnDef, ModifyColumnType, ModifyColumnTypes, SemanticType, region, }; use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; use store_api::storage::{RegionId, TableId}; - use crate::ddl::alter_table::executor::make_alter_region_request; + use crate::ddl::DdlContext; use crate::ddl::alter_table::AlterTableProcedure; + use crate::ddl::alter_table::executor::make_alter_region_request; use crate::ddl::test_util::columns::TestColumnDefBuilder; use crate::ddl::test_util::create_table::{ - build_raw_table_info_from_expr, TestCreateTableExprBuilder, + TestCreateTableExprBuilder, build_raw_table_info_from_expr, }; - use crate::ddl::DdlContext; use crate::key::table_route::TableRouteValue; use crate::peer::Peer; use crate::rpc::ddl::AlterTableTask; use crate::rpc::router::{Region, RegionRoute}; - use crate::test_util::{new_ddl_context, MockDatanodeManager}; + use crate::test_util::{MockDatanodeManager, new_ddl_context}; /// Prepares a region with schema `[ts: Timestamp, host: Tag, cpu: Field]`. 
async fn prepare_ddl_context() -> (DdlContext, TableId, RegionId, String) { diff --git a/src/common/meta/src/ddl/create_database.rs b/src/common/meta/src/ddl/create_database.rs index 320f1ecbcf..5addeb7d29 100644 --- a/src/common/meta/src/ddl/create_database.rs +++ b/src/common/meta/src/ddl/create_database.rs @@ -18,12 +18,12 @@ use async_trait::async_trait; use common_procedure::error::{FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu}; use common_procedure::{Context as ProcedureContext, LockKey, Procedure, Status}; use serde::{Deserialize, Serialize}; -use serde_with::{serde_as, DefaultOnNull}; -use snafu::{ensure, ResultExt}; +use serde_with::{DefaultOnNull, serde_as}; +use snafu::{ResultExt, ensure}; use strum::AsRefStr; -use crate::ddl::utils::map_to_procedure_error; use crate::ddl::DdlContext; +use crate::ddl::utils::map_to_procedure_error; use crate::error::{self, Result}; use crate::key::schema_name::{SchemaNameKey, SchemaNameValue}; use crate::lock_key::{CatalogLock, SchemaLock}; diff --git a/src/common/meta/src/ddl/create_flow.rs b/src/common/meta/src/ddl/create_flow.rs index 5f5424faf0..0ad6b782bc 100644 --- a/src/common/meta/src/ddl/create_flow.rs +++ b/src/common/meta/src/ddl/create_flow.rs @@ -17,9 +17,9 @@ mod metadata; use std::collections::BTreeMap; use std::fmt; +use api::v1::ExpireAfter; use api::v1::flow::flow_request::Body as PbFlowRequest; use api::v1::flow::{CreateRequest, FlowRequest, FlowRequestHeader}; -use api::v1::ExpireAfter; use async_trait::async_trait; use common_catalog::format_full_flow_name; use common_procedure::error::{FromJsonSnafu, ToJsonSnafu}; @@ -31,13 +31,13 @@ use common_telemetry::tracing_context::TracingContext; use futures::future::join_all; use itertools::Itertools; use serde::{Deserialize, Serialize}; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use strum::AsRefStr; use table::metadata::TableId; use crate::cache_invalidator::Context; -use crate::ddl::utils::{add_peer_context_if_needed, map_to_procedure_error}; use crate::ddl::DdlContext; +use crate::ddl::utils::{add_peer_context_if_needed, map_to_procedure_error}; use crate::error::{self, Result, UnexpectedSnafu}; use crate::instruction::{CacheIdent, CreateFlow, DropFlow}; use crate::key::flow::flow_info::FlowInfoValue; diff --git a/src/common/meta/src/ddl/create_logical_tables.rs b/src/common/meta/src/ddl/create_logical_tables.rs index 312251d6b4..5ab7ced1bb 100644 --- a/src/common/meta/src/ddl/create_logical_tables.rs +++ b/src/common/meta/src/ddl/create_logical_tables.rs @@ -34,17 +34,17 @@ use store_api::storage::{RegionId, RegionNumber}; use strum::AsRefStr; use table::metadata::{RawTableInfo, TableId}; +use crate::ddl::DdlContext; use crate::ddl::utils::{ add_peer_context_if_needed, extract_column_metadatas, map_to_procedure_error, sync_follower_regions, }; -use crate::ddl::DdlContext; use crate::error::Result; use crate::key::table_route::TableRouteValue; use crate::lock_key::{CatalogLock, SchemaLock, TableLock, TableNameLock}; use crate::metrics; use crate::rpc::ddl::CreateTableTask; -use crate::rpc::router::{find_leaders, RegionRoute}; +use crate::rpc::router::{RegionRoute, find_leaders}; pub struct CreateLogicalTablesProcedure { pub context: DdlContext, @@ -175,7 +175,9 @@ impl CreateLogicalTablesProcedure { { self.data.physical_columns = column_metadatas; } else { - warn!("creating logical table result doesn't contains extension key `{ALTER_PHYSICAL_EXTENSION_KEY}`,leaving the physical table's schema unchanged"); + warn!( + "creating logical table result 
doesn't contain extension key `{ALTER_PHYSICAL_EXTENSION_KEY}`, leaving the physical table's schema unchanged" + ); } self.submit_sync_region_requests(&results, region_routes) diff --git a/src/common/meta/src/ddl/create_logical_tables/region_request.rs b/src/common/meta/src/ddl/create_logical_tables/region_request.rs index 233a971e4a..c9ee183a17 100644 --- a/src/common/meta/src/ddl/create_logical_tables/region_request.rs +++ b/src/common/meta/src/ddl/create_logical_tables/region_request.rs @@ -14,8 +14,8 @@ use std::collections::HashMap; -use api::v1::region::{region_request, CreateRequests, RegionRequest, RegionRequestHeader}; use api::v1::CreateTableExpr; +use api::v1::region::{CreateRequests, RegionRequest, RegionRequestHeader, region_request}; use common_telemetry::debug; use common_telemetry::tracing_context::TracingContext; use store_api::storage::{RegionId, TableId}; @@ -23,12 +23,12 @@ use table::metadata::RawTableInfo; use crate::ddl::create_logical_tables::CreateLogicalTablesProcedure; use crate::ddl::create_table_template::{ - build_template, build_template_from_raw_table_info, CreateRequestBuilder, + CreateRequestBuilder, build_template, build_template_from_raw_table_info, }; use crate::ddl::utils::region_storage_path; use crate::error::Result; use crate::peer::Peer; -use crate::rpc::router::{find_leader_regions, RegionRoute}; +use crate::rpc::router::{RegionRoute, find_leader_regions}; impl CreateLogicalTablesProcedure { pub(crate) fn make_request( diff --git a/src/common/meta/src/ddl/create_logical_tables/update_metadata.rs b/src/common/meta/src/ddl/create_logical_tables/update_metadata.rs index c6e50b01d8..cd24d07a78 100644 --- a/src/common/meta/src/ddl/create_logical_tables/update_metadata.rs +++ b/src/common/meta/src/ddl/create_logical_tables/update_metadata.rs @@ -29,7 +29,9 @@ use crate::instruction::CacheIdent; impl CreateLogicalTablesProcedure { pub(crate) async fn update_physical_table_metadata(&mut self) -> Result<()> { if self.data.physical_columns.is_empty() { - warn!("No physical columns found, leaving the physical table's schema unchanged when creating logical tables"); + warn!( + "No physical columns found, leaving the physical table's schema unchanged when creating logical tables" + ); return Ok(()); } diff --git a/src/common/meta/src/ddl/create_table.rs b/src/common/meta/src/ddl/create_table.rs index 5efe518e26..6bb01f584a 100644 --- a/src/common/meta/src/ddl/create_table.rs +++ b/src/common/meta/src/ddl/create_table.rs @@ -26,7 +26,7 @@ use common_telemetry::tracing_context::TracingContext; use common_telemetry::{info, warn}; use futures::future::join_all; use serde::{Deserialize, Serialize}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::metadata::ColumnMetadata; use store_api::metric_engine_consts::TABLE_COLUMN_METADATA_EXTENSION_KEY; use store_api::storage::{RegionId, RegionNumber}; @@ -34,7 +34,7 @@ use strum::AsRefStr; use table::metadata::{RawTableInfo, TableId}; use table::table_reference::TableReference; -use crate::ddl::create_table_template::{build_template, CreateRequestBuilder}; +use crate::ddl::create_table_template::{CreateRequestBuilder, build_template}; use crate::ddl::utils::raw_table_info::update_table_info_column_ids; use crate::ddl::utils::{ add_peer_context_if_needed, convert_region_routes_to_detecting_regions, @@ -49,7 +49,7 @@ use crate::metrics; use crate::region_keeper::OperatingRegionGuard; use crate::rpc::ddl::CreateTableTask; use crate::rpc::router::{ - find_leader_regions, 
find_leaders, operating_leader_regions, RegionRoute, + RegionRoute, find_leader_regions, find_leaders, operating_leader_regions, }; pub struct CreateTableProcedure { pub context: DdlContext, @@ -265,7 +265,9 @@ impl CreateTableProcedure { { self.creator.data.column_metadatas = column_metadatas; } else { - warn!("creating table result doesn't contains extension key `{TABLE_COLUMN_METADATA_EXTENSION_KEY}`,leaving the table's column metadata unchanged"); + warn!( + "creating table result doesn't contain extension key `{TABLE_COLUMN_METADATA_EXTENSION_KEY}`, leaving the table's column metadata unchanged" + ); } self.creator.data.state = CreateTableState::CreateMetadata; diff --git a/src/common/meta/src/ddl/create_view.rs b/src/common/meta/src/ddl/create_view.rs index f89aae8a3c..8392362042 100644 --- a/src/common/meta/src/ddl/create_view.rs +++ b/src/common/meta/src/ddl/create_view.rs @@ -17,7 +17,7 @@ use common_procedure::error::{FromJsonSnafu, Result as ProcedureResult, ToJsonSn use common_procedure::{Context as ProcedureContext, LockKey, Procedure, Status}; use common_telemetry::info; use serde::{Deserialize, Serialize}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use strum::AsRefStr; use table::metadata::{RawTableInfo, TableId, TableType}; use table::table_reference::TableReference; diff --git a/src/common/meta/src/ddl/drop_database.rs b/src/common/meta/src/ddl/drop_database.rs index 55af78de30..d3c5c5831e 100644 --- a/src/common/meta/src/ddl/drop_database.rs +++ b/src/common/meta/src/ddl/drop_database.rs @@ -31,8 +31,8 @@ use snafu::ResultExt; use tonic::async_trait; use self::start::DropDatabaseStart; -use crate::ddl::utils::map_to_procedure_error; use crate::ddl::DdlContext; +use crate::ddl::utils::map_to_procedure_error; use crate::error::Result; use crate::key::table_name::TableNameValue; use crate::lock_key::{CatalogLock, SchemaLock}; diff --git a/src/common/meta/src/ddl/drop_database/cursor.rs b/src/common/meta/src/ddl/drop_database/cursor.rs index 4df007bda8..0a1180a52e 100644 --- a/src/common/meta/src/ddl/drop_database/cursor.rs +++ b/src/common/meta/src/ddl/drop_database/cursor.rs @@ -23,10 +23,10 @@ use table::metadata::{TableId, TableType}; use table::table_name::TableName; use crate::cache_invalidator::Context; +use crate::ddl::DdlContext; use crate::ddl::drop_database::executor::DropDatabaseExecutor; use crate::ddl::drop_database::metadata::DropDatabaseRemoveMetadata; use crate::ddl::drop_database::{DropDatabaseContext, DropTableTarget, State}; -use crate::ddl::DdlContext; use crate::error::{Result, TableInfoNotFoundSnafu}; use crate::instruction::CacheIdent; use crate::key::table_route::TableRouteValue; @@ -210,7 +210,7 @@ mod tests { use crate::ddl::drop_database::metadata::DropDatabaseRemoveMetadata; use crate::ddl::drop_database::{DropDatabaseContext, DropTableTarget, State}; use crate::ddl::test_util::{create_logical_table, create_physical_table}; - use crate::test_util::{new_ddl_context, MockDatanodeManager}; + use crate::test_util::{MockDatanodeManager, new_ddl_context}; #[tokio::test] async fn test_next_without_logical_tables() { diff --git a/src/common/meta/src/ddl/drop_database/end.rs b/src/common/meta/src/ddl/drop_database/end.rs index 577a0d6c8c..480d0792eb 100644 --- a/src/common/meta/src/ddl/drop_database/end.rs +++ b/src/common/meta/src/ddl/drop_database/end.rs @@ -17,8 +17,8 @@ use std::any::Any; use common_procedure::Status; use serde::{Deserialize, Serialize}; -use crate::ddl::drop_database::{DropDatabaseContext, 
State}; use crate::ddl::DdlContext; +use crate::ddl::drop_database::{DropDatabaseContext, State}; use crate::error::Result; #[derive(Debug, Serialize, Deserialize)] diff --git a/src/common/meta/src/ddl/drop_database/executor.rs b/src/common/meta/src/ddl/drop_database/executor.rs index 36fb196819..21ab264a92 100644 --- a/src/common/meta/src/ddl/drop_database/executor.rs +++ b/src/common/meta/src/ddl/drop_database/executor.rs @@ -21,15 +21,15 @@ use snafu::OptionExt; use table::metadata::TableId; use table::table_name::TableName; +use crate::ddl::DdlContext; use crate::ddl::drop_database::cursor::DropDatabaseCursor; use crate::ddl::drop_database::{DropDatabaseContext, DropTableTarget, State}; use crate::ddl::drop_table::executor::DropTableExecutor; use crate::ddl::utils::get_region_wal_options; -use crate::ddl::DdlContext; use crate::error::{self, Result}; use crate::key::table_route::TableRouteValue; use crate::region_keeper::OperatingRegionGuard; -use crate::rpc::router::{operating_leader_regions, RegionRoute}; +use crate::rpc::router::{RegionRoute, operating_leader_regions}; #[derive(Debug, Serialize, Deserialize)] pub(crate) struct DropDatabaseExecutor { @@ -155,7 +155,7 @@ mod tests { use crate::key::datanode_table::DatanodeTableKey; use crate::peer::Peer; use crate::rpc::router::region_distribution; - use crate::test_util::{new_ddl_context, MockDatanodeHandler, MockDatanodeManager}; + use crate::test_util::{MockDatanodeHandler, MockDatanodeManager, new_ddl_context}; #[derive(Clone)] pub struct NaiveDatanodeHandler; diff --git a/src/common/meta/src/ddl/drop_database/metadata.rs b/src/common/meta/src/ddl/drop_database/metadata.rs index 61a30360b5..c71eded7af 100644 --- a/src/common/meta/src/ddl/drop_database/metadata.rs +++ b/src/common/meta/src/ddl/drop_database/metadata.rs @@ -18,9 +18,9 @@ use common_procedure::Status; use serde::{Deserialize, Serialize}; use crate::cache_invalidator::Context; +use crate::ddl::DdlContext; use crate::ddl::drop_database::end::DropDatabaseEnd; use crate::ddl::drop_database::{DropDatabaseContext, State}; -use crate::ddl::DdlContext; use crate::error::Result; use crate::instruction::CacheIdent; use crate::key::schema_name::{SchemaName, SchemaNameKey}; @@ -104,7 +104,7 @@ mod tests { use crate::ddl::drop_database::metadata::{DropDatabaseRemoveMetadata, DropMetadataBroadcast}; use crate::ddl::drop_database::{DropDatabaseContext, State}; use crate::key::schema_name::SchemaNameKey; - use crate::test_util::{new_ddl_context, MockDatanodeManager}; + use crate::test_util::{MockDatanodeManager, new_ddl_context}; #[tokio::test] async fn test_next() { @@ -129,12 +129,14 @@ mod tests { .downcast_ref::() .unwrap(); assert!(!status.is_done()); - assert!(!ddl_context - .table_metadata_manager - .schema_manager() - .exists(SchemaNameKey::new("foo", "bar")) - .await - .unwrap()); + assert!( + !ddl_context + .table_metadata_manager + .schema_manager() + .exists(SchemaNameKey::new("foo", "bar")) + .await + .unwrap() + ); let mut state = DropMetadataBroadcast; let (state, status) = state.next(&ddl_context, &mut ctx).await.unwrap(); diff --git a/src/common/meta/src/ddl/drop_database/start.rs b/src/common/meta/src/ddl/drop_database/start.rs index 792eeac8dd..775071d684 100644 --- a/src/common/meta/src/ddl/drop_database/start.rs +++ b/src/common/meta/src/ddl/drop_database/start.rs @@ -18,10 +18,10 @@ use common_procedure::Status; use serde::{Deserialize, Serialize}; use snafu::ensure; +use crate::ddl::DdlContext; use crate::ddl::drop_database::cursor::DropDatabaseCursor; use 
crate::ddl::drop_database::end::DropDatabaseEnd; use crate::ddl::drop_database::{DropDatabaseContext, DropTableTarget, State}; -use crate::ddl::DdlContext; use crate::error::{self, Result}; use crate::key::schema_name::SchemaNameKey; @@ -81,7 +81,7 @@ mod tests { use crate::ddl::drop_database::{DropDatabaseContext, State}; use crate::error; use crate::key::schema_name::SchemaNameKey; - use crate::test_util::{new_ddl_context, MockDatanodeManager}; + use crate::test_util::{MockDatanodeManager, new_ddl_context}; #[tokio::test] async fn test_schema_not_exists_err() { diff --git a/src/common/meta/src/ddl/drop_flow.rs b/src/common/meta/src/ddl/drop_flow.rs index 7cfb2747f2..3177cd8821 100644 --- a/src/common/meta/src/ddl/drop_flow.rs +++ b/src/common/meta/src/ddl/drop_flow.rs @@ -14,7 +14,7 @@ mod metadata; -use api::v1::flow::{flow_request, DropRequest, FlowRequest}; +use api::v1::flow::{DropRequest, FlowRequest, flow_request}; use async_trait::async_trait; use common_catalog::format_full_flow_name; use common_error::ext::ErrorExt; @@ -26,12 +26,12 @@ use common_procedure::{ use common_telemetry::info; use futures::future::join_all; use serde::{Deserialize, Serialize}; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use strum::AsRefStr; use crate::cache_invalidator::Context; -use crate::ddl::utils::{add_peer_context_if_needed, map_to_procedure_error}; use crate::ddl::DdlContext; +use crate::ddl::utils::{add_peer_context_if_needed, map_to_procedure_error}; use crate::error::{self, Result}; use crate::flow_name::FlowName; use crate::instruction::{CacheIdent, DropFlow}; @@ -115,10 +115,10 @@ impl DropFlowProcedure { }; drop_flow_tasks.push(async move { - if let Err(err) = requester.handle(request).await { - if err.status_code() != StatusCode::FlowNotFound { - return Err(add_peer_context_if_needed(peer.clone())(err)); - } + if let Err(err) = requester.handle(request).await + && err.status_code() != StatusCode::FlowNotFound + { + return Err(add_peer_context_if_needed(peer.clone())(err)); } Ok(()) }); diff --git a/src/common/meta/src/ddl/drop_flow/metadata.rs b/src/common/meta/src/ddl/drop_flow/metadata.rs index fa740b1775..0437098be3 100644 --- a/src/common/meta/src/ddl/drop_flow/metadata.rs +++ b/src/common/meta/src/ddl/drop_flow/metadata.rs @@ -13,7 +13,7 @@ // limitations under the License. use common_catalog::format_full_flow_name; -use snafu::{ensure, OptionExt}; +use snafu::{OptionExt, ensure}; use crate::ddl::drop_flow::DropFlowProcedure; use crate::error::{self, Result}; diff --git a/src/common/meta/src/ddl/drop_table.rs b/src/common/meta/src/ddl/drop_table.rs index 65ba9cf086..394d749e07 100644 --- a/src/common/meta/src/ddl/drop_table.rs +++ b/src/common/meta/src/ddl/drop_table.rs @@ -35,15 +35,15 @@ use table::metadata::TableId; use table::table_reference::TableReference; use self::executor::DropTableExecutor; -use crate::ddl::utils::map_to_procedure_error; use crate::ddl::DdlContext; +use crate::ddl::utils::map_to_procedure_error; use crate::error::{self, Result}; use crate::key::table_route::TableRouteValue; use crate::lock_key::{CatalogLock, SchemaLock, TableLock}; use crate::metrics; use crate::region_keeper::OperatingRegionGuard; use crate::rpc::ddl::DropTableTask; -use crate::rpc::router::{operating_leader_regions, RegionRoute}; +use crate::rpc::router::{RegionRoute, operating_leader_regions}; pub struct DropTableProcedure { /// The context of procedure runtime. 
diff --git a/src/common/meta/src/ddl/drop_table/executor.rs b/src/common/meta/src/ddl/drop_table/executor.rs index 7cc6589f6a..63ec05ab1c 100644 --- a/src/common/meta/src/ddl/drop_table/executor.rs +++ b/src/common/meta/src/ddl/drop_table/executor.rs @@ -15,8 +15,8 @@ use std::collections::HashMap; use api::v1::region::{ - region_request, CloseRequest as PbCloseRegionRequest, DropRequest as PbDropRegionRequest, - RegionRequest, RegionRequestHeader, + CloseRequest as PbCloseRegionRequest, DropRequest as PbDropRegionRequest, RegionRequest, + RegionRequestHeader, region_request, }; use common_error::ext::ErrorExt; use common_error::status_code::StatusCode; @@ -30,15 +30,15 @@ use table::metadata::TableId; use table::table_name::TableName; use crate::cache_invalidator::Context; -use crate::ddl::utils::{add_peer_context_if_needed, convert_region_routes_to_detecting_regions}; use crate::ddl::DdlContext; +use crate::ddl::utils::{add_peer_context_if_needed, convert_region_routes_to_detecting_regions}; use crate::error::{self, Result}; use crate::instruction::CacheIdent; use crate::key::table_name::TableNameKey; use crate::key::table_route::TableRouteValue; use crate::rpc::router::{ - find_follower_regions, find_followers, find_leader_regions, find_leaders, - operating_leader_regions, RegionRoute, + RegionRoute, find_follower_regions, find_followers, find_leader_regions, find_leaders, + operating_leader_regions, }; /// [Control] indicated to the caller whether to go to the next step. @@ -243,10 +243,10 @@ impl DropTableExecutor { let datanode = datanode.clone(); let requester = requester.clone(); drop_region_tasks.push(async move { - if let Err(err) = requester.handle(request).await { - if err.status_code() != StatusCode::RegionNotFound { - return Err(add_peer_context_if_needed(datanode)(err)); - } + if let Err(err) = requester.handle(request).await + && err.status_code() != StatusCode::RegionNotFound + { + return Err(add_peer_context_if_needed(datanode)(err)); } Ok(()) }); @@ -284,10 +284,10 @@ impl DropTableExecutor { let datanode = datanode.clone(); let requester = requester.clone(); close_region_tasks.push(async move { - if let Err(err) = requester.handle(request).await { - if err.status_code() != StatusCode::RegionNotFound { - return Err(add_peer_context_if_needed(datanode)(err)); - } + if let Err(err) = requester.handle(request).await + && err.status_code() != StatusCode::RegionNotFound + { + return Err(add_peer_context_if_needed(datanode)(err)); } Ok(()) }); @@ -328,10 +328,10 @@ mod tests { use super::*; use crate::ddl::test_util::columns::TestColumnDefBuilder; use crate::ddl::test_util::create_table::{ - build_raw_table_info_from_expr, TestCreateTableExprBuilder, + TestCreateTableExprBuilder, build_raw_table_info_from_expr, }; use crate::key::table_route::TableRouteValue; - use crate::test_util::{new_ddl_context, MockDatanodeManager}; + use crate::test_util::{MockDatanodeManager, new_ddl_context}; fn test_create_raw_table_info(name: &str) -> RawTableInfo { let create_table = TestCreateTableExprBuilder::default() diff --git a/src/common/meta/src/ddl/drop_view.rs b/src/common/meta/src/ddl/drop_view.rs index 2f0907ed37..d339f3249c 100644 --- a/src/common/meta/src/ddl/drop_view.rs +++ b/src/common/meta/src/ddl/drop_view.rs @@ -19,14 +19,14 @@ use common_procedure::{ }; use common_telemetry::info; use serde::{Deserialize, Serialize}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use strum::AsRefStr; use table::metadata::{RawTableInfo, TableId, 
TableType}; use table::table_reference::TableReference; use crate::cache_invalidator::Context; -use crate::ddl::utils::map_to_procedure_error; use crate::ddl::DdlContext; +use crate::ddl::utils::map_to_procedure_error; use crate::error::{self, Result}; use crate::instruction::CacheIdent; use crate::key::table_name::TableNameKey; diff --git a/src/common/meta/src/ddl/table_meta.rs b/src/common/meta/src/ddl/table_meta.rs index 712e196131..b2906e8438 100644 --- a/src/common/meta/src/ddl/table_meta.rs +++ b/src/common/meta/src/ddl/table_meta.rs @@ -27,7 +27,7 @@ use crate::peer::Peer; use crate::rpc::ddl::CreateTableTask; use crate::rpc::router::{Region, RegionRoute}; use crate::sequence::SequenceRef; -use crate::wal_options_allocator::{allocate_region_wal_options, WalOptionsAllocatorRef}; +use crate::wal_options_allocator::{WalOptionsAllocatorRef, allocate_region_wal_options}; pub type TableMetadataAllocatorRef = Arc; diff --git a/src/common/meta/src/ddl/test_util.rs b/src/common/meta/src/ddl/test_util.rs index 7d90d60953..d97448d6b9 100644 --- a/src/common/meta/src/ddl/test_util.rs +++ b/src/common/meta/src/ddl/test_util.rs @@ -38,7 +38,7 @@ use table::metadata::{RawTableInfo, TableId}; use crate::ddl::create_logical_tables::CreateLogicalTablesProcedure; use crate::ddl::test_util::columns::TestColumnDefBuilder; use crate::ddl::test_util::create_table::{ - build_raw_table_info_from_expr, TestCreateTableExprBuilder, + TestCreateTableExprBuilder, build_raw_table_info_from_expr, }; use crate::ddl::{DdlContext, TableMetadata}; use crate::key::table_route::TableRouteValue; diff --git a/src/common/meta/src/ddl/test_util/create_table.rs b/src/common/meta/src/ddl/test_util/create_table.rs index b6e785804a..13ca2ec195 100644 --- a/src/common/meta/src/ddl/test_util/create_table.rs +++ b/src/common/meta/src/ddl/test_util/create_table.rs @@ -19,7 +19,7 @@ use api::v1::meta::Partition; use api::v1::{ColumnDataType, ColumnDef, CreateTableExpr, SemanticType}; use chrono::DateTime; use common_catalog::consts::{ - DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MITO2_ENGINE, MITO_ENGINE, + DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MITO_ENGINE, MITO2_ENGINE, }; use datatypes::schema::RawSchema; use derive_builder::Builder; diff --git a/src/common/meta/src/ddl/test_util/datanode_handler.rs b/src/common/meta/src/ddl/test_util/datanode_handler.rs index cd8005510b..839dbf9358 100644 --- a/src/common/meta/src/ddl/test_util/datanode_handler.rs +++ b/src/common/meta/src/ddl/test_util/datanode_handler.rs @@ -16,8 +16,8 @@ use std::collections::HashMap; use std::sync::Arc; use api::region::RegionResponse; -use api::v1::region::region_request::Body; use api::v1::region::RegionRequest; +use api::v1::region::region_request::Body; use common_error::ext::{BoxedError, ErrorExt, StackError}; use common_error::status_code::StatusCode; use common_query::request::QueryRequest; diff --git a/src/common/meta/src/ddl/tests/alter_logical_tables.rs b/src/common/meta/src/ddl/tests/alter_logical_tables.rs index 20733bec03..139f90eed2 100644 --- a/src/common/meta/src/ddl/tests/alter_logical_tables.rs +++ b/src/common/meta/src/ddl/tests/alter_logical_tables.rs @@ -18,7 +18,7 @@ use std::sync::Arc; use api::region::RegionResponse; use api::v1::meta::Peer; use api::v1::region::sync_request::ManifestInfo; -use api::v1::region::{region_request, MetricManifestInfo, RegionRequest, SyncRequest}; +use api::v1::region::{MetricManifestInfo, RegionRequest, SyncRequest, region_request}; use api::v1::{ColumnDataType, SemanticType}; use 
common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; use common_procedure::{Procedure, ProcedureId, Status}; @@ -26,8 +26,8 @@ use common_procedure_test::MockContextProvider; use store_api::metadata::ColumnMetadata; use store_api::metric_engine_consts::{ALTER_PHYSICAL_EXTENSION_KEY, MANIFEST_INFO_EXTENSION_KEY}; use store_api::region_engine::RegionManifestInfo; -use store_api::storage::consts::ReservedColumnId; use store_api::storage::RegionId; +use store_api::storage::consts::ReservedColumnId; use tokio::sync::mpsc; use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure; @@ -45,7 +45,7 @@ use crate::key::table_name::TableNameKey; use crate::key::table_route::{PhysicalTableRouteValue, TableRouteValue}; use crate::rpc::ddl::AlterTableTask; use crate::rpc::router::{Region, RegionRoute}; -use crate::test_util::{new_ddl_context, MockDatanodeManager}; +use crate::test_util::{MockDatanodeManager, new_ddl_context}; fn make_alter_logical_table_add_column_task( schema: Option<&str>, diff --git a/src/common/meta/src/ddl/tests/alter_table.rs b/src/common/meta/src/ddl/tests/alter_table.rs index 08e39ced45..e16a85b403 100644 --- a/src/common/meta/src/ddl/tests/alter_table.rs +++ b/src/common/meta/src/ddl/tests/alter_table.rs @@ -19,7 +19,7 @@ use std::sync::Arc; use api::region::RegionResponse; use api::v1::alter_table_expr::Kind; use api::v1::region::sync_request::ManifestInfo; -use api::v1::region::{region_request, RegionRequest}; +use api::v1::region::{RegionRequest, region_request}; use api::v1::{ AddColumn, AddColumns, AlterTableExpr, ColumnDataType, ColumnDef as PbColumnDef, DropColumn, DropColumns, SemanticType, SetTableOptions, @@ -58,7 +58,7 @@ use crate::peer::Peer; use crate::poison_key::table_poison_key; use crate::rpc::ddl::AlterTableTask; use crate::rpc::router::{Region, RegionRoute}; -use crate::test_util::{new_ddl_context, MockDatanodeManager}; +use crate::test_util::{MockDatanodeManager, new_ddl_context}; fn prepare_table_route(table_id: u32) -> TableRouteValue { TableRouteValue::physical(vec![ diff --git a/src/common/meta/src/ddl/tests/create_flow.rs b/src/common/meta/src/ddl/tests/create_flow.rs index f9968bebc9..d073f5aa38 100644 --- a/src/common/meta/src/ddl/tests/create_flow.rs +++ b/src/common/meta/src/ddl/tests/create_flow.rs @@ -21,15 +21,15 @@ use common_procedure_test::execute_procedure_until_done; use session::context::QueryContext as SessionQueryContext; use table::table_name::TableName; +use crate::ddl::DdlContext; use crate::ddl::create_flow::{CreateFlowData, CreateFlowProcedure, CreateFlowState, FlowType}; use crate::ddl::test_util::create_table::test_create_table_task; use crate::ddl::test_util::flownode_handler::NaiveFlownodeHandler; -use crate::ddl::DdlContext; use crate::error; -use crate::key::table_route::TableRouteValue; use crate::key::FlowId; +use crate::key::table_route::TableRouteValue; use crate::rpc::ddl::{CreateFlowTask, FlowQueryContext, QueryContext}; -use crate::test_util::{new_ddl_context, MockFlownodeManager}; +use crate::test_util::{MockFlownodeManager, new_ddl_context}; pub(crate) fn test_create_flow_task( name: &str, diff --git a/src/common/meta/src/ddl/tests/create_logical_tables.rs b/src/common/meta/src/ddl/tests/create_logical_tables.rs index 5f0dad3385..74ca72250f 100644 --- a/src/common/meta/src/ddl/tests/create_logical_tables.rs +++ b/src/common/meta/src/ddl/tests/create_logical_tables.rs @@ -18,7 +18,7 @@ use std::sync::Arc; use api::region::RegionResponse; use api::v1::meta::Peer; use 
api::v1::region::sync_request::ManifestInfo; -use api::v1::region::{region_request, MetricManifestInfo, RegionRequest, SyncRequest}; +use api::v1::region::{MetricManifestInfo, RegionRequest, SyncRequest, region_request}; use common_error::ext::ErrorExt; use common_error::status_code::StatusCode; use common_procedure::{Context as ProcedureContext, Procedure, ProcedureId, Status}; @@ -26,21 +26,21 @@ use common_procedure_test::MockContextProvider; use store_api::metadata::ColumnMetadata; use store_api::metric_engine_consts::{ALTER_PHYSICAL_EXTENSION_KEY, MANIFEST_INFO_EXTENSION_KEY}; use store_api::region_engine::RegionManifestInfo; -use store_api::storage::consts::ReservedColumnId; use store_api::storage::RegionId; +use store_api::storage::consts::ReservedColumnId; use tokio::sync::mpsc; +use crate::ddl::TableMetadata; use crate::ddl::create_logical_tables::CreateLogicalTablesProcedure; use crate::ddl::test_util::datanode_handler::{DatanodeWatcher, NaiveDatanodeHandler}; use crate::ddl::test_util::{ assert_column_name, create_physical_table_metadata, get_raw_table_info, test_column_metadatas, test_create_logical_table_task, test_create_physical_table_task, }; -use crate::ddl::TableMetadata; use crate::error::{Error, Result}; use crate::key::table_route::{PhysicalTableRouteValue, TableRouteValue}; use crate::rpc::router::{Region, RegionRoute}; -use crate::test_util::{new_ddl_context, MockDatanodeManager}; +use crate::test_util::{MockDatanodeManager, new_ddl_context}; fn make_creates_request_handler( column_metadatas: Vec, diff --git a/src/common/meta/src/ddl/tests/create_table.rs b/src/common/meta/src/ddl/tests/create_table.rs index 307655016c..5f0a5d8f48 100644 --- a/src/common/meta/src/ddl/tests/create_table.rs +++ b/src/common/meta/src/ddl/tests/create_table.rs @@ -18,13 +18,13 @@ use std::sync::Arc; use api::region::RegionResponse; use api::v1::meta::{Partition, Peer}; -use api::v1::region::{region_request, RegionRequest}; +use api::v1::region::{RegionRequest, region_request}; use api::v1::{ColumnDataType, SemanticType}; use common_error::ext::ErrorExt; use common_error::status_code::StatusCode; use common_procedure::{Context as ProcedureContext, Procedure, ProcedureId, Status}; use common_procedure_test::{ - execute_procedure_until, execute_procedure_until_done, MockContextProvider, + MockContextProvider, execute_procedure_until, execute_procedure_until_done, }; use datatypes::prelude::ConcreteDataType; use datatypes::schema::ColumnSchema; @@ -36,7 +36,7 @@ use tokio::sync::mpsc; use crate::ddl::create_table::{CreateTableProcedure, CreateTableState}; use crate::ddl::test_util::columns::TestColumnDefBuilder; use crate::ddl::test_util::create_table::{ - build_raw_table_info_from_expr, TestCreateTableExprBuilder, + TestCreateTableExprBuilder, build_raw_table_info_from_expr, }; use crate::ddl::test_util::datanode_handler::{ DatanodeWatcher, NaiveDatanodeHandler, RetryErrorDatanodeHandler, @@ -47,7 +47,7 @@ use crate::error::{Error, Result}; use crate::key::table_route::TableRouteValue; use crate::kv_backend::memory::MemoryKvBackend; use crate::rpc::ddl::CreateTableTask; -use crate::test_util::{new_ddl_context, new_ddl_context_with_kv_backend, MockDatanodeManager}; +use crate::test_util::{MockDatanodeManager, new_ddl_context, new_ddl_context_with_kv_backend}; fn create_request_handler(_peer: Peer, request: RegionRequest) -> Result { let _ = _peer; @@ -325,16 +325,20 @@ async fn test_memory_region_keeper_guard_dropped_on_procedure_done() { assert_eq!(guards.len(), 1); let (datanode_id, 
region_id) = (0, RegionId::new(procedure.table_id(), 0)); assert_eq!(guards[0].info(), (datanode_id, region_id)); - assert!(ddl_context - .memory_region_keeper - .contains(datanode_id, region_id)); + assert!( + ddl_context + .memory_region_keeper + .contains(datanode_id, region_id) + ); execute_procedure_until_done(&mut procedure).await; // Ensure that when run to the end, the opening regions should be cleared: let guards = &procedure.creator.opening_regions; assert!(guards.is_empty()); - assert!(!ddl_context - .memory_region_keeper - .contains(datanode_id, region_id)); + assert!( + !ddl_context + .memory_region_keeper + .contains(datanode_id, region_id) + ); } diff --git a/src/common/meta/src/ddl/tests/create_view.rs b/src/common/meta/src/ddl/tests/create_view.rs index 5680fce71b..e730176fc6 100644 --- a/src/common/meta/src/ddl/tests/create_view.rs +++ b/src/common/meta/src/ddl/tests/create_view.rs @@ -30,7 +30,7 @@ use crate::ddl::test_util::datanode_handler::NaiveDatanodeHandler; use crate::ddl::tests::create_table::test_create_table_task; use crate::error::Error; use crate::rpc::ddl::CreateViewTask; -use crate::test_util::{new_ddl_context, MockDatanodeManager}; +use crate::test_util::{MockDatanodeManager, new_ddl_context}; pub(crate) fn test_table_names() -> HashSet { let mut set = HashSet::new(); diff --git a/src/common/meta/src/ddl/tests/drop_database.rs b/src/common/meta/src/ddl/tests/drop_database.rs index 8118cda53d..81cc29a51b 100644 --- a/src/common/meta/src/ddl/tests/drop_database.rs +++ b/src/common/meta/src/ddl/tests/drop_database.rs @@ -17,16 +17,16 @@ use std::sync::Arc; use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; use common_procedure::{Context as ProcedureContext, Procedure, ProcedureId}; use common_procedure_test::{ - execute_procedure_until, execute_procedure_until_done, MockContextProvider, + MockContextProvider, execute_procedure_until, execute_procedure_until_done, }; use futures::TryStreamExt; -use crate::ddl::drop_database::executor::DropDatabaseExecutor; use crate::ddl::drop_database::DropDatabaseProcedure; +use crate::ddl::drop_database::executor::DropDatabaseExecutor; use crate::ddl::test_util::datanode_handler::{NaiveDatanodeHandler, RetryErrorDatanodeHandler}; use crate::ddl::test_util::{create_logical_table, create_physical_table}; use crate::key::schema_name::SchemaNameKey; -use crate::test_util::{new_ddl_context, MockDatanodeManager}; +use crate::test_util::{MockDatanodeManager, new_ddl_context}; #[tokio::test] async fn test_drop_database_with_logical_tables() { diff --git a/src/common/meta/src/ddl/tests/drop_flow.rs b/src/common/meta/src/ddl/tests/drop_flow.rs index 9afb36a7d4..8de42f5c96 100644 --- a/src/common/meta/src/ddl/tests/drop_flow.rs +++ b/src/common/meta/src/ddl/tests/drop_flow.rs @@ -27,7 +27,7 @@ use crate::ddl::tests::create_flow::create_test_flow; use crate::error; use crate::key::table_route::TableRouteValue; use crate::rpc::ddl::DropFlowTask; -use crate::test_util::{new_ddl_context, MockFlownodeManager}; +use crate::test_util::{MockFlownodeManager, new_ddl_context}; fn test_drop_flow_task(flow_name: &str, flow_id: u32, drop_if_exists: bool) -> DropFlowTask { DropFlowTask { diff --git a/src/common/meta/src/ddl/tests/drop_table.rs b/src/common/meta/src/ddl/tests/drop_table.rs index 9983e19ec5..2a303deb44 100644 --- a/src/common/meta/src/ddl/tests/drop_table.rs +++ b/src/common/meta/src/ddl/tests/drop_table.rs @@ -15,7 +15,7 @@ use std::collections::HashMap; use std::sync::Arc; -use 
api::v1::region::{region_request, RegionRequest}; +use api::v1::region::{RegionRequest, region_request}; use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; use common_error::ext::ErrorExt; use common_error::status_code::StatusCode; @@ -27,6 +27,7 @@ use store_api::storage::RegionId; use table::metadata::TableId; use tokio::sync::mpsc; +use crate::ddl::TableMetadata; use crate::ddl::create_logical_tables::CreateLogicalTablesProcedure; use crate::ddl::drop_table::{DropTableProcedure, DropTableState}; use crate::ddl::test_util::create_table::test_create_table_task; @@ -35,13 +36,12 @@ use crate::ddl::test_util::{ create_logical_table, create_physical_table, create_physical_table_metadata, test_create_logical_table_task, test_create_physical_table_task, }; -use crate::ddl::TableMetadata; use crate::key::table_route::TableRouteValue; use crate::kv_backend::memory::MemoryKvBackend; use crate::peer::Peer; use crate::rpc::ddl::DropTableTask; use crate::rpc::router::{Region, RegionRoute}; -use crate::test_util::{new_ddl_context, new_ddl_context_with_kv_backend, MockDatanodeManager}; +use crate::test_util::{MockDatanodeManager, new_ddl_context, new_ddl_context_with_kv_backend}; #[tokio::test] async fn test_on_prepare_table_not_exists_err() { @@ -276,18 +276,22 @@ async fn test_memory_region_keeper_guard_dropped_on_procedure_done() { assert_eq!(guards.len(), 1); let (datanode_id, region_id) = (0, RegionId::new(physical_table_id, 0)); assert_eq!(guards[0].info(), (datanode_id, region_id)); - assert!(ddl_context - .memory_region_keeper - .contains(datanode_id, region_id)); + assert!( + ddl_context + .memory_region_keeper + .contains(datanode_id, region_id) + ); execute_procedure_until_done(&mut procedure).await; // Ensure that when run to the end, the dropping regions should be cleared: let guards = &procedure.dropping_regions; assert!(guards.is_empty()); - assert!(!ddl_context - .memory_region_keeper - .contains(datanode_id, region_id)); + assert!( + !ddl_context + .memory_region_keeper + .contains(datanode_id, region_id) + ); }; inner_test(new_drop_table_task("s", logical_table_id, false)).await; diff --git a/src/common/meta/src/ddl/tests/drop_view.rs b/src/common/meta/src/ddl/tests/drop_view.rs index f6e8391e35..824e2a56ba 100644 --- a/src/common/meta/src/ddl/tests/drop_view.rs +++ b/src/common/meta/src/ddl/tests/drop_view.rs @@ -25,7 +25,7 @@ use crate::ddl::test_util::create_table::test_create_table_task; use crate::ddl::tests::create_view::{test_create_view_task, test_table_names}; use crate::key::table_route::TableRouteValue; use crate::rpc::ddl::DropViewTask; -use crate::test_util::{new_ddl_context, MockDatanodeManager}; +use crate::test_util::{MockDatanodeManager, new_ddl_context}; fn new_drop_view_task(view: &str, view_id: TableId, drop_if_exists: bool) -> DropViewTask { DropViewTask { @@ -145,13 +145,15 @@ async fn test_drop_view_success() { .await .unwrap(); - assert!(ddl_context - .table_metadata_manager - .view_info_manager() - .get(view_id) - .await - .unwrap() - .is_some()); + assert!( + ddl_context + .table_metadata_manager + .view_info_manager() + .get(view_id) + .await + .unwrap() + .is_some() + ); let task = new_drop_view_task(view_name, view_id, false); // Prepare success @@ -160,13 +162,15 @@ async fn test_drop_view_success() { assert_eq!(DropViewState::InvalidateViewCache, procedure.state()); // Assert view info is removed - assert!(ddl_context - .table_metadata_manager - .view_info_manager() - .get(view_id) - .await - .unwrap() - .is_none()); + assert!( + 
ddl_context + .table_metadata_manager + .view_info_manager() + .get(view_id) + .await + .unwrap() + .is_none() + ); // Drop again let task = new_drop_view_task(view_name, view_id, false); diff --git a/src/common/meta/src/ddl/truncate_table.rs b/src/common/meta/src/ddl/truncate_table.rs index 82434ed184..2206f0b6f8 100644 --- a/src/common/meta/src/ddl/truncate_table.rs +++ b/src/common/meta/src/ddl/truncate_table.rs @@ -14,8 +14,8 @@ use api::helper::to_pb_time_ranges; use api::v1::region::{ - region_request, truncate_request, RegionRequest, RegionRequestHeader, - TruncateRequest as PbTruncateRegionRequest, + RegionRequest, RegionRequestHeader, TruncateRequest as PbTruncateRegionRequest, region_request, + truncate_request, }; use async_trait::async_trait; use common_procedure::error::{FromJsonSnafu, ToJsonSnafu}; @@ -26,23 +26,23 @@ use common_telemetry::debug; use common_telemetry::tracing_context::TracingContext; use futures::future::join_all; use serde::{Deserialize, Serialize}; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use store_api::storage::RegionId; use strum::AsRefStr; use table::metadata::{RawTableInfo, TableId}; use table::table_name::TableName; use table::table_reference::TableReference; -use crate::ddl::utils::{add_peer_context_if_needed, map_to_procedure_error}; use crate::ddl::DdlContext; +use crate::ddl::utils::{add_peer_context_if_needed, map_to_procedure_error}; use crate::error::{ConvertTimeRangesSnafu, Result, TableNotFoundSnafu}; +use crate::key::DeserializedValueWithBytes; use crate::key::table_info::TableInfoValue; use crate::key::table_name::TableNameKey; -use crate::key::DeserializedValueWithBytes; use crate::lock_key::{CatalogLock, SchemaLock, TableLock}; use crate::metrics; use crate::rpc::ddl::TruncateTableTask; -use crate::rpc::router::{find_leader_regions, find_leaders, RegionRoute}; +use crate::rpc::router::{RegionRoute, find_leader_regions, find_leaders}; pub struct TruncateTableProcedure { context: DdlContext, diff --git a/src/common/meta/src/ddl/utils.rs b/src/common/meta/src/ddl/utils.rs index ea6d8512c9..9c96bc3fd0 100644 --- a/src/common/meta/src/ddl/utils.rs +++ b/src/common/meta/src/ddl/utils.rs @@ -24,8 +24,8 @@ use std::fmt::Debug; use api::region::RegionResponse; use api::v1::region::sync_request::ManifestInfo; use api::v1::region::{ - region_request, MetricManifestInfo, MitoManifestInfo, RegionRequest, RegionRequestHeader, - SyncRequest, + MetricManifestInfo, MitoManifestInfo, RegionRequest, RegionRequestHeader, SyncRequest, + region_request, }; use common_catalog::consts::{METRIC_ENGINE, MITO_ENGINE}; use common_error::ext::BoxedError; @@ -34,7 +34,7 @@ use common_telemetry::tracing_context::TracingContext; use common_telemetry::{error, info, warn}; use common_wal::options::WalOptions; use futures::future::join_all; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::metadata::ColumnMetadata; use store_api::metric_engine_consts::{LOGICAL_TABLE_METADATA_KEY, MANIFEST_INFO_EXTENSION_KEY}; use store_api::region_engine::RegionManifestInfo; @@ -53,7 +53,7 @@ use crate::key::table_route::TableRouteValue; use crate::key::{TableMetadataManager, TableMetadataManagerRef}; use crate::peer::Peer; use crate::rpc::ddl::CreateTableTask; -use crate::rpc::router::{find_follower_regions, find_followers, RegionRoute}; +use crate::rpc::router::{RegionRoute, find_follower_regions, find_followers}; /// Adds [Peer] context if the error is unretryable. 
pub fn add_peer_context_if_needed(datanode: Peer) -> impl FnOnce(Error) -> Error { diff --git a/src/common/meta/src/ddl/utils/raw_table_info.rs b/src/common/meta/src/ddl/utils/raw_table_info.rs index 812f9185c2..c952e23469 100644 --- a/src/common/meta/src/ddl/utils/raw_table_info.rs +++ b/src/common/meta/src/ddl/utils/raw_table_info.rs @@ -98,10 +98,7 @@ pub(crate) fn update_table_info_column_ids( if table_column_names != column_names { warn!( "Column metadata doesn't match the table schema for table {}, table_id: {}, column in table: {:?}, column in metadata: {:?}", - raw_table_info.name, - raw_table_info.ident.table_id, - table_column_names, - column_names, + raw_table_info.name, raw_table_info.ident.table_id, table_column_names, column_names, ); return; } diff --git a/src/common/meta/src/ddl/utils/region_metadata_lister.rs b/src/common/meta/src/ddl/utils/region_metadata_lister.rs index 30bacd04e7..ed8e9fb792 100644 --- a/src/common/meta/src/ddl/utils/region_metadata_lister.rs +++ b/src/common/meta/src/ddl/utils/region_metadata_lister.rs @@ -25,7 +25,7 @@ use store_api::storage::{RegionId, TableId}; use crate::ddl::utils::add_peer_context_if_needed; use crate::error::{DecodeJsonSnafu, Result}; use crate::node_manager::NodeManagerRef; -use crate::rpc::router::{find_leaders, region_distribution, RegionRoute}; +use crate::rpc::router::{RegionRoute, find_leaders, region_distribution}; /// Collects the region metadata from the datanodes. pub struct RegionMetadataLister { @@ -118,8 +118,8 @@ mod tests { use api::region::RegionResponse; use api::v1::meta::Peer; - use api::v1::region::region_request::Body; use api::v1::region::RegionRequest; + use api::v1::region::region_request::Body; use store_api::metadata::RegionMetadata; use store_api::storage::RegionId; use tokio::sync::mpsc; diff --git a/src/common/meta/src/ddl_manager.rs b/src/common/meta/src/ddl_manager.rs index 2126f7dc78..9ade13052d 100644 --- a/src/common/meta/src/ddl_manager.rs +++ b/src/common/meta/src/ddl_manager.rs @@ -15,12 +15,12 @@ use std::sync::Arc; use common_procedure::{ - watcher, BoxedProcedureLoader, Output, ProcedureId, ProcedureManagerRef, ProcedureWithId, + BoxedProcedureLoader, Output, ProcedureId, ProcedureManagerRef, ProcedureWithId, watcher, }; use common_telemetry::tracing_context::{FutureExt, TracingContext}; use common_telemetry::{debug, info, tracing}; use derive_builder::Builder; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::storage::TableId; use crate::ddl::alter_database::AlterDatabaseProcedure; @@ -36,7 +36,7 @@ use crate::ddl::drop_flow::DropFlowProcedure; use crate::ddl::drop_table::DropTableProcedure; use crate::ddl::drop_view::DropViewProcedure; use crate::ddl::truncate_table::TruncateTableProcedure; -use crate::ddl::{utils, DdlContext}; +use crate::ddl::{DdlContext, utils}; use crate::error::{ EmptyDdlTasksSnafu, ProcedureOutputSnafu, RegisterProcedureLoaderSnafu, Result, SubmitProcedureSnafu, TableInfoNotFoundSnafu, TableNotFoundSnafu, TableRouteNotFoundSnafu, @@ -47,10 +47,6 @@ use crate::key::table_name::TableNameKey; use crate::key::{DeserializedValueWithBytes, TableMetadataManagerRef}; use crate::procedure_executor::ExecutorContext; #[cfg(feature = "enterprise")] -use crate::rpc::ddl::trigger::CreateTriggerTask; -#[cfg(feature = "enterprise")] -use crate::rpc::ddl::trigger::DropTriggerTask; -#[cfg(feature = "enterprise")] use crate::rpc::ddl::DdlTask::CreateTrigger; #[cfg(feature = "enterprise")] use crate::rpc::ddl::DdlTask::DropTrigger; 
@@ -59,6 +55,10 @@ use crate::rpc::ddl::DdlTask::{ CreateTable, CreateView, DropDatabase, DropFlow, DropLogicalTables, DropTable, DropView, TruncateTable, }; +#[cfg(feature = "enterprise")] +use crate::rpc::ddl::trigger::CreateTriggerTask; +#[cfg(feature = "enterprise")] +use crate::rpc::ddl::trigger::DropTriggerTask; use crate::rpc::ddl::{ AlterDatabaseTask, AlterTableTask, CreateDatabaseTask, CreateFlowTask, CreateTableTask, CreateViewTask, DropDatabaseTask, DropFlowTask, DropTableTask, DropViewTask, QueryContext, @@ -624,7 +624,9 @@ async fn handle_create_logical_table_tasks( .submit_create_logical_table_tasks(create_table_tasks, physical_table_id) .await?; - info!("{num_logical_tables} logical tables on physical table: {physical_table_id:?} is created via procedure_id {id:?}"); + info!( + "{num_logical_tables} logical tables on physical table: {physical_table_id:?} is created via procedure_id {id:?}" + ); let procedure_id = id.to_string(); let output = output.context(ProcedureOutputSnafu { @@ -856,7 +858,9 @@ async fn handle_alter_logical_table_tasks( .submit_alter_logical_table_tasks(alter_table_tasks, physical_table_id) .await?; - info!("{num_logical_tables} logical tables on physical table: {physical_table_id:?} is altered via procedure_id {id:?}"); + info!( + "{num_logical_tables} logical tables on physical table: {physical_table_id:?} is altered via procedure_id {id:?}" + ); let procedure_id = id.to_string(); @@ -908,8 +912,8 @@ mod tests { use crate::ddl::table_meta::TableMetadataAllocator; use crate::ddl::truncate_table::TruncateTableProcedure; use crate::ddl::{DdlContext, NoopRegionFailureDetectorControl}; - use crate::key::flow::FlowMetadataManager; use crate::key::TableMetadataManager; + use crate::key::flow::FlowMetadataManager; use crate::kv_backend::memory::MemoryKvBackend; use crate::node_manager::{DatanodeManager, DatanodeRef, FlownodeManager, FlownodeRef}; use crate::peer::Peer; diff --git a/src/common/meta/src/error.rs b/src/common/meta/src/error.rs index 7a0d1eca72..a6493397c6 100644 --- a/src/common/meta/src/error.rs +++ b/src/common/meta/src/error.rs @@ -25,8 +25,8 @@ use snafu::{Location, Snafu}; use store_api::storage::RegionId; use table::metadata::TableId; -use crate::peer::Peer; use crate::DatanodeId; +use crate::peer::Peer; #[derive(Snafu)] #[snafu(visibility(pub))] diff --git a/src/common/meta/src/heartbeat/handler/parse_mailbox_message.rs b/src/common/meta/src/heartbeat/handler/parse_mailbox_message.rs index fb9d1702fd..b6f6274540 100644 --- a/src/common/meta/src/heartbeat/handler/parse_mailbox_message.rs +++ b/src/common/meta/src/heartbeat/handler/parse_mailbox_message.rs @@ -30,11 +30,11 @@ impl HeartbeatResponseHandler for ParseMailboxMessageHandler { } async fn handle(&self, ctx: &mut HeartbeatResponseHandlerContext) -> Result { - if let Some(message) = &ctx.response.mailbox_message { - if message.payload.is_some() { - // mailbox_message_to_incoming_message will raise an error if payload is none - ctx.incoming_message = Some(mailbox_message_to_incoming_message(message.clone())?) - } + if let Some(message) = &ctx.response.mailbox_message + && message.payload.is_some() + { + // mailbox_message_to_incoming_message will raise an error if payload is none + ctx.incoming_message = Some(mailbox_message_to_incoming_message(message.clone())?) 
} Ok(HandleControl::Continue) diff --git a/src/common/meta/src/key.rs b/src/common/meta/src/key.rs index ea1ca22971..0813f4d249 100644 --- a/src/common/meta/src/key.rs +++ b/src/common/meta/src/key.rs @@ -134,7 +134,7 @@ use regex::Regex; pub use schema_metadata_manager::{SchemaMetadataManager, SchemaMetadataManagerRef}; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::storage::RegionNumber; use table::metadata::{RawTableInfo, TableId}; use table::table_name::TableName; @@ -151,18 +151,18 @@ use self::flow::flow_name::FlowNameValue; use self::schema_name::{SchemaManager, SchemaNameKey, SchemaNameValue}; use self::table_route::{TableRouteManager, TableRouteValue}; use self::tombstone::TombstoneManager; +use crate::DatanodeId; use crate::error::{self, Result, SerdeJsonSnafu}; use crate::key::flow::flow_state::FlowStateValue; use crate::key::node_address::NodeAddressValue; use crate::key::table_route::TableRouteKey; use crate::key::topic_region::TopicRegionValue; use crate::key::txn_helper::TxnOpGetResponseSet; -use crate::kv_backend::txn::{Txn, TxnOp}; use crate::kv_backend::KvBackendRef; -use crate::rpc::router::{region_distribution, LeaderState, RegionRoute}; +use crate::kv_backend::txn::{Txn, TxnOp}; +use crate::rpc::router::{LeaderState, RegionRoute, region_distribution}; use crate::rpc::store::BatchDeleteRequest; use crate::state_store::PoisonValue; -use crate::DatanodeId; pub const NAME_PATTERN: &str = r"[a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*"; pub const TOPIC_NAME_PATTERN: &str = r"[a-zA-Z0-9_:-][a-zA-Z0-9_:\-\.@#]*"; @@ -1321,10 +1321,10 @@ impl TableMetadataManager { let mut updated = 0; for route in &mut new_region_routes { - if let Some(state) = next_region_route_status(route) { - if route.set_leader_state(state) { - updated += 1; - } + if let Some(state) = next_region_route_status(route) + && route.set_leader_state(state) + { + updated += 1; } } @@ -1472,15 +1472,15 @@ mod tests { use crate::key::table_name::TableNameKey; use crate::key::table_route::TableRouteValue; use crate::key::{ - DeserializedValueWithBytes, RegionDistribution, RegionRoleSet, TableMetadataManager, - ViewInfoValue, TOPIC_REGION_PREFIX, + DeserializedValueWithBytes, RegionDistribution, RegionRoleSet, TOPIC_REGION_PREFIX, + TableMetadataManager, ViewInfoValue, }; - use crate::kv_backend::memory::MemoryKvBackend; use crate::kv_backend::KvBackend; + use crate::kv_backend::memory::MemoryKvBackend; use crate::peer::Peer; - use crate::rpc::router::{region_distribution, LeaderState, Region, RegionRoute}; + use crate::rpc::router::{LeaderState, Region, RegionRoute, region_distribution}; use crate::rpc::store::RangeRequest; - use crate::wal_options_allocator::{allocate_region_wal_options, WalOptionsAllocator}; + use crate::wal_options_allocator::{WalOptionsAllocator, allocate_region_wal_options}; #[test] fn test_deserialized_value_with_bytes() { @@ -1635,26 +1635,30 @@ mod tests { .unwrap(); // if metadata was already created, it should be ok. 
- assert!(create_physical_table_metadata( - &table_metadata_manager, - table_info.clone(), - region_routes.clone(), - region_wal_options.clone(), - ) - .await - .is_ok()); + assert!( + create_physical_table_metadata( + &table_metadata_manager, + table_info.clone(), + region_routes.clone(), + region_wal_options.clone(), + ) + .await + .is_ok() + ); let mut modified_region_routes = region_routes.clone(); modified_region_routes.push(region_route.clone()); // if remote metadata was exists, it should return an error. - assert!(create_physical_table_metadata( - &table_metadata_manager, - table_info.clone(), - modified_region_routes, - region_wal_options.clone(), - ) - .await - .is_err()); + assert!( + create_physical_table_metadata( + &table_metadata_manager, + table_info.clone(), + modified_region_routes, + region_wal_options.clone(), + ) + .await + .is_err() + ); let (remote_table_info, remote_table_route) = table_metadata_manager .get_full_table_info(10) @@ -1709,20 +1713,24 @@ mod tests { .unwrap(); // if metadata was already created, it should be ok. - assert!(table_metadata_manager - .create_logical_tables_metadata(tables_data) - .await - .is_ok()); + assert!( + table_metadata_manager + .create_logical_tables_metadata(tables_data) + .await + .is_ok() + ); let mut modified_region_routes = region_routes.clone(); modified_region_routes.push(new_region_route(2, 3)); let modified_table_route_value = TableRouteValue::physical(modified_region_routes.clone()); let modified_tables_data = vec![(table_info.clone(), modified_table_route_value)]; // if remote metadata was exists, it should return an error. - assert!(table_metadata_manager - .create_logical_tables_metadata(modified_tables_data) - .await - .is_err()); + assert!( + table_metadata_manager + .create_logical_tables_metadata(modified_tables_data) + .await + .is_err() + ); let (remote_table_info, remote_table_route) = table_metadata_manager .get_full_table_info(table_id) @@ -1825,26 +1833,32 @@ mod tests { ) .await .unwrap(); - assert!(table_metadata_manager - .table_info_manager() - .get(table_id) - .await - .unwrap() - .is_none()); - assert!(table_metadata_manager - .table_route_manager() - .table_route_storage() - .get(table_id) - .await - .unwrap() - .is_none()); - assert!(table_metadata_manager - .datanode_table_manager() - .tables(datanode_id) - .try_collect::>() - .await - .unwrap() - .is_empty()); + assert!( + table_metadata_manager + .table_info_manager() + .get(table_id) + .await + .unwrap() + .is_none() + ); + assert!( + table_metadata_manager + .table_route_manager() + .table_route_storage() + .get(table_id) + .await + .unwrap() + .is_none() + ); + assert!( + table_metadata_manager + .datanode_table_manager() + .tables(datanode_id) + .try_collect::>() + .await + .unwrap() + .is_empty() + ); // Checks removed values let table_info = table_metadata_manager .table_info_manager() @@ -1912,10 +1926,12 @@ mod tests { DeserializedValueWithBytes::from_inner(table_info_value.update(modified_table_info)); // if the table_info_value is wrong, it should return an error. // The ABA problem. 
- assert!(table_metadata_manager - .rename_table(&modified_table_info_value, new_table_name.clone()) - .await - .is_err()); + assert!( + table_metadata_manager + .rename_table(&modified_table_info_value, new_table_name.clone()) + .await + .is_err() + ); let old_table_name = TableNameKey::new( &table_info.catalog_name, @@ -1928,12 +1944,14 @@ mod tests { &new_table_name, ); - assert!(table_metadata_manager - .table_name_manager() - .get(old_table_name) - .await - .unwrap() - .is_none()); + assert!( + table_metadata_manager + .table_name_manager() + .get(old_table_name) + .await + .unwrap() + .is_none() + ); assert_eq!( table_metadata_manager @@ -1998,10 +2016,12 @@ mod tests { ); // if the current_table_info_value is wrong, it should return an error. // The ABA problem. - assert!(table_metadata_manager - .update_table_info(&wrong_table_info_value, None, new_table_info) - .await - .is_err()) + assert!( + table_metadata_manager + .update_table_info(&wrong_table_info_value, None, new_table_info) + .await + .is_err() + ) } #[tokio::test] @@ -2078,17 +2098,21 @@ mod tests { Some(LeaderState::Downgrading) ); - assert!(updated_route_value.region_routes().unwrap()[0] - .leader_down_since - .is_some()); + assert!( + updated_route_value.region_routes().unwrap()[0] + .leader_down_since + .is_some() + ); assert_eq!( updated_route_value.region_routes().unwrap()[1].leader_state, Some(LeaderState::Downgrading) ); - assert!(updated_route_value.region_routes().unwrap()[1] - .leader_down_since - .is_some()); + assert!( + updated_route_value.region_routes().unwrap()[1] + .leader_down_since + .is_some() + ); } async fn assert_datanode_table( @@ -2217,22 +2241,24 @@ mod tests { ]) .unwrap(), ); - assert!(table_metadata_manager - .update_table_route( - table_id, - RegionInfo { - engine: engine.to_string(), - region_storage_path: region_storage_path.to_string(), - region_options: HashMap::new(), - region_wal_options: HashMap::new(), - }, - &wrong_table_route_value, - new_region_routes, - &HashMap::new(), - &HashMap::new(), - ) - .await - .is_err()); + assert!( + table_metadata_manager + .update_table_route( + table_id, + RegionInfo { + engine: engine.to_string(), + region_storage_path: region_storage_path.to_string(), + region_options: HashMap::new(), + region_wal_options: HashMap::new(), + }, + &wrong_table_route_value, + new_region_routes, + &HashMap::new(), + &HashMap::new(), + ) + .await + .is_err() + ); } #[tokio::test] @@ -2496,18 +2522,20 @@ mod tests { )); // if the current_view_info_value is wrong, it should return an error. // The ABA problem. - assert!(table_metadata_manager - .update_view_info( - view_id, - &wrong_view_info_value, - new_logical_plan.clone(), - new_table_names.clone(), - vec!["c".to_string()], - vec!["number3".to_string()], - wrong_definition.to_string(), - ) - .await - .is_err()); + assert!( + table_metadata_manager + .update_view_info( + view_id, + &wrong_view_info_value, + new_logical_plan.clone(), + new_table_names.clone(), + vec!["c".to_string()], + vec!["number3".to_string()], + wrong_definition.to_string(), + ) + .await + .is_err() + ); // The view_info is not changed. 
let current_view_info = table_metadata_manager diff --git a/src/common/meta/src/key/catalog_name.rs b/src/common/meta/src/key/catalog_name.rs index dddbeed3d2..5e5b7c2383 100644 --- a/src/common/meta/src/key/catalog_name.rs +++ b/src/common/meta/src/key/catalog_name.rs @@ -20,11 +20,11 @@ use serde::{Deserialize, Serialize}; use snafu::{OptionExt, ResultExt}; use crate::error::{self, Error, InvalidMetadataSnafu, Result}; -use crate::key::{MetadataKey, CATALOG_NAME_KEY_PATTERN, CATALOG_NAME_KEY_PREFIX}; +use crate::key::{CATALOG_NAME_KEY_PATTERN, CATALOG_NAME_KEY_PREFIX, MetadataKey}; use crate::kv_backend::KvBackendRef; -use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE}; -use crate::rpc::store::RangeRequest; +use crate::range_stream::{DEFAULT_PAGE_SIZE, PaginationStream}; use crate::rpc::KeyValue; +use crate::rpc::store::RangeRequest; /// The catalog name key, indices all catalog names /// diff --git a/src/common/meta/src/key/datanode_table.rs b/src/common/meta/src/key/datanode_table.rs index 19b4e7da48..68105a478a 100644 --- a/src/common/meta/src/key/datanode_table.rs +++ b/src/common/meta/src/key/datanode_table.rs @@ -21,19 +21,19 @@ use snafu::OptionExt; use store_api::storage::RegionNumber; use table::metadata::TableId; +use crate::DatanodeId; use crate::error::{DatanodeTableInfoNotFoundSnafu, InvalidMetadataSnafu, Result}; use crate::key::table_route::PhysicalTableRouteValue; use crate::key::{ - MetadataKey, MetadataValue, RegionDistribution, RegionRoleSet, DATANODE_TABLE_KEY_PATTERN, - DATANODE_TABLE_KEY_PREFIX, + DATANODE_TABLE_KEY_PATTERN, DATANODE_TABLE_KEY_PREFIX, MetadataKey, MetadataValue, + RegionDistribution, RegionRoleSet, }; -use crate::kv_backend::txn::{Txn, TxnOp}; use crate::kv_backend::KvBackendRef; -use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE}; +use crate::kv_backend::txn::{Txn, TxnOp}; +use crate::range_stream::{DEFAULT_PAGE_SIZE, PaginationStream}; +use crate::rpc::KeyValue; use crate::rpc::router::region_distribution; use crate::rpc::store::{BatchGetRequest, RangeRequest}; -use crate::rpc::KeyValue; -use crate::DatanodeId; #[serde_with::serde_as] #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)] diff --git a/src/common/meta/src/key/flow.rs b/src/common/meta/src/key/flow.rs index 0320022751..546071f2a0 100644 --- a/src/common/meta/src/key/flow.rs +++ b/src/common/meta/src/key/flow.rs @@ -24,7 +24,7 @@ use std::sync::Arc; use common_telemetry::info; use flow_route::{FlowRouteKey, FlowRouteManager, FlowRouteValue}; -use snafu::{ensure, OptionExt}; +use snafu::{OptionExt, ensure}; use table_flow::TableFlowValue; use self::flow_info::{FlowInfoKey, FlowInfoValue}; @@ -40,8 +40,8 @@ use crate::key::flow::flownode_flow::FlownodeFlowManager; pub use crate::key::flow::table_flow::{TableFlowManager, TableFlowManagerRef}; use crate::key::txn_helper::TxnOpGetResponseSet; use crate::key::{DeserializedValueWithBytes, FlowId, FlowPartitionId, MetadataKey}; -use crate::kv_backend::txn::Txn; use crate::kv_backend::KvBackendRef; +use crate::kv_backend::txn::Txn; use crate::rpc::store::BatchDeleteRequest; /// The key of `__flow/` scope. 
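Most of the hunks above and below are mechanical fallout from the edition bump: rustfmt's 2024 style edition changes how `use` paths are sorted, which is why path segments beginning with an uppercase letter (types and constants such as `OptionExt` or `KvBackendRef`) now come before lowercase module names like `ensure` or `txn` throughout this diff. The other recurring rewrite is the let-chain syntax available on edition 2024, which collapses the previously nested `if let … { if cond { … } }` blocks into a single condition, as in the `ParseMailboxMessageHandler` and `TableMetadataManager` hunks above. A minimal standalone sketch of that pattern, with invented names rather than code from this diff:

    fn first_even(values: &[Option<i32>]) -> Option<i32> {
        // Edition 2024 allows chaining `if let` with `&&`, replacing the old
        // nested `if let Some(..) = .. { if cond { .. } }` shape seen in the hunks above.
        if let Some(Some(v)) = values.first()
            && v % 2 == 0
        {
            return Some(*v);
        }
        None
    }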
@@ -399,11 +399,11 @@ mod tests { use table::table_name::TableName; use super::*; - use crate::key::flow::table_flow::TableFlowKey; + use crate::FlownodeId; use crate::key::FlowPartitionId; + use crate::key::flow::table_flow::TableFlowKey; use crate::kv_backend::memory::MemoryKvBackend; use crate::peer::Peer; - use crate::FlownodeId; #[derive(Debug)] struct MockKey { @@ -970,9 +970,10 @@ mod tests { .await .unwrap_err(); assert_matches!(err, error::Error::Unexpected { .. }); - assert!(err - .to_string() - .contains("Reads different flow id when updating flow")); + assert!( + err.to_string() + .contains("Reads different flow id when updating flow") + ); } #[tokio::test] diff --git a/src/common/meta/src/key/flow/flow_info.rs b/src/common/meta/src/key/flow/flow_info.rs index 9ec60830f4..7b277e36ef 100644 --- a/src/common/meta/src/key/flow/flow_info.rs +++ b/src/common/meta/src/key/flow/flow_info.rs @@ -23,13 +23,13 @@ use snafu::OptionExt; use table::metadata::TableId; use table::table_name::TableName; +use crate::FlownodeId; use crate::error::{self, Result}; use crate::key::flow::FlowScoped; use crate::key::txn_helper::TxnOpGetResponseSet; use crate::key::{DeserializedValueWithBytes, FlowId, FlowPartitionId, MetadataKey, MetadataValue}; -use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp}; use crate::kv_backend::KvBackendRef; -use crate::FlownodeId; +use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp}; const FLOW_INFO_KEY_PREFIX: &str = "info"; diff --git a/src/common/meta/src/key/flow/flow_name.rs b/src/common/meta/src/key/flow/flow_name.rs index d368ca7c3a..d0967d2b8a 100644 --- a/src/common/meta/src/key/flow/flow_name.rs +++ b/src/common/meta/src/key/flow/flow_name.rs @@ -25,11 +25,11 @@ use crate::key::txn_helper::TxnOpGetResponseSet; use crate::key::{ BytesAdapter, DeserializedValueWithBytes, FlowId, MetadataKey, MetadataValue, NAME_PATTERN, }; -use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp}; use crate::kv_backend::KvBackendRef; -use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE}; -use crate::rpc::store::RangeRequest; +use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp}; +use crate::range_stream::{DEFAULT_PAGE_SIZE, PaginationStream}; use crate::rpc::KeyValue; +use crate::rpc::store::RangeRequest; const FLOW_NAME_KEY_PREFIX: &str = "name"; diff --git a/src/common/meta/src/key/flow/flow_route.rs b/src/common/meta/src/key/flow/flow_route.rs index 83102f0d49..ed82151b44 100644 --- a/src/common/meta/src/key/flow/flow_route.rs +++ b/src/common/meta/src/key/flow/flow_route.rs @@ -20,17 +20,17 @@ use snafu::OptionExt; use crate::error::{self, Result}; use crate::key::flow::flow_info::FlowInfoValue; -use crate::key::flow::{flownode_addr_helper, FlowScoped}; +use crate::key::flow::{FlowScoped, flownode_addr_helper}; use crate::key::node_address::NodeAddressKey; use crate::key::{ BytesAdapter, DeserializedValueWithBytes, FlowId, FlowPartitionId, MetadataKey, MetadataValue, }; -use crate::kv_backend::txn::{Txn, TxnOp}; use crate::kv_backend::KvBackendRef; +use crate::kv_backend::txn::{Txn, TxnOp}; use crate::peer::Peer; -use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE}; -use crate::rpc::store::RangeRequest; +use crate::range_stream::{DEFAULT_PAGE_SIZE, PaginationStream}; use crate::rpc::KeyValue; +use crate::rpc::store::RangeRequest; const FLOW_ROUTE_KEY_PREFIX: &str = "route"; diff --git a/src/common/meta/src/key/flow/flownode_flow.rs b/src/common/meta/src/key/flow/flownode_flow.rs index e7924ad702..38ab07457e 100644 --- 
a/src/common/meta/src/key/flow/flownode_flow.rs +++ b/src/common/meta/src/key/flow/flownode_flow.rs @@ -12,22 +12,22 @@ // See the License for the specific language governing permissions and // limitations under the License. -use futures::stream::BoxStream; use futures::TryStreamExt; +use futures::stream::BoxStream; use lazy_static::lazy_static; use regex::Regex; use snafu::OptionExt; -use crate::error::{self, Result}; -use crate::key::flow::flow_info::FlowInfoValue; -use crate::key::flow::FlowScoped; -use crate::key::{BytesAdapter, DeserializedValueWithBytes, FlowId, FlowPartitionId, MetadataKey}; -use crate::kv_backend::txn::{Txn, TxnOp}; -use crate::kv_backend::KvBackendRef; -use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE}; -use crate::rpc::store::RangeRequest; -use crate::rpc::KeyValue; use crate::FlownodeId; +use crate::error::{self, Result}; +use crate::key::flow::FlowScoped; +use crate::key::flow::flow_info::FlowInfoValue; +use crate::key::{BytesAdapter, DeserializedValueWithBytes, FlowId, FlowPartitionId, MetadataKey}; +use crate::kv_backend::KvBackendRef; +use crate::kv_backend::txn::{Txn, TxnOp}; +use crate::range_stream::{DEFAULT_PAGE_SIZE, PaginationStream}; +use crate::rpc::KeyValue; +use crate::rpc::store::RangeRequest; lazy_static! { static ref FLOWNODE_FLOW_KEY_PATTERN: Regex = Regex::new(&format!( @@ -245,8 +245,8 @@ impl FlownodeFlowManager { #[cfg(test)] mod tests { - use crate::key::flow::flownode_flow::FlownodeFlowKey; use crate::key::MetadataKey; + use crate::key::flow::flownode_flow::FlownodeFlowKey; #[test] fn test_key_serialization() { diff --git a/src/common/meta/src/key/flow/table_flow.rs b/src/common/meta/src/key/flow/table_flow.rs index 63f97e6602..a35d6178ae 100644 --- a/src/common/meta/src/key/flow/table_flow.rs +++ b/src/common/meta/src/key/flow/table_flow.rs @@ -21,20 +21,20 @@ use serde::{Deserialize, Serialize}; use snafu::OptionExt; use table::metadata::TableId; +use crate::FlownodeId; use crate::error::{self, Result}; use crate::key::flow::flow_info::FlowInfoValue; -use crate::key::flow::{flownode_addr_helper, FlowScoped}; +use crate::key::flow::{FlowScoped, flownode_addr_helper}; use crate::key::node_address::NodeAddressKey; use crate::key::{ BytesAdapter, DeserializedValueWithBytes, FlowId, FlowPartitionId, MetadataKey, MetadataValue, }; -use crate::kv_backend::txn::{Txn, TxnOp}; use crate::kv_backend::KvBackendRef; +use crate::kv_backend::txn::{Txn, TxnOp}; use crate::peer::Peer; -use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE}; -use crate::rpc::store::RangeRequest; +use crate::range_stream::{DEFAULT_PAGE_SIZE, PaginationStream}; use crate::rpc::KeyValue; -use crate::FlownodeId; +use crate::rpc::store::RangeRequest; const TABLE_FLOW_KEY_PREFIX: &str = "source_table"; diff --git a/src/common/meta/src/key/runtime_switch.rs b/src/common/meta/src/key/runtime_switch.rs index f5eb9b058c..61c8ceab28 100644 --- a/src/common/meta/src/key/runtime_switch.rs +++ b/src/common/meta/src/key/runtime_switch.rs @@ -154,8 +154,8 @@ mod tests { use crate::key::runtime_switch::RuntimeSwitchManager; use crate::key::{LEGACY_MAINTENANCE_KEY, MAINTENANCE_KEY}; - use crate::kv_backend::memory::MemoryKvBackend; use crate::kv_backend::KvBackend; + use crate::kv_backend::memory::MemoryKvBackend; use crate::rpc::store::PutRequest; #[tokio::test] diff --git a/src/common/meta/src/key/schema_metadata_manager.rs b/src/common/meta/src/key/schema_metadata_manager.rs index 2cc9d3982a..2b94d4c56f 100644 --- 
a/src/common/meta/src/key/schema_metadata_manager.rs +++ b/src/common/meta/src/key/schema_metadata_manager.rs @@ -21,7 +21,7 @@ use store_api::storage::TableId; use crate::cache::{SchemaCacheRef, TableSchemaCacheRef}; use crate::error::TableInfoNotFoundSnafu; -use crate::{error, SchemaOptions}; +use crate::{SchemaOptions, error}; pub type SchemaMetadataManagerRef = Arc; diff --git a/src/common/meta/src/key/schema_name.rs b/src/common/meta/src/key/schema_name.rs index f93cba897f..9923597d8f 100644 --- a/src/common/meta/src/key/schema_name.rs +++ b/src/common/meta/src/key/schema_name.rs @@ -20,7 +20,7 @@ use common_time::DatabaseTimeToLive; use futures::stream::BoxStream; use humantime_serde::re::humantime; use serde::{Deserialize, Serialize}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use crate::ensure_values; use crate::error::{self, Error, InvalidMetadataSnafu, ParseOptionSnafu, Result}; @@ -28,11 +28,11 @@ use crate::key::txn_helper::TxnOpGetResponseSet; use crate::key::{ DeserializedValueWithBytes, MetadataKey, SCHEMA_NAME_KEY_PATTERN, SCHEMA_NAME_KEY_PREFIX, }; -use crate::kv_backend::txn::Txn; use crate::kv_backend::KvBackendRef; -use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE}; -use crate::rpc::store::RangeRequest; +use crate::kv_backend::txn::Txn; +use crate::range_stream::{DEFAULT_PAGE_SIZE, PaginationStream}; use crate::rpc::KeyValue; +use crate::rpc::store::RangeRequest; const OPT_KEY_TTL: &str = "ttl"; diff --git a/src/common/meta/src/key/table_info.rs b/src/common/meta/src/key/table_info.rs index 2de5abd764..42cdbe3dc8 100644 --- a/src/common/meta/src/key/table_info.rs +++ b/src/common/meta/src/key/table_info.rs @@ -29,8 +29,8 @@ use crate::key::{ DeserializedValueWithBytes, MetadataKey, MetadataValue, TABLE_INFO_KEY_PATTERN, TABLE_INFO_KEY_PREFIX, }; -use crate::kv_backend::txn::Txn; use crate::kv_backend::KvBackendRef; +use crate::kv_backend::txn::Txn; use crate::rpc::store::BatchGetRequest; /// The key stores the metadata of the table. 
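The hunks that follow here in `table_info.rs`, and later in `table_route.rs` and `state_store.rs`, append `+ use<>` to `impl FnOnce(...)` return types. Under edition 2024, return-position `impl Trait` is treated as capturing all in-scope lifetimes by default, so without the bound the opaque return type would be tied to the borrowed `&self`/value arguments; the empty capture list declares that nothing is captured, which appears to preserve the pre-2024 behaviour where the returned decoder can outlive those borrows. A minimal illustration under that assumption, with invented names rather than code from this repository:

    struct Manager;

    impl Manager {
        // Without `+ use<>`, edition 2024 would treat the opaque return type as capturing
        // the elided `&self` lifetime, so callers could not keep the closure past the borrow;
        // the empty capture list opts out of that default.
        fn decoder(&self, id: u32) -> impl Fn() -> u32 + use<> {
            move || id // only the owned `id` is moved in, so `use<>` is satisfied
        }
    }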
@@ -155,7 +155,7 @@ impl TableInfoManager { table_info_value: &TableInfoValue, ) -> Result<( Txn, - impl FnOnce(&mut TxnOpGetResponseSet) -> TableInfoDecodeResult, + impl FnOnce(&mut TxnOpGetResponseSet) -> TableInfoDecodeResult + use<>, )> { let key = TableInfoKey::new(table_id); let raw_key = key.to_bytes(); @@ -177,7 +177,7 @@ impl TableInfoManager { new_table_info_value: &TableInfoValue, ) -> Result<( Txn, - impl FnOnce(&mut TxnOpGetResponseSet) -> TableInfoDecodeResult, + impl FnOnce(&mut TxnOpGetResponseSet) -> TableInfoDecodeResult + use<>, )> { let key = TableInfoKey::new(table_id); let raw_key = key.to_bytes(); diff --git a/src/common/meta/src/key/table_name.rs b/src/common/meta/src/key/table_name.rs index 2cb8586293..c66535f743 100644 --- a/src/common/meta/src/key/table_name.rs +++ b/src/common/meta/src/key/table_name.rs @@ -25,12 +25,12 @@ use table::table_reference::TableReference; use crate::error::{Error, InvalidMetadataSnafu, Result}; use crate::key::{MetadataKey, MetadataValue, TABLE_NAME_KEY_PATTERN, TABLE_NAME_KEY_PREFIX}; +use crate::kv_backend::KvBackendRef; use crate::kv_backend::memory::MemoryKvBackend; use crate::kv_backend::txn::{Txn, TxnOp}; -use crate::kv_backend::KvBackendRef; -use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE}; -use crate::rpc::store::{BatchGetRequest, RangeRequest}; +use crate::range_stream::{DEFAULT_PAGE_SIZE, PaginationStream}; use crate::rpc::KeyValue; +use crate::rpc::store::{BatchGetRequest, RangeRequest}; #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] pub struct TableNameKey<'a> { diff --git a/src/common/meta/src/key/table_route.rs b/src/common/meta/src/key/table_route.rs index a17e8f2079..5f6782f002 100644 --- a/src/common/meta/src/key/table_route.rs +++ b/src/common/meta/src/key/table_route.rs @@ -17,7 +17,7 @@ use std::fmt::Display; use std::sync::Arc; use serde::{Deserialize, Serialize}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::storage::{RegionId, RegionNumber}; use table::metadata::TableId; @@ -31,9 +31,9 @@ use crate::key::{ DeserializedValueWithBytes, MetadataKey, MetadataValue, RegionDistribution, TABLE_ROUTE_KEY_PATTERN, TABLE_ROUTE_PREFIX, }; -use crate::kv_backend::txn::Txn; use crate::kv_backend::KvBackendRef; -use crate::rpc::router::{region_distribution, RegionRoute}; +use crate::kv_backend::txn::Txn; +use crate::rpc::router::{RegionRoute, region_distribution}; use crate::rpc::store::BatchGetRequest; /// The key stores table routes @@ -576,7 +576,7 @@ impl TableRouteStorage { table_route_value: &TableRouteValue, ) -> Result<( Txn, - impl FnOnce(&mut TxnOpGetResponseSet) -> TableRouteValueDecodeResult, + impl FnOnce(&mut TxnOpGetResponseSet) -> TableRouteValueDecodeResult + use<>, )> { let key = TableRouteKey::new(table_id); let raw_key = key.to_bytes(); @@ -600,7 +600,7 @@ impl TableRouteStorage { new_table_route_value: &TableRouteValue, ) -> Result<( Txn, - impl FnOnce(&mut TxnOpGetResponseSet) -> TableRouteValueDecodeResult, + impl FnOnce(&mut TxnOpGetResponseSet) -> TableRouteValueDecodeResult + use<>, )> { let key = TableRouteKey::new(table_id); let raw_key = key.to_bytes(); @@ -755,10 +755,10 @@ fn set_addresses( }; for region_route in &mut physical_table_route.region_routes { - if let Some(leader) = &mut region_route.leader_peer { - if let Some(node_addr) = node_addrs.get(&leader.id) { - leader.addr = node_addr.peer.addr.clone(); - } + if let Some(leader) = &mut region_route.leader_peer + && let Some(node_addr) = node_addrs.get(&leader.id) + 
{ + leader.addr = node_addr.peer.addr.clone(); } for follower in &mut region_route.follower_peers { if let Some(node_addr) = node_addrs.get(&follower.id) { diff --git a/src/common/meta/src/key/tombstone.rs b/src/common/meta/src/key/tombstone.rs index 3048d14318..ed1d2f28c2 100644 --- a/src/common/meta/src/key/tombstone.rs +++ b/src/common/meta/src/key/tombstone.rs @@ -19,8 +19,8 @@ use snafu::ensure; use crate::error::{self, Result}; use crate::key::txn_helper::TxnOpGetResponseSet; -use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp}; use crate::kv_backend::KvBackendRef; +use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp}; use crate::rpc::store::{BatchDeleteRequest, BatchGetRequest}; /// [TombstoneManager] provides the ability to: @@ -257,8 +257,8 @@ mod tests { use crate::error::Error; use crate::key::tombstone::TombstoneManager; - use crate::kv_backend::memory::MemoryKvBackend; use crate::kv_backend::KvBackend; + use crate::kv_backend::memory::MemoryKvBackend; use crate::rpc::store::PutRequest; #[derive(Debug, Clone)] @@ -685,9 +685,10 @@ mod tests { .move_values(keys, dest_keys) .await .unwrap_err(); - assert!(err - .to_string() - .contains("The length of keys(2) does not match the length of dest_keys(3)."),); + assert!( + err.to_string() + .contains("The length of keys(2) does not match the length of dest_keys(3)."), + ); let moved_keys = tombstone_manager.move_values(vec![], vec![]).await.unwrap(); assert_eq!(0, moved_keys); diff --git a/src/common/meta/src/key/topic_name.rs b/src/common/meta/src/key/topic_name.rs index 4f45be7d8f..5497fbe478 100644 --- a/src/common/meta/src/key/topic_name.rs +++ b/src/common/meta/src/key/topic_name.rs @@ -15,19 +15,19 @@ use std::fmt::{self, Display}; use serde::{Deserialize, Serialize}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use crate::ensure_values; use crate::error::{self, DecodeJsonSnafu, Error, InvalidMetadataSnafu, Result, UnexpectedSnafu}; use crate::key::txn_helper::TxnOpGetResponseSet; use crate::key::{ - DeserializedValueWithBytes, MetadataKey, MetadataValue, KAFKA_TOPIC_KEY_PATTERN, - KAFKA_TOPIC_KEY_PREFIX, LEGACY_TOPIC_KEY_PREFIX, + DeserializedValueWithBytes, KAFKA_TOPIC_KEY_PATTERN, KAFKA_TOPIC_KEY_PREFIX, + LEGACY_TOPIC_KEY_PREFIX, MetadataKey, MetadataValue, }; -use crate::kv_backend::txn::{Txn, TxnOp}; use crate::kv_backend::KvBackendRef; -use crate::rpc::store::{BatchPutRequest, RangeRequest}; +use crate::kv_backend::txn::{Txn, TxnOp}; use crate::rpc::KeyValue; +use crate::rpc::store::{BatchPutRequest, RangeRequest}; #[derive(Debug, Clone, PartialEq)] pub struct TopicNameKey<'a> { @@ -241,8 +241,8 @@ mod tests { use std::sync::Arc; use super::*; - use crate::kv_backend::memory::MemoryKvBackend; use crate::kv_backend::KvBackend; + use crate::kv_backend::memory::MemoryKvBackend; use crate::rpc::store::PutRequest; #[tokio::test] diff --git a/src/common/meta/src/key/topic_region.rs b/src/common/meta/src/key/topic_region.rs index 7e36adf623..844a46735f 100644 --- a/src/common/meta/src/key/topic_region.rs +++ b/src/common/meta/src/key/topic_region.rs @@ -24,12 +24,12 @@ use table::metadata::TableId; use crate::ddl::utils::parse_region_wal_options; use crate::error::{Error, InvalidMetadataSnafu, Result}; use crate::key::{MetadataKey, MetadataValue, TOPIC_REGION_PATTERN, TOPIC_REGION_PREFIX}; -use crate::kv_backend::txn::{Txn, TxnOp}; use crate::kv_backend::KvBackendRef; +use crate::kv_backend::txn::{Txn, TxnOp}; +use crate::rpc::KeyValue; use crate::rpc::store::{ 
BatchDeleteRequest, BatchGetRequest, BatchPutRequest, PutRequest, RangeRequest, }; -use crate::rpc::KeyValue; // The TopicRegionKey is a key for the topic-region mapping in the kvbackend. // The layout of the key is `__topic_region/{topic_name}/{region_id}`. diff --git a/src/common/meta/src/key/txn_helper.rs b/src/common/meta/src/key/txn_helper.rs index 33a3390f9e..7963225eab 100644 --- a/src/common/meta/src/key/txn_helper.rs +++ b/src/common/meta/src/key/txn_helper.rs @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use serde::de::DeserializeOwned; use serde::Serialize; +use serde::de::DeserializeOwned; use crate::error::Result; use crate::key::{DeserializedValueWithBytes, MetadataValue}; diff --git a/src/common/meta/src/key/view_info.rs b/src/common/meta/src/key/view_info.rs index 75ccdb6f29..82be00b26d 100644 --- a/src/common/meta/src/key/view_info.rs +++ b/src/common/meta/src/key/view_info.rs @@ -27,8 +27,8 @@ use crate::key::{ DeserializedValueWithBytes, MetadataKey, MetadataValue, VIEW_INFO_KEY_PATTERN, VIEW_INFO_KEY_PREFIX, }; -use crate::kv_backend::txn::Txn; use crate::kv_backend::KvBackendRef; +use crate::kv_backend::txn::Txn; use crate::rpc::store::BatchGetRequest; /// The VIEW logical plan encoded bytes diff --git a/src/common/meta/src/kv_backend.rs b/src/common/meta/src/kv_backend.rs index ecc93c3ff5..cdd7102e11 100644 --- a/src/common/meta/src/kv_backend.rs +++ b/src/common/meta/src/kv_backend.rs @@ -21,12 +21,12 @@ pub use txn::TxnService; use crate::error::Error; use crate::kv_backend::txn::{Txn, TxnOpResponse}; +use crate::rpc::KeyValue; use crate::rpc::store::{ BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest, BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest, DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse, }; -use crate::rpc::KeyValue; pub mod chroot; pub mod etcd; diff --git a/src/common/meta/src/kv_backend/chroot.rs b/src/common/meta/src/kv_backend/chroot.rs index 0cadc7b346..1a7e7c615a 100644 --- a/src/common/meta/src/kv_backend/chroot.rs +++ b/src/common/meta/src/kv_backend/chroot.rs @@ -17,12 +17,12 @@ use std::any::Any; use crate::error::Error; use crate::kv_backend::txn::{Txn, TxnOp, TxnOpResponse, TxnResponse}; use crate::kv_backend::{KvBackend, KvBackendRef, TxnService}; +use crate::rpc::KeyValue; use crate::rpc::store::{ BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest, BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest, DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse, }; -use crate::rpc::KeyValue; pub struct ChrootKvBackend { root: Vec, diff --git a/src/common/meta/src/kv_backend/etcd.rs b/src/common/meta/src/kv_backend/etcd.rs index 7bd172427e..d8f46ba806 100644 --- a/src/common/meta/src/kv_backend/etcd.rs +++ b/src/common/meta/src/kv_backend/etcd.rs @@ -19,18 +19,18 @@ use common_telemetry::info; use etcd_client::{ Client, DeleteOptions, GetOptions, PutOptions, Txn, TxnOp, TxnOpResponse, TxnResponse, }; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use crate::error::{self, Error, Result}; use crate::kv_backend::txn::{Txn as KvTxn, TxnResponse as KvTxnResponse}; use crate::kv_backend::{KvBackend, KvBackendRef, TxnService}; use crate::metrics::METRIC_META_TXN_REQUEST; +use crate::rpc::KeyValue; use crate::rpc::store::{ BatchDeleteRequest, BatchDeleteResponse, 
BatchGetRequest, BatchGetResponse, BatchPutRequest, BatchPutResponse, DeleteRangeRequest, DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse, }; -use crate::rpc::KeyValue; const DEFAULT_MAX_DECODING_SIZE: usize = 32 * 1024 * 1024; // 32MB diff --git a/src/common/meta/src/kv_backend/memory.rs b/src/common/meta/src/kv_backend/memory.rs index a913eef81d..39eec1f4a4 100644 --- a/src/common/meta/src/kv_backend/memory.rs +++ b/src/common/meta/src/kv_backend/memory.rs @@ -23,12 +23,12 @@ use common_error::ext::ErrorExt; use crate::kv_backend::txn::{Txn, TxnOp, TxnOpResponse, TxnRequest, TxnResponse}; use crate::kv_backend::{KvBackend, KvBackendRef, ResettableKvBackend, TxnService}; use crate::metrics::METRIC_META_TXN_REQUEST; +use crate::rpc::KeyValue; use crate::rpc::store::{ BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest, BatchPutResponse, DeleteRangeRequest, DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse, }; -use crate::rpc::KeyValue; pub struct MemoryKvBackend { kvs: RwLock, Vec>>, @@ -195,10 +195,10 @@ impl KvBackend for MemoryKvBackend { let deleted = keys.len() as i64; for key in keys { - if let Some(value) = kvs.remove(&key) { - if prev_kv { - prev_kvs.push((key.clone(), value).into()) - } + if let Some(value) = kvs.remove(&key) + && prev_kv + { + prev_kvs.push((key.clone(), value).into()) } } diff --git a/src/common/meta/src/kv_backend/rds.rs b/src/common/meta/src/kv_backend/rds.rs index 35a5716544..8acab1eb65 100644 --- a/src/common/meta/src/kv_backend/rds.rs +++ b/src/common/meta/src/kv_backend/rds.rs @@ -26,12 +26,12 @@ use crate::kv_backend::txn::{ }; use crate::kv_backend::{KvBackend, TxnService}; use crate::metrics::METRIC_META_TXN_REQUEST; +use crate::rpc::KeyValue; use crate::rpc::store::{ BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest, BatchPutResponse, DeleteRangeRequest, DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse, }; -use crate::rpc::KeyValue; const RDS_STORE_OP_BATCH_GET: &str = "batch_get"; const RDS_STORE_OP_BATCH_PUT: &str = "batch_put"; diff --git a/src/common/meta/src/kv_backend/rds/mysql.rs b/src/common/meta/src/kv_backend/rds/mysql.rs index cb54d2cc7a..07130d5982 100644 --- a/src/common/meta/src/kv_backend/rds/mysql.rs +++ b/src/common/meta/src/kv_backend/rds/mysql.rs @@ -23,17 +23,17 @@ use sqlx::{MySql, MySqlPool, Row, Transaction as MySqlTransaction}; use strum::AsRefStr; use crate::error::{CreateMySqlPoolSnafu, MySqlExecutionSnafu, MySqlTransactionSnafu, Result}; -use crate::kv_backend::rds::{ - Executor, ExecutorFactory, ExecutorImpl, KvQueryExecutor, RdsStore, Transaction, - RDS_STORE_OP_BATCH_DELETE, RDS_STORE_OP_BATCH_GET, RDS_STORE_OP_BATCH_PUT, - RDS_STORE_OP_RANGE_DELETE, RDS_STORE_OP_RANGE_QUERY, RDS_STORE_TXN_RETRY_COUNT, -}; use crate::kv_backend::KvBackendRef; +use crate::kv_backend::rds::{ + Executor, ExecutorFactory, ExecutorImpl, KvQueryExecutor, RDS_STORE_OP_BATCH_DELETE, + RDS_STORE_OP_BATCH_GET, RDS_STORE_OP_BATCH_PUT, RDS_STORE_OP_RANGE_DELETE, + RDS_STORE_OP_RANGE_QUERY, RDS_STORE_TXN_RETRY_COUNT, RdsStore, Transaction, +}; +use crate::rpc::KeyValue; use crate::rpc::store::{ BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest, BatchPutResponse, DeleteRangeRequest, DeleteRangeResponse, RangeRequest, RangeResponse, }; -use crate::rpc::KeyValue; const MYSQL_STORE_NAME: &str = "mysql_store"; diff --git 
a/src/common/meta/src/kv_backend/rds/postgres.rs b/src/common/meta/src/kv_backend/rds/postgres.rs index fa8abadffa..16fa1b1008 100644 --- a/src/common/meta/src/kv_backend/rds/postgres.rs +++ b/src/common/meta/src/kv_backend/rds/postgres.rs @@ -19,11 +19,11 @@ use std::sync::Arc; use common_telemetry::debug; use deadpool_postgres::{Config, Pool, Runtime}; +// TLS-related imports (feature-gated) +use rustls::ClientConfig; use rustls::client::danger::{HandshakeSignatureValid, ServerCertVerified, ServerCertVerifier}; use rustls::pki_types::{CertificateDer, ServerName, UnixTime}; use rustls::server::ParsedCertificate; -// TLS-related imports (feature-gated) -use rustls::ClientConfig; use rustls::{DigitallySignedStruct, Error as TlsError, SignatureScheme}; use rustls_pemfile::{certs, private_key}; use snafu::ResultExt; @@ -36,17 +36,17 @@ use crate::error::{ CreatePostgresPoolSnafu, GetPostgresConnectionSnafu, LoadTlsCertificateSnafu, PostgresExecutionSnafu, PostgresTlsConfigSnafu, PostgresTransactionSnafu, Result, }; -use crate::kv_backend::rds::{ - Executor, ExecutorFactory, ExecutorImpl, KvQueryExecutor, RdsStore, Transaction, - RDS_STORE_OP_BATCH_DELETE, RDS_STORE_OP_BATCH_GET, RDS_STORE_OP_BATCH_PUT, - RDS_STORE_OP_RANGE_DELETE, RDS_STORE_OP_RANGE_QUERY, RDS_STORE_TXN_RETRY_COUNT, -}; use crate::kv_backend::KvBackendRef; +use crate::kv_backend::rds::{ + Executor, ExecutorFactory, ExecutorImpl, KvQueryExecutor, RDS_STORE_OP_BATCH_DELETE, + RDS_STORE_OP_BATCH_GET, RDS_STORE_OP_BATCH_PUT, RDS_STORE_OP_RANGE_DELETE, + RDS_STORE_OP_RANGE_QUERY, RDS_STORE_TXN_RETRY_COUNT, RdsStore, Transaction, +}; +use crate::rpc::KeyValue; use crate::rpc::store::{ BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest, BatchPutResponse, DeleteRangeRequest, DeleteRangeResponse, RangeRequest, RangeResponse, }; -use crate::rpc::KeyValue; /// TLS mode configuration for PostgreSQL connections. /// This mirrors the TlsMode from servers::tls to avoid circular dependencies. @@ -832,7 +832,9 @@ impl PgStore { Err(e) => { if tls_config.mode == TlsMode::Prefer { // Fallback to insecure connection if TLS fails - common_telemetry::info!("Failed to create TLS connector, falling back to insecure connection"); + common_telemetry::info!( + "Failed to create TLS connector, falling back to insecure connection" + ); cfg.create_pool(Some(Runtime::Tokio1), NoTls) .context(CreatePostgresPoolSnafu)? 
} else { @@ -901,7 +903,7 @@ mod tests { test_txn_compare_less, test_txn_compare_not_equal, test_txn_one_compare_op, text_txn_multi_compare_op, unprepare_kv, }; - use crate::{maybe_skip_postgres15_integration_test, maybe_skip_postgres_integration_test}; + use crate::{maybe_skip_postgres_integration_test, maybe_skip_postgres15_integration_test}; async fn build_pg_kv_backend(table_name: &str) -> Option { let endpoints = std::env::var("GT_POSTGRES_ENDPOINTS").unwrap_or_default(); @@ -1097,9 +1099,10 @@ mod tests { fn test_pg_template_with_schema() { let factory = PgSqlTemplateFactory::new(Some("test_schema"), "greptime_metakv"); let t = factory.build(); - assert!(t - .create_table_statement - .contains("\"test_schema\".\"greptime_metakv\"")); + assert!( + t.create_table_statement + .contains("\"test_schema\".\"greptime_metakv\"") + ); let upsert = t.generate_batch_upsert_query(1); assert!(upsert.contains("\"test_schema\".\"greptime_metakv\"")); let get = t.generate_batch_get_query(1); diff --git a/src/common/meta/src/kv_backend/test.rs b/src/common/meta/src/kv_backend/test.rs index 7e2c364b82..84ee277ccf 100644 --- a/src/common/meta/src/kv_backend/test.rs +++ b/src/common/meta/src/kv_backend/test.rs @@ -12,15 +12,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::sync::atomic::{AtomicU8, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicU8, Ordering}; use txn::{Compare, CompareOp, TxnOp}; use super::{KvBackend, *}; use crate::error::Error; -use crate::rpc::store::{BatchGetRequest, PutRequest}; use crate::rpc::KeyValue; +use crate::rpc::store::{BatchGetRequest, PutRequest}; use crate::util; pub fn mock_kvs(prefix: Vec) -> Vec { @@ -50,13 +50,15 @@ pub async fn prepare_kv(kv_backend: &impl KvBackend) { pub async fn prepare_kv_with_prefix(kv_backend: &impl KvBackend, prefix: Vec) { let kvs = mock_kvs(prefix); - assert!(kv_backend - .batch_put(BatchPutRequest { - kvs, - ..Default::default() - }) - .await - .is_ok()); + assert!( + kv_backend + .batch_put(BatchPutRequest { + kvs, + ..Default::default() + }) + .await + .is_ok() + ); } pub async fn unprepare_kv(kv_backend: &impl KvBackend, prefix: &[u8]) { diff --git a/src/common/meta/src/kv_backend/txn/etcd.rs b/src/common/meta/src/kv_backend/txn/etcd.rs index 339920a4eb..b8623c404a 100644 --- a/src/common/meta/src/kv_backend/txn/etcd.rs +++ b/src/common/meta/src/kv_backend/txn/etcd.rs @@ -19,8 +19,8 @@ use etcd_client::{ use crate::error::{self, Result}; use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp, TxnOpResponse, TxnResponse}; -use crate::rpc::store::{DeleteRangeResponse, PutResponse, RangeResponse}; use crate::rpc::KeyValue; +use crate::rpc::store::{DeleteRangeResponse, PutResponse, RangeResponse}; impl From for EtcdTxn { fn from(txn: Txn) -> Self { diff --git a/src/common/meta/src/leadership_notifier.rs b/src/common/meta/src/leadership_notifier.rs index 393bf3cb82..c6ba0e57b4 100644 --- a/src/common/meta/src/leadership_notifier.rs +++ b/src/common/meta/src/leadership_notifier.rs @@ -107,8 +107,8 @@ impl LeadershipChangeNotifier { #[cfg(test)] mod tests { - use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; + use std::sync::atomic::{AtomicBool, Ordering}; use super::*; diff --git a/src/common/meta/src/node_expiry_listener.rs b/src/common/meta/src/node_expiry_listener.rs index 7bc69f9065..a91bc347f6 100644 --- a/src/common/meta/src/node_expiry_listener.rs +++ b/src/common/meta/src/node_expiry_listener.rs @@ -17,14 +17,14 @@ use 
std::time::Duration; use common_telemetry::{debug, error, info, warn}; use tokio::task::JoinHandle; -use tokio::time::{interval, MissedTickBehavior}; +use tokio::time::{MissedTickBehavior, interval}; use crate::cluster::{NodeInfo, NodeInfoKey}; use crate::error; use crate::kv_backend::ResettableKvBackendRef; use crate::leadership_notifier::LeadershipChangeListener; -use crate::rpc::store::RangeRequest; use crate::rpc::KeyValue; +use crate::rpc::store::RangeRequest; /// [NodeExpiryListener] periodically checks all node info in memory and removes /// expired node info to prevent memory leak. diff --git a/src/common/meta/src/range_stream.rs b/src/common/meta/src/range_stream.rs index 367f081b63..2fc333064e 100644 --- a/src/common/meta/src/range_stream.rs +++ b/src/common/meta/src/range_stream.rs @@ -19,8 +19,8 @@ use snafu::ensure; use crate::error::{self, Result}; use crate::kv_backend::KvBackendRef; -use crate::rpc::store::{RangeRequest, RangeResponse}; use crate::rpc::KeyValue; +use crate::rpc::store::{RangeRequest, RangeResponse}; use crate::util::get_next_prefix_key; pub type KeyValueDecoderFn = dyn Fn(KeyValue) -> Result + Send + Sync; @@ -195,8 +195,8 @@ mod tests { use super::*; use crate::error::{Error, Result}; - use crate::kv_backend::memory::MemoryKvBackend; use crate::kv_backend::KvBackend; + use crate::kv_backend::memory::MemoryKvBackend; use crate::rpc::store::PutRequest; fn decoder(kv: KeyValue) -> Result<(Vec, Vec)> { @@ -266,14 +266,16 @@ mod tests { for i in 0..total { let key = vec![97 + i]; - assert!(kv_backend - .put(PutRequest { - key: key.clone(), - value: key.clone(), - ..Default::default() - }) - .await - .is_ok()); + assert!( + kv_backend + .put(PutRequest { + key: key.clone(), + value: key.clone(), + ..Default::default() + }) + .await + .is_ok() + ); expected.insert(key, ()); } diff --git a/src/common/meta/src/reconciliation/manager.rs b/src/common/meta/src/reconciliation/manager.rs index d5ffd36d84..860857c25e 100644 --- a/src/common/meta/src/reconciliation/manager.rs +++ b/src/common/meta/src/reconciliation/manager.rs @@ -15,7 +15,7 @@ use std::sync::Arc; use common_procedure::{ - watcher, BoxedProcedure, ProcedureId, ProcedureManagerRef, ProcedureWithId, + BoxedProcedure, ProcedureId, ProcedureManagerRef, ProcedureWithId, watcher, }; use common_telemetry::{error, info, warn}; use snafu::{OptionExt, ResultExt}; @@ -25,14 +25,14 @@ use table::table_reference::TableReference; use crate::cache_invalidator::CacheInvalidatorRef; use crate::error::{self, Result, TableNotFoundSnafu}; -use crate::key::table_name::TableNameKey; use crate::key::TableMetadataManagerRef; +use crate::key::table_name::TableNameKey; use crate::node_manager::NodeManagerRef; use crate::reconciliation::reconcile_catalog::ReconcileCatalogProcedure; -use crate::reconciliation::reconcile_database::{ReconcileDatabaseProcedure, DEFAULT_PARALLELISM}; +use crate::reconciliation::reconcile_database::{DEFAULT_PARALLELISM, ReconcileDatabaseProcedure}; use crate::reconciliation::reconcile_logical_tables::ReconcileLogicalTablesProcedure; -use crate::reconciliation::reconcile_table::resolve_column_metadata::ResolveStrategy; use crate::reconciliation::reconcile_table::ReconcileTableProcedure; +use crate::reconciliation::reconcile_table::resolve_column_metadata::ResolveStrategy; use crate::reconciliation::utils::Context; pub type ReconciliationManagerRef = Arc; diff --git a/src/common/meta/src/reconciliation/reconcile_catalog.rs b/src/common/meta/src/reconciliation/reconcile_catalog.rs index 
341ffb3fdd..d3c2e526dd 100644 --- a/src/common/meta/src/reconciliation/reconcile_catalog.rs +++ b/src/common/meta/src/reconciliation/reconcile_catalog.rs @@ -34,7 +34,7 @@ use crate::node_manager::NodeManagerRef; use crate::reconciliation::reconcile_catalog::start::ReconcileCatalogStart; use crate::reconciliation::reconcile_table::resolve_column_metadata::ResolveStrategy; use crate::reconciliation::utils::{ - wait_for_inflight_subprocedures, Context, ReconcileCatalogMetrics, SubprocedureMeta, + Context, ReconcileCatalogMetrics, SubprocedureMeta, wait_for_inflight_subprocedures, }; pub(crate) mod end; diff --git a/src/common/meta/src/reconciliation/reconcile_database.rs b/src/common/meta/src/reconciliation/reconcile_database.rs index f4beffa973..7d4262877c 100644 --- a/src/common/meta/src/reconciliation/reconcile_database.rs +++ b/src/common/meta/src/reconciliation/reconcile_database.rs @@ -36,15 +36,15 @@ use table::table_name::TableName; use crate::cache_invalidator::CacheInvalidatorRef; use crate::error::Result; -use crate::key::table_name::TableNameValue; use crate::key::TableMetadataManagerRef; +use crate::key::table_name::TableNameValue; use crate::lock_key::{CatalogLock, SchemaLock}; use crate::metrics; use crate::node_manager::NodeManagerRef; use crate::reconciliation::reconcile_database::start::ReconcileDatabaseStart; use crate::reconciliation::reconcile_table::resolve_column_metadata::ResolveStrategy; use crate::reconciliation::utils::{ - wait_for_inflight_subprocedures, Context, ReconcileDatabaseMetrics, SubprocedureMeta, + Context, ReconcileDatabaseMetrics, SubprocedureMeta, wait_for_inflight_subprocedures, }; pub(crate) const DEFAULT_PARALLELISM: usize = 64; diff --git a/src/common/meta/src/reconciliation/reconcile_logical_tables/reconcile_regions.rs b/src/common/meta/src/reconciliation/reconcile_logical_tables/reconcile_regions.rs index 078af1af65..115790a86e 100644 --- a/src/common/meta/src/reconciliation/reconcile_logical_tables/reconcile_regions.rs +++ b/src/common/meta/src/reconciliation/reconcile_logical_tables/reconcile_regions.rs @@ -15,7 +15,7 @@ use std::any::Any; use std::collections::HashMap; -use api::v1::region::{region_request, CreateRequests, RegionRequest, RegionRequestHeader}; +use api::v1::region::{CreateRequests, RegionRequest, RegionRequestHeader, region_request}; use common_procedure::{Context as ProcedureContext, Status}; use common_telemetry::info; use common_telemetry::tracing_context::TracingContext; @@ -25,7 +25,7 @@ use store_api::storage::{RegionId, RegionNumber, TableId}; use table::metadata::RawTableInfo; use crate::ddl::utils::{add_peer_context_if_needed, region_storage_path}; -use crate::ddl::{build_template_from_raw_table_info, CreateRequestBuilder}; +use crate::ddl::{CreateRequestBuilder, build_template_from_raw_table_info}; use crate::error::Result; use crate::reconciliation::reconcile_logical_tables::update_table_infos::UpdateTableInfos; use crate::reconciliation::reconcile_logical_tables::{ReconcileLogicalTablesContext, State}; diff --git a/src/common/meta/src/reconciliation/reconcile_logical_tables/reconciliation_start.rs b/src/common/meta/src/reconciliation/reconcile_logical_tables/reconciliation_start.rs index 1649abdc07..d723d59059 100644 --- a/src/common/meta/src/reconciliation/reconcile_logical_tables/reconciliation_start.rs +++ b/src/common/meta/src/reconciliation/reconcile_logical_tables/reconciliation_start.rs @@ -17,7 +17,7 @@ use std::any::Any; use common_procedure::{Context as ProcedureContext, Status}; use 
common_telemetry::info; use serde::{Deserialize, Serialize}; -use snafu::{ensure, OptionExt}; +use snafu::{OptionExt, ensure}; use store_api::storage::TableId; use table::table_name::TableName; diff --git a/src/common/meta/src/reconciliation/reconcile_table.rs b/src/common/meta/src/reconciliation/reconcile_table.rs index c1ca21b971..15b3154f84 100644 --- a/src/common/meta/src/reconciliation/reconcile_table.rs +++ b/src/common/meta/src/reconciliation/reconcile_table.rs @@ -45,7 +45,7 @@ use crate::node_manager::NodeManagerRef; use crate::reconciliation::reconcile_table::reconciliation_start::ReconciliationStart; use crate::reconciliation::reconcile_table::resolve_column_metadata::ResolveStrategy; use crate::reconciliation::utils::{ - build_table_meta_from_column_metadatas, Context, ReconcileTableMetrics, + Context, ReconcileTableMetrics, build_table_meta_from_column_metadatas, }; pub struct ReconcileTableContext { @@ -245,12 +245,9 @@ impl Procedure for ReconcileTableProcedure { if self.context.persistent_ctx.is_subprocedure { // The catalog and schema are already locked by the parent procedure. // Only lock the table name. - return LockKey::new(vec![TableNameLock::new( - table_ref.catalog, - table_ref.schema, - table_ref.table, - ) - .into()]); + return LockKey::new(vec![ + TableNameLock::new(table_ref.catalog, table_ref.schema, table_ref.table).into(), + ]); } LockKey::new(vec![ diff --git a/src/common/meta/src/reconciliation/reconcile_table/reconcile_regions.rs b/src/common/meta/src/reconciliation/reconcile_table/reconcile_regions.rs index 52c90d8a02..04202986e7 100644 --- a/src/common/meta/src/reconciliation/reconcile_table/reconcile_regions.rs +++ b/src/common/meta/src/reconciliation/reconcile_table/reconcile_regions.rs @@ -18,7 +18,7 @@ use std::collections::{HashMap, HashSet}; use api::v1::column_def::try_as_column_def; use api::v1::region::region_request::Body; use api::v1::region::{ - alter_request, AlterRequest, RegionColumnDef, RegionRequest, RegionRequestHeader, SyncColumns, + AlterRequest, RegionColumnDef, RegionRequest, RegionRequestHeader, SyncColumns, alter_request, }; use api::v1::{ColumnDef, SemanticType}; use async_trait::async_trait; @@ -145,7 +145,10 @@ impl State for ReconcileRegions { // Checks all column metadatas are consistent, and updates the table info if needed. if column_metadatas != self.column_metadatas { - info!("Datanode column metadatas are not consistent with metasrv, updating metasrv's column metadatas, table: {}, table_id: {}", table_name, table_id); + info!( + "Datanode column metadatas are not consistent with metasrv, updating metasrv's column metadatas, table: {}, table_id: {}", + table_name, table_id + ); // Safety: fetched in the above. 
let table_info_value = ctx.persistent_ctx.table_info_value.clone().unwrap(); return Ok(( diff --git a/src/common/meta/src/reconciliation/reconcile_table/resolve_column_metadata.rs b/src/common/meta/src/reconciliation/reconcile_table/resolve_column_metadata.rs index 97fd7d473b..ee4324102b 100644 --- a/src/common/meta/src/reconciliation/reconcile_table/resolve_column_metadata.rs +++ b/src/common/meta/src/reconciliation/reconcile_table/resolve_column_metadata.rs @@ -27,9 +27,9 @@ use crate::reconciliation::reconcile_table::reconcile_regions::ReconcileRegions; use crate::reconciliation::reconcile_table::update_table_info::UpdateTableInfo; use crate::reconciliation::reconcile_table::{ReconcileTableContext, State}; use crate::reconciliation::utils::{ - build_column_metadata_from_table_info, check_column_metadatas_consistent, - resolve_column_metadatas_with_latest, resolve_column_metadatas_with_metasrv, - ResolveColumnMetadataResult, + ResolveColumnMetadataResult, build_column_metadata_from_table_info, + check_column_metadatas_consistent, resolve_column_metadatas_with_latest, + resolve_column_metadatas_with_metasrv, }; /// Strategy for resolving column metadata inconsistencies. diff --git a/src/common/meta/src/reconciliation/reconcile_table/update_table_info.rs b/src/common/meta/src/reconciliation/reconcile_table/update_table_info.rs index 16284a22ef..790184c07a 100644 --- a/src/common/meta/src/reconciliation/reconcile_table/update_table_info.rs +++ b/src/common/meta/src/reconciliation/reconcile_table/update_table_info.rs @@ -23,8 +23,8 @@ use tonic::async_trait; use crate::cache_invalidator::Context as CacheContext; use crate::error::Result; use crate::instruction::CacheIdent; -use crate::key::table_info::TableInfoValue; use crate::key::DeserializedValueWithBytes; +use crate::key::table_info::TableInfoValue; use crate::reconciliation::reconcile_table::reconciliation_end::ReconciliationEnd; use crate::reconciliation::reconcile_table::{ReconcileTableContext, State}; use crate::rpc::router::region_distribution; diff --git a/src/common/meta/src/reconciliation/utils.rs b/src/common/meta/src/reconciliation/utils.rs index 3571a9f9e6..76b32bc2a6 100644 --- a/src/common/meta/src/reconciliation/utils.rs +++ b/src/common/meta/src/reconciliation/utils.rs @@ -18,11 +18,11 @@ use std::ops::AddAssign; use std::time::Instant; use api::v1::SemanticType; -use common_procedure::{watcher, Context as ProcedureContext, ProcedureId}; +use common_procedure::{Context as ProcedureContext, ProcedureId, watcher}; use common_telemetry::{error, warn}; use datatypes::schema::ColumnSchema; use futures::future::{join_all, try_join_all}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::metadata::{ColumnMetadata, RegionMetadata}; use store_api::storage::consts::ReservedColumnId; use store_api::storage::{RegionId, TableId}; @@ -41,8 +41,8 @@ use crate::key::TableMetadataManagerRef; use crate::metrics; use crate::node_manager::NodeManagerRef; use crate::reconciliation::reconcile_logical_tables::ReconcileLogicalTablesProcedure; -use crate::reconciliation::reconcile_table::resolve_column_metadata::ResolveStrategy; use crate::reconciliation::reconcile_table::ReconcileTableProcedure; +use crate::reconciliation::reconcile_table::resolve_column_metadata::ResolveStrategy; #[derive(Debug, PartialEq, Eq)] pub(crate) struct PartialRegionMetadata<'a> { @@ -541,7 +541,10 @@ impl Display for SubprocedureMeta { write!( f, "ReconcileLogicalTable(procedure_id: {}, physical_table_id: {}, 
physical_table_name: {}, logical_tables: {:?})", - meta.procedure_id, meta.physical_table_id, meta.physical_table_name, meta.logical_tables + meta.procedure_id, + meta.physical_table_id, + meta.physical_table_name, + meta.logical_tables ) } SubprocedureMeta::Database(meta) => { @@ -687,7 +690,14 @@ pub struct ReconcileDatabaseMetrics { impl Display for ReconcileDatabaseMetrics { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "succeeded_tables: {}, failed_tables: {}, succeeded_procedures: {}, failed_procedures: {}", self.succeeded_tables, self.failed_tables, self.succeeded_procedures, self.failed_procedures) + write!( + f, + "succeeded_tables: {}, failed_tables: {}, succeeded_procedures: {}, failed_procedures: {}", + self.succeeded_tables, + self.failed_tables, + self.succeeded_procedures, + self.failed_procedures + ) } } diff --git a/src/common/meta/src/region_keeper.rs b/src/common/meta/src/region_keeper.rs index 54d5d6cc11..605aaba893 100644 --- a/src/common/meta/src/region_keeper.rs +++ b/src/common/meta/src/region_keeper.rs @@ -90,11 +90,9 @@ impl MemoryRegionKeeper { region_ids: &mut HashSet, ) -> HashSet { let inner = self.inner.read().unwrap(); - let operating_regions = region_ids + region_ids .extract_if(|region_id| inner.contains(&(datanode_id, *region_id))) - .collect::>(); - - operating_regions + .collect::>() } /// Returns number of element in tracking set. diff --git a/src/common/meta/src/region_registry.rs b/src/common/meta/src/region_registry.rs index b579c583bf..1f672d563d 100644 --- a/src/common/meta/src/region_registry.rs +++ b/src/common/meta/src/region_registry.rs @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::collections::hash_map::Entry; use std::collections::HashMap; +use std::collections::hash_map::Entry; use std::sync::{Arc, RwLock}; use common_telemetry::warn; diff --git a/src/common/meta/src/rpc/ddl.rs b/src/common/meta/src/rpc/ddl.rs index ef0d532914..d23c943c77 100644 --- a/src/common/meta/src/rpc/ddl.rs +++ b/src/common/meta/src/rpc/ddl.rs @@ -37,13 +37,13 @@ use api::v1::{ CreateViewExpr, DropDatabaseExpr, DropFlowExpr, DropTableExpr, DropViewExpr, EvalInterval, ExpireAfter, Option as PbOption, QueryContext as PbQueryContext, TruncateTableExpr, }; -use base64::engine::general_purpose; use base64::Engine as _; +use base64::engine::general_purpose; use common_error::ext::BoxedError; use common_time::{DatabaseTimeToLive, Timestamp, Timezone}; use prost::Message; use serde::{Deserialize, Serialize}; -use serde_with::{serde_as, DefaultOnNull}; +use serde_with::{DefaultOnNull, serde_as}; use session::context::{QueryContextBuilder, QueryContextRef}; use snafu::{OptionExt, ResultExt}; use table::metadata::{RawTableInfo, TableId}; diff --git a/src/common/meta/src/rpc/router.rs b/src/common/meta/src/rpc/router.rs index 7edeb48096..de5c38a1d0 100644 --- a/src/common/meta/src/rpc/router.rs +++ b/src/common/meta/src/rpc/router.rs @@ -27,10 +27,10 @@ use store_api::storage::{RegionId, RegionNumber}; use strum::AsRefStr; use table::table_name::TableName; +use crate::DatanodeId; use crate::error::{self, Result}; use crate::key::RegionDistribution; use crate::peer::Peer; -use crate::DatanodeId; /// Returns the distribution of regions to datanodes. 
/// @@ -129,10 +129,10 @@ pub fn find_leader_regions(region_routes: &[RegionRoute], datanode: &Peer) -> Ve region_routes .iter() .filter_map(|x| { - if let Some(peer) = &x.leader_peer { - if peer == datanode { - return Some(x.region.id.region_number()); - } + if let Some(peer) = &x.leader_peer + && peer == datanode + { + return Some(x.region.id.region_number()); } None }) diff --git a/src/common/meta/src/sequence.rs b/src/common/meta/src/sequence.rs index 0e26b4c594..167d8bc001 100644 --- a/src/common/meta/src/sequence.rs +++ b/src/common/meta/src/sequence.rs @@ -307,7 +307,7 @@ impl Inner { return error::UnexpectedSequenceValueSnafu { err_msg: format!("Not a valid u64 for '{}': {v:?}", self.name), } - .fail() + .fail(); } }; Ok(u64::from_le_bytes(v)) diff --git a/src/common/meta/src/snapshot.rs b/src/common/meta/src/snapshot.rs index 08aebbc99c..c342716f2e 100644 --- a/src/common/meta/src/snapshot.rs +++ b/src/common/meta/src/snapshot.rs @@ -21,7 +21,7 @@ use std::time::Instant; use common_telemetry::info; use file::{Metadata, MetadataContent}; -use futures::{future, TryStreamExt}; +use futures::{TryStreamExt, future}; use object_store::ObjectStore; use snafu::{OptionExt, ResultExt}; use strum::Display; @@ -32,9 +32,9 @@ use crate::error::{ }; use crate::key::{CANDIDATES_ROOT, ELECTION_KEY}; use crate::kv_backend::KvBackendRef; -use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE}; -use crate::rpc::store::{BatchPutRequest, RangeRequest}; +use crate::range_stream::{DEFAULT_PAGE_SIZE, PaginationStream}; use crate::rpc::KeyValue; +use crate::rpc::store::{BatchPutRequest, RangeRequest}; use crate::snapshot::file::{Document, KeyValue as FileKeyValue}; /// The format of the backup file. @@ -331,12 +331,12 @@ mod tests { use std::assert_matches::assert_matches; use std::sync::Arc; - use common_test_util::temp_dir::{create_temp_dir, TempDir}; + use common_test_util::temp_dir::{TempDir, create_temp_dir}; use object_store::services::Fs; use super::*; - use crate::kv_backend::memory::MemoryKvBackend; use crate::kv_backend::KvBackend; + use crate::kv_backend::memory::MemoryKvBackend; use crate::rpc::store::PutRequest; #[test] diff --git a/src/common/meta/src/state_store.rs b/src/common/meta/src/state_store.rs index 6649eae065..5421972f3a 100644 --- a/src/common/meta/src/state_store.rs +++ b/src/common/meta/src/state_store.rs @@ -21,20 +21,20 @@ use common_procedure::error::{ use common_procedure::store::poison_store::PoisonStore; use common_procedure::store::state_store::{KeySet, KeyValueStream, StateStore}; use common_procedure::store::util::multiple_value_stream; -use futures::future::try_join_all; use futures::StreamExt; +use futures::future::try_join_all; use itertools::Itertools; use serde::{Deserialize, Serialize}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use crate::error::{ProcedurePoisonConflictSnafu, Result, UnexpectedSnafu}; use crate::key::txn_helper::TxnOpGetResponseSet; use crate::key::{DeserializedValueWithBytes, MetadataValue}; -use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp}; use crate::kv_backend::KvBackendRef; +use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp}; use crate::range_stream::PaginationStream; -use crate::rpc::store::{BatchDeleteRequest, PutRequest, RangeRequest}; use crate::rpc::KeyValue; +use crate::rpc::store::{BatchDeleteRequest, PutRequest, RangeRequest}; const DELIMITER: &str = "/"; @@ -240,7 +240,7 @@ impl KvStateStore { value: &PoisonValue, ) -> Result<( Txn, - impl FnOnce(&mut 
TxnOpGetResponseSet) -> PoisonDecodeResult, + impl FnOnce(&mut TxnOpGetResponseSet) -> PoisonDecodeResult + use<>, )> { let key = key.as_bytes().to_vec(); let value = value.try_as_raw_value()?; @@ -260,7 +260,7 @@ impl KvStateStore { value: PoisonValue, ) -> Result<( Txn, - impl FnOnce(&mut TxnOpGetResponseSet) -> PoisonDecodeResult, + impl FnOnce(&mut TxnOpGetResponseSet) -> PoisonDecodeResult + use<>, )> { let key = key.as_bytes().to_vec(); let value = value.try_as_raw_value()?; diff --git a/src/common/meta/src/stats/topic.rs b/src/common/meta/src/stats/topic.rs index 3e0415fb6b..7855b801ec 100644 --- a/src/common/meta/src/stats/topic.rs +++ b/src/common/meta/src/stats/topic.rs @@ -19,11 +19,11 @@ use std::time::Duration; use common_telemetry::{debug, warn}; use datafusion_common::HashSet; +use crate::DatanodeId; use crate::datanode::TopicStat; use crate::distributed_time_constants::{ TOPIC_STATS_REPORT_INTERVAL_SECS, TOPIC_STATS_RETENTION_SECS, }; -use crate::DatanodeId; pub type TopicStatsRegistryRef = Arc; diff --git a/src/common/meta/src/test_util.rs b/src/common/meta/src/test_util.rs index 95a2d23800..0ee782c187 100644 --- a/src/common/meta/src/test_util.rs +++ b/src/common/meta/src/test_util.rs @@ -20,18 +20,18 @@ use api::v1::region::{InsertRequests, RegionRequest}; pub use common_base::AffectedRows; use common_query::request::QueryRequest; use common_recordbatch::SendableRecordBatchStream; -use common_wal::config::kafka::common::{KafkaConnectionConfig, KafkaTopicConfig}; use common_wal::config::kafka::MetasrvKafkaConfig; +use common_wal::config::kafka::common::{KafkaConnectionConfig, KafkaTopicConfig}; use crate::cache_invalidator::DummyCacheInvalidator; use crate::ddl::flow_meta::FlowMetadataAllocator; use crate::ddl::table_meta::TableMetadataAllocator; use crate::ddl::{DdlContext, NoopRegionFailureDetectorControl}; use crate::error::Result; -use crate::key::flow::FlowMetadataManager; use crate::key::TableMetadataManager; -use crate::kv_backend::memory::MemoryKvBackend; +use crate::key::flow::FlowMetadataManager; use crate::kv_backend::KvBackendRef; +use crate::kv_backend::memory::MemoryKvBackend; use crate::node_manager::{ Datanode, DatanodeManager, DatanodeRef, Flownode, FlownodeManager, FlownodeRef, NodeManagerRef, }; @@ -40,7 +40,7 @@ use crate::region_keeper::MemoryRegionKeeper; use crate::region_registry::LeaderRegionRegistry; use crate::sequence::SequenceBuilder; use crate::wal_options_allocator::topic_pool::KafkaTopicPool; -use crate::wal_options_allocator::{build_kafka_topic_creator, WalOptionsAllocator}; +use crate::wal_options_allocator::{WalOptionsAllocator, build_kafka_topic_creator}; use crate::{DatanodeId, FlownodeId}; #[async_trait::async_trait] diff --git a/src/common/meta/src/util.rs b/src/common/meta/src/util.rs index e7a8eba303..e1155c74dd 100644 --- a/src/common/meta/src/util.rs +++ b/src/common/meta/src/util.rs @@ -31,12 +31,12 @@ pub fn get_prefix_end_key(key: &[u8]) -> Vec { } pub fn check_response_header(header: Option<&ResponseHeader>) -> Result<()> { - if let Some(header) = header { - if let Some(error) = &header.error { - let code = error.code; - let err_msg = &error.err_msg; - return IllegalServerStateSnafu { code, err_msg }.fail(); - } + if let Some(header) = header + && let Some(error) = &header.error + { + let code = error.code; + let err_msg = &error.err_msg; + return IllegalServerStateSnafu { code, err_msg }.fail(); } Ok(()) diff --git a/src/common/meta/src/wal_options_allocator.rs b/src/common/meta/src/wal_options_allocator.rs index 
fb631363f1..464b14b39f 100644 --- a/src/common/meta/src/wal_options_allocator.rs +++ b/src/common/meta/src/wal_options_allocator.rs @@ -22,8 +22,8 @@ use std::sync::Arc; use async_trait::async_trait; use common_wal::config::MetasrvWalConfig; -use common_wal::options::{KafkaWalOptions, WalOptions, WAL_OPTIONS_KEY}; -use snafu::{ensure, ResultExt}; +use common_wal::options::{KafkaWalOptions, WAL_OPTIONS_KEY, WalOptions}; +use snafu::{ResultExt, ensure}; use store_api::storage::{RegionId, RegionNumber}; use crate::error::{EncodeWalOptionsSnafu, InvalidTopicNamePrefixSnafu, Result}; @@ -173,8 +173,8 @@ pub fn extract_topic_from_wal_options( mod tests { use std::assert_matches::assert_matches; - use common_wal::config::kafka::common::KafkaTopicConfig; use common_wal::config::kafka::MetasrvKafkaConfig; + use common_wal::config::kafka::common::KafkaTopicConfig; use common_wal::maybe_skip_kafka_integration_test; use common_wal::test_util::get_kafka_endpoints; diff --git a/src/common/meta/src/wal_options_allocator/selector.rs b/src/common/meta/src/wal_options_allocator/selector.rs index 655808ce4c..e7b57686f0 100644 --- a/src/common/meta/src/wal_options_allocator/selector.rs +++ b/src/common/meta/src/wal_options_allocator/selector.rs @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicUsize, Ordering}; use rand::Rng; use snafu::ensure; diff --git a/src/common/meta/src/wal_options_allocator/topic_creator.rs b/src/common/meta/src/wal_options_allocator/topic_creator.rs index f19b9476ee..5b6d5870ef 100644 --- a/src/common/meta/src/wal_options_allocator/topic_creator.rs +++ b/src/common/meta/src/wal_options_allocator/topic_creator.rs @@ -14,7 +14,7 @@ use common_telemetry::{debug, error, info}; use common_wal::config::kafka::common::{ - KafkaConnectionConfig, KafkaTopicConfig, DEFAULT_BACKOFF_CONFIG, + DEFAULT_BACKOFF_CONFIG, KafkaConnectionConfig, KafkaTopicConfig, }; use rskafka::client::error::Error as RsKafkaError; use rskafka::client::error::ProtocolError::TopicAlreadyExists; diff --git a/src/common/meta/src/wal_options_allocator/topic_pool.rs b/src/common/meta/src/wal_options_allocator/topic_pool.rs index e77039d384..6be1cfb778 100644 --- a/src/common/meta/src/wal_options_allocator/topic_pool.rs +++ b/src/common/meta/src/wal_options_allocator/topic_pool.rs @@ -16,8 +16,8 @@ use std::fmt::{self, Formatter}; use std::sync::Arc; use common_telemetry::info; -use common_wal::config::kafka::MetasrvKafkaConfig; use common_wal::TopicSelectorType; +use common_wal::config::kafka::MetasrvKafkaConfig; use snafu::ensure; use crate::error::{InvalidNumTopicsSnafu, Result}; diff --git a/src/common/procedure-test/src/lib.rs b/src/common/procedure-test/src/lib.rs index f7a3ecb710..f6d816381d 100644 --- a/src/common/procedure-test/src/lib.rs +++ b/src/common/procedure-test/src/lib.rs @@ -18,7 +18,7 @@ use std::collections::HashMap; use std::sync::Arc; use async_trait::async_trait; -use common_procedure::local::{acquire_dynamic_key_lock, DynamicKeyLockGuard}; +use common_procedure::local::{DynamicKeyLockGuard, acquire_dynamic_key_lock}; use common_procedure::rwlock::KeyRwLock; use common_procedure::store::poison_store::PoisonStore; use common_procedure::test_util::InMemoryPoisonStore; diff --git a/src/common/procedure/src/error.rs b/src/common/procedure/src/error.rs index a82e00b94f..c8081a67fc 100644 --- a/src/common/procedure/src/error.rs +++ 
b/src/common/procedure/src/error.rs @@ -20,8 +20,8 @@ use common_error::status_code::StatusCode; use common_macro::stack_trace_debug; use snafu::{Location, Snafu}; -use crate::procedure::ProcedureId; use crate::PoisonKey; +use crate::procedure::ProcedureId; /// Procedure error. #[derive(Snafu)] diff --git a/src/common/procedure/src/event.rs b/src/common/procedure/src/event.rs index 022805f7bd..bc76de7842 100644 --- a/src/common/procedure/src/event.rs +++ b/src/common/procedure/src/event.rs @@ -16,8 +16,8 @@ use std::any::Any; use api::v1::value::ValueData; use api::v1::{ColumnDataType, ColumnSchema, Row, SemanticType}; -use common_event_recorder::error::Result; use common_event_recorder::Event; +use common_event_recorder::error::Result; use common_time::timestamp::{TimeUnit, Timestamp}; use crate::{ProcedureId, ProcedureState}; diff --git a/src/common/procedure/src/local.rs b/src/common/procedure/src/local.rs index 2730f52630..8aeb5a359b 100644 --- a/src/common/procedure/src/local.rs +++ b/src/common/procedure/src/local.rs @@ -27,7 +27,7 @@ use common_event_recorder::EventRecorderRef; use common_runtime::{RepeatedTask, TaskFunction}; use common_telemetry::tracing_context::{FutureExt, TracingContext}; use common_telemetry::{error, info, tracing}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use tokio::sync::watch::{self, Receiver, Sender}; use tokio::sync::{Mutex as TokioMutex, Notify}; @@ -129,10 +129,9 @@ impl ProcedureMeta { // Emit the event to the event recorder if the user metadata contains the eventable object. if let (Some(event_recorder), Some(user_metadata)) = (&self.event_recorder, &self.user_metadata) + && let Some(event) = user_metadata.to_event() { - if let Some(event) = user_metadata.to_event() { - event_recorder.record(Box::new(ProcedureEvent::new(self.id, event, state.clone()))); - } + event_recorder.record(Box::new(ProcedureEvent::new(self.id, event, state.clone()))); } // Safety: ProcedureMeta also holds the receiver, so `send()` should never fail. @@ -895,8 +894,8 @@ impl TaskFunction for RemoveOutdatedMetaFunction { #[cfg(test)] pub(crate) mod test_util { use common_test_util::temp_dir::TempDir; - use object_store::services::Fs as Builder; use object_store::ObjectStore; + use object_store::services::Fs as Builder; use super::*; @@ -1142,27 +1141,33 @@ mod tests { manager.manager_ctx.start(); let procedure_id = ProcedureId::random(); - assert!(manager - .procedure_state(procedure_id) - .await - .unwrap() - .is_none()); + assert!( + manager + .procedure_state(procedure_id) + .await + .unwrap() + .is_none() + ); assert!(manager.procedure_watcher(procedure_id).is_none()); let mut procedure = ProcedureToLoad::new("submit"); procedure.lock_key = LockKey::single_exclusive("test.submit"); - assert!(manager - .submit(ProcedureWithId { - id: procedure_id, - procedure: Box::new(procedure), - }) - .await - .is_ok()); - assert!(manager - .procedure_state(procedure_id) - .await - .unwrap() - .is_some()); + assert!( + manager + .submit(ProcedureWithId { + id: procedure_id, + procedure: Box::new(procedure), + }) + .await + .is_ok() + ); + assert!( + manager + .procedure_state(procedure_id) + .await + .unwrap() + .is_some() + ); // Wait for the procedure done. 
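(Aside, not part of the patch.) The `impl FnOnce(&mut TxnOpGetResponseSet) -> PoisonDecodeResult + use<>` return types added in src/common/meta/src/state_store.rs earlier in this change set use Rust 2024's precise-capturing syntax: in the new edition a return-position `impl Trait` captures all in-scope lifetimes by default, and an explicit `use<...>` list overrides that. A minimal sketch of the idea under edition 2024, with hypothetical names (`make_len_fn` is not from this repository):

// Illustrative sketch only. With `use<>` the opaque type captures no generic
// parameters or lifetimes, so the returned closure is not tied to the borrow of `prefix`.
fn make_len_fn(prefix: &str) -> impl Fn() -> usize + use<> {
    let owned = prefix.to_owned(); // own the data so nothing stays borrowed
    move || owned.len()
}

fn main() {
    let f = {
        let s = String::from("poison/");
        make_len_fn(&s)
        // `s` is dropped at the end of this block; `f` stays valid because the
        // opaque type declared `use<>` and therefore captured no lifetime from `&s`.
    };
    assert_eq!(f(), 7);
}

Without the `+ use<>` bound, the edition-2024 default capture rules would tie the return type to the elided lifetime of `prefix`, and the block above would no longer borrow-check.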
let mut watcher = manager.procedure_watcher(procedure_id).unwrap(); watcher.changed().await.unwrap(); @@ -1308,18 +1313,22 @@ mod tests { let mut procedure = ProcedureToLoad::new("submit"); procedure.lock_key = LockKey::single_exclusive("test.submit"); let procedure_id = ProcedureId::random(); - assert!(manager - .submit(ProcedureWithId { - id: procedure_id, - procedure: Box::new(procedure), - }) - .await - .is_ok()); - assert!(manager - .procedure_state(procedure_id) - .await - .unwrap() - .is_some()); + assert!( + manager + .submit(ProcedureWithId { + id: procedure_id, + procedure: Box::new(procedure), + }) + .await + .is_ok() + ); + assert!( + manager + .procedure_state(procedure_id) + .await + .unwrap() + .is_some() + ); } #[tokio::test(flavor = "multi_thread")] @@ -1342,23 +1351,27 @@ mod tests { let mut procedure = ProcedureToLoad::new("submit"); procedure.lock_key = LockKey::single_exclusive("test.submit"); let procedure_id = ProcedureId::random(); - assert!(manager - .submit(ProcedureWithId { - id: procedure_id, - procedure: Box::new(procedure), - }) - .await - .is_ok()); + assert!( + manager + .submit(ProcedureWithId { + id: procedure_id, + procedure: Box::new(procedure), + }) + .await + .is_ok() + ); let mut watcher = manager.procedure_watcher(procedure_id).unwrap(); watcher.changed().await.unwrap(); manager.start().await.unwrap(); tokio::time::sleep(Duration::from_millis(300)).await; - assert!(manager - .procedure_state(procedure_id) - .await - .unwrap() - .is_none()); + assert!( + manager + .procedure_state(procedure_id) + .await + .unwrap() + .is_none() + ); // The remove_outdated_meta method has been stopped, so any procedure meta-data will not be automatically removed. manager.stop().await.unwrap(); @@ -1367,43 +1380,51 @@ mod tests { let procedure_id = ProcedureId::random(); manager.manager_ctx.set_running(); - assert!(manager - .submit(ProcedureWithId { - id: procedure_id, - procedure: Box::new(procedure), - }) - .await - .is_ok()); + assert!( + manager + .submit(ProcedureWithId { + id: procedure_id, + procedure: Box::new(procedure), + }) + .await + .is_ok() + ); let mut watcher = manager.procedure_watcher(procedure_id).unwrap(); watcher.changed().await.unwrap(); tokio::time::sleep(Duration::from_millis(300)).await; - assert!(manager - .procedure_state(procedure_id) - .await - .unwrap() - .is_some()); + assert!( + manager + .procedure_state(procedure_id) + .await + .unwrap() + .is_some() + ); // After restart let mut procedure = ProcedureToLoad::new("submit"); procedure.lock_key = LockKey::single_exclusive("test.submit"); let procedure_id = ProcedureId::random(); - assert!(manager - .submit(ProcedureWithId { - id: procedure_id, - procedure: Box::new(procedure), - }) - .await - .is_ok()); + assert!( + manager + .submit(ProcedureWithId { + id: procedure_id, + procedure: Box::new(procedure), + }) + .await + .is_ok() + ); let mut watcher = manager.procedure_watcher(procedure_id).unwrap(); watcher.changed().await.unwrap(); manager.start().await.unwrap(); tokio::time::sleep(Duration::from_millis(300)).await; - assert!(manager - .procedure_state(procedure_id) - .await - .unwrap() - .is_none()); + assert!( + manager + .procedure_state(procedure_id) + .await + .unwrap() + .is_none() + ); } #[tokio::test] @@ -1452,18 +1473,22 @@ mod tests { // Submit a new procedure should succeed. 
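(Aside, not part of the patch.) The runner.rs and local.rs rewrites above, like several others in this change set (util.rs, rwlock.rs, adapter.rs), replace nested `if let` blocks with let-chains, available on edition 2024: `&&` can join a boolean test and one or more `let` patterns in a single `if`. A small self-contained sketch of the before/after shape, using hypothetical names:

// Illustrative only; `Header`, `check_nested` and `check_chained` are made-up names.
struct Header {
    error: Option<String>,
}

// Pre-2024 shape: one `if let` nested inside another.
fn check_nested(header: Option<&Header>) -> Result<(), String> {
    if let Some(header) = header {
        if let Some(error) = &header.error {
            return Err(error.clone());
        }
    }
    Ok(())
}

// Edition-2024 shape: a single let-chain, one indentation level less,
// mirroring the rewrites applied throughout this patch.
fn check_chained(header: Option<&Header>) -> Result<(), String> {
    if let Some(header) = header
        && let Some(error) = &header.error
    {
        return Err(error.clone());
    }
    Ok(())
}

fn main() {
    let bad = Header { error: Some("boom".into()) };
    let good = Header { error: None };
    assert_eq!(check_nested(Some(&bad)), check_chained(Some(&bad)));
    assert_eq!(check_nested(Some(&good)), check_chained(Some(&good)));
    assert_eq!(check_chained(None), Ok(()));
}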
let mut procedure = ProcedureToLoad::new("submit"); procedure.lock_key = LockKey::single_exclusive("test.submit"); - assert!(manager - .submit(ProcedureWithId { - id: procedure_id, - procedure: Box::new(procedure), - }) - .await - .is_ok()); - assert!(manager - .procedure_state(procedure_id) - .await - .unwrap() - .is_some()); + assert!( + manager + .submit(ProcedureWithId { + id: procedure_id, + procedure: Box::new(procedure), + }) + .await + .is_ok() + ); + assert!( + manager + .procedure_state(procedure_id) + .await + .unwrap() + .is_some() + ); // Wait for the procedure done. let mut watcher = manager.procedure_watcher(procedure_id).unwrap(); watcher.changed().await.unwrap(); diff --git a/src/common/procedure/src/local/runner.rs b/src/common/procedure/src/local/runner.rs index 56e7e47d9a..59f5e2cca2 100644 --- a/src/common/procedure/src/local/runner.rs +++ b/src/common/procedure/src/local/runner.rs @@ -269,12 +269,12 @@ impl Runner { } async fn rollback(&mut self, ctx: &Context, err: Arc) { - if self.procedure.rollback_supported() { - if let Err(e) = self.procedure.rollback(ctx).await { - self.meta - .set_state(ProcedureState::rolling_back(Arc::new(e))); - return; - } + if self.procedure.rollback_supported() + && let Err(e) = self.procedure.rollback(ctx).await + { + self.meta + .set_state(ProcedureState::rolling_back(Arc::new(e))); + return; } self.meta.set_state(ProcedureState::failed(err)); } @@ -314,20 +314,20 @@ impl Runner { } // Cleans poisons before persist. - if status.need_clean_poisons() { - if let Err(e) = self.clean_poisons().await { - error!(e; "Failed to clean poison for procedure: {}", self.meta.id); - self.meta.set_state(ProcedureState::retrying(Arc::new(e))); - return; - } + if status.need_clean_poisons() + && let Err(e) = self.clean_poisons().await + { + error!(e; "Failed to clean poison for procedure: {}", self.meta.id); + self.meta.set_state(ProcedureState::retrying(Arc::new(e))); + return; } - if status.need_persist() { - if let Err(e) = self.persist_procedure().await { - error!(e; "Failed to persist procedure: {}", self.meta.id); - self.meta.set_state(ProcedureState::retrying(Arc::new(e))); - return; - } + if status.need_persist() + && let Err(e) = self.persist_procedure().await + { + error!(e; "Failed to persist procedure: {}", self.meta.id); + self.meta.set_state(ProcedureState::retrying(Arc::new(e))); + return; } match status { @@ -593,8 +593,8 @@ impl Runner { #[cfg(test)] mod tests { use std::assert_matches::assert_matches; - use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::Arc; + use std::sync::atomic::{AtomicU64, Ordering}; use async_trait::async_trait; use common_error::ext::{ErrorExt, PlainError}; @@ -602,14 +602,14 @@ mod tests { use common_error::status_code::StatusCode; use common_test_util::temp_dir::create_temp_dir; use futures::future::join_all; - use futures_util::future::BoxFuture; use futures_util::FutureExt; + use futures_util::future::BoxFuture; use object_store::{EntryMode, ObjectStore}; use tokio::sync::mpsc; use tokio::sync::watch::Receiver; use super::*; - use crate::local::{test_util, DynamicKeyLockGuard}; + use crate::local::{DynamicKeyLockGuard, test_util}; use crate::procedure::PoisonKeys; use crate::store::proc_path; use crate::test_util::InMemoryPoisonStore; diff --git a/src/common/procedure/src/procedure.rs b/src/common/procedure/src/procedure.rs index 995ff68d1d..843052ed97 100644 --- a/src/common/procedure/src/procedure.rs +++ b/src/common/procedure/src/procedure.rs @@ -21,7 +21,7 @@ use std::sync::Arc; use 
async_trait::async_trait; use common_event_recorder::{Event, Eventable}; use serde::{Deserialize, Serialize}; -use smallvec::{smallvec, SmallVec}; +use smallvec::{SmallVec, smallvec}; use snafu::{ResultExt, Snafu}; use tokio::sync::watch::Receiver; use uuid::Uuid; diff --git a/src/common/procedure/src/rwlock.rs b/src/common/procedure/src/rwlock.rs index cd5a7cf374..cbdfe30977 100644 --- a/src/common/procedure/src/rwlock.rs +++ b/src/common/procedure/src/rwlock.rs @@ -91,13 +91,13 @@ where let mut locks = self.inner.lock().unwrap(); let mut keys = Vec::new(); for key in iter { - if let Some(lock) = locks.get(key) { - if lock.try_write().is_ok() { - debug_assert_eq!(Arc::weak_count(lock), 0); - // Ensures nobody keeps this ref. - if Arc::strong_count(lock) == 1 { - keys.push(key); - } + if let Some(lock) = locks.get(key) + && lock.try_write().is_ok() + { + debug_assert_eq!(Arc::weak_count(lock), 0); + // Ensures nobody keeps this ref. + if Arc::strong_count(lock) == 1 { + keys.push(key); } } } diff --git a/src/common/procedure/src/store.rs b/src/common/procedure/src/store.rs index 3d6c0c7256..fcbaf0ae47 100644 --- a/src/common/procedure/src/store.rs +++ b/src/common/procedure/src/store.rs @@ -20,9 +20,9 @@ use futures::TryStreamExt; use serde::{Deserialize, Serialize}; use snafu::ResultExt; +use crate::ProcedureId; use crate::error::{Result, ToJsonSnafu}; pub(crate) use crate::store::state_store::StateStoreRef; -use crate::ProcedureId; pub mod poison_store; pub mod state_store; @@ -342,9 +342,9 @@ mod tests { use object_store::ObjectStore; + use crate::BoxedProcedure; use crate::procedure::PoisonKeys; use crate::store::state_store::ObjectStateStore; - use crate::BoxedProcedure; impl ProcedureStore { pub(crate) fn from_object_store(store: ObjectStore) -> ProcedureStore { @@ -355,7 +355,7 @@ mod tests { } use async_trait::async_trait; - use common_test_util::temp_dir::{create_temp_dir, TempDir}; + use common_test_util::temp_dir::{TempDir, create_temp_dir}; use object_store::services::Fs as Builder; use super::*; diff --git a/src/common/procedure/src/store/util.rs b/src/common/procedure/src/store/util.rs index a03ac9f983..e6ef5b62ec 100644 --- a/src/common/procedure/src/store/util.rs +++ b/src/common/procedure/src/store/util.rs @@ -16,7 +16,7 @@ use std::pin::Pin; use async_stream::try_stream; use futures::{Stream, TryStreamExt}; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use crate::error; use crate::error::Result; @@ -135,8 +135,8 @@ pub fn multiple_value_stream( mod tests { use std::assert_matches::assert_matches; - use futures::stream::{self}; use futures::TryStreamExt; + use futures::stream::{self}; use super::*; use crate::error::{self}; diff --git a/src/common/procedure/src/test_util.rs b/src/common/procedure/src/test_util.rs index 21edbef333..083736a45c 100644 --- a/src/common/procedure/src/test_util.rs +++ b/src/common/procedure/src/test_util.rs @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use std::collections::hash_map::Entry; use std::collections::HashMap; +use std::collections::hash_map::Entry; use std::sync::{Arc, RwLock}; use snafu::ensure; diff --git a/src/common/procedure/src/watcher.rs b/src/common/procedure/src/watcher.rs index ea351936ef..4ecd75ded0 100644 --- a/src/common/procedure/src/watcher.rs +++ b/src/common/procedure/src/watcher.rs @@ -64,7 +64,7 @@ mod tests { use super::*; use crate::error::Error; - use crate::local::{test_util, LocalManager, ManagerConfig}; + use crate::local::{LocalManager, ManagerConfig, test_util}; use crate::procedure::PoisonKeys; use crate::store::state_store::ObjectStateStore; use crate::test_util::InMemoryPoisonStore; diff --git a/src/common/query/src/lib.rs b/src/common/query/src/lib.rs index ab81e6e9fc..e528bd9d33 100644 --- a/src/common/query/src/lib.rs +++ b/src/common/query/src/lib.rs @@ -24,8 +24,8 @@ pub mod test_util; use std::fmt::{Debug, Display, Formatter}; use std::sync::Arc; -use api::greptime_proto::v1::add_column_location::LocationType; use api::greptime_proto::v1::AddColumnLocation as Location; +use api::greptime_proto::v1::add_column_location::LocationType; use common_recordbatch::{RecordBatches, SendableRecordBatchStream}; use datafusion::physical_plan::ExecutionPlan; use serde::{Deserialize, Serialize}; diff --git a/src/common/query/src/logical_plan.rs b/src/common/query/src/logical_plan.rs index 2e53f828d6..cecb4dd3c9 100644 --- a/src/common/query/src/logical_plan.rs +++ b/src/common/query/src/logical_plan.rs @@ -22,7 +22,7 @@ use datafusion::error::Result as DatafusionResult; use datafusion::logical_expr::{LogicalPlan, LogicalPlanBuilder}; use datafusion_common::{Column, TableReference}; use datafusion_expr::dml::InsertOp; -use datafusion_expr::{col, DmlStatement, TableSource, WriteOp}; +use datafusion_expr::{DmlStatement, TableSource, WriteOp, col}; pub use expr::{build_filter_from_timestamp, build_same_type_ts_filter}; use snafu::ResultExt; diff --git a/src/common/query/src/logical_plan/expr.rs b/src/common/query/src/logical_plan/expr.rs index ee9d621d0b..309115b05f 100644 --- a/src/common/query/src/logical_plan/expr.rs +++ b/src/common/query/src/logical_plan/expr.rs @@ -12,12 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+use common_time::Timestamp; use common_time::range::TimestampRange; use common_time::timestamp::TimeUnit; -use common_time::Timestamp; use datafusion_common::{Column, ScalarValue}; use datafusion_expr::expr::Expr; -use datafusion_expr::{and, binary_expr, Operator}; +use datafusion_expr::{Operator, and, binary_expr}; use datatypes::data_type::DataType; use datatypes::schema::ColumnSchema; use datatypes::value::Value; diff --git a/src/common/query/src/stream.rs b/src/common/query/src/stream.rs index c1a67fb631..2525887daf 100644 --- a/src/common/query/src/stream.rs +++ b/src/common/query/src/stream.rs @@ -16,10 +16,10 @@ use std::any::Any; use std::fmt::{Debug, Formatter}; use std::sync::{Arc, Mutex}; -use common_recordbatch::adapter::DfRecordBatchStreamAdapter; use common_recordbatch::SendableRecordBatchStream; -use datafusion::execution::context::TaskContext; +use common_recordbatch::adapter::DfRecordBatchStreamAdapter; use datafusion::execution::SendableRecordBatchStream as DfSendableRecordBatchStream; +use datafusion::execution::context::TaskContext; use datafusion::physical_expr::{EquivalenceProperties, Partitioning, PhysicalSortExpr}; use datafusion::physical_plan::execution_plan::{Boundedness, EmissionType}; use datafusion::physical_plan::{DisplayAs, DisplayFormatType, ExecutionPlan, PlanProperties}; diff --git a/src/common/recordbatch/src/adapter.rs b/src/common/recordbatch/src/adapter.rs index d73db16401..fdec79fdef 100644 --- a/src/common/recordbatch/src/adapter.rs +++ b/src/common/recordbatch/src/adapter.rs @@ -26,13 +26,13 @@ use datafusion::arrow::compute::cast; use datafusion::arrow::datatypes::SchemaRef as DfSchemaRef; use datafusion::error::Result as DfResult; use datafusion::execution::context::ExecutionProps; -use datafusion::logical_expr::utils::conjunction; use datafusion::logical_expr::Expr; +use datafusion::logical_expr::utils::conjunction; use datafusion::physical_expr::create_physical_expr; use datafusion::physical_plan::metrics::{BaselineMetrics, MetricValue}; use datafusion::physical_plan::{ - accept, DisplayFormatType, ExecutionPlan, ExecutionPlanVisitor, PhysicalExpr, - RecordBatchStream as DfRecordBatchStream, + DisplayFormatType, ExecutionPlan, ExecutionPlanVisitor, PhysicalExpr, + RecordBatchStream as DfRecordBatchStream, accept, }; use datafusion_common::arrow::error::ArrowError; use datafusion_common::{DataFusionError, ToDFSchema}; @@ -555,7 +555,7 @@ impl Stream for AsyncRecordBatchStreamAdapter { }; } AsyncRecordBatchStreamAdapterState::Ready(stream) => { - return Poll::Ready(ready!(Pin::new(stream).poll_next(cx))) + return Poll::Ready(ready!(Pin::new(stream).poll_next(cx))); } AsyncRecordBatchStreamAdapterState::Failed => return Poll::Ready(None), } @@ -575,10 +575,10 @@ fn custom_cast( target_type: &ArrowDataType, extype: Option, ) -> std::result::Result, ArrowError> { - if let ArrowDataType::Map(_, _) = array.data_type() { - if let ArrowDataType::Binary = target_type { - return convert_map_to_json_binary(array, extype); - } + if let ArrowDataType::Map(_, _) = array.data_type() + && let ArrowDataType::Binary = target_type + { + return convert_map_to_json_binary(array, extype); } cast(array, target_type) @@ -661,7 +661,7 @@ fn convert_map_to_json_binary( return Err(ArrowError::CastError(format!( "Failed to serialize JSON: {}", e - ))) + ))); } }; match jsonb::parse_value(json_string.as_bytes()) { @@ -670,7 +670,7 @@ fn convert_map_to_json_binary( return Err(ArrowError::CastError(format!( "Failed to serialize JSONB: {}", e - ))) + ))); } } } @@ -680,7 
+680,7 @@ fn convert_map_to_json_binary( return Err(ArrowError::CastError(format!( "Failed to serialize JSON: {}", e - ))) + ))); } }, }; @@ -706,8 +706,8 @@ mod test { use snafu::IntoError; use super::*; - use crate::error::Error; use crate::RecordBatches; + use crate::error::Error; #[tokio::test] async fn test_async_recordbatch_stream_adaptor() { diff --git a/src/common/recordbatch/src/filter.rs b/src/common/recordbatch/src/filter.rs index aa360bc0f3..d8955bf57b 100644 --- a/src/common/recordbatch/src/filter.rs +++ b/src/common/recordbatch/src/filter.rs @@ -23,7 +23,7 @@ use datafusion_common::arrow::array::{ArrayRef, Datum, Scalar}; use datafusion_common::arrow::buffer::BooleanBuffer; use datafusion_common::arrow::compute::kernels::cmp; use datafusion_common::cast::{as_boolean_array, as_null_array, as_string_array}; -use datafusion_common::{internal_err, DataFusionError, ScalarValue}; +use datafusion_common::{DataFusionError, ScalarValue, internal_err}; use datatypes::arrow::array::{ Array, ArrayAccessor, ArrayData, BooleanArray, BooleanBufferBuilder, RecordBatch, StringArrayType, @@ -213,7 +213,7 @@ impl SimpleFilterEvaluator { return UnsupportedOperationSnafu { reason: format!("{:?}", self.op), } - .fail() + .fail(); } }; result @@ -346,7 +346,7 @@ mod test { use std::sync::Arc; use datafusion::execution::context::ExecutionProps; - use datafusion::logical_expr::{col, lit, BinaryExpr}; + use datafusion::logical_expr::{BinaryExpr, col, lit}; use datafusion::physical_expr::create_physical_expr; use datafusion_common::{Column, DFSchema}; use datatypes::arrow::datatypes::{DataType, Field, Schema}; @@ -503,7 +503,10 @@ mod test { assert_eq!(or_evaluator.column_name, "col"); assert_eq!(or_evaluator.op, Operator::Or); assert_eq!(or_evaluator.literal_list.len(), 3); - assert_eq!(format!("{:?}", or_evaluator.literal_list), "[Scalar(StringArray\n[\n \"B\",\n]), Scalar(StringArray\n[\n \"C\",\n]), Scalar(StringArray\n[\n \"D\",\n])]"); + assert_eq!( + format!("{:?}", or_evaluator.literal_list), + "[Scalar(StringArray\n[\n \"B\",\n]), Scalar(StringArray\n[\n \"C\",\n]), Scalar(StringArray\n[\n \"D\",\n])]" + ); // Create a schema and batch for testing let schema = Schema::new(vec![Field::new("col", DataType::Utf8, false)]); diff --git a/src/common/recordbatch/src/lib.rs b/src/common/recordbatch/src/lib.rs index 863f384a03..1e50d76506 100644 --- a/src/common/recordbatch/src/lib.rs +++ b/src/common/recordbatch/src/lib.rs @@ -39,7 +39,7 @@ use error::Result; use futures::task::{Context, Poll}; use futures::{Stream, TryStreamExt}; pub use recordbatch::RecordBatch; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; pub trait RecordBatchStream: Stream> { fn name(&self) -> &str { diff --git a/src/common/recordbatch/src/recordbatch.rs b/src/common/recordbatch/src/recordbatch.rs index efbf5f804b..f83f51e584 100644 --- a/src/common/recordbatch/src/recordbatch.rs +++ b/src/common/recordbatch/src/recordbatch.rs @@ -24,13 +24,13 @@ use datatypes::value::Value; use datatypes::vectors::{Helper, VectorRef}; use serde::ser::{Error, SerializeStruct}; use serde::{Serialize, Serializer}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; +use crate::DfRecordBatch; use crate::error::{ self, CastVectorSnafu, ColumnNotExistsSnafu, DataTypesSnafu, ProjectArrowRecordBatchSnafu, Result, }; -use crate::DfRecordBatch; /// A two-dimensional batch of column-oriented data with a defined schema. 
#[derive(Clone, Debug, PartialEq)] diff --git a/src/common/recordbatch/src/util.rs b/src/common/recordbatch/src/util.rs index 8732a47e03..0a587e303d 100644 --- a/src/common/recordbatch/src/util.rs +++ b/src/common/recordbatch/src/util.rs @@ -132,8 +132,8 @@ mod tests { use datatypes::prelude::*; use datatypes::schema::{ColumnSchema, Schema, SchemaRef}; use datatypes::vectors::UInt32Vector; - use futures::task::{Context, Poll}; use futures::Stream; + use futures::task::{Context, Poll}; use super::*; use crate::adapter::RecordBatchMetrics; diff --git a/src/common/runtime/src/global.rs b/src/common/runtime/src/global.rs index 5cd008fa8c..9480c94713 100644 --- a/src/common/runtime/src/global.rs +++ b/src/common/runtime/src/global.rs @@ -48,7 +48,9 @@ impl Default for RuntimeOptions { } pub fn create_runtime(runtime_name: &str, thread_name: &str, worker_threads: usize) -> Runtime { - info!("Creating runtime with runtime_name: {runtime_name}, thread_name: {thread_name}, work_threads: {worker_threads}."); + info!( + "Creating runtime with runtime_name: {runtime_name}, thread_name: {thread_name}, work_threads: {worker_threads}." + ); Builder::default() .runtime_name(runtime_name) .thread_name(thread_name) diff --git a/src/common/runtime/src/repeated_task.rs b/src/common/runtime/src/repeated_task.rs index 6cc26e0545..5960f3e030 100644 --- a/src/common/runtime/src/repeated_task.rs +++ b/src/common/runtime/src/repeated_task.rs @@ -12,19 +12,19 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Mutex; +use std::sync::atomic::{AtomicBool, Ordering}; use std::time::Duration; use common_error::ext::ErrorExt; use common_telemetry::{debug, error}; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; +use crate::Runtime; use crate::error::{IllegalStateSnafu, Result, WaitGcTaskStopSnafu}; use crate::runtime::RuntimeTrait; -use crate::Runtime; /// Task to execute repeatedly. #[async_trait::async_trait] @@ -170,8 +170,8 @@ impl RepeatedTask { #[cfg(test)] mod tests { - use std::sync::atomic::AtomicI32; use std::sync::Arc; + use std::sync::atomic::AtomicI32; use super::*; use crate::error::Error; diff --git a/src/common/runtime/src/runtime.rs b/src/common/runtime/src/runtime.rs index d186b9d2f0..0a6159f97e 100644 --- a/src/common/runtime/src/runtime.rs +++ b/src/common/runtime/src/runtime.rs @@ -13,8 +13,8 @@ // limitations under the License. 
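(Aside, not part of the patch.) The many import-only hunks in this change set, such as repeated_task.rs just above and runtime.rs just below, appear to come from rustfmt's 2024 style edition: within a group it now sorts an uppercase-initial path segment (`Mutex`, `Arc`) ahead of a lowercase-initial one (`atomic`), and the same rule applies inside braces (e.g. `{EnvFilter, Registry, filter}` in logging.rs). A tiny sketch of the resulting order:

// Illustrative only: the same two imports as in repeated_task.rs, in the order
// rustfmt's style edition 2024 settles on (`Mutex` ahead of `atomic`).
use std::sync::Mutex;
use std::sync::atomic::{AtomicBool, Ordering};

fn main() {
    let stopped = AtomicBool::new(false);
    stopped.store(true, Ordering::Relaxed);
    let last_error = Mutex::new(Option::<String>::None);
    *last_error.lock().unwrap() = stopped.load(Ordering::Relaxed).then(|| "stopped".to_string());
    println!("{:?}", last_error.lock().unwrap());
}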
use std::future::Future; -use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicUsize, Ordering}; use std::thread; use std::time::Duration; diff --git a/src/common/runtime/src/runtime_default.rs b/src/common/runtime/src/runtime_default.rs index ea0b5c27e3..eedf410097 100644 --- a/src/common/runtime/src/runtime_default.rs +++ b/src/common/runtime/src/runtime_default.rs @@ -18,8 +18,8 @@ use std::sync::Arc; use tokio::runtime::Handle; pub use tokio::task::JoinHandle; -use crate::runtime::{Dropper, RuntimeTrait}; use crate::Builder; +use crate::runtime::{Dropper, RuntimeTrait}; /// A runtime to run future tasks #[derive(Clone, Debug)] diff --git a/src/common/runtime/src/runtime_throttleable.rs b/src/common/runtime/src/runtime_throttleable.rs index ea51270987..dc1be5cc81 100644 --- a/src/common/runtime/src/runtime_throttleable.rs +++ b/src/common/runtime/src/runtime_throttleable.rs @@ -26,9 +26,9 @@ use tokio::runtime::Handle; pub use tokio::task::JoinHandle; use tokio::time::Sleep; +use crate::Builder; use crate::error::{BuildRuntimeRateLimiterSnafu, Result}; use crate::runtime::{Dropper, Priority, RuntimeTrait}; -use crate::Builder; struct RuntimeRateLimiter { pub ratelimiter: Option, @@ -165,7 +165,7 @@ where match this.state { State::Pollable => {} - State::Throttled(ref mut sleep) => match sleep.poll_unpin(cx) { + State::Throttled(sleep) => match sleep.poll_unpin(cx) { Poll::Ready(_) => { *this.state = State::Pollable; } @@ -173,16 +173,16 @@ where }, }; - if let Some(ratelimiter) = &this.handle.ratelimiter { - if let Err(wait) = ratelimiter.try_wait() { - *this.state = State::Throttled(Box::pin(tokio::time::sleep(wait))); - match this.state.unwrap_backoff().poll_unpin(cx) { - Poll::Ready(_) => { - *this.state = State::Pollable; - } - Poll::Pending => { - return Poll::Pending; - } + if let Some(ratelimiter) = &this.handle.ratelimiter + && let Err(wait) = ratelimiter.try_wait() + { + *this.state = State::Throttled(Box::pin(tokio::time::sleep(wait))); + match this.state.unwrap_backoff().poll_unpin(cx) { + Poll::Ready(_) => { + *this.state = State::Pollable; + } + Poll::Pending => { + return Poll::Pending; } } } diff --git a/src/common/sql/src/convert.rs b/src/common/sql/src/convert.rs index fb9282a70c..a9aba95fac 100644 --- a/src/common/sql/src/convert.rs +++ b/src/common/sql/src/convert.rs @@ -14,18 +14,18 @@ use std::str::FromStr; -use common_time::timezone::Timezone; use common_time::Timestamp; +use common_time::timezone::Timezone; use datatypes::prelude::ConcreteDataType; use datatypes::schema::ColumnDefaultConstraint; use datatypes::types::{parse_string_to_json_type_value, parse_string_to_vector_type_value}; use datatypes::value::{OrderedF32, OrderedF64, Value}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; pub use sqlparser::ast::{ - visit_expressions_mut, visit_statements_mut, BinaryOperator, ColumnDef, ColumnOption, - ColumnOptionDef, DataType, Expr, Function, FunctionArg, FunctionArgExpr, FunctionArguments, - Ident, ObjectName, SqlOption, TableConstraint, TimezoneInfo, UnaryOperator, Value as SqlValue, - Visit, VisitMut, Visitor, VisitorMut, + BinaryOperator, ColumnDef, ColumnOption, ColumnOptionDef, DataType, Expr, Function, + FunctionArg, FunctionArgExpr, FunctionArguments, Ident, ObjectName, SqlOption, TableConstraint, + TimezoneInfo, UnaryOperator, Value as SqlValue, Visit, VisitMut, Visitor, VisitorMut, + visit_expressions_mut, visit_statements_mut, }; use crate::error::{ @@ -173,7 +173,7 @@ 
pub fn sql_value_to_value( value: sql_val.clone(), datatype: data_type.clone(), } - .fail() + .fail(); } }; @@ -221,7 +221,7 @@ pub fn sql_value_to_value( }, Value::String(_) | Value::Binary(_) | Value::List(_) => { - return InvalidUnaryOpSnafu { unary_op, value }.fail() + return InvalidUnaryOpSnafu { unary_op, value }.fail(); } } } @@ -243,10 +243,8 @@ pub(crate) fn parse_string_to_value( timezone: Option<&Timezone>, auto_string_to_numeric: bool, ) -> Result { - if auto_string_to_numeric { - if let Some(value) = auto_cast_to_numeric(&s, data_type)? { - return Ok(value); - } + if auto_string_to_numeric && let Some(value) = auto_cast_to_numeric(&s, data_type)? { + return Ok(value); } ensure!( @@ -814,14 +812,16 @@ mod test { } } - assert!(parse_string_to_value( - "json_col", - r#"Nicola Kovac is the best rifler in the world"#.to_string(), - &ConcreteDataType::json_datatype(), - None, - false, + assert!( + parse_string_to_value( + "json_col", + r#"Nicola Kovac is the best rifler in the world"#.to_string(), + &ConcreteDataType::json_datatype(), + None, + false, + ) + .is_err() ) - .is_err()) } #[test] @@ -966,14 +966,16 @@ mod test { } } - assert!(parse_string_to_value( - "timestamp_col", - "2022-02-22T00:01:01+08".to_string(), - &ConcreteDataType::timestamp_datatype(TimeUnit::Nanosecond), - None, - false, - ) - .is_err()); + assert!( + parse_string_to_value( + "timestamp_col", + "2022-02-22T00:01:01+08".to_string(), + &ConcreteDataType::timestamp_datatype(TimeUnit::Nanosecond), + None, + false, + ) + .is_err() + ); // with timezone match parse_string_to_value( @@ -998,42 +1000,50 @@ mod test { #[test] fn test_parse_placeholder_value() { - assert!(sql_value_to_value( - "test", - &ConcreteDataType::string_datatype(), - &SqlValue::Placeholder("default".into()), - None, - None, - false - ) - .is_err()); - assert!(sql_value_to_value( - "test", - &ConcreteDataType::string_datatype(), - &SqlValue::Placeholder("default".into()), - None, - Some(UnaryOperator::Minus), - false - ) - .is_err()); - assert!(sql_value_to_value( - "test", - &ConcreteDataType::uint16_datatype(), - &SqlValue::Number("3".into(), false), - None, - Some(UnaryOperator::Minus), - false - ) - .is_err()); - assert!(sql_value_to_value( - "test", - &ConcreteDataType::uint16_datatype(), - &SqlValue::Number("3".into(), false), - None, - None, - false - ) - .is_ok()); + assert!( + sql_value_to_value( + "test", + &ConcreteDataType::string_datatype(), + &SqlValue::Placeholder("default".into()), + None, + None, + false + ) + .is_err() + ); + assert!( + sql_value_to_value( + "test", + &ConcreteDataType::string_datatype(), + &SqlValue::Placeholder("default".into()), + None, + Some(UnaryOperator::Minus), + false + ) + .is_err() + ); + assert!( + sql_value_to_value( + "test", + &ConcreteDataType::uint16_datatype(), + &SqlValue::Number("3".into(), false), + None, + Some(UnaryOperator::Minus), + false + ) + .is_err() + ); + assert!( + sql_value_to_value( + "test", + &ConcreteDataType::uint16_datatype(), + &SqlValue::Number("3".into(), false), + None, + None, + false + ) + .is_ok() + ); } #[test] diff --git a/src/common/sql/src/default_constraint.rs b/src/common/sql/src/default_constraint.rs index fc9e9489b0..0366f9aec3 100644 --- a/src/common/sql/src/default_constraint.rs +++ b/src/common/sql/src/default_constraint.rs @@ -14,14 +14,14 @@ use common_time::timezone::Timezone; use datatypes::prelude::ConcreteDataType; -use datatypes::schema::constraint::{CURRENT_TIMESTAMP, CURRENT_TIMESTAMP_FN}; use datatypes::schema::ColumnDefaultConstraint; +use 
datatypes::schema::constraint::{CURRENT_TIMESTAMP, CURRENT_TIMESTAMP_FN}; use sqlparser::ast::ValueWithSpan; pub use sqlparser::ast::{ - visit_expressions_mut, visit_statements_mut, BinaryOperator, ColumnDef, ColumnOption, - ColumnOptionDef, DataType, Expr, Function, FunctionArg, FunctionArgExpr, FunctionArguments, - Ident, ObjectName, SqlOption, TableConstraint, TimezoneInfo, UnaryOperator, Value as SqlValue, - Visit, VisitMut, Visitor, VisitorMut, + BinaryOperator, ColumnDef, ColumnOption, ColumnOptionDef, DataType, Expr, Function, + FunctionArg, FunctionArgExpr, FunctionArguments, Ident, ObjectName, SqlOption, TableConstraint, + TimezoneInfo, UnaryOperator, Value as SqlValue, Visit, VisitMut, Visitor, VisitorMut, + visit_expressions_mut, visit_statements_mut, }; use crate::convert::{sql_number_to_value, sql_value_to_value}; diff --git a/src/common/sql/src/error.rs b/src/common/sql/src/error.rs index 8ef046a8c5..b777b54103 100644 --- a/src/common/sql/src/error.rs +++ b/src/common/sql/src/error.rs @@ -17,8 +17,8 @@ use std::any::Any; use common_error::ext::ErrorExt; use common_error::status_code::StatusCode; use common_macro::stack_trace_debug; -use common_time::timestamp::TimeUnit; use common_time::Timestamp; +use common_time::timestamp::TimeUnit; use datafusion_sql::sqlparser::ast::UnaryOperator; use datatypes::prelude::{ConcreteDataType, Value}; use snafu::{Location, Snafu}; diff --git a/src/common/substrait/src/df_substrait.rs b/src/common/substrait/src/df_substrait.rs index a03acace4a..5bc1cd14e4 100644 --- a/src/common/substrait/src/df_substrait.rs +++ b/src/common/substrait/src/df_substrait.rs @@ -16,9 +16,9 @@ use std::sync::Arc; use async_trait::async_trait; use bytes::{Buf, Bytes, BytesMut}; +use datafusion::execution::SessionStateBuilder; use datafusion::execution::context::SessionState; use datafusion::execution::runtime_env::RuntimeEnv; -use datafusion::execution::SessionStateBuilder; use datafusion::prelude::SessionConfig; use datafusion_expr::LogicalPlan; use datafusion_substrait::logical_plan::consumer::from_substrait_plan; diff --git a/src/common/telemetry/src/lib.rs b/src/common/telemetry/src/lib.rs index 2f3866d49b..0e1f9fa94a 100644 --- a/src/common/telemetry/src/lib.rs +++ b/src/common/telemetry/src/lib.rs @@ -22,7 +22,7 @@ mod panic_hook; pub mod tracing_context; mod tracing_sampler; -pub use logging::{init_default_ut_logging, init_global_logging, RELOAD_HANDLE}; +pub use logging::{RELOAD_HANDLE, init_default_ut_logging, init_global_logging}; pub use metric::dump_metrics; pub use panic_hook::set_panic_hook; pub use {common_error, tracing, tracing_subscriber}; diff --git a/src/common/telemetry/src/logging.rs b/src/common/telemetry/src/logging.rs index 09cd9c3ef6..3260d79097 100644 --- a/src/common/telemetry/src/logging.rs +++ b/src/common/telemetry/src/logging.rs @@ -22,7 +22,7 @@ use std::time::Duration; use common_base::serde::empty_string_as_default; use once_cell::sync::{Lazy, OnceCell}; use opentelemetry::trace::TracerProvider; -use opentelemetry::{global, KeyValue}; +use opentelemetry::{KeyValue, global}; use opentelemetry_otlp::{Protocol, SpanExporter, WithExportConfig, WithHttpConfig}; use opentelemetry_sdk::propagation::TraceContextPropagator; use opentelemetry_sdk::trace::Sampler; @@ -35,9 +35,9 @@ use tracing_subscriber::filter::{FilterFn, Targets}; use tracing_subscriber::fmt::Layer; use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::prelude::*; -use tracing_subscriber::{filter, EnvFilter, Registry}; +use tracing_subscriber::{EnvFilter, 
Registry, filter}; -use crate::tracing_sampler::{create_sampler, TracingSampleOptions}; +use crate::tracing_sampler::{TracingSampleOptions, create_sampler}; /// The default endpoint when use gRPC exporter protocol. pub const DEFAULT_OTLP_GRPC_ENDPOINT: &str = "http://localhost:4317"; diff --git a/src/common/telemetry/src/metric.rs b/src/common/telemetry/src/metric.rs index 666b054650..b9262290a8 100644 --- a/src/common/telemetry/src/metric.rs +++ b/src/common/telemetry/src/metric.rs @@ -231,7 +231,7 @@ mod test { use prometheus::{Counter, Gauge, Histogram, HistogramOpts, Opts}; use super::convert_label; - use crate::metric::{convert_metric_to_write_request, MetricFilter}; + use crate::metric::{MetricFilter, convert_metric_to_write_request}; #[test] fn test_convert_label() { diff --git a/src/common/telemetry/src/panic_hook.rs b/src/common/telemetry/src/panic_hook.rs index f4145af282..47e7b83bd7 100644 --- a/src/common/telemetry/src/panic_hook.rs +++ b/src/common/telemetry/src/panic_hook.rs @@ -52,19 +52,21 @@ pub fn set_panic_hook() { })); #[cfg(feature = "deadlock_detection")] - let _ = std::thread::spawn(move || loop { - std::thread::sleep(Duration::from_secs(5)); - let deadlocks = parking_lot::deadlock::check_deadlock(); - if deadlocks.is_empty() { - continue; - } + let _ = std::thread::spawn(move || { + loop { + std::thread::sleep(Duration::from_secs(5)); + let deadlocks = parking_lot::deadlock::check_deadlock(); + if deadlocks.is_empty() { + continue; + } - tracing::info!("{} deadlocks detected", deadlocks.len()); - for (i, threads) in deadlocks.iter().enumerate() { - tracing::info!("Deadlock #{}", i); - for t in threads { - tracing::info!("Thread Id {:#?}", t.thread_id()); - tracing::info!("{:#?}", t.backtrace()); + tracing::info!("{} deadlocks detected", deadlocks.len()); + for (i, threads) in deadlocks.iter().enumerate() { + tracing::info!("Deadlock #{}", i); + for t in threads { + tracing::info!("Thread Id {:#?}", t.thread_id()); + tracing::info!("{:#?}", t.backtrace()); + } } } }); diff --git a/src/common/telemetry/src/tracing_sampler.rs b/src/common/telemetry/src/tracing_sampler.rs index 843603d730..ccd029b781 100644 --- a/src/common/telemetry/src/tracing_sampler.rs +++ b/src/common/telemetry/src/tracing_sampler.rs @@ -14,10 +14,10 @@ use std::collections::HashSet; +use opentelemetry::KeyValue; use opentelemetry::trace::{ Link, SamplingDecision, SamplingResult, SpanKind, TraceContextExt, TraceId, TraceState, }; -use opentelemetry::KeyValue; use opentelemetry_sdk::trace::{Sampler, ShouldSample}; use serde::{Deserialize, Serialize}; diff --git a/src/common/test-util/src/flight.rs b/src/common/test-util/src/flight.rs index 2f0e63f766..ea8cc56377 100644 --- a/src/common/test-util/src/flight.rs +++ b/src/common/test-util/src/flight.rs @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use common_grpc::flight::{FlightEncoder, FlightMessage}; use common_grpc::FlightData; +use common_grpc::flight::{FlightEncoder, FlightMessage}; use common_recordbatch::DfRecordBatch; /// Encodes record batch to a Schema message and a RecordBatch message. 
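(Aside, not part of the patch.) The test changes in src/common/time below wrap `std::env::set_var("TZ", ...)` in an `unsafe` block; on edition 2024 `set_var` and `remove_var` are unsafe functions because mutating the process environment is not thread-safe. A minimal sketch of the call pattern, assuming a single-threaded context:

// Illustrative only. Edition 2024 makes std::env::set_var an unsafe fn, so the call
// must sit in an unsafe block with a justification for why it cannot race.
fn main() {
    // SAFETY: assumed single-threaded here (as in a test's setup); no other thread
    // is reading or writing the environment concurrently.
    unsafe {
        std::env::set_var("TZ", "Asia/Shanghai");
    }
    assert_eq!(std::env::var("TZ").as_deref(), Ok("Asia/Shanghai"));
}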
diff --git a/src/common/time/src/date.rs b/src/common/time/src/date.rs index 4274913925..bda9105f55 100644 --- a/src/common/time/src/date.rs +++ b/src/common/time/src/date.rs @@ -19,11 +19,11 @@ use serde::{Deserialize, Serialize}; use serde_json::Value; use snafu::ResultExt; +use crate::Timezone; use crate::error::{InvalidDateStrSnafu, ParseDateStrSnafu, Result}; use crate::interval::{IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth}; use crate::timezone::get_timezone; use crate::util::datetime_to_utc; -use crate::Timezone; const UNIX_EPOCH_FROM_CE: i32 = 719_163; diff --git a/src/common/time/src/duration.rs b/src/common/time/src/duration.rs index d39596b472..a1e582ca82 100644 --- a/src/common/time/src/duration.rs +++ b/src/common/time/src/duration.rs @@ -237,8 +237,8 @@ mod tests { use std::collections::hash_map::DefaultHasher; use std::hash::{Hash, Hasher}; - use crate::timestamp::TimeUnit; use crate::Duration; + use crate::timestamp::TimeUnit; #[test] fn test_duration() { diff --git a/src/common/time/src/lib.rs b/src/common/time/src/lib.rs index 66a9f887c1..9556253413 100644 --- a/src/common/time/src/lib.rs +++ b/src/common/time/src/lib.rs @@ -31,4 +31,4 @@ pub use range::RangeMillis; pub use timestamp::Timestamp; pub use timestamp_millis::TimestampMillis; pub use timezone::Timezone; -pub use ttl::{DatabaseTimeToLive, TimeToLive, FOREVER, INSTANT}; +pub use ttl::{DatabaseTimeToLive, FOREVER, INSTANT, TimeToLive}; diff --git a/src/common/time/src/range.rs b/src/common/time/src/range.rs index 91fc75fea4..d0fd9dc814 100644 --- a/src/common/time/src/range.rs +++ b/src/common/time/src/range.rs @@ -17,9 +17,9 @@ use std::ops::{Bound, RangeBounds}; use serde::{Deserialize, Serialize}; +use crate::Timestamp; use crate::timestamp::TimeUnit; use crate::timestamp_millis::TimestampMillis; -use crate::Timestamp; /// A half-open time range. /// @@ -241,10 +241,10 @@ impl TimestampRange { /// affect correctness. pub fn new_inclusive(start: Option, end: Option) -> Self { // check for emptiness - if let (Some(start_ts), Some(end_ts)) = (start, end) { - if start_ts > end_ts { - return Self::empty(); - } + if let (Some(start_ts), Some(end_ts)) = (start, end) + && start_ts > end_ts + { + return Self::empty(); } let end = if let Some(end) = end { @@ -446,9 +446,11 @@ mod tests { let empty_and_all = empty.and(&TimestampRange::min_to_max()); assert!(empty_and_all.is_empty()); assert!(empty.and(&empty).is_empty()); - assert!(empty - .and(&TimestampRange::with_unit(0, 10, TimeUnit::Millisecond).unwrap()) - .is_empty()); + assert!( + empty + .and(&TimestampRange::with_unit(0, 10, TimeUnit::Millisecond).unwrap()) + .is_empty() + ); // AND TimestampRange with different unit let anded = TimestampRange::with_unit(0, 10, TimeUnit::Millisecond) diff --git a/src/common/time/src/time.rs b/src/common/time/src/time.rs index 16a755ba25..00b123f660 100644 --- a/src/common/time/src/time.rs +++ b/src/common/time/src/time.rs @@ -19,7 +19,7 @@ use chrono::{NaiveDateTime, NaiveTime, TimeZone as ChronoTimeZone, Utc}; use serde::{Deserialize, Serialize}; use crate::timestamp::TimeUnit; -use crate::timezone::{get_timezone, Timezone}; +use crate::timezone::{Timezone, get_timezone}; /// Time value, represents the elapsed time since midnight in the unit of `TimeUnit`. 
#[derive(Debug, Clone, Default, Copy, Serialize, Deserialize)] @@ -376,7 +376,9 @@ mod tests { "10:00:00.001", Time::new(1, TimeUnit::Millisecond).to_timezone_aware_string(None) ); - std::env::set_var("TZ", "Asia/Shanghai"); + unsafe { + std::env::set_var("TZ", "Asia/Shanghai"); + } assert_eq!( "08:00:00.001", Time::new(1, TimeUnit::Millisecond) diff --git a/src/common/time/src/timestamp.rs b/src/common/time/src/timestamp.rs index cad239a94f..f94543f493 100644 --- a/src/common/time/src/timestamp.rs +++ b/src/common/time/src/timestamp.rs @@ -29,7 +29,7 @@ use snafu::{OptionExt, ResultExt}; use crate::error; use crate::error::{ArithmeticOverflowSnafu, ParseTimestampSnafu, Result, TimestampOverflowSnafu}; use crate::interval::{IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth}; -use crate::timezone::{get_timezone, Timezone}; +use crate::timezone::{Timezone, get_timezone}; use crate::util::{datetime_to_utc, div_ceil}; /// Timestamp represents the value of units(seconds/milliseconds/microseconds/nanoseconds) elapsed @@ -1018,9 +1018,11 @@ mod tests { .unwrap() ); - assert!(Timestamp::new(i64::MAX, TimeUnit::Second) - .convert_to(TimeUnit::Millisecond) - .is_none()); + assert!( + Timestamp::new(i64::MAX, TimeUnit::Second) + .convert_to(TimeUnit::Millisecond) + .is_none() + ); } #[test] @@ -1179,7 +1181,9 @@ mod tests { // $TZ doesn't take effort. #[test] fn test_parse_in_timezone() { - std::env::set_var("TZ", "Asia/Shanghai"); + unsafe { + std::env::set_var("TZ", "Asia/Shanghai"); + } assert_eq!( Timestamp::new(28800, TimeUnit::Second), Timestamp::from_str_utc("1970-01-01 08:00:00.000").unwrap() @@ -1232,7 +1236,9 @@ mod tests { #[test] fn test_to_timezone_aware_string() { set_default_timezone(Some("Asia/Shanghai")).unwrap(); - std::env::set_var("TZ", "Asia/Shanghai"); + unsafe { + std::env::set_var("TZ", "Asia/Shanghai"); + } assert_eq!( "1970-01-01 08:00:00.001", Timestamp::new(1, TimeUnit::Millisecond) diff --git a/src/common/time/src/timestamp_millis.rs b/src/common/time/src/timestamp_millis.rs index f640e14882..533021698b 100644 --- a/src/common/time/src/timestamp_millis.rs +++ b/src/common/time/src/timestamp_millis.rs @@ -14,8 +14,8 @@ use std::cmp::Ordering; -use crate::util::div_ceil; use crate::Timestamp; +use crate::util::div_ceil; /// Unix timestamp in millisecond resolution. 
/// diff --git a/src/common/time/src/ttl.rs b/src/common/time/src/ttl.rs index 0544cfb0d1..7e1acf7c4f 100644 --- a/src/common/time/src/ttl.rs +++ b/src/common/time/src/ttl.rs @@ -18,8 +18,8 @@ use std::time::Duration; use serde::{Deserialize, Serialize}; use snafu::ResultExt; -use crate::error::{Error, InvalidDatabaseTtlSnafu, ParseDurationSnafu}; use crate::Timestamp; +use crate::error::{Error, InvalidDatabaseTtlSnafu, ParseDurationSnafu}; pub const INSTANT: &str = "instant"; pub const FOREVER: &str = "forever"; diff --git a/src/common/time/src/util.rs b/src/common/time/src/util.rs index c9552d382f..ca02bb897c 100644 --- a/src/common/time/src/util.rs +++ b/src/common/time/src/util.rs @@ -17,8 +17,8 @@ use std::str::FromStr; use chrono::{LocalResult, NaiveDateTime, TimeZone}; use chrono_tz::Tz; -use crate::timezone::get_timezone; use crate::Timezone; +use crate::timezone::get_timezone; pub fn format_utc_datetime(utc: &NaiveDateTime, pattern: &str) -> String { match get_timezone(None) { @@ -76,11 +76,7 @@ pub fn yesterday_rfc3339() -> String { pub(crate) fn div_ceil(this: i64, rhs: i64) -> i64 { let d = this / rhs; let r = this % rhs; - if r > 0 && rhs > 0 { - d + 1 - } else { - d - } + if r > 0 && rhs > 0 { d + 1 } else { d } } /// Formats nanoseconds into human-readable time with dynamic unit selection. diff --git a/src/common/version/build.rs b/src/common/version/build.rs index ef0530b52c..08d87c0bec 100644 --- a/src/common/version/build.rs +++ b/src/common/version/build.rs @@ -18,7 +18,7 @@ use std::path::PathBuf; use build_data::{format_timestamp, get_source_time}; use cargo_manifest::Manifest; -use shadow_rs::{BuildPattern, ShadowBuilder, CARGO_METADATA, CARGO_TREE}; +use shadow_rs::{BuildPattern, CARGO_METADATA, CARGO_TREE, ShadowBuilder}; fn main() -> shadow_rs::SdResult<()> { println!( diff --git a/src/common/wal/src/config.rs b/src/common/wal/src/config.rs index 04b173060d..2cfea614dd 100644 --- a/src/common/wal/src/config.rs +++ b/src/common/wal/src/config.rs @@ -114,8 +114,8 @@ mod tests { use tests::kafka::common::KafkaTopicConfig; use super::*; - use crate::config::{DatanodeKafkaConfig, MetasrvKafkaConfig}; use crate::TopicSelectorType; + use crate::config::{DatanodeKafkaConfig, MetasrvKafkaConfig}; #[test] fn test_toml_raft_engine() { diff --git a/src/common/wal/src/config/kafka/common.rs b/src/common/wal/src/config/kafka/common.rs index 1028ca838b..1b9bcc77be 100644 --- a/src/common/wal/src/config/kafka/common.rs +++ b/src/common/wal/src/config/kafka/common.rs @@ -17,8 +17,8 @@ use std::sync::Arc; use std::time::Duration; use common_base::readable_size::ReadableSize; -use rskafka::client::{Credentials, SaslConfig}; use rskafka::BackoffConfig; +use rskafka::client::{Credentials, SaslConfig}; use rustls::{ClientConfig, RootCertStore}; use serde::{Deserialize, Serialize}; use snafu::{OptionExt, ResultExt}; @@ -46,7 +46,7 @@ pub const DEFAULT_FLUSH_TRIGGER_SIZE: ReadableSize = ReadableSize::mb(512); pub const DEFAULT_CHECKPOINT_TRIGGER_SIZE: ReadableSize = ReadableSize::mb(128); use crate::error::{self, Result}; -use crate::{TopicSelectorType, BROKER_ENDPOINT, TOPIC_NAME_PREFIX}; +use crate::{BROKER_ENDPOINT, TOPIC_NAME_PREFIX, TopicSelectorType}; /// The SASL configurations for kafka client. 
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] diff --git a/src/common/wal/src/config/kafka/metasrv.rs b/src/common/wal/src/config/kafka/metasrv.rs index bffde60c81..029fdb3fcd 100644 --- a/src/common/wal/src/config/kafka/metasrv.rs +++ b/src/common/wal/src/config/kafka/metasrv.rs @@ -18,8 +18,8 @@ use common_base::readable_size::ReadableSize; use serde::{Deserialize, Serialize}; use crate::config::kafka::common::{ - KafkaConnectionConfig, KafkaTopicConfig, DEFAULT_AUTO_PRUNE_INTERVAL, - DEFAULT_AUTO_PRUNE_PARALLELISM, DEFAULT_CHECKPOINT_TRIGGER_SIZE, DEFAULT_FLUSH_TRIGGER_SIZE, + DEFAULT_AUTO_PRUNE_INTERVAL, DEFAULT_AUTO_PRUNE_PARALLELISM, DEFAULT_CHECKPOINT_TRIGGER_SIZE, + DEFAULT_FLUSH_TRIGGER_SIZE, KafkaConnectionConfig, KafkaTopicConfig, }; /// Kafka wal configurations for metasrv. diff --git a/src/datanode/src/alive_keeper.rs b/src/datanode/src/alive_keeper.rs index aca2ef8b3d..06d82c932e 100644 --- a/src/datanode/src/alive_keeper.rs +++ b/src/datanode/src/alive_keeper.rs @@ -13,8 +13,8 @@ // limitations under the License. use std::collections::HashMap; -use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicBool, Ordering}; use api::v1::meta::GrantedRegion; use async_trait::async_trait; @@ -31,7 +31,7 @@ use store_api::region_request::{RegionCloseRequest, RegionRequest}; use store_api::storage::RegionId; #[cfg(test)] use tokio::sync::oneshot; -use tokio::sync::{mpsc, Mutex}; +use tokio::sync::{Mutex, mpsc}; use tokio::task::JoinHandle; use tokio::time::{Duration, Instant}; @@ -144,13 +144,13 @@ impl RegionAliveKeeper { async fn close_staled_region(&self, region_id: RegionId) { info!("Closing staled region: {region_id}"); let request = RegionRequest::Close(RegionCloseRequest {}); - if let Err(e) = self.region_server.handle_request(region_id, request).await { - if e.status_code() != StatusCode::RegionNotFound { - let _ = self - .region_server - .set_region_role(region_id, RegionRole::Follower); - error!(e; "Failed to close staled region {}, convert region to follower.", region_id); - } + if let Err(e) = self.region_server.handle_request(region_id, request).await + && e.status_code() != StatusCode::RegionNotFound + { + let _ = self + .region_server + .set_region_role(region_id, RegionRole::Follower); + error!(e; "Failed to close staled region {}, convert region to follower.", region_id); } } @@ -196,7 +196,7 @@ impl RegionAliveKeeper { return error::UnexpectedSnafu { violated: "RegionServerEventSender closed", } - .fail() + .fail(); } Err(mpsc::error::TryRecvError::Empty) => { break; diff --git a/src/datanode/src/config.rs b/src/datanode/src/config.rs index 5f7a0b8f85..9829c1a982 100644 --- a/src/datanode/src/config.rs +++ b/src/datanode/src/config.rs @@ -20,7 +20,7 @@ use common_options::memory::MemoryOptions; pub use common_procedure::options::ProcedureConfig; use common_telemetry::logging::{LoggingOptions, TracingOptions}; use common_wal::config::DatanodeWalConfig; -use common_workload::{sanitize_workload_types, DatanodeWorkloadType}; +use common_workload::{DatanodeWorkloadType, sanitize_workload_types}; use file_engine::config::EngineConfig as FileEngineConfig; use meta_client::MetaClientOptions; use metric_engine::config::EngineConfig as MetricEngineConfig; diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs index ae9986a792..97aff45448 100644 --- a/src/datanode/src/datanode.rs +++ b/src/datanode/src/datanode.rs @@ -28,12 +28,12 @@ use common_meta::key::{SchemaMetadataManager, 
SchemaMetadataManagerRef}; use common_meta::kv_backend::KvBackendRef; pub use common_procedure::options::ProcedureConfig; use common_telemetry::{error, info, warn}; +use common_wal::config::DatanodeWalConfig; use common_wal::config::kafka::DatanodeKafkaConfig; use common_wal::config::raft_engine::RaftEngineConfig; -use common_wal::config::DatanodeWalConfig; use file_engine::engine::FileRegionEngine; use log_store::kafka::log_store::KafkaLogStore; -use log_store::kafka::{default_index_file, GlobalIndexCollector}; +use log_store::kafka::{GlobalIndexCollector, default_index_file}; use log_store::raft_engine::log_store::RaftEngineLogStore; use meta_client::MetaClientRef; use metric_engine::engine::MetricEngine; @@ -42,11 +42,11 @@ use mito2::engine::{MitoEngine, MitoEngineBuilder}; use mito2::sst::file_ref::{FileReferenceManager, FileReferenceManagerRef}; use object_store::manager::{ObjectStoreManager, ObjectStoreManagerRef}; use object_store::util::normalize_dir; -use query::dummy_catalog::{DummyCatalogManager, TableProviderFactoryRef}; use query::QueryEngineFactory; +use query::dummy_catalog::{DummyCatalogManager, TableProviderFactoryRef}; use servers::export_metrics::ExportMetricsTask; use servers::server::ServerHandlers; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::path_utils::WAL_DIR; use store_api::region_engine::{RegionEngineRef, RegionRole}; use tokio::fs; @@ -59,14 +59,14 @@ use crate::error::{ ShutdownServerSnafu, StartServerSnafu, }; use crate::event_listener::{ - new_region_server_event_channel, NoopRegionServerEventListener, RegionServerEventListenerRef, - RegionServerEventReceiver, + NoopRegionServerEventListener, RegionServerEventListenerRef, RegionServerEventReceiver, + new_region_server_event_channel, }; use crate::greptimedb_telemetry::get_greptimedb_telemetry_task; use crate::heartbeat::HeartbeatTask; use crate::region_server::{DummyTableProviderFactory, RegionServer}; use crate::store::{self, new_object_store_without_cache}; -use crate::utils::{build_region_open_requests, RegionOpenRequests}; +use crate::utils::{RegionOpenRequests, build_region_open_requests}; /// Datanode service. 
pub struct Datanode { @@ -655,12 +655,12 @@ async fn open_all_regions( } for region_id in open_regions { - if open_with_writable { - if let Err(e) = region_server.set_region_role(region_id, RegionRole::Leader) { - error!( - e; "failed to convert region {region_id} to leader" - ); - } + if open_with_writable + && let Err(e) = region_server.set_region_role(region_id, RegionRole::Leader) + { + error!( + e; "failed to convert region {region_id} to leader" + ); } } @@ -719,17 +719,17 @@ mod tests { use cache::build_datanode_cache_registry; use common_base::Plugins; use common_meta::cache::LayeredCacheRegistryBuilder; - use common_meta::key::datanode_table::DatanodeTableManager; use common_meta::key::RegionRoleSet; - use common_meta::kv_backend::memory::MemoryKvBackend; + use common_meta::key::datanode_table::DatanodeTableManager; use common_meta::kv_backend::KvBackendRef; + use common_meta::kv_backend::memory::MemoryKvBackend; use mito2::engine::MITO_ENGINE_NAME; use store_api::region_request::RegionRequest; use store_api::storage::RegionId; use crate::config::DatanodeOptions; use crate::datanode::DatanodeBuilder; - use crate::tests::{mock_region_server, MockRegionEngine}; + use crate::tests::{MockRegionEngine, mock_region_server}; async fn setup_table_datanode(kv: &KvBackendRef) { let mgr = DatanodeTableManager::new(kv.clone()); diff --git a/src/datanode/src/greptimedb_telemetry.rs b/src/datanode/src/greptimedb_telemetry.rs index 91b08ca634..302e614666 100644 --- a/src/datanode/src/greptimedb_telemetry.rs +++ b/src/datanode/src/greptimedb_telemetry.rs @@ -12,13 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::sync::atomic::AtomicBool; use std::sync::Arc; +use std::sync::atomic::AtomicBool; use async_trait::async_trait; use common_greptimedb_telemetry::{ - default_get_uuid, Collector, GreptimeDBTelemetry, GreptimeDBTelemetryTask, - Mode as VersionReporterMode, TELEMETRY_INTERVAL, + Collector, GreptimeDBTelemetry, GreptimeDBTelemetryTask, Mode as VersionReporterMode, + TELEMETRY_INTERVAL, default_get_uuid, }; struct StandaloneGreptimeDBTelemetryCollector { diff --git a/src/datanode/src/heartbeat.rs b/src/datanode/src/heartbeat.rs index 352c35475d..be76d5811c 100644 --- a/src/datanode/src/heartbeat.rs +++ b/src/datanode/src/heartbeat.rs @@ -13,8 +13,8 @@ // limitations under the License. use std::collections::HashMap; -use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicBool, Ordering}; use std::time::Duration; use api::v1::meta::heartbeat_request::NodeWorkloads; @@ -32,11 +32,11 @@ use common_meta::heartbeat::mailbox::{HeartbeatMailbox, MailboxRef}; use common_meta::heartbeat::utils::outgoing_message_to_mailbox_message; use common_telemetry::{debug, error, info, trace, warn}; use common_workload::DatanodeWorkloadType; -use meta_client::client::{HeartbeatSender, MetaClient}; use meta_client::MetaClientRef; +use meta_client::client::{HeartbeatSender, MetaClient}; use servers::addrs; use snafu::ResultExt; -use tokio::sync::{mpsc, Notify}; +use tokio::sync::{Notify, mpsc}; use tokio::time::Instant; use self::handler::RegionHeartbeatResponseHandler; @@ -198,7 +198,9 @@ impl HeartbeatTask { let node_id = self.node_id; let node_epoch = self.node_epoch; let addr = &self.peer_addr; - info!("Starting heartbeat to Metasrv with interval {interval}. My node id is {node_id}, address is {addr}."); + info!( + "Starting heartbeat to Metasrv with interval {interval}. 
My node id is {node_id}, address is {addr}." + ); let meta_client = self.meta_client.clone(); let region_server_clone = self.region_server.clone(); diff --git a/src/datanode/src/heartbeat/handler.rs b/src/datanode/src/heartbeat/handler.rs index 5783498819..165b8e057b 100644 --- a/src/datanode/src/heartbeat/handler.rs +++ b/src/datanode/src/heartbeat/handler.rs @@ -13,12 +13,12 @@ // limitations under the License. use async_trait::async_trait; +use common_meta::RegionIdent; use common_meta::error::{InvalidHeartbeatResponseSnafu, Result as MetaResult}; use common_meta::heartbeat::handler::{ HandleControl, HeartbeatResponseHandler, HeartbeatResponseHandlerContext, }; use common_meta::instruction::{Instruction, InstructionReply}; -use common_meta::RegionIdent; use common_telemetry::error; use futures::future::BoxFuture; use snafu::OptionExt; @@ -140,10 +140,10 @@ impl HeartbeatResponseHandler for RegionHeartbeatResponseHandler { }) .await; - if let Some(reply) = reply { - if let Err(e) = mailbox.send((meta, reply)).await { - error!(e; "Failed to send reply to mailbox"); - } + if let Some(reply) = reply + && let Err(e) = mailbox.send((meta, reply)).await + { + error!(e; "Failed to send reply to mailbox"); } }); @@ -215,21 +215,27 @@ mod tests { let region_id = RegionId::new(1024, 1); let storage_path = "test"; let instruction = open_region_instruction(region_id, storage_path); - assert!(heartbeat_handler - .is_acceptable(&heartbeat_env.create_handler_ctx((meta.clone(), instruction)))); + assert!( + heartbeat_handler + .is_acceptable(&heartbeat_env.create_handler_ctx((meta.clone(), instruction))) + ); // Close region let instruction = close_region_instruction(region_id); - assert!(heartbeat_handler - .is_acceptable(&heartbeat_env.create_handler_ctx((meta.clone(), instruction)))); + assert!( + heartbeat_handler + .is_acceptable(&heartbeat_env.create_handler_ctx((meta.clone(), instruction))) + ); // Downgrade region let instruction = Instruction::DowngradeRegion(DowngradeRegion { region_id: RegionId::new(2048, 1), flush_timeout: Some(Duration::from_secs(1)), }); - assert!(heartbeat_handler - .is_acceptable(&heartbeat_env.create_handler_ctx((meta.clone(), instruction)))); + assert!( + heartbeat_handler + .is_acceptable(&heartbeat_env.create_handler_ctx((meta.clone(), instruction))) + ); // Upgrade region let instruction = Instruction::UpgradeRegion(UpgradeRegion { diff --git a/src/datanode/src/heartbeat/handler/close_region.rs b/src/datanode/src/heartbeat/handler/close_region.rs index 77cbbf2c9c..da83786a7b 100644 --- a/src/datanode/src/heartbeat/handler/close_region.rs +++ b/src/datanode/src/heartbeat/handler/close_region.rs @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use common_meta::instruction::{InstructionReply, SimpleReply}; use common_meta::RegionIdent; +use common_meta::instruction::{InstructionReply, SimpleReply}; use common_telemetry::{tracing, warn}; use futures_util::future::BoxFuture; use store_api::region_request::{RegionCloseRequest, RegionRequest}; @@ -38,7 +38,9 @@ impl HandlerContext { error: None, })), Err(error::Error::RegionNotFound { .. }) => { - warn!("Received a close region instruction from meta, but target region:{region_id} is not found."); + warn!( + "Received a close region instruction from meta, but target region:{region_id} is not found." 
+ ); Some(InstructionReply::CloseRegion(SimpleReply { result: true, error: None, diff --git a/src/datanode/src/heartbeat/handler/downgrade_region.rs b/src/datanode/src/heartbeat/handler/downgrade_region.rs index 6a7599920d..06d3ab046e 100644 --- a/src/datanode/src/heartbeat/handler/downgrade_region.rs +++ b/src/datanode/src/heartbeat/handler/downgrade_region.rs @@ -215,7 +215,7 @@ mod tests { use crate::error; use crate::heartbeat::handler::HandlerContext; - use crate::tests::{mock_region_server, MockRegionEngine}; + use crate::tests::{MockRegionEngine, mock_region_server}; #[tokio::test] async fn test_region_not_exist() { @@ -490,10 +490,12 @@ mod tests { assert_matches!(reply, Some(InstructionReply::DowngradeRegion(_))); if let InstructionReply::DowngradeRegion(reply) = reply.unwrap() { assert!(reply.exists); - assert!(reply - .error - .unwrap() - .contains("Failed to set region to readonly")); + assert!( + reply + .error + .unwrap() + .contains("Failed to set region to readonly") + ); assert!(reply.last_entry_id.is_none()); } } diff --git a/src/datanode/src/heartbeat/handler/flush_region.rs b/src/datanode/src/heartbeat/handler/flush_region.rs index 8f8ea2389a..963d3bf488 100644 --- a/src/datanode/src/heartbeat/handler/flush_region.rs +++ b/src/datanode/src/heartbeat/handler/flush_region.rs @@ -177,7 +177,7 @@ mod tests { use store_api::storage::RegionId; use super::*; - use crate::tests::{mock_region_server, MockRegionEngine}; + use crate::tests::{MockRegionEngine, mock_region_server}; #[tokio::test] async fn test_handle_flush_region_hint() { @@ -288,7 +288,7 @@ mod tests { assert!(reply.is_some()); if let Some(InstructionReply::FlushRegions(flush_reply)) = reply { assert!(!flush_reply.overall_success); // Should fail due to non-existent regions - // With fail-fast, only process regions until first failure + // With fail-fast, only process regions until first failure assert!(flush_reply.results.len() <= region_ids.len()); } else { panic!("Expected FlushRegions reply"); @@ -324,7 +324,7 @@ mod tests { assert!(reply.is_some()); if let Some(InstructionReply::FlushRegions(flush_reply)) = reply { assert!(!flush_reply.overall_success); // Should fail due to one non-existent region - // With try-all, should process all regions + // With try-all, should process all regions assert_eq!(flush_reply.results.len(), region_ids.len()); // First should succeed, second should fail assert!(flush_reply.results[0].1.is_ok()); diff --git a/src/datanode/src/heartbeat/handler/upgrade_region.rs b/src/datanode/src/heartbeat/handler/upgrade_region.rs index ded0ff2e9d..c1f238e059 100644 --- a/src/datanode/src/heartbeat/handler/upgrade_region.rs +++ b/src/datanode/src/heartbeat/handler/upgrade_region.rs @@ -141,7 +141,7 @@ mod tests { use crate::error; use crate::heartbeat::handler::HandlerContext; - use crate::tests::{mock_region_server, MockRegionEngine}; + use crate::tests::{MockRegionEngine, mock_region_server}; #[tokio::test] async fn test_region_not_exist() { diff --git a/src/datanode/src/heartbeat/task_tracker.rs b/src/datanode/src/heartbeat/task_tracker.rs index 1fff51f897..17b99fc3b7 100644 --- a/src/datanode/src/heartbeat/task_tracker.rs +++ b/src/datanode/src/heartbeat/task_tracker.rs @@ -19,8 +19,8 @@ use std::time::Duration; use futures_util::future::BoxFuture; use snafu::ResultExt; use store_api::storage::RegionId; -use tokio::sync::watch::{self, Receiver}; use tokio::sync::RwLock; +use tokio::sync::watch::{self, Receiver}; use crate::error::{self, Error, Result}; @@ -201,7 +201,7 @@ mod tests { use 
store_api::storage::RegionId; use tokio::sync::oneshot; - use crate::heartbeat::task_tracker::{wait, TaskTracker}; + use crate::heartbeat::task_tracker::{TaskTracker, wait}; #[derive(Debug, Clone, PartialEq, Eq)] struct TestResult { diff --git a/src/datanode/src/region_server.rs b/src/datanode/src/region_server.rs index 0859fae5ca..fdf981b97e 100644 --- a/src/datanode/src/region_server.rs +++ b/src/datanode/src/region_server.rs @@ -22,7 +22,7 @@ use api::region::RegionResponse; use api::v1::meta::TopicStat; use api::v1::region::sync_request::ManifestInfo; use api::v1::region::{ - region_request, ListMetadataRequest, RegionResponse as RegionResponseV1, SyncRequest, + ListMetadataRequest, RegionResponse as RegionResponseV1, SyncRequest, region_request, }; use api::v1::{ResponseHeader, Status}; use arrow_flight::{FlightData, Ticket}; @@ -31,15 +31,15 @@ use bytes::Bytes; use common_error::ext::{BoxedError, ErrorExt}; use common_error::status_code::StatusCode; use common_meta::datanode::TopicStatsReporter; -use common_query::request::QueryRequest; use common_query::OutputData; +use common_query::request::QueryRequest; use common_recordbatch::SendableRecordBatchStream; use common_runtime::Runtime; use common_telemetry::tracing::{self, info_span}; use common_telemetry::tracing_context::{FutureExt, TracingContext}; use common_telemetry::{debug, error, info, warn}; use dashmap::DashMap; -use datafusion::datasource::{provider_as_source, TableProvider}; +use datafusion::datasource::{TableProvider, provider_as_source}; use datafusion::error::Result as DfResult; use datafusion_common::tree_node::{Transformed, TreeNode, TreeNodeRewriter}; use datafusion_expr::{LogicalPlan, TableSource}; @@ -47,17 +47,17 @@ use futures_util::future::try_join_all; use metric_engine::engine::MetricEngine; use mito2::engine::MITO_ENGINE_NAME; use prost::Message; +use query::QueryEngineRef; pub use query::dummy_catalog::{ DummyCatalogList, DummyTableProviderFactory, TableProviderFactoryRef, }; -use query::QueryEngineRef; use serde_json; use servers::error::{self as servers_error, ExecuteGrpcRequestSnafu, Result as ServerResult}; +use servers::grpc::FlightCompression; use servers::grpc::flight::{FlightCraft, FlightRecordBatchStream, TonicStream}; use servers::grpc::region_server::RegionServerHandler; -use servers::grpc::FlightCompression; use session::context::{QueryContext, QueryContextBuilder, QueryContextRef}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::metric_engine_consts::{ FILE_ENGINE_NAME, LOGICAL_TABLE_METADATA_KEY, METRIC_ENGINE_NAME, }; @@ -792,7 +792,7 @@ impl RegionServerInner { Some(status) => match status.clone() { RegionEngineWithStatus::Registering(engine) => engine, RegionEngineWithStatus::Deregistering(_) => { - return error::RegionBusySnafu { region_id }.fail() + return error::RegionBusySnafu { region_id }.fail(); } RegionEngineWithStatus::Ready(_) => status.clone().into_engine(), }, @@ -809,10 +809,10 @@ impl RegionServerInner { RegionChange::Deregisters => match current_region_status { Some(status) => match status.clone() { RegionEngineWithStatus::Registering(_) => { - return error::RegionBusySnafu { region_id }.fail() + return error::RegionBusySnafu { region_id }.fail(); } RegionEngineWithStatus::Deregistering(_) => { - return Ok(CurrentEngine::EarlyReturn(0)) + return Ok(CurrentEngine::EarlyReturn(0)); } RegionEngineWithStatus::Ready(_) => status.clone().into_engine(), }, @@ -822,10 +822,10 @@ impl RegionServerInner { match 
current_region_status { Some(status) => match status.clone() { RegionEngineWithStatus::Registering(_) => { - return error::RegionNotReadySnafu { region_id }.fail() + return error::RegionNotReadySnafu { region_id }.fail(); } RegionEngineWithStatus::Deregistering(_) => { - return error::RegionNotFoundSnafu { region_id }.fail() + return error::RegionNotFoundSnafu { region_id }.fail(); } RegionEngineWithStatus::Ready(engine) => engine, }, @@ -1366,7 +1366,7 @@ mod tests { use super::*; use crate::error::Result; - use crate::tests::{mock_region_server, MockRegionEngine}; + use crate::tests::{MockRegionEngine, mock_region_server}; #[tokio::test] async fn test_region_registering() { diff --git a/src/datanode/src/service.rs b/src/datanode/src/service.rs index ea909b5467..04256527e0 100644 --- a/src/datanode/src/service.rs +++ b/src/datanode/src/service.rs @@ -16,8 +16,8 @@ use std::net::SocketAddr; use std::sync::Arc; use common_config::Configurable; -use servers::grpc::builder::GrpcServerBuilder; use servers::grpc::GrpcServer; +use servers::grpc::builder::GrpcServerBuilder; use servers::http::HttpServerBuilder; use servers::metrics_handler::MetricsHandler; use servers::server::{ServerHandler, ServerHandlers}; diff --git a/src/datanode/src/store.rs b/src/datanode/src/store.rs index dd8f7c3ca2..88b90883ef 100644 --- a/src/datanode/src/store.rs +++ b/src/datanode/src/store.rs @@ -21,13 +21,13 @@ use common_telemetry::info; use object_store::factory::new_raw_object_store; use object_store::layers::{LruCacheLayer, RetryLayer}; use object_store::services::Fs; -use object_store::util::{clean_temp_dir, join_dir, with_instrument_layers, PrintDetailedError}; +use object_store::util::{PrintDetailedError, clean_temp_dir, join_dir, with_instrument_layers}; use object_store::{ - Access, ObjectStore, ObjectStoreBuilder, ATOMIC_WRITE_DIR, OLD_ATOMIC_WRITE_DIR, + ATOMIC_WRITE_DIR, Access, OLD_ATOMIC_WRITE_DIR, ObjectStore, ObjectStoreBuilder, }; use snafu::prelude::*; -use crate::config::{ObjectStoreConfig, DEFAULT_OBJECT_STORE_CACHE_SIZE}; +use crate::config::{DEFAULT_OBJECT_STORE_CACHE_SIZE, ObjectStoreConfig}; use crate::error::{self, CreateDirSnafu, Result}; fn with_retry_layers(object_store: ObjectStore) -> ObjectStore { diff --git a/src/datanode/src/tests.rs b/src/datanode/src/tests.rs index 870973247c..1f6a882415 100644 --- a/src/datanode/src/tests.rs +++ b/src/datanode/src/tests.rs @@ -21,8 +21,8 @@ use async_trait::async_trait; use common_error::ext::BoxedError; use common_function::function_factory::ScalarFunctionFactory; use common_query::Output; -use common_runtime::runtime::{BuilderBuild, RuntimeTrait}; use common_runtime::Runtime; +use common_runtime::runtime::{BuilderBuild, RuntimeTrait}; use datafusion_expr::{AggregateUDF, LogicalPlan}; use query::dataframe::DataFrame; use query::planner::LogicalPlanner; diff --git a/src/datanode/src/utils.rs b/src/datanode/src/utils.rs index 77b9246c0f..64d6a40a9c 100644 --- a/src/datanode/src/utils.rs +++ b/src/datanode/src/utils.rs @@ -14,11 +14,11 @@ use std::collections::HashMap; +use common_meta::DatanodeId; use common_meta::key::datanode_table::DatanodeTableManager; use common_meta::key::topic_region::{TopicRegionKey, TopicRegionManager, TopicRegionValue}; use common_meta::kv_backend::KvBackendRef; use common_meta::wal_options_allocator::{extract_topic_from_wal_options, prepare_wal_options}; -use common_meta::DatanodeId; use futures::TryStreamExt; use snafu::ResultExt; use store_api::path_utils::table_dir; diff --git a/src/datatypes/src/data_type.rs 
b/src/datatypes/src/data_type.rs index 57bc85ca24..fb03869e41 100644 --- a/src/datatypes/src/data_type.rs +++ b/src/datatypes/src/data_type.rs @@ -32,11 +32,11 @@ use crate::type_id::LogicalTypeId; use crate::types::{ BinaryType, BooleanType, DateType, Decimal128Type, DictionaryType, DurationMicrosecondType, DurationMillisecondType, DurationNanosecondType, DurationSecondType, DurationType, Float32Type, - Float64Type, Int16Type, Int32Type, Int64Type, Int8Type, IntervalDayTimeType, + Float64Type, Int8Type, Int16Type, Int32Type, Int64Type, IntervalDayTimeType, IntervalMonthDayNanoType, IntervalType, IntervalYearMonthType, JsonType, ListType, NullType, StringType, StructType, TimeMillisecondType, TimeType, TimestampMicrosecondType, TimestampMillisecondType, TimestampNanosecondType, TimestampSecondType, TimestampType, - UInt16Type, UInt32Type, UInt64Type, UInt8Type, VectorType, + UInt8Type, UInt16Type, UInt32Type, UInt64Type, VectorType, }; use crate::value::Value; use crate::vectors::MutableVector; @@ -483,7 +483,7 @@ impl TryFrom<&ArrowDataType> for ConcreteDataType { return error::UnsupportedArrowTypeSnafu { arrow_type: dt.clone(), } - .fail() + .fail(); } }; diff --git a/src/datatypes/src/macros.rs b/src/datatypes/src/macros.rs index 19b7dad4ec..3d62b80a3a 100644 --- a/src/datatypes/src/macros.rs +++ b/src/datatypes/src/macros.rs @@ -47,8 +47,8 @@ macro_rules! with_match_primitive_type_id { use $crate::type_id::LogicalTypeId; use $crate::types::{ - Float32Type, Float64Type, Int16Type, Int32Type, Int64Type, Int8Type, UInt16Type, - UInt32Type, UInt64Type, UInt8Type, + Float32Type, Float64Type, Int8Type, Int16Type, Int32Type, Int64Type, UInt8Type, + UInt16Type, UInt32Type, UInt64Type, }; match $key_type { LogicalTypeId::Int8 => __with_ty__! { Int8Type }, diff --git a/src/datatypes/src/scalars.rs b/src/datatypes/src/scalars.rs index 4ac823704c..389888a9dd 100644 --- a/src/datatypes/src/scalars.rs +++ b/src/datatypes/src/scalars.rs @@ -18,8 +18,8 @@ use common_decimal::Decimal128; use common_time::Date; use crate::types::{ - Float32Type, Float64Type, Int16Type, Int32Type, Int64Type, Int8Type, UInt16Type, UInt32Type, - UInt64Type, UInt8Type, + Float32Type, Float64Type, Int8Type, Int16Type, Int32Type, Int64Type, UInt8Type, UInt16Type, + UInt32Type, UInt64Type, }; use crate::value::{ListValue, ListValueRef, Value}; use crate::vectors::{ diff --git a/src/datatypes/src/schema.rs b/src/datatypes/src/schema.rs index d792e93c8b..87dd788b91 100644 --- a/src/datatypes/src/schema.rs +++ b/src/datatypes/src/schema.rs @@ -22,18 +22,18 @@ use std::sync::Arc; use arrow::datatypes::{Field, Schema as ArrowSchema}; use datafusion_common::DFSchemaRef; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use crate::error::{self, DuplicateColumnSnafu, Error, ProjectArrowSchemaSnafu, Result}; use crate::prelude::ConcreteDataType; pub use crate::schema::column_schema::{ - ColumnExtType, ColumnSchema, FulltextAnalyzer, FulltextBackend, FulltextOptions, Metadata, - SkippingIndexOptions, SkippingIndexType, COLUMN_FULLTEXT_CHANGE_OPT_KEY_ENABLE, - COLUMN_FULLTEXT_OPT_KEY_ANALYZER, COLUMN_FULLTEXT_OPT_KEY_BACKEND, - COLUMN_FULLTEXT_OPT_KEY_CASE_SENSITIVE, COLUMN_FULLTEXT_OPT_KEY_FALSE_POSITIVE_RATE, - COLUMN_FULLTEXT_OPT_KEY_GRANULARITY, COLUMN_SKIPPING_INDEX_OPT_KEY_FALSE_POSITIVE_RATE, - COLUMN_SKIPPING_INDEX_OPT_KEY_GRANULARITY, COLUMN_SKIPPING_INDEX_OPT_KEY_TYPE, COMMENT_KEY, - FULLTEXT_KEY, INVERTED_INDEX_KEY, SKIPPING_INDEX_KEY, TIME_INDEX_KEY, + COLUMN_FULLTEXT_CHANGE_OPT_KEY_ENABLE, 
COLUMN_FULLTEXT_OPT_KEY_ANALYZER, + COLUMN_FULLTEXT_OPT_KEY_BACKEND, COLUMN_FULLTEXT_OPT_KEY_CASE_SENSITIVE, + COLUMN_FULLTEXT_OPT_KEY_FALSE_POSITIVE_RATE, COLUMN_FULLTEXT_OPT_KEY_GRANULARITY, + COLUMN_SKIPPING_INDEX_OPT_KEY_FALSE_POSITIVE_RATE, COLUMN_SKIPPING_INDEX_OPT_KEY_GRANULARITY, + COLUMN_SKIPPING_INDEX_OPT_KEY_TYPE, COMMENT_KEY, ColumnExtType, ColumnSchema, FULLTEXT_KEY, + FulltextAnalyzer, FulltextBackend, FulltextOptions, INVERTED_INDEX_KEY, Metadata, + SKIPPING_INDEX_KEY, SkippingIndexOptions, SkippingIndexType, TIME_INDEX_KEY, }; pub use crate::schema::constraint::ColumnDefaultConstraint; pub use crate::schema::raw::RawSchema; @@ -487,10 +487,12 @@ mod tests { .with_time_index(true), ColumnSchema::new("col2", ConcreteDataType::float64_datatype(), false), ]; - assert!(SchemaBuilder::try_from(column_schemas) - .unwrap() - .build() - .is_err()); + assert!( + SchemaBuilder::try_from(column_schemas) + .unwrap() + .build() + .is_err() + ); let column_schemas = vec![ ColumnSchema::new("col1", ConcreteDataType::int32_datatype(), true), @@ -498,9 +500,11 @@ mod tests { .with_time_index(true), ]; - assert!(SchemaBuilder::try_from(column_schemas) - .unwrap() - .build() - .is_err()); + assert!( + SchemaBuilder::try_from(column_schemas) + .unwrap() + .build() + .is_err() + ); } } diff --git a/src/datatypes/src/schema/column_schema.rs b/src/datatypes/src/schema/column_schema.rs index fb539cad58..24ab30b61b 100644 --- a/src/datatypes/src/schema/column_schema.rs +++ b/src/datatypes/src/schema/column_schema.rs @@ -18,13 +18,13 @@ use std::str::FromStr; use arrow::datatypes::Field; use serde::{Deserialize, Serialize}; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use sqlparser_derive::{Visit, VisitMut}; use crate::data_type::{ConcreteDataType, DataType}; use crate::error::{self, Error, InvalidFulltextOptionSnafu, ParseExtendedTypeSnafu, Result}; -use crate::schema::constraint::ColumnDefaultConstraint; use crate::schema::TYPE_KEY; +use crate::schema::constraint::ColumnDefaultConstraint; use crate::value::Value; use crate::vectors::VectorRef; @@ -803,7 +803,9 @@ impl SkippingIndexOptions { ensure!( 0.0 < false_positive_rate && false_positive_rate <= 1.0, error::InvalidSkippingIndexOptionSnafu { - msg: format!("Invalid false positive rate: {false_positive_rate}, expected: 0.0 < rate <= 1.0"), + msg: format!( + "Invalid false positive rate: {false_positive_rate}, expected: 0.0 < rate <= 1.0" + ), } ); ensure!( @@ -945,10 +947,12 @@ mod tests { let column_schema = ColumnSchema::new("test", ConcreteDataType::int32_datatype(), true) .with_default_constraint(Some(ColumnDefaultConstraint::Value(Value::from(99)))) .unwrap(); - assert!(column_schema - .metadata() - .get(DEFAULT_CONSTRAINT_KEY) - .is_none()); + assert!( + column_schema + .metadata() + .get(DEFAULT_CONSTRAINT_KEY) + .is_none() + ); let field = Field::try_from(&column_schema).unwrap(); assert_eq!("test", field.name()); @@ -975,10 +979,12 @@ mod tests { .unwrap(); assert_eq!("v1", column_schema.metadata().get("k1").unwrap()); assert_eq!("test comment", column_schema.column_comment().unwrap()); - assert!(column_schema - .metadata() - .get(DEFAULT_CONSTRAINT_KEY) - .is_none()); + assert!( + column_schema + .metadata() + .get(DEFAULT_CONSTRAINT_KEY) + .is_none() + ); let field = Field::try_from(&column_schema).unwrap(); assert_eq!("v1", field.metadata().get("k1").unwrap()); @@ -1202,7 +1208,10 @@ mod tests { assert_eq!(DEFAULT_FALSE_POSITIVE_RATE, options.false_positive_rate()); let options_str = 
serde_json::to_string(&options).unwrap(); - assert_eq!(options_str, "{\"granularity\":10240,\"false-positive-rate-in-10000\":100,\"index-type\":\"BloomFilter\"}"); + assert_eq!( + options_str, + "{\"granularity\":10240,\"false-positive-rate-in-10000\":100,\"index-type\":\"BloomFilter\"}" + ); } #[test] @@ -1233,6 +1242,9 @@ mod tests { assert_eq!(DEFAULT_FALSE_POSITIVE_RATE, options.false_positive_rate()); let options_str = serde_json::to_string(&options).unwrap(); - assert_eq!(options_str, "{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false,\"backend\":\"bloom\",\"granularity\":10240,\"false-positive-rate-in-10000\":100}"); + assert_eq!( + options_str, + "{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false,\"backend\":\"bloom\",\"granularity\":10240,\"false-positive-rate-in-10000\":100}" + ); } } diff --git a/src/datatypes/src/schema/constraint.rs b/src/datatypes/src/schema/constraint.rs index e7bd5d2063..01928dc3b5 100644 --- a/src/datatypes/src/schema/constraint.rs +++ b/src/datatypes/src/schema/constraint.rs @@ -14,9 +14,9 @@ use std::fmt::{Display, Formatter}; -use common_time::{util, Timestamp}; +use common_time::{Timestamp, util}; use serde::{Deserialize, Serialize}; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use crate::data_type::{ConcreteDataType, DataType}; use crate::error::{self, Result}; @@ -331,9 +331,11 @@ mod tests { constraint.validate(&data_type, false).unwrap(); constraint.validate(&data_type, true).unwrap(); - assert!(constraint - .validate(&ConcreteDataType::uint32_datatype(), true) - .is_err()); + assert!( + constraint + .validate(&ConcreteDataType::uint32_datatype(), true) + .is_err() + ); } #[test] @@ -342,23 +344,29 @@ mod tests { constraint .validate(&ConcreteDataType::timestamp_millisecond_datatype(), false) .unwrap(); - assert!(constraint - .validate(&ConcreteDataType::boolean_datatype(), false) - .is_err()); + assert!( + constraint + .validate(&ConcreteDataType::boolean_datatype(), false) + .is_err() + ); let constraint = ColumnDefaultConstraint::Function("hello()".to_string()); - assert!(constraint - .validate(&ConcreteDataType::timestamp_millisecond_datatype(), false) - .is_err()); + assert!( + constraint + .validate(&ConcreteDataType::timestamp_millisecond_datatype(), false) + .is_err() + ); } #[test] fn test_create_default_vector_by_null() { let constraint = ColumnDefaultConstraint::null_value(); let data_type = ConcreteDataType::int32_datatype(); - assert!(constraint - .create_default_vector(&data_type, false, 10) - .is_err()); + assert!( + constraint + .create_default_vector(&data_type, false, 10) + .is_err() + ); let constraint = ColumnDefaultConstraint::null_value(); let v = constraint @@ -446,9 +454,11 @@ mod tests { let constraint = ColumnDefaultConstraint::Function("no".to_string()); let data_type = ConcreteDataType::timestamp_millisecond_datatype(); - assert!(constraint - .create_default_vector(&data_type, false, 4) - .is_err()); + assert!( + constraint + .create_default_vector(&data_type, false, 4) + .is_err() + ); assert!(constraint.create_default(&data_type, false).is_err()); } diff --git a/src/datatypes/src/timestamp.rs b/src/datatypes/src/timestamp.rs index 0c1685eb1c..bf6fdaa162 100644 --- a/src/datatypes/src/timestamp.rs +++ b/src/datatypes/src/timestamp.rs @@ -17,8 +17,8 @@ use arrow_array::{ TimestampNanosecondArray, TimestampSecondArray, }; use arrow_schema::DataType; -use common_time::timestamp::TimeUnit; use common_time::Timestamp; +use common_time::timestamp::TimeUnit; use 
paste::paste; use serde::{Deserialize, Serialize}; diff --git a/src/datatypes/src/types.rs b/src/datatypes/src/types.rs index d22151908a..746170a5e9 100644 --- a/src/datatypes/src/types.rs +++ b/src/datatypes/src/types.rs @@ -44,13 +44,13 @@ pub use interval_type::{ IntervalDayTimeType, IntervalMonthDayNanoType, IntervalType, IntervalYearMonthType, }; pub use json_type::{ - json_type_value_to_string, parse_string_to_json_type_value, JsonType, JSON_TYPE_NAME, + JSON_TYPE_NAME, JsonType, json_type_value_to_string, parse_string_to_json_type_value, }; pub use list_type::ListType; pub use null_type::NullType; pub use primitive_type::{ - Float32Type, Float64Type, Int16Type, Int32Type, Int64Type, Int8Type, LogicalPrimitiveType, - OrdPrimitive, UInt16Type, UInt32Type, UInt64Type, UInt8Type, WrapperType, + Float32Type, Float64Type, Int8Type, Int16Type, Int32Type, Int64Type, LogicalPrimitiveType, + OrdPrimitive, UInt8Type, UInt16Type, UInt32Type, UInt64Type, WrapperType, }; pub use string_type::StringType; pub use struct_type::{StructField, StructType}; @@ -61,4 +61,4 @@ pub use timestamp_type::{ TimestampMicrosecondType, TimestampMillisecondType, TimestampNanosecondType, TimestampSecondType, TimestampType, }; -pub use vector_type::{parse_string_to_vector_type_value, vector_type_value_to_string, VectorType}; +pub use vector_type::{VectorType, parse_string_to_vector_type_value, vector_type_value_to_string}; diff --git a/src/datatypes/src/types/date_type.rs b/src/datatypes/src/types/date_type.rs index 7c60222529..a78da1d65b 100644 --- a/src/datatypes/src/types/date_type.rs +++ b/src/datatypes/src/types/date_type.rs @@ -98,8 +98,8 @@ impl LogicalPrimitiveType for DateType { #[cfg(test)] mod tests { use common_base::bytes::StringBytes; - use common_time::timezone::set_default_timezone; use common_time::Timestamp; + use common_time::timezone::set_default_timezone; use super::*; diff --git a/src/datatypes/src/types/decimal_type.rs b/src/datatypes/src/types/decimal_type.rs index e397ba197c..9a4c269841 100644 --- a/src/datatypes/src/types/decimal_type.rs +++ b/src/datatypes/src/types/decimal_type.rs @@ -13,8 +13,8 @@ // limitations under the License. 
use arrow_schema::DataType as ArrowDataType; -use common_decimal::decimal128::DECIMAL128_MAX_PRECISION; use common_decimal::Decimal128; +use common_decimal::decimal128::DECIMAL128_MAX_PRECISION; use serde::{Deserialize, Serialize}; use crate::prelude::{DataType, ScalarVectorBuilder}; diff --git a/src/datatypes/src/types/primitive_type.rs b/src/datatypes/src/types/primitive_type.rs index 9b074eea32..bcd228df35 100644 --- a/src/datatypes/src/types/primitive_type.rs +++ b/src/datatypes/src/types/primitive_type.rs @@ -24,8 +24,8 @@ use crate::data_type::{ConcreteDataType, DataType}; use crate::error::{self, Result}; use crate::scalars::{Scalar, ScalarRef, ScalarVectorBuilder}; use crate::type_id::LogicalTypeId; -use crate::types::boolean_type::bool_to_numeric; use crate::types::DateType; +use crate::types::boolean_type::bool_to_numeric; use crate::value::{Value, ValueRef}; use crate::vectors::{MutableVector, PrimitiveVector, PrimitiveVectorBuilder, Vector}; diff --git a/src/datatypes/src/types/timestamp_type.rs b/src/datatypes/src/types/timestamp_type.rs index 7e12c89d37..2c9adb8bc1 100644 --- a/src/datatypes/src/types/timestamp_type.rs +++ b/src/datatypes/src/types/timestamp_type.rs @@ -19,8 +19,8 @@ use arrow::datatypes::{ TimestampNanosecondType as ArrowTimestampNanosecondType, TimestampSecondType as ArrowTimestampSecondType, }; -use common_time::timestamp::TimeUnit; use common_time::Timestamp; +use common_time::timestamp::TimeUnit; use enum_dispatch::enum_dispatch; use paste::paste; use serde::{Deserialize, Serialize}; @@ -200,8 +200,8 @@ impl_data_type_for_timestamp!(Microsecond); #[cfg(test)] mod tests { - use common_time::timezone::set_default_timezone; use common_time::Date; + use common_time::timezone::set_default_timezone; use super::*; diff --git a/src/datatypes/src/types/vector_type.rs b/src/datatypes/src/types/vector_type.rs index 302e6f23e2..d01949c473 100644 --- a/src/datatypes/src/types/vector_type.rs +++ b/src/datatypes/src/types/vector_type.rs @@ -176,7 +176,10 @@ mod tests { let cases = [ ("[1.0,2.0,3]", "[1,2,3]"), ("[0.0 , 0.0 , 0.0]", "[0,0,0]"), - ("[3.4028235e38, -3.4028235e38, 1.1754944e-38]", "[340282350000000000000000000000000000000,-340282350000000000000000000000000000000,0.000000000000000000000000000000000000011754944]"), + ( + "[3.4028235e38, -3.4028235e38, 1.1754944e-38]", + "[340282350000000000000000000000000000000,-340282350000000000000000000000000000000,0.000000000000000000000000000000000000011754944]", + ), ]; for (s, expected) in cases.iter() { diff --git a/src/datatypes/src/value.rs b/src/datatypes/src/value.rs index d417d3ccbb..39e37e830c 100644 --- a/src/datatypes/src/value.rs +++ b/src/datatypes/src/value.rs @@ -18,8 +18,8 @@ use std::sync::Arc; use arrow::datatypes::{DataType as ArrowDataType, Field}; use arrow_array::{Array, ListArray}; -use base64::engine::general_purpose::URL_SAFE; use base64::Engine as _; +use base64::engine::general_purpose::URL_SAFE; use common_base::bytes::{Bytes, StringBytes}; use common_decimal::Decimal128; use common_telemetry::error; @@ -28,13 +28,13 @@ use common_time::interval::IntervalUnit; use common_time::time::Time; use common_time::timestamp::{TimeUnit, Timestamp}; use common_time::{Duration, IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth, Timezone}; -use datafusion_common::scalar::ScalarStructBuilder; use datafusion_common::ScalarValue; +use datafusion_common::scalar::ScalarStructBuilder; use greptime_proto::v1::value::ValueData; pub use ordered_float::OrderedFloat; use serde::{Deserialize, Serialize, 
Serializer}; use serde_json::{Number, Value as JsonValue}; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use crate::error::{self, ConvertArrowArrayToScalarsSnafu, Error, Result, TryFromValueSnafu}; use crate::prelude::*; @@ -386,7 +386,10 @@ impl Value { let output_type_id = output_type.logical_type_id(); ensure!( // Json type leverage Value(Binary) for storage. - output_type_id == value_type_id || self.is_null() || (output_type_id == LogicalTypeId::Json && value_type_id == LogicalTypeId::Binary), + output_type_id == value_type_id + || self.is_null() + || (output_type_id == LogicalTypeId::Json + && value_type_id == LogicalTypeId::Binary), error::ToScalarValueSnafu { reason: format!( "expect value to return output_type {output_type_id:?}, actual: {value_type_id:?}", @@ -1005,7 +1008,7 @@ impl TryFrom for Value { return error::UnsupportedArrowTypeSnafu { arrow_type: v.data_type(), } - .fail() + .fail(); } }; Ok(v) diff --git a/src/datatypes/src/vectors.rs b/src/datatypes/src/vectors.rs index b11c8a3f7a..77678840d8 100644 --- a/src/datatypes/src/vectors.rs +++ b/src/datatypes/src/vectors.rs @@ -64,11 +64,11 @@ pub use interval::{ pub use list::{ListIter, ListVector, ListVectorBuilder}; pub use null::{NullVector, NullVectorBuilder}; pub use primitive::{ - Float32Vector, Float32VectorBuilder, Float64Vector, Float64VectorBuilder, Int16Vector, - Int16VectorBuilder, Int32Vector, Int32VectorBuilder, Int64Vector, Int64VectorBuilder, - Int8Vector, Int8VectorBuilder, PrimitiveIter, PrimitiveVector, PrimitiveVectorBuilder, - UInt16Vector, UInt16VectorBuilder, UInt32Vector, UInt32VectorBuilder, UInt64Vector, - UInt64VectorBuilder, UInt8Vector, UInt8VectorBuilder, + Float32Vector, Float32VectorBuilder, Float64Vector, Float64VectorBuilder, Int8Vector, + Int8VectorBuilder, Int16Vector, Int16VectorBuilder, Int32Vector, Int32VectorBuilder, + Int64Vector, Int64VectorBuilder, PrimitiveIter, PrimitiveVector, PrimitiveVectorBuilder, + UInt8Vector, UInt8VectorBuilder, UInt16Vector, UInt16VectorBuilder, UInt32Vector, + UInt32VectorBuilder, UInt64Vector, UInt64VectorBuilder, }; pub use string::{StringVector, StringVectorBuilder}; pub use time::{ diff --git a/src/datatypes/src/vectors/binary.rs b/src/datatypes/src/vectors/binary.rs index 6c0b47b803..50e9011909 100644 --- a/src/datatypes/src/vectors/binary.rs +++ b/src/datatypes/src/vectors/binary.rs @@ -82,11 +82,11 @@ impl BinaryVector { continue; }; - if let Ok(s) = String::from_utf8(binary.to_vec()) { - if let Ok(v) = parse_string_to_vector_type_value(&s, Some(dim)) { - vector.push(Some(v)); - continue; - } + if let Ok(s) = String::from_utf8(binary.to_vec()) + && let Ok(v) = parse_string_to_vector_type_value(&s, Some(dim)) + { + vector.push(Some(v)); + continue; } let expected_bytes_size = dim as usize * std::mem::size_of::(); @@ -430,9 +430,11 @@ mod tests { builder.push_value_ref(ValueRef::Binary("hello".as_bytes())); assert!(builder.try_push_value_ref(ValueRef::Int32(123)).is_err()); builder.extend_slice_of(&input, 1, 2).unwrap(); - assert!(builder - .extend_slice_of(&crate::vectors::Int32Vector::from_slice([13]), 0, 1) - .is_err()); + assert!( + builder + .extend_slice_of(&crate::vectors::Int32Vector::from_slice([13]), 0, 1) + .is_err() + ); let vector = builder.to_vector(); let expect: VectorRef = Arc::new(BinaryVector::from_slice(&[b"hello", b"one", b"two"])); diff --git a/src/datatypes/src/vectors/boolean.rs b/src/datatypes/src/vectors/boolean.rs index 8b02fda95d..86e336bf9a 100644 --- a/src/datatypes/src/vectors/boolean.rs +++ 
b/src/datatypes/src/vectors/boolean.rs @@ -360,9 +360,11 @@ mod tests { builder.push_value_ref(ValueRef::Boolean(true)); assert!(builder.try_push_value_ref(ValueRef::Int32(123)).is_err()); builder.extend_slice_of(&input, 1, 2).unwrap(); - assert!(builder - .extend_slice_of(&crate::vectors::Int32Vector::from_slice([13]), 0, 1) - .is_err()); + assert!( + builder + .extend_slice_of(&crate::vectors::Int32Vector::from_slice([13]), 0, 1) + .is_err() + ); let vector = builder.to_vector(); let expect: VectorRef = Arc::new(BooleanVector::from_slice(&[true, false, true])); diff --git a/src/datatypes/src/vectors/constant.rs b/src/datatypes/src/vectors/constant.rs index 3ccade1392..c0249b630b 100644 --- a/src/datatypes/src/vectors/constant.rs +++ b/src/datatypes/src/vectors/constant.rs @@ -17,7 +17,7 @@ use std::fmt; use std::sync::Arc; use arrow::array::{Array, ArrayRef, UInt32Array}; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use crate::data_type::ConcreteDataType; use crate::error::{self, Result, SerializeSnafu}; @@ -105,7 +105,9 @@ impl ConstantVector { ) .unwrap() { - panic!("Array index out of bounds, cannot take index out of the length of the array: {len}"); + panic!( + "Array index out of bounds, cannot take index out of the length of the array: {len}" + ); } Ok(Arc::new(ConstantVector::new( @@ -182,11 +184,7 @@ impl Vector for ConstantVector { } fn null_count(&self) -> usize { - if self.only_null() { - self.len() - } else { - 0 - } + if self.only_null() { self.len() } else { 0 } } } diff --git a/src/datatypes/src/vectors/date.rs b/src/datatypes/src/vectors/date.rs index 31d81e2d3b..15e3cdc17e 100644 --- a/src/datatypes/src/vectors/date.rs +++ b/src/datatypes/src/vectors/date.rs @@ -72,9 +72,11 @@ mod tests { builder.push_value_ref(ValueRef::Date(Date::new(5))); assert!(builder.try_push_value_ref(ValueRef::Int32(123)).is_err()); builder.extend_slice_of(&input, 1, 2).unwrap(); - assert!(builder - .extend_slice_of(&crate::vectors::Int32Vector::from_slice([13]), 0, 1) - .is_err()); + assert!( + builder + .extend_slice_of(&crate::vectors::Int32Vector::from_slice([13]), 0, 1) + .is_err() + ); let vector = builder.to_vector(); let expect: VectorRef = Arc::new(DateVector::from_slice([5, 2, 3])); diff --git a/src/datatypes/src/vectors/decimal.rs b/src/datatypes/src/vectors/decimal.rs index e446b36de3..f75ef1a238 100644 --- a/src/datatypes/src/vectors/decimal.rs +++ b/src/datatypes/src/vectors/decimal.rs @@ -19,8 +19,8 @@ use std::sync::Arc; use arrow_array::builder::{ArrayBuilder, Decimal128Builder}; use arrow_array::iterator::ArrayIter; use arrow_array::{Array, ArrayRef, Decimal128Array}; -use common_decimal::decimal128::{DECIMAL128_DEFAULT_SCALE, DECIMAL128_MAX_PRECISION}; use common_decimal::Decimal128; +use common_decimal::decimal128::{DECIMAL128_DEFAULT_SCALE, DECIMAL128_MAX_PRECISION}; use snafu::{OptionExt, ResultExt}; use crate::arrow::datatypes::DataType as ArrowDataType; @@ -430,8 +430,8 @@ pub mod tests { use common_decimal::Decimal128; use super::*; - use crate::vectors::operations::VectorOp; use crate::vectors::Int8Vector; + use crate::vectors::operations::VectorOp; #[test] fn test_from_arrow_decimal128_array() { diff --git a/src/datatypes/src/vectors/eq.rs b/src/datatypes/src/vectors/eq.rs index ac01cd3538..372b3ceee4 100644 --- a/src/datatypes/src/vectors/eq.rs +++ b/src/datatypes/src/vectors/eq.rs @@ -164,9 +164,10 @@ mod tests { use super::*; use crate::vectors::{ - list, DurationMicrosecondVector, DurationMillisecondVector, DurationNanosecondVector, - 
DurationSecondVector, Float32Vector, Float64Vector, Int16Vector, Int32Vector, Int64Vector, - Int8Vector, NullVector, UInt16Vector, UInt32Vector, UInt64Vector, UInt8Vector, VectorRef, + DurationMicrosecondVector, DurationMillisecondVector, DurationNanosecondVector, + DurationSecondVector, Float32Vector, Float64Vector, Int8Vector, Int16Vector, Int32Vector, + Int64Vector, NullVector, UInt8Vector, UInt16Vector, UInt32Vector, UInt64Vector, VectorRef, + list, }; fn assert_vector_ref_eq(vector: VectorRef) { diff --git a/src/datatypes/src/vectors/helper.rs b/src/datatypes/src/vectors/helper.rs index 8b5bdb1dd5..d58b085e31 100644 --- a/src/datatypes/src/vectors/helper.rs +++ b/src/datatypes/src/vectors/helper.rs @@ -21,8 +21,8 @@ use arrow::array::{Array, ArrayRef, StringArray}; use arrow::compute; use arrow::compute::kernels::comparison; use arrow::datatypes::{ - DataType as ArrowDataType, Int16Type, Int32Type, Int64Type, Int8Type, TimeUnit, UInt16Type, - UInt32Type, UInt64Type, UInt8Type, + DataType as ArrowDataType, Int8Type, Int16Type, Int32Type, Int64Type, TimeUnit, UInt8Type, + UInt16Type, UInt32Type, UInt64Type, }; use arrow_array::{DictionaryArray, StructArray}; use arrow_schema::IntervalUnit; @@ -38,12 +38,12 @@ use crate::vectors::struct_vector::StructVector; use crate::vectors::{ BinaryVector, BooleanVector, ConstantVector, DateVector, Decimal128Vector, DictionaryVector, DurationMicrosecondVector, DurationMillisecondVector, DurationNanosecondVector, - DurationSecondVector, Float32Vector, Float64Vector, Int16Vector, Int32Vector, Int64Vector, - Int8Vector, IntervalDayTimeVector, IntervalMonthDayNanoVector, IntervalYearMonthVector, + DurationSecondVector, Float32Vector, Float64Vector, Int8Vector, Int16Vector, Int32Vector, + Int64Vector, IntervalDayTimeVector, IntervalMonthDayNanoVector, IntervalYearMonthVector, ListVector, ListVectorBuilder, MutableVector, NullVector, StringVector, TimeMicrosecondVector, TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector, - TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt16Vector, - UInt32Vector, UInt64Vector, UInt8Vector, Vector, VectorRef, + TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt8Vector, + UInt16Vector, UInt32Vector, UInt64Vector, Vector, VectorRef, }; /// Helper functions for `Vector`. 
@@ -57,11 +57,11 @@ impl Helper { pub unsafe fn static_cast(vector: &VectorRef) -> &T { let object = vector.as_ref(); debug_assert!(object.as_any().is::()); - &*(object as *const dyn Vector as *const T) + unsafe { &*(object as *const dyn Vector as *const T) } } pub fn check_get_scalar(vector: &VectorRef) -> Result<&::VectorType> { - let arr = vector + vector .as_any() .downcast_ref::<::VectorType>() .with_context(|| error::UnknownVectorSnafu { @@ -70,12 +70,11 @@ impl Helper { vector.vector_type_name(), std::any::type_name::(), ), - }); - arr + }) } pub fn check_get(vector: &VectorRef) -> Result<&T> { - let arr = vector + vector .as_any() .downcast_ref::() .with_context(|| error::UnknownVectorSnafu { @@ -84,15 +83,14 @@ impl Helper { vector.vector_type_name(), std::any::type_name::(), ), - }); - arr + }) } pub fn check_get_mutable_vector( vector: &mut dyn MutableVector, ) -> Result<&mut T> { let ty = vector.data_type(); - let arr = vector + vector .as_mut_any() .downcast_mut() .with_context(|| error::UnknownVectorSnafu { @@ -101,14 +99,13 @@ impl Helper { ty, std::any::type_name::(), ), - }); - arr + }) } pub fn check_get_scalar_vector( vector: &VectorRef, ) -> Result<&::VectorType> { - let arr = vector + vector .as_any() .downcast_ref::<::VectorType>() .with_context(|| error::UnknownVectorSnafu { @@ -117,8 +114,7 @@ impl Helper { vector.vector_type_name(), std::any::type_name::(), ), - }); - arr + }) } /// Try to cast an arrow scalar value into vector @@ -254,7 +250,7 @@ impl Helper { return error::ConversionSnafu { from: format!("Unsupported scalar value: {value}"), } - .fail() + .fail(); } }; @@ -385,7 +381,7 @@ impl Helper { return error::UnsupportedArrowTypeSnafu { arrow_type: array.as_ref().data_type().clone(), } - .fail() + .fail(); } } } @@ -413,7 +409,7 @@ impl Helper { return error::UnsupportedArrowTypeSnafu { arrow_type: array.as_ref().data_type().clone(), } - .fail() + .fail(); } }) } @@ -458,11 +454,11 @@ impl Helper { #[cfg(test)] mod tests { use arrow::array::{ - ArrayRef, BooleanArray, Date32Array, Float32Array, Float64Array, Int16Array, Int32Array, - Int64Array, Int8Array, LargeBinaryArray, ListArray, NullArray, Time32MillisecondArray, + ArrayRef, BooleanArray, Date32Array, Float32Array, Float64Array, Int8Array, Int16Array, + Int32Array, Int64Array, LargeBinaryArray, ListArray, NullArray, Time32MillisecondArray, Time32SecondArray, Time64MicrosecondArray, Time64NanosecondArray, TimestampMicrosecondArray, TimestampMillisecondArray, TimestampNanosecondArray, - TimestampSecondArray, UInt16Array, UInt32Array, UInt64Array, UInt8Array, + TimestampSecondArray, UInt8Array, UInt16Array, UInt32Array, UInt64Array, }; use arrow::buffer::Buffer; use arrow::datatypes::{Int32Type, IntervalMonthDayNano}; diff --git a/src/datatypes/src/vectors/list.rs b/src/datatypes/src/vectors/list.rs index 703a4eb785..1de8332716 100644 --- a/src/datatypes/src/vectors/list.rs +++ b/src/datatypes/src/vectors/list.rs @@ -678,9 +678,11 @@ pub mod tests { ]; let input = new_list_vector(&data); builder.extend_slice_of(&input, 1, 2).unwrap(); - assert!(builder - .extend_slice_of(&crate::vectors::Int32Vector::from_slice([13]), 0, 1) - .is_err()); + assert!( + builder + .extend_slice_of(&crate::vectors::Int32Vector::from_slice([13]), 0, 1) + .is_err() + ); let vector = builder.to_vector(); let expect: VectorRef = Arc::new(new_list_vector(&[ diff --git a/src/datatypes/src/vectors/null.rs b/src/datatypes/src/vectors/null.rs index e745ee13d6..8c67041ac6 100644 --- a/src/datatypes/src/vectors/null.rs +++ 
b/src/datatypes/src/vectors/null.rs @@ -17,7 +17,7 @@ use std::fmt; use std::sync::Arc; use arrow::array::{Array, ArrayRef, NullArray}; -use snafu::{ensure, OptionExt}; +use snafu::{OptionExt, ensure}; use crate::data_type::ConcreteDataType; use crate::error::{self, Result}; @@ -269,9 +269,11 @@ mod tests { let input = NullVector::new(3); builder.extend_slice_of(&input, 1, 2).unwrap(); - assert!(builder - .extend_slice_of(&crate::vectors::Int32Vector::from_slice([13]), 0, 1) - .is_err()); + assert!( + builder + .extend_slice_of(&crate::vectors::Int32Vector::from_slice([13]), 0, 1) + .is_err() + ); let vector = builder.to_vector(); let expect: VectorRef = Arc::new(input); diff --git a/src/datatypes/src/vectors/operations/filter.rs b/src/datatypes/src/vectors/operations/filter.rs index 0a4c05b1a2..7dde6c1f94 100644 --- a/src/datatypes/src/vectors/operations/filter.rs +++ b/src/datatypes/src/vectors/operations/filter.rs @@ -111,7 +111,7 @@ mod tests { ($VectorType: ident, $ValueType: ident, $method: ident) => {{ use std::sync::Arc; - use $crate::vectors::{$VectorType, VectorRef}; + use $crate::vectors::{VectorRef, $VectorType}; let v = $VectorType::from_iterator((0..5).map($ValueType::$method)); let filter = BooleanVector::from_slice(&[false, true, false, true, true]); diff --git a/src/datatypes/src/vectors/operations/take.rs b/src/datatypes/src/vectors/operations/take.rs index bdf6c44ef1..41947d3ef5 100644 --- a/src/datatypes/src/vectors/operations/take.rs +++ b/src/datatypes/src/vectors/operations/take.rs @@ -67,7 +67,7 @@ mod tests { macro_rules! take_time_like_test { ($VectorType: ident, $ValueType: ident, $method: ident) => {{ - use $crate::vectors::{$VectorType, VectorRef}; + use $crate::vectors::{VectorRef, $VectorType}; let v = $VectorType::from_iterator((0..5).map($ValueType::$method)); let indices = UInt32Vector::from_slice(&[3, 0, 1, 4]); diff --git a/src/datatypes/src/vectors/primitive.rs b/src/datatypes/src/vectors/primitive.rs index f3e49183f5..f2737f45c2 100644 --- a/src/datatypes/src/vectors/primitive.rs +++ b/src/datatypes/src/vectors/primitive.rs @@ -25,8 +25,8 @@ use crate::error::{self, Result}; use crate::scalars::{Scalar, ScalarRef, ScalarVector, ScalarVectorBuilder}; use crate::serialize::Serializable; use crate::types::{ - Float32Type, Float64Type, Int16Type, Int32Type, Int64Type, Int8Type, LogicalPrimitiveType, - UInt16Type, UInt32Type, UInt64Type, UInt8Type, WrapperType, + Float32Type, Float64Type, Int8Type, Int16Type, Int32Type, Int64Type, LogicalPrimitiveType, + UInt8Type, UInt16Type, UInt32Type, UInt64Type, WrapperType, }; use crate::value::{Value, ValueRef}; use crate::vectors::{self, MutableVector, Validity, Vector, VectorRef}; @@ -554,9 +554,11 @@ mod tests { let input = Int64Vector::from_slice([7, 8, 9]); builder.extend_slice_of(&input, 1, 2).unwrap(); - assert!(builder - .extend_slice_of(&Int32Vector::from_slice([13]), 0, 1) - .is_err()); + assert!( + builder + .extend_slice_of(&Int32Vector::from_slice([13]), 0, 1) + .is_err() + ); let vector = builder.to_vector(); let expect: VectorRef = Arc::new(Int64Vector::from_slice([123, 8, 9])); diff --git a/src/datatypes/src/vectors/string.rs b/src/datatypes/src/vectors/string.rs index 5603c96e15..f696f2335f 100644 --- a/src/datatypes/src/vectors/string.rs +++ b/src/datatypes/src/vectors/string.rs @@ -294,9 +294,11 @@ mod tests { let input = StringVector::from_slice(&["world", "one", "two"]); builder.extend_slice_of(&input, 1, 2).unwrap(); - assert!(builder - 
.extend_slice_of(&crate::vectors::Int32Vector::from_slice([13]), 0, 1) - .is_err()); + assert!( + builder + .extend_slice_of(&crate::vectors::Int32Vector::from_slice([13]), 0, 1) + .is_err() + ); let vector = builder.to_vector(); let expect: VectorRef = Arc::new(StringVector::from_slice(&["hello", "one", "two"])); diff --git a/src/file-engine/src/engine.rs b/src/file-engine/src/engine.rs index 09a373caad..bc1ceaed40 100644 --- a/src/file-engine/src/engine.rs +++ b/src/file-engine/src/engine.rs @@ -23,7 +23,7 @@ use common_error::ext::BoxedError; use common_recordbatch::SendableRecordBatchStream; use common_telemetry::{error, info}; use object_store::ObjectStore; -use snafu::{ensure, OptionExt}; +use snafu::{OptionExt, ensure}; use store_api::metadata::RegionMetadataRef; use store_api::region_engine::{ RegionEngine, RegionManifestInfo, RegionRole, RegionScannerRef, RegionStatistic, diff --git a/src/file-engine/src/manifest.rs b/src/file-engine/src/manifest.rs index 6bf5ee104b..7e8aa7a732 100644 --- a/src/file-engine/src/manifest.rs +++ b/src/file-engine/src/manifest.rs @@ -18,16 +18,16 @@ use std::sync::Arc; use common_datasource::file_format::Format; use object_store::ObjectStore; use serde::{Deserialize, Serialize}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::metadata::{ColumnMetadata, RegionMetadataBuilder, RegionMetadataRef}; use store_api::storage::{ColumnId, RegionId}; +use crate::FileOptions; use crate::error::{ CheckObjectSnafu, DecodeJsonSnafu, DeleteRegionManifestSnafu, EncodeJsonSnafu, InvalidMetadataSnafu, LoadRegionManifestSnafu, ManifestExistsSnafu, MissingRequiredFieldSnafu, ParseFileFormatSnafu, Result, StoreRegionManifestSnafu, }; -use crate::FileOptions; #[inline] fn region_manifest_path(region_dir: &str) -> String { diff --git a/src/file-engine/src/query.rs b/src/file-engine/src/query.rs index 872d302e15..b56777d43c 100644 --- a/src/file-engine/src/query.rs +++ b/src/file-engine/src/query.rs @@ -30,7 +30,7 @@ use datatypes::prelude::ConcreteDataType; use datatypes::schema::{ColumnSchema, Schema, SchemaRef}; use datatypes::vectors::VectorRef; use futures::Stream; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::storage::ScanRequest; use self::file_stream::ScanPlanConfig; diff --git a/src/file-engine/src/query/file_stream.rs b/src/file-engine/src/query/file_stream.rs index d2730b4880..df9c018b96 100644 --- a/src/file-engine/src/query/file_stream.rs +++ b/src/file-engine/src/query/file_stream.rs @@ -14,11 +14,11 @@ use std::sync::Arc; +use common_datasource::file_format::Format; use common_datasource::file_format::csv::CsvFormat; use common_datasource::file_format::parquet::DefaultParquetFileReaderFactory; -use common_datasource::file_format::Format; -use common_recordbatch::adapter::RecordBatchStreamAdapter; use common_recordbatch::SendableRecordBatchStream; +use common_recordbatch::adapter::RecordBatchStreamAdapter; use datafusion::common::ToDFSchema; use datafusion::datasource::listing::PartitionedFile; use datafusion::datasource::object_store::ObjectStoreUrl; @@ -28,8 +28,8 @@ use datafusion::datasource::physical_plan::{ use datafusion::datasource::source::DataSourceExec; use datafusion::physical_expr::create_physical_expr; use datafusion::physical_expr::execution_props::ExecutionProps; -use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet; use datafusion::physical_plan::ExecutionPlan; +use 
datafusion::physical_plan::metrics::ExecutionPlanMetricsSet; use datafusion::prelude::SessionContext; use datafusion_expr::expr::Expr; use datafusion_expr::utils::conjunction; diff --git a/src/file-engine/src/region.rs b/src/file-engine/src/region.rs index 50c7e69114..c17e797966 100644 --- a/src/file-engine/src/region.rs +++ b/src/file-engine/src/region.rs @@ -22,9 +22,9 @@ use store_api::path_utils::region_name; use store_api::region_request::{RegionCreateRequest, RegionOpenRequest}; use store_api::storage::RegionId; +use crate::FileOptions; use crate::error::Result; use crate::manifest::FileRegionManifest; -use crate::FileOptions; #[derive(Debug)] pub struct FileRegion { @@ -140,10 +140,12 @@ mod tests { assert_eq!(region.metadata.region_id, region_id); assert_eq!(region.metadata.primary_key, vec![1]); - assert!(object_store - .exists("create_region_dir/1_0000000000/manifest/_file_manifest") - .await - .unwrap()); + assert!( + object_store + .exists("create_region_dir/1_0000000000/manifest/_file_manifest") + .await + .unwrap() + ); // Object exists, should fail let err = FileRegion::create(region_id, request, &object_store) @@ -214,16 +216,20 @@ mod tests { .await .unwrap(); - assert!(object_store - .exists("drop_region_dir/1_0000000000/manifest/_file_manifest") - .await - .unwrap()); + assert!( + object_store + .exists("drop_region_dir/1_0000000000/manifest/_file_manifest") + .await + .unwrap() + ); FileRegion::drop(®ion, &object_store).await.unwrap(); - assert!(!object_store - .exists("drop_region_dir/1_0000000000/manifest/_file_manifest") - .await - .unwrap()); + assert!( + !object_store + .exists("drop_region_dir/1_0000000000/manifest/_file_manifest") + .await + .unwrap() + ); let request = RegionOpenRequest { engine: "file".to_string(), diff --git a/src/file-engine/src/test_util.rs b/src/file-engine/src/test_util.rs index 7dbdf6b513..b3b4d69cfb 100644 --- a/src/file-engine/src/test_util.rs +++ b/src/file-engine/src/test_util.rs @@ -15,12 +15,12 @@ use std::collections::HashMap; use api::v1::SemanticType; -use common_test_util::temp_dir::{create_temp_dir, TempDir}; +use common_test_util::temp_dir::{TempDir, create_temp_dir}; use common_time::timestamp::TimeUnit; use datatypes::prelude::ConcreteDataType; use datatypes::schema::ColumnSchema; -use object_store::services::Fs; use object_store::ObjectStore; +use object_store::services::Fs; use store_api::metadata::ColumnMetadata; pub fn new_test_object_store(prefix: &str) -> (TempDir, ObjectStore) { diff --git a/src/flow/src/adapter.rs b/src/flow/src/adapter.rs index 3e92fc08e5..8750e4f408 100644 --- a/src/flow/src/adapter.rs +++ b/src/flow/src/adapter.rs @@ -33,31 +33,31 @@ use datatypes::value::Value; use greptime_proto::v1; use itertools::{EitherOrBoth, Itertools}; use meta_client::MetaClientOptions; -use query::options::QueryOptions; use query::QueryEngine; +use query::options::QueryOptions; use serde::{Deserialize, Serialize}; use servers::grpc::GrpcOptions; use servers::heartbeat_options::HeartbeatOptions; use servers::http::HttpOptions; use session::context::QueryContext; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::storage::{ConcreteDataType, RegionId}; use table::metadata::TableId; use tokio::sync::broadcast::error::TryRecvError; -use tokio::sync::{broadcast, watch, Mutex, RwLock}; +use tokio::sync::{Mutex, RwLock, broadcast, watch}; pub(crate) use crate::adapter::node_context::FlownodeContext; use crate::adapter::refill::RefillTask; use 
crate::adapter::table_source::ManagedTableSource; use crate::adapter::util::relation_desc_to_column_schemas_with_fallback; -pub(crate) use crate::adapter::worker::{create_worker, Worker, WorkerHandle}; +pub(crate) use crate::adapter::worker::{Worker, WorkerHandle, create_worker}; use crate::batching_mode::BatchingModeOptions; use crate::compute::ErrCollector; use crate::df_optimizer::sql_to_flow_plan; use crate::error::{EvalSnafu, ExternalSnafu, InternalSnafu, InvalidQuerySnafu, UnexpectedSnafu}; use crate::expr::Batch; use crate::metrics::{METRIC_FLOW_INSERT_ELAPSED, METRIC_FLOW_ROWS, METRIC_FLOW_RUN_INTERVAL_MS}; -use crate::repr::{self, DiffRow, RelationDesc, Row, BATCH_SIZE}; +use crate::repr::{self, BATCH_SIZE, DiffRow, RelationDesc, Row}; use crate::{CreateFlowArgs, FlowId, TableName}; pub(crate) mod flownode_impl; @@ -72,9 +72,9 @@ mod worker; pub(crate) mod node_context; pub(crate) mod table_source; +use crate::FrontendInvoker; use crate::error::Error; use crate::utils::StateReportHandler; -use crate::FrontendInvoker; // `GREPTIME_TIMESTAMP` is not used to distinguish when table is created automatically by flow pub const AUTO_CREATED_PLACEHOLDER_TS_COL: &str = "__ts_placeholder"; @@ -638,7 +638,10 @@ impl StreamingEngine { break; } Some(Err(TryRecvError::Lagged(num))) => { - common_telemetry::error!("Shutdown channel is lagged by {}, meaning multiple shutdown cmd have been issued", num); + common_telemetry::error!( + "Shutdown channel is lagged by {}, meaning multiple shutdown cmd have been issued", + num + ); break; } None => (), @@ -739,8 +742,7 @@ impl StreamingEngine { .await?; trace!( "Handling write request for table_id={} with {} rows", - table_id, - rows_len + table_id, rows_len ); Ok(()) } diff --git a/src/flow/src/adapter/flownode_impl.rs b/src/flow/src/adapter/flownode_impl.rs index 1be268221f..b6365ea7d7 100644 --- a/src/flow/src/adapter/flownode_impl.rs +++ b/src/flow/src/adapter/flownode_impl.rs @@ -14,12 +14,12 @@ //! impl `FlowNode` trait for FlowNodeManager so standalone can call them use std::collections::{HashMap, HashSet}; -use std::sync::atomic::AtomicBool; use std::sync::Arc; +use std::sync::atomic::AtomicBool; use api::v1::flow::{ - flow_request, CreateRequest, DirtyWindowRequests, DropRequest, FlowRequest, FlowResponse, - FlushFlow, + CreateRequest, DirtyWindowRequests, DropRequest, FlowRequest, FlowResponse, FlushFlow, + flow_request, }; use api::v1::region::InsertRequests; use catalog::CatalogManager; @@ -35,7 +35,7 @@ use futures::TryStreamExt; use greptime_proto::v1::flow::DirtyWindowRequest; use itertools::Itertools; use session::context::QueryContextBuilder; -use snafu::{ensure, IntoError, OptionExt, ResultExt}; +use snafu::{IntoError, OptionExt, ResultExt, ensure}; use store_api::storage::{RegionId, TableId}; use tokio::sync::{Mutex, RwLock}; @@ -612,7 +612,7 @@ impl FlowEngine for FlowDualEngine { return InternalSnafu { reason: format!("Invalid flow type: {}", flow_type), } - .fail() + .fail(); } }; diff --git a/src/flow/src/adapter/node_context.rs b/src/flow/src/adapter/node_context.rs index 7983b396fe..2cfad8671e 100644 --- a/src/flow/src/adapter/node_context.rs +++ b/src/flow/src/adapter/node_context.rs @@ -15,8 +15,8 @@ //! 
Node context, prone to change with every incoming requests use std::collections::{BTreeMap, BTreeSet, HashMap}; -use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicUsize, Ordering}; use common_recordbatch::RecordBatch; use common_telemetry::trace; @@ -24,7 +24,7 @@ use datatypes::prelude::ConcreteDataType; use session::context::QueryContext; use snafu::{OptionExt, ResultExt}; use table::metadata::TableId; -use tokio::sync::{broadcast, mpsc, RwLock}; +use tokio::sync::{RwLock, broadcast, mpsc}; use crate::adapter::table_source::FlowTableSource; use crate::adapter::{FlowId, ManagedTableSource, TableName}; @@ -33,7 +33,7 @@ use crate::expr::error::InternalSnafu; use crate::expr::{Batch, GlobalId}; use crate::metrics::METRIC_FLOW_INPUT_BUF_SIZE; use crate::plan::TypedPlan; -use crate::repr::{DiffRow, RelationDesc, BATCH_SIZE, BROADCAST_CAP, SEND_BUF_CAP}; +use crate::repr::{BATCH_SIZE, BROADCAST_CAP, DiffRow, RelationDesc, SEND_BUF_CAP}; /// A context that holds the information of the dataflow #[derive(Debug)] diff --git a/src/flow/src/adapter/parse_expr.rs b/src/flow/src/adapter/parse_expr.rs index 3a28e813d5..3f84eed810 100644 --- a/src/flow/src/adapter/parse_expr.rs +++ b/src/flow/src/adapter/parse_expr.rs @@ -14,12 +14,12 @@ //! parse expr like "ts <= now() - interval '5 m'" +use nom::IResult; use nom::branch::alt; use nom::bytes::complete::{tag, tag_no_case}; use nom::character::complete::{alphanumeric1, digit0, multispace0}; use nom::combinator::peek; use nom::sequence::tuple; -use nom::IResult; use crate::repr; @@ -137,7 +137,7 @@ fn infix_binding_power(op: &str) -> IResult<&str, (u8, u8)> { return Err(nom::Err::Error(nom::error::Error::new( op, nom::error::ErrorKind::Fail, - ))) + ))); } }; Ok((op, ret)) diff --git a/src/flow/src/adapter/refill.rs b/src/flow/src/adapter/refill.rs index a29e120f0a..89b7344c0c 100644 --- a/src/flow/src/adapter/refill.rs +++ b/src/flow/src/adapter/refill.rs @@ -27,7 +27,7 @@ use datatypes::value::Value; use futures::StreamExt; use query::parser::QueryLanguageParser; use session::context::QueryContextBuilder; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use table::metadata::TableId; use crate::adapter::table_source::ManagedTableSource; diff --git a/src/flow/src/adapter/table_source.rs b/src/flow/src/adapter/table_source.rs index c40a9ce360..b2bdded896 100644 --- a/src/flow/src/adapter/table_source.rs +++ b/src/flow/src/adapter/table_source.rs @@ -22,8 +22,8 @@ use serde::{Deserialize, Serialize}; use snafu::{OptionExt, ResultExt}; use table::metadata::TableId; -use crate::adapter::util::table_info_value_to_relation_desc; use crate::adapter::TableName; +use crate::adapter::util::table_info_value_to_relation_desc; use crate::error::{ Error, ExternalSnafu, TableNotFoundMetaSnafu, TableNotFoundSnafu, UnexpectedSnafu, }; diff --git a/src/flow/src/adapter/util.rs b/src/flow/src/adapter/util.rs index 3bb0031eee..aaed0b5d28 100644 --- a/src/flow/src/adapter/util.rs +++ b/src/flow/src/adapter/util.rs @@ -29,11 +29,11 @@ use session::context::QueryContextBuilder; use snafu::{OptionExt, ResultExt}; use table::table_reference::TableReference; +use crate::StreamingEngine; use crate::adapter::table_source::TableDesc; -use crate::adapter::{TableName, WorkerHandle, AUTO_CREATED_PLACEHOLDER_TS_COL}; +use crate::adapter::{AUTO_CREATED_PLACEHOLDER_TS_COL, TableName, WorkerHandle}; use crate::error::{Error, ExternalSnafu, UnexpectedSnafu}; use crate::repr::{ColumnType, RelationDesc, 
RelationType}; -use crate::StreamingEngine; impl StreamingEngine { /// Get a worker handle for creating flow, using round robin to select a worker pub(crate) async fn get_worker_handle_for_create_flow(&self) -> &WorkerHandle { diff --git a/src/flow/src/adapter/worker.rs b/src/flow/src/adapter/worker.rs index 3be60cc7e6..32cc4eb7f5 100644 --- a/src/flow/src/adapter/worker.rs +++ b/src/flow/src/adapter/worker.rs @@ -15,14 +15,14 @@ //! For single-thread flow worker use std::collections::{BTreeMap, VecDeque}; -use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicBool, Ordering}; use common_telemetry::info; use dfir_rs::scheduled::graph::Dfir; use enum_as_inner::EnumAsInner; use snafu::ensure; -use tokio::sync::{broadcast, mpsc, oneshot, Mutex}; +use tokio::sync::{Mutex, broadcast, mpsc, oneshot}; use crate::adapter::FlowId; use crate::compute::{Context, DataflowState, ErrCollector}; diff --git a/src/flow/src/batching_mode/engine.rs b/src/flow/src/batching_mode/engine.rs index 956d9a273a..334ed242a7 100644 --- a/src/flow/src/batching_mode/engine.rs +++ b/src/flow/src/batching_mode/engine.rs @@ -22,9 +22,9 @@ use api::v1::flow::{DirtyWindowRequests, FlowResponse}; use catalog::CatalogManagerRef; use common_error::ext::BoxedError; use common_meta::ddl::create_flow::FlowType; +use common_meta::key::TableMetadataManagerRef; use common_meta::key::flow::FlowMetadataManagerRef; use common_meta::key::table_info::{TableInfoManager, TableInfoValue}; -use common_meta::key::TableMetadataManagerRef; use common_runtime::JoinHandle; use common_telemetry::tracing::warn; use common_telemetry::{debug, info}; @@ -34,17 +34,17 @@ use datafusion_expr::LogicalPlan; use datatypes::prelude::ConcreteDataType; use query::QueryEngineRef; use session::context::QueryContext; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use sql::parsers::utils::is_tql; use store_api::storage::{RegionId, TableId}; use table::table_reference::TableReference; -use tokio::sync::{oneshot, RwLock}; +use tokio::sync::{RwLock, oneshot}; +use crate::batching_mode::BatchingModeOptions; use crate::batching_mode::frontend_client::FrontendClient; use crate::batching_mode::task::{BatchingTask, TaskArgs}; -use crate::batching_mode::time_window::{find_time_window_expr, TimeWindowExpr}; +use crate::batching_mode::time_window::{TimeWindowExpr, find_time_window_expr}; use crate::batching_mode::utils::sql_to_df_plan; -use crate::batching_mode::BatchingModeOptions; use crate::engine::FlowEngine; use crate::error::{ CreateFlowSnafu, DatafusionSnafu, ExternalSnafu, FlowAlreadyExistSnafu, FlowNotFoundSnafu, @@ -237,8 +237,7 @@ impl BatchingEngine { if !missing_tids.is_empty() { warn!( "Failed to get all the table info for table ids, expected table ids: {:?}, those table doesn't exist: {:?}", - tids, - missing_tids + tids, missing_tids ); } @@ -612,19 +611,16 @@ impl BatchingEngine { let is_pk: bool = pk_idxs.contains(&&idx); ensure!( - col.data_type == ConcreteDataType::float64_datatype() + col.data_type == ConcreteDataType::float64_datatype() || col.data_type.is_timestamp() || (col.data_type == ConcreteDataType::string_datatype() && is_pk), - InvalidQuerySnafu { - reason: format!( - "TQL query only supports f64 value column, timestamp column and string tag columns, table `{}`(id={}) has column `{}` with type {:?} which is not supported", - table_ref, - table_id, - col.name, - col.data_type - ), - } - ); + InvalidQuerySnafu { + reason: format!( + "TQL query only supports f64 
value column, timestamp column and string tag columns, table `{}`(id={}) has column `{}` with type {:?} which is not supported", + table_ref, table_id, col.name, col.data_type + ), + } + ); } } Ok(()) @@ -642,7 +638,9 @@ impl BatchingEngine { .fail()? }; if tx.send(()).is_err() { - warn!("Fail to shutdown flow {flow_id} due to receiver already dropped, maybe flow {flow_id} is already dropped?") + warn!( + "Fail to shutdown flow {flow_id} due to receiver already dropped, maybe flow {flow_id} is already dropped?" + ) } Ok(()) } diff --git a/src/flow/src/batching_mode/frontend_client.rs b/src/flow/src/batching_mode/frontend_client.rs index 22ca444262..cba8f896d5 100644 --- a/src/flow/src/batching_mode/frontend_client.rs +++ b/src/flow/src/batching_mode/frontend_client.rs @@ -63,10 +63,8 @@ pub trait GrpcQueryHandlerWithBoxedError: Send + Sync + 'static { /// auto impl #[async_trait::async_trait] -impl< - E: ErrorExt + Send + Sync + 'static, - T: GrpcQueryHandler<Error = E> + Send + Sync + 'static, - > GrpcQueryHandlerWithBoxedError for T +impl<E: ErrorExt + Send + Sync + 'static, T: GrpcQueryHandler<Error = E> + Send + Sync + 'static> + GrpcQueryHandlerWithBoxedError for T { async fn do_query( &self, diff --git a/src/flow/src/batching_mode/state.rs b/src/flow/src/batching_mode/state.rs index bc74957a2d..f9cc0ba9c2 100644 --- a/src/flow/src/batching_mode/state.rs +++ b/src/flow/src/batching_mode/state.rs @@ -22,7 +22,7 @@ use common_telemetry::tracing::warn; use common_time::Timestamp; use datatypes::value::Value; use session::context::QueryContextRef; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use tokio::sync::oneshot; use tokio::time::Instant; @@ -179,10 +179,10 @@ impl DirtyTimeWindows { pub fn window_size(&self) -> Duration { let mut ret = Duration::from_secs(0); for (start, end) in &self.windows { - if let Some(end) = end { - if let Some(duration) = end.sub(start) { - ret += duration.to_std().unwrap_or_default(); - } + if let Some(end) = end + && let Some(duration) = end.sub(start) + { + ret += duration.to_std().unwrap_or_default(); } } ret @@ -279,23 +279,24 @@ impl DirtyTimeWindows { if let Some(task_ctx) = task_ctx { warn!( - "Flow id = {:?}, too many time windows: {}, only the first {} are taken for this query, the group by expression might be wrong. Time window expr={:?}, expire_after={:?}, first_time_window={:?}, last_time_window={:?}, the original query: {:?}", - task_ctx.config.flow_id, - self.windows.len(), - self.max_filter_num_per_query, - task_ctx.config.time_window_expr, - task_ctx.config.expire_after, - first_time_window, - last_time_window, - task_ctx.config.query - ); + "Flow id = {:?}, too many time windows: {}, only the first {} are taken for this query, the group by expression might be wrong. Time window expr={:?}, expire_after={:?}, first_time_window={:?}, last_time_window={:?}, the original query: {:?}", + task_ctx.config.flow_id, + self.windows.len(), + self.max_filter_num_per_query, + task_ctx.config.time_window_expr, + task_ctx.config.expire_after, + first_time_window, + last_time_window, + task_ctx.config.query + ); } else { - warn!("Flow id = {:?}, too many time windows: {}, only the first {} are taken for this query, the group by expression might be wrong. 
first_time_window={:?}, last_time_window={:?}", + flow_id, + self.windows.len(), + self.max_filter_num_per_query, + first_time_window, + last_time_window ) } } @@ -486,10 +487,10 @@ impl DirtyTimeWindows { let mut prev_tw = None; for (lower_bound, upper_bound) in std::mem::take(&mut self.windows) { // filter out expired time window - if let Some(expire_lower_bound) = expire_lower_bound { - if lower_bound < expire_lower_bound { - continue; - } + if let Some(expire_lower_bound) = expire_lower_bound + && lower_bound < expire_lower_bound + { + continue; } let Some(prev_tw) = &mut prev_tw else { @@ -590,13 +591,11 @@ mod test { (chrono::Duration::seconds(5 * 60), None), BTreeMap::from([( Timestamp::new_second(0), - Some(Timestamp::new_second( - (2 + merge_dist as i64) * 5 * 60, - )), + Some(Timestamp::new_second((2 + merge_dist as i64) * 5 * 60)), )]), Some( "((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts < CAST('1970-01-01 00:25:00' AS TIMESTAMP)))", - ) + ), ), // separate time window ( @@ -612,14 +611,12 @@ mod test { ), ( Timestamp::new_second((2 + merge_dist as i64) * 5 * 60), - Some(Timestamp::new_second( - (3 + merge_dist as i64) * 5 * 60, - )), + Some(Timestamp::new_second((3 + merge_dist as i64) * 5 * 60)), ), ]), Some( "(((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts < CAST('1970-01-01 00:05:00' AS TIMESTAMP))) OR ((ts >= CAST('1970-01-01 00:25:00' AS TIMESTAMP)) AND (ts < CAST('1970-01-01 00:30:00' AS TIMESTAMP))))", - ) + ), ), // overlapping ( @@ -630,13 +627,11 @@ mod test { (chrono::Duration::seconds(5 * 60), None), BTreeMap::from([( Timestamp::new_second(0), - Some(Timestamp::new_second( - (1 + merge_dist as i64) * 5 * 60, - )), + Some(Timestamp::new_second((1 + merge_dist as i64) * 5 * 60)), )]), Some( "((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts < CAST('1970-01-01 00:20:00' AS TIMESTAMP)))", - ) + ), ), // complex overlapping ( @@ -648,71 +643,59 @@ mod test { (chrono::Duration::seconds(3), None), BTreeMap::from([( Timestamp::new_second(0), - Some(Timestamp::new_second( - (merge_dist as i64) * 7 - )), + Some(Timestamp::new_second((merge_dist as i64) * 7)), )]), Some( "((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts < CAST('1970-01-01 00:00:21' AS TIMESTAMP)))", - ) + ), ), // split range ( - Vec::from_iter((0..20).map(|i|Timestamp::new_second(i*3)).chain(std::iter::once( - Timestamp::new_second(60 + 3 * (DirtyTimeWindows::MERGE_DIST as i64 + 1)), - ))), + Vec::from_iter((0..20).map(|i| Timestamp::new_second(i * 3)).chain( + std::iter::once(Timestamp::new_second( + 60 + 3 * (DirtyTimeWindows::MERGE_DIST as i64 + 1), + )), + )), (chrono::Duration::seconds(3), None), BTreeMap::from([ - ( - Timestamp::new_second(0), - Some(Timestamp::new_second( - 60 - )), - ), - ( - Timestamp::new_second(60 + 3 * (DirtyTimeWindows::MERGE_DIST as i64 + 1)), - Some(Timestamp::new_second( - 60 + 3 * (DirtyTimeWindows::MERGE_DIST as i64 + 1) + 3 - )), - )]), + (Timestamp::new_second(0), Some(Timestamp::new_second(60))), + ( + Timestamp::new_second(60 + 3 * (DirtyTimeWindows::MERGE_DIST as i64 + 1)), + Some(Timestamp::new_second( + 60 + 3 * (DirtyTimeWindows::MERGE_DIST as i64 + 1) + 3, + )), + ), + ]), Some( "((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts < CAST('1970-01-01 00:01:00' AS TIMESTAMP)))", - ) + ), ), // split 2 min into 1 min ( - Vec::from_iter((0..40).map(|i|Timestamp::new_second(i*3))), + Vec::from_iter((0..40).map(|i| Timestamp::new_second(i * 3))), (chrono::Duration::seconds(3), None), - BTreeMap::from([ - ( + BTreeMap::from([( 
Timestamp::new_second(0), - Some(Timestamp::new_second( - 40 * 3 - )), + Some(Timestamp::new_second(40 * 3)), )]), Some( "((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts < CAST('1970-01-01 00:01:00' AS TIMESTAMP)))", - ) + ), ), // split 3s + 1min into 3s + 57s ( - Vec::from_iter(std::iter::once(Timestamp::new_second(0)).chain((0..40).map(|i|Timestamp::new_second(20+i*3)))), + Vec::from_iter( + std::iter::once(Timestamp::new_second(0)) + .chain((0..40).map(|i| Timestamp::new_second(20 + i * 3))), + ), (chrono::Duration::seconds(3), None), BTreeMap::from([ - ( - Timestamp::new_second(0), - Some(Timestamp::new_second( - 3 - )), - ),( - Timestamp::new_second(20), - Some(Timestamp::new_second( - 140 - )), - )]), + (Timestamp::new_second(0), Some(Timestamp::new_second(3))), + (Timestamp::new_second(20), Some(Timestamp::new_second(140))), + ]), Some( "(((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts < CAST('1970-01-01 00:00:03' AS TIMESTAMP))) OR ((ts >= CAST('1970-01-01 00:00:20' AS TIMESTAMP)) AND (ts < CAST('1970-01-01 00:01:17' AS TIMESTAMP))))", - ) + ), ), // expired ( @@ -722,12 +705,10 @@ mod test { ], ( chrono::Duration::seconds(5 * 60), - Some(Timestamp::new_second( - (merge_dist as i64) * 6 * 60, - )), + Some(Timestamp::new_second((merge_dist as i64) * 6 * 60)), ), BTreeMap::from([]), - None + None, ), ]; // let len = testcases.len(); diff --git a/src/flow/src/batching_mode/task.rs b/src/flow/src/batching_mode/task.rs index 75ac47f6d8..5436d9a225 100644 --- a/src/flow/src/batching_mode/task.rs +++ b/src/flow/src/batching_mode/task.rs @@ -25,16 +25,16 @@ use common_telemetry::{debug, info}; use common_time::Timestamp; use datafusion::datasource::DefaultTableSource; use datafusion::sql::unparser::expr_to_sql; -use datafusion_common::tree_node::{Transformed, TreeNode}; use datafusion_common::DFSchemaRef; +use datafusion_common::tree_node::{Transformed, TreeNode}; use datafusion_expr::{DmlStatement, LogicalPlan, WriteOp}; use datatypes::prelude::ConcreteDataType; use datatypes::schema::{ColumnSchema, Schema}; use operator::expr_helper::column_schemas_to_defs; -use query::query_engine::DefaultSerializer; use query::QueryEngineRef; +use query::query_engine::DefaultSerializer; use session::context::QueryContextRef; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use sql::parser::{ParseOptions, ParserContext}; use sql::statements::statement::Statement; use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan}; @@ -44,14 +44,14 @@ use tokio::sync::oneshot::error::TryRecvError; use tokio::time::Instant; use crate::adapter::{AUTO_CREATED_PLACEHOLDER_TS_COL, AUTO_CREATED_UPDATE_AT_TS_COL}; +use crate::batching_mode::BatchingModeOptions; use crate::batching_mode::frontend_client::FrontendClient; use crate::batching_mode::state::{FilterExprInfo, TaskState}; use crate::batching_mode::time_window::TimeWindowExpr; use crate::batching_mode::utils::{ - gen_plan_with_matching_schema, get_table_info_df_schema, sql_to_df_plan, AddFilterRewriter, - ColumnMatcherRewriter, FindGroupByFinalName, + AddFilterRewriter, ColumnMatcherRewriter, FindGroupByFinalName, gen_plan_with_matching_schema, + get_table_info_df_schema, sql_to_df_plan, }; -use crate::batching_mode::BatchingModeOptions; use crate::df_optimizer::apply_df_optimizer; use crate::error::{ ConvertColumnSchemaSnafu, DatafusionSnafu, ExternalSnafu, InvalidQuerySnafu, @@ -666,7 +666,10 @@ impl BatchingTask { debug!( "Flow id = {:?}, found time window: precise_lower_bound={:?}, 
precise_upper_bound={:?} with dirty time windows: {:?}", - self.config.flow_id, expire_lower_bound, expire_upper_bound, self.state.read().unwrap().dirty_time_windows + self.config.flow_id, + expire_lower_bound, + expire_upper_bound, + self.state.read().unwrap().dirty_time_windows ); let window_size = expire_upper_bound .sub(&expire_lower_bound) diff --git a/src/flow/src/batching_mode/time_window.rs b/src/flow/src/batching_mode/time_window.rs index df5b0d2fb7..b7a5f5f2cf 100644 --- a/src/flow/src/batching_mode/time_window.rs +++ b/src/flow/src/batching_mode/time_window.rs @@ -27,8 +27,8 @@ use catalog::CatalogManagerRef; use common_error::ext::BoxedError; use common_recordbatch::DfRecordBatch; use common_telemetry::warn; -use common_time::timestamp::TimeUnit; use common_time::Timestamp; +use common_time::timestamp::TimeUnit; use datafusion::error::Result as DfResult; use datafusion::execution::SessionState; use datafusion::logical_expr::Expr; @@ -46,15 +46,15 @@ use datatypes::vectors::{ }; use itertools::Itertools; use session::context::QueryContextRef; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; +use crate::Error; use crate::adapter::util::from_proto_to_data_type; use crate::error::{ ArrowSnafu, DatafusionSnafu, DatatypesSnafu, ExternalSnafu, PlanSnafu, TimeSnafu, UnexpectedSnafu, }; use crate::expr::error::DataTypeSnafu; -use crate::Error; /// Represents a test timestamp in seconds since the Unix epoch. const DEFAULT_TEST_TIMESTAMP: Timestamp = Timestamp::new_second(17_0000_0000); @@ -625,9 +625,14 @@ fn binary_search_expr( phy_expr: &PhysicalExprRef, df_schema: &DFSchema, ) -> Result { - ensure!(lower_bound.map(|v|v.unit()) == upper_bound.map(|v| v.unit()), UnexpectedSnafu { - reason: format!(" unit mismatch for time window expression {phy_expr:?}, found {lower_bound:?} and {upper_bound:?}"), - }); + ensure!( + lower_bound.map(|v| v.unit()) == upper_bound.map(|v| v.unit()), + UnexpectedSnafu { + reason: format!( + " unit mismatch for time window expression {phy_expr:?}, found {lower_bound:?} and {upper_bound:?}" + ), + } + ); let output_unit = upper_bound .context(UnexpectedSnafu { @@ -746,7 +751,7 @@ mod test { use session::context::QueryContext; use super::*; - use crate::batching_mode::utils::{df_plan_to_sql, sql_to_df_plan, AddFilterRewriter}; + use crate::batching_mode::utils::{AddFilterRewriter, df_plan_to_sql, sql_to_df_plan}; use crate::test_utils::create_test_query_engine; #[tokio::test] @@ -765,7 +770,7 @@ mod test { Some(Timestamp::new(1740394109000, TimeUnit::Millisecond)), Some(Timestamp::new(1740394109001, TimeUnit::Millisecond)), ), - r#"SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS ts FROM numbers_with_ts WHERE ((ts >= CAST('2025-02-24 10:48:29' AS TIMESTAMP)) AND (ts <= CAST('2025-02-24 10:48:29.001' AS TIMESTAMP))) GROUP BY numbers_with_ts.ts"# + r#"SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS ts FROM numbers_with_ts WHERE ((ts >= CAST('2025-02-24 10:48:29' AS TIMESTAMP)) AND (ts <= CAST('2025-02-24 10:48:29.001' AS TIMESTAMP))) GROUP BY numbers_with_ts.ts"#, ), // complex time window index ( @@ -776,7 +781,7 @@ mod test { Some(Timestamp::new(1740394080, TimeUnit::Second)), Some(Timestamp::new(1740394140, TimeUnit::Second)), ), - "SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('2025-02-24 10:48:00' AS TIMESTAMP)) AND (ts <= 
CAST('2025-02-24 10:49:00' AS TIMESTAMP))) GROUP BY arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)')" + "SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('2025-02-24 10:48:00' AS TIMESTAMP)) AND (ts <= CAST('2025-02-24 10:49:00' AS TIMESTAMP))) GROUP BY arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)')", ), // complex time window index with where ( @@ -787,7 +792,7 @@ mod test { Some(Timestamp::new(1740394080, TimeUnit::Second)), Some(Timestamp::new(1740394140, TimeUnit::Second)), ), - "SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts WHERE numbers_with_ts.number IN (2, 3, 4) AND ((ts >= CAST('2025-02-24 10:48:00' AS TIMESTAMP)) AND (ts <= CAST('2025-02-24 10:49:00' AS TIMESTAMP))) GROUP BY arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)')" + "SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts WHERE numbers_with_ts.number IN (2, 3, 4) AND ((ts >= CAST('2025-02-24 10:48:00' AS TIMESTAMP)) AND (ts <= CAST('2025-02-24 10:49:00' AS TIMESTAMP))) GROUP BY arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)')", ), // complex time window index with between and ( @@ -798,14 +803,14 @@ mod test { Some(Timestamp::new(1740394080, TimeUnit::Second)), Some(Timestamp::new(1740394140, TimeUnit::Second)), ), - "SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts WHERE (numbers_with_ts.number BETWEEN 2 AND 4) AND ((ts >= CAST('2025-02-24 10:48:00' AS TIMESTAMP)) AND (ts <= CAST('2025-02-24 10:49:00' AS TIMESTAMP))) GROUP BY arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)')" + "SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS time_window FROM numbers_with_ts WHERE (numbers_with_ts.number BETWEEN 2 AND 4) AND ((ts >= CAST('2025-02-24 10:48:00' AS TIMESTAMP)) AND (ts <= CAST('2025-02-24 10:49:00' AS TIMESTAMP))) GROUP BY arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)')", ), // no time index ( "SELECT date_bin('5 minutes', ts) FROM numbers_with_ts;", Timestamp::new(23, TimeUnit::Millisecond), ("ts".to_string(), None, None), - "SELECT date_bin('5 minutes', ts) FROM numbers_with_ts;" + "SELECT date_bin('5 minutes', ts) FROM numbers_with_ts;", ), // time index ( @@ -816,7 +821,7 @@ mod test { Some(Timestamp::new(0, TimeUnit::Millisecond)), Some(Timestamp::new(300000, TimeUnit::Millisecond)), ), - "SELECT date_bin('5 minutes', numbers_with_ts.ts) AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts <= CAST('1970-01-01 00:05:00' AS TIMESTAMP))) GROUP BY date_bin('5 minutes', numbers_with_ts.ts)" + "SELECT date_bin('5 minutes', numbers_with_ts.ts) AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts <= CAST('1970-01-01 00:05:00' AS TIMESTAMP))) GROUP BY date_bin('5 minutes', numbers_with_ts.ts)", ), // on spot ( @@ -827,7 +832,7 @@ mod test { Some(Timestamp::new(0, TimeUnit::Millisecond)), Some(Timestamp::new(300000, TimeUnit::Millisecond)), ), - "SELECT date_bin('5 minutes', numbers_with_ts.ts) AS time_window FROM numbers_with_ts 
WHERE ((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts <= CAST('1970-01-01 00:05:00' AS TIMESTAMP))) GROUP BY date_bin('5 minutes', numbers_with_ts.ts)" + "SELECT date_bin('5 minutes', numbers_with_ts.ts) AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts <= CAST('1970-01-01 00:05:00' AS TIMESTAMP))) GROUP BY date_bin('5 minutes', numbers_with_ts.ts)", ), // different time unit ( @@ -838,7 +843,7 @@ mod test { Some(Timestamp::new(0, TimeUnit::Millisecond)), Some(Timestamp::new(300000, TimeUnit::Millisecond)), ), - "SELECT date_bin('5 minutes', numbers_with_ts.ts) AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts <= CAST('1970-01-01 00:05:00' AS TIMESTAMP))) GROUP BY date_bin('5 minutes', numbers_with_ts.ts)" + "SELECT date_bin('5 minutes', numbers_with_ts.ts) AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts <= CAST('1970-01-01 00:05:00' AS TIMESTAMP))) GROUP BY date_bin('5 minutes', numbers_with_ts.ts)", ), // time index with other fields ( @@ -849,7 +854,7 @@ mod test { Some(Timestamp::new(0, TimeUnit::Millisecond)), Some(Timestamp::new(300000, TimeUnit::Millisecond)), ), - "SELECT sum(numbers_with_ts.number) AS sum_up, date_bin('5 minutes', numbers_with_ts.ts) AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts <= CAST('1970-01-01 00:05:00' AS TIMESTAMP))) GROUP BY date_bin('5 minutes', numbers_with_ts.ts)" + "SELECT sum(numbers_with_ts.number) AS sum_up, date_bin('5 minutes', numbers_with_ts.ts) AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts <= CAST('1970-01-01 00:05:00' AS TIMESTAMP))) GROUP BY date_bin('5 minutes', numbers_with_ts.ts)", ), // time index with other pks ( @@ -860,7 +865,7 @@ mod test { Some(Timestamp::new(0, TimeUnit::Millisecond)), Some(Timestamp::new(300000, TimeUnit::Millisecond)), ), - "SELECT numbers_with_ts.number, date_bin('5 minutes', numbers_with_ts.ts) AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts <= CAST('1970-01-01 00:05:00' AS TIMESTAMP))) GROUP BY date_bin('5 minutes', numbers_with_ts.ts), numbers_with_ts.number" + "SELECT numbers_with_ts.number, date_bin('5 minutes', numbers_with_ts.ts) AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts <= CAST('1970-01-01 00:05:00' AS TIMESTAMP))) GROUP BY date_bin('5 minutes', numbers_with_ts.ts), numbers_with_ts.number", ), // subquery ( @@ -871,7 +876,7 @@ mod test { Some(Timestamp::new(0, TimeUnit::Millisecond)), Some(Timestamp::new(300000, TimeUnit::Millisecond)), ), - "SELECT numbers_with_ts.number, time_window FROM (SELECT numbers_with_ts.number, date_bin('5 minutes', numbers_with_ts.ts) AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts <= CAST('1970-01-01 00:05:00' AS TIMESTAMP))) GROUP BY date_bin('5 minutes', numbers_with_ts.ts), numbers_with_ts.number)" + "SELECT numbers_with_ts.number, time_window FROM (SELECT numbers_with_ts.number, date_bin('5 minutes', numbers_with_ts.ts) AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts <= CAST('1970-01-01 00:05:00' AS TIMESTAMP))) GROUP BY date_bin('5 minutes', numbers_with_ts.ts), numbers_with_ts.number)", ), // cte ( @@ -882,7 +887,7 @@ mod test { Some(Timestamp::new(0, TimeUnit::Millisecond)), 
Some(Timestamp::new(300000, TimeUnit::Millisecond)), ), - "SELECT cte.number, cte.time_window FROM (SELECT numbers_with_ts.number, date_bin('5 minutes', numbers_with_ts.ts) AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts <= CAST('1970-01-01 00:05:00' AS TIMESTAMP))) GROUP BY date_bin('5 minutes', numbers_with_ts.ts), numbers_with_ts.number) AS cte" + "SELECT cte.number, cte.time_window FROM (SELECT numbers_with_ts.number, date_bin('5 minutes', numbers_with_ts.ts) AS time_window FROM numbers_with_ts WHERE ((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts <= CAST('1970-01-01 00:05:00' AS TIMESTAMP))) GROUP BY date_bin('5 minutes', numbers_with_ts.ts), numbers_with_ts.number) AS cte", ), // complex subquery without alias ( @@ -893,7 +898,7 @@ mod test { Some(Timestamp::new(0, TimeUnit::Millisecond)), Some(Timestamp::new(300000, TimeUnit::Millisecond)), ), - "SELECT sum(numbers_with_ts.number), numbers_with_ts.number, date_bin('5 minutes', numbers_with_ts.ts) AS time_window, bucket_name FROM (SELECT numbers_with_ts.number, numbers_with_ts.ts, CASE WHEN (numbers_with_ts.number < 5) THEN 'bucket_0_5' WHEN (numbers_with_ts.number >= 5) THEN 'bucket_5_inf' END AS bucket_name FROM numbers_with_ts WHERE ((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts <= CAST('1970-01-01 00:05:00' AS TIMESTAMP)))) GROUP BY numbers_with_ts.number, date_bin('5 minutes', numbers_with_ts.ts), bucket_name" + "SELECT sum(numbers_with_ts.number), numbers_with_ts.number, date_bin('5 minutes', numbers_with_ts.ts) AS time_window, bucket_name FROM (SELECT numbers_with_ts.number, numbers_with_ts.ts, CASE WHEN (numbers_with_ts.number < 5) THEN 'bucket_0_5' WHEN (numbers_with_ts.number >= 5) THEN 'bucket_5_inf' END AS bucket_name FROM numbers_with_ts WHERE ((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts <= CAST('1970-01-01 00:05:00' AS TIMESTAMP)))) GROUP BY numbers_with_ts.number, date_bin('5 minutes', numbers_with_ts.ts), bucket_name", ), // complex subquery alias ( @@ -904,7 +909,7 @@ mod test { Some(Timestamp::new(0, TimeUnit::Millisecond)), Some(Timestamp::new(300000, TimeUnit::Millisecond)), ), - "SELECT sum(cte.number), cte.number, date_bin('5 minutes', cte.ts) AS time_window, cte.bucket_name FROM (SELECT numbers_with_ts.number, numbers_with_ts.ts, CASE WHEN (numbers_with_ts.number < 5) THEN 'bucket_0_5' WHEN (numbers_with_ts.number >= 5) THEN 'bucket_5_inf' END AS bucket_name FROM numbers_with_ts WHERE ((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts <= CAST('1970-01-01 00:05:00' AS TIMESTAMP)))) AS cte GROUP BY cte.number, date_bin('5 minutes', cte.ts), cte.bucket_name" + "SELECT sum(cte.number), cte.number, date_bin('5 minutes', cte.ts) AS time_window, cte.bucket_name FROM (SELECT numbers_with_ts.number, numbers_with_ts.ts, CASE WHEN (numbers_with_ts.number < 5) THEN 'bucket_0_5' WHEN (numbers_with_ts.number >= 5) THEN 'bucket_5_inf' END AS bucket_name FROM numbers_with_ts WHERE ((ts >= CAST('1970-01-01 00:00:00' AS TIMESTAMP)) AND (ts <= CAST('1970-01-01 00:05:00' AS TIMESTAMP)))) AS cte GROUP BY cte.number, date_bin('5 minutes', cte.ts), cte.bucket_name", ), ]; diff --git a/src/flow/src/batching_mode/utils.rs b/src/flow/src/batching_mode/utils.rs index 93cd02aee5..2891cf2541 100644 --- a/src/flow/src/batching_mode/utils.rs +++ b/src/flow/src/batching_mode/utils.rs @@ -29,10 +29,10 @@ use datafusion_common::tree_node::{ use datafusion_common::{DFSchema, DataFusionError, ScalarValue}; use datafusion_expr::{Distinct, LogicalPlan, 
Projection}; use datatypes::schema::SchemaRef; -use query::parser::{PromQuery, QueryLanguageParser, QueryStatement, DEFAULT_LOOKBACK_STRING}; use query::QueryEngineRef; +use query::parser::{DEFAULT_LOOKBACK_STRING, PromQuery, QueryLanguageParser, QueryStatement}; use session::context::QueryContextRef; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use sql::parser::{ParseOptions, ParserContext}; use sql::statements::statement::Statement; use sql::statements::tql::Tql; @@ -290,18 +290,17 @@ impl ColumnMatcherRewriter { .collect::>(); // first match by position for (idx, expr) in exprs.iter_mut().enumerate() { - if !all_names.contains(&expr.qualified_name().1) { - if let Some(col_name) = self + if !all_names.contains(&expr.qualified_name().1) + && let Some(col_name) = self .schema .column_schemas() .get(idx) .map(|c| c.name.clone()) - { - // if the data type mismatched, later check_execute will error out - // hence no need to check it here, beside, optimize pass might be able to cast it - // so checking here is not necessary - *expr = expr.clone().alias(col_name); - } + { + // if the data type mismatched, later check_execute will error out + // hence no need to check it here, beside, optimize pass might be able to cast it + // so checking here is not necessary + *expr = expr.clone().alias(col_name); } } @@ -331,8 +330,7 @@ impl ColumnMatcherRewriter { // helpful error message return Err(DataFusionError::Plan(format!( "Expect the last column in table to be timestamp column, found column {} with type {:?}", - last_col_schema.name, - last_col_schema.data_type + last_col_schema.name, last_col_schema.data_type ))); } } else if query_col_cnt + 2 == table_col_cnt { @@ -344,8 +342,7 @@ impl ColumnMatcherRewriter { } else { return Err(DataFusionError::Plan(format!( "Expect the second last column in the table to be timestamp column, found column {} with type {:?}", - second_last_col_schema.name, - second_last_col_schema.data_type + second_last_col_schema.name, second_last_col_schema.data_type ))); } @@ -361,9 +358,12 @@ impl ColumnMatcherRewriter { } } else { return Err(DataFusionError::Plan(format!( - "Expect table have 0,1 or 2 columns more than query columns, found {} query columns {:?}, {} table columns {:?}", - query_col_cnt, exprs, table_col_cnt, self.schema.column_schemas() - ))); + "Expect table have 0,1 or 2 columns more than query columns, found {} query columns {:?}, {} table columns {:?}", + query_col_cnt, + exprs, + table_col_cnt, + self.schema.column_schemas() + ))); } Ok(exprs) } @@ -520,36 +520,31 @@ mod test { let testcases = vec![ ( "SELECT number FROM numbers_with_ts GROUP BY number", - "SELECT numbers_with_ts.number FROM numbers_with_ts WHERE (number > 4) GROUP BY numbers_with_ts.number" + "SELECT numbers_with_ts.number FROM numbers_with_ts WHERE (number > 4) GROUP BY numbers_with_ts.number", ), - ( "SELECT number FROM numbers_with_ts WHERE number < 2 OR number >10", - "SELECT numbers_with_ts.number FROM numbers_with_ts WHERE ((numbers_with_ts.number < 2) OR (numbers_with_ts.number > 10)) AND (number > 4)" + "SELECT numbers_with_ts.number FROM numbers_with_ts WHERE ((numbers_with_ts.number < 2) OR (numbers_with_ts.number > 10)) AND (number > 4)", ), - ( "SELECT date_bin('5 minutes', ts) as time_window FROM numbers_with_ts GROUP BY time_window", - "SELECT date_bin('5 minutes', numbers_with_ts.ts) AS time_window FROM numbers_with_ts WHERE (number > 4) GROUP BY date_bin('5 minutes', numbers_with_ts.ts)" + "SELECT date_bin('5 minutes', 
numbers_with_ts.ts) AS time_window FROM numbers_with_ts WHERE (number > 4) GROUP BY date_bin('5 minutes', numbers_with_ts.ts)", ), - // subquery ( - "SELECT number, time_window FROM (SELECT number, date_bin('5 minutes', ts) as time_window FROM numbers_with_ts GROUP BY time_window, number);", - "SELECT numbers_with_ts.number, time_window FROM (SELECT numbers_with_ts.number, date_bin('5 minutes', numbers_with_ts.ts) AS time_window FROM numbers_with_ts WHERE (number > 4) GROUP BY date_bin('5 minutes', numbers_with_ts.ts), numbers_with_ts.number)" + "SELECT number, time_window FROM (SELECT number, date_bin('5 minutes', ts) as time_window FROM numbers_with_ts GROUP BY time_window, number);", + "SELECT numbers_with_ts.number, time_window FROM (SELECT numbers_with_ts.number, date_bin('5 minutes', numbers_with_ts.ts) AS time_window FROM numbers_with_ts WHERE (number > 4) GROUP BY date_bin('5 minutes', numbers_with_ts.ts), numbers_with_ts.number)", ), - // complex subquery without alias ( "SELECT sum(number), number, date_bin('5 minutes', ts) as time_window, bucket_name FROM (SELECT number, ts, case when number < 5 THEN 'bucket_0_5' when number >= 5 THEN 'bucket_5_inf' END as bucket_name FROM numbers_with_ts) GROUP BY number, time_window, bucket_name;", - "SELECT sum(numbers_with_ts.number), numbers_with_ts.number, date_bin('5 minutes', numbers_with_ts.ts) AS time_window, bucket_name FROM (SELECT numbers_with_ts.number, numbers_with_ts.ts, CASE WHEN (numbers_with_ts.number < 5) THEN 'bucket_0_5' WHEN (numbers_with_ts.number >= 5) THEN 'bucket_5_inf' END AS bucket_name FROM numbers_with_ts WHERE (number > 4)) GROUP BY numbers_with_ts.number, date_bin('5 minutes', numbers_with_ts.ts), bucket_name" + "SELECT sum(numbers_with_ts.number), numbers_with_ts.number, date_bin('5 minutes', numbers_with_ts.ts) AS time_window, bucket_name FROM (SELECT numbers_with_ts.number, numbers_with_ts.ts, CASE WHEN (numbers_with_ts.number < 5) THEN 'bucket_0_5' WHEN (numbers_with_ts.number >= 5) THEN 'bucket_5_inf' END AS bucket_name FROM numbers_with_ts WHERE (number > 4)) GROUP BY numbers_with_ts.number, date_bin('5 minutes', numbers_with_ts.ts), bucket_name", ), - // complex subquery alias ( "SELECT sum(number), number, date_bin('5 minutes', ts) as time_window, bucket_name FROM (SELECT number, ts, case when number < 5 THEN 'bucket_0_5' when number >= 5 THEN 'bucket_5_inf' END as bucket_name FROM numbers_with_ts) as cte WHERE number > 1 GROUP BY number, time_window, bucket_name;", - "SELECT sum(cte.number), cte.number, date_bin('5 minutes', cte.ts) AS time_window, cte.bucket_name FROM (SELECT numbers_with_ts.number, numbers_with_ts.ts, CASE WHEN (numbers_with_ts.number < 5) THEN 'bucket_0_5' WHEN (numbers_with_ts.number >= 5) THEN 'bucket_5_inf' END AS bucket_name FROM numbers_with_ts WHERE (number > 4)) AS cte WHERE (cte.number > 1) GROUP BY cte.number, date_bin('5 minutes', cte.ts), cte.bucket_name" - ) + "SELECT sum(cte.number), cte.number, date_bin('5 minutes', cte.ts) AS time_window, cte.bucket_name FROM (SELECT numbers_with_ts.number, numbers_with_ts.ts, CASE WHEN (numbers_with_ts.number < 5) THEN 'bucket_0_5' WHEN (numbers_with_ts.number >= 5) THEN 'bucket_5_inf' END AS bucket_name FROM numbers_with_ts WHERE (number > 4)) AS cte WHERE (cte.number > 1) GROUP BY cte.number, date_bin('5 minutes', cte.ts), cte.bucket_name", + ), ]; use datafusion_expr::{col, lit}; let query_engine = create_test_query_engine(); @@ -588,7 +583,9 @@ mod test { // add ts placeholder ( "SELECT number FROM numbers_with_ts", - Ok("SELECT 
numbers_with_ts.number, CAST('1970-01-01 00:00:00' AS TIMESTAMP) AS __ts_placeholder FROM numbers_with_ts"), + Ok( + "SELECT numbers_with_ts.number, CAST('1970-01-01 00:00:00' AS TIMESTAMP) AS __ts_placeholder FROM numbers_with_ts", + ), vec![ ColumnSchema::new("number", ConcreteDataType::int32_datatype(), true), ColumnSchema::new( @@ -616,7 +613,9 @@ mod test { // add update_at and ts placeholder ( "SELECT number FROM numbers_with_ts", - Ok("SELECT numbers_with_ts.number, now() AS update_at, CAST('1970-01-01 00:00:00' AS TIMESTAMP) AS __ts_placeholder FROM numbers_with_ts"), + Ok( + "SELECT numbers_with_ts.number, now() AS update_at, CAST('1970-01-01 00:00:00' AS TIMESTAMP) AS __ts_placeholder FROM numbers_with_ts", + ), vec![ ColumnSchema::new("number", ConcreteDataType::int32_datatype(), true), ColumnSchema::new( @@ -635,7 +634,9 @@ mod test { // add ts placeholder ( "SELECT number, ts FROM numbers_with_ts", - Ok("SELECT numbers_with_ts.number, numbers_with_ts.ts AS update_at, CAST('1970-01-01 00:00:00' AS TIMESTAMP) AS __ts_placeholder FROM numbers_with_ts"), + Ok( + "SELECT numbers_with_ts.number, numbers_with_ts.ts AS update_at, CAST('1970-01-01 00:00:00' AS TIMESTAMP) AS __ts_placeholder FROM numbers_with_ts", + ), vec![ ColumnSchema::new("number", ConcreteDataType::int32_datatype(), true), ColumnSchema::new( @@ -654,7 +655,9 @@ mod test { // add update_at after time index column ( "SELECT number, ts FROM numbers_with_ts", - Ok("SELECT numbers_with_ts.number, numbers_with_ts.ts, now() AS update_atat FROM numbers_with_ts"), + Ok( + "SELECT numbers_with_ts.number, numbers_with_ts.ts, now() AS update_atat FROM numbers_with_ts", + ), vec![ ColumnSchema::new("number", ConcreteDataType::int32_datatype(), true), ColumnSchema::new( @@ -674,7 +677,9 @@ mod test { // error datatype mismatch ( "SELECT number, ts FROM numbers_with_ts", - Err("Expect the last column in table to be timestamp column, found column atat with type Int8"), + Err( + "Expect the last column in table to be timestamp column, found column atat with type Int8", + ), vec![ ColumnSchema::new("number", ConcreteDataType::int32_datatype(), true), ColumnSchema::new( @@ -694,14 +699,12 @@ mod test { // error datatype mismatch on second last column ( "SELECT number FROM numbers_with_ts", - Err("Expect the second last column in the table to be timestamp column, found column ts with type Int8"), + Err( + "Expect the second last column in the table to be timestamp column, found column ts with type Int8", + ), vec![ ColumnSchema::new("number", ConcreteDataType::int32_datatype(), true), - ColumnSchema::new( - "ts", - ConcreteDataType::int8_datatype(), - false, - ), + ColumnSchema::new("ts", ConcreteDataType::int8_datatype(), false), ColumnSchema::new( // name is irrelevant for update_at column "atat", @@ -744,32 +747,32 @@ mod test { async fn test_find_group_by_exprs() { let testcases = vec![ ( - "SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS ts FROM numbers_with_ts GROUP BY ts;", - vec!["ts"] + "SELECT arrow_cast(date_bin(INTERVAL '1 MINS', numbers_with_ts.ts), 'Timestamp(Second, None)') AS ts FROM numbers_with_ts GROUP BY ts;", + vec!["ts"], ), ( "SELECT number FROM numbers_with_ts GROUP BY number", - vec!["number"] + vec!["number"], ), ( "SELECT date_bin('5 minutes', ts) as time_window FROM numbers_with_ts GROUP BY time_window", - vec!["time_window"] + vec!["time_window"], ), - // subquery + // subquery ( - "SELECT number, time_window FROM (SELECT number, date_bin('5 minutes', ts) as 
time_window FROM numbers_with_ts GROUP BY time_window, number);", - vec!["time_window", "number"] + "SELECT number, time_window FROM (SELECT number, date_bin('5 minutes', ts) as time_window FROM numbers_with_ts GROUP BY time_window, number);", + vec!["time_window", "number"], ), // complex subquery without alias ( "SELECT sum(number), number, date_bin('5 minutes', ts) as time_window, bucket_name FROM (SELECT number, ts, case when number < 5 THEN 'bucket_0_5' when number >= 5 THEN 'bucket_5_inf' END as bucket_name FROM numbers_with_ts) GROUP BY number, time_window, bucket_name;", - vec!["number", "time_window", "bucket_name"] + vec!["number", "time_window", "bucket_name"], ), // complex subquery alias ( "SELECT sum(number), number, date_bin('5 minutes', ts) as time_window, bucket_name FROM (SELECT number, ts, case when number < 5 THEN 'bucket_0_5' when number >= 5 THEN 'bucket_5_inf' END as bucket_name FROM numbers_with_ts) as cte GROUP BY number, time_window, bucket_name;", - vec!["number", "time_window", "bucket_name"] - ) + vec!["number", "time_window", "bucket_name"], + ), ]; let query_engine = create_test_query_engine(); diff --git a/src/flow/src/compute/render/reduce.rs b/src/flow/src/compute/render/reduce.rs index 78bc027ee4..38ea92b7dc 100644 --- a/src/flow/src/compute/render/reduce.rs +++ b/src/flow/src/compute/render/reduce.rs @@ -24,7 +24,7 @@ use datatypes::value::{ListValue, Value}; use datatypes::vectors::{BooleanVector, NullVector}; use dfir_rs::scheduled::graph_ext::GraphExt; use itertools::Itertools; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use crate::compute::render::{Context, SubgraphArg}; use crate::compute::types::{Arranged, Collection, CollectionBundle, ErrCollector, Toff}; @@ -514,8 +514,7 @@ fn reduce_batch_subgraph( trace!( "Reduce take {} batches, {} rows", - input_batch_count, - input_row_count + input_batch_count, input_row_count ); // write lock the arrange for the rest of the function body @@ -1035,7 +1034,11 @@ impl AccumOutput { ensure!( !self.accum.is_empty() && self.accum.len() == self.output.len(), InternalSnafu { - reason: format!("Accum and output should have the non-zero and same length, found accum.len() = {}, output.len() = {}", self.accum.len(), self.output.len()) + reason: format!( + "Accum and output should have the non-zero and same length, found accum.len() = {}, output.len() = {}", + self.accum.len(), + self.output.len() + ) } ); // make output vec from output map diff --git a/src/flow/src/compute/state.rs b/src/flow/src/compute/state.rs index 82d2b908d7..2ccd366194 100644 --- a/src/flow/src/compute/state.rs +++ b/src/flow/src/compute/state.rs @@ -16,8 +16,8 @@ use std::cell::RefCell; use std::collections::{BTreeMap, VecDeque}; use std::rc::Rc; -use dfir_rs::scheduled::graph::Dfir; use dfir_rs::scheduled::SubgraphId; +use dfir_rs::scheduled::graph::Dfir; use get_size2::GetSize; use crate::compute::types::ErrCollector; diff --git a/src/flow/src/compute/types.rs b/src/flow/src/compute/types.rs index 63ad143840..9e33928130 100644 --- a/src/flow/src/compute/types.rs +++ b/src/flow/src/compute/types.rs @@ -18,10 +18,10 @@ use std::rc::Rc; use std::sync::Arc; use common_error::ext::ErrorExt; +use dfir_rs::scheduled::SubgraphId; use dfir_rs::scheduled::graph::Dfir; use dfir_rs::scheduled::handoff::TeeingHandoff; use dfir_rs::scheduled::port::RecvPort; -use dfir_rs::scheduled::SubgraphId; use itertools::Itertools; use tokio::sync::Mutex; diff --git a/src/flow/src/df_optimizer.rs 
b/src/flow/src/df_optimizer.rs index 75a91b6070..5fa180d53c 100644 --- a/src/flow/src/df_optimizer.rs +++ b/src/flow/src/df_optimizer.rs @@ -40,10 +40,10 @@ use datafusion_expr::{ BinaryExpr, ColumnarValue, Expr, Literal, Operator, Projection, ScalarFunctionArgs, ScalarUDFImpl, Signature, TypeSignature, Volatility, }; +use query::QueryEngine; use query::optimizer::count_wildcard::CountWildcardToTimeIndexRule; use query::parser::QueryLanguageParser; use query::query_engine::DefaultSerializer; -use query::QueryEngine; use session::context::QueryContextRef; use snafu::ResultExt; /// note here we are using the `substrait_proto_df` crate from the `substrait` module and @@ -163,68 +163,67 @@ impl AnalyzerRule for AvgExpandRule { fn put_aggr_to_proj_analyzer( plan: datafusion_expr::LogicalPlan, ) -> Result, DataFusionError> { - if let datafusion_expr::LogicalPlan::Projection(proj) = &plan { - if let datafusion_expr::LogicalPlan::Aggregate(aggr) = proj.input.as_ref() { - let mut replace_old_proj_exprs = HashMap::new(); - let mut expanded_aggr_exprs = vec![]; - for aggr_expr in &aggr.aggr_expr { - let mut is_composite = false; - if let Expr::AggregateFunction(_) = &aggr_expr { - expanded_aggr_exprs.push(aggr_expr.clone()); - } else { - let old_name = aggr_expr.name_for_alias()?; - let new_proj_expr = aggr_expr - .clone() - .transform(|ch| { - if let Expr::AggregateFunction(_) = &ch { - is_composite = true; - expanded_aggr_exprs.push(ch.clone()); - Ok(Transformed::yes(Expr::Column(Column::from_qualified_name( - ch.name_for_alias()?, - )))) - } else { - Ok(Transformed::no(ch)) - } - })? - .data; - replace_old_proj_exprs.insert(old_name, new_proj_expr); - } + if let datafusion_expr::LogicalPlan::Projection(proj) = &plan + && let datafusion_expr::LogicalPlan::Aggregate(aggr) = proj.input.as_ref() + { + let mut replace_old_proj_exprs = HashMap::new(); + let mut expanded_aggr_exprs = vec![]; + for aggr_expr in &aggr.aggr_expr { + let mut is_composite = false; + if let Expr::AggregateFunction(_) = &aggr_expr { + expanded_aggr_exprs.push(aggr_expr.clone()); + } else { + let old_name = aggr_expr.name_for_alias()?; + let new_proj_expr = aggr_expr + .clone() + .transform(|ch| { + if let Expr::AggregateFunction(_) = &ch { + is_composite = true; + expanded_aggr_exprs.push(ch.clone()); + Ok(Transformed::yes(Expr::Column(Column::from_qualified_name( + ch.name_for_alias()?, + )))) + } else { + Ok(Transformed::no(ch)) + } + })? + .data; + replace_old_proj_exprs.insert(old_name, new_proj_expr); } + } - if expanded_aggr_exprs.len() > aggr.aggr_expr.len() { - let mut aggr = aggr.clone(); - aggr.aggr_expr = expanded_aggr_exprs; - let mut aggr_plan = datafusion_expr::LogicalPlan::Aggregate(aggr); - // important to recompute schema after changing aggr_expr - aggr_plan = aggr_plan.recompute_schema()?; + if expanded_aggr_exprs.len() > aggr.aggr_expr.len() { + let mut aggr = aggr.clone(); + aggr.aggr_expr = expanded_aggr_exprs; + let mut aggr_plan = datafusion_expr::LogicalPlan::Aggregate(aggr); + // important to recompute schema after changing aggr_expr + aggr_plan = aggr_plan.recompute_schema()?; - // reconstruct proj with new proj_exprs - let mut new_proj_exprs = proj.expr.clone(); - for proj_expr in new_proj_exprs.iter_mut() { - if let Some(new_proj_expr) = - replace_old_proj_exprs.get(&proj_expr.name_for_alias()?) - { - *proj_expr = new_proj_expr.clone(); - } - *proj_expr = proj_expr - .clone() - .transform(|expr| { - if let Some(new_expr) = - replace_old_proj_exprs.get(&expr.name_for_alias()?) 
- { - Ok(Transformed::yes(new_expr.clone())) - } else { - Ok(Transformed::no(expr)) - } - })? - .data; + // reconstruct proj with new proj_exprs + let mut new_proj_exprs = proj.expr.clone(); + for proj_expr in new_proj_exprs.iter_mut() { + if let Some(new_proj_expr) = + replace_old_proj_exprs.get(&proj_expr.name_for_alias()?) + { + *proj_expr = new_proj_expr.clone(); } - let proj = datafusion_expr::LogicalPlan::Projection(Projection::try_new( - new_proj_exprs, - Arc::new(aggr_plan), - )?); - return Ok(Transformed::yes(proj)); + *proj_expr = proj_expr + .clone() + .transform(|expr| { + if let Some(new_expr) = replace_old_proj_exprs.get(&expr.name_for_alias()?) + { + Ok(Transformed::yes(new_expr.clone())) + } else { + Ok(Transformed::no(expr)) + } + })? + .data; } + let proj = datafusion_expr::LogicalPlan::Projection(Projection::try_new( + new_proj_exprs, + Arc::new(aggr_plan), + )?); + return Ok(Transformed::yes(proj)); } } Ok(Transformed::no(plan)) @@ -276,47 +275,45 @@ impl TreeNodeRewriter for ExpandAvgRewriter<'_> { type Node = Expr; fn f_up(&mut self, expr: Expr) -> Result, DataFusionError> { - if let Expr::AggregateFunction(aggr_func) = &expr { - if aggr_func.func.name() == "avg" { - let sum_expr = { - let mut tmp = aggr_func.clone(); - tmp.func = sum_udaf(); - Expr::AggregateFunction(tmp) - }; - let sum_cast = { - let mut tmp = sum_expr.clone(); - tmp = Expr::Cast(datafusion_expr::Cast { - expr: Box::new(tmp), - data_type: arrow_schema::DataType::Float64, - }); - tmp - }; + if let Expr::AggregateFunction(aggr_func) = &expr + && aggr_func.func.name() == "avg" + { + let sum_expr = { + let mut tmp = aggr_func.clone(); + tmp.func = sum_udaf(); + Expr::AggregateFunction(tmp) + }; + let sum_cast = { + let mut tmp = sum_expr.clone(); + tmp = Expr::Cast(datafusion_expr::Cast { + expr: Box::new(tmp), + data_type: arrow_schema::DataType::Float64, + }); + tmp + }; - let count_expr = { - let mut tmp = aggr_func.clone(); - tmp.func = count_udaf(); + let count_expr = { + let mut tmp = aggr_func.clone(); + tmp.func = count_udaf(); - Expr::AggregateFunction(tmp) - }; - let count_expr_ref = - Expr::Column(Column::from_qualified_name(count_expr.name_for_alias()?)); + Expr::AggregateFunction(tmp) + }; + let count_expr_ref = + Expr::Column(Column::from_qualified_name(count_expr.name_for_alias()?)); - let div = - BinaryExpr::new(Box::new(sum_cast), Operator::Divide, Box::new(count_expr)); - let div_expr = Box::new(Expr::BinaryExpr(div)); + let div = BinaryExpr::new(Box::new(sum_cast), Operator::Divide, Box::new(count_expr)); + let div_expr = Box::new(Expr::BinaryExpr(div)); - let zero = Box::new(0.lit()); - let not_zero = - BinaryExpr::new(Box::new(count_expr_ref), Operator::NotEq, zero.clone()); - let not_zero = Box::new(Expr::BinaryExpr(not_zero)); - let null = Box::new(Expr::Literal(ScalarValue::Null, None)); + let zero = Box::new(0.lit()); + let not_zero = BinaryExpr::new(Box::new(count_expr_ref), Operator::NotEq, zero.clone()); + let not_zero = Box::new(Expr::BinaryExpr(not_zero)); + let null = Box::new(Expr::Literal(ScalarValue::Null, None)); - let case_when = - datafusion_expr::Case::new(None, vec![(not_zero, div_expr)], Some(null)); - let case_when_expr = Expr::Case(case_when); + let case_when = + datafusion_expr::Case::new(None, vec![(not_zero, div_expr)], Some(null)); + let case_when_expr = Expr::Case(case_when); - return Ok(Transformed::yes(case_when_expr)); - } + return Ok(Transformed::yes(case_when_expr)); } Ok(Transformed::no(expr)) @@ -350,81 +347,79 @@ impl AnalyzerRule for 
TumbleExpandRule { fn expand_tumble_analyzer( plan: datafusion_expr::LogicalPlan, ) -> Result, DataFusionError> { - if let datafusion_expr::LogicalPlan::Projection(proj) = &plan { - if let datafusion_expr::LogicalPlan::Aggregate(aggr) = proj.input.as_ref() { - let mut new_group_expr = vec![]; - let mut alias_to_expand = HashMap::new(); - let mut encountered_tumble = false; - for expr in aggr.group_expr.iter() { - match expr { - datafusion_expr::Expr::ScalarFunction(func) if func.name() == "tumble" => { - encountered_tumble = true; + if let datafusion_expr::LogicalPlan::Projection(proj) = &plan + && let datafusion_expr::LogicalPlan::Aggregate(aggr) = proj.input.as_ref() + { + let mut new_group_expr = vec![]; + let mut alias_to_expand = HashMap::new(); + let mut encountered_tumble = false; + for expr in aggr.group_expr.iter() { + match expr { + datafusion_expr::Expr::ScalarFunction(func) if func.name() == "tumble" => { + encountered_tumble = true; - let tumble_start = TumbleExpand::new(TUMBLE_START); - let tumble_start = datafusion_expr::expr::ScalarFunction::new_udf( - Arc::new(tumble_start.into()), - func.args.clone(), - ); - let tumble_start = datafusion_expr::Expr::ScalarFunction(tumble_start); - let start_col_name = tumble_start.name_for_alias()?; - new_group_expr.push(tumble_start); + let tumble_start = TumbleExpand::new(TUMBLE_START); + let tumble_start = datafusion_expr::expr::ScalarFunction::new_udf( + Arc::new(tumble_start.into()), + func.args.clone(), + ); + let tumble_start = datafusion_expr::Expr::ScalarFunction(tumble_start); + let start_col_name = tumble_start.name_for_alias()?; + new_group_expr.push(tumble_start); - let tumble_end = TumbleExpand::new(TUMBLE_END); - let tumble_end = datafusion_expr::expr::ScalarFunction::new_udf( - Arc::new(tumble_end.into()), - func.args.clone(), - ); - let tumble_end = datafusion_expr::Expr::ScalarFunction(tumble_end); - let end_col_name = tumble_end.name_for_alias()?; - new_group_expr.push(tumble_end); + let tumble_end = TumbleExpand::new(TUMBLE_END); + let tumble_end = datafusion_expr::expr::ScalarFunction::new_udf( + Arc::new(tumble_end.into()), + func.args.clone(), + ); + let tumble_end = datafusion_expr::Expr::ScalarFunction(tumble_end); + let end_col_name = tumble_end.name_for_alias()?; + new_group_expr.push(tumble_end); - alias_to_expand - .insert(expr.name_for_alias()?, (start_col_name, end_col_name)); - } - _ => new_group_expr.push(expr.clone()), + alias_to_expand.insert(expr.name_for_alias()?, (start_col_name, end_col_name)); } + _ => new_group_expr.push(expr.clone()), } - if !encountered_tumble { - return Ok(Transformed::no(plan)); - } - let mut new_aggr = aggr.clone(); - new_aggr.group_expr = new_group_expr; - let new_aggr = datafusion_expr::LogicalPlan::Aggregate(new_aggr).recompute_schema()?; - // replace alias in projection if needed, and add new column ref if necessary - let mut new_proj_expr = vec![]; - let mut have_expanded = false; - - for proj_expr in proj.expr.iter() { - if let Some((start_col_name, end_col_name)) = - alias_to_expand.get(&proj_expr.name_for_alias()?) 
- { - let start_col = Column::from_qualified_name(start_col_name); - let end_col = Column::from_qualified_name(end_col_name); - new_proj_expr.push(datafusion_expr::Expr::Column(start_col)); - new_proj_expr.push(datafusion_expr::Expr::Column(end_col)); - have_expanded = true; - } else { - new_proj_expr.push(proj_expr.clone()); - } - } - - // append to end of projection if not exist - if !have_expanded { - for (start_col_name, end_col_name) in alias_to_expand.values() { - let start_col = Column::from_qualified_name(start_col_name); - let end_col = Column::from_qualified_name(end_col_name); - new_proj_expr - .push(datafusion_expr::Expr::Column(start_col).alias("window_start")); - new_proj_expr.push(datafusion_expr::Expr::Column(end_col).alias("window_end")); - } - } - - let new_proj = datafusion_expr::LogicalPlan::Projection(Projection::try_new( - new_proj_expr, - Arc::new(new_aggr), - )?); - return Ok(Transformed::yes(new_proj)); } + if !encountered_tumble { + return Ok(Transformed::no(plan)); + } + let mut new_aggr = aggr.clone(); + new_aggr.group_expr = new_group_expr; + let new_aggr = datafusion_expr::LogicalPlan::Aggregate(new_aggr).recompute_schema()?; + // replace alias in projection if needed, and add new column ref if necessary + let mut new_proj_expr = vec![]; + let mut have_expanded = false; + + for proj_expr in proj.expr.iter() { + if let Some((start_col_name, end_col_name)) = + alias_to_expand.get(&proj_expr.name_for_alias()?) + { + let start_col = Column::from_qualified_name(start_col_name); + let end_col = Column::from_qualified_name(end_col_name); + new_proj_expr.push(datafusion_expr::Expr::Column(start_col)); + new_proj_expr.push(datafusion_expr::Expr::Column(end_col)); + have_expanded = true; + } else { + new_proj_expr.push(proj_expr.clone()); + } + } + + // append to end of projection if not exist + if !have_expanded { + for (start_col_name, end_col_name) in alias_to_expand.values() { + let start_col = Column::from_qualified_name(start_col_name); + let end_col = Column::from_qualified_name(end_col_name); + new_proj_expr.push(datafusion_expr::Expr::Column(start_col).alias("window_start")); + new_proj_expr.push(datafusion_expr::Expr::Column(end_col).alias("window_end")); + } + } + + let new_proj = datafusion_expr::LogicalPlan::Projection(Projection::try_new( + new_proj_expr, + Arc::new(new_aggr), + )?); + return Ok(Transformed::yes(new_proj)); } Ok(Transformed::no(plan)) @@ -479,13 +474,12 @@ impl ScalarUDFImpl for TumbleExpand { )); } - if let Some(start_time) = opt{ - if !matches!(start_time, Utf8 | Date32 | Timestamp(_, _)){ + if let Some(start_time) = opt + && !matches!(start_time, Utf8 | Date32 | Timestamp(_, _)){ return Err(DataFusionError::Plan( format!("Expect start_time to either be date, timestamp or string, found {:?}", start_time) )); } - } Ok(arg_types.to_vec()) } @@ -548,20 +542,24 @@ impl AnalyzerRule for CheckGroupByRule { fn check_group_by_analyzer( plan: datafusion_expr::LogicalPlan, ) -> Result, DataFusionError> { - if let datafusion_expr::LogicalPlan::Projection(proj) = &plan { - if let datafusion_expr::LogicalPlan::Aggregate(aggr) = proj.input.as_ref() { - let mut found_column_used = FindColumn::new(); - proj.expr - .iter() - .map(|i| i.visit(&mut found_column_used)) - .count(); - for expr in aggr.group_expr.iter() { - if !found_column_used - .names_for_alias - .contains(&expr.name_for_alias()?) 
- { - return Err(DataFusionError::Plan(format!("Expect {} expr in group by also exist in select list, but select list only contain {:?}",expr.name_for_alias()?, found_column_used.names_for_alias))); - } + if let datafusion_expr::LogicalPlan::Projection(proj) = &plan + && let datafusion_expr::LogicalPlan::Aggregate(aggr) = proj.input.as_ref() + { + let mut found_column_used = FindColumn::new(); + proj.expr + .iter() + .map(|i| i.visit(&mut found_column_used)) + .count(); + for expr in aggr.group_expr.iter() { + if !found_column_used + .names_for_alias + .contains(&expr.name_for_alias()?) + { + return Err(DataFusionError::Plan(format!( + "Expect {} expr in group by also exist in select list, but select list only contain {:?}", + expr.name_for_alias()?, + found_column_used.names_for_alias + ))); } } } diff --git a/src/flow/src/error.rs b/src/flow/src/error.rs index 817cef6a9e..46cfb7c1c3 100644 --- a/src/flow/src/error.rs +++ b/src/flow/src/error.rs @@ -26,8 +26,8 @@ use common_telemetry::common_error::status_code::StatusCode; use snafu::{Location, ResultExt, Snafu}; use tonic::metadata::MetadataMap; -use crate::expr::EvalError; use crate::FlowId; +use crate::expr::EvalError; /// This error is used to represent all possible errors that can occur in the flow module. #[derive(Snafu)] @@ -76,9 +76,7 @@ pub enum Error { location: Location, }, - #[snafu(display( - "No available frontend found after timeout: {timeout:?}, context: {context}" - ))] + #[snafu(display("No available frontend found after timeout: {timeout:?}, context: {context}"))] NoAvailableFrontend { timeout: std::time::Duration, context: String, diff --git a/src/flow/src/expr.rs b/src/flow/src/expr.rs index a3c12a9742..a57234efb7 100644 --- a/src/flow/src/expr.rs +++ b/src/flow/src/expr.rs @@ -36,7 +36,7 @@ use itertools::Itertools; pub(crate) use linear::{MapFilterProject, MfpPlan, SafeMfpPlan}; pub(crate) use relation::{Accum, Accumulator, AggregateExpr, AggregateFunc}; pub(crate) use scalar::{ScalarExpr, TypedExpr}; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use crate::expr::error::{ArrowSnafu, DataTypeSnafu}; use crate::repr::Diff; diff --git a/src/flow/src/expr/df_func.rs b/src/flow/src/expr/df_func.rs index a19017e7f3..1cb5712a23 100644 --- a/src/flow/src/expr/df_func.rs +++ b/src/flow/src/expr/df_func.rs @@ -37,7 +37,7 @@ use crate::expr::error::{ }; use crate::expr::{Batch, ScalarExpr}; use crate::repr::RelationDesc; -use crate::transform::{from_scalar_fn_to_df_fn_impl, FunctionExtensions}; +use crate::transform::{FunctionExtensions, from_scalar_fn_to_df_fn_impl}; /// A way to represent a scalar function that is implemented in Datafusion #[derive(Debug, Clone)] diff --git a/src/flow/src/expr/func.rs b/src/flow/src/expr/func.rs index 79f512900f..cda70773a3 100644 --- a/src/flow/src/expr/func.rs +++ b/src/flow/src/expr/func.rs @@ -20,8 +20,8 @@ use std::time::Duration; use arrow::array::{ArrayRef, BooleanArray}; use common_error::ext::BoxedError; -use common_time::timestamp::TimeUnit; use common_time::Timestamp; +use common_time::timestamp::TimeUnit; use datafusion_expr::Operator; use datatypes::data_type::ConcreteDataType; use datatypes::prelude::DataType; @@ -30,7 +30,7 @@ use datatypes::value::Value; use datatypes::vectors::{BooleanVector, Helper, TimestampMillisecondVector, VectorRef}; use serde::{Deserialize, Serialize}; use smallvec::smallvec; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use strum::{EnumIter, IntoEnumIterator}; use 
substrait::df_logical_plan::consumer::name_to_op; @@ -40,7 +40,7 @@ use crate::expr::error::{ TryFromValueSnafu, TypeMismatchSnafu, }; use crate::expr::signature::{GenericFn, Signature}; -use crate::expr::{Batch, InvalidArgumentSnafu, ScalarExpr, TypedExpr, TUMBLE_END, TUMBLE_START}; +use crate::expr::{Batch, InvalidArgumentSnafu, ScalarExpr, TUMBLE_END, TUMBLE_START, TypedExpr}; use crate::repr::{self, value_to_internal_ts}; /// UnmaterializableFunc is a function that can't be eval independently, diff --git a/src/flow/src/expr/linear.rs b/src/flow/src/expr/linear.rs index 807119a032..9729433518 100644 --- a/src/flow/src/expr/linear.rs +++ b/src/flow/src/expr/linear.rs @@ -24,12 +24,12 @@ use datatypes::prelude::ConcreteDataType; use datatypes::value::Value; use datatypes::vectors::{BooleanVector, Helper}; use itertools::Itertools; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use crate::error::{Error, InvalidQuerySnafu}; use crate::expr::error::{ArrowSnafu, DataTypeSnafu, EvalError, InternalSnafu, TypeMismatchSnafu}; use crate::expr::{Batch, InvalidArgumentSnafu, ScalarExpr}; -use crate::repr::{self, value_to_internal_ts, Diff, Row}; +use crate::repr::{self, Diff, Row, value_to_internal_ts}; /// A compound operator that can be applied row-by-row. /// @@ -968,9 +968,11 @@ mod test { let mfp = MapFilterProject::new(4); // append a expression to the mfp'input row that get the sum of the first 3 columns let mfp = mfp - .map(vec![ScalarExpr::Column(0) - .call_binary(ScalarExpr::Column(1), BinaryFunc::AddInt32) - .call_binary(ScalarExpr::Column(2), BinaryFunc::AddInt32)]) + .map(vec![ + ScalarExpr::Column(0) + .call_binary(ScalarExpr::Column(1), BinaryFunc::AddInt32) + .call_binary(ScalarExpr::Column(2), BinaryFunc::AddInt32), + ]) .unwrap(); // only retain sum result let mfp = mfp.project(vec![4]).unwrap(); @@ -1032,11 +1034,11 @@ mod test { fn test_permute() { let mfp = MapFilterProject::new(3) .map(vec![ - ScalarExpr::Column(0).call_binary(ScalarExpr::Column(1), BinaryFunc::Lt) + ScalarExpr::Column(0).call_binary(ScalarExpr::Column(1), BinaryFunc::Lt), ]) .unwrap() .filter(vec![ - ScalarExpr::Column(0).call_binary(ScalarExpr::Column(1), BinaryFunc::Gt) + ScalarExpr::Column(0).call_binary(ScalarExpr::Column(1), BinaryFunc::Gt), ]) .unwrap() .project(vec![0, 1]) @@ -1058,7 +1060,7 @@ mod test { ))]) .unwrap() .filter(vec![ - ScalarExpr::Column(3).call_binary(ScalarExpr::Column(1), BinaryFunc::Gt) + ScalarExpr::Column(3).call_binary(ScalarExpr::Column(1), BinaryFunc::Gt), ]) .unwrap() .project([0, 1, 2]) @@ -1129,11 +1131,11 @@ mod test { .project(vec![2, 1, 0]) .unwrap() .filter(vec![ - ScalarExpr::Column(0).call_binary(ScalarExpr::Column(1), BinaryFunc::Gt) + ScalarExpr::Column(0).call_binary(ScalarExpr::Column(1), BinaryFunc::Gt), ]) .unwrap() .map(vec![ - ScalarExpr::Column(0).call_binary(ScalarExpr::Column(1), BinaryFunc::Lt) + ScalarExpr::Column(0).call_binary(ScalarExpr::Column(1), BinaryFunc::Lt), ]) .unwrap() .project(vec![3]) diff --git a/src/flow/src/expr/relation/accum.rs b/src/flow/src/expr/relation/accum.rs index 64062869ed..d0d25e9768 100644 --- a/src/flow/src/expr/relation/accum.rs +++ b/src/flow/src/expr/relation/accum.rs @@ -445,11 +445,7 @@ impl OrdValue { Ok(Self { val: { let v = iter.next().ok_or_else(fail_accum::)?; - if v == Value::Null { - None - } else { - Some(v) - } + if v == Value::Null { None } else { Some(v) } }, non_nulls: Diff::try_from(iter.next().ok_or_else(fail_accum::)?) 
.map_err(err_try_from_val)?, @@ -473,11 +469,7 @@ impl TryFrom> for OrdValue { Ok(Self { val: { let v = iter.next().unwrap(); - if v == Value::Null { - None - } else { - Some(v) - } + if v == Value::Null { None } else { Some(v) } }, non_nulls: Diff::try_from(iter.next().unwrap()).map_err(err_try_from_val)?, }) @@ -917,10 +909,7 @@ mod test { if actual_state != state { panic!( "Failed to cast into state from accum for {:?} with input {:?}, expect state {:?}, got state {:?}", - aggr_fn, - input, - state, - actual_state + aggr_fn, input, state, actual_state ); } } diff --git a/src/flow/src/expr/relation/func.rs b/src/flow/src/expr/relation/func.rs index a513f178e5..35a43957d8 100644 --- a/src/flow/src/expr/relation/func.rs +++ b/src/flow/src/expr/relation/func.rs @@ -24,10 +24,10 @@ use snafu::OptionExt; use strum::{EnumIter, IntoEnumIterator}; use crate::error::{Error, InvalidQuerySnafu}; +use crate::expr::VectorDiff; use crate::expr::error::EvalError; use crate::expr::relation::accum::{Accum, Accumulator}; use crate::expr::signature::{GenericFn, Signature}; -use crate::expr::VectorDiff; use crate::repr::Diff; /// Aggregate functions that can be applied to a group of rows. diff --git a/src/flow/src/expr/scalar.rs b/src/flow/src/expr/scalar.rs index 9c75a86ffa..18649bff12 100644 --- a/src/flow/src/expr/scalar.rs +++ b/src/flow/src/expr/scalar.rs @@ -17,7 +17,7 @@ use std::collections::{BTreeMap, BTreeSet}; use std::sync::Arc; -use arrow::array::{make_array, ArrayData, ArrayRef, BooleanArray}; +use arrow::array::{ArrayData, ArrayRef, BooleanArray, make_array}; use arrow::buffer::BooleanBuffer; use arrow::compute::or_kleene; use common_error::ext::BoxedError; @@ -28,7 +28,7 @@ use datatypes::value::Value; use datatypes::vectors::{BooleanVector, Helper, VectorRef}; use dfir_rs::lattices::cc_traits::Iter; use itertools::Itertools; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use crate::error::{ DatafusionSnafu, Error, InvalidQuerySnafu, UnexpectedSnafu, UnsupportedTemporalFilterSnafu, @@ -330,7 +330,8 @@ impl ScalarExpr { ); ensure!( - then_input_vec.len() == else_input_vec.len() && then_input_vec.len() == batch.row_count(), + then_input_vec.len() == else_input_vec.len() + && then_input_vec.len() == batch.row_count(), InvalidArgumentSnafu { reason: format!( "then and else branch must have the same length(found {} and {}) which equals input batch's row count(which is {})", diff --git a/src/flow/src/expr/utils.rs b/src/flow/src/expr/utils.rs index 636d83b72d..6b9fda5337 100644 --- a/src/flow/src/expr/utils.rs +++ b/src/flow/src/expr/utils.rs @@ -18,12 +18,12 @@ use std::cmp::Ordering; use std::collections::BTreeMap; use datatypes::value::Value; -use snafu::{ensure, OptionExt}; +use snafu::{OptionExt, ensure}; +use crate::Result; use crate::error::UnexpectedSnafu; use crate::expr::ScalarExpr; use crate::plan::TypedPlan; -use crate::Result; /// Find lower bound for time `current` in given `plan` for the time window expr. 
/// @@ -102,11 +102,14 @@ pub fn find_time_window_lower_bound( } ); - ensure!(all_ref_columns.len() == 1, UnexpectedSnafu { - reason: format!( - "Expect only one column to be referenced in expression {expr:?}, found {all_ref_columns:?}" - ), - }); + ensure!( + all_ref_columns.len() == 1, + UnexpectedSnafu { + reason: format!( + "Expect only one column to be referenced in expression {expr:?}, found {all_ref_columns:?}" + ), + } + ); let permute_map = BTreeMap::from([(ts_col_idx, 0usize)]); @@ -170,9 +173,14 @@ pub fn find_time_window_lower_bound( // binary search for the lower bound - ensure!(lower_bound.map(|v|v.unit())==upper_bound.map(|v|v.unit()), UnexpectedSnafu{ - reason: format!(" unit mismatch for time window expression {expr:?}, found {lower_bound:?} and {upper_bound:?}"), - }); + ensure!( + lower_bound.map(|v| v.unit()) == upper_bound.map(|v| v.unit()), + UnexpectedSnafu { + reason: format!( + " unit mismatch for time window expression {expr:?}, found {lower_bound:?} and {upper_bound:?}" + ), + } + ); let output_unit = lower_bound.expect("should have lower bound").unit(); diff --git a/src/flow/src/heartbeat.rs b/src/flow/src/heartbeat.rs index 0092a90276..0086bc1c86 100644 --- a/src/flow/src/heartbeat.rs +++ b/src/flow/src/heartbeat.rs @@ -14,8 +14,8 @@ //! Send heartbeat from flownode to metasrv -use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicBool, Ordering}; use api::v1::meta::{HeartbeatRequest, Peer}; use common_error::ext::BoxedError; diff --git a/src/flow/src/lib.rs b/src/flow/src/lib.rs index 055042a6b5..509fc49123 100644 --- a/src/flow/src/lib.rs +++ b/src/flow/src/lib.rs @@ -49,8 +49,8 @@ pub use engine::FlowAuthHeader; pub(crate) use engine::{CreateFlowArgs, FlowId, TableName}; pub use error::{Error, Result}; pub use server::{ - get_flow_auth_options, FlownodeBuilder, FlownodeInstance, FlownodeServer, - FlownodeServiceBuilder, FrontendInvoker, + FlownodeBuilder, FlownodeInstance, FlownodeServer, FlownodeServiceBuilder, FrontendInvoker, + get_flow_auth_options, }; pub use crate::adapter::FlownodeOptions; diff --git a/src/flow/src/repr/relation.rs b/src/flow/src/repr/relation.rs index 29cf021520..817d85f310 100644 --- a/src/flow/src/repr/relation.rs +++ b/src/flow/src/repr/relation.rs @@ -17,7 +17,7 @@ use datatypes::data_type::DataType; use datatypes::prelude::ConcreteDataType; use itertools::Itertools; use serde::{Deserialize, Serialize}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use crate::error::{DatafusionSnafu, InternalSnafu, InvalidQuerySnafu, Result, UnexpectedSnafu}; use crate::expr::{SafeMfpPlan, ScalarExpr}; diff --git a/src/flow/src/server.rs b/src/flow/src/server.rs index 77e27bd90f..21820b36a6 100644 --- a/src/flow/src/server.rs +++ b/src/flow/src/server.rs @@ -24,8 +24,8 @@ use catalog::CatalogManagerRef; use common_base::Plugins; use common_error::ext::BoxedError; use common_meta::cache::{LayeredCacheRegistryRef, TableFlownodeSetCacheRef, TableRouteCacheRef}; -use common_meta::key::flow::FlowMetadataManagerRef; use common_meta::key::TableMetadataManagerRef; +use common_meta::key::flow::FlowMetadataManagerRef; use common_meta::kv_backend::KvBackendRef; use common_meta::node_manager::{Flownode, NodeManagerRef}; use common_meta::procedure_executor::ProcedureExecutorRef; @@ -33,7 +33,7 @@ use common_query::Output; use common_runtime::JoinHandle; use common_telemetry::tracing::info; use futures::TryStreamExt; -use greptime_proto::v1::flow::{flow_server, FlowRequest, 
FlowResponse, InsertRequests}; +use greptime_proto::v1::flow::{FlowRequest, FlowResponse, InsertRequests, flow_server}; use itertools::Itertools; use operator::delete::Deleter; use operator::insert::Inserter; @@ -48,16 +48,16 @@ use servers::metrics_handler::MetricsHandler; use servers::server::{ServerHandler, ServerHandlers}; use session::context::QueryContextRef; use snafu::{OptionExt, ResultExt}; -use tokio::sync::{broadcast, oneshot, Mutex}; +use tokio::sync::{Mutex, broadcast, oneshot}; use tonic::codec::CompressionEncoding; use tonic::{Request, Response, Status}; use crate::adapter::flownode_impl::{FlowDualEngine, FlowDualEngineRef}; -use crate::adapter::{create_worker, FlowStreamingEngineRef}; +use crate::adapter::{FlowStreamingEngineRef, create_worker}; use crate::batching_mode::engine::BatchingEngine; use crate::error::{ - to_status_with_last_err, CacheRequiredSnafu, ExternalSnafu, IllegalAuthConfigSnafu, - ListFlowsSnafu, ParseAddrSnafu, ShutdownServerSnafu, StartServerSnafu, UnexpectedSnafu, + CacheRequiredSnafu, ExternalSnafu, IllegalAuthConfigSnafu, ListFlowsSnafu, ParseAddrSnafu, + ShutdownServerSnafu, StartServerSnafu, UnexpectedSnafu, to_status_with_last_err, }; use crate::heartbeat::HeartbeatTask; use crate::metrics::{METRIC_FLOW_PROCESSING_TIME, METRIC_FLOW_ROWS}; diff --git a/src/flow/src/test_utils.rs b/src/flow/src/test_utils.rs index 2b47d1595c..a2b568b679 100644 --- a/src/flow/src/test_utils.rs +++ b/src/flow/src/test_utils.rs @@ -23,22 +23,22 @@ use datatypes::timestamp::TimestampMillisecond; use datatypes::vectors::{TimestampMillisecondVectorBuilder, VectorRef}; use itertools::Itertools; use prost::Message; +use query::QueryEngine; use query::options::QueryOptions; use query::parser::QueryLanguageParser; use query::query_engine::DefaultSerializer; -use query::QueryEngine; use session::context::QueryContext; /// note here we are using the `substrait_proto_df` crate from the `substrait` module and /// rename it to `substrait_proto` use substrait::substrait_proto_df as substrait_proto; use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan}; use substrait_proto::proto; -use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME}; +use table::table::numbers::{NUMBERS_TABLE_NAME, NumbersTable}; use table::test_util::MemTable; +use crate::adapter::FlownodeContext; use crate::adapter::node_context::IdToNameMap; use crate::adapter::table_source::test::FlowDummyTableSource; -use crate::adapter::FlownodeContext; use crate::df_optimizer::apply_df_optimizer; use crate::expr::GlobalId; use crate::transform::register_function_to_query_engine; diff --git a/src/flow/src/transform.rs b/src/flow/src/transform.rs index f958ed5585..f3ebf0ca71 100644 --- a/src/flow/src/transform.rs +++ b/src/flow/src/transform.rs @@ -27,8 +27,8 @@ use snafu::ResultExt; /// note here we are using the `substrait_proto_df` crate from the `substrait` module and /// rename it to `substrait_proto` use substrait::substrait_proto_df as substrait_proto; -use substrait_proto::proto::extensions::simple_extension_declaration::MappingType; use substrait_proto::proto::extensions::SimpleExtensionDeclaration; +use substrait_proto::proto::extensions::simple_extension_declaration::MappingType; use crate::adapter::FlownodeContext; use crate::error::{Error, NotImplementedSnafu, UnexpectedSnafu}; @@ -176,14 +176,14 @@ mod test { use datatypes::vectors::{TimestampMillisecondVectorBuilder, VectorRef}; use itertools::Itertools; use prost::Message; + use query::QueryEngine; use query::options::QueryOptions; use 
query::parser::QueryLanguageParser; use query::query_engine::DefaultSerializer; - use query::QueryEngine; use session::context::QueryContext; use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan}; use substrait_proto::proto; - use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME}; + use table::table::numbers::{NUMBERS_TABLE_NAME, NumbersTable}; use table::test_util::MemTable; use super::*; diff --git a/src/flow/src/transform/aggr.rs b/src/flow/src/transform/aggr.rs index 27c7acfb1d..d0b9945f05 100644 --- a/src/flow/src/transform/aggr.rs +++ b/src/flow/src/transform/aggr.rs @@ -25,7 +25,7 @@ use crate::expr::{ }; use crate::plan::{AccumulablePlan, AggrWithIndex, KeyValPlan, Plan, ReducePlan, TypedPlan}; use crate::repr::{ColumnType, RelationDesc, RelationType}; -use crate::transform::{substrait_proto, FlownodeContext, FunctionExtensions}; +use crate::transform::{FlownodeContext, FunctionExtensions, substrait_proto}; impl TypedExpr { /// Allow `deprecated` due to the usage of deprecated grouping_expressions on datafusion to substrait side @@ -121,7 +121,7 @@ impl AggregateExpr { .await? } None => { - return not_impl_err!("Aggregate without aggregate function is not supported") + return not_impl_err!("Aggregate without aggregate function is not supported"); } }; @@ -395,8 +395,8 @@ mod test { use crate::expr::{BinaryFunc, DfScalarFunction, GlobalId, RawDfScalarFn}; use crate::plan::{Plan, TypedPlan}; use crate::repr::{ColumnType, RelationType}; - use crate::transform::test::{create_test_ctx, create_test_query_engine, sql_to_substrait}; use crate::transform::CDT; + use crate::transform::test::{create_test_ctx, create_test_query_engine, sql_to_substrait}; #[tokio::test] async fn test_df_func_basic() { @@ -414,68 +414,69 @@ mod test { expr: ScalarExpr::Column(0), distinct: false, }; - let expected = TypedPlan { - schema: RelationType::new(vec![ - ColumnType::new(CDT::uint64_datatype(), true), // sum(number) - ColumnType::new(CDT::timestamp_millisecond_datatype(), true), // window start - ColumnType::new(CDT::timestamp_millisecond_datatype(), true), // window end - ]) - .with_key(vec![2]) - .with_time_index(Some(1)) - .into_named(vec![ - Some("sum(abs(numbers_with_ts.number))".to_string()), - Some("window_start".to_string()), - Some("window_end".to_string()), - ]), - plan: Plan::Mfp { - input: Box::new( - Plan::Reduce { - input: Box::new( - Plan::Get { - id: crate::expr::Id::Global(GlobalId::User(1)), - } - .with_types( - RelationType::new(vec![ - ColumnType::new(ConcreteDataType::uint32_datatype(), false), - ColumnType::new( - ConcreteDataType::timestamp_millisecond_datatype(), - false, - ), - ]) - .into_named(vec![ - Some("number".to_string()), - Some("ts".to_string()), - ]), - ) - .mfp(MapFilterProject::new(2).into_safe()) - .unwrap(), - ), - key_val_plan: KeyValPlan { - key_plan: MapFilterProject::new(2) - .map(vec![ - ScalarExpr::Column(1).call_unary( - UnaryFunc::TumbleWindowFloor { - window_size: Duration::from_nanos(1_000_000_000), - start_time: Some(Timestamp::new_millisecond( - 1625097600000, - )), - }, - ), - ScalarExpr::Column(1).call_unary( - UnaryFunc::TumbleWindowCeiling { - window_size: Duration::from_nanos(1_000_000_000), - start_time: Some(Timestamp::new_millisecond( - 1625097600000, - )), - }, - ), - ]) - .unwrap() - .project(vec![2, 3]) - .unwrap() - .into_safe(), - val_plan: MapFilterProject::new(2) - .map(vec![ScalarExpr::CallDf { + let expected = + TypedPlan { + schema: RelationType::new(vec![ + ColumnType::new(CDT::uint64_datatype(), true), // sum(number) + 
ColumnType::new(CDT::timestamp_millisecond_datatype(), true), // window start + ColumnType::new(CDT::timestamp_millisecond_datatype(), true), // window end + ]) + .with_key(vec![2]) + .with_time_index(Some(1)) + .into_named(vec![ + Some("sum(abs(numbers_with_ts.number))".to_string()), + Some("window_start".to_string()), + Some("window_end".to_string()), + ]), + plan: Plan::Mfp { + input: Box::new( + Plan::Reduce { + input: Box::new( + Plan::Get { + id: crate::expr::Id::Global(GlobalId::User(1)), + } + .with_types( + RelationType::new(vec![ + ColumnType::new(ConcreteDataType::uint32_datatype(), false), + ColumnType::new( + ConcreteDataType::timestamp_millisecond_datatype(), + false, + ), + ]) + .into_named(vec![ + Some("number".to_string()), + Some("ts".to_string()), + ]), + ) + .mfp(MapFilterProject::new(2).into_safe()) + .unwrap(), + ), + key_val_plan: KeyValPlan { + key_plan: MapFilterProject::new(2) + .map(vec![ + ScalarExpr::Column(1).call_unary( + UnaryFunc::TumbleWindowFloor { + window_size: Duration::from_nanos(1_000_000_000), + start_time: Some(Timestamp::new_millisecond( + 1625097600000, + )), + }, + ), + ScalarExpr::Column(1).call_unary( + UnaryFunc::TumbleWindowCeiling { + window_size: Duration::from_nanos(1_000_000_000), + start_time: Some(Timestamp::new_millisecond( + 1625097600000, + )), + }, + ), + ]) + .unwrap() + .project(vec![2, 3]) + .unwrap() + .into_safe(), + val_plan: MapFilterProject::new(2) + .map(vec![ScalarExpr::CallDf { df_scalar_fn: DfScalarFunction::try_from_raw_fn( RawDfScalarFn { f: BytesMut::from( @@ -503,39 +504,39 @@ mod test { exprs: vec![ScalarExpr::Column(0)], } .cast(CDT::uint64_datatype())]) - .unwrap() - .project(vec![2]) - .unwrap() - .into_safe(), - }, - reduce_plan: ReducePlan::Accumulable(AccumulablePlan { - full_aggrs: vec![aggr_expr.clone()], - simple_aggrs: vec![AggrWithIndex::new(aggr_expr.clone(), 0, 0)], - distinct_aggrs: vec![], - }), - } - .with_types( - RelationType::new(vec![ - ColumnType::new(CDT::timestamp_millisecond_datatype(), true), // window start - ColumnType::new(CDT::timestamp_millisecond_datatype(), true), // window end - ColumnType::new(CDT::uint64_datatype(), true), //sum(number) - ]) - .with_key(vec![1]) - .with_time_index(Some(0)) - .into_unnamed(), + .unwrap() + .project(vec![2]) + .unwrap() + .into_safe(), + }, + reduce_plan: ReducePlan::Accumulable(AccumulablePlan { + full_aggrs: vec![aggr_expr.clone()], + simple_aggrs: vec![AggrWithIndex::new(aggr_expr.clone(), 0, 0)], + distinct_aggrs: vec![], + }), + } + .with_types( + RelationType::new(vec![ + ColumnType::new(CDT::timestamp_millisecond_datatype(), true), // window start + ColumnType::new(CDT::timestamp_millisecond_datatype(), true), // window end + ColumnType::new(CDT::uint64_datatype(), true), //sum(number) + ]) + .with_key(vec![1]) + .with_time_index(Some(0)) + .into_unnamed(), + ), ), - ), - mfp: MapFilterProject::new(3) - .map(vec![ - ScalarExpr::Column(2), - ScalarExpr::Column(0), - ScalarExpr::Column(1), - ]) - .unwrap() - .project(vec![3, 4, 5]) - .unwrap(), - }, - }; + mfp: MapFilterProject::new(3) + .map(vec![ + ScalarExpr::Column(2), + ScalarExpr::Column(0), + ScalarExpr::Column(1), + ]) + .unwrap() + .project(vec![3, 4, 5]) + .unwrap(), + }, + }; assert_eq!(flow_plan, expected); } @@ -1327,8 +1328,10 @@ mod test { .unwrap() .into_safe(), val_plan: MapFilterProject::new(1) - .map(vec![ScalarExpr::Column(0) - .call_unary(UnaryFunc::Cast(CDT::uint64_datatype()))]) + .map(vec![ + ScalarExpr::Column(0) + .call_unary(UnaryFunc::Cast(CDT::uint64_datatype())), + 
]) .unwrap() .project(vec![1]) .unwrap() @@ -1450,8 +1453,10 @@ mod test { .unwrap() .into_safe(), val_plan: MapFilterProject::new(1) - .map(vec![ScalarExpr::Column(0) - .call_unary(UnaryFunc::Cast(CDT::uint64_datatype()))]) + .map(vec![ + ScalarExpr::Column(0) + .call_unary(UnaryFunc::Cast(CDT::uint64_datatype())), + ]) .unwrap() .project(vec![1]) .unwrap() @@ -1533,9 +1538,11 @@ mod test { .unwrap() .into_safe(), val_plan: MapFilterProject::new(1) - .map(vec![ScalarExpr::Column(0) - .call_binary(ScalarExpr::Column(0), BinaryFunc::AddUInt32) - .call_unary(UnaryFunc::Cast(CDT::uint64_datatype()))]) + .map(vec![ + ScalarExpr::Column(0) + .call_binary(ScalarExpr::Column(0), BinaryFunc::AddUInt32) + .call_unary(UnaryFunc::Cast(CDT::uint64_datatype())), + ]) .unwrap() .project(vec![1]) .unwrap() diff --git a/src/flow/src/transform/expr.rs b/src/flow/src/transform/expr.rs index 229c28b541..40dcaef8a6 100644 --- a/src/flow/src/transform/expr.rs +++ b/src/flow/src/transform/expr.rs @@ -23,26 +23,26 @@ use datafusion::functions::all_default_functions; use datafusion_physical_expr::PhysicalExpr; use datafusion_substrait::logical_plan::consumer::DefaultSubstraitConsumer; use datatypes::data_type::ConcreteDataType as CDT; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; +use substrait_proto::proto::Expression; use substrait_proto::proto::expression::field_reference::ReferenceType::DirectReference; use substrait_proto::proto::expression::reference_segment::ReferenceType::StructField; use substrait_proto::proto::expression::{IfThen, RexType, ScalarFunction}; use substrait_proto::proto::function_argument::ArgType; -use substrait_proto::proto::Expression; use crate::error::{ DatafusionSnafu, DatatypesSnafu, Error, EvalSnafu, ExternalSnafu, InvalidQuerySnafu, NotImplementedSnafu, PlanSnafu, UnexpectedSnafu, }; use crate::expr::{ - BinaryFunc, DfScalarFunction, RawDfScalarFn, ScalarExpr, TypedExpr, UnaryFunc, - UnmaterializableFunc, VariadicFunc, TUMBLE_END, TUMBLE_START, + BinaryFunc, DfScalarFunction, RawDfScalarFn, ScalarExpr, TUMBLE_END, TUMBLE_START, TypedExpr, + UnaryFunc, UnmaterializableFunc, VariadicFunc, }; use crate::repr::{ColumnType, RelationDesc, RelationType}; use crate::transform::literal::{ from_substrait_literal, from_substrait_type, to_substrait_literal, }; -use crate::transform::{substrait_proto, FunctionExtensions}; +use crate::transform::{FunctionExtensions, substrait_proto}; // TODO(discord9): refactor plan to substrait convert of `arrow_cast` function thus remove this function /// ref to `arrow_schema::datatype` for type name @@ -682,12 +682,14 @@ mod test { ), ), mfp: MapFilterProject::new(1) - .map(vec![ScalarExpr::Column(0) - .call_unary(UnaryFunc::Cast(CDT::int64_datatype())) - .call_binary( - ScalarExpr::Literal(Value::from(1i64), CDT::int64_datatype()), - BinaryFunc::AddInt64, - )]) + .map(vec![ + ScalarExpr::Column(0) + .call_unary(UnaryFunc::Cast(CDT::int64_datatype())) + .call_binary( + ScalarExpr::Literal(Value::from(1i64), CDT::int64_datatype()), + BinaryFunc::AddInt64, + ), + ]) .unwrap() .project(vec![1]) .unwrap(), @@ -744,8 +746,10 @@ mod test { ), ), mfp: MapFilterProject::new(1) - .map(vec![ScalarExpr::Column(0) - .call_binary(ScalarExpr::Column(0), BinaryFunc::AddUInt32)]) + .map(vec![ + ScalarExpr::Column(0) + .call_binary(ScalarExpr::Column(0), BinaryFunc::AddUInt32), + ]) .unwrap() .project(vec![1]) .unwrap(), diff --git a/src/flow/src/transform/literal.rs b/src/flow/src/transform/literal.rs index 2640f606b2..eaebd8ff05 
100644 --- a/src/flow/src/transform/literal.rs +++ b/src/flow/src/transform/literal.rs @@ -28,8 +28,8 @@ use substrait::variation_const::{ UNSIGNED_INTEGER_TYPE_VARIATION_REF, }; use substrait_proto::proto; -use substrait_proto::proto::expression::literal::{LiteralType, PrecisionTimestamp}; use substrait_proto::proto::expression::Literal; +use substrait_proto::proto::expression::literal::{LiteralType, PrecisionTimestamp}; use substrait_proto::proto::r#type::Kind; use crate::error::{Error, NotImplementedSnafu, PlanSnafu, UnexpectedSnafu}; diff --git a/src/flow/src/transform/plan.rs b/src/flow/src/transform/plan.rs index bccfd2850a..dd8d5bb334 100644 --- a/src/flow/src/transform/plan.rs +++ b/src/flow/src/transform/plan.rs @@ -20,13 +20,13 @@ use substrait::substrait_proto_df::proto::{FilterRel, ReadRel}; use substrait_proto::proto::expression::MaskExpression; use substrait_proto::proto::read_rel::ReadType; use substrait_proto::proto::rel::RelType; -use substrait_proto::proto::{plan_rel, Plan as SubPlan, ProjectRel, Rel}; +use substrait_proto::proto::{Plan as SubPlan, ProjectRel, Rel, plan_rel}; use crate::error::{Error, InvalidQuerySnafu, NotImplementedSnafu, PlanSnafu, UnexpectedSnafu}; use crate::expr::{MapFilterProject, TypedExpr}; use crate::plan::{Plan, TypedPlan}; use crate::repr::{self, RelationType}; -use crate::transform::{substrait_proto, FlownodeContext, FunctionExtensions}; +use crate::transform::{FlownodeContext, FunctionExtensions, substrait_proto}; impl TypedPlan { /// Convert Substrait Plan into Flow's TypedPlan @@ -39,18 +39,18 @@ impl TypedPlan { // Parse relations match plan.relations.len() { - 1 => { - match plan.relations[0].rel_type.as_ref() { + 1 => match plan.relations[0].rel_type.as_ref() { Some(rt) => match rt { plan_rel::RelType::Rel(rel) => { Ok(TypedPlan::from_substrait_rel(ctx, rel, &function_extension).await?) - }, + } plan_rel::RelType::Root(root) => { let input = root.input.as_ref().with_context(|| InvalidQuerySnafu { reason: "Root relation without input", })?; - let mut ret = TypedPlan::from_substrait_rel(ctx, input, &function_extension).await?; + let mut ret = + TypedPlan::from_substrait_rel(ctx, input, &function_extension).await?; if !root.names.is_empty() { ret.schema = ret.schema.clone().try_with_names(root.names.clone())?; @@ -59,14 +59,13 @@ impl TypedPlan { Ok(ret) } }, - None => plan_err!("Cannot parse plan relation: None") - } - }, - _ => not_impl_err!( - "Substrait plan with more than 1 relation trees not supported. Number of relation trees: {:?}", - plan.relations.len() - ) - } + None => plan_err!("Cannot parse plan relation: None"), + }, + _ => not_impl_err!( + "Substrait plan with more than 1 relation trees not supported. 
Number of relation trees: {:?}", + plan.relations.len() + ), + } } #[async_recursion::async_recursion] @@ -240,8 +239,8 @@ mod test { use crate::expr::GlobalId; use crate::plan::{Plan, TypedPlan}; use crate::repr::{ColumnType, RelationType}; - use crate::transform::test::{create_test_ctx, create_test_query_engine, sql_to_substrait}; use crate::transform::CDT; + use crate::transform::test::{create_test_ctx, create_test_query_engine, sql_to_substrait}; #[tokio::test] async fn test_select() { diff --git a/src/flow/src/utils.rs b/src/flow/src/utils.rs index 5e01d0bfa4..8f28ed23c7 100644 --- a/src/flow/src/utils.rs +++ b/src/flow/src/utils.rs @@ -22,13 +22,13 @@ use common_meta::key::flow::flow_state::FlowStat; use common_telemetry::trace; use datatypes::value::Value; use get_size2::GetSize; -use smallvec::{smallvec, SmallVec}; -use tokio::sync::{mpsc, oneshot, RwLock}; +use smallvec::{SmallVec, smallvec}; +use tokio::sync::{RwLock, mpsc, oneshot}; use tokio::time::Instant; use crate::error::InternalSnafu; use crate::expr::{EvalError, ScalarExpr}; -use crate::repr::{value_to_internal_ts, DiffRow, Duration, KeyValDiffRow, Row, Timestamp}; +use crate::repr::{DiffRow, Duration, KeyValDiffRow, Row, Timestamp, value_to_internal_ts}; /// A batch of updates, arranged by key pub type Batch = BTreeMap>; @@ -173,11 +173,11 @@ impl KeyExpiryManager { .or_default() .insert(row.clone()); - if let Some(expire_time) = self.compute_expiration_timestamp(now) { - if expire_time > event_ts { - // return how much time it's expired - return Ok(Some(expire_time - event_ts)); - } + if let Some(expire_time) = self.compute_expiration_timestamp(now) + && expire_time > event_ts + { + // return how much time it's expired + return Ok(Some(expire_time - event_ts)); } Ok(None) @@ -195,11 +195,11 @@ impl KeyExpiryManager { return Ok(None); }; - if let Some(expire_time) = self.compute_expiration_timestamp(now) { - if expire_time > event_ts { - // return how much time it's expired - return Ok(Some(expire_time - event_ts)); - } + if let Some(expire_time) = self.compute_expiration_timestamp(now) + && expire_time > event_ts + { + // return how much time it's expired + return Ok(Some(expire_time - event_ts)); } Ok(None) @@ -365,17 +365,15 @@ impl Arrangement { for ((key, val), update_ts, diff) in updates { // check if the key is expired - if let Some(s) = &mut self.expire_state { - if let Some(expired_by) = s.get_expire_duration_and_update_event_ts(now, &key)? { - max_expired_by = max_expired_by.max(Some(expired_by)); - trace!( - "Expired key: {:?}, expired by: {:?} with time being now={}", - key, - expired_by, - now - ); - continue; - } + if let Some(s) = &mut self.expire_state + && let Some(expired_by) = s.get_expire_duration_and_update_event_ts(now, &key)? + { + max_expired_by = max_expired_by.max(Some(expired_by)); + trace!( + "Expired key: {:?}, expired by: {:?} with time being now={}", + key, expired_by, now + ); + continue; } // If the `highest_ts` is less than `update_ts`, we need to create a new batch with key being `update_ts`. @@ -499,13 +497,12 @@ impl Arrangement { for (_, batch) in batches_to_compact { for (key, updates) in batch { // check if the key is expired - if let Some(s) = &mut self.expire_state { - if let Some(expired_by) = + if let Some(s) = &mut self.expire_state + && let Some(expired_by) = s.get_expire_duration_and_update_event_ts(now, &key)? 
- { - max_expired_by = max_expired_by.max(Some(expired_by)); - continue; - } + { + max_expired_by = max_expired_by.max(Some(expired_by)); + continue; } let mut row = compacting_batch @@ -563,12 +560,12 @@ impl Arrangement { /// Expire keys in now that are older than expire_time, intended for reducing memory usage and limit late data arrive pub fn truncate_expired_keys(&mut self, now: Timestamp) { - if let Some(s) = &mut self.expire_state { - if let Some(expired_keys) = s.remove_expired_keys(now) { - for key in expired_keys { - for (_, batch) in self.spine.iter_mut() { - batch.remove(&key); - } + if let Some(s) = &mut self.expire_state + && let Some(expired_keys) = s.remove_expired_keys(now) + { + for key in expired_keys { + for (_, batch) in self.spine.iter_mut() { + batch.remove(&key); } } } diff --git a/src/frontend/src/events.rs b/src/frontend/src/events.rs index 958a27a2e8..75e4dc15db 100644 --- a/src/frontend/src/events.rs +++ b/src/frontend/src/events.rs @@ -20,8 +20,8 @@ use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_PRIVATE_SCHEMA_NAME}; use common_error::ext::BoxedError; use common_event_recorder::error::{InsertEventsSnafu, Result}; use common_event_recorder::{ - build_row_inserts_request, group_events_by_type, Event, EventHandler, - DEFAULT_COMPACTION_TIME_WINDOW, + DEFAULT_COMPACTION_TIME_WINDOW, Event, EventHandler, build_row_inserts_request, + group_events_by_type, }; use common_frontend::slow_query_event::SLOW_QUERY_EVENT_TYPE; use datafusion::common::HashMap; diff --git a/src/frontend/src/frontend.rs b/src/frontend/src/frontend.rs index 137e988154..bf2e7a0558 100644 --- a/src/frontend/src/frontend.rs +++ b/src/frontend/src/frontend.rs @@ -33,8 +33,8 @@ use snafu::ResultExt; use crate::error; use crate::error::Result; use crate::heartbeat::HeartbeatTask; -use crate::instance::prom_store::ExportMetricHandler; use crate::instance::Instance; +use crate::instance::prom_store::ExportMetricHandler; use crate::service_config::{ InfluxdbOptions, JaegerOptions, MysqlOptions, OpentsdbOptions, OtlpOptions, PostgresOptions, PromStoreOptions, diff --git a/src/frontend/src/heartbeat/tests.rs b/src/frontend/src/heartbeat/tests.rs index c44d3a15d1..b3aff9e8fd 100644 --- a/src/frontend/src/heartbeat/tests.rs +++ b/src/frontend/src/heartbeat/tests.rs @@ -23,9 +23,9 @@ use common_meta::heartbeat::handler::{ }; use common_meta::heartbeat::mailbox::{HeartbeatMailbox, MessageMeta}; use common_meta::instruction::{CacheIdent, Instruction}; +use common_meta::key::MetadataKey; use common_meta::key::schema_name::{SchemaName, SchemaNameKey}; use common_meta::key::table_info::TableInfoKey; -use common_meta::key::MetadataKey; use partition::manager::TableRouteCacheInvalidator; use table::metadata::TableId; use tokio::sync::mpsc; @@ -98,11 +98,13 @@ async fn test_invalidate_table_cache_handler() { ) .await; - assert!(!backend - .inner - .lock() - .unwrap() - .contains_key(&table_info_key.to_bytes())); + assert!( + !backend + .inner + .lock() + .unwrap() + .contains_key(&table_info_key.to_bytes()) + ); // removes a invalid key handle_instruction( @@ -141,11 +143,13 @@ async fn test_invalidate_schema_key_handler() { ) .await; - assert!(!backend - .inner - .lock() - .unwrap() - .contains_key(&schema_key.to_bytes())); + assert!( + !backend + .inner + .lock() + .unwrap() + .contains_key(&schema_key.to_bytes()) + ); // removes a invalid key handle_instruction( diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs index 495ab2bee4..fe2cf2b07c 100644 --- 
a/src/frontend/src/instance.rs +++ b/src/frontend/src/instance.rs @@ -32,30 +32,30 @@ use std::time::{Duration, SystemTime}; use async_stream::stream; use async_trait::async_trait; use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq}; +use catalog::CatalogManagerRef; use catalog::process_manager::{ ProcessManagerRef, QueryStatement as CatalogQueryStatement, SlowQueryTimer, }; -use catalog::CatalogManagerRef; use client::OutputData; -use common_base::cancellation::CancellableFuture; use common_base::Plugins; +use common_base::cancellation::CancellableFuture; use common_config::KvBackendConfig; use common_error::ext::{BoxedError, ErrorExt}; use common_event_recorder::EventRecorderRef; use common_meta::cache_invalidator::CacheInvalidatorRef; +use common_meta::key::TableMetadataManagerRef; use common_meta::key::runtime_switch::RuntimeSwitchManager; use common_meta::key::table_name::TableNameKey; -use common_meta::key::TableMetadataManagerRef; use common_meta::kv_backend::KvBackendRef; use common_meta::node_manager::NodeManagerRef; use common_meta::procedure_executor::ProcedureExecutorRef; use common_meta::state_store::KvStateStore; +use common_procedure::ProcedureManagerRef; use common_procedure::local::{LocalManager, ManagerConfig}; use common_procedure::options::ProcedureConfig; -use common_procedure::ProcedureManagerRef; use common_query::Output; -use common_recordbatch::error::StreamTimeoutSnafu; use common_recordbatch::RecordBatchStreamWrapper; +use common_recordbatch::error::StreamTimeoutSnafu; use common_telemetry::logging::SlowQueryOptions; use common_telemetry::{debug, error, info, tracing}; use dashmap::DashMap; @@ -70,11 +70,11 @@ use partition::manager::PartitionRuleManagerRef; use pipeline::pipeline_operator::PipelineOperator; use prometheus::HistogramTimer; use promql_parser::label::Matcher; +use query::QueryEngineRef; use query::metrics::OnDone; use query::parser::{PromQuery, QueryLanguageParser, QueryStatement}; -use query::query_engine::options::{validate_catalog_and_schema, QueryOptions}; use query::query_engine::DescribeResult; -use query::QueryEngineRef; +use query::query_engine::options::{QueryOptions, validate_catalog_and_schema}; use servers::error::{ self as server_error, AuthSnafu, CommonMetaSnafu, ExecuteQuerySnafu, OtlpMetricModeIncompatibleSnafu, ParsePromQLSnafu, UnexpectedResultSnafu, @@ -309,10 +309,10 @@ impl Instance { match stmt { Statement::Query(_) | Statement::Explain(_) | Statement::Delete(_) => { // TODO: remove this when format is supported in datafusion - if let Statement::Explain(explain) = &stmt { - if let Some(format) = explain.format() { - query_ctx.set_explain_format(format.to_string()); - } + if let Statement::Explain(explain) = &stmt + && let Some(format) = explain.format() + { + query_ctx.set_explain_format(format.to_string()); } self.plan_and_exec_sql(stmt, &query_ctx, query_interceptor) @@ -548,10 +548,12 @@ impl SqlQueryHandler for Instance { { Ok(stmts) => { if stmts.is_empty() { - return vec![InvalidSqlSnafu { - err_msg: "empty statements", - } - .fail()]; + return vec![ + InvalidSqlSnafu { + err_msg: "empty statements", + } + .fail(), + ]; } let mut results = Vec::with_capacity(stmts.len()); diff --git a/src/frontend/src/instance/builder.rs b/src/frontend/src/instance/builder.rs index cb4923763c..04edecbc5a 100644 --- a/src/frontend/src/instance/builder.rs +++ b/src/frontend/src/instance/builder.rs @@ -15,14 +15,14 @@ use std::sync::Arc; use cache::{TABLE_FLOWNODE_SET_CACHE_NAME, TABLE_ROUTE_CACHE_NAME}; -use 
catalog::process_manager::ProcessManagerRef; use catalog::CatalogManagerRef; +use catalog::process_manager::ProcessManagerRef; use common_base::Plugins; use common_event_recorder::EventRecorderImpl; use common_meta::cache::{LayeredCacheRegistryRef, TableRouteCacheRef}; use common_meta::cache_invalidator::{CacheInvalidatorRef, DummyCacheInvalidator}; -use common_meta::key::flow::FlowMetadataManager; use common_meta::key::TableMetadataManager; +use common_meta::key::flow::FlowMetadataManager; use common_meta::kv_backend::KvBackendRef; use common_meta::node_manager::NodeManagerRef; use common_meta::procedure_executor::ProcedureExecutorRef; @@ -36,15 +36,15 @@ use operator::statement::{StatementExecutor, StatementExecutorRef}; use operator::table::TableMutationOperator; use partition::manager::PartitionRuleManager; use pipeline::pipeline_operator::PipelineOperator; -use query::region_query::RegionQueryHandlerFactoryRef; use query::QueryEngineFactory; +use query::region_query::RegionQueryHandlerFactoryRef; use snafu::OptionExt; use crate::error::{self, Result}; use crate::events::EventHandlerImpl; use crate::frontend::FrontendOptions; -use crate::instance::region_query::FrontendRegionQueryHandler; use crate::instance::Instance; +use crate::instance::region_query::FrontendRegionQueryHandler; use crate::limiter::Limiter; /// The frontend [`Instance`] builder. diff --git a/src/frontend/src/instance/grpc.rs b/src/frontend/src/instance/grpc.rs index 8b6983f52b..97fb927540 100644 --- a/src/frontend/src/instance/grpc.rs +++ b/src/frontend/src/instance/grpc.rs @@ -26,10 +26,10 @@ use async_trait::async_trait; use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq}; use common_base::AffectedRows; use common_error::ext::BoxedError; -use common_grpc::flight::FlightDecoder; use common_grpc::FlightData; -use common_query::logical_plan::add_insert_to_logical_plan; +use common_grpc::flight::FlightDecoder; use common_query::Output; +use common_query::logical_plan::add_insert_to_logical_plan; use common_telemetry::tracing::{self}; use datafusion::datasource::DefaultTableSource; use query::parser::PromQuery; @@ -37,17 +37,17 @@ use servers::interceptor::{GrpcQueryInterceptor, GrpcQueryInterceptorRef}; use servers::query_handler::grpc::GrpcQueryHandler; use servers::query_handler::sql::SqlQueryHandler; use session::context::QueryContextRef; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; +use table::TableRef; use table::table::adapter::DfTableProviderAdapter; use table::table_name::TableName; -use table::TableRef; use crate::error::{ CatalogSnafu, Error, ExternalSnafu, IncompleteGrpcRequestSnafu, NotSupportedSnafu, PermissionSnafu, PlanStatementSnafu, Result, SubstraitDecodeLogicalPlanSnafu, TableNotFoundSnafu, TableOperationSnafu, }; -use crate::instance::{attach_timer, Instance}; +use crate::instance::{Instance, attach_timer}; use crate::metrics::{ GRPC_HANDLE_PLAN_ELAPSED, GRPC_HANDLE_PROMQL_ELAPSED, GRPC_HANDLE_SQL_ELAPSED, }; diff --git a/src/frontend/src/instance/jaeger.rs b/src/frontend/src/instance/jaeger.rs index f1a1fad328..1fff8a02b3 100644 --- a/src/frontend/src/instance/jaeger.rs +++ b/src/frontend/src/instance/jaeger.rs @@ -17,7 +17,7 @@ use std::sync::Arc; use async_trait::async_trait; use catalog::CatalogManagerRef; -use common_catalog::consts::{trace_services_table_name, TRACE_TABLE_NAME}; +use common_catalog::consts::{TRACE_TABLE_NAME, trace_services_table_name}; use common_function::function::{Function, FunctionRef}; use 
common_function::scalars::json::json_get::{ JsonGetBool, JsonGetFloat, JsonGetInt, JsonGetString, @@ -28,10 +28,10 @@ use common_query::{Output, OutputData}; use common_recordbatch::adapter::RecordBatchStreamAdapter; use common_recordbatch::util; use datafusion::dataframe::DataFrame; -use datafusion::execution::context::SessionContext; use datafusion::execution::SessionStateBuilder; +use datafusion::execution::context::SessionContext; use datafusion_expr::select_expr::SelectExpr; -use datafusion_expr::{col, lit, lit_timestamp_nano, wildcard, Expr, SortExpr}; +use datafusion_expr::{Expr, SortExpr, col, lit, lit_timestamp_nano, wildcard}; use datatypes::value::ValueRef; use query::QueryEngineRef; use serde_json::Value as JsonValue; @@ -39,7 +39,7 @@ use servers::error::{ CatalogSnafu, CollectRecordbatchSnafu, DataFusionSnafu, Result as ServerResult, TableNotFoundSnafu, }; -use servers::http::jaeger::{QueryTraceParams, JAEGER_QUERY_TABLE_NAME_KEY}; +use servers::http::jaeger::{JAEGER_QUERY_TABLE_NAME_KEY, QueryTraceParams}; use servers::otlp::trace::{ DURATION_NANO_COLUMN, SERVICE_NAME_COLUMN, SPAN_ATTRIBUTES_COLUMN, SPAN_KIND_COLUMN, SPAN_KIND_PREFIX, SPAN_NAME_COLUMN, TIMESTAMP_COLUMN, TRACE_ID_COLUMN, @@ -251,14 +251,16 @@ impl JaegerQueryHandler for Instance { // timestamp >= {start_time} AND // timestamp <= {end_time} // ``` - let mut filters = vec![col(TRACE_ID_COLUMN).in_list( - trace_ids_from_output(output) - .await? - .iter() - .map(lit) - .collect::>(), - false, - )]; + let mut filters = vec![ + col(TRACE_ID_COLUMN).in_list( + trace_ids_from_output(output) + .await? + .iter() + .map(lit) + .collect::>(), + false, + ), + ]; if let Some(start_time) = query_params.start_time { filters.push(col(TIMESTAMP_COLUMN).gt_eq(lit_timestamp_nano(start_time))); diff --git a/src/frontend/src/instance/prom_store.rs b/src/frontend/src/instance/prom_store.rs index 3cf5a200fd..5c70b484dd 100644 --- a/src/frontend/src/instance/prom_store.rs +++ b/src/frontend/src/instance/prom_store.rs @@ -23,15 +23,15 @@ use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq}; use client::OutputData; use common_catalog::format_full_table_name; use common_error::ext::BoxedError; -use common_query::prelude::GREPTIME_PHYSICAL_TABLE; use common_query::Output; +use common_query::prelude::GREPTIME_PHYSICAL_TABLE; use common_recordbatch::RecordBatches; use common_telemetry::{debug, tracing}; use operator::insert::InserterRef; use operator::statement::StatementExecutor; use prost::Message; use servers::error::{self, AuthSnafu, Result as ServerResult}; -use servers::http::header::{collect_plan_metrics, CONTENT_ENCODING_SNAPPY, CONTENT_TYPE_PROTOBUF}; +use servers::http::header::{CONTENT_ENCODING_SNAPPY, CONTENT_TYPE_PROTOBUF, collect_plan_metrics}; use servers::http::prom_store::PHYSICAL_TABLE_PARAM; use servers::interceptor::{PromStoreProtocolInterceptor, PromStoreProtocolInterceptorRef}; use servers::prom_store::{self, Metrics}; diff --git a/src/frontend/src/server.rs b/src/frontend/src/server.rs index 73c86deabc..cdd2c01c60 100644 --- a/src/frontend/src/server.rs +++ b/src/frontend/src/server.rs @@ -34,7 +34,7 @@ use servers::postgres::PostgresServer; use servers::query_handler::grpc::ServerGrpcQueryHandlerAdapter; use servers::query_handler::sql::ServerSqlQueryHandlerAdapter; use servers::server::{Server, ServerHandlers}; -use servers::tls::{maybe_watch_tls_config, ReloadableTlsServerConfig}; +use servers::tls::{ReloadableTlsServerConfig, maybe_watch_tls_config}; use snafu::ResultExt; use crate::error::{self, 
Result, StartServerSnafu, TomlFormatSnafu}; diff --git a/src/index/benches/bytes_to_u64_vec.rs b/src/index/benches/bytes_to_u64_vec.rs index 7e3c304ef5..52f2059151 100644 --- a/src/index/benches/bytes_to_u64_vec.rs +++ b/src/index/benches/bytes_to_u64_vec.rs @@ -15,7 +15,7 @@ use std::hint::black_box; use bytes::Bytes; -use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; +use criterion::{BenchmarkId, Criterion, Throughput, criterion_group, criterion_main}; use index::bloom_filter::reader::bytes_to_u64_vec; use rand::{Rng, SeedableRng}; use rand_chacha::ChaCha8Rng; diff --git a/src/index/benches/tokenizer_bench.rs b/src/index/benches/tokenizer_bench.rs index 7e86027553..e365c884b2 100644 --- a/src/index/benches/tokenizer_bench.rs +++ b/src/index/benches/tokenizer_bench.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use index::fulltext_index::tokenizer::{EnglishTokenizer, Tokenizer}; fn bench_english_tokenizer(c: &mut Criterion) { @@ -20,14 +20,38 @@ fn bench_english_tokenizer(c: &mut Criterion) { let texts = vec![ ("short", "Hello, world! This is a test."), - ("medium", "The quick brown fox jumps over the lazy dog. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed do eiusmod tempor incididunt ut labore et dolore magna aliqua."), - ("long", "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt."), - ("mixed_chars", "Hello123 world!!! This-is_a.test@example.com with various: punctuation; and [brackets] {curly} (parentheses) & symbols* + numbers456."), - ("numbers_heavy", "test123 456test test789 abc123def 999888777 hello42world 123 456 789 mix1ng l3tt3rs 4nd numb3rs"), - ("punctuation_heavy", "Hello!!! World??? This...is...a...test... With lots of!!! punctuation??? marks!!! And... ellipses???"), - ("postgres log", "2025-08-01 21:09:28.928 UTC [27] LOG: checkpoint complete: wrote 0 buffers (0.0%); 0 WAL file(s) added, 0 removed, 0 recycled; write=0.001 s, sync=0.001 s, total=0.003 s; sync files=0, longest=0.000 s, average=0.000 s; distance=0 kB, estimate=5 kB; lsn=0/1992868, redo lsn=0/1992868"), - ("many_short_words", "a b c d e f g h i j k l m n o p q r s t u v w x y z"), - ("with_unicode", "这是,一个测试。🈶一些 Unicøde 字符比如 café and naïve words."), + ( + "medium", + "The quick brown fox jumps over the lazy dog. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", + ), + ( + "long", + "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. 
Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt.", + ), + ( + "mixed_chars", + "Hello123 world!!! This-is_a.test@example.com with various: punctuation; and [brackets] {curly} (parentheses) & symbols* + numbers456.", + ), + ( + "numbers_heavy", + "test123 456test test789 abc123def 999888777 hello42world 123 456 789 mix1ng l3tt3rs 4nd numb3rs", + ), + ( + "punctuation_heavy", + "Hello!!! World??? This...is...a...test... With lots of!!! punctuation??? marks!!! And... ellipses???", + ), + ( + "postgres log", + "2025-08-01 21:09:28.928 UTC [27] LOG: checkpoint complete: wrote 0 buffers (0.0%); 0 WAL file(s) added, 0 removed, 0 recycled; write=0.001 s, sync=0.001 s, total=0.003 s; sync files=0, longest=0.000 s, average=0.000 s; distance=0 kB, estimate=5 kB; lsn=0/1992868, redo lsn=0/1992868", + ), + ( + "many_short_words", + "a b c d e f g h i j k l m n o p q r s t u v w x y z", + ), + ( + "with_unicode", + "这是,一个测试。🈶一些 Unicøde 字符比如 café and naïve words.", + ), ]; let mut group = c.benchmark_group("english_tokenizer"); diff --git a/src/index/src/bloom_filter/applier.rs b/src/index/src/bloom_filter/applier.rs index 1bd58a08f4..18332d4815 100644 --- a/src/index/src/bloom_filter/applier.rs +++ b/src/index/src/bloom_filter/applier.rs @@ -19,9 +19,9 @@ use fastbloom::BloomFilter; use greptime_proto::v1::index::BloomFilterMeta; use itertools::Itertools; +use crate::Bytes; use crate::bloom_filter::error::Result; use crate::bloom_filter::reader::BloomFilterReader; -use crate::Bytes; /// `InListPredicate` contains a list of acceptable values. A value needs to match at least /// one of the elements (logical OR semantic) for the predicate to be satisfied. @@ -202,8 +202,8 @@ fn intersect_ranges(lhs: &[Range], rhs: &[Range]) -> Vec BloomFilterMetaReader { #[cfg(test)] mod tests { - use std::sync::atomic::AtomicUsize; use std::sync::Arc; + use std::sync::atomic::AtomicUsize; use futures::io::Cursor; diff --git a/src/index/src/fulltext_index/create.rs b/src/index/src/fulltext_index/create.rs index 46f18999cc..7bff3fa506 100644 --- a/src/index/src/fulltext_index/create.rs +++ b/src/index/src/fulltext_index/create.rs @@ -20,7 +20,7 @@ use puffin::puffin_manager::{PuffinWriter, PutOptions}; pub use crate::fulltext_index::create::bloom_filter::BloomFilterFulltextIndexCreator; pub use crate::fulltext_index::create::tantivy::{ - TantivyFulltextIndexCreator, ROWID_FIELD_NAME, TEXT_FIELD_NAME, + ROWID_FIELD_NAME, TEXT_FIELD_NAME, TantivyFulltextIndexCreator, }; use crate::fulltext_index::error::Result; diff --git a/src/index/src/fulltext_index/create/bloom_filter.rs b/src/index/src/fulltext_index/create/bloom_filter.rs index 2249b2f2ee..28648f9b54 100644 --- a/src/index/src/fulltext_index/create/bloom_filter.rs +++ b/src/index/src/fulltext_index/create/bloom_filter.rs @@ -13,8 +13,8 @@ // limitations under the License. 
use std::collections::HashMap; -use std::sync::atomic::AtomicUsize; use std::sync::Arc; +use std::sync::atomic::AtomicUsize; use async_trait::async_trait; use common_error::ext::BoxedError; diff --git a/src/index/src/fulltext_index/create/tantivy.rs b/src/index/src/fulltext_index/create/tantivy.rs index 274fea596e..6f05921e3a 100644 --- a/src/index/src/fulltext_index/create/tantivy.rs +++ b/src/index/src/fulltext_index/create/tantivy.rs @@ -20,9 +20,9 @@ use common_error::ext::BoxedError; use puffin::puffin_manager::{PuffinWriter, PutOptions}; use snafu::{OptionExt, ResultExt}; use tantivy::indexer::NoMergePolicy; -use tantivy::schema::{Schema, STORED, TEXT}; +use tantivy::schema::{STORED, Schema, TEXT}; use tantivy::store::{Compressor, ZstdCompressor}; -use tantivy::{doc, Index, IndexWriter}; +use tantivy::{Index, IndexWriter, doc}; use crate::fulltext_index::create::FulltextIndexCreator; use crate::fulltext_index::error::{ @@ -163,10 +163,10 @@ mod tests { use common_test_util::temp_dir::create_temp_dir; use futures::AsyncRead; + use tantivy::TantivyDocument; use tantivy::collector::DocSetCollector; use tantivy::query::QueryParser; use tantivy::schema::Value; - use tantivy::TantivyDocument; use super::*; use crate::fulltext_index::Analyzer; diff --git a/src/index/src/fulltext_index/search/tantivy.rs b/src/index/src/fulltext_index/search/tantivy.rs index a55b599d21..d06a5cd329 100644 --- a/src/index/src/fulltext_index/search/tantivy.rs +++ b/src/index/src/fulltext_index/search/tantivy.rs @@ -24,12 +24,12 @@ use tantivy::query::QueryParser; use tantivy::schema::{Field, Value}; use tantivy::{Index, IndexReader, ReloadPolicy, TantivyDocument}; +use crate::fulltext_index::Config; use crate::fulltext_index::create::{ROWID_FIELD_NAME, TEXT_FIELD_NAME}; use crate::fulltext_index::error::{ Result, TantivyDocNotFoundSnafu, TantivyParserSnafu, TantivySnafu, }; use crate::fulltext_index::search::{FulltextIndexSearcher, RowId}; -use crate::fulltext_index::Config; /// `TantivyFulltextIndexSearcher` is a searcher using Tantivy. pub struct TantivyFulltextIndexSearcher { diff --git a/src/index/src/fulltext_index/tests.rs b/src/index/src/fulltext_index/tests.rs index a2a87a645a..abdf20e22d 100644 --- a/src/index/src/fulltext_index/tests.rs +++ b/src/index/src/fulltext_index/tests.rs @@ -15,7 +15,7 @@ use std::collections::BTreeSet; use std::sync::Arc; -use common_test_util::temp_dir::{create_temp_dir, TempDir}; +use common_test_util::temp_dir::{TempDir, create_temp_dir}; use puffin::puffin_manager::file_accessor::MockFileAccessor; use puffin::puffin_manager::fs_puffin_manager::FsPuffinManager; use puffin::puffin_manager::stager::BoundedStager; diff --git a/src/index/src/fulltext_index/tokenizer.rs b/src/index/src/fulltext_index/tokenizer.rs index e521b39018..b5093afb33 100644 --- a/src/index/src/fulltext_index/tokenizer.rs +++ b/src/index/src/fulltext_index/tokenizer.rs @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::fulltext_index::error::Result; use crate::Bytes; +use crate::fulltext_index::error::Result; lazy_static::lazy_static! 
{ static ref JIEBA: jieba_rs::Jieba = jieba_rs::Jieba::new(); diff --git a/src/index/src/inverted_index/create.rs b/src/index/src/inverted_index/create.rs index a0468fdcba..dade9705ac 100644 --- a/src/index/src/inverted_index/create.rs +++ b/src/index/src/inverted_index/create.rs @@ -17,10 +17,10 @@ pub mod sort_create; use async_trait::async_trait; +use crate::BytesRef; use crate::bitmap::BitmapType; use crate::inverted_index::error::Result; use crate::inverted_index::format::writer::InvertedIndexWriter; -use crate::BytesRef; /// `InvertedIndexCreator` provides functionality to construct an inverted index #[async_trait] diff --git a/src/index/src/inverted_index/create/sort/external_sort.rs b/src/index/src/inverted_index/create/sort/external_sort.rs index 3b4eaebc5c..12c49ca89d 100644 --- a/src/index/src/inverted_index/create/sort/external_sort.rs +++ b/src/index/src/inverted_index/create/sort/external_sort.rs @@ -16,8 +16,8 @@ use std::collections::{BTreeMap, VecDeque}; use std::mem; use std::num::NonZeroUsize; use std::ops::RangeInclusive; -use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicUsize, Ordering}; use async_trait::async_trait; use common_telemetry::{debug, error}; @@ -227,10 +227,10 @@ impl ExternalSorter { return Ok(()); } - if let Some(current_threshold) = self.current_memory_usage_threshold { - if memory_usage < current_threshold { - return Ok(()); - } + if let Some(current_threshold) = self.current_memory_usage_threshold + && memory_usage < current_threshold + { + return Ok(()); } let file_id = &format!("{:012}", self.total_row_count); diff --git a/src/index/src/inverted_index/create/sort/intermediate_rw.rs b/src/index/src/inverted_index/create/sort/intermediate_rw.rs index 94a743c67c..e6772fdb3e 100644 --- a/src/index/src/inverted_index/create/sort/intermediate_rw.rs +++ b/src/index/src/inverted_index/create/sort/intermediate_rw.rs @@ -33,15 +33,15 @@ mod codec_v1; use asynchronous_codec::{FramedRead, FramedWrite}; -use futures::{stream, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, StreamExt}; +use futures::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, StreamExt, stream}; use snafu::ResultExt; +use crate::Bytes; use crate::bitmap::{Bitmap, BitmapType}; use crate::inverted_index::create::sort::SortedStream; use crate::inverted_index::error::{ CloseSnafu, FlushSnafu, ReadSnafu, Result, UnknownIntermediateCodecMagicSnafu, WriteSnafu, }; -use crate::Bytes; /// `IntermediateWriter` serializes and writes intermediate data to the wrapped `writer` pub struct IntermediateWriter { diff --git a/src/index/src/inverted_index/create/sort/intermediate_rw/codec_v1.rs b/src/index/src/inverted_index/create/sort/intermediate_rw/codec_v1.rs index c8ca7c4332..357a5b4e9c 100644 --- a/src/index/src/inverted_index/create/sort/intermediate_rw/codec_v1.rs +++ b/src/index/src/inverted_index/create/sort/intermediate_rw/codec_v1.rs @@ -19,9 +19,9 @@ use bytes::{Buf, BufMut}; use greptime_proto::v1::index::BitmapType; use snafu::ResultExt; +use crate::Bytes; use crate::bitmap::Bitmap; use crate::inverted_index::error::{CommonIoSnafu, Error, Result}; -use crate::Bytes; const U64_LENGTH: usize = std::mem::size_of::(); diff --git a/src/index/src/inverted_index/create/sort/merge_stream.rs b/src/index/src/inverted_index/create/sort/merge_stream.rs index dbe95e2db5..98b4aee73b 100644 --- a/src/index/src/inverted_index/create/sort/merge_stream.rs +++ b/src/index/src/inverted_index/create/sort/merge_stream.rs @@ -16,13 +16,13 @@ use 
std::cmp::Ordering; use std::pin::Pin; use std::task::{Context, Poll}; -use futures::{ready, Stream, StreamExt}; +use futures::{Stream, StreamExt, ready}; use pin_project::pin_project; +use crate::Bytes; use crate::bitmap::Bitmap; use crate::inverted_index::create::sort::SortedStream; use crate::inverted_index::error::Result; -use crate::Bytes; /// A [`Stream`] implementation that merges two sorted streams into a single sorted stream #[pin_project] diff --git a/src/index/src/inverted_index/create/sort_create.rs b/src/index/src/inverted_index/create/sort_create.rs index 246e65d5ad..cb22179a69 100644 --- a/src/index/src/inverted_index/create/sort_create.rs +++ b/src/index/src/inverted_index/create/sort_create.rs @@ -18,12 +18,12 @@ use std::num::NonZeroUsize; use async_trait::async_trait; use snafu::ensure; +use crate::BytesRef; use crate::bitmap::BitmapType; -use crate::inverted_index::create::sort::{SortOutput, Sorter}; use crate::inverted_index::create::InvertedIndexCreator; +use crate::inverted_index::create::sort::{SortOutput, Sorter}; use crate::inverted_index::error::{InconsistentRowCountSnafu, Result}; use crate::inverted_index::format::writer::InvertedIndexWriter; -use crate::BytesRef; type IndexName = String; type SegmentRowCount = NonZeroUsize; @@ -119,13 +119,13 @@ mod tests { use std::collections::BTreeMap; use common_base::BitVec; - use futures::{stream, StreamExt}; + use futures::{StreamExt, stream}; use super::*; + use crate::Bytes; use crate::bitmap::Bitmap; use crate::inverted_index::error::Error; use crate::inverted_index::format::writer::{MockInvertedIndexWriter, ValueStream}; - use crate::Bytes; #[tokio::test] async fn test_sort_index_creator_basic() { diff --git a/src/index/src/inverted_index/error.rs b/src/index/src/inverted_index/error.rs index fd3b640f53..f9007fbbe4 100644 --- a/src/index/src/inverted_index/error.rs +++ b/src/index/src/inverted_index/error.rs @@ -80,7 +80,9 @@ pub enum Error { location: Location, }, - #[snafu(display("Unexpected inverted index footer payload size, max: {max_payload_size}, actual: {actual_payload_size}"))] + #[snafu(display( + "Unexpected inverted index footer payload size, max: {max_payload_size}, actual: {actual_payload_size}" + ))] UnexpectedFooterPayloadSize { max_payload_size: u64, actual_payload_size: u64, @@ -88,7 +90,9 @@ pub enum Error { location: Location, }, - #[snafu(display("Unexpected inverted index offset size, offset: {offset}, size: {size}, blob_size: {blob_size}, payload_size: {payload_size}"))] + #[snafu(display( + "Unexpected inverted index offset size, offset: {offset}, size: {size}, blob_size: {blob_size}, payload_size: {payload_size}" + ))] UnexpectedOffsetSize { offset: u64, size: u64, @@ -208,7 +212,9 @@ pub enum Error { location: Location, }, - #[snafu(display("Inconsistent row count, index_name: {index_name}, total_row_count: {total_row_count}, expected: {expected_row_count}"))] + #[snafu(display( + "Inconsistent row count, index_name: {index_name}, total_row_count: {total_row_count}, expected: {expected_row_count}" + ))] InconsistentRowCount { index_name: String, total_row_count: usize, diff --git a/src/index/src/inverted_index/format/reader.rs b/src/index/src/inverted_index/format/reader.rs index 2195d1f9e3..40fa22130a 100644 --- a/src/index/src/inverted_index/format/reader.rs +++ b/src/index/src/inverted_index/format/reader.rs @@ -22,9 +22,9 @@ use greptime_proto::v1::index::InvertedIndexMetas; use snafu::ResultExt; use crate::bitmap::{Bitmap, BitmapType}; +use crate::inverted_index::FstMap; use 
crate::inverted_index::error::{DecodeBitmapSnafu, DecodeFstSnafu, Result}; pub use crate::inverted_index::format::reader::blob::InvertedIndexBlobReader; -use crate::inverted_index::FstMap; mod blob; mod footer; diff --git a/src/index/src/inverted_index/format/reader/blob.rs b/src/index/src/inverted_index/format/reader/blob.rs index 2db0388cef..f48791e8f4 100644 --- a/src/index/src/inverted_index/format/reader/blob.rs +++ b/src/index/src/inverted_index/format/reader/blob.rs @@ -19,14 +19,14 @@ use async_trait::async_trait; use bytes::Bytes; use common_base::range_read::RangeReader; use greptime_proto::v1::index::InvertedIndexMetas; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use crate::inverted_index::error::{CommonIoSnafu, Result, UnexpectedBlobSizeSnafu}; -use crate::inverted_index::format::reader::footer::{ - InvertedIndexFooterReader, DEFAULT_PREFETCH_SIZE, -}; -use crate::inverted_index::format::reader::InvertedIndexReader; use crate::inverted_index::format::MIN_BLOB_SIZE; +use crate::inverted_index::format::reader::InvertedIndexReader; +use crate::inverted_index::format::reader::footer::{ + DEFAULT_PREFETCH_SIZE, InvertedIndexFooterReader, +}; /// Inverted index blob reader, implements [`InvertedIndexReader`] pub struct InvertedIndexBlobReader { diff --git a/src/index/src/inverted_index/format/reader/footer.rs b/src/index/src/inverted_index/format/reader/footer.rs index b5a855a455..2609eb6cbb 100644 --- a/src/index/src/inverted_index/format/reader/footer.rs +++ b/src/index/src/inverted_index/format/reader/footer.rs @@ -15,7 +15,7 @@ use common_base::range_read::RangeReader; use greptime_proto::v1::index::{InvertedIndexMeta, InvertedIndexMetas}; use prost::Message; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use crate::inverted_index::error::{ BlobSizeTooSmallSnafu, CommonIoSnafu, DecodeProtoSnafu, InvalidFooterPayloadSizeSnafu, Result, diff --git a/src/index/src/inverted_index/format/writer.rs b/src/index/src/inverted_index/format/writer.rs index 9c4307a094..cf4b01b560 100644 --- a/src/index/src/inverted_index/format/writer.rs +++ b/src/index/src/inverted_index/format/writer.rs @@ -20,10 +20,10 @@ use std::num::NonZeroUsize; use async_trait::async_trait; use futures::Stream; +use crate::Bytes; use crate::bitmap::{Bitmap, BitmapType}; use crate::inverted_index::error::Result; pub use crate::inverted_index::format::writer::blob::InvertedIndexBlobWriter; -use crate::Bytes; pub type ValueStream = Box> + Send + Unpin>; @@ -49,5 +49,5 @@ pub trait InvertedIndexWriter: Send { /// Finalizes the index writing process, ensuring all data is written. /// `total_row_count` and `segment_row_count` is used to fill in the metadata. 
async fn finish(&mut self, total_row_count: u64, segment_row_count: NonZeroUsize) - -> Result<()>; + -> Result<()>; } diff --git a/src/index/src/inverted_index/format/writer/blob.rs b/src/index/src/inverted_index/format/writer/blob.rs index aed0f9d894..5991284869 100644 --- a/src/index/src/inverted_index/format/writer/blob.rs +++ b/src/index/src/inverted_index/format/writer/blob.rs @@ -105,8 +105,8 @@ mod tests { use greptime_proto::v1::index::BitmapType; use super::*; - use crate::inverted_index::format::reader::{InvertedIndexBlobReader, InvertedIndexReader}; use crate::Bytes; + use crate::inverted_index::format::reader::{InvertedIndexBlobReader, InvertedIndexReader}; fn unpack(fst_value: u64) -> [u32; 2] { bytemuck::cast::(fst_value) diff --git a/src/index/src/inverted_index/format/writer/single.rs b/src/index/src/inverted_index/format/writer/single.rs index 0f60c53e11..16e49a40e8 100644 --- a/src/index/src/inverted_index/format/writer/single.rs +++ b/src/index/src/inverted_index/format/writer/single.rs @@ -17,9 +17,9 @@ use futures::{AsyncWrite, AsyncWriteExt, Stream, StreamExt}; use greptime_proto::v1::index::{InvertedIndexMeta, InvertedIndexStats}; use snafu::ResultExt; +use crate::Bytes; use crate::bitmap::{Bitmap, BitmapType}; use crate::inverted_index::error::{FstCompileSnafu, FstInsertSnafu, Result, WriteSnafu}; -use crate::Bytes; /// `SingleIndexWriter` writes values to the blob storage for an individual inverted index pub struct SingleIndexWriter { @@ -164,8 +164,8 @@ mod tests { use futures::stream; use super::*; - use crate::inverted_index::error::Error; use crate::Bytes; + use crate::inverted_index::error::Error; #[tokio::test] async fn test_single_index_writer_write_empty() { diff --git a/src/index/src/inverted_index/search/fst_apply/intersection_apply.rs b/src/index/src/inverted_index/search/fst_apply/intersection_apply.rs index 5bc227a982..7b551808e3 100644 --- a/src/index/src/inverted_index/search/fst_apply/intersection_apply.rs +++ b/src/index/src/inverted_index/search/fst_apply/intersection_apply.rs @@ -16,19 +16,19 @@ use std::mem::size_of; use fst::map::OpBuilder; use fst::{IntoStreamer, Streamer}; -use regex_automata::dfa::dense::DFA; +use regex_automata::Anchored; use regex_automata::dfa::Automaton; +use regex_automata::dfa::dense::DFA; use regex_automata::util::primitives::StateID; use regex_automata::util::start::Config; -use regex_automata::Anchored; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; +use crate::inverted_index::FstMap; use crate::inverted_index::error::{ EmptyPredicatesSnafu, IntersectionApplierWithInListSnafu, ParseDFASnafu, Result, }; use crate::inverted_index::search::fst_apply::FstApplier; use crate::inverted_index::search::predicate::{Predicate, Range}; -use crate::inverted_index::FstMap; /// `IntersectionFstApplier` applies intersection operations on an FstMap using specified ranges and regex patterns. 
pub struct IntersectionFstApplier { diff --git a/src/index/src/inverted_index/search/fst_apply/keys_apply.rs b/src/index/src/inverted_index/search/fst_apply/keys_apply.rs index eb30f38f5c..d565749452 100644 --- a/src/index/src/inverted_index/search/fst_apply/keys_apply.rs +++ b/src/index/src/inverted_index/search/fst_apply/keys_apply.rs @@ -15,16 +15,16 @@ use std::collections::BTreeSet; use std::mem::size_of; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; +use crate::Bytes; +use crate::inverted_index::FstMap; use crate::inverted_index::error::{ EmptyPredicatesSnafu, KeysApplierUnexpectedPredicatesSnafu, KeysApplierWithoutInListSnafu, ParseRegexSnafu, Result, }; use crate::inverted_index::search::fst_apply::FstApplier; use crate::inverted_index::search::predicate::Predicate; -use crate::inverted_index::FstMap; -use crate::Bytes; /// `KeysFstApplier` is responsible for applying a search using a set of predefined keys /// against an FstMap to fetch associated values. diff --git a/src/index/src/inverted_index/search/index_apply/predicates_apply.rs b/src/index/src/inverted_index/search/index_apply/predicates_apply.rs index 39b9235f40..ae22e79c74 100644 --- a/src/index/src/inverted_index/search/index_apply/predicates_apply.rs +++ b/src/index/src/inverted_index/search/index_apply/predicates_apply.rs @@ -171,10 +171,10 @@ mod tests { use super::*; use crate::bitmap::Bitmap; + use crate::inverted_index::FstMap; use crate::inverted_index::error::Error; use crate::inverted_index::format::reader::MockInvertedIndexReader; use crate::inverted_index::search::fst_apply::MockFstApplier; - use crate::inverted_index::FstMap; fn s(s: &'static str) -> String { s.to_owned() @@ -223,11 +223,9 @@ mod tests { .expect_metadata() .returning(|| Ok(mock_metas([("tag-0", 0)]))); mock_reader.expect_fst_vec().returning(|_ranges| { - Ok(vec![FstMap::from_iter([( - b"tag-0_value-0", - fst_value(2, 1), - )]) - .unwrap()]) + Ok(vec![ + FstMap::from_iter([(b"tag-0_value-0", fst_value(2, 1))]).unwrap(), + ]) }); mock_reader.expect_bitmap_deque().returning(|arg| { @@ -256,11 +254,9 @@ mod tests { .expect_metadata() .returning(|| Ok(mock_metas([("tag-0", 0)]))); mock_reader.expect_fst_vec().returning(|_range| { - Ok(vec![FstMap::from_iter([( - b"tag-0_value-1", - fst_value(2, 1), - )]) - .unwrap()]) + Ok(vec![ + FstMap::from_iter([(b"tag-0_value-1", fst_value(2, 1))]).unwrap(), + ]) }); let output = applier .apply(SearchContext::default(), &mut mock_reader) diff --git a/src/log-query/src/log_query.rs b/src/log-query/src/log_query.rs index 37ab87070c..a6398dcd4d 100644 --- a/src/log-query/src/log_query.rs +++ b/src/log-query/src/log_query.rs @@ -240,14 +240,14 @@ impl TimeFilter { } // Validate that end is after start - if let (Some(start), Some(end)) = (&start_dt, &end_dt) { - if end <= start { - return Err(EndBeforeStartSnafu { - start: start.to_rfc3339(), - end: end.to_rfc3339(), - } - .build()); + if let (Some(start), Some(end)) = (&start_dt, &end_dt) + && end <= start + { + return Err(EndBeforeStartSnafu { + start: start.to_rfc3339(), + end: end.to_rfc3339(), } + .build()); } // Update the fields with canonicalized timestamps diff --git a/src/log-store/src/kafka.rs b/src/log-store/src/kafka.rs index b16834cb97..2e123cc059 100644 --- a/src/log-store/src/kafka.rs +++ b/src/log-store/src/kafka.rs @@ -24,7 +24,7 @@ pub(crate) mod util; pub(crate) mod worker; pub use client_manager::DEFAULT_PARTITION; -pub use index::{default_index_file, GlobalIndexCollector}; +pub use index::{GlobalIndexCollector, 
default_index_file}; use serde::{Deserialize, Serialize}; use store_api::logstore::entry::Id as EntryId; diff --git a/src/log-store/src/kafka/client_manager.rs b/src/log-store/src/kafka/client_manager.rs index 5618557e0f..de87e6f376 100644 --- a/src/log-store/src/kafka/client_manager.rs +++ b/src/log-store/src/kafka/client_manager.rs @@ -15,11 +15,11 @@ use std::collections::HashMap; use std::sync::Arc; -use common_wal::config::kafka::common::DEFAULT_BACKOFF_CONFIG; use common_wal::config::kafka::DatanodeKafkaConfig; +use common_wal::config::kafka::common::DEFAULT_BACKOFF_CONFIG; use dashmap::DashMap; -use rskafka::client::partition::{Compression, PartitionClient, UnknownTopicHandling}; use rskafka::client::ClientBuilder; +use rskafka::client::partition::{Compression, PartitionClient, UnknownTopicHandling}; use snafu::ResultExt; use store_api::logstore::provider::KafkaProvider; use tokio::sync::{Mutex, RwLock}; diff --git a/src/log-store/src/kafka/consumer.rs b/src/log-store/src/kafka/consumer.rs index d8052e609f..8a7cba5a4b 100644 --- a/src/log-store/src/kafka/consumer.rs +++ b/src/log-store/src/kafka/consumer.rs @@ -272,8 +272,7 @@ impl Stream for Consumer { .iter() .map(|record| record.offset) .collect::>(), - records_and_offsets - .len() + records_and_offsets.len() ); this.buffer.extend(records_and_offsets); continue; @@ -295,8 +294,8 @@ mod tests { use std::sync::Arc; use chrono::{TimeZone, Utc}; - use futures::future::Fuse; use futures::TryStreamExt; + use futures::future::Fuse; use rskafka::record::{Record, RecordAndOffset}; use super::*; diff --git a/src/log-store/src/kafka/index.rs b/src/log-store/src/kafka/index.rs index fa98f3fcaf..23a9378ccb 100644 --- a/src/log-store/src/kafka/index.rs +++ b/src/log-store/src/kafka/index.rs @@ -20,7 +20,7 @@ pub use collector::GlobalIndexCollector; pub(crate) use collector::{IndexCollector, NoopCollector}; pub(crate) use encoder::{IndexEncoder, JsonIndexEncoder}; pub(crate) use iterator::{ - build_region_wal_index_iterator, NextBatchHint, RegionWalIndexIterator, MIN_BATCH_WINDOW_SIZE, + MIN_BATCH_WINDOW_SIZE, NextBatchHint, RegionWalIndexIterator, build_region_wal_index_iterator, }; #[cfg(test)] pub(crate) use iterator::{MultipleRegionWalIndexIterator, RegionWalRange, RegionWalVecIndex}; diff --git a/src/log-store/src/kafka/index/collector.rs b/src/log-store/src/kafka/index/collector.rs index 95e909a2ec..8fd1fba39e 100644 --- a/src/log-store/src/kafka/index/collector.rs +++ b/src/log-store/src/kafka/index/collector.rs @@ -13,8 +13,8 @@ // limitations under the License. 
use std::collections::{BTreeSet, HashMap}; -use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicBool, Ordering}; use std::time::Duration; use common_telemetry::{error, info}; @@ -22,16 +22,16 @@ use futures::future::try_join_all; use object_store::ErrorKind; use serde::{Deserialize, Serialize}; use snafu::ResultExt; -use store_api::logstore::provider::KafkaProvider; use store_api::logstore::EntryId; +use store_api::logstore::provider::KafkaProvider; use store_api::storage::RegionId; use tokio::select; -use tokio::sync::mpsc::Sender; use tokio::sync::Mutex as TokioMutex; +use tokio::sync::mpsc::Sender; use crate::error::{self, Result}; use crate::kafka::index::encoder::{DatanodeWalIndexes, IndexEncoder}; -use crate::kafka::index::{default_index_file, JsonIndexEncoder}; +use crate::kafka::index::{JsonIndexEncoder, default_index_file}; use crate::kafka::worker::{DumpIndexRequest, TruncateIndexRequest, WorkerRequest}; /// The [`IndexCollector`] trait defines the operations for managing and collecting index entries. @@ -239,16 +239,15 @@ impl GlobalIndexCollector { region_id: RegionId, entry_id: EntryId, ) -> Result<()> { - if let Some(sender) = self.providers.lock().await.get(provider).cloned() { - if sender + if let Some(sender) = self.providers.lock().await.get(provider).cloned() + && sender .send(WorkerRequest::TruncateIndex(TruncateIndexRequest::new( region_id, entry_id, ))) .await .is_err() - { - return error::OrderedBatchProducerStoppedSnafu {}.fail(); - } + { + return error::OrderedBatchProducerStoppedSnafu {}.fail(); } Ok(()) @@ -333,10 +332,10 @@ mod tests { use store_api::logstore::provider::KafkaProvider; use store_api::storage::RegionId; + use crate::kafka::index::JsonIndexEncoder; use crate::kafka::index::collector::RegionIndexes; use crate::kafka::index::encoder::IndexEncoder; - use crate::kafka::index::JsonIndexEncoder; - use crate::kafka::{default_index_file, GlobalIndexCollector}; + use crate::kafka::{GlobalIndexCollector, default_index_file}; #[tokio::test] async fn test_read_remote_region_index() { diff --git a/src/log-store/src/kafka/index/encoder.rs b/src/log-store/src/kafka/index/encoder.rs index bbbf013ebf..37d8c8e7ef 100644 --- a/src/log-store/src/kafka/index/encoder.rs +++ b/src/log-store/src/kafka/index/encoder.rs @@ -51,12 +51,9 @@ pub struct DeltaEncodedRegionIndexes { impl DeltaEncodedRegionIndexes { /// Retrieves the original (decoded) index values for a given region. pub(crate) fn region(&self, region_id: RegionId) -> Option> { - let decoded = self - .regions + self.regions .get(®ion_id) - .map(|delta| delta.iter().copied().original().collect::>()); - - decoded + .map(|delta| delta.iter().copied().original().collect::>()) } /// Retrieves the last index. 
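The rewrites in the hunks above (external_sort.rs, log_query.rs, collector.rs) collapse a nested `if let` plus an inner `if` into a single let-chain condition, a form stable rustc only accepts on the 2024 edition. Below is a minimal before/after sketch of that shape using hypothetical names (`lookup`, `LIMIT`, `check_nested`, `check_chained`); it is illustrative only and not code from this patch.

// Minimal sketch of the let-chain rewrite applied throughout this patch.
const LIMIT: usize = 10;

fn lookup(key: &str) -> Option<usize> {
    if key.is_empty() { None } else { Some(key.len()) }
}

// Pre-2024 shape: an `if let` wrapping a second `if`.
fn check_nested(key: &str) -> bool {
    if let Some(len) = lookup(key) {
        if len < LIMIT {
            return true;
        }
    }
    false
}

// Edition-2024 shape: the same logic as one `if let ... && ...` chain.
fn check_chained(key: &str) -> bool {
    if let Some(len) = lookup(key)
        && len < LIMIT
    {
        return true;
    }
    false
}

fn main() {
    assert_eq!(check_nested("abc"), check_chained("abc"));
    assert_eq!(check_nested(""), check_chained(""));
}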
@@ -175,8 +172,10 @@ mod tests { .unwrap(), region_2_indexes, ); - assert!(datanode_index - .provider(&KafkaProvider::new("my_topic_3".to_string())) - .is_none()); + assert!( + datanode_index + .provider(&KafkaProvider::new("my_topic_3".to_string())) + .is_none() + ); } } diff --git a/src/log-store/src/kafka/log_store.rs b/src/log-store/src/kafka/log_store.rs index 8914761f33..702e5bf319 100644 --- a/src/log-store/src/kafka/log_store.rs +++ b/src/log-store/src/kafka/log_store.rs @@ -38,12 +38,12 @@ use crate::error::{self, ConsumeRecordSnafu, Error, GetOffsetSnafu, InvalidProvi use crate::kafka::client_manager::{ClientManager, ClientManagerRef}; use crate::kafka::consumer::{ConsumerBuilder, RecordsBuffer}; use crate::kafka::index::{ - build_region_wal_index_iterator, GlobalIndexCollector, MIN_BATCH_WINDOW_SIZE, + GlobalIndexCollector, MIN_BATCH_WINDOW_SIZE, build_region_wal_index_iterator, }; use crate::kafka::periodic_offset_fetcher::PeriodicOffsetFetcher; use crate::kafka::producer::OrderedBatchProducerRef; use crate::kafka::util::record::{ - convert_to_kafka_records, maybe_emit_entry, remaining_entries, Record, ESTIMATED_META_SIZE, + ESTIMATED_META_SIZE, Record, convert_to_kafka_records, maybe_emit_entry, remaining_entries, }; use crate::metrics; @@ -348,9 +348,9 @@ impl LogStore for KafkaLogStore { if entry_id as i64 <= start_offset { warn!( - "The entry_id: {} is less than start_offset: {}, topic: {}. Overwriting entry_id with start_offset", - entry_id, start_offset, &provider.topic - ); + "The entry_id: {} is less than start_offset: {}, topic: {}. Overwriting entry_id with start_offset", + entry_id, start_offset, &provider.topic + ); entry_id = start_offset as u64; } @@ -559,16 +559,16 @@ mod tests { use common_meta::datanode::TopicStatsReporter; use common_telemetry::info; use common_telemetry::tracing::warn; - use common_wal::config::kafka::common::KafkaConnectionConfig; use common_wal::config::kafka::DatanodeKafkaConfig; + use common_wal::config::kafka::common::KafkaConnectionConfig; use dashmap::DashMap; use futures::TryStreamExt; - use rand::prelude::SliceRandom; use rand::Rng; + use rand::prelude::SliceRandom; use rskafka::client::partition::OffsetAt; + use store_api::logstore::LogStore; use store_api::logstore::entry::{Entry, MultiplePartEntry, MultiplePartHeader, NaiveEntry}; use store_api::logstore::provider::Provider; - use store_api::logstore::LogStore; use store_api::storage::RegionId; use super::build_entry; diff --git a/src/log-store/src/kafka/periodic_offset_fetcher.rs b/src/log-store/src/kafka/periodic_offset_fetcher.rs index 8e441c3d34..caecfc25b5 100644 --- a/src/log-store/src/kafka/periodic_offset_fetcher.rs +++ b/src/log-store/src/kafka/periodic_offset_fetcher.rs @@ -15,7 +15,7 @@ use std::time::Duration; use common_telemetry::{debug, error, info}; -use tokio::time::{interval, MissedTickBehavior}; +use tokio::time::{MissedTickBehavior, interval}; use crate::error::Result; use crate::kafka::client_manager::ClientManagerRef; diff --git a/src/log-store/src/kafka/test_util.rs b/src/log-store/src/kafka/test_util.rs index c83e0ef00e..cb4855e467 100644 --- a/src/log-store/src/kafka/test_util.rs +++ b/src/log-store/src/kafka/test_util.rs @@ -15,8 +15,8 @@ use std::sync::Arc; use chrono::{TimeZone, Utc}; -use common_wal::config::kafka::common::KafkaConnectionConfig; use common_wal::config::kafka::DatanodeKafkaConfig; +use common_wal::config::kafka::common::KafkaConnectionConfig; use dashmap::DashMap; use rskafka::client::ClientBuilder; use rskafka::record::Record; diff 
--git a/src/log-store/src/kafka/util/record.rs b/src/log-store/src/kafka/util/record.rs index fd38766954..1e291d9776 100644 --- a/src/log-store/src/kafka/util/record.rs +++ b/src/log-store/src/kafka/util/record.rs @@ -17,7 +17,7 @@ use std::sync::Arc; use rskafka::record::Record as KafkaRecord; use serde::{Deserialize, Serialize}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::logstore::entry::{Entry, MultiplePartEntry, MultiplePartHeader, NaiveEntry}; use store_api::logstore::provider::{KafkaProvider, Provider}; use store_api::storage::RegionId; @@ -274,8 +274,7 @@ pub(crate) fn maybe_emit_entry( IllegalSequenceSnafu { error: format!( "Illegal sequence of a middle record, last record: {:?}, incoming record: {:?}", - last_record.meta.tp, - record.meta.tp + last_record.meta.tp, record.meta.tp ) } ); @@ -355,9 +354,11 @@ mod tests { // `First` overwrite `First` let mut buffer = HashMap::new(); let record = new_test_record(RecordType::First, 1, region_id.as_u64(), vec![1; 100]); - assert!(maybe_emit_entry(&provider, record, &mut buffer) - .unwrap() - .is_none()); + assert!( + maybe_emit_entry(&provider, record, &mut buffer) + .unwrap() + .is_none() + ); let record = new_test_record(RecordType::First, 2, region_id.as_u64(), vec![2; 100]); let incomplete_entry = maybe_emit_entry(&provider, record, &mut buffer) .unwrap() @@ -395,9 +396,11 @@ mod tests { // `First` overwrite `Middle(0)` let mut buffer = HashMap::new(); let record = new_test_record(RecordType::Middle(0), 1, region_id.as_u64(), vec![1; 100]); - assert!(maybe_emit_entry(&provider, record, &mut buffer) - .unwrap() - .is_none()); + assert!( + maybe_emit_entry(&provider, record, &mut buffer) + .unwrap() + .is_none() + ); let record = new_test_record(RecordType::First, 2, region_id.as_u64(), vec![2; 100]); let incomplete_entry = maybe_emit_entry(&provider, record, &mut buffer) .unwrap() @@ -421,22 +424,28 @@ mod tests { let region_id = RegionId::new(1, 1); let mut buffer = HashMap::new(); let record = new_test_record(RecordType::First, 1, region_id.as_u64(), vec![1; 100]); - assert!(maybe_emit_entry(&provider, record, &mut buffer) - .unwrap() - .is_none()); + assert!( + maybe_emit_entry(&provider, record, &mut buffer) + .unwrap() + .is_none() + ); let record = new_test_record(RecordType::Middle(2), 1, region_id.as_u64(), vec![2; 100]); let err = maybe_emit_entry(&provider, record, &mut buffer).unwrap_err(); assert_matches!(err, error::Error::IllegalSequence { .. }); let mut buffer = HashMap::new(); let record = new_test_record(RecordType::First, 1, region_id.as_u64(), vec![1; 100]); - assert!(maybe_emit_entry(&provider, record, &mut buffer) - .unwrap() - .is_none()); + assert!( + maybe_emit_entry(&provider, record, &mut buffer) + .unwrap() + .is_none() + ); let record = new_test_record(RecordType::Middle(1), 1, region_id.as_u64(), vec![2; 100]); - assert!(maybe_emit_entry(&provider, record, &mut buffer) - .unwrap() - .is_none()); + assert!( + maybe_emit_entry(&provider, record, &mut buffer) + .unwrap() + .is_none() + ); let record = new_test_record(RecordType::Middle(3), 1, region_id.as_u64(), vec![2; 100]); let err = maybe_emit_entry(&provider, record, &mut buffer).unwrap_err(); assert_matches!(err, error::Error::IllegalSequence { .. 
}); diff --git a/src/log-store/src/kafka/worker.rs b/src/log-store/src/kafka/worker.rs index de75114f48..7e3d00672c 100644 --- a/src/log-store/src/kafka/worker.rs +++ b/src/log-store/src/kafka/worker.rs @@ -25,8 +25,8 @@ use futures::future::try_join_all; use rskafka::client::partition::Compression; use rskafka::record::Record; use snafu::{OptionExt, ResultExt}; -use store_api::logstore::provider::KafkaProvider; use store_api::logstore::EntryId; +use store_api::logstore::provider::KafkaProvider; use store_api::storage::RegionId; use tokio::sync::mpsc::Receiver; use tokio::sync::oneshot::{self}; diff --git a/src/log-store/src/raft_engine/backend.rs b/src/log-store/src/raft_engine/backend.rs index 8d27994f8b..398796c2d6 100644 --- a/src/log-store/src/raft_engine/backend.rs +++ b/src/log-store/src/raft_engine/backend.rs @@ -24,12 +24,12 @@ use common_error::ext::BoxedError; use common_meta::error as meta_error; use common_meta::kv_backend::txn::{Txn, TxnOp, TxnOpResponse, TxnRequest, TxnResponse}; use common_meta::kv_backend::{KvBackend, TxnService}; +use common_meta::rpc::KeyValue; use common_meta::rpc::store::{ BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest, BatchPutResponse, DeleteRangeRequest, DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse, }; -use common_meta::rpc::KeyValue; use common_meta::util::get_next_prefix_key; use common_runtime::RepeatedTask; use raft_engine::{Config, Engine, LogBatch, ReadableSize, RecoveryMode}; diff --git a/src/log-store/src/raft_engine/log_store.rs b/src/log-store/src/raft_engine/log_store.rs index f4cd7c61cf..dbd4d0daa7 100644 --- a/src/log-store/src/raft_engine/log_store.rs +++ b/src/log-store/src/raft_engine/log_store.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
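The test changes just above and below (record.rs, raft_engine/log_store.rs, and several meta-srv handlers later on) rewrap long `assert!` arguments: instead of breaking the method chain right after its receiver, the whole chained expression is kept together and indented inside `assert!(...)`. A small hypothetical example of the resulting shape, not taken from this patch:

use std::collections::HashMap;

fn main() {
    let registry: HashMap<String, u64> = HashMap::new();

    // New shape: the chained expression stays intact on its own indented lines.
    assert!(
        registry
            .get("a_topic_that_is_definitely_not_registered")
            .is_none()
    );
}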
-use std::collections::{hash_map, HashMap}; +use std::collections::{HashMap, hash_map}; use std::fmt::{Debug, Formatter}; use std::sync::Arc; use std::time::Duration; @@ -22,7 +22,7 @@ use common_runtime::{RepeatedTask, TaskFunction}; use common_telemetry::{debug, error, info}; use common_wal::config::raft_engine::RaftEngineConfig; use raft_engine::{Config, Engine, LogBatch, MessageExt, ReadableSize, RecoveryMode}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::logstore::entry::{Entry, Id as EntryId, NaiveEntry}; use store_api::logstore::provider::{Provider, RaftEngineProvider}; use store_api::logstore::{AppendBatchResponse, LogStore, SendableEntryStream, WalIndex}; @@ -528,7 +528,7 @@ mod tests { use common_base::readable_size::ReadableSize; use common_telemetry::debug; - use common_test_util::temp_dir::{create_temp_dir, TempDir}; + use common_test_util::temp_dir::{TempDir, create_temp_dir}; use futures_util::StreamExt; use store_api::logstore::{LogStore, SendableEntryStream}; @@ -627,10 +627,12 @@ mod tests { ) .await .unwrap(); - assert!(logstore - .append(EntryImpl::create(1, 1, "1".as_bytes().to_vec()).into()) - .await - .is_ok()); + assert!( + logstore + .append(EntryImpl::create(1, 1, "1".as_bytes().to_vec()).into()) + .await + .is_ok() + ); let entries = logstore .read(&Provider::raft_engine_provider(1), 1, None) .await diff --git a/src/log-store/src/test_util/log_store_util.rs b/src/log-store/src/test_util/log_store_util.rs index 98d419acc4..240006595c 100644 --- a/src/log-store/src/test_util/log_store_util.rs +++ b/src/log-store/src/test_util/log_store_util.rs @@ -15,8 +15,8 @@ use std::path::Path; use common_base::readable_size::ReadableSize; -use common_wal::config::kafka::common::KafkaConnectionConfig; use common_wal::config::kafka::DatanodeKafkaConfig; +use common_wal::config::kafka::common::KafkaConnectionConfig; use common_wal::config::raft_engine::RaftEngineConfig; use crate::kafka::log_store::KafkaLogStore; diff --git a/src/meta-client/examples/meta_client.rs b/src/meta-client/examples/meta_client.rs index e365eba220..fb5125224c 100644 --- a/src/meta-client/examples/meta_client.rs +++ b/src/meta-client/examples/meta_client.rs @@ -21,7 +21,7 @@ use common_meta::rpc::store::{ PutRequest, RangeRequest, }; use meta_client::client::MetaClientBuilder; -use tracing::{event, subscriber, Level}; +use tracing::{Level, event, subscriber}; use tracing_subscriber::FmtSubscriber; fn main() { diff --git a/src/meta-client/src/client.rs b/src/meta-client/src/client.rs index ebf2a6a167..2f95c2c4aa 100644 --- a/src/meta-client/src/client.rs +++ b/src/meta-client/src/client.rs @@ -41,6 +41,7 @@ use common_meta::key::flow::flow_state::{FlowStat, FlowStateManager}; use common_meta::kv_backend::KvBackendRef; use common_meta::procedure_executor::{ExecutorContext, ProcedureExecutor}; use common_meta::range_stream::PaginationStream; +use common_meta::rpc::KeyValue; use common_meta::rpc::ddl::{SubmitDdlTaskRequest, SubmitDdlTaskResponse}; use common_meta::rpc::procedure::{ AddRegionFollowerRequest, MigrateRegionRequest, MigrateRegionResponse, ProcedureStateResponse, @@ -51,7 +52,6 @@ use common_meta::rpc::store::{ BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest, DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse, }; -use common_meta::rpc::KeyValue; use common_telemetry::info; use futures::TryStreamExt; use heartbeat::Client as HeartbeatClient; diff --git 
a/src/meta-client/src/client/cluster.rs b/src/meta-client/src/client/cluster.rs index cc82572533..5386ee8a57 100644 --- a/src/meta-client/src/client/cluster.rs +++ b/src/meta-client/src/client/cluster.rs @@ -31,14 +31,14 @@ use common_meta::rpc::store::{ RangeRequest, RangeResponse, }; use common_telemetry::{error, info, warn}; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use tokio::sync::RwLock; +use tonic::Status; use tonic::codec::CompressionEncoding; use tonic::transport::Channel; -use tonic::Status; use crate::client::ask_leader::AskLeader; -use crate::client::{util, Id, LeaderProviderRef}; +use crate::client::{Id, LeaderProviderRef, util}; use crate::error::{ ConvertMetaResponseSnafu, CreateChannelSnafu, Error, IllegalGrpcClientStateSnafu, ReadOnlyKvBackendSnafu, Result, RetryTimesExceededSnafu, diff --git a/src/meta-client/src/client/heartbeat.rs b/src/meta-client/src/client/heartbeat.rs index 3f763ae189..be5a319fa8 100644 --- a/src/meta-client/src/client/heartbeat.rs +++ b/src/meta-client/src/client/heartbeat.rs @@ -20,12 +20,12 @@ use common_grpc::channel_manager::ChannelManager; use common_meta::util; use common_telemetry::info; use common_telemetry::tracing_context::TracingContext; -use snafu::{ensure, OptionExt, ResultExt}; -use tokio::sync::{mpsc, RwLock}; +use snafu::{OptionExt, ResultExt, ensure}; +use tokio::sync::{RwLock, mpsc}; use tokio_stream::wrappers::ReceiverStream; +use tonic::Streaming; use tonic::codec::CompressionEncoding; use tonic::transport::Channel; -use tonic::Streaming; use crate::client::ask_leader::AskLeader; use crate::client::{Id, LeaderProviderRef}; diff --git a/src/meta-client/src/client/procedure.rs b/src/meta-client/src/client/procedure.rs index f63abe1b42..7b05b188aa 100644 --- a/src/meta-client/src/client/procedure.rs +++ b/src/meta-client/src/client/procedure.rs @@ -25,14 +25,14 @@ use api::v1::meta::{ use common_grpc::channel_manager::ChannelManager; use common_telemetry::tracing_context::TracingContext; use common_telemetry::{error, info, warn}; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use tokio::sync::RwLock; +use tonic::Status; use tonic::codec::CompressionEncoding; use tonic::transport::Channel; -use tonic::Status; use crate::client::ask_leader::AskLeader; -use crate::client::{util, Id, LeaderProviderRef}; +use crate::client::{Id, LeaderProviderRef, util}; use crate::error; use crate::error::Result; diff --git a/src/meta-client/src/client/store.rs b/src/meta-client/src/client/store.rs index ed6b1fc79e..a7cf449629 100644 --- a/src/meta-client/src/client/store.rs +++ b/src/meta-client/src/client/store.rs @@ -23,12 +23,12 @@ use api::v1::meta::{ }; use common_grpc::channel_manager::ChannelManager; use common_telemetry::tracing_context::TracingContext; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use tokio::sync::RwLock; use tonic::codec::CompressionEncoding; use tonic::transport::Channel; -use crate::client::{load_balance as lb, Id}; +use crate::client::{Id, load_balance as lb}; use crate::error; use crate::error::Result; diff --git a/src/meta-client/src/error.rs b/src/meta-client/src/error.rs index a992fc49df..ad803fc74a 100644 --- a/src/meta-client/src/error.rs +++ b/src/meta-client/src/error.rs @@ -16,7 +16,7 @@ use common_error::define_from_tonic_status; use common_error::ext::ErrorExt; use common_error::status_code::StatusCode; use common_macro::stack_trace_debug; -use snafu::{location, Location, Snafu}; +use snafu::{Location, Snafu, location}; 
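The import reorderings throughout these hunks (for example `snafu::{OptionExt, ResultExt, ensure}` and `tonic::Status` moving ahead of `tonic::codec`) follow rustfmt's 2024 style edition, which sorts use items roughly byte-wise, so ASCII uppercase names come before lowercase ones, with runs of digits compared numerically (e.g. `Int8Type` before `Int16Type`). The snippet below is a hypothetical demonstration of that ordering, not code from this patch.

fn main() {
    // The old grouping in these files was `{ensure, OptionExt, ResultExt}`.
    let mut names = ["ensure", "OptionExt", "ResultExt"];
    // &str comparison is byte-wise: uppercase (0x41..) sorts before lowercase (0x61..),
    // which reproduces the new brace ordering seen in the hunks above.
    names.sort();
    assert_eq!(names, ["OptionExt", "ResultExt", "ensure"]);
    println!("{names:?}");
}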
#[derive(Snafu)] #[snafu(visibility(pub))] diff --git a/src/meta-srv/examples/kv_store.rs b/src/meta-srv/examples/kv_store.rs index c0cd41b6ad..95c64b202e 100644 --- a/src/meta-srv/examples/kv_store.rs +++ b/src/meta-srv/examples/kv_store.rs @@ -14,7 +14,7 @@ use common_meta::kv_backend::etcd::EtcdStore; use common_meta::rpc::store::{DeleteRangeRequest, PutRequest, RangeRequest}; -use tracing::{event, subscriber, Level}; +use tracing::{Level, event, subscriber}; use tracing_subscriber::FmtSubscriber; fn main() { diff --git a/src/meta-srv/src/bootstrap.rs b/src/meta-srv/src/bootstrap.rs index 6bd7b95b55..d9bf45cd81 100644 --- a/src/meta-srv/src/bootstrap.rs +++ b/src/meta-srv/src/bootstrap.rs @@ -28,14 +28,14 @@ use common_meta::distributed_time_constants::META_LEASE_SECS; use common_meta::kv_backend::chroot::ChrootKvBackend; use common_meta::kv_backend::etcd::EtcdStore; use common_meta::kv_backend::memory::MemoryKvBackend; -#[cfg(feature = "pg_kvbackend")] -use common_meta::kv_backend::rds::postgres::create_postgres_tls_connector; -#[cfg(feature = "pg_kvbackend")] -use common_meta::kv_backend::rds::postgres::{TlsMode as PgTlsMode, TlsOption as PgTlsOption}; #[cfg(feature = "mysql_kvbackend")] use common_meta::kv_backend::rds::MySqlStore; #[cfg(feature = "pg_kvbackend")] use common_meta::kv_backend::rds::PgStore; +#[cfg(feature = "pg_kvbackend")] +use common_meta::kv_backend::rds::postgres::create_postgres_tls_connector; +#[cfg(feature = "pg_kvbackend")] +use common_meta::kv_backend::rds::postgres::{TlsMode as PgTlsMode, TlsOption as PgTlsOption}; use common_meta::kv_backend::{KvBackendRef, ResettableKvBackendRef}; use common_telemetry::info; #[cfg(feature = "pg_kvbackend")] @@ -57,30 +57,30 @@ use sqlx::mysql::MySqlConnectOptions; use sqlx::mysql::MySqlPool; use tokio::net::TcpListener; use tokio::sync::mpsc::{self, Receiver, Sender}; -use tokio::sync::{oneshot, Mutex}; +use tokio::sync::{Mutex, oneshot}; #[cfg(feature = "pg_kvbackend")] use tokio_postgres::NoTls; use tonic::codec::CompressionEncoding; use tonic::transport::server::{Router, TcpIncoming}; +#[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))] +use crate::election::CANDIDATE_LEASE_SECS; use crate::election::etcd::EtcdElection; #[cfg(feature = "mysql_kvbackend")] use crate::election::rds::mysql::MySqlElection; #[cfg(feature = "pg_kvbackend")] use crate::election::rds::postgres::PgElection; -#[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))] -use crate::election::CANDIDATE_LEASE_SECS; use crate::metasrv::builder::MetasrvBuilder; use crate::metasrv::{BackendImpl, Metasrv, MetasrvOptions, SelectTarget, SelectorRef}; use crate::node_excluder::NodeExcluderRef; +use crate::selector::SelectorType; use crate::selector::lease_based::LeaseBasedSelector; use crate::selector::load_based::LoadBasedSelector; use crate::selector::round_robin::RoundRobinSelector; use crate::selector::weight_compute::RegionNumsBasedWeightCompute; -use crate::selector::SelectorType; use crate::service::admin; use crate::service::admin::admin_axum_router; -use crate::{error, Result}; +use crate::{Result, error}; pub struct MetasrvInstance { metasrv: Arc, @@ -180,10 +180,10 @@ impl MetasrvInstance { } pub async fn shutdown(&self) -> Result<()> { - if let Some(mut rx) = self.serve_state.lock().await.take() { - if let Ok(Err(err)) = rx.try_recv() { - common_telemetry::error!(err; "Metasrv start failed") - } + if let Some(mut rx) = self.serve_state.lock().await.take() + && let Ok(Err(err)) = rx.try_recv() + { + 
common_telemetry::error!(err; "Metasrv start failed") } if let Some(signal) = &self.signal_sender { signal diff --git a/src/meta-srv/src/cluster.rs b/src/meta-srv/src/cluster.rs index 23d8cd05dc..0114cced6f 100644 --- a/src/meta-srv/src/cluster.rs +++ b/src/meta-srv/src/cluster.rs @@ -25,19 +25,19 @@ use api::v1::meta::{ use common_grpc::channel_manager::ChannelManager; use common_meta::datanode::{DatanodeStatKey, DatanodeStatValue}; use common_meta::kv_backend::{KvBackend, ResettableKvBackendRef, TxnService}; +use common_meta::rpc::KeyValue; use common_meta::rpc::store::{ BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest, BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest, DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse, }; -use common_meta::rpc::KeyValue; use common_meta::util; use common_telemetry::warn; use derive_builder::Builder; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use crate::error; -use crate::error::{match_for_io_error, Result}; +use crate::error::{Result, match_for_io_error}; use crate::metasrv::ElectionRef; pub type MetaPeerClientRef = Arc; @@ -370,7 +370,7 @@ mod tests { use common_meta::datanode::{DatanodeStatKey, DatanodeStatValue, Stat}; use common_meta::rpc::KeyValue; - use super::{check_resp_header, to_stat_kv_map, Context}; + use super::{Context, check_resp_header, to_stat_kv_map}; use crate::error; #[test] diff --git a/src/meta-srv/src/election.rs b/src/meta-srv/src/election.rs index f8d4a1e2bd..2d2826b286 100644 --- a/src/meta-srv/src/election.rs +++ b/src/meta-srv/src/election.rs @@ -17,8 +17,8 @@ pub mod etcd; pub mod rds; use std::fmt::{self, Debug}; -use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicBool, Ordering}; use common_telemetry::{error, info, warn}; use tokio::sync::broadcast::error::RecvError; diff --git a/src/meta-srv/src/election/etcd.rs b/src/meta-srv/src/election/etcd.rs index 936f9548ac..8c6eac441f 100644 --- a/src/meta-srv/src/election/etcd.rs +++ b/src/meta-srv/src/election/etcd.rs @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicBool, Ordering}; use std::time::Duration; use common_meta::distributed_time_constants::{META_KEEP_ALIVE_INTERVAL_SECS, META_LEASE_SECS}; @@ -22,14 +22,14 @@ use common_telemetry::{error, info, warn}; use etcd_client::{ Client, GetOptions, LeaderKey as EtcdLeaderKey, LeaseKeepAliveStream, LeaseKeeper, PutOptions, }; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use tokio::sync::broadcast; use tokio::sync::broadcast::Receiver; -use tokio::time::{timeout, MissedTickBehavior}; +use tokio::time::{MissedTickBehavior, timeout}; use crate::election::{ - listen_leader_change, send_leader_change_and_set_flags, Election, LeaderChangeMessage, - LeaderKey, CANDIDATE_LEASE_SECS, KEEP_ALIVE_INTERVAL_SECS, + CANDIDATE_LEASE_SECS, Election, KEEP_ALIVE_INTERVAL_SECS, LeaderChangeMessage, LeaderKey, + listen_leader_change, send_leader_change_and_set_flags, }; use crate::error; use crate::error::Result; @@ -160,11 +160,11 @@ impl Election for EtcdElection { let _ = keep_alive_interval.tick().await; keeper.keep_alive().await.context(error::EtcdFailedSnafu)?; - if let Some(res) = receiver.message().await.context(error::EtcdFailedSnafu)? 
{ - if res.ttl() <= 0 { - warn!("Candidate lease expired, key: {}", self.candidate_key()); - break; - } + if let Some(res) = receiver.message().await.context(error::EtcdFailedSnafu)? + && res.ttl() <= 0 + { + warn!("Candidate lease expired, key: {}", self.candidate_key()); + break; } } diff --git a/src/meta-srv/src/election/rds/mysql.rs b/src/meta-srv/src/election/rds/mysql.rs index e36e1bfacd..0c43d51e7d 100644 --- a/src/meta-srv/src/election/rds/mysql.rs +++ b/src/meta-srv/src/election/rds/mysql.rs @@ -12,24 +12,24 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicBool, Ordering}; use std::time::Duration; use common_meta::key::{CANDIDATES_ROOT, ELECTION_KEY}; use common_telemetry::{error, warn}; use common_time::Timestamp; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use sqlx::mysql::{MySqlArguments, MySqlRow}; use sqlx::pool::PoolConnection; use sqlx::query::Query; use sqlx::{MySql, MySqlPool, MySqlTransaction, Row}; -use tokio::sync::{broadcast, Mutex, MutexGuard}; +use tokio::sync::{Mutex, MutexGuard, broadcast}; use tokio::time::MissedTickBehavior; -use crate::election::rds::{parse_value_and_expire_time, Lease, RdsLeaderKey, LEASE_SEP}; +use crate::election::rds::{LEASE_SEP, Lease, RdsLeaderKey, parse_value_and_expire_time}; use crate::election::{ - listen_leader_change, send_leader_change_and_set_flags, Election, LeaderChangeMessage, + Election, LeaderChangeMessage, listen_leader_change, send_leader_change_and_set_flags, }; use crate::error::{ AcquireMySqlClientSnafu, DecodeSqlValueSnafu, DeserializeFromJsonSnafu, diff --git a/src/meta-srv/src/election/rds/postgres.rs b/src/meta-srv/src/election/rds/postgres.rs index 870f5377e4..1dde25f3f6 100644 --- a/src/meta-srv/src/election/rds/postgres.rs +++ b/src/meta-srv/src/election/rds/postgres.rs @@ -12,23 +12,23 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicBool, Ordering}; use std::time::Duration; use common_meta::key::{CANDIDATES_ROOT, ELECTION_KEY}; use common_telemetry::{error, warn}; use common_time::Timestamp; use deadpool_postgres::{Manager, Pool}; -use snafu::{ensure, OptionExt, ResultExt}; -use tokio::sync::{broadcast, RwLock}; +use snafu::{OptionExt, ResultExt, ensure}; +use tokio::sync::{RwLock, broadcast}; use tokio::time::MissedTickBehavior; -use tokio_postgres::types::ToSql; use tokio_postgres::Row; +use tokio_postgres::types::ToSql; -use crate::election::rds::{parse_value_and_expire_time, Lease, RdsLeaderKey, LEASE_SEP}; +use crate::election::rds::{LEASE_SEP, Lease, RdsLeaderKey, parse_value_and_expire_time}; use crate::election::{ - listen_leader_change, send_leader_change_and_set_flags, Election, LeaderChangeMessage, + Election, LeaderChangeMessage, listen_leader_change, send_leader_change_and_set_flags, }; use crate::error::{ DeserializeFromJsonSnafu, GetPostgresClientSnafu, NoLeaderSnafu, PostgresExecutionSnafu, @@ -691,7 +691,9 @@ impl PgElection { } // Case 1.3 (false, _) => { - warn!("Leader lease not found, but still hold the lock. Now stepping down."); + warn!( + "Leader lease not found, but still hold the lock. Now stepping down." 
+ ); self.step_down().await?; } } @@ -776,13 +778,11 @@ impl PgElection { .is_leader .compare_exchange(true, false, Ordering::AcqRel, Ordering::Acquire) .is_ok() - { - if let Err(e) = self + && let Err(e) = self .leader_watcher .send(LeaderChangeMessage::StepDown(Arc::new(leader_key))) - { - error!(e; "Failed to send leader change message"); - } + { + error!(e; "Failed to send leader change message"); } Ok(()) } @@ -1607,20 +1607,25 @@ mod tests { let f = ElectionSqlFactory::new(42, Some("test_schema"), "greptime_metakv"); let s = f.build(); assert!(s.campaign.contains("pg_try_advisory_lock")); - assert!(s - .put_value_with_lease - .contains("\"test_schema\".\"greptime_metakv\"")); - assert!(s - .update_value_with_lease - .contains("\"test_schema\".\"greptime_metakv\"")); - assert!(s - .get_value_with_lease - .contains("\"test_schema\".\"greptime_metakv\"")); - assert!(s - .get_value_with_lease_by_prefix - .contains("\"test_schema\".\"greptime_metakv\"")); - assert!(s - .delete_value - .contains("\"test_schema\".\"greptime_metakv\"")); + assert!( + s.put_value_with_lease + .contains("\"test_schema\".\"greptime_metakv\"") + ); + assert!( + s.update_value_with_lease + .contains("\"test_schema\".\"greptime_metakv\"") + ); + assert!( + s.get_value_with_lease + .contains("\"test_schema\".\"greptime_metakv\"") + ); + assert!( + s.get_value_with_lease_by_prefix + .contains("\"test_schema\".\"greptime_metakv\"") + ); + assert!( + s.delete_value + .contains("\"test_schema\".\"greptime_metakv\"") + ); } } diff --git a/src/meta-srv/src/error.rs b/src/meta-srv/src/error.rs index b5ef3879ce..29174a97f2 100644 --- a/src/meta-srv/src/error.rs +++ b/src/meta-srv/src/error.rs @@ -1127,10 +1127,10 @@ pub(crate) fn match_for_io_error(err_status: &tonic::Status) -> Option<&std::io: // h2::Error do not expose std::io::Error with `source()` // https://github.com/hyperium/h2/pull/462 - if let Some(h2_err) = err.downcast_ref::() { - if let Some(io_err) = h2_err.get_io() { - return Some(io_err); - } + if let Some(h2_err) = err.downcast_ref::() + && let Some(io_err) = h2_err.get_io() + { + return Some(io_err); } err = err.source()?; diff --git a/src/meta-srv/src/events.rs b/src/meta-srv/src/events.rs index 5c2e555baa..3da0b5d3e7 100644 --- a/src/meta-srv/src/events.rs +++ b/src/meta-srv/src/events.rs @@ -17,7 +17,7 @@ use client::inserter::{Context, Inserter}; use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_PRIVATE_SCHEMA_NAME}; use common_error::ext::BoxedError; use common_event_recorder::error::{InsertEventsSnafu, Result}; -use common_event_recorder::{build_row_inserts_request, group_events_by_type, Event, EventHandler}; +use common_event_recorder::{Event, EventHandler, build_row_inserts_request, group_events_by_type}; use snafu::ResultExt; pub mod region_migration_event; diff --git a/src/meta-srv/src/events/region_migration_event.rs b/src/meta-srv/src/events/region_migration_event.rs index 1c530b281a..192231812c 100644 --- a/src/meta-srv/src/events/region_migration_event.rs +++ b/src/meta-srv/src/events/region_migration_event.rs @@ -17,8 +17,8 @@ use std::time::Duration; use api::v1::value::ValueData; use api::v1::{ColumnDataType, ColumnSchema, Row, SemanticType}; -use common_event_recorder::error::{Result, SerializeEventSnafu}; use common_event_recorder::Event; +use common_event_recorder::error::{Result, SerializeEventSnafu}; use serde::Serialize; use snafu::ResultExt; use store_api::storage::{RegionId, TableId}; diff --git a/src/meta-srv/src/failure_detector.rs b/src/meta-srv/src/failure_detector.rs 
index ac0df6c2dc..e12b31e263 100644 --- a/src/meta-srv/src/failure_detector.rs +++ b/src/meta-srv/src/failure_detector.rs @@ -443,8 +443,8 @@ mod tests { } #[test] - fn test_is_available_if_it_starts_heartbeat_again_after_being_marked_dead_due_to_detection_of_failure( - ) { + fn test_is_available_if_it_starts_heartbeat_again_after_being_marked_dead_due_to_detection_of_failure() + { let mut fd = PhiAccrualFailureDetector { threshold: 8.0, min_std_deviation_millis: 100.0, diff --git a/src/meta-srv/src/greptimedb_telemetry.rs b/src/meta-srv/src/greptimedb_telemetry.rs index 9cd9c2f672..3dfc9cd85a 100644 --- a/src/meta-srv/src/greptimedb_telemetry.rs +++ b/src/meta-srv/src/greptimedb_telemetry.rs @@ -12,13 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::sync::atomic::AtomicBool; use std::sync::Arc; +use std::sync::atomic::AtomicBool; use async_trait::async_trait; use common_greptimedb_telemetry::{ - default_get_uuid, Collector, GreptimeDBTelemetry, GreptimeDBTelemetryTask, - Mode as VersionReporterMode, TELEMETRY_INTERVAL, + Collector, GreptimeDBTelemetry, GreptimeDBTelemetryTask, Mode as VersionReporterMode, + TELEMETRY_INTERVAL, default_get_uuid, }; use crate::cluster::MetaPeerClientRef; diff --git a/src/meta-srv/src/handler.rs b/src/meta-srv/src/handler.rs index 8b241ce8ee..8f7aba2f92 100644 --- a/src/meta-srv/src/handler.rs +++ b/src/meta-srv/src/handler.rs @@ -20,8 +20,8 @@ use std::time::{Duration, Instant}; use api::v1::meta::mailbox_message::Payload; use api::v1::meta::{ - HeartbeatRequest, HeartbeatResponse, MailboxMessage, RegionLease, ResponseHeader, Role, - PROTOCOL_VERSION, + HeartbeatRequest, HeartbeatResponse, MailboxMessage, PROTOCOL_VERSION, RegionLease, + ResponseHeader, Role, }; use check_leader_handler::CheckLeaderHandler; use collect_cluster_info_handler::{ @@ -50,7 +50,7 @@ use response_header_handler::ResponseHeaderHandler; use snafu::{OptionExt, ResultExt}; use store_api::storage::RegionId; use tokio::sync::mpsc::Sender; -use tokio::sync::{oneshot, watch, Notify, RwLock}; +use tokio::sync::{Notify, RwLock, oneshot, watch}; use crate::error::{self, DeserializeFromJsonSnafu, Result, UnexpectedInstructionReplySnafu}; use crate::handler::collect_topic_stats_handler::CollectTopicStatsHandler; @@ -430,11 +430,7 @@ impl HeartbeatMailbox { .iter() .filter_map(|entry| { let (id, deadline) = entry.pair(); - if deadline < &now { - Some(*id) - } else { - None - } + if deadline < &now { Some(*id) } else { None } }) .collect::>(); diff --git a/src/meta-srv/src/handler/collect_cluster_info_handler.rs b/src/meta-srv/src/handler/collect_cluster_info_handler.rs index 1a76b4a8cd..b0f9596d1a 100644 --- a/src/meta-srv/src/handler/collect_cluster_info_handler.rs +++ b/src/meta-srv/src/handler/collect_cluster_info_handler.rs @@ -21,10 +21,10 @@ use common_meta::rpc::store::PutRequest; use snafu::ResultExt; use store_api::region_engine::RegionRole; +use crate::Result; use crate::error::{InvalidClusterInfoFormatSnafu, SaveClusterInfoSnafu}; use crate::handler::{HandleControl, HeartbeatAccumulator, HeartbeatHandler}; use crate::metasrv::Context; -use crate::Result; /// The handler to collect cluster info from the heartbeat request of frontend. 
pub struct CollectFrontendClusterInfoHandler; diff --git a/src/meta-srv/src/handler/collect_topic_stats_handler.rs b/src/meta-srv/src/handler/collect_topic_stats_handler.rs index c26e9204ad..4901a4a6e5 100644 --- a/src/meta-srv/src/handler/collect_topic_stats_handler.rs +++ b/src/meta-srv/src/handler/collect_topic_stats_handler.rs @@ -81,10 +81,11 @@ mod tests { .get_latest_entry_id("test2") .unwrap(); assert_eq!(latest_entry_id, (10, aligned_ts)); - assert!(ctx - .topic_stats_registry - .get_latest_entry_id("test3") - .is_none()); + assert!( + ctx.topic_stats_registry + .get_latest_entry_id("test3") + .is_none() + ); } async fn handle_request_many_times( diff --git a/src/meta-srv/src/handler/persist_stats_handler.rs b/src/meta-srv/src/handler/persist_stats_handler.rs index 220e28bd6b..f4563640dd 100644 --- a/src/meta-srv/src/handler/persist_stats_handler.rs +++ b/src/meta-srv/src/handler/persist_stats_handler.rs @@ -17,12 +17,12 @@ use std::time::{Duration, Instant}; use api::v1::meta::{HeartbeatRequest, Role}; use api::v1::value::ValueData; use api::v1::{ColumnSchema, Row, RowInsertRequest, RowInsertRequests, Rows, Value}; -use client::inserter::{Context as InserterContext, Inserter}; use client::DEFAULT_CATALOG_NAME; +use client::inserter::{Context as InserterContext, Inserter}; use common_catalog::consts::DEFAULT_PRIVATE_SCHEMA_NAME; use common_macro::{Schema, ToRow}; -use common_meta::datanode::RegionStat; use common_meta::DatanodeId; +use common_meta::datanode::RegionStat; use common_telemetry::warn; use dashmap::DashMap; use store_api::region_engine::RegionRole; @@ -535,11 +535,13 @@ mod tests { assert_eq!(request.rows.unwrap().rows, vec![expected_row]); // Check last persisted time - assert!(handler - .last_persisted_time - .get(&datanode_id) - .unwrap() - .gt(&before_insert_time)); + assert!( + handler + .last_persisted_time + .get(&datanode_id) + .unwrap() + .gt(&before_insert_time) + ); // Check last persisted region stats assert_eq!( diff --git a/src/meta-srv/src/handler/region_lease_handler.rs b/src/meta-srv/src/handler/region_lease_handler.rs index b659ba472a..d7c2466e07 100644 --- a/src/meta-srv/src/handler/region_lease_handler.rs +++ b/src/meta-srv/src/handler/region_lease_handler.rs @@ -25,10 +25,10 @@ use store_api::storage::RegionId; use crate::error::Result; use crate::handler::{HandleControl, HeartbeatAccumulator, HeartbeatHandler}; use crate::metasrv::Context; +use crate::region::RegionLeaseKeeper; use crate::region::lease_keeper::{ RegionLeaseInfo, RegionLeaseKeeperRef, RenewRegionLeasesResponse, }; -use crate::region::RegionLeaseKeeper; pub struct RegionLeaseHandler { region_lease_seconds: u64, @@ -125,9 +125,9 @@ mod test { use common_meta::datanode::{RegionManifestInfo, RegionStat, Stat}; use common_meta::distributed_time_constants; + use common_meta::key::TableMetadataManager; use common_meta::key::table_route::TableRouteValue; use common_meta::key::test_utils::new_test_table_info; - use common_meta::key::TableMetadataManager; use common_meta::kv_backend::memory::MemoryKvBackend; use common_meta::peer::Peer; use common_meta::region_keeper::MemoryRegionKeeper; diff --git a/src/meta-srv/src/handler/remap_flow_peer_handler.rs b/src/meta-srv/src/handler/remap_flow_peer_handler.rs index ec6cdb7b5a..f6b3c67a9e 100644 --- a/src/meta-srv/src/handler/remap_flow_peer_handler.rs +++ b/src/meta-srv/src/handler/remap_flow_peer_handler.rs @@ -20,9 +20,9 @@ use common_meta::rpc::store::PutRequest; use common_telemetry::{error, info, warn}; use dashmap::DashMap; +use 
crate::Result; use crate::handler::{HandleControl, HeartbeatAccumulator, HeartbeatHandler}; use crate::metasrv::Context; -use crate::Result; #[derive(Debug, Default)] pub struct RemapFlowPeerHandler { diff --git a/src/meta-srv/src/handler/response_header_handler.rs b/src/meta-srv/src/handler/response_header_handler.rs index 5b017f9917..54f8329bb7 100644 --- a/src/meta-srv/src/handler/response_header_handler.rs +++ b/src/meta-srv/src/handler/response_header_handler.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use api::v1::meta::{HeartbeatRequest, ResponseHeader, Role, PROTOCOL_VERSION}; +use api::v1::meta::{HeartbeatRequest, PROTOCOL_VERSION, ResponseHeader, Role}; use crate::error::Result; use crate::handler::{HandleControl, HeartbeatAccumulator, HeartbeatHandler}; diff --git a/src/meta-srv/src/key.rs b/src/meta-srv/src/key.rs index 33f40c87df..803ee9f0c6 100644 --- a/src/meta-srv/src/key.rs +++ b/src/meta-srv/src/key.rs @@ -21,7 +21,7 @@ use api::v1::meta::heartbeat_request::NodeWorkloads; pub use datanode::*; pub use flownode::*; use serde::{Deserialize, Serialize}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use crate::error; diff --git a/src/meta-srv/src/key/datanode.rs b/src/meta-srv/src/key/datanode.rs index ef4c89c34d..056cfa95b9 100644 --- a/src/meta-srv/src/key/datanode.rs +++ b/src/meta-srv/src/key/datanode.rs @@ -18,7 +18,7 @@ use common_meta::datanode::DatanodeStatKey; use lazy_static::lazy_static; use regex::Regex; use serde::{Deserialize, Serialize}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use crate::error; use crate::error::Result; diff --git a/src/meta-srv/src/lease.rs b/src/meta-srv/src/lease.rs index 819b82fadc..fb7150323f 100644 --- a/src/meta-srv/src/lease.rs +++ b/src/meta-srv/src/lease.rs @@ -26,7 +26,7 @@ use common_meta::distributed_time_constants::FRONTEND_HEARTBEAT_INTERVAL_MILLIS; use common_meta::kv_backend::{KvBackend, ResettableKvBackendRef}; use common_meta::peer::{Peer, PeerLookupService}; use common_meta::rpc::store::RangeRequest; -use common_meta::{util, DatanodeId, FlownodeId}; +use common_meta::{DatanodeId, FlownodeId, util}; use common_time::util as time_util; use common_workload::DatanodeWorkloadType; use snafu::ResultExt; @@ -325,8 +325,8 @@ impl PeerLookupService for MetaPeerLookupService { mod tests { use std::time::Duration; - use api::v1::meta::heartbeat_request::NodeWorkloads; use api::v1::meta::DatanodeWorkloads; + use api::v1::meta::heartbeat_request::NodeWorkloads; use common_meta::cluster::{FrontendStatus, NodeInfo, NodeInfoKey, NodeStatus}; use common_meta::distributed_time_constants::FRONTEND_HEARTBEAT_INTERVAL_MILLIS; use common_meta::kv_backend::ResettableKvBackendRef; @@ -337,8 +337,8 @@ mod tests { use crate::key::{DatanodeLeaseKey, LeaseValue}; use crate::lease::{ - alive_datanodes, is_datanode_accept_ingest_workload, lookup_frontends, ClusterRole, - MetaPeerLookupService, + ClusterRole, MetaPeerLookupService, alive_datanodes, is_datanode_accept_ingest_workload, + lookup_frontends, }; use crate::test_util::create_meta_peer_client; diff --git a/src/meta-srv/src/metasrv.rs b/src/meta-srv/src/metasrv.rs index bdd6b2e815..f5f6615b0e 100644 --- a/src/meta-srv/src/metasrv.rs +++ b/src/meta-srv/src/metasrv.rs @@ -20,16 +20,16 @@ use std::sync::{Arc, Mutex, RwLock}; use std::time::Duration; use clap::ValueEnum; -use common_base::readable_size::ReadableSize; use 
common_base::Plugins; +use common_base::readable_size::ReadableSize; use common_config::{Configurable, DEFAULT_DATA_HOME}; use common_event_recorder::EventRecorderOptions; use common_greptimedb_telemetry::GreptimeDBTelemetryTask; use common_meta::cache_invalidator::CacheInvalidatorRef; use common_meta::ddl_manager::DdlManagerRef; use common_meta::distributed_time_constants; -use common_meta::key::runtime_switch::RuntimeSwitchManagerRef; use common_meta::key::TableMetadataManagerRef; +use common_meta::key::runtime_switch::RuntimeSwitchManagerRef; use common_meta::kv_backend::{KvBackendRef, ResettableKvBackend, ResettableKvBackendRef}; use common_meta::leadership_notifier::{ LeadershipChangeNotifier, LeadershipChangeNotifierCustomizerRef, @@ -44,8 +44,8 @@ use common_meta::stats::topic::TopicStatsRegistryRef; use common_meta::wal_options_allocator::WalOptionsAllocatorRef; use common_options::datanode::DatanodeClientOptions; use common_options::memory::MemoryOptions; -use common_procedure::options::ProcedureConfig; use common_procedure::ProcedureManagerRef; +use common_procedure::options::ProcedureConfig; use common_telemetry::logging::{LoggingOptions, TracingOptions}; use common_telemetry::{error, info, warn}; use common_wal::config::MetasrvWalConfig; @@ -68,16 +68,16 @@ use crate::error::{ use crate::failure_detector::PhiAccrualFailureDetectorOptions; use crate::handler::{HeartbeatHandlerGroupBuilder, HeartbeatHandlerGroupRef}; use crate::lease::lookup_datanode_peer; +use crate::procedure::ProcedureManagerListenerAdapter; use crate::procedure::region_migration::manager::RegionMigrationManagerRef; use crate::procedure::wal_prune::manager::WalPruneTickerRef; -use crate::procedure::ProcedureManagerListenerAdapter; use crate::pubsub::{PublisherRef, SubscriptionManagerRef}; use crate::region::flush_trigger::RegionFlushTickerRef; use crate::region::supervisor::RegionSupervisorTickerRef; use crate::selector::{RegionStatAwareSelector, Selector, SelectorType}; use crate::service::mailbox::MailboxRef; use crate::service::store::cached_kv::LeaderCachedKvBackend; -use crate::state::{become_follower, become_leader, StateRef}; +use crate::state::{StateRef, become_follower, become_leader}; pub const TABLE_ID_SEQ: &str = "table_id"; pub const FLOW_ID_SEQ: &str = "flow_id"; diff --git a/src/meta-srv/src/metasrv/builder.rs b/src/meta-srv/src/metasrv/builder.rs index 449e862120..7558f4dca2 100644 --- a/src/meta-srv/src/metasrv/builder.rs +++ b/src/meta-srv/src/metasrv/builder.rs @@ -21,7 +21,7 @@ use client::client_manager::NodeClients; use client::inserter::InsertOptions; use common_base::Plugins; use common_catalog::consts::{MIN_USER_FLOW_ID, MIN_USER_TABLE_ID}; -use common_event_recorder::{EventRecorderImpl, EventRecorderRef, DEFAULT_COMPACTION_TIME_WINDOW}; +use common_event_recorder::{DEFAULT_COMPACTION_TIME_WINDOW, EventRecorderImpl, EventRecorderRef}; use common_grpc::channel_manager::ChannelConfig; use common_meta::ddl::flow_meta::FlowMetadataAllocator; use common_meta::ddl::table_meta::{TableMetadataAllocator, TableMetadataAllocatorRef}; @@ -30,10 +30,10 @@ use common_meta::ddl::{ }; use common_meta::ddl_manager::DdlManager; use common_meta::distributed_time_constants::{self}; -use common_meta::key::flow::flow_state::FlowStateManager; -use common_meta::key::flow::FlowMetadataManager; -use common_meta::key::runtime_switch::{RuntimeSwitchManager, RuntimeSwitchManagerRef}; use common_meta::key::TableMetadataManager; +use common_meta::key::flow::FlowMetadataManager; +use 
common_meta::key::flow::flow_state::FlowStateManager; +use common_meta::key::runtime_switch::{RuntimeSwitchManager, RuntimeSwitchManagerRef}; use common_meta::kv_backend::memory::MemoryKvBackend; use common_meta::kv_backend::{KvBackendRef, ResettableKvBackendRef}; use common_meta::node_manager::NodeManagerRef; @@ -44,10 +44,10 @@ use common_meta::sequence::SequenceBuilder; use common_meta::state_store::KvStateStore; use common_meta::stats::topic::TopicStatsRegistry; use common_meta::wal_options_allocator::{build_kafka_client, build_wal_options_allocator}; -use common_procedure::local::{LocalManager, ManagerConfig}; use common_procedure::ProcedureManagerRef; +use common_procedure::local::{LocalManager, ManagerConfig}; use common_telemetry::{info, warn}; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use crate::cache_invalidator::MetasrvCacheInvalidator; use crate::cluster::{MetaPeerClientBuilder, MetaPeerClientRef}; @@ -62,17 +62,18 @@ use crate::handler::region_lease_handler::{CustomizedRegionLeaseRenewerRef, Regi use crate::handler::{HeartbeatHandlerGroupBuilder, HeartbeatMailbox, Pushers}; use crate::lease::MetaPeerLookupService; use crate::metasrv::{ - ElectionRef, Metasrv, MetasrvInfo, MetasrvOptions, RegionStatAwareSelectorRef, SelectTarget, - SelectorContext, SelectorRef, FLOW_ID_SEQ, METASRV_DATA_DIR, TABLE_ID_SEQ, + ElectionRef, FLOW_ID_SEQ, METASRV_DATA_DIR, Metasrv, MetasrvInfo, MetasrvOptions, + RegionStatAwareSelectorRef, SelectTarget, SelectorContext, SelectorRef, TABLE_ID_SEQ, }; -use crate::procedure::region_migration::manager::RegionMigrationManager; use crate::procedure::region_migration::DefaultContextFactory; -use crate::procedure::wal_prune::manager::{WalPruneManager, WalPruneTicker}; +use crate::procedure::region_migration::manager::RegionMigrationManager; use crate::procedure::wal_prune::Context as WalPruneContext; +use crate::procedure::wal_prune::manager::{WalPruneManager, WalPruneTicker}; use crate::region::flush_trigger::RegionFlushTrigger; use crate::region::supervisor::{ - HeartbeatAcceptor, RegionFailureDetectorControl, RegionSupervisor, RegionSupervisorSelector, - RegionSupervisorTicker, DEFAULT_INITIALIZATION_RETRY_PERIOD, DEFAULT_TICK_INTERVAL, + DEFAULT_INITIALIZATION_RETRY_PERIOD, DEFAULT_TICK_INTERVAL, HeartbeatAcceptor, + RegionFailureDetectorControl, RegionSupervisor, RegionSupervisorSelector, + RegionSupervisorTicker, }; use crate::selector::lease_based::LeaseBasedSelector; use crate::selector::round_robin::RoundRobinSelector; @@ -319,7 +320,9 @@ impl MetasrvBuilder { } ); if options.allow_region_failover_on_local_wal { - warn!("Region failover is force enabled in the local WAL implementation! This may lead to data loss during failover!"); + warn!( + "Region failover is force enabled in the local WAL implementation! This may lead to data loss during failover!" 
+ ); } } diff --git a/src/meta-srv/src/procedure/region_migration/close_downgraded_region.rs b/src/meta-srv/src/procedure/region_migration/close_downgraded_region.rs index d049366552..3f488a9f86 100644 --- a/src/meta-srv/src/procedure/region_migration/close_downgraded_region.rs +++ b/src/meta-srv/src/procedure/region_migration/close_downgraded_region.rs @@ -16,10 +16,10 @@ use std::any::Any; use std::time::Duration; use api::v1::meta::MailboxMessage; +use common_meta::RegionIdent; use common_meta::distributed_time_constants::REGION_LEASE_SECS; use common_meta::instruction::{Instruction, InstructionReply, SimpleReply}; use common_meta::key::datanode_table::RegionInfo; -use common_meta::RegionIdent; use common_procedure::{Context as ProcedureContext, Status}; use common_telemetry::{info, warn}; use serde::{Deserialize, Serialize}; diff --git a/src/meta-srv/src/procedure/region_migration/downgrade_leader_region.rs b/src/meta-srv/src/procedure/region_migration/downgrade_leader_region.rs index eb8d73001f..6a23cc92d1 100644 --- a/src/meta-srv/src/procedure/region_migration/downgrade_leader_region.rs +++ b/src/meta-srv/src/procedure/region_migration/downgrade_leader_region.rs @@ -26,7 +26,7 @@ use common_telemetry::{error, info, warn}; use common_time::util::current_time_millis; use serde::{Deserialize, Serialize}; use snafu::{OptionExt, ResultExt}; -use tokio::time::{sleep, Instant}; +use tokio::time::{Instant, sleep}; use crate::error::{self, Result}; use crate::handler::HeartbeatMailbox; @@ -200,7 +200,9 @@ impl DowngradeLeaderRegion { if !exists { warn!( "Trying to downgrade the region {} on datanode {:?}, but region doesn't exist!, elapsed: {:?}", - region_id, leader, now.elapsed() + region_id, + leader, + now.elapsed() ); } else { info!( @@ -226,7 +228,7 @@ impl DowngradeLeaderRegion { } Err(error::Error::MailboxTimeout { .. 
}) => { let reason = format!( - "Mailbox received timeout for downgrade leader region {region_id} on datanode {:?}, elapsed: {:?}", + "Mailbox received timeout for downgrade leader region {region_id} on datanode {:?}, elapsed: {:?}", leader, now.elapsed() ); @@ -260,11 +262,8 @@ impl DowngradeLeaderRegion { if elapsed >= (REGION_LEASE_SECS * 1000) as i64 { ctx.volatile_ctx.reset_leader_region_lease_deadline(); info!( - "Datanode {}({}) has been disconnected for longer than the region lease period ({:?}), reset leader region lease deadline to None, region: {}", - leader, - last_connection_at, - region_lease, - ctx.persistent_ctx.region_id + "Datanode {}({}) has been disconnected for longer than the region lease period ({:?}), reset leader region lease deadline to None, region: {}", + leader, last_connection_at, region_lease, ctx.persistent_ctx.region_id ); } else if elapsed > 0 { // `now - last_connection_at` < REGION_LEASE_SECS * 1000 @@ -275,7 +274,11 @@ impl DowngradeLeaderRegion { .set_leader_region_lease_deadline(lease_timeout); info!( "Datanode {}({}) last connected {:?} ago, updated leader region lease deadline to {:?}, region: {}", - leader, last_connection_at, elapsed, ctx.volatile_ctx.leader_region_lease_deadline, ctx.persistent_ctx.region_id + leader, + last_connection_at, + elapsed, + ctx.volatile_ctx.leader_region_lease_deadline, + ctx.persistent_ctx.region_id ); } else { warn!( @@ -353,7 +356,7 @@ mod tests { use super::*; use crate::error::Error; use crate::procedure::region_migration::manager::RegionMigrationTriggerReason; - use crate::procedure::region_migration::test_util::{new_procedure_context, TestingEnv}; + use crate::procedure::region_migration::test_util::{TestingEnv, new_procedure_context}; use crate::procedure::region_migration::{ContextFactory, PersistentContext}; use crate::procedure::test_util::{ new_close_region_reply, new_downgrade_region_reply, send_mock_reply, diff --git a/src/meta-srv/src/procedure/region_migration/flush_leader_region.rs b/src/meta-srv/src/procedure/region_migration/flush_leader_region.rs index e1cd76cc3b..b5cc1a955c 100644 --- a/src/meta-srv/src/procedure/region_migration/flush_leader_region.rs +++ b/src/meta-srv/src/procedure/region_migration/flush_leader_region.rs @@ -174,8 +174,7 @@ impl PreFlushRegion { Err(Error::PusherNotFound { .. }) => { warn!( "Failed to flush leader region({}), the datanode({}) is unreachable(PusherNotFound). Skip flush operation.", - region_id, - leader + region_id, leader ); Ok(()) } @@ -191,7 +190,7 @@ mod tests { use store_api::storage::RegionId; use super::*; - use crate::procedure::region_migration::test_util::{self, new_procedure_context, TestingEnv}; + use crate::procedure::region_migration::test_util::{self, TestingEnv, new_procedure_context}; use crate::procedure::region_migration::{ContextFactory, PersistentContext}; use crate::procedure::test_util::{ new_close_region_reply, new_flush_region_reply_for_region, send_mock_reply, diff --git a/src/meta-srv/src/procedure/region_migration/manager.rs b/src/meta-srv/src/procedure/region_migration/manager.rs index 70b685dba4..563b0f290d 100644 --- a/src/meta-srv/src/procedure/region_migration/manager.rs +++ b/src/meta-srv/src/procedure/region_migration/manager.rs @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use std::collections::hash_map::Entry; use std::collections::HashMap; +use std::collections::hash_map::Entry; use std::fmt::Display; use std::sync::{Arc, RwLock}; use std::time::Duration; @@ -22,10 +22,10 @@ use common_meta::key::table_info::TableInfoValue; use common_meta::key::table_route::TableRouteValue; use common_meta::peer::Peer; use common_meta::rpc::router::RegionRoute; -use common_procedure::{watcher, ProcedureId, ProcedureManagerRef, ProcedureWithId}; +use common_procedure::{ProcedureId, ProcedureManagerRef, ProcedureWithId, watcher}; use common_telemetry::{error, info, warn}; use serde::{Deserialize, Serialize}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::storage::RegionId; use table::table_name::TableName; @@ -554,7 +554,10 @@ mod test { let err = manager.submit_procedure(task).await.unwrap_err(); assert_matches!(err, error::Error::LeaderPeerChanged { .. }); - assert_eq!(err.to_string(), "Region's leader peer changed: Region's leader peer(3) is not the `from_peer`(1), region: 4398046511105(1024, 1)"); + assert_eq!( + err.to_string(), + "Region's leader peer changed: Region's leader peer(3) is not the `from_peer`(1), region: 4398046511105(1024, 1)" + ); } #[tokio::test] diff --git a/src/meta-srv/src/procedure/region_migration/migration_start.rs b/src/meta-srv/src/procedure/region_migration/migration_start.rs index 07297f70aa..e544adbf4c 100644 --- a/src/meta-srv/src/procedure/region_migration/migration_start.rs +++ b/src/meta-srv/src/procedure/region_migration/migration_start.rs @@ -127,12 +127,10 @@ impl RegionMigrationStart { /// Checks whether the candidate region on region has been opened. /// Returns true if it's been opened. fn check_candidate_region_on_peer(&self, region_route: &RegionRoute, to_peer: &Peer) -> bool { - let region_opened = region_route + region_route .follower_peers .iter() - .any(|peer| peer.id == to_peer.id); - - region_opened + .any(|peer| peer.id == to_peer.id) } /// Returns true if the region leader is not the `from_peer`. 
@@ -184,7 +182,7 @@ mod tests { use super::*; use crate::error::Error; - use crate::procedure::region_migration::test_util::{self, new_procedure_context, TestingEnv}; + use crate::procedure::region_migration::test_util::{self, TestingEnv, new_procedure_context}; use crate::procedure::region_migration::update_metadata::UpdateMetadata; use crate::procedure::region_migration::{ContextFactory, PersistentContext}; diff --git a/src/meta-srv/src/procedure/region_migration/open_candidate_region.rs b/src/meta-srv/src/procedure/region_migration/open_candidate_region.rs index 7228108cb2..7f5098aa6b 100644 --- a/src/meta-srv/src/procedure/region_migration/open_candidate_region.rs +++ b/src/meta-srv/src/procedure/region_migration/open_candidate_region.rs @@ -16,10 +16,10 @@ use std::any::Any; use std::time::Duration; use api::v1::meta::MailboxMessage; +use common_meta::RegionIdent; use common_meta::distributed_time_constants::REGION_LEASE_SECS; use common_meta::instruction::{Instruction, InstructionReply, OpenRegion, SimpleReply}; use common_meta::key::datanode_table::RegionInfo; -use common_meta::RegionIdent; use common_procedure::{Context as ProcedureContext, Status}; use common_telemetry::info; use serde::{Deserialize, Serialize}; @@ -195,16 +195,16 @@ mod tests { use std::collections::HashMap; use common_catalog::consts::MITO2_ENGINE; + use common_meta::DatanodeId; use common_meta::key::table_route::TableRouteValue; use common_meta::key::test_utils::new_test_table_info; use common_meta::peer::Peer; use common_meta::rpc::router::{Region, RegionRoute}; - use common_meta::DatanodeId; use store_api::storage::RegionId; use super::*; use crate::error::Error; - use crate::procedure::region_migration::test_util::{self, new_procedure_context, TestingEnv}; + use crate::procedure::region_migration::test_util::{self, TestingEnv, new_procedure_context}; use crate::procedure::region_migration::{ContextFactory, PersistentContext}; use crate::procedure::test_util::{ new_close_region_reply, new_open_region_reply, send_mock_reply, diff --git a/src/meta-srv/src/procedure/region_migration/test_util.rs b/src/meta-srv/src/procedure/region_migration/test_util.rs index 9895723b07..1c927ca25f 100644 --- a/src/meta-srv/src/procedure/region_migration/test_util.rs +++ b/src/meta-srv/src/procedure/region_migration/test_util.rs @@ -14,22 +14,22 @@ use std::assert_matches::assert_matches; use std::collections::HashMap; -use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicUsize, Ordering}; use std::time::Duration; use api::v1::meta::MailboxMessage; +use common_meta::DatanodeId; use common_meta::ddl::NoopRegionFailureDetectorControl; use common_meta::key::table_route::TableRouteValue; use common_meta::key::{TableMetadataManager, TableMetadataManagerRef}; -use common_meta::kv_backend::memory::MemoryKvBackend; use common_meta::kv_backend::KvBackendRef; +use common_meta::kv_backend::memory::MemoryKvBackend; use common_meta::peer::Peer; use common_meta::region_keeper::{MemoryRegionKeeper, MemoryRegionKeeperRef}; use common_meta::rpc::router::RegionRoute; use common_meta::sequence::SequenceBuilder; use common_meta::state_store::KvStateStore; -use common_meta::DatanodeId; use common_procedure::local::{LocalManager, ManagerConfig}; use common_procedure::test_util::InMemoryPoisonStore; use common_procedure::{Context as ProcedureContext, ProcedureId, ProcedureManagerRef, Status}; @@ -56,7 +56,7 @@ use crate::procedure::region_migration::upgrade_candidate_region::UpgradeCandida use 
crate::procedure::region_migration::{ Context, ContextFactory, DefaultContextFactory, PersistentContext, State, VolatileContext, }; -use crate::procedure::test_util::{send_mock_reply, MailboxContext}; +use crate::procedure::test_util::{MailboxContext, send_mock_reply}; use crate::service::mailbox::Channel; /// `TestingEnv` provides components during the tests. @@ -331,10 +331,12 @@ impl ProcedureMigrationTestSuite { region_route.leader_peer.as_ref().unwrap().id, expected_leader_id ); - assert!(!region_route - .follower_peers - .iter() - .any(|route| route.id == removed_follower_id)) + assert!( + !region_route + .follower_peers + .iter() + .any(|route| route.id == removed_follower_id) + ) } } diff --git a/src/meta-srv/src/procedure/region_migration/update_metadata.rs b/src/meta-srv/src/procedure/region_migration/update_metadata.rs index b8f074b591..8e7b2d4d3b 100644 --- a/src/meta-srv/src/procedure/region_migration/update_metadata.rs +++ b/src/meta-srv/src/procedure/region_migration/update_metadata.rs @@ -64,7 +64,9 @@ impl State for UpdateMetadata { self.upgrade_candidate_region(ctx).await?; if let Err(err) = ctx.invalidate_table_cache().await { - warn!("Failed to broadcast the invalidate table cache message during the upgrade candidate, error: {err:?}"); + warn!( + "Failed to broadcast the invalidate table cache message during the upgrade candidate, error: {err:?}" + ); }; Ok((Box::new(CloseDowngradedRegion), Status::executing(false))) } @@ -72,7 +74,9 @@ impl State for UpdateMetadata { self.rollback_downgraded_region(ctx).await?; if let Err(err) = ctx.invalidate_table_cache().await { - warn!("Failed to broadcast the invalidate table cache message during the rollback, error: {err:?}"); + warn!( + "Failed to broadcast the invalidate table cache message during the rollback, error: {err:?}" + ); }; Ok(( Box::new(RegionMigrationAbort::new( diff --git a/src/meta-srv/src/procedure/region_migration/update_metadata/downgrade_leader_region.rs b/src/meta-srv/src/procedure/region_migration/update_metadata/downgrade_leader_region.rs index a83dbd4b37..28633efa56 100644 --- a/src/meta-srv/src/procedure/region_migration/update_metadata/downgrade_leader_region.rs +++ b/src/meta-srv/src/procedure/region_migration/update_metadata/downgrade_leader_region.rs @@ -17,8 +17,8 @@ use common_meta::rpc::router::LeaderState; use snafu::ResultExt; use crate::error::{self, Result}; -use crate::procedure::region_migration::update_metadata::UpdateMetadata; use crate::procedure::region_migration::Context; +use crate::procedure::region_migration::update_metadata::UpdateMetadata; impl UpdateMetadata { /// Downgrades the leader region. 
@@ -86,7 +86,7 @@ mod tests { use crate::error::Error; use crate::procedure::region_migration::downgrade_leader_region::DowngradeLeaderRegion; - use crate::procedure::region_migration::test_util::{self, new_procedure_context, TestingEnv}; + use crate::procedure::region_migration::test_util::{self, TestingEnv, new_procedure_context}; use crate::procedure::region_migration::update_metadata::UpdateMetadata; use crate::procedure::region_migration::{ContextFactory, PersistentContext, State}; diff --git a/src/meta-srv/src/procedure/region_migration/update_metadata/rollback_downgraded_region.rs b/src/meta-srv/src/procedure/region_migration/update_metadata/rollback_downgraded_region.rs index fe0b5c28da..7c4a7b713e 100644 --- a/src/meta-srv/src/procedure/region_migration/update_metadata/rollback_downgraded_region.rs +++ b/src/meta-srv/src/procedure/region_migration/update_metadata/rollback_downgraded_region.rs @@ -16,8 +16,8 @@ use common_error::ext::BoxedError; use snafu::ResultExt; use crate::error::{self, Result}; -use crate::procedure::region_migration::update_metadata::UpdateMetadata; use crate::procedure::region_migration::Context; +use crate::procedure::region_migration::update_metadata::UpdateMetadata; impl UpdateMetadata { /// Rollbacks the downgraded leader region if the candidate region is unreachable. @@ -70,7 +70,7 @@ mod tests { use crate::error::Error; use crate::procedure::region_migration::migration_abort::RegionMigrationAbort; - use crate::procedure::region_migration::test_util::{self, new_procedure_context, TestingEnv}; + use crate::procedure::region_migration::test_util::{self, TestingEnv, new_procedure_context}; use crate::procedure::region_migration::update_metadata::UpdateMetadata; use crate::procedure::region_migration::{ContextFactory, PersistentContext, State}; use crate::region::supervisor::RegionFailureDetectorControl; diff --git a/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs b/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs index 42edb52671..54948ab0c8 100644 --- a/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs +++ b/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs @@ -14,13 +14,13 @@ use common_error::ext::BoxedError; use common_meta::key::datanode_table::RegionInfo; -use common_meta::rpc::router::{region_distribution, RegionRoute}; +use common_meta::rpc::router::{RegionRoute, region_distribution}; use common_telemetry::{info, warn}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use crate::error::{self, Result}; -use crate::procedure::region_migration::update_metadata::UpdateMetadata; use crate::procedure::region_migration::Context; +use crate::procedure::region_migration::update_metadata::UpdateMetadata; impl UpdateMetadata { /// Returns new [Vec]. @@ -49,14 +49,18 @@ impl UpdateMetadata { let expected_old_leader = &ctx.persistent_ctx.from_peer; // Upgrades candidate to leader. 
- ensure!(region_route + ensure!( + region_route .leader_peer .take_if(|old_leader| old_leader.id == expected_old_leader.id) .is_some(), - error::UnexpectedSnafu{ - violated: format!("Unexpected region leader: {:?} during the upgrading candidate metadata, expected: {:?}", region_route.leader_peer, expected_old_leader), - } - ); + error::UnexpectedSnafu { + violated: format!( + "Unexpected region leader: {:?} during the upgrading candidate metadata, expected: {:?}", + region_route.leader_peer, expected_old_leader + ), + } + ); region_route.leader_peer = Some(candidate.clone()); info!( @@ -72,8 +76,8 @@ impl UpdateMetadata { if removed.len() > 1 { warn!( - "Removes duplicated regions: {removed:?} during the upgrading candidate metadata for region: {region_id}" - ); + "Removes duplicated regions: {removed:?} during the upgrading candidate metadata for region: {region_id}" + ); } Ok(region_routes) @@ -108,7 +112,9 @@ impl UpdateMetadata { ensure!( !region_route.is_leader_downgrading(), error::UnexpectedSnafu { - violated: format!("Unexpected intermediate state is found during the update metadata for upgrading region {region_id}"), + violated: format!( + "Unexpected intermediate state is found during the update metadata for upgrading region {region_id}" + ), } ); @@ -196,7 +202,7 @@ mod tests { use crate::error::Error; use crate::procedure::region_migration::close_downgraded_region::CloseDowngradedRegion; - use crate::procedure::region_migration::test_util::{self, new_procedure_context, TestingEnv}; + use crate::procedure::region_migration::test_util::{self, TestingEnv, new_procedure_context}; use crate::procedure::region_migration::update_metadata::UpdateMetadata; use crate::procedure::region_migration::{ContextFactory, PersistentContext, State}; diff --git a/src/meta-srv/src/procedure/region_migration/upgrade_candidate_region.rs b/src/meta-srv/src/procedure/region_migration/upgrade_candidate_region.rs index b9a2c506ee..2dfaa21a89 100644 --- a/src/meta-srv/src/procedure/region_migration/upgrade_candidate_region.rs +++ b/src/meta-srv/src/procedure/region_migration/upgrade_candidate_region.rs @@ -24,8 +24,8 @@ use common_procedure::{Context as ProcedureContext, Status}; use common_telemetry::{error, warn}; use common_wal::options::WalOptions; use serde::{Deserialize, Serialize}; -use snafu::{ensure, OptionExt, ResultExt}; -use tokio::time::{sleep, Instant}; +use snafu::{OptionExt, ResultExt, ensure}; +use tokio::time::{Instant, sleep}; use crate::error::{self, Result}; use crate::handler::HeartbeatMailbox; @@ -241,9 +241,9 @@ impl UpgradeCandidateRegion { } Err(error::Error::MailboxTimeout { .. 
}) => { let reason = format!( - "Mailbox received timeout for upgrade candidate region {region_id} on datanode {:?}", - candidate, - ); + "Mailbox received timeout for upgrade candidate region {region_id} on datanode {:?}", + candidate, + ); error::RetryLaterSnafu { reason }.fail() } Err(err) => Err(err), @@ -315,7 +315,7 @@ mod tests { use super::*; use crate::error::Error; use crate::procedure::region_migration::manager::RegionMigrationTriggerReason; - use crate::procedure::region_migration::test_util::{new_procedure_context, TestingEnv}; + use crate::procedure::region_migration::test_util::{TestingEnv, new_procedure_context}; use crate::procedure::region_migration::{ContextFactory, PersistentContext}; use crate::procedure::test_util::{ new_close_region_reply, new_upgrade_region_reply, send_mock_reply, diff --git a/src/meta-srv/src/procedure/test_util.rs b/src/meta-srv/src/procedure/test_util.rs index 9f8b40692d..d822ae845c 100644 --- a/src/meta-srv/src/procedure/test_util.rs +++ b/src/meta-srv/src/procedure/test_util.rs @@ -19,10 +19,10 @@ use api::v1::meta::{HeartbeatResponse, MailboxMessage}; use common_meta::instruction::{ DowngradeRegionReply, FlushRegionReply, InstructionReply, SimpleReply, UpgradeRegionReply, }; +use common_meta::key::TableMetadataManagerRef; use common_meta::key::table_route::TableRouteValue; use common_meta::key::test_utils::new_test_table_info; use common_meta::key::topic_name::TopicNameKey; -use common_meta::key::TableMetadataManagerRef; use common_meta::peer::Peer; use common_meta::region_registry::{ LeaderRegion, LeaderRegionManifestInfo, LeaderRegionRegistryRef, diff --git a/src/meta-srv/src/procedure/tests.rs b/src/meta-srv/src/procedure/tests.rs index 5fc861f6d7..8b3f7cc852 100644 --- a/src/meta-srv/src/procedure/tests.rs +++ b/src/meta-srv/src/procedure/tests.rs @@ -25,12 +25,12 @@ use common_meta::ddl::create_logical_tables::{CreateLogicalTablesProcedure, Crea use common_meta::ddl::create_table::*; use common_meta::ddl::test_util::columns::TestColumnDefBuilder; use common_meta::ddl::test_util::create_table::{ - build_raw_table_info_from_expr, TestCreateTableExprBuilder, + TestCreateTableExprBuilder, build_raw_table_info_from_expr, }; use common_meta::key::table_route::{PhysicalTableRouteValue, TableRouteValue}; use common_meta::node_manager::NodeManagerRef; use common_meta::rpc::ddl::CreateTableTask; -use common_meta::rpc::router::{find_leaders, RegionRoute}; +use common_meta::rpc::router::{RegionRoute, find_leaders}; use common_procedure::Status; use store_api::storage::RegionId; diff --git a/src/meta-srv/src/procedure/utils.rs b/src/meta-srv/src/procedure/utils.rs index fd0eca50c2..02488e0f83 100644 --- a/src/meta-srv/src/procedure/utils.rs +++ b/src/meta-srv/src/procedure/utils.rs @@ -18,7 +18,7 @@ pub mod mock { use std::sync::Arc; use api::v1::region::region_server::RegionServer; - use api::v1::region::{region_request, RegionResponse}; + use api::v1::region::{RegionResponse, region_request}; use api::v1::{ResponseHeader, Status as PbStatus}; use async_trait::async_trait; use client::Client; @@ -118,8 +118,8 @@ pub mod test_data { use common_meta::ddl::flow_meta::FlowMetadataAllocator; use common_meta::ddl::table_meta::TableMetadataAllocator; use common_meta::ddl::{DdlContext, NoopRegionFailureDetectorControl}; - use common_meta::key::flow::FlowMetadataManager; use common_meta::key::TableMetadataManager; + use common_meta::key::flow::FlowMetadataManager; use common_meta::kv_backend::memory::MemoryKvBackend; use 
common_meta::node_manager::NodeManagerRef; use common_meta::peer::Peer; diff --git a/src/meta-srv/src/procedure/wal_prune.rs b/src/meta-srv/src/procedure/wal_prune.rs index 1d4c34c7cd..0897441647 100644 --- a/src/meta-srv/src/procedure/wal_prune.rs +++ b/src/meta-srv/src/procedure/wal_prune.rs @@ -35,11 +35,11 @@ use serde::{Deserialize, Serialize}; use snafu::ResultExt; use store_api::logstore::EntryId; +use crate::Result; use crate::error::{self}; use crate::procedure::wal_prune::utils::{ delete_records, get_offsets_for_topic, get_partition_client, update_pruned_entry_id, }; -use crate::Result; pub type KafkaClientRef = Arc; @@ -114,10 +114,7 @@ impl WalPruneProcedure { if self.data.prunable_entry_id <= earliest_offset { warn!( "The prunable entry id is less or equal to the earliest offset, topic: {}, prunable entry id: {}, earliest offset: {}, latest offset: {}", - self.data.topic, - self.data.prunable_entry_id, - earliest_offset, - latest_offset + self.data.topic, self.data.prunable_entry_id, earliest_offset, latest_offset ); return Ok(Status::done()); } @@ -222,7 +219,8 @@ mod tests { (n_region * n_table * 5) as usize, ) .await; - let prunable_entry_id = new_wal_prune_metadata( + + new_wal_prune_metadata( context.table_metadata_manager.clone(), context.leader_region_registry.clone(), n_region, @@ -230,8 +228,7 @@ mod tests { &offsets, topic.to_string(), ) - .await; - prunable_entry_id + .await } fn record(i: usize) -> Record { diff --git a/src/meta-srv/src/procedure/wal_prune/manager.rs b/src/meta-srv/src/procedure/wal_prune/manager.rs index 00fde36a85..f651d4b2de 100644 --- a/src/meta-srv/src/procedure/wal_prune/manager.rs +++ b/src/meta-srv/src/procedure/wal_prune/manager.rs @@ -12,17 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use std::collections::hash_set::Entry; use std::collections::HashSet; +use std::collections::hash_set::Entry; use std::fmt::{Debug, Formatter}; use std::sync::{Arc, RwLock}; -use common_procedure::{watcher, ProcedureId, ProcedureManagerRef, ProcedureWithId}; +use common_procedure::{ProcedureId, ProcedureManagerRef, ProcedureWithId, watcher}; use common_telemetry::{debug, error, info, warn}; use futures::future::join_all; use snafu::{OptionExt, ResultExt}; -use tokio::sync::mpsc::{Receiver, Sender}; use tokio::sync::Semaphore; +use tokio::sync::mpsc::{Receiver, Sender}; use crate::define_ticker; use crate::error::{self, Result}; diff --git a/src/meta-srv/src/procedure/wal_prune/test_util.rs b/src/meta-srv/src/procedure/wal_prune/test_util.rs index 6f8656e36c..e94aa184c3 100644 --- a/src/meta-srv/src/procedure/wal_prune/test_util.rs +++ b/src/meta-srv/src/procedure/wal_prune/test_util.rs @@ -19,11 +19,11 @@ use common_meta::kv_backend::memory::MemoryKvBackend; use common_meta::region_registry::{LeaderRegionRegistry, LeaderRegionRegistryRef}; use common_meta::state_store::KvStateStore; use common_meta::wal_options_allocator::build_kafka_client; +use common_procedure::ProcedureManagerRef; use common_procedure::local::{LocalManager, ManagerConfig}; use common_procedure::test_util::InMemoryPoisonStore; -use common_procedure::ProcedureManagerRef; -use common_wal::config::kafka::common::{KafkaConnectionConfig, KafkaTopicConfig}; use common_wal::config::kafka::MetasrvKafkaConfig; +use common_wal::config::kafka::common::{KafkaConnectionConfig, KafkaTopicConfig}; use rskafka::client::Client; use crate::procedure::wal_prune::Context as WalPruneContext; diff --git a/src/meta-srv/src/procedure/wal_prune/utils.rs b/src/meta-srv/src/procedure/wal_prune/utils.rs index 045af46a4a..659ccb4159 100644 --- a/src/meta-srv/src/procedure/wal_prune/utils.rs +++ b/src/meta-srv/src/procedure/wal_prune/utils.rs @@ -20,8 +20,8 @@ use common_meta::key::TableMetadataManagerRef; use common_meta::region_registry::LeaderRegionRegistryRef; use common_telemetry::warn; use itertools::{Itertools, MinMaxResult}; -use rskafka::client::partition::{OffsetAt, PartitionClient, UnknownTopicHandling}; use rskafka::client::Client; +use rskafka::client::partition::{OffsetAt, PartitionClient, UnknownTopicHandling}; use snafu::ResultExt; use store_api::storage::RegionId; diff --git a/src/meta-srv/src/region/failure_detector.rs b/src/meta-srv/src/region/failure_detector.rs index a795e2e3e8..697db5c43b 100644 --- a/src/meta-srv/src/region/failure_detector.rs +++ b/src/meta-srv/src/region/failure_detector.rs @@ -15,8 +15,8 @@ use std::ops::DerefMut; use common_meta::ddl::DetectingRegion; -use dashmap::mapref::multiple::RefMulti; use dashmap::DashMap; +use dashmap::mapref::multiple::RefMulti; use crate::failure_detector::{PhiAccrualFailureDetector, PhiAccrualFailureDetectorOptions}; diff --git a/src/meta-srv/src/region/flush_trigger.rs b/src/meta-srv/src/region/flush_trigger.rs index e04402bd10..3aee4488f6 100644 --- a/src/meta-srv/src/region/flush_trigger.rs +++ b/src/meta-srv/src/region/flush_trigger.rs @@ -19,8 +19,8 @@ use std::time::{Duration, Instant}; use api::v1::meta::MailboxMessage; use common_base::readable_size::ReadableSize; use common_meta::instruction::{FlushRegions, Instruction}; -use common_meta::key::topic_region::{ReplayCheckpoint, TopicRegionKey, TopicRegionValue}; use common_meta::key::TableMetadataManagerRef; +use common_meta::key::topic_region::{ReplayCheckpoint, TopicRegionKey, TopicRegionValue}; use 
common_meta::peer::Peer; use common_meta::region_registry::{LeaderRegion, LeaderRegionRegistryRef}; use common_meta::stats::topic::TopicStatsRegistryRef; @@ -425,7 +425,12 @@ fn filter_regions_by_replay_size>( if replay_size > threshold.as_bytes() { debug!( "Region {}: estimated replay size {} exceeds threshold {}, entry id: {}, topic latest entry id: {}, topic: '{}'", - region_id, ReadableSize(replay_size), threshold, entry_id, latest_entry_id, topic + region_id, + ReadableSize(replay_size), + threshold, + entry_id, + latest_entry_id, + topic ); regions_to_flush.push(region_id); } diff --git a/src/meta-srv/src/region/lease_keeper.rs b/src/meta-srv/src/region/lease_keeper.rs index 77fec7da3b..324396fa64 100644 --- a/src/meta-srv/src/region/lease_keeper.rs +++ b/src/meta-srv/src/region/lease_keeper.rs @@ -15,11 +15,11 @@ use std::collections::{HashMap, HashSet}; use std::sync::Arc; -use common_meta::key::table_route::TableRouteValue; +use common_meta::DatanodeId; use common_meta::key::TableMetadataManagerRef; +use common_meta::key::table_route::TableRouteValue; use common_meta::region_keeper::MemoryRegionKeeperRef; use common_meta::rpc::router::RegionRoute; -use common_meta::DatanodeId; use common_telemetry::warn; use snafu::ResultExt; use store_api::region_engine::RegionRole; @@ -60,16 +60,16 @@ fn renew_region_lease_via_region_route( region_id: RegionId, ) -> Option<(RegionId, RegionRole)> { // If it's a leader region on this datanode. - if let Some(leader) = &region_route.leader_peer { - if leader.id == datanode_id { - let region_role = if region_route.is_leader_downgrading() { - RegionRole::DowngradingLeader - } else { - RegionRole::Leader - }; + if let Some(leader) = &region_route.leader_peer + && leader.id == datanode_id + { + let region_role = if region_route.is_leader_downgrading() { + RegionRole::DowngradingLeader + } else { + RegionRole::Leader + }; - return Some((region_id, region_role)); - } + return Some((region_id, region_role)); } // If it's a follower region on this datanode.
@@ -256,9 +256,9 @@ mod tests { use std::collections::{HashMap, HashSet}; use std::sync::Arc; + use common_meta::key::TableMetadataManager; use common_meta::key::table_route::{LogicalTableRouteValue, TableRouteValue}; use common_meta::key::test_utils::new_test_table_info; - use common_meta::key::TableMetadataManager; use common_meta::kv_backend::memory::MemoryKvBackend; use common_meta::peer::Peer; use common_meta::region_keeper::MemoryRegionKeeper; @@ -267,7 +267,7 @@ mod tests { use store_api::storage::RegionId; use table::metadata::RawTableInfo; - use super::{renew_region_lease_via_region_route, RegionLeaseKeeper}; + use super::{RegionLeaseKeeper, renew_region_lease_via_region_route}; use crate::region::lease_keeper::{RegionLeaseInfo, RenewRegionLeasesResponse}; fn new_test_keeper() -> RegionLeaseKeeper { diff --git a/src/meta-srv/src/region/supervisor.rs b/src/meta-srv/src/region/supervisor.rs index c289ecb1b5..719bded6e8 100644 --- a/src/meta-srv/src/region/supervisor.rs +++ b/src/meta-srv/src/region/supervisor.rs @@ -18,6 +18,7 @@ use std::sync::{Arc, Mutex}; use std::time::{Duration, Instant}; use async_trait::async_trait; +use common_meta::DatanodeId; use common_meta::datanode::Stat; use common_meta::ddl::{DetectingRegion, RegionFailureDetectorController}; use common_meta::key::runtime_switch::RuntimeSwitchManagerRef; @@ -26,19 +27,18 @@ use common_meta::key::{MetadataKey, MetadataValue}; use common_meta::kv_backend::KvBackendRef; use common_meta::leadership_notifier::LeadershipChangeListener; use common_meta::peer::{Peer, PeerLookupServiceRef}; -use common_meta::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE}; +use common_meta::range_stream::{DEFAULT_PAGE_SIZE, PaginationStream}; use common_meta::rpc::store::RangeRequest; -use common_meta::DatanodeId; use common_runtime::JoinHandle; use common_telemetry::{debug, error, info, warn}; use common_time::util::current_time_millis; use error::Error::{LeaderPeerChanged, MigrationRunning, RegionMigrated, TableRouteNotFound}; use futures::{StreamExt, TryStreamExt}; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use store_api::storage::RegionId; use tokio::sync::mpsc::{Receiver, Sender}; use tokio::sync::oneshot; -use tokio::time::{interval, interval_at, MissedTickBehavior}; +use tokio::time::{MissedTickBehavior, interval, interval_at}; use crate::error::{self, Result}; use crate::failure_detector::PhiAccrualFailureDetectorOptions; @@ -47,7 +47,7 @@ use crate::procedure::region_migration::manager::{ RegionMigrationManagerRef, RegionMigrationTriggerReason, }; use crate::procedure::region_migration::{ - RegionMigrationProcedureTask, DEFAULT_REGION_MIGRATION_TIMEOUT, + DEFAULT_REGION_MIGRATION_TIMEOUT, RegionMigrationProcedureTask, }; use crate::region::failure_detector::RegionFailureDetector; use crate::selector::SelectorOptions; @@ -206,7 +206,9 @@ impl RegionSupervisorTicker { initialization_interval.tick().await; let (tx, rx) = oneshot::channel(); if sender.send(Event::InitializeAllRegions(tx)).await.is_err() { - info!("EventReceiver is dropped, region failure detectors initialization loop is stopped"); + info!( + "EventReceiver is dropped, region failure detectors initialization loop is stopped" + ); break; } if rx.await.is_ok() { @@ -381,7 +383,9 @@ impl RegionSupervisor { match self.is_maintenance_mode_enabled().await { Ok(false) => {} Ok(true) => { - warn!("Skipping initialize all regions since maintenance mode is enabled."); + warn!( + "Skipping initialize all regions since maintenance mode is enabled." 
+ ); continue; } Err(err) => { @@ -449,10 +453,10 @@ impl RegionSupervisor { .region_routes .iter() .for_each(|region_route| { - if !regions.contains(&region_route.region.id) { - if let Some(leader_peer) = &region_route.leader_peer { - detecting_regions.push((leader_peer.id, region_route.region.id)); - } + if !regions.contains(&region_route.region.id) + && let Some(leader_peer) = &region_route.leader_peer + { + detecting_regions.push((leader_peer.id, region_route.region.id)); } }); } @@ -764,14 +768,14 @@ pub(crate) mod tests { use std::sync::{Arc, Mutex}; use std::time::Duration; + use common_meta::ddl::RegionFailureDetectorController; use common_meta::ddl::test_util::{ test_create_logical_table_task, test_create_physical_table_task, }; - use common_meta::ddl::RegionFailureDetectorController; use common_meta::key::table_route::{ LogicalTableRouteValue, PhysicalTableRouteValue, TableRouteValue, }; - use common_meta::key::{runtime_switch, TableMetadataManager}; + use common_meta::key::{TableMetadataManager, runtime_switch}; use common_meta::peer::Peer; use common_meta::rpc::router::{Region, RegionRoute}; use common_meta::test_util::NoopPeerLookupService; @@ -790,7 +794,7 @@ pub(crate) mod tests { DatanodeHeartbeat, Event, RegionFailureDetectorControl, RegionSupervisor, RegionSupervisorTicker, }; - use crate::selector::test_utils::{new_test_selector_context, RandomNodeSelector}; + use crate::selector::test_utils::{RandomNodeSelector, new_test_selector_context}; pub(crate) fn new_test_supervisor() -> (RegionSupervisor, Sender<Event>) { let env = TestingEnv::new(); diff --git a/src/meta-srv/src/selector/common.rs b/src/meta-srv/src/selector/common.rs index 24c3597c26..341b9106d6 100644 --- a/src/meta-srv/src/selector/common.rs +++ b/src/meta-srv/src/selector/common.rs @@ -20,8 +20,8 @@ use snafu::ensure; use crate::error; use crate::error::Result; use crate::metasrv::SelectTarget; -use crate::selector::weighted_choose::{WeightedChoose, WeightedItem}; use crate::selector::SelectorOptions; +use crate::selector::weighted_choose::{WeightedChoose, WeightedItem}; /// Filter out the excluded peers from the `weight_array`.
pub fn filter_out_excluded_peers( @@ -90,9 +90,9 @@ mod tests { use common_meta::peer::Peer; + use crate::selector::SelectorOptions; use crate::selector::common::{choose_items, filter_out_excluded_peers}; use crate::selector::weighted_choose::{RandomWeightedChoose, WeightedItem}; - use crate::selector::SelectorOptions; #[test] fn test_choose_peers() { diff --git a/src/meta-srv/src/selector/load_based.rs b/src/meta-srv/src/selector/load_based.rs index 5863afe8ce..b00e3a9461 100644 --- a/src/meta-srv/src/selector/load_based.rs +++ b/src/meta-srv/src/selector/load_based.rs @@ -87,7 +87,10 @@ where let leader_peer_ids = get_leader_peer_ids(&table_metadata_manager, table_id).await?; let filter_result = filter_out_datanode_by_table(&stat_kvs, &leader_peer_ids); if filter_result.is_empty() { - info!("The regions of the table cannot be allocated to completely different datanodes, table id: {}.", table_id); + info!( + "The regions of the table cannot be allocated to completely different datanodes, table id: {}.", + table_id + ); stat_kvs } else { filter_result @@ -171,8 +174,8 @@ async fn get_leader_peer_ids( mod tests { use std::collections::HashMap; - use api::v1::meta::heartbeat_request::NodeWorkloads; use api::v1::meta::DatanodeWorkloads; + use api::v1::meta::heartbeat_request::NodeWorkloads; use common_meta::datanode::{DatanodeStatKey, DatanodeStatValue}; use common_workload::DatanodeWorkloadType; diff --git a/src/meta-srv/src/selector/round_robin.rs b/src/meta-srv/src/selector/round_robin.rs index cbe9246b86..801301659f 100644 --- a/src/meta-srv/src/selector/round_robin.rs +++ b/src/meta-srv/src/selector/round_robin.rs @@ -13,8 +13,8 @@ // limitations under the License. use std::collections::HashSet; -use std::sync::atomic::AtomicUsize; use std::sync::Arc; +use std::sync::atomic::AtomicUsize; use std::time::Duration; use common_meta::peer::Peer; diff --git a/src/meta-srv/src/service/admin.rs b/src/meta-srv/src/service/admin.rs index 8b089d11aa..d146785e8c 100644 --- a/src/meta-srv/src/service/admin.rs +++ b/src/meta-srv/src/service/admin.rs @@ -27,9 +27,9 @@ use std::convert::Infallible; use std::sync::Arc; use std::task::{Context, Poll}; -use axum::{routing, Router as AxumRouter}; +use axum::{Router as AxumRouter, routing}; use tonic::body::Body; -use tonic::codegen::{http, BoxFuture, Service}; +use tonic::codegen::{BoxFuture, Service, http}; use tonic::server::NamedService; use crate::metasrv::Metasrv; @@ -207,7 +207,7 @@ impl Router { return Ok(http::Response::builder() .status(http::StatusCode::NOT_FOUND) .body(Body::empty()) - .unwrap()) + .unwrap()); } }; @@ -312,13 +312,13 @@ pub fn admin_axum_router(metasrv: Arc<Metasrv>) -> AxumRouter { #[cfg(test)] mod tests { - use common_meta::kv_backend::memory::MemoryKvBackend; use common_meta::kv_backend::KvBackendRef; + use common_meta::kv_backend::memory::MemoryKvBackend; use tokio::io::{AsyncReadExt, AsyncWriteExt, DuplexStream}; use super::*; - use crate::metasrv::builder::MetasrvBuilder; use crate::metasrv::MetasrvOptions; + use crate::metasrv::builder::MetasrvBuilder; use crate::{bootstrap, error}; struct MockOkHandler; @@ -431,8 +431,7 @@ mod tests { .options(opts) .kv_backend(kv_backend.clone()); - let metasrv = builder.build().await.unwrap(); - metasrv + builder.build().await.unwrap() } async fn send_request(client: &mut DuplexStream, request: &[u8]) -> String { @@ -650,14 +649,14 @@ mod tests { mod axum_admin_tests { use std::sync::Arc; - use axum::body::{to_bytes, Body}; + use axum::body::{Body, to_bytes}; use axum::http::{Method, Request,
StatusCode}; use common_meta::kv_backend::memory::MemoryKvBackend; use tower::ServiceExt; // for `oneshot` use super::*; - use crate::metasrv::builder::MetasrvBuilder; use crate::metasrv::MetasrvOptions; + use crate::metasrv::builder::MetasrvBuilder; use crate::service::admin::sequencer::NextTableIdResponse; async fn setup_axum_app() -> AxumRouter { diff --git a/src/meta-srv/src/service/admin/heartbeat.rs b/src/meta-srv/src/service/admin/heartbeat.rs index e351f76ffa..cb13764d30 100644 --- a/src/meta-srv/src/service/admin/heartbeat.rs +++ b/src/meta-srv/src/service/admin/heartbeat.rs @@ -14,9 +14,9 @@ use std::collections::HashMap; +use axum::Json; use axum::extract::{Query, State}; use axum::response::{IntoResponse, Response}; -use axum::Json; use common_meta::datanode::DatanodeStatValue; use serde::{Deserialize, Serialize}; use snafu::ResultExt; @@ -25,7 +25,7 @@ use tonic::codegen::http; use crate::cluster::MetaPeerClientRef; use crate::error::{self, Result}; use crate::service::admin::util::ErrorHandler; -use crate::service::admin::{util, HttpHandler}; +use crate::service::admin::{HttpHandler, util}; #[derive(Clone)] pub struct HeartBeatHandler { @@ -127,16 +127,16 @@ fn filter_by_addr(stat_vals: Vec<DatanodeStatValue>, addr: &str) -> Vec<DatanodeStatValue> { // Handle PUT request to '/admin/maintenance' with URL parameters. (The legacy version) if path.ends_with(MAINTENANCE_PATH) { - warn!("Found PUT request to '/admin/maintenance', it's deprecated, will be removed in the future"); + warn!( + "Found PUT request to '/admin/maintenance', it's deprecated, will be removed in the future" + ); let response = self.handle_legacy_maintenance(params).await?; to_json_response(response) } else { @@ -195,7 +197,9 @@ impl HttpHandler for MaintenanceHandler { to_json_response(response) } else if path.ends_with(MAINTENANCE_PATH) { // Handle POST request to '/admin/maintenance' with URL parameters.
(The legacy version) - warn!("Found PUT request to '/admin/maintenance', it's deprecated, will be removed in the future"); + warn!( + "Found PUT request to '/admin/maintenance', it's deprecated, will be removed in the future" + ); let response = self.handle_legacy_maintenance(params).await?; to_json_response(response) } else { diff --git a/src/meta-srv/src/service/admin/node_lease.rs b/src/meta-srv/src/service/admin/node_lease.rs index 4384ce0835..0e3ff73919 100644 --- a/src/meta-srv/src/service/admin/node_lease.rs +++ b/src/meta-srv/src/service/admin/node_lease.rs @@ -15,9 +15,9 @@ use std::collections::HashMap; use std::time::Duration; +use axum::Json; use axum::extract::State; use axum::response::{IntoResponse, Response}; -use axum::Json; use serde::{Deserialize, Serialize}; use snafu::ResultExt; use tonic::codegen::http; @@ -26,8 +26,8 @@ use crate::cluster::MetaPeerClientRef; use crate::error::{self, Result}; use crate::key::{DatanodeLeaseKey, LeaseValue}; use crate::lease; -use crate::service::admin::util::ErrorHandler; use crate::service::admin::HttpHandler; +use crate::service::admin::util::ErrorHandler; #[derive(Clone)] pub struct NodeLeaseHandler { diff --git a/src/meta-srv/src/service/admin/procedure.rs b/src/meta-srv/src/service/admin/procedure.rs index 74c8594503..c40b20cab9 100644 --- a/src/meta-srv/src/service/admin/procedure.rs +++ b/src/meta-srv/src/service/admin/procedure.rs @@ -14,9 +14,9 @@ use std::collections::HashMap; +use axum::Json; use axum::extract::State; use axum::response::{IntoResponse, Response}; -use axum::Json; use common_meta::key::runtime_switch::RuntimeSwitchManagerRef; use common_telemetry::info; use serde::{Deserialize, Serialize}; @@ -24,8 +24,8 @@ use snafu::ResultExt; use tonic::codegen::http; use crate::error::RuntimeSwitchManagerSnafu; -use crate::service::admin::util::{to_json_response, to_not_found_response, ErrorHandler}; use crate::service::admin::HttpHandler; +use crate::service::admin::util::{ErrorHandler, to_json_response, to_not_found_response}; #[derive(Clone)] pub struct ProcedureManagerHandler { diff --git a/src/meta-srv/src/service/admin/recovery.rs b/src/meta-srv/src/service/admin/recovery.rs index ebfcc05656..aa44ed3aca 100644 --- a/src/meta-srv/src/service/admin/recovery.rs +++ b/src/meta-srv/src/service/admin/recovery.rs @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. +use axum::Json; use axum::extract::State; use axum::response::{IntoResponse, Response}; -use axum::Json; use common_meta::key::runtime_switch::RuntimeSwitchManagerRef; use serde::{Deserialize, Serialize}; diff --git a/src/meta-srv/src/service/admin/sequencer.rs b/src/meta-srv/src/service/admin/sequencer.rs index db15998b05..0ee22492c2 100644 --- a/src/meta-srv/src/service/admin/sequencer.rs +++ b/src/meta-srv/src/service/admin/sequencer.rs @@ -12,15 +12,15 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+use axum::Json; use axum::extract::{self, State}; use axum::http::StatusCode; use axum::response::{IntoResponse, Response}; -use axum::Json; use common_meta::key::runtime_switch::RuntimeSwitchManagerRef; use common_meta::sequence::SequenceRef; use serde::{Deserialize, Serialize}; use servers::http::result::error_result::ErrorResponse; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use crate::error::{ PeekSequenceSnafu, Result, RuntimeSwitchManagerSnafu, SetNextSequenceSnafu, UnexpectedSnafu, diff --git a/src/meta-srv/src/service/cluster.rs b/src/meta-srv/src/service/cluster.rs index d80481b462..e29c62b9f5 100644 --- a/src/meta-srv/src/service/cluster.rs +++ b/src/meta-srv/src/service/cluster.rs @@ -13,9 +13,9 @@ // limitations under the License. use api::v1::meta::{ - cluster_server, BatchGetRequest as PbBatchGetRequest, BatchGetResponse as PbBatchGetResponse, - MetasrvNodeInfo, MetasrvPeersRequest, MetasrvPeersResponse, RangeRequest as PbRangeRequest, - RangeResponse as PbRangeResponse, + BatchGetRequest as PbBatchGetRequest, BatchGetResponse as PbBatchGetResponse, MetasrvNodeInfo, + MetasrvPeersRequest, MetasrvPeersResponse, RangeRequest as PbRangeRequest, + RangeResponse as PbRangeResponse, cluster_server, }; use common_telemetry::warn; use snafu::ResultExt; diff --git a/src/meta-srv/src/service/heartbeat.rs b/src/meta-srv/src/service/heartbeat.rs index d1a3a0e636..6b63116de6 100644 --- a/src/meta-srv/src/service/heartbeat.rs +++ b/src/meta-srv/src/service/heartbeat.rs @@ -13,12 +13,12 @@ // limitations under the License. use std::io::ErrorKind; -use std::sync::atomic::AtomicU64; use std::sync::Arc; +use std::sync::atomic::AtomicU64; use api::v1::meta::{ - heartbeat_server, AskLeaderRequest, AskLeaderResponse, HeartbeatRequest, HeartbeatResponse, - Peer, RequestHeader, ResponseHeader, Role, + AskLeaderRequest, AskLeaderResponse, HeartbeatRequest, HeartbeatResponse, Peer, RequestHeader, + ResponseHeader, Role, heartbeat_server, }; use common_telemetry::{debug, error, info, warn}; use futures::StreamExt; @@ -91,12 +91,12 @@ impl heartbeat_server::Heartbeat for Metasrv { } } Err(err) => { - if let Some(io_err) = error::match_for_io_error(&err) { - if io_err.kind() == ErrorKind::BrokenPipe { - // client disconnected in unexpected way - error!("Client disconnected: broken pipe"); - break; - } + if let Some(io_err) = error::match_for_io_error(&err) + && io_err.kind() == ErrorKind::BrokenPipe + { + // client disconnected in unexpected way + error!("Client disconnected: broken pipe"); + break; } if tx.send(Err(err)).await.is_err() { @@ -199,8 +199,8 @@ mod tests { use tonic::IntoRequest; use super::get_node_id; - use crate::metasrv::builder::MetasrvBuilder; use crate::metasrv::MetasrvOptions; + use crate::metasrv::builder::MetasrvBuilder; #[tokio::test] async fn test_ask_leader() { diff --git a/src/meta-srv/src/service/procedure.rs b/src/meta-srv/src/service/procedure.rs index 9dc61b1986..d3cffbc86c 100644 --- a/src/meta-srv/src/service/procedure.rs +++ b/src/meta-srv/src/service/procedure.rs @@ -17,11 +17,10 @@ use std::time::Duration; use api::v1::meta::reconcile_request::Target; use api::v1::meta::{ - procedure_service_server, DdlTaskRequest as PbDdlTaskRequest, - DdlTaskResponse as PbDdlTaskResponse, MigrateRegionRequest, MigrateRegionResponse, - ProcedureDetailRequest, ProcedureDetailResponse, ProcedureStateResponse, QueryProcedureRequest, - ReconcileCatalog, ReconcileDatabase, ReconcileRequest, ReconcileResponse, ReconcileTable, - ResolveStrategy, + DdlTaskRequest as 
PbDdlTaskRequest, DdlTaskResponse as PbDdlTaskResponse, MigrateRegionRequest, + MigrateRegionResponse, ProcedureDetailRequest, ProcedureDetailResponse, ProcedureStateResponse, + QueryProcedureRequest, ReconcileCatalog, ReconcileDatabase, ReconcileRequest, + ReconcileResponse, ReconcileTable, ResolveStrategy, procedure_service_server, }; use common_meta::procedure_executor::ExecutorContext; use common_meta::rpc::ddl::{DdlTask, SubmitDdlTaskRequest}; diff --git a/src/meta-srv/src/service/store.rs b/src/meta-srv/src/service/store.rs index d8c468c350..04fa7fed56 100644 --- a/src/meta-srv/src/service/store.rs +++ b/src/meta-srv/src/service/store.rs @@ -15,14 +15,14 @@ pub mod cached_kv; use api::v1::meta::{ - store_server, BatchDeleteRequest as PbBatchDeleteRequest, - BatchDeleteResponse as PbBatchDeleteResponse, BatchGetRequest as PbBatchGetRequest, - BatchGetResponse as PbBatchGetResponse, BatchPutRequest as PbBatchPutRequest, - BatchPutResponse as PbBatchPutResponse, CompareAndPutRequest as PbCompareAndPutRequest, + BatchDeleteRequest as PbBatchDeleteRequest, BatchDeleteResponse as PbBatchDeleteResponse, + BatchGetRequest as PbBatchGetRequest, BatchGetResponse as PbBatchGetResponse, + BatchPutRequest as PbBatchPutRequest, BatchPutResponse as PbBatchPutResponse, + CompareAndPutRequest as PbCompareAndPutRequest, CompareAndPutResponse as PbCompareAndPutResponse, DeleteRangeRequest as PbDeleteRangeRequest, DeleteRangeResponse as PbDeleteRangeResponse, PutRequest as PbPutRequest, PutResponse as PbPutResponse, RangeRequest as PbRangeRequest, RangeResponse as PbRangeResponse, - ResponseHeader, + ResponseHeader, store_server, }; use common_meta::rpc::store::{ BatchDeleteRequest, BatchGetRequest, BatchPutRequest, CompareAndPutRequest, DeleteRangeRequest, @@ -189,8 +189,8 @@ mod tests { use common_telemetry::tracing_context::W3cTrace; use tonic::IntoRequest; - use crate::metasrv::builder::MetasrvBuilder; use crate::metasrv::Metasrv; + use crate::metasrv::builder::MetasrvBuilder; async fn new_metasrv() -> Metasrv { MetasrvBuilder::new() diff --git a/src/meta-srv/src/service/store/cached_kv.rs b/src/meta-srv/src/service/store/cached_kv.rs index f86b42a9e2..052130f329 100644 --- a/src/meta-srv/src/service/store/cached_kv.rs +++ b/src/meta-srv/src/service/store/cached_kv.rs @@ -24,13 +24,13 @@ use common_meta::kv_backend::txn::{Txn, TxnOp, TxnRequest, TxnResponse}; use common_meta::kv_backend::{ KvBackend, KvBackendRef, ResettableKvBackend, ResettableKvBackendRef, TxnService, }; -use common_meta::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE}; +use common_meta::range_stream::{DEFAULT_PAGE_SIZE, PaginationStream}; +use common_meta::rpc::KeyValue; use common_meta::rpc::store::{ BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest, BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest, DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse, }; -use common_meta::rpc::KeyValue; use futures::TryStreamExt; use crate::metrics; diff --git a/src/meta-srv/src/state.rs b/src/meta-srv/src/state.rs index 0466644c67..a28305c4ce 100644 --- a/src/meta-srv/src/state.rs +++ b/src/meta-srv/src/state.rs @@ -113,7 +113,7 @@ pub fn become_follower() -> impl FnOnce(&State) -> State { mod tests { use std::assert_matches::assert_matches; - use crate::state::{become_follower, become_leader, FollowerState, LeaderState, State}; + use crate::state::{FollowerState, LeaderState, State, become_follower, become_leader}; #[tokio::test] async fn 
test_next_state() { diff --git a/src/meta-srv/src/table_meta_alloc.rs b/src/meta-srv/src/table_meta_alloc.rs index 54dd34ea86..250fac63c2 100644 --- a/src/meta-srv/src/table_meta_alloc.rs +++ b/src/meta-srv/src/table_meta_alloc.rs @@ -19,7 +19,7 @@ use common_error::ext::BoxedError; use common_meta::ddl::table_meta::PeerAllocator; use common_meta::error::{ExternalSnafu, Result as MetaResult}; use common_meta::peer::Peer; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use store_api::storage::MAX_REGION_SEQ; use crate::error::{self, Result, TooManyPartitionsSnafu}; diff --git a/src/meta-srv/src/test_util.rs b/src/meta-srv/src/test_util.rs index 01db2d00d9..01be0fd63b 100644 --- a/src/meta-srv/src/test_util.rs +++ b/src/meta-srv/src/test_util.rs @@ -14,8 +14,8 @@ use std::sync::Arc; -use api::v1::meta::heartbeat_request::NodeWorkloads; use api::v1::meta::DatanodeWorkloads; +use api::v1::meta::heartbeat_request::NodeWorkloads; use common_meta::kv_backend::memory::MemoryKvBackend; use common_meta::peer::Peer; use common_meta::rpc::router::{Region, RegionRoute}; diff --git a/src/meta-srv/src/utils/insert_forwarder.rs b/src/meta-srv/src/utils/insert_forwarder.rs index 4c8ff5222d..6b6353de57 100644 --- a/src/meta-srv/src/utils/insert_forwarder.rs +++ b/src/meta-srv/src/utils/insert_forwarder.rs @@ -21,7 +21,7 @@ use client::{Client, Database}; use common_error::ext::BoxedError; use common_meta::peer::PeerLookupServiceRef; use common_telemetry::{debug, warn}; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use tokio::sync::RwLock; use crate::error::{LookupFrontendsSnafu, NoAvailableFrontendSnafu}; diff --git a/src/metric-engine/src/data_region.rs b/src/metric-engine/src/data_region.rs index a359e697d7..5056cd0352 100644 --- a/src/metric-engine/src/data_region.rs +++ b/src/metric-engine/src/data_region.rs @@ -227,7 +227,9 @@ impl DataRegion { .map(|result| result.affected_rows) } _ => { - info!("Metric region received alter request {request:?} on physical region {region_id:?}"); + info!( + "Metric region received alter request {request:?} on physical region {region_id:?}" + ); FORBIDDEN_OPERATION_COUNT.inc(); ForbiddenPhysicalAlterSnafu.fail() diff --git a/src/metric-engine/src/engine.rs b/src/metric-engine/src/engine.rs index 19a989d0a3..7677ae4619 100644 --- a/src/metric-engine/src/engine.rs +++ b/src/metric-engine/src/engine.rs @@ -334,10 +334,10 @@ impl RegionEngine for MetricEngine { { SetRegionRoleStateResponse::Success(success) => success, SetRegionRoleStateResponse::NotFound => { - return Ok(SetRegionRoleStateResponse::NotFound) + return Ok(SetRegionRoleStateResponse::NotFound); } SetRegionRoleStateResponse::InvalidTransition(error) => { - return Ok(SetRegionRoleStateResponse::InvalidTransition(error)) + return Ok(SetRegionRoleStateResponse::InvalidTransition(error)); } }; @@ -349,10 +349,10 @@ impl RegionEngine for MetricEngine { { SetRegionRoleStateResponse::Success(success) => success, SetRegionRoleStateResponse::NotFound => { - return Ok(SetRegionRoleStateResponse::NotFound) + return Ok(SetRegionRoleStateResponse::NotFound); } SetRegionRoleStateResponse::InvalidTransition(error) => { - return Ok(SetRegionRoleStateResponse::InvalidTransition(error)) + return Ok(SetRegionRoleStateResponse::InvalidTransition(error)); } }; diff --git a/src/metric-engine/src/engine/alter.rs b/src/metric-engine/src/engine/alter.rs index e7bedaf374..1c4cb93639 100644 --- a/src/metric-engine/src/engine/alter.rs +++ b/src/metric-engine/src/engine/alter.rs @@ -18,7 +18,7 @@ mod 
validate; use std::collections::{HashMap, HashSet}; use extract_new_columns::extract_new_columns; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::metadata::ColumnMetadata; use store_api::metric_engine_consts::ALTER_PHYSICAL_EXTENSION_KEY; use store_api::region_request::{AffectedRows, AlterKind, RegionAlterRequest}; @@ -229,10 +229,10 @@ mod test { use store_api::region_request::{ AlterKind, BatchRegionDdlRequest, RegionAlterRequest, SetRegionOption, }; - use store_api::storage::consts::ReservedColumnId; use store_api::storage::RegionId; + use store_api::storage::consts::ReservedColumnId; - use crate::test_util::{alter_logical_region_request, create_logical_region_request, TestEnv}; + use crate::test_util::{TestEnv, alter_logical_region_request, create_logical_region_request}; #[tokio::test] async fn test_alter_region() { diff --git a/src/metric-engine/src/engine/create.rs b/src/metric-engine/src/engine/create.rs index 26f80129d2..7f579023b2 100644 --- a/src/metric-engine/src/engine/create.rs +++ b/src/metric-engine/src/engine/create.rs @@ -18,12 +18,12 @@ use std::collections::{HashMap, HashSet}; use api::v1::SemanticType; use common_telemetry::info; -use common_time::{Timestamp, FOREVER}; +use common_time::{FOREVER, Timestamp}; use datatypes::data_type::ConcreteDataType; use datatypes::schema::{ColumnSchema, SkippingIndexOptions}; use datatypes::value::Value; use mito2::engine::MITO_ENGINE_NAME; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::metadata::ColumnMetadata; use store_api::metric_engine_consts::{ ALTER_PHYSICAL_EXTENSION_KEY, DATA_REGION_SUBDIR, DATA_SCHEMA_TABLE_ID_COLUMN_NAME, @@ -35,12 +35,12 @@ use store_api::metric_engine_consts::{ use store_api::mito_engine_options::{TTL_KEY, WAL_OPTIONS_KEY}; use store_api::region_engine::RegionEngine; use store_api::region_request::{AffectedRows, PathType, RegionCreateRequest, RegionRequest}; -use store_api::storage::consts::ReservedColumnId; use store_api::storage::RegionId; +use store_api::storage::consts::ReservedColumnId; -use crate::engine::create::extract_new_columns::extract_new_columns; -use crate::engine::options::{set_data_region_options, PhysicalRegionOptions}; use crate::engine::MetricEngineInner; +use crate::engine::create::extract_new_columns::extract_new_columns; +use crate::engine::options::{PhysicalRegionOptions, set_data_region_options}; use crate::error::{ ColumnTypeMismatchSnafu, ConflictRegionOptionSnafu, CreateMitoRegionSnafu, InternalColumnOccupiedSnafu, InvalidMetadataSnafu, MissingRegionOptionSnafu, @@ -180,7 +180,9 @@ impl MetricEngineInner { )?; extension_return_value.extend(response.extensions); - info!("Created physical metric region {region_id}, primary key encoding={primary_key_encoding}, physical_region_options={physical_region_options:?}"); + info!( + "Created physical metric region {region_id}, primary key encoding={primary_key_encoding}, physical_region_options={physical_region_options:?}" + ); PHYSICAL_REGION_COUNT.inc(); // remember this table @@ -623,7 +625,7 @@ mod test { use super::*; use crate::config::EngineConfig; use crate::engine::MetricEngine; - use crate::test_util::{create_logical_region_request, TestEnv}; + use crate::test_util::{TestEnv, create_logical_region_request}; #[test] fn test_verify_region_create_request() { diff --git a/src/metric-engine/src/engine/flush.rs b/src/metric-engine/src/engine/flush.rs index 156ff2fc9d..87b054dfde 100644 --- 
a/src/metric-engine/src/engine/flush.rs +++ b/src/metric-engine/src/engine/flush.rs @@ -59,7 +59,7 @@ mod tests { use store_api::region_request::RegionPutRequest; use super::*; - use crate::test_util::{build_rows, row_schema_with_tags, TestEnv}; + use crate::test_util::{TestEnv, build_rows, row_schema_with_tags}; #[tokio::test] async fn test_list_ssts_after_write_and_flush_metric() { diff --git a/src/metric-engine/src/engine/open.rs b/src/metric-engine/src/engine/open.rs index e30a446cff..895bb9ed14 100644 --- a/src/metric-engine/src/engine/open.rs +++ b/src/metric-engine/src/engine/open.rs @@ -25,9 +25,9 @@ use store_api::region_engine::{BatchResponses, RegionEngine}; use store_api::region_request::{AffectedRows, PathType, RegionOpenRequest, ReplayCheckpoint}; use store_api::storage::RegionId; -use crate::engine::create::region_options_for_metadata_region; -use crate::engine::options::{set_data_region_options, PhysicalRegionOptions}; use crate::engine::MetricEngineInner; +use crate::engine::create::region_options_for_metadata_region; +use crate::engine::options::{PhysicalRegionOptions, set_data_region_options}; use crate::error::{ BatchOpenMitoRegionSnafu, NoOpenRegionResultSnafu, OpenMitoRegionSnafu, PhysicalRegionNotFoundSnafu, Result, diff --git a/src/metric-engine/src/engine/put.rs b/src/metric-engine/src/engine/put.rs index c7dc44be1e..21aeec605d 100644 --- a/src/metric-engine/src/engine/put.rs +++ b/src/metric-engine/src/engine/put.rs @@ -14,7 +14,7 @@ use api::v1::{Rows, WriteHint}; use common_telemetry::{error, info}; -use snafu::{ensure, OptionExt}; +use snafu::{OptionExt, ensure}; use store_api::codec::PrimaryKeyEncoding; use store_api::region_request::{AffectedRows, RegionPutRequest}; use store_api::storage::{RegionId, TableId}; diff --git a/src/metric-engine/src/engine/read.rs b/src/metric-engine/src/engine/read.rs index 028e784731..07643873c1 100644 --- a/src/metric-engine/src/engine/read.rs +++ b/src/metric-engine/src/engine/read.rs @@ -296,7 +296,7 @@ mod test { use super::*; use crate::test_util::{ - alter_logical_region_add_tag_columns, create_logical_region_request, TestEnv, + TestEnv, alter_logical_region_add_tag_columns, create_logical_region_request, }; #[tokio::test] diff --git a/src/metric-engine/src/engine/sync.rs b/src/metric-engine/src/engine/sync.rs index a0ebf07ab8..b62b138dab 100644 --- a/src/metric-engine/src/engine/sync.rs +++ b/src/metric-engine/src/engine/sync.rs @@ -15,7 +15,7 @@ use std::time::Instant; use common_telemetry::info; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::region_engine::{RegionEngine, RegionManifestInfo, SyncManifestResponse}; use store_api::storage::RegionId; @@ -91,11 +91,11 @@ impl MetricEngineInner { .recover_states(data_region_id, physical_region_options) .await?; info!( - "Sync metadata region for physical region {}, cost: {:?}, new opened logical region ids: {:?}", - data_region_id, - now.elapsed(), - new_opened_logical_region_ids - ); + "Sync metadata region for physical region {}, cost: {:?}, new opened logical region ids: {:?}", + data_region_id, + now.elapsed(), + new_opened_logical_region_ids + ); Ok(SyncManifestResponse::Metric { metadata_synced, diff --git a/src/metric-engine/src/metadata_region.rs b/src/metric-engine/src/metadata_region.rs index e0c30fa658..d909f54694 100644 --- a/src/metric-engine/src/metadata_region.rs +++ b/src/metric-engine/src/metadata_region.rs @@ -21,13 +21,13 @@ use api::v1::helper::row; use api::v1::value::ValueData; use 
api::v1::{ColumnDataType, ColumnSchema, Rows, SemanticType}; use async_stream::try_stream; -use base64::engine::general_purpose::STANDARD_NO_PAD; use base64::Engine; +use base64::engine::general_purpose::STANDARD_NO_PAD; use common_base::readable_size::ReadableSize; use common_recordbatch::{RecordBatch, SendableRecordBatchStream}; use datafusion::prelude::{col, lit}; -use futures_util::stream::BoxStream; use futures_util::TryStreamExt; +use futures_util::stream::BoxStream; use mito2::engine::MitoEngine; use moka::future::Cache; use moka::policy::EvictionPolicy; @@ -349,12 +349,11 @@ fn decode_record_batch_to_key(batch: RecordBatch) -> Vec { (0..batch.num_rows()) .flat_map(move |row_index| { - let key = key_col + key_col .get_ref(row_index) .as_string() .unwrap() - .map(|s| s.to_string()); - key + .map(|s| s.to_string()) }) .collect() } diff --git a/src/metric-engine/src/repeated_task.rs b/src/metric-engine/src/repeated_task.rs index 022d744688..fa382d7844 100644 --- a/src/metric-engine/src/repeated_task.rs +++ b/src/metric-engine/src/repeated_task.rs @@ -90,7 +90,7 @@ mod tests { use store_api::region_engine::{RegionEngine, RegionManifestInfo}; - use crate::config::{EngineConfig, DEFAULT_FLUSH_METADATA_REGION_INTERVAL}; + use crate::config::{DEFAULT_FLUSH_METADATA_REGION_INTERVAL, EngineConfig}; use crate::test_util::TestEnv; #[tokio::test] diff --git a/src/metric-engine/src/row_modifier.rs b/src/metric-engine/src/row_modifier.rs index d67f8692b9..4adcc2e777 100644 --- a/src/metric-engine/src/row_modifier.rs +++ b/src/metric-engine/src/row_modifier.rs @@ -25,7 +25,7 @@ use store_api::codec::PrimaryKeyEncoding; use store_api::metric_engine_consts::{ DATA_SCHEMA_TABLE_ID_COLUMN_NAME, DATA_SCHEMA_TSID_COLUMN_NAME, }; -use store_api::storage::consts::{ReservedColumnId, PRIMARY_KEY_COLUMN_NAME}; +use store_api::storage::consts::{PRIMARY_KEY_COLUMN_NAME, ReservedColumnId}; use store_api::storage::{ColumnId, TableId}; use crate::error::{EncodePrimaryKeySnafu, Result}; diff --git a/src/metric-engine/src/test_util.rs b/src/metric-engine/src/test_util.rs index 11d2191cac..d594541d84 100644 --- a/src/metric-engine/src/test_util.rs +++ b/src/metric-engine/src/test_util.rs @@ -23,8 +23,8 @@ use datatypes::schema::ColumnSchema; use mito2::config::MitoConfig; use mito2::engine::MitoEngine; use mito2::test_util::TestEnv as MitoTestEnv; -use object_store::util::join_dir; use object_store::ObjectStore; +use object_store::util::join_dir; use store_api::metadata::ColumnMetadata; use store_api::metric_engine_consts::{ ALTER_PHYSICAL_EXTENSION_KEY, LOGICAL_TABLE_METADATA_KEY, METRIC_ENGINE_NAME, @@ -425,8 +425,8 @@ pub fn build_rows(num_tags: usize, num_rows: usize) -> Vec { #[cfg(test)] mod test { - use object_store::services::Fs; use object_store::ObjectStore; + use object_store::services::Fs; use store_api::metric_engine_consts::{DATA_REGION_SUBDIR, METADATA_REGION_SUBDIR}; use super::*; diff --git a/src/mito-codec/benches/bench_sparse_encoding.rs b/src/mito-codec/benches/bench_sparse_encoding.rs index 9841f55266..c65a480293 100644 --- a/src/mito-codec/benches/bench_sparse_encoding.rs +++ b/src/mito-codec/benches/bench_sparse_encoding.rs @@ -15,10 +15,10 @@ use std::hint::black_box; use bytes::Bytes; -use criterion::{criterion_group, criterion_main, Criterion}; +use criterion::{Criterion, criterion_group, criterion_main}; use datatypes::prelude::ValueRef; -use mito_codec::row_converter::sparse::{RESERVED_COLUMN_ID_TABLE_ID, RESERVED_COLUMN_ID_TSID}; use mito_codec::row_converter::SparsePrimaryKeyCodec; +use 
mito_codec::row_converter::sparse::{RESERVED_COLUMN_ID_TABLE_ID, RESERVED_COLUMN_ID_TSID}; fn encode_sparse(c: &mut Criterion) { let num_tags = 10; diff --git a/src/mito-codec/src/index.rs b/src/mito-codec/src/index.rs index 768d5db20a..b6ab84f06c 100644 --- a/src/mito-codec/src/index.rs +++ b/src/mito-codec/src/index.rs @@ -20,13 +20,13 @@ use std::sync::Arc; use datatypes::data_type::ConcreteDataType; use datatypes::value::ValueRef; use memcomparable::Serializer; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::codec::PrimaryKeyEncoding; use store_api::metadata::ColumnMetadata; use store_api::storage::ColumnId; use crate::error::{FieldTypeMismatchSnafu, IndexEncodeNullSnafu, Result}; -use crate::row_converter::{build_primary_key_codec_with_fields, PrimaryKeyCodec, SortField}; +use crate::row_converter::{PrimaryKeyCodec, SortField, build_primary_key_codec_with_fields}; /// Encodes index values according to their data types for sorting and storage use. pub struct IndexValueCodec; diff --git a/src/mito-codec/src/key_values.rs b/src/mito-codec/src/key_values.rs index 896a57810a..9d26eeb56b 100644 --- a/src/mito-codec/src/key_values.rs +++ b/src/mito-codec/src/key_values.rs @@ -18,11 +18,11 @@ use api::v1::{ColumnSchema, Mutation, OpType, Row, Rows}; use datatypes::prelude::ConcreteDataType; use datatypes::value::ValueRef; use memcomparable::Deserializer; -use store_api::codec::{infer_primary_key_encoding_from_hint, PrimaryKeyEncoding}; +use store_api::codec::{PrimaryKeyEncoding, infer_primary_key_encoding_from_hint}; use store_api::metadata::RegionMetadata; use store_api::storage::SequenceNumber; -use crate::row_converter::{SortField, COLUMN_ID_ENCODE_SIZE}; +use crate::row_converter::{COLUMN_ID_ENCODE_SIZE, SortField}; /// Key value view of a mutation. 
#[derive(Debug)] @@ -333,7 +333,7 @@ mod tests { use api::v1::{self, ColumnDataType, SemanticType}; use super::*; - use crate::test_util::{i64_value, TestRegionMetadataBuilder}; + use crate::test_util::{TestRegionMetadataBuilder, i64_value}; const TS_NAME: &str = "ts"; const START_SEQ: SequenceNumber = 100; diff --git a/src/mito-codec/src/row_converter.rs b/src/mito-codec/src/row_converter.rs index ab14d7a549..6fe33a9ee7 100644 --- a/src/mito-codec/src/row_converter.rs +++ b/src/mito-codec/src/row_converter.rs @@ -21,7 +21,7 @@ use std::sync::Arc; use common_recordbatch::filter::SimpleFilterEvaluator; use datatypes::value::{Value, ValueRef}; pub use dense::{DensePrimaryKeyCodec, SortField}; -pub use sparse::{SparsePrimaryKeyCodec, SparseValues, COLUMN_ID_ENCODE_SIZE}; +pub use sparse::{COLUMN_ID_ENCODE_SIZE, SparsePrimaryKeyCodec, SparseValues}; use store_api::codec::PrimaryKeyEncoding; use store_api::metadata::{RegionMetadata, RegionMetadataRef}; use store_api::storage::ColumnId; diff --git a/src/mito-codec/src/row_converter/sparse.rs b/src/mito-codec/src/row_converter/sparse.rs index 5b65ba2ca8..a104bb8abc 100644 --- a/src/mito-codec/src/row_converter/sparse.rs +++ b/src/mito-codec/src/row_converter/sparse.rs @@ -24,8 +24,8 @@ use serde::{Deserialize, Serialize}; use snafu::ResultExt; use store_api::codec::PrimaryKeyEncoding; use store_api::metadata::RegionMetadataRef; -use store_api::storage::consts::ReservedColumnId; use store_api::storage::ColumnId; +use store_api::storage::consts::ReservedColumnId; use crate::error::{DeserializeFieldSnafu, Result, SerializeFieldSnafu, UnsupportedOperationSnafu}; use crate::key_values::KeyValue; @@ -140,10 +140,10 @@ impl SparsePrimaryKeyCodec { /// Returns the field of the given column id. fn get_field(&self, column_id: ColumnId) -> Option<&SortField> { // if the `columns` is not specified, all unknown columns is primary key(label field). - if let Some(columns) = &self.inner.columns { - if !columns.contains(&column_id) { - return None; - } + if let Some(columns) = &self.inner.columns + && !columns.contains(&column_id) + { + return None; } match column_id { @@ -385,8 +385,8 @@ mod tests { use std::sync::Arc; use api::v1::SemanticType; - use common_time::timestamp::TimeUnit; use common_time::Timestamp; + use common_time::timestamp::TimeUnit; use datatypes::schema::ColumnSchema; use datatypes::value::{OrderedFloat, Value}; use store_api::metadata::{ColumnMetadata, RegionMetadataBuilder}; diff --git a/src/mito-codec/src/test_util.rs b/src/mito-codec/src/test_util.rs index 2f72cb109a..22592841ed 100644 --- a/src/mito-codec/src/test_util.rs +++ b/src/mito-codec/src/test_util.rs @@ -15,8 +15,8 @@ //! Test utilities for mito codec. use api::greptime_proto::v1; -use api::v1::value::ValueData; use api::v1::SemanticType; +use api::v1::value::ValueData; use datatypes::prelude::ConcreteDataType; use datatypes::schema::ColumnSchema; use store_api::metadata::{ColumnMetadata, RegionMetadata, RegionMetadataBuilder}; diff --git a/src/mito2/benches/bench_compaction_picker.rs b/src/mito2/benches/bench_compaction_picker.rs index 75acd1ca48..aa2026a404 100644 --- a/src/mito2/benches/bench_compaction_picker.rs +++ b/src/mito2/benches/bench_compaction_picker.rs @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use criterion::{Criterion, black_box, criterion_group, criterion_main}; use mito2::compaction::run::{ - find_overlapping_items, find_sorted_runs, merge_seq_files, reduce_runs, Item, Ranged, SortedRun, + Item, Ranged, SortedRun, find_overlapping_items, find_sorted_runs, merge_seq_files, reduce_runs, }; #[derive(Clone, Debug, Eq, Hash, PartialEq)] diff --git a/src/mito2/benches/bench_filter_time_partition.rs b/src/mito2/benches/bench_filter_time_partition.rs index 3dcf773d18..46c394c618 100644 --- a/src/mito2/benches/bench_filter_time_partition.rs +++ b/src/mito2/benches/bench_filter_time_partition.rs @@ -15,12 +15,12 @@ use std::str::FromStr; use std::sync::Arc; -use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use criterion::{Criterion, black_box, criterion_group, criterion_main}; use datatypes::arrow; use datatypes::arrow::array::{ArrayRef, RecordBatch, TimestampMillisecondArray}; use datatypes::arrow::datatypes::{DataType, Field, Schema, TimeUnit}; use datatypes::arrow_array::StringArray; -use mito2::memtable::{filter_record_batch, BulkPart}; +use mito2::memtable::{BulkPart, filter_record_batch}; fn random_array(num: usize) -> BulkPart { let mut min = i64::MAX; diff --git a/src/mito2/benches/memtable_bench.rs b/src/mito2/benches/memtable_bench.rs index 08c89abf3e..02ad4e48bb 100644 --- a/src/mito2/benches/memtable_bench.rs +++ b/src/mito2/benches/memtable_bench.rs @@ -16,11 +16,12 @@ use std::sync::Arc; use api::v1::value::ValueData; use api::v1::{Row, Rows, SemanticType}; -use criterion::{criterion_group, criterion_main, Criterion}; +use criterion::{Criterion, criterion_group, criterion_main}; use datafusion_common::Column; -use datafusion_expr::{lit, Expr}; +use datafusion_expr::{Expr, lit}; use datatypes::data_type::ConcreteDataType; use datatypes::schema::ColumnSchema; +use mito_codec::row_converter::DensePrimaryKeyCodec; use mito2::memtable::bulk::context::BulkIterContext; use mito2::memtable::bulk::part::BulkPartConverter; use mito2::memtable::bulk::part_reader::BulkPartRecordBatchIter; @@ -29,12 +30,11 @@ use mito2::memtable::time_series::TimeSeriesMemtable; use mito2::memtable::{KeyValues, Memtable}; use mito2::read::flat_merge::FlatMergeIterator; use mito2::region::options::MergeMode; -use mito2::sst::{to_flat_sst_arrow_schema, FlatSchemaOptions}; +use mito2::sst::{FlatSchemaOptions, to_flat_sst_arrow_schema}; use mito2::test_util::memtable_util::{self, region_metadata_to_row_schema}; -use mito_codec::row_converter::DensePrimaryKeyCodec; +use rand::Rng; use rand::rngs::ThreadRng; use rand::seq::IndexedRandom; -use rand::Rng; use store_api::metadata::{ ColumnMetadata, RegionMetadata, RegionMetadataBuilder, RegionMetadataRef, }; diff --git a/src/mito2/benches/simple_bulk_memtable.rs b/src/mito2/benches/simple_bulk_memtable.rs index b5c8a99347..ce8da2b18c 100644 --- a/src/mito2/benches/simple_bulk_memtable.rs +++ b/src/mito2/benches/simple_bulk_memtable.rs @@ -16,16 +16,16 @@ use std::sync::Arc; use api::v1::value::ValueData; use api::v1::{Mutation, OpType, Row, Rows, SemanticType}; -use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use criterion::{Criterion, black_box, criterion_group, criterion_main}; use datatypes::data_type::ConcreteDataType; use datatypes::schema::ColumnSchema; use mito2::memtable::simple_bulk_memtable::SimpleBulkMemtable; use mito2::memtable::{KeyValues, Memtable, MemtableRanges}; use mito2::read; +use mito2::read::Source; use 
mito2::read::dedup::DedupReader; use mito2::read::merge::MergeReaderBuilder; use mito2::read::scan_region::PredicateGroup; -use mito2::read::Source; use mito2::region::options::MergeMode; use mito2::test_util::column_metadata_to_column_schema; use store_api::metadata::{ColumnMetadata, RegionMetadataBuilder}; diff --git a/src/mito2/src/access_layer.rs b/src/mito2/src/access_layer.rs index d9c15eef45..f6c2953588 100644 --- a/src/mito2/src/access_layer.rs +++ b/src/mito2/src/access_layer.rs @@ -20,7 +20,7 @@ use common_time::Timestamp; use futures::{Stream, TryStreamExt}; use object_store::services::Fs; use object_store::util::{join_dir, with_instrument_layers}; -use object_store::{ErrorKind, ObjectStore, ATOMIC_WRITE_DIR, OLD_ATOMIC_WRITE_DIR}; +use object_store::{ATOMIC_WRITE_DIR, ErrorKind, OLD_ATOMIC_WRITE_DIR, ObjectStore}; use smallvec::SmallVec; use snafu::ResultExt; use store_api::metadata::RegionMetadataRef; @@ -28,18 +28,18 @@ use store_api::region_request::PathType; use store_api::sst_entry::StorageSstEntry; use store_api::storage::{RegionId, SequenceNumber}; +use crate::cache::CacheManagerRef; use crate::cache::file_cache::{FileCacheRef, FileType, IndexKey}; use crate::cache::write_cache::SstUploadRequest; -use crate::cache::CacheManagerRef; use crate::config::{BloomFilterConfig, FulltextIndexConfig, InvertedIndexConfig}; use crate::error::{CleanDirSnafu, DeleteIndexSnafu, DeleteSstSnafu, OpenDalSnafu, Result}; use crate::metrics::{COMPACTION_STAGE_ELAPSED, FLUSH_ELAPSED}; use crate::read::Source; use crate::region::options::IndexOptions; use crate::sst::file::{FileHandle, FileId, RegionFileId}; +use crate::sst::index::IndexerBuilderImpl; use crate::sst::index::intermediate::IntermediateManager; use crate::sst::index::puffin_manager::PuffinManagerFactory; -use crate::sst::index::IndexerBuilderImpl; use crate::sst::location::{self, region_dir_from_table_dir}; use crate::sst::parquet::reader::ParquetReaderBuilder; use crate::sst::parquet::writer::ParquetWriter; @@ -311,7 +311,7 @@ impl AccessLayer { } /// Lists the SST entries from the storage layer in the table directory. 
- pub fn storage_sst_entries(&self) -> impl Stream> { + pub fn storage_sst_entries(&self) -> impl Stream> + use<> { let object_store = self.object_store.clone(); let table_dir = self.table_dir.clone(); diff --git a/src/mito2/src/cache.rs b/src/mito2/src/cache.rs index ecf87b1220..0fe98cbb89 100644 --- a/src/mito2/src/cache.rs +++ b/src/mito2/src/cache.rs @@ -290,13 +290,13 @@ impl CacheManager { // Try to get metadata from write cache let key = IndexKey::new(file_id.region_id(), file_id.file_id(), FileType::Parquet); - if let Some(write_cache) = &self.write_cache { - if let Some(metadata) = write_cache.file_cache().get_parquet_meta_data(key).await { - let metadata = Arc::new(metadata); - // Put metadata into sst meta cache - self.put_parquet_meta_data(file_id, metadata.clone()); - return Some(metadata); - } + if let Some(write_cache) = &self.write_cache + && let Some(metadata) = write_cache.file_cache().get_parquet_meta_data(key).await + { + let metadata = Arc::new(metadata); + // Put metadata into sst meta cache + self.put_parquet_meta_data(file_id, metadata.clone()); + return Some(metadata); }; None @@ -781,9 +781,11 @@ mod tests { let value = Value::Int64(10); let vector: VectorRef = Arc::new(Int64Vector::from_slice([10, 10, 10, 10])); cache.put_repeated_vector(value.clone(), vector.clone()); - assert!(cache - .get_repeated_vector(&ConcreteDataType::int64_datatype(), &value) - .is_none()); + assert!( + cache + .get_repeated_vector(&ConcreteDataType::int64_datatype(), &value) + .is_none() + ); let key = PageKey::new(file_id.file_id(), 1, vec![Range { start: 0, end: 5 }]); let pages = Arc::new(PageValue::default()); @@ -810,9 +812,11 @@ mod tests { fn test_repeated_vector_cache() { let cache = CacheManager::builder().vector_cache_size(4096).build(); let value = Value::Int64(10); - assert!(cache - .get_repeated_vector(&ConcreteDataType::int64_datatype(), &value) - .is_none()); + assert!( + cache + .get_repeated_vector(&ConcreteDataType::int64_datatype(), &value) + .is_none() + ); let vector: VectorRef = Arc::new(Int64Vector::from_slice([10, 10, 10, 10])); cache.put_repeated_vector(value.clone(), vector.clone()); let cached = cache diff --git a/src/mito2/src/cache/file_cache.rs b/src/mito2/src/cache/file_cache.rs index 2fdc53dd0a..cb62b427cf 100644 --- a/src/mito2/src/cache/file_cache.rs +++ b/src/mito2/src/cache/file_cache.rs @@ -578,10 +578,12 @@ mod tests { None, )); // No entry before recovery. - assert!(cache - .reader(IndexKey::new(region_id, file_ids[0], file_type)) - .await - .is_none()); + assert!( + cache + .reader(IndexKey::new(region_id, file_ids[0], file_type)) + .await + .is_none() + ); cache.recover(true).await; // Check size. 
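
The `+ use<>` added to `storage_sst_entries` in the access_layer.rs hunk above is the precise-capturing syntax: on edition 2024, return-position `impl Trait` captures every in-scope lifetime (including the `&self` borrow) by default, and an empty `use<>` declares that the opaque type captures nothing. A minimal sketch of the same idea, using a hypothetical `Layer` type and a plain `Iterator` standing in for the `Stream` from the diff:

struct Layer {
    table_dir: String,
}

impl Layer {
    // Without `+ use<>`, edition 2024 would capture the `&self` lifetime here,
    // and the returned iterator could not outlive the borrow of `self`.
    fn entries(&self) -> impl Iterator<Item = String> + use<> {
        // Clone what the iterator needs so the hidden type owns its data.
        let dir = self.table_dir.clone();
        (0..3).map(move |i| format!("{dir}/file-{i}.parquet"))
    }
}

fn main() {
    let it = {
        let layer = Layer { table_dir: "data/table".to_string() };
        layer.entries() // `layer` is dropped at the end of this block
    };
    for path in it {
        println!("{path}");
    }
}
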
@@ -662,9 +664,9 @@ mod tests { assert!( parse_index_key("5299989643269.3368731b-a556-42b8-a5df-9c31ce155095.parque").is_none() ); - assert!(parse_index_key( - "5299989643269.3368731b-a556-42b8-a5df-9c31ce155095.parquet.puffin" - ) - .is_none()); + assert!( + parse_index_key("5299989643269.3368731b-a556-42b8-a5df-9c31ce155095.parquet.puffin") + .is_none() + ); } } diff --git a/src/mito2/src/cache/index.rs b/src/mito2/src/cache/index.rs index 5cb7770e62..0a803d7c8b 100644 --- a/src/mito2/src/cache/index.rs +++ b/src/mito2/src/cache/index.rs @@ -100,7 +100,9 @@ where weight_of_metadata: fn(&K, &Arc) -> u32, weight_of_content: fn(&(K, PageKey), &Bytes) -> u32, ) -> Self { - common_telemetry::debug!("Building IndexCache with metadata size: {index_metadata_cap}, content size: {index_content_cap}, page size: {page_size}, index type: {index_type}"); + common_telemetry::debug!( + "Building IndexCache with metadata size: {index_metadata_cap}, content size: {index_content_cap}, page size: {page_size}, index type: {index_type}" + ); let index_metadata = moka::sync::CacheBuilder::new(index_metadata_cap) .name(&format!("index_metadata_{}", index_type)) .weigher(weight_of_metadata) diff --git a/src/mito2/src/cache/index/bloom_filter_index.rs b/src/mito2/src/cache/index/bloom_filter_index.rs index ef8f8aecf2..95e17c390c 100644 --- a/src/mito2/src/cache/index/bloom_filter_index.rs +++ b/src/mito2/src/cache/index/bloom_filter_index.rs @@ -22,7 +22,7 @@ use index::bloom_filter::error::Result; use index::bloom_filter::reader::BloomFilterReader; use store_api::storage::ColumnId; -use crate::cache::index::{IndexCache, PageKey, INDEX_METADATA_TYPE}; +use crate::cache::index::{INDEX_METADATA_TYPE, IndexCache, PageKey}; use crate::metrics::{CACHE_HIT, CACHE_MISS}; use crate::sst::file::FileId; diff --git a/src/mito2/src/cache/index/inverted_index.rs b/src/mito2/src/cache/index/inverted_index.rs index 869aa699da..6add80edfa 100644 --- a/src/mito2/src/cache/index/inverted_index.rs +++ b/src/mito2/src/cache/index/inverted_index.rs @@ -22,7 +22,7 @@ use index::inverted_index::error::Result; use index::inverted_index::format::reader::InvertedIndexReader; use prost::Message; -use crate::cache::index::{IndexCache, PageKey, INDEX_METADATA_TYPE}; +use crate::cache::index::{INDEX_METADATA_TYPE, IndexCache, PageKey}; use crate::metrics::{CACHE_HIT, CACHE_MISS}; use crate::sst::file::FileId; @@ -130,10 +130,10 @@ mod test { use std::num::NonZeroUsize; use futures::stream; + use index::Bytes; use index::bitmap::{Bitmap, BitmapType}; use index::inverted_index::format::reader::{InvertedIndexBlobReader, InvertedIndexReader}; use index::inverted_index::format::writer::{InvertedIndexBlobWriter, InvertedIndexWriter}; - use index::Bytes; use prometheus::register_int_counter_vec; use rand::{Rng, RngCore}; @@ -175,7 +175,11 @@ mod test { if read != data.get(expected_range).unwrap() { panic!( "fuzz_read_index failed, offset: {}, size: {}, page_size: {}\nread len: {}, expected len: {}\nrange: {:?}, page num: {}", - offset, size, page_size, read.len(), size as usize, + offset, + size, + page_size, + read.len(), + size as usize, PageKey::calculate_range(offset, size, page_size as u64), page_num ); diff --git a/src/mito2/src/cache/test_util.rs b/src/mito2/src/cache/test_util.rs index 9c3b08177f..a891ae7f72 100644 --- a/src/mito2/src/cache/test_util.rs +++ b/src/mito2/src/cache/test_util.rs @@ -19,10 +19,10 @@ use std::sync::Arc; use bytes::Bytes; use datatypes::arrow::array::{ArrayRef, Int64Array}; use 
datatypes::arrow::record_batch::RecordBatch; -use object_store::services::Fs; use object_store::ObjectStore; -use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder; +use object_store::services::Fs; use parquet::arrow::ArrowWriter; +use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder; use parquet::file::metadata::ParquetMetaData; /// Returns a parquet meta data. diff --git a/src/mito2/src/cache/write_cache.rs b/src/mito2/src/cache/write_cache.rs index 0132a3de1c..322548b11c 100644 --- a/src/mito2/src/cache/write_cache.rs +++ b/src/mito2/src/cache/write_cache.rs @@ -25,8 +25,8 @@ use snafu::ResultExt; use store_api::storage::RegionId; use crate::access_layer::{ - new_fs_cache_store, FilePathProvider, Metrics, RegionFilePathFactory, SstInfoArray, - SstWriteRequest, TempFileCleaner, WriteCachePathProvider, WriteType, + FilePathProvider, Metrics, RegionFilePathFactory, SstInfoArray, SstWriteRequest, + TempFileCleaner, WriteCachePathProvider, WriteType, new_fs_cache_store, }; use crate::cache::file_cache::{FileCache, FileCacheRef, FileType, IndexKey, IndexValue}; use crate::error::{self, Result}; @@ -34,11 +34,11 @@ use crate::metrics::{ UPLOAD_BYTES_TOTAL, WRITE_CACHE_DOWNLOAD_BYTES_TOTAL, WRITE_CACHE_DOWNLOAD_ELAPSED, }; use crate::sst::file::RegionFileId; +use crate::sst::index::IndexerBuilderImpl; use crate::sst::index::intermediate::IntermediateManager; use crate::sst::index::puffin_manager::PuffinManagerFactory; -use crate::sst::index::IndexerBuilderImpl; -use crate::sst::parquet::writer::ParquetWriter; use crate::sst::parquet::WriteOptions; +use crate::sst::parquet::writer::ParquetWriter; use crate::sst::{DEFAULT_WRITE_BUFFER_SIZE, DEFAULT_WRITE_CONCURRENCY}; /// A cache for uploading files to remote object stores. @@ -436,11 +436,11 @@ mod tests { use crate::read::Source; use crate::region::options::IndexOptions; use crate::sst::parquet::reader::ParquetReaderBuilder; + use crate::test_util::TestEnv; use crate::test_util::sst_util::{ assert_parquet_metadata_eq, new_batch_by_range, new_source, sst_file_handle_with_file_id, sst_region_metadata, }; - use crate::test_util::TestEnv; #[tokio::test] async fn test_write_and_upload_sst() { diff --git a/src/mito2/src/compaction.rs b/src/mito2/src/compaction.rs index 56a926227d..ed61ddc2ed 100644 --- a/src/mito2/src/compaction.rs +++ b/src/mito2/src/compaction.rs @@ -46,7 +46,7 @@ use tokio::sync::mpsc::{self, Sender}; use crate::access_layer::AccessLayerRef; use crate::cache::{CacheManagerRef, CacheStrategy}; use crate::compaction::compactor::{CompactionRegion, CompactionVersion, DefaultCompactor}; -use crate::compaction::picker::{new_picker, CompactionTask}; +use crate::compaction::picker::{CompactionTask, new_picker}; use crate::compaction::task::CompactionTaskImpl; use crate::config::MitoConfig; use crate::error::{ @@ -55,10 +55,10 @@ use crate::error::{ TimeRangePredicateOverflowSnafu, TimeoutSnafu, }; use crate::metrics::{COMPACTION_STAGE_ELAPSED, INFLIGHT_COMPACTION_COUNT}; +use crate::read::BoxedBatchReader; use crate::read::projection::ProjectionMapper; use crate::read::scan_region::{PredicateGroup, ScanInput}; use crate::read::seq_scan::SeqScan; -use crate::read::BoxedBatchReader; use crate::region::options::MergeMode; use crate::region::version::VersionControlRef; use crate::region::{ManifestContextRef, RegionLeaderState, RegionRoleState}; @@ -689,13 +689,17 @@ fn time_range_to_predicate( ] } (Some(start), None) => { - vec![datafusion_expr::col(ts_col.column_schema.name.clone()) - .gt_eq(ts_to_lit(*start, 
ts_col_unit)?)] + vec![ + datafusion_expr::col(ts_col.column_schema.name.clone()) + .gt_eq(ts_to_lit(*start, ts_col_unit)?), + ] } (None, Some(end)) => { - vec![datafusion_expr::col(ts_col.column_schema.name.clone()) - .lt(ts_to_lit(*end, ts_col_unit)?)] + vec![ + datafusion_expr::col(ts_col.column_schema.name.clone()) + .lt(ts_to_lit(*end, ts_col_unit)?), + ] } (None, None) => { return Ok(PredicateGroup::default()); @@ -761,7 +765,7 @@ mod tests { use crate::region::ManifestContext; use crate::test_util::mock_schema_metadata_manager; use crate::test_util::scheduler_util::{SchedulerEnv, VecScheduler}; - use crate::test_util::version_util::{apply_edit, VersionControlBuilder}; + use crate::test_util::version_util::{VersionControlBuilder, apply_edit}; #[tokio::test] async fn test_schedule_empty() { @@ -910,12 +914,14 @@ mod tests { .unwrap(); assert_eq!(1, scheduler.region_status.len()); assert_eq!(1, job_scheduler.num_jobs()); - assert!(!scheduler - .region_status - .get(&builder.region_id()) - .unwrap() - .waiters - .is_empty()); + assert!( + !scheduler + .region_status + .get(&builder.region_id()) + .unwrap() + .waiters + .is_empty() + ); // On compaction finished and schedule next compaction. scheduler @@ -947,12 +953,14 @@ mod tests { .await .unwrap(); assert_eq!(2, job_scheduler.num_jobs()); - assert!(!scheduler - .region_status - .get(&builder.region_id()) - .unwrap() - .waiters - .is_empty()); + assert!( + !scheduler + .region_status + .get(&builder.region_id()) + .unwrap() + .waiters + .is_empty() + ); } #[tokio::test] @@ -1023,12 +1031,14 @@ mod tests { // Should schedule 1 compaction. assert_eq!(1, scheduler.region_status.len()); assert_eq!(1, job_scheduler.num_jobs()); - assert!(scheduler - .region_status - .get(®ion_id) - .unwrap() - .pending_request - .is_none()); + assert!( + scheduler + .region_status + .get(®ion_id) + .unwrap() + .pending_request + .is_none() + ); // Schedule another manual compaction. let (tx, _rx) = oneshot::channel(); diff --git a/src/mito2/src/compaction/buckets.rs b/src/mito2/src/compaction/buckets.rs index 3761a45b1a..8e47ea3370 100644 --- a/src/mito2/src/compaction/buckets.rs +++ b/src/mito2/src/compaction/buckets.rs @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use common_time::timestamp::TimeUnit; use common_time::Timestamp; +use common_time::timestamp::TimeUnit; use crate::sst::file::FileHandle; diff --git a/src/mito2/src/compaction/compactor.rs b/src/mito2/src/compaction/compactor.rs index d5c60946a6..2006f89741 100644 --- a/src/mito2/src/compaction/compactor.rs +++ b/src/mito2/src/compaction/compactor.rs @@ -31,8 +31,8 @@ use store_api::storage::RegionId; use crate::access_layer::{AccessLayer, AccessLayerRef, OperationType, SstWriteRequest, WriteType}; use crate::cache::{CacheManager, CacheManagerRef}; -use crate::compaction::picker::{new_picker, PickerOutput}; -use crate::compaction::{find_ttl, CompactionSstReaderBuilder}; +use crate::compaction::picker::{PickerOutput, new_picker}; +use crate::compaction::{CompactionSstReaderBuilder, find_ttl}; use crate::config::MitoConfig; use crate::error::{EmptyRegionDirSnafu, JoinSnafu, ObjectStoreNotFoundSnafu, Result}; use crate::manifest::action::{RegionEdit, RegionMetaAction, RegionMetaActionList}; diff --git a/src/mito2/src/compaction/run.rs b/src/mito2/src/compaction/run.rs index bfa43c65eb..e691709948 100644 --- a/src/mito2/src/compaction/run.rs +++ b/src/mito2/src/compaction/run.rs @@ -15,10 +15,10 @@ //! 
This file contains code to find sorted runs in a set if ranged items and //! along with the best way to merge these items to satisfy the desired run count. -use common_base::readable_size::ReadableSize; use common_base::BitVec; +use common_base::readable_size::ReadableSize; use common_time::Timestamp; -use smallvec::{smallvec, SmallVec}; +use smallvec::{SmallVec, smallvec}; use crate::sst::file::{FileHandle, RegionFileId}; diff --git a/src/mito2/src/compaction/twcs.rs b/src/mito2/src/compaction/twcs.rs index 29d8e8a49c..d65ec3c5fd 100644 --- a/src/mito2/src/compaction/twcs.rs +++ b/src/mito2/src/compaction/twcs.rs @@ -19,19 +19,19 @@ use std::num::NonZeroU64; use common_base::readable_size::ReadableSize; use common_telemetry::info; +use common_time::Timestamp; use common_time::timestamp::TimeUnit; use common_time::timestamp_millis::BucketAligned; -use common_time::Timestamp; use store_api::storage::RegionId; use crate::compaction::buckets::infer_time_bucket; use crate::compaction::compactor::CompactionRegion; use crate::compaction::picker::{Picker, PickerOutput}; use crate::compaction::run::{ - find_sorted_runs, merge_seq_files, reduce_runs, FileGroup, Item, Ranged, + FileGroup, Item, Ranged, find_sorted_runs, merge_seq_files, reduce_runs, }; -use crate::compaction::{get_expired_ssts, CompactionOutput}; -use crate::sst::file::{overlaps, FileHandle, Level}; +use crate::compaction::{CompactionOutput, get_expired_ssts}; +use crate::sst::file::{FileHandle, Level, overlaps}; use crate::sst::version::LevelMeta; const LEVEL_COMPACTED: Level = 1; @@ -66,20 +66,20 @@ impl TwcsPicker { let mut files_to_merge: Vec<_> = files.files().cloned().collect(); // Filter out large files in append mode - they won't benefit from compaction - if self.append_mode { - if let Some(max_size) = self.max_output_file_size { - let (kept_files, ignored_files) = files_to_merge - .into_iter() - .partition(|fg| fg.size() <= max_size as usize && fg.is_all_level_0()); - files_to_merge = kept_files; - info!( - "Skipped {} large files in append mode for region {}, window {}, max_size: {}", - ignored_files.len(), - region_id, - window, - max_size - ); - } + if self.append_mode + && let Some(max_size) = self.max_output_file_size + { + let (kept_files, ignored_files) = files_to_merge + .into_iter() + .partition(|fg| fg.size() <= max_size as usize && fg.is_all_level_0()); + files_to_merge = kept_files; + info!( + "Skipped {} large files in append mode for region {}, window {}, max_size: {}", + ignored_files.len(), + region_id, + window, + max_size + ); } let sorted_runs = find_sorted_runs(&mut files_to_merge); diff --git a/src/mito2/src/compaction/window.rs b/src/mito2/src/compaction/window.rs index 9dc56c8830..9b23f27e19 100644 --- a/src/mito2/src/compaction/window.rs +++ b/src/mito2/src/compaction/window.rs @@ -16,16 +16,16 @@ use std::collections::BTreeMap; use std::fmt::Debug; use common_telemetry::info; +use common_time::Timestamp; use common_time::range::TimestampRange; use common_time::timestamp::TimeUnit; use common_time::timestamp_millis::BucketAligned; -use common_time::Timestamp; use store_api::storage::RegionId; use crate::compaction::buckets::infer_time_bucket; use crate::compaction::compactor::{CompactionRegion, CompactionVersion}; use crate::compaction::picker::{Picker, PickerOutput}; -use crate::compaction::{get_expired_ssts, CompactionOutput}; +use crate::compaction::{CompactionOutput, get_expired_ssts}; use crate::sst::file::FileHandle; /// Compaction picker that splits the time range of all involved files to 
windows, and merges @@ -204,12 +204,12 @@ mod tests { use std::sync::Arc; use std::time::Duration; - use common_time::range::TimestampRange; use common_time::Timestamp; + use common_time::range::TimestampRange; use store_api::storage::RegionId; use crate::compaction::compactor::CompactionVersion; - use crate::compaction::window::{file_time_bucket_span, WindowedCompactionPicker}; + use crate::compaction::window::{WindowedCompactionPicker, file_time_bucket_span}; use crate::region::options::RegionOptions; use crate::sst::file::{FileId, FileMeta, Level}; use crate::sst::file_purger::NoopFilePurger; diff --git a/src/mito2/src/engine.rs b/src/mito2/src/engine.rs index 4f4ccc5eaa..5616a808dd 100644 --- a/src/mito2/src/engine.rs +++ b/src/mito2/src/engine.rs @@ -75,14 +75,15 @@ use common_error::ext::BoxedError; use common_meta::key::SchemaMetadataManagerRef; use common_recordbatch::SendableRecordBatchStream; use common_telemetry::{info, tracing}; -use common_wal::options::{WalOptions, WAL_OPTIONS_KEY}; +use common_wal::options::{WAL_OPTIONS_KEY, WalOptions}; use futures::future::{join_all, try_join_all}; use futures::stream::{self, Stream, StreamExt}; use object_store::manager::ObjectStoreManagerRef; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; +use store_api::ManifestVersion; use store_api::codec::PrimaryKeyEncoding; -use store_api::logstore::provider::Provider; use store_api::logstore::LogStore; +use store_api::logstore::provider::Provider; use store_api::metadata::{ColumnMetadata, RegionMetadataRef}; use store_api::metric_engine_consts::{ MANIFEST_INFO_EXTENSION_KEY, TABLE_COLUMN_METADATA_EXTENSION_KEY, @@ -94,8 +95,7 @@ use store_api::region_engine::{ use store_api::region_request::{AffectedRows, RegionOpenRequest, RegionRequest}; use store_api::sst_entry::{ManifestSstEntry, StorageSstEntry}; use store_api::storage::{RegionId, ScanRequest, SequenceNumber}; -use store_api::ManifestVersion; -use tokio::sync::{oneshot, Semaphore}; +use tokio::sync::{Semaphore, oneshot}; use crate::cache::{CacheManagerRef, CacheStrategy}; use crate::config::MitoConfig; @@ -115,7 +115,7 @@ use crate::request::{RegionEditRequest, WorkerRequest}; use crate::sst::file::FileMeta; use crate::sst::file_ref::FileReferenceManagerRef; use crate::wal::entry_distributor::{ - build_wal_entry_distributor_and_receivers, DEFAULT_ENTRY_RECEIVER_BUFFER_SIZE, + DEFAULT_ENTRY_RECEIVER_BUFFER_SIZE, build_wal_entry_distributor_and_receivers, }; use crate::wal::raw_entry_reader::{LogStoreRawEntryReader, RawEntryReader}; use crate::worker::WorkerGroup; diff --git a/src/mito2/src/engine/alter_test.rs b/src/mito2/src/engine/alter_test.rs index 7ff4c94d82..383ac48fe6 100644 --- a/src/mito2/src/engine/alter_test.rs +++ b/src/mito2/src/engine/alter_test.rs @@ -35,12 +35,12 @@ use store_api::region_request::{ use store_api::storage::{ColumnId, RegionId, ScanRequest}; use crate::config::MitoConfig; -use crate::engine::listener::{AlterFlushListener, NotifyRegionChangeResultListener}; use crate::engine::MitoEngine; +use crate::engine::listener::{AlterFlushListener, NotifyRegionChangeResultListener}; use crate::error; use crate::test_util::{ - build_rows, build_rows_for_key, flush_region, put_rows, rows_schema, CreateRequestBuilder, - TestEnv, + CreateRequestBuilder, TestEnv, build_rows, build_rows_for_key, flush_region, put_rows, + rows_schema, }; async fn scan_check_after_alter(engine: &MitoEngine, region_id: RegionId, expected: &str) { @@ -702,14 +702,16 @@ async fn 
test_alter_column_set_inverted_index() { alter_job.await.unwrap(); let check_inverted_index_set = |engine: &MitoEngine| { - assert!(engine - .get_region(region_id) - .unwrap() - .metadata() - .column_by_name("tag_0") - .unwrap() - .column_schema - .is_inverted_indexed()) + assert!( + engine + .get_region(region_id) + .unwrap() + .metadata() + .column_by_name("tag_0") + .unwrap() + .column_schema + .is_inverted_indexed() + ) }; check_inverted_index_set(&engine); check_region_version(&engine, region_id, 1, 3, 1, 3); diff --git a/src/mito2/src/engine/append_mode_test.rs b/src/mito2/src/engine/append_mode_test.rs index 02140037e8..3357887fd7 100644 --- a/src/mito2/src/engine/append_mode_test.rs +++ b/src/mito2/src/engine/append_mode_test.rs @@ -23,8 +23,8 @@ use store_api::storage::{RegionId, ScanRequest}; use crate::config::MitoConfig; use crate::test_util::batch_util::sort_batches_and_print; use crate::test_util::{ - build_rows, build_rows_for_key, flush_region, put_rows, reopen_region, rows_schema, - CreateRequestBuilder, TestEnv, + CreateRequestBuilder, TestEnv, build_rows, build_rows_for_key, flush_region, put_rows, + reopen_region, rows_schema, }; #[tokio::test] diff --git a/src/mito2/src/engine/basic_test.rs b/src/mito2/src/engine/basic_test.rs index 96be56875e..bf19befa06 100644 --- a/src/mito2/src/engine/basic_test.rs +++ b/src/mito2/src/engine/basic_test.rs @@ -40,10 +40,10 @@ use store_api::storage::RegionId; use super::*; use crate::region::version::VersionControlData; use crate::test_util::{ - build_delete_rows_for_key, build_rows, build_rows_for_key, delete_rows, delete_rows_schema, - flush_region, kafka_log_store_factory, multiple_log_store_factories, - prepare_test_for_kafka_log_store, put_rows, raft_engine_log_store_factory, reopen_region, - rows_schema, CreateRequestBuilder, LogStoreFactory, TestEnv, + CreateRequestBuilder, LogStoreFactory, TestEnv, build_delete_rows_for_key, build_rows, + build_rows_for_key, delete_rows, delete_rows_schema, flush_region, kafka_log_store_factory, + multiple_log_store_factories, prepare_test_for_kafka_log_store, put_rows, + raft_engine_log_store_factory, reopen_region, rows_schema, }; #[tokio::test] diff --git a/src/mito2/src/engine/batch_open_test.rs b/src/mito2/src/engine/batch_open_test.rs index 9fae7164a0..0862cf3940 100644 --- a/src/mito2/src/engine/batch_open_test.rs +++ b/src/mito2/src/engine/batch_open_test.rs @@ -18,7 +18,7 @@ use api::v1::Rows; use common_error::ext::ErrorExt; use common_error::status_code::StatusCode; use common_recordbatch::RecordBatches; -use common_wal::options::{KafkaWalOptions, WalOptions, WAL_OPTIONS_KEY}; +use common_wal::options::{KafkaWalOptions, WAL_OPTIONS_KEY, WalOptions}; use rstest::rstest; use rstest_reuse::apply; use store_api::region_engine::RegionEngine; @@ -28,9 +28,9 @@ use store_api::storage::{RegionId, ScanRequest}; use super::MitoEngine; use crate::config::MitoConfig; use crate::test_util::{ - build_rows, kafka_log_store_factory, multiple_log_store_factories, - prepare_test_for_kafka_log_store, put_rows, raft_engine_log_store_factory, rows_schema, - CreateRequestBuilder, LogStoreFactory, TestEnv, + CreateRequestBuilder, LogStoreFactory, TestEnv, build_rows, kafka_log_store_factory, + multiple_log_store_factories, prepare_test_for_kafka_log_store, put_rows, + raft_engine_log_store_factory, rows_schema, }; #[apply(multiple_log_store_factories)] diff --git a/src/mito2/src/engine/catchup_test.rs b/src/mito2/src/engine/catchup_test.rs index 968ce9ac3a..5c3d0cbe2d 100644 --- 
a/src/mito2/src/engine/catchup_test.rs +++ b/src/mito2/src/engine/catchup_test.rs @@ -19,7 +19,7 @@ use api::v1::Rows; use common_error::ext::ErrorExt; use common_error::status_code::StatusCode; use common_recordbatch::RecordBatches; -use common_wal::options::{KafkaWalOptions, WalOptions, WAL_OPTIONS_KEY}; +use common_wal::options::{KafkaWalOptions, WAL_OPTIONS_KEY, WalOptions}; use rstest::rstest; use rstest_reuse::{self, apply}; use store_api::logstore::provider::RaftEngineProvider; @@ -33,9 +33,10 @@ use crate::config::MitoConfig; use crate::engine::MitoEngine; use crate::error::Error; use crate::test_util::{ - build_rows, flush_region, kafka_log_store_factory, prepare_test_for_kafka_log_store, put_rows, + CreateRequestBuilder, LogStoreFactory, TestEnv, build_rows, flush_region, + kafka_log_store_factory, prepare_test_for_kafka_log_store, put_rows, raft_engine_log_store_factory, rows_schema, single_kafka_log_store_factory, - single_raft_engine_log_store_factory, CreateRequestBuilder, LogStoreFactory, TestEnv, + single_raft_engine_log_store_factory, }; use crate::wal::EntryId; diff --git a/src/mito2/src/engine/compaction_test.rs b/src/mito2/src/engine/compaction_test.rs index e7a23a23c4..731d4e4013 100644 --- a/src/mito2/src/engine/compaction_test.rs +++ b/src/mito2/src/engine/compaction_test.rs @@ -31,10 +31,10 @@ use store_api::storage::{RegionId, ScanRequest}; use tokio::sync::Notify; use crate::config::MitoConfig; -use crate::engine::listener::CompactionListener; use crate::engine::MitoEngine; +use crate::engine::listener::CompactionListener; use crate::test_util::{ - build_rows_for_key, column_metadata_to_column_schema, put_rows, CreateRequestBuilder, TestEnv, + CreateRequestBuilder, TestEnv, build_rows_for_key, column_metadata_to_column_schema, put_rows, }; async fn put_and_flush( @@ -231,14 +231,16 @@ async fn test_infer_compaction_time_window() { .await .unwrap(); // time window should be absent - assert!(engine - .get_region(region_id) - .unwrap() - .version_control - .current() - .version - .compaction_time_window - .is_none()); + assert!( + engine + .get_region(region_id) + .unwrap() + .version_control + .current() + .version + .compaction_time_window + .is_none() + ); put_and_flush(&engine, region_id, &column_schemas, 1..2).await; put_and_flush(&engine, region_id, &column_schemas, 2..3).await; diff --git a/src/mito2/src/engine/create_test.rs b/src/mito2/src/engine/create_test.rs index c194fc1cb5..de4c199d53 100644 --- a/src/mito2/src/engine/create_test.rs +++ b/src/mito2/src/engine/create_test.rs @@ -23,7 +23,7 @@ use store_api::storage::{RegionId, ScanRequest}; use crate::config::MitoConfig; use crate::region::options::MemtableOptions; use crate::test_util::{ - build_rows, put_rows, reopen_region, rows_schema, CreateRequestBuilder, TestEnv, + CreateRequestBuilder, TestEnv, build_rows, put_rows, reopen_region, rows_schema, }; #[tokio::test] @@ -191,17 +191,21 @@ async fn test_engine_create_with_custom_store() { let region_dir = region.access_layer.build_region_dir(region_id); let object_store_manager = env.get_object_store_manager().unwrap(); - assert!(object_store_manager - .find("Gcs") - .unwrap() - .exists(®ion_dir) - .await - .unwrap()); - assert!(!object_store_manager - .default_object_store() - .exists(®ion_dir) - .await - .unwrap()); + assert!( + object_store_manager + .find("Gcs") + .unwrap() + .exists(®ion_dir) + .await + .unwrap() + ); + assert!( + !object_store_manager + .default_object_store() + .exists(®ion_dir) + .await + .unwrap() + ); } #[tokio::test] diff 
--git a/src/mito2/src/engine/drop_test.rs b/src/mito2/src/engine/drop_test.rs index 38f5d1499e..8c03f3b848 100644 --- a/src/mito2/src/engine/drop_test.rs +++ b/src/mito2/src/engine/drop_test.rs @@ -24,10 +24,10 @@ use store_api::region_request::{RegionDropRequest, RegionRequest}; use store_api::storage::RegionId; use crate::config::MitoConfig; -use crate::engine::listener::DropListener; use crate::engine::MitoEngine; +use crate::engine::listener::DropListener; use crate::test_util::{ - build_rows_for_key, flush_region, put_rows, rows_schema, CreateRequestBuilder, TestEnv, + CreateRequestBuilder, TestEnv, build_rows_for_key, flush_region, put_rows, rows_schema, }; use crate::worker::DROPPING_MARKER_FILE; @@ -73,12 +73,13 @@ async fn test_engine_drop_region() { let region = engine.get_region(region_id).unwrap(); let region_dir = region.access_layer.build_region_dir(region_id); // no dropping marker file - assert!(!env - .get_object_store() - .unwrap() - .exists(&join_path(®ion_dir, DROPPING_MARKER_FILE)) - .await - .unwrap()); + assert!( + !env.get_object_store() + .unwrap() + .exists(&join_path(®ion_dir, DROPPING_MARKER_FILE)) + .await + .unwrap() + ); let rows = Rows { schema: column_schemas.clone(), @@ -206,18 +207,22 @@ async fn test_engine_drop_region_for_custom_store() { common_telemetry::info!("Before drop,default entries: {:?}", entries); // Both these regions should exist before dropping the custom region. - assert!(object_store_manager - .find("Gcs") - .unwrap() - .exists(&custom_region_dir) - .await - .unwrap()); - assert!(object_store_manager - .find("default") - .unwrap() - .exists(&global_region_dir) - .await - .unwrap()); + assert!( + object_store_manager + .find("Gcs") + .unwrap() + .exists(&custom_region_dir) + .await + .unwrap() + ); + assert!( + object_store_manager + .find("default") + .unwrap() + .exists(&global_region_dir) + .await + .unwrap() + ); // Drop the custom region. 
engine @@ -247,16 +252,20 @@ async fn test_engine_drop_region_for_custom_store() { .unwrap(); common_telemetry::info!("After drop,default entries: {:?}", entries); - assert!(!object_store_manager - .find("Gcs") - .unwrap() - .exists(&custom_region_dir) - .await - .unwrap()); - assert!(object_store_manager - .find("default") - .unwrap() - .exists(&global_region_dir) - .await - .unwrap()); + assert!( + !object_store_manager + .find("Gcs") + .unwrap() + .exists(&custom_region_dir) + .await + .unwrap() + ); + assert!( + object_store_manager + .find("default") + .unwrap() + .exists(&global_region_dir) + .await + .unwrap() + ); } diff --git a/src/mito2/src/engine/edit_region_test.rs b/src/mito2/src/engine/edit_region_test.rs index f7377d1be7..35a29db4e3 100644 --- a/src/mito2/src/engine/edit_region_test.rs +++ b/src/mito2/src/engine/edit_region_test.rs @@ -20,12 +20,12 @@ use object_store::ObjectStore; use store_api::region_engine::RegionEngine; use store_api::region_request::RegionRequest; use store_api::storage::RegionId; -use tokio::sync::{oneshot, Barrier}; +use tokio::sync::{Barrier, oneshot}; use crate::config::MitoConfig; +use crate::engine::MitoEngine; use crate::engine::flush_test::MockTimeProvider; use crate::engine::listener::EventListener; -use crate::engine::MitoEngine; use crate::manifest::action::RegionEdit; use crate::region::MitoRegionRef; use crate::sst::file::{FileId, FileMeta}; diff --git a/src/mito2/src/engine/filter_deleted_test.rs b/src/mito2/src/engine/filter_deleted_test.rs index 8e856ce75d..5499e4e168 100644 --- a/src/mito2/src/engine/filter_deleted_test.rs +++ b/src/mito2/src/engine/filter_deleted_test.rs @@ -21,7 +21,7 @@ use store_api::storage::{RegionId, ScanRequest}; use crate::config::MitoConfig; use crate::test_util::batch_util::sort_batches_and_print; use crate::test_util::{ - build_rows, delete_rows, flush_region, put_rows, rows_schema, CreateRequestBuilder, TestEnv, + CreateRequestBuilder, TestEnv, build_rows, delete_rows, flush_region, put_rows, rows_schema, }; #[tokio::test] diff --git a/src/mito2/src/engine/flush_test.rs b/src/mito2/src/engine/flush_test.rs index 052322b809..61cbfa96f2 100644 --- a/src/mito2/src/engine/flush_test.rs +++ b/src/mito2/src/engine/flush_test.rs @@ -14,8 +14,8 @@ //! Flush tests for mito engine. 
-use std::sync::atomic::{AtomicI64, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicI64, Ordering}; use std::time::Duration; use api::v1::Rows; @@ -31,10 +31,10 @@ use store_api::storage::{RegionId, ScanRequest}; use crate::config::MitoConfig; use crate::engine::listener::{FlushListener, StallListener}; use crate::test_util::{ - build_rows, build_rows_for_key, flush_region, kafka_log_store_factory, - multiple_log_store_factories, prepare_test_for_kafka_log_store, put_rows, - raft_engine_log_store_factory, reopen_region, rows_schema, single_kafka_log_store_factory, - CreateRequestBuilder, LogStoreFactory, MockWriteBufferManager, TestEnv, + CreateRequestBuilder, LogStoreFactory, MockWriteBufferManager, TestEnv, build_rows, + build_rows_for_key, flush_region, kafka_log_store_factory, multiple_log_store_factories, + prepare_test_for_kafka_log_store, put_rows, raft_engine_log_store_factory, reopen_region, + rows_schema, single_kafka_log_store_factory, }; use crate::time_provider::TimeProvider; use crate::worker::MAX_INITIAL_CHECK_DELAY_SECS; diff --git a/src/mito2/src/engine/listener.rs b/src/mito2/src/engine/listener.rs index f11ae35456..8a2a444882 100644 --- a/src/mito2/src/engine/listener.rs +++ b/src/mito2/src/engine/listener.rs @@ -14,8 +14,8 @@ //! Engine event listener for tests. -use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicUsize, Ordering}; use std::time::Duration; use async_trait::async_trait; diff --git a/src/mito2/src/engine/merge_mode_test.rs b/src/mito2/src/engine/merge_mode_test.rs index 4f6dcd61a3..986edf8013 100644 --- a/src/mito2/src/engine/merge_mode_test.rs +++ b/src/mito2/src/engine/merge_mode_test.rs @@ -23,8 +23,8 @@ use store_api::storage::{RegionId, ScanRequest}; use crate::config::MitoConfig; use crate::test_util::batch_util::sort_batches_and_print; use crate::test_util::{ - build_delete_rows_for_key, build_rows_with_fields, delete_rows, delete_rows_schema, - flush_region, put_rows, reopen_region, rows_schema, CreateRequestBuilder, TestEnv, + CreateRequestBuilder, TestEnv, build_delete_rows_for_key, build_rows_with_fields, delete_rows, + delete_rows_schema, flush_region, put_rows, reopen_region, rows_schema, }; #[tokio::test] diff --git a/src/mito2/src/engine/open_test.rs b/src/mito2/src/engine/open_test.rs index f10404f429..d20bc1157a 100644 --- a/src/mito2/src/engine/open_test.rs +++ b/src/mito2/src/engine/open_test.rs @@ -27,12 +27,12 @@ use store_api::region_request::{ use store_api::storage::{RegionId, ScanRequest}; use tokio::sync::oneshot; -use crate::compaction::compactor::{open_compaction_region, OpenCompactionRegionRequest}; +use crate::compaction::compactor::{OpenCompactionRegionRequest, open_compaction_region}; use crate::config::MitoConfig; use crate::error; use crate::region::options::RegionOptions; use crate::test_util::{ - build_rows, flush_region, put_rows, reopen_region, rows_schema, CreateRequestBuilder, TestEnv, + CreateRequestBuilder, TestEnv, build_rows, flush_region, put_rows, reopen_region, rows_schema, }; #[tokio::test] @@ -238,17 +238,21 @@ async fn test_engine_region_open_with_custom_store() { // The region should not be opened with the default object store. 
let region = engine.get_region(region_id).unwrap(); let object_store_manager = env.get_object_store_manager().unwrap(); - assert!(!object_store_manager - .default_object_store() - .exists(®ion.access_layer.build_region_dir(region_id)) - .await - .unwrap()); - assert!(object_store_manager - .find("Gcs") - .unwrap() - .exists(®ion.access_layer.build_region_dir(region_id)) - .await - .unwrap()); + assert!( + !object_store_manager + .default_object_store() + .exists(®ion.access_layer.build_region_dir(region_id)) + .await + .unwrap() + ); + assert!( + object_store_manager + .find("Gcs") + .unwrap() + .exists(®ion.access_layer.build_region_dir(region_id)) + .await + .unwrap() + ); } #[tokio::test] diff --git a/src/mito2/src/engine/parallel_test.rs b/src/mito2/src/engine/parallel_test.rs index f423b4b41a..abdc0776a4 100644 --- a/src/mito2/src/engine/parallel_test.rs +++ b/src/mito2/src/engine/parallel_test.rs @@ -24,8 +24,8 @@ use store_api::storage::{RegionId, ScanRequest}; use crate::config::MitoConfig; use crate::test_util::{ - build_delete_rows_for_key, build_rows_for_key, delete_rows, delete_rows_schema, flush_region, - put_rows, rows_schema, CreateRequestBuilder, TestEnv, + CreateRequestBuilder, TestEnv, build_delete_rows_for_key, build_rows_for_key, delete_rows, + delete_rows_schema, flush_region, put_rows, rows_schema, }; async fn scan_in_parallel( diff --git a/src/mito2/src/engine/projection_test.rs b/src/mito2/src/engine/projection_test.rs index 90b8a70e46..edae798560 100644 --- a/src/mito2/src/engine/projection_test.rs +++ b/src/mito2/src/engine/projection_test.rs @@ -20,7 +20,7 @@ use store_api::region_request::RegionRequest; use store_api::storage::{RegionId, ScanRequest}; use crate::config::MitoConfig; -use crate::test_util::{put_rows, rows_schema, CreateRequestBuilder, TestEnv}; +use crate::test_util::{CreateRequestBuilder, TestEnv, put_rows, rows_schema}; /// Build rows for multiple tags and fields. 
fn build_rows_multi_tags_fields( diff --git a/src/mito2/src/engine/prune_test.rs b/src/mito2/src/engine/prune_test.rs index 6d774d8b4d..ccac5428c9 100644 --- a/src/mito2/src/engine/prune_test.rs +++ b/src/mito2/src/engine/prune_test.rs @@ -23,7 +23,7 @@ use store_api::storage::{RegionId, ScanRequest}; use crate::config::MitoConfig; use crate::test_util::{ - build_rows, flush_region, put_rows, rows_schema, CreateRequestBuilder, TestEnv, + CreateRequestBuilder, TestEnv, build_rows, flush_region, put_rows, rows_schema, }; async fn check_prune_row_groups(exprs: Vec, expected: &str) { @@ -71,7 +71,7 @@ async fn test_read_parquet_stats() { check_prune_row_groups( vec![ - datafusion_expr::col("ts").gt(lit(ScalarValue::TimestampMillisecond(Some(4000), None))) + datafusion_expr::col("ts").gt(lit(ScalarValue::TimestampMillisecond(Some(4000), None))), ], "\ +-------+---------+---------------------+ diff --git a/src/mito2/src/engine/row_selector_test.rs b/src/mito2/src/engine/row_selector_test.rs index 44e3ff66b6..317ede5a97 100644 --- a/src/mito2/src/engine/row_selector_test.rs +++ b/src/mito2/src/engine/row_selector_test.rs @@ -21,7 +21,7 @@ use store_api::storage::{RegionId, ScanRequest, TimeSeriesRowSelector}; use crate::config::MitoConfig; use crate::test_util::batch_util::sort_batches_and_print; use crate::test_util::{ - build_rows_for_key, flush_region, put_rows, rows_schema, CreateRequestBuilder, TestEnv, + CreateRequestBuilder, TestEnv, build_rows_for_key, flush_region, put_rows, rows_schema, }; async fn test_last_row(append_mode: bool) { diff --git a/src/mito2/src/engine/set_role_state_test.rs b/src/mito2/src/engine/set_role_state_test.rs index e0936d2666..30818caccd 100644 --- a/src/mito2/src/engine/set_role_state_test.rs +++ b/src/mito2/src/engine/set_role_state_test.rs @@ -23,7 +23,7 @@ use store_api::region_request::{RegionPutRequest, RegionRequest}; use store_api::storage::RegionId; use crate::config::MitoConfig; -use crate::test_util::{build_rows, put_rows, rows_schema, CreateRequestBuilder, TestEnv}; +use crate::test_util::{CreateRequestBuilder, TestEnv, build_rows, put_rows, rows_schema}; /// Helper function to assert a successful response with expected entry id fn assert_success_response(response: &SetRegionRoleStateResponse, expected_entry_id: u64) { diff --git a/src/mito2/src/engine/staging_test.rs b/src/mito2/src/engine/staging_test.rs index c22ddf92a9..c384774df7 100644 --- a/src/mito2/src/engine/staging_test.rs +++ b/src/mito2/src/engine/staging_test.rs @@ -26,7 +26,7 @@ use store_api::storage::RegionId; use crate::config::MitoConfig; use crate::region::{RegionLeaderState, RegionRoleState}; use crate::request::WorkerRequest; -use crate::test_util::{build_rows, put_rows, rows_schema, CreateRequestBuilder, TestEnv}; +use crate::test_util::{CreateRequestBuilder, TestEnv, build_rows, put_rows, rows_schema}; #[tokio::test] async fn test_staging_state_integration() { diff --git a/src/mito2/src/engine/sync_test.rs b/src/mito2/src/engine/sync_test.rs index a4d02dbb02..8116d874a7 100644 --- a/src/mito2/src/engine/sync_test.rs +++ b/src/mito2/src/engine/sync_test.rs @@ -31,7 +31,7 @@ use super::MitoEngine; use crate::config::MitoConfig; use crate::error::Error; use crate::test_util::{ - build_rows, flush_region, put_rows, rows_schema, CreateRequestBuilder, TestEnv, + CreateRequestBuilder, TestEnv, build_rows, flush_region, put_rows, rows_schema, }; fn add_tag1() -> RegionAlterRequest { diff --git a/src/mito2/src/engine/truncate_test.rs b/src/mito2/src/engine/truncate_test.rs index 
e724626af7..557a988487 100644 --- a/src/mito2/src/engine/truncate_test.rs +++ b/src/mito2/src/engine/truncate_test.rs @@ -28,7 +28,7 @@ use super::ScanRequest; use crate::config::MitoConfig; use crate::engine::listener::FlushTruncateListener; use crate::test_util::{ - build_rows, put_rows, rows_schema, CreateRequestBuilder, MockWriteBufferManager, TestEnv, + CreateRequestBuilder, MockWriteBufferManager, TestEnv, build_rows, put_rows, rows_schema, }; #[tokio::test] diff --git a/src/mito2/src/error.rs b/src/mito2/src/error.rs index e4d93e1253..e7df6d8b9a 100644 --- a/src/mito2/src/error.rs +++ b/src/mito2/src/error.rs @@ -20,16 +20,16 @@ use common_error::ext::{BoxedError, ErrorExt}; use common_error::status_code::StatusCode; use common_macro::stack_trace_debug; use common_runtime::JoinError; -use common_time::timestamp::TimeUnit; use common_time::Timestamp; +use common_time::timestamp::TimeUnit; use datatypes::arrow::error::ArrowError; use datatypes::prelude::ConcreteDataType; use object_store::ErrorKind; use prost::DecodeError; use snafu::{Location, Snafu}; +use store_api::ManifestVersion; use store_api::logstore::provider::Provider; use store_api::storage::RegionId; -use store_api::ManifestVersion; use tokio::time::error::Elapsed; use crate::cache::file_cache::FileType; @@ -959,7 +959,11 @@ pub enum Error { #[snafu(display("Manual compaction is override by following operations."))] ManualCompactionOverride {}, - #[snafu(display("Incompatible WAL provider change. This is typically caused by changing WAL provider in database config file without completely cleaning existing files. Global provider: {}, region provider: {}", global, region))] + #[snafu(display( + "Incompatible WAL provider change. This is typically caused by changing WAL provider in database config file without completely cleaning existing files. Global provider: {}, region provider: {}", + global, + region + ))] IncompatibleWalProviderChange { global: String, region: String }, #[snafu(display("Expected mito manifest info"))] diff --git a/src/mito2/src/extension.rs b/src/mito2/src/extension.rs index ba8991c1b6..58a16f7c4d 100644 --- a/src/mito2/src/extension.rs +++ b/src/mito2/src/extension.rs @@ -3,15 +3,15 @@ use std::sync::Arc; use async_trait::async_trait; use common_error::ext::BoxedError; -use common_time::range::TimestampRange; use common_time::Timestamp; +use common_time::range::TimestampRange; use store_api::storage::{ScanRequest, SequenceNumber}; use crate::error::Result; +use crate::read::BoxedBatchStream; use crate::read::range::RowGroupIndex; use crate::read::scan_region::StreamContext; use crate::read::scan_util::PartitionMetrics; -use crate::read::BoxedBatchStream; use crate::region::MitoRegionRef; pub type InclusiveTimeRange = (Timestamp, Timestamp); @@ -72,7 +72,7 @@ pub type BoxedExtensionRangeProvider = Box; /// The factory to create an [`ExtensionRangeProvider`], injecting some utilities. 
pub trait ExtensionRangeProviderFactory: Send + Sync { fn create_extension_range_provider(&self, region: MitoRegionRef) - -> BoxedExtensionRangeProvider; + -> BoxedExtensionRangeProvider; } pub type BoxedExtensionRangeProviderFactory = Box; diff --git a/src/mito2/src/flush.rs b/src/mito2/src/flush.rs index 9d8bbc0826..d421b0e824 100644 --- a/src/mito2/src/flush.rs +++ b/src/mito2/src/flush.rs @@ -16,8 +16,8 @@ use std::collections::HashMap; use std::num::NonZeroU64; -use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicUsize, Ordering}; use common_telemetry::{debug, error, info, trace}; use snafu::ResultExt; @@ -37,10 +37,10 @@ use crate::metrics::{ FLUSH_BYTES_TOTAL, FLUSH_ELAPSED, FLUSH_FAILURE_TOTAL, FLUSH_REQUESTS_TOTAL, INFLIGHT_FLUSH_COUNT, }; +use crate::read::Source; use crate::read::dedup::{DedupReader, LastNonNull, LastRow}; use crate::read::merge::MergeReaderBuilder; use crate::read::scan_region::PredicateGroup; -use crate::read::Source; use crate::region::options::{IndexOptions, MergeMode}; use crate::region::version::{VersionControlData, VersionControlRef}; use crate::region::{ManifestContextRef, RegionLeaderState, RegionRoleState}; @@ -136,7 +136,10 @@ impl WriteBufferManager for WriteBufferManagerImpl { if mutable_memtable_memory_usage > self.mutable_limit { debug!( "Engine should flush (over mutable limit), mutable_usage: {}, memory_usage: {}, mutable_limit: {}, global_limit: {}", - mutable_memtable_memory_usage, self.memory_usage(), self.mutable_limit, self.global_write_buffer_size, + mutable_memtable_memory_usage, + self.memory_usage(), + self.mutable_limit, + self.global_write_buffer_size, ); return true; } @@ -148,18 +151,16 @@ impl WriteBufferManager for WriteBufferManagerImpl { if memory_usage >= self.global_write_buffer_size { if mutable_memtable_memory_usage >= self.global_write_buffer_size / 2 { debug!( - "Engine should flush (over total limit), memory_usage: {}, global_write_buffer_size: {}, \ + "Engine should flush (over total limit), memory_usage: {}, global_write_buffer_size: {}, \ mutable_usage: {}.", - memory_usage, - self.global_write_buffer_size, - mutable_memtable_memory_usage); + memory_usage, self.global_write_buffer_size, mutable_memtable_memory_usage + ); return true; } else { trace!( "Engine won't flush, memory_usage: {}, global_write_buffer_size: {}, mutable_usage: {}.", - memory_usage, - self.global_write_buffer_size, - mutable_memtable_memory_usage); + memory_usage, self.global_write_buffer_size, mutable_memtable_memory_usage + ); } } @@ -739,10 +740,11 @@ impl FlushScheduler { /// Schedules a new flush task when the scheduler can submit next task. pub(crate) fn schedule_next_flush(&mut self) -> Result<()> { - debug_assert!(self - .region_status - .values() - .all(|status| status.flushing || status.pending_task.is_some())); + debug_assert!( + self.region_status + .values() + .all(|status| status.flushing || status.pending_task.is_some()) + ); // Get the first region from status map. 
let Some(flush_status) = self @@ -842,7 +844,7 @@ mod tests { use crate::cache::CacheManager; use crate::memtable::time_series::TimeSeriesMemtableBuilder; use crate::test_util::scheduler_util::{SchedulerEnv, VecScheduler}; - use crate::test_util::version_util::{write_rows_to_version, VersionControlBuilder}; + use crate::test_util::version_util::{VersionControlBuilder, write_rows_to_version}; #[test] fn test_get_mutable_limit() { diff --git a/src/mito2/src/manifest/action.rs b/src/mito2/src/manifest/action.rs index 7a0c1afa04..c4de57896c 100644 --- a/src/mito2/src/manifest/action.rs +++ b/src/mito2/src/manifest/action.rs @@ -20,9 +20,9 @@ use std::time::Duration; use chrono::Utc; use serde::{Deserialize, Serialize}; use snafu::{OptionExt, ResultExt}; +use store_api::ManifestVersion; use store_api::metadata::RegionMetadataRef; use store_api::storage::{RegionId, SequenceNumber}; -use store_api::ManifestVersion; use strum::Display; use crate::error::{ diff --git a/src/mito2/src/manifest/checkpointer.rs b/src/mito2/src/manifest/checkpointer.rs index 88a2c683bf..3f3164ad93 100644 --- a/src/mito2/src/manifest/checkpointer.rs +++ b/src/mito2/src/manifest/checkpointer.rs @@ -13,12 +13,12 @@ // limitations under the License. use std::fmt::Debug; -use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; use common_telemetry::{error, info}; use store_api::storage::RegionId; -use store_api::{ManifestVersion, MIN_VERSION}; +use store_api::{MIN_VERSION, ManifestVersion}; use crate::error::Result; use crate::manifest::action::{RegionCheckpoint, RegionManifest}; diff --git a/src/mito2/src/manifest/manager.rs b/src/mito2/src/manifest/manager.rs index 2126f3a06c..3137e7c4bb 100644 --- a/src/mito2/src/manifest/manager.rs +++ b/src/mito2/src/manifest/manager.rs @@ -12,16 +12,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicU64, Ordering}; use common_datasource::compression::CompressionType; use common_telemetry::{debug, info}; use futures::TryStreamExt; use object_store::ObjectStore; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::metadata::RegionMetadataRef; -use store_api::{ManifestVersion, MAX_VERSION, MIN_VERSION}; +use store_api::{MAX_VERSION, MIN_VERSION, ManifestVersion}; use crate::error::{ self, InstallManifestToSnafu, NoCheckpointSnafu, NoManifestsSnafu, RegionStoppedSnafu, Result, @@ -32,7 +32,7 @@ use crate::manifest::action::{ }; use crate::manifest::checkpointer::Checkpointer; use crate::manifest::storage::{ - file_version, is_checkpoint_file, is_delta_file, ManifestObjectStore, + ManifestObjectStore, file_version, is_checkpoint_file, is_delta_file, }; use crate::metrics::MANIFEST_OP_ELAPSED; use crate::region::{RegionLeaderState, RegionRoleState}; @@ -656,11 +656,12 @@ mod test { async fn open_manifest_manager() { let env = TestEnv::new().await; // Try to opens an empty manifest. - assert!(env - .create_manifest_manager(CompressionType::Uncompressed, 10, None) - .await - .unwrap() - .is_none()); + assert!( + env.create_manifest_manager(CompressionType::Uncompressed, 10, None) + .await + .unwrap() + .is_none() + ); // Creates a manifest. 
let metadata = Arc::new(basic_region_metadata()); diff --git a/src/mito2/src/manifest/storage.rs b/src/mito2/src/manifest/storage.rs index 746350b4bc..4b2f261f69 100644 --- a/src/mito2/src/manifest/storage.rs +++ b/src/mito2/src/manifest/storage.rs @@ -21,15 +21,15 @@ use std::sync::{Arc, RwLock}; use common_datasource::compression::CompressionType; use common_telemetry::debug; use crc32fast::Hasher; -use futures::future::try_join_all; use futures::TryStreamExt; +use futures::future::try_join_all; use lazy_static::lazy_static; -use object_store::{util, Entry, ErrorKind, Lister, ObjectStore}; +use object_store::{Entry, ErrorKind, Lister, ObjectStore, util}; use regex::Regex; use serde::{Deserialize, Serialize}; -use snafu::{ensure, ResultExt}; -use store_api::storage::RegionId; +use snafu::{ResultExt, ensure}; use store_api::ManifestVersion; +use store_api::storage::RegionId; use tokio::sync::Semaphore; use crate::error::{ @@ -342,11 +342,7 @@ impl ManifestObjectStore { .iter() .filter_map( |(_e, is_checkpoint, version)| { - if *is_checkpoint { - Some(version) - } else { - None - } + if *is_checkpoint { Some(version) } else { None } }, ) .max() @@ -378,11 +374,7 @@ impl ManifestObjectStore { debug!( "Deleting {} logs from manifest storage path {} until {}, checkpoint_version: {:?}, paths: {:?}", - ret, - self.path, - end, - checkpoint_version, - paths, + ret, self.path, end, checkpoint_version, paths, ); self.object_store @@ -689,8 +681,8 @@ impl CheckpointMetadata { #[cfg(test)] mod tests { use common_test_util::temp_dir::create_temp_dir; - use object_store::services::Fs; use object_store::ObjectStore; + use object_store::services::Fs; use super::*; @@ -794,11 +786,13 @@ mod tests { // delete all logs and checkpoints let _ = log_store.delete_until(11, false).await.unwrap(); - assert!(log_store - .load_checkpoint(new_checkpoint_metadata_with_version(3)) - .await - .unwrap() - .is_none()); + assert!( + log_store + .load_checkpoint(new_checkpoint_metadata_with_version(3)) + .await + .unwrap() + .is_none() + ); assert!(log_store.load_last_checkpoint().await.unwrap().is_none()); let manifests = log_store.fetch_manifests(0, 11).await.unwrap(); let mut it = manifests.into_iter(); diff --git a/src/mito2/src/manifest/tests/checkpoint.rs b/src/mito2/src/manifest/tests/checkpoint.rs index dff372eb7a..a160c24d11 100644 --- a/src/mito2/src/manifest/tests/checkpoint.rs +++ b/src/mito2/src/manifest/tests/checkpoint.rs @@ -96,12 +96,14 @@ async fn manager_without_checkpoint() { } // no checkpoint - assert!(manager - .store() - .load_last_checkpoint() - .await - .unwrap() - .is_none()); + assert!( + manager + .store() + .load_last_checkpoint() + .await + .unwrap() + .is_none() + ); // check files let mut expected = vec![ @@ -149,12 +151,14 @@ async fn manager_with_checkpoint_distance_1() { } // has checkpoint - assert!(manager - .store() - .load_last_checkpoint() - .await - .unwrap() - .is_some()); + assert!( + manager + .store() + .load_last_checkpoint() + .await + .unwrap() + .is_some() + ); // check files let mut expected = vec![ @@ -215,12 +219,14 @@ async fn test_corrupted_data_causing_checksum_error() { } // Check if there is a checkpoint - assert!(manager - .store() - .load_last_checkpoint() - .await - .unwrap() - .is_some()); + assert!( + manager + .store() + .load_last_checkpoint() + .await + .unwrap() + .is_some() + ); // Corrupt the last checkpoint data let mut corrupted_bytes = manager @@ -396,12 +402,14 @@ async fn manifest_install_manifest_to_with_checkpoint() { } // has checkpoint - 
assert!(manager - .store() - .load_last_checkpoint() - .await - .unwrap() - .is_some()); + assert!( + manager + .store() + .load_last_checkpoint() + .await + .unwrap() + .is_some() + ); // check files let mut expected = vec![ @@ -469,12 +477,14 @@ async fn test_checkpoint_bypass_in_staging_mode() { assert!(!manager.checkpointer().is_doing_checkpoint()); // Verify no checkpoint was created in staging mode - assert!(manager - .store() - .load_last_checkpoint() - .await - .unwrap() - .is_none()); + assert!( + manager + .store() + .load_last_checkpoint() + .await + .unwrap() + .is_none() + ); // Now switch to normal mode and apply one more action manager diff --git a/src/mito2/src/memtable.rs b/src/mito2/src/memtable.rs index 65b070662a..5a8e9d2cd5 100644 --- a/src/mito2/src/memtable.rs +++ b/src/mito2/src/memtable.rs @@ -35,9 +35,9 @@ use crate::flush::WriteBufferManagerRef; use crate::memtable::partition_tree::{PartitionTreeConfig, PartitionTreeMemtableBuilder}; use crate::memtable::time_series::TimeSeriesMemtableBuilder; use crate::metrics::WRITE_BUFFER_BYTES; +use crate::read::Batch; use crate::read::prune::PruneTimeIterator; use crate::read::scan_region::PredicateGroup; -use crate::read::Batch; use crate::region::options::{MemtableOptions, MergeMode}; use crate::sst::file::FileTimeRange; @@ -260,15 +260,13 @@ impl AllocTracker { /// /// The region MUST ensure that it calls this method inside the region writer's write lock. pub(crate) fn done_allocating(&self) { - if let Some(write_buffer_manager) = &self.write_buffer_manager { - if self + if let Some(write_buffer_manager) = &self.write_buffer_manager + && self .is_done_allocating .compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed) .is_ok() - { - write_buffer_manager - .schedule_free_mem(self.bytes_allocated.load(Ordering::Relaxed)); - } + { + write_buffer_manager.schedule_free_mem(self.bytes_allocated.load(Ordering::Relaxed)); } } diff --git a/src/mito2/src/memtable/bulk.rs b/src/mito2/src/memtable/bulk.rs index 1d8e4cfd47..0129c87d9d 100644 --- a/src/mito2/src/memtable/bulk.rs +++ b/src/mito2/src/memtable/bulk.rs @@ -42,7 +42,7 @@ use crate::memtable::{ MemtableRangeContext, MemtableRanges, MemtableRef, MemtableStats, PredicateGroup, }; use crate::sst::file::FileId; -use crate::sst::{to_flat_sst_arrow_schema, FlatSchemaOptions}; +use crate::sst::{FlatSchemaOptions, to_flat_sst_arrow_schema}; /// All parts in a bulk memtable. 
#[derive(Default)] @@ -460,7 +460,7 @@ mod tests { use super::*; use crate::memtable::bulk::part::BulkPartConverter; use crate::read::scan_region::PredicateGroup; - use crate::sst::{to_flat_sst_arrow_schema, FlatSchemaOptions}; + use crate::sst::{FlatSchemaOptions, to_flat_sst_arrow_schema}; use crate::test_util::memtable_util::{build_key_values_with_ts_seq_values, metadata_for_test}; fn create_bulk_part_with_converter( diff --git a/src/mito2/src/memtable/bulk/context.rs b/src/mito2/src/memtable/bulk/context.rs index a88bd31d1b..6b2da7d645 100644 --- a/src/mito2/src/memtable/bulk/context.rs +++ b/src/mito2/src/memtable/bulk/context.rs @@ -17,7 +17,7 @@ use std::collections::VecDeque; use std::sync::Arc; -use mito_codec::row_converter::{build_primary_key_codec, DensePrimaryKeyCodec}; +use mito_codec::row_converter::{DensePrimaryKeyCodec, build_primary_key_codec}; use parquet::file::metadata::ParquetMetaData; use store_api::metadata::RegionMetadataRef; use store_api::storage::ColumnId; @@ -102,7 +102,7 @@ fn build_read_format( projection: &Option<&[ColumnId]>, flat_format: bool, ) -> ReadFormat { - let read_format = if let Some(column_ids) = &projection { + if let Some(column_ids) = &projection { ReadFormat::new(region_metadata, column_ids.iter().copied(), flat_format) } else { // No projection, lists all column ids to read. @@ -114,7 +114,5 @@ fn build_read_format( .map(|col| col.column_id), flat_format, ) - }; - - read_format + } } diff --git a/src/mito2/src/memtable/bulk/part.rs b/src/mito2/src/memtable/bulk/part.rs index 344e5c26bf..d045a5e793 100644 --- a/src/mito2/src/memtable/bulk/part.rs +++ b/src/mito2/src/memtable/bulk/part.rs @@ -17,9 +17,9 @@ use std::collections::VecDeque; use std::sync::Arc; -use api::helper::{value_to_grpc_value, ColumnDataTypeWrapper}; +use api::helper::{ColumnDataTypeWrapper, value_to_grpc_value}; use api::v1::bulk_wal_entry::Body; -use api::v1::{bulk_wal_entry, ArrowIpc, BulkWalEntry, Mutation, OpType}; +use api::v1::{ArrowIpc, BulkWalEntry, Mutation, OpType, bulk_wal_entry}; use bytes::Bytes; use common_grpc::flight::{FlightDecoder, FlightEncoder, FlightMessage}; use common_recordbatch::DfRecordBatch as RecordBatch; @@ -28,8 +28,8 @@ use datatypes::arrow; use datatypes::arrow::array::{ Array, ArrayRef, BinaryBuilder, BinaryDictionaryBuilder, DictionaryArray, StringBuilder, StringDictionaryBuilder, TimestampMicrosecondArray, TimestampMillisecondArray, - TimestampNanosecondArray, TimestampSecondArray, UInt32Array, UInt64Array, UInt64Builder, - UInt8Array, UInt8Builder, + TimestampNanosecondArray, TimestampSecondArray, UInt8Array, UInt8Builder, UInt32Array, + UInt64Array, UInt64Builder, }; use datatypes::arrow::compute::{SortColumn, SortOptions, TakeOptions}; use datatypes::arrow::datatypes::{SchemaRef, UInt32Type}; @@ -40,7 +40,7 @@ use datatypes::value::{Value, ValueRef}; use datatypes::vectors::Helper; use mito_codec::key_values::{KeyValue, KeyValues, KeyValuesRef}; use mito_codec::row_converter::{ - build_primary_key_codec, DensePrimaryKeyCodec, PrimaryKeyCodec, PrimaryKeyCodecExt, + DensePrimaryKeyCodec, PrimaryKeyCodec, PrimaryKeyCodecExt, build_primary_key_codec, }; use parquet::arrow::ArrowWriter; use parquet::data_type::AsBytes; @@ -49,18 +49,18 @@ use parquet::file::properties::WriterProperties; use snafu::{OptionExt, ResultExt, Snafu}; use store_api::codec::PrimaryKeyEncoding; use store_api::metadata::{RegionMetadata, RegionMetadataRef}; -use store_api::storage::consts::PRIMARY_KEY_COLUMN_NAME; use store_api::storage::SequenceNumber; +use 
store_api::storage::consts::PRIMARY_KEY_COLUMN_NAME; use table::predicate::Predicate; use crate::error::{ self, ColumnNotFoundSnafu, ComputeArrowSnafu, DataTypeMismatchSnafu, EncodeMemtableSnafu, EncodeSnafu, NewRecordBatchSnafu, Result, }; +use crate::memtable::BoxedRecordBatchIterator; use crate::memtable::bulk::context::BulkIterContextRef; use crate::memtable::bulk::part_reader::EncodedBulkPartIter; use crate::memtable::time_series::{ValueBuilder, Values}; -use crate::memtable::BoxedRecordBatchIterator; use crate::sst::parquet::flat_format::primary_key_column_index; use crate::sst::parquet::format::{PrimaryKeyArray, PrimaryKeyArrayBuilder, ReadFormat}; use crate::sst::parquet::helper::parse_parquet_metadata; @@ -886,7 +886,7 @@ mod tests { use super::*; use crate::memtable::bulk::context::BulkIterContext; use crate::sst::parquet::format::{PrimaryKeyReadFormat, ReadFormat}; - use crate::sst::{to_flat_sst_arrow_schema, FlatSchemaOptions}; + use crate::sst::{FlatSchemaOptions, to_flat_sst_arrow_schema}; use crate::test_util::memtable_util::{ build_key_values_with_ts_seq_values, metadata_for_test, region_metadata_to_row_schema, }; @@ -1356,7 +1356,7 @@ mod tests { check_prune_row_group( &part, Some(Predicate::new(vec![ - datafusion_expr::col("k0").eq(datafusion_expr::lit("a")) + datafusion_expr::col("k0").eq(datafusion_expr::lit("a")), ])), 100, ); @@ -1374,7 +1374,7 @@ mod tests { check_prune_row_group( &part, Some(Predicate::new(vec![ - datafusion_expr::col("v0").eq(datafusion_expr::lit(150i64)) + datafusion_expr::col("v0").eq(datafusion_expr::lit(150i64)), ])), 1, ); diff --git a/src/mito2/src/memtable/bulk/part_reader.rs b/src/mito2/src/memtable/bulk/part_reader.rs index e51f8caf01..fb80323619 100644 --- a/src/mito2/src/memtable/bulk/part_reader.rs +++ b/src/mito2/src/memtable/bulk/part_reader.rs @@ -20,8 +20,8 @@ use bytes::Bytes; use datatypes::arrow::array::{BooleanArray, Scalar, UInt64Array}; use datatypes::arrow::buffer::BooleanBuffer; use datatypes::arrow::record_batch::RecordBatch; -use parquet::arrow::arrow_reader::ParquetRecordBatchReader; use parquet::arrow::ProjectionMask; +use parquet::arrow::arrow_reader::ParquetRecordBatchReader; use parquet::file::metadata::ParquetMetaData; use snafu::ResultExt; use store_api::storage::SequenceNumber; @@ -267,7 +267,7 @@ mod tests { use api::v1::SemanticType; use datafusion_expr::{col, lit}; - use datatypes::arrow::array::{ArrayRef, Int64Array, StringArray, UInt64Array, UInt8Array}; + use datatypes::arrow::array::{ArrayRef, Int64Array, StringArray, UInt8Array, UInt64Array}; use datatypes::arrow::datatypes::{DataType, Field, Schema}; use datatypes::data_type::ConcreteDataType; use datatypes::schema::ColumnSchema; diff --git a/src/mito2/src/memtable/bulk/row_group_reader.rs b/src/mito2/src/memtable/bulk/row_group_reader.rs index 660ed15ec4..9918d81871 100644 --- a/src/mito2/src/memtable/bulk/row_group_reader.rs +++ b/src/mito2/src/memtable/bulk/row_group_reader.rs @@ -19,7 +19,7 @@ use bytes::Bytes; use datatypes::arrow::array::RecordBatch; use datatypes::arrow::error::ArrowError; use parquet::arrow::arrow_reader::{ParquetRecordBatchReader, RowGroups, RowSelection}; -use parquet::arrow::{parquet_to_arrow_field_levels, FieldLevels, ProjectionMask}; +use parquet::arrow::{FieldLevels, ProjectionMask, parquet_to_arrow_field_levels}; use parquet::column::page::{PageIterator, PageReader}; use parquet::file::metadata::ParquetMetaData; use snafu::ResultExt; @@ -27,10 +27,10 @@ use snafu::ResultExt; use crate::error; use 
crate::error::ReadDataPartSnafu; use crate::memtable::bulk::context::BulkIterContextRef; +use crate::sst::parquet::DEFAULT_READ_BATCH_SIZE; use crate::sst::parquet::format::ReadFormat; use crate::sst::parquet::reader::RowGroupReaderContext; use crate::sst::parquet::row_group::{ColumnChunkIterator, RowGroupBase}; -use crate::sst::parquet::DEFAULT_READ_BATCH_SIZE; /// Helper for reading specific row group inside Memtable Parquet parts. // This is similar to [mito2::sst::parquet::row_group::InMemoryRowGroup] since diff --git a/src/mito2/src/memtable/partition_tree.rs b/src/mito2/src/memtable/partition_tree.rs index 76cb2e7aee..7942eb58df 100644 --- a/src/mito2/src/memtable/partition_tree.rs +++ b/src/mito2/src/memtable/partition_tree.rs @@ -24,12 +24,12 @@ mod shard_builder; mod tree; use std::fmt; -use std::sync::atomic::{AtomicI64, AtomicU64, AtomicUsize, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicI64, AtomicU64, AtomicUsize, Ordering}; use common_base::readable_size::ReadableSize; use mito_codec::key_values::KeyValue; -use mito_codec::row_converter::{build_primary_key_codec, PrimaryKeyCodec}; +use mito_codec::row_converter::{PrimaryKeyCodec, build_primary_key_codec}; use serde::{Deserialize, Serialize}; use store_api::metadata::RegionMetadataRef; use store_api::storage::{ColumnId, SequenceNumber}; diff --git a/src/mito2/src/memtable/partition_tree/data.rs b/src/mito2/src/memtable/partition_tree/data.rs index 016583277b..7e33df7491 100644 --- a/src/mito2/src/memtable/partition_tree/data.rs +++ b/src/mito2/src/memtable/partition_tree/data.rs @@ -30,12 +30,12 @@ use datatypes::schema::ColumnSchema; use datatypes::types::TimestampType; use datatypes::vectors::{ TimestampMicrosecondVector, TimestampMillisecondVector, TimestampNanosecondVector, - TimestampSecondVector, UInt16Vector, UInt16VectorBuilder, UInt64Vector, UInt64VectorBuilder, - UInt8Vector, UInt8VectorBuilder, + TimestampSecondVector, UInt8Vector, UInt8VectorBuilder, UInt16Vector, UInt16VectorBuilder, + UInt64Vector, UInt64VectorBuilder, }; use mito_codec::key_values::KeyValue; -use parquet::arrow::arrow_reader::{ParquetRecordBatchReader, ParquetRecordBatchReaderBuilder}; use parquet::arrow::ArrowWriter; +use parquet::arrow::arrow_reader::{ParquetRecordBatchReader, ParquetRecordBatchReaderBuilder}; use parquet::basic::{Compression, Encoding, ZstdLevel}; use parquet::file::properties::{EnabledStatistics, WriterProperties}; use parquet::schema::types::ColumnPath; @@ -45,8 +45,8 @@ use store_api::storage::consts::{OP_TYPE_COLUMN_NAME, SEQUENCE_COLUMN_NAME}; use crate::error; use crate::error::Result; -use crate::memtable::partition_tree::merger::{DataBatchKey, DataNode, DataSource, Merger}; use crate::memtable::partition_tree::PkIndex; +use crate::memtable::partition_tree::merger::{DataBatchKey, DataNode, DataSource, Merger}; use crate::metrics::{ PARTITION_TREE_DATA_BUFFER_FREEZE_STAGE_ELAPSED, PARTITION_TREE_READ_STAGE_ELAPSED, }; diff --git a/src/mito2/src/memtable/partition_tree/dedup.rs b/src/mito2/src/memtable/partition_tree/dedup.rs index 59f481266b..320016fa29 100644 --- a/src/mito2/src/memtable/partition_tree/dedup.rs +++ b/src/mito2/src/memtable/partition_tree/dedup.rs @@ -15,9 +15,9 @@ use std::ops::Range; use crate::error::Result; +use crate::memtable::partition_tree::PkId; use crate::memtable::partition_tree::data::DataBatch; use crate::memtable::partition_tree::shard::DataBatchSource; -use crate::memtable::partition_tree::PkId; /// A reader that dedup sorted batches from a merger. 
pub struct DedupReader { diff --git a/src/mito2/src/memtable/partition_tree/merger.rs b/src/mito2/src/memtable/partition_tree/merger.rs index 71dcacc08e..ed5ab78f15 100644 --- a/src/mito2/src/memtable/partition_tree/merger.rs +++ b/src/mito2/src/memtable/partition_tree/merger.rs @@ -18,8 +18,8 @@ use std::fmt::Debug; use std::ops::Range; use crate::error::Result; -use crate::memtable::partition_tree::data::{DataBatch, DataBufferReader, DataPartReader}; use crate::memtable::partition_tree::PkIndex; +use crate::memtable::partition_tree::data::{DataBatch, DataBufferReader, DataPartReader}; /// Nodes of merger's heap. pub trait Node: Ord { @@ -297,7 +297,7 @@ mod tests { use store_api::metadata::RegionMetadataRef; use super::*; - use crate::memtable::partition_tree::data::{timestamp_array_to_i64_slice, DataBuffer}; + use crate::memtable::partition_tree::data::{DataBuffer, timestamp_array_to_i64_slice}; use crate::test_util::memtable_util::{build_key_values_with_ts_seq_values, metadata_for_test}; fn write_rows_to_buffer( diff --git a/src/mito2/src/memtable/partition_tree/partition.rs b/src/mito2/src/memtable/partition_tree/partition.rs index 6e455bbfca..e6e3b8bf81 100644 --- a/src/mito2/src/memtable/partition_tree/partition.rs +++ b/src/mito2/src/memtable/partition_tree/partition.rs @@ -32,7 +32,7 @@ use store_api::metric_engine_consts::DATA_SCHEMA_TABLE_ID_COLUMN_NAME; use store_api::storage::ColumnId; use crate::error::{EncodeSnafu, Result}; -use crate::memtable::partition_tree::data::{DataBatch, DataParts, DATA_INIT_CAP}; +use crate::memtable::partition_tree::data::{DATA_INIT_CAP, DataBatch, DataParts}; use crate::memtable::partition_tree::dedup::DedupReader; use crate::memtable::partition_tree::shard::{ BoxedDataBatchSource, Shard, ShardMerger, ShardNode, ShardSource, diff --git a/src/mito2/src/memtable/partition_tree/shard.rs b/src/mito2/src/memtable/partition_tree/shard.rs index 0ea9c1ab87..f1f2be9d01 100644 --- a/src/mito2/src/memtable/partition_tree/shard.rs +++ b/src/mito2/src/memtable/partition_tree/shard.rs @@ -23,7 +23,7 @@ use store_api::metadata::RegionMetadataRef; use crate::error::Result; use crate::memtable::partition_tree::data::{ - DataBatch, DataParts, DataPartsReader, DataPartsReaderBuilder, DATA_INIT_CAP, + DATA_INIT_CAP, DataBatch, DataParts, DataPartsReader, DataPartsReaderBuilder, }; use crate::memtable::partition_tree::dict::KeyDictRef; use crate::memtable::partition_tree::merger::{Merger, Node}; @@ -234,10 +234,10 @@ impl ShardReader { while self.parts_reader.is_valid() { let pk_index = self.parts_reader.current_data_batch().pk_index(); - if let Some(yield_pk_index) = self.last_yield_pk_index { - if pk_index == yield_pk_index { - break; - } + if let Some(yield_pk_index) = self.last_yield_pk_index + && pk_index == yield_pk_index + { + break; } self.keys_before_pruning += 1; // Safety: `key_filter` is some so the shard has primary keys. 
@@ -429,11 +429,11 @@ mod tests { use std::sync::Arc; use super::*; + use crate::memtable::KeyValues; + use crate::memtable::partition_tree::PkIndex; use crate::memtable::partition_tree::data::timestamp_array_to_i64_slice; use crate::memtable::partition_tree::dict::KeyDictBuilder; - use crate::memtable::partition_tree::PkIndex; use crate::memtable::stats::WriteMetrics; - use crate::memtable::KeyValues; use crate::test_util::memtable_util::{ build_key_values_with_ts_seq_values, encode_keys, metadata_for_test, }; diff --git a/src/mito2/src/memtable/partition_tree/shard_builder.rs b/src/mito2/src/memtable/partition_tree/shard_builder.rs index 600097516d..63a2d895bc 100644 --- a/src/mito2/src/memtable/partition_tree/shard_builder.rs +++ b/src/mito2/src/memtable/partition_tree/shard_builder.rs @@ -24,7 +24,7 @@ use store_api::metadata::RegionMetadataRef; use crate::error::Result; use crate::memtable::partition_tree::data::{ - DataBatch, DataBuffer, DataBufferReader, DataBufferReaderBuilder, DataParts, DATA_INIT_CAP, + DATA_INIT_CAP, DataBatch, DataBuffer, DataBufferReader, DataBufferReaderBuilder, DataParts, }; use crate::memtable::partition_tree::dict::{DictBuilderReader, KeyDictBuilder}; use crate::memtable::partition_tree::shard::Shard; @@ -273,10 +273,10 @@ impl ShardBuilderReader { while self.data_reader.is_valid() { let pk_index = self.data_reader.current_data_batch().pk_index(); - if let Some(yield_pk_index) = self.last_yield_pk_index { - if pk_index == yield_pk_index { - break; - } + if let Some(yield_pk_index) = self.last_yield_pk_index + && pk_index == yield_pk_index + { + break; } self.keys_before_pruning += 1; let key = self.dict_reader.key_by_pk_index(pk_index); @@ -317,8 +317,8 @@ impl Drop for ShardBuilderReader { mod tests { use super::*; - use crate::memtable::partition_tree::data::timestamp_array_to_i64_slice; use crate::memtable::KeyValues; + use crate::memtable::partition_tree::data::timestamp_array_to_i64_slice; use crate::test_util::memtable_util::{ build_key_values_with_ts_seq_values, encode_key_by_kv, metadata_for_test, }; @@ -359,10 +359,12 @@ mod tests { let config = PartitionTreeConfig::default(); let mut shard_builder = ShardBuilder::new(metadata.clone(), &config, 1); let mut metrics = WriteMetrics::default(); - assert!(shard_builder - .finish(metadata.clone(), &mut HashMap::new()) - .unwrap() - .is_none()); + assert!( + shard_builder + .finish(metadata.clone(), &mut HashMap::new()) + .unwrap() + .is_none() + ); assert_eq!(1, shard_builder.current_shard_id); for key_values in &input { diff --git a/src/mito2/src/memtable/partition_tree/tree.rs b/src/mito2/src/memtable/partition_tree/tree.rs index 09924469c8..0b903a2616 100644 --- a/src/mito2/src/memtable/partition_tree/tree.rs +++ b/src/mito2/src/memtable/partition_tree/tree.rs @@ -27,7 +27,7 @@ use mito_codec::key_values::KeyValue; use mito_codec::primary_key_filter::is_partition_column; use mito_codec::row_converter::sparse::{FieldWithId, SparseEncoder}; use mito_codec::row_converter::{PrimaryKeyCodec, SortField}; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use store_api::codec::PrimaryKeyEncoding; use store_api::metadata::RegionMetadataRef; use store_api::storage::{ColumnId, SequenceNumber}; @@ -37,15 +37,15 @@ use crate::error::{ EncodeSnafu, EncodeSparsePrimaryKeySnafu, PrimaryKeyLengthMismatchSnafu, Result, }; use crate::flush::WriteBufferManagerRef; +use crate::memtable::partition_tree::PartitionTreeConfig; use crate::memtable::partition_tree::partition::{ Partition, PartitionKey, 
PartitionReader, PartitionRef, ReadPartitionContext, }; -use crate::memtable::partition_tree::PartitionTreeConfig; use crate::memtable::stats::WriteMetrics; use crate::memtable::{BoxedBatchIterator, KeyValues}; use crate::metrics::{PARTITION_TREE_READ_STAGE_ELAPSED, READ_ROWS_TOTAL, READ_STAGE_ELAPSED}; -use crate::read::dedup::LastNonNullIter; use crate::read::Batch; +use crate::read::dedup::LastNonNullIter; use crate::region::options::MergeMode; /// The partition tree. diff --git a/src/mito2/src/memtable/simple_bulk_memtable.rs b/src/mito2/src/memtable/simple_bulk_memtable.rs index b223ddd924..e1d4430a4d 100644 --- a/src/mito2/src/memtable/simple_bulk_memtable.rs +++ b/src/mito2/src/memtable/simple_bulk_memtable.rs @@ -38,9 +38,9 @@ use crate::memtable::{ MemtableRange, MemtableRangeContext, MemtableRanges, MemtableRef, MemtableStats, }; use crate::metrics::MEMTABLE_ACTIVE_SERIES_COUNT; +use crate::read::Batch; use crate::read::dedup::LastNonNullIter; use crate::read::scan_region::PredicateGroup; -use crate::read::Batch; use crate::region::options::MergeMode; use crate::{error, metrics}; diff --git a/src/mito2/src/memtable/time_partition.rs b/src/mito2/src/memtable/time_partition.rs index ce33beb329..9c77f61f3a 100644 --- a/src/mito2/src/memtable/time_partition.rs +++ b/src/mito2/src/memtable/time_partition.rs @@ -19,9 +19,9 @@ use std::sync::{Arc, Mutex, MutexGuard}; use std::time::Duration; use common_telemetry::debug; +use common_time::Timestamp; use common_time::timestamp::TimeUnit; use common_time::timestamp_millis::BucketAligned; -use common_time::Timestamp; use datatypes::arrow; use datatypes::arrow::array::{ ArrayRef, BooleanArray, RecordBatch, RecordBatchOptions, TimestampMicrosecondArray, @@ -30,8 +30,8 @@ use datatypes::arrow::array::{ use datatypes::arrow::buffer::{BooleanBuffer, MutableBuffer}; use datatypes::arrow::datatypes::{DataType, Int64Type, SchemaRef}; use mito_codec::key_values::KeyValue; -use mito_codec::row_converter::{build_primary_key_codec, PrimaryKeyCodec}; -use smallvec::{smallvec, SmallVec}; +use mito_codec::row_converter::{PrimaryKeyCodec, build_primary_key_codec}; +use smallvec::{SmallVec, smallvec}; use snafu::{OptionExt, ResultExt}; use store_api::metadata::RegionMetadataRef; @@ -40,7 +40,7 @@ use crate::error::{InvalidRequestSnafu, Result}; use crate::memtable::bulk::part::{BulkPart, BulkPartConverter}; use crate::memtable::version::SmallMemtableVec; use crate::memtable::{KeyValues, MemtableBuilderRef, MemtableId, MemtableRef}; -use crate::sst::{to_flat_sst_arrow_schema, FlatSchemaOptions}; +use crate::sst::{FlatSchemaOptions, to_flat_sst_arrow_schema}; /// Initial time window if not specified. 
const INITIAL_TIME_WINDOW: Duration = Duration::from_days(1); @@ -353,13 +353,13 @@ impl TimePartitions { .builder .build(inner.alloc_memtable_id(), &self.metadata); debug!( - "Create time partition {:?} for region {}, duration: {:?}, memtable_id: {}, parts_total: {}", - range, - self.metadata.region_id, - self.part_duration, - memtable.id(), - inner.parts.len() + 1 - ); + "Create time partition {:?} for region {}, duration: {:?}, memtable_id: {}, parts_total: {}", + range, + self.metadata.region_id, + self.part_duration, + memtable.id(), + inner.parts.len() + 1 + ); let pos = inner.parts.len(); inner.parts.push(TimePartition { memtable, diff --git a/src/mito2/src/memtable/time_series.rs b/src/mito2/src/memtable/time_series.rs index ca7b9e635a..7b7d328467 100644 --- a/src/mito2/src/memtable/time_series.rs +++ b/src/mito2/src/memtable/time_series.rs @@ -33,11 +33,11 @@ use datatypes::types::TimestampType; use datatypes::value::{Value, ValueRef}; use datatypes::vectors::{ Helper, TimestampMicrosecondVector, TimestampMillisecondVector, TimestampNanosecondVector, - TimestampSecondVector, UInt64Vector, UInt8Vector, + TimestampSecondVector, UInt8Vector, UInt64Vector, }; use mito_codec::key_values::KeyValue; use mito_codec::row_converter::{DensePrimaryKeyCodec, PrimaryKeyCodecExt}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::metadata::RegionMetadataRef; use store_api::storage::{ColumnId, SequenceNumber}; use table::predicate::Predicate; @@ -803,10 +803,12 @@ impl Series { let column_size = frozen[0].fields.len() + 3; if cfg!(debug_assertions) { - debug_assert!(frozen - .iter() - .zip(frozen.iter().skip(1)) - .all(|(prev, next)| { prev.fields.len() == next.fields.len() })); + debug_assert!( + frozen + .iter() + .zip(frozen.iter().skip(1)) + .all(|(prev, next)| { prev.fields.len() == next.fields.len() }) + ); } let arrays = frozen.iter().map(|v| v.columns()).collect::>(); diff --git a/src/mito2/src/read.rs b/src/mito2/src/read.rs index 01f07692c7..a10b2ba021 100644 --- a/src/mito2/src/read.rs +++ b/src/mito2/src/read.rs @@ -52,13 +52,13 @@ use datatypes::value::{Value, ValueRef}; use datatypes::vectors::{ BooleanVector, Helper, TimestampMicrosecondVector, TimestampMillisecondVector, TimestampMillisecondVectorBuilder, TimestampNanosecondVector, TimestampSecondVector, - UInt32Vector, UInt64Vector, UInt64VectorBuilder, UInt8Vector, UInt8VectorBuilder, Vector, + UInt8Vector, UInt8VectorBuilder, UInt32Vector, UInt64Vector, UInt64VectorBuilder, Vector, VectorRef, }; -use futures::stream::BoxStream; use futures::TryStreamExt; +use futures::stream::BoxStream; use mito_codec::row_converter::{CompositeValues, PrimaryKeyCodec}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::metadata::RegionMetadata; use store_api::storage::{ColumnId, SequenceNumber}; @@ -693,21 +693,21 @@ impl BatchChecker { pub(crate) fn check_monotonic(&mut self, batch: &Batch) -> Result<(), String> { batch.check_monotonic()?; - if let (Some(start), Some(first)) = (self.start, batch.first_timestamp()) { - if start > first { - return Err(format!( - "batch's first timestamp is before the start timestamp: {:?} > {:?}", - start, first - )); - } + if let (Some(start), Some(first)) = (self.start, batch.first_timestamp()) + && start > first + { + return Err(format!( + "batch's first timestamp is before the start timestamp: {:?} > {:?}", + start, first + )); } - if let (Some(end), Some(last)) = (self.end, batch.last_timestamp()) { - if 
end <= last { - return Err(format!( - "batch's last timestamp is after the end timestamp: {:?} <= {:?}", - end, last - )); - } + if let (Some(end), Some(last)) = (self.end, batch.last_timestamp()) + && end <= last + { + return Err(format!( + "batch's last timestamp is after the end timestamp: {:?} <= {:?}", + end, last + )); } // Checks the batch is behind the last batch. diff --git a/src/mito2/src/read/compat.rs b/src/mito2/src/read/compat.rs index ebede80cf7..789a6b4192 100644 --- a/src/mito2/src/read/compat.rs +++ b/src/mito2/src/read/compat.rs @@ -21,7 +21,7 @@ use api::v1::SemanticType; use datatypes::arrow::array::{ Array, ArrayRef, BinaryArray, BinaryBuilder, DictionaryArray, UInt32Array, }; -use datatypes::arrow::compute::{take, TakeOptions}; +use datatypes::arrow::compute::{TakeOptions, take}; use datatypes::arrow::datatypes::{Schema, SchemaRef}; use datatypes::arrow::record_batch::RecordBatch; use datatypes::data_type::ConcreteDataType; @@ -29,10 +29,10 @@ use datatypes::prelude::DataType; use datatypes::value::Value; use datatypes::vectors::VectorRef; use mito_codec::row_converter::{ - build_primary_key_codec, build_primary_key_codec_with_fields, CompositeValues, PrimaryKeyCodec, - SortField, + CompositeValues, PrimaryKeyCodec, SortField, build_primary_key_codec, + build_primary_key_codec_with_fields, }; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::metadata::{RegionMetadata, RegionMetadataRef}; use store_api::storage::ColumnId; @@ -40,11 +40,11 @@ use crate::error::{ CompatReaderSnafu, ComputeArrowSnafu, CreateDefaultSnafu, DecodeSnafu, EncodeSnafu, NewRecordBatchSnafu, Result, UnexpectedSnafu, }; -use crate::read::flat_projection::{flat_projected_columns, FlatProjectionMapper}; +use crate::read::flat_projection::{FlatProjectionMapper, flat_projected_columns}; use crate::read::projection::{PrimaryKeyProjectionMapper, ProjectionMapper}; use crate::read::{Batch, BatchColumn, BatchReader}; use crate::sst::parquet::flat_format::primary_key_column_index; -use crate::sst::parquet::format::{FormatProjection, PrimaryKeyArray, INTERNAL_COLUMN_NUM}; +use crate::sst::parquet::format::{FormatProjection, INTERNAL_COLUMN_NUM, PrimaryKeyArray}; use crate::sst::{internal_fields, tag_maybe_to_dictionary_field}; /// Reader to adapt schema of underlying reader to expected schema. @@ -397,11 +397,12 @@ impl CompatFields { #[must_use] fn compat(&self, batch: Batch) -> Batch { debug_assert_eq!(self.actual_fields.len(), batch.fields().len()); - debug_assert!(self - .actual_fields - .iter() - .zip(batch.fields()) - .all(|((id, _), batch_column)| *id == batch_column.column_id)); + debug_assert!( + self.actual_fields + .iter() + .zip(batch.fields()) + .all(|((id, _), batch_column)| *id == batch_column.column_id) + ); let len = batch.num_rows(); let fields = self @@ -627,17 +628,15 @@ struct RewritePrimaryKey { impl RewritePrimaryKey { /// Make primary key of the `batch` compatible. fn compat(&self, mut batch: Batch) -> Result { - let values = if let Some(pk_values) = batch.pk_values() { - pk_values - } else { + if batch.pk_values().is_none() { let new_pk_values = self .original .decode(batch.primary_key()) .context(DecodeSnafu)?; batch.set_pk_values(new_pk_values); - // Safety: We ensure pk_values is not None. - batch.pk_values().as_ref().unwrap() - }; + } + // Safety: We ensure pk_values is not None. 
+ let values = batch.pk_values().unwrap(); let mut buffer = Vec::with_capacity( batch.primary_key().len() + self.new.estimated_size().unwrap_or_default(), @@ -906,14 +905,14 @@ mod tests { use api::v1::{OpType, SemanticType}; use datatypes::arrow::array::{ ArrayRef, BinaryDictionaryBuilder, Int64Array, StringDictionaryBuilder, - TimestampMillisecondArray, UInt64Array, UInt8Array, + TimestampMillisecondArray, UInt8Array, UInt64Array, }; use datatypes::arrow::datatypes::UInt32Type; use datatypes::arrow::record_batch::RecordBatch; use datatypes::prelude::ConcreteDataType; use datatypes::schema::ColumnSchema; use datatypes::value::ValueRef; - use datatypes::vectors::{Int64Vector, TimestampMillisecondVector, UInt64Vector, UInt8Vector}; + use datatypes::vectors::{Int64Vector, TimestampMillisecondVector, UInt8Vector, UInt64Vector}; use mito_codec::row_converter::{ DensePrimaryKeyCodec, PrimaryKeyCodecExt, SparsePrimaryKeyCodec, }; @@ -924,8 +923,8 @@ mod tests { use super::*; use crate::read::flat_projection::FlatProjectionMapper; use crate::sst::parquet::flat_format::FlatReadFormat; - use crate::sst::{to_flat_sst_arrow_schema, FlatSchemaOptions}; - use crate::test_util::{check_reader_result, VecBatchReader}; + use crate::sst::{FlatSchemaOptions, to_flat_sst_arrow_schema}; + use crate::test_util::{VecBatchReader, check_reader_result}; /// Creates a new [RegionMetadata]. fn new_metadata( @@ -1100,9 +1099,11 @@ mod tests { ], &[1], ); - assert!(may_compat_primary_key(&reader_meta, &reader_meta) - .unwrap() - .is_none()); + assert!( + may_compat_primary_key(&reader_meta, &reader_meta) + .unwrap() + .is_none() + ); } #[test] @@ -1119,9 +1120,11 @@ mod tests { &[1], )); - assert!(may_compat_primary_key(&reader_meta, &reader_meta) - .unwrap() - .is_none()); + assert!( + may_compat_primary_key(&reader_meta, &reader_meta) + .unwrap() + .is_none() + ); } #[test] diff --git a/src/mito2/src/read/dedup.rs b/src/mito2/src/read/dedup.rs index 3ad3a160a5..6ff4d53ba4 100644 --- a/src/mito2/src/read/dedup.rs +++ b/src/mito2/src/read/dedup.rs @@ -586,11 +586,11 @@ mod tests { use std::sync::Arc; use api::v1::OpType; - use datatypes::arrow::array::{TimestampMillisecondArray, UInt64Array, UInt8Array}; + use datatypes::arrow::array::{TimestampMillisecondArray, UInt8Array, UInt64Array}; use super::*; use crate::read::BatchBuilder; - use crate::test_util::{check_reader_result, new_batch, VecBatchReader}; + use crate::test_util::{VecBatchReader, check_reader_result, new_batch}; #[tokio::test] async fn test_dedup_reader_no_duplications() { diff --git a/src/mito2/src/read/flat_dedup.rs b/src/mito2/src/read/flat_dedup.rs index f872e738ec..62484f9c12 100644 --- a/src/mito2/src/read/flat_dedup.rs +++ b/src/mito2/src/read/flat_dedup.rs @@ -19,15 +19,15 @@ use std::ops::Range; use api::v1::OpType; use async_stream::try_stream; use datatypes::arrow::array::{ - make_comparator, Array, ArrayRef, BinaryArray, BooleanArray, BooleanBufferBuilder, UInt64Array, - UInt8Array, + Array, ArrayRef, BinaryArray, BooleanArray, BooleanBufferBuilder, UInt8Array, UInt64Array, + make_comparator, }; use datatypes::arrow::buffer::BooleanBuffer; use datatypes::arrow::compute::kernels::cmp::distinct; -use datatypes::arrow::compute::kernels::partition::{partition, Partitions}; +use datatypes::arrow::compute::kernels::partition::{Partitions, partition}; use datatypes::arrow::compute::kernels::take::take; use datatypes::arrow::compute::{ - concat_batches, filter_record_batch, take_record_batch, SortOptions, TakeOptions, + SortOptions, TakeOptions, 
concat_batches, filter_record_batch, take_record_batch, }; use datatypes::arrow::error::ArrowError; use datatypes::arrow::record_batch::RecordBatch; @@ -40,7 +40,7 @@ use crate::read::dedup::DedupMetrics; use crate::sst::parquet::flat_format::{ op_type_column_index, primary_key_column_index, time_index_column_index, }; -use crate::sst::parquet::format::{PrimaryKeyArray, FIXED_POS_COLUMN_NUM}; +use crate::sst::parquet::format::{FIXED_POS_COLUMN_NUM, PrimaryKeyArray}; /// An iterator to dedup sorted batches from an iterator based on the dedup strategy. pub struct FlatDedupIterator { @@ -671,7 +671,7 @@ mod tests { use api::v1::OpType; use datatypes::arrow::array::{ ArrayRef, BinaryDictionaryBuilder, Int64Array, StringDictionaryBuilder, - TimestampMillisecondArray, UInt64Array, UInt8Array, + TimestampMillisecondArray, UInt8Array, UInt64Array, }; use datatypes::arrow::datatypes::{DataType, Field, Schema, SchemaRef, TimeUnit, UInt32Type}; use datatypes::arrow::record_batch::RecordBatch; diff --git a/src/mito2/src/read/flat_merge.rs b/src/mito2/src/read/flat_merge.rs index fa7994121b..890334f91c 100644 --- a/src/mito2/src/read/flat_merge.rs +++ b/src/mito2/src/read/flat_merge.rs @@ -825,7 +825,7 @@ mod tests { use api::v1::OpType; use datatypes::arrow::array::builder::BinaryDictionaryBuilder; - use datatypes::arrow::array::{Int64Array, TimestampMillisecondArray, UInt64Array, UInt8Array}; + use datatypes::arrow::array::{Int64Array, TimestampMillisecondArray, UInt8Array, UInt64Array}; use datatypes::arrow::datatypes::{DataType, Field, Schema, TimeUnit, UInt32Type}; use datatypes::arrow::record_batch::RecordBatch; diff --git a/src/mito2/src/read/flat_projection.rs b/src/mito2/src/read/flat_projection.rs index ce2e51f85a..e44d5335ef 100644 --- a/src/mito2/src/read/flat_projection.rs +++ b/src/mito2/src/read/flat_projection.rs @@ -18,8 +18,8 @@ use std::sync::Arc; use api::v1::SemanticType; use common_error::ext::BoxedError; -use common_recordbatch::error::ExternalSnafu; use common_recordbatch::RecordBatch; +use common_recordbatch::error::ExternalSnafu; use datatypes::arrow::datatypes::Field; use datatypes::prelude::{ConcreteDataType, DataType}; use datatypes::schema::{Schema, SchemaRef}; diff --git a/src/mito2/src/read/last_row.rs b/src/mito2/src/read/last_row.rs index 92a7bfa1ad..c604397d38 100644 --- a/src/mito2/src/read/last_row.rs +++ b/src/mito2/src/read/last_row.rs @@ -21,8 +21,8 @@ use datatypes::vectors::UInt32Vector; use store_api::storage::TimeSeriesRowSelector; use crate::cache::{ - selector_result_cache_hit, selector_result_cache_miss, CacheStrategy, SelectorResultKey, - SelectorResultValue, + CacheStrategy, SelectorResultKey, SelectorResultValue, selector_result_cache_hit, + selector_result_cache_miss, }; use crate::error::Result; use crate::read::{Batch, BatchReader, BoxedBatchReader}; @@ -287,7 +287,7 @@ mod tests { use api::v1::OpType; use super::*; - use crate::test_util::{check_reader_result, new_batch, VecBatchReader}; + use crate::test_util::{VecBatchReader, check_reader_result, new_batch}; #[tokio::test] async fn test_last_row_one_batch() { diff --git a/src/mito2/src/read/merge.rs b/src/mito2/src/read/merge.rs index 8060c53405..f9afbe66fd 100644 --- a/src/mito2/src/read/merge.rs +++ b/src/mito2/src/read/merge.rs @@ -466,7 +466,7 @@ mod tests { use api::v1::OpType; use super::*; - use crate::test_util::{check_reader_result, new_batch, VecBatchReader}; + use crate::test_util::{VecBatchReader, check_reader_result, new_batch}; #[tokio::test] async fn test_merge_reader_empty() { diff 
--git a/src/mito2/src/read/plain_batch.rs b/src/mito2/src/read/plain_batch.rs index e227154700..f22b6688d6 100644 --- a/src/mito2/src/read/plain_batch.rs +++ b/src/mito2/src/read/plain_batch.rs @@ -18,7 +18,7 @@ use std::collections::HashMap; use std::sync::Arc; use api::v1::OpType; -use datatypes::arrow::array::{ArrayRef, BooleanArray, UInt64Array, UInt8Array}; +use datatypes::arrow::array::{ArrayRef, BooleanArray, UInt8Array, UInt64Array}; use datatypes::arrow::compute::filter_record_batch; use datatypes::arrow::datatypes::SchemaRef; use datatypes::arrow::record_batch::RecordBatch; @@ -249,8 +249,8 @@ mod tests { Float64Array, Int32Array, StringArray, TimestampMillisecondArray, }; use datatypes::arrow::datatypes::{DataType, Field, Schema, TimeUnit}; - use datatypes::schema::constraint::ColumnDefaultConstraint; use datatypes::schema::ColumnSchema; + use datatypes::schema::constraint::ColumnDefaultConstraint; use datatypes::value::Value; use store_api::metadata::{ColumnMetadata, RegionMetadataBuilder}; use store_api::storage::consts::{OP_TYPE_COLUMN_NAME, SEQUENCE_COLUMN_NAME}; diff --git a/src/mito2/src/read/projection.rs b/src/mito2/src/read/projection.rs index 447e59f1cd..aef5180e55 100644 --- a/src/mito2/src/read/projection.rs +++ b/src/mito2/src/read/projection.rs @@ -20,21 +20,21 @@ use std::sync::Arc; use api::v1::SemanticType; use common_error::ext::BoxedError; -use common_recordbatch::error::ExternalSnafu; use common_recordbatch::RecordBatch; +use common_recordbatch::error::ExternalSnafu; use datatypes::prelude::{ConcreteDataType, DataType}; use datatypes::schema::{Schema, SchemaRef}; use datatypes::value::Value; use datatypes::vectors::VectorRef; -use mito_codec::row_converter::{build_primary_key_codec, CompositeValues, PrimaryKeyCodec}; +use mito_codec::row_converter::{CompositeValues, PrimaryKeyCodec, build_primary_key_codec}; use snafu::{OptionExt, ResultExt}; use store_api::metadata::RegionMetadataRef; use store_api::storage::ColumnId; use crate::cache::CacheStrategy; use crate::error::{InvalidRequestSnafu, Result}; -use crate::read::flat_projection::FlatProjectionMapper; use crate::read::Batch; +use crate::read::flat_projection::FlatProjectionMapper; /// Only cache vector when its length `<=` this value. const MAX_VECTOR_LENGTH_TO_CACHE: usize = 16384; @@ -293,11 +293,12 @@ impl PrimaryKeyProjectionMapper { } debug_assert_eq!(self.batch_fields.len(), batch.fields().len()); - debug_assert!(self - .batch_fields - .iter() - .zip(batch.fields()) - .all(|((id, _), batch_col)| *id == batch_col.column_id)); + debug_assert!( + self.batch_fields + .iter() + .zip(batch.fields()) + .all(|((id, _), batch_col)| *id == batch_col.column_id) + ); // Skips decoding pk if we don't need to output it. 
let pk_values = if self.has_tags { @@ -406,7 +407,7 @@ mod tests { use std::sync::Arc; use api::v1::OpType; - use datatypes::arrow::array::{Int64Array, TimestampMillisecondArray, UInt64Array, UInt8Array}; + use datatypes::arrow::array::{Int64Array, TimestampMillisecondArray, UInt8Array, UInt64Array}; use datatypes::arrow::datatypes::Field; use datatypes::arrow::util::pretty; use datatypes::value::ValueRef; @@ -509,15 +510,21 @@ mod tests { +---------------------+----+----+----+----+"; assert_eq!(expect, print_record_batch(record_batch)); - assert!(cache - .get_repeated_vector(&ConcreteDataType::int64_datatype(), &Value::Int64(1)) - .is_some()); - assert!(cache - .get_repeated_vector(&ConcreteDataType::int64_datatype(), &Value::Int64(2)) - .is_some()); - assert!(cache - .get_repeated_vector(&ConcreteDataType::int64_datatype(), &Value::Int64(3)) - .is_none()); + assert!( + cache + .get_repeated_vector(&ConcreteDataType::int64_datatype(), &Value::Int64(1)) + .is_some() + ); + assert!( + cache + .get_repeated_vector(&ConcreteDataType::int64_datatype(), &Value::Int64(2)) + .is_some() + ); + assert!( + cache + .get_repeated_vector(&ConcreteDataType::int64_datatype(), &Value::Int64(3)) + .is_none() + ); let record_batch = mapper .as_primary_key() .unwrap() diff --git a/src/mito2/src/read/prune.rs b/src/mito2/src/read/prune.rs index 3a5fc81e30..413f787980 100644 --- a/src/mito2/src/read/prune.rs +++ b/src/mito2/src/read/prune.rs @@ -331,7 +331,7 @@ impl FlatPruneReader { mod tests { use api::v1::OpType; use datafusion_common::ScalarValue; - use datafusion_expr::{col, lit, Expr}; + use datafusion_expr::{Expr, col, lit}; use super::*; use crate::test_util::new_batch; diff --git a/src/mito2/src/read/range.rs b/src/mito2/src/read/range.rs index 28c47174bc..b5cc5c9aa0 100644 --- a/src/mito2/src/read/range.rs +++ b/src/mito2/src/read/range.rs @@ -17,7 +17,7 @@ use std::sync::{Arc, Mutex}; use common_time::Timestamp; -use smallvec::{smallvec, SmallVec}; +use smallvec::{SmallVec, smallvec}; use store_api::region_engine::PartitionRange; use store_api::storage::TimeSeriesDistribution; @@ -25,12 +25,12 @@ use crate::cache::CacheStrategy; use crate::error::Result; use crate::memtable::{MemtableRange, MemtableStats}; use crate::read::scan_region::ScanInput; -use crate::sst::file::{overlaps, FileHandle, FileTimeRange}; +use crate::sst::file::{FileHandle, FileTimeRange, overlaps}; +use crate::sst::parquet::DEFAULT_ROW_GROUP_SIZE; use crate::sst::parquet::file_range::{FileRange, FileRangeContextRef}; use crate::sst::parquet::format::parquet_row_group_time_range; use crate::sst::parquet::reader::ReaderMetrics; use crate::sst::parquet::row_selection::RowGroupSelection; -use crate::sst::parquet::DEFAULT_ROW_GROUP_SIZE; const ALL_ROW_GROUPS: i64 = -1; @@ -135,10 +135,11 @@ impl RangeMeta { fn merge(&mut self, mut other: RangeMeta) { debug_assert!(self.overlaps(&other)); debug_assert!(self.indices.iter().all(|idx| !other.indices.contains(idx))); - debug_assert!(self - .row_group_indices - .iter() - .all(|idx| !other.row_group_indices.contains(idx))); + debug_assert!( + self.row_group_indices + .iter() + .all(|idx| !other.row_group_indices.contains(idx)) + ); self.time_range = ( self.time_range.0.min(other.time_range.0), @@ -524,8 +525,8 @@ impl RangeBuilderList { #[cfg(test)] mod tests { - use common_time::timestamp::TimeUnit; use common_time::Timestamp; + use common_time::timestamp::TimeUnit; use super::*; diff --git a/src/mito2/src/read/scan_region.rs b/src/mito2/src/read/scan_region.rs index 4b5fcdf1b3..6e2e461c56 
100644 --- a/src/mito2/src/read/scan_region.rs +++ b/src/mito2/src/read/scan_region.rs @@ -22,20 +22,20 @@ use std::time::Instant; use api::v1::SemanticType; use common_error::ext::BoxedError; -use common_recordbatch::filter::SimpleFilterEvaluator; use common_recordbatch::SendableRecordBatchStream; +use common_recordbatch::filter::SimpleFilterEvaluator; use common_telemetry::{debug, error, tracing, warn}; use common_time::range::TimestampRange; use datafusion_common::Column; -use datafusion_expr::utils::expr_to_columns; use datafusion_expr::Expr; +use datafusion_expr::utils::expr_to_columns; use futures::StreamExt; use smallvec::SmallVec; use store_api::metadata::{RegionMetadata, RegionMetadataRef}; use store_api::region_engine::{PartitionRange, RegionScannerRef}; use store_api::storage::{RegionId, ScanRequest, TimeSeriesDistribution, TimeSeriesRowSelector}; -use table::predicate::{build_time_range_predicate, Predicate}; -use tokio::sync::{mpsc, Semaphore}; +use table::predicate::{Predicate, build_time_range_predicate}; +use tokio::sync::{Semaphore, mpsc}; use tokio_stream::wrappers::ReceiverStream; use crate::access_layer::AccessLayerRef; @@ -60,10 +60,10 @@ use crate::sst::file::FileHandle; use crate::sst::index::bloom_filter::applier::{ BloomFilterIndexApplierBuilder, BloomFilterIndexApplierRef, }; -use crate::sst::index::fulltext_index::applier::builder::FulltextIndexApplierBuilder; use crate::sst::index::fulltext_index::applier::FulltextIndexApplierRef; -use crate::sst::index::inverted_index::applier::builder::InvertedIndexApplierBuilder; +use crate::sst::index::fulltext_index::applier::builder::FulltextIndexApplierBuilder; use crate::sst::index::inverted_index::applier::InvertedIndexApplierRef; +use crate::sst::index::inverted_index::applier::builder::InvertedIndexApplierBuilder; use crate::sst::parquet::reader::ReaderMetrics; /// Parallel scan channel size for flat format. 
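// Editorial sketch in plain Rust, not part of the patch: the `use` reordering in
// the surrounding hunks looks consistent with rustfmt's 2024 style edition, which
// compares path segments case-sensitively (uppercase before lowercase, so `Arc`
// now sorts before `atomic`) and version-sorts embedded numbers (so `UInt8Type`
// sorts before `UInt16Type`). A std-only illustration of the resulting order:
use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};

/// Creates a shared counter; the two imports above already follow the new order.
fn shared_counter() -> Arc<AtomicU64> {
    Arc::new(AtomicU64::new(0))
}

/// Reads the counter with a relaxed load, exercising the `Ordering` import.
fn read_counter(counter: &AtomicU64) -> u64 {
    counter.load(Ordering::Relaxed)
}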
@@ -1271,12 +1271,11 @@ impl StreamContext { .collect(); write!(f, ", \"projection\": {:?}", names)?; } - if let Some(predicate) = &self.input.predicate.predicate() { - if !predicate.exprs().is_empty() { - let exprs: Vec<_> = - predicate.exprs().iter().map(|e| e.to_string()).collect(); - write!(f, ", \"filters\": {:?}", exprs)?; - } + if let Some(predicate) = &self.input.predicate.predicate() + && !predicate.exprs().is_empty() + { + let exprs: Vec<_> = predicate.exprs().iter().map(|e| e.to_string()).collect(); + write!(f, ", \"filters\": {:?}", exprs)?; } if !self.input.files.is_empty() { write!(f, ", \"files\": ")?; diff --git a/src/mito2/src/read/scan_util.rs b/src/mito2/src/read/scan_util.rs index 1e37aacf65..fd7d4329ca 100644 --- a/src/mito2/src/read/scan_util.rs +++ b/src/mito2/src/read/scan_util.rs @@ -30,8 +30,8 @@ use store_api::storage::RegionId; use crate::error::{Result, UnexpectedSnafu}; use crate::memtable::MemScanMetrics; use crate::metrics::{ - IN_PROGRESS_SCAN, PRECISE_FILTER_ROWS_TOTAL, READ_BATCHES_RETURN, READ_ROWS_IN_ROW_GROUP_TOTAL, - READ_ROWS_RETURN, READ_ROW_GROUPS_TOTAL, READ_STAGE_ELAPSED, + IN_PROGRESS_SCAN, PRECISE_FILTER_ROWS_TOTAL, READ_BATCHES_RETURN, READ_ROW_GROUPS_TOTAL, + READ_ROWS_IN_ROW_GROUP_TOTAL, READ_ROWS_RETURN, READ_STAGE_ELAPSED, }; use crate::read::range::{RangeBuilderList, RowGroupIndex}; use crate::read::scan_region::StreamContext; @@ -450,12 +450,20 @@ impl Drop for PartitionMetricsInner { if self.explain_verbose { common_telemetry::info!( "{} finished, region_id: {}, partition: {}, scan_metrics: {:?}, convert_batch_costs: {}", - self.scanner_type, self.region_id, self.partition, metrics, self.convert_cost, + self.scanner_type, + self.region_id, + self.partition, + metrics, + self.convert_cost, ); } else { common_telemetry::debug!( "{} finished, region_id: {}, partition: {}, scan_metrics: {:?}, convert_batch_costs: {}", - self.scanner_type, self.region_id, self.partition, metrics, self.convert_cost, + self.scanner_type, + self.region_id, + self.partition, + metrics, + self.convert_cost, ); } } diff --git a/src/mito2/src/read/seq_scan.rs b/src/mito2/src/read/seq_scan.rs index fd597b829a..5ccbe0ae30 100644 --- a/src/mito2/src/read/seq_scan.rs +++ b/src/mito2/src/read/seq_scan.rs @@ -27,7 +27,7 @@ use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet; use datafusion::physical_plan::{DisplayAs, DisplayFormatType}; use datatypes::schema::SchemaRef; use futures::{StreamExt, TryStreamExt}; -use snafu::{ensure, OptionExt}; +use snafu::{OptionExt, ensure}; use store_api::metadata::RegionMetadataRef; use store_api::region_engine::{ PartitionRange, PrepareRequest, QueryScanContext, RegionScanner, ScannerProperties, @@ -44,12 +44,12 @@ use crate::read::merge::MergeReaderBuilder; use crate::read::range::{RangeBuilderList, RangeMeta}; use crate::read::scan_region::{ScanInput, StreamContext}; use crate::read::scan_util::{ - scan_file_ranges, scan_flat_file_ranges, scan_flat_mem_ranges, scan_mem_ranges, - PartitionMetrics, PartitionMetricsList, + PartitionMetrics, PartitionMetricsList, scan_file_ranges, scan_flat_file_ranges, + scan_flat_mem_ranges, scan_mem_ranges, }; use crate::read::stream::{ConvertBatchStream, ScanBatch, ScanBatchStream}; use crate::read::{ - scan_util, Batch, BatchReader, BoxedBatchReader, BoxedRecordBatchStream, ScannerMetrics, Source, + Batch, BatchReader, BoxedBatchReader, BoxedRecordBatchStream, ScannerMetrics, Source, scan_util, }; use crate::region::options::MergeMode; use crate::sst::parquet::DEFAULT_READ_BATCH_SIZE; 
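// Editorial sketch in plain Rust, not part of the patch: hunks such as the
// `StreamContext` change just above collapse `if let Some(x) = ... { if cond { ... } }`
// into a single let-chain, which compiles once the workspace is on the 2024 edition.
// The names below are illustrative only:
fn describe_filters(filters: Option<&[String]>) -> Option<String> {
    // One flat condition instead of two nested `if` blocks.
    if let Some(exprs) = filters
        && !exprs.is_empty()
    {
        return Some(format!("filters: {exprs:?}"));
    }
    None
}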
diff --git a/src/mito2/src/read/series_scan.rs b/src/mito2/src/read/series_scan.rs index 63080bb146..b8081bb2e9 100644 --- a/src/mito2/src/read/series_scan.rs +++ b/src/mito2/src/read/series_scan.rs @@ -26,15 +26,15 @@ use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet; use datafusion::physical_plan::{DisplayAs, DisplayFormatType}; use datatypes::schema::SchemaRef; use futures::StreamExt; -use smallvec::{smallvec, SmallVec}; -use snafu::{ensure, OptionExt, ResultExt}; +use smallvec::{SmallVec, smallvec}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::metadata::RegionMetadataRef; use store_api::region_engine::{ PartitionRange, PrepareRequest, QueryScanContext, RegionScanner, ScannerProperties, }; +use tokio::sync::Semaphore; use tokio::sync::mpsc::error::{SendTimeoutError, TrySendError}; use tokio::sync::mpsc::{self, Receiver, Sender}; -use tokio::sync::Semaphore; use crate::error::{ Error, InvalidSenderSnafu, PartitionOutOfRangeSnafu, Result, ScanMultiTimesSnafu, @@ -43,7 +43,7 @@ use crate::error::{ use crate::read::range::RangeBuilderList; use crate::read::scan_region::{ScanInput, StreamContext}; use crate::read::scan_util::{PartitionMetrics, PartitionMetricsList, SeriesDistributorMetrics}; -use crate::read::seq_scan::{build_sources, SeqScan}; +use crate::read::seq_scan::{SeqScan, build_sources}; use crate::read::stream::{ConvertBatchStream, ScanBatch, ScanBatchStream}; use crate::read::{Batch, ScannerMetrics}; diff --git a/src/mito2/src/read/stream.rs b/src/mito2/src/read/stream.rs index 85a5dd4d9d..f149b8f6e7 100644 --- a/src/mito2/src/read/stream.rs +++ b/src/mito2/src/read/stream.rs @@ -27,10 +27,10 @@ use snafu::{OptionExt, ResultExt}; use crate::cache::CacheStrategy; use crate::error::{Result, UnexpectedSnafu}; +use crate::read::Batch; use crate::read::projection::ProjectionMapper; use crate::read::scan_util::PartitionMetrics; use crate::read::series_scan::SeriesBatch; -use crate::read::Batch; /// All kinds of [`Batch`]es to produce in scanner. pub enum ScanBatch { diff --git a/src/mito2/src/read/unordered_scan.rs b/src/mito2/src/read/unordered_scan.rs index 1d9004106d..4dc5d59b98 100644 --- a/src/mito2/src/read/unordered_scan.rs +++ b/src/mito2/src/read/unordered_scan.rs @@ -36,11 +36,11 @@ use crate::error::{PartitionOutOfRangeSnafu, Result}; use crate::read::range::RangeBuilderList; use crate::read::scan_region::{ScanInput, StreamContext}; use crate::read::scan_util::{ - scan_file_ranges, scan_flat_file_ranges, scan_flat_mem_ranges, scan_mem_ranges, - PartitionMetrics, PartitionMetricsList, + PartitionMetrics, PartitionMetricsList, scan_file_ranges, scan_flat_file_ranges, + scan_flat_mem_ranges, scan_mem_ranges, }; use crate::read::stream::{ConvertBatchStream, ScanBatch, ScanBatchStream}; -use crate::read::{scan_util, Batch, ScannerMetrics}; +use crate::read::{Batch, ScannerMetrics, scan_util}; /// Scans a region without providing any output ordering guarantee. 
/// diff --git a/src/mito2/src/region.rs b/src/mito2/src/region.rs index 361e2c8f10..da47cf7126 100644 --- a/src/mito2/src/region.rs +++ b/src/mito2/src/region.rs @@ -18,14 +18,15 @@ pub mod opener; pub mod options; pub(crate) mod version; -use std::collections::hash_map::Entry; use std::collections::HashMap; +use std::collections::hash_map::Entry; use std::sync::atomic::{AtomicI64, AtomicU64, Ordering}; use std::sync::{Arc, RwLock}; use common_telemetry::{error, info, warn}; use crossbeam_utils::atomic::AtomicCell; -use snafu::{ensure, OptionExt}; +use snafu::{OptionExt, ensure}; +use store_api::ManifestVersion; use store_api::codec::PrimaryKeyEncoding; use store_api::logstore::provider::Provider; use store_api::metadata::RegionMetadataRef; @@ -34,7 +35,6 @@ use store_api::region_engine::{ }; use store_api::sst_entry::ManifestSstEntry; use store_api::storage::{RegionId, SequenceNumber}; -use store_api::ManifestVersion; use crate::access_layer::AccessLayerRef; use crate::error::{ @@ -1029,14 +1029,14 @@ impl ManifestStats { #[cfg(test)] mod tests { - use std::sync::atomic::AtomicU64; use std::sync::Arc; + use std::sync::atomic::AtomicU64; use common_datasource::compression::CompressionType; use common_test_util::temp_dir::create_temp_dir; use crossbeam_utils::atomic::AtomicCell; - use object_store::services::Fs; use object_store::ObjectStore; + use object_store::services::Fs; use store_api::logstore::provider::Provider; use store_api::region_engine::RegionRole; use store_api::region_request::PathType; diff --git a/src/mito2/src/region/opener.rs b/src/mito2/src/region/opener.rs index 1ec85299ed..d8af3e3209 100644 --- a/src/mito2/src/region/opener.rs +++ b/src/mito2/src/region/opener.rs @@ -16,21 +16,21 @@ use std::any::TypeId; use std::collections::HashMap; -use std::sync::atomic::{AtomicI64, AtomicU64}; use std::sync::Arc; +use std::sync::atomic::{AtomicI64, AtomicU64}; use std::time::Instant; use common_telemetry::{debug, error, info, warn}; use common_wal::options::WalOptions; -use futures::future::BoxFuture; use futures::StreamExt; +use futures::future::BoxFuture; use log_store::kafka::log_store::KafkaLogStore; use log_store::raft_engine::log_store::RaftEngineLogStore; use object_store::manager::ObjectStoreManagerRef; use object_store::util::{join_dir, normalize_dir}; -use snafu::{ensure, OptionExt, ResultExt}; -use store_api::logstore::provider::Provider; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::logstore::LogStore; +use store_api::logstore::provider::Provider; use store_api::metadata::{ ColumnMetadata, RegionMetadata, RegionMetadataBuilder, RegionMetadataRef, }; @@ -49,9 +49,9 @@ use crate::error::{ use crate::manifest::action::RegionManifest; use crate::manifest::manager::{RegionManifestManager, RegionManifestOptions, RemoveFileOptions}; use crate::manifest::storage::manifest_compress_type; +use crate::memtable::MemtableBuilderProvider; use crate::memtable::bulk::part::BulkPart; use crate::memtable::time_partition::TimePartitions; -use crate::memtable::MemtableBuilderProvider; use crate::region::options::RegionOptions; use crate::region::version::{VersionBuilder, VersionControl, VersionControlRef}; use crate::region::{ @@ -455,10 +455,7 @@ impl RegionOpener { .max(flushed_entry_id); info!( "Start replaying memtable at replay_from_entry_id: {} for region {}, manifest version: {}, flushed entry id: {}", - replay_from_entry_id, - region_id, - manifest.manifest_version, - flushed_entry_id + replay_from_entry_id, region_id, manifest.manifest_version, flushed_entry_id ); 
replay_memtable( &provider, @@ -472,15 +469,12 @@ impl RegionOpener { .await?; // For remote WAL, we need to set topic_latest_entry_id to current topic's latest entry id. // Only set after the WAL replay is completed. - let topic_latest_entry_id = if provider.is_remote_wal() - && version_control.current().version.memtables.is_empty() - { + + if provider.is_remote_wal() && version_control.current().version.memtables.is_empty() { wal.store().latest_entry_id(&provider).unwrap_or(0) } else { 0 - }; - - topic_latest_entry_id + } } else { info!( "Skip the WAL replay for region: {}, manifest version: {}, flushed_entry_id: {}", @@ -677,7 +671,10 @@ where while let Some(res) = wal_stream.next().await { let (entry_id, entry) = res?; if entry_id <= flushed_entry_id { - warn!("Stale WAL entries read during replay, region id: {}, flushed entry id: {}, entry id read: {}", region_id, flushed_entry_id, entry_id); + warn!( + "Stale WAL entries read during replay, region id: {}, flushed entry id: {}, entry id read: {}", + region_id, flushed_entry_id, entry_id + ); ensure!( allow_stale_entries, StaleLogEntrySnafu { @@ -735,7 +732,13 @@ where let series_count = version_control.current().series_count(); info!( "Replay WAL for region: {}, provider: {:?}, rows recovered: {}, replay from entry id: {}, last entry id: {}, total timeseries replayed: {}, elapsed: {:?}", - region_id, provider, rows_replayed, replay_from_entry_id, last_entry_id, series_count, now.elapsed() + region_id, + provider, + rows_replayed, + replay_from_entry_id, + last_entry_id, + series_count, + now.elapsed() ); Ok(last_entry_id) } diff --git a/src/mito2/src/region/options.rs b/src/mito2/src/region/options.rs index c79706c104..1c075bf6b4 100644 --- a/src/mito2/src/region/options.rs +++ b/src/mito2/src/region/options.rs @@ -21,12 +21,12 @@ use std::time::Duration; use common_base::readable_size::ReadableSize; use common_time::TimeToLive; -use common_wal::options::{WalOptions, WAL_OPTIONS_KEY}; +use common_wal::options::{WAL_OPTIONS_KEY, WalOptions}; use serde::de::Error as _; use serde::{Deserialize, Deserializer, Serialize}; use serde_json::Value; -use serde_with::{serde_as, with_prefix, DisplayFromStr, NoneAsEmptyString}; -use snafu::{ensure, ResultExt}; +use serde_with::{DisplayFromStr, NoneAsEmptyString, serde_as, with_prefix}; +use snafu::{ResultExt, ensure}; use store_api::codec::PrimaryKeyEncoding; use store_api::storage::ColumnId; use strum::EnumString; diff --git a/src/mito2/src/region/version.rs b/src/mito2/src/region/version.rs index 011f468595..6a62219855 100644 --- a/src/mito2/src/region/version.rs +++ b/src/mito2/src/region/version.rs @@ -454,9 +454,7 @@ impl VersionBuilder { { info!( "VersionBuilder overwrites region compaction time window from {:?} to {:?}, region: {}", - self.compaction_time_window, - compaction_time_window, - self.metadata.region_id + self.compaction_time_window, compaction_time_window, self.metadata.region_id ); } diff --git a/src/mito2/src/region_write_ctx.rs b/src/mito2/src/region_write_ctx.rs index dca5807833..231ad085cb 100644 --- a/src/mito2/src/region_write_ctx.rs +++ b/src/mito2/src/region_write_ctx.rs @@ -13,19 +13,19 @@ // limitations under the License. 
use std::mem; -use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicU64, Ordering}; use api::v1::{BulkWalEntry, Mutation, OpType, Rows, WalEntry, WriteHint}; use futures::stream::{FuturesUnordered, StreamExt}; use snafu::ResultExt; -use store_api::logstore::provider::Provider; use store_api::logstore::LogStore; +use store_api::logstore::provider::Provider; use store_api::storage::{RegionId, SequenceNumber}; use crate::error::{Error, Result, WriteGroupSnafu}; -use crate::memtable::bulk::part::BulkPart; use crate::memtable::KeyValues; +use crate::memtable::bulk::part::BulkPart; use crate::metrics; use crate::region::version::{VersionControlData, VersionControlRef, VersionRef}; use crate::request::OptionOutputTx; diff --git a/src/mito2/src/request.rs b/src/mito2/src/request.rs index 94e66a97ac..d888c518ff 100644 --- a/src/mito2/src/request.rs +++ b/src/mito2/src/request.rs @@ -19,8 +19,8 @@ use std::sync::Arc; use std::time::Instant; use api::helper::{ - is_column_type_value_eq, is_semantic_type_eq, proto_value_type, to_proto_value, - ColumnDataTypeWrapper, + ColumnDataTypeWrapper, is_column_type_value_eq, is_semantic_type_eq, proto_value_type, + to_proto_value, }; use api::v1::column_def::options_from_column_schema; use api::v1::{ColumnDataType, ColumnSchema, OpType, Rows, SemanticType, Value, WriteHint}; @@ -29,8 +29,9 @@ use datatypes::prelude::DataType; use prometheus::HistogramTimer; use prost::Message; use smallvec::SmallVec; -use snafu::{ensure, OptionExt, ResultExt}; -use store_api::codec::{infer_primary_key_encoding_from_hint, PrimaryKeyEncoding}; +use snafu::{OptionExt, ResultExt, ensure}; +use store_api::ManifestVersion; +use store_api::codec::{PrimaryKeyEncoding, infer_primary_key_encoding_from_hint}; use store_api::metadata::{ColumnMetadata, RegionMetadata, RegionMetadataRef}; use store_api::region_engine::{SetRegionRoleStateResponse, SettableRegionRoleState}; use store_api::region_request::{ @@ -39,7 +40,6 @@ use store_api::region_request::{ RegionOpenRequest, RegionRequest, RegionTruncateRequest, }; use store_api::storage::RegionId; -use store_api::ManifestVersion; use tokio::sync::oneshot::{self, Receiver, Sender}; use crate::error::{ @@ -47,11 +47,11 @@ use crate::error::{ FlushRegionSnafu, InvalidRequestSnafu, Result, UnexpectedSnafu, }; use crate::manifest::action::{RegionEdit, TruncateKind}; -use crate::memtable::bulk::part::BulkPart; use crate::memtable::MemtableId; +use crate::memtable::bulk::part::BulkPart; use crate::metrics::COMPACTION_ELAPSED_TOTAL; -use crate::wal::entry_distributor::WalEntryReceiver; use crate::wal::EntryId; +use crate::wal::entry_distributor::WalEntryReceiver; /// Request to write a region. 
#[derive(Debug)] @@ -1080,7 +1080,10 @@ mod tests { let request = WriteRequest::new(RegionId::new(1, 1), OpType::Put, rows, None).unwrap(); let err = request.check_schema(&metadata).unwrap_err(); - check_invalid_request(&err, "column ts expect type Timestamp(Millisecond(TimestampMillisecondType)), given: INT64(4)"); + check_invalid_request( + &err, + "column ts expect type Timestamp(Millisecond(TimestampMillisecondType)), given: INT64(4)", + ); } #[test] @@ -1213,11 +1216,13 @@ mod tests { let mut request = WriteRequest::new(RegionId::new(1, 1), OpType::Put, rows, None).unwrap(); let err = request.check_schema(&metadata).unwrap_err(); assert!(err.is_fill_default()); - assert!(request - .fill_missing_columns(&metadata) - .unwrap_err() - .to_string() - .contains("unexpected impure default value with region_id")); + assert!( + request + .fill_missing_columns(&metadata) + .unwrap_err() + .to_string() + .contains("unexpected impure default value with region_id") + ); } #[test] diff --git a/src/mito2/src/schedule/scheduler.rs b/src/mito2/src/schedule/scheduler.rs index 1b2d34cb58..ae25815a2a 100644 --- a/src/mito2/src/schedule/scheduler.rs +++ b/src/mito2/src/schedule/scheduler.rs @@ -18,7 +18,7 @@ use std::sync::atomic::{AtomicU8, Ordering}; use std::sync::{Arc, RwLock}; use common_telemetry::warn; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use tokio::sync::Mutex; use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; @@ -149,7 +149,9 @@ impl Scheduler for LocalScheduler { impl Drop for LocalScheduler { fn drop(&mut self) { if self.state.load(Ordering::Relaxed) != STATE_STOP { - warn!("scheduler should be stopped before dropping, which means the state of scheduler must be STATE_STOP"); + warn!( + "scheduler should be stopped before dropping, which means the state of scheduler must be STATE_STOP" + ); // We didn't call `stop()` so we cancel all background workers here. 
self.sender.write().unwrap().take(); @@ -160,8 +162,8 @@ impl Drop for LocalScheduler { #[cfg(test)] mod tests { - use std::sync::atomic::AtomicI32; use std::sync::Arc; + use std::sync::atomic::AtomicI32; use tokio::sync::Barrier; use tokio::time::Duration; diff --git a/src/mito2/src/sst/file.rs b/src/mito2/src/sst/file.rs index 42ad478deb..35373e55c1 100644 --- a/src/mito2/src/sst/file.rs +++ b/src/mito2/src/sst/file.rs @@ -18,8 +18,8 @@ use std::fmt; use std::fmt::{Debug, Formatter}; use std::num::NonZeroU64; use std::str::FromStr; -use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicBool, Ordering}; use common_base::readable_size::ReadableSize; use common_time::Timestamp; diff --git a/src/mito2/src/sst/file_purger.rs b/src/mito2/src/sst/file_purger.rs index 05d4cc4682..a672028aeb 100644 --- a/src/mito2/src/sst/file_purger.rs +++ b/src/mito2/src/sst/file_purger.rs @@ -18,8 +18,8 @@ use std::sync::Arc; use common_telemetry::{error, info}; use crate::access_layer::AccessLayerRef; -use crate::cache::file_cache::{FileType, IndexKey}; use crate::cache::CacheManagerRef; +use crate::cache::file_cache::{FileType, IndexKey}; use crate::error::Result; use crate::schedule::scheduler::SchedulerRef; use crate::sst::file::FileMeta; @@ -219,8 +219,8 @@ mod tests { use std::num::NonZeroU64; use common_test_util::temp_dir::create_temp_dir; - use object_store::services::Fs; use object_store::ObjectStore; + use object_store::services::Fs; use smallvec::SmallVec; use store_api::region_request::PathType; use store_api::storage::RegionId; diff --git a/src/mito2/src/sst/file_ref.rs b/src/mito2/src/sst/file_ref.rs index 2e6594f1c0..0a78a37cc3 100644 --- a/src/mito2/src/sst/file_ref.rs +++ b/src/mito2/src/sst/file_ref.rs @@ -18,8 +18,8 @@ use std::sync::Arc; use common_telemetry::debug; use dashmap::{DashMap, Entry}; use serde::{Deserialize, Serialize}; -use store_api::storage::{RegionId, TableId}; use store_api::ManifestVersion; +use store_api::storage::{RegionId, TableId}; use crate::error::Result; use crate::metrics::GC_REF_FILE_CNT; diff --git a/src/mito2/src/sst/index.rs b/src/mito2/src/sst/index.rs index 58c5e99f04..e96e8ee2e7 100644 --- a/src/mito2/src/sst/index.rs +++ b/src/mito2/src/sst/index.rs @@ -393,8 +393,8 @@ mod tests { use datatypes::schema::{ ColumnSchema, FulltextOptions, SkippingIndexOptions, SkippingIndexType, }; - use object_store::services::Memory; use object_store::ObjectStore; + use object_store::services::Memory; use puffin_manager::PuffinManagerFactory; use store_api::metadata::{ColumnMetadata, RegionMetadataBuilder}; diff --git a/src/mito2/src/sst/index/bloom_filter/applier.rs b/src/mito2/src/sst/index/bloom_filter/applier.rs index 3b6550ee89..fcd54931f1 100644 --- a/src/mito2/src/sst/index/bloom_filter/applier.rs +++ b/src/mito2/src/sst/index/bloom_filter/applier.rs @@ -41,10 +41,10 @@ use crate::error::{ }; use crate::metrics::INDEX_APPLY_ELAPSED; use crate::sst::file::RegionFileId; -pub use crate::sst::index::bloom_filter::applier::builder::BloomFilterIndexApplierBuilder; -use crate::sst::index::bloom_filter::INDEX_BLOB_TYPE; -use crate::sst::index::puffin_manager::{BlobReader, PuffinManagerFactory}; use crate::sst::index::TYPE_BLOOM_FILTER_INDEX; +use crate::sst::index::bloom_filter::INDEX_BLOB_TYPE; +pub use crate::sst::index::bloom_filter::applier::builder::BloomFilterIndexApplierBuilder; +use crate::sst::index::puffin_manager::{BlobReader, PuffinManagerFactory}; pub(crate) type BloomFilterIndexApplierRef = Arc; @@ -349,17 +349,17 @@ 
fn is_blob_not_found(err: &Error) -> bool { #[cfg(test)] mod tests { - use datafusion_expr::{col, lit, Expr}; + use datafusion_expr::{Expr, col, lit}; use futures::future::BoxFuture; use puffin::puffin_manager::PuffinWriter; use store_api::metadata::RegionMetadata; use super::*; use crate::sst::file::FileId; + use crate::sst::index::bloom_filter::creator::BloomFilterIndexer; use crate::sst::index::bloom_filter::creator::tests::{ mock_object_store, mock_region_metadata, new_batch, new_intm_mgr, }; - use crate::sst::index::bloom_filter::creator::BloomFilterIndexer; #[allow(clippy::type_complexity)] fn tester( @@ -369,7 +369,7 @@ mod tests { puffin_manager_factory: PuffinManagerFactory, file_id: RegionFileId, ) -> impl Fn(&[Expr], Vec<(usize, bool)>) -> BoxFuture<'static, Vec<(usize, Vec>)>> - + use<'_> { + + use<'_> { move |exprs, row_groups| { let table_dir = table_dir.clone(); let object_store: ObjectStore = object_store.clone(); diff --git a/src/mito2/src/sst/index/bloom_filter/applier/builder.rs b/src/mito2/src/sst/index/bloom_filter/applier/builder.rs index 0c71a2df4c..a2930d4075 100644 --- a/src/mito2/src/sst/index/bloom_filter/applier/builder.rs +++ b/src/mito2/src/sst/index/bloom_filter/applier/builder.rs @@ -20,8 +20,8 @@ use datafusion_expr::expr::InList; use datafusion_expr::{BinaryExpr, Expr, Operator}; use datatypes::data_type::ConcreteDataType; use datatypes::value::Value; -use index::bloom_filter::applier::InListPredicate; use index::Bytes; +use index::bloom_filter::applier::InListPredicate; use mito_codec::index::IndexValueCodec; use mito_codec::row_converter::SortField; use object_store::ObjectStore; @@ -335,7 +335,7 @@ fn encode_lit(lit: &ScalarValue, data_type: ConcreteDataType) -> Result { mod tests { use api::v1::SemanticType; use datafusion_common::Column; - use datafusion_expr::{col, lit, Literal}; + use datafusion_expr::{Literal, col, lit}; use datatypes::schema::ColumnSchema; use object_store::services::Memory; use store_api::metadata::{ColumnMetadata, RegionMetadata, RegionMetadataBuilder}; diff --git a/src/mito2/src/sst/index/bloom_filter/creator.rs b/src/mito2/src/sst/index/bloom_filter/creator.rs index 1b0f40114f..d373b0701c 100644 --- a/src/mito2/src/sst/index/bloom_filter/creator.rs +++ b/src/mito2/src/sst/index/bloom_filter/creator.rs @@ -13,8 +13,8 @@ // limitations under the License. 
use std::collections::HashMap; -use std::sync::atomic::AtomicUsize; use std::sync::Arc; +use std::sync::atomic::AtomicUsize; use common_telemetry::{debug, warn}; use datatypes::arrow::record_batch::RecordBatch; @@ -24,7 +24,7 @@ use index::bloom_filter::creator::BloomFilterCreator; use mito_codec::index::{IndexValueCodec, IndexValuesCodec}; use mito_codec::row_converter::SortField; use puffin::puffin_manager::{PuffinWriter, PutOptions}; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use store_api::metadata::RegionMetadataRef; use store_api::storage::ColumnId; use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt}; @@ -35,13 +35,13 @@ use crate::error::{ }; use crate::read::Batch; use crate::sst::file::FileId; +use crate::sst::index::TYPE_BLOOM_FILTER_INDEX; use crate::sst::index::bloom_filter::INDEX_BLOB_TYPE; use crate::sst::index::intermediate::{ IntermediateLocation, IntermediateManager, TempFileProvider, }; use crate::sst::index::puffin_manager::SstPuffinWriter; use crate::sst::index::statistics::{ByteCount, RowCount, Statistics}; -use crate::sst::index::TYPE_BLOOM_FILTER_INDEX; /// The buffer size for the pipe used to send index data to the puffin blob. const PIPE_BUFFER_SIZE_FOR_SENDING_BLOB: usize = 8192; @@ -432,11 +432,11 @@ pub(crate) mod tests { use datatypes::data_type::ConcreteDataType; use datatypes::schema::{ColumnSchema, SkippingIndexOptions}; use datatypes::value::ValueRef; - use datatypes::vectors::{UInt64Vector, UInt8Vector}; + use datatypes::vectors::{UInt8Vector, UInt64Vector}; use index::bloom_filter::reader::{BloomFilterReader, BloomFilterReaderImpl}; use mito_codec::row_converter::{DensePrimaryKeyCodec, PrimaryKeyCodecExt}; - use object_store::services::Memory; use object_store::ObjectStore; + use object_store::services::Memory; use puffin::puffin_manager::{PuffinManager, PuffinReader}; use store_api::metadata::{ColumnMetadata, RegionMetadataBuilder}; use store_api::storage::RegionId; diff --git a/src/mito2/src/sst/index/fulltext_index/applier.rs b/src/mito2/src/sst/index/fulltext_index/applier.rs index 1c23a32fcd..c88fc611db 100644 --- a/src/mito2/src/sst/index/fulltext_index/applier.rs +++ b/src/mito2/src/sst/index/fulltext_index/applier.rs @@ -43,12 +43,12 @@ use crate::error::{ }; use crate::metrics::INDEX_APPLY_ELAPSED; use crate::sst::file::RegionFileId; +use crate::sst::index::TYPE_FULLTEXT_INDEX; use crate::sst::index::fulltext_index::applier::builder::{FulltextRequest, FulltextTerm}; use crate::sst::index::fulltext_index::{INDEX_BLOB_TYPE_BLOOM, INDEX_BLOB_TYPE_TANTIVY}; use crate::sst::index::puffin_manager::{ PuffinManagerFactory, SstPuffinBlob, SstPuffinDir, SstPuffinReader, }; -use crate::sst::index::TYPE_FULLTEXT_INDEX; pub mod builder; @@ -151,10 +151,10 @@ impl FulltextIndexApplier { row_ids = Some(result); } - if let Some(ids) = row_ids.as_ref() { - if ids.is_empty() { - break; - } + if let Some(ids) = row_ids.as_ref() + && ids.is_empty() + { + break; } } @@ -204,10 +204,10 @@ impl FulltextIndexApplier { row_ids = Some(result); } - if let Some(ids) = row_ids.as_ref() { - if ids.is_empty() { - break; - } + if let Some(ids) = row_ids.as_ref() + && ids.is_empty() + { + break; } } diff --git a/src/mito2/src/sst/index/fulltext_index/creator.rs b/src/mito2/src/sst/index/fulltext_index/creator.rs index 967e0d9dce..b5122e93d2 100644 --- a/src/mito2/src/sst/index/fulltext_index/creator.rs +++ b/src/mito2/src/sst/index/fulltext_index/creator.rs @@ -13,8 +13,8 @@ // limitations under the License. 
use std::collections::HashMap; -use std::sync::atomic::AtomicUsize; use std::sync::Arc; +use std::sync::atomic::AtomicUsize; use common_telemetry::warn; use datatypes::arrow::array::{Array, StringArray}; @@ -27,7 +27,7 @@ use index::fulltext_index::create::{ use index::fulltext_index::{Analyzer, Config}; use puffin::blob_metadata::CompressionCodec; use puffin::puffin_manager::PutOptions; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use store_api::metadata::RegionMetadataRef; use store_api::storage::{ColumnId, ConcreteDataType, RegionId}; @@ -38,13 +38,13 @@ use crate::error::{ }; use crate::read::Batch; use crate::sst::file::FileId; +use crate::sst::index::TYPE_FULLTEXT_INDEX; use crate::sst::index::fulltext_index::{INDEX_BLOB_TYPE_BLOOM, INDEX_BLOB_TYPE_TANTIVY}; use crate::sst::index::intermediate::{ IntermediateLocation, IntermediateManager, TempFileProvider, }; use crate::sst::index::puffin_manager::SstPuffinWriter; use crate::sst::index::statistics::{ByteCount, RowCount, Statistics}; -use crate::sst::index::TYPE_FULLTEXT_INDEX; /// `FulltextIndexer` is responsible for creating fulltext indexes for SST files. pub struct FulltextIndexer { @@ -427,12 +427,12 @@ mod tests { use common_base::BitVec; use datatypes::data_type::DataType; use datatypes::schema::{ColumnSchema, FulltextAnalyzer, FulltextOptions}; - use datatypes::vectors::{UInt64Vector, UInt8Vector}; - use futures::future::BoxFuture; + use datatypes::vectors::{UInt8Vector, UInt64Vector}; use futures::FutureExt; + use futures::future::BoxFuture; use index::fulltext_index::search::RowId; - use object_store::services::Memory; use object_store::ObjectStore; + use object_store::services::Memory; use puffin::puffin_manager::{PuffinManager, PuffinWriter}; use store_api::metadata::{ColumnMetadata, RegionMetadataBuilder, RegionMetadataRef}; use store_api::region_request::PathType; @@ -442,10 +442,10 @@ mod tests { use crate::access_layer::RegionFilePathFactory; use crate::read::{Batch, BatchColumn}; use crate::sst::file::{FileId, RegionFileId}; + use crate::sst::index::fulltext_index::applier::FulltextIndexApplier; use crate::sst::index::fulltext_index::applier::builder::{ FulltextQuery, FulltextRequest, FulltextTerm, }; - use crate::sst::index::fulltext_index::applier::FulltextIndexApplier; use crate::sst::index::puffin_manager::PuffinManagerFactory; fn mock_object_store() -> ObjectStore { diff --git a/src/mito2/src/sst/index/indexer/finish.rs b/src/mito2/src/sst/index/indexer/finish.rs index c97955495c..632b0a68d1 100644 --- a/src/mito2/src/sst/index/indexer/finish.rs +++ b/src/mito2/src/sst/index/indexer/finish.rs @@ -273,10 +273,10 @@ impl Indexer { } pub(crate) async fn do_prune_intm_sst_dir(&mut self) { - if let Some(manager) = self.intermediate_manager.take() { - if let Err(e) = manager.prune_sst_dir(&self.region_id, &self.file_id).await { - warn!(e; "Failed to prune intermediate SST directory, region_id: {}, file_id: {}", self.region_id, self.file_id); - } + if let Some(manager) = self.intermediate_manager.take() + && let Err(e) = manager.prune_sst_dir(&self.region_id, &self.file_id).await + { + warn!(e; "Failed to prune intermediate SST directory, region_id: {}, file_id: {}", self.region_id, self.file_id); } } } diff --git a/src/mito2/src/sst/index/intermediate.rs b/src/mito2/src/sst/index/intermediate.rs index e8611f5a71..e499152971 100644 --- a/src/mito2/src/sst/index/intermediate.rs +++ b/src/mito2/src/sst/index/intermediate.rs @@ -289,9 +289,11 @@ mod tests { let _manager = 
IntermediateManager::init_fs(path).await.unwrap(); // cleaned up by `init_fs` - assert!(!tokio::fs::try_exists(format!("{path}/{INTERMEDIATE_DIR}")) - .await - .unwrap()); + assert!( + !tokio::fs::try_exists(format!("{path}/{INTERMEDIATE_DIR}")) + .await + .unwrap() + ); } #[tokio::test] @@ -396,9 +398,11 @@ mod tests { assert_eq!(pi.next().unwrap(), INTERMEDIATE_DIR); assert_eq!(pi.next().unwrap(), "0"); // region id assert_eq!(pi.next().unwrap(), OsStr::new(&sst_file_id.to_string())); // sst file id - assert!(Regex::new(r"fulltext-1-\w{8}-\w{4}-\w{4}-\w{4}-\w{12}") - .unwrap() - .is_match(&pi.next().unwrap().to_string_lossy())); // fulltext path + assert!( + Regex::new(r"fulltext-1-\w{8}-\w{4}-\w{4}-\w{4}-\w{12}") + .unwrap() + .is_match(&pi.next().unwrap().to_string_lossy()) + ); // fulltext path assert!(pi.next().is_none()); } @@ -448,12 +452,14 @@ mod tests { provider.cleanup().await.unwrap(); - assert!(provider - .manager - .store() - .list(location.dir_to_cleanup()) - .await - .unwrap() - .is_empty()); + assert!( + provider + .manager + .store() + .list(location.dir_to_cleanup()) + .await + .unwrap() + .is_empty() + ); } } diff --git a/src/mito2/src/sst/index/inverted_index/applier.rs b/src/mito2/src/sst/index/inverted_index/applier.rs index d070308916..2a973db781 100644 --- a/src/mito2/src/sst/index/inverted_index/applier.rs +++ b/src/mito2/src/sst/index/inverted_index/applier.rs @@ -40,9 +40,9 @@ use crate::error::{ }; use crate::metrics::{INDEX_APPLY_ELAPSED, INDEX_APPLY_MEMORY_USAGE}; use crate::sst::file::RegionFileId; +use crate::sst::index::TYPE_INVERTED_INDEX; use crate::sst::index::inverted_index::INDEX_BLOB_TYPE; use crate::sst::index::puffin_manager::{BlobReader, PuffinManagerFactory}; -use crate::sst::index::TYPE_INVERTED_INDEX; /// `InvertedIndexApplier` is responsible for applying predicates to the provided SST files /// and returning the relevant row group ids for further scan. diff --git a/src/mito2/src/sst/index/inverted_index/applier/builder.rs b/src/mito2/src/sst/index/inverted_index/applier/builder.rs index c743ab9b01..60690df3a8 100644 --- a/src/mito2/src/sst/index/inverted_index/applier/builder.rs +++ b/src/mito2/src/sst/index/inverted_index/applier/builder.rs @@ -254,8 +254,8 @@ mod tests { use index::inverted_index::search::predicate::{ Bound, Range, RangePredicate, RegexMatchPredicate, }; - use object_store::services::Memory; use object_store::ObjectStore; + use object_store::services::Memory; use store_api::metadata::{ColumnMetadata, RegionMetadata, RegionMetadataBuilder}; use store_api::storage::RegionId; diff --git a/src/mito2/src/sst/index/inverted_index/applier/builder/comparison.rs b/src/mito2/src/sst/index/inverted_index/applier/builder/comparison.rs index 5ce72f474a..c3a6dacf7b 100644 --- a/src/mito2/src/sst/index/inverted_index/applier/builder/comparison.rs +++ b/src/mito2/src/sst/index/inverted_index/applier/builder/comparison.rs @@ -13,8 +13,8 @@ // limitations under the License. 
use datafusion_expr::{Expr as DfExpr, Operator}; -use index::inverted_index::search::predicate::{Bound, Predicate, Range, RangePredicate}; use index::Bytes; +use index::inverted_index::search::predicate::{Bound, Predicate, Range, RangePredicate}; use crate::error::Result; use crate::sst::index::inverted_index::applier::builder::InvertedIndexApplierBuilder; diff --git a/src/mito2/src/sst/index/inverted_index/applier/builder/eq_list.rs b/src/mito2/src/sst/index/inverted_index/applier/builder/eq_list.rs index 41e6457ae4..5c2016e85d 100644 --- a/src/mito2/src/sst/index/inverted_index/applier/builder/eq_list.rs +++ b/src/mito2/src/sst/index/inverted_index/applier/builder/eq_list.rs @@ -16,8 +16,8 @@ use std::collections::BTreeSet; use datafusion_expr::{BinaryExpr, Expr as DfExpr, Operator}; use datatypes::data_type::ConcreteDataType; -use index::inverted_index::search::predicate::{InListPredicate, Predicate}; use index::Bytes; +use index::inverted_index::search::predicate::{InListPredicate, Predicate}; use crate::error::Result; use crate::sst::index::inverted_index::applier::builder::InvertedIndexApplierBuilder; diff --git a/src/mito2/src/sst/index/inverted_index/creator.rs b/src/mito2/src/sst/index/inverted_index/creator.rs index 43dc3d6801..2121415f19 100644 --- a/src/mito2/src/sst/index/inverted_index/creator.rs +++ b/src/mito2/src/sst/index/inverted_index/creator.rs @@ -14,20 +14,20 @@ use std::collections::HashSet; use std::num::NonZeroUsize; -use std::sync::atomic::AtomicUsize; use std::sync::Arc; +use std::sync::atomic::AtomicUsize; use common_telemetry::{debug, warn}; use datatypes::arrow::record_batch::RecordBatch; use datatypes::vectors::Helper; +use index::inverted_index::create::InvertedIndexCreator; use index::inverted_index::create::sort::external_sort::ExternalSorter; use index::inverted_index::create::sort_create::SortIndexCreator; -use index::inverted_index::create::InvertedIndexCreator; use index::inverted_index::format::writer::InvertedIndexBlobWriter; use mito_codec::index::{IndexValueCodec, IndexValuesCodec}; use mito_codec::row_converter::SortField; use puffin::puffin_manager::{PuffinWriter, PutOptions}; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use store_api::metadata::RegionMetadataRef; use store_api::storage::ColumnId; use tokio::io::duplex; @@ -39,13 +39,13 @@ use crate::error::{ }; use crate::read::Batch; use crate::sst::file::FileId; +use crate::sst::index::TYPE_INVERTED_INDEX; use crate::sst::index::intermediate::{ IntermediateLocation, IntermediateManager, TempFileProvider, }; use crate::sst::index::inverted_index::INDEX_BLOB_TYPE; use crate::sst::index::puffin_manager::SstPuffinWriter; use crate::sst::index::statistics::{ByteCount, RowCount, Statistics}; -use crate::sst::index::TYPE_INVERTED_INDEX; /// The minimum memory usage threshold for one column. 
const MIN_MEMORY_USAGE_THRESHOLD_PER_COLUMN: usize = 1024 * 1024; // 1MB @@ -427,17 +427,17 @@ mod tests { use std::collections::BTreeSet; use api::v1::SemanticType; - use datafusion_expr::{binary_expr, col, lit, Expr as DfExpr, Operator}; + use datafusion_expr::{Expr as DfExpr, Operator, binary_expr, col, lit}; use datatypes::data_type::ConcreteDataType; use datatypes::schema::ColumnSchema; use datatypes::value::ValueRef; - use datatypes::vectors::{UInt64Vector, UInt8Vector}; + use datatypes::vectors::{UInt8Vector, UInt64Vector}; use futures::future::BoxFuture; use mito_codec::row_converter::{DensePrimaryKeyCodec, PrimaryKeyCodecExt}; - use object_store::services::Memory; use object_store::ObjectStore; - use puffin::puffin_manager::cache::PuffinMetadataCache; + use object_store::services::Memory; use puffin::puffin_manager::PuffinManager; + use puffin::puffin_manager::cache::PuffinMetadataCache; use store_api::metadata::{ColumnMetadata, RegionMetadataBuilder}; use store_api::region_request::PathType; use store_api::storage::RegionId; diff --git a/src/mito2/src/sst/index/puffin_manager.rs b/src/mito2/src/sst/index/puffin_manager.rs index c5be534d03..2c936d2afa 100644 --- a/src/mito2/src/sst/index/puffin_manager.rs +++ b/src/mito2/src/sst/index/puffin_manager.rs @@ -29,8 +29,8 @@ use snafu::ResultExt; use crate::access_layer::FilePathProvider; use crate::error::{PuffinInitStagerSnafu, PuffinPurgeStagerSnafu, Result}; use crate::metrics::{ - StagerMetrics, INDEX_PUFFIN_FLUSH_OP_TOTAL, INDEX_PUFFIN_READ_BYTES_TOTAL, - INDEX_PUFFIN_READ_OP_TOTAL, INDEX_PUFFIN_WRITE_BYTES_TOTAL, INDEX_PUFFIN_WRITE_OP_TOTAL, + INDEX_PUFFIN_FLUSH_OP_TOTAL, INDEX_PUFFIN_READ_BYTES_TOTAL, INDEX_PUFFIN_READ_OP_TOTAL, + INDEX_PUFFIN_WRITE_BYTES_TOTAL, INDEX_PUFFIN_WRITE_OP_TOTAL, StagerMetrics, }; use crate::sst::file::RegionFileId; use crate::sst::index::store::{self, InstrumentedStore}; diff --git a/src/mito2/src/sst/parquet.rs b/src/mito2/src/sst/parquet.rs index d02882a5fb..27a221c146 100644 --- a/src/mito2/src/sst/parquet.rs +++ b/src/mito2/src/sst/parquet.rs @@ -19,9 +19,9 @@ use std::sync::Arc; use common_base::readable_size::ReadableSize; use parquet::file::metadata::ParquetMetaData; +use crate::sst::DEFAULT_WRITE_BUFFER_SIZE; use crate::sst::file::{FileId, FileTimeRange}; use crate::sst::index::IndexOutput; -use crate::sst::DEFAULT_WRITE_BUFFER_SIZE; pub(crate) mod file_range; pub mod flat_format; @@ -93,11 +93,11 @@ mod tests { use api::v1::OpType; use common_time::Timestamp; use datafusion_common::{Column, ScalarValue}; - use datafusion_expr::{col, lit, BinaryExpr, Expr, Literal, Operator}; + use datafusion_expr::{BinaryExpr, Expr, Literal, Operator, col, lit}; use datatypes::arrow; use datatypes::arrow::array::{ ArrayRef, BinaryDictionaryBuilder, RecordBatch, StringDictionaryBuilder, - TimestampMillisecondArray, UInt64Array, UInt8Array, + TimestampMillisecondArray, UInt8Array, UInt64Array, }; use datatypes::arrow::datatypes::{DataType, Field, Schema, UInt32Type}; use parquet::arrow::AsyncArrowWriter; @@ -124,14 +124,14 @@ mod tests { use crate::sst::parquet::reader::{ParquetReader, ParquetReaderBuilder, ReaderMetrics}; use crate::sst::parquet::writer::ParquetWriter; use crate::sst::{ - location, to_flat_sst_arrow_schema, FlatSchemaOptions, DEFAULT_WRITE_CONCURRENCY, + DEFAULT_WRITE_CONCURRENCY, FlatSchemaOptions, location, to_flat_sst_arrow_schema, }; use crate::test_util::sst_util::{ assert_parquet_metadata_eq, build_test_binary_test_region_metadata, new_batch_by_range, new_batch_with_binary, 
new_batch_with_custom_sequence, new_primary_key, new_source, sst_file_handle, sst_file_handle_with_file_id, sst_region_metadata, }; - use crate::test_util::{check_reader_result, TestEnv}; + use crate::test_util::{TestEnv, check_reader_result}; const FILE_DIR: &str = "/"; diff --git a/src/mito2/src/sst/parquet/file_range.rs b/src/mito2/src/sst/parquet/file_range.rs index 92c0ad2b74..d216f1e132 100644 --- a/src/mito2/src/sst/parquet/file_range.rs +++ b/src/mito2/src/sst/parquet/file_range.rs @@ -32,10 +32,10 @@ use crate::error::{ ComputeArrowSnafu, DataTypeMismatchSnafu, DecodeSnafu, DecodeStatsSnafu, RecordBatchSnafu, Result, StatsNotPresentSnafu, }; +use crate::read::Batch; use crate::read::compat::CompatBatch; use crate::read::last_row::RowGroupLastRowCachedReader; use crate::read::prune::{FlatPruneReader, PruneReader}; -use crate::read::Batch; use crate::sst::file::FileHandle; use crate::sst::parquet::format::ReadFormat; use crate::sst::parquet::reader::{ diff --git a/src/mito2/src/sst/parquet/flat_format.rs b/src/mito2/src/sst/parquet/flat_format.rs index 0e60d95bd3..bdf2e12005 100644 --- a/src/mito2/src/sst/parquet/flat_format.rs +++ b/src/mito2/src/sst/parquet/flat_format.rs @@ -40,9 +40,9 @@ use datatypes::arrow::compute::kernels::take::take; use datatypes::arrow::datatypes::{Schema, SchemaRef}; use datatypes::arrow::record_batch::RecordBatch; use datatypes::prelude::{ConcreteDataType, DataType}; -use mito_codec::row_converter::{build_primary_key_codec, CompositeValues, PrimaryKeyCodec}; +use mito_codec::row_converter::{CompositeValues, PrimaryKeyCodec, build_primary_key_codec}; use parquet::file::metadata::RowGroupMetaData; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::metadata::{RegionMetadata, RegionMetadataRef}; use store_api::storage::{ColumnId, SequenceNumber}; @@ -51,9 +51,9 @@ use crate::error::{ NewRecordBatchSnafu, Result, }; use crate::sst::parquet::format::{ - FormatProjection, PrimaryKeyArray, ReadFormat, StatValues, INTERNAL_COLUMN_NUM, + FormatProjection, INTERNAL_COLUMN_NUM, PrimaryKeyArray, ReadFormat, StatValues, }; -use crate::sst::{tag_maybe_to_dictionary_field, to_flat_sst_arrow_schema, FlatSchemaOptions}; +use crate::sst::{FlatSchemaOptions, tag_maybe_to_dictionary_field, to_flat_sst_arrow_schema}; /// Helper for writing the SST format. 
#[allow(dead_code)] diff --git a/src/mito2/src/sst/parquet/format.rs b/src/mito2/src/sst/parquet/format.rs index 87377f6e6d..626b7baa4a 100644 --- a/src/mito2/src/sst/parquet/format.rs +++ b/src/mito2/src/sst/parquet/format.rs @@ -40,10 +40,10 @@ use datatypes::arrow::datatypes::{SchemaRef, UInt32Type}; use datatypes::arrow::record_batch::RecordBatch; use datatypes::prelude::DataType; use datatypes::vectors::{Helper, Vector}; -use mito_codec::row_converter::{build_primary_key_codec_with_fields, SortField}; +use mito_codec::row_converter::{SortField, build_primary_key_codec_with_fields}; use parquet::file::metadata::{ParquetMetaData, RowGroupMetaData}; use parquet::file::statistics::Statistics; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::metadata::{ColumnMetadata, RegionMetadataRef}; use store_api::storage::{ColumnId, SequenceNumber}; @@ -653,12 +653,13 @@ impl PrimaryKeyReadFormat { column: &ColumnMetadata, is_min: bool, ) -> Option { - debug_assert!(self - .metadata - .primary_key - .first() - .map(|id| *id == column.column_id) - .unwrap_or(false)); + debug_assert!( + self.metadata + .primary_key + .first() + .map(|id| *id == column.column_id) + .unwrap_or(false) + ); let primary_key_encoding = self.metadata.primary_key_encoding; let converter = build_primary_key_codec_with_fields( @@ -935,24 +936,24 @@ mod tests { use api::v1::OpType; use datatypes::arrow::array::{ - Int64Array, StringArray, TimestampMillisecondArray, UInt64Array, UInt8Array, + Int64Array, StringArray, TimestampMillisecondArray, UInt8Array, UInt64Array, }; use datatypes::arrow::datatypes::{DataType as ArrowDataType, Field, Schema, TimeUnit}; use datatypes::prelude::ConcreteDataType; use datatypes::schema::ColumnSchema; use datatypes::value::ValueRef; - use datatypes::vectors::{Int64Vector, TimestampMillisecondVector, UInt64Vector, UInt8Vector}; + use datatypes::vectors::{Int64Vector, TimestampMillisecondVector, UInt8Vector, UInt64Vector}; use mito_codec::row_converter::{ DensePrimaryKeyCodec, PrimaryKeyCodec, PrimaryKeyCodecExt, SparsePrimaryKeyCodec, }; use store_api::codec::PrimaryKeyEncoding; use store_api::metadata::{ColumnMetadata, RegionMetadataBuilder}; - use store_api::storage::consts::ReservedColumnId; use store_api::storage::RegionId; + use store_api::storage::consts::ReservedColumnId; use super::*; - use crate::sst::parquet::flat_format::{sequence_column_index, FlatWriteFormat}; - use crate::sst::{to_flat_sst_arrow_schema, FlatSchemaOptions}; + use crate::sst::parquet::flat_format::{FlatWriteFormat, sequence_column_index}; + use crate::sst::{FlatSchemaOptions, to_flat_sst_arrow_schema}; const TEST_SEQUENCE: u64 = 1; const TEST_OP_TYPE: u8 = OpType::Put as u8; diff --git a/src/mito2/src/sst/parquet/helper.rs b/src/mito2/src/sst/parquet/helper.rs index 95054a6092..83a10d6ba2 100644 --- a/src/mito2/src/sst/parquet/helper.rs +++ b/src/mito2/src/sst/parquet/helper.rs @@ -20,7 +20,7 @@ use object_store::ObjectStore; use parquet::basic::ColumnOrder; use parquet::file::metadata::{FileMetaData, ParquetMetaData, RowGroupMetaData}; use parquet::format; -use parquet::schema::types::{from_thrift, SchemaDescriptor}; +use parquet::schema::types::{SchemaDescriptor, from_thrift}; use snafu::ResultExt; use crate::error; diff --git a/src/mito2/src/sst/parquet/metadata.rs b/src/mito2/src/sst/parquet/metadata.rs index 772e8012e1..2cf1ecfda8 100644 --- a/src/mito2/src/sst/parquet/metadata.rs +++ b/src/mito2/src/sst/parquet/metadata.rs @@ -13,8 +13,8 @@ // limitations under 
the License. use object_store::ObjectStore; -use parquet::file::metadata::{ParquetMetaData, ParquetMetaDataReader}; use parquet::file::FOOTER_SIZE; +use parquet::file::metadata::{ParquetMetaData, ParquetMetaDataReader}; use snafu::ResultExt; use crate::error::{self, Result}; diff --git a/src/mito2/src/sst/parquet/reader.rs b/src/mito2/src/sst/parquet/reader.rs index 9798eda826..56848d7576 100644 --- a/src/mito2/src/sst/parquet/reader.rs +++ b/src/mito2/src/sst/parquet/reader.rs @@ -30,7 +30,7 @@ use datatypes::data_type::ConcreteDataType; use mito_codec::row_converter::build_primary_key_codec; use object_store::ObjectStore; use parquet::arrow::arrow_reader::{ParquetRecordBatchReader, RowSelection}; -use parquet::arrow::{parquet_to_arrow_field_levels, FieldLevels, ProjectionMask}; +use parquet::arrow::{FieldLevels, ProjectionMask, parquet_to_arrow_field_levels}; use parquet::file::metadata::ParquetMetaData; use parquet::format::KeyValue; use snafu::{OptionExt, ResultExt}; @@ -39,15 +39,15 @@ use store_api::region_request::PathType; use store_api::storage::ColumnId; use table::predicate::Predicate; -use crate::cache::index::result_cache::PredicateKey; use crate::cache::CacheStrategy; +use crate::cache::index::result_cache::PredicateKey; use crate::error::{ ArrowReaderSnafu, InvalidMetadataSnafu, InvalidParquetSnafu, ReadDataPartSnafu, ReadParquetSnafu, Result, }; use crate::metrics::{ - PRECISE_FILTER_ROWS_TOTAL, READ_ROWS_IN_ROW_GROUP_TOTAL, READ_ROWS_TOTAL, - READ_ROW_GROUPS_TOTAL, READ_STAGE_ELAPSED, + PRECISE_FILTER_ROWS_TOTAL, READ_ROW_GROUPS_TOTAL, READ_ROWS_IN_ROW_GROUP_TOTAL, + READ_ROWS_TOTAL, READ_STAGE_ELAPSED, }; use crate::read::prune::{PruneReader, Source}; use crate::read::{Batch, BatchReader}; @@ -56,7 +56,7 @@ use crate::sst::index::bloom_filter::applier::BloomFilterIndexApplierRef; use crate::sst::index::fulltext_index::applier::FulltextIndexApplierRef; use crate::sst::index::inverted_index::applier::InvertedIndexApplierRef; use crate::sst::parquet::file_range::{FileRangeContext, FileRangeContextRef}; -use crate::sst::parquet::format::{need_override_sequence, ReadFormat}; +use crate::sst::parquet::format::{ReadFormat, need_override_sequence}; use crate::sst::parquet::metadata::MetadataLoader; use crate::sst::parquet::row_group::InMemoryRowGroup; use crate::sst::parquet::row_selection::RowGroupSelection; @@ -456,11 +456,11 @@ impl ParquetReaderBuilder { .cache_strategy .index_result_cache() .and_then(|cache| cache.get(predicate_key, self.file_handle.file_id().file_id())); - if let Some(result) = cached.as_ref() { - if all_required_row_groups_searched(output, result) { - apply_selection_and_update_metrics(output, result, metrics, INDEX_TYPE_FULLTEXT); - return true; - } + if let Some(result) = cached.as_ref() + && all_required_row_groups_searched(output, result) + { + apply_selection_and_update_metrics(output, result, metrics, INDEX_TYPE_FULLTEXT); + return true; } // Slow path: apply the index from the file. 
@@ -513,11 +513,11 @@ impl ParquetReaderBuilder { .cache_strategy .index_result_cache() .and_then(|cache| cache.get(predicate_key, self.file_handle.file_id().file_id())); - if let Some(result) = cached.as_ref() { - if all_required_row_groups_searched(output, result) { - apply_selection_and_update_metrics(output, result, metrics, INDEX_TYPE_INVERTED); - return true; - } + if let Some(result) = cached.as_ref() + && all_required_row_groups_searched(output, result) + { + apply_selection_and_update_metrics(output, result, metrics, INDEX_TYPE_INVERTED); + return true; } // Slow path: apply the index from the file. @@ -568,11 +568,11 @@ impl ParquetReaderBuilder { .cache_strategy .index_result_cache() .and_then(|cache| cache.get(predicate_key, self.file_handle.file_id().file_id())); - if let Some(result) = cached.as_ref() { - if all_required_row_groups_searched(output, result) { - apply_selection_and_update_metrics(output, result, metrics, INDEX_TYPE_BLOOM); - return true; - } + if let Some(result) = cached.as_ref() + && all_required_row_groups_searched(output, result) + { + apply_selection_and_update_metrics(output, result, metrics, INDEX_TYPE_BLOOM); + return true; } // Slow path: apply the index from the file. @@ -635,11 +635,11 @@ impl ParquetReaderBuilder { .cache_strategy .index_result_cache() .and_then(|cache| cache.get(predicate_key, self.file_handle.file_id().file_id())); - if let Some(result) = cached.as_ref() { - if all_required_row_groups_searched(output, result) { - apply_selection_and_update_metrics(output, result, metrics, INDEX_TYPE_FULLTEXT); - return true; - } + if let Some(result) = cached.as_ref() + && all_required_row_groups_searched(output, result) + { + apply_selection_and_update_metrics(output, result, metrics, INDEX_TYPE_FULLTEXT); + return true; } // Slow path: apply the index from the file. diff --git a/src/mito2/src/sst/parquet/row_group.rs b/src/mito2/src/sst/parquet/row_group.rs index 3f9c5536aa..f31db23dfc 100644 --- a/src/mito2/src/sst/parquet/row_group.rs +++ b/src/mito2/src/sst/parquet/row_group.rs @@ -19,8 +19,8 @@ use std::sync::Arc; use bytes::{Buf, Bytes}; use object_store::ObjectStore; -use parquet::arrow::arrow_reader::{RowGroups, RowSelection}; use parquet::arrow::ProjectionMask; +use parquet::arrow::arrow_reader::{RowGroups, RowSelection}; use parquet::column::page::{PageIterator, PageReader}; use parquet::errors::{ParquetError, Result}; use parquet::file::metadata::{ParquetMetaData, RowGroupMetaData}; @@ -34,7 +34,7 @@ use crate::cache::file_cache::{FileType, IndexKey}; use crate::cache::{CacheStrategy, PageKey, PageValue}; use crate::metrics::{READ_STAGE_ELAPSED, READ_STAGE_FETCH_PAGES}; use crate::sst::file::FileId; -use crate::sst::parquet::helper::{fetch_byte_ranges, MERGE_GAP}; +use crate::sst::parquet::helper::{MERGE_GAP, fetch_byte_ranges}; pub(crate) struct RowGroupBase<'a> { metadata: &'a RowGroupMetaData, @@ -182,7 +182,7 @@ impl<'a> RowGroupBase<'a> { None => { return Err(ParquetError::General(format!( "Invalid column index {col_idx}, column was not fetched" - ))) + ))); } Some(data) => { let page_locations = self @@ -296,10 +296,10 @@ impl<'a> InMemoryRowGroup<'a> { let _timer = READ_STAGE_ELAPSED .with_label_values(&["cache_miss_read"]) .start_timer(); - let data = fetch_byte_ranges(self.file_path, self.object_store.clone(), ranges) + + fetch_byte_ranges(self.file_path, self.object_store.clone(), ranges) .await - .map_err(|e| ParquetError::External(Box::new(e)))?; - data + .map_err(|e| ParquetError::External(Box::new(e)))? 
} }; diff --git a/src/mito2/src/sst/parquet/writer.rs b/src/mito2/src/sst/parquet/writer.rs index 28295777f9..7bc6bf2d76 100644 --- a/src/mito2/src/sst/parquet/writer.rs +++ b/src/mito2/src/sst/parquet/writer.rs @@ -17,8 +17,8 @@ use std::future::Future; use std::mem; use std::pin::Pin; -use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicUsize, Ordering}; use std::task::{Context, Poll}; use std::time::Instant; @@ -40,8 +40,8 @@ use parquet::schema::types::ColumnPath; use smallvec::smallvec; use snafu::ResultExt; use store_api::metadata::RegionMetadataRef; -use store_api::storage::consts::SEQUENCE_COLUMN_NAME; use store_api::storage::SequenceNumber; +use store_api::storage::consts::SEQUENCE_COLUMN_NAME; use tokio::io::AsyncWrite; use tokio_util::compat::{Compat, FuturesAsyncWriteCompatExt}; @@ -52,11 +52,11 @@ use crate::error::{ use crate::read::{Batch, FlatSource, Source}; use crate::sst::file::{FileId, RegionFileId}; use crate::sst::index::{Indexer, IndexerBuilder}; -use crate::sst::parquet::flat_format::{time_index_column_index, FlatWriteFormat}; +use crate::sst::parquet::flat_format::{FlatWriteFormat, time_index_column_index}; use crate::sst::parquet::format::PrimaryKeyWriteFormat; use crate::sst::parquet::helper::parse_parquet_metadata; -use crate::sst::parquet::{SstInfo, WriteOptions, PARQUET_METADATA_KEY}; -use crate::sst::{FlatSchemaOptions, DEFAULT_WRITE_BUFFER_SIZE, DEFAULT_WRITE_CONCURRENCY}; +use crate::sst::parquet::{PARQUET_METADATA_KEY, SstInfo, WriteOptions}; +use crate::sst::{DEFAULT_WRITE_BUFFER_SIZE, DEFAULT_WRITE_CONCURRENCY, FlatSchemaOptions}; /// Parquet SST writer. pub struct ParquetWriter { @@ -351,11 +351,13 @@ where builder: WriterPropertiesBuilder, region_metadata: &RegionMetadataRef, ) -> WriterPropertiesBuilder { - let ts_col = ColumnPath::new(vec![region_metadata - .time_index_column() - .column_schema - .name - .clone()]); + let ts_col = ColumnPath::new(vec![ + region_metadata + .time_index_column() + .column_schema + .name + .clone(), + ]); let seq_col = ColumnPath::new(vec![SEQUENCE_COLUMN_NAME.to_string()]); builder @@ -561,7 +563,7 @@ fn timestamp_range_from_array( timestamp_array.data_type() ), } - .fail() + .fail(); } }; diff --git a/src/mito2/src/test_util.rs b/src/mito2/src/test_util.rs index 27f60712f8..13a91d1968 100644 --- a/src/mito2/src/test_util.rs +++ b/src/mito2/src/test_util.rs @@ -23,8 +23,8 @@ pub mod wal_util; use std::collections::HashMap; use std::path::Path; -use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use api::greptime_proto::v1; use api::helper::ColumnDataTypeWrapper; @@ -32,26 +32,26 @@ use api::v1::column_def::options_from_column_schema; use api::v1::helper::row; use api::v1::value::ValueData; use api::v1::{OpType, Row, Rows, SemanticType}; -use common_base::readable_size::ReadableSize; use common_base::Plugins; +use common_base::readable_size::ReadableSize; use common_datasource::compression::CompressionType; use common_meta::cache::{new_schema_cache, new_table_schema_cache}; use common_meta::key::{SchemaMetadataManager, SchemaMetadataManagerRef}; -use common_meta::kv_backend::memory::MemoryKvBackend; use common_meta::kv_backend::KvBackendRef; +use common_meta::kv_backend::memory::MemoryKvBackend; use common_telemetry::warn; -use common_test_util::temp_dir::{create_temp_dir, TempDir}; -use common_wal::options::{KafkaWalOptions, WalOptions, WAL_OPTIONS_KEY}; -use 
datatypes::arrow::array::{TimestampMillisecondArray, UInt64Array, UInt8Array}; +use common_test_util::temp_dir::{TempDir, create_temp_dir}; +use common_wal::options::{KafkaWalOptions, WAL_OPTIONS_KEY, WalOptions}; +use datatypes::arrow::array::{TimestampMillisecondArray, UInt8Array, UInt64Array}; use datatypes::prelude::ConcreteDataType; use datatypes::schema::ColumnSchema; use log_store::kafka::log_store::KafkaLogStore; use log_store::raft_engine::log_store::RaftEngineLogStore; use log_store::test_util::log_store_util; use moka::future::CacheBuilder; +use object_store::ObjectStore; use object_store::manager::{ObjectStoreManager, ObjectStoreManagerRef}; use object_store::services::Fs; -use object_store::ObjectStore; use rskafka::client::partition::{Compression, UnknownTopicHandling}; use rskafka::client::{Client, ClientBuilder}; use rskafka::record::Record; @@ -68,7 +68,7 @@ use store_api::storage::{ColumnId, RegionId}; use crate::cache::write_cache::{WriteCache, WriteCacheRef}; use crate::config::MitoConfig; use crate::engine::listener::EventListenerRef; -use crate::engine::{MitoEngine, MITO_ENGINE_NAME}; +use crate::engine::{MITO_ENGINE_NAME, MitoEngine}; use crate::error::Result; use crate::flush::{WriteBufferManager, WriteBufferManagerRef}; use crate::manifest::manager::{RegionManifestManager, RegionManifestOptions}; diff --git a/src/mito2/src/test_util/memtable_util.rs b/src/mito2/src/test_util/memtable_util.rs index cb55a87c80..2681bf0a73 100644 --- a/src/mito2/src/test_util/memtable_util.rs +++ b/src/mito2/src/test_util/memtable_util.rs @@ -35,7 +35,7 @@ use table::predicate::Predicate; use crate::error::Result; use crate::memtable::bulk::part::BulkPart; -use crate::memtable::partition_tree::data::{timestamp_array_to_i64_slice, DataBatch, DataBuffer}; +use crate::memtable::partition_tree::data::{DataBatch, DataBuffer, timestamp_array_to_i64_slice}; use crate::memtable::{ BoxedBatchIterator, KeyValues, Memtable, MemtableBuilder, MemtableId, MemtableRanges, MemtableRef, MemtableStats, diff --git a/src/mito2/src/test_util/scheduler_util.rs b/src/mito2/src/test_util/scheduler_util.rs index 29dbd279f3..90f272c1eb 100644 --- a/src/mito2/src/test_util/scheduler_util.rs +++ b/src/mito2/src/test_util/scheduler_util.rs @@ -18,9 +18,9 @@ use std::sync::{Arc, Mutex}; use common_base::Plugins; use common_datasource::compression::CompressionType; -use common_test_util::temp_dir::{create_temp_dir, TempDir}; -use object_store::services::Fs; +use common_test_util::temp_dir::{TempDir, create_temp_dir}; use object_store::ObjectStore; +use object_store::services::Fs; use store_api::metadata::RegionMetadataRef; use store_api::region_request::PathType; use tokio::sync::mpsc::Sender; diff --git a/src/mito2/src/test_util/sst_util.rs b/src/mito2/src/test_util/sst_util.rs index 4af49595e3..a55a58465a 100644 --- a/src/mito2/src/test_util/sst_util.rs +++ b/src/mito2/src/test_util/sst_util.rs @@ -18,7 +18,7 @@ use std::sync::Arc; use api::v1::{OpType, SemanticType}; use common_time::Timestamp; -use datatypes::arrow::array::{BinaryArray, TimestampMillisecondArray, UInt64Array, UInt8Array}; +use datatypes::arrow::array::{BinaryArray, TimestampMillisecondArray, UInt8Array, UInt64Array}; use datatypes::prelude::ConcreteDataType; use datatypes::schema::{ColumnSchema, SkippingIndexOptions}; use datatypes::value::ValueRef; @@ -31,7 +31,7 @@ use store_api::storage::RegionId; use crate::read::{Batch, BatchBuilder, Source}; use crate::sst::file::{FileHandle, FileId, FileMeta}; -use crate::test_util::{new_batch_builder, 
new_noop_file_purger, VecBatchReader}; +use crate::test_util::{VecBatchReader, new_batch_builder, new_noop_file_purger}; /// Test region id. const REGION_ID: RegionId = RegionId::new(0, 0); diff --git a/src/mito2/src/test_util/wal_util.rs b/src/mito2/src/test_util/wal_util.rs index 823242faae..2521219510 100644 --- a/src/mito2/src/test_util/wal_util.rs +++ b/src/mito2/src/test_util/wal_util.rs @@ -15,9 +15,9 @@ use api::v1::WalEntry; use futures::stream; use prost::Message; +use store_api::logstore::EntryId; use store_api::logstore::entry::{Entry, MultiplePartEntry, MultiplePartHeader}; use store_api::logstore::provider::Provider; -use store_api::logstore::EntryId; use store_api::storage::RegionId; use crate::error::Result; diff --git a/src/mito2/src/wal.rs b/src/mito2/src/wal.rs index 9a75ea440e..f4515070b7 100644 --- a/src/mito2/src/wal.rs +++ b/src/mito2/src/wal.rs @@ -223,12 +223,12 @@ impl WalWriter { mod tests { use api::v1::helper::{tag_column_schema, time_index_column_schema}; use api::v1::{ - bulk_wal_entry, value, ArrowIpc, BulkWalEntry, ColumnDataType, Mutation, OpType, Row, Rows, - Value, + ArrowIpc, BulkWalEntry, ColumnDataType, Mutation, OpType, Row, Rows, Value, bulk_wal_entry, + value, }; use common_recordbatch::DfRecordBatch; use common_test_util::flight::encode_to_flight_data; - use common_test_util::temp_dir::{create_temp_dir, TempDir}; + use common_test_util::temp_dir::{TempDir, create_temp_dir}; use datatypes::arrow; use datatypes::arrow::array::{ArrayRef, TimestampMillisecondArray}; use datatypes::arrow::datatypes::Field; diff --git a/src/mito2/src/wal/entry_distributor.rs b/src/mito2/src/wal/entry_distributor.rs index 70d040c527..e48c21967a 100644 --- a/src/mito2/src/wal/entry_distributor.rs +++ b/src/mito2/src/wal/entry_distributor.rs @@ -27,7 +27,7 @@ use tokio::sync::oneshot; use tokio_stream::StreamExt; use crate::error::{self, Result}; -use crate::wal::entry_reader::{decode_raw_entry, WalEntryReader}; +use crate::wal::entry_reader::{WalEntryReader, decode_raw_entry}; use crate::wal::raw_entry_reader::RawEntryReader; use crate::wal::{EntryId, WalEntryStream}; @@ -80,10 +80,10 @@ impl WalEntryDistributor { let region_id = entry.region_id(); if let Some(EntryReceiver { sender, start_id }) = receivers.get(&region_id) { - if entry_id >= *start_id { - if let Err(err) = sender.send(entry).await { - error!(err; "Failed to distribute raw entry, entry_id:{}, region_id: {}", entry_id, region_id); - } + if entry_id >= *start_id + && let Err(err) = sender.send(entry).await + { + error!(err; "Failed to distribute raw entry, entry_id:{}, region_id: {}", entry_id, region_id); } } else { debug!("Subscriber not found, region_id: {}", region_id); @@ -221,14 +221,14 @@ pub fn build_wal_entry_distributor_and_receivers( mod tests { use api::v1::{Mutation, OpType, WalEntry}; - use futures::{stream, TryStreamExt}; + use futures::{TryStreamExt, stream}; use prost::Message; use store_api::logstore::entry::{Entry, MultiplePartEntry, MultiplePartHeader, NaiveEntry}; use super::*; use crate::test_util::wal_util::generate_tail_corrupted_stream; - use crate::wal::raw_entry_reader::{EntryStream, RawEntryReader}; use crate::wal::EntryId; + use crate::wal::raw_entry_reader::{EntryStream, RawEntryReader}; struct MockRawEntryReader { entries: Vec, } diff --git a/src/mito2/src/wal/raw_entry_reader.rs b/src/mito2/src/wal/raw_entry_reader.rs index 7939ababa6..de63f92251 100644 --- a/src/mito2/src/wal/raw_entry_reader.rs +++ b/src/mito2/src/wal/raw_entry_reader.rs @@ -127,7 +127,7 @@ where mod tests { use 
std::sync::Arc; - use futures::{stream, TryStreamExt}; + use futures::{TryStreamExt, stream}; use store_api::logstore::entry::{Entry, NaiveEntry}; use store_api::logstore::{ AppendBatchResponse, EntryId, LogStore, SendableEntryStream, WalIndex, diff --git a/src/mito2/src/worker.rs b/src/mito2/src/worker.rs index be84ba4b5f..3b110e2064 100644 --- a/src/mito2/src/worker.rs +++ b/src/mito2/src/worker.rs @@ -29,8 +29,8 @@ mod handle_write; use std::collections::HashMap; use std::path::Path; -use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicBool, Ordering}; use std::time::Duration; use common_base::Plugins; @@ -41,15 +41,15 @@ use common_telemetry::{error, info, warn}; use futures::future::try_join_all; use object_store::manager::ObjectStoreManagerRef; use prometheus::{Histogram, IntGauge}; -use rand::{rng, Rng}; -use snafu::{ensure, ResultExt}; +use rand::{Rng, rng}; +use snafu::{ResultExt, ensure}; use store_api::logstore::LogStore; use store_api::region_engine::{ SetRegionRoleStateResponse, SetRegionRoleStateSuccess, SettableRegionRoleState, }; use store_api::storage::RegionId; use tokio::sync::mpsc::{Receiver, Sender}; -use tokio::sync::{mpsc, oneshot, watch, Mutex}; +use tokio::sync::{Mutex, mpsc, oneshot, watch}; use crate::cache::write_cache::{WriteCache, WriteCacheRef}; use crate::cache::{CacheManager, CacheManagerRef}; diff --git a/src/mito2/src/worker/handle_alter.rs b/src/mito2/src/worker/handle_alter.rs index 4305a27669..aff97e09a1 100644 --- a/src/mito2/src/worker/handle_alter.rs +++ b/src/mito2/src/worker/handle_alter.rs @@ -33,10 +33,10 @@ use store_api::storage::RegionId; use crate::error::{InvalidMetadataSnafu, InvalidRegionRequestSnafu, Result}; use crate::flush::FlushReason; use crate::manifest::action::RegionChange; +use crate::region::MitoRegionRef; use crate::region::options::CompactionOptions::Twcs; use crate::region::options::TwcsOptions; use crate::region::version::VersionRef; -use crate::region::MitoRegionRef; use crate::request::{DdlRequest, OptionOutputTx, SenderDdlRequest}; use crate::worker::RegionWorkerLoop; diff --git a/src/mito2/src/worker/handle_catchup.rs b/src/mito2/src/worker/handle_catchup.rs index 415e426a4d..4924591e69 100644 --- a/src/mito2/src/worker/handle_catchup.rs +++ b/src/mito2/src/worker/handle_catchup.rs @@ -26,8 +26,8 @@ use store_api::storage::RegionId; use tokio::time::Instant; use crate::error::{self, Result}; -use crate::region::opener::{replay_memtable, RegionOpener}; use crate::region::MitoRegion; +use crate::region::opener::{RegionOpener, replay_memtable}; use crate::worker::RegionWorkerLoop; impl RegionWorkerLoop { @@ -52,7 +52,8 @@ impl RegionWorkerLoop { // Utilizes the short circuit evaluation. let region = if !is_empty_memtable || region.manifest_ctx.has_update().await? 
{ if !is_empty_memtable { - warn!("Region {} memtables is not empty, which should not happen, manifest version: {}, last entry id: {}", + warn!( + "Region {} memtables is not empty, which should not happen, manifest version: {}, last entry id: {}", region.region_id, region.manifest_ctx.manifest_version().await, region.version_control.current().last_entry_id @@ -70,7 +71,10 @@ impl RegionWorkerLoop { .map(|c| c.entry_id) .unwrap_or_default() .max(flushed_entry_id); - info!("Trying to replay memtable for region: {region_id}, provider: {:?}, replay from entry id: {replay_from_entry_id}, flushed entry id: {flushed_entry_id}", region.provider); + info!( + "Trying to replay memtable for region: {region_id}, provider: {:?}, replay from entry id: {replay_from_entry_id}, flushed entry id: {flushed_entry_id}", + region.provider + ); let timer = Instant::now(); let wal_entry_reader = self.wal @@ -98,7 +102,7 @@ impl RegionWorkerLoop { last_entry_id >= expected_last_entry_id, error::UnexpectedSnafu { reason: format!( - "failed to set region {} to writable, it was expected to replayed to {}, but actually replayed to {}", + "failed to set region {} to writable, it was expected to replayed to {}, but actually replayed to {}", region_id, expected_last_entry_id, last_entry_id, ), } @@ -145,7 +149,9 @@ impl RegionWorkerLoop { let region_id = region.region_id; let manifest_version = region.manifest_ctx.manifest_version().await; let flushed_entry_id = region.version_control.current().last_entry_id; - info!("Reopening the region: {region_id}, manifest version: {manifest_version}, flushed entry id: {flushed_entry_id}"); + info!( + "Reopening the region: {region_id}, manifest version: {manifest_version}, flushed entry id: {flushed_entry_id}" + ); let reopened_region = Arc::new( RegionOpener::new( region_id, diff --git a/src/mito2/src/worker/handle_compaction.rs b/src/mito2/src/worker/handle_compaction.rs index 6fb9f640f7..b74819a733 100644 --- a/src/mito2/src/worker/handle_compaction.rs +++ b/src/mito2/src/worker/handle_compaction.rs @@ -104,8 +104,7 @@ impl RegionWorkerLoop { let now = self.time_provider.current_time_millis(); if now - region.last_compaction_millis() >= self.config.min_compaction_interval.as_millis() as i64 - { - if let Err(e) = self + && let Err(e) = self .compaction_scheduler .schedule_compaction( region.region_id, @@ -118,12 +117,11 @@ impl RegionWorkerLoop { 1, ) .await - { - warn!( - "Failed to schedule compaction for region: {}, err: {}", - region.region_id, e - ); - } + { + warn!( + "Failed to schedule compaction for region: {}, err: {}", + region.region_id, e + ); } } } diff --git a/src/mito2/src/worker/handle_create.rs b/src/mito2/src/worker/handle_create.rs index 166a005e93..492f4d6186 100644 --- a/src/mito2/src/worker/handle_create.rs +++ b/src/mito2/src/worker/handle_create.rs @@ -23,7 +23,7 @@ use store_api::region_request::{AffectedRows, RegionCreateRequest}; use store_api::storage::RegionId; use crate::error::Result; -use crate::region::opener::{check_recovered_region, RegionOpener}; +use crate::region::opener::{RegionOpener, check_recovered_region}; use crate::worker::RegionWorkerLoop; impl RegionWorkerLoop { diff --git a/src/mito2/src/worker/handle_drop.rs b/src/mito2/src/worker/handle_drop.rs index a060b0fb48..fd90ef7f3e 100644 --- a/src/mito2/src/worker/handle_drop.rs +++ b/src/mito2/src/worker/handle_drop.rs @@ -29,7 +29,7 @@ use tokio::time::sleep; use crate::error::{OpenDalSnafu, Result}; use crate::region::{RegionLeaderState, RegionMapRef}; -use 
crate::worker::{RegionWorkerLoop, DROPPING_MARKER_FILE}; +use crate::worker::{DROPPING_MARKER_FILE, RegionWorkerLoop}; const GC_TASK_INTERVAL_SEC: u64 = 5 * 60; // 5 minutes const MAX_RETRY_TIMES: u64 = 12; // 1 hours (5m * 12) diff --git a/src/mito2/src/worker/handle_flush.rs b/src/mito2/src/worker/handle_flush.rs index b676ecf773..aabee2db4f 100644 --- a/src/mito2/src/worker/handle_flush.rs +++ b/src/mito2/src/worker/handle_flush.rs @@ -14,8 +14,8 @@ //! Handling flush related requests. -use std::sync::atomic::Ordering; use std::sync::Arc; +use std::sync::atomic::Ordering; use common_telemetry::{debug, error, info}; use store_api::logstore::LogStore; @@ -87,16 +87,13 @@ impl RegionWorkerLoop { // Flush memtable with max mutable memtable. // TODO(yingwen): Maybe flush more tables to reduce write buffer size. - if let Some(region) = max_mem_region { - if !self.flush_scheduler.is_flush_requested(region.region_id) { - let task = - self.new_flush_task(region, FlushReason::EngineFull, None, self.config.clone()); - self.flush_scheduler.schedule_flush( - region.region_id, - &region.version_control, - task, - )?; - } + if let Some(region) = max_mem_region + && !self.flush_scheduler.is_flush_requested(region.region_id) + { + let task = + self.new_flush_task(region, FlushReason::EngineFull, None, self.config.clone()); + self.flush_scheduler + .schedule_flush(region.region_id, &region.version_control, task)?; } Ok(()) diff --git a/src/mito2/src/worker/handle_manifest.rs b/src/mito2/src/worker/handle_manifest.rs index 9cf94f1021..6e897be971 100644 --- a/src/mito2/src/worker/handle_manifest.rs +++ b/src/mito2/src/worker/handle_manifest.rs @@ -23,8 +23,8 @@ use common_telemetry::{info, warn}; use store_api::logstore::LogStore; use store_api::storage::RegionId; -use crate::cache::file_cache::{FileType, IndexKey}; use crate::cache::CacheManagerRef; +use crate::cache::file_cache::{FileType, IndexKey}; use crate::error::{RegionBusySnafu, RegionNotFoundSnafu, Result}; use crate::manifest::action::{ RegionChange, RegionEdit, RegionMetaAction, RegionMetaActionList, RegionTruncate, @@ -272,10 +272,10 @@ impl RegionWorkerLoop { let _ = edit_result.sender.send(edit_result.result); - if let Some(edit_queue) = self.region_edit_queues.get_mut(&edit_result.region_id) { - if let Some(request) = edit_queue.dequeue() { - self.handle_region_edit(request).await; - } + if let Some(edit_queue) = self.region_edit_queues.get_mut(&edit_result.region_id) + && let Some(request) = edit_queue.dequeue() + { + self.handle_region_edit(request).await; } if need_compaction { diff --git a/src/mito2/src/worker/handle_open.rs b/src/mito2/src/worker/handle_open.rs index 9c98a29317..b0c93919c8 100644 --- a/src/mito2/src/worker/handle_open.rs +++ b/src/mito2/src/worker/handle_open.rs @@ -32,7 +32,7 @@ use crate::request::OptionOutputTx; use crate::sst::location::region_dir_from_table_dir; use crate::wal::entry_distributor::WalEntryReceiver; use crate::worker::handle_drop::remove_region_dir_once; -use crate::worker::{RegionWorkerLoop, DROPPING_MARKER_FILE}; +use crate::worker::{DROPPING_MARKER_FILE, RegionWorkerLoop}; impl RegionWorkerLoop { async fn check_and_cleanup_region( diff --git a/src/mito2/src/worker/handle_write.rs b/src/mito2/src/worker/handle_write.rs index bbb27548e6..7b8f6a70cb 100644 --- a/src/mito2/src/worker/handle_write.rs +++ b/src/mito2/src/worker/handle_write.rs @@ -14,7 +14,7 @@ //! Handling write requests. 
-use std::collections::{hash_map, HashMap}; +use std::collections::{HashMap, hash_map}; use std::sync::Arc; use api::v1::OpType; @@ -301,15 +301,13 @@ impl RegionWorkerLoop { // Only fill missing columns if primary key is dense encoded. if need_fill_missing_columns && sender_req.request.primary_key_encoding() == PrimaryKeyEncoding::Dense - { - if let Err(e) = sender_req + && let Err(e) = sender_req .request .maybe_fill_missing_columns(&region_ctx.version().metadata) - { - sender_req.sender.send(Err(e)); + { + sender_req.sender.send(Err(e)); - continue; - } + continue; } // Collect requests by region. diff --git a/src/object-store/src/factory.rs b/src/object-store/src/factory.rs index 7614454c07..010b8be613 100644 --- a/src/object-store/src/factory.rs +++ b/src/object-store/src/factory.rs @@ -24,7 +24,7 @@ use crate::config::{AzblobConfig, FileConfig, GcsConfig, ObjectStoreConfig, OssC use crate::error::{self, Result}; use crate::services::Azblob; use crate::util::{build_http_client, clean_temp_dir, join_dir, normalize_dir}; -use crate::{util, ObjectStore, ATOMIC_WRITE_DIR, OLD_ATOMIC_WRITE_DIR}; +use crate::{ATOMIC_WRITE_DIR, OLD_ATOMIC_WRITE_DIR, ObjectStore, util}; pub async fn new_raw_object_store( store: &ObjectStoreConfig, diff --git a/src/object-store/src/layers/lru_cache.rs b/src/object-store/src/layers/lru_cache.rs index 3b673bb381..967efe80fe 100644 --- a/src/object-store/src/layers/lru_cache.rs +++ b/src/object-store/src/layers/lru_cache.rs @@ -14,11 +14,11 @@ use std::sync::Arc; +use opendal::Result; use opendal::raw::oio::Reader; use opendal::raw::{ Access, Layer, LayeredAccess, OpList, OpRead, OpWrite, RpDelete, RpList, RpRead, RpWrite, }; -use opendal::Result; mod read_cache; use std::time::Instant; diff --git a/src/object-store/src/layers/lru_cache/read_cache.rs b/src/object-store/src/layers/lru_cache/read_cache.rs index b27a2801bc..1b1f36634d 100644 --- a/src/object-store/src/layers/lru_cache/read_cache.rs +++ b/src/object-store/src/layers/lru_cache/read_cache.rs @@ -20,7 +20,7 @@ use moka::future::Cache; use moka::notification::ListenerFuture; use moka::policy::EvictionPolicy; use opendal::raw::oio::{Read, Reader, Write}; -use opendal::raw::{oio, Access, OpDelete, OpRead, OpStat, OpWrite, RpRead}; +use opendal::raw::{Access, OpDelete, OpRead, OpStat, OpWrite, RpRead, oio}; use opendal::{Error as OpendalError, ErrorKind, OperatorBuilder, Result}; use crate::metrics::{ diff --git a/src/object-store/src/lib.rs b/src/object-store/src/lib.rs index dd37e2116f..c3cf5e8ce1 100644 --- a/src/object-store/src/lib.rs +++ b/src/object-store/src/lib.rs @@ -14,9 +14,8 @@ pub use opendal::raw::{Access, HttpClient}; pub use opendal::{ - services, Buffer, Builder as ObjectStoreBuilder, Entry, EntryMode, Error, ErrorKind, - FuturesAsyncReader, FuturesAsyncWriter, Lister, Operator as ObjectStore, Reader, Result, - Scheme, Writer, + Buffer, Builder as ObjectStoreBuilder, Entry, EntryMode, Error, ErrorKind, FuturesAsyncReader, + FuturesAsyncWriter, Lister, Operator as ObjectStore, Reader, Result, Scheme, Writer, services, }; pub mod config; diff --git a/src/object-store/src/manager.rs b/src/object-store/src/manager.rs index b73147baf1..3753640824 100644 --- a/src/object-store/src/manager.rs +++ b/src/object-store/src/manager.rs @@ -59,11 +59,11 @@ impl ObjectStoreManager { #[cfg(test)] mod tests { - use common_test_util::temp_dir::{create_temp_dir, TempDir}; + use common_test_util::temp_dir::{TempDir, create_temp_dir}; use super::ObjectStoreManager; - use crate::services::Fs as Builder; use 
crate::ObjectStore; + use crate::services::Fs as Builder; fn new_object_store(dir: &TempDir) -> ObjectStore { let store_dir = dir.path().to_str().unwrap(); diff --git a/src/object-store/src/test_util.rs b/src/object-store/src/test_util.rs index 945244bb87..3279db7b7c 100644 --- a/src/object-store/src/test_util.rs +++ b/src/object-store/src/test_util.rs @@ -48,16 +48,16 @@ pub struct TestS3Config { /// Returns s3 test config, return None if not found. pub fn s3_test_config() -> Option { - if let Ok(b) = env::var("GT_S3_BUCKET") { - if !b.is_empty() { - return Some(TestS3Config { - root: uuid::Uuid::new_v4().to_string(), - access_key_id: env::var("GT_S3_ACCESS_KEY_ID").ok()?, - secret_access_key: env::var("GT_S3_ACCESS_KEY").ok()?, - bucket: env::var("GT_S3_BUCKET").ok()?, - region: Some(env::var("GT_S3_REGION").ok()?), - }); - } + if let Ok(b) = env::var("GT_S3_BUCKET") + && !b.is_empty() + { + return Some(TestS3Config { + root: uuid::Uuid::new_v4().to_string(), + access_key_id: env::var("GT_S3_ACCESS_KEY_ID").ok()?, + secret_access_key: env::var("GT_S3_ACCESS_KEY").ok()?, + bucket: env::var("GT_S3_BUCKET").ok()?, + region: Some(env::var("GT_S3_REGION").ok()?), + }); } None diff --git a/src/object-store/src/util.rs b/src/object-store/src/util.rs index 0e26a00022..e26963f1f5 100644 --- a/src/object-store/src/util.rs +++ b/src/object-store/src/util.rs @@ -23,7 +23,7 @@ use opendal::{Error, ErrorKind}; use snafu::ResultExt; use crate::config::HttpClientConfig; -use crate::{error, ObjectStore}; +use crate::{ObjectStore, error}; /// Join two paths and normalize the output dir. /// @@ -195,7 +195,9 @@ impl LoggingInterceptor for DefaultLoggingInterceptor { pub(crate) fn build_http_client(config: &HttpClientConfig) -> error::Result { if config.skip_ssl_validation { - common_telemetry::warn!("Skipping SSL validation for object storage HTTP client. Please ensure the environment is trusted."); + common_telemetry::warn!( + "Skipping SSL validation for object storage HTTP client. Please ensure the environment is trusted." + ); } let client = reqwest::ClientBuilder::new() diff --git a/src/object-store/tests/object_store_test.rs b/src/object-store/tests/object_store_test.rs index d34fb57ab7..58fecb0f93 100644 --- a/src/object-store/tests/object_store_test.rs +++ b/src/object-store/tests/object_store_test.rs @@ -128,26 +128,26 @@ async fn test_fs_backend() -> Result<()> { #[tokio::test] async fn test_s3_backend() -> Result<()> { common_telemetry::init_default_ut_logging(); - if let Ok(bucket) = env::var("GT_S3_BUCKET") { - if !bucket.is_empty() { - info!("Running s3 test."); + if let Ok(bucket) = env::var("GT_S3_BUCKET") + && !bucket.is_empty() + { + info!("Running s3 test."); - let root = uuid::Uuid::new_v4().to_string(); + let root = uuid::Uuid::new_v4().to_string(); - let builder = S3::default() - .root(&root) - .access_key_id(&env::var("GT_S3_ACCESS_KEY_ID")?) - .secret_access_key(&env::var("GT_S3_ACCESS_KEY")?) - .region(&env::var("GT_S3_REGION")?) - .bucket(&bucket); + let builder = S3::default() + .root(&root) + .access_key_id(&env::var("GT_S3_ACCESS_KEY_ID")?) + .secret_access_key(&env::var("GT_S3_ACCESS_KEY")?) + .region(&env::var("GT_S3_REGION")?) 
+ .bucket(&bucket); - let store = ObjectStore::new(builder).unwrap().finish(); + let store = ObjectStore::new(builder).unwrap().finish(); - let guard = TempFolder::new(&store, "/"); - test_object_crud(&store).await?; - test_object_list(&store).await?; - guard.remove_all().await?; - } + let guard = TempFolder::new(&store, "/"); + test_object_crud(&store).await?; + test_object_list(&store).await?; + guard.remove_all().await?; } Ok(()) @@ -156,25 +156,25 @@ async fn test_s3_backend() -> Result<()> { #[tokio::test] async fn test_oss_backend() -> Result<()> { common_telemetry::init_default_ut_logging(); - if let Ok(bucket) = env::var("GT_OSS_BUCKET") { - if !bucket.is_empty() { - info!("Running oss test."); + if let Ok(bucket) = env::var("GT_OSS_BUCKET") + && !bucket.is_empty() + { + info!("Running oss test."); - let root = uuid::Uuid::new_v4().to_string(); + let root = uuid::Uuid::new_v4().to_string(); - let builder = Oss::default() - .root(&root) - .access_key_id(&env::var("GT_OSS_ACCESS_KEY_ID")?) - .access_key_secret(&env::var("GT_OSS_ACCESS_KEY")?) - .bucket(&bucket); + let builder = Oss::default() + .root(&root) + .access_key_id(&env::var("GT_OSS_ACCESS_KEY_ID")?) + .access_key_secret(&env::var("GT_OSS_ACCESS_KEY")?) + .bucket(&bucket); - let store = ObjectStore::new(builder).unwrap().finish(); + let store = ObjectStore::new(builder).unwrap().finish(); - let guard = TempFolder::new(&store, "/"); - test_object_crud(&store).await?; - test_object_list(&store).await?; - guard.remove_all().await?; - } + let guard = TempFolder::new(&store, "/"); + test_object_crud(&store).await?; + test_object_list(&store).await?; + guard.remove_all().await?; } Ok(()) @@ -183,25 +183,25 @@ async fn test_oss_backend() -> Result<()> { #[tokio::test] async fn test_azblob_backend() -> Result<()> { common_telemetry::init_default_ut_logging(); - if let Ok(container) = env::var("GT_AZBLOB_CONTAINER") { - if !container.is_empty() { - info!("Running azblob test."); + if let Ok(container) = env::var("GT_AZBLOB_CONTAINER") + && !container.is_empty() + { + info!("Running azblob test."); - let root = uuid::Uuid::new_v4().to_string(); + let root = uuid::Uuid::new_v4().to_string(); - let builder = Azblob::default() - .root(&root) - .account_name(&env::var("GT_AZBLOB_ACCOUNT_NAME")?) - .account_key(&env::var("GT_AZBLOB_ACCOUNT_KEY")?) - .container(&container); + let builder = Azblob::default() + .root(&root) + .account_name(&env::var("GT_AZBLOB_ACCOUNT_NAME")?) + .account_key(&env::var("GT_AZBLOB_ACCOUNT_KEY")?) 
+ .container(&container); - let store = ObjectStore::new(builder).unwrap().finish(); + let store = ObjectStore::new(builder).unwrap().finish(); - let guard = TempFolder::new(&store, "/"); - test_object_crud(&store).await?; - test_object_list(&store).await?; - guard.remove_all().await?; - } + let guard = TempFolder::new(&store, "/"); + test_object_crud(&store).await?; + test_object_list(&store).await?; + guard.remove_all().await?; } Ok(()) } @@ -209,25 +209,25 @@ async fn test_azblob_backend() -> Result<()> { #[tokio::test] async fn test_gcs_backend() -> Result<()> { common_telemetry::init_default_ut_logging(); - if let Ok(container) = env::var("GT_AZBLOB_CONTAINER") { - if !container.is_empty() { - info!("Running azblob test."); + if let Ok(container) = env::var("GT_AZBLOB_CONTAINER") + && !container.is_empty() + { + info!("Running azblob test."); - let builder = Gcs::default() - .root(&uuid::Uuid::new_v4().to_string()) - .bucket(&env::var("GT_GCS_BUCKET").unwrap()) - .scope(&env::var("GT_GCS_SCOPE").unwrap()) - .credential_path(&env::var("GT_GCS_CREDENTIAL_PATH").unwrap()) - .credential(&env::var("GT_GCS_CREDENTIAL").unwrap()) - .endpoint(&env::var("GT_GCS_ENDPOINT").unwrap()); + let builder = Gcs::default() + .root(&uuid::Uuid::new_v4().to_string()) + .bucket(&env::var("GT_GCS_BUCKET").unwrap()) + .scope(&env::var("GT_GCS_SCOPE").unwrap()) + .credential_path(&env::var("GT_GCS_CREDENTIAL_PATH").unwrap()) + .credential(&env::var("GT_GCS_CREDENTIAL").unwrap()) + .endpoint(&env::var("GT_GCS_ENDPOINT").unwrap()); - let store = ObjectStore::new(builder).unwrap().finish(); + let store = ObjectStore::new(builder).unwrap().finish(); - let guard = TempFolder::new(&store, "/"); - test_object_crud(&store).await?; - test_object_list(&store).await?; - guard.remove_all().await?; - } + let guard = TempFolder::new(&store, "/"); + test_object_crud(&store).await?; + test_object_list(&store).await?; + guard.remove_all().await?; } Ok(()) } diff --git a/src/operator/src/bulk_insert.rs b/src/operator/src/bulk_insert.rs index a7946abd7d..70d5002a11 100644 --- a/src/operator/src/bulk_insert.rs +++ b/src/operator/src/bulk_insert.rs @@ -15,22 +15,22 @@ use std::collections::HashSet; use ahash::{HashMap, HashMapExt}; +use api::v1::ArrowIpc; use api::v1::flow::DirtyWindowRequest; use api::v1::region::{ - bulk_insert_request, region_request, BulkInsertRequest, RegionRequest, RegionRequestHeader, + BulkInsertRequest, RegionRequest, RegionRequestHeader, bulk_insert_request, region_request, }; -use api::v1::ArrowIpc; use arrow::array::Array; use arrow::record_batch::RecordBatch; use common_base::AffectedRows; -use common_grpc::flight::{FlightDecoder, FlightEncoder, FlightMessage}; use common_grpc::FlightData; +use common_grpc::flight::{FlightDecoder, FlightEncoder, FlightMessage}; use common_telemetry::error; use common_telemetry::tracing_context::TracingContext; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::storage::RegionId; -use table::metadata::TableInfoRef; use table::TableRef; +use table::metadata::TableInfoRef; use crate::insert::Inserter; use crate::{error, metrics}; diff --git a/src/operator/src/delete.rs b/src/operator/src/delete.rs index a97a63eb45..1e9fef919a 100644 --- a/src/operator/src/delete.rs +++ b/src/operator/src/delete.rs @@ -26,9 +26,9 @@ use common_telemetry::tracing_context::TracingContext; use futures_util::future; use partition::manager::PartitionRuleManagerRef; use session::context::QueryContextRef; -use snafu::{ensure, OptionExt, 
ResultExt}; -use table::requests::DeleteRequest as TableDeleteRequest; +use snafu::{OptionExt, ResultExt, ensure}; use table::TableRef; +use table::requests::DeleteRequest as TableDeleteRequest; use crate::error::{ CatalogSnafu, FindRegionLeaderSnafu, InvalidDeleteRequestSnafu, JoinTaskSnafu, diff --git a/src/operator/src/expr_helper.rs b/src/operator/src/expr_helper.rs index dc06ddf052..03ef696102 100644 --- a/src/operator/src/expr_helper.rs +++ b/src/operator/src/expr_helper.rs @@ -22,20 +22,21 @@ use api::v1::alter_database_expr::Kind as AlterDatabaseKind; use api::v1::alter_table_expr::Kind as AlterTableKind; use api::v1::column_def::options_from_column_schema; use api::v1::{ - set_index, unset_index, AddColumn, AddColumns, AlterDatabaseExpr, AlterTableExpr, Analyzer, - ColumnDataType, ColumnDataTypeExtension, CreateFlowExpr, CreateTableExpr, CreateViewExpr, - DropColumn, DropColumns, DropDefaults, ExpireAfter, FulltextBackend as PbFulltextBackend, - ModifyColumnType, ModifyColumnTypes, RenameTable, SemanticType, SetDatabaseOptions, - SetDefaults, SetFulltext, SetIndex, SetIndexes, SetInverted, SetSkipping, SetTableOptions, + AddColumn, AddColumns, AlterDatabaseExpr, AlterTableExpr, Analyzer, ColumnDataType, + ColumnDataTypeExtension, CreateFlowExpr, CreateTableExpr, CreateViewExpr, DropColumn, + DropColumns, DropDefaults, ExpireAfter, FulltextBackend as PbFulltextBackend, ModifyColumnType, + ModifyColumnTypes, RenameTable, SemanticType, SetDatabaseOptions, SetDefaults, SetFulltext, + SetIndex, SetIndexes, SetInverted, SetSkipping, SetTableOptions, SkippingIndexType as PbSkippingIndexType, TableName, UnsetDatabaseOptions, UnsetFulltext, - UnsetIndex, UnsetIndexes, UnsetInverted, UnsetSkipping, UnsetTableOptions, + UnsetIndex, UnsetIndexes, UnsetInverted, UnsetSkipping, UnsetTableOptions, set_index, + unset_index, }; use common_error::ext::BoxedError; use common_grpc_expr::util::ColumnExpr; use common_time::Timezone; use datafusion::sql::planner::object_name_to_table_reference; use datatypes::schema::{ - ColumnSchema, FulltextAnalyzer, FulltextBackend, Schema, SkippingIndexType, COMMENT_KEY, + COMMENT_KEY, ColumnSchema, FulltextAnalyzer, FulltextBackend, Schema, SkippingIndexType, }; use file_engine::FileOptions; use query::sql::{ @@ -44,7 +45,7 @@ use query::sql::{ }; use session::context::QueryContextRef; use session::table_name::table_idents_to_full_name; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use sql::ast::{ColumnOption, ObjectName, ObjectNamePartExt}; use sql::statements::alter::{ AlterDatabase, AlterDatabaseOperation, AlterTable, AlterTableOperation, @@ -56,7 +57,7 @@ use sql::statements::{ column_to_schema, sql_column_def_to_grpc_column_def, sql_data_type_to_concrete_data_type, }; use sql::util::extract_tables_from_query; -use table::requests::{TableOptions, FILE_TABLE_META_KEY}; +use table::requests::{FILE_TABLE_META_KEY, TableOptions}; use table::table_reference::TableReference; #[cfg(feature = "enterprise")] pub use trigger::to_create_trigger_task_expr; @@ -968,10 +969,11 @@ SELECT max(c1), min(c2) FROM schema_2.table_2;"; let res = to_create_flow_task_expr(create_flow, &QueryContext::arc()); assert!(res.is_err()); - assert!(res - .unwrap_err() - .to_string() - .contains("Invalid flow name: abc.`task_2`")); + assert!( + res.unwrap_err() + .to_string() + .contains("Invalid flow name: abc.`task_2`") + ); } #[test] @@ -1002,7 +1004,7 @@ SELECT max(c1), min(c2) FROM schema_2.table_2;"; // duplicate primary key "CREATE TABLE monitor 
(host STRING, ts TIMESTAMP TIME INDEX, some_column STRING, PRIMARY KEY (some_column, host, some_column));", // time index is primary key - "CREATE TABLE monitor (host STRING, ts TIMESTAMP TIME INDEX, PRIMARY KEY (host, ts));" + "CREATE TABLE monitor (host STRING, ts TIMESTAMP TIME INDEX, PRIMARY KEY (host, ts));", ]; for sql in cases { diff --git a/src/operator/src/expr_helper/trigger.rs b/src/operator/src/expr_helper/trigger.rs index c6e7e5cead..2df91a0c07 100644 --- a/src/operator/src/expr_helper/trigger.rs +++ b/src/operator/src/expr_helper/trigger.rs @@ -4,7 +4,7 @@ use api::v1::{ WebhookOptions as PbWebhookOptions, }; use session::context::QueryContextRef; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use sql::ast::{ObjectName, ObjectNamePartExt}; use sql::statements::create::trigger::{ChannelType, CreateTrigger, TriggerOn}; diff --git a/src/operator/src/flow.rs b/src/operator/src/flow.rs index d21f7b2d9c..306dd83df1 100644 --- a/src/operator/src/flow.rs +++ b/src/operator/src/flow.rs @@ -20,8 +20,8 @@ use common_meta::key::flow::FlowMetadataManagerRef; use common_meta::node_manager::NodeManagerRef; use common_query::error::Result; use common_telemetry::tracing_context::TracingContext; -use futures::stream::FuturesUnordered; use futures::StreamExt; +use futures::stream::FuturesUnordered; use session::context::QueryContextRef; use snafu::{OptionExt, ResultExt}; @@ -101,7 +101,7 @@ impl FlowServiceOperator { let mut final_result: Option = None; for node in all_flow_nodes { let res = { - use api::v1::flow::{flow_request, FlowRequest, FlushFlow}; + use api::v1::flow::{FlowRequest, FlushFlow, flow_request}; let flush_req = FlowRequest { header: Some(FlowRequestHeader { tracing_context: TracingContext::from_current_span().to_w3c(), diff --git a/src/operator/src/insert.rs b/src/operator/src/insert.rs index 8d6a11bdaf..44f6233aa1 100644 --- a/src/operator/src/insert.rs +++ b/src/operator/src/insert.rs @@ -28,15 +28,15 @@ use api::v1::{ use catalog::CatalogManagerRef; use client::{OutputData, OutputMeta}; use common_catalog::consts::{ - default_engine, trace_services_table_name, PARENT_SPAN_ID_COLUMN, SERVICE_NAME_COLUMN, - TRACE_ID_COLUMN, TRACE_TABLE_NAME, TRACE_TABLE_NAME_SESSION_KEY, + PARENT_SPAN_ID_COLUMN, SERVICE_NAME_COLUMN, TRACE_ID_COLUMN, TRACE_TABLE_NAME, + TRACE_TABLE_NAME_SESSION_KEY, default_engine, trace_services_table_name, }; use common_grpc_expr::util::ColumnExpr; use common_meta::cache::TableFlownodeSetCacheRef; use common_meta::node_manager::{AffectedRows, NodeManagerRef}; use common_meta::peer::Peer; -use common_query::prelude::{GREPTIME_TIMESTAMP, GREPTIME_VALUE}; use common_query::Output; +use common_query::prelude::{GREPTIME_TIMESTAMP, GREPTIME_VALUE}; use common_telemetry::tracing_context::TracingContext; use common_telemetry::{error, info, warn}; use datatypes::schema::SkippingIndexOptions; @@ -44,8 +44,8 @@ use futures_util::future; use meter_macros::write_meter; use partition::manager::PartitionRuleManagerRef; use session::context::QueryContextRef; -use snafu::prelude::*; use snafu::ResultExt; +use snafu::prelude::*; use sql::partition::partition_rule_for_hexstring; use sql::statements::create::Partitions; use sql::statements::insert::Insert; @@ -56,13 +56,13 @@ use store_api::mito_engine_options::{ APPEND_MODE_KEY, COMPACTION_TYPE, COMPACTION_TYPE_TWCS, MERGE_MODE_KEY, TWCS_TIME_WINDOW, }; use store_api::storage::{RegionId, TableId}; +use table::TableRef; use table::metadata::TableInfo; use table::requests::{ - InsertRequest as 
TableInsertRequest, AUTO_CREATE_TABLE_KEY, TABLE_DATA_MODEL, + AUTO_CREATE_TABLE_KEY, InsertRequest as TableInsertRequest, TABLE_DATA_MODEL, TABLE_DATA_MODEL_TRACE_V1, VALID_TABLE_OPTION_KEYS, }; use table::table_reference::TableReference; -use table::TableRef; use crate::error::{ CatalogSnafu, ColumnOptionsSnafu, CreatePartitionRulesSnafu, FindRegionLeaderSnafu, @@ -72,7 +72,7 @@ use crate::expr_helper; use crate::region_req_factory::RegionRequestFactory; use crate::req_convert::common::preprocess_row_insert_requests; use crate::req_convert::insert::{ - fill_reqs_with_impure_default, ColumnToRow, RowToRegion, StatementToRegion, TableToRegion, + ColumnToRow, RowToRegion, StatementToRegion, TableToRegion, fill_reqs_with_impure_default, }; use crate::statement::StatementExecutor; @@ -849,17 +849,17 @@ impl Inserter { for col in &mut rows.schema { match col.semantic_type { x if x == SemanticType::Timestamp as i32 => { - if let Some(ref ts_name) = ts_col_name { - if col.column_name != *ts_name { - col.column_name = ts_name.clone(); - } + if let Some(ref ts_name) = ts_col_name + && col.column_name != *ts_name + { + col.column_name = ts_name.clone(); } } x if x == SemanticType::Field as i32 => { - if let Some(ref field_name) = field_col_name { - if col.column_name != *field_name { - col.column_name = field_name.clone(); - } + if let Some(ref field_name) = field_col_name + && col.column_name != *field_name + { + col.column_name = field_name.clone(); } } _ => {} @@ -1184,9 +1184,9 @@ mod tests { use datatypes::schema::ColumnSchema; use moka::future::Cache; use session::context::QueryContext; + use table::TableRef; use table::dist_table::DummyDataSource; use table::metadata::{TableInfoBuilder, TableMetaBuilder, TableType}; - use table::TableRef; use super::*; use crate::tests::{create_partition_rule_manager, prepare_mocked_backend}; diff --git a/src/operator/src/req_convert/common.rs b/src/operator/src/req_convert/common.rs index 79a661be7f..9221d6522e 100644 --- a/src/operator/src/req_convert/common.rs +++ b/src/operator/src/req_convert/common.rs @@ -27,8 +27,8 @@ use api::v1::{ use common_base::BitVec; use datatypes::prelude::ConcreteDataType; use datatypes::vectors::VectorRef; -use snafu::prelude::*; use snafu::ResultExt; +use snafu::prelude::*; use table::metadata::TableInfo; use crate::error::{ diff --git a/src/operator/src/req_convert/common/partitioner.rs b/src/operator/src/req_convert/common/partitioner.rs index fbee58bcdd..4e2d288488 100644 --- a/src/operator/src/req_convert/common/partitioner.rs +++ b/src/operator/src/req_convert/common/partitioner.rs @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use api::v1::region::{DeleteRequest, InsertRequest}; use api::v1::Rows; +use api::v1::region::{DeleteRequest, InsertRequest}; use partition::manager::PartitionRuleManager; use snafu::ResultExt; use store_api::storage::RegionId; diff --git a/src/operator/src/req_convert/delete/row_to_region.rs b/src/operator/src/req_convert/delete/row_to_region.rs index 7dca98003f..1a6b041004 100644 --- a/src/operator/src/req_convert/delete/row_to_region.rs +++ b/src/operator/src/req_convert/delete/row_to_region.rs @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use api::v1::region::DeleteRequests as RegionDeleteRequests; use api::v1::RowDeleteRequests; +use api::v1::region::DeleteRequests as RegionDeleteRequests; use catalog::CatalogManager; use partition::manager::PartitionRuleManager; use session::context::QueryContext; diff --git a/src/operator/src/req_convert/delete/table_to_region.rs b/src/operator/src/req_convert/delete/table_to_region.rs index 31bf4477f7..d68a8987cb 100644 --- a/src/operator/src/req_convert/delete/table_to_region.rs +++ b/src/operator/src/req_convert/delete/table_to_region.rs @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use api::v1::region::DeleteRequests as RegionDeleteRequests; use api::v1::Rows; +use api::v1::region::DeleteRequests as RegionDeleteRequests; use partition::manager::PartitionRuleManager; use table::metadata::TableInfo; use table::requests::DeleteRequest as TableDeleteRequest; diff --git a/src/operator/src/req_convert/insert/row_to_region.rs b/src/operator/src/req_convert/insert/row_to_region.rs index fe37be0140..f08a7cac85 100644 --- a/src/operator/src/req_convert/insert/row_to_region.rs +++ b/src/operator/src/req_convert/insert/row_to_region.rs @@ -13,8 +13,8 @@ // limitations under the License. use ahash::{HashMap, HashSet}; -use api::v1::region::{InsertRequest, InsertRequests as RegionInsertRequests}; use api::v1::RowInsertRequests; +use api::v1::region::{InsertRequest, InsertRequests as RegionInsertRequests}; use partition::manager::PartitionRuleManager; use snafu::OptionExt; use store_api::storage::{RegionId, RegionNumber}; diff --git a/src/operator/src/req_convert/insert/stmt_to_region.rs b/src/operator/src/req_convert/insert/stmt_to_region.rs index d82fc56f49..aca31b289a 100644 --- a/src/operator/src/req_convert/insert/stmt_to_region.rs +++ b/src/operator/src/req_convert/insert/stmt_to_region.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use api::helper::{value_to_grpc_value, ColumnDataTypeWrapper}; +use api::helper::{ColumnDataTypeWrapper, value_to_grpc_value}; use api::v1::column_def::options_from_column_schema; use api::v1::region::InsertRequests as RegionInsertRequests; use api::v1::{ColumnSchema as GrpcColumnSchema, Row, Rows, Value as GrpcValue}; @@ -21,12 +21,12 @@ use common_time::Timezone; use datatypes::schema::{ColumnSchema, SchemaRef}; use partition::manager::PartitionRuleManager; use session::context::{QueryContext, QueryContextRef}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use sql::ast::ObjectNamePartExt; use sql::statements::insert::Insert; use sqlparser::ast::{ObjectName, Value as SqlValue}; -use table::metadata::TableInfoRef; use table::TableRef; +use table::metadata::TableInfoRef; use crate::error::{ CatalogSnafu, ColumnDataTypeSnafu, ColumnDefaultValueSnafu, ColumnNoneDefaultValueSnafu, diff --git a/src/operator/src/req_convert/insert/table_to_region.rs b/src/operator/src/req_convert/insert/table_to_region.rs index 5a4db68441..83899eb1e6 100644 --- a/src/operator/src/req_convert/insert/table_to_region.rs +++ b/src/operator/src/req_convert/insert/table_to_region.rs @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use api::v1::region::InsertRequests as RegionInsertRequests; use api::v1::Rows; +use api::v1::region::InsertRequests as RegionInsertRequests; use partition::manager::PartitionRuleManager; use table::metadata::TableInfo; use table::requests::InsertRequest as TableInsertRequest; diff --git a/src/operator/src/statement.rs b/src/operator/src/statement.rs index f20047f8ad..9434cc37e9 100644 --- a/src/operator/src/statement.rs +++ b/src/operator/src/statement.rs @@ -30,12 +30,12 @@ use std::collections::HashMap; use std::sync::Arc; use api::v1::RowInsertRequests; +use catalog::CatalogManagerRef; use catalog::kvbackend::KvBackendCatalogManager; use catalog::process_manager::ProcessManagerRef; -use catalog::CatalogManagerRef; +use client::RecordBatches; use client::error::{ExternalSnafu as ClientExternalSnafu, Result as ClientResult}; use client::inserter::{InsertOptions, Inserter}; -use client::RecordBatches; use common_error::ext::BoxedError; use common_meta::cache::TableRouteCacheRef; use common_meta::cache_invalidator::CacheInvalidatorRef; @@ -47,33 +47,33 @@ use common_meta::kv_backend::KvBackendRef; use common_meta::procedure_executor::ProcedureExecutorRef; use common_query::Output; use common_telemetry::tracing; -use common_time::range::TimestampRange; use common_time::Timestamp; +use common_time::range::TimestampRange; use datafusion_expr::LogicalPlan; use datatypes::prelude::ConcreteDataType; use humantime::format_duration; use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef}; -use query::parser::QueryStatement; use query::QueryEngineRef; +use query::parser::QueryStatement; use session::context::{Channel, QueryContextBuilder, QueryContextRef}; use session::table_name::table_idents_to_full_name; use set::{set_query_timeout, set_read_preference}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use sql::ast::ObjectNamePartExt; +use sql::statements::OptionMap; use sql::statements::copy::{ CopyDatabase, CopyDatabaseArgument, CopyQueryToArgument, CopyTable, CopyTableArgument, }; use sql::statements::set_variables::SetVariables; use sql::statements::show::ShowCreateTableVariant; use sql::statements::statement::Statement; -use sql::statements::OptionMap; use sql::util::format_raw_object_name; use sqlparser::ast::ObjectName; use store_api::mito_engine_options::{APPEND_MODE_KEY, TTL_KEY}; +use table::TableRef; use table::requests::{CopyDatabaseRequest, CopyDirection, CopyQueryToRequest, CopyTableRequest}; use table::table_name::TableName; use table::table_reference::TableReference; -use table::TableRef; use self::set::{ set_bytea_output, set_datestyle, set_search_path, set_timezone, validate_client_encoding, @@ -440,7 +440,7 @@ impl StatementExecutor { return NotSupportedSnafu { feat: format!("Unsupported set variable {}", var_name), } - .fail() + .fail(); } }, "STATEMENT_TIMEOUT" => { diff --git a/src/operator/src/statement/admin.rs b/src/operator/src/statement/admin.rs index 4556f66de0..9374fe653c 100644 --- a/src/operator/src/statement/admin.rs +++ b/src/operator/src/statement/admin.rs @@ -29,7 +29,7 @@ use datatypes::schema::{ColumnSchema, Schema}; use datatypes::value::Value; use datatypes::vectors::VectorRef; use session::context::QueryContextRef; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use sql::ast::{Expr, FunctionArg, FunctionArgExpr, FunctionArguments, Value as SqlValue}; use sql::statements::admin::Admin; diff --git a/src/operator/src/statement/copy_database.rs 
b/src/operator/src/statement/copy_database.rs index 662c2a9fbf..c7cf0b47b0 100644 --- a/src/operator/src/statement/copy_database.rs +++ b/src/operator/src/statement/copy_database.rs @@ -23,7 +23,7 @@ use common_telemetry::{debug, error, info, tracing}; use object_store::Entry; use regex::Regex; use session::context::QueryContextRef; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::metric_engine_consts::{LOGICAL_TABLE_METADATA_KEY, METRIC_ENGINE_NAME}; use table::requests::{CopyDatabaseRequest, CopyDirection, CopyTableRequest}; use table::table_reference::TableReference; @@ -231,9 +231,9 @@ async fn list_files_to_copy(req: &CopyDatabaseRequest, suffix: &str) -> error::R mod tests { use std::collections::HashSet; + use object_store::ObjectStore; use object_store::services::Fs; use object_store::util::normalize_dir; - use object_store::ObjectStore; use path_slash::PathExt; use table::requests::CopyDatabaseRequest; diff --git a/src/operator/src/statement/copy_table_from.rs b/src/operator/src/statement/copy_table_from.rs index 90529ded01..550bdf52a6 100644 --- a/src/operator/src/statement/copy_table_from.rs +++ b/src/operator/src/statement/copy_table_from.rs @@ -20,22 +20,22 @@ use std::sync::Arc; use client::{Output, OutputData, OutputMeta}; use common_base::readable_size::ReadableSize; use common_datasource::file_format::csv::CsvFormat; -use common_datasource::file_format::orc::{infer_orc_schema, new_orc_stream_reader, ReaderAdapter}; +use common_datasource::file_format::orc::{ReaderAdapter, infer_orc_schema, new_orc_stream_reader}; use common_datasource::file_format::{FileFormat, Format}; use common_datasource::lister::{Lister, Source}; -use common_datasource::object_store::{build_backend, parse_url, FS_SCHEMA}; +use common_datasource::object_store::{FS_SCHEMA, build_backend, parse_url}; use common_datasource::util::find_dir_and_filename; use common_query::{OutputCost, OutputRows}; -use common_recordbatch::adapter::RecordBatchStreamTypeAdapter; use common_recordbatch::DfSendableRecordBatchStream; +use common_recordbatch::adapter::RecordBatchStreamTypeAdapter; use common_telemetry::{debug, tracing}; use datafusion::datasource::listing::PartitionedFile; use datafusion::datasource::object_store::ObjectStoreUrl; use datafusion::datasource::physical_plan::{ CsvSource, FileGroup, FileScanConfigBuilder, FileSource, FileStream, JsonSource, }; -use datafusion::parquet::arrow::arrow_reader::ArrowReaderMetadata; use datafusion::parquet::arrow::ParquetRecordBatchStreamBuilder; +use datafusion::parquet::arrow::arrow_reader::ArrowReaderMetadata; use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet; use datafusion_expr::Expr; use datatypes::arrow::compute::can_cast_types; @@ -45,7 +45,7 @@ use futures_util::StreamExt; use object_store::{Entry, EntryMode, ObjectStore}; use regex::Regex; use session::context::QueryContextRef; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use table::requests::{CopyTableRequest, InsertRequest}; use table::table_reference::TableReference; use tokio_util::compat::FuturesAsyncReadCompatExt; @@ -467,10 +467,10 @@ impl StatementExecutor { insert_cost += cost; } - if let Some(max_insert_rows) = max_insert_rows { - if rows_inserted >= max_insert_rows { - return Ok(gen_insert_output(rows_inserted, insert_cost)); - } + if let Some(max_insert_rows) = max_insert_rows + && rows_inserted >= max_insert_rows + { + return Ok(gen_insert_output(rows_inserted, insert_cost)); } } @@ -511,10 +511,10 @@ async fn 
batch_insert( /// Custom type compatibility check for GreptimeDB that handles Map -> Binary (JSON) conversion fn can_cast_types_for_greptime(from: &ArrowDataType, to: &ArrowDataType) -> bool { // Handle Map -> Binary conversion for JSON types - if let ArrowDataType::Map(_, _) = from { - if let ArrowDataType::Binary = to { - return true; - } + if let ArrowDataType::Map(_, _) = from + && let ArrowDataType::Binary = to + { + return true; } // For all other cases, use Arrow's built-in can_cast_types diff --git a/src/operator/src/statement/copy_table_to.rs b/src/operator/src/statement/copy_table_to.rs index 023641e83f..00261139b1 100644 --- a/src/operator/src/statement/copy_table_to.rs +++ b/src/operator/src/statement/copy_table_to.rs @@ -17,17 +17,17 @@ use std::sync::Arc; use client::OutputData; use common_base::readable_size::ReadableSize; +use common_datasource::file_format::Format; use common_datasource::file_format::csv::stream_to_csv; use common_datasource::file_format::json::stream_to_json; use common_datasource::file_format::parquet::stream_to_parquet; -use common_datasource::file_format::Format; use common_datasource::object_store::{build_backend, parse_url}; use common_datasource::util::find_dir_and_filename; use common_query::Output; use common_recordbatch::adapter::DfRecordBatchStreamAdapter; use common_recordbatch::{ - map_json_type_to_string, map_json_type_to_string_schema, RecordBatchStream, - SendableRecordBatchMapper, SendableRecordBatchStream, + RecordBatchStream, SendableRecordBatchMapper, SendableRecordBatchStream, + map_json_type_to_string, map_json_type_to_string_schema, }; use common_telemetry::{debug, tracing}; use datafusion::datasource::DefaultTableSource; diff --git a/src/operator/src/statement/cursor.rs b/src/operator/src/statement/cursor.rs index 85de4ef366..f5d07da9f9 100644 --- a/src/operator/src/statement/cursor.rs +++ b/src/operator/src/statement/cursor.rs @@ -13,8 +13,8 @@ // limitations under the License. 
use common_query::{Output, OutputData}; -use common_recordbatch::cursor::RecordBatchStreamCursor; use common_recordbatch::RecordBatches; +use common_recordbatch::cursor::RecordBatchStreamCursor; use common_telemetry::tracing; use query::parser::QueryStatement; use session::context::QueryContextRef; diff --git a/src/operator/src/statement/ddl.rs b/src/operator/src/statement/ddl.rs index 76e066aae4..96cb230418 100644 --- a/src/operator/src/statement/ddl.rs +++ b/src/operator/src/statement/ddl.rs @@ -18,22 +18,22 @@ use std::sync::Arc; use api::helper::ColumnDataTypeWrapper; use api::v1::meta::CreateFlowTask as PbCreateFlowTask; use api::v1::{ - column_def, AlterDatabaseExpr, AlterTableExpr, CreateFlowExpr, CreateTableExpr, CreateViewExpr, + AlterDatabaseExpr, AlterTableExpr, CreateFlowExpr, CreateTableExpr, CreateViewExpr, column_def, }; #[cfg(feature = "enterprise")] use api::v1::{ - meta::CreateTriggerTask as PbCreateTriggerTask, CreateTriggerExpr as PbCreateTriggerExpr, + CreateTriggerExpr as PbCreateTriggerExpr, meta::CreateTriggerTask as PbCreateTriggerTask, }; use catalog::CatalogManagerRef; use chrono::Utc; -use common_catalog::consts::{is_readonly_schema, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; +use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, is_readonly_schema}; use common_catalog::{format_full_flow_name, format_full_table_name}; use common_error::ext::BoxedError; use common_meta::cache_invalidator::Context; use common_meta::ddl::create_flow::FlowType; use common_meta::instruction::CacheIdent; -use common_meta::key::schema_name::{SchemaName, SchemaNameKey}; use common_meta::key::NAME_PATTERN; +use common_meta::key::schema_name::{SchemaName, SchemaNameKey}; use common_meta::procedure_executor::ExecutorContext; #[cfg(feature = "enterprise")] use common_meta::rpc::ddl::trigger::CreateTriggerTask; @@ -62,7 +62,7 @@ use query::sql::create_table_stmt; use regex::Regex; use session::context::QueryContextRef; use session::table_name::table_idents_to_full_name; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use sql::parser::{ParseOptions, ParserContext}; #[cfg(feature = "enterprise")] use sql::statements::alter::trigger::AlterTrigger; @@ -76,11 +76,11 @@ use sql::statements::statement::Statement; use sqlparser::ast::{Expr, Ident, UnaryOperator, Value as ParserValue}; use store_api::metric_engine_consts::{LOGICAL_TABLE_METADATA_KEY, METRIC_ENGINE_NAME}; use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan}; +use table::TableRef; use table::dist_table::DistTable; use table::metadata::{self, RawTableInfo, RawTableMeta, TableId, TableInfo, TableType}; -use table::requests::{AlterKind, AlterTableRequest, TableOptions, COMMENT_KEY}; +use table::requests::{AlterKind, AlterTableRequest, COMMENT_KEY, TableOptions}; use table::table_name::TableName; -use table::TableRef; use crate::error::{ self, AlterExprToRequestSnafu, BuildDfLogicalPlanSnafu, CatalogSnafu, ColumnDataTypeSnafu, @@ -93,8 +93,8 @@ use crate::error::{ ViewAlreadyExistsSnafu, }; use crate::expr_helper; -use crate::statement::show::create_partitions_stmt; use crate::statement::StatementExecutor; +use crate::statement::show::create_partitions_stmt; lazy_static! 
{ pub static ref NAME_PATTERN_REG: Regex = Regex::new(&format!("^{NAME_PATTERN}$")).unwrap(); @@ -379,9 +379,16 @@ impl StatementExecutor { .await?; let table_ids = resp.table_ids; - ensure!(table_ids.len() == raw_tables_info.len(), CreateLogicalTablesSnafu { - reason: format!("The number of tables is inconsistent with the expected number to be created, expected: {}, actual: {}", raw_tables_info.len(), table_ids.len()) - }); + ensure!( + table_ids.len() == raw_tables_info.len(), + CreateLogicalTablesSnafu { + reason: format!( + "The number of tables is inconsistent with the expected number to be created, expected: {}, actual: {}", + raw_tables_info.len(), + table_ids.len() + ) + } + ); info!("Successfully created logical tables: {:?}", table_ids); for (i, table_info) in raw_tables_info.iter_mut().enumerate() { diff --git a/src/operator/src/statement/set.rs b/src/operator/src/statement/set.rs index 0f7492b6ea..c4bf3758a0 100644 --- a/src/operator/src/statement/set.rs +++ b/src/operator/src/statement/set.rs @@ -18,11 +18,11 @@ use std::time::Duration; use common_time::Timezone; use lazy_static::lazy_static; use regex::Regex; +use session::ReadPreference; use session::context::Channel::Postgres; use session::context::QueryContextRef; use session::session_config::{PGByteaOutputValue, PGDateOrder, PGDateTimeStyle}; -use session::ReadPreference; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use sql::ast::{Expr, Ident, Value}; use sql::statements::set_variables::SetVariables; use sqlparser::ast::ValueWithSpan; @@ -61,7 +61,7 @@ pub fn set_read_preference(exprs: Vec, ctx: QueryContextRef) -> Result<()> expr, ), } - .fail() + .fail(); } } Ok(()) @@ -95,7 +95,7 @@ pub fn set_timezone(exprs: Vec, ctx: QueryContextRef) -> Result<()> { return NotSupportedSnafu { feat: format!("Invalid timezone expr {} in set variable statement", tz), } - .fail() + .fail(); } } Ok(()) @@ -316,7 +316,7 @@ pub fn set_query_timeout(exprs: Vec, ctx: QueryContextRef) -> Result<()> { return NotSupportedSnafu { feat: format!("Invalid timeout expr {} in set variable statement", timeout), } - .fail() + .fail(); } } Ok(()) diff --git a/src/operator/src/statement/show.rs b/src/operator/src/statement/show.rs index 9cf2f7ff04..b98ff1dab5 100644 --- a/src/operator/src/statement/show.rs +++ b/src/operator/src/statement/show.rs @@ -21,15 +21,15 @@ use session::context::QueryContextRef; use session::table_name::table_idents_to_full_name; use snafu::{OptionExt, ResultExt}; use sql::ast::ObjectNamePartExt; +use sql::statements::OptionMap; use sql::statements::create::Partitions; use sql::statements::show::{ ShowColumns, ShowCreateFlow, ShowCreateView, ShowDatabases, ShowFlows, ShowIndex, ShowKind, ShowProcessList, ShowRegion, ShowTableStatus, ShowTables, ShowVariables, ShowViews, }; -use sql::statements::OptionMap; +use table::TableRef; use table::metadata::{TableInfo, TableType}; use table::table_name::TableName; -use table::TableRef; use crate::error::{ self, CatalogSnafu, ExecLogicalPlanSnafu, ExecuteStatementSnafu, ExternalSnafu, diff --git a/src/operator/src/statement/tql.rs b/src/operator/src/statement/tql.rs index 5eae6dc006..cc4adc7ca8 100644 --- a/src/operator/src/statement/tql.rs +++ b/src/operator/src/statement/tql.rs @@ -18,8 +18,8 @@ use common_query::Output; use common_telemetry::tracing; use datafusion_expr::LogicalPlan; use query::parser::{ - PromQuery, QueryLanguageParser, ANALYZE_NODE_NAME, ANALYZE_VERBOSE_NODE_NAME, - DEFAULT_LOOKBACK_STRING, EXPLAIN_NODE_NAME, 
EXPLAIN_VERBOSE_NODE_NAME, + ANALYZE_NODE_NAME, ANALYZE_VERBOSE_NODE_NAME, DEFAULT_LOOKBACK_STRING, EXPLAIN_NODE_NAME, + EXPLAIN_VERBOSE_NODE_NAME, PromQuery, QueryLanguageParser, }; use session::context::QueryContextRef; use snafu::ResultExt; diff --git a/src/operator/src/tests/kv_backend.rs b/src/operator/src/tests/kv_backend.rs index 2106c46a76..d21905cea3 100644 --- a/src/operator/src/tests/kv_backend.rs +++ b/src/operator/src/tests/kv_backend.rs @@ -16,8 +16,8 @@ use std::sync::Arc; use common_meta::key::catalog_name::{CatalogManager, CatalogNameKey}; use common_meta::key::schema_name::{SchemaManager, SchemaNameKey}; -use common_meta::kv_backend::memory::MemoryKvBackend; use common_meta::kv_backend::KvBackendRef; +use common_meta::kv_backend::memory::MemoryKvBackend; pub async fn prepare_mocked_backend() -> KvBackendRef { let backend = Arc::new(MemoryKvBackend::default()); diff --git a/src/operator/src/tests/partition_manager.rs b/src/operator/src/tests/partition_manager.rs index c9f6a5cbff..2ecf338bb7 100644 --- a/src/operator/src/tests/partition_manager.rs +++ b/src/operator/src/tests/partition_manager.rs @@ -15,9 +15,9 @@ use std::collections::{BTreeMap, HashMap}; use std::sync::Arc; -use common_meta::cache::{new_table_route_cache, TableRouteCacheRef}; -use common_meta::key::table_route::TableRouteValue; +use common_meta::cache::{TableRouteCacheRef, new_table_route_cache}; use common_meta::key::TableMetadataManager; +use common_meta::key::table_route::TableRouteValue; use common_meta::kv_backend::KvBackendRef; use common_meta::peer::Peer; use common_meta::rpc::router::{LegacyPartition, Region, RegionRoute}; diff --git a/src/partition/benches/bench_split_record_batch.rs b/src/partition/benches/bench_split_record_batch.rs index a3aa2c8e74..e677333a25 100644 --- a/src/partition/benches/bench_split_record_batch.rs +++ b/src/partition/benches/bench_split_record_batch.rs @@ -15,14 +15,14 @@ use std::sync::Arc; use std::vec; -use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use criterion::{Criterion, black_box, criterion_group, criterion_main}; use datatypes::arrow::array::{ArrayRef, Int32Array, StringArray, TimestampMillisecondArray}; use datatypes::arrow::datatypes::{DataType, Field, Schema, TimeUnit}; use datatypes::arrow::record_batch::RecordBatch; use datatypes::value::Value; -use partition::expr::{col, Operand}; -use partition::multi_dim::MultiDimPartitionRule; use partition::PartitionRule; +use partition::expr::{Operand, col}; +use partition::multi_dim::MultiDimPartitionRule; use rand::Rng; use store_api::storage::RegionNumber; diff --git a/src/partition/src/checker.rs b/src/partition/src/checker.rs index c613c2c85b..c88941e4df 100644 --- a/src/partition/src/checker.rs +++ b/src/partition/src/checker.rs @@ -19,7 +19,7 @@ use datatypes::arrow::array::{BooleanArray, Float64Array, Float64Builder, Record use datatypes::arrow::datatypes::{DataType, Field, Schema}; use datatypes::value::OrderedF64; -use crate::collider::{Collider, CHECK_STEP, NORMALIZE_STEP}; +use crate::collider::{CHECK_STEP, Collider, NORMALIZE_STEP}; use crate::error::{ CheckpointNotCoveredSnafu, CheckpointOverlappedSnafu, DuplicateExprSnafu, Result, }; diff --git a/src/partition/src/collider.rs b/src/partition/src/collider.rs index 67bf89b806..1bd5000f9d 100644 --- a/src/partition/src/collider.rs +++ b/src/partition/src/collider.rs @@ -30,8 +30,8 @@ use std::fmt::Debug; use std::sync::Arc; use datafusion_expr::Operator; -use datafusion_physical_expr::expressions::{col, lit, BinaryExpr}; use 
datafusion_physical_expr::PhysicalExpr; +use datafusion_physical_expr::expressions::{BinaryExpr, col, lit}; use datatypes::arrow::datatypes::Schema; use datatypes::value::{OrderedF64, OrderedFloat, Value}; @@ -363,33 +363,33 @@ impl<'a> Collider<'a> { match (lhs, rhs) { (Operand::Column(col), Operand::Value(val)) => { - if let Some(column_values) = normalized_values.get(col) { - if let Some(&normalized_val) = column_values.get(val) { - return Ok(NucleonExpr { - column: col.clone(), - op: gluon_op, - value: normalized_val, - }); - } + if let Some(column_values) = normalized_values.get(col) + && let Some(&normalized_val) = column_values.get(val) + { + return Ok(NucleonExpr { + column: col.clone(), + op: gluon_op, + value: normalized_val, + }); } } (Operand::Value(val), Operand::Column(col)) => { - if let Some(column_values) = normalized_values.get(col) { - if let Some(&normalized_val) = column_values.get(val) { - // Flip the operation for value op column - let flipped_op = match gluon_op { - GluonOp::Lt => GluonOp::Gt, - GluonOp::LtEq => GluonOp::GtEq, - GluonOp::Gt => GluonOp::Lt, - GluonOp::GtEq => GluonOp::LtEq, - op => op, // Eq and NotEq remain the same - }; - return Ok(NucleonExpr { - column: col.clone(), - op: flipped_op, - value: normalized_val, - }); - } + if let Some(column_values) = normalized_values.get(col) + && let Some(&normalized_val) = column_values.get(val) + { + // Flip the operation for value op column + let flipped_op = match gluon_op { + GluonOp::Lt => GluonOp::Gt, + GluonOp::LtEq => GluonOp::GtEq, + GluonOp::Gt => GluonOp::Lt, + GluonOp::GtEq => GluonOp::LtEq, + op => op, // Eq and NotEq remain the same + }; + return Ok(NucleonExpr { + column: col.clone(), + op: flipped_op, + value: normalized_val, + }); } } _ => {} @@ -488,9 +488,11 @@ mod test { assert_eq!(collider.atomic_exprs[0].source_expr_index, 0); // Test simple AND - let exprs = vec![col("id") - .eq(Value::UInt32(1)) - .and(col("status").eq(Value::String("active".into())))]; + let exprs = vec![ + col("id") + .eq(Value::UInt32(1)) + .and(col("status").eq(Value::String("active".into()))), + ]; let collider = Collider::new(&exprs).unwrap(); assert_eq!(collider.atomic_exprs.len(), 1); diff --git a/src/partition/src/expr.rs b/src/partition/src/expr.rs index cb73bad690..c1c2213d9b 100644 --- a/src/partition/src/expr.rs +++ b/src/partition/src/expr.rs @@ -17,12 +17,12 @@ use std::sync::Arc; use api::v1::meta::Partition; use datafusion_common::{ScalarValue, ToDFSchema}; -use datafusion_expr::execution_props::ExecutionProps; use datafusion_expr::Expr; -use datafusion_physical_expr::{create_physical_expr, PhysicalExpr}; +use datafusion_expr::execution_props::ExecutionProps; +use datafusion_physical_expr::{PhysicalExpr, create_physical_expr}; use datatypes::arrow; use datatypes::value::{ - duration_to_scalar_value, time_to_scalar_value, timestamp_to_scalar_value, Value, + Value, duration_to_scalar_value, time_to_scalar_value, timestamp_to_scalar_value, }; use serde::{Deserialize, Serialize}; use snafu::ResultExt; @@ -97,7 +97,7 @@ impl Operand { return error::UnsupportedPartitionExprValueSnafu { value: other.clone(), } - .fail() + .fail(); } }; Ok(datafusion_expr::lit(scalar_value)) diff --git a/src/partition/src/manager.rs b/src/partition/src/manager.rs index 50aed6dd99..b8cf6cdb6b 100644 --- a/src/partition/src/manager.rs +++ b/src/partition/src/manager.rs @@ -21,7 +21,7 @@ use common_meta::key::table_route::{PhysicalTableRouteValue, TableRouteManager}; use common_meta::kv_backend::KvBackendRef; use 
common_meta::peer::Peer; use common_meta::rpc::router::{self, RegionRoute}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::storage::{RegionId, RegionNumber}; use table::metadata::{TableId, TableInfo}; @@ -29,7 +29,7 @@ use crate::error::{FindLeaderSnafu, Result}; use crate::expr::PartitionExpr; use crate::multi_dim::MultiDimPartitionRule; use crate::splitter::RowSplitter; -use crate::{error, PartitionRuleRef}; +use crate::{PartitionRuleRef, error}; #[async_trait::async_trait] pub trait TableRouteCacheInvalidator: Send + Sync { diff --git a/src/partition/src/multi_dim.rs b/src/partition/src/multi_dim.rs index a92f8e5ebe..a8c11d1ae8 100644 --- a/src/partition/src/multi_dim.rs +++ b/src/partition/src/multi_dim.rs @@ -14,8 +14,8 @@ use std::any::Any; use std::cmp::Ordering; -use std::collections::hash_map::Entry; use std::collections::HashMap; +use std::collections::hash_map::Entry; use std::sync::{Arc, RwLock}; use datafusion_expr::ColumnarValue; @@ -27,14 +27,14 @@ use datatypes::arrow::datatypes::Schema; use datatypes::prelude::Value; use datatypes::vectors::{Helper, VectorRef}; use serde::{Deserialize, Serialize}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::storage::RegionNumber; +use crate::PartitionRule; use crate::checker::PartitionChecker; use crate::error::{self, Result, UndefinedColumnSnafu}; use crate::expr::{Operand, PartitionExpr, RestrictedOp}; use crate::partition::RegionMask; -use crate::PartitionRule; /// The default region number when no partition exprs are matched. const DEFAULT_REGION: RegionNumber = 0; diff --git a/src/partition/src/splitter.rs b/src/partition/src/splitter.rs index b1b08a7c4f..28bad396a3 100644 --- a/src/partition/src/splitter.rs +++ b/src/partition/src/splitter.rs @@ -19,8 +19,8 @@ use api::v1::{ColumnSchema, Row, Rows}; use datatypes::value::Value; use store_api::storage::RegionNumber; -use crate::error::Result; use crate::PartitionRuleRef; +use crate::error::Result; pub struct RowSplitter { partition_rule: PartitionRuleRef, @@ -134,14 +134,14 @@ mod tests { use std::any::Any; use std::sync::Arc; + use api::v1::ColumnDataType; use api::v1::helper::{field_column_schema, tag_column_schema}; use api::v1::value::ValueData; - use api::v1::ColumnDataType; use serde::{Deserialize, Serialize}; use super::*; - use crate::partition::RegionMask; use crate::PartitionRule; + use crate::partition::RegionMask; fn mock_rows() -> Rows { let schema = vec![ diff --git a/src/pipeline/benches/processor.rs b/src/pipeline/benches/processor.rs index a2ef070786..74b136e873 100644 --- a/src/pipeline/benches/processor.rs +++ b/src/pipeline/benches/processor.rs @@ -14,9 +14,9 @@ use std::sync::Arc; -use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use criterion::{Criterion, black_box, criterion_group, criterion_main}; use pipeline::error::Result; -use pipeline::{parse, setup_pipeline, Content, Pipeline, PipelineContext, SchemaInfo}; +use pipeline::{Content, Pipeline, PipelineContext, SchemaInfo, parse, setup_pipeline}; use serde_json::Deserializer; use vrl::value::Value as VrlValue; diff --git a/src/pipeline/src/error.rs b/src/pipeline/src/error.rs index a42adb2d9f..ca26c9f5d7 100644 --- a/src/pipeline/src/error.rs +++ b/src/pipeline/src/error.rs @@ -402,14 +402,18 @@ pub enum Error { #[snafu(implicit)] location: Location, }, - #[snafu(display("Transform must have exactly one field specified as timestamp Index, but got {count}: {columns}"))] + 
#[snafu(display( + "Transform must have exactly one field specified as timestamp Index, but got {count}: {columns}" + ))] TransformTimestampIndexCount { count: usize, columns: String, #[snafu(implicit)] location: Location, }, - #[snafu(display("Exactly one time-related processor and one timestamp value is required to use auto transform. `ignore_missing` can not be set to true."))] + #[snafu(display( + "Exactly one time-related processor and one timestamp value is required to use auto transform. `ignore_missing` can not be set to true." + ))] AutoTransformOneTimestamp { #[snafu(implicit)] location: Location, @@ -552,7 +556,9 @@ pub enum Error { #[snafu(implicit)] location: Location, }, - #[snafu(display("Column datatype mismatch. For column: {column}, expected datatype: {expected}, actual datatype: {actual}"))] + #[snafu(display( + "Column datatype mismatch. For column: {column}, expected datatype: {expected}, actual datatype: {actual}" + ))] IdentifyPipelineColumnTypeMismatch { column: String, expected: String, @@ -578,9 +584,7 @@ pub enum Error { TableSuffixRequiredForDispatcherRule, #[snafu(display("Value is required for dispatcher rule"))] ValueRequiredForDispatcherRule, - #[snafu(display( - "Reached max nested levels when flattening JSON object: {max_nested_levels}" - ))] + #[snafu(display("Reached max nested levels when flattening JSON object: {max_nested_levels}"))] ReachedMaxNestedLevels { max_nested_levels: usize, #[snafu(implicit)] diff --git a/src/pipeline/src/etl.rs b/src/pipeline/src/etl.rs index 8f87f8305b..692b509f6e 100644 --- a/src/pipeline/src/etl.rs +++ b/src/pipeline/src/etl.rs @@ -23,7 +23,7 @@ use api::v1::Row; use common_time::timestamp::TimeUnit; use itertools::Itertools; use processor::{Processor, Processors}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use transform::Transforms; use vrl::core::Value as VrlValue; use yaml_rust::{Yaml, YamlLoader}; diff --git a/src/pipeline/src/etl/processor/cmcd.rs b/src/pipeline/src/etl/processor/cmcd.rs index cca6fbfe0d..4a81b1fccb 100644 --- a/src/pipeline/src/etl/processor/cmcd.rs +++ b/src/pipeline/src/etl/processor/cmcd.rs @@ -31,8 +31,8 @@ use crate::error::{ }; use crate::etl::field::Fields; use crate::etl::processor::{ - yaml_bool, yaml_new_field, yaml_new_fields, Processor, FIELDS_NAME, FIELD_NAME, - IGNORE_MISSING_NAME, + FIELD_NAME, FIELDS_NAME, IGNORE_MISSING_NAME, Processor, yaml_bool, yaml_new_field, + yaml_new_fields, }; pub(crate) const PROCESSOR_CMCD: &str = "cmcd"; @@ -363,14 +363,16 @@ mod tests { ( "prefix_nor", VrlValue::Bytes(Bytes::from("\"../300kbps/segment35.m4v\"")), - ), ], ), ( "nrr%3D%2212323-48763%22%2Csid%3D%226e2fb550-c457-11e9-bb97-0800200c9a66%22", vec![ - ("prefix_nrr", VrlValue::Bytes(Bytes::from("\"12323-48763\""))), + ( + "prefix_nrr", + VrlValue::Bytes(Bytes::from("\"12323-48763\"")), + ), ( "prefix_sid", VrlValue::Bytes(Bytes::from("\"6e2fb550-c457-11e9-bb97-0800200c9a66\"")), @@ -380,7 +382,10 @@ mod tests { ( "nor%3D%22..%252F300kbps%252Ftrack.m4v%22%2Cnrr%3D%2212323-48763%22%2Csid%3D%226e2fb550-c457-11e9-bb97-0800200c9a66%22", vec![ - ("prefix_nrr", VrlValue::Bytes(Bytes::from("\"12323-48763\""))), + ( + "prefix_nrr", + VrlValue::Bytes(Bytes::from("\"12323-48763\"")), + ), ( "prefix_sid", VrlValue::Bytes(Bytes::from("\"6e2fb550-c457-11e9-bb97-0800200c9a66\"")), @@ -416,7 +421,10 @@ mod tests { ), ("prefix_sf", VrlValue::Bytes(Bytes::from("d"))), ("prefix_br", VrlValue::Integer(3200)), - ("prefix_nrr", 
VrlValue::Bytes(Bytes::from("\"12323-48763\""))), + ( + "prefix_nrr", + VrlValue::Bytes(Bytes::from("\"12323-48763\"")), + ), ("prefix_pr", VrlValue::Float(NotNan::new(1.08).unwrap())), ("prefix_su", VrlValue::Boolean(true)), ("prefix_dl", VrlValue::Integer(18500)), diff --git a/src/pipeline/src/etl/processor/csv.rs b/src/pipeline/src/etl/processor/csv.rs index 77d27bbbdb..feb0ba6321 100644 --- a/src/pipeline/src/etl/processor/csv.rs +++ b/src/pipeline/src/etl/processor/csv.rs @@ -30,8 +30,8 @@ use crate::error::{ }; use crate::etl::field::Fields; use crate::etl::processor::{ - yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, Processor, FIELDS_NAME, FIELD_NAME, - IGNORE_MISSING_NAME, + FIELD_NAME, FIELDS_NAME, IGNORE_MISSING_NAME, Processor, yaml_bool, yaml_new_field, + yaml_new_fields, yaml_string, }; pub(crate) const PROCESSOR_CSV: &str = "csv"; diff --git a/src/pipeline/src/etl/processor/date.rs b/src/pipeline/src/etl/processor/date.rs index dd74d97943..0121015b99 100644 --- a/src/pipeline/src/etl/processor/date.rs +++ b/src/pipeline/src/etl/processor/date.rs @@ -27,8 +27,8 @@ use crate::error::{ }; use crate::etl::field::Fields; use crate::etl::processor::{ - yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, yaml_strings, Processor, FIELDS_NAME, - FIELD_NAME, IGNORE_MISSING_NAME, + FIELD_NAME, FIELDS_NAME, IGNORE_MISSING_NAME, Processor, yaml_bool, yaml_new_field, + yaml_new_fields, yaml_string, yaml_strings, }; pub(crate) const PROCESSOR_DATE: &str = "date"; @@ -249,7 +249,7 @@ mod tests { use chrono_tz::Asia::Tokyo; - use crate::etl::processor::date::{try_parse, DateProcessor}; + use crate::etl::processor::date::{DateProcessor, try_parse}; #[test] fn test_try_parse() { diff --git a/src/pipeline/src/etl/processor/decolorize.rs b/src/pipeline/src/etl/processor/decolorize.rs index 5cbdcd5c3c..9f2ff2a311 100644 --- a/src/pipeline/src/etl/processor/decolorize.rs +++ b/src/pipeline/src/etl/processor/decolorize.rs @@ -30,7 +30,7 @@ use crate::error::{ }; use crate::etl::field::Fields; use crate::etl::processor::{ - yaml_bool, yaml_new_field, yaml_new_fields, FIELDS_NAME, FIELD_NAME, IGNORE_MISSING_NAME, + FIELD_NAME, FIELDS_NAME, IGNORE_MISSING_NAME, yaml_bool, yaml_new_field, yaml_new_fields, }; pub(crate) const PROCESSOR_DECOLORIZE: &str = "decolorize"; diff --git a/src/pipeline/src/etl/processor/digest.rs b/src/pipeline/src/etl/processor/digest.rs index 5639b69183..5ad7f595a1 100644 --- a/src/pipeline/src/etl/processor/digest.rs +++ b/src/pipeline/src/etl/processor/digest.rs @@ -32,7 +32,7 @@ use crate::error::{ }; use crate::etl::field::Fields; use crate::etl::processor::{ - yaml_bool, yaml_new_field, yaml_new_fields, FIELDS_NAME, FIELD_NAME, IGNORE_MISSING_NAME, + FIELD_NAME, FIELDS_NAME, IGNORE_MISSING_NAME, yaml_bool, yaml_new_field, yaml_new_fields, }; pub(crate) const PROCESSOR_DIGEST: &str = "digest"; diff --git a/src/pipeline/src/etl/processor/dissect.rs b/src/pipeline/src/etl/processor/dissect.rs index 8ae2bb7aba..6694049253 100644 --- a/src/pipeline/src/etl/processor/dissect.rs +++ b/src/pipeline/src/etl/processor/dissect.rs @@ -30,8 +30,8 @@ use crate::error::{ }; use crate::etl::field::Fields; use crate::etl::processor::{ + FIELD_NAME, FIELDS_NAME, IGNORE_MISSING_NAME, PATTERN_NAME, PATTERNS_NAME, Processor, yaml_bool, yaml_new_field, yaml_new_fields, yaml_parse_string, yaml_parse_strings, yaml_string, - Processor, FIELDS_NAME, FIELD_NAME, IGNORE_MISSING_NAME, PATTERNS_NAME, PATTERN_NAME, }; pub(crate) const PROCESSOR_DISSECT: &str = "dissect"; @@ -911,7 +911,7 
@@ mod tests { ("", "Empty pattern is not allowed"), ( "%{name1}%{name2}", - "Invalid Pattern: '%{name1}%{name2}'. consecutive names are not allowed: 'name1' 'name2'" + "Invalid Pattern: '%{name1}%{name2}'. consecutive names are not allowed: 'name1' 'name2'", ), ( "%{} %{ident", @@ -967,7 +967,7 @@ mod tests { ), ( "%{*ip}", - "Invalid Pattern: '%{*ip}'. key and value not matched: 'ip'" + "Invalid Pattern: '%{*ip}'. key and value not matched: 'ip'", ), ( "%{*ip} %{*ip}", @@ -975,7 +975,7 @@ mod tests { ), ( "%{*ip1} %{&ip2}", - "Invalid Pattern: '%{*ip1} %{&ip2}'. key and value not matched: 'ip1,ip2'" + "Invalid Pattern: '%{*ip1} %{&ip2}'. key and value not matched: 'ip1,ip2'", ), ]; diff --git a/src/pipeline/src/etl/processor/epoch.rs b/src/pipeline/src/etl/processor/epoch.rs index ca0aec9413..db2cd6b3b2 100644 --- a/src/pipeline/src/etl/processor/epoch.rs +++ b/src/pipeline/src/etl/processor/epoch.rs @@ -24,13 +24,13 @@ use crate::error::{ }; use crate::etl::field::Fields; use crate::etl::processor::{ - yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, Processor, FIELDS_NAME, FIELD_NAME, - IGNORE_MISSING_NAME, + FIELD_NAME, FIELDS_NAME, IGNORE_MISSING_NAME, Processor, yaml_bool, yaml_new_field, + yaml_new_fields, yaml_string, }; use crate::etl::value::{ - MICROSECOND_RESOLUTION, MICRO_RESOLUTION, MILLISECOND_RESOLUTION, MILLI_RESOLUTION, - MS_RESOLUTION, NANOSECOND_RESOLUTION, NANO_RESOLUTION, NS_RESOLUTION, SECOND_RESOLUTION, - SEC_RESOLUTION, S_RESOLUTION, US_RESOLUTION, + MICRO_RESOLUTION, MICROSECOND_RESOLUTION, MILLI_RESOLUTION, MILLISECOND_RESOLUTION, + MS_RESOLUTION, NANO_RESOLUTION, NANOSECOND_RESOLUTION, NS_RESOLUTION, S_RESOLUTION, + SEC_RESOLUTION, SECOND_RESOLUTION, US_RESOLUTION, }; pub(crate) const PROCESSOR_EPOCH: &str = "epoch"; diff --git a/src/pipeline/src/etl/processor/filter.rs b/src/pipeline/src/etl/processor/filter.rs index 0dd559095b..a700a8f2d3 100644 --- a/src/pipeline/src/etl/processor/filter.rs +++ b/src/pipeline/src/etl/processor/filter.rs @@ -16,15 +16,15 @@ use ahash::{HashSet, HashSetExt}; use snafu::OptionExt; use vrl::prelude::Value as VrlValue; +use crate::Processor; use crate::error::{ Error, KeyMustBeStringSnafu, ProcessorExpectStringSnafu, ProcessorMissingFieldSnafu, Result, ValueMustBeMapSnafu, }; use crate::etl::field::Fields; use crate::etl::processor::{ - yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, yaml_strings, FIELDS_NAME, FIELD_NAME, + FIELD_NAME, FIELDS_NAME, yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, yaml_strings, }; -use crate::Processor; pub(crate) const PROCESSOR_FILTER: &str = "filter"; @@ -185,9 +185,9 @@ mod test { use vrl::prelude::{Bytes, Value as VrlValue}; use vrl::value::{KeyString, ObjectMap}; + use crate::Processor; use crate::etl::field::{Field, Fields}; use crate::etl::processor::filter::{FilterProcessor, MatchMode, MatchOp}; - use crate::Processor; #[test] fn test_eq() { diff --git a/src/pipeline/src/etl/processor/gsub.rs b/src/pipeline/src/etl/processor/gsub.rs index c4c44e0ace..8ec5ffffa3 100644 --- a/src/pipeline/src/etl/processor/gsub.rs +++ b/src/pipeline/src/etl/processor/gsub.rs @@ -24,8 +24,8 @@ use crate::error::{ }; use crate::etl::field::Fields; use crate::etl::processor::{ - yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, FIELDS_NAME, FIELD_NAME, - IGNORE_MISSING_NAME, PATTERN_NAME, + FIELD_NAME, FIELDS_NAME, IGNORE_MISSING_NAME, PATTERN_NAME, yaml_bool, yaml_new_field, + yaml_new_fields, yaml_string, }; pub(crate) const PROCESSOR_GSUB: &str = "gsub"; diff --git 
a/src/pipeline/src/etl/processor/join.rs b/src/pipeline/src/etl/processor/join.rs index 3712dd70c7..64592d1ee5 100644 --- a/src/pipeline/src/etl/processor/join.rs +++ b/src/pipeline/src/etl/processor/join.rs @@ -22,8 +22,8 @@ use crate::error::{ }; use crate::etl::field::Fields; use crate::etl::processor::{ - yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, Processor, FIELDS_NAME, FIELD_NAME, - IGNORE_MISSING_NAME, SEPARATOR_NAME, + FIELD_NAME, FIELDS_NAME, IGNORE_MISSING_NAME, Processor, SEPARATOR_NAME, yaml_bool, + yaml_new_field, yaml_new_fields, yaml_string, }; pub(crate) const PROCESSOR_JOIN: &str = "join"; diff --git a/src/pipeline/src/etl/processor/json_parse.rs b/src/pipeline/src/etl/processor/json_parse.rs index b6ef533fdc..9315670ec1 100644 --- a/src/pipeline/src/etl/processor/json_parse.rs +++ b/src/pipeline/src/etl/processor/json_parse.rs @@ -15,15 +15,15 @@ use snafu::{OptionExt as _, ResultExt}; use vrl::value::{KeyString, Value as VrlValue}; +use crate::Processor; use crate::error::{ Error, FieldMustBeTypeSnafu, JsonParseSnafu, KeyMustBeStringSnafu, ProcessorMissingFieldSnafu, ProcessorUnsupportedValueSnafu, Result, ValueMustBeMapSnafu, }; use crate::etl::field::Fields; use crate::etl::processor::{ - yaml_bool, yaml_new_field, yaml_new_fields, FIELDS_NAME, FIELD_NAME, IGNORE_MISSING_NAME, + FIELD_NAME, FIELDS_NAME, IGNORE_MISSING_NAME, yaml_bool, yaml_new_field, yaml_new_fields, }; -use crate::Processor; pub(crate) const PROCESSOR_JSON_PARSE: &str = "json_parse"; diff --git a/src/pipeline/src/etl/processor/json_path.rs b/src/pipeline/src/etl/processor/json_path.rs index ff9b1c12bf..3d34dd1e5d 100644 --- a/src/pipeline/src/etl/processor/json_path.rs +++ b/src/pipeline/src/etl/processor/json_path.rs @@ -22,8 +22,8 @@ use crate::error::{ }; use crate::etl::field::Fields; use crate::etl::processor::{ - yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, Processor, FIELDS_NAME, FIELD_NAME, - IGNORE_MISSING_NAME, JSON_PATH_NAME, JSON_PATH_RESULT_INDEX_NAME, + FIELD_NAME, FIELDS_NAME, IGNORE_MISSING_NAME, JSON_PATH_NAME, JSON_PATH_RESULT_INDEX_NAME, + Processor, yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, }; pub(crate) const PROCESSOR_JSON_PATH: &str = "json_path"; diff --git a/src/pipeline/src/etl/processor/letter.rs b/src/pipeline/src/etl/processor/letter.rs index 4882cabd64..abe34bbcb9 100644 --- a/src/pipeline/src/etl/processor/letter.rs +++ b/src/pipeline/src/etl/processor/letter.rs @@ -22,8 +22,8 @@ use crate::error::{ }; use crate::etl::field::Fields; use crate::etl::processor::{ - yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, Processor, FIELDS_NAME, FIELD_NAME, - IGNORE_MISSING_NAME, METHOD_NAME, + FIELD_NAME, FIELDS_NAME, IGNORE_MISSING_NAME, METHOD_NAME, Processor, yaml_bool, + yaml_new_field, yaml_new_fields, yaml_string, }; pub(crate) const PROCESSOR_LETTER: &str = "letter"; diff --git a/src/pipeline/src/etl/processor/regex.rs b/src/pipeline/src/etl/processor/regex.rs index 10040b2669..a3d573465b 100644 --- a/src/pipeline/src/etl/processor/regex.rs +++ b/src/pipeline/src/etl/processor/regex.rs @@ -33,8 +33,8 @@ use crate::error::{ }; use crate::etl::field::Fields; use crate::etl::processor::{ - yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, yaml_strings, Processor, FIELDS_NAME, - FIELD_NAME, IGNORE_MISSING_NAME, PATTERN_NAME, + FIELD_NAME, FIELDS_NAME, IGNORE_MISSING_NAME, PATTERN_NAME, Processor, yaml_bool, + yaml_new_field, yaml_new_fields, yaml_string, yaml_strings, }; lazy_static! 
{ diff --git a/src/pipeline/src/etl/processor/select.rs b/src/pipeline/src/etl/processor/select.rs index a460d3cb02..64071e7235 100644 --- a/src/pipeline/src/etl/processor/select.rs +++ b/src/pipeline/src/etl/processor/select.rs @@ -16,14 +16,14 @@ use ahash::{HashSet, HashSetExt}; use snafu::OptionExt; use vrl::value::{KeyString, Value as VrlValue}; +use crate::Processor; use crate::error::{ Error, KeyMustBeStringSnafu, ProcessorUnsupportedValueSnafu, Result, ValueMustBeMapSnafu, }; use crate::etl::field::Fields; use crate::etl::processor::{ - yaml_new_field, yaml_new_fields, yaml_string, FIELDS_NAME, FIELD_NAME, TYPE_NAME, + FIELD_NAME, FIELDS_NAME, TYPE_NAME, yaml_new_field, yaml_new_fields, yaml_string, }; -use crate::Processor; pub(crate) const PROCESSOR_SELECT: &str = "select"; const INCLUDE_KEY: &str = "include"; @@ -137,9 +137,9 @@ mod test { use vrl::prelude::Bytes; use vrl::value::{KeyString, Value as VrlValue}; + use crate::Processor; use crate::etl::field::{Field, Fields}; use crate::etl::processor::select::{SelectProcessor, SelectType}; - use crate::Processor; #[test] fn test_select() { diff --git a/src/pipeline/src/etl/processor/simple_extract.rs b/src/pipeline/src/etl/processor/simple_extract.rs index 258238e223..42fd022c1e 100644 --- a/src/pipeline/src/etl/processor/simple_extract.rs +++ b/src/pipeline/src/etl/processor/simple_extract.rs @@ -15,15 +15,15 @@ use snafu::OptionExt as _; use vrl::value::{KeyString, Value as VrlValue}; +use crate::Processor; use crate::error::{ Error, KeyMustBeStringSnafu, ProcessorMissingFieldSnafu, Result, ValueMustBeMapSnafu, }; use crate::etl::field::Fields; use crate::etl::processor::{ - yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, FIELDS_NAME, FIELD_NAME, - IGNORE_MISSING_NAME, KEY_NAME, + FIELD_NAME, FIELDS_NAME, IGNORE_MISSING_NAME, KEY_NAME, yaml_bool, yaml_new_field, + yaml_new_fields, yaml_string, }; -use crate::Processor; pub(crate) const PROCESSOR_SIMPLE_EXTRACT: &str = "simple_extract"; diff --git a/src/pipeline/src/etl/processor/urlencoding.rs b/src/pipeline/src/etl/processor/urlencoding.rs index 7675eec0dc..102ae8c494 100644 --- a/src/pipeline/src/etl/processor/urlencoding.rs +++ b/src/pipeline/src/etl/processor/urlencoding.rs @@ -23,8 +23,8 @@ use crate::error::{ }; use crate::etl::field::Fields; use crate::etl::processor::{ - yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, FIELDS_NAME, FIELD_NAME, - IGNORE_MISSING_NAME, METHOD_NAME, + FIELD_NAME, FIELDS_NAME, IGNORE_MISSING_NAME, METHOD_NAME, yaml_bool, yaml_new_field, + yaml_new_fields, yaml_string, }; pub(crate) const PROCESSOR_URL_ENCODING: &str = "urlencoding"; diff --git a/src/pipeline/src/etl/processor/vrl_processor.rs b/src/pipeline/src/etl/processor/vrl_processor.rs index 5540d42cf1..e84f0b3e4c 100644 --- a/src/pipeline/src/etl/processor/vrl_processor.rs +++ b/src/pipeline/src/etl/processor/vrl_processor.rs @@ -17,7 +17,7 @@ use std::collections::BTreeMap; use chrono_tz::Tz; use snafu::OptionExt; use vrl::compiler::runtime::Runtime; -use vrl::compiler::{compile, Program, TargetValue}; +use vrl::compiler::{Program, TargetValue, compile}; use vrl::diagnostic::Formatter; use vrl::prelude::TimeZone; use vrl::value::{Kind, Secrets, Value as VrlValue}; @@ -218,7 +218,10 @@ processors: assert!(vrl.is_ok()); let vrl = vrl.unwrap(); - assert_eq!(vrl.source, ".name.a = .user_info.name\n.name.b = .user_info.name\ndel(.user_info)\n.timestamp = now()\n.\n"); + assert_eq!( + vrl.source, + ".name.a = .user_info.name\n.name.b = 
.user_info.name\ndel(.user_info)\n.timestamp = now()\n.\n" + ); } #[test] diff --git a/src/pipeline/src/etl/transform.rs b/src/pipeline/src/etl/transform.rs index 85f011c95d..b6ce8d54cb 100644 --- a/src/pipeline/src/etl/transform.rs +++ b/src/pipeline/src/etl/transform.rs @@ -15,10 +15,10 @@ pub mod index; pub mod transformer; -use api::v1::value::ValueData; use api::v1::ColumnDataType; +use api::v1::value::ValueData; use chrono::Utc; -use snafu::{ensure, OptionExt}; +use snafu::{OptionExt, ensure}; use crate::error::{ Error, KeyMustBeStringSnafu, Result, TransformElementMustBeMapSnafu, diff --git a/src/pipeline/src/etl/transform/transformer/greptime.rs b/src/pipeline/src/etl/transform/transformer/greptime.rs index 5095e49eb0..5673c09d53 100644 --- a/src/pipeline/src/etl/transform/transformer/greptime.rs +++ b/src/pipeline/src/etl/transform/transformer/greptime.rs @@ -40,12 +40,12 @@ use crate::error::{ Result, TimeIndexMustBeNonNullSnafu, TransformColumnNameMustBeUniqueSnafu, TransformMultipleTimestampIndexSnafu, TransformTimestampIndexCountSnafu, ValueMustBeMapSnafu, }; +use crate::etl::PipelineDocVersion; use crate::etl::ctx_req::ContextOpt; use crate::etl::field::{Field, Fields}; use crate::etl::transform::index::Index; use crate::etl::transform::{Transform, Transforms}; -use crate::etl::PipelineDocVersion; -use crate::{truthy, unwrap_or_continue_if_err, PipelineContext}; +use crate::{PipelineContext, truthy, unwrap_or_continue_if_err}; const DEFAULT_GREPTIME_TIMESTAMP_COLUMN: &str = "greptime_timestamp"; const DEFAULT_MAX_NESTED_LEVELS_FOR_JSON_FLATTENING: usize = 10; @@ -178,23 +178,17 @@ impl GreptimeTransformer { column_names_set.extend(target_fields_set); - if let Some(idx) = transform.index { - if idx == Index::Time { - match transform.fields.len() { - //Safety unwrap is fine here because we have checked the length of real_fields - 1 => { - timestamp_columns.push(transform.fields.first().unwrap().input_field()) - } - _ => { - return TransformMultipleTimestampIndexSnafu { - columns: transform - .fields - .iter() - .map(|x| x.input_field()) - .join(", "), - } - .fail(); + if let Some(idx) = transform.index + && idx == Index::Time + { + match transform.fields.len() { + //Safety unwrap is fine here because we have checked the length of real_fields + 1 => timestamp_columns.push(transform.fields.first().unwrap().input_field()), + _ => { + return TransformMultipleTimestampIndexSnafu { + columns: transform.fields.iter().map(|x| x.input_field()).join(", "), } + .fail(); } } } @@ -722,7 +716,7 @@ mod tests { use api::v1::SemanticType; use super::*; - use crate::{identity_pipeline, PipelineDefinition}; + use crate::{PipelineDefinition, identity_pipeline}; #[test] fn test_identify_pipeline() { diff --git a/src/pipeline/src/etl/transform/transformer/greptime/coerce.rs b/src/pipeline/src/etl/transform/transformer/greptime/coerce.rs index 52fc35041b..a5f283881f 100644 --- a/src/pipeline/src/etl/transform/transformer/greptime/coerce.rs +++ b/src/pipeline/src/etl/transform/transformer/greptime/coerce.rs @@ -155,14 +155,14 @@ fn coerce_bool_value(b: bool, transform: &Transform) -> Result return CoerceJsonTypeToSnafu { ty: transform.type_.as_str_name(), } - .fail() + .fail(); } _ => { return UnsupportedTypeInPipelineSnafu { ty: transform.type_.as_str_name(), } - .fail() + .fail(); } }; @@ -196,7 +196,7 @@ fn coerce_i64_value(n: i64, transform: &Transform) -> Result> return CoerceJsonTypeToSnafu { ty: transform.type_.as_str_name(), } - .fail() + .fail(); } _ => return Ok(None), @@ -232,7 +232,7 @@ 
fn coerce_u64_value(n: u64, transform: &Transform) -> Result> return CoerceJsonTypeToSnafu { ty: transform.type_.as_str_name(), } - .fail() + .fail(); } _ => return Ok(None), @@ -276,7 +276,7 @@ fn coerce_f64_value(n: f64, transform: &Transform) -> Result> return CoerceJsonTypeToSnafu { ty: transform.type_.as_str_name(), } - .fail() + .fail(); } _ => return Ok(None), diff --git a/src/pipeline/src/etl/value.rs b/src/pipeline/src/etl/value.rs index ff0d7bd00c..b69fc7af81 100644 --- a/src/pipeline/src/etl/value.rs +++ b/src/pipeline/src/etl/value.rs @@ -14,8 +14,8 @@ use std::collections::BTreeMap; -use api::v1::value::ValueData; use api::v1::ColumnDataType; +use api::v1::value::ValueData; use ordered_float::NotNan; use snafu::{OptionExt, ResultExt}; use vrl::prelude::Bytes; diff --git a/src/pipeline/src/lib.rs b/src/pipeline/src/lib.rs index f4b0400249..c657f61342 100644 --- a/src/pipeline/src/lib.rs +++ b/src/pipeline/src/lib.rs @@ -23,16 +23,16 @@ mod tablesuffix; pub use etl::ctx_req::{ContextOpt, ContextReq}; pub use etl::processor::Processor; +pub use etl::transform::GreptimeTransformer; pub use etl::transform::transformer::greptime::{GreptimePipelineParams, SchemaInfo}; pub use etl::transform::transformer::identity_pipeline; -pub use etl::transform::GreptimeTransformer; pub use etl::{ - parse, Content, DispatchedTo, Pipeline, PipelineExecOutput, TransformedOutput, TransformerMode, + Content, DispatchedTo, Pipeline, PipelineExecOutput, TransformedOutput, TransformerMode, parse, }; pub use manager::{ - pipeline_operator, table, util, IdentityTimeIndex, PipelineContext, PipelineDefinition, - PipelineInfo, PipelineRef, PipelineTableRef, PipelineVersion, PipelineWay, SelectInfo, GREPTIME_INTERNAL_IDENTITY_PIPELINE_NAME, GREPTIME_INTERNAL_TRACE_PIPELINE_V1_NAME, + IdentityTimeIndex, PipelineContext, PipelineDefinition, PipelineInfo, PipelineRef, + PipelineTableRef, PipelineVersion, PipelineWay, SelectInfo, pipeline_operator, table, util, }; #[macro_export] diff --git a/src/pipeline/src/manager.rs b/src/pipeline/src/manager.rs index bca1c2a28e..f2779d2d86 100644 --- a/src/pipeline/src/manager.rs +++ b/src/pipeline/src/manager.rs @@ -14,15 +14,15 @@ use std::sync::Arc; -use api::v1::value::ValueData; use api::v1::ColumnDataType; +use api::v1::value::ValueData; use chrono::{DateTime, Utc}; -use common_time::timestamp::TimeUnit; use common_time::Timestamp; +use common_time::timestamp::TimeUnit; use datatypes::timestamp::TimestampNanosecond; use itertools::Itertools; use session::context::Channel; -use snafu::{ensure, OptionExt}; +use snafu::{OptionExt, ensure}; use util::to_pipeline_version; use vrl::value::Value as VrlValue; @@ -277,7 +277,7 @@ impl IdentityTimeIndex { "failed to convert {} to number", String::from_utf8_lossy(s) ), - ) + ); } }, Some(VrlValue::Timestamp(timestamp)) => datetime_utc_to_unit(timestamp, unit)?, @@ -286,10 +286,14 @@ impl IdentityTimeIndex { *ignore_errors, *unit, format!("unsupported value type to convert to timestamp: {}", v), - ) + ); } None => { - return if_ignore_errors(*ignore_errors, *unit, "missing field".to_string()) + return if_ignore_errors( + *ignore_errors, + *unit, + "missing field".to_string(), + ); } }; Ok(time_unit_to_value_data(*unit, v)) @@ -309,7 +313,7 @@ impl IdentityTimeIndex { *ignore_errors, TimeUnit::Nanosecond, "missing field".to_string(), - ) + ); } }; @@ -320,7 +324,7 @@ impl IdentityTimeIndex { *ignore_errors, TimeUnit::Nanosecond, format!("failed to parse date string: {}, format: {}", v, format), - ) + ); } }; diff --git 
a/src/pipeline/src/manager/pipeline_operator.rs b/src/pipeline/src/manager/pipeline_operator.rs index 6ad190cf23..cde22664d0 100644 --- a/src/pipeline/src/manager/pipeline_operator.rs +++ b/src/pipeline/src/manager/pipeline_operator.rs @@ -18,7 +18,7 @@ use std::time::Instant; use api::v1::CreateTableExpr; use catalog::{CatalogManagerRef, RegisterSystemTableRequest}; -use common_catalog::consts::{default_engine, DEFAULT_PRIVATE_SCHEMA_NAME}; +use common_catalog::consts::{DEFAULT_PRIVATE_SCHEMA_NAME, default_engine}; use common_telemetry::info; use datatypes::timestamp::TimestampNanosecond; use futures::FutureExt; @@ -29,14 +29,14 @@ use session::context::QueryContextRef; use snafu::{OptionExt, ResultExt}; use table::TableRef; +use crate::Pipeline; use crate::error::{CatalogSnafu, CreateTableSnafu, PipelineTableNotFoundSnafu, Result}; use crate::manager::{PipelineInfo, PipelineTableRef, PipelineVersion}; use crate::metrics::{ METRIC_PIPELINE_CREATE_HISTOGRAM, METRIC_PIPELINE_DELETE_HISTOGRAM, METRIC_PIPELINE_RETRIEVE_HISTOGRAM, }; -use crate::table::{PipelineTable, PIPELINE_TABLE_NAME}; -use crate::Pipeline; +use crate::table::{PIPELINE_TABLE_NAME, PipelineTable}; /// PipelineOperator is responsible for managing pipelines. /// It provides the ability to: diff --git a/src/pipeline/src/manager/table.rs b/src/pipeline/src/manager/table.rs index 28b8d23bd7..649e00b3cb 100644 --- a/src/pipeline/src/manager/table.rs +++ b/src/pipeline/src/manager/table.rs @@ -33,20 +33,20 @@ use datatypes::vectors::{StringVector, TimestampNanosecondVector, Vector}; use itertools::Itertools; use operator::insert::InserterRef; use operator::statement::StatementExecutorRef; -use query::dataframe::DataFrame; use query::QueryEngineRef; +use query::dataframe::DataFrame; use session::context::{QueryContextBuilder, QueryContextRef}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; +use table::TableRef; use table::metadata::TableInfo; use table::table::adapter::DfTableProviderAdapter; -use table::TableRef; use crate::error::{ BuildDfLogicalPlanSnafu, CastTypeSnafu, CollectRecordsSnafu, DataFrameSnafu, Error, ExecuteInternalStatementSnafu, InsertPipelineSnafu, InvalidPipelineVersionSnafu, MultiPipelineWithDiffSchemaSnafu, PipelineNotFoundSnafu, RecordBatchLenNotMatchSnafu, Result, }; -use crate::etl::{parse, Content, Pipeline}; +use crate::etl::{Content, Pipeline, parse}; use crate::manager::pipeline_cache::PipelineCache; use crate::manager::{PipelineInfo, PipelineVersion}; use crate::metrics::METRIC_PIPELINE_TABLE_FIND_COUNT; @@ -487,7 +487,7 @@ impl PipelineTable { ]) .context(BuildDfLogicalPlanSnafu)? .sort(vec![ - col(PIPELINE_TABLE_CREATED_AT_COLUMN_NAME).sort(false, true) + col(PIPELINE_TABLE_CREATED_AT_COLUMN_NAME).sort(false, true), ]) .context(BuildDfLogicalPlanSnafu)?; diff --git a/src/pipeline/src/manager/util.rs b/src/pipeline/src/manager/util.rs index 49537c011e..c063a8da22 100644 --- a/src/pipeline/src/manager/util.rs +++ b/src/pipeline/src/manager/util.rs @@ -13,14 +13,14 @@ // limitations under the License. 
use common_time::Timestamp; -use datafusion_expr::{col, lit, Expr}; +use datafusion_expr::{Expr, col, lit}; use datatypes::timestamp::TimestampNanosecond; +use crate::PipelineVersion; use crate::error::{InvalidPipelineVersionSnafu, Result}; use crate::table::{ PIPELINE_TABLE_CREATED_AT_COLUMN_NAME, PIPELINE_TABLE_PIPELINE_NAME_COLUMN_NAME, }; -use crate::PipelineVersion; pub fn to_pipeline_version(version_str: Option<&str>) -> Result { match version_str { diff --git a/src/pipeline/src/metrics.rs b/src/pipeline/src/metrics.rs index 543b3308f0..8a539d0b33 100644 --- a/src/pipeline/src/metrics.rs +++ b/src/pipeline/src/metrics.rs @@ -13,7 +13,7 @@ // limitations under the License. use lazy_static::lazy_static; -use prometheus::{register_counter_vec, register_histogram_vec, CounterVec, HistogramVec}; +use prometheus::{CounterVec, HistogramVec, register_counter_vec, register_histogram_vec}; lazy_static! { pub static ref METRIC_PIPELINE_CREATE_HISTOGRAM: HistogramVec = register_histogram_vec!( diff --git a/src/pipeline/src/tablesuffix.rs b/src/pipeline/src/tablesuffix.rs index 8733bc3841..01ce4202cf 100644 --- a/src/pipeline/src/tablesuffix.rs +++ b/src/pipeline/src/tablesuffix.rs @@ -14,7 +14,7 @@ use dyn_fmt::AsStrFormatExt; use regex::Regex; -use snafu::{ensure, OptionExt}; +use snafu::{OptionExt, ensure}; use vrl::value::Value as VrlValue; use yaml_rust::Yaml; diff --git a/src/pipeline/tests/common.rs b/src/pipeline/tests/common.rs index ac53308d45..09ea340235 100644 --- a/src/pipeline/tests/common.rs +++ b/src/pipeline/tests/common.rs @@ -13,7 +13,7 @@ // limitations under the License. use greptime_proto::v1::{ColumnDataType, ColumnSchema, Rows, SemanticType}; -use pipeline::{parse, setup_pipeline, Content, Pipeline, PipelineContext}; +use pipeline::{Content, Pipeline, PipelineContext, parse, setup_pipeline}; use vrl::value::Value as VrlValue; /// test util function to parse and execute pipeline diff --git a/src/pipeline/tests/dissect.rs b/src/pipeline/tests/dissect.rs index 1af3ee3c23..a24e374532 100644 --- a/src/pipeline/tests/dissect.rs +++ b/src/pipeline/tests/dissect.rs @@ -16,7 +16,7 @@ mod common; use greptime_proto::v1::value::ValueData::StringValue; use greptime_proto::v1::{ColumnDataType, SemanticType}; -use pipeline::{setup_pipeline, PipelineContext}; +use pipeline::{PipelineContext, setup_pipeline}; fn make_string_column_schema(name: String) -> greptime_proto::v1::ColumnSchema { common::make_column_schema(name, ColumnDataType::String, SemanticType::Field) diff --git a/src/pipeline/tests/json_parse.rs b/src/pipeline/tests/json_parse.rs index bd43d9754c..9bb5ee4f95 100644 --- a/src/pipeline/tests/json_parse.rs +++ b/src/pipeline/tests/json_parse.rs @@ -16,8 +16,8 @@ mod common; use std::borrow::Cow; -use api::v1::value::ValueData; use api::v1::ColumnDataType; +use api::v1::value::ValueData; const INPUT_VALUE_OBJ: &str = r#" [ diff --git a/src/pipeline/tests/on_failure.rs b/src/pipeline/tests/on_failure.rs index db72a0b7fc..2662a3fa96 100644 --- a/src/pipeline/tests/on_failure.rs +++ b/src/pipeline/tests/on_failure.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use greptime_proto::v1::value::ValueData::{U16Value, U8Value}; +use greptime_proto::v1::value::ValueData::{U8Value, U16Value}; use greptime_proto::v1::{ColumnDataType, SemanticType}; mod common; diff --git a/src/pipeline/tests/pipeline.rs b/src/pipeline/tests/pipeline.rs index e10230c79d..0e6019ab47 100644 --- a/src/pipeline/tests/pipeline.rs +++ b/src/pipeline/tests/pipeline.rs @@ -15,12 +15,12 @@ use api::v1::value::ValueData; use api::v1::{Rows, Value}; use common_telemetry::tracing::info; +use greptime_proto::v1::Value as GreptimeValue; use greptime_proto::v1::value::ValueData::{ BinaryValue, BoolValue, F64Value, StringValue, TimestampNanosecondValue, TimestampSecondValue, - U32Value, U64Value, U8Value, + U8Value, U32Value, U64Value, }; -use greptime_proto::v1::Value as GreptimeValue; -use pipeline::{parse, setup_pipeline, Content, Pipeline, PipelineContext}; +use pipeline::{Content, Pipeline, PipelineContext, parse, setup_pipeline}; #[test] fn test_complex_data() { diff --git a/src/plugins/src/flownode.rs b/src/plugins/src/flownode.rs index 13c1b37a1c..6b56b008da 100644 --- a/src/plugins/src/flownode.rs +++ b/src/plugins/src/flownode.rs @@ -13,8 +13,8 @@ // limitations under the License. use common_base::Plugins; -use flow::error::Result; use flow::FlownodeOptions; +use flow::error::Result; use crate::options::PluginOptions; diff --git a/src/promql/src/error.rs b/src/promql/src/error.rs index 3f3c216acd..1a634b58b0 100644 --- a/src/promql/src/error.rs +++ b/src/promql/src/error.rs @@ -98,9 +98,5 @@ pub(crate) fn ensure( predicate: bool, error: DataFusionError, ) -> std::result::Result<(), DataFusionError> { - if predicate { - Ok(()) - } else { - Err(error) - } + if predicate { Ok(()) } else { Err(error) } } diff --git a/src/promql/src/extension_plan.rs b/src/promql/src/extension_plan.rs index dbb83eb9f8..7aaeb83484 100644 --- a/src/promql/src/extension_plan.rs +++ b/src/promql/src/extension_plan.rs @@ -27,7 +27,7 @@ mod union_distinct_on; pub use absent::{Absent, AbsentExec, AbsentStream}; use datafusion::arrow::datatypes::{ArrowPrimitiveType, TimestampMillisecondType}; -pub use empty_metric::{build_special_time_expr, EmptyMetric, EmptyMetricExec, EmptyMetricStream}; +pub use empty_metric::{EmptyMetric, EmptyMetricExec, EmptyMetricStream, build_special_time_expr}; pub use histogram_fold::{HistogramFold, HistogramFoldExec, HistogramFoldStream}; pub use instant_manipulate::{InstantManipulate, InstantManipulateExec, InstantManipulateStream}; pub use normalize::{SeriesNormalize, SeriesNormalizeExec, SeriesNormalizeStream}; diff --git a/src/promql/src/extension_plan/absent.rs b/src/promql/src/extension_plan/absent.rs index 904da75aa0..843f9a468f 100644 --- a/src/promql/src/extension_plan/absent.rs +++ b/src/promql/src/extension_plan/absent.rs @@ -41,7 +41,7 @@ use datatypes::arrow::datatypes::{DataType, Field, SchemaRef, TimeUnit}; use datatypes::arrow::record_batch::RecordBatch; use datatypes::arrow_array::StringArray; use datatypes::compute::SortOptions; -use futures::{ready, Stream, StreamExt}; +use futures::{Stream, StreamExt, ready}; use greptime_proto::substrait_extension as pb; use prost::Message; use snafu::ResultExt; diff --git a/src/promql/src/extension_plan/empty_metric.rs b/src/promql/src/extension_plan/empty_metric.rs index 48864ae32d..741a6b64bc 100644 --- a/src/promql/src/extension_plan/empty_metric.rs +++ b/src/promql/src/extension_plan/empty_metric.rs @@ -26,7 +26,7 @@ use datafusion::common::stats::Precision; use datafusion::common::{ DFSchema, DFSchemaRef, Result as 
DataFusionResult, Statistics, TableReference, }; -use datafusion::datasource::{provider_as_source, MemTable}; +use datafusion::datasource::{MemTable, provider_as_source}; use datafusion::error::DataFusionError; use datafusion::execution::context::{SessionState, TaskContext}; use datafusion::logical_expr::{ExprSchemable, LogicalPlan, UserDefinedLogicalNodeCore}; @@ -38,7 +38,7 @@ use datafusion::physical_plan::{ SendableRecordBatchStream, }; use datafusion::physical_planner::PhysicalPlanner; -use datafusion::prelude::{col, lit, Expr}; +use datafusion::prelude::{Expr, col, lit}; use datafusion_expr::LogicalPlanBuilder; use datatypes::arrow::array::TimestampMillisecondArray; use datatypes::arrow::datatypes::SchemaRef; diff --git a/src/promql/src/extension_plan/histogram_fold.rs b/src/promql/src/extension_plan/histogram_fold.rs index 4f89c32a4b..0937142560 100644 --- a/src/promql/src/extension_plan/histogram_fold.rs +++ b/src/promql/src/extension_plan/histogram_fold.rs @@ -21,7 +21,7 @@ use std::time::Instant; use common_recordbatch::RecordBatch as GtRecordBatch; use common_telemetry::warn; use datafusion::arrow::array::AsArray; -use datafusion::arrow::compute::{self, concat_batches, SortOptions}; +use datafusion::arrow::compute::{self, SortOptions, concat_batches}; use datafusion::arrow::datatypes::{DataType, Float64Type, SchemaRef}; use datafusion::arrow::record_batch::RecordBatch; use datafusion::common::stats::Precision; @@ -44,7 +44,7 @@ use datatypes::prelude::{ConcreteDataType, DataType as GtDataType}; use datatypes::schema::Schema as GtSchema; use datatypes::value::{OrderedF64, ValueRef}; use datatypes::vectors::MutableVector; -use futures::{ready, Stream, StreamExt}; +use futures::{Stream, StreamExt, ready}; /// `HistogramFold` will fold the conventional (non-native) histogram ([1]) for later /// computing. diff --git a/src/promql/src/extension_plan/instant_manipulate.rs b/src/promql/src/extension_plan/instant_manipulate.rs index 40d5520cac..110a926399 100644 --- a/src/promql/src/extension_plan/instant_manipulate.rs +++ b/src/promql/src/extension_plan/instant_manipulate.rs @@ -35,13 +35,13 @@ use datafusion::physical_plan::{ }; use datatypes::arrow::compute; use datatypes::arrow::error::Result as ArrowResult; -use futures::{ready, Stream, StreamExt}; +use futures::{Stream, StreamExt, ready}; use greptime_proto::substrait_extension as pb; use prost::Message; use snafu::ResultExt; use crate::error::{DeserializeSnafu, Result}; -use crate::extension_plan::{Millisecond, METRIC_NUM_SERIES}; +use crate::extension_plan::{METRIC_NUM_SERIES, Millisecond}; use crate::metrics::PROMQL_SERIES_COUNT; /// Manipulate the input record batch to make it suitable for Instant Operator. 
@@ -321,7 +321,11 @@ impl DisplayAs for InstantManipulateExec { write!( f, "PromInstantManipulateExec: range=[{}..{}], lookback=[{}], interval=[{}], time index=[{}]", - self.start,self.end, self.lookback_delta, self.interval, self.time_index_column + self.start, + self.end, + self.lookback_delta, + self.interval, + self.time_index_column ) } } @@ -513,7 +517,7 @@ mod test { use super::*; use crate::extension_plan::test_util::{ - prepare_test_data, prepare_test_data_with_nan, TIME_INDEX_COLUMN, + TIME_INDEX_COLUMN, prepare_test_data, prepare_test_data_with_nan, }; async fn do_normalize_test( diff --git a/src/promql/src/extension_plan/normalize.rs b/src/promql/src/extension_plan/normalize.rs index 89ade7cccb..eddb60f000 100644 --- a/src/promql/src/extension_plan/normalize.rs +++ b/src/promql/src/extension_plan/normalize.rs @@ -34,13 +34,13 @@ use datafusion::physical_plan::{ use datatypes::arrow::array::TimestampMillisecondArray; use datatypes::arrow::datatypes::SchemaRef; use datatypes::arrow::record_batch::RecordBatch; -use futures::{ready, Stream, StreamExt}; +use futures::{Stream, StreamExt, ready}; use greptime_proto::substrait_extension as pb; use prost::Message; use snafu::ResultExt; use crate::error::{DeserializeSnafu, Result}; -use crate::extension_plan::{Millisecond, METRIC_NUM_SERIES}; +use crate::extension_plan::{METRIC_NUM_SERIES, Millisecond}; use crate::metrics::PROMQL_SERIES_COUNT; /// Normalize the input record batch. Notice that for simplicity, this method assumes diff --git a/src/promql/src/extension_plan/range_manipulate.rs b/src/promql/src/extension_plan/range_manipulate.rs index cffe9a17e7..2b60d0654b 100644 --- a/src/promql/src/extension_plan/range_manipulate.rs +++ b/src/promql/src/extension_plan/range_manipulate.rs @@ -37,13 +37,13 @@ use datafusion::physical_plan::{ SendableRecordBatchStream, Statistics, }; use datafusion::sql::TableReference; -use futures::{ready, Stream, StreamExt}; +use futures::{Stream, StreamExt, ready}; use greptime_proto::substrait_extension as pb; use prost::Message; use snafu::ResultExt; use crate::error::{DeserializeSnafu, Result}; -use crate::extension_plan::{Millisecond, METRIC_NUM_SERIES}; +use crate::extension_plan::{METRIC_NUM_SERIES, Millisecond}; use crate::metrics::PROMQL_SERIES_COUNT; use crate::range_array::RangeArray; @@ -465,7 +465,7 @@ impl DisplayAs for RangeManipulateExec { write!( f, "PromRangeManipulateExec: req range=[{}..{}], interval=[{}], eval range=[{}], time index=[{}]", - self.start, self.end, self.interval, self.range, self.time_index_column + self.start, self.end, self.interval, self.range, self.time_index_column ) } } @@ -804,7 +804,7 @@ mod test { base array: PrimitiveArray\n[\n 1970-01-01T00:00:00,\n 1970-01-01T00:00:30,\n 1970-01-01T00:01:00,\n 1970-01-01T00:01:30,\n 1970-01-01T00:02:00,\n 1970-01-01T00:03:00,\n 1970-01-01T00:04:00,\n 1970-01-01T00:04:01,\n 1970-01-01T00:04:31,\n 1970-01-01T00:04:51,\n], \ ranges: [Some(0..1), Some(0..2), Some(0..3), Some(0..4), Some(1..5), Some(2..5), Some(3..6), Some(4..6), Some(5..7), Some(5..8), Some(6..10)] \ }", -); + ); do_normalize_test(0, 310_000, 30_000, 90_000, expected.clone()).await; // dump large range @@ -814,7 +814,7 @@ mod test { #[tokio::test] async fn small_empty_range() { let expected = String::from( - "PrimitiveArray\n[\n \ + "PrimitiveArray\n[\n \ 1970-01-01T00:00:00.001,\n \ 1970-01-01T00:00:03.001,\n \ 1970-01-01T00:00:06.001,\n \ @@ -829,7 +829,8 @@ mod test { RangeArray { \ base array: PrimitiveArray\n[\n 1970-01-01T00:00:00,\n 1970-01-01T00:00:30,\n 
1970-01-01T00:01:00,\n 1970-01-01T00:01:30,\n 1970-01-01T00:02:00,\n 1970-01-01T00:03:00,\n 1970-01-01T00:04:00,\n 1970-01-01T00:04:01,\n 1970-01-01T00:04:31,\n 1970-01-01T00:04:51,\n], \ ranges: [Some(0..1), Some(0..0), Some(0..0), Some(0..0)] \ - }"); + }", + ); do_normalize_test(1, 10_001, 3_000, 1_000, expected).await; } } diff --git a/src/promql/src/extension_plan/scalar_calculate.rs b/src/promql/src/extension_plan/scalar_calculate.rs index d4a5420827..6372d7f292 100644 --- a/src/promql/src/extension_plan/scalar_calculate.rs +++ b/src/promql/src/extension_plan/scalar_calculate.rs @@ -32,10 +32,10 @@ use datafusion::physical_plan::{ use datafusion::prelude::Expr; use datafusion::sql::TableReference; use datatypes::arrow::array::{Array, Float64Array, StringArray, TimestampMillisecondArray}; -use datatypes::arrow::compute::{cast_with_options, concat_batches, CastOptions}; +use datatypes::arrow::compute::{CastOptions, cast_with_options, concat_batches}; use datatypes::arrow::datatypes::{DataType, Field, Schema, SchemaRef, TimeUnit}; use datatypes::arrow::record_batch::RecordBatch; -use futures::{ready, Stream, StreamExt}; +use futures::{Stream, StreamExt, ready}; use greptime_proto::substrait_extension as pb; use prost::Message; use snafu::ResultExt; @@ -686,16 +686,18 @@ mod test { Field::new("val", DataType::Float64, true), ])); run_test( - vec![RecordBatch::try_new( - schema, - vec![ - Arc::new(TimestampMillisecondArray::new_null(0)), - Arc::new(StringArray::new_null(0)), - Arc::new(StringArray::new_null(0)), - Arc::new(Float64Array::new_null(0)), - ], - ) - .unwrap()], + vec![ + RecordBatch::try_new( + schema, + vec![ + Arc::new(TimestampMillisecondArray::new_null(0)), + Arc::new(StringArray::new_null(0)), + Arc::new(StringArray::new_null(0)), + Arc::new(Float64Array::new_null(0)), + ], + ) + .unwrap(), + ], "+---------------------+-----+\ \n| ts | val |\ \n+---------------------+-----+\ diff --git a/src/promql/src/extension_plan/series_divide.rs b/src/promql/src/extension_plan/series_divide.rs index 032566d2ad..ece5263741 100644 --- a/src/promql/src/extension_plan/series_divide.rs +++ b/src/promql/src/extension_plan/series_divide.rs @@ -35,7 +35,7 @@ use datafusion::physical_plan::{ }; use datatypes::arrow::compute; use datatypes::compute::SortOptions; -use futures::{ready, Stream, StreamExt}; +use futures::{Stream, StreamExt, ready}; use greptime_proto::substrait_extension as pb; use prost::Message; use snafu::ResultExt; diff --git a/src/promql/src/extension_plan/union_distinct_on.rs b/src/promql/src/extension_plan/union_distinct_on.rs index a943ae3b4d..e5e80525b8 100644 --- a/src/promql/src/extension_plan/union_distinct_on.rs +++ b/src/promql/src/extension_plan/union_distinct_on.rs @@ -29,12 +29,12 @@ use datafusion::physical_expr::EquivalenceProperties; use datafusion::physical_plan::execution_plan::{Boundedness, EmissionType}; use datafusion::physical_plan::metrics::{BaselineMetrics, ExecutionPlanMetricsSet, MetricsSet}; use datafusion::physical_plan::{ - hash_utils, DisplayAs, DisplayFormatType, Distribution, ExecutionPlan, Partitioning, - PlanProperties, RecordBatchStream, SendableRecordBatchStream, + DisplayAs, DisplayFormatType, Distribution, ExecutionPlan, Partitioning, PlanProperties, + RecordBatchStream, SendableRecordBatchStream, hash_utils, }; use datatypes::arrow::compute; use futures::future::BoxFuture; -use futures::{ready, Stream, StreamExt, TryStreamExt}; +use futures::{Stream, StreamExt, TryStreamExt, ready}; /// A special kind of `UNION`(`OR` in PromQL) operator, 
for PromQL specific use case. /// diff --git a/src/promql/src/functions.rs b/src/promql/src/functions.rs index 12841dee09..154971b39d 100644 --- a/src/promql/src/functions.rs +++ b/src/promql/src/functions.rs @@ -40,7 +40,7 @@ pub use holt_winters::HoltWinters; pub use idelta::IDelta; pub use predict_linear::PredictLinear; pub use quantile::QuantileOverTime; -pub use quantile_aggr::{quantile_udaf, QUANTILE_NAME}; +pub use quantile_aggr::{QUANTILE_NAME, quantile_udaf}; pub use resets::Resets; pub use round::Round; diff --git a/src/promql/src/functions/aggr_over_time.rs b/src/promql/src/functions/aggr_over_time.rs index 841f28e0df..518c8689ef 100644 --- a/src/promql/src/functions/aggr_over_time.rs +++ b/src/promql/src/functions/aggr_over_time.rs @@ -99,11 +99,7 @@ pub fn last_over_time(_: &TimestampMillisecondArray, values: &Float64Array) -> O display_name = prom_absent_over_time )] pub fn absent_over_time(_: &TimestampMillisecondArray, values: &Float64Array) -> Option { - if values.is_empty() { - Some(1.0) - } else { - None - } + if values.is_empty() { Some(1.0) } else { None } } /// the value 1 for any series in the specified interval. @@ -113,11 +109,7 @@ pub fn absent_over_time(_: &TimestampMillisecondArray, values: &Float64Array) -> display_name = prom_present_over_time )] pub fn present_over_time(_: &TimestampMillisecondArray, values: &Float64Array) -> Option { - if values.is_empty() { - None - } else { - Some(1.0) - } + if values.is_empty() { None } else { Some(1.0) } } /// the population standard variance of the values in the specified interval. diff --git a/src/promql/src/functions/idelta.rs b/src/promql/src/functions/idelta.rs index a70a1dee3c..eeec9a4be9 100644 --- a/src/promql/src/functions/idelta.rs +++ b/src/promql/src/functions/idelta.rs @@ -35,11 +35,7 @@ pub struct IDelta {} impl IDelta { pub const fn name() -> &'static str { - if IS_RATE { - "prom_irate" - } else { - "prom_idelta" - } + if IS_RATE { "prom_irate" } else { "prom_idelta" } } pub fn scalar_udf() -> ScalarUDF { diff --git a/src/promql/src/functions/quantile_aggr.rs b/src/promql/src/functions/quantile_aggr.rs index 5652f57342..6d755cadf7 100644 --- a/src/promql/src/functions/quantile_aggr.rs +++ b/src/promql/src/functions/quantile_aggr.rs @@ -90,7 +90,7 @@ impl QuantileAccumulator { _ => { return Err(DataFusionError::Internal( "Invalid quantile value".to_string(), - )) + )); } }; diff --git a/src/promql/src/functions/round.rs b/src/promql/src/functions/round.rs index ee9065c071..72736ef6e2 100644 --- a/src/promql/src/functions/round.rs +++ b/src/promql/src/functions/round.rs @@ -16,7 +16,7 @@ use std::sync::Arc; use datafusion::error::DataFusionError; use datafusion_common::ScalarValue; -use datafusion_expr::{create_udf, ColumnarValue, ScalarUDF, Volatility}; +use datafusion_expr::{ColumnarValue, ScalarUDF, Volatility, create_udf}; use datatypes::arrow::array::{AsArray, Float64Array, PrimitiveArray}; use datatypes::arrow::datatypes::{DataType, Float64Type}; use datatypes::arrow::error::ArrowError; diff --git a/src/promql/src/functions/test_util.rs b/src/promql/src/functions/test_util.rs index a01216ef90..e8c1d633c9 100644 --- a/src/promql/src/functions/test_util.rs +++ b/src/promql/src/functions/test_util.rs @@ -17,8 +17,8 @@ use std::sync::Arc; use datafusion::arrow::array::Float64Array; use datafusion::logical_expr::ScalarUDF; use datafusion::physical_plan::ColumnarValue; -use datafusion_common::config::ConfigOptions; use datafusion_common::ScalarValue; +use datafusion_common::config::ConfigOptions; use 
datafusion_expr::ScalarFunctionArgs; use datatypes::arrow::datatypes::{DataType, Field}; @@ -62,12 +62,14 @@ pub fn simple_range_udf_runner( .iter() .collect(); assert_eq!(eval_result.len(), expected.len()); - assert!(eval_result - .iter() - .zip(expected.iter()) - .all(|(x, y)| match (*x, *y) { - (Some(x), Some(y)) => (x - y).abs() < 0.0001, - (None, None) => true, - _ => false, - })); + assert!( + eval_result + .iter() + .zip(expected.iter()) + .all(|(x, y)| match (*x, *y) { + (Some(x), Some(y)) => (x - y).abs() < 0.0001, + (None, None) => true, + _ => false, + }) + ); } diff --git a/src/promql/src/range_array.rs b/src/promql/src/range_array.rs index c932524d07..4ae67dd2da 100644 --- a/src/promql/src/range_array.rs +++ b/src/promql/src/range_array.rs @@ -20,7 +20,7 @@ use datafusion::arrow::buffer::NullBuffer; use datafusion::arrow::datatypes::Field; use datatypes::arrow::array::{Array, ArrayData, ArrayRef, DictionaryArray, Int64Array}; use datatypes::arrow::datatypes::{DataType, Int64Type}; -use snafu::{ensure, OptionExt}; +use snafu::{OptionExt, ensure}; use crate::error::{EmptyRangeSnafu, IllegalRangeSnafu, Result}; diff --git a/src/puffin/src/file_format/reader/file.rs b/src/puffin/src/file_format/reader/file.rs index 27a9853acd..bbc1dcbdfc 100644 --- a/src/puffin/src/file_format/reader/file.rs +++ b/src/puffin/src/file_format/reader/file.rs @@ -14,13 +14,13 @@ use async_trait::async_trait; use common_base::range_read::RangeReader; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use crate::blob_metadata::BlobMetadata; use crate::error::{ReadSnafu, Result, UnexpectedPuffinFileSizeSnafu}; +use crate::file_format::MIN_FILE_SIZE; use crate::file_format::reader::footer::DEFAULT_PREFETCH_SIZE; use crate::file_format::reader::{AsyncReader, PuffinFileFooterReader}; -use crate::file_format::MIN_FILE_SIZE; use crate::file_metadata::FileMetadata; use crate::partial_reader::PartialReader; diff --git a/src/puffin/src/file_format/reader/footer.rs b/src/puffin/src/file_format/reader/footer.rs index c7a7eab08b..5d0d498d66 100644 --- a/src/puffin/src/file_format/reader/footer.rs +++ b/src/puffin/src/file_format/reader/footer.rs @@ -15,13 +15,13 @@ use std::io::Cursor; use common_base::range_read::RangeReader; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use crate::error::{ DeserializeJsonSnafu, InvalidPuffinFooterSnafu, Lz4DecompressionSnafu, MagicNotMatchedSnafu, ReadSnafu, Result, UnexpectedFooterPayloadSizeSnafu, }; -use crate::file_format::{Flags, FLAGS_SIZE, MAGIC, MAGIC_SIZE, MIN_FILE_SIZE, PAYLOAD_SIZE_SIZE}; +use crate::file_format::{FLAGS_SIZE, Flags, MAGIC, MAGIC_SIZE, MIN_FILE_SIZE, PAYLOAD_SIZE_SIZE}; use crate::file_metadata::FileMetadata; /// The default prefetch size for the footer reader. 
diff --git a/src/puffin/src/file_format/writer/file.rs b/src/puffin/src/file_format/writer/file.rs index 22e5cc0bdd..8791417f4e 100644 --- a/src/puffin/src/file_format/writer/file.rs +++ b/src/puffin/src/file_format/writer/file.rs @@ -21,9 +21,9 @@ use snafu::ResultExt; use crate::blob_metadata::{BlobMetadata, BlobMetadataBuilder, CompressionCodec}; use crate::error::{CloseSnafu, FlushSnafu, Result, WriteSnafu}; +use crate::file_format::MAGIC; use crate::file_format::writer::footer::FooterWriter; use crate::file_format::writer::{AsyncWriter, Blob, SyncWriter}; -use crate::file_format::MAGIC; /// Puffin file writer, implements both [`PuffinSyncWriter`] and [`PuffinAsyncWriter`] pub struct PuffinFileWriter { diff --git a/src/puffin/src/file_metadata.rs b/src/puffin/src/file_metadata.rs index 4804c65be4..db3091074a 100644 --- a/src/puffin/src/file_metadata.rs +++ b/src/puffin/src/file_metadata.rs @@ -151,9 +151,11 @@ mod tests { let data = r#"{"properties":{"key1":"value1"}}"#; let deserialized = serde_json::from_str::(data); - assert!(deserialized - .unwrap_err() - .to_string() - .contains("missing field `blobs`")); + assert!( + deserialized + .unwrap_err() + .to_string() + .contains("missing field `blobs`") + ); } } diff --git a/src/puffin/src/puffin_manager/file_accessor.rs b/src/puffin/src/puffin_manager/file_accessor.rs index 557f9c7914..cc4cca3171 100644 --- a/src/puffin/src/puffin_manager/file_accessor.rs +++ b/src/puffin/src/puffin_manager/file_accessor.rs @@ -14,7 +14,7 @@ use async_trait::async_trait; use common_base::range_read::{FileReader, SizeAwareRangeReader}; -use common_test_util::temp_dir::{create_temp_dir, TempDir}; +use common_test_util::temp_dir::{TempDir, create_temp_dir}; use futures::AsyncWrite; use tokio::fs::File; use tokio_util::compat::{Compat, TokioAsyncReadCompatExt}; @@ -61,10 +61,10 @@ impl PuffinFileAccessor for MockFileAccessor { async fn writer(&self, handle: &String) -> Result { let p = self.tempdir.path().join(handle); - if let Some(p) = p.parent() { - if !tokio::fs::try_exists(p).await.unwrap() { - tokio::fs::create_dir_all(p).await.unwrap(); - } + if let Some(p) = p.parent() + && !tokio::fs::try_exists(p).await.unwrap() + { + tokio::fs::create_dir_all(p).await.unwrap(); } let f = tokio::fs::File::create(p).await.unwrap(); Ok(f.compat()) diff --git a/src/puffin/src/puffin_manager/fs_puffin_manager.rs b/src/puffin/src/puffin_manager/fs_puffin_manager.rs index af57041e68..61f6d5b597 100644 --- a/src/puffin/src/puffin_manager/fs_puffin_manager.rs +++ b/src/puffin/src/puffin_manager/fs_puffin_manager.rs @@ -21,10 +21,10 @@ pub use reader::FsPuffinReader; pub use writer::FsPuffinWriter; use crate::error::Result; +use crate::puffin_manager::PuffinManager; use crate::puffin_manager::cache::PuffinMetadataCacheRef; use crate::puffin_manager::file_accessor::PuffinFileAccessor; use crate::puffin_manager::stager::Stager; -use crate::puffin_manager::PuffinManager; /// `FsPuffinManager` is a `PuffinManager` that provides readers and writers for puffin data in filesystem. 
#[derive(Clone)] diff --git a/src/puffin/src/puffin_manager/fs_puffin_manager/reader.rs b/src/puffin/src/puffin_manager/fs_puffin_manager/reader.rs index 2c616578f6..8339d32c95 100644 --- a/src/puffin/src/puffin_manager/fs_puffin_manager/reader.rs +++ b/src/puffin/src/puffin_manager/fs_puffin_manager/reader.rs @@ -22,7 +22,7 @@ use bytes::{BufMut, Bytes}; use common_base::range_read::{AsyncReadAdapter, Metadata, RangeReader, SizeAwareRangeReader}; use futures::io::BufReader; use futures::{AsyncRead, AsyncWrite}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use crate::blob_metadata::{BlobMetadata, CompressionCodec}; use crate::error::{ @@ -33,8 +33,8 @@ use crate::file_format::reader::{AsyncReader, PuffinFileReader}; use crate::file_metadata::FileMetadata; use crate::partial_reader::PartialReader; use crate::puffin_manager::file_accessor::PuffinFileAccessor; -use crate::puffin_manager::fs_puffin_manager::dir_meta::DirMetadata; use crate::puffin_manager::fs_puffin_manager::PuffinMetadataCacheRef; +use crate::puffin_manager::fs_puffin_manager::dir_meta::DirMetadata; use crate::puffin_manager::stager::{BoxWriter, DirWriterProviderRef, Stager}; use crate::puffin_manager::{BlobGuard, GuardWithMetadata, PuffinReader}; @@ -167,10 +167,10 @@ where reader: &mut PuffinFileReader, ) -> Result> { let id = self.handle.to_string(); - if let Some(cache) = self.puffin_file_metadata_cache.as_ref() { - if let Some(metadata) = cache.get_metadata(&id) { - return Ok(metadata); - } + if let Some(cache) = self.puffin_file_metadata_cache.as_ref() + && let Some(metadata) = cache.get_metadata(&id) + { + return Ok(metadata); } let metadata = Arc::new(reader.metadata().await?); diff --git a/src/puffin/src/puffin_manager/fs_puffin_manager/writer.rs b/src/puffin/src/puffin_manager/fs_puffin_manager/writer.rs index feb7678756..8ec0cb8e45 100644 --- a/src/puffin/src/puffin_manager/fs_puffin_manager/writer.rs +++ b/src/puffin/src/puffin_manager/fs_puffin_manager/writer.rs @@ -19,7 +19,7 @@ use async_compression::futures::bufread::ZstdEncoder; use async_trait::async_trait; use futures::io::BufReader; use futures::{AsyncRead, AsyncWrite, StreamExt}; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use tokio_util::compat::TokioAsyncReadCompatExt; use uuid::Uuid; diff --git a/src/puffin/src/puffin_manager/stager.rs b/src/puffin/src/puffin_manager/stager.rs index 98cc194b9c..708053bb27 100644 --- a/src/puffin/src/puffin_manager/stager.rs +++ b/src/puffin/src/puffin_manager/stager.rs @@ -19,8 +19,8 @@ use std::time::Duration; use async_trait::async_trait; pub use bounded_stager::{BoundedStager, FsBlobGuard, FsDirGuard}; -use futures::future::BoxFuture; use futures::AsyncWrite; +use futures::future::BoxFuture; use crate::error::Result; use crate::puffin_manager::{BlobGuard, DirGuard}; diff --git a/src/puffin/src/puffin_manager/stager/bounded_stager.rs b/src/puffin/src/puffin_manager/stager/bounded_stager.rs index c64e303349..380cce7930 100644 --- a/src/puffin/src/puffin_manager/stager/bounded_stager.rs +++ b/src/puffin/src/puffin_manager/stager/bounded_stager.rs @@ -19,8 +19,8 @@ use std::time::{Duration, Instant}; use async_trait::async_trait; use async_walkdir::{Filtering, WalkDir}; -use base64::prelude::BASE64_URL_SAFE; use base64::Engine; +use base64::prelude::BASE64_URL_SAFE; use common_base::range_read::FileReader; use common_runtime::runtime::RuntimeTrait; use common_telemetry::{info, warn}; @@ -107,10 +107,10 @@ impl BoundedStager { } .boxed() }); - if let 
Some(ttl) = cache_ttl { - if !ttl.is_zero() { - cache_builder = cache_builder.time_to_live(ttl); - } + if let Some(ttl) = cache_ttl + && !ttl.is_zero() + { + cache_builder = cache_builder.time_to_live(ttl); } let cache = cache_builder.build(); diff --git a/src/puffin/src/puffin_manager/tests.rs b/src/puffin/src/puffin_manager/tests.rs index bd3ec9d5a5..715668e40e 100644 --- a/src/puffin/src/puffin_manager/tests.rs +++ b/src/puffin/src/puffin_manager/tests.rs @@ -16,7 +16,7 @@ use std::collections::HashMap; use std::sync::Arc; use common_base::range_read::RangeReader; -use common_test_util::temp_dir::{create_temp_dir, TempDir}; +use common_test_util::temp_dir::{TempDir, create_temp_dir}; use tokio::io::AsyncReadExt as _; use crate::blob_metadata::CompressionCodec; diff --git a/src/query/src/analyze.rs b/src/query/src/analyze.rs index 00f1f68a4d..26da084852 100644 --- a/src/query/src/analyze.rs +++ b/src/query/src/analyze.rs @@ -30,10 +30,10 @@ use datafusion::execution::TaskContext; use datafusion::physical_plan::coalesce_partitions::CoalescePartitionsExec; use datafusion::physical_plan::stream::RecordBatchStreamAdapter; use datafusion::physical_plan::{ - accept, DisplayAs, DisplayFormatType, ExecutionPlan, PlanProperties, + DisplayAs, DisplayFormatType, ExecutionPlan, PlanProperties, accept, }; use datafusion_common::tree_node::{TreeNode, TreeNodeRecursion}; -use datafusion_common::{internal_err, DataFusionError}; +use datafusion_common::{DataFusionError, internal_err}; use datafusion_physical_expr::{Distribution, EquivalenceProperties, Partitioning}; use futures::StreamExt; use serde::Serialize; diff --git a/src/query/src/datafusion.rs b/src/query/src/datafusion.rs index f7577368b0..67a92f0be2 100644 --- a/src/query/src/datafusion.rs +++ b/src/query/src/datafusion.rs @@ -30,9 +30,9 @@ use common_query::{Output, OutputData, OutputMeta}; use common_recordbatch::adapter::RecordBatchStreamAdapter; use common_recordbatch::{EmptyRecordBatchStream, SendableRecordBatchStream}; use common_telemetry::tracing; +use datafusion::physical_plan::ExecutionPlan; use datafusion::physical_plan::analyze::AnalyzeExec; use datafusion::physical_plan::coalesce_partitions::CoalescePartitionsExec; -use datafusion::physical_plan::ExecutionPlan; use datafusion_common::ResolvedTableReference; use datafusion_expr::{ AggregateUDF, DmlStatement, LogicalPlan as DfLogicalPlan, LogicalPlan, WriteOp, @@ -41,10 +41,10 @@ use datatypes::prelude::VectorRef; use datatypes::schema::Schema; use futures_util::StreamExt; use session::context::QueryContextRef; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use sqlparser::ast::AnalyzeFormat; -use table::requests::{DeleteRequest, InsertRequest}; use table::TableRef; +use table::requests::{DeleteRequest, InsertRequest}; use crate::analyze::DistAnalyzeExec; use crate::dataframe::DataFrame; @@ -60,7 +60,7 @@ use crate::metrics::{OnDone, QUERY_STAGE_ELAPSED}; use crate::physical_wrapper::PhysicalPlanWrapperRef; use crate::planner::{DfLogicalPlanner, LogicalPlanner}; use crate::query_engine::{DescribeResult, QueryEngineContext, QueryEngineState}; -use crate::{metrics, QueryEngine}; +use crate::{QueryEngine, metrics}; /// Query parallelism hint key. /// This hint can be set in the query context to control the parallelism of the query execution. 
@@ -668,7 +668,7 @@ mod tests { use datatypes::schema::ColumnSchema; use datatypes::vectors::{Helper, UInt32Vector, UInt64Vector, VectorRef}; use session::context::{QueryContext, QueryContextBuilder}; - use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME}; + use table::table::numbers::{NUMBERS_TABLE_NAME, NumbersTable}; use super::*; use crate::options::QueryOptions; @@ -823,6 +823,9 @@ mod tests { true ) ); - assert_eq!("Limit: skip=0, fetch=20\n Aggregate: groupBy=[[]], aggr=[[sum(CAST(numbers.number AS UInt64))]]\n TableScan: numbers projection=[number]", format!("{}", logical_plan.display_indent())); + assert_eq!( + "Limit: skip=0, fetch=20\n Aggregate: groupBy=[[]], aggr=[[sum(CAST(numbers.number AS UInt64))]]\n TableScan: numbers projection=[number]", + format!("{}", logical_plan.display_indent()) + ); } } diff --git a/src/query/src/datafusion/planner.rs b/src/query/src/datafusion/planner.rs index 5607e724ff..4e6917fad3 100644 --- a/src/query/src/datafusion/planner.rs +++ b/src/query/src/datafusion/planner.rs @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::collections::hash_map::Entry; use std::collections::HashMap; +use std::collections::hash_map::Entry; use std::sync::Arc; use arrow_schema::DataType; @@ -21,16 +21,16 @@ use catalog::table_source::DfTableSourceProvider; use common_function::function::FunctionContext; use datafusion::common::TableReference; use datafusion::datasource::cte_worktable::CteWorkTable; -use datafusion::datasource::file_format::{format_as_file_type, FileFormatFactory}; +use datafusion::datasource::file_format::{FileFormatFactory, format_as_file_type}; use datafusion::datasource::provider_as_source; use datafusion::error::Result as DfResult; -use datafusion::execution::context::SessionState; use datafusion::execution::SessionStateDefaults; +use datafusion::execution::context::SessionState; use datafusion::sql::planner::ContextProvider; use datafusion::variable::VarType; +use datafusion_common::DataFusionError; use datafusion_common::config::ConfigOptions; use datafusion_common::file_options::file_type::FileType; -use datafusion_common::DataFusionError; use datafusion_expr::planner::{ExprPlanner, TypePlanner}; use datafusion_expr::var_provider::is_system_variables; use datafusion_expr::{AggregateUDF, ScalarUDF, TableSource, WindowUDF}; diff --git a/src/query/src/dist_plan/analyzer.rs b/src/query/src/dist_plan/analyzer.rs index a5d4d21729..45fef00a1b 100644 --- a/src/query/src/dist_plan/analyzer.rs +++ b/src/query/src/dist_plan/analyzer.rs @@ -19,12 +19,12 @@ use common_telemetry::debug; use datafusion::config::{ConfigExtension, ExtensionOptions}; use datafusion::datasource::DefaultTableSource; use datafusion::error::Result as DfResult; +use datafusion_common::Column; use datafusion_common::config::ConfigOptions; use datafusion_common::tree_node::{Transformed, TreeNode, TreeNodeRewriter}; -use datafusion_common::Column; use datafusion_expr::expr::{Exists, InSubquery}; use datafusion_expr::utils::expr_to_columns; -use datafusion_expr::{col as col_fn, Expr, LogicalPlan, LogicalPlanBuilder, Subquery}; +use datafusion_expr::{Expr, LogicalPlan, LogicalPlanBuilder, Subquery, col as col_fn}; use datafusion_optimizer::analyzer::AnalyzerRule; use datafusion_optimizer::simplify_expressions::SimplifyExpressions; use datafusion_optimizer::{OptimizerContext, OptimizerRule}; @@ -33,7 +33,7 @@ use table::metadata::TableType; use table::table::adapter::DfTableProviderAdapter; use 
crate::dist_plan::commutativity::{ - partial_commutative_transformer, Categorizer, Commutativity, + Categorizer, Commutativity, partial_commutative_transformer, }; use crate::dist_plan::merge_scan::MergeScanLogicalPlan; use crate::metrics::PUSH_DOWN_FALLBACK_ERRORS_TOTAL; @@ -472,46 +472,42 @@ impl PlanRewriter { return; } - if let LogicalPlan::TableScan(table_scan) = plan { - if let Some(source) = table_scan + if let LogicalPlan::TableScan(table_scan) = plan + && let Some(source) = table_scan .source .as_any() .downcast_ref::() - { - if let Some(provider) = source - .table_provider - .as_any() - .downcast_ref::() - { - let table = provider.table(); - if table.table_type() == TableType::Base { - let info = table.table_info(); - let partition_key_indices = info.meta.partition_key_indices.clone(); - let schema = info.meta.schema.clone(); - let mut partition_cols = partition_key_indices - .into_iter() - .map(|index| schema.column_name_by_index(index).to_string()) - .collect::>(); + && let Some(provider) = source + .table_provider + .as_any() + .downcast_ref::() + { + let table = provider.table(); + if table.table_type() == TableType::Base { + let info = table.table_info(); + let partition_key_indices = info.meta.partition_key_indices.clone(); + let schema = info.meta.schema.clone(); + let mut partition_cols = partition_key_indices + .into_iter() + .map(|index| schema.column_name_by_index(index).to_string()) + .collect::>(); - let partition_rules = table.partition_rules(); - let exist_phy_part_cols_not_in_logical_table = partition_rules - .map(|r| !r.extra_phy_cols_not_in_logical_table.is_empty()) - .unwrap_or(false); + let partition_rules = table.partition_rules(); + let exist_phy_part_cols_not_in_logical_table = partition_rules + .map(|r| !r.extra_phy_cols_not_in_logical_table.is_empty()) + .unwrap_or(false); - if exist_phy_part_cols_not_in_logical_table && partition_cols.is_empty() { - // there are other physical partition columns that are not in logical table and part cols are empty - // so we need to add a placeholder for it to prevent certain optimization - // this is used to make sure the final partition columns(that optimizer see) are not empty - // notice if originally partition_cols is not empty, then there is no need to add this place holder, - // as subset of phy part cols can still be used for certain optimization, and it works as if - // those columns are always null - // This helps with distinguishing between non-partitioned table and partitioned table with all phy part cols not in logical table - partition_cols - .push("__OTHER_PHYSICAL_PART_COLS_PLACEHOLDER__".to_string()); - } - self.partition_cols = Some(partition_cols); - } + if exist_phy_part_cols_not_in_logical_table && partition_cols.is_empty() { + // there are other physical partition columns that are not in logical table and part cols are empty + // so we need to add a placeholder for it to prevent certain optimization + // this is used to make sure the final partition columns(that optimizer see) are not empty + // notice if originally partition_cols is not empty, then there is no need to add this place holder, + // as subset of phy part cols can still be used for certain optimization, and it works as if + // those columns are always null + // This helps with distinguishing between non-partitioned table and partitioned table with all phy part cols not in logical table + partition_cols.push("__OTHER_PHYSICAL_PART_COLS_PLACEHOLDER__".to_string()); } + self.partition_cols = Some(partition_cols); } } } @@ -533,7 +529,9 @@ 
impl PlanRewriter { std::mem::take(&mut self.column_requirements), self.level, ); - debug!("PlanRewriter: enforce column requirements for node: {on_node} with rewriter: {rewriter:?}"); + debug!( + "PlanRewriter: enforce column requirements for node: {on_node} with rewriter: {rewriter:?}" + ); on_node = on_node.rewrite(&mut rewriter)?.data; debug!( "PlanRewriter: after enforced column requirements with rewriter: {rewriter:?} for node:\n{on_node}" diff --git a/src/query/src/dist_plan/analyzer/test.rs b/src/query/src/dist_plan/analyzer/test.rs index bbd78b64d0..2dafc91395 100644 --- a/src/query/src/dist_plan/analyzer/test.rs +++ b/src/query/src/dist_plan/analyzer/test.rs @@ -25,12 +25,12 @@ use datafusion::datasource::DefaultTableSource; use datafusion::functions_aggregate::expr_fn::avg; use datafusion::functions_aggregate::min_max::{max, min}; use datafusion_common::JoinType; -use datafusion_expr::{col, lit, Expr, LogicalPlanBuilder}; +use datafusion_expr::{Expr, LogicalPlanBuilder, col, lit}; use datafusion_sql::TableReference; use datatypes::data_type::ConcreteDataType; use datatypes::schema::{ColumnSchema, SchemaBuilder, SchemaRef}; -use futures::task::{Context, Poll}; use futures::Stream; +use futures::task::{Context, Poll}; use pretty_assertions::assert_eq; use store_api::data_source::DataSource; use store_api::storage::ScanRequest; diff --git a/src/query/src/dist_plan/analyzer/utils.rs b/src/query/src/dist_plan/analyzer/utils.rs index 83b617f7a1..e064e8fcea 100644 --- a/src/query/src/dist_plan/analyzer/utils.rs +++ b/src/query/src/dist_plan/analyzer/utils.rs @@ -38,30 +38,27 @@ impl AliasTracker { .source .as_any() .downcast_ref::() - { - if let Some(provider) = source + && let Some(provider) = source .table_provider .as_any() .downcast_ref::() - { - if provider.table().table_type() == TableType::Base { - let info = provider.table().table_info(); - let schema = info.meta.schema.clone(); - let col_schema = schema.column_schemas(); - let mapping = col_schema - .iter() - .map(|col| { - ( - col.name.clone(), - HashSet::from_iter(std::iter::once(Column::new_unqualified( - col.name.clone(), - ))), - ) - }) - .collect(); - return Some(Self { mapping }); - } - } + && provider.table().table_type() == TableType::Base + { + let info = provider.table().table_info(); + let schema = info.meta.schema.clone(); + let col_schema = schema.column_schemas(); + let mapping = col_schema + .iter() + .map(|col| { + ( + col.name.clone(), + HashSet::from_iter(std::iter::once(Column::new_unqualified( + col.name.clone(), + ))), + ) + }) + .collect(); + return Some(Self { mapping }); } None @@ -154,7 +151,7 @@ mod tests { use common_telemetry::init_default_ut_logging; use datafusion::error::Result as DfResult; use datafusion_common::tree_node::{TreeNode, TreeNodeRecursion, TreeNodeVisitor}; - use datafusion_expr::{col, LogicalPlanBuilder}; + use datafusion_expr::{LogicalPlanBuilder, col}; use super::*; use crate::dist_plan::analyzer::test::TestTable; diff --git a/src/query/src/dist_plan/commutativity.rs b/src/query/src/dist_plan/commutativity.rs index d1455b36e6..84102a1b66 100644 --- a/src/query/src/dist_plan/commutativity.rs +++ b/src/query/src/dist_plan/commutativity.rs @@ -15,7 +15,7 @@ use std::collections::HashSet; use std::sync::Arc; -use common_function::aggrs::aggr_wrapper::{aggr_state_func_name, StateMergeHelper}; +use common_function::aggrs::aggr_wrapper::{StateMergeHelper, aggr_state_func_name}; use common_function::function_registry::FUNCTION_REGISTRY; use common_telemetry::debug; use 
datafusion_expr::{Expr, LogicalPlan, UserDefinedLogicalNode}; @@ -23,9 +23,9 @@ use promql::extension_plan::{ EmptyMetric, InstantManipulate, RangeManipulate, SeriesDivide, SeriesNormalize, }; -use crate::dist_plan::analyzer::AliasMapping; -use crate::dist_plan::merge_sort::{merge_sort_transformer, MergeSortLogicalPlan}; use crate::dist_plan::MergeScanLogicalPlan; +use crate::dist_plan::analyzer::AliasMapping; +use crate::dist_plan::merge_sort::{MergeSortLogicalPlan, merge_sort_transformer}; pub struct StepTransformAction { extra_parent_plans: Vec, diff --git a/src/query/src/dist_plan/merge_scan.rs b/src/query/src/dist_plan/merge_scan.rs index 4d44fc5e51..698673ecc0 100644 --- a/src/query/src/dist_plan/merge_scan.rs +++ b/src/query/src/dist_plan/merge_scan.rs @@ -364,7 +364,11 @@ impl MergeScanExec { if explain_verbose { common_telemetry::info!( "Merge scan finish one region, partition: {}, region_id: {}, poll_duration: {:?}, first_consume: {}, do_get_cost: {:?}", - partition, region_id, poll_duration, metric.first_consume_time(), do_get_cost + partition, + region_id, + poll_duration, + metric.first_consume_time(), + do_get_cost ); } @@ -543,12 +547,18 @@ impl PartitionMetrics { if self.explain_verbose { common_telemetry::info!( "MergeScan partition {} finished: {} regions, total_poll_duration: {:?}, total_do_get_cost: {:?}", - self.partition, self.total_regions, self.total_poll_duration, self.total_do_get_cost + self.partition, + self.total_regions, + self.total_poll_duration, + self.total_do_get_cost ); } else { common_telemetry::debug!( "MergeScan partition {} finished: {} regions, total_poll_duration: {:?}, total_do_get_cost: {:?}", - self.partition, self.total_regions, self.total_poll_duration, self.total_do_get_cost + self.partition, + self.total_regions, + self.total_poll_duration, + self.total_do_get_cost ); } } @@ -623,19 +633,23 @@ impl DisplayAs for MergeScanExec { if i > 0 { write!(f, ", ")?; } - write!(f, "\"partition_{}\":{{\"regions\":{},\"total_poll_duration\":\"{:?}\",\"total_do_get_cost\":\"{:?}\",\"region_metrics\":[", - pm.partition, pm.total_regions, - pm.total_poll_duration, - pm.total_do_get_cost)?; + write!( + f, + "\"partition_{}\":{{\"regions\":{},\"total_poll_duration\":\"{:?}\",\"total_do_get_cost\":\"{:?}\",\"region_metrics\":[", + pm.partition, + pm.total_regions, + pm.total_poll_duration, + pm.total_do_get_cost + )?; for (j, rm) in pm.region_metrics.iter().enumerate() { if j > 0 { write!(f, ",")?; } - write!(f, "{{\"region_id\":\"{}\",\"poll_duration\":\"{:?}\",\"do_get_cost\":\"{:?}\",\"total_cost\":\"{:?}\"}}", - rm.region_id, - rm.poll_duration, - rm.do_get_cost, - rm.total_cost)?; + write!( + f, + "{{\"region_id\":\"{}\",\"poll_duration\":\"{:?}\",\"do_get_cost\":\"{:?}\",\"total_cost\":\"{:?}\"}}", + rm.region_id, rm.poll_duration, rm.do_get_cost, rm.total_cost + )?; } write!(f, "]}}")?; } diff --git a/src/query/src/dist_plan/planner.rs b/src/query/src/dist_plan/planner.rs index 1f95c6fe71..74567b727d 100644 --- a/src/query/src/dist_plan/planner.rs +++ b/src/query/src/dist_plan/planner.rs @@ -36,10 +36,10 @@ pub use table::metadata::TableType; use table::table::adapter::DfTableProviderAdapter; use table::table_name::TableName; +use crate::dist_plan::PredicateExtractor; use crate::dist_plan::merge_scan::{MergeScanExec, MergeScanLogicalPlan}; use crate::dist_plan::merge_sort::MergeSortLogicalPlan; use crate::dist_plan::region_pruner::ConstraintPruner; -use crate::dist_plan::PredicateExtractor; use crate::error::{CatalogSnafu, TableNotFoundSnafu}; use 
crate::region_query::RegionQueryHandlerRef; @@ -329,22 +329,21 @@ impl TreeNodeVisitor<'_> for TableNameExtractor { fn f_down(&mut self, node: &Self::Node) -> Result { match node { LogicalPlan::TableScan(scan) => { - if let Some(source) = scan.source.as_any().downcast_ref::() { - if let Some(provider) = source + if let Some(source) = scan.source.as_any().downcast_ref::() + && let Some(provider) = source .table_provider .as_any() .downcast_ref::() - { - if provider.table().table_type() == TableType::Base { - let info = provider.table().table_info(); - self.table_name = Some(TableName::new( - info.catalog_name.clone(), - info.schema_name.clone(), - info.name.clone(), - )); - } - return Ok(TreeNodeRecursion::Stop); + { + if provider.table().table_type() == TableType::Base { + let info = provider.table().table_info(); + self.table_name = Some(TableName::new( + info.catalog_name.clone(), + info.schema_name.clone(), + info.name.clone(), + )); } + return Ok(TreeNodeRecursion::Stop); } match &scan.table_name { TableReference::Full { diff --git a/src/query/src/dist_plan/predicate_extractor.rs b/src/query/src/dist_plan/predicate_extractor.rs index 8bd21d4e72..b139fb5b05 100644 --- a/src/query/src/dist_plan/predicate_extractor.rs +++ b/src/query/src/dist_plan/predicate_extractor.rs @@ -501,7 +501,7 @@ mod tests { use datafusion::arrow::datatypes::{DataType, Field, Schema}; use datafusion::common::Column; use datafusion::datasource::DefaultTableSource; - use datafusion_expr::{col, lit, LogicalPlanBuilder}; + use datafusion_expr::{LogicalPlanBuilder, col, lit}; use datatypes::value::Value; use partition::expr::{Operand, PartitionExpr, RestrictedOp}; diff --git a/src/query/src/dist_plan/region_pruner.rs b/src/query/src/dist_plan/region_pruner.rs index fd5c6a51ea..80a4a78998 100644 --- a/src/query/src/dist_plan/region_pruner.rs +++ b/src/query/src/dist_plan/region_pruner.rs @@ -17,6 +17,7 @@ use std::cmp::Ordering; use std::ops::Bound; +use GluonOp::*; use ahash::{HashMap, HashSet}; use common_telemetry::debug; use datatypes::prelude::ConcreteDataType; @@ -25,7 +26,6 @@ use partition::collider::{AtomicExpr, Collider, GluonOp, NucleonExpr}; use partition::expr::{Operand, PartitionExpr}; use partition::manager::PartitionInfo; use store_api::storage::RegionId; -use GluonOp::*; use crate::error::Result; @@ -407,7 +407,7 @@ impl ValueRange { #[cfg(test)] mod tests { use datatypes::value::Value; - use partition::expr::{col, Operand, PartitionExpr, RestrictedOp}; + use partition::expr::{Operand, PartitionExpr, RestrictedOp, col}; use store_api::storage::RegionId; use super::*; diff --git a/src/query/src/dummy_catalog.rs b/src/query/src/dummy_catalog.rs index d439ab77b1..d81767a904 100644 --- a/src/query/src/dummy_catalog.rs +++ b/src/query/src/dummy_catalog.rs @@ -22,8 +22,8 @@ use api::v1::SemanticType; use async_trait::async_trait; use catalog::error::Result as CatalogResult; use catalog::{CatalogManager, CatalogManagerRef}; -use common_recordbatch::filter::SimpleFilterEvaluator; use common_recordbatch::OrderOption; +use common_recordbatch::filter::SimpleFilterEvaluator; use datafusion::catalog::{CatalogProvider, CatalogProviderList, SchemaProvider, Session}; use datafusion::datasource::TableProvider; use datafusion::physical_plan::ExecutionPlan; @@ -36,9 +36,9 @@ use snafu::ResultExt; use store_api::metadata::RegionMetadataRef; use store_api::region_engine::RegionEngineRef; use store_api::storage::{RegionId, ScanRequest, TimeSeriesDistribution, TimeSeriesRowSelector}; +use table::TableRef; use 
table::metadata::{TableId, TableInfoRef}; use table::table::scan::RegionScanExec; -use table::TableRef; use crate::error::{GetRegionMetadataSnafu, Result}; diff --git a/src/query/src/log_query/planner.rs b/src/query/src/log_query/planner.rs index 66d915a941..d26b6aaba1 100644 --- a/src/query/src/log_query/planner.rs +++ b/src/query/src/log_query/planner.rs @@ -20,7 +20,7 @@ use datafusion::execution::SessionState; use datafusion_common::{DFSchema, ScalarValue}; use datafusion_expr::utils::{conjunction, disjunction}; use datafusion_expr::{ - col, lit, not, BinaryExpr, Expr, ExprSchemable, LogicalPlan, LogicalPlanBuilder, Operator, + BinaryExpr, Expr, ExprSchemable, LogicalPlan, LogicalPlanBuilder, Operator, col, lit, not, }; use datafusion_sql::TableReference; use datatypes::schema::Schema; @@ -556,8 +556,8 @@ impl LogQueryPlanner { mod tests { use std::sync::Arc; - use catalog::memory::MemoryCatalogManager; use catalog::RegisterTableRequest; + use catalog::memory::MemoryCatalogManager; use common_catalog::consts::DEFAULT_CATALOG_NAME; use common_query::test_util::DummyDecoder; use datafusion::execution::SessionStateBuilder; diff --git a/src/query/src/optimizer.rs b/src/query/src/optimizer.rs index 52a33029e2..4259b587ba 100644 --- a/src/query/src/optimizer.rs +++ b/src/query/src/optimizer.rs @@ -25,8 +25,8 @@ pub mod transcribe_atat; pub mod type_conversion; pub mod windowed_sort; -use datafusion_common::config::ConfigOptions; use datafusion_common::Result; +use datafusion_common::config::ConfigOptions; use datafusion_expr::LogicalPlan; use crate::QueryEngineContext; diff --git a/src/query/src/optimizer/constant_term.rs b/src/query/src/optimizer/constant_term.rs index 121f3470b2..13d7440e16 100644 --- a/src/query/src/optimizer/constant_term.rs +++ b/src/query/src/optimizer/constant_term.rs @@ -22,10 +22,10 @@ use common_function::scalars::matches_term::MatchesTermFinder; use datafusion::config::ConfigOptions; use datafusion::error::Result as DfResult; use datafusion::physical_optimizer::PhysicalOptimizerRule; -use datafusion::physical_plan::filter::FilterExec; use datafusion::physical_plan::ExecutionPlan; -use datafusion_common::tree_node::{Transformed, TreeNode}; +use datafusion::physical_plan::filter::FilterExec; use datafusion_common::ScalarValue; +use datafusion_common::tree_node::{Transformed, TreeNode}; use datafusion_expr::ColumnarValue; use datafusion_physical_expr::expressions::Literal; use datafusion_physical_expr::{PhysicalExpr, ScalarFunctionExpr}; @@ -178,26 +178,26 @@ impl PhysicalOptimizerRule for MatchesConstantTermOptimizer { return Ok(Transformed::no(expr)); } - if let Some(lit) = args[1].as_any().downcast_ref::() { - if let ScalarValue::Utf8(Some(term)) = lit.value() { - let finder = MatchesTermFinder::new(term); + if let Some(lit) = args[1].as_any().downcast_ref::() + && let ScalarValue::Utf8(Some(term)) = lit.value() + { + let finder = MatchesTermFinder::new(term); - // For debugging purpose. Not really precise but enough for most cases. - let probes = term - .split(|c: char| !c.is_alphanumeric() && c != '_') - .filter(|s| !s.is_empty()) - .map(|s| s.to_string()) - .collect(); + // For debugging purpose. Not really precise but enough for most cases. 
+ let probes = term + .split(|c: char| !c.is_alphanumeric() && c != '_') + .filter(|s| !s.is_empty()) + .map(|s| s.to_string()) + .collect(); - let expr = PreCompiledMatchesTermExpr { - text: args[0].clone(), - term: term.to_string(), - finder, - probes, - }; + let expr = PreCompiledMatchesTermExpr { + text: args[0].clone(), + term: term.to_string(), + finder, + probes, + }; - return Ok(Transformed::yes(Arc::new(expr))); - } + return Ok(Transformed::yes(Arc::new(expr))); } } @@ -235,8 +235,8 @@ mod tests { use arrow::array::{ArrayRef, StringArray}; use arrow::datatypes::{DataType, Field, Schema}; use arrow::record_batch::RecordBatch; - use catalog::memory::MemoryCatalogManager; use catalog::RegisterTableRequest; + use catalog::memory::MemoryCatalogManager; use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; use common_function::scalars::matches_term::MatchesTermFunction; use common_function::scalars::udf::create_udf; @@ -249,7 +249,7 @@ mod tests { use datafusion_common::{Column, DFSchema}; use datafusion_expr::expr::ScalarFunction; use datafusion_expr::{Expr, Literal, ScalarUDF}; - use datafusion_physical_expr::{create_physical_expr, ScalarFunctionExpr}; + use datafusion_physical_expr::{ScalarFunctionExpr, create_physical_expr}; use datatypes::prelude::ConcreteDataType; use datatypes::schema::ColumnSchema; use session::context::QueryContext; @@ -304,15 +304,17 @@ mod tests { .unwrap(); let table = EmptyTable::from_table_info(&table_info); let catalog_list = MemoryCatalogManager::with_default_setup(); - assert!(catalog_list - .register_table_sync(RegisterTableRequest { - catalog: DEFAULT_CATALOG_NAME.to_string(), - schema: DEFAULT_SCHEMA_NAME.to_string(), - table_name, - table_id: 1024, - table, - }) - .is_ok()); + assert!( + catalog_list + .register_table_sync(RegisterTableRequest { + catalog: DEFAULT_CATALOG_NAME.to_string(), + schema: DEFAULT_SCHEMA_NAME.to_string(), + table_name, + table_id: 1024, + table, + }) + .is_ok() + ); QueryEngineFactory::new( catalog_list, None, @@ -476,8 +478,10 @@ mod tests { "MatchesConstTerm(text@0, term: \"hello wo_rld\", probes: [\"hello\", \"wo_rld\"]" )); assert!(plan_str.contains("MatchesConstTerm(text@0, term: \"world\", probes: [\"world\"]")); - assert!(plan_str - .contains("MatchesConstTerm(text@0, term: \"greeting\", probes: [\"greeting\"]")); + assert!( + plan_str + .contains("MatchesConstTerm(text@0, term: \"greeting\", probes: [\"greeting\"]") + ); assert!(plan_str.contains("MatchesConstTerm(text@0, term: \"there\", probes: [\"there\"]")); assert!(plan_str.contains("MatchesConstTerm(text@0, term: \"42\", probes: [\"42\"]")); assert!(!plan_str.contains("matches_term")) diff --git a/src/query/src/optimizer/count_wildcard.rs b/src/query/src/optimizer/count_wildcard.rs index cdfb057ac0..4b32911f7a 100644 --- a/src/query/src/optimizer/count_wildcard.rs +++ b/src/query/src/optimizer/count_wildcard.rs @@ -19,9 +19,9 @@ use datafusion_common::tree_node::{ use datafusion_common::{Column, Result as DataFusionResult, ScalarValue}; use datafusion_expr::expr::{AggregateFunction, WindowFunction}; use datafusion_expr::utils::COUNT_STAR_EXPANSION; -use datafusion_expr::{col, lit, Expr, LogicalPlan, WindowFunctionDefinition}; -use datafusion_optimizer::utils::NamePreserver; +use datafusion_expr::{Expr, LogicalPlan, WindowFunctionDefinition, col, lit}; use datafusion_optimizer::AnalyzerRule; +use datafusion_optimizer::utils::NamePreserver; use datafusion_sql::TableReference; use table::table::adapter::DfTableProviderAdapter; @@ -146,29 
+146,26 @@ impl TreeNodeVisitor<'_> for TimeIndexFinder { self.table_alias = Some(subquery_alias.alias.clone()); } - if let LogicalPlan::TableScan(table_scan) = &node { - if let Some(source) = table_scan + if let LogicalPlan::TableScan(table_scan) = &node + && let Some(source) = table_scan .source .as_any() .downcast_ref::() - { - if let Some(adapter) = source - .table_provider - .as_any() - .downcast_ref::() - { - let table_info = adapter.table().table_info(); - self.table_alias - .get_or_insert(TableReference::bare(table_info.name.clone())); - self.time_index_col = table_info - .meta - .schema - .timestamp_column() - .map(|c| c.name.clone()); + && let Some(adapter) = source + .table_provider + .as_any() + .downcast_ref::() + { + let table_info = adapter.table().table_info(); + self.table_alias + .get_or_insert(TableReference::bare(table_info.name.clone())); + self.time_index_col = table_info + .meta + .schema + .timestamp_column() + .map(|c| c.name.clone()); - return Ok(TreeNodeRecursion::Stop); - } - } + return Ok(TreeNodeRecursion::Stop); } Ok(TreeNodeRecursion::Continue) diff --git a/src/query/src/optimizer/parallelize_scan.rs b/src/query/src/optimizer/parallelize_scan.rs index c554f65bed..c6baecc4b6 100644 --- a/src/query/src/optimizer/parallelize_scan.rs +++ b/src/query/src/optimizer/parallelize_scan.rs @@ -18,8 +18,8 @@ use std::sync::Arc; use common_telemetry::debug; use datafusion::config::ConfigOptions; use datafusion::physical_optimizer::PhysicalOptimizerRule; -use datafusion::physical_plan::sorts::sort::SortExec; use datafusion::physical_plan::ExecutionPlan; +use datafusion::physical_plan::sorts::sort::SortExec; use datafusion_common::tree_node::{Transformed, TreeNode}; use datafusion_common::{DataFusionError, Result}; use store_api::region_engine::PartitionRange; @@ -172,8 +172,8 @@ impl ParallelizeScan { #[cfg(test)] mod test { - use common_time::timestamp::TimeUnit; use common_time::Timestamp; + use common_time::timestamp::TimeUnit; use super::*; diff --git a/src/query/src/optimizer/pass_distribution.rs b/src/query/src/optimizer/pass_distribution.rs index 8a096ab780..9fb89a2dcc 100644 --- a/src/query/src/optimizer/pass_distribution.rs +++ b/src/query/src/optimizer/pass_distribution.rs @@ -17,8 +17,8 @@ use std::sync::Arc; use datafusion::config::ConfigOptions; use datafusion::physical_optimizer::PhysicalOptimizerRule; use datafusion::physical_plan::ExecutionPlan; -use datafusion_common::tree_node::{Transformed, TreeNode}; use datafusion_common::Result as DfResult; +use datafusion_common::tree_node::{Transformed, TreeNode}; use datafusion_physical_expr::Distribution; use crate::dist_plan::MergeScanExec; diff --git a/src/query/src/optimizer/remove_duplicate.rs b/src/query/src/optimizer/remove_duplicate.rs index 77c0ddd788..6ed81a75c7 100644 --- a/src/query/src/optimizer/remove_duplicate.rs +++ b/src/query/src/optimizer/remove_duplicate.rs @@ -16,11 +16,11 @@ use std::sync::Arc; use datafusion::config::ConfigOptions; use datafusion::physical_optimizer::PhysicalOptimizerRule; +use datafusion::physical_plan::ExecutionPlan; use datafusion::physical_plan::coalesce_batches::CoalesceBatchesExec; use datafusion::physical_plan::repartition::RepartitionExec; -use datafusion::physical_plan::ExecutionPlan; -use datafusion_common::tree_node::{Transformed, TreeNode}; use datafusion_common::Result as DfResult; +use datafusion_common::tree_node::{Transformed, TreeNode}; /// This is [PhysicalOptimizerRule] to remove duplicate physical plans such as two /// adjoining [CoalesceBatchesExec] or 
[RepartitionExec]. They won't have any effect diff --git a/src/query/src/optimizer/scan_hint.rs b/src/query/src/optimizer/scan_hint.rs index cd3907ca4f..1318c338e0 100644 --- a/src/query/src/optimizer/scan_hint.rs +++ b/src/query/src/optimizer/scan_hint.rs @@ -21,7 +21,7 @@ use datafusion::datasource::DefaultTableSource; use datafusion_common::tree_node::{Transformed, TreeNode, TreeNodeRecursion, TreeNodeVisitor}; use datafusion_common::{Column, Result}; use datafusion_expr::expr::Sort; -use datafusion_expr::{utils, Expr, LogicalPlan}; +use datafusion_expr::{Expr, LogicalPlan, utils}; use datafusion_optimizer::{OptimizerConfig, OptimizerRule}; use store_api::storage::{TimeSeriesDistribution, TimeSeriesRowSelector}; @@ -302,7 +302,7 @@ mod test { use datafusion::functions_aggregate::first_last::last_value_udaf; use datafusion_expr::expr::{AggregateFunction, AggregateFunctionParams}; - use datafusion_expr::{col, LogicalPlanBuilder}; + use datafusion_expr::{LogicalPlanBuilder, col}; use datafusion_optimizer::OptimizerContext; use store_api::storage::RegionId; diff --git a/src/query/src/optimizer/string_normalization.rs b/src/query/src/optimizer/string_normalization.rs index 071ea5bf1b..fcde0fc8d0 100644 --- a/src/query/src/optimizer/string_normalization.rs +++ b/src/query/src/optimizer/string_normalization.rs @@ -122,9 +122,9 @@ mod tests { use arrow::datatypes::TimeUnit::{Microsecond, Millisecond, Nanosecond, Second}; use arrow::datatypes::{DataType, SchemaRef}; use arrow_schema::{Field, Schema, TimeUnit}; - use datafusion::datasource::{provider_as_source, MemTable}; + use datafusion::datasource::{MemTable, provider_as_source}; use datafusion_common::config::ConfigOptions; - use datafusion_expr::{lit, Cast, Expr, LogicalPlan, LogicalPlanBuilder}; + use datafusion_expr::{Cast, Expr, LogicalPlan, LogicalPlanBuilder, lit}; use datafusion_optimizer::analyzer::AnalyzerRule; use crate::optimizer::string_normalization::StringNormalizationRule; @@ -142,8 +142,9 @@ mod tests { for (time_unit, proj) in projects { let plan = create_test_plan_with_project(proj); let result = StringNormalizationRule.analyze(plan, config).unwrap(); - let expected = format!("Projection: CAST(Utf8(\"2017-07-23 13:10:11\") AS Timestamp({:#?}, None))\n TableScan: t", - time_unit + let expected = format!( + "Projection: CAST(Utf8(\"2017-07-23 13:10:11\") AS Timestamp({:#?}, None))\n TableScan: t", + time_unit ); assert_eq!(expected, result.to_string()); } @@ -161,7 +162,7 @@ mod tests { .analyze(int_to_timestamp_plan, config) .unwrap(); let expected = String::from( - "Projection: CAST(Int64(158412331400600000) AS Timestamp(Nanosecond, None))\n TableScan: t" + "Projection: CAST(Int64(158412331400600000) AS Timestamp(Nanosecond, None))\n TableScan: t", ); assert_eq!(expected, result.to_string()); diff --git a/src/query/src/optimizer/transcribe_atat.rs b/src/query/src/optimizer/transcribe_atat.rs index 5f27707685..29886acfb9 100644 --- a/src/query/src/optimizer/transcribe_atat.rs +++ b/src/query/src/optimizer/transcribe_atat.rs @@ -18,8 +18,8 @@ use common_function::scalars::matches_term::MatchesTermFunction; use common_function::scalars::udf::create_udf; use common_function::state::FunctionState; use datafusion::config::ConfigOptions; -use datafusion_common::tree_node::{Transformed, TreeNode, TreeNodeRewriter}; use datafusion_common::Result; +use datafusion_common::tree_node::{Transformed, TreeNode, TreeNodeRewriter}; use datafusion_expr::expr::ScalarFunction; use datafusion_expr::{Expr, LogicalPlan}; use 
datafusion_optimizer::analyzer::AnalyzerRule; @@ -109,8 +109,8 @@ impl TreeNodeRewriter for TranscribeAtatRewriter { mod tests { use arrow_schema::SchemaRef; - use datafusion::datasource::{provider_as_source, MemTable}; - use datafusion::logical_expr::{col, lit, LogicalPlan, LogicalPlanBuilder}; + use datafusion::datasource::{MemTable, provider_as_source}; + use datafusion::logical_expr::{LogicalPlan, LogicalPlanBuilder, col, lit}; use datafusion_expr::{BinaryExpr, Operator}; use datatypes::arrow::datatypes::{DataType, Field, Schema}; diff --git a/src/query/src/optimizer/type_conversion.rs b/src/query/src/optimizer/type_conversion.rs index 8c44b098fd..21f95e5156 100644 --- a/src/query/src/optimizer/type_conversion.rs +++ b/src/query/src/optimizer/type_conversion.rs @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use common_time::timestamp::{TimeUnit, Timestamp}; use common_time::Timezone; +use common_time::timestamp::{TimeUnit, Timestamp}; use datafusion::config::ConfigOptions; use datafusion_common::tree_node::{Transformed, TreeNode, TreeNodeRewriter}; use datafusion_common::{DFSchemaRef, DataFusionError, Result, ScalarValue}; @@ -25,9 +25,9 @@ use datatypes::arrow::compute; use datatypes::arrow::datatypes::DataType; use session::context::QueryContextRef; +use crate::QueryEngineContext; use crate::optimizer::ExtensionAnalyzerRule; use crate::plan::ExtractExpr; -use crate::QueryEngineContext; /// TypeConversionRule converts some literal values in logical plan to other types according /// to data type of corresponding columns. @@ -129,10 +129,10 @@ struct TypeConverter { impl TypeConverter { fn column_type(&self, expr: &Expr) -> Option { - if let Expr::Column(_) = expr { - if let Ok(v) = expr.get_type(&self.schema) { - return Some(v); - } + if let Expr::Column(_) = expr + && let Ok(v) = expr.get_type(&self.schema) + { + return Some(v); } None } diff --git a/src/query/src/optimizer/windowed_sort.rs b/src/query/src/optimizer/windowed_sort.rs index 5f9fa92bac..8de6fc8317 100644 --- a/src/query/src/optimizer/windowed_sort.rs +++ b/src/query/src/optimizer/windowed_sort.rs @@ -16,6 +16,7 @@ use std::collections::HashSet; use std::sync::Arc; use datafusion::physical_optimizer::PhysicalOptimizerRule; +use datafusion::physical_plan::ExecutionPlan; use datafusion::physical_plan::coalesce_batches::CoalesceBatchesExec; use datafusion::physical_plan::coalesce_partitions::CoalescePartitionsExec; use datafusion::physical_plan::coop::CooperativeExec; @@ -24,9 +25,8 @@ use datafusion::physical_plan::projection::ProjectionExec; use datafusion::physical_plan::repartition::RepartitionExec; use datafusion::physical_plan::sorts::sort::SortExec; use datafusion::physical_plan::sorts::sort_preserving_merge::SortPreservingMergeExec; -use datafusion::physical_plan::ExecutionPlan; -use datafusion_common::tree_node::{Transformed, TreeNode}; use datafusion_common::Result as DataFusionResult; +use datafusion_common::tree_node::{Transformed, TreeNode}; use datafusion_physical_expr::expressions::Column as PhysicalColumn; use store_api::region_engine::PartitionRange; use table::table::scan::RegionScanExec; diff --git a/src/query/src/parser.rs b/src/query/src/parser.rs index 9e89ada3a9..826ddfaa50 100644 --- a/src/query/src/parser.rs +++ b/src/query/src/parser.rs @@ -21,9 +21,9 @@ use chrono::DateTime; use common_error::ext::{BoxedError, PlainError}; use common_error::status_code::StatusCode; use common_telemetry::tracing; +use 
promql_parser::parser::Expr::Extension; use promql_parser::parser::ast::{Extension as NodeExtension, ExtensionExpr}; use promql_parser::parser::value::ValueType; -use promql_parser::parser::Expr::Extension; use promql_parser::parser::{EvalStmt, Expr}; use session::context::QueryContextRef; use snafu::{OptionExt, ResultExt}; diff --git a/src/query/src/part_sort.rs b/src/query/src/part_sort.rs index 4d953420fa..e9d70ec17a 100644 --- a/src/query/src/part_sort.rs +++ b/src/query/src/part_sort.rs @@ -34,7 +34,7 @@ use datafusion::physical_plan::metrics::{BaselineMetrics, ExecutionPlanMetricsSe use datafusion::physical_plan::{ DisplayAs, DisplayFormatType, ExecutionPlan, ExecutionPlanProperties, PlanProperties, TopK, }; -use datafusion_common::{internal_err, DataFusionError}; +use datafusion_common::{DataFusionError, internal_err}; use datafusion_physical_expr::PhysicalSortExpr; use futures::{Stream, StreamExt}; use itertools::Itertools; @@ -377,10 +377,10 @@ impl PartSortStream { for (idx, val) in sort_column_iter { // ignore vacant time index data - if let Some(val) = val { - if val >= cur_range.end.value() || val < cur_range.start.value() { - return Ok(Some(idx)); - } + if let Some(val) = val + && (val >= cur_range.end.value() || val < cur_range.start.value()) + { + return Ok(Some(idx)); } } @@ -680,7 +680,7 @@ mod test { use store_api::region_engine::PartitionRange; use super::*; - use crate::test_util::{new_ts_array, MockInputExec}; + use crate::test_util::{MockInputExec, new_ts_array}; #[tokio::test] async fn fuzzy_test() { @@ -1097,11 +1097,13 @@ mod test { } panic!( "case_{} failed, opt: {:?},\n real output has {} batches, {} rows, expected has {} batches with {} rows\nfull msg: {}", - case_id, opt, + case_id, + opt, real_output.len(), - real_output.iter().map(|x|x.num_rows()).sum::(), + real_output.iter().map(|x| x.num_rows()).sum::(), expected_output.len(), - expected_output.iter().map(|x|x.num_rows()).sum::(), full_msg + expected_output.iter().map(|x| x.num_rows()).sum::(), + full_msg ); } } diff --git a/src/query/src/plan.rs b/src/query/src/plan.rs index eb6765093e..d3eafd84c0 100644 --- a/src/query/src/plan.rs +++ b/src/query/src/plan.rs @@ -15,8 +15,8 @@ use std::collections::HashSet; use datafusion::datasource::DefaultTableSource; -use datafusion_common::tree_node::{Transformed, TreeNode, TreeNodeRewriter}; use datafusion_common::TableReference; +use datafusion_common::tree_node::{Transformed, TreeNode, TreeNodeRewriter}; use datafusion_expr::{Expr, LogicalPlan}; use session::context::QueryContextRef; pub use table::metadata::TableType; @@ -40,21 +40,19 @@ impl TreeNodeRewriter for TableNamesExtractAndRewriter { ) -> datafusion::error::Result> { match node { LogicalPlan::TableScan(mut scan) => { - if let Some(source) = scan.source.as_any().downcast_ref::() { - if let Some(provider) = source + if let Some(source) = scan.source.as_any().downcast_ref::() + && let Some(provider) = source .table_provider .as_any() .downcast_ref::() - { - if provider.table().table_type() == TableType::Base { - let info = provider.table().table_info(); - self.table_names.insert(TableName::new( - info.catalog_name.clone(), - info.schema_name.clone(), - info.name.clone(), - )); - } - } + && provider.table().table_type() == TableType::Base + { + let info = provider.table().table_info(); + self.table_names.insert(TableName::new( + info.catalog_name.clone(), + info.schema_name.clone(), + info.name.clone(), + )); } match &scan.table_name { TableReference::Full { @@ -144,7 +142,7 @@ pub(crate) mod tests { use 
arrow::datatypes::{DataType, Field, Schema, SchemaRef, TimeUnit}; use common_catalog::consts::DEFAULT_CATALOG_NAME; use datafusion::logical_expr::builder::LogicalTableSource; - use datafusion::logical_expr::{col, lit, LogicalPlan, LogicalPlanBuilder}; + use datafusion::logical_expr::{LogicalPlan, LogicalPlanBuilder, col, lit}; use session::context::QueryContextBuilder; use super::*; diff --git a/src/query/src/planner.rs b/src/query/src/planner.rs index e6e9cacdc0..b3aaad0a38 100644 --- a/src/query/src/planner.rs +++ b/src/query/src/planner.rs @@ -23,24 +23,24 @@ use common_telemetry::tracing; use datafusion::common::DFSchema; use datafusion::execution::context::SessionState; use datafusion::sql::planner::PlannerContext; -use datafusion_expr::{col, Expr as DfExpr, LogicalPlan, LogicalPlanBuilder}; +use datafusion_expr::{Expr as DfExpr, LogicalPlan, LogicalPlanBuilder, col}; use datafusion_sql::planner::{ParserOptions, SqlToRel}; use log_query::LogQuery; use promql_parser::parser::EvalStmt; use session::context::QueryContextRef; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; +use sql::CteContent; use sql::ast::Expr as SqlExpr; use sql::statements::query::Query; use sql::statements::statement::Statement; use sql::statements::tql::Tql; -use sql::CteContent; use crate::error::{ CteColumnSchemaMismatchSnafu, PlanSqlSnafu, QueryPlanSnafu, Result, SqlSnafu, UnimplementedSnafu, }; use crate::log_query::planner::LogQueryPlanner; -use crate::parser::{PromQuery, QueryLanguageParser, QueryStatement, DEFAULT_LOOKBACK_STRING}; +use crate::parser::{DEFAULT_LOOKBACK_STRING, PromQuery, QueryLanguageParser, QueryStatement}; use crate::promql::planner::PromPlanner; use crate::query_engine::{DefaultPlanDecoder, QueryEngineState}; use crate::range_select::plan_rewrite::RangePlanRewriter; diff --git a/src/query/src/promql/label_values.rs b/src/query/src/promql/label_values.rs index f15f19b405..4f6ea42a60 100644 --- a/src/query/src/promql/label_values.rs +++ b/src/query/src/promql/label_values.rs @@ -14,11 +14,11 @@ use std::time::{SystemTime, UNIX_EPOCH}; -use common_time::timestamp::TimeUnit; use common_time::Timestamp; +use common_time::timestamp::TimeUnit; use datafusion_common::{Column, ScalarValue}; use datafusion_expr::utils::conjunction; -use datafusion_expr::{col, Expr, LogicalPlan, LogicalPlanBuilder}; +use datafusion_expr::{Expr, LogicalPlan, LogicalPlanBuilder, col}; use snafu::{OptionExt, ResultExt}; use table::TableRef; diff --git a/src/query/src/promql/planner.rs b/src/query/src/promql/planner.rs index 2ef0d75462..6a4a200c3f 100644 --- a/src/query/src/promql/planner.rs +++ b/src/query/src/promql/planner.rs @@ -47,30 +47,31 @@ use datafusion::sql::TableReference; use datafusion_common::{DFSchema, NullEquality}; use datafusion_expr::expr::WindowFunctionParams; use datafusion_expr::utils::conjunction; -use datafusion_expr::{col, lit, ExprSchemable, Literal, SortExpr}; +use datafusion_expr::{ExprSchemable, Literal, SortExpr, col, lit}; use datatypes::arrow::datatypes::{DataType as ArrowDataType, TimeUnit as ArrowTimeUnit}; use datatypes::data_type::ConcreteDataType; use itertools::Itertools; use once_cell::sync::Lazy; use promql::extension_plan::{ - build_special_time_expr, Absent, EmptyMetric, HistogramFold, InstantManipulate, Millisecond, - RangeManipulate, ScalarCalculate, SeriesDivide, SeriesNormalize, UnionDistinctOn, + Absent, EmptyMetric, HistogramFold, InstantManipulate, Millisecond, RangeManipulate, + ScalarCalculate, SeriesDivide, SeriesNormalize, UnionDistinctOn, 
build_special_time_expr, }; use promql::functions::{ - quantile_udaf, AbsentOverTime, AvgOverTime, Changes, CountOverTime, Delta, Deriv, HoltWinters, - IDelta, Increase, LastOverTime, MaxOverTime, MinOverTime, PredictLinear, PresentOverTime, + AbsentOverTime, AvgOverTime, Changes, CountOverTime, Delta, Deriv, HoltWinters, IDelta, + Increase, LastOverTime, MaxOverTime, MinOverTime, PredictLinear, PresentOverTime, QuantileOverTime, Rate, Resets, Round, StddevOverTime, StdvarOverTime, SumOverTime, + quantile_udaf, }; -use promql_parser::label::{MatchOp, Matcher, Matchers, METRIC_NAME}; +use promql_parser::label::{METRIC_NAME, MatchOp, Matcher, Matchers}; use promql_parser::parser::token::TokenType; use promql_parser::parser::{ - token, AggregateExpr, BinModifier, BinaryExpr as PromBinaryExpr, Call, EvalStmt, - Expr as PromExpr, Function, FunctionArgs as PromFunctionArgs, LabelModifier, MatrixSelector, - NumberLiteral, Offset, ParenExpr, StringLiteral, SubqueryExpr, UnaryExpr, - VectorMatchCardinality, VectorSelector, + AggregateExpr, BinModifier, BinaryExpr as PromBinaryExpr, Call, EvalStmt, Expr as PromExpr, + Function, FunctionArgs as PromFunctionArgs, LabelModifier, MatrixSelector, NumberLiteral, + Offset, ParenExpr, StringLiteral, SubqueryExpr, UnaryExpr, VectorMatchCardinality, + VectorSelector, token, }; use regex::{self, Regex}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::metric_engine_consts::{ DATA_SCHEMA_TABLE_ID_COLUMN_NAME, DATA_SCHEMA_TSID_COLUMN_NAME, }; @@ -854,12 +855,12 @@ impl PromPlanner { // some special functions that are not expression but a plan match func.name { SPECIAL_HISTOGRAM_QUANTILE => { - return self.create_histogram_plan(args, query_engine_state).await + return self.create_histogram_plan(args, query_engine_state).await; } SPECIAL_VECTOR_FUNCTION => return self.create_vector_plan(args).await, SCALAR_FUNCTION => return self.create_scalar_plan(args, query_engine_state).await, SPECIAL_ABSENT_FUNCTION => { - return self.create_absent_plan(args, query_engine_state).await + return self.create_absent_plan(args, query_engine_state).await; } _ => {} } @@ -3373,8 +3374,8 @@ enum ScalarFunc { mod test { use std::time::{Duration, UNIX_EPOCH}; - use catalog::memory::{new_memory_catalog_manager, MemoryCatalogManager}; use catalog::RegisterTableRequest; + use catalog::memory::{MemoryCatalogManager, new_memory_catalog_manager}; use common_base::Plugins; use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; use common_query::test_util::DummyDecoder; @@ -3448,15 +3449,17 @@ mod test { .unwrap(); let table = EmptyTable::from_table_info(&table_info); - assert!(catalog_list - .register_table_sync(RegisterTableRequest { - catalog: DEFAULT_CATALOG_NAME.to_string(), - schema: schema_name.to_string(), - table_name: table_name.to_string(), - table_id: 1024, - table, - }) - .is_ok()); + assert!( + catalog_list + .register_table_sync(RegisterTableRequest { + catalog: DEFAULT_CATALOG_NAME.to_string(), + schema: schema_name.to_string(), + table_name: table_name.to_string(), + table_id: 1024, + table, + }) + .is_ok() + ); } DfTableSourceProvider::new( @@ -3510,15 +3513,17 @@ mod test { .unwrap(); let table = EmptyTable::from_table_info(&table_info); - assert!(catalog_list - .register_table_sync(RegisterTableRequest { - catalog: DEFAULT_CATALOG_NAME.to_string(), - schema: schema_name.to_string(), - table_name: table_name.to_string(), - table_id: 1024, - table, - }) - .is_ok()); + assert!( + catalog_list + 
.register_table_sync(RegisterTableRequest { + catalog: DEFAULT_CATALOG_NAME.to_string(), + schema: schema_name.to_string(), + table_name: table_name.to_string(), + table_id: 1024, + table, + }) + .is_ok() + ); } DfTableSourceProvider::new( @@ -3910,7 +3915,7 @@ mod test { .await .unwrap(); - let expected = String::from( + let expected = String::from( "Projection: rhs.tag_0, rhs.timestamp, lhs.field_0 + rhs.field_0 AS lhs.field_0 + rhs.field_0 [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), lhs.field_0 + rhs.field_0:Float64;N]\ \n Inner Join: lhs.tag_0 = rhs.tag_0, lhs.timestamp = rhs.timestamp [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ \n SubqueryAlias: lhs [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ @@ -3924,7 +3929,7 @@ mod test { \n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ \n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ \n Filter: some_metric.tag_0 = Utf8(\"bar\") AND some_metric.timestamp >= TimestampMillisecond(-1000, None) AND some_metric.timestamp <= TimestampMillisecond(100001000, None) [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ - \n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]" + \n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]", ); assert_eq!(plan.display_indent_schema().to_string(), expected); @@ -3971,7 +3976,7 @@ mod test { \n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ \n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ \n Filter: some_metric.tag_0 = Utf8(\"bar\") AND some_metric.timestamp >= TimestampMillisecond(-1000, None) AND some_metric.timestamp <= TimestampMillisecond(100001000, None) [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ - \n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]" + \n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]", ); indie_query_plan_compare(query, expected).await; @@ -3994,7 +3999,7 @@ mod test { \n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ \n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ \n Filter: some_metric.timestamp >= TimestampMillisecond(-1000, None) AND some_metric.timestamp <= TimestampMillisecond(100001000, None) [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ - \n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]" + \n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]", ); indie_query_plan_compare(query, expected).await; @@ -4009,7 +4014,7 @@ mod test { \n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ \n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ \n Filter: 
some_metric.timestamp >= TimestampMillisecond(-1000, None) AND some_metric.timestamp <= TimestampMillisecond(100001000, None) [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ - \n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]" + \n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]", ); indie_query_plan_compare(query, expected).await; @@ -4024,7 +4029,7 @@ mod test { \n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ \n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ \n Filter: some_metric.timestamp >= TimestampMillisecond(-1000, None) AND some_metric.timestamp <= TimestampMillisecond(100001000, None) [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ - \n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]" + \n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]", ); indie_query_plan_compare(query, expected).await; @@ -4041,7 +4046,7 @@ mod test { \n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ \n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ \n Filter: some_metric.timestamp >= TimestampMillisecond(-301000, None) AND some_metric.timestamp <= TimestampMillisecond(100001000, None) [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ - \n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]" + \n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]", ); indie_query_plan_compare(query, expected).await; @@ -4056,7 +4061,7 @@ mod test { \n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ \n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ \n Filter: some_metric.timestamp >= TimestampMillisecond(-1000, None) AND some_metric.timestamp <= TimestampMillisecond(100001000, None) [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ - \n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]" + \n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]", ); indie_query_plan_compare(query, expected).await; @@ -4073,7 +4078,7 @@ mod test { \n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ \n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ \n Filter: some_metric.timestamp >= TimestampMillisecond(-301000, None) AND some_metric.timestamp <= TimestampMillisecond(100001000, None) [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ - \n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]" + \n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]", ); indie_query_plan_compare(query, expected).await; @@ -4487,7 +4492,7 @@ mod test { \n 
PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ \n Sort: greptime_private.some_alt_metric.tag_0 ASC NULLS FIRST, greptime_private.some_alt_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ \n Filter: greptime_private.some_alt_metric.timestamp >= TimestampMillisecond(-1000, None) AND greptime_private.some_alt_metric.timestamp <= TimestampMillisecond(100001000, None) [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ - \n TableScan: greptime_private.some_alt_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]" + \n TableScan: greptime_private.some_alt_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]", ); indie_query_plan_compare(query, expected).await; @@ -4498,13 +4503,14 @@ mod test { \n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ \n Sort: greptime_private.some_alt_metric.tag_0 ASC NULLS FIRST, greptime_private.some_alt_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ \n Filter: greptime_private.some_alt_metric.timestamp >= TimestampMillisecond(-1000, None) AND greptime_private.some_alt_metric.timestamp <= TimestampMillisecond(100001000, None) [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ - \n TableScan: greptime_private.some_alt_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]" + \n TableScan: greptime_private.some_alt_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]", ); indie_query_plan_compare(query, expected).await; let query = "some_alt_metric{__schema__=\"greptime_private\"} / some_metric"; - let expected = String::from("Projection: some_metric.tag_0, some_metric.timestamp, greptime_private.some_alt_metric.field_0 / some_metric.field_0 AS greptime_private.some_alt_metric.field_0 / some_metric.field_0 [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), greptime_private.some_alt_metric.field_0 / some_metric.field_0:Float64;N]\ + let expected = String::from( + "Projection: some_metric.tag_0, some_metric.timestamp, greptime_private.some_alt_metric.field_0 / some_metric.field_0 AS greptime_private.some_alt_metric.field_0 / some_metric.field_0 [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), greptime_private.some_alt_metric.field_0 / some_metric.field_0:Float64;N]\ \n Inner Join: greptime_private.some_alt_metric.tag_0 = some_metric.tag_0, greptime_private.some_alt_metric.timestamp = some_metric.timestamp [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ \n SubqueryAlias: greptime_private.some_alt_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ \n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ @@ -4517,7 +4523,8 @@ mod test { \n PromSeriesDivide: tags=[\"tag_0\"] [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ \n Sort: some_metric.tag_0 ASC NULLS FIRST, some_metric.timestamp ASC NULLS FIRST [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ \n Filter: some_metric.timestamp >= TimestampMillisecond(-1000, None) AND some_metric.timestamp <= TimestampMillisecond(100001000, None) [tag_0:Utf8, 
timestamp:Timestamp(Millisecond, None), field_0:Float64;N]\ - \n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]"); + \n TableScan: some_metric [tag_0:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N]", + ); indie_query_plan_compare(query, expected).await; } @@ -4598,15 +4605,17 @@ mod test { .build() .unwrap(); let table = EmptyTable::from_table_info(&table_info); - assert!(catalog_list - .register_table_sync(RegisterTableRequest { - catalog: DEFAULT_CATALOG_NAME.to_string(), - schema: DEFAULT_SCHEMA_NAME.to_string(), - table_name: "metrics".to_string(), - table_id: 1024, - table, - }) - .is_ok()); + assert!( + catalog_list + .register_table_sync(RegisterTableRequest { + catalog: DEFAULT_CATALOG_NAME.to_string(), + schema: DEFAULT_SCHEMA_NAME.to_string(), + table_name: "metrics".to_string(), + table_id: 1024, + table, + }) + .is_ok() + ); let plan = PromPlanner::stmt_to_plan( DfTableSourceProvider::new( @@ -4629,8 +4638,9 @@ mod test { ) .await .unwrap(); - assert_eq!(plan.display_indent_schema().to_string(), - "PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [field:Float64;N, tag:Utf8, timestamp:Timestamp(Millisecond, None)]\ + assert_eq!( + plan.display_indent_schema().to_string(), + "PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [field:Float64;N, tag:Utf8, timestamp:Timestamp(Millisecond, None)]\ \n PromSeriesDivide: tags=[\"tag\"] [field:Float64;N, tag:Utf8, timestamp:Timestamp(Millisecond, None)]\ \n Sort: metrics.tag ASC NULLS FIRST, metrics.timestamp ASC NULLS FIRST [field:Float64;N, tag:Utf8, timestamp:Timestamp(Millisecond, None)]\ \n Filter: metrics.tag = Utf8(\"1\") AND metrics.timestamp >= TimestampMillisecond(-1000, None) AND metrics.timestamp <= TimestampMillisecond(100001000, None) [field:Float64;N, tag:Utf8, timestamp:Timestamp(Millisecond, None)]\ @@ -4658,8 +4668,9 @@ mod test { ) .await .unwrap(); - assert_eq!(plan.display_indent_schema().to_string(), - "Filter: prom_avg_over_time(timestamp_range,field) IS NOT NULL [timestamp:Timestamp(Millisecond, None), prom_avg_over_time(timestamp_range,field):Float64;N, tag:Utf8]\ + assert_eq!( + plan.display_indent_schema().to_string(), + "Filter: prom_avg_over_time(timestamp_range,field) IS NOT NULL [timestamp:Timestamp(Millisecond, None), prom_avg_over_time(timestamp_range,field):Float64;N, tag:Utf8]\ \n Projection: metrics.timestamp, prom_avg_over_time(timestamp_range, field) AS prom_avg_over_time(timestamp_range,field), metrics.tag [timestamp:Timestamp(Millisecond, None), prom_avg_over_time(timestamp_range,field):Float64;N, tag:Utf8]\ \n PromRangeManipulate: req range=[0..100000000], interval=[5000], eval range=[5000], time index=[timestamp], values=[\"field\"] [field:Dictionary(Int64, Float64);N, tag:Utf8, timestamp:Timestamp(Millisecond, None), timestamp_range:Dictionary(Int64, Timestamp(Millisecond, None))]\ \n PromSeriesNormalize: offset=[0], time index=[timestamp], filter NaN: [true] [field:Float64;N, tag:Utf8, timestamp:Timestamp(Millisecond, None)]\ diff --git a/src/query/src/query_engine/default_serializer.rs b/src/query/src/query_engine/default_serializer.rs index 0e10cea30a..a78d0a3bd6 100644 --- a/src/query/src/query_engine/default_serializer.rs +++ b/src/query/src/query_engine/default_serializer.rs @@ -29,9 +29,10 @@ use datafusion::logical_expr::LogicalPlan; use datafusion_expr::UserDefinedLogicalNode; use 
greptime_proto::substrait_extension::MergeScan as PbMergeScan; use promql::functions::{ - quantile_udaf, AbsentOverTime, AvgOverTime, Changes, CountOverTime, Delta, Deriv, HoltWinters, - IDelta, Increase, LastOverTime, MaxOverTime, MinOverTime, PredictLinear, PresentOverTime, + AbsentOverTime, AvgOverTime, Changes, CountOverTime, Delta, Deriv, HoltWinters, IDelta, + Increase, LastOverTime, MaxOverTime, MinOverTime, PredictLinear, PresentOverTime, QuantileOverTime, Rate, Resets, Round, StddevOverTime, StdvarOverTime, SumOverTime, + quantile_udaf, }; use prost::Message; use session::context::QueryContextRef; @@ -184,15 +185,15 @@ impl SubstraitPlanDecoder for DefaultPlanDecoder { #[cfg(test)] mod tests { use datafusion::catalog::TableProvider; - use datafusion_expr::{col, lit, LogicalPlanBuilder, LogicalTableSource}; + use datafusion_expr::{LogicalPlanBuilder, LogicalTableSource, col, lit}; use datatypes::arrow::datatypes::SchemaRef; use session::context::QueryContext; use super::*; + use crate::QueryEngineFactory; use crate::dummy_catalog::DummyCatalogList; use crate::optimizer::test_util::mock_table_provider; use crate::options::QueryOptions; - use crate::QueryEngineFactory; fn mock_plan(schema: SchemaRef) -> LogicalPlan { let table_source = LogicalTableSource::new(schema); diff --git a/src/query/src/query_engine/state.rs b/src/query/src/query_engine/state.rs index d1b5c542e1..51b7f68684 100644 --- a/src/query/src/query_engine/state.rs +++ b/src/query/src/query_engine/state.rs @@ -27,12 +27,12 @@ use common_function::state::FunctionState; use common_telemetry::warn; use datafusion::dataframe::DataFrame; use datafusion::error::Result as DfResult; +use datafusion::execution::SessionStateBuilder; use datafusion::execution::context::{QueryPlanner, SessionConfig, SessionContext, SessionState}; use datafusion::execution::runtime_env::RuntimeEnv; -use datafusion::execution::SessionStateBuilder; +use datafusion::physical_optimizer::PhysicalOptimizerRule; use datafusion::physical_optimizer::optimizer::PhysicalOptimizer; use datafusion::physical_optimizer::sanity_checker::SanityCheckPlan; -use datafusion::physical_optimizer::PhysicalOptimizerRule; use datafusion::physical_plan::ExecutionPlan; use datafusion::physical_planner::{DefaultPhysicalPlanner, ExtensionPlanner, PhysicalPlanner}; use datafusion_expr::{AggregateUDF, LogicalPlan as DfLogicalPlan}; @@ -40,12 +40,14 @@ use datafusion_optimizer::analyzer::Analyzer; use datafusion_optimizer::optimizer::Optimizer; use partition::manager::PartitionRuleManagerRef; use promql::extension_plan::PromExtensionPlanner; -use table::table::adapter::DfTableProviderAdapter; use table::TableRef; +use table::table::adapter::DfTableProviderAdapter; +use crate::QueryEngineContext; use crate::dist_plan::{ DistExtensionPlanner, DistPlannerAnalyzer, DistPlannerOptions, MergeSortExtensionPlanner, }; +use crate::optimizer::ExtensionAnalyzerRule; use crate::optimizer::constant_term::MatchesConstantTermOptimizer; use crate::optimizer::count_wildcard::CountWildcardToTimeIndexRule; use crate::optimizer::parallelize_scan::ParallelizeScan; @@ -56,13 +58,11 @@ use crate::optimizer::string_normalization::StringNormalizationRule; use crate::optimizer::transcribe_atat::TranscribeAtatRule; use crate::optimizer::type_conversion::TypeConversionRule; use crate::optimizer::windowed_sort::WindowedSortPhysicalRule; -use crate::optimizer::ExtensionAnalyzerRule; use crate::options::QueryOptions as QueryOptionsNew; -use crate::query_engine::options::QueryOptions; use 
crate::query_engine::DefaultSerializer; +use crate::query_engine::options::QueryOptions; use crate::range_select::planner::RangeSelectPlanner; use crate::region_query::RegionQueryHandlerRef; -use crate::QueryEngineContext; /// Query engine global state #[derive(Clone)] diff --git a/src/query/src/range_select/plan.rs b/src/query/src/range_select/plan.rs index 1e2f74f5b9..663acca0e0 100644 --- a/src/query/src/range_select/plan.rs +++ b/src/query/src/range_select/plan.rs @@ -22,13 +22,13 @@ use std::task::{Context, Poll}; use std::time::Duration; use ahash::RandomState; -use arrow::compute::{self, cast_with_options, take_arrays, CastOptions}; +use arrow::compute::{self, CastOptions, cast_with_options, take_arrays}; use arrow_schema::{DataType, Field, Schema, SchemaRef, SortOptions, TimeUnit}; use common_recordbatch::DfSendableRecordBatchStream; use datafusion::common::Result as DataFusionResult; use datafusion::error::Result as DfResult; -use datafusion::execution::context::SessionState; use datafusion::execution::TaskContext; +use datafusion::execution::context::SessionState; use datafusion::physical_plan::execution_plan::{Boundedness, EmissionType}; use datafusion::physical_plan::metrics::{BaselineMetrics, ExecutionPlanMetricsSet, MetricsSet}; use datafusion::physical_plan::{ @@ -37,14 +37,14 @@ use datafusion::physical_plan::{ }; use datafusion_common::hash_utils::create_hashes; use datafusion_common::{DFSchema, DFSchemaRef, DataFusionError, ScalarValue}; -use datafusion_expr::utils::{exprlist_to_fields, COUNT_STAR_EXPANSION}; +use datafusion_expr::utils::{COUNT_STAR_EXPANSION, exprlist_to_fields}; use datafusion_expr::{ - lit, Accumulator, Expr, ExprSchemable, LogicalPlan, UserDefinedLogicalNodeCore, + Accumulator, Expr, ExprSchemable, LogicalPlan, UserDefinedLogicalNodeCore, lit, }; use datafusion_physical_expr::aggregate::{AggregateExprBuilder, AggregateFunctionExpr}; use datafusion_physical_expr::{ - create_physical_expr, create_physical_sort_expr, Distribution, EquivalenceProperties, - Partitioning, PhysicalExpr, PhysicalSortExpr, + Distribution, EquivalenceProperties, Partitioning, PhysicalExpr, PhysicalSortExpr, + create_physical_expr, create_physical_sort_expr, }; use datatypes::arrow::array::{ Array, ArrayRef, TimestampMillisecondArray, TimestampMillisecondBuilder, UInt32Builder, @@ -52,7 +52,7 @@ use datatypes::arrow::array::{ use datatypes::arrow::datatypes::{ArrowPrimitiveType, TimestampMillisecondType}; use datatypes::arrow::record_batch::RecordBatch; use datatypes::arrow::row::{OwnedRow, RowConverter, SortField}; -use futures::{ready, Stream}; +use futures::{Stream, ready}; use futures_util::StreamExt; use snafu::ensure; @@ -1175,7 +1175,9 @@ impl Stream for RangeSelectStream { Some(Ok(batch)) => { if let Err(e) = self.update_range_context(batch) { common_telemetry::debug!( - "RangeSelectStream cannot update range context, schema: {:?}, err: {:?}", self.schema, e + "RangeSelectStream cannot update range context, schema: {:?}, err: {:?}", + self.schema, + e ); return Poll::Ready(Some(Err(e))); } @@ -1247,8 +1249,8 @@ mod test { use datafusion::functions_aggregate::min_max; use datafusion::physical_plan::sorts::sort::SortExec; use datafusion::prelude::SessionContext; - use datafusion_physical_expr::expressions::Column; use datafusion_physical_expr::PhysicalSortExpr; + use datafusion_physical_expr::expressions::Column; use datatypes::arrow::array::TimestampMillisecondArray; use datatypes::arrow_array::StringArray; @@ -1712,13 +1714,13 @@ mod test { Fill::try_from_str("WHAT", 
&DataType::UInt8) .unwrap_err() .to_string(), - "Error during planning: WHAT is not a valid fill option, fail to convert to a const value. { Arrow error: Cast error: Cannot cast string 'WHAT' to value of UInt8 type }" + "Error during planning: WHAT is not a valid fill option, fail to convert to a const value. { Arrow error: Cast error: Cannot cast string 'WHAT' to value of UInt8 type }" ); assert_eq!( Fill::try_from_str("8.0", &DataType::UInt8) .unwrap_err() .to_string(), - "Error during planning: 8.0 is not a valid fill option, fail to convert to a const value. { Arrow error: Cast error: Cannot cast string '8.0' to value of UInt8 type }" + "Error during planning: 8.0 is not a valid fill option, fail to convert to a const value. { Arrow error: Cast error: Cannot cast string '8.0' to value of UInt8 type }" ); assert!( Fill::try_from_str("8", &DataType::UInt8).unwrap() diff --git a/src/query/src/range_select/plan_rewrite.rs b/src/query/src/range_select/plan_rewrite.rs index 4aa7126c66..ad5d2d3d5b 100644 --- a/src/query/src/range_select/plan_rewrite.rs +++ b/src/query/src/range_select/plan_rewrite.rs @@ -39,7 +39,7 @@ use datafusion_optimizer::simplify_expressions::ExprSimplifier; use datatypes::prelude::ConcreteDataType; use promql_parser::util::parse_duration; use session::context::QueryContextRef; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use table::table::adapter::DfTableProviderAdapter; use crate::error::{ @@ -273,53 +273,52 @@ impl TreeNodeRewriter for RangeExprRewriter<'_> { type Node = Expr; fn f_down(&mut self, node: Expr) -> DFResult> { - if let Expr::ScalarFunction(func) = &node { - if func.name() == "range_fn" { - // `range_fn(func, range, fill, byc, [byv], align, to)` - // `[byv]` are variadic arguments, byc indicate the length of arguments - let range_expr = self.get_range_expr(&func.args, 0)?; - let range = parse_duration_expr(&func.args, 1)?; - let byc = str::parse::(parse_str_expr(&func.args, 3)?) 
- .map_err(|e| DataFusionError::Plan(e.to_string()))?; - let by = parse_expr_list(&func.args, 4, byc)?; - let align = parse_duration_expr(&func.args, byc + 4)?; - let align_to = - parse_align_to(&func.args, byc + 5, Some(&self.query_ctx.timezone()))?; - let mut data_type = range_expr.get_type(self.input_plan.schema())?; - let mut need_cast = false; - let fill = Fill::try_from_str(parse_str_expr(&func.args, 2)?, &data_type)?; - if matches!(fill, Some(Fill::Linear)) && data_type.is_integer() { - data_type = DataType::Float64; - need_cast = true; - } - inconsistent_check!(self.by, !self.by.is_empty()); - inconsistent_check!(self.align, self.align != Duration::default()); - inconsistent_check!(self.align_to, self.align_to != 0); - let range_fn = RangeFn { - name: if let Some(fill) = &fill { - format!( - "{} RANGE {} FILL {}", - range_expr.schema_name(), - parse_expr_to_string(&func.args, 1)?, - fill - ) - } else { - format!( - "{} RANGE {}", - range_expr.schema_name(), - parse_expr_to_string(&func.args, 1)?, - ) - }, - data_type, - expr: range_expr, - range, - fill, - need_cast, - }; - let alias = Expr::Column(Column::from_name(range_fn.name.clone())); - self.range_fn.insert(range_fn); - return Ok(Transformed::yes(alias)); + if let Expr::ScalarFunction(func) = &node + && func.name() == "range_fn" + { + // `range_fn(func, range, fill, byc, [byv], align, to)` + // `[byv]` are variadic arguments, byc indicate the length of arguments + let range_expr = self.get_range_expr(&func.args, 0)?; + let range = parse_duration_expr(&func.args, 1)?; + let byc = str::parse::(parse_str_expr(&func.args, 3)?) + .map_err(|e| DataFusionError::Plan(e.to_string()))?; + let by = parse_expr_list(&func.args, 4, byc)?; + let align = parse_duration_expr(&func.args, byc + 4)?; + let align_to = parse_align_to(&func.args, byc + 5, Some(&self.query_ctx.timezone()))?; + let mut data_type = range_expr.get_type(self.input_plan.schema())?; + let mut need_cast = false; + let fill = Fill::try_from_str(parse_str_expr(&func.args, 2)?, &data_type)?; + if matches!(fill, Some(Fill::Linear)) && data_type.is_integer() { + data_type = DataType::Float64; + need_cast = true; } + inconsistent_check!(self.by, !self.by.is_empty()); + inconsistent_check!(self.align, self.align != Duration::default()); + inconsistent_check!(self.align_to, self.align_to != 0); + let range_fn = RangeFn { + name: if let Some(fill) = &fill { + format!( + "{} RANGE {} FILL {}", + range_expr.schema_name(), + parse_expr_to_string(&func.args, 1)?, + fill + ) + } else { + format!( + "{} RANGE {}", + range_expr.schema_name(), + parse_expr_to_string(&func.args, 1)?, + ) + }, + data_type, + expr: range_expr, + range, + fill, + need_cast, + }; + let alias = Expr::Column(Column::from_name(range_fn.name.clone())); + self.range_fn.insert(range_fn); + return Ok(Transformed::yes(alias)); } Ok(Transformed::no(node)) } @@ -461,8 +460,7 @@ impl RangePlanRewriter { ensure!( inputs.len() == 1, RangeQuerySnafu { - msg: - "Illegal subplan nums when rewrite DistinctOn logical plan", + msg: "Illegal subplan nums when rewrite DistinctOn logical plan", } ); LogicalPlanBuilder::from(inputs[0].clone()) @@ -607,8 +605,8 @@ fn interval_only_in_expr(expr: &Expr) -> bool { mod test { use arrow::datatypes::IntervalUnit; - use catalog::memory::MemoryCatalogManager; use catalog::RegisterTableRequest; + use catalog::memory::MemoryCatalogManager; use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; use common_time::IntervalYearMonth; use datafusion_expr::{BinaryExpr, Literal, 
Operator}; @@ -663,15 +661,17 @@ mod test { .unwrap(); let table = EmptyTable::from_table_info(&table_info); let catalog_list = MemoryCatalogManager::with_default_setup(); - assert!(catalog_list - .register_table_sync(RegisterTableRequest { - catalog: DEFAULT_CATALOG_NAME.to_string(), - schema: DEFAULT_SCHEMA_NAME.to_string(), - table_name, - table_id: 1024, - table, - }) - .is_ok()); + assert!( + catalog_list + .register_table_sync(RegisterTableRequest { + catalog: DEFAULT_CATALOG_NAME.to_string(), + schema: DEFAULT_SCHEMA_NAME.to_string(), + table_name, + table_id: 1024, + table, + }) + .is_ok() + ); QueryEngineFactory::new( catalog_list, None, @@ -700,7 +700,7 @@ mod test { let query = r#"SELECT timestamp, tag_0, tag_1, avg(field_0 + field_1) RANGE '5m' FROM test ALIGN '1h' by (tag_0,tag_1);"#; let expected = String::from( "RangeSelect: range_exprs=[avg(test.field_0 + test.field_1) RANGE 5m], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8, avg(test.field_0 + test.field_1) RANGE 5m:Float64;N]\ - \n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]" + \n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]", ); query_plan_compare(query, expected).await; } @@ -711,7 +711,7 @@ mod test { let expected = String::from( "Projection: avg(test.field_0 + test.field_1) RANGE 5m / Int64(4) [avg(test.field_0 + test.field_1) RANGE 5m / Int64(4):Float64;N]\ \n RangeSelect: range_exprs=[avg(test.field_0 + test.field_1) RANGE 5m], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [avg(test.field_0 + test.field_1) RANGE 5m:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\ - \n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]" + \n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]", ); query_plan_compare(query, expected).await; } @@ -723,7 +723,7 @@ mod test { let expected = String::from( "Projection: covar_samp(test.field_0 + test.field_1,test.field_1) RANGE 5m / Int64(4) [covar_samp(test.field_0 + test.field_1,test.field_1) RANGE 5m / Int64(4):Float64;N]\ \n RangeSelect: range_exprs=[covar_samp(test.field_0 + test.field_1,test.field_1) RANGE 5m], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1, test.tag_2, test.tag_3, test.tag_4], time_index=timestamp [covar_samp(test.field_0 + test.field_1,test.field_1) RANGE 5m:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8]\ - \n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]" + \n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, 
field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]", ); query_plan_compare(query, expected).await; } @@ -734,7 +734,7 @@ mod test { let expected = String::from( "Projection: (avg(test.field_0) RANGE 5m FILL NULL + sum(test.field_1) RANGE 5m FILL NULL) / Int64(4) [avg(test.field_0) RANGE 5m FILL NULL + sum(test.field_1) RANGE 5m FILL NULL / Int64(4):Float64;N]\ \n RangeSelect: range_exprs=[avg(test.field_0) RANGE 5m FILL NULL, sum(test.field_1) RANGE 5m FILL NULL], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [avg(test.field_0) RANGE 5m FILL NULL:Float64;N, sum(test.field_1) RANGE 5m FILL NULL:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\ - \n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]" + \n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]", ); query_plan_compare(query, expected).await; } @@ -747,7 +747,7 @@ mod test { \n Filter: foo > Int64(1) [foo:Float64;N]\ \n Projection: (avg(test.field_0) RANGE 5m FILL NULL + sum(test.field_1) RANGE 5m FILL NULL) / Int64(4) AS foo [foo:Float64;N]\ \n RangeSelect: range_exprs=[avg(test.field_0) RANGE 5m FILL NULL, sum(test.field_1) RANGE 5m FILL NULL], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [avg(test.field_0) RANGE 5m FILL NULL:Float64;N, sum(test.field_1) RANGE 5m FILL NULL:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\ - \n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]" + \n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]", ); query_plan_compare(query, expected).await; } @@ -760,8 +760,8 @@ mod test { \n RangeSelect: range_exprs=[avg(a) RANGE 5m FILL NULL, sum(b) RANGE 5m FILL NULL], align=3600000ms, align_to=0ms, align_by=[c, d], time_index=timestamp [avg(a) RANGE 5m FILL NULL:Float64;N, sum(b) RANGE 5m FILL NULL:Float64;N, timestamp:Timestamp(Millisecond, None), c:Utf8, d:Utf8]\ \n Projection: test.field_0 AS a, test.field_1 AS b, test.tag_0 AS c, test.tag_1 AS d, test.timestamp [a:Float64;N, b:Float64;N, c:Utf8, d:Utf8, timestamp:Timestamp(Millisecond, None)]\ \n Filter: test.field_0 > Float64(1) [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]\ - \n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]" - ); + \n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]", + ); query_plan_compare(query, expected).await; } @@ -771,7 +771,7 @@ mod test { let expected = 
String::from( "Projection: sin(avg(test.field_0 + test.field_1) RANGE 5m + Int64(1)) [sin(avg(test.field_0 + test.field_1) RANGE 5m + Int64(1)):Float64;N]\ \n RangeSelect: range_exprs=[avg(test.field_0 + test.field_1) RANGE 5m], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [avg(test.field_0 + test.field_1) RANGE 5m:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\ - \n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]" + \n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]", ); query_plan_compare(query, expected).await; } @@ -782,7 +782,7 @@ mod test { let expected = String::from( "Projection: avg(test.field_0) RANGE 5m FILL 6 + avg(test.field_0) RANGE 5m FILL 6 [avg(test.field_0) RANGE 5m FILL 6 + avg(test.field_0) RANGE 5m FILL 6:Float64]\ \n RangeSelect: range_exprs=[avg(test.field_0) RANGE 5m FILL 6], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [avg(test.field_0) RANGE 5m FILL 6:Float64, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\ - \n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]" + \n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]", ); query_plan_compare(query, expected).await; } @@ -793,7 +793,7 @@ mod test { let expected = String::from( "Projection: round(sin(avg(test.field_0 + test.field_1) RANGE 5m + Int64(1))) [round(sin(avg(test.field_0 + test.field_1) RANGE 5m + Int64(1))):Float64;N]\ \n RangeSelect: range_exprs=[avg(test.field_0 + test.field_1) RANGE 5m], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [avg(test.field_0 + test.field_1) RANGE 5m:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\ - \n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]" + \n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]", ); query_plan_compare(query, expected).await; } @@ -804,7 +804,7 @@ mod test { let expected = String::from( "Projection: gcd(arrow_cast(max(test.field_0 + Int64(1)) RANGE 5m FILL NULL, Utf8(\"Int64\")), arrow_cast(test.tag_0, Utf8(\"Int64\"))) + round(max(test.field_2 + Int64(1)) RANGE 6m FILL NULL + Int64(1)) + max(test.field_2 + Int64(3)) RANGE 10m FILL NULL * arrow_cast(test.tag_1, Utf8(\"Float64\")) + Int64(1) [gcd(arrow_cast(max(test.field_0 + Int64(1)) RANGE 5m FILL NULL,Utf8(\"Int64\")),arrow_cast(test.tag_0,Utf8(\"Int64\"))) + round(max(test.field_2 + Int64(1)) RANGE 6m FILL NULL + Int64(1)) + max(test.field_2 + Int64(3)) RANGE 10m FILL NULL * arrow_cast(test.tag_1,Utf8(\"Float64\")) + Int64(1):Float64;N]\ \n 
RangeSelect: range_exprs=[max(test.field_0 + Int64(1)) RANGE 5m FILL NULL, max(test.field_2 + Int64(1)) RANGE 6m FILL NULL, max(test.field_2 + Int64(3)) RANGE 10m FILL NULL], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [max(test.field_0 + Int64(1)) RANGE 5m FILL NULL:Float64;N, max(test.field_2 + Int64(1)) RANGE 6m FILL NULL:Float64;N, max(test.field_2 + Int64(3)) RANGE 10m FILL NULL:Float64;N, timestamp:Timestamp(Millisecond, None), tag_0:Utf8, tag_1:Utf8]\ - \n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]" + \n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]", ); query_plan_compare(query, expected).await; } @@ -814,7 +814,7 @@ mod test { let query = r#"SELECT min(CAST(field_0 AS Int64) + CAST(field_1 AS Int64)) RANGE '5m' FILL LINEAR FROM test ALIGN '1h' by (tag_0,tag_1);"#; let expected = String::from( "RangeSelect: range_exprs=[min(arrow_cast(test.field_0,Utf8(\"Int64\")) + arrow_cast(test.field_1,Utf8(\"Int64\"))) RANGE 5m FILL LINEAR], align=3600000ms, align_to=0ms, align_by=[test.tag_0, test.tag_1], time_index=timestamp [min(arrow_cast(test.field_0,Utf8(\"Int64\")) + arrow_cast(test.field_1,Utf8(\"Int64\"))) RANGE 5m FILL LINEAR:Float64;N]\ - \n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]" + \n TableScan: test [tag_0:Utf8, tag_1:Utf8, tag_2:Utf8, tag_3:Utf8, tag_4:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N, field_2:Float64;N, field_3:Float64;N, field_4:Float64;N]", ); query_plan_compare(query, expected).await; } diff --git a/src/query/src/sql.rs b/src/query/src/sql.rs index ec241162bd..6b6ee2ed07 100644 --- a/src/query/src/sql.rs +++ b/src/query/src/sql.rs @@ -17,31 +17,31 @@ mod show_create_table; use std::collections::HashMap; use std::sync::Arc; -use catalog::information_schema::{ - columns, flows, key_column_usage, process_list, region_peers, schemata, tables, CHARACTER_SETS, - COLLATIONS, COLUMNS, FLOWS, KEY_COLUMN_USAGE, REGION_PEERS, SCHEMATA, TABLES, VIEWS, -}; use catalog::CatalogManagerRef; +use catalog::information_schema::{ + CHARACTER_SETS, COLLATIONS, COLUMNS, FLOWS, KEY_COLUMN_USAGE, REGION_PEERS, SCHEMATA, TABLES, + VIEWS, columns, flows, key_column_usage, process_list, region_peers, schemata, tables, +}; use common_catalog::consts::{ INFORMATION_SCHEMA_NAME, SEMANTIC_TYPE_FIELD, SEMANTIC_TYPE_PRIMARY_KEY, SEMANTIC_TYPE_TIME_INDEX, }; use common_catalog::format_full_table_name; -use common_datasource::file_format::{infer_schemas, FileFormat, Format}; +use common_datasource::file_format::{FileFormat, Format, infer_schemas}; use common_datasource::lister::{Lister, Source}; use common_datasource::object_store::build_backend; use common_datasource::util::find_dir_and_filename; -use common_meta::key::flow::flow_info::FlowInfoValue; use common_meta::SchemaOptions; -use common_query::prelude::GREPTIME_TIMESTAMP; +use common_meta::key::flow::flow_info::FlowInfoValue; use common_query::Output; -use common_recordbatch::adapter::RecordBatchStreamAdapter; +use common_query::prelude::GREPTIME_TIMESTAMP; use 
common_recordbatch::RecordBatches; -use common_time::timezone::get_timezone; +use common_recordbatch::adapter::RecordBatchStreamAdapter; use common_time::Timestamp; +use common_time::timezone::get_timezone; use datafusion::common::ScalarValue; use datafusion::prelude::SessionContext; -use datafusion_expr::{case, col, lit, Expr, SortExpr}; +use datafusion_expr::{Expr, SortExpr, case, col, lit}; use datatypes::prelude::*; use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema, RawSchema, Schema}; use datatypes::vectors::StringVector; @@ -51,25 +51,25 @@ use once_cell::sync::Lazy; use regex::Regex; use session::context::{Channel, QueryContextRef}; pub use show_create_table::create_table_stmt; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use sql::ast::Ident; use sql::parser::ParserContext; +use sql::statements::OptionMap; use sql::statements::create::{CreateDatabase, CreateFlow, CreateView, Partitions, SqlOrTql}; use sql::statements::show::{ ShowColumns, ShowDatabases, ShowFlows, ShowIndex, ShowKind, ShowProcessList, ShowRegion, ShowTableStatus, ShowTables, ShowVariables, ShowViews, }; use sql::statements::statement::Statement; -use sql::statements::OptionMap; use sqlparser::ast::ObjectName; use store_api::metric_engine_consts::{is_metric_engine, is_metric_engine_internal_column}; -use table::requests::{FILE_TABLE_LOCATION_KEY, FILE_TABLE_PATTERN_KEY}; use table::TableRef; +use table::requests::{FILE_TABLE_LOCATION_KEY, FILE_TABLE_PATTERN_KEY}; +use crate::QueryEngineRef; use crate::dataframe::DataFrame; use crate::error::{self, Result, UnsupportedVariableSnafu}; use crate::planner::DfLogicalPlanner; -use crate::QueryEngineRef; const SCHEMAS_COLUMN: &str = "Database"; const OPTIONS_COLUMN: &str = "Options"; @@ -1084,11 +1084,7 @@ fn describe_column_keys( fn describe_column_nullables(columns_schemas: &[ColumnSchema]) -> VectorRef { Arc::new(StringVector::from_iterator(columns_schemas.iter().map( |cs| { - if cs.is_nullable() { - YES_STR - } else { - NO_STR - } + if cs.is_nullable() { YES_STR } else { NO_STR } }, ))) } @@ -1321,8 +1317,8 @@ mod test { use common_query::{Output, OutputData}; use common_recordbatch::{RecordBatch, RecordBatches}; - use common_time::timestamp::TimeUnit; use common_time::Timezone; + use common_time::timestamp::TimeUnit; use datatypes::prelude::ConcreteDataType; use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema, Schema, SchemaRef}; use datatypes::vectors::{StringVector, TimestampMillisecondVector, UInt32Vector, VectorRef}; @@ -1330,15 +1326,15 @@ mod test { use snafu::ResultExt; use sql::ast::{Ident, ObjectName}; use sql::statements::show::ShowVariables; - use table::test_util::MemTable; use table::TableRef; + use table::test_util::MemTable; use super::show_variable; use crate::error; use crate::error::Result; use crate::sql::{ - describe_table, DESCRIBE_TABLE_OUTPUT_SCHEMA, NO_STR, SEMANTIC_TYPE_FIELD, - SEMANTIC_TYPE_TIME_INDEX, YES_STR, + DESCRIBE_TABLE_OUTPUT_SCHEMA, NO_STR, SEMANTIC_TYPE_FIELD, SEMANTIC_TYPE_TIME_INDEX, + YES_STR, describe_table, }; #[test] diff --git a/src/query/src/sql/show_create_table.rs b/src/query/src/sql/show_create_table.rs index 435dc2406c..4981edeef7 100644 --- a/src/query/src/sql/show_create_table.rs +++ b/src/query/src/sql/show_create_table.rs @@ -18,11 +18,11 @@ use std::collections::HashMap; use common_meta::SchemaOptions; use datatypes::schema::{ - ColumnDefaultConstraint, ColumnSchema, FulltextBackend, SchemaRef, COLUMN_FULLTEXT_OPT_KEY_ANALYZER, 
COLUMN_FULLTEXT_OPT_KEY_BACKEND, COLUMN_FULLTEXT_OPT_KEY_CASE_SENSITIVE, COLUMN_FULLTEXT_OPT_KEY_FALSE_POSITIVE_RATE, COLUMN_FULLTEXT_OPT_KEY_GRANULARITY, COLUMN_SKIPPING_INDEX_OPT_KEY_FALSE_POSITIVE_RATE, COLUMN_SKIPPING_INDEX_OPT_KEY_GRANULARITY, COLUMN_SKIPPING_INDEX_OPT_KEY_TYPE, COMMENT_KEY, + ColumnDefaultConstraint, ColumnSchema, FulltextBackend, SchemaRef, }; use snafu::ResultExt; use sql::ast::{ColumnDef, ColumnOption, ColumnOptionDef, Expr, Ident, ObjectName}; @@ -260,7 +260,7 @@ mod tests { use datatypes::schema::{FulltextOptions, Schema, SchemaRef, SkippingIndexOptions}; use table::metadata::*; use table::requests::{ - TableOptions, FILE_TABLE_FORMAT_KEY, FILE_TABLE_LOCATION_KEY, FILE_TABLE_META_KEY, + FILE_TABLE_FORMAT_KEY, FILE_TABLE_LOCATION_KEY, FILE_TABLE_META_KEY, TableOptions, }; use super::*; diff --git a/src/query/src/tests.rs b/src/query/src/tests.rs index 04a076c39b..c70381d32f 100644 --- a/src/query/src/tests.rs +++ b/src/query/src/tests.rs @@ -14,7 +14,7 @@ use catalog::memory::MemoryCatalogManager; use common_query::OutputData; -use common_recordbatch::{util, RecordBatch}; +use common_recordbatch::{RecordBatch, util}; use session::context::QueryContext; use table::TableRef; diff --git a/src/query/src/tests/function.rs b/src/query/src/tests/function.rs index 028ed21246..b383daf521 100644 --- a/src/query/src/tests/function.rs +++ b/src/query/src/tests/function.rs @@ -22,8 +22,8 @@ use datatypes::vectors::BinaryVector; use rand::Rng; use table::test_util::MemTable; -use crate::tests::new_query_engine_with_table; use crate::QueryEngineRef; +use crate::tests::new_query_engine_with_table; pub fn create_query_engine_for_vector10x3() -> QueryEngineRef { let mut column_schemas = vec![]; diff --git a/src/query/src/tests/query_engine_test.rs b/src/query/src/tests/query_engine_test.rs index ede5285c26..797d2cf26a 100644 --- a/src/query/src/tests/query_engine_test.rs +++ b/src/query/src/tests/query_engine_test.rs @@ -14,13 +14,13 @@ use std::sync::Arc; -use catalog::memory::MemoryCatalogManager; use catalog::RegisterTableRequest; +use catalog::memory::MemoryCatalogManager; use common_base::Plugins; use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, NUMBERS_TABLE_ID}; use common_error::ext::BoxedError; use common_query::OutputData; -use common_recordbatch::{util, RecordBatch}; +use common_recordbatch::{RecordBatch, util}; use datafusion::datasource::DefaultTableSource; use datafusion_expr::logical_plan::builder::LogicalPlanBuilder; use datatypes::prelude::*; @@ -29,14 +29,14 @@ use datatypes::vectors::UInt32Vector; use session::context::QueryContext; use snafu::ResultExt; use table::table::adapter::DfTableProviderAdapter; -use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME}; +use table::table::numbers::{NUMBERS_TABLE_NAME, NumbersTable}; use table::test_util::MemTable; use crate::error::{QueryExecutionSnafu, Result}; use crate::options::QueryOptions as QueryOptionsNew; use crate::parser::QueryLanguageParser; -use crate::query_engine::options::QueryOptions; use crate::query_engine::QueryEngineFactory; +use crate::query_engine::options::QueryOptions; #[tokio::test] async fn test_datafusion_query_engine() -> Result<()> { @@ -147,21 +147,25 @@ async fn test_query_validate() -> Result<()> { let stmt = QueryLanguageParser::parse_sql("select number from public.numbers", &QueryContext::arc()) .unwrap(); - assert!(engine - .planner() - .plan(&stmt, QueryContext::arc()) - .await - .is_ok()); + assert!( + engine + .planner() + .plan(&stmt, 
QueryContext::arc()) + .await + .is_ok() + ); let stmt = QueryLanguageParser::parse_sql( "select number from wrongschema.numbers", &QueryContext::arc(), ) .unwrap(); - assert!(engine - .planner() - .plan(&stmt, QueryContext::arc()) - .await - .is_err()); + assert!( + engine + .planner() + .plan(&stmt, QueryContext::arc()) + .await + .is_err() + ); Ok(()) } diff --git a/src/query/src/tests/time_range_filter_test.rs b/src/query/src/tests/time_range_filter_test.rs index 84bdd8cb18..0a3df116cb 100644 --- a/src/query/src/tests/time_range_filter_test.rs +++ b/src/query/src/tests/time_range_filter_test.rs @@ -14,14 +14,14 @@ use std::sync::{Arc, RwLock}; -use catalog::memory::new_memory_catalog_manager; use catalog::RegisterTableRequest; +use catalog::memory::new_memory_catalog_manager; use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; use common_error::ext::BoxedError; use common_recordbatch::{RecordBatch, SendableRecordBatchStream}; +use common_time::Timestamp; use common_time::range::TimestampRange; use common_time::timestamp::TimeUnit; -use common_time::Timestamp; use datafusion_expr::expr::Expr; use datatypes::data_type::ConcreteDataType; use datatypes::schema::{ColumnSchema, Schema}; diff --git a/src/query/src/window_sort.rs b/src/query/src/window_sort.rs index 1248459874..eb0aa2d071 100644 --- a/src/query/src/window_sort.rs +++ b/src/query/src/window_sort.rs @@ -39,7 +39,7 @@ use datafusion::physical_plan::{ DisplayAs, DisplayFormatType, ExecutionPlan, ExecutionPlanProperties, PlanProperties, }; use datafusion_common::utils::bisect; -use datafusion_common::{internal_err, DataFusionError}; +use datafusion_common::{DataFusionError, internal_err}; use datafusion_physical_expr::PhysicalSortExpr; use datatypes::value::Value; use futures::Stream; @@ -491,13 +491,17 @@ impl WindowedSortStream { error!("Invalid range: {:?} > {:?}", cur_range, working_range); #[cfg(debug_assertions)] self.check_subset_ranges(&cur_range); - internal_err!("Current batch have data on the right side of working range, something is very wrong")?; + internal_err!( + "Current batch have data on the right side of working range, something is very wrong" + )?; } } else if cur_range.start < working_range.start { error!("Invalid range: {:?} < {:?}", cur_range, working_range); #[cfg(debug_assertions)] self.check_subset_ranges(&cur_range); - internal_err!("Current batch have data on the left side of working range, something is very wrong")?; + internal_err!( + "Current batch have data on the left side of working range, something is very wrong" + )?; } if cur_range.is_subset(&working_range) { @@ -516,7 +520,9 @@ impl WindowedSortStream { )?; if offset != 0 { - internal_err!("Current batch have data on the left side of working range, something is very wrong")?; + internal_err!( + "Current batch have data on the left side of working range, something is very wrong" + )?; } let sliced_rb = sorted_rb.slice(offset, len); @@ -848,11 +854,7 @@ fn cmp_with_opts( let opt = opt.unwrap_or_default(); if let (Some(a), Some(b)) = (a, b) { - if opt.descending { - b.cmp(a) - } else { - a.cmp(b) - } + if opt.descending { b.cmp(a) } else { a.cmp(b) } } else if opt.nulls_first { // now we know at leatst one of them is None // in rust None < Some(_) @@ -914,22 +916,22 @@ fn find_successive_runs)>, N: Ord + Copy>( let mut last_val: Option = None; for (idx, t) in iter { - if let Some(last_value) = &last_value { - if cmp_with_opts(last_value, &t, sort_opts) == std::cmp::Ordering::Greater { - // we found a boundary - let len = idx - 
last_offset; - let run = SucRun { - offset: last_offset, - len, - first_val, - last_val, - }; - runs.push(run); - first_val = None; - last_val = None; + if let Some(last_value) = &last_value + && cmp_with_opts(last_value, &t, sort_opts) == std::cmp::Ordering::Greater + { + // we found a boundary + let len = idx - last_offset; + let run = SucRun { + offset: last_offset, + len, + first_val, + last_val, + }; + runs.push(run); + first_val = None; + last_val = None; - last_offset = idx; - } + last_offset = idx; } last_value = Some(t); if let Some(t) = t { @@ -1255,7 +1257,7 @@ mod test { use serde_json::json; use super::*; - use crate::test_util::{new_ts_array, MockInputExec}; + use crate::test_util::{MockInputExec, new_ts_array}; #[test] fn test_overlapping() { diff --git a/src/servers/benches/loki_labels.rs b/src/servers/benches/loki_labels.rs index e0d64976ef..ae18ede28b 100644 --- a/src/servers/benches/loki_labels.rs +++ b/src/servers/benches/loki_labels.rs @@ -14,7 +14,7 @@ use std::collections::BTreeMap; -use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use criterion::{Criterion, black_box, criterion_group, criterion_main}; use servers::error::Result; use servers::http::loki::parse_loki_labels; diff --git a/src/servers/benches/prom_decode.rs b/src/servers/benches/prom_decode.rs index 52759db3d0..28f86c844c 100644 --- a/src/servers/benches/prom_decode.rs +++ b/src/servers/benches/prom_decode.rs @@ -16,7 +16,7 @@ use std::time::Duration; use api::prom_store::remote::WriteRequest; use bytes::Bytes; -use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; +use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use prost::Message; use servers::http::PromValidationMode; use servers::prom_store::to_grpc_row_insert_requests; diff --git a/src/servers/benches/to_http_output.rs b/src/servers/benches/to_http_output.rs index 9e7881bb0f..2f2708a263 100644 --- a/src/servers/benches/to_http_output.rs +++ b/src/servers/benches/to_http_output.rs @@ -18,7 +18,7 @@ use std::time::Instant; use arrow::array::StringArray; use arrow_schema::{DataType, Field, Schema}; use common_recordbatch::RecordBatch; -use criterion::{criterion_group, criterion_main, Criterion}; +use criterion::{Criterion, criterion_group, criterion_main}; use datatypes::schema::SchemaRef; use datatypes::vectors::StringVector; use servers::http::HttpRecordsOutput; diff --git a/src/servers/src/elasticsearch.rs b/src/servers/src/elasticsearch.rs index bfcd13845d..3627f19f19 100644 --- a/src/servers/src/elasticsearch.rs +++ b/src/servers/src/elasticsearch.rs @@ -16,30 +16,30 @@ use std::collections::BTreeMap; use std::sync::Arc; use std::time::Instant; +use axum::Extension; use axum::extract::{Path, Query, State}; use axum::http::{HeaderMap, HeaderName, HeaderValue, StatusCode}; use axum::response::IntoResponse; -use axum::Extension; use axum_extra::TypedHeader; use common_error::ext::ErrorExt; use common_telemetry::{debug, error}; use headers::ContentType; use once_cell::sync::Lazy; use pipeline::{ - GreptimePipelineParams, PipelineDefinition, GREPTIME_INTERNAL_IDENTITY_PIPELINE_NAME, + GREPTIME_INTERNAL_IDENTITY_PIPELINE_NAME, GreptimePipelineParams, PipelineDefinition, }; -use serde_json::{json, Deserializer, Value}; +use serde_json::{Deserializer, Value, json}; use session::context::{Channel, QueryContext}; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use vrl::value::Value as VrlValue; use crate::error::{ - status_code_to_http_status, 
InvalidElasticsearchInputSnafu, ParseJsonSnafu, - Result as ServersResult, + InvalidElasticsearchInputSnafu, ParseJsonSnafu, Result as ServersResult, + status_code_to_http_status, }; use crate::http::event::{ - extract_pipeline_params_map_from_headers, ingest_logs_inner, LogIngesterQueryParams, LogState, - PipelineIngestRequest, + LogIngesterQueryParams, LogState, PipelineIngestRequest, + extract_pipeline_params_map_from_headers, ingest_logs_inner, }; use crate::http::header::constants::GREPTIME_PIPELINE_NAME_HEADER_NAME; use crate::metrics::{ diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs index 7d470a531e..d36bdd1494 100644 --- a/src/servers/src/error.rs +++ b/src/servers/src/error.rs @@ -18,7 +18,7 @@ use std::string::FromUtf8Error; use axum::http::StatusCode as HttpStatusCode; use axum::response::{IntoResponse, Response}; -use axum::{http, Json}; +use axum::{Json, http}; use base64::DecodeError; use common_error::define_into_tonic_status; use common_error::ext::{BoxedError, ErrorExt}; diff --git a/src/servers/src/export_metrics.rs b/src/servers/src/export_metrics.rs index 0b526f760c..aac7e8dda4 100644 --- a/src/servers/src/export_metrics.rs +++ b/src/servers/src/export_metrics.rs @@ -17,14 +17,14 @@ use std::sync::Arc; use std::time::Duration; use common_base::Plugins; -use common_telemetry::metric::{convert_metric_to_write_request, MetricFilter}; +use common_telemetry::metric::{MetricFilter, convert_metric_to_write_request}; use common_telemetry::{error, info}; use common_time::Timestamp; use prost::Message; use reqwest::header::{HeaderMap, HeaderName, HeaderValue}; use serde::{Deserialize, Serialize}; use session::context::QueryContextBuilder; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use tokio::time::{self, Interval}; use crate::error::{InvalidExportMetricsConfigSnafu, Result, SendPromRemoteRequestSnafu}; @@ -131,7 +131,7 @@ impl ExportMetricsTask { return InvalidExportMetricsConfigSnafu { msg: format!("Export metrics: invalid HTTP header name: {}", k), } - .fail() + .fail(); } }; match TryInto::::try_into(v) { @@ -140,7 +140,7 @@ impl ExportMetricsTask { return InvalidExportMetricsConfigSnafu { msg: format!("Export metrics: invalid HTTP header value: {}", v), } - .fail() + .fail(); } }; Ok(()) @@ -287,62 +287,72 @@ mod test { #[tokio::test] async fn test_config() { // zero write_interval - assert!(ExportMetricsTask::try_new( - &ExportMetricsOption { - enable: true, - write_interval: Duration::from_secs(0), - ..Default::default() - }, - None - ) - .is_err()); - // none self_import and remote_write - assert!(ExportMetricsTask::try_new( - &ExportMetricsOption { - enable: true, - ..Default::default() - }, - None - ) - .is_err()); - // both self_import and remote_write - assert!(ExportMetricsTask::try_new( - &ExportMetricsOption { - enable: true, - self_import: Some(SelfImportOption::default()), - remote_write: Some(RemoteWriteOption::default()), - ..Default::default() - }, - None - ) - .is_err()); - // empty db - assert!(ExportMetricsTask::try_new( - &ExportMetricsOption { - enable: true, - self_import: Some(SelfImportOption { - db: String::default() - }), - remote_write: None, - ..Default::default() - }, - None - ) - .is_err()); - // empty url - assert!(ExportMetricsTask::try_new( - &ExportMetricsOption { - enable: true, - self_import: None, - remote_write: Some(RemoteWriteOption { - url: String::default(), + assert!( + ExportMetricsTask::try_new( + &ExportMetricsOption { + enable: true, + write_interval: Duration::from_secs(0), 
..Default::default() - }), - ..Default::default() - }, - None - ) - .is_err()); + }, + None + ) + .is_err() + ); + // none self_import and remote_write + assert!( + ExportMetricsTask::try_new( + &ExportMetricsOption { + enable: true, + ..Default::default() + }, + None + ) + .is_err() + ); + // both self_import and remote_write + assert!( + ExportMetricsTask::try_new( + &ExportMetricsOption { + enable: true, + self_import: Some(SelfImportOption::default()), + remote_write: Some(RemoteWriteOption::default()), + ..Default::default() + }, + None + ) + .is_err() + ); + // empty db + assert!( + ExportMetricsTask::try_new( + &ExportMetricsOption { + enable: true, + self_import: Some(SelfImportOption { + db: String::default() + }), + remote_write: None, + ..Default::default() + }, + None + ) + .is_err() + ); + // empty url + assert!( + ExportMetricsTask::try_new( + &ExportMetricsOption { + enable: true, + self_import: None, + remote_write: Some(RemoteWriteOption { + url: String::default(), + ..Default::default() + }), + ..Default::default() + }, + None + ) + .is_err() + ); // self import but no handle let s = ExportMetricsTask::try_new( &ExportMetricsOption { diff --git a/src/servers/src/grpc.rs b/src/servers/src/grpc.rs index 0310abc0e8..38ba40f056 100644 --- a/src/servers/src/grpc.rs +++ b/src/servers/src/grpc.rs @@ -35,14 +35,14 @@ use common_telemetry::{error, info, warn}; use futures::FutureExt; use otel_arrow_rust::proto::opentelemetry::arrow::v1::arrow_metrics_service_server::ArrowMetricsServiceServer; use serde::{Deserialize, Serialize}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use tokio::net::TcpListener; -use tokio::sync::oneshot::{self, Receiver, Sender}; use tokio::sync::Mutex; -use tonic::service::interceptor::InterceptedService; +use tokio::sync::oneshot::{self, Receiver, Sender}; use tonic::service::Routes; -use tonic::transport::server::TcpIncoming; +use tonic::service::interceptor::InterceptedService; use tonic::transport::ServerTlsConfig; +use tonic::transport::server::TcpIncoming; use tonic::{Request, Response, Status}; use tonic_reflection::server::v1::{ServerReflection, ServerReflectionServer}; @@ -282,10 +282,10 @@ pub const GRPC_SERVER: &str = "GRPC_SERVER"; impl Server for GrpcServer { async fn shutdown(&self) -> Result<()> { let mut shutdown_tx = self.shutdown_tx.lock().await; - if let Some(tx) = shutdown_tx.take() { - if tx.send(()).is_err() { - info!("Receiver dropped, the grpc server has already exited"); - } + if let Some(tx) = shutdown_tx.take() + && tx.send(()).is_err() + { + info!("Receiver dropped, the grpc server has already exited"); } info!("Shutdown grpc server"); diff --git a/src/servers/src/grpc/builder.rs b/src/servers/src/grpc/builder.rs index a9ebbacce4..370b1f6481 100644 --- a/src/servers/src/grpc/builder.rs +++ b/src/servers/src/grpc/builder.rs @@ -24,8 +24,8 @@ use otel_arrow_rust::proto::opentelemetry::arrow::v1::arrow_metrics_service_serv use snafu::ResultExt; use tokio::sync::Mutex; use tonic::codec::CompressionEncoding; -use tonic::service::interceptor::InterceptedService; use tonic::service::RoutesBuilder; +use tonic::service::interceptor::InterceptedService; use tonic::transport::{Identity, ServerTlsConfig}; use crate::grpc::database::DatabaseService; diff --git a/src/servers/src/grpc/context_auth.rs b/src/servers/src/grpc/context_auth.rs index f1e1bdd7e3..39c4fc5c88 100644 --- a/src/servers/src/grpc/context_auth.rs +++ b/src/servers/src/grpc/context_auth.rs @@ -22,14 +22,14 @@ use 
common_catalog::parse_catalog_and_schema_from_db_string; use common_error::ext::ErrorExt; use session::context::{Channel, QueryContextBuilder, QueryContextRef}; use snafu::{OptionExt, ResultExt}; -use tonic::metadata::MetadataMap; use tonic::Status; +use tonic::Status; +use tonic::metadata::MetadataMap; use crate::error::Error::UnsupportedAuthScheme; use crate::error::{AuthSnafu, InvalidParameterSnafu, NotFoundAuthHeaderSnafu, Result}; use crate::grpc::TonicResult; -use crate::http::header::constants::GREPTIME_DB_HEADER_NAME; use crate::http::AUTHORIZATION_HEADER; +use crate::http::AUTHORIZATION_HEADER; +use crate::http::header::constants::GREPTIME_DB_HEADER_NAME; use crate::metrics::METRIC_AUTH_FAILURE; /// Create a query context from the grpc metadata. diff --git a/src/servers/src/grpc/database.rs b/src/servers/src/grpc/database.rs index 121d8c6c85..13c328399d 100644 --- a/src/servers/src/grpc/database.rs +++ b/src/servers/src/grpc/database.rs @@ -23,7 +23,7 @@ use futures::StreamExt; use tonic::{Request, Response, Status, Streaming}; use crate::grpc::greptime_handler::GreptimeRequestHandler; -use crate::grpc::{cancellation, TonicResult}; +use crate::grpc::{TonicResult, cancellation}; use crate::hint_headers; pub(crate) struct DatabaseService { diff --git a/src/servers/src/grpc/flight.rs b/src/servers/src/grpc/flight.rs index d799ef3a69..bb431bfdae 100644 --- a/src/servers/src/grpc/flight.rs +++ b/src/servers/src/grpc/flight.rs @@ -31,11 +31,11 @@ use common_grpc::flight::{FlightEncoder, FlightMessage}; use common_query::{Output, OutputData}; use common_telemetry::tracing::info_span; use common_telemetry::tracing_context::{FutureExt, TracingContext}; -use futures::{future, ready, Stream}; +use futures::{Stream, future, ready}; use futures_util::{StreamExt, TryStreamExt}; use prost::Message; use session::context::{QueryContext, QueryContextRef}; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use table::table_name::TableName; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; @@ -43,8 +43,8 @@ use tonic::{Request, Response, Status, Streaming}; use crate::error::{InvalidParameterSnafu, ParseJsonSnafu, Result, ToJsonSnafu}; pub use crate::grpc::flight::stream::FlightRecordBatchStream; -use crate::grpc::greptime_handler::{get_request_type, GreptimeRequestHandler}; -use crate::grpc::{context_auth, FlightCompression, TonicResult}; +use crate::grpc::greptime_handler::{GreptimeRequestHandler, get_request_type}; +use crate::grpc::{FlightCompression, TonicResult, context_auth}; use crate::{error, hint_headers}; pub type TonicStream<T> = Pin<Box<dyn Stream<Item = TonicResult<T>> + Send + 'static>>; diff --git a/src/servers/src/grpc/flight/stream.rs b/src/servers/src/grpc/flight/stream.rs index 2c208be04e..ee08f8f20d 100644 --- a/src/servers/src/grpc/flight/stream.rs +++ b/src/servers/src/grpc/flight/stream.rs @@ -21,7 +21,7 @@ use arrow_flight::FlightData; use common_error::ext::ErrorExt; use common_grpc::flight::{FlightEncoder, FlightMessage}; use common_recordbatch::SendableRecordBatchStream; -use common_telemetry::tracing::{info_span, Instrument}; +use common_telemetry::tracing::{Instrument, info_span}; use common_telemetry::tracing_context::{FutureExt, TracingContext}; use common_telemetry::{error, info, warn}; use futures::channel::mpsc; @@ -33,8 +33,8 @@ use snafu::ResultExt; use tokio::task::JoinHandle; use crate::error; -use crate::grpc::flight::TonicResult; use crate::grpc::FlightCompression; +use crate::grpc::FlightCompression; +use crate::grpc::flight::TonicResult; /// Metrics collector for Flight stream with RAII logging pattern struct StreamMetrics { @@ -168,19 +168,18 @@ impl 
FlightRecordBatchStream { } metrics.send_record_batch_duration += start.elapsed(); - if should_send_partial_metrics { - if let Some(metrics_str) = recordbatches + if should_send_partial_metrics + && let Some(metrics_str) = recordbatches .metrics() .and_then(|m| serde_json::to_string(&m).ok()) - { - metrics.metrics_count += 1; - let start = Instant::now(); - if let Err(e) = tx.send(Ok(FlightMessage::Metrics(metrics_str))).await { - warn!(e; "stop sending Flight data"); - return; - } - metrics.send_metrics_duration += start.elapsed(); + { + metrics.metrics_count += 1; + let start = Instant::now(); + if let Err(e) = tx.send(Ok(FlightMessage::Metrics(metrics_str))).await { + warn!(e; "stop sending Flight data"); + return; } + metrics.send_metrics_duration += start.elapsed(); } } Err(e) => { diff --git a/src/servers/src/grpc/greptime_handler.rs b/src/servers/src/grpc/greptime_handler.rs index 2ec2e4e617..e19fc4352b 100644 --- a/src/servers/src/grpc/greptime_handler.rs +++ b/src/servers/src/grpc/greptime_handler.rs @@ -24,11 +24,11 @@ use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; use common_catalog::parse_catalog_and_schema_from_db_string; use common_error::ext::ErrorExt; use common_error::status_code::StatusCode; -use common_grpc::flight::do_put::DoPutResponse; use common_grpc::flight::FlightDecoder; +use common_grpc::flight::do_put::DoPutResponse; use common_query::Output; -use common_runtime::runtime::RuntimeTrait; use common_runtime::Runtime; +use common_runtime::runtime::RuntimeTrait; use common_session::ReadPreference; use common_telemetry::tracing_context::{FutureExt, TracingContext}; use common_telemetry::{debug, error, tracing, warn}; @@ -43,7 +43,7 @@ use tokio::sync::mpsc::error::TrySendError; use crate::error::{InvalidQuerySnafu, JoinTaskSnafu, Result, UnknownHintSnafu}; use crate::grpc::flight::{PutRecordBatchRequest, PutRecordBatchRequestStream}; -use crate::grpc::{context_auth, FlightCompression, TonicResult}; +use crate::grpc::{FlightCompression, TonicResult, context_auth}; use crate::metrics; use crate::metrics::METRIC_SERVER_GRPC_DB_REQUEST_TIMER; use crate::query_handler::grpc::ServerGrpcQueryHandlerRef; diff --git a/src/servers/src/grpc/prom_query_gateway.rs b/src/servers/src/grpc/prom_query_gateway.rs index a8565737f9..755c741ace 100644 --- a/src/servers/src/grpc/prom_query_gateway.rs +++ b/src/servers/src/grpc/prom_query_gateway.rs @@ -32,10 +32,10 @@ use snafu::OptionExt; use tonic::{Request, Response}; use crate::error::InvalidQuerySnafu; +use crate::grpc::TonicResult; use crate::grpc::context_auth::auth; use crate::grpc::greptime_handler::create_query_context; -use crate::grpc::TonicResult; -use crate::http::prometheus::{retrieve_metric_name_and_result_type, PrometheusJsonResponse}; +use crate::http::prometheus::{PrometheusJsonResponse, retrieve_metric_name_and_result_type}; use crate::prometheus_handler::PrometheusHandlerRef; pub struct PrometheusGatewayService { @@ -125,7 +125,7 @@ impl PrometheusGatewayService { match retrieve_metric_name_and_result_type(&query.query) { Ok((metric_name, result_type)) => (metric_name, result_type), Err(err) => { - return PrometheusJsonResponse::error(err.status_code(), err.output_msg()) + return PrometheusJsonResponse::error(err.status_code(), err.output_msg()); } }; // range query only returns matrix diff --git a/src/servers/src/grpc/region_server.rs b/src/servers/src/grpc/region_server.rs index e3eb87467a..cb1cbee789 100644 --- a/src/servers/src/grpc/region_server.rs +++ 
b/src/servers/src/grpc/region_server.rs @@ -15,11 +15,11 @@ use std::sync::Arc; use api::v1::region::region_server::Region as RegionServer; -use api::v1::region::{region_request, RegionRequest, RegionResponse}; +use api::v1::region::{RegionRequest, RegionResponse, region_request}; use async_trait::async_trait; use common_error::ext::ErrorExt; -use common_runtime::runtime::RuntimeTrait; use common_runtime::Runtime; +use common_runtime::runtime::RuntimeTrait; use common_telemetry::tracing::info_span; use common_telemetry::tracing_context::{FutureExt, TracingContext}; use common_telemetry::{debug, error, warn}; @@ -27,7 +27,7 @@ use snafu::{OptionExt, ResultExt}; use tonic::{Request, Response, Status}; use crate::error::{InvalidQuerySnafu, JoinTaskSnafu, Result}; -use crate::grpc::{cancellation, TonicResult}; +use crate::grpc::{TonicResult, cancellation}; #[async_trait] pub trait RegionServerHandler: Send + Sync { diff --git a/src/servers/src/hint_headers.rs b/src/servers/src/hint_headers.rs index a143911d63..5f64be584f 100644 --- a/src/servers/src/hint_headers.rs +++ b/src/servers/src/hint_headers.rs @@ -13,7 +13,7 @@ // limitations under the License. use http::HeaderMap; -use session::hints::{HINTS_KEY, HINTS_KEY_PREFIX, HINT_KEYS}; +use session::hints::{HINT_KEYS, HINTS_KEY, HINTS_KEY_PREFIX}; use tonic::metadata::MetadataMap; pub(crate) fn extract_hints(headers: &T) -> Vec<(String, String)> { diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs index 0ed86572a2..8319541248 100644 --- a/src/servers/src/http.rs +++ b/src/servers/src/http.rs @@ -24,13 +24,13 @@ use axum::extract::DefaultBodyLimit; use axum::http::StatusCode as HttpStatusCode; use axum::response::{IntoResponse, Response}; use axum::serve::ListenerExt; -use axum::{middleware, routing, Router}; -use common_base::readable_size::ReadableSize; +use axum::{Router, middleware, routing}; use common_base::Plugins; +use common_base::readable_size::ReadableSize; use common_recordbatch::RecordBatch; use common_telemetry::{debug, error, info}; -use common_time::timestamp::TimeUnit; use common_time::Timestamp; +use common_time::timestamp::TimeUnit; use datatypes::data_type::DataType; use datatypes::schema::SchemaRef; use datatypes::value::transform_value_ref_to_json_value; @@ -40,9 +40,9 @@ use http::{HeaderValue, Method}; use prost::DecodeError; use serde::{Deserialize, Serialize}; use serde_json::Value; -use snafu::{ensure, ResultExt}; -use tokio::sync::oneshot::{self, Sender}; +use snafu::{ResultExt, ensure}; use tokio::sync::Mutex; +use tokio::sync::oneshot::{self, Sender}; use tower::ServiceBuilder; use tower_http::compression::CompressionLayer; use tower_http::cors::{AllowOrigin, Any, CorsLayer}; @@ -1171,10 +1171,10 @@ pub const HTTP_SERVER: &str = "HTTP_SERVER"; impl Server for HttpServer { async fn shutdown(&self) -> Result<()> { let mut shutdown_tx = self.shutdown_tx.lock().await; - if let Some(tx) = shutdown_tx.take() { - if tx.send(()).is_err() { - info!("Receiver dropped, the HTTP server has already exited"); - } + if let Some(tx) = shutdown_tx.take() + && tx.send(()).is_err() + { + info!("Receiver dropped, the HTTP server has already exited"); } info!("Shutdown HTTP server"); @@ -1433,9 +1433,10 @@ mod test { .await; assert_eq!(res.status(), StatusCode::OK); - assert!(!res - .headers() - .contains_key(http::header::ACCESS_CONTROL_ALLOW_ORIGIN)); + assert!( + !res.headers() + .contains_key(http::header::ACCESS_CONTROL_ALLOW_ORIGIN) + ); } #[tokio::test] @@ -1454,9 +1455,10 @@ mod test { let res = 
client.get("/health").send().await; assert_eq!(res.status(), StatusCode::OK); - assert!(!res - .headers() - .contains_key(http::header::ACCESS_CONTROL_ALLOW_ORIGIN)); + assert!( + !res.headers() + .contains_key(http::header::ACCESS_CONTROL_ALLOW_ORIGIN) + ); } #[test] diff --git a/src/servers/src/http/authorize.rs b/src/servers/src/http/authorize.rs index e8b42603d3..9c0af39e44 100644 --- a/src/servers/src/http/authorize.rs +++ b/src/servers/src/http/authorize.rs @@ -18,24 +18,24 @@ use axum::extract::{Request, State}; use axum::http::{self, StatusCode}; use axum::middleware::Next; use axum::response::{IntoResponse, Response}; -use base64::prelude::BASE64_STANDARD; use base64::Engine; +use base64::prelude::BASE64_STANDARD; use common_base::secrets::{ExposeSecret, SecretString}; use common_catalog::consts::DEFAULT_SCHEMA_NAME; use common_catalog::parse_catalog_and_schema_from_db_string; use common_error::ext::ErrorExt; use common_telemetry::warn; -use common_time::timezone::parse_timezone; use common_time::Timezone; +use common_time::timezone::parse_timezone; use headers::Header; use session::context::QueryContextBuilder; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use crate::error::{ self, InvalidAuthHeaderInvisibleASCIISnafu, InvalidAuthHeaderSnafu, InvalidParameterSnafu, NotFoundInfluxAuthSnafu, Result, UnsupportedAuthSchemeSnafu, UrlDecodeSnafu, }; -use crate::http::header::{GreptimeDbName, GREPTIME_TIMEZONE_HEADER_NAME}; +use crate::http::header::{GREPTIME_TIMEZONE_HEADER_NAME, GreptimeDbName}; use crate::http::result::error_result::ErrorResponse; use crate::http::{AUTHORIZATION_HEADER, HTTP_API_PREFIX, PUBLIC_APIS}; use crate::influxdb::{is_influxdb_request, is_influxdb_v2_request}; diff --git a/src/servers/src/http/dashboard.rs b/src/servers/src/http/dashboard.rs index bd02b7f37d..bdb98490f0 100644 --- a/src/servers/src/http/dashboard.rs +++ b/src/servers/src/http/dashboard.rs @@ -13,7 +13,7 @@ // limitations under the License. 
use axum::body::Body; -use axum::http::{header, StatusCode, Uri}; +use axum::http::{StatusCode, Uri, header}; use axum::response::Response; use common_telemetry::debug; use rust_embed::RustEmbed; diff --git a/src/servers/src/http/dyn_log.rs b/src/servers/src/http/dyn_log.rs index a34601aff9..b82ecdadd6 100644 --- a/src/servers/src/http/dyn_log.rs +++ b/src/servers/src/http/dyn_log.rs @@ -15,7 +15,7 @@ use axum::http::StatusCode; use axum::response::IntoResponse; use common_telemetry::tracing_subscriber::filter; -use common_telemetry::{info, RELOAD_HANDLE}; +use common_telemetry::{RELOAD_HANDLE, info}; use snafu::OptionExt; use crate::error::{InternalSnafu, InvalidParameterSnafu, Result}; diff --git a/src/servers/src/http/event.rs b/src/servers/src/http/event.rs index ad649c872f..6f31a8b41a 100644 --- a/src/servers/src/http/event.rs +++ b/src/servers/src/http/event.rs @@ -37,23 +37,23 @@ use mime_guess::mime; use pipeline::util::to_pipeline_version; use pipeline::{ContextReq, GreptimePipelineParams, PipelineContext, PipelineDefinition}; use serde::{Deserialize, Serialize}; -use serde_json::{json, Deserializer, Map, Value as JsonValue}; +use serde_json::{Deserializer, Map, Value as JsonValue, json}; use session::context::{Channel, QueryContext, QueryContextRef}; use simd_json::Buffers; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use strum::{EnumIter, IntoEnumIterator}; use vrl::value::{KeyString, Value as VrlValue}; use crate::error::{ - status_code_to_http_status, Error, InvalidParameterSnafu, ParseJsonSnafu, PipelineSnafu, Result, + Error, InvalidParameterSnafu, ParseJsonSnafu, PipelineSnafu, Result, status_code_to_http_status, }; +use crate::http::HttpResponse; use crate::http::header::constants::GREPTIME_PIPELINE_PARAMS_HEADER; use crate::http::header::{ CONTENT_TYPE_NDJSON_STR, CONTENT_TYPE_NDJSON_SUBTYPE_STR, CONTENT_TYPE_PROTOBUF_STR, }; use crate::http::result::greptime_manage_resp::GreptimedbManageResponse; use crate::http::result::greptime_result_v1::GreptimedbV1Response; -use crate::http::HttpResponse; use crate::interceptor::{LogIngestInterceptor, LogIngestInterceptorRef}; use crate::metrics::{ METRIC_FAILURE_VALUE, METRIC_HTTP_LOGS_INGESTION_COUNTER, METRIC_HTTP_LOGS_INGESTION_ELAPSED, @@ -887,7 +887,7 @@ pub trait LogValidator: Send + Sync { /// validate payload by source before processing /// Return a `Some` result to indicate validation failure. 
async fn validate(&self, source: Option<&str>, payload: &Bytes) - -> Option<Result<()>>; + -> Option<Result<()>>; } pub type LogValidatorRef = Arc<dyn LogValidator>; diff --git a/src/servers/src/http/extractor.rs b/src/servers/src/http/extractor.rs index 28370fe4b6..b4222fcfe0 100644 --- a/src/servers/src/http/extractor.rs +++ b/src/servers/src/http/extractor.rs @@ -16,10 +16,10 @@ use core::str; use ahash::HashSet; use axum::extract::FromRequestParts; -use axum::http::request::Parts; use axum::http::StatusCode; +use axum::http::StatusCode; +use axum::http::request::Parts; use http::HeaderMap; -use pipeline::{truthy, GreptimePipelineParams, SelectInfo}; +use pipeline::{GreptimePipelineParams, SelectInfo, truthy}; use crate::http::header::constants::{ GREPTIME_LOG_EXTRACT_KEYS_HEADER_NAME, GREPTIME_LOG_PIPELINE_NAME_HEADER_NAME, diff --git a/src/servers/src/http/handler.rs b/src/servers/src/http/handler.rs index 6a2d5af73d..626dcecf24 100644 --- a/src/servers/src/http/handler.rs +++ b/src/servers/src/http/handler.rs @@ -26,7 +26,7 @@ use common_plugins::GREPTIME_EXEC_WRITE_COST; use common_query::{Output, OutputData}; use common_recordbatch::util; use common_telemetry::tracing; -use query::parser::{PromQuery, DEFAULT_LOOKBACK_STRING}; +use query::parser::{DEFAULT_LOOKBACK_STRING, PromQuery}; use serde::{Deserialize, Serialize}; use serde_json::Value; use session::context::{Channel, QueryContext, QueryContextRef}; @@ -400,10 +400,10 @@ pub async fn metrics( // But ProcessCollector only support on linux. #[cfg(not(windows))] - if let Some(c) = crate::metrics::jemalloc::JEMALLOC_COLLECTOR.as_ref() { - if let Err(e) = c.update() { - common_telemetry::error!(e; "Failed to update jemalloc metrics"); - } + if let Some(c) = crate::metrics::jemalloc::JEMALLOC_COLLECTOR.as_ref() + && let Err(e) = c.update() + { + common_telemetry::error!(e; "Failed to update jemalloc metrics"); } state.render() } diff --git a/src/servers/src/http/header.rs b/src/servers/src/http/header.rs index 4af50c3e9f..ce58e15e98 100644 --- a/src/servers/src/http/header.rs +++ b/src/servers/src/http/header.rs @@ -16,8 +16,8 @@ use std::collections::HashMap; use std::sync::Arc; use common_plugins::GREPTIME_EXEC_PREFIX; -use datafusion::physical_plan::metrics::MetricValue; use datafusion::physical_plan::ExecutionPlan; +use datafusion::physical_plan::ExecutionPlan; +use datafusion::physical_plan::metrics::MetricValue; use headers::{Header, HeaderName, HeaderValue}; use hyper::HeaderMap; use serde_json::Value; @@ -120,10 +120,10 @@ impl Header for GreptimeDbName { } fn encode<E: Extend<HeaderValue>>(&self, values: &mut E) { - if let Some(name) = &self.0 { - if let Ok(value) = HeaderValue::from_str(name) { - values.extend(std::iter::once(value)); - } + if let Some(name) = &self.0 + && let Ok(value) = HeaderValue::from_str(name) + { + values.extend(std::iter::once(value)); } } } diff --git a/src/servers/src/http/influxdb.rs b/src/servers/src/http/influxdb.rs index bd428f4289..671beb7bae 100644 --- a/src/servers/src/http/influxdb.rs +++ b/src/servers/src/http/influxdb.rs @@ -15,10 +15,10 @@ use std::collections::HashMap; use std::sync::Arc; +use axum::Extension; use axum::extract::{Query, State}; use axum::http::StatusCode; use axum::response::IntoResponse; -use axum::Extension; use common_catalog::consts::DEFAULT_SCHEMA_NAME; use common_grpc::precision::Precision; use common_telemetry::tracing; @@ -93,7 +93,7 @@ pub async fn influxdb_write( lines: String, handler: InfluxdbLineProtocolHandlerRef, ctx: QueryContextRef, -) -> Result<impl IntoResponse> { +) -> Result<impl IntoResponse + use<>> { let _timer = crate::metrics::METRIC_HTTP_INFLUXDB_WRITE_ELAPSED .with_label_values(&[db]) .start_timer(); diff --git 
a/src/servers/src/http/jaeger.rs b/src/servers/src/http/jaeger.rs index 77a05e9f85..b7cea134cc 100644 --- a/src/servers/src/http/jaeger.rs +++ b/src/servers/src/http/jaeger.rs @@ -17,10 +17,10 @@ use std::fmt; use std::str::FromStr; use std::sync::Arc; +use axum::Extension; use axum::extract::{Path, Query, State}; use axum::http::{HeaderMap, StatusCode as HttpStatusCode}; use axum::response::IntoResponse; -use axum::Extension; use chrono::Utc; use common_catalog::consts::{PARENT_SPAN_ID_COLUMN, TRACE_TABLE_NAME}; use common_error::ext::ErrorExt; @@ -28,16 +28,16 @@ use common_error::status_code::StatusCode; use common_query::{Output, OutputData}; use common_recordbatch::util; use common_telemetry::{debug, error, tracing, warn}; -use serde::{de, Deserialize, Deserializer, Serialize}; +use serde::{Deserialize, Deserializer, Serialize, de}; use serde_json::Value as JsonValue; use session::context::{Channel, QueryContext}; use snafu::{OptionExt, ResultExt}; use crate::error::{ - status_code_to_http_status, CollectRecordbatchSnafu, Error, InvalidJaegerQuerySnafu, Result, + CollectRecordbatchSnafu, Error, InvalidJaegerQuerySnafu, Result, status_code_to_http_status, }; -use crate::http::extractor::TraceTableName; use crate::http::HttpRecordsOutput; +use crate::http::extractor::TraceTableName; use crate::metrics::METRIC_JAEGER_QUERY_ELAPSED; use crate::otlp::trace::{ DURATION_NANO_COLUMN, KEY_OTEL_SCOPE_NAME, KEY_OTEL_SCOPE_VERSION, KEY_OTEL_STATUS_CODE, @@ -789,14 +789,14 @@ fn traces_from_records(records: HttpRecordsOutput) -> Result> { } } PARENT_SPAN_ID_COLUMN => { - if let JsonValue::String(parent_span_id) = cell { - if !parent_span_id.is_empty() { - span.references.push(Reference { - trace_id: span.trace_id.clone(), - span_id: parent_span_id, - ref_type: REF_TYPE_CHILD_OF.to_string(), - }); - } + if let JsonValue::String(parent_span_id) = cell + && !parent_span_id.is_empty() + { + span.references.push(Reference { + trace_id: span.trace_id.clone(), + span_id: parent_span_id, + ref_type: REF_TYPE_CHILD_OF.to_string(), + }); } } SPAN_EVENTS_COLUMN => { @@ -840,47 +840,48 @@ fn traces_from_records(records: HttpRecordsOutput) -> Result> { } } SCOPE_NAME_COLUMN => { - if let JsonValue::String(scope_name) = cell { - if !scope_name.is_empty() { - span.tags.push(KeyValue { - key: KEY_OTEL_SCOPE_NAME.to_string(), - value_type: ValueType::String, - value: Value::String(scope_name), - }); - } + if let JsonValue::String(scope_name) = cell + && !scope_name.is_empty() + { + span.tags.push(KeyValue { + key: KEY_OTEL_SCOPE_NAME.to_string(), + value_type: ValueType::String, + value: Value::String(scope_name), + }); } } SCOPE_VERSION_COLUMN => { - if let JsonValue::String(scope_version) = cell { - if !scope_version.is_empty() { - span.tags.push(KeyValue { - key: KEY_OTEL_SCOPE_VERSION.to_string(), - value_type: ValueType::String, - value: Value::String(scope_version), - }); - } + if let JsonValue::String(scope_version) = cell + && !scope_version.is_empty() + { + span.tags.push(KeyValue { + key: KEY_OTEL_SCOPE_VERSION.to_string(), + value_type: ValueType::String, + value: Value::String(scope_version), + }); } } SPAN_KIND_COLUMN => { - if let JsonValue::String(span_kind) = cell { - if !span_kind.is_empty() { - span.tags.push(KeyValue { - key: KEY_SPAN_KIND.to_string(), - value_type: ValueType::String, - value: Value::String(normalize_span_kind(&span_kind)), - }); - } + if let JsonValue::String(span_kind) = cell + && !span_kind.is_empty() + { + span.tags.push(KeyValue { + key: KEY_SPAN_KIND.to_string(), + 
value_type: ValueType::String, + value: Value::String(normalize_span_kind(&span_kind)), + }); } } SPAN_STATUS_CODE => { - if let JsonValue::String(span_status) = cell { - if span_status != SPAN_STATUS_UNSET && !span_status.is_empty() { - span.tags.push(KeyValue { - key: KEY_OTEL_STATUS_CODE.to_string(), - value_type: ValueType::String, - value: Value::String(normalize_status_code(&span_status)), - }); - } + if let JsonValue::String(span_status) = cell + && span_status != SPAN_STATUS_UNSET + && !span_status.is_empty() + { + span.tags.push(KeyValue { + key: KEY_OTEL_STATUS_CODE.to_string(), + value_type: ValueType::String, + value: Value::String(normalize_status_code(&span_status)), + }); } } @@ -900,16 +901,16 @@ fn traces_from_records(records: HttpRecordsOutput) -> Result> { ) { span.tags.push(keyvalue); } - } else if column_name.starts_with(RESOURCE_ATTR_PREFIX) { - if let Some(keyvalue) = to_keyvalue( + } else if column_name.starts_with(RESOURCE_ATTR_PREFIX) + && let Some(keyvalue) = to_keyvalue( column_name .strip_prefix(RESOURCE_ATTR_PREFIX) .unwrap_or_default() .to_string(), cell, - ) { - resource_tags.push(keyvalue); - } + ) + { + resource_tags.push(keyvalue); } } } @@ -1093,10 +1094,10 @@ fn convert_string_to_number(input: &serde_json::Value) -> Option() { return Some(serde_json::Value::Number(serde_json::Number::from(number))); } - if let Ok(number) = data.parse::() { - if let Some(number) = serde_json::Number::from_f64(number) { - return Some(serde_json::Value::Number(number)); - } + if let Ok(number) = data.parse::() + && let Some(number) = serde_json::Number::from_f64(number) + { + return Some(serde_json::Value::Number(number)); } } @@ -1154,7 +1155,7 @@ fn parse_jaeger_time_range_for_operations( #[cfg(test)] mod tests { - use serde_json::{json, Number, Value as JsonValue}; + use serde_json::{Number, Value as JsonValue, json}; use super::*; use crate::http::{ColumnSchema, HttpRecordsOutput, OutputSchema}; diff --git a/src/servers/src/http/loki.rs b/src/servers/src/http/loki.rs index a16702f77a..45d6eadadd 100644 --- a/src/servers/src/http/loki.rs +++ b/src/servers/src/http/loki.rs @@ -21,8 +21,8 @@ use api::v1::{ ColumnDataType, ColumnDataTypeExtension, ColumnSchema, JsonTypeExtension, Row, RowInsertRequest, Rows, SemanticType, Value as GreptimeValue, }; -use axum::extract::State; use axum::Extension; +use axum::extract::State; use axum_extra::TypedHeader; use bytes::Bytes; use chrono::DateTime; @@ -39,17 +39,17 @@ use pipeline::{ContextReq, PipelineContext, PipelineDefinition, SchemaInfo}; use prost::Message; use quoted_string::test_utils::TestSpec; use session::context::{Channel, QueryContext}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use vrl::value::{KeyString, Value as VrlValue}; use crate::error::{ DecodeOtlpRequestSnafu, InvalidLokiLabelsSnafu, InvalidLokiPayloadSnafu, ParseJsonSnafu, PipelineSnafu, Result, UnsupportedContentTypeSnafu, }; -use crate::http::event::{LogState, PipelineIngestRequest, JSON_CONTENT_TYPE, PB_CONTENT_TYPE}; +use crate::http::HttpResponse; +use crate::http::event::{JSON_CONTENT_TYPE, LogState, PB_CONTENT_TYPE, PipelineIngestRequest}; use crate::http::extractor::{LogTableName, PipelineInfo}; use crate::http::result::greptime_result_v1::GreptimedbV1Response; -use crate::http::HttpResponse; use crate::metrics::{ METRIC_FAILURE_VALUE, METRIC_LOKI_LOGS_INGESTION_COUNTER, METRIC_LOKI_LOGS_INGESTION_ELAPSED, METRIC_SUCCESS_VALUE, diff --git a/src/servers/src/http/otlp.rs b/src/servers/src/http/otlp.rs index 
bf1f374e9c..fc0656cf0e 100644 --- a/src/servers/src/http/otlp.rs +++ b/src/servers/src/http/otlp.rs @@ -14,10 +14,10 @@ use std::sync::Arc; +use axum::Extension; use axum::extract::State; use axum::http::header; use axum::response::IntoResponse; -use axum::Extension; use bytes::Bytes; use common_catalog::consts::{TRACE_TABLE_NAME, TRACE_TABLE_NAME_SESSION_KEY}; use common_telemetry::tracing; @@ -40,7 +40,7 @@ use crate::http::extractor::{ LogTableName, OtlpMetricOptions, PipelineInfo, SelectInfoWrapper, TraceTableName, }; // use crate::http::header::constants::GREPTIME_METRICS_LEGACY_MODE_HEADER_NAME; -use crate::http::header::{write_cost_header_map, CONTENT_TYPE_PROTOBUF}; +use crate::http::header::{CONTENT_TYPE_PROTOBUF, write_cost_header_map}; use crate::metrics::METRIC_HTTP_OPENTELEMETRY_LOGS_ELAPSED; use crate::query_handler::{OpenTelemetryProtocolHandlerRef, PipelineHandler}; diff --git a/src/servers/src/http/prom_store.rs b/src/servers/src/http/prom_store.rs index 5f23125008..62bb2383be 100644 --- a/src/servers/src/http/prom_store.rs +++ b/src/servers/src/http/prom_store.rs @@ -15,11 +15,11 @@ use std::sync::Arc; use api::prom_store::remote::ReadRequest; +use axum::Extension; use axum::body::Bytes; use axum::extract::{Query, State}; -use axum::http::{header, HeaderValue, StatusCode}; +use axum::http::{HeaderValue, StatusCode, header}; use axum::response::IntoResponse; -use axum::Extension; use axum_extra::TypedHeader; use common_catalog::consts::DEFAULT_SCHEMA_NAME; use common_query::prelude::GREPTIME_PHYSICAL_TABLE; @@ -27,17 +27,17 @@ use common_telemetry::tracing; use hyper::HeaderMap; use lazy_static::lazy_static; use object_pool::Pool; -use pipeline::util::to_pipeline_version; use pipeline::PipelineDefinition; +use pipeline::util::to_pipeline_version; use prost::Message; use serde::{Deserialize, Serialize}; use session::context::{Channel, QueryContext}; use snafu::prelude::*; use crate::error::{self, InternalSnafu, PipelineSnafu, Result}; -use crate::http::extractor::PipelineInfo; -use crate::http::header::{write_cost_header_map, GREPTIME_DB_HEADER_METRICS}; use crate::http::PromValidationMode; +use crate::http::extractor::PipelineInfo; +use crate::http::header::{GREPTIME_DB_HEADER_METRICS, write_cost_header_map}; use crate::prom_row_builder::TablesBuilder; use crate::prom_store::{extract_schema_from_read_request, snappy_decompress, zstd_decompress}; use crate::proto::{PromSeriesProcessor, PromWriteRequest}; diff --git a/src/servers/src/http/prometheus.rs b/src/servers/src/http/prometheus.rs index 86ffe51704..1c9f76a0e6 100644 --- a/src/servers/src/http/prometheus.rs +++ b/src/servers/src/http/prometheus.rs @@ -30,17 +30,17 @@ use common_version::OwnedBuildInfo; use datatypes::prelude::ConcreteDataType; use datatypes::scalars::ScalarVector; use datatypes::vectors::Float64Vector; -use futures::future::join_all; use futures::StreamExt; +use futures::future::join_all; use itertools::Itertools; -use promql_parser::label::{MatchOp, Matcher, Matchers, METRIC_NAME}; +use promql_parser::label::{METRIC_NAME, MatchOp, Matcher, Matchers}; use promql_parser::parser::token::{self}; use promql_parser::parser::value::ValueType; use promql_parser::parser::{ AggregateExpr, BinaryExpr, Call, Expr as PromqlExpr, LabelModifier, MatrixSelector, ParenExpr, SubqueryExpr, UnaryExpr, VectorSelector, }; -use query::parser::{PromQuery, QueryLanguageParser, DEFAULT_LOOKBACK_STRING}; +use query::parser::{DEFAULT_LOOKBACK_STRING, PromQuery, QueryLanguageParser}; use 
query::promql::planner::normalize_matcher; use serde::de::{self, MapAccess, Visitor}; use serde::{Deserialize, Serialize}; @@ -1064,10 +1064,14 @@ pub async fn label_values_query( let end = params.end.unwrap_or_else(current_time_rfc3339); let mut label_values = HashSet::new(); - let start = try_call_return_response!(QueryLanguageParser::parse_promql_timestamp(&start) - .context(ParseTimestampSnafu { timestamp: &start })); - let end = try_call_return_response!(QueryLanguageParser::parse_promql_timestamp(&end) - .context(ParseTimestampSnafu { timestamp: &end })); + let start = try_call_return_response!( + QueryLanguageParser::parse_promql_timestamp(&start) + .context(ParseTimestampSnafu { timestamp: &start }) + ); + let end = try_call_return_response!( + QueryLanguageParser::parse_promql_timestamp(&end) + .context(ParseTimestampSnafu { timestamp: &end }) + ); for query in queries { let promql_expr = try_call_return_response!(promql_parser::parser::parse(&query)); @@ -1109,10 +1113,11 @@ pub async fn label_values_query( } fn truncate_results(label_values: &mut Vec<String>, limit: Option<usize>) { - if let Some(limit) = limit { - if limit > 0 && label_values.len() >= limit { - label_values.truncate(limit); - } + if let Some(limit) = limit + && limit > 0 + && label_values.len() >= limit + { + label_values.truncate(limit); } } diff --git a/src/servers/src/http/read_preference.rs b/src/servers/src/http/read_preference.rs index aee56c1fe8..04e9771b89 100644 --- a/src/servers/src/http/read_preference.rs +++ b/src/servers/src/http/read_preference.rs @@ -18,8 +18,8 @@ use axum::body::Body; use axum::http::Request; use axum::middleware::Next; use axum::response::Response; -use session::context::QueryContext; use session::ReadPreference; +use session::ReadPreference; +use session::context::QueryContext; use crate::http::header::GREPTIME_DB_HEADER_READ_PREFERENCE; diff --git a/src/servers/src/http/result/arrow_result.rs b/src/servers/src/http/result/arrow_result.rs index bb865d9c00..d583c3a590 100644 --- a/src/servers/src/http/result/arrow_result.rs +++ b/src/servers/src/http/result/arrow_result.rs @@ -16,9 +16,9 @@ use std::pin::Pin; use std::sync::Arc; use arrow::datatypes::Schema; -use arrow_ipc::writer::{FileWriter, IpcWriteOptions}; use arrow_ipc::CompressionType; -use axum::http::{header, HeaderValue}; +use arrow_ipc::CompressionType; +use arrow_ipc::writer::{FileWriter, IpcWriteOptions}; +use axum::http::{HeaderValue, header}; use axum::response::{IntoResponse, Response}; use common_error::status_code::StatusCode; use common_query::{Output, OutputData}; diff --git a/src/servers/src/http/result/csv_result.rs b/src/servers/src/http/result/csv_result.rs index cf4af29c24..40d1c16cdd 100644 --- a/src/servers/src/http/result/csv_result.rs +++ b/src/servers/src/http/result/csv_result.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use axum::http::{header, HeaderValue}; +use axum::http::{HeaderValue, header}; use axum::response::{IntoResponse, Response}; use common_error::status_code::StatusCode; use common_query::Output; @@ -22,7 +22,7 @@ use serde_json::Value as JsonValue; use crate::http::header::{GREPTIME_DB_HEADER_EXECUTION_TIME, GREPTIME_DB_HEADER_FORMAT}; use crate::http::result::error_result::ErrorResponse; -use crate::http::{handler, process_with_limit, GreptimeQueryOutput, HttpResponse, ResponseFormat}; +use crate::http::{GreptimeQueryOutput, HttpResponse, ResponseFormat, handler, process_with_limit}; #[derive(Serialize, Deserialize, Debug)] pub struct CsvResponse { diff --git a/src/servers/src/http/result/error_result.rs b/src/servers/src/http/result/error_result.rs index 3c7c718e5b..7b70066b68 100644 --- a/src/servers/src/http/result/error_result.rs +++ b/src/servers/src/http/result/error_result.rs @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use axum::response::{IntoResponse, Response}; use axum::Json; +use axum::response::{IntoResponse, Response}; use common_error::ext::ErrorExt; use common_error::from_err_code_msg_to_header; use common_error::status_code::StatusCode; diff --git a/src/servers/src/http/result/greptime_manage_resp.rs b/src/servers/src/http/result/greptime_manage_resp.rs index 3db07028b3..a875204082 100644 --- a/src/servers/src/http/result/greptime_manage_resp.rs +++ b/src/servers/src/http/result/greptime_manage_resp.rs @@ -12,10 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -use axum::response::IntoResponse; use axum::Json; -use http::header::CONTENT_TYPE; +use axum::response::IntoResponse; use http::HeaderValue; +use http::header::CONTENT_TYPE; use serde::{Deserialize, Serialize}; use crate::http::header::{GREPTIME_DB_HEADER_EXECUTION_TIME, GREPTIME_DB_HEADER_FORMAT}; diff --git a/src/servers/src/http/result/greptime_result_v1.rs b/src/servers/src/http/result/greptime_result_v1.rs index ad19a13295..58fa7ca9c7 100644 --- a/src/servers/src/http/result/greptime_result_v1.rs +++ b/src/servers/src/http/result/greptime_result_v1.rs @@ -14,8 +14,8 @@ use std::collections::HashMap; -use axum::response::{IntoResponse, Response}; use axum::Json; +use axum::response::{IntoResponse, Response}; use common_query::Output; use headers::HeaderValue; use serde::{Deserialize, Serialize}; @@ -24,7 +24,7 @@ use serde_json::Value; use crate::http::header::{ GREPTIME_DB_HEADER_EXECUTION_TIME, GREPTIME_DB_HEADER_FORMAT, GREPTIME_DB_HEADER_METRICS, }; -use crate::http::{handler, process_with_limit, GreptimeQueryOutput, HttpResponse, ResponseFormat}; +use crate::http::{GreptimeQueryOutput, HttpResponse, ResponseFormat, handler, process_with_limit}; #[derive(Serialize, Deserialize, Debug)] pub struct GreptimedbV1Response { diff --git a/src/servers/src/http/result/influxdb_result_v1.rs b/src/servers/src/http/result/influxdb_result_v1.rs index 6ddf8fbcf7..36d59b9878 100644 --- a/src/servers/src/http/result/influxdb_result_v1.rs +++ b/src/servers/src/http/result/influxdb_result_v1.rs @@ -12,11 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+use axum::Json; use axum::http::HeaderValue; use axum::response::{IntoResponse, Response}; -use axum::Json; use common_query::{Output, OutputData}; -use common_recordbatch::{util, RecordBatch}; +use common_recordbatch::{RecordBatch, util}; use serde::{Deserialize, Serialize}; use serde_json::Value; use snafu::ResultExt; diff --git a/src/servers/src/http/result/json_result.rs b/src/servers/src/http/result/json_result.rs index 4b84f0de81..25408b2836 100644 --- a/src/servers/src/http/result/json_result.rs +++ b/src/servers/src/http/result/json_result.rs @@ -12,17 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -use axum::http::{header, HeaderValue}; +use axum::http::{HeaderValue, header}; use axum::response::{IntoResponse, Response}; use common_error::status_code::StatusCode; use common_query::Output; use mime_guess::mime; use serde::{Deserialize, Serialize}; -use serde_json::{json, Map, Value}; +use serde_json::{Map, Value, json}; use crate::http::header::{GREPTIME_DB_HEADER_EXECUTION_TIME, GREPTIME_DB_HEADER_FORMAT}; use crate::http::result::error_result::ErrorResponse; -use crate::http::{handler, process_with_limit, GreptimeQueryOutput, HttpResponse, ResponseFormat}; +use crate::http::{GreptimeQueryOutput, HttpResponse, ResponseFormat, handler, process_with_limit}; /// The json format here is different from the default json output of `GreptimedbV1` result. /// `JsonResponse` is intended to make it easier for user to consume data. diff --git a/src/servers/src/http/result/null_result.rs b/src/servers/src/http/result/null_result.rs index 82c65a84b3..e2cb391a1b 100644 --- a/src/servers/src/http/result/null_result.rs +++ b/src/servers/src/http/result/null_result.rs @@ -14,7 +14,7 @@ use std::fmt::Write; -use axum::http::{header, HeaderValue}; +use axum::http::{HeaderValue, header}; use axum::response::{IntoResponse, Response}; use common_error::status_code::StatusCode; use common_query::Output; @@ -23,7 +23,7 @@ use serde::{Deserialize, Serialize}; use crate::http::header::{GREPTIME_DB_HEADER_EXECUTION_TIME, GREPTIME_DB_HEADER_FORMAT}; use crate::http::result::error_result::ErrorResponse; -use crate::http::{handler, GreptimeQueryOutput, HttpResponse, ResponseFormat}; +use crate::http::{GreptimeQueryOutput, HttpResponse, ResponseFormat, handler}; #[derive(Serialize, Deserialize, Debug)] enum Rows { diff --git a/src/servers/src/http/result/prometheus_resp.rs b/src/servers/src/http/result/prometheus_resp.rs index 50af912122..4bf386bbfd 100644 --- a/src/servers/src/http/result/prometheus_resp.rs +++ b/src/servers/src/http/result/prometheus_resp.rs @@ -16,9 +16,9 @@ use std::cmp::Ordering; use std::collections::{BTreeMap, HashMap}; +use axum::Json; use axum::http::HeaderValue; use axum::response::{IntoResponse, Response}; -use axum::Json; use common_error::ext::ErrorExt; use common_error::status_code::StatusCode; use common_query::{Output, OutputData}; @@ -34,9 +34,9 @@ use serde_json::Value; use snafu::{OptionExt, ResultExt}; use crate::error::{ - status_code_to_http_status, CollectRecordbatchSnafu, Result, UnexpectedResultSnafu, + CollectRecordbatchSnafu, Result, UnexpectedResultSnafu, status_code_to_http_status, }; -use crate::http::header::{collect_plan_metrics, GREPTIME_DB_HEADER_METRICS}; +use crate::http::header::{GREPTIME_DB_HEADER_METRICS, collect_plan_metrics}; use crate::http::prometheus::{ PromData, PromQueryResult, PromSeriesMatrix, PromSeriesVector, PrometheusResponse, }; diff --git 
a/src/servers/src/http/result/table_result.rs b/src/servers/src/http/result/table_result.rs index 91780ed6b6..7e80e8c680 100644 --- a/src/servers/src/http/result/table_result.rs +++ b/src/servers/src/http/result/table_result.rs @@ -15,7 +15,7 @@ use std::cmp::max; use std::fmt::{Display, Write}; -use axum::http::{header, HeaderValue}; +use axum::http::{HeaderValue, header}; use axum::response::{IntoResponse, Response}; use common_error::status_code::StatusCode; use common_query::Output; @@ -25,7 +25,7 @@ use serde::{Deserialize, Serialize}; use crate::http::header::{GREPTIME_DB_HEADER_EXECUTION_TIME, GREPTIME_DB_HEADER_FORMAT}; use crate::http::result::error_result::ErrorResponse; -use crate::http::{handler, process_with_limit, GreptimeQueryOutput, HttpResponse, ResponseFormat}; +use crate::http::{GreptimeQueryOutput, HttpResponse, ResponseFormat, handler, process_with_limit}; #[derive(Serialize, Deserialize, Debug)] pub struct TableResponse { diff --git a/src/servers/src/influxdb.rs b/src/servers/src/influxdb.rs index 0dacdf2474..f9ffd754dd 100644 --- a/src/servers/src/influxdb.rs +++ b/src/servers/src/influxdb.rs @@ -16,7 +16,7 @@ use api::v1::value::ValueData; use api::v1::{ColumnDataType, RowInsertRequests}; use common_grpc::precision::Precision; use hyper::Request; -use influxdb_line_protocol::{parse_lines, FieldValue}; +use influxdb_line_protocol::{FieldValue, parse_lines}; use snafu::ResultExt; use crate::error::{Error, InfluxdbLineProtocolSnafu}; diff --git a/src/servers/src/interceptor.rs b/src/servers/src/interceptor.rs index 48f44409fd..7425c228f5 100644 --- a/src/servers/src/interceptor.rs +++ b/src/servers/src/interceptor.rs @@ -16,8 +16,8 @@ use std::borrow::Cow; use std::sync::Arc; use api::prom_store::remote::ReadRequest; -use api::v1::greptime_request::Request; use api::v1::RowInsertRequests; +use api::v1::greptime_request::Request; use async_trait::async_trait; use common_error::ext::ErrorExt; use common_query::Output; diff --git a/src/servers/src/metrics.rs b/src/servers/src/metrics.rs index a1ced1b127..af44e697db 100644 --- a/src/servers/src/metrics.rs +++ b/src/servers/src/metrics.rs @@ -23,8 +23,8 @@ use axum::middleware::Next; use axum::response::IntoResponse; use lazy_static::lazy_static; use prometheus::{ - register_histogram, register_histogram_vec, register_int_counter, register_int_counter_vec, - register_int_gauge, Histogram, HistogramVec, IntCounter, IntCounterVec, IntGauge, + Histogram, HistogramVec, IntCounter, IntCounterVec, IntGauge, register_histogram, + register_histogram_vec, register_int_counter, register_int_counter_vec, register_int_gauge, }; use session::context::QueryContext; use tonic::body::Body; diff --git a/src/servers/src/mysql/federated.rs b/src/servers/src/mysql/federated.rs index fb5cd8a009..7cba3a8d1c 100644 --- a/src/servers/src/mysql/federated.rs +++ b/src/servers/src/mysql/federated.rs @@ -25,10 +25,10 @@ use datatypes::prelude::ConcreteDataType; use datatypes::schema::{ColumnSchema, Schema}; use datatypes::vectors::StringVector; use once_cell::sync::Lazy; -use regex::bytes::RegexSet; use regex::Regex; -use session::context::QueryContextRef; +use regex::bytes::RegexSet; use session::SessionRef; +use session::context::QueryContextRef; static SELECT_VAR_PATTERN: Lazy = Lazy::new(|| Regex::new("(?i)^(SELECT @@(.*))").unwrap()); static MYSQL_CONN_JAVA_PATTERN: Lazy = @@ -238,7 +238,10 @@ fn check_select_variable(query: &str, query_context: QueryContextRef) -> Option< fn check_show_variables(query: &str) -> Option { let recordbatches = if 
SHOW_SQL_MODE_PATTERN.is_match(query) { - Some(show_variables("sql_mode", "ONLY_FULL_GROUP_BY STRICT_TRANS_TABLES NO_ZERO_IN_DATE NO_ZERO_DATE ERROR_FOR_DIVISION_BY_ZERO NO_ENGINE_SUBSTITUTION")) + Some(show_variables( + "sql_mode", + "ONLY_FULL_GROUP_BY STRICT_TRANS_TABLES NO_ZERO_IN_DATE NO_ZERO_DATE ERROR_FOR_DIVISION_BY_ZERO NO_ENGINE_SUBSTITUTION", + )) } else if SHOW_LOWER_CASE_PATTERN.is_match(query) { Some(show_variables("lower_case_table_names", "0")) } else if SHOW_VARIABLES_LIKE_PATTERN.is_match(query) { @@ -276,10 +279,10 @@ pub(crate) fn check( // INSERT don't need MySQL federated check. We assume the query doesn't contain // federated or driver setup command if it starts with a 'INSERT' statement. let the_6th_index = query.char_indices().nth(6).map(|(i, _)| i); - if let Some(index) = the_6th_index { - if query[..index].eq_ignore_ascii_case("INSERT") { - return None; - } + if let Some(index) = the_6th_index + && query[..index].eq_ignore_ascii_case("INSERT") + { + return None; } // First to check the query is like "select @@variables". @@ -295,8 +298,8 @@ mod test { use common_query::OutputData; use common_time::timezone::set_default_timezone; - use session::context::{Channel, QueryContext}; use session::Session; + use session::context::{Channel, QueryContext}; use super::*; diff --git a/src/servers/src/mysql/handler.rs b/src/servers/src/mysql/handler.rs index 8cc858507c..ade126c12d 100644 --- a/src/servers/src/mysql/handler.rs +++ b/src/servers/src/mysql/handler.rs @@ -14,8 +14,8 @@ use std::collections::HashMap; use std::net::SocketAddr; -use std::sync::atomic::{AtomicU32, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicU32, Ordering}; use std::time::Duration; use ::auth::{Identity, Password, UserProviderRef}; @@ -38,12 +38,13 @@ use query::query_engine::DescribeResult; use rand::RngCore; use session::context::{Channel, QueryContextRef}; use session::{Session, SessionRef}; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use sql::dialect::MySqlDialect; use sql::parser::{ParseOptions, ParserContext}; use sql::statements::statement::Statement; use tokio::io::AsyncWrite; +use crate::SqlPlan; use crate::error::{self, DataFrameSnafu, InvalidPrepareStatementSnafu, Result}; use crate::metrics::METRIC_AUTH_FAILURE; use crate::mysql::helper::{ @@ -52,7 +53,6 @@ use crate::mysql::helper::{ use crate::mysql::writer; use crate::mysql::writer::{create_mysql_column, handle_err}; use crate::query_handler::sql::ServerSqlQueryHandlerRef; -use crate::SqlPlan; const MYSQL_NATIVE_PASSWORD: &str = "mysql_native_password"; const MYSQL_CLEAR_PASSWORD: &str = "mysql_clear_password"; @@ -626,22 +626,21 @@ impl AsyncMysqlShim for MysqlInstanceShi let user_info = &self.session.user_info(); - if let Some(schema_validator) = &self.user_provider { - if let Err(e) = schema_validator + if let Some(schema_validator) = &self.user_provider + && let Err(e) = schema_validator .authorize(&catalog, &schema, user_info) .await - { - METRIC_AUTH_FAILURE - .with_label_values(&[e.status_code().as_ref()]) - .inc(); - return w - .error( - ErrorKind::ER_DBACCESS_DENIED_ERROR, - e.output_msg().as_bytes(), - ) - .await - .map_err(|e| e.into()); - } + { + METRIC_AUTH_FAILURE + .with_label_values(&[e.status_code().as_ref()]) + .inc(); + return w + .error( + ErrorKind::ER_DBACCESS_DENIED_ERROR, + e.output_msg().as_bytes(), + ) + .await + .map_err(|e| e.into()); } if catalog_from_db.is_some() { diff --git a/src/servers/src/mysql/helper.rs b/src/servers/src/mysql/helper.rs index 
b6c8e8f85c..4705c01387 100644 --- a/src/servers/src/mysql/helper.rs +++ b/src/servers/src/mysql/helper.rs @@ -25,9 +25,9 @@ use datatypes::prelude::ConcreteDataType; use datatypes::types::TimestampType; use datatypes::value::{self, Value}; use itertools::Itertools; -use opensrv_mysql::{to_naive_datetime, ParamValue, ValueInner}; +use opensrv_mysql::{ParamValue, ValueInner, to_naive_datetime}; use snafu::ResultExt; -use sql::ast::{visit_expressions_mut, Expr, Value as ValueExpr, ValueWithSpan, VisitMut}; +use sql::ast::{Expr, Value as ValueExpr, ValueWithSpan, VisitMut, visit_expressions_mut}; use sql::statements::statement::Statement; use crate::error::{self, DataFusionSnafu, Result}; @@ -340,7 +340,10 @@ mod tests { let query = "select from demo where host=? and idc in (select idc from idcs where name=?) and cpu>?"; let (sql, index) = replace_placeholders(query); - assert_eq!("select from demo where host=$1 and idc in (select idc from idcs where name=$2) and cpu>$3", sql); + assert_eq!( + "select from demo where host=$1 and idc in (select idc from idcs where name=$2) and cpu>$3", + sql + ); assert_eq!(4, index); } @@ -371,11 +374,16 @@ mod tests { delete.inner.to_string() ); - let select = parse_sql("select * from demo where host=? and idc in (select idc from idcs where name=?) and cpu>?"); + let select = parse_sql( + "select * from demo where host=? and idc in (select idc from idcs where name=?) and cpu>?", + ); let Statement::Query(select) = transform_placeholders(select) else { unreachable!() }; - assert_eq!("SELECT * FROM demo WHERE host = $1 AND idc IN (SELECT idc FROM idcs WHERE name = $2) AND cpu > $3", select.inner.to_string()); + assert_eq!( + "SELECT * FROM demo WHERE host = $1 AND idc IN (SELECT idc FROM idcs WHERE name = $2) AND cpu > $3", + select.inner.to_string() + ); } #[test] diff --git a/src/servers/src/mysql/server.rs b/src/servers/src/mysql/server.rs index 9f042de765..c27d3ebbda 100644 --- a/src/servers/src/mysql/server.rs +++ b/src/servers/src/mysql/server.rs @@ -19,12 +19,12 @@ use std::sync::Arc; use async_trait::async_trait; use auth::UserProviderRef; use catalog::process_manager::ProcessManagerRef; -use common_runtime::runtime::RuntimeTrait; use common_runtime::Runtime; +use common_runtime::runtime::RuntimeTrait; use common_telemetry::{debug, warn}; use futures::StreamExt; use opensrv_mysql::{ - plain_run_with_options, secure_run_with_options, AsyncMysqlIntermediary, IntermediaryOptions, + AsyncMysqlIntermediary, IntermediaryOptions, plain_run_with_options, secure_run_with_options, }; use snafu::ensure; use tokio; @@ -141,7 +141,7 @@ impl MysqlServer { io_runtime: Runtime, stream: AbortableStream, process_manager: Option, - ) -> impl Future { + ) -> impl Future + use<> { let spawn_ref = self.spawn_ref.clone(); let spawn_config = self.spawn_config.clone(); diff --git a/src/servers/src/mysql/writer.rs b/src/servers/src/mysql/writer.rs index 701f26f8bc..f571995b99 100644 --- a/src/servers/src/mysql/writer.rs +++ b/src/servers/src/mysql/writer.rs @@ -238,7 +238,7 @@ impl<'a, W: AsyncWrite + Unpin> MysqlResultWriter<'a, W> { "cannot write value {:?} in mysql protocol: unimplemented", &value ), - }) + }); } Value::Time(v) => row_writer .write_col(v.to_timezone_aware_string(Some(&query_context.timezone())))?, diff --git a/src/servers/src/opentsdb/codec.rs b/src/servers/src/opentsdb/codec.rs index ae7b8354ce..a3ab53a3b1 100644 --- a/src/servers/src/opentsdb/codec.rs +++ b/src/servers/src/opentsdb/codec.rs @@ -12,7 +12,7 @@ // See the License for the specific language 
governing permissions and // limitations under the License. -use api::v1::{column, Column, ColumnDataType, InsertRequest as GrpcInsertRequest, SemanticType}; +use api::v1::{Column, ColumnDataType, InsertRequest as GrpcInsertRequest, SemanticType, column}; use common_query::prelude::{GREPTIME_TIMESTAMP, GREPTIME_VALUE}; use crate::error::{self, Result}; @@ -63,7 +63,7 @@ impl DataPoint { return error::InvalidQuerySnafu { reason: format!("put: invalid timestamp: {}", tokens[2]), } - .fail() + .fail(); } }; @@ -73,7 +73,7 @@ impl DataPoint { return error::InvalidQuerySnafu { reason: format!("put: invalid value: {}", tokens[3]), } - .fail() + .fail(); } }; diff --git a/src/servers/src/otel_arrow.rs b/src/servers/src/otel_arrow.rs index edf2e8e89b..f905b39f5b 100644 --- a/src/servers/src/otel_arrow.rs +++ b/src/servers/src/otel_arrow.rs @@ -17,9 +17,9 @@ use common_error::ext::ErrorExt; use common_error::status_code::status_to_tonic_code; use common_telemetry::error; use futures::SinkExt; +use otel_arrow_rust::Consumer; use otel_arrow_rust::proto::opentelemetry::arrow::v1::arrow_metrics_service_server::ArrowMetricsService; use otel_arrow_rust::proto::opentelemetry::arrow::v1::{BatchArrowRecords, BatchStatus}; -use otel_arrow_rust::Consumer; use tonic::metadata::{Entry, MetadataValue}; use tonic::service::Interceptor; use tonic::{Request, Response, Status, Streaming}; diff --git a/src/servers/src/otlp/logs.rs b/src/servers/src/otlp/logs.rs index 20c1dd7bae..c60990077b 100644 --- a/src/servers/src/otlp/logs.rs +++ b/src/servers/src/otlp/logs.rs @@ -23,7 +23,7 @@ use api::v1::{ use bytes::Bytes; use jsonb::{Number as JsonbNumber, Value as JsonbValue}; use opentelemetry_proto::tonic::collector::logs::v1::ExportLogsServiceRequest; -use opentelemetry_proto::tonic::common::v1::{any_value, AnyValue, InstrumentationScope, KeyValue}; +use opentelemetry_proto::tonic::common::v1::{AnyValue, InstrumentationScope, KeyValue, any_value}; use opentelemetry_proto::tonic::logs::v1::{LogRecord, ResourceLogs, ScopeLogs}; use pipeline::{ ContextReq, GreptimePipelineParams, PipelineContext, PipelineWay, SchemaInfo, SelectInfo, diff --git a/src/servers/src/otlp/metrics.rs b/src/servers/src/otlp/metrics.rs index 4a9ba6e2fb..64f62665e4 100644 --- a/src/servers/src/otlp/metrics.rs +++ b/src/servers/src/otlp/metrics.rs @@ -19,7 +19,7 @@ use common_query::prelude::{GREPTIME_COUNT, GREPTIME_TIMESTAMP, GREPTIME_VALUE}; use itertools::Itertools; use lazy_static::lazy_static; use otel_arrow_rust::proto::opentelemetry::collector::metrics::v1::ExportMetricsServiceRequest; -use otel_arrow_rust::proto::opentelemetry::common::v1::{any_value, AnyValue, KeyValue}; +use otel_arrow_rust::proto::opentelemetry::common::v1::{AnyValue, KeyValue, any_value}; use otel_arrow_rust::proto::opentelemetry::metrics::v1::{metric, number_data_point, *}; use regex::Regex; use session::protocol_ctx::{MetricType, OtlpMetricCtx}; @@ -849,8 +849,8 @@ fn encode_summary( #[cfg(test)] mod tests { - use otel_arrow_rust::proto::opentelemetry::common::v1::any_value::Value as Val; use otel_arrow_rust::proto::opentelemetry::common::v1::AnyValue; + use otel_arrow_rust::proto::opentelemetry::common::v1::any_value::Value as Val; use otel_arrow_rust::proto::opentelemetry::metrics::v1::number_data_point::Value; use otel_arrow_rust::proto::opentelemetry::metrics::v1::summary_data_point::ValueAtQuantile; use otel_arrow_rust::proto::opentelemetry::metrics::v1::{ diff --git a/src/servers/src/otlp/trace/attributes.rs b/src/servers/src/otlp/trace/attributes.rs index 
b4c3a323f0..0855486343 100644 --- a/src/servers/src/otlp/trace/attributes.rs +++ b/src/servers/src/otlp/trace/attributes.rs @@ -18,8 +18,8 @@ use opentelemetry_proto::tonic::common::v1::any_value::Value::{ ArrayValue, BoolValue, BytesValue, DoubleValue, IntValue, KvlistValue, StringValue, }; use opentelemetry_proto::tonic::common::v1::{AnyValue, KeyValue}; -use serde::ser::{SerializeMap, SerializeSeq}; use serde::Serialize; +use serde::ser::{SerializeMap, SerializeSeq}; use crate::otlp::utils::key_value_to_jsonb; diff --git a/src/servers/src/otlp/trace/span.rs b/src/servers/src/otlp/trace/span.rs index 0163cbbdcc..d96bc17277 100644 --- a/src/servers/src/otlp/trace/span.rs +++ b/src/servers/src/otlp/trace/span.rs @@ -17,13 +17,13 @@ use std::fmt::Display; use common_time::timestamp::Timestamp; use itertools::Itertools; use opentelemetry_proto::tonic::collector::trace::v1::ExportTraceServiceRequest; -use opentelemetry_proto::tonic::common::v1::{any_value, InstrumentationScope, KeyValue}; +use opentelemetry_proto::tonic::common::v1::{InstrumentationScope, KeyValue, any_value}; use opentelemetry_proto::tonic::trace::v1::span::{Event, Link}; use opentelemetry_proto::tonic::trace::v1::{Span, Status}; use serde::Serialize; -use crate::otlp::trace::attributes::Attributes; use crate::otlp::trace::KEY_SERVICE_NAME; +use crate::otlp::trace::attributes::Attributes; use crate::otlp::utils::bytes_to_hex_string; #[derive(Debug, Clone)] diff --git a/src/servers/src/otlp/trace/v0.rs b/src/servers/src/otlp/trace/v0.rs index 04f6c9420d..03f279fccb 100644 --- a/src/servers/src/otlp/trace/v0.rs +++ b/src/servers/src/otlp/trace/v0.rs @@ -23,7 +23,7 @@ use pipeline::{GreptimePipelineParams, PipelineWay}; use session::context::QueryContextRef; use crate::error::Result; -use crate::otlp::trace::span::{parse, TraceSpan}; +use crate::otlp::trace::span::{TraceSpan, parse}; use crate::otlp::trace::{ DURATION_NANO_COLUMN, PARENT_SPAN_ID_COLUMN, SERVICE_NAME_COLUMN, SPAN_ATTRIBUTES_COLUMN, SPAN_EVENTS_COLUMN, SPAN_ID_COLUMN, SPAN_KIND_COLUMN, SPAN_NAME_COLUMN, TIMESTAMP_COLUMN, diff --git a/src/servers/src/otlp/trace/v1.rs b/src/servers/src/otlp/trace/v1.rs index 14181d74d2..306444bc18 100644 --- a/src/servers/src/otlp/trace/v1.rs +++ b/src/servers/src/otlp/trace/v1.rs @@ -25,7 +25,7 @@ use session::context::QueryContextRef; use crate::error::Result; use crate::otlp::trace::attributes::Attributes; -use crate::otlp::trace::span::{parse, TraceSpan}; +use crate::otlp::trace::span::{TraceSpan, parse}; use crate::otlp::trace::{ DURATION_NANO_COLUMN, KEY_SERVICE_NAME, PARENT_SPAN_ID_COLUMN, SERVICE_NAME_COLUMN, SPAN_EVENTS_COLUMN, SPAN_ID_COLUMN, SPAN_KIND_COLUMN, SPAN_NAME_COLUMN, TIMESTAMP_COLUMN, diff --git a/src/servers/src/otlp/utils.rs b/src/servers/src/otlp/utils.rs index d627e4b7d8..b43cb0baab 100644 --- a/src/servers/src/otlp/utils.rs +++ b/src/servers/src/otlp/utils.rs @@ -12,11 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use api::v1::value::ValueData; use api::v1::ColumnDataType; +use api::v1::value::ValueData; use itertools::Itertools; use jsonb::{Number as JsonbNumber, Value as JsonbValue}; -use opentelemetry_proto::tonic::common::v1::{any_value, KeyValue}; +use opentelemetry_proto::tonic::common::v1::{KeyValue, any_value}; pub fn bytes_to_hex_string(bs: &[u8]) -> String { bs.iter().map(|b| format!("{:02x}", b)).join("") diff --git a/src/servers/src/pipeline.rs b/src/servers/src/pipeline.rs index 5ecfd9d329..5a6710f420 100644 --- a/src/servers/src/pipeline.rs +++ b/src/servers/src/pipeline.rs @@ -21,9 +21,9 @@ use api::v1::helper::time_index_column_schema; use api::v1::{ColumnDataType, RowInsertRequest, Rows}; use common_time::timestamp::TimeUnit; use pipeline::{ - identity_pipeline, unwrap_or_continue_if_err, ContextReq, DispatchedTo, Pipeline, - PipelineContext, PipelineDefinition, PipelineExecOutput, SchemaInfo, TransformedOutput, - TransformerMode, GREPTIME_INTERNAL_IDENTITY_PIPELINE_NAME, + ContextReq, DispatchedTo, GREPTIME_INTERNAL_IDENTITY_PIPELINE_NAME, Pipeline, PipelineContext, + PipelineDefinition, PipelineExecOutput, SchemaInfo, TransformedOutput, TransformerMode, + identity_pipeline, unwrap_or_continue_if_err, }; use session::context::{Channel, QueryContextRef}; use snafu::ResultExt; diff --git a/src/servers/src/postgres.rs b/src/servers/src/postgres.rs index ca396dc6d2..7533fe084c 100644 --- a/src/servers/src/postgres.rs +++ b/src/servers/src/postgres.rs @@ -36,8 +36,8 @@ use pgwire::api::auth::{ServerParameterProvider, StartupHandler}; use pgwire::api::query::{ExtendedQueryHandler, SimpleQueryHandler}; use pgwire::api::{ClientInfo, ErrorHandler, PgWireServerHandlers}; pub use server::PostgresServer; -use session::context::Channel; use session::Session; +use session::context::Channel; use self::auth_handler::PgLoginVerifier; use self::handler::DefaultQueryParser; diff --git a/src/servers/src/postgres/auth_handler.rs b/src/servers/src/postgres/auth_handler.rs index d6388b7012..14450289ba 100644 --- a/src/servers/src/postgres/auth_handler.rs +++ b/src/servers/src/postgres/auth_handler.rs @@ -15,13 +15,13 @@ use std::fmt::Debug; use std::sync::Exclusive; -use ::auth::{userinfo_by_name, Identity, Password, UserInfoRef, UserProviderRef}; +use ::auth::{Identity, Password, UserInfoRef, UserProviderRef, userinfo_by_name}; use async_trait::async_trait; use common_catalog::parse_catalog_and_schema_from_db_string; use common_error::ext::ErrorExt; use futures::{Sink, SinkExt}; use pgwire::api::auth::StartupHandler; -use pgwire::api::{auth, ClientInfo, PgWireConnectionState}; +use pgwire::api::{ClientInfo, PgWireConnectionState, auth}; use pgwire::error::{ErrorInfo, PgWireError, PgWireResult}; use pgwire::messages::response::ErrorResponse; use pgwire::messages::startup::{Authentication, SecretKey}; @@ -31,9 +31,9 @@ use snafu::IntoError; use crate::error::{AuthSnafu, Result}; use crate::metrics::METRIC_AUTH_FAILURE; +use crate::postgres::PostgresServerHandlerInner; use crate::postgres::types::PgErrorCode; use crate::postgres::utils::convert_err; -use crate::postgres::PostgresServerHandlerInner; use crate::query_handler::sql::ServerSqlQueryHandlerRef; pub(crate) struct PgLoginVerifier { diff --git a/src/servers/src/postgres/fixtures.rs b/src/servers/src/postgres/fixtures.rs index 4774c4ece0..40ad3ca9ea 100644 --- a/src/servers/src/postgres/fixtures.rs +++ b/src/servers/src/postgres/fixtures.rs @@ -18,8 +18,8 @@ use std::sync::Arc; use futures::stream; use once_cell::sync::Lazy; -use 
pgwire::api::results::{DataRowEncoder, FieldFormat, FieldInfo, QueryResponse, Response, Tag}; use pgwire::api::Type; +use pgwire::api::results::{DataRowEncoder, FieldFormat, FieldInfo, QueryResponse, Response, Tag}; use pgwire::error::PgWireResult; use pgwire::messages::data::DataRow; use regex::Regex; diff --git a/src/servers/src/postgres/handler.rs b/src/servers/src/postgres/handler.rs index 76d409a87f..3f6926d4dd 100644 --- a/src/servers/src/postgres/handler.rs +++ b/src/servers/src/postgres/handler.rs @@ -17,13 +17,13 @@ use std::sync::Arc; use async_trait::async_trait; use common_query::{Output, OutputData}; -use common_recordbatch::error::Result as RecordBatchResult; use common_recordbatch::RecordBatch; +use common_recordbatch::error::Result as RecordBatchResult; use common_telemetry::{debug, tracing}; use datafusion_common::ParamValues; use datatypes::prelude::ConcreteDataType; use datatypes::schema::SchemaRef; -use futures::{future, stream, Sink, SinkExt, Stream, StreamExt}; +use futures::{Sink, SinkExt, Stream, StreamExt, future, stream}; use pgwire::api::portal::{Format, Portal}; use pgwire::api::query::{ExtendedQueryHandler, SimpleQueryHandler}; use pgwire::api::results::{ @@ -34,18 +34,18 @@ use pgwire::api::{ClientInfo, ErrorHandler, Type}; use pgwire::error::{ErrorInfo, PgWireError, PgWireResult}; use pgwire::messages::PgWireBackendMessage; use query::query_engine::DescribeResult; -use session::context::QueryContextRef; use session::Session; +use session::context::QueryContextRef; use snafu::ResultExt; use sql::dialect::PostgreSqlDialect; use sql::parser::{ParseOptions, ParserContext}; +use crate::SqlPlan; use crate::error::{DataFusionSnafu, Result}; use crate::postgres::types::*; use crate::postgres::utils::convert_err; -use crate::postgres::{fixtures, PostgresServerHandlerInner}; +use crate::postgres::{PostgresServerHandlerInner, fixtures}; use crate::query_handler::sql::ServerSqlQueryHandlerRef; -use crate::SqlPlan; #[async_trait] impl SimpleQueryHandler for PostgresServerHandlerInner { @@ -356,13 +356,12 @@ impl ExtendedQueryHandler for PostgresServerHandlerInner { } else { if let Some(mut resp) = fixtures::process(&sql_plan.query, self.session.new_query_context()) + && let Response::Query(query_response) = resp.remove(0) { - if let Response::Query(query_response) = resp.remove(0) { - return Ok(DescribeStatementResponse::new( - param_types, - (*query_response.row_schema()).clone(), - )); - } + return Ok(DescribeStatementResponse::new( + param_types, + (*query_response.row_schema()).clone(), + )); } Ok(DescribeStatementResponse::new(param_types, vec![])) @@ -387,12 +386,11 @@ impl ExtendedQueryHandler for PostgresServerHandlerInner { } else { if let Some(mut resp) = fixtures::process(&sql_plan.query, self.session.new_query_context()) + && let Response::Query(query_response) = resp.remove(0) { - if let Response::Query(query_response) = resp.remove(0) { - return Ok(DescribePortalResponse::new( - (*query_response.row_schema()).clone(), - )); - } + return Ok(DescribePortalResponse::new( + (*query_response.row_schema()).clone(), + )); } Ok(DescribePortalResponse::new(vec![])) diff --git a/src/servers/src/postgres/server.rs b/src/servers/src/postgres/server.rs index a509771fcf..3c7a711780 100644 --- a/src/servers/src/postgres/server.rs +++ b/src/servers/src/postgres/server.rs @@ -19,8 +19,8 @@ use std::sync::Arc; use ::auth::UserProviderRef; use async_trait::async_trait; use catalog::process_manager::ProcessManagerRef; -use common_runtime::runtime::RuntimeTrait; use 
common_runtime::Runtime; +use common_runtime::runtime::RuntimeTrait; use common_telemetry::{debug, warn}; use futures::StreamExt; use pgwire::tokio::process_socket; @@ -74,7 +74,7 @@ impl PostgresServer { &self, io_runtime: Runtime, accepting_stream: AbortableStream, - ) -> impl Future { + ) -> impl Future + use<> { let handler_maker = self.make_handler.clone(); let tls_server_config = self.tls_server_config.clone(); let process_manager = self.process_manager.clone(); diff --git a/src/servers/src/postgres/types.rs b/src/servers/src/postgres/types.rs index 867cb1c3e6..d4d6393437 100644 --- a/src/servers/src/postgres/types.rs +++ b/src/servers/src/postgres/types.rs @@ -27,11 +27,11 @@ use datafusion_expr::LogicalPlan; use datatypes::arrow::datatypes::DataType as ArrowDataType; use datatypes::prelude::{ConcreteDataType, Value}; use datatypes::schema::Schema; -use datatypes::types::{json_type_value_to_string, IntervalType, TimestampType}; +use datatypes::types::{IntervalType, TimestampType, json_type_value_to_string}; use datatypes::value::ListValue; +use pgwire::api::Type; use pgwire::api::portal::{Format, Portal}; use pgwire::api::results::{DataRowEncoder, FieldInfo}; -use pgwire::api::Type; use pgwire::error::{PgWireError, PgWireResult}; use session::context::QueryContextRef; use session::session_config::PGByteaOutputValue; @@ -41,9 +41,9 @@ use self::bytea::{EscapeOutputBytea, HexOutputBytea}; use self::datetime::{StylingDate, StylingDateTime}; pub use self::error::{PgErrorCode, PgErrorSeverity}; use self::interval::PgInterval; +use crate::SqlPlan; use crate::error::{self as server_error, DataFusionSnafu, Error, Result}; use crate::postgres::utils::convert_err; -use crate::SqlPlan; pub(super) fn schema_to_pg(origin: &Schema, field_formats: &Format) -> Result> { origin @@ -648,7 +648,7 @@ pub(super) fn parameters_to_scalar_values( return Err(invalid_parameter_error( "invalid_parameter_type", Some(format!("Expected: {}, found: {}", server_type, client_type)), - )) + )); } } } else { @@ -664,7 +664,7 @@ pub(super) fn parameters_to_scalar_values( return Err(invalid_parameter_error( "invalid_parameter_type", Some(format!("Expected: {}, found: {}", server_type, client_type)), - )) + )); } } } else { @@ -690,7 +690,7 @@ pub(super) fn parameters_to_scalar_values( return Err(invalid_parameter_error( "invalid_parameter_type", Some(format!("Expected: {}, found: {}", server_type, client_type)), - )) + )); } } } else { @@ -716,7 +716,7 @@ pub(super) fn parameters_to_scalar_values( return Err(invalid_parameter_error( "invalid_parameter_type", Some(format!("Expected: {}, found: {}", server_type, client_type)), - )) + )); } } } else { @@ -742,7 +742,7 @@ pub(super) fn parameters_to_scalar_values( return Err(invalid_parameter_error( "invalid_parameter_type", Some(format!("Expected: {}, found: {}", server_type, client_type)), - )) + )); } } } else { @@ -769,7 +769,7 @@ pub(super) fn parameters_to_scalar_values( return Err(invalid_parameter_error( "invalid_parameter_type", Some(format!("Expected: {}, found: {}", server_type, client_type)), - )) + )); } } } else { @@ -796,7 +796,7 @@ pub(super) fn parameters_to_scalar_values( return Err(invalid_parameter_error( "invalid_parameter_type", Some(format!("Expected: {}, found: {}", server_type, client_type)), - )) + )); } } } else { @@ -829,7 +829,7 @@ pub(super) fn parameters_to_scalar_values( return Err(invalid_parameter_error( "invalid_parameter_type", Some(format!("Expected: {}, found: {}", server_type, client_type)), - )) + )); } } } else { @@ -1026,13 
+1026,13 @@ pub(super) fn param_types_to_pg_types( mod test { use std::sync::Arc; + use common_time::Timestamp; use common_time::interval::IntervalUnit; use common_time::timestamp::TimeUnit; - use common_time::Timestamp; use datatypes::schema::{ColumnSchema, Schema}; use datatypes::value::ListValue; - use pgwire::api::results::{FieldFormat, FieldInfo}; use pgwire::api::Type; + use pgwire::api::results::{FieldFormat, FieldInfo}; use session::context::QueryContextBuilder; use super::*; diff --git a/src/servers/src/postgres/types/interval.rs b/src/servers/src/postgres/types/interval.rs index ec9bfe912b..5d977ae47e 100644 --- a/src/servers/src/postgres/types/interval.rs +++ b/src/servers/src/postgres/types/interval.rs @@ -19,7 +19,7 @@ use common_time::interval::IntervalFormat; use common_time::timestamp::TimeUnit; use common_time::{Duration, IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth}; use pgwire::types::ToSqlText; -use postgres_types::{to_sql_checked, FromSql, IsNull, ToSql, Type}; +use postgres_types::{FromSql, IsNull, ToSql, Type, to_sql_checked}; use crate::error; @@ -217,8 +217,8 @@ impl ToSqlText for PgInterval { #[cfg(test)] mod tests { - use common_time::timestamp::TimeUnit; use common_time::Duration; + use common_time::timestamp::TimeUnit; use super::*; diff --git a/src/servers/src/prom_row_builder.rs b/src/servers/src/prom_row_builder.rs index 588a026580..a6591dbfe1 100644 --- a/src/servers/src/prom_row_builder.rs +++ b/src/servers/src/prom_row_builder.rs @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::collections::hash_map::Entry; use std::collections::HashMap; +use std::collections::hash_map::Entry; use std::string::ToString; use api::prom_store::remote::Sample; @@ -207,8 +207,8 @@ impl TableBuilder { #[cfg(test)] mod tests { use api::prom_store::remote::Sample; - use api::v1::value::ValueData; use api::v1::Value; + use api::v1::value::ValueData; use arrow::datatypes::ToByteSlice; use bytes::Bytes; use prost::DecodeError; diff --git a/src/servers/src/prom_store.rs b/src/servers/src/prom_store.rs index 7b88cd6dd1..95dd6453dc 100644 --- a/src/servers/src/prom_store.rs +++ b/src/servers/src/prom_store.rs @@ -26,13 +26,13 @@ use common_query::prelude::{GREPTIME_TIMESTAMP, GREPTIME_VALUE}; use common_recordbatch::{RecordBatch, RecordBatches}; use common_telemetry::tracing; use common_time::timestamp::TimeUnit; -use datafusion::prelude::{col, lit, regexp_match, Expr}; +use datafusion::prelude::{Expr, col, lit, regexp_match}; use datafusion_common::ScalarValue; use datafusion_expr::LogicalPlan; use datatypes::prelude::{ConcreteDataType, Value}; use openmetrics_parser::{MetricsExposition, PrometheusType, PrometheusValue}; use query::dataframe::DataFrame; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use snap::raw::{Decoder, Encoder}; use crate::error::{self, Result}; @@ -655,7 +655,10 @@ mod tests { let plan = query_to_plan(DataFrame::DataFusion(dataframe), &q).unwrap(); let display_string = format!("{}", plan.display_indent()); - assert_eq!("Filter: ?table?.greptime_timestamp >= TimestampMillisecond(1000, None) AND ?table?.greptime_timestamp <= TimestampMillisecond(2000, None)\n TableScan: ?table?", display_string); + assert_eq!( + "Filter: ?table?.greptime_timestamp >= TimestampMillisecond(1000, None) AND ?table?.greptime_timestamp <= TimestampMillisecond(2000, None)\n TableScan: ?table?", + display_string + ); let q = Query { start_timestamp_ms: 
1000, @@ -684,7 +687,10 @@ mod tests { let plan = query_to_plan(DataFrame::DataFusion(dataframe), &q).unwrap(); let display_string = format!("{}", plan.display_indent()); - assert_eq!("Filter: ?table?.greptime_timestamp >= TimestampMillisecond(1000, None) AND ?table?.greptime_timestamp <= TimestampMillisecond(2000, None) AND regexp_match(?table?.job, Utf8(\"*prom*\")) IS NOT NULL AND ?table?.instance != Utf8(\"localhost\")\n TableScan: ?table?", display_string); + assert_eq!( + "Filter: ?table?.greptime_timestamp >= TimestampMillisecond(1000, None) AND ?table?.greptime_timestamp <= TimestampMillisecond(2000, None) AND regexp_match(?table?.job, Utf8(\"*prom*\")) IS NOT NULL AND ?table?.instance != Utf8(\"localhost\")\n TableScan: ?table?", + display_string + ); } fn column_schemas_with( diff --git a/src/servers/src/prometheus.rs b/src/servers/src/prometheus.rs index 7066eda94b..0fbdb97c34 100644 --- a/src/servers/src/prometheus.rs +++ b/src/servers/src/prometheus.rs @@ -16,7 +16,7 @@ use catalog::system_schema::information_schema::tables::{ ENGINE as TABLE_ENGINE, TABLE_CATALOG, TABLE_NAME, TABLE_SCHEMA, }; use common_telemetry::tracing; -use datafusion::prelude::{col, lit, regexp_match, Expr}; +use datafusion::prelude::{Expr, col, lit, regexp_match}; use datafusion_expr::LogicalPlan; use promql_parser::label::{MatchOp, Matcher}; use query::dataframe::DataFrame; diff --git a/src/servers/src/proto.rs b/src/servers/src/proto.rs index fdefdfe724..564943a152 100644 --- a/src/servers/src/proto.rs +++ b/src/servers/src/proto.rs @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::collections::btree_map::Entry; use std::collections::BTreeMap; +use std::collections::btree_map::Entry; use std::ops::Deref; use std::slice; @@ -22,17 +22,17 @@ use bytes::{Buf, Bytes}; use common_query::prelude::{GREPTIME_TIMESTAMP, GREPTIME_VALUE}; use common_telemetry::warn; use pipeline::{ContextReq, GreptimePipelineParams, PipelineContext, PipelineDefinition}; -use prost::encoding::message::merge; -use prost::encoding::{decode_key, decode_varint, WireType}; use prost::DecodeError; +use prost::encoding::message::merge; +use prost::encoding::{WireType, decode_key, decode_varint}; use session::context::QueryContextRef; use snafu::OptionExt; use vrl::prelude::NotNan; use vrl::value::{KeyString, Value as VrlValue}; use crate::error::InternalSnafu; -use crate::http::event::PipelineIngestRequest; use crate::http::PromValidationMode; +use crate::http::event::PipelineIngestRequest; use crate::pipeline::run_pipeline; use crate::prom_row_builder::{PromCtx, TablesBuilder}; use crate::prom_store::{ diff --git a/src/servers/src/query_handler/grpc.rs b/src/servers/src/query_handler/grpc.rs index 49ec6917ce..305fde4448 100644 --- a/src/servers/src/query_handler/grpc.rs +++ b/src/servers/src/query_handler/grpc.rs @@ -23,8 +23,8 @@ use common_grpc::flight::FlightDecoder; use common_query::Output; use session::context::QueryContextRef; use snafu::ResultExt; -use table::table_name::TableName; use table::TableRef; +use table::table_name::TableName; use crate::error::{self, Result}; diff --git a/src/servers/src/row_writer.rs b/src/servers/src/row_writer.rs index 974b638aa9..ec439a8659 100644 --- a/src/servers/src/row_writer.rs +++ b/src/servers/src/row_writer.rs @@ -22,10 +22,10 @@ use api::v1::{ RowInsertRequest, RowInsertRequests, Rows, SemanticType, Value, }; use common_grpc::precision::Precision; +use common_time::Timestamp; use 
common_time::timestamp::TimeUnit; use common_time::timestamp::TimeUnit::Nanosecond; -use common_time::Timestamp; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use crate::error::{ IncompatibleSchemaSnafu, Result, RowWriterSnafu, TimePrecisionSnafu, TimestampOverflowSnafu, diff --git a/src/servers/src/server.rs b/src/servers/src/server.rs index 9e6f674dda..6a3028f63c 100644 --- a/src/servers/src/server.rs +++ b/src/servers/src/server.rs @@ -20,8 +20,8 @@ use std::sync::Arc; use async_trait::async_trait; use common_runtime::Runtime; use common_telemetry::{error, info}; -use futures::future::{try_join_all, AbortHandle, AbortRegistration, Abortable}; -use snafu::{ensure, ResultExt}; +use futures::future::{AbortHandle, AbortRegistration, Abortable, try_join_all}; +use snafu::{ResultExt, ensure}; use strum::Display; use tokio::sync::Mutex; use tokio::task::JoinHandle; diff --git a/src/servers/src/tls.rs b/src/servers/src/tls.rs index 1810aeca66..245bf4c71a 100644 --- a/src/servers/src/tls.rs +++ b/src/servers/src/tls.rs @@ -22,7 +22,7 @@ use std::sync::{Arc, RwLock}; use common_telemetry::{error, info}; use notify::{EventKind, RecursiveMode, Watcher}; use rustls::ServerConfig; -use rustls_pemfile::{certs, read_one, Item}; +use rustls_pemfile::{Item, certs, read_one}; use rustls_pki_types::{CertificateDer, PrivateKeyDer}; use serde::{Deserialize, Serialize}; use snafu::ResultExt; diff --git a/src/servers/tests/http/authorize.rs b/src/servers/tests/http/authorize.rs index 77da594b29..214ab7d9f9 100644 --- a/src/servers/tests/http/authorize.rs +++ b/src/servers/tests/http/authorize.rs @@ -14,12 +14,12 @@ use std::sync::Arc; -use auth::tests::MockUserProvider; use auth::UserProvider; +use auth::tests::MockUserProvider; use axum::http; use hyper::{Request, StatusCode}; -use servers::http::authorize::inner_auth; use servers::http::AUTHORIZATION_HEADER; +use servers::http::authorize::inner_auth; use session::context::QueryContext; async fn check_http_auth(header_key: &str) { diff --git a/src/servers/tests/http/http_handler_test.rs b/src/servers/tests/http/http_handler_test.rs index 30ca45600c..46bb386ee6 100644 --- a/src/servers/tests/http/http_handler_test.rs +++ b/src/servers/tests/http/http_handler_test.rs @@ -14,17 +14,17 @@ use std::collections::HashMap; +use axum::Form; use axum::extract::{Json, Query, State}; use axum::http::header; use axum::response::{IntoResponse, Response}; -use axum::Form; use bytes::Bytes; use headers::HeaderValue; use mime_guess::mime; use servers::http::GreptimeQueryOutput::Records; use servers::http::{ - handler as http_handler, ApiState, GreptimeOptionsConfigState, GreptimeQueryOutput, - HttpResponse, + ApiState, GreptimeOptionsConfigState, GreptimeQueryOutput, HttpResponse, + handler as http_handler, }; use servers::metrics_handler::MetricsHandler; use session::context::QueryContext; diff --git a/src/servers/tests/http/influxdb_test.rs b/src/servers/tests/http/influxdb_test.rs index e3822059a5..a88b0750ea 100644 --- a/src/servers/tests/http/influxdb_test.rs +++ b/src/servers/tests/http/influxdb_test.rs @@ -17,7 +17,7 @@ use std::sync::Arc; use api::v1::RowInsertRequests; use async_trait::async_trait; use auth::tests::{DatabaseAuthInfo, MockUserProvider}; -use axum::{http, Router}; +use axum::{Router, http}; use common_query::Output; use common_test_util::ports; use datafusion_expr::LogicalPlan; @@ -28,8 +28,8 @@ use servers::http::header::constants::GREPTIME_DB_HEADER_NAME; use servers::http::test_helpers::TestClient; use 
servers::http::{HttpOptions, HttpServerBuilder}; use servers::influxdb::InfluxdbRequest; -use servers::query_handler::sql::SqlQueryHandler; use servers::query_handler::InfluxdbLineProtocolHandler; +use servers::query_handler::sql::SqlQueryHandler; use session::context::QueryContextRef; use sql::statements::statement::Statement; use tokio::sync::mpsc; diff --git a/src/servers/tests/http/opentsdb_test.rs b/src/servers/tests/http/opentsdb_test.rs index 6bb0b79666..0fe6ff8df3 100644 --- a/src/servers/tests/http/opentsdb_test.rs +++ b/src/servers/tests/http/opentsdb_test.rs @@ -25,8 +25,8 @@ use servers::error::{self, Result}; use servers::http::test_helpers::TestClient; use servers::http::{HttpOptions, HttpServerBuilder}; use servers::opentsdb::codec::DataPoint; -use servers::query_handler::sql::SqlQueryHandler; use servers::query_handler::OpentsdbProtocolHandler; +use servers::query_handler::sql::SqlQueryHandler; use session::context::QueryContextRef; use sql::statements::statement::Statement; use tokio::sync::mpsc; @@ -193,7 +193,10 @@ async fn test_opentsdb_debug_put() { .send() .await; assert_eq!(result.status(), 200); - assert_eq!(result.text().await, "{\"success\":0,\"failed\":1,\"errors\":[{\"datapoint\":{\"metric\":\"should_failed\",\"timestamp\":1000,\"value\":1.0,\"tags\":{\"host\":\"web01\"}},\"error\":\"Internal error: 1003\"}]}"); + assert_eq!( + result.text().await, + "{\"success\":0,\"failed\":1,\"errors\":[{\"datapoint\":{\"metric\":\"should_failed\",\"timestamp\":1000,\"value\":1.0,\"tags\":{\"host\":\"web01\"}},\"error\":\"Internal error: 1003\"}]}" + ); // multiple data point summary debug put let result = client @@ -218,7 +221,10 @@ async fn test_opentsdb_debug_put() { .send() .await; assert_eq!(result.status(), 200); - assert_eq!(result.text().await, "{\"success\":1,\"failed\":1,\"errors\":[{\"datapoint\":{\"metric\":\"should_failed\",\"timestamp\":1000,\"value\":1.0,\"tags\":{\"host\":\"web01\"}},\"error\":\"Internal error: 1003\"}]}"); + assert_eq!( + result.text().await, + "{\"success\":1,\"failed\":1,\"errors\":[{\"datapoint\":{\"metric\":\"should_failed\",\"timestamp\":1000,\"value\":1.0,\"tags\":{\"host\":\"web01\"}},\"error\":\"Internal error: 1003\"}]}" + ); let mut metrics = vec![]; while let Ok(s) = rx.try_recv() { diff --git a/src/servers/tests/http/prom_store_test.rs b/src/servers/tests/http/prom_store_test.rs index e0bd688a15..55ce9089bf 100644 --- a/src/servers/tests/http/prom_store_test.rs +++ b/src/servers/tests/http/prom_store_test.rs @@ -31,7 +31,7 @@ use servers::http::header::{CONTENT_ENCODING_SNAPPY, CONTENT_TYPE_PROTOBUF}; use servers::http::test_helpers::TestClient; use servers::http::{HttpOptions, HttpServerBuilder, PromValidationMode}; use servers::prom_store; -use servers::prom_store::{snappy_compress, Metrics}; +use servers::prom_store::{Metrics, snappy_compress}; use servers::query_handler::sql::SqlQueryHandler; use servers::query_handler::{PromStoreProtocolHandler, PromStoreResponse}; use session::context::QueryContextRef; diff --git a/src/servers/tests/mod.rs b/src/servers/tests/mod.rs index e38ac5abc9..aeadb74f90 100644 --- a/src/servers/tests/mod.rs +++ b/src/servers/tests/mod.rs @@ -34,8 +34,8 @@ use servers::query_handler::sql::{ServerSqlQueryHandlerRef, SqlQueryHandler}; use session::context::QueryContextRef; use snafu::ensure; use sql::statements::statement::Statement; -use table::table_name::TableName; use table::TableRef; +use table::table_name::TableName; mod http; mod interceptor; diff --git a/src/servers/tests/mysql/mod.rs 
b/src/servers/tests/mysql/mod.rs index 4bf5fac445..a62c8840f1 100644 --- a/src/servers/tests/mysql/mod.rs +++ b/src/servers/tests/mysql/mod.rs @@ -17,9 +17,9 @@ use std::sync::Arc; use datatypes::prelude::*; use datatypes::schema::ColumnSchema; use datatypes::vectors::{ - BinaryVector, BooleanVector, Float32Vector, Float64Vector, Int16Vector, Int32Vector, - Int64Vector, Int8Vector, NullVector, StringVector, UInt16Vector, UInt32Vector, UInt64Vector, - UInt8Vector, + BinaryVector, BooleanVector, Float32Vector, Float64Vector, Int8Vector, Int16Vector, + Int32Vector, Int64Vector, NullVector, StringVector, UInt8Vector, UInt16Vector, UInt32Vector, + UInt64Vector, }; use mysql_async::prelude::FromRow; use mysql_async::{FromRowError, Value as MysqlValue}; @@ -48,9 +48,11 @@ impl TestingData { // and all columns length are equal assert!(columns.windows(2).all(|x| x[0].len() == x[1].len())); // and all output rows width are equal - assert!(mysql_text_output_rows - .windows(2) - .all(|x| x[0].len() == x[1].len())); + assert!( + mysql_text_output_rows + .windows(2) + .all(|x| x[0].len() == x[1].len()) + ); // and the rows' columns size equals to input columns size. assert_eq!(columns.first().unwrap().len(), mysql_text_output_rows.len()); diff --git a/src/servers/tests/mysql/mysql_server_test.rs b/src/servers/tests/mysql/mysql_server_test.rs index f89da0043d..16272af98f 100644 --- a/src/servers/tests/mysql/mysql_server_test.rs +++ b/src/servers/tests/mysql/mysql_server_test.rs @@ -19,8 +19,8 @@ use std::time::Duration; use auth::tests::{DatabaseAuthInfo, MockUserProvider}; use common_catalog::consts::DEFAULT_SCHEMA_NAME; use common_recordbatch::RecordBatch; -use common_runtime::runtime::BuilderBuild; use common_runtime::Builder as RuntimeBuilder; +use common_runtime::runtime::BuilderBuild; use datatypes::prelude::VectorRef; use datatypes::schema::{ColumnSchema, Schema}; use datatypes::value::Value; @@ -32,11 +32,11 @@ use servers::install_ring_crypto_provider; use servers::mysql::server::{MysqlServer, MysqlSpawnConfig, MysqlSpawnRef}; use servers::server::Server; use servers::tls::{ReloadableTlsServerConfig, TlsOption}; -use table::test_util::MemTable; use table::TableRef; +use table::test_util::MemTable; use crate::create_testing_sql_query_handler; -use crate::mysql::{all_datatype_testing_data, MysqlTextRow, TestingData}; +use crate::mysql::{MysqlTextRow, TestingData, all_datatype_testing_data}; #[derive(Default)] struct MysqlOpts<'a> { @@ -87,10 +87,12 @@ async fn test_start_mysql_server() -> Result<()> { mysql_server.start(listening).await.unwrap(); let result = mysql_server.start(listening).await; - assert!(result - .unwrap_err() - .to_string() - .contains("MySQL server has been started.")); + assert!( + result + .unwrap_err() + .to_string() + .contains("MySQL server has been started.") + ); Ok(()) } @@ -173,10 +175,12 @@ async fn test_shutdown_mysql_server() -> Result<()> { let mut mysql_server = create_mysql_server(table, Default::default())?; let result = mysql_server.shutdown().await; - assert!(result - .unwrap_err() - .to_string() - .contains("MySQL server is not started.")); + assert!( + result + .unwrap_err() + .to_string() + .contains("MySQL server is not started.") + ); let listening = "127.0.0.1:0".parse::().unwrap(); mysql_server.start(listening).await.unwrap(); diff --git a/src/servers/tests/mysql/mysql_writer_test.rs b/src/servers/tests/mysql/mysql_writer_test.rs index 9bedf72a3f..dff541a762 100644 --- a/src/servers/tests/mysql/mysql_writer_test.rs +++ 
b/src/servers/tests/mysql/mysql_writer_test.rs @@ -18,7 +18,7 @@ use datatypes::prelude::*; use datatypes::schema::{ColumnSchema, Schema}; use servers::mysql::writer::create_mysql_column_def; -use crate::mysql::{all_datatype_testing_data, TestingData}; +use crate::mysql::{TestingData, all_datatype_testing_data}; #[test] fn test_create_mysql_column_def() { diff --git a/src/servers/tests/postgres/mod.rs b/src/servers/tests/postgres/mod.rs index 612aa3dd94..3d73d84e27 100644 --- a/src/servers/tests/postgres/mod.rs +++ b/src/servers/tests/postgres/mod.rs @@ -16,11 +16,11 @@ use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; -use auth::tests::{DatabaseAuthInfo, MockUserProvider}; use auth::UserProviderRef; +use auth::tests::{DatabaseAuthInfo, MockUserProvider}; use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; -use common_runtime::runtime::BuilderBuild; use common_runtime::Builder as RuntimeBuilder; +use common_runtime::runtime::BuilderBuild; use pgwire::api::Type; use rand::Rng; use rustls::client::danger::{ServerCertVerified, ServerCertVerifier}; @@ -31,8 +31,8 @@ use servers::install_ring_crypto_provider; use servers::postgres::PostgresServer; use servers::server::Server; use servers::tls::{ReloadableTlsServerConfig, TlsOption}; -use table::test_util::MemTable; use table::TableRef; +use table::test_util::MemTable; use tokio_postgres::{Client, Error as PgError, NoTls, SimpleQueryMessage}; use crate::create_testing_instance; @@ -84,10 +84,12 @@ pub async fn test_start_postgres_server() -> Result<()> { pg_server.start(listening).await.unwrap(); let result = pg_server.start(listening).await; - assert!(result - .unwrap_err() - .to_string() - .contains("Postgres server has been started.")); + assert!( + result + .unwrap_err() + .to_string() + .contains("Postgres server has been started.") + ); Ok(()) } @@ -143,10 +145,12 @@ async fn test_shutdown_pg_server(with_pwd: bool) -> Result<()> { let table = MemTable::default_numbers_table(); let mut postgres_server = create_postgres_server(table, with_pwd, Default::default(), None)?; let result = postgres_server.shutdown().await; - assert!(result - .unwrap_err() - .to_string() - .contains("Postgres server is not started.")); + assert!( + result + .unwrap_err() + .to_string() + .contains("Postgres server is not started.") + ); let listening = "127.0.0.1:0".parse::().unwrap(); postgres_server.start(listening).await.unwrap(); diff --git a/src/session/src/context.rs b/src/session/src/context.rs index 533d01a27b..0ba0cdb2f8 100644 --- a/src/session/src/context.rs +++ b/src/session/src/context.rs @@ -18,16 +18,16 @@ use std::net::SocketAddr; use std::sync::{Arc, RwLock}; use std::time::Duration; -use api::v1::region::RegionRequestHeader; use api::v1::ExplainOptions; +use api::v1::region::RegionRequestHeader; use arc_swap::ArcSwap; use auth::UserInfoRef; use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; use common_catalog::{build_db_string, parse_catalog_and_schema_from_db_string}; use common_recordbatch::cursor::RecordBatchStreamCursor; use common_telemetry::warn; -use common_time::timezone::parse_timezone; use common_time::Timezone; +use common_time::timezone::parse_timezone; use datafusion_common::config::ConfigOptions; use derive_builder::Builder; use sql::dialect::{Dialect, GenericDialect, GreptimeDbDialect, MySqlDialect, PostgreSqlDialect}; @@ -662,8 +662,8 @@ mod test { use common_catalog::consts::DEFAULT_CATALOG_NAME; use super::*; - use crate::context::Channel; use crate::Session; + use 
crate::context::Channel; #[test] fn test_session() { diff --git a/src/session/src/lib.rs b/src/session/src/lib.rs index ba0484f9c5..8696419014 100644 --- a/src/session/src/lib.rs +++ b/src/session/src/lib.rs @@ -28,8 +28,8 @@ use common_catalog::build_db_string; use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; use common_recordbatch::cursor::RecordBatchStreamCursor; pub use common_session::ReadPreference; -use common_time::timezone::get_timezone; use common_time::Timezone; +use common_time::timezone::get_timezone; use context::{ConfigurationVariables, QueryContextBuilder}; use derive_more::Debug; diff --git a/src/sql/src/ast.rs b/src/sql/src/ast.rs index fa26112dd1..5d207fb579 100644 --- a/src/sql/src/ast.rs +++ b/src/sql/src/ast.rs @@ -13,10 +13,10 @@ // limitations under the License. pub use sqlparser::ast::{ - visit_expressions_mut, visit_statements_mut, BinaryOperator, ColumnDef, ColumnOption, - ColumnOptionDef, DataType, Expr, Function, FunctionArg, FunctionArgExpr, FunctionArguments, - Ident, ObjectName, ObjectNamePart, SqlOption, TableConstraint, TimezoneInfo, Value, - ValueWithSpan, Visit, VisitMut, Visitor, VisitorMut, + BinaryOperator, ColumnDef, ColumnOption, ColumnOptionDef, DataType, Expr, Function, + FunctionArg, FunctionArgExpr, FunctionArguments, Ident, ObjectName, ObjectNamePart, SqlOption, + TableConstraint, TimezoneInfo, Value, ValueWithSpan, Visit, VisitMut, Visitor, VisitorMut, + visit_expressions_mut, visit_statements_mut, }; pub trait ObjectNamePartExt { diff --git a/src/sql/src/parsers/admin_parser.rs b/src/sql/src/parsers/admin_parser.rs index 18b2039d3e..c2e0ddf8bb 100644 --- a/src/sql/src/parsers/admin_parser.rs +++ b/src/sql/src/parsers/admin_parser.rs @@ -106,27 +106,21 @@ mod tests { #[test] fn test_invalid_admin_statement() { let sql = "ADMIN"; - assert!(ParserContext::create_with_dialect( - sql, - &GreptimeDbDialect {}, - ParseOptions::default() - ) - .is_err()); + assert!( + ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default()) + .is_err() + ); let sql = "ADMIN test"; - assert!(ParserContext::create_with_dialect( - sql, - &GreptimeDbDialect {}, - ParseOptions::default() - ) - .is_err()); + assert!( + ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default()) + .is_err() + ); let sql = "ADMIN test test"; - assert!(ParserContext::create_with_dialect( - sql, - &GreptimeDbDialect {}, - ParseOptions::default() - ) - .is_err()); + assert!( + ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default()) + .is_err() + ); } } diff --git a/src/sql/src/parsers/alter_parser.rs b/src/sql/src/parsers/alter_parser.rs index cfefcf0232..ee49b43d2b 100644 --- a/src/sql/src/parsers/alter_parser.rs +++ b/src/sql/src/parsers/alter_parser.rs @@ -19,7 +19,7 @@ use std::collections::HashMap; use common_query::AddColumnLocation; use datatypes::schema::COLUMN_FULLTEXT_CHANGE_OPT_KEY_ENABLE; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use sqlparser::ast::Ident; use sqlparser::keywords::Keyword; use sqlparser::parser::{Parser, ParserError}; @@ -150,7 +150,7 @@ impl ParserContext<'_> { return Err(ParserError::ParserError(format!( "expect table name, actual: {new_table_name_obj}" ))) - .context(error::SyntaxSnafu) + .context(error::SyntaxSnafu); } }; AlterTableOperation::RenameTable { new_table_name } @@ -642,11 +642,13 @@ mod tests { assert_eq!(add_columns.len(), 1); assert_eq!("tagk_i", add_columns[0].column_def.name.value); 
assert_eq!(DataType::String(None), add_columns[0].column_def.data_type); - assert!(add_columns[0] - .column_def - .options - .iter() - .any(|o| matches!(o.option, ColumnOption::Null))); + assert!( + add_columns[0] + .column_def + .options + .iter() + .any(|o| matches!(o.option, ColumnOption::Null)) + ); assert_eq!(&None, &add_columns[0].location); } _ => unreachable!(), @@ -676,11 +678,13 @@ mod tests { AlterTableOperation::AddColumns { add_columns } => { assert_eq!("tagk_i", add_columns[0].column_def.name.value); assert_eq!(DataType::String(None), add_columns[0].column_def.data_type); - assert!(add_columns[0] - .column_def - .options - .iter() - .any(|o| matches!(o.option, ColumnOption::Null))); + assert!( + add_columns[0] + .column_def + .options + .iter() + .any(|o| matches!(o.option, ColumnOption::Null)) + ); assert_eq!(&Some(AddColumnLocation::First), &add_columns[0].location); } _ => unreachable!(), @@ -961,7 +965,10 @@ mod tests { ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default()) .unwrap_err(); let err = result.output_msg(); - assert_eq!(err, "Invalid SQL syntax: sql parser error: Expected ADD or DROP or MODIFY or RENAME or SET after ALTER TABLE, found: table_t"); + assert_eq!( + err, + "Invalid SQL syntax: sql parser error: Expected ADD or DROP or MODIFY or RENAME or SET after ALTER TABLE, found: table_t" + ); let sql = "ALTER TABLE test_table RENAME table_t"; let mut result = @@ -1323,14 +1330,20 @@ mod tests { ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default()) .unwrap_err(); let err = result.output_msg(); - assert_eq!(err, "Invalid SQL syntax: sql parser error: Expected FULLTEXT OR INVERTED OR SKIPPING INDEX, found: 100"); + assert_eq!( + err, + "Invalid SQL syntax: sql parser error: Expected FULLTEXT OR INVERTED OR SKIPPING INDEX, found: 100" + ); let sql = "ALTER TABLE test_table MODIFY COLUMN a SET DEFAULT 100, b SET DEFAULT 200"; let result = ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default()) .unwrap_err(); let err = result.output_msg(); - assert_eq!(err, "Invalid SQL syntax: sql parser error: Expected: MODIFY, found: b at Line: 1, Column: 57"); + assert_eq!( + err, + "Invalid SQL syntax: sql parser error: Expected: MODIFY, found: b at Line: 1, Column: 57" + ); let sql = "ALTER TABLE test_table MODIFY COLUMN a SET DEFAULT 100, MODIFY COLUMN b DROP DEFAULT 200"; let result = diff --git a/src/sql/src/parsers/alter_parser/trigger.rs b/src/sql/src/parsers/alter_parser/trigger.rs index b7a47bb8b0..47fcedd6c4 100644 --- a/src/sql/src/parsers/alter_parser/trigger.rs +++ b/src/sql/src/parsers/alter_parser/trigger.rs @@ -1,4 +1,4 @@ -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use sqlparser::ast::Ident; use sqlparser::parser::Parser; use sqlparser::tokenizer::Token; @@ -6,13 +6,13 @@ use sqlparser::tokenizer::Token; use crate::error::{self, DuplicateClauseSnafu, InvalidSqlSnafu, Result}; use crate::parser::ParserContext; use crate::parsers::create_parser::trigger::{ANNOTATIONS, LABELS, NOTIFY, ON}; +use crate::statements::OptionMap; use crate::statements::alter::trigger::{ AlterTrigger, AlterTriggerOperation, AnnotationChange, AnnotationOperations, LabelChange, LabelOperations, NotifyChannelChange, NotifyChannelOperations, }; use crate::statements::create::trigger::NotifyChannel; use crate::statements::statement::Statement; -use crate::statements::OptionMap; /// Some keywords about trigger. 
pub const RENAME: &str = "RENAME"; @@ -119,7 +119,7 @@ impl<'a> ParserContext<'a> { return self.expected( "`LABELS`, `ANNOTATIONS` or `NOTIFY` keyword after `SET`", next_token, - ) + ); } } } @@ -542,10 +542,10 @@ mod tests { use crate::dialect::GreptimeDbDialect; use crate::parser::ParserContext; use crate::parsers::alter_parser::trigger::{apply_label_change, apply_label_replacement}; + use crate::statements::OptionMap; use crate::statements::alter::trigger::{LabelChange, LabelOperations}; use crate::statements::create::trigger::TriggerOn; use crate::statements::statement::Statement; - use crate::statements::OptionMap; #[test] fn test_parse_alter_without_alter_options() { diff --git a/src/sql/src/parsers/copy_parser.rs b/src/sql/src/parsers/copy_parser.rs index 45563b6ca5..185aa4dbcf 100644 --- a/src/sql/src/parsers/copy_parser.rs +++ b/src/sql/src/parsers/copy_parser.rs @@ -321,7 +321,10 @@ mod tests { Test { sql: "COPY catalog0.schema0.tbl FROM 'tbl_file.parquet' WITH (PATTERN = 'demo.*') CONNECTION (FOO='Bar', ONE='two')", expected_pattern: Some("demo.*".into()), - expected_connection: [("foo","Bar"),("one","two")].into_iter().map(|(k,v)|{(k.to_string(),v.to_string())}).collect() + expected_connection: [("foo", "Bar"), ("one", "two")] + .into_iter() + .map(|(k, v)| (k.to_string(), v.to_string())) + .collect(), }, ]; @@ -367,11 +370,17 @@ mod tests { }, Test { sql: "COPY catalog0.schema0.tbl TO 'tbl_file.parquet' CONNECTION (FOO='Bar', ONE='two')", - expected_connection: [("foo","Bar"),("one","two")].into_iter().map(|(k,v)|{(k.to_string(),v.to_string())}).collect() + expected_connection: [("foo", "Bar"), ("one", "two")] + .into_iter() + .map(|(k, v)| (k.to_string(), v.to_string())) + .collect(), }, Test { - sql:"COPY catalog0.schema0.tbl TO 'tbl_file.parquet' WITH (FORMAT = 'parquet') CONNECTION (FOO='Bar', ONE='two')", - expected_connection: [("foo","Bar"),("one","two")].into_iter().map(|(k,v)|{(k.to_string(),v.to_string())}).collect() + sql: "COPY catalog0.schema0.tbl TO 'tbl_file.parquet' WITH (FORMAT = 'parquet') CONNECTION (FOO='Bar', ONE='two')", + expected_connection: [("foo", "Bar"), ("one", "two")] + .into_iter() + .map(|(k, v)| (k.to_string(), v.to_string())) + .collect(), }, ]; @@ -514,32 +523,38 @@ mod tests { { let sql = "COPY SELECT * FROM tbl WHERE ts > 10 TO 'tbl_file.parquet' WITH (FORMAT = 'parquet') CONNECTION (FOO='Bar', ONE='two')"; - assert!(ParserContext::create_with_dialect( - sql, - &GreptimeDbDialect {}, - ParseOptions::default() + assert!( + ParserContext::create_with_dialect( + sql, + &GreptimeDbDialect {}, + ParseOptions::default() + ) + .is_err() ) - .is_err()) } { let sql = "COPY SELECT * FROM tbl WHERE ts > 10) TO 'tbl_file.parquet' WITH (FORMAT = 'parquet') CONNECTION (FOO='Bar', ONE='two')"; - assert!(ParserContext::create_with_dialect( - sql, - &GreptimeDbDialect {}, - ParseOptions::default() + assert!( + ParserContext::create_with_dialect( + sql, + &GreptimeDbDialect {}, + ParseOptions::default() + ) + .is_err() ) - .is_err()) } { let sql = "COPY (SELECT * FROM tbl WHERE ts > 10 TO 'tbl_file.parquet' WITH (FORMAT = 'parquet') CONNECTION (FOO='Bar', ONE='two')"; - assert!(ParserContext::create_with_dialect( - sql, - &GreptimeDbDialect {}, - ParseOptions::default() + assert!( + ParserContext::create_with_dialect( + sql, + &GreptimeDbDialect {}, + ParseOptions::default() + ) + .is_err() ) - .is_err()) } } } diff --git a/src/sql/src/parsers/create_parser.rs b/src/sql/src/parsers/create_parser.rs index b20b778cd1..af6960961c 100644 --- 
a/src/sql/src/parsers/create_parser.rs +++ b/src/sql/src/parsers/create_parser.rs @@ -23,7 +23,7 @@ use datafusion_common::ScalarValue; use datatypes::arrow::datatypes::{DataType as ArrowDataType, IntervalUnit}; use datatypes::data_type::ConcreteDataType; use itertools::Itertools; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use sqlparser::ast::{ColumnOption, ColumnOptionDef, DataType, Expr}; use sqlparser::dialect::keywords::Keyword; use sqlparser::keywords::ALL_KEYWORDS; @@ -38,7 +38,7 @@ use crate::error::{ InvalidSqlSnafu, InvalidTableOptionSnafu, InvalidTimeIndexSnafu, MissingTimeIndexSnafu, Result, SyntaxSnafu, UnexpectedSnafu, UnsupportedSnafu, }; -use crate::parser::{ParserContext, FLOW}; +use crate::parser::{FLOW, ParserContext}; use crate::parsers::tql_parser; use crate::parsers::utils::{ self, validate_column_fulltext_create_option, validate_column_skipping_index_create_option, @@ -49,7 +49,7 @@ use crate::statements::create::{ }; use crate::statements::statement::Statement; use crate::statements::transform::type_alias::get_data_type_by_alias_name; -use crate::statements::{sql_data_type_to_concrete_data_type, OptionMap}; +use crate::statements::{OptionMap, sql_data_type_to_concrete_data_type}; use crate::util::{location_to_index, parse_option_string}; pub const ENGINE: &str = "ENGINE"; @@ -213,13 +213,14 @@ impl<'a> ParserContext<'a> { } ); } - if let Some(append_mode) = options.get("append_mode") { - if append_mode == "true" && options.contains_key("merge_mode") { - return InvalidDatabaseOptionSnafu { - key: "merge_mode".to_string(), - } - .fail(); + if let Some(append_mode) = options.get("append_mode") + && append_mode == "true" + && options.contains_key("merge_mode") + { + return InvalidDatabaseOptionSnafu { + key: "merge_mode".to_string(), } + .fail(); } Ok(Statement::CreateDatabase(CreateDatabase { @@ -320,7 +321,7 @@ impl<'a> ParserContext<'a> { return self .parser .expected("string", unexpected) - .context(SyntaxSnafu) + .context(SyntaxSnafu); } } } else { @@ -554,8 +555,8 @@ impl<'a> ParserContext<'a> { let mut time_index_opt_idx = None; for (index, opt) in column.options().iter().enumerate() { - if let ColumnOption::DialectSpecific(tokens) = &opt.option { - if matches!( + if let ColumnOption::DialectSpecific(tokens) = &opt.option + && matches!( &tokens[..], [ Token::Word(Word { @@ -567,21 +568,21 @@ impl<'a> ParserContext<'a> { .. 
}) ] - ) { - ensure!( - time_index_opt_idx.is_none(), - InvalidColumnOptionSnafu { - name: column.name().to_string(), - msg: "duplicated time index", - } - ); - time_index_opt_idx = Some(index); + ) + { + ensure!( + time_index_opt_idx.is_none(), + InvalidColumnOptionSnafu { + name: column.name().to_string(), + msg: "duplicated time index", + } + ); + time_index_opt_idx = Some(index); - let constraint = TableConstraint::TimeIndex { - column: Ident::new(column.name().value.clone()), - }; - constraints.push(constraint); - } + let constraint = TableConstraint::TimeIndex { + column: Ident::new(column.name().value.clone()), + }; + constraints.push(constraint); } } @@ -1310,10 +1311,12 @@ mod tests { let sql = "create database"; let result = ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default()); - assert!(result - .unwrap_err() - .to_string() - .contains("Unexpected token while parsing SQL statement")); + assert!( + result + .unwrap_err() + .to_string() + .contains("Unexpected token while parsing SQL statement") + ); let sql = "create database prometheus"; let stmts = @@ -1735,10 +1738,12 @@ PARTITION ON COLUMNS(x) () ENGINE=mito"; let result = ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default()); - assert!(result - .unwrap_err() - .to_string() - .contains("Partition column \"x\" not defined")); + assert!( + result + .unwrap_err() + .to_string() + .contains("Partition column \"x\" not defined") + ); } #[test] @@ -1990,10 +1995,12 @@ ENGINE=mito"; ParseOptions::default(), ); - assert!(result1 - .unwrap_err() - .to_string() - .contains("time index column data type should be timestamp")); + assert!( + result1 + .unwrap_err() + .to_string() + .contains("time index column data type should be timestamp") + ); } #[test] @@ -2146,10 +2153,12 @@ PARTITION COLUMNS(c, a) ( ENGINE=mito"; let result = ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default()); - assert!(result - .unwrap_err() - .output_msg() - .contains("sql parser error: Expected: ON, found: COLUMNS")); + assert!( + result + .unwrap_err() + .output_msg() + .contains("sql parser error: Expected: ON, found: COLUMNS") + ); } #[test] @@ -2395,12 +2404,13 @@ CREATE TABLE log ( if let Statement::CreateTable(c) = &result1[0] { c.columns.iter().for_each(|col| { if col.name().value == "msg" { - assert!(col - .extensions - .fulltext_index_options - .as_ref() - .unwrap() - .is_empty()); + assert!( + col.extensions + .fulltext_index_options + .as_ref() + .unwrap() + .is_empty() + ); } }); } else { @@ -2474,10 +2484,12 @@ CREATE TABLE log ( let result = ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default()); assert!(result.is_err()); - assert!(result - .unwrap_err() - .to_string() - .contains("FULLTEXT index only supports string type")); + assert!( + result + .unwrap_err() + .to_string() + .contains("FULLTEXT index only supports string type") + ); } #[test] @@ -2490,10 +2502,12 @@ CREATE TABLE log ( let result = ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default()); assert!(result.is_err()); - assert!(result - .unwrap_err() - .to_string() - .contains("duplicated FULLTEXT INDEX option")); + assert!( + result + .unwrap_err() + .to_string() + .contains("duplicated FULLTEXT INDEX option") + ); } #[test] @@ -2506,10 +2520,12 @@ CREATE TABLE log ( let result = ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default()); assert!(result.is_err()); - assert!(result - 
.unwrap_err() - .to_string() - .contains("invalid FULLTEXT INDEX option")); + assert!( + result + .unwrap_err() + .to_string() + .contains("invalid FULLTEXT INDEX option") + ); } #[test] @@ -2526,12 +2542,13 @@ CREATE TABLE log ( if let Statement::CreateTable(c) = &result[0] { c.columns.iter().for_each(|col| { if col.name().value == "msg" { - assert!(!col - .extensions - .skipping_index_options - .as_ref() - .unwrap() - .is_empty()); + assert!( + !col.extensions + .skipping_index_options + .as_ref() + .unwrap() + .is_empty() + ); } }); } else { @@ -2550,12 +2567,13 @@ CREATE TABLE log ( if let Statement::CreateTable(c) = &result[0] { c.columns.iter().for_each(|col| { if col.name().value == "msg" { - assert!(col - .extensions - .skipping_index_options - .as_ref() - .unwrap() - .is_empty()); + assert!( + col.extensions + .skipping_index_options + .as_ref() + .unwrap() + .is_empty() + ); } }); } else { @@ -2748,10 +2766,12 @@ CREATE TABLE log ( &mut extensions, ); assert!(result.is_err()); - assert!(result - .unwrap_err() - .to_string() - .contains("FULLTEXT index only supports string type")); + assert!( + result + .unwrap_err() + .to_string() + .contains("FULLTEXT index only supports string type") + ); } // Test fulltext index with invalid option (won't fail, the parser doesn't check the option's content) @@ -2810,10 +2830,12 @@ CREATE TABLE log ( &mut extensions, ); assert!(result.is_err()); - assert!(result - .unwrap_err() - .to_string() - .contains("INVERTED index doesn't support options")); + assert!( + result + .unwrap_err() + .to_string() + .contains("INVERTED index doesn't support options") + ); } // Test multiple indices diff --git a/src/sql/src/parsers/create_parser/trigger.rs b/src/sql/src/parsers/create_parser/trigger.rs index a0b3ce587b..553a53960d 100644 --- a/src/sql/src/parsers/create_parser/trigger.rs +++ b/src/sql/src/parsers/create_parser/trigger.rs @@ -1,7 +1,7 @@ use std::collections::HashMap; use std::time::Duration; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use sqlparser::keywords::Keyword; use sqlparser::parser::Parser; use sqlparser::tokenizer::Token; @@ -10,11 +10,11 @@ use crate::error; use crate::error::Result; use crate::parser::ParserContext; use crate::parsers::utils::convert_month_day_nano_to_duration; +use crate::statements::OptionMap; use crate::statements::create::trigger::{ AlertManagerWebhook, ChannelType, CreateTrigger, NotifyChannel, TriggerOn, }; use crate::statements::statement::Statement; -use crate::statements::OptionMap; use crate::util::parse_option_string; /// Some keywords about trigger. diff --git a/src/sql/src/parsers/cursor_parser.rs b/src/sql/src/parsers/cursor_parser.rs index 706f820c18..32275d5a43 100644 --- a/src/sql/src/parsers/cursor_parser.rs +++ b/src/sql/src/parsers/cursor_parser.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use sqlparser::keywords::Keyword; use sqlparser::tokenizer::Token; diff --git a/src/sql/src/parsers/describe_parser.rs b/src/sql/src/parsers/describe_parser.rs index f4449cf575..849dca468b 100644 --- a/src/sql/src/parsers/describe_parser.rs +++ b/src/sql/src/parsers/describe_parser.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use sqlparser::keywords::Keyword; use crate::error::{self, InvalidTableNameSnafu, Result}; diff --git a/src/sql/src/parsers/drop_parser.rs b/src/sql/src/parsers/drop_parser.rs index 04e7013d64..8f3872d957 100644 --- a/src/sql/src/parsers/drop_parser.rs +++ b/src/sql/src/parsers/drop_parser.rs @@ -12,12 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use sqlparser::dialect::keywords::Keyword; use sqlparser::tokenizer::Token; use crate::error::{self, InvalidFlowNameSnafu, InvalidTableNameSnafu, Result}; -use crate::parser::{ParserContext, FLOW}; +use crate::parser::{FLOW, ParserContext}; #[cfg(feature = "enterprise")] use crate::statements::drop::trigger::DropTrigger; use crate::statements::drop::{DropDatabase, DropFlow, DropTable, DropView}; diff --git a/src/sql/src/parsers/query_parser.rs b/src/sql/src/parsers/query_parser.rs index fc3cecf1a3..b532dadd9b 100644 --- a/src/sql/src/parsers/query_parser.rs +++ b/src/sql/src/parsers/query_parser.rs @@ -53,9 +53,11 @@ mod tests { let result = ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default()); assert!(result.is_err()); - assert!(result - .unwrap_err() - .output_msg() - .contains("Expected: an expression")); + assert!( + result + .unwrap_err() + .output_msg() + .contains("Expected: an expression") + ); } } diff --git a/src/sql/src/parsers/show_parser.rs b/src/sql/src/parsers/show_parser.rs index de414549bc..a6bd6b9dcb 100644 --- a/src/sql/src/parsers/show_parser.rs +++ b/src/sql/src/parsers/show_parser.rs @@ -15,7 +15,7 @@ #[cfg(feature = "enterprise")] pub mod trigger; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use sqlparser::keywords::Keyword; use sqlparser::tokenizer::Token; @@ -876,7 +876,10 @@ mod tests { let result = ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default()); let error = result.unwrap_err(); - assert_eq!("Unexpected token while parsing SQL statement, expected: '{FROM | IN} table', found: EOF", error.to_string()); + assert_eq!( + "Unexpected token while parsing SQL statement, expected: '{FROM | IN} table', found: EOF", + error.to_string() + ); let sql = "SHOW COLUMNS from test"; let result = @@ -936,7 +939,10 @@ mod tests { let result = ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default()); let error = result.unwrap_err(); - assert_eq!("Unexpected token while parsing SQL statement, expected: '{FROM | IN} table', found: EOF", error.to_string()); + assert_eq!( + "Unexpected token while parsing SQL statement, expected: '{FROM | IN} table', found: EOF", + error.to_string() + ); let sql = "SHOW INDEX from test"; let result = @@ -992,7 +998,10 @@ mod tests { let result = ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default()); let error = result.unwrap_err(); - assert_eq!("Unexpected token while parsing SQL statement, expected: '{FROM | IN} table', found: EOF", error.to_string()); + assert_eq!( + "Unexpected token while parsing SQL statement, expected: '{FROM | IN} table', found: EOF", + error.to_string() + ); let sql = "SHOW REGION from test"; let result = diff --git a/src/sql/src/parsers/tql_parser.rs b/src/sql/src/parsers/tql_parser.rs index 20aabcdb2d..ec83f5162a 100644 --- a/src/sql/src/parsers/tql_parser.rs +++ b/src/sql/src/parsers/tql_parser.rs @@ -325,7 +325,10 @@ mod tests 
{ assert_eq!(eval.end, "10"); assert_eq!(eval.step, "1s"); assert_eq!(eval.lookback, None); - assert_eq!(eval.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"); + assert_eq!( + eval.query, + "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m" + ); } _ => unreachable!(), }; @@ -352,7 +355,10 @@ mod tests { assert_eq!(eval.end, "10"); assert_eq!(eval.step, "1s"); assert_eq!(eval.lookback, None); - assert_eq!(eval.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"); + assert_eq!( + eval.query, + "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m" + ); } _ => unreachable!(), }; @@ -366,7 +372,10 @@ mod tests { assert_eq!(eval.end, "15"); assert_eq!(eval.step, "1s"); assert_eq!(eval.lookback, None); - assert_eq!(eval.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"); + assert_eq!( + eval.query, + "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m" + ); } _ => unreachable!(), }; @@ -382,7 +391,10 @@ mod tests { assert_eq!(eval.end, "10"); assert_eq!(eval.step, "1s"); assert_eq!(eval.lookback, None); - assert_eq!(eval.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"); + assert_eq!( + eval.query, + "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m" + ); } _ => unreachable!(), } @@ -394,7 +406,10 @@ mod tests { assert_eq!(eval.end, "1200"); assert_eq!(eval.step, "1m"); assert_eq!(eval.lookback, None); - assert_eq!(eval.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"); + assert_eq!( + eval.query, + "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m" + ); } _ => unreachable!(), } @@ -414,7 +429,10 @@ mod tests { assert_eq!(eval.end, "1676887659"); assert_eq!(eval.step, "1m"); assert_eq!(eval.lookback, None); - assert_eq!(eval.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"); + assert_eq!( + eval.query, + "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m" + ); } _ => unreachable!(), } @@ -428,7 +446,10 @@ mod tests { assert_eq!(eval.end, "1676887659.5"); assert_eq!(eval.step, "30.3"); assert_eq!(eval.lookback, None); - assert_eq!(eval.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"); + assert_eq!( + eval.query, + "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m" + ); } _ => unreachable!(), } @@ -444,7 +465,10 @@ mod tests { assert_eq!(eval.end, "2015-07-01T20:11:00.781Z"); assert_eq!(eval.step, "30s"); assert_eq!(eval.lookback, None); - assert_eq!(eval.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"); + assert_eq!( + eval.query, + "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m" + ); } _ => unreachable!(), } @@ -459,7 +483,10 @@ mod tests { assert_eq!(eval.end, "1676887659"); assert_eq!(eval.step, "1m".to_string()); assert_eq!(eval.lookback, Some("5m".to_string())); - assert_eq!(eval.query, 
"http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"); + assert_eq!( + eval.query, + "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m" + ); } _ => unreachable!(), } @@ -471,7 +498,10 @@ mod tests { assert_eq!(eval.end, "1200"); assert_eq!(eval.step, "1m"); assert_eq!(eval.lookback, Some("7m".to_string())); - assert_eq!(eval.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"); + assert_eq!( + eval.query, + "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m" + ); } _ => unreachable!(), } @@ -479,7 +509,10 @@ mod tests { let sql = "TQL EXPLAIN (20, 100, 10, '3m') http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"; match parse_into_statement(sql) { Statement::Tql(Tql::Explain(explain)) => { - assert_eq!(explain.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"); + assert_eq!( + explain.query, + "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m" + ); assert_eq!(explain.start, "20"); assert_eq!(explain.end, "100"); assert_eq!(explain.step, "10"); @@ -492,7 +525,10 @@ mod tests { let sql = "TQL EXPLAIN VERBOSE (20, 100, 10, '3m') http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"; match parse_into_statement(sql) { Statement::Tql(Tql::Explain(explain)) => { - assert_eq!(explain.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"); + assert_eq!( + explain.query, + "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m" + ); assert_eq!(explain.start, "20"); assert_eq!(explain.end, "100"); assert_eq!(explain.step, "10"); @@ -509,7 +545,10 @@ mod tests { assert_eq!(analyze.end, "1676887659"); assert_eq!(analyze.step, "1m"); assert_eq!(analyze.lookback, Some("9m".to_string())); - assert_eq!(analyze.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"); + assert_eq!( + analyze.query, + "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m" + ); assert!(!analyze.is_verbose); } _ => unreachable!(), @@ -522,7 +561,10 @@ mod tests { assert_eq!(analyze.end, "1676887659"); assert_eq!(analyze.step, "1m"); assert_eq!(analyze.lookback, Some("9m".to_string())); - assert_eq!(analyze.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"); + assert_eq!( + analyze.query, + "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m" + ); assert!(analyze.is_verbose); } _ => unreachable!(), @@ -534,7 +576,10 @@ mod tests { let sql = "TQL EXPLAIN http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"; match parse_into_statement(sql) { Statement::Tql(Tql::Explain(explain)) => { - assert_eq!(explain.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"); + assert_eq!( + explain.query, + "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m" + ); assert_eq!(explain.start, "0"); assert_eq!(explain.end, "0"); assert_eq!(explain.step, "5m"); @@ -548,7 
+593,10 @@ mod tests { let sql = "TQL EXPLAIN VERBOSE http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"; match parse_into_statement(sql) { Statement::Tql(Tql::Explain(explain)) => { - assert_eq!(explain.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"); + assert_eq!( + explain.query, + "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m" + ); assert_eq!(explain.start, "0"); assert_eq!(explain.end, "0"); assert_eq!(explain.step, "5m"); @@ -562,7 +610,10 @@ mod tests { let sql = "TQL EXPLAIN FORMAT JSON http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"; match parse_into_statement(sql) { Statement::Tql(Tql::Explain(explain)) => { - assert_eq!(explain.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"); + assert_eq!( + explain.query, + "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m" + ); assert_eq!(explain.start, "0"); assert_eq!(explain.end, "0"); assert_eq!(explain.step, "5m"); @@ -576,7 +627,10 @@ mod tests { let sql = "TQL EXPLAIN VERBOSE FORMAT JSON http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"; match parse_into_statement(sql) { Statement::Tql(Tql::Explain(explain)) => { - assert_eq!(explain.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"); + assert_eq!( + explain.query, + "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m" + ); assert_eq!(explain.start, "0"); assert_eq!(explain.end, "0"); assert_eq!(explain.step, "5m"); @@ -590,7 +644,10 @@ mod tests { let sql = "TQL EXPLAIN FORMAT TEXT (20,100,10) http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"; match parse_into_statement(sql) { Statement::Tql(Tql::Explain(explain)) => { - assert_eq!(explain.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"); + assert_eq!( + explain.query, + "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m" + ); assert_eq!(explain.start, "20"); assert_eq!(explain.end, "100"); assert_eq!(explain.step, "10"); @@ -604,7 +661,10 @@ mod tests { let sql = "TQL EXPLAIN (20,100,10) http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"; match parse_into_statement(sql) { Statement::Tql(Tql::Explain(explain)) => { - assert_eq!(explain.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"); + assert_eq!( + explain.query, + "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m" + ); assert_eq!(explain.start, "20"); assert_eq!(explain.end, "100"); assert_eq!(explain.step, "10"); @@ -618,7 +678,10 @@ mod tests { let sql = "TQL EXPLAIN ('1970-01-01T00:05:00'::timestamp, '1970-01-01T00:10:00'::timestamp + '10 minutes'::interval, 10) http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"; match parse_into_statement(sql) { Statement::Tql(Tql::Explain(explain)) => { - assert_eq!(explain.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 
5m"); + assert_eq!( + explain.query, + "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m" + ); assert_eq!(explain.start, "300"); assert_eq!(explain.end, "1200"); assert_eq!(explain.step, "10"); @@ -632,7 +695,10 @@ mod tests { let sql = "TQL EXPLAIN VERBOSE (20,100,10) http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"; match parse_into_statement(sql) { Statement::Tql(Tql::Explain(explain)) => { - assert_eq!(explain.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"); + assert_eq!( + explain.query, + "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m" + ); assert_eq!(explain.start, "20"); assert_eq!(explain.end, "100"); assert_eq!(explain.step, "10"); @@ -646,7 +712,10 @@ mod tests { let sql = "TQL EXPLAIN verbose (20,100,10) http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"; match parse_into_statement(sql) { Statement::Tql(Tql::Explain(explain)) => { - assert_eq!(explain.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"); + assert_eq!( + explain.query, + "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m" + ); assert_eq!(explain.start, "20"); assert_eq!(explain.end, "100"); assert_eq!(explain.step, "10"); @@ -660,7 +729,10 @@ mod tests { let sql = "TQL EXPLAIN VERBOSE ('1970-01-01T00:05:00'::timestamp, '1970-01-01T00:10:00'::timestamp + '10 minutes'::interval, 10) http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"; match parse_into_statement(sql) { Statement::Tql(Tql::Explain(explain)) => { - assert_eq!(explain.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"); + assert_eq!( + explain.query, + "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m" + ); assert_eq!(explain.start, "300"); assert_eq!(explain.end, "1200"); assert_eq!(explain.step, "10"); @@ -681,7 +753,10 @@ mod tests { assert_eq!(analyze.end, "1676887659.5"); assert_eq!(analyze.step, "30.3"); assert_eq!(analyze.lookback, None); - assert_eq!(analyze.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"); + assert_eq!( + analyze.query, + "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m" + ); assert!(!analyze.is_verbose); assert_eq!(analyze.format, None); } @@ -695,7 +770,10 @@ mod tests { assert_eq!(analyze.end, "1676887659.5"); assert_eq!(analyze.step, "30.3"); assert_eq!(analyze.lookback, None); - assert_eq!(analyze.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"); + assert_eq!( + analyze.query, + "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m" + ); assert!(!analyze.is_verbose); assert_eq!(analyze.format, Some(AnalyzeFormat::JSON)); } @@ -709,7 +787,10 @@ mod tests { assert_eq!(analyze.end, "1676887659.5"); assert_eq!(analyze.step, "30.3"); assert_eq!(analyze.lookback, None); - assert_eq!(analyze.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"); + assert_eq!( + analyze.query, + 
"http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m" + ); assert!(analyze.is_verbose); assert_eq!(analyze.format, Some(AnalyzeFormat::JSON)); } @@ -723,7 +804,10 @@ mod tests { assert_eq!(analyze.end, "1200"); assert_eq!(analyze.step, "10"); assert_eq!(analyze.lookback, None); - assert_eq!(analyze.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"); + assert_eq!( + analyze.query, + "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m" + ); assert!(!analyze.is_verbose); assert_eq!(analyze.format, None); } @@ -737,7 +821,10 @@ mod tests { assert_eq!(analyze.end, "1676887659.5"); assert_eq!(analyze.step, "30.3"); assert_eq!(analyze.lookback, None); - assert_eq!(analyze.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"); + assert_eq!( + analyze.query, + "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m" + ); assert!(analyze.is_verbose); assert_eq!(analyze.format, None); } @@ -751,7 +838,10 @@ mod tests { assert_eq!(analyze.end, "1676887659.5"); assert_eq!(analyze.step, "30.3"); assert_eq!(analyze.lookback, None); - assert_eq!(analyze.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"); + assert_eq!( + analyze.query, + "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m" + ); assert!(analyze.is_verbose); assert_eq!(analyze.format, None); } @@ -765,7 +855,10 @@ mod tests { assert_eq!(analyze.end, "1200"); assert_eq!(analyze.step, "10"); assert_eq!(analyze.lookback, None); - assert_eq!(analyze.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"); + assert_eq!( + analyze.query, + "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m" + ); assert!(analyze.is_verbose); assert_eq!(analyze.format, None); } @@ -874,7 +967,10 @@ mod tests { assert_eq!(eval.end, "30"); assert_eq!(eval.step, "10s"); assert_eq!(eval.lookback, None); - assert_eq!(eval.query, "(sum by(host) (irate(host_cpu_seconds_total{mode!='idle'}[1m0s])) / sum by (host)((irate(host_cpu_seconds_total[1m0s])))) * 100"); + assert_eq!( + eval.query, + "(sum by(host) (irate(host_cpu_seconds_total{mode!='idle'}[1m0s])) / sum by (host)((irate(host_cpu_seconds_total[1m0s])))) * 100" + ); } _ => unreachable!(), } @@ -901,17 +997,21 @@ mod tests { let sql = "TQL EVAL (1676887657, 1676887659, 1m) http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"; let result = ParserContext::create_with_dialect(sql, dialect, parse_options.clone()).unwrap_err(); - assert!(result - .output_msg() - .contains("Failed to extract a timestamp value")); + assert!( + result + .output_msg() + .contains("Failed to extract a timestamp value") + ); // missing end let sql = "TQL EVAL (1676887657, '1m') http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m"; let result = ParserContext::create_with_dialect(sql, dialect, parse_options.clone()).unwrap_err(); - assert!(result - .output_msg() - .contains("Failed to extract a timestamp value")); + assert!( + result + .output_msg() + .contains("Failed to extract a timestamp value") + ); // empty TQL query let sql = "TQL EVAL (0, 30, '10s')"; diff --git 
a/src/sql/src/parsers/truncate_parser.rs b/src/sql/src/parsers/truncate_parser.rs index 7059c22b64..e7dd9a8c74 100644 --- a/src/sql/src/parsers/truncate_parser.rs +++ b/src/sql/src/parsers/truncate_parser.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use sqlparser::keywords::Keyword; use sqlparser::tokenizer::Token; diff --git a/src/sql/src/parsers/utils.rs b/src/sql/src/parsers/utils.rs index 9933fe3167..15ef50ab71 100644 --- a/src/sql/src/parsers/utils.rs +++ b/src/sql/src/parsers/utils.rs @@ -17,16 +17,16 @@ use std::sync::Arc; use chrono::Utc; use datafusion::config::ConfigOptions; use datafusion::error::Result as DfResult; -use datafusion::execution::context::SessionState; use datafusion::execution::SessionStateBuilder; +use datafusion::execution::context::SessionState; use datafusion::optimizer::simplify_expressions::ExprSimplifier; use datafusion_common::tree_node::{TreeNode, TreeNodeVisitor}; use datafusion_common::{DFSchema, ScalarValue}; use datafusion_expr::execution_props::ExecutionProps; use datafusion_expr::simplify::SimplifyContext; use datafusion_expr::{AggregateUDF, Expr, ScalarUDF, TableSource, WindowUDF}; -use datafusion_sql::planner::{ContextProvider, SqlToRel}; use datafusion_sql::TableReference; +use datafusion_sql::planner::{ContextProvider, SqlToRel}; use datatypes::arrow::datatypes::DataType; use datatypes::schema::{ COLUMN_FULLTEXT_OPT_KEY_ANALYZER, COLUMN_FULLTEXT_OPT_KEY_BACKEND, @@ -34,7 +34,7 @@ use datatypes::schema::{ COLUMN_FULLTEXT_OPT_KEY_GRANULARITY, COLUMN_SKIPPING_INDEX_OPT_KEY_FALSE_POSITIVE_RATE, COLUMN_SKIPPING_INDEX_OPT_KEY_GRANULARITY, COLUMN_SKIPPING_INDEX_OPT_KEY_TYPE, }; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use sqlparser::dialect::Dialect; use crate::error::{ @@ -88,16 +88,16 @@ pub fn parser_expr_to_scalar_value_literal( &mut self, node: &Self::Node, ) -> DfResult { - if let Expr::ScalarFunction(func) = node { - if func.name().to_lowercase() == "now" { - if !func.args.is_empty() { - return Err(datafusion_common::DataFusionError::Plan( - "now() function should not have arguments".to_string(), - )); - } - self.found = true; - return Ok(datafusion_common::tree_node::TreeNodeRecursion::Stop); + if let Expr::ScalarFunction(func) = node + && func.name().to_lowercase() == "now" + { + if !func.args.is_empty() { + return Err(datafusion_common::DataFusionError::Plan( + "now() function should not have arguments".to_string(), + )); } + self.found = true; + return Ok(datafusion_common::tree_node::TreeNodeRecursion::Stop); } Ok(datafusion_common::tree_node::TreeNodeRecursion::Continue) } diff --git a/src/sql/src/parsers/with_tql_parser.rs b/src/sql/src/parsers/with_tql_parser.rs index a73155e0b7..e52e3103a4 100644 --- a/src/sql/src/parsers/with_tql_parser.rs +++ b/src/sql/src/parsers/with_tql_parser.rs @@ -197,14 +197,13 @@ impl ParserContext<'_> { /// Determine if CTE contains TQL or SQL and parse accordingly fn parse_cte_content(&mut self) -> Result { // Check if the next token is TQL - if let Token::Word(w) = &self.parser.peek_token().token { - if w.keyword == Keyword::NoKeyword - && w.quote_style.is_none() - && w.value.to_uppercase() == tql_parser::TQL - { - let tql = self.parse_tql_content_in_cte()?; - return Ok(CteContent::Tql(tql)); - } + if let Token::Word(w) = &self.parser.peek_token().token + && w.keyword == Keyword::NoKeyword + && w.quote_style.is_none() + && 
w.value.to_uppercase() == tql_parser::TQL + { + let tql = self.parse_tql_content_in_cte()?; + return Ok(CteContent::Tql(tql)); } // Parse as SQL query diff --git a/src/sql/src/partition.rs b/src/sql/src/partition.rs index 6ba6f0ffc6..47b74f05c1 100644 --- a/src/sql/src/partition.rs +++ b/src/sql/src/partition.rs @@ -222,10 +222,9 @@ mod tests { value: Value::SingleQuotedString(leftmost), .. }) = *right.clone() + && uuid < leftmost { - if uuid < leftmost { - return i; - } + return i; } } else if i == rules.len() - 1 { // Hit the rightmost rule. @@ -233,10 +232,9 @@ mod tests { value: Value::SingleQuotedString(rightmost), .. }) = *right.clone() + && uuid >= rightmost { - if uuid >= rightmost { - return i; - } + return i; } } else { // Hit the middle rules. @@ -245,29 +243,23 @@ mod tests { op: _, right: inner_right, } = *left.clone() - { - if let Expr::Value(ValueWithSpan { + && let Expr::Value(ValueWithSpan { value: Value::SingleQuotedString(lower), .. }) = *inner_right.clone() - { - if let Expr::BinaryOp { - left: _, - op: _, - right: inner_right, - } = *right.clone() - { - if let Expr::Value(ValueWithSpan { - value: Value::SingleQuotedString(upper), - .. - }) = *inner_right.clone() - { - if uuid >= lower && uuid < upper { - return i; - } - } - } - } + && let Expr::BinaryOp { + left: _, + op: _, + right: inner_right, + } = *right.clone() + && let Expr::Value(ValueWithSpan { + value: Value::SingleQuotedString(upper), + .. + }) = *inner_right.clone() + && uuid >= lower + && uuid < upper + { + return i; } } } diff --git a/src/sql/src/statements.rs b/src/sql/src/statements.rs index 0d658fd6c7..488fadfae6 100644 --- a/src/sql/src/statements.rs +++ b/src/sql/src/statements.rs @@ -37,7 +37,7 @@ use api::v1::SemanticType; use common_sql::default_constraint::parse_column_default_constraint; use common_time::timezone::Timezone; use datatypes::prelude::ConcreteDataType; -use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema, COMMENT_KEY}; +use datatypes::schema::{COMMENT_KEY, ColumnDefaultConstraint, ColumnSchema}; use datatypes::types::TimestampType; use datatypes::value::Value; use snafu::ResultExt; @@ -324,14 +324,14 @@ pub fn concrete_data_type_to_sql_data_type(data_type: &ConcreteDataType) -> Resu mod tests { use api::v1::ColumnDataType; use datatypes::schema::{ - FulltextAnalyzer, COLUMN_FULLTEXT_OPT_KEY_ANALYZER, COLUMN_FULLTEXT_OPT_KEY_CASE_SENSITIVE, + COLUMN_FULLTEXT_OPT_KEY_ANALYZER, COLUMN_FULLTEXT_OPT_KEY_CASE_SENSITIVE, FulltextAnalyzer, }; use sqlparser::ast::{ColumnOptionDef, Expr}; use super::*; use crate::ast::TimezoneInfo; - use crate::statements::create::ColumnExtensions; use crate::statements::ColumnOption; + use crate::statements::create::ColumnExtensions; fn check_type(sql_type: SqlDataType, data_type: ConcreteDataType) { assert_eq!( diff --git a/src/sql/src/statements/alter.rs b/src/sql/src/statements/alter.rs index ba9712a403..6507701f49 100644 --- a/src/sql/src/statements/alter.rs +++ b/src/sql/src/statements/alter.rs @@ -201,7 +201,11 @@ impl Display for AlterTableOperation { column_name, options, } => { - write!(f, "MODIFY COLUMN {column_name} SET FULLTEXT INDEX WITH(analyzer={0}, case_sensitive={1}, backend={2})", options.analyzer, options.case_sensitive, options.backend) + write!( + f, + "MODIFY COLUMN {column_name} SET FULLTEXT INDEX WITH(analyzer={0}, case_sensitive={1}, backend={2})", + options.analyzer, options.case_sensitive, options.backend + ) } SetIndexOperation::Inverted { column_name } => { write!(f, "MODIFY COLUMN {column_name} SET INVERTED INDEX") @@ 
-210,7 +214,11 @@ impl Display for AlterTableOperation { column_name, options, } => { - write!(f, "MODIFY COLUMN {column_name} SET SKIPPING INDEX WITH(granularity={0}, index_type={1})", options.granularity, options.index_type) + write!( + f, + "MODIFY COLUMN {column_name} SET SKIPPING INDEX WITH(granularity={0}, index_type={1})", + options.granularity, options.index_type + ) } }, AlterTableOperation::UnsetIndex { options } => match options { diff --git a/src/sql/src/statements/alter/trigger.rs b/src/sql/src/statements/alter/trigger.rs index 93c02e7158..f84dfccf4d 100644 --- a/src/sql/src/statements/alter/trigger.rs +++ b/src/sql/src/statements/alter/trigger.rs @@ -4,8 +4,8 @@ use serde::Serialize; use sqlparser::ast::ObjectName; use sqlparser_derive::{Visit, VisitMut}; -use crate::statements::create::trigger::{NotifyChannel, TriggerOn}; use crate::statements::OptionMap; +use crate::statements::create::trigger::{NotifyChannel, TriggerOn}; #[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)] pub struct AlterTrigger { diff --git a/src/sql/src/statements/copy.rs b/src/sql/src/statements/copy.rs index 9961162a0c..48f13f1fb2 100644 --- a/src/sql/src/statements/copy.rs +++ b/src/sql/src/statements/copy.rs @@ -18,8 +18,8 @@ use serde::Serialize; use sqlparser::ast::ObjectName; use sqlparser_derive::{Visit, VisitMut}; -use crate::statements::statement::Statement; use crate::statements::OptionMap; +use crate::statements::statement::Statement; #[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)] pub enum Copy { diff --git a/src/sql/src/statements/create.rs b/src/sql/src/statements/create.rs index 12cd5843d7..c47c01543c 100644 --- a/src/sql/src/statements/create.rs +++ b/src/sql/src/statements/create.rs @@ -27,9 +27,9 @@ use crate::ast::{ColumnDef, Ident, ObjectName, Value as SqlValue}; use crate::error::{ InvalidFlowQuerySnafu, Result, SetFulltextOptionSnafu, SetSkippingIndexOptionSnafu, }; +use crate::statements::OptionMap; use crate::statements::statement::Statement; use crate::statements::tql::Tql; -use crate::statements::OptionMap; const LINE_SEP: &str = ",\n"; const COMMA_SEP: &str = ", "; @@ -150,11 +150,11 @@ impl Column { impl Display for Column { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - if let Some(vector_options) = &self.extensions.vector_options { - if let Some(dim) = vector_options.get(VECTOR_OPT_DIM) { - write!(f, "{} VECTOR({})", self.column_def.name, dim)?; - return Ok(()); - } + if let Some(vector_options) = &self.extensions.vector_options + && let Some(dim) = vector_options.get(VECTOR_OPT_DIM) + { + write!(f, "{} VECTOR({})", self.column_def.name, dim)?; + return Ok(()); } write!(f, "{}", self.column_def)?; diff --git a/src/sql/src/statements/create/trigger.rs b/src/sql/src/statements/create/trigger.rs index a6cc3d88d8..0788fa7b78 100644 --- a/src/sql/src/statements/create/trigger.rs +++ b/src/sql/src/statements/create/trigger.rs @@ -8,8 +8,8 @@ use sqlparser::ast::{Query, Visit, VisitMut, Visitor, VisitorMut}; use sqlparser_derive::{Visit, VisitMut}; use crate::ast::{Ident, ObjectName}; -use crate::statements::create::{COMMA_SEP, INDENT, LINE_SEP}; use crate::statements::OptionMap; +use crate::statements::create::{COMMA_SEP, INDENT, LINE_SEP}; #[derive(Debug, PartialEq, Eq, Clone, Visit, VisitMut, Serialize)] pub struct CreateTrigger { diff --git a/src/sql/src/statements/describe.rs b/src/sql/src/statements/describe.rs index 1a7bba24e5..2a87725dfc 100644 --- a/src/sql/src/statements/describe.rs +++ b/src/sql/src/statements/describe.rs 
@@ -111,12 +111,10 @@ mod tests { #[test] pub fn test_describe_missing_table_name() { let sql = "DESCRIBE TABLE"; - assert!(ParserContext::create_with_dialect( - sql, - &GreptimeDbDialect {}, - ParseOptions::default() - ) - .is_err()); + assert!( + ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default()) + .is_err() + ); } #[test] diff --git a/src/sql/src/statements/show.rs b/src/sql/src/statements/show.rs index e18528a6fb..0dfdd1de7d 100644 --- a/src/sql/src/statements/show.rs +++ b/src/sql/src/statements/show.rs @@ -426,23 +426,19 @@ mod tests { #[test] pub fn test_show_create_missing_table_name() { let sql = "SHOW CREATE TABLE"; - assert!(ParserContext::create_with_dialect( - sql, - &GreptimeDbDialect {}, - ParseOptions::default() - ) - .is_err()); + assert!( + ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default()) + .is_err() + ); } #[test] pub fn test_show_create_unknown_for() { let sql = "SHOW CREATE TABLE t FOR UNKNOWN"; - assert!(ParserContext::create_with_dialect( - sql, - &GreptimeDbDialect {}, - ParseOptions::default() - ) - .is_err()); + assert!( + ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default()) + .is_err() + ); } #[test] @@ -466,12 +462,10 @@ mod tests { #[test] pub fn test_show_create_missing_flow() { let sql = "SHOW CREATE FLOW"; - assert!(ParserContext::create_with_dialect( - sql, - &GreptimeDbDialect {}, - ParseOptions::default() - ) - .is_err()); + assert!( + ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default()) + .is_err() + ); } #[test] diff --git a/src/sql/src/statements/show/trigger.rs b/src/sql/src/statements/show/trigger.rs index bc6de33a3b..dbc23265da 100644 --- a/src/sql/src/statements/show/trigger.rs +++ b/src/sql/src/statements/show/trigger.rs @@ -23,8 +23,8 @@ impl Display for ShowTriggers { mod tests { use crate::dialect::GreptimeDbDialect; use crate::parser::{ParseOptions, ParserContext}; - use crate::statements::show::trigger::ShowTriggers; use crate::statements::show::ShowKind; + use crate::statements::show::trigger::ShowTriggers; use crate::statements::statement::Statement; #[test] diff --git a/src/sql/src/statements/transform.rs b/src/sql/src/statements/transform.rs index 7bd4218d2f..38a490b19f 100644 --- a/src/sql/src/statements/transform.rs +++ b/src/sql/src/statements/transform.rs @@ -20,7 +20,7 @@ use std::sync::Arc; use expand_interval::ExpandIntervalTransformRule; use lazy_static::lazy_static; -use sqlparser::ast::{visit_expressions_mut, Expr}; +use sqlparser::ast::{Expr, visit_expressions_mut}; use type_alias::TypeAliasTransformRule; use crate::error::Result; diff --git a/src/sql/src/statements/transform/expand_interval.rs b/src/sql/src/statements/transform/expand_interval.rs index 405ebffbba..6108978289 100644 --- a/src/sql/src/statements/transform/expand_interval.rs +++ b/src/sql/src/statements/transform/expand_interval.rs @@ -215,10 +215,10 @@ mod tests { use sqlparser::ast::{BinaryOperator, CastKind, DataType, Expr, Interval, Value}; - use crate::statements::transform::expand_interval::{ - normalize_interval_name, single_quoted_string_expr, ExpandIntervalTransformRule, - }; use crate::statements::transform::TransformRule; + use crate::statements::transform::expand_interval::{ + ExpandIntervalTransformRule, normalize_interval_name, single_quoted_string_expr, + }; fn create_interval(value: Box) -> Expr { Expr::Interval(Interval { diff --git a/src/sql/src/statements/transform/type_alias.rs 
b/src/sql/src/statements/transform/type_alias.rs index 054affd092..fb61abf27e 100644 --- a/src/sql/src/statements/transform/type_alias.rs +++ b/src/sql/src/statements/transform/type_alias.rs @@ -26,7 +26,7 @@ use crate::statements::alter::AlterTableOperation; use crate::statements::create::{CreateExternalTable, CreateTable}; use crate::statements::statement::Statement; use crate::statements::transform::TransformRule; -use crate::statements::{sql_data_type_to_concrete_data_type, TimezoneInfo}; +use crate::statements::{TimezoneInfo, sql_data_type_to_concrete_data_type}; /// SQL data type alias transformer: /// - `TimestampSecond`, `Timestamp_s`, `Timestamp_sec` for `Timestamp(0)`. @@ -337,7 +337,12 @@ mod tests { transform_statements(&mut stmts).unwrap(); match &stmts[0] { - Statement::Query(q) => assert_eq!(format!("SELECT arrow_cast(TIMESTAMP '2020-01-01 01:23:45.12345678', 'Timestamp({expected}, None)')"), q.to_string()), + Statement::Query(q) => assert_eq!( + format!( + "SELECT arrow_cast(TIMESTAMP '2020-01-01 01:23:45.12345678', 'Timestamp({expected}, None)')" + ), + q.to_string() + ), _ => unreachable!(), } } @@ -355,7 +360,10 @@ mod tests { transform_statements(&mut stmts).unwrap(); match &stmts[0] { - Statement::CreateTable(c) => assert_eq!("CREATE TABLE test (\n b BOOLEAN,\n ts TIMESTAMP NOT NULL,\n TIME INDEX (ts)\n)\nENGINE=mito\n", c.to_string()), + Statement::CreateTable(c) => assert_eq!( + "CREATE TABLE test (\n b BOOLEAN,\n ts TIMESTAMP NOT NULL,\n TIME INDEX (ts)\n)\nENGINE=mito\n", + c.to_string() + ), _ => unreachable!(), } } diff --git a/src/store-api/src/metadata.rs b/src/store-api/src/metadata.rs index 4f3f877bec..d75caa4ace 100644 --- a/src/store-api/src/metadata.rs +++ b/src/store-api/src/metadata.rs @@ -21,9 +21,9 @@ use std::collections::{HashMap, HashSet}; use std::fmt; use std::sync::Arc; +use api::v1::SemanticType; use api::v1::column_def::try_as_column_schema; use api::v1::region::RegionColumnDef; -use api::v1::SemanticType; use common_error::ext::ErrorExt; use common_error::status_code::StatusCode; use common_macro::stack_trace_debug; @@ -34,7 +34,7 @@ use datatypes::types::TimestampType; use itertools::Itertools; use serde::de::Error; use serde::{Deserialize, Deserializer, Serialize}; -use snafu::{ensure, Location, OptionExt, ResultExt, Snafu}; +use snafu::{Location, OptionExt, ResultExt, Snafu, ensure}; use crate::codec::PrimaryKeyEncoding; use crate::region_request::{ @@ -1213,8 +1213,10 @@ fn set_column_fulltext_options( && current_options.case_sensitive == options.case_sensitive, InvalidColumnOptionSnafu { column_name, - msg: format!("Cannot change analyzer or case_sensitive if FULLTEXT index is set before. Previous analyzer: {}, previous case_sensitive: {}", - current_options.analyzer, current_options.case_sensitive), + msg: format!( + "Cannot change analyzer or case_sensitive if FULLTEXT index is set before. 
Previous analyzer: {}, previous case_sensitive: {}", + current_options.analyzer, current_options.case_sensitive + ), } ); } @@ -1931,7 +1933,10 @@ mod test { fn test_debug_for_column_metadata() { let region_metadata = build_test_region_metadata(); let formatted = format!("{:?}", region_metadata); - assert_eq!(formatted, "RegionMetadata { column_metadatas: [[a Int64 not null Tag 1], [b Float64 not null Field 2], [c TimestampMillisecond not null Timestamp 3]], time_index: 3, primary_key: [1], region_id: 5299989648942(1234, 5678), schema_version: 0, partition_expr: Some(\"\") }"); + assert_eq!( + formatted, + "RegionMetadata { column_metadatas: [[a Int64 not null Tag 1], [b Float64 not null Field 2], [c TimestampMillisecond not null Timestamp 3]], time_index: 3, primary_key: [1], region_id: 5299989648942(1234, 5678), schema_version: 0, partition_expr: Some(\"\") }" + ); } #[test] diff --git a/src/store-api/src/metrics.rs b/src/store-api/src/metrics.rs index 7989e0f295..1eab3021c3 100644 --- a/src/store-api/src/metrics.rs +++ b/src/store-api/src/metrics.rs @@ -13,7 +13,7 @@ // limitations under the License. use lazy_static::lazy_static; -use prometheus::{register_histogram_vec, HistogramVec}; +use prometheus::{HistogramVec, register_histogram_vec}; lazy_static! { pub static ref CONVERT_REGION_BULK_REQUEST: HistogramVec = register_histogram_vec!( diff --git a/src/store-api/src/region_request.rs b/src/store-api/src/region_request.rs index a31d294d29..09d1fe72ed 100644 --- a/src/store-api/src/region_request.rs +++ b/src/store-api/src/region_request.rs @@ -15,16 +15,16 @@ use std::collections::HashMap; use std::fmt::{self, Display}; -use api::helper::{from_pb_time_ranges, ColumnDataTypeWrapper}; +use api::helper::{ColumnDataTypeWrapper, from_pb_time_ranges}; use api::v1::add_column_location::LocationType; use api::v1::column_def::{ as_fulltext_option_analyzer, as_fulltext_option_backend, as_skipping_index_type, }; use api::v1::region::bulk_insert_request::Body; use api::v1::region::{ - alter_request, compact_request, region_request, truncate_request, AlterRequest, AlterRequests, - BulkInsertRequest, CloseRequest, CompactRequest, CreateRequest, CreateRequests, DeleteRequests, - DropRequest, DropRequests, FlushRequest, InsertRequests, OpenRequest, TruncateRequest, + AlterRequest, AlterRequests, BulkInsertRequest, CloseRequest, CompactRequest, CreateRequest, + CreateRequests, DeleteRequests, DropRequest, DropRequests, FlushRequest, InsertRequests, + OpenRequest, TruncateRequest, alter_request, compact_request, region_request, truncate_request, }; use api::v1::{ self, Analyzer, ArrowIpc, FulltextBackend as PbFulltextBackend, Option as PbOption, Rows, @@ -38,7 +38,7 @@ use datatypes::prelude::ConcreteDataType; use datatypes::schema::{FulltextOptions, SkippingIndexOptions}; use num_enum::TryFromPrimitive; use serde::{Deserialize, Serialize}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use strum::{AsRefStr, IntoStaticStr}; use crate::logstore::entry; @@ -1080,7 +1080,9 @@ impl AddColumn { region_id: metadata.region_id, err: format!( "column {} already exists with different metadata, existing: {:?}, got: {:?}", - self.column_metadata.column_schema.name, existing_column, self.column_metadata, + self.column_metadata.column_schema.name, + existing_column, + self.column_metadata, ), } ); @@ -1946,9 +1948,10 @@ mod tests { column_metadatas: column_metadatas_with_different_ts_column, }; let err = kind.validate(&metadata).unwrap_err(); - assert!(err - .to_string() - 
.contains("timestamp column ts has different id")); + assert!( + err.to_string() + .contains("timestamp column ts has different id") + ); // Change the primary key column name. let mut column_metadatas_with_different_pk_column = metadata.column_metadatas.clone(); @@ -1961,9 +1964,10 @@ mod tests { column_metadatas: column_metadatas_with_different_pk_column, }; let err = kind.validate(&metadata).unwrap_err(); - assert!(err - .to_string() - .contains("column with same name tag_0 has different id")); + assert!( + err.to_string() + .contains("column with same name tag_0 has different id") + ); // Add a new field column. let mut column_metadatas_with_new_field_column = metadata.column_metadatas.clone(); diff --git a/src/store-api/src/sst_entry.rs b/src/store-api/src/sst_entry.rs index 7bb3a57454..c50d7d57f7 100644 --- a/src/store-api/src/sst_entry.rs +++ b/src/store-api/src/sst_entry.rs @@ -15,13 +15,13 @@ use std::sync::Arc; use common_recordbatch::DfRecordBatch; -use common_time::timestamp::TimeUnit; use common_time::Timestamp; +use common_time::timestamp::TimeUnit; use datafusion_common::DataFusionError; use datafusion_expr::{LogicalPlan, LogicalPlanBuilder, LogicalTableSource}; use datatypes::arrow::array::{ - ArrayRef, TimestampMillisecondArray, TimestampNanosecondArray, UInt32Array, UInt64Array, - UInt8Array, + ArrayRef, TimestampMillisecondArray, TimestampNanosecondArray, UInt8Array, UInt32Array, + UInt64Array, }; use datatypes::arrow::error::ArrowError; use datatypes::arrow_array::StringArray; @@ -256,10 +256,10 @@ fn build_plan_helper( #[cfg(test)] mod tests { use datafusion_common::TableReference; - use datafusion_expr::{binary_expr, col, lit, LogicalPlan, Operator}; + use datafusion_expr::{LogicalPlan, Operator, binary_expr, col, lit}; use datatypes::arrow::array::{ - Array, TimestampMillisecondArray, TimestampNanosecondArray, UInt32Array, UInt64Array, - UInt8Array, + Array, TimestampMillisecondArray, TimestampNanosecondArray, UInt8Array, UInt32Array, + UInt64Array, }; use datatypes::arrow_array::StringArray; diff --git a/src/store-api/src/storage/descriptors.rs b/src/store-api/src/storage/descriptors.rs index 9a4291acef..32bfe3ecbc 100644 --- a/src/store-api/src/storage/descriptors.rs +++ b/src/store-api/src/storage/descriptors.rs @@ -200,10 +200,10 @@ impl ColumnDescriptorBuilder { } fn validate(&self) -> Result<(), String> { - if let Some(name) = &self.name { - if name.is_empty() { - return Err("name should not be empty".to_string()); - } + if let Some(name) = &self.name + && name.is_empty() + { + return Err("name should not be empty".to_string()); } if let (Some(Some(constraint)), Some(data_type)) = @@ -273,11 +273,13 @@ mod tests { .unwrap(); assert_eq!("A test column", desc.comment); - assert!(new_column_desc_builder() - .is_nullable(false) - .default_constraint(Some(ColumnDefaultConstraint::Value(Value::Null))) - .build() - .is_err()); + assert!( + new_column_desc_builder() + .is_nullable(false) + .default_constraint(Some(ColumnDefaultConstraint::Value(Value::Null))) + .build() + .is_err() + ); } #[test] diff --git a/src/store-api/src/storage/requests.rs b/src/store-api/src/storage/requests.rs index 8e7f5c1c53..513a98b148 100644 --- a/src/store-api/src/storage/requests.rs +++ b/src/store-api/src/storage/requests.rs @@ -141,7 +141,7 @@ impl Display for ScanRequest { #[cfg(test)] mod tests { - use datafusion_expr::{binary_expr, col, lit, Operator}; + use datafusion_expr::{Operator, binary_expr, col, lit}; use super::*; diff --git a/src/table/src/metadata.rs 
b/src/table/src/metadata.rs index 7e2bbe49fe..3704bc889f 100644 --- a/src/table/src/metadata.rs +++ b/src/table/src/metadata.rs @@ -27,7 +27,7 @@ use datatypes::schema::{ }; use derive_builder::Builder; use serde::{Deserialize, Serialize}; -use snafu::{ensure, OptionExt, ResultExt}; +use snafu::{OptionExt, ResultExt, ensure}; use store_api::metric_engine_consts::PHYSICAL_TABLE_METADATA_KEY; use store_api::mito_engine_options::{COMPACTION_TYPE, COMPACTION_TYPE_TWCS}; use store_api::region_request::{SetRegionOption, UnsetRegionOption}; @@ -1340,8 +1340,10 @@ fn set_column_fulltext_options( && current_options.case_sensitive == options.case_sensitive, error::InvalidColumnOptionSnafu { column_name, - msg: format!("Cannot change analyzer or case_sensitive if FULLTEXT index is set before. Previous analyzer: {}, previous case_sensitive: {}", - current_options.analyzer, current_options.case_sensitive), + msg: format!( + "Cannot change analyzer or case_sensitive if FULLTEXT index is set before. Previous analyzer: {}, previous case_sensitive: {}", + current_options.analyzer, current_options.case_sensitive + ), } ); } diff --git a/src/table/src/predicate.rs b/src/table/src/predicate.rs index e886427872..00ef81e0f7 100644 --- a/src/table/src/predicate.rs +++ b/src/table/src/predicate.rs @@ -15,17 +15,17 @@ use std::sync::Arc; use common_telemetry::{error, warn}; +use common_time::Timestamp; use common_time::range::TimestampRange; use common_time::timestamp::TimeUnit; -use common_time::Timestamp; use datafusion::common::ScalarValue; use datafusion::physical_optimizer::pruning::PruningPredicate; -use datafusion_common::pruning::PruningStatistics; use datafusion_common::ToDFSchema; +use datafusion_common::pruning::PruningStatistics; use datafusion_expr::expr::{Expr, InList}; use datafusion_expr::{Between, BinaryExpr, Operator}; use datafusion_physical_expr::execution_props::ExecutionProps; -use datafusion_physical_expr::{create_physical_expr, PhysicalExpr}; +use datafusion_physical_expr::{PhysicalExpr, create_physical_expr}; use datatypes::arrow; use datatypes::value::scalar_value_to_timestamp; use snafu::ResultExt; @@ -349,10 +349,10 @@ fn extract_from_in_list_expr( mod tests { use std::sync::Arc; - use common_test_util::temp_dir::{create_temp_dir, TempDir}; + use common_test_util::temp_dir::{TempDir, create_temp_dir}; use datafusion::parquet::arrow::ArrowWriter; use datafusion_common::{Column, ScalarValue}; - use datafusion_expr::{col, lit, BinaryExpr, Literal, Operator}; + use datafusion_expr::{BinaryExpr, Literal, Operator, col, lit}; use datatypes::arrow::array::Int32Array; use datatypes::arrow::datatypes::{DataType, Field, Schema}; use datatypes::arrow::record_batch::RecordBatch; diff --git a/src/table/src/requests.rs b/src/table/src/requests.rs index 953a2bfa27..8f64f0b9fc 100644 --- a/src/table/src/requests.rs +++ b/src/table/src/requests.rs @@ -22,8 +22,8 @@ use common_base::readable_size::ReadableSize; use common_datasource::object_store::oss::is_supported_in_oss; use common_datasource::object_store::s3::is_supported_in_s3; use common_query::AddColumnLocation; -use common_time::range::TimestampRange; use common_time::TimeToLive; +use common_time::range::TimestampRange; use datatypes::data_type::ConcreteDataType; use datatypes::prelude::VectorRef; use datatypes::schema::{ @@ -33,11 +33,11 @@ use greptime_proto::v1::region::compact_request; use once_cell::sync::Lazy; use serde::{Deserialize, Serialize}; use store_api::metric_engine_consts::{ - is_metric_engine_option_key, 
LOGICAL_TABLE_METADATA_KEY, PHYSICAL_TABLE_METADATA_KEY, + LOGICAL_TABLE_METADATA_KEY, PHYSICAL_TABLE_METADATA_KEY, is_metric_engine_option_key, }; use store_api::mito_engine_options::{ - is_mito_engine_option_key, APPEND_MODE_KEY, COMPACTION_TYPE, MEMTABLE_TYPE, MERGE_MODE_KEY, - TWCS_FALLBACK_TO_LOCAL, TWCS_MAX_OUTPUT_FILE_SIZE, TWCS_TIME_WINDOW, TWCS_TRIGGER_FILE_NUM, + APPEND_MODE_KEY, COMPACTION_TYPE, MEMTABLE_TYPE, MERGE_MODE_KEY, TWCS_FALLBACK_TO_LOCAL, + TWCS_MAX_OUTPUT_FILE_SIZE, TWCS_TIME_WINDOW, TWCS_TRIGGER_FILE_NUM, is_mito_engine_option_key, }; use store_api::region_request::{SetRegionOption, UnsetRegionOption}; diff --git a/src/table/src/table.rs b/src/table/src/table.rs index 9f48cfd9c6..a42edacfaf 100644 --- a/src/table/src/table.rs +++ b/src/table/src/table.rs @@ -17,8 +17,8 @@ use std::sync::Arc; use common_recordbatch::SendableRecordBatchStream; use datafusion::execution::FunctionRegistry; -use datafusion::logical_expr::expr::ScalarFunction; use datafusion::logical_expr::Cast; +use datafusion::logical_expr::expr::ScalarFunction; use datafusion::prelude::SessionContext; use datafusion_expr::expr::Expr; use datatypes::data_type::DataType; diff --git a/src/table/src/table/adapter.rs b/src/table/src/table/adapter.rs index 4ba880b1eb..b341768bf3 100644 --- a/src/table/src/table/adapter.rs +++ b/src/table/src/table/adapter.rs @@ -22,10 +22,10 @@ use datafusion::catalog::Session; use datafusion::datasource::{TableProvider, TableType as DfTableType}; use datafusion::error::Result as DfResult; use datafusion::physical_plan::ExecutionPlan; -use datafusion_expr::expr::Expr; use datafusion_expr::TableProviderFilterPushDown as DfTableProviderFilterPushDown; -use datafusion_physical_expr::expressions::Column; +use datafusion_expr::expr::Expr; use datafusion_physical_expr::PhysicalSortExpr; +use datafusion_physical_expr::expressions::Column; use store_api::storage::ScanRequest; use crate::table::{TableRef, TableType}; diff --git a/src/table/src/table/numbers.rs b/src/table/src/table/numbers.rs index bba3479843..05f221bce0 100644 --- a/src/table/src/table/numbers.rs +++ b/src/table/src/table/numbers.rs @@ -24,8 +24,8 @@ use datafusion::arrow::record_batch::RecordBatch as DfRecordBatch; use datatypes::arrow::array::UInt32Array; use datatypes::data_type::ConcreteDataType; use datatypes::schema::{ColumnSchema, SchemaBuilder, SchemaRef}; -use futures::task::{Context, Poll}; use futures::Stream; +use futures::task::{Context, Poll}; use store_api::data_source::DataSource; use store_api::storage::ScanRequest; diff --git a/src/table/src/test_util/memtable.rs b/src/table/src/test_util/memtable.rs index 34683a986e..224c9e100f 100644 --- a/src/table/src/test_util/memtable.rs +++ b/src/table/src/test_util/memtable.rs @@ -23,8 +23,8 @@ use common_recordbatch::{OrderOption, RecordBatch, RecordBatchStream, SendableRe use datatypes::prelude::*; use datatypes::schema::{ColumnSchema, Schema, SchemaRef}; use datatypes::vectors::UInt32Vector; -use futures::task::{Context, Poll}; use futures::Stream; +use futures::task::{Context, Poll}; use snafu::prelude::*; use store_api::data_source::DataSource; use store_api::storage::{RegionNumber, ScanRequest}; diff --git a/tests-fuzz/src/context.rs b/tests-fuzz/src/context.rs index 9311d5c19e..08513277cf 100644 --- a/tests-fuzz/src/context.rs +++ b/tests-fuzz/src/context.rs @@ -17,7 +17,7 @@ use std::sync::Arc; use common_query::AddColumnLocation; use datatypes::types::cast; use rand::Rng; -use snafu::{ensure, OptionExt}; +use snafu::{OptionExt, ensure}; use 
crate::error::{self, Result}; use crate::generator::Random; @@ -175,9 +175,9 @@ impl TableContext { rng: &mut R, generator: &dyn Random, ) -> Ident { - let mut name = generator.gen(rng); + let mut name = generator.generate(rng); while self.columns.iter().any(|col| col.name.value == name.value) { - name = generator.gen(rng); + name = generator.generate(rng); } name } @@ -187,9 +187,9 @@ impl TableContext { rng: &mut R, generator: &dyn Random, ) -> Ident { - let mut name = generator.gen(rng); + let mut name = generator.generate(rng); while self.name.value == name.value { - name = generator.gen(rng); + name = generator.generate(rng); } name } diff --git a/tests-fuzz/src/fake.rs b/tests-fuzz/src/fake.rs index d189e44e3b..aa92e0293a 100644 --- a/tests-fuzz/src/fake.rs +++ b/tests-fuzz/src/fake.rs @@ -16,9 +16,9 @@ use std::collections::HashSet; use std::marker::PhantomData; use lazy_static::lazy_static; +use rand::Rng; use rand::prelude::IndexedRandom; use rand::seq::{IteratorRandom, SliceRandom}; -use rand::Rng; use crate::generator::Random; use crate::impl_random; diff --git a/tests-fuzz/src/generator.rs b/tests-fuzz/src/generator.rs index 5c3e4a9143..bb9e6a3d47 100644 --- a/tests-fuzz/src/generator.rs +++ b/tests-fuzz/src/generator.rs @@ -51,7 +51,7 @@ pub trait Generator { pub trait Random { /// Generates a random element. - fn gen(&self, rng: &mut R) -> T { + fn generate(&self, rng: &mut R) -> T { self.choose(rng, 1).remove(0) } diff --git a/tests-fuzz/src/generator/alter_expr.rs b/tests-fuzz/src/generator/alter_expr.rs index a2df3124ee..08c59fc6ea 100644 --- a/tests-fuzz/src/generator/alter_expr.rs +++ b/tests-fuzz/src/generator/alter_expr.rs @@ -29,8 +29,8 @@ use crate::generator::{ColumnOptionGenerator, ConcreteDataTypeGenerator, Generat use crate::ir::alter_expr::{AlterTableExpr, AlterTableOperation, AlterTableOption, Ttl}; use crate::ir::create_expr::ColumnOption; use crate::ir::{ - droppable_columns, generate_columns, generate_random_value, modifiable_columns, Column, - ColumnTypeGenerator, Ident, + Column, ColumnTypeGenerator, Ident, droppable_columns, generate_columns, generate_random_value, + modifiable_columns, }; fn add_column_options_generator( @@ -175,9 +175,9 @@ impl Generator for AlterExprModifyDataTypeGenerator Result { let modifiable = modifiable_columns(&self.table_ctx.columns); let changed = modifiable[rng.random_range(0..modifiable.len())].clone(); - let mut to_type = self.column_type_generator.gen(rng); + let mut to_type = self.column_type_generator.generate(rng); while !changed.column_type.can_arrow_type_cast_to(&to_type) { - to_type = self.column_type_generator.gen(rng); + to_type = self.column_type_generator.generate(rng); } Ok(AlterTableExpr { @@ -292,8 +292,8 @@ mod tests { use super::*; use crate::context::TableContext; - use crate::generator::create_expr::CreateTableExprGeneratorBuilder; use crate::generator::Generator; + use crate::generator::create_expr::CreateTableExprGeneratorBuilder; #[test] fn test_alter_table_expr_generator_deterministic() { diff --git a/tests-fuzz/src/generator/create_expr.rs b/tests-fuzz/src/generator/create_expr.rs index 1291e2749a..6aee3e6251 100644 --- a/tests-fuzz/src/generator/create_expr.rs +++ b/tests-fuzz/src/generator/create_expr.rs @@ -18,23 +18,23 @@ use datatypes::data_type::ConcreteDataType; use datatypes::value::Value; use derive_builder::Builder; use partition::expr::{Operand, PartitionExpr, RestrictedOp}; -use rand::seq::SliceRandom; use rand::Rng; -use snafu::{ensure, ResultExt}; +use rand::seq::SliceRandom; +use 
snafu::{ResultExt, ensure}; use super::Generator; use crate::context::TableContextRef; use crate::error::{self, Error, Result}; -use crate::fake::{random_capitalize_map, MappedGenerator, WordGenerator}; +use crate::fake::{MappedGenerator, WordGenerator, random_capitalize_map}; use crate::generator::{ColumnOptionGenerator, ConcreteDataTypeGenerator, Random}; use crate::ir::create_expr::{ ColumnOption, CreateDatabaseExprBuilder, CreateTableExprBuilder, PartitionDef, }; use crate::ir::{ - column_options_generator, generate_columns, generate_partition_bounds, - partible_column_options_generator, primary_key_options_generator, ts_column_options_generator, Column, ColumnTypeGenerator, CreateDatabaseExpr, CreateTableExpr, Ident, PartibleColumnTypeGenerator, StringColumnTypeGenerator, TsColumnTypeGenerator, + column_options_generator, generate_columns, generate_partition_bounds, + partible_column_options_generator, primary_key_options_generator, ts_column_options_generator, }; #[derive(Builder)] @@ -163,7 +163,7 @@ impl Generator for CreateTableExprGenerato builder.engine(self.engine.to_string()); builder.if_not_exists(self.if_not_exists); if self.name.is_empty() { - builder.table_name(self.name_generator.gen(rng)); + builder.table_name(self.name_generator.generate(rng)); } else { builder.table_name(self.name.clone()); } @@ -243,7 +243,7 @@ impl Generator for CreatePhysicalTableExpr } Ok(CreateTableExpr { - table_name: self.name_generator.gen(rng), + table_name: self.name_generator.generate(rng), columns: vec![ Column { name: Ident::new("ts"), @@ -355,7 +355,7 @@ impl Generator for CreateDatabaseExprGe let mut builder = CreateDatabaseExprBuilder::default(); builder.if_not_exists(self.if_not_exists); if self.database_name.is_empty() { - builder.database_name(self.name_generator.gen(rng)); + builder.database_name(self.name_generator.generate(rng)); } else { builder.database_name(self.database_name.to_string()); } @@ -477,16 +477,13 @@ mod tests { assert_eq!(logical_table_expr.engine, "metric"); assert_eq!(logical_table_expr.columns.len(), 7); assert_eq!(logical_ts_name, physical_ts_name); - assert!(logical_table_expr - .columns - .iter() - .all( - |column| column.column_type != ConcreteDataType::string_datatype() - || column - .options - .iter() - .any(|option| option == &ColumnOption::PrimaryKey) - )); + assert!(logical_table_expr.columns.iter().all(|column| { + column.column_type != ConcreteDataType::string_datatype() + || column + .options + .iter() + .any(|option| option == &ColumnOption::PrimaryKey) + })); } #[test] diff --git a/tests-fuzz/src/generator/insert_expr.rs b/tests-fuzz/src/generator/insert_expr.rs index e13f518a65..ededa53ddd 100644 --- a/tests-fuzz/src/generator/insert_expr.rs +++ b/tests-fuzz/src/generator/insert_expr.rs @@ -16,8 +16,8 @@ use std::marker::PhantomData; use datatypes::value::Value; use derive_builder::Builder; -use rand::seq::{IndexedRandom, SliceRandom}; use rand::Rng; +use rand::seq::{IndexedRandom, SliceRandom}; use super::TsValueGenerator; use crate::context::TableContextRef; @@ -25,7 +25,7 @@ use crate::error::{Error, Result}; use crate::fake::WordGenerator; use crate::generator::{Generator, Random, ValueGenerator}; use crate::ir::insert_expr::{InsertIntoExpr, RowValue}; -use crate::ir::{generate_random_timestamp, generate_random_value, Ident}; +use crate::ir::{Ident, generate_random_timestamp, generate_random_value}; /// Generates [InsertIntoExpr]. 
#[derive(Builder)] diff --git a/tests-fuzz/src/generator/select_expr.rs b/tests-fuzz/src/generator/select_expr.rs index 95a109d358..629a34801b 100644 --- a/tests-fuzz/src/generator/select_expr.rs +++ b/tests-fuzz/src/generator/select_expr.rs @@ -15,8 +15,8 @@ use std::marker::PhantomData; use derive_builder::Builder; -use rand::seq::{IndexedRandom, SliceRandom}; use rand::Rng; +use rand::seq::{IndexedRandom, SliceRandom}; use crate::context::TableContextRef; use crate::error::{Error, Result}; diff --git a/tests-fuzz/src/ir.rs b/tests-fuzz/src/ir.rs index 74745327c2..7be697ab69 100644 --- a/tests-fuzz/src/ir.rs +++ b/tests-fuzz/src/ir.rs @@ -34,8 +34,8 @@ use datatypes::value::Value; use derive_builder::Builder; pub use insert_expr::InsertIntoExpr; use lazy_static::lazy_static; -use rand::seq::{IndexedRandom, SliceRandom}; use rand::Rng; +use rand::seq::{IndexedRandom, SliceRandom}; use serde::{Deserialize, Serialize}; use self::insert_expr::{RowValue, RowValues}; @@ -153,7 +153,7 @@ pub fn generate_random_value( ConcreteDataType::Float32(_) => Value::from(rng.random::()), ConcreteDataType::Float64(_) => Value::from(rng.random::()), ConcreteDataType::String(_) => match random_str { - Some(random) => Value::from(random.gen(rng).value), + Some(random) => Value::from(random.generate(rng).value), None => Value::from(rng.random::().to_string()), }, ConcreteDataType::Date(_) => generate_random_date(rng), @@ -483,7 +483,7 @@ pub fn generate_columns( names .into_iter() .map(|name| { - let column_type = types.gen(rng); + let column_type = types.generate(rng); let options = options(rng, &column_type); Column { name, diff --git a/tests-fuzz/src/ir/alter_expr.rs b/tests-fuzz/src/ir/alter_expr.rs index 1790467ac2..88ec7be395 100644 --- a/tests-fuzz/src/ir/alter_expr.rs +++ b/tests-fuzz/src/ir/alter_expr.rs @@ -172,8 +172,7 @@ mod tests { #[test] fn test_parse_kv_pairs() { - let option_string = - "compaction.twcs.max_output_file_size = '1M', compaction.type = 'twcs', ttl = 'forever'"; + let option_string = "compaction.twcs.max_output_file_size = '1M', compaction.type = 'twcs', ttl = 'forever'"; let options = AlterTableOption::parse_kv_pairs(option_string).unwrap(); assert_eq!(options.len(), 2); assert_eq!( diff --git a/tests-fuzz/src/test_utils.rs b/tests-fuzz/src/test_utils.rs index bef96a1fd7..a12d129dfe 100644 --- a/tests-fuzz/src/test_utils.rs +++ b/tests-fuzz/src/test_utils.rs @@ -15,8 +15,8 @@ use datatypes::data_type::ConcreteDataType; use crate::context::TableContext; -use crate::ir::create_expr::ColumnOption; use crate::ir::Column; +use crate::ir::create_expr::ColumnOption; pub fn new_test_ctx() -> TableContext { TableContext { diff --git a/tests-fuzz/src/translator/mysql/alter_expr.rs b/tests-fuzz/src/translator/mysql/alter_expr.rs index 942f41eefd..d5074c6b49 100644 --- a/tests-fuzz/src/translator/mysql/alter_expr.rs +++ b/tests-fuzz/src/translator/mysql/alter_expr.rs @@ -22,8 +22,8 @@ use crate::error::{Error, Result}; use crate::ir::alter_expr::AlterTableOperation; use crate::ir::create_expr::ColumnOption; use crate::ir::{AlterTableExpr, Column}; -use crate::translator::common::CommonAlterTableTranslator; use crate::translator::DslTranslator; +use crate::translator::common::CommonAlterTableTranslator; pub struct AlterTableExprTranslator; diff --git a/tests-fuzz/src/translator/mysql/insert_expr.rs b/tests-fuzz/src/translator/mysql/insert_expr.rs index ae2da2a330..480395aede 100644 --- a/tests-fuzz/src/translator/mysql/insert_expr.rs +++ b/tests-fuzz/src/translator/mysql/insert_expr.rs @@ 
-74,8 +74,8 @@ mod tests { use rand::{Rng, SeedableRng}; use super::*; - use crate::generator::insert_expr::InsertExprGeneratorBuilder; use crate::generator::Generator; + use crate::generator::insert_expr::InsertExprGeneratorBuilder; use crate::test_utils; use crate::translator::DslTranslator; diff --git a/tests-fuzz/src/translator/mysql/select_expr.rs b/tests-fuzz/src/translator/mysql/select_expr.rs index 46161dfb9e..bc68440ec8 100644 --- a/tests-fuzz/src/translator/mysql/select_expr.rs +++ b/tests-fuzz/src/translator/mysql/select_expr.rs @@ -52,8 +52,8 @@ mod tests { use rand::SeedableRng; use super::SelectExprTranslator; - use crate::generator::select_expr::SelectExprGeneratorBuilder; use crate::generator::Generator; + use crate::generator::select_expr::SelectExprGeneratorBuilder; use crate::test_utils; use crate::translator::DslTranslator; diff --git a/tests-fuzz/src/translator/postgres/alter_expr.rs b/tests-fuzz/src/translator/postgres/alter_expr.rs index 8a0681daa6..1a355f9d4a 100644 --- a/tests-fuzz/src/translator/postgres/alter_expr.rs +++ b/tests-fuzz/src/translator/postgres/alter_expr.rs @@ -21,9 +21,9 @@ use crate::error::{Error, Result}; use crate::ir::alter_expr::AlterTableOperation; use crate::ir::create_expr::ColumnOption; use crate::ir::{AlterTableExpr, Column}; +use crate::translator::DslTranslator; use crate::translator::common::CommonAlterTableTranslator; use crate::translator::postgres::sql_data_type_to_postgres_data_type; -use crate::translator::DslTranslator; pub struct AlterTableExprTranslator; diff --git a/tests-fuzz/src/translator/postgres/create_expr.rs b/tests-fuzz/src/translator/postgres/create_expr.rs index 0de021a44e..10dd05da83 100644 --- a/tests-fuzz/src/translator/postgres/create_expr.rs +++ b/tests-fuzz/src/translator/postgres/create_expr.rs @@ -18,8 +18,8 @@ use sql::statements::concrete_data_type_to_sql_data_type; use crate::error::{Error, Result}; use crate::ir::create_expr::ColumnOption; use crate::ir::{Column, CreateTableExpr}; -use crate::translator::postgres::sql_data_type_to_postgres_data_type; use crate::translator::DslTranslator; +use crate::translator::postgres::sql_data_type_to_postgres_data_type; pub struct CreateTableExprTranslator; diff --git a/tests-fuzz/src/utils/process.rs b/tests-fuzz/src/utils/process.rs index 80b64c040d..6e79644a87 100644 --- a/tests-fuzz/src/utils/process.rs +++ b/tests-fuzz/src/utils/process.rs @@ -22,7 +22,7 @@ use common_telemetry::{info, warn}; use nix::sys::signal::Signal; use rand::{Rng, SeedableRng}; use rand_chacha::ChaChaRng; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use tokio::fs::OpenOptions; use tokio::process::Child; diff --git a/tests-fuzz/src/validator/column.rs b/tests-fuzz/src/validator/column.rs index dbd50e25c8..29e08a24c2 100644 --- a/tests-fuzz/src/validator/column.rs +++ b/tests-fuzz/src/validator/column.rs @@ -14,7 +14,7 @@ use common_telemetry::debug; use datatypes::data_type::DataType; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use sqlx::MySqlPool; use crate::error::{self, Result, UnexpectedSnafu}; diff --git a/tests-fuzz/src/validator/row.rs b/tests-fuzz/src/validator/row.rs index 29a61782d5..728ede0473 100644 --- a/tests-fuzz/src/validator/row.rs +++ b/tests-fuzz/src/validator/row.rs @@ -13,10 +13,10 @@ // limitations under the License. 
use chrono::{DateTime as ChronoDateTime, NaiveDate, NaiveDateTime, Utc}; -use common_time::date::Date; use common_time::Timestamp; +use common_time::date::Date; use datatypes::value::Value; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use sqlx::mysql::MySqlRow; use sqlx::{Column, ColumnIndex, Database, MySqlPool, Row, TypeInfo, ValueRef}; diff --git a/tests-fuzz/src/validator/table.rs b/tests-fuzz/src/validator/table.rs index 3e45930f29..f271101051 100644 --- a/tests-fuzz/src/validator/table.rs +++ b/tests-fuzz/src/validator/table.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use sqlx::{MySqlPool, Row}; use crate::error::{self, Result, UnexpectedSnafu}; @@ -74,8 +74,8 @@ mod tests { use common_time::Duration; use super::*; - use crate::ir::alter_expr::Ttl; use crate::ir::AlterTableOption; + use crate::ir::alter_expr::Ttl; #[test] fn test_parse_show_create() { diff --git a/tests-fuzz/targets/ddl/fuzz_alter_logical_table.rs b/tests-fuzz/targets/ddl/fuzz_alter_logical_table.rs index 445b5d45fe..7157af4748 100644 --- a/tests-fuzz/targets/ddl/fuzz_alter_logical_table.rs +++ b/tests-fuzz/targets/ddl/fuzz_alter_logical_table.rs @@ -28,23 +28,23 @@ use sqlx::{MySql, Pool}; use tests_fuzz::context::{TableContext, TableContextRef}; use tests_fuzz::error::{self, Result}; use tests_fuzz::fake::{ - merge_two_word_map_fn, random_capitalize_map, uppercase_and_keyword_backtick_map, - MappedGenerator, WordGenerator, + MappedGenerator, WordGenerator, merge_two_word_map_fn, random_capitalize_map, + uppercase_and_keyword_backtick_map, }; +use tests_fuzz::generator::Generator; use tests_fuzz::generator::alter_expr::AlterExprAddColumnGeneratorBuilder; use tests_fuzz::generator::create_expr::{ CreateLogicalTableExprGeneratorBuilder, CreatePhysicalTableExprGeneratorBuilder, }; -use tests_fuzz::generator::Generator; use tests_fuzz::ir::{ - primary_key_and_not_null_column_options_generator, primary_key_options_generator, Column, - CreateTableExpr, StringColumnTypeGenerator, + Column, CreateTableExpr, StringColumnTypeGenerator, + primary_key_and_not_null_column_options_generator, primary_key_options_generator, }; +use tests_fuzz::translator::DslTranslator; use tests_fuzz::translator::mysql::alter_expr::AlterTableExprTranslator; use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator; -use tests_fuzz::translator::DslTranslator; use tests_fuzz::utils::{ - get_gt_fuzz_input_max_alter_actions, init_greptime_connections_via_env, Connections, + Connections, get_gt_fuzz_input_max_alter_actions, init_greptime_connections_via_env, }; use tests_fuzz::validator; diff --git a/tests-fuzz/targets/ddl/fuzz_alter_table.rs b/tests-fuzz/targets/ddl/fuzz_alter_table.rs index 38357e7ef5..22e5d254e8 100644 --- a/tests-fuzz/targets/ddl/fuzz_alter_table.rs +++ b/tests-fuzz/targets/ddl/fuzz_alter_table.rs @@ -29,24 +29,24 @@ use strum::{EnumIter, IntoEnumIterator}; use tests_fuzz::context::{TableContext, TableContextRef}; use tests_fuzz::error::{self, Result, UnexpectedSnafu}; use tests_fuzz::fake::{ - merge_two_word_map_fn, random_capitalize_map, uppercase_and_keyword_backtick_map, - MappedGenerator, WordGenerator, + MappedGenerator, WordGenerator, merge_two_word_map_fn, random_capitalize_map, + uppercase_and_keyword_backtick_map, }; +use tests_fuzz::generator::Generator; use tests_fuzz::generator::alter_expr::{ AlterExprAddColumnGeneratorBuilder, 
AlterExprDropColumnGeneratorBuilder, AlterExprModifyDataTypeGeneratorBuilder, AlterExprRenameGeneratorBuilder, AlterExprSetTableOptionsGeneratorBuilder, AlterExprUnsetTableOptionsGeneratorBuilder, }; use tests_fuzz::generator::create_expr::CreateTableExprGeneratorBuilder; -use tests_fuzz::generator::Generator; use tests_fuzz::ir::{ - droppable_columns, modifiable_columns, AlterTableExpr, AlterTableOption, CreateTableExpr, + AlterTableExpr, AlterTableOption, CreateTableExpr, droppable_columns, modifiable_columns, }; +use tests_fuzz::translator::DslTranslator; use tests_fuzz::translator::mysql::alter_expr::AlterTableExprTranslator; use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator; -use tests_fuzz::translator::DslTranslator; use tests_fuzz::utils::{ - get_gt_fuzz_input_max_columns, init_greptime_connections_via_env, Connections, + Connections, get_gt_fuzz_input_max_columns, init_greptime_connections_via_env, }; use tests_fuzz::validator; struct FuzzContext { diff --git a/tests-fuzz/targets/ddl/fuzz_create_database.rs b/tests-fuzz/targets/ddl/fuzz_create_database.rs index 83de72a342..2e9c7e45f7 100644 --- a/tests-fuzz/targets/ddl/fuzz_create_database.rs +++ b/tests-fuzz/targets/ddl/fuzz_create_database.rs @@ -23,15 +23,15 @@ use snafu::ResultExt; use sqlx::{MySql, Pool}; use tests_fuzz::error::{self, Result}; use tests_fuzz::fake::{ - merge_two_word_map_fn, random_capitalize_map, uppercase_and_keyword_backtick_map, - MappedGenerator, WordGenerator, + MappedGenerator, WordGenerator, merge_two_word_map_fn, random_capitalize_map, + uppercase_and_keyword_backtick_map, }; -use tests_fuzz::generator::create_expr::CreateDatabaseExprGeneratorBuilder; use tests_fuzz::generator::Generator; +use tests_fuzz::generator::create_expr::CreateDatabaseExprGeneratorBuilder; use tests_fuzz::ir::CreateDatabaseExpr; -use tests_fuzz::translator::mysql::create_expr::CreateDatabaseExprTranslator; use tests_fuzz::translator::DslTranslator; -use tests_fuzz::utils::{init_greptime_connections_via_env, Connections}; +use tests_fuzz::translator::mysql::create_expr::CreateDatabaseExprTranslator; +use tests_fuzz::utils::{Connections, init_greptime_connections_via_env}; struct FuzzContext { greptime: Pool, diff --git a/tests-fuzz/targets/ddl/fuzz_create_logical_table.rs b/tests-fuzz/targets/ddl/fuzz_create_logical_table.rs index 46af6a4ceb..1535f6d6b1 100644 --- a/tests-fuzz/targets/ddl/fuzz_create_logical_table.rs +++ b/tests-fuzz/targets/ddl/fuzz_create_logical_table.rs @@ -28,17 +28,17 @@ use sqlx::{MySql, Pool}; use tests_fuzz::context::TableContext; use tests_fuzz::error::{self, Result}; use tests_fuzz::fake::{ - merge_two_word_map_fn, random_capitalize_map, uppercase_and_keyword_backtick_map, - MappedGenerator, WordGenerator, + MappedGenerator, WordGenerator, merge_two_word_map_fn, random_capitalize_map, + uppercase_and_keyword_backtick_map, }; +use tests_fuzz::generator::Generator; use tests_fuzz::generator::create_expr::{ CreateLogicalTableExprGeneratorBuilder, CreatePhysicalTableExprGeneratorBuilder, }; -use tests_fuzz::generator::Generator; -use tests_fuzz::ir::{primary_key_and_not_null_column_options_generator, Column}; -use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator; +use tests_fuzz::ir::{Column, primary_key_and_not_null_column_options_generator}; use tests_fuzz::translator::DslTranslator; -use tests_fuzz::utils::{init_greptime_connections_via_env, Connections}; +use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator; +use 
tests_fuzz::utils::{Connections, init_greptime_connections_via_env}; use tests_fuzz::validator; struct FuzzContext { diff --git a/tests-fuzz/targets/ddl/fuzz_create_table.rs b/tests-fuzz/targets/ddl/fuzz_create_table.rs index a4717919d3..32d0d3c9eb 100644 --- a/tests-fuzz/targets/ddl/fuzz_create_table.rs +++ b/tests-fuzz/targets/ddl/fuzz_create_table.rs @@ -25,16 +25,16 @@ use snafu::ResultExt; use sqlx::{MySql, Pool}; use tests_fuzz::error::{self, Result}; use tests_fuzz::fake::{ - merge_two_word_map_fn, random_capitalize_map, uppercase_and_keyword_backtick_map, - MappedGenerator, WordGenerator, + MappedGenerator, WordGenerator, merge_two_word_map_fn, random_capitalize_map, + uppercase_and_keyword_backtick_map, }; -use tests_fuzz::generator::create_expr::CreateTableExprGeneratorBuilder; use tests_fuzz::generator::Generator; +use tests_fuzz::generator::create_expr::CreateTableExprGeneratorBuilder; use tests_fuzz::ir::CreateTableExpr; -use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator; use tests_fuzz::translator::DslTranslator; +use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator; use tests_fuzz::utils::{ - get_gt_fuzz_input_max_columns, init_greptime_connections_via_env, Connections, + Connections, get_gt_fuzz_input_max_columns, init_greptime_connections_via_env, }; use tests_fuzz::validator; diff --git a/tests-fuzz/targets/failover/fuzz_failover_metric_regions.rs b/tests-fuzz/targets/failover/fuzz_failover_metric_regions.rs index f6390cd8f0..58ff07de87 100644 --- a/tests-fuzz/targets/failover/fuzz_failover_metric_regions.rs +++ b/tests-fuzz/targets/failover/fuzz_failover_metric_regions.rs @@ -24,34 +24,33 @@ use common_telemetry::info; use libfuzzer_sys::fuzz_target; use rand::{Rng, SeedableRng}; use rand_chacha::{ChaCha20Rng, ChaChaRng}; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use sqlx::{Executor, MySql, Pool}; use tests_fuzz::context::{TableContext, TableContextRef}; use tests_fuzz::error::{self, Result}; use tests_fuzz::fake::{ - merge_two_word_map_fn, random_capitalize_map, uppercase_and_keyword_backtick_map, - MappedGenerator, WordGenerator, + MappedGenerator, WordGenerator, merge_two_word_map_fn, random_capitalize_map, + uppercase_and_keyword_backtick_map, }; +use tests_fuzz::generator::Generator; use tests_fuzz::generator::create_expr::{ CreateLogicalTableExprGeneratorBuilder, CreatePhysicalTableExprGeneratorBuilder, }; use tests_fuzz::generator::insert_expr::InsertExprGeneratorBuilder; -use tests_fuzz::generator::Generator; use tests_fuzz::ir::{ - generate_random_timestamp_for_mysql, generate_random_value, CreateTableExpr, InsertIntoExpr, + CreateTableExpr, InsertIntoExpr, generate_random_timestamp_for_mysql, generate_random_value, }; +use tests_fuzz::translator::DslTranslator; use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator; use tests_fuzz::translator::mysql::insert_expr::InsertIntoExprTranslator; -use tests_fuzz::translator::DslTranslator; use tests_fuzz::utils::cluster_info::wait_for_all_datanode_online; use tests_fuzz::utils::partition::{ fetch_partitions, region_distribution, wait_for_all_regions_evicted, }; use tests_fuzz::utils::pod_failure::{inject_datanode_pod_failure, recover_pod_failure}; use tests_fuzz::utils::{ - compact_table, flush_memtable, get_gt_fuzz_input_max_rows, get_gt_fuzz_input_max_tables, - init_greptime_connections_via_env, Connections, GT_FUZZ_CLUSTER_NAME, - GT_FUZZ_CLUSTER_NAMESPACE, + Connections, GT_FUZZ_CLUSTER_NAME, GT_FUZZ_CLUSTER_NAMESPACE, 
compact_table, flush_memtable, + get_gt_fuzz_input_max_rows, get_gt_fuzz_input_max_tables, init_greptime_connections_via_env, }; use tests_fuzz::validator::row::count_values; diff --git a/tests-fuzz/targets/failover/fuzz_failover_mito_regions.rs b/tests-fuzz/targets/failover/fuzz_failover_mito_regions.rs index 9faf82ec0b..c4a8564f36 100644 --- a/tests-fuzz/targets/failover/fuzz_failover_mito_regions.rs +++ b/tests-fuzz/targets/failover/fuzz_failover_mito_regions.rs @@ -27,35 +27,34 @@ use libfuzzer_sys::fuzz_target; use rand::seq::IndexedRandom; use rand::{Rng, SeedableRng}; use rand_chacha::{ChaCha20Rng, ChaChaRng}; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use sqlx::{Executor, MySql, Pool}; use tests_fuzz::context::{TableContext, TableContextRef}; use tests_fuzz::error::{self, Result}; use tests_fuzz::fake::{ - merge_two_word_map_fn, random_capitalize_map, uppercase_and_keyword_backtick_map, - MappedGenerator, WordGenerator, + MappedGenerator, WordGenerator, merge_two_word_map_fn, random_capitalize_map, + uppercase_and_keyword_backtick_map, }; use tests_fuzz::generator::create_expr::CreateTableExprGeneratorBuilder; use tests_fuzz::generator::insert_expr::InsertExprGeneratorBuilder; use tests_fuzz::generator::{Generator, Random}; use tests_fuzz::ir::{ - generate_random_value, generate_unique_timestamp_for_mysql, CreateTableExpr, Ident, - InsertIntoExpr, MySQLTsColumnTypeGenerator, + CreateTableExpr, Ident, InsertIntoExpr, MySQLTsColumnTypeGenerator, generate_random_value, + generate_unique_timestamp_for_mysql, }; +use tests_fuzz::translator::DslTranslator; use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator; use tests_fuzz::translator::mysql::insert_expr::InsertIntoExprTranslator; -use tests_fuzz::translator::DslTranslator; use tests_fuzz::utils::cluster_info::wait_for_all_datanode_online; use tests_fuzz::utils::partition::{ - fetch_partitions, pretty_print_region_distribution, region_distribution, - wait_for_all_regions_evicted, Partition, + Partition, fetch_partitions, pretty_print_region_distribution, region_distribution, + wait_for_all_regions_evicted, }; use tests_fuzz::utils::pod_failure::{inject_datanode_pod_failure, recover_pod_failure}; use tests_fuzz::utils::{ - compact_table, flush_memtable, get_gt_fuzz_input_max_columns, - get_gt_fuzz_input_max_insert_actions, get_gt_fuzz_input_max_rows, get_gt_fuzz_input_max_tables, - init_greptime_connections_via_env, Connections, GT_FUZZ_CLUSTER_NAME, - GT_FUZZ_CLUSTER_NAMESPACE, + Connections, GT_FUZZ_CLUSTER_NAME, GT_FUZZ_CLUSTER_NAMESPACE, compact_table, flush_memtable, + get_gt_fuzz_input_max_columns, get_gt_fuzz_input_max_insert_actions, + get_gt_fuzz_input_max_rows, get_gt_fuzz_input_max_tables, init_greptime_connections_via_env, }; use tests_fuzz::validator::row::count_values; use tokio::sync::Semaphore; @@ -114,7 +113,7 @@ fn generate_create_exprs( merge_two_word_map_fn(random_capitalize_map, uppercase_and_keyword_backtick_map), ); - let base_table_name = name_generator.gen(rng); + let base_table_name = name_generator.generate(rng); let min_column = columns / 2; let columns = rng.random_range(min_column..columns); let mut exprs = Vec::with_capacity(tables); diff --git a/tests-fuzz/targets/fuzz_insert.rs b/tests-fuzz/targets/fuzz_insert.rs index f041408480..3756782d42 100644 --- a/tests-fuzz/targets/fuzz_insert.rs +++ b/tests-fuzz/targets/fuzz_insert.rs @@ -23,27 +23,27 @@ use libfuzzer_sys::arbitrary::{Arbitrary, Unstructured}; use libfuzzer_sys::fuzz_target; use rand::{Rng, 
SeedableRng}; use rand_chacha::ChaChaRng; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use sqlx::{Executor, MySql, Pool}; use tests_fuzz::context::{TableContext, TableContextRef}; use tests_fuzz::error::{self, Result}; use tests_fuzz::fake::{ - merge_two_word_map_fn, random_capitalize_map, uppercase_and_keyword_backtick_map, - MappedGenerator, WordGenerator, + MappedGenerator, WordGenerator, merge_two_word_map_fn, random_capitalize_map, + uppercase_and_keyword_backtick_map, }; +use tests_fuzz::generator::Generator; use tests_fuzz::generator::create_expr::CreateTableExprGeneratorBuilder; use tests_fuzz::generator::insert_expr::InsertExprGeneratorBuilder; -use tests_fuzz::generator::Generator; use tests_fuzz::ir::{ - generate_random_value, generate_unique_timestamp_for_mysql, replace_default, - sort_by_primary_keys, CreateTableExpr, InsertIntoExpr, MySQLTsColumnTypeGenerator, + CreateTableExpr, InsertIntoExpr, MySQLTsColumnTypeGenerator, generate_random_value, + generate_unique_timestamp_for_mysql, replace_default, sort_by_primary_keys, }; +use tests_fuzz::translator::DslTranslator; use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator; use tests_fuzz::translator::mysql::insert_expr::InsertIntoExprTranslator; -use tests_fuzz::translator::DslTranslator; use tests_fuzz::utils::{ - flush_memtable, get_gt_fuzz_input_max_columns, get_gt_fuzz_input_max_rows, - init_greptime_connections_via_env, Connections, + Connections, flush_memtable, get_gt_fuzz_input_max_columns, get_gt_fuzz_input_max_rows, + init_greptime_connections_via_env, }; use tests_fuzz::validator; diff --git a/tests-fuzz/targets/fuzz_insert_logical_table.rs b/tests-fuzz/targets/fuzz_insert_logical_table.rs index 6e5e7d0a7f..b5594ded4a 100644 --- a/tests-fuzz/targets/fuzz_insert_logical_table.rs +++ b/tests-fuzz/targets/fuzz_insert_logical_table.rs @@ -23,29 +23,29 @@ use libfuzzer_sys::arbitrary::{Arbitrary, Unstructured}; use libfuzzer_sys::fuzz_target; use rand::{Rng, SeedableRng}; use rand_chacha::ChaChaRng; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use sqlx::{Executor, MySql, Pool}; use tests_fuzz::context::{TableContext, TableContextRef}; use tests_fuzz::error::{self, Result}; use tests_fuzz::fake::{ - merge_two_word_map_fn, random_capitalize_map, uppercase_and_keyword_backtick_map, - MappedGenerator, WordGenerator, + MappedGenerator, WordGenerator, merge_two_word_map_fn, random_capitalize_map, + uppercase_and_keyword_backtick_map, }; +use tests_fuzz::generator::Generator; use tests_fuzz::generator::create_expr::{ CreateLogicalTableExprGeneratorBuilder, CreatePhysicalTableExprGeneratorBuilder, }; use tests_fuzz::generator::insert_expr::InsertExprGeneratorBuilder; -use tests_fuzz::generator::Generator; use tests_fuzz::ir::{ - generate_random_value, generate_unique_timestamp_for_mysql, replace_default, - sort_by_primary_keys, CreateTableExpr, InsertIntoExpr, + CreateTableExpr, InsertIntoExpr, generate_random_value, generate_unique_timestamp_for_mysql, + replace_default, sort_by_primary_keys, }; +use tests_fuzz::translator::DslTranslator; use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator; use tests_fuzz::translator::mysql::insert_expr::InsertIntoExprTranslator; -use tests_fuzz::translator::DslTranslator; use tests_fuzz::utils::{ - compact_table, flush_memtable, get_gt_fuzz_input_max_rows, get_gt_fuzz_input_max_tables, - init_greptime_connections_via_env, Connections, + Connections, compact_table, flush_memtable, get_gt_fuzz_input_max_rows, + 
get_gt_fuzz_input_max_tables, init_greptime_connections_via_env, }; use tests_fuzz::validator; struct FuzzContext { diff --git a/tests-fuzz/targets/migration/fuzz_migrate_metric_regions.rs b/tests-fuzz/targets/migration/fuzz_migrate_metric_regions.rs index 7f7257c078..834e30c5d6 100644 --- a/tests-fuzz/targets/migration/fuzz_migrate_metric_regions.rs +++ b/tests-fuzz/targets/migration/fuzz_migrate_metric_regions.rs @@ -24,35 +24,35 @@ use common_telemetry::info; use libfuzzer_sys::fuzz_target; use rand::{Rng, SeedableRng}; use rand_chacha::{ChaCha20Rng, ChaChaRng}; -use snafu::{ensure, ResultExt}; +use snafu::{ResultExt, ensure}; use sqlx::{Executor, MySql, Pool}; use store_api::storage::RegionId; use tests_fuzz::context::{TableContext, TableContextRef}; use tests_fuzz::error::{self, Result}; use tests_fuzz::fake::{ - merge_two_word_map_fn, random_capitalize_map, uppercase_and_keyword_backtick_map, - MappedGenerator, WordGenerator, + MappedGenerator, WordGenerator, merge_two_word_map_fn, random_capitalize_map, + uppercase_and_keyword_backtick_map, }; +use tests_fuzz::generator::Generator; use tests_fuzz::generator::create_expr::{ CreateLogicalTableExprGeneratorBuilder, CreatePhysicalTableExprGeneratorBuilder, }; use tests_fuzz::generator::insert_expr::InsertExprGeneratorBuilder; -use tests_fuzz::generator::Generator; use tests_fuzz::ir::{ - generate_random_timestamp_for_mysql, generate_random_value, CreateTableExpr, Ident, - InsertIntoExpr, + CreateTableExpr, Ident, InsertIntoExpr, generate_random_timestamp_for_mysql, + generate_random_value, }; +use tests_fuzz::translator::DslTranslator; use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator; use tests_fuzz::translator::mysql::insert_expr::InsertIntoExprTranslator; -use tests_fuzz::translator::DslTranslator; -use tests_fuzz::utils::cluster_info::{fetch_nodes, PEER_TYPE_DATANODE}; +use tests_fuzz::utils::cluster_info::{PEER_TYPE_DATANODE, fetch_nodes}; use tests_fuzz::utils::migration::migrate_region; use tests_fuzz::utils::partition::{fetch_partition, fetch_partitions, region_distribution}; use tests_fuzz::utils::procedure::procedure_state; use tests_fuzz::utils::wait::wait_condition_fn; use tests_fuzz::utils::{ - compact_table, flush_memtable, get_gt_fuzz_input_max_rows, get_gt_fuzz_input_max_tables, - init_greptime_connections_via_env, Connections, + Connections, compact_table, flush_memtable, get_gt_fuzz_input_max_rows, + get_gt_fuzz_input_max_tables, init_greptime_connections_via_env, }; use tests_fuzz::validator::row::count_values; @@ -264,7 +264,9 @@ async fn migrate_regions(ctx: &FuzzContext, migrations: &[Migration]) -> Result< { let procedure_id = migrate_region(&ctx.greptime, region_id.as_u64(), *from_peer, *to_peer, 120).await; - info!("Migrating region: {region_id} from {from_peer} to {to_peer}, procedure: {procedure_id}"); + info!( + "Migrating region: {region_id} from {from_peer} to {to_peer}, procedure: {procedure_id}" + ); procedure_ids.push(procedure_id); } for (migration, procedure_id) in migrations.iter().zip(procedure_ids) { diff --git a/tests-fuzz/targets/migration/fuzz_migrate_mito_regions.rs b/tests-fuzz/targets/migration/fuzz_migrate_mito_regions.rs index fe9fa3dc6e..a5ab0ad856 100644 --- a/tests-fuzz/targets/migration/fuzz_migrate_mito_regions.rs +++ b/tests-fuzz/targets/migration/fuzz_migrate_mito_regions.rs @@ -24,32 +24,33 @@ use common_telemetry::info; use libfuzzer_sys::fuzz_target; use rand::{Rng, SeedableRng}; use rand_chacha::ChaChaRng; -use snafu::{ensure, ResultExt}; +use 
snafu::{ResultExt, ensure}; use sqlx::{Executor, MySql, Pool}; use store_api::storage::RegionId; use tests_fuzz::context::{TableContext, TableContextRef}; use tests_fuzz::error::{self, Result}; use tests_fuzz::fake::{ - merge_two_word_map_fn, random_capitalize_map, uppercase_and_keyword_backtick_map, - MappedGenerator, WordGenerator, + MappedGenerator, WordGenerator, merge_two_word_map_fn, random_capitalize_map, + uppercase_and_keyword_backtick_map, }; +use tests_fuzz::generator::Generator; use tests_fuzz::generator::create_expr::CreateTableExprGeneratorBuilder; use tests_fuzz::generator::insert_expr::InsertExprGeneratorBuilder; -use tests_fuzz::generator::Generator; use tests_fuzz::ir::{ - format_columns, generate_random_value, generate_unique_timestamp_for_mysql, replace_default, - sort_by_primary_keys, CreateTableExpr, InsertIntoExpr, MySQLTsColumnTypeGenerator, + CreateTableExpr, InsertIntoExpr, MySQLTsColumnTypeGenerator, format_columns, + generate_random_value, generate_unique_timestamp_for_mysql, replace_default, + sort_by_primary_keys, }; +use tests_fuzz::translator::DslTranslator; use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator; use tests_fuzz::translator::mysql::insert_expr::InsertIntoExprTranslator; -use tests_fuzz::translator::DslTranslator; -use tests_fuzz::utils::cluster_info::{fetch_nodes, PEER_TYPE_DATANODE}; +use tests_fuzz::utils::cluster_info::{PEER_TYPE_DATANODE, fetch_nodes}; use tests_fuzz::utils::migration::migrate_region; use tests_fuzz::utils::partition::{fetch_partition, fetch_partitions, region_distribution}; use tests_fuzz::utils::procedure::procedure_state; use tests_fuzz::utils::wait::wait_condition_fn; use tests_fuzz::utils::{ - compact_table, flush_memtable, init_greptime_connections_via_env, Connections, + Connections, compact_table, flush_memtable, init_greptime_connections_via_env, }; use tests_fuzz::validator; @@ -233,7 +234,9 @@ async fn migrate_regions(ctx: &FuzzContext, migrations: &[Migration]) -> Result< { let procedure_id = migrate_region(&ctx.greptime, region_id.as_u64(), *from_peer, *to_peer, 240).await; - info!("Migrating region: {region_id} from {from_peer} to {to_peer}, procedure: {procedure_id}"); + info!( + "Migrating region: {region_id} from {from_peer} to {to_peer}, procedure: {procedure_id}" + ); procedure_ids.push(procedure_id); } diff --git a/tests-fuzz/targets/unstable/fuzz_create_table_standalone.rs b/tests-fuzz/targets/unstable/fuzz_create_table_standalone.rs index 53369e9792..8e95cc920c 100644 --- a/tests-fuzz/targets/unstable/fuzz_create_table_standalone.rs +++ b/tests-fuzz/targets/unstable/fuzz_create_table_standalone.rs @@ -16,8 +16,8 @@ use std::collections::HashMap; use std::fs::create_dir_all; -use std::sync::atomic::AtomicBool; use std::sync::Arc; +use std::sync::atomic::AtomicBool; use std::time::Duration; use common_telemetry::info; @@ -33,14 +33,14 @@ use sqlx::{MySql, Pool}; use tests_fuzz::context::TableContext; use tests_fuzz::error::Result; use tests_fuzz::fake::{ - merge_two_word_map_fn, random_capitalize_map, uppercase_and_keyword_backtick_map, - MappedGenerator, WordGenerator, + MappedGenerator, WordGenerator, merge_two_word_map_fn, random_capitalize_map, + uppercase_and_keyword_backtick_map, }; -use tests_fuzz::generator::create_expr::CreateTableExprGeneratorBuilder; use tests_fuzz::generator::Generator; +use tests_fuzz::generator::create_expr::CreateTableExprGeneratorBuilder; use tests_fuzz::ir::CreateTableExpr; -use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator; 
use tests_fuzz::translator::DslTranslator; +use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator; use tests_fuzz::utils::config::{get_conf_path, write_config_file}; use tests_fuzz::utils::health::HttpHealthChecker; use tests_fuzz::utils::process::{ProcessManager, ProcessState, UnstableProcessController}; diff --git a/tests-integration/src/cluster.rs b/tests-integration/src/cluster.rs index 1c13e52e2a..e73f02b467 100644 --- a/tests-integration/src/cluster.rs +++ b/tests-integration/src/cluster.rs @@ -28,30 +28,30 @@ use cache::{ use catalog::information_extension::DistributedInformationExtension; use catalog::kvbackend::{CachedKvBackendBuilder, KvBackendCatalogManagerBuilder, MetaKvBackend}; use catalog::process_manager::ProcessManager; -use client::client_manager::NodeClients; use client::Client; +use client::client_manager::NodeClients; use common_base::Plugins; use common_grpc::channel_manager::{ChannelConfig, ChannelManager}; +use common_meta::DatanodeId; use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder}; +use common_meta::heartbeat::handler::HandlerGroupExecutor; use common_meta::heartbeat::handler::invalidate_table_cache::InvalidateCacheHandler; use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler; -use common_meta::heartbeat::handler::HandlerGroupExecutor; +use common_meta::kv_backend::KvBackendRef; use common_meta::kv_backend::chroot::ChrootKvBackend; use common_meta::kv_backend::etcd::EtcdStore; use common_meta::kv_backend::memory::MemoryKvBackend; -use common_meta::kv_backend::KvBackendRef; use common_meta::peer::Peer; -use common_meta::DatanodeId; -use common_runtime::runtime::BuilderBuild; use common_runtime::Builder as RuntimeBuilder; +use common_runtime::runtime::BuilderBuild; use common_test_util::temp_dir::create_temp_dir; use common_wal::config::{DatanodeWalConfig, MetasrvWalConfig}; use datanode::config::DatanodeOptions; use datanode::datanode::{Datanode, DatanodeBuilder, ProcedureConfig}; use frontend::frontend::{Frontend, FrontendOptions}; use frontend::heartbeat::HeartbeatTask; -use frontend::instance::builder::FrontendBuilder; use frontend::instance::Instance as FeInstance; +use frontend::instance::builder::FrontendBuilder; use frontend::server::Services; use hyper_util::rt::TokioIo; use meta_client::client::MetaClientBuilder; @@ -60,9 +60,9 @@ use meta_srv::metasrv::{Metasrv, MetasrvOptions, SelectorRef}; use meta_srv::mocks::MockInfo; use object_store::config::ObjectStoreConfig; use rand::Rng; +use servers::grpc::GrpcOptions; use servers::grpc::flight::FlightCraftWrapper; use servers::grpc::region_server::RegionServerRequestHandler; -use servers::grpc::GrpcOptions; use servers::heartbeat_options::HeartbeatOptions; use servers::server::ServerHandlers; use tempfile::TempDir; @@ -72,8 +72,8 @@ use tower::service_fn; use uuid::Uuid; use crate::test_util::{ - self, create_datanode_opts, create_tmp_dir_and_datanode_opts, FileDirGuard, StorageType, - TestGuard, PEER_PLACEHOLDER_ADDR, + self, FileDirGuard, PEER_PLACEHOLDER_ADDR, StorageType, TestGuard, create_datanode_opts, + create_tmp_dir_and_datanode_opts, }; pub struct GreptimeDbCluster { diff --git a/tests-integration/src/grpc.rs b/tests-integration/src/grpc.rs index 31d9e34bb6..6a5efda46a 100644 --- a/tests-integration/src/grpc.rs +++ b/tests-integration/src/grpc.rs @@ -14,9 +14,9 @@ mod flight; +use api::v1::QueryRequest; use api::v1::greptime_request::Request; use api::v1::query_request::Query; -use api::v1::QueryRequest; use 
common_query::OutputData; use common_recordbatch::RecordBatches; use frontend::instance::Instance; @@ -51,10 +51,10 @@ mod test { use api::v1::query_request::Query; use api::v1::region::QueryRequest as RegionQueryRequest; use api::v1::{ - alter_table_expr, AddColumn, AddColumns, AlterTableExpr, Column, ColumnDataType, - ColumnDataTypeExtension, ColumnDef, CreateDatabaseExpr, CreateTableExpr, DdlRequest, - DeleteRequest, DeleteRequests, DropTableExpr, InsertRequest, InsertRequests, QueryRequest, - SemanticType, VectorTypeExtension, + AddColumn, AddColumns, AlterTableExpr, Column, ColumnDataType, ColumnDataTypeExtension, + ColumnDef, CreateDatabaseExpr, CreateTableExpr, DdlRequest, DeleteRequest, DeleteRequests, + DropTableExpr, InsertRequest, InsertRequests, QueryRequest, SemanticType, + VectorTypeExtension, alter_table_expr, }; use client::OutputData; use common_catalog::consts::MITO_ENGINE; @@ -76,10 +76,10 @@ mod test { use super::*; use crate::standalone::GreptimeDbStandaloneBuilder; use crate::tests; - use crate::tests::test_util::{ - both_instances_cases, distributed, execute_sql, standalone, MockInstance, - }; use crate::tests::MockDistributedInstance; + use crate::tests::test_util::{ + MockInstance, both_instances_cases, distributed, execute_sql, standalone, + }; #[tokio::test(flavor = "multi_thread")] async fn test_distributed_handle_ddl_request() { @@ -371,18 +371,20 @@ mod test { } async fn verify_table_is_dropped(instance: &MockDistributedInstance) { - assert!(instance - .frontend() - .catalog_manager() - .table( - "greptime", - "database_created_through_grpc", - "table_created_through_grpc", - None, - ) - .await - .unwrap() - .is_none()); + assert!( + instance + .frontend() + .catalog_manager() + .table( + "greptime", + "database_created_through_grpc", + "table_created_through_grpc", + None, + ) + .await + .unwrap() + .is_none() + ); } #[tokio::test(flavor = "multi_thread")] @@ -497,7 +499,9 @@ CREATE TABLE {table_name} ( let instance = standalone.fe_instance(); let table_name = "my_table"; - let sql = format!("CREATE TABLE {table_name} (a INT, b STRING, c JSON, ts TIMESTAMP, TIME INDEX (ts), PRIMARY KEY (a, b, c))"); + let sql = format!( + "CREATE TABLE {table_name} (a INT, b STRING, c JSON, ts TIMESTAMP, TIME INDEX (ts), PRIMARY KEY (a, b, c))" + ); create_table(instance, sql).await; test_insert_delete_and_query_on_existing_table(instance, table_name).await; @@ -1075,7 +1079,9 @@ CREATE TABLE {table_name} ( let instance = standalone.fe_instance(); let table_name = "my_table"; - let sql = format!("CREATE TABLE {table_name} (h string, a double, ts TIMESTAMP, TIME INDEX (ts), PRIMARY KEY(h))"); + let sql = format!( + "CREATE TABLE {table_name} (h string, a double, ts TIMESTAMP, TIME INDEX (ts), PRIMARY KEY(h))" + ); create_table(instance, sql).await; let insert = InsertRequest { diff --git a/tests-integration/src/grpc/flight.rs b/tests-integration/src/grpc/flight.rs index 590964d8f2..af940f069c 100644 --- a/tests-integration/src/grpc/flight.rs +++ b/tests-integration/src/grpc/flight.rs @@ -40,7 +40,7 @@ mod test { use crate::cluster::GreptimeDbClusterBuilder; use crate::grpc::query_and_expect; - use crate::test_util::{setup_grpc_server, StorageType}; + use crate::test_util::{StorageType, setup_grpc_server}; use crate::tests::test_util::MockInstance; #[tokio::test(flavor = "multi_thread")] diff --git a/tests-integration/src/influxdb.rs b/tests-integration/src/influxdb.rs index 17f29ff6c5..85ea8fc2a7 100644 --- a/tests-integration/src/influxdb.rs +++ 
b/tests-integration/src/influxdb.rs @@ -21,11 +21,11 @@ mod test { use rstest::rstest; use rstest_reuse::apply; use servers::influxdb::InfluxdbRequest; - use servers::query_handler::sql::SqlQueryHandler; use servers::query_handler::InfluxdbLineProtocolHandler; + use servers::query_handler::sql::SqlQueryHandler; use session::context::QueryContext; - use crate::tests::test_util::{both_instances_cases, distributed, standalone, MockInstance}; + use crate::tests::test_util::{MockInstance, both_instances_cases, distributed, standalone}; #[apply(both_instances_cases)] async fn test_put_influxdb_lines_without_time_column(instance: Arc) { diff --git a/tests-integration/src/instance.rs b/tests-integration/src/instance.rs index 49bd516e90..f58f2ece3c 100644 --- a/tests-integration/src/instance.rs +++ b/tests-integration/src/instance.rs @@ -16,8 +16,8 @@ mod tests { use std::borrow::Cow; use std::collections::HashMap; - use std::sync::atomic::AtomicU32; use std::sync::Arc; + use std::sync::atomic::AtomicU32; use api::v1::region::QueryRequest; use client::OutputData; @@ -278,13 +278,15 @@ mod tests { } async fn verify_table_is_dropped(instance: &MockDistributedInstance) { - assert!(instance - .frontend() - .catalog_manager() - .table("greptime", "public", "demo", None) - .await - .unwrap() - .is_none()) + assert!( + instance + .frontend() + .catalog_manager() + .table("greptime", "public", "demo", None) + .await + .unwrap() + .is_none() + ) } #[tokio::test(flavor = "multi_thread")] @@ -395,7 +397,7 @@ mod tests { Statement::CreateDatabase(_) | Statement::ShowDatabases(_) => { return Err(Error::NotSupported { feat: "Database operations".to_owned(), - }) + }); } _ => {} } diff --git a/tests-integration/src/opentsdb.rs b/tests-integration/src/opentsdb.rs index af16cb36bc..dc256361a9 100644 --- a/tests-integration/src/opentsdb.rs +++ b/tests-integration/src/opentsdb.rs @@ -21,8 +21,8 @@ mod tests { use frontend::instance::Instance; use itertools::Itertools; use servers::opentsdb::codec::DataPoint; - use servers::query_handler::sql::SqlQueryHandler; use servers::query_handler::OpentsdbProtocolHandler; + use servers::query_handler::sql::SqlQueryHandler; use session::context::QueryContext; use crate::standalone::GreptimeDbStandaloneBuilder; diff --git a/tests-integration/src/otlp.rs b/tests-integration/src/otlp.rs index 2651ca24e2..e0c496fb13 100644 --- a/tests-integration/src/otlp.rs +++ b/tests-integration/src/otlp.rs @@ -16,7 +16,7 @@ mod test { use std::sync::Arc; - use client::{OutputData, DEFAULT_CATALOG_NAME}; + use client::{DEFAULT_CATALOG_NAME, OutputData}; use common_recordbatch::RecordBatches; use frontend::instance::Instance; use otel_arrow_rust::proto::opentelemetry::collector::metrics::v1::ExportMetricsServiceRequest; @@ -26,12 +26,12 @@ mod test { }; use otel_arrow_rust::proto::opentelemetry::metrics::v1::number_data_point::Value; use otel_arrow_rust::proto::opentelemetry::metrics::v1::{ - metric, Gauge, Histogram, HistogramDataPoint, Metric, NumberDataPoint, ResourceMetrics, - ScopeMetrics, + Gauge, Histogram, HistogramDataPoint, Metric, NumberDataPoint, ResourceMetrics, + ScopeMetrics, metric, }; use otel_arrow_rust::proto::opentelemetry::resource::v1::Resource; - use servers::query_handler::sql::SqlQueryHandler; use servers::query_handler::OpenTelemetryProtocolHandler; + use servers::query_handler::sql::SqlQueryHandler; use session::context::QueryContext; use crate::standalone::GreptimeDbStandaloneBuilder; @@ -59,15 +59,17 @@ mod test { let db = "otlp"; let ctx = 
Arc::new(QueryContext::with(DEFAULT_CATALOG_NAME, db)); - assert!(SqlQueryHandler::do_query( - instance.as_ref(), - &format!("CREATE DATABASE IF NOT EXISTS {db}"), - ctx.clone(), - ) - .await - .first() - .unwrap() - .is_ok()); + assert!( + SqlQueryHandler::do_query( + instance.as_ref(), + &format!("CREATE DATABASE IF NOT EXISTS {db}"), + ctx.clone(), + ) + .await + .first() + .unwrap() + .is_ok() + ); let resp = instance.metrics(req, ctx.clone()).await; assert!(resp.is_ok()); diff --git a/tests-integration/src/prom_store.rs b/tests-integration/src/prom_store.rs index 4543202973..0649b2dd77 100644 --- a/tests-integration/src/prom_store.rs +++ b/tests-integration/src/prom_store.rs @@ -26,8 +26,8 @@ mod tests { use servers::http::prom_store::PHYSICAL_TABLE_PARAM; use servers::prom_store; use servers::prom_store::to_grpc_row_insert_requests; - use servers::query_handler::sql::SqlQueryHandler; use servers::query_handler::PromStoreProtocolHandler; + use servers::query_handler::sql::SqlQueryHandler; use session::context::QueryContext; use crate::standalone::GreptimeDbStandaloneBuilder; @@ -98,15 +98,17 @@ mod tests { } let ctx = Arc::new(ctx); - assert!(SqlQueryHandler::do_query( - instance.as_ref(), - "CREATE DATABASE IF NOT EXISTS prometheus", - ctx.clone(), - ) - .await - .first() - .unwrap() - .is_ok()); + assert!( + SqlQueryHandler::do_query( + instance.as_ref(), + "CREATE DATABASE IF NOT EXISTS prometheus", + ctx.clone(), + ) + .await + .first() + .unwrap() + .is_ok() + ); let (row_inserts, _) = to_grpc_row_insert_requests(&write_request).unwrap(); instance diff --git a/tests-integration/src/standalone.rs b/tests-integration/src/standalone.rs index 6bf144d320..2c1c296d55 100644 --- a/tests-integration/src/standalone.rs +++ b/tests-integration/src/standalone.rs @@ -32,16 +32,16 @@ use common_meta::ddl::flow_meta::FlowMetadataAllocator; use common_meta::ddl::table_meta::TableMetadataAllocator; use common_meta::ddl::{DdlContext, NoopRegionFailureDetectorControl}; use common_meta::ddl_manager::DdlManager; -use common_meta::key::flow::FlowMetadataManager; use common_meta::key::TableMetadataManager; +use common_meta::key::flow::FlowMetadataManager; use common_meta::kv_backend::KvBackendRef; use common_meta::procedure_executor::LocalProcedureExecutor; use common_meta::region_keeper::MemoryRegionKeeper; use common_meta::region_registry::LeaderRegionRegistry; use common_meta::sequence::SequenceBuilder; use common_meta::wal_options_allocator::build_wal_options_allocator; -use common_procedure::options::ProcedureConfig; use common_procedure::ProcedureManagerRef; +use common_procedure::options::ProcedureConfig; use common_telemetry::logging::SlowQueryOptions; use common_wal::config::{DatanodeWalConfig, MetasrvWalConfig}; use datanode::datanode::DatanodeBuilder; @@ -54,7 +54,7 @@ use servers::grpc::GrpcOptions; use servers::server::ServerHandlers; use snafu::ResultExt; -use crate::test_util::{self, create_tmp_dir_and_datanode_opts, StorageType, TestGuard}; +use crate::test_util::{self, StorageType, TestGuard, create_tmp_dir_and_datanode_opts}; pub struct GreptimeDbStandalone { pub frontend: Arc, diff --git a/tests-integration/src/test_util.rs b/tests-integration/src/test_util.rs index 9396205737..b483be1c43 100644 --- a/tests-integration/src/test_util.rs +++ b/tests-integration/src/test_util.rs @@ -27,17 +27,17 @@ use common_meta::key::schema_name::SchemaNameKey; use common_runtime::runtime::BuilderBuild; use common_runtime::{Builder as RuntimeBuilder, Runtime}; use common_test_util::ports; -use 
common_test_util::temp_dir::{create_temp_dir, TempDir}; +use common_test_util::temp_dir::{TempDir, create_temp_dir}; use common_wal::config::DatanodeWalConfig; use datanode::config::{DatanodeOptions, StorageConfig}; use frontend::instance::Instance; use frontend::service_config::{MysqlOptions, PostgresOptions}; +use object_store::ObjectStore; use object_store::config::{ AzblobConfig, FileConfig, GcsConfig, ObjectStoreConfig, OssConfig, S3Config, }; use object_store::services::{Azblob, Gcs, Oss, S3}; use object_store::test_util::TempFolder; -use object_store::ObjectStore; use servers::grpc::builder::GrpcServerBuilder; use servers::grpc::greptime_handler::GreptimeRequestHandler; use servers::grpc::{FlightCompression, GrpcOptions, GrpcServer, GrpcServerConfig}; @@ -83,10 +83,10 @@ impl StorageType { pub fn build_storage_types_based_on_env() -> Vec { let mut storage_types = Vec::with_capacity(4); storage_types.push(StorageType::File); - if let Ok(bucket) = env::var("GT_S3_BUCKET") { - if !bucket.is_empty() { - storage_types.push(StorageType::S3); - } + if let Ok(bucket) = env::var("GT_S3_BUCKET") + && !bucket.is_empty() + { + storage_types.push(StorageType::S3); } if env::var("GT_OSS_BUCKET").is_ok() { storage_types.push(StorageType::Oss); @@ -309,10 +309,9 @@ impl Drop for TestGuard { | TempDirGuard::Oss(guard) | TempDirGuard::Azblob(guard) | TempDirGuard::Gcs(guard) = guard.0 + && let Err(e) = guard.remove_all().await { - if let Err(e) = guard.remove_all().await { - errors.push(e); - } + errors.push(e); } } if errors.is_empty() { @@ -483,7 +482,9 @@ pub async fn setup_test_prom_app_with_frontend( store_type: StorageType, name: &str, ) -> (Router, TestGuard) { - std::env::set_var("TZ", "UTC"); + unsafe { + std::env::set_var("TZ", "UTC"); + } let instance = setup_standalone_instance(name, store_type).await; diff --git a/tests-integration/src/tests/instance_kafka_wal_test.rs b/tests-integration/src/tests/instance_kafka_wal_test.rs index 19383e287a..d019a42387 100644 --- a/tests-integration/src/tests/instance_kafka_wal_test.rs +++ b/tests-integration/src/tests/instance_kafka_wal_test.rs @@ -13,16 +13,16 @@ // limitations under the License. 
use std::assert_matches::assert_matches; -use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::Arc; +use std::sync::atomic::{AtomicU64, Ordering}; use client::DEFAULT_CATALOG_NAME; use common_query::{Output, OutputData}; use datatypes::vectors::{TimestampMillisecondVector, VectorRef}; use frontend::instance::Instance; use itertools::Itertools; -use rand::rngs::ThreadRng; use rand::Rng; +use rand::rngs::ThreadRng; use rstest::rstest; use rstest_reuse::apply; use servers::query_handler::sql::SqlQueryHandler; diff --git a/tests-integration/src/tests/instance_test.rs b/tests-integration/src/tests/instance_test.rs index 8262d56b5c..95664323ff 100644 --- a/tests-integration/src/tests/instance_test.rs +++ b/tests-integration/src/tests/instance_test.rs @@ -16,7 +16,7 @@ use std::assert_matches::assert_matches; use std::env; use std::sync::Arc; -use client::{OutputData, DEFAULT_SCHEMA_NAME}; +use client::{DEFAULT_SCHEMA_NAME, OutputData}; use common_catalog::consts::DEFAULT_CATALOG_NAME; use common_query::Output; use common_recordbatch::util; @@ -32,9 +32,10 @@ use servers::query_handler::sql::SqlQueryHandler; use session::context::{QueryContext, QueryContextRef}; use crate::tests::test_util::{ - both_instances_cases, both_instances_cases_with_custom_storages, check_unordered_output_stream, - distributed, distributed_with_multiple_object_stores, find_testing_resource, prepare_path, - standalone, standalone_instance_case, standalone_with_multiple_object_stores, MockInstance, + MockInstance, both_instances_cases, both_instances_cases_with_custom_storages, + check_unordered_output_stream, distributed, distributed_with_multiple_object_stores, + find_testing_resource, prepare_path, standalone, standalone_instance_case, + standalone_with_multiple_object_stores, }; #[apply(both_instances_cases)] @@ -180,7 +181,9 @@ async fn test_extra_external_table_options(instance: Arc) { #[apply(both_instances_cases)] async fn test_show_create_external_table(instance: Arc) { - std::env::set_var("TZ", "UTC"); + unsafe { + std::env::set_var("TZ", "UTC"); + } let fe_instance = instance.frontend(); let format = "csv"; @@ -675,7 +678,9 @@ async fn test_execute_external_create_with_invalid_ts(instance: Arc) { - std::env::set_var("TZ", "UTC"); + unsafe { + std::env::set_var("TZ", "UTC"); + } let instance = instance.frontend(); let format = "parquet"; @@ -748,7 +753,9 @@ async fn test_execute_query_external_table_parquet(instance: Arc) { - std::env::set_var("TZ", "UTC"); + unsafe { + std::env::set_var("TZ", "UTC"); + } let instance = instance.frontend(); let format = "orc"; @@ -831,7 +838,9 @@ async fn test_execute_query_external_table_orc(instance: Arc) #[apply(both_instances_cases)] async fn test_execute_query_external_table_orc_with_schema(instance: Arc) { - std::env::set_var("TZ", "UTC"); + unsafe { + std::env::set_var("TZ", "UTC"); + } let instance = instance.frontend(); let format = "orc"; @@ -887,7 +896,9 @@ async fn test_execute_query_external_table_orc_with_schema(instance: Arc) { - std::env::set_var("TZ", "UTC"); + unsafe { + std::env::set_var("TZ", "UTC"); + } let instance = instance.frontend(); let format = "csv"; @@ -940,7 +951,9 @@ async fn test_execute_query_external_table_csv(instance: Arc) #[apply(both_instances_cases)] async fn test_execute_query_external_table_json(instance: Arc) { - std::env::set_var("TZ", "UTC"); + unsafe { + std::env::set_var("TZ", "UTC"); + } let instance = instance.frontend(); let format = "json"; @@ -1000,7 +1013,9 @@ async fn test_execute_query_external_table_json(instance: 
Arc) #[apply(both_instances_cases)] async fn test_execute_query_external_table_json_with_schema(instance: Arc) { - std::env::set_var("TZ", "UTC"); + unsafe { + std::env::set_var("TZ", "UTC"); + } let instance = instance.frontend(); let format = "json"; @@ -1069,7 +1084,9 @@ async fn test_execute_query_external_table_json_with_schema(instance: Arc) { - std::env::set_var("TZ", "UTC"); + unsafe { + std::env::set_var("TZ", "UTC"); + } let instance = instance.frontend(); let format = "json"; @@ -1142,7 +1159,9 @@ async fn test_execute_query_external_table_json_type_cast(instance: Arc) { - std::env::set_var("TZ", "UTC"); + unsafe { + std::env::set_var("TZ", "UTC"); + } let instance = instance.frontend(); let format = "json"; @@ -1596,39 +1615,42 @@ async fn test_delete(instance: Arc) { #[apply(both_instances_cases)] async fn test_execute_copy_to_s3(instance: Arc) { - if let Ok(bucket) = env::var("GT_S3_BUCKET") { - if !bucket.is_empty() { - let instance = instance.frontend(); + if let Ok(bucket) = env::var("GT_S3_BUCKET") + && !bucket.is_empty() + { + let instance = instance.frontend(); - // setups - assert!(matches!(execute_sql( + // setups + assert!(matches!(execute_sql( &instance, "create table demo(host string, cpu double, memory double, ts timestamp time index);", ) .await.data, OutputData::AffectedRows(0))); - let output = execute_sql( - &instance, - r#"insert into demo(host, cpu, memory, ts) values + let output = execute_sql( + &instance, + r#"insert into demo(host, cpu, memory, ts) values ('host1', 66.6, 1024, 1655276557000), ('host2', 88.8, 333.3, 1655276558000) "#, - ) - .await - .data; - assert!(matches!(output, OutputData::AffectedRows(2))); - let key_id = env::var("GT_S3_ACCESS_KEY_ID").unwrap(); - let key = env::var("GT_S3_ACCESS_KEY").unwrap(); - let region = env::var("GT_S3_REGION").unwrap(); + ) + .await + .data; + assert!(matches!(output, OutputData::AffectedRows(2))); + let key_id = env::var("GT_S3_ACCESS_KEY_ID").unwrap(); + let key = env::var("GT_S3_ACCESS_KEY").unwrap(); + let region = env::var("GT_S3_REGION").unwrap(); - let root = uuid::Uuid::new_v4().to_string(); + let root = uuid::Uuid::new_v4().to_string(); - // exports - let copy_to_stmt = format!("Copy demo TO 's3://{}/{}/export/demo.parquet' CONNECTION (ACCESS_KEY_ID='{}',SECRET_ACCESS_KEY='{}',REGION='{}')", bucket, root, key_id, key, region); + // exports + let copy_to_stmt = format!( + "Copy demo TO 's3://{}/{}/export/demo.parquet' CONNECTION (ACCESS_KEY_ID='{}',SECRET_ACCESS_KEY='{}',REGION='{}')", + bucket, root, key_id, key, region + ); - let output = execute_sql(&instance, ©_to_stmt).await.data; - assert!(matches!(output, OutputData::AffectedRows(2))); - } + let output = execute_sql(&instance, ©_to_stmt).await.data; + assert!(matches!(output, OutputData::AffectedRows(2))); } } @@ -1637,67 +1659,71 @@ async fn test_execute_copy_from_s3(instance: Arc) { use common_telemetry::info; common_telemetry::init_default_ut_logging(); - if let Ok(bucket) = env::var("GT_S3_BUCKET") { - if !bucket.is_empty() { - let instance = instance.frontend(); + if let Ok(bucket) = env::var("GT_S3_BUCKET") + && !bucket.is_empty() + { + let instance = instance.frontend(); - // setups - assert!(matches!(execute_sql( + // setups + assert!(matches!(execute_sql( &instance, "create table demo(host string, cpu double, memory double, ts timestamp time index);", ) .await.data, OutputData::AffectedRows(0))); - let output = execute_sql( - &instance, - r#"insert into demo(host, cpu, memory, ts) values + let output = execute_sql( + &instance, + 
r#"insert into demo(host, cpu, memory, ts) values ('host1', 66.6, 1024, 1655276557000), ('host2', 88.8, 333.3, 1655276558000) "#, - ) - .await - .data; - assert!(matches!(output, OutputData::AffectedRows(2))); + ) + .await + .data; + assert!(matches!(output, OutputData::AffectedRows(2))); - // export - let root = uuid::Uuid::new_v4().to_string(); - let key_id = env::var("GT_S3_ACCESS_KEY_ID").unwrap(); - let key = env::var("GT_S3_ACCESS_KEY").unwrap(); - let region = env::var("GT_S3_REGION").unwrap(); + // export + let root = uuid::Uuid::new_v4().to_string(); + let key_id = env::var("GT_S3_ACCESS_KEY_ID").unwrap(); + let key = env::var("GT_S3_ACCESS_KEY").unwrap(); + let region = env::var("GT_S3_REGION").unwrap(); - let copy_to_stmt = format!("Copy demo TO 's3://{}/{}/export/demo.parquet' CONNECTION (ACCESS_KEY_ID='{}',SECRET_ACCESS_KEY='{}',REGION='{}')", bucket, root, key_id, key, region); + let copy_to_stmt = format!( + "Copy demo TO 's3://{}/{}/export/demo.parquet' CONNECTION (ACCESS_KEY_ID='{}',SECRET_ACCESS_KEY='{}',REGION='{}')", + bucket, root, key_id, key, region + ); - let output = execute_sql(&instance, ©_to_stmt).await.data; - assert!(matches!(output, OutputData::AffectedRows(2))); + let output = execute_sql(&instance, ©_to_stmt).await.data; + assert!(matches!(output, OutputData::AffectedRows(2))); - struct Test<'a> { - sql: &'a str, - table_name: &'a str, - } - let tests = [ - Test { - sql: &format!( - "Copy with_filename FROM 's3://{}/{}/export/demo.parquet'", - bucket, root - ), - table_name: "with_filename", - }, - Test { - sql: &format!("Copy with_path FROM 's3://{}/{}/export/'", bucket, root), - table_name: "with_path", - }, - Test { - sql: &format!( - "Copy with_pattern FROM 's3://{}/{}/export/' WITH (PATTERN = 'demo.*')", - bucket, root - ), - table_name: "with_pattern", - }, - ]; + struct Test<'a> { + sql: &'a str, + table_name: &'a str, + } + let tests = [ + Test { + sql: &format!( + "Copy with_filename FROM 's3://{}/{}/export/demo.parquet'", + bucket, root + ), + table_name: "with_filename", + }, + Test { + sql: &format!("Copy with_path FROM 's3://{}/{}/export/'", bucket, root), + table_name: "with_path", + }, + Test { + sql: &format!( + "Copy with_pattern FROM 's3://{}/{}/export/' WITH (PATTERN = 'demo.*')", + bucket, root + ), + table_name: "with_pattern", + }, + ]; - for test in tests { - // import - assert!(matches!( + for test in tests { + // import + assert!(matches!( execute_sql( &instance, &format!( @@ -1709,30 +1735,29 @@ async fn test_execute_copy_from_s3(instance: Arc) { .data, OutputData::AffectedRows(0) )); - let sql = format!( - "{} CONNECTION (ACCESS_KEY_ID='{}',SECRET_ACCESS_KEY='{}',REGION='{}')", - test.sql, key_id, key, region, - ); - info!("Running sql: {}", sql); + let sql = format!( + "{} CONNECTION (ACCESS_KEY_ID='{}',SECRET_ACCESS_KEY='{}',REGION='{}')", + test.sql, key_id, key, region, + ); + info!("Running sql: {}", sql); - let output = execute_sql(&instance, &sql).await.data; - assert!(matches!(output, OutputData::AffectedRows(2))); + let output = execute_sql(&instance, &sql).await.data; + assert!(matches!(output, OutputData::AffectedRows(2))); - let output = execute_sql( - &instance, - &format!("select * from {} order by ts", test.table_name), - ) - .await - .data; - let expected = "\ + let output = execute_sql( + &instance, + &format!("select * from {} order by ts", test.table_name), + ) + .await + .data; + let expected = "\ +-------+------+--------+---------------------+ | host | cpu | memory | ts | 
+-------+------+--------+---------------------+ | host1 | 66.6 | 1024.0 | 2022-06-15T07:02:37 | | host2 | 88.8 | 333.3 | 2022-06-15T07:02:38 | +-------+------+--------+---------------------+"; - check_output_stream(output, expected).await; - } + check_output_stream(output, expected).await; } } } diff --git a/tests-integration/src/tests/reconcile_table.rs b/tests-integration/src/tests/reconcile_table.rs index 8a0bf9e26d..bd83a7d930 100644 --- a/tests-integration/src/tests/reconcile_table.rs +++ b/tests-integration/src/tests/reconcile_table.rs @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -use client::{OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; -use common_meta::reconciliation::manager::ReconciliationManagerRef; +use client::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, OutputData}; use common_meta::reconciliation::ResolveStrategy; +use common_meta::reconciliation::manager::ReconciliationManagerRef; use common_procedure::ProcedureManagerRef; use common_recordbatch::util::collect_batches; use common_test_util::recordbatch::check_output_stream; @@ -22,8 +22,8 @@ use table::table_reference::TableReference; use crate::cluster::GreptimeDbClusterBuilder; use crate::tests::test_util::{ - dump_kvbackend, execute_sql, restore_kvbackend, try_execute_sql, wait_procedure, MockInstance, - MockInstanceBuilder, RebuildableMockInstance, TestContext, + MockInstance, MockInstanceBuilder, RebuildableMockInstance, TestContext, dump_kvbackend, + execute_sql, restore_kvbackend, try_execute_sql, wait_procedure, }; const CREATE_MONITOR_TABLE_SQL: &str = r#" diff --git a/tests-integration/src/tests/test_util.rs b/tests-integration/src/tests/test_util.rs index 5c79b5b2d1..84efb2acf6 100644 --- a/tests-integration/src/tests/test_util.rs +++ b/tests-integration/src/tests/test_util.rs @@ -18,10 +18,10 @@ use std::sync::Arc; use async_trait::async_trait; use client::OutputData; use common_meta::kv_backend::KvBackendRef; -use common_meta::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE}; -use common_meta::rpc::store::{BatchPutRequest, DeleteRangeRequest, RangeRequest}; +use common_meta::range_stream::{DEFAULT_PAGE_SIZE, PaginationStream}; use common_meta::rpc::KeyValue; -use common_procedure::{watcher, ProcedureId, ProcedureManagerRef}; +use common_meta::rpc::store::{BatchPutRequest, DeleteRangeRequest, RangeRequest}; +use common_procedure::{ProcedureId, ProcedureManagerRef, watcher}; use common_query::Output; use common_recordbatch::util; use common_telemetry::tracing::info; @@ -41,7 +41,7 @@ use session::context::{QueryContext, QueryContextRef}; use crate::cluster::{GreptimeDbCluster, GreptimeDbClusterBuilder}; use crate::standalone::{GreptimeDbStandalone, GreptimeDbStandaloneBuilder}; use crate::test_util::StorageType; -use crate::tests::{create_distributed_instance, MockDistributedInstance}; +use crate::tests::{MockDistributedInstance, create_distributed_instance}; #[async_trait::async_trait] pub(crate) trait RebuildableMockInstance: MockInstance { diff --git a/tests-integration/tests/grpc.rs b/tests-integration/tests/grpc.rs index 7f03e84cf5..1ee3e4d09b 100644 --- a/tests-integration/tests/grpc.rs +++ b/tests-integration/tests/grpc.rs @@ -15,23 +15,23 @@ use api::v1::alter_table_expr::Kind; use api::v1::promql_request::Promql; use api::v1::{ - column, AddColumn, AddColumns, AlterTableExpr, Basic, Column, ColumnDataType, ColumnDef, + AddColumn, AddColumns, AlterTableExpr, Basic, Column, ColumnDataType, ColumnDef, 
CreateTableExpr, InsertRequest, InsertRequests, PromInstantQuery, PromRangeQuery, - PromqlRequest, RequestHeader, SemanticType, + PromqlRequest, RequestHeader, SemanticType, column, }; use auth::user_provider_from_option; -use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; +use client::{Client, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, Database, OutputData}; use common_catalog::consts::MITO_ENGINE; use common_grpc::channel_manager::ClientTlsOption; use common_query::Output; use common_recordbatch::RecordBatches; -use common_runtime::runtime::{BuilderBuild, RuntimeTrait}; use common_runtime::Runtime; +use common_runtime::runtime::{BuilderBuild, RuntimeTrait}; use common_test_util::find_workspace_path; -use otel_arrow_rust::proto::opentelemetry::arrow::v1::arrow_metrics_service_client::ArrowMetricsServiceClient; use otel_arrow_rust::proto::opentelemetry::arrow::v1::BatchArrowRecords; -use servers::grpc::builder::GrpcServerBuilder; +use otel_arrow_rust::proto::opentelemetry::arrow::v1::arrow_metrics_service_client::ArrowMetricsServiceClient; use servers::grpc::GrpcServerConfig; +use servers::grpc::builder::GrpcServerBuilder; use servers::http::prometheus::{ PromData, PromQueryResult, PromSeriesMatrix, PromSeriesVector, PrometheusJsonResponse, PrometheusResponse, @@ -39,10 +39,10 @@ use servers::http::prometheus::{ use servers::server::Server; use servers::tls::{TlsMode, TlsOption}; use tests_integration::test_util::{ - setup_grpc_server, setup_grpc_server_with, setup_grpc_server_with_user_provider, StorageType, + StorageType, setup_grpc_server, setup_grpc_server_with, setup_grpc_server_with_user_provider, }; -use tonic::metadata::MetadataValue; use tonic::Request; +use tonic::metadata::MetadataValue; #[macro_export] macro_rules! 
grpc_test { diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs index 4f031a4851..d02ed2ecd6 100644 --- a/tests-integration/tests/http.rs +++ b/tests-integration/tests/http.rs @@ -25,14 +25,14 @@ use auth::user_provider_from_option; use axum::http::{HeaderName, HeaderValue, StatusCode}; use chrono::Utc; use common_catalog::consts::{ - trace_services_table_name, DEFAULT_PRIVATE_SCHEMA_NAME, TRACE_TABLE_NAME, + DEFAULT_PRIVATE_SCHEMA_NAME, TRACE_TABLE_NAME, trace_services_table_name, }; use common_error::status_code::StatusCode as ErrorCode; use common_frontend::slow_query_event::{ SLOW_QUERY_TABLE_NAME, SLOW_QUERY_TABLE_QUERY_COLUMN_NAME, }; -use flate2::write::GzEncoder; use flate2::Compression; +use flate2::write::GzEncoder; use log_query::{Context, Limit, LogQuery, TimeFilter}; use loki_proto::logproto::{EntryAdapter, LabelPairAdapter, PushRequest, StreamAdapter}; use loki_proto::prost_types::Timestamp; @@ -41,7 +41,8 @@ use opentelemetry_proto::tonic::collector::metrics::v1::ExportMetricsServiceRequ use opentelemetry_proto::tonic::collector::trace::v1::ExportTraceServiceRequest; use pipeline::GREPTIME_INTERNAL_TRACE_PIPELINE_V1_NAME; use prost::Message; -use serde_json::{json, Value}; +use serde_json::{Value, json}; +use servers::http::GreptimeQueryOutput; use servers::http::handler::HealthResponse; use servers::http::header::constants::{ GREPTIME_LOG_TABLE_NAME_HEADER_NAME, GREPTIME_PIPELINE_NAME_HEADER_NAME, @@ -53,13 +54,11 @@ use servers::http::result::error_result::ErrorResponse; use servers::http::result::greptime_result_v1::GreptimedbV1Response; use servers::http::result::influxdb_result_v1::{InfluxdbOutput, InfluxdbV1Response}; use servers::http::test_helpers::{TestClient, TestResponse}; -use servers::http::GreptimeQueryOutput; use servers::prom_store::{self, mock_timeseries_new_label}; use table::table_name::TableName; use tests_integration::test_util::{ - setup_test_http_app, setup_test_http_app_with_frontend, + StorageType, setup_test_http_app, setup_test_http_app_with_frontend, setup_test_http_app_with_frontend_and_user_provider, setup_test_prom_app_with_frontend, - StorageType, }; use urlencoding::encode; use yaml_rust::YamlLoader; @@ -591,7 +590,10 @@ async fn test_sql_format_api() { .get("formatted") .and_then(|v| v.as_str()) .unwrap_or_default(); - assert_eq!(formatted, "WITH RECURSIVE slow_cte AS (SELECT 1 AS n, md5(CAST(random() AS STRING)) AS hash UNION ALL SELECT n + 1, md5(concat(hash, n)) FROM slow_cte WHERE n < 4500) SELECT COUNT(*) FROM slow_cte;"); + assert_eq!( + formatted, + "WITH RECURSIVE slow_cte AS (SELECT 1 AS n, md5(CAST(random() AS STRING)) AS hash UNION ALL SELECT n + 1, md5(concat(hash, n)) FROM slow_cte WHERE n < 4500) SELECT COUNT(*) FROM slow_cte;" + ); guard.remove_all().await; } @@ -639,7 +641,10 @@ pub async fn test_prometheus_promql_api(store_type: StorageType) { assert_eq!(res.status(), StatusCode::OK); let csv_body = &res.text().await; - assert_eq!("0,1.0\r\n5000,1.0\r\n10000,1.0\r\n15000,1.0\r\n20000,1.0\r\n25000,1.0\r\n30000,1.0\r\n35000,1.0\r\n40000,1.0\r\n45000,1.0\r\n50000,1.0\r\n55000,1.0\r\n60000,1.0\r\n65000,1.0\r\n70000,1.0\r\n75000,1.0\r\n80000,1.0\r\n85000,1.0\r\n90000,1.0\r\n95000,1.0\r\n100000,1.0\r\n", csv_body); + assert_eq!( + "0,1.0\r\n5000,1.0\r\n10000,1.0\r\n15000,1.0\r\n20000,1.0\r\n25000,1.0\r\n30000,1.0\r\n35000,1.0\r\n40000,1.0\r\n45000,1.0\r\n50000,1.0\r\n55000,1.0\r\n60000,1.0\r\n65000,1.0\r\n70000,1.0\r\n75000,1.0\r\n80000,1.0\r\n85000,1.0\r\n90000,1.0\r\n95000,1.0\r\n100000,1.0\r\n", + 
csv_body + ); guard.remove_all().await; } @@ -810,12 +815,16 @@ pub async fn test_prom_http_api(store_type: StorageType) { assert_eq!(res.status(), StatusCode::BAD_REQUEST); let prom_resp = res.json::().await; assert_eq!(prom_resp.status, "error"); - assert!(prom_resp - .error - .is_some_and(|err| err.eq_ignore_ascii_case("match[] parameter is required"))); - assert!(prom_resp - .error_type - .is_some_and(|err| err.eq_ignore_ascii_case("InvalidArguments"))); + assert!( + prom_resp + .error + .is_some_and(|err| err.eq_ignore_ascii_case("match[] parameter is required")) + ); + assert!( + prom_resp + .error_type + .is_some_and(|err| err.eq_ignore_ascii_case("InvalidArguments")) + ); // single match[] let res = client @@ -3638,7 +3647,12 @@ transform: .await; assert_eq!(res.status(), StatusCode::BAD_REQUEST); let body: Value = res.json().await; - assert_eq!(body["error"], json!("Invalid request parameter: invalid content type: application/yaml, expected: one of application/json, application/x-ndjson, text/plain")); + assert_eq!( + body["error"], + json!( + "Invalid request parameter: invalid content type: application/yaml, expected: one of application/json, application/x-ndjson, text/plain" + ) + ); body_for_text["data_type"] = json!("application/json"); let res = client @@ -3651,7 +3665,9 @@ transform: let body: Value = res.json().await; assert_eq!( body["error"], - json!("Invalid request parameter: json format error, please check the date is valid JSON.") + json!( + "Invalid request parameter: json format error, please check the date is valid JSON." + ) ); body_for_text["data_type"] = json!("text/plain"); @@ -4837,7 +4853,7 @@ processors: .await; // test content - let expected = "[[1730976830000,\"test\",\"integration\",\"do anything\",\"this is a log message\",\"value1\",\"value2\",null],[1730976831000,\"test\",\"integration\",\"do anything\",\"this is a log message 2\",null,null,\"value3\"],[1730976832000,\"test\",\"integration\",\"do anything\",\"this is a log message 2\",null,null,null]]"; + let expected = "[[1730976830000,\"test\",\"integration\",\"do anything\",\"this is a log message\",\"value1\",\"value2\",null],[1730976831000,\"test\",\"integration\",\"do anything\",\"this is a log message 2\",null,null,\"value3\"],[1730976832000,\"test\",\"integration\",\"do anything\",\"this is a log message 2\",null,null,null]]"; validate_data( "loki_pb_content", &client, @@ -4897,7 +4913,7 @@ pub async fn test_loki_json_logs(store_type: StorageType) { assert_eq!(StatusCode::OK, res.status()); // test schema - let expected = "[[\"loki_table_name\",\"CREATE TABLE IF NOT EXISTS \\\"loki_table_name\\\" (\\n \\\"greptime_timestamp\\\" TIMESTAMP(9) NOT NULL,\\n \\\"line\\\" STRING NULL,\\n \\\"structured_metadata\\\" JSON NULL,\\n \\\"sender\\\" STRING NULL,\\n \\\"source\\\" STRING NULL,\\n TIME INDEX (\\\"greptime_timestamp\\\"),\\n PRIMARY KEY (\\\"sender\\\", \\\"source\\\")\\n)\\n\\nENGINE=mito\\nWITH(\\n append_mode = 'true'\\n)\"]]"; + let expected = "[[\"loki_table_name\",\"CREATE TABLE IF NOT EXISTS \\\"loki_table_name\\\" (\\n \\\"greptime_timestamp\\\" TIMESTAMP(9) NOT NULL,\\n \\\"line\\\" STRING NULL,\\n \\\"structured_metadata\\\" JSON NULL,\\n \\\"sender\\\" STRING NULL,\\n \\\"source\\\" STRING NULL,\\n TIME INDEX (\\\"greptime_timestamp\\\"),\\n PRIMARY KEY (\\\"sender\\\", \\\"source\\\")\\n)\\n\\nENGINE=mito\\nWITH(\\n append_mode = 'true'\\n)\"]]"; validate_data( "loki_json_schema", &client, diff --git a/tests-integration/tests/region_migration.rs 
b/tests-integration/tests/region_migration.rs index eb2814e732..732f3b77f7 100644 --- a/tests-integration/tests/region_migration.rs +++ b/tests-integration/tests/region_migration.rs @@ -15,7 +15,7 @@ use std::sync::Arc; use std::time::Duration; -use client::{OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; +use client::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, OutputData}; use common_catalog::consts::DEFAULT_PRIVATE_SCHEMA_NAME; use common_event_recorder::{ DEFAULT_EVENTS_TABLE_NAME, DEFAULT_FLUSH_INTERVAL_SECONDS, EVENTS_TABLE_TIMESTAMP_COLUMN_NAME, @@ -58,7 +58,7 @@ use session::context::{QueryContext, QueryContextRef}; use store_api::storage::RegionId; use table::metadata::TableId; use tests_integration::cluster::{GreptimeDbCluster, GreptimeDbClusterBuilder}; -use tests_integration::test_util::{get_test_store_config, StorageType, PEER_PLACEHOLDER_ADDR}; +use tests_integration::test_util::{PEER_PLACEHOLDER_ADDR, StorageType, get_test_store_config}; use uuid::Uuid; const TEST_TABLE_NAME: &str = "migration_target"; diff --git a/tests-integration/tests/sql.rs b/tests-integration/tests/sql.rs index 9d015bb904..b9484d5fa6 100644 --- a/tests-integration/tests/sql.rs +++ b/tests-integration/tests/sql.rs @@ -26,8 +26,8 @@ use sqlx::mysql::{MySqlConnection, MySqlDatabaseError, MySqlPoolOptions}; use sqlx::postgres::{PgDatabaseError, PgPoolOptions}; use sqlx::{Connection, Executor, Row}; use tests_integration::test_util::{ - setup_mysql_server, setup_mysql_server_with_user_provider, setup_pg_server, - setup_pg_server_with_user_provider, StorageType, + StorageType, setup_mysql_server, setup_mysql_server_with_user_provider, setup_pg_server, + setup_pg_server_with_user_provider, }; use tokio_postgres::{Client, NoTls, SimpleQueryMessage}; @@ -1376,10 +1376,12 @@ pub async fn test_declare_fetch_close_cursor(store_type: StorageType) { .expect("declare cursor"); // duplicated cursor - assert!(client - .execute("DECLARE c1 CURSOR FOR SELECT 1", &[],) - .await - .is_err()); + assert!( + client + .execute("DECLARE c1 CURSOR FOR SELECT 1", &[],) + .await + .is_err() + ); let rows = client.query("FETCH 5 FROM c1", &[]).await.unwrap(); assert_eq!(5, rows.len()); diff --git a/tests/runner/src/env.rs b/tests/runner/src/env.rs index 77a8e703f2..5eabe284f5 100644 --- a/tests/runner/src/env.rs +++ b/tests/runner/src/env.rs @@ -28,7 +28,7 @@ use std::time::Duration; use async_trait::async_trait; use client::error::ServerSnafu; use client::{ - Client, Database as DB, Error as ClientError, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, + Client, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, Database as DB, Error as ClientError, }; use common_error::ext::ErrorExt; use common_query::{Output, OutputData}; @@ -45,8 +45,8 @@ use tokio_postgres::{Client as PgClient, SimpleQueryMessage as PgRow}; use crate::protocol_interceptor::{MYSQL, PROTOCOL_KEY}; use crate::server_mode::ServerMode; -use crate::util::{get_workspace_root, maybe_pull_binary, PROGRAM}; -use crate::{util, ServerAddr}; +use crate::util::{PROGRAM, get_workspace_root, maybe_pull_binary}; +use crate::{ServerAddr, util}; // standalone mode const SERVER_MODE_STANDALONE_IDX: usize = 0; @@ -102,7 +102,9 @@ impl EnvController for Env { panic!("Parallel test mode is not supported when server address is already set."); } - std::env::set_var("SQLNESS_HOME", self.sqlness_home.display().to_string()); + unsafe { + std::env::set_var("SQLNESS_HOME", self.sqlness_home.display().to_string()); + } match mode { "standalone" => self.start_standalone(id).await, "distributed" => 
self.start_distributed(id).await, @@ -177,13 +179,17 @@ impl Env { // start a distributed GreptimeDB let meta_server_mode = ServerMode::random_metasrv(); let metasrv_port = match &meta_server_mode { - ServerMode::Metasrv { rpc_server_addr, .. } => rpc_server_addr + ServerMode::Metasrv { + rpc_server_addr, .. + } => rpc_server_addr .split(':') .nth(1) .unwrap() .parse::() .unwrap(), - _ => panic!("metasrv mode not set, maybe running in remote mode which doesn't support restart?"), + _ => panic!( + "metasrv mode not set, maybe running in remote mode which doesn't support restart?" + ), }; db_ctx.set_server_mode(meta_server_mode.clone(), SERVER_MODE_METASRV_IDX); let meta_server = self.start_server(meta_server_mode, &db_ctx, id, true).await; @@ -612,7 +618,7 @@ impl GreptimeDB { match row { Ok(r) => rows.push(r), Err(e) => { - return Box::new(format!("Failed to parse query result, err: {:?}", e)) + return Box::new(format!("Failed to parse query result, err: {:?}", e)); } } } diff --git a/tests/runner/src/main.rs b/tests/runner/src/main.rs index 14eb9f9654..5b1f8ec683 100644 --- a/tests/runner/src/main.rs +++ b/tests/runner/src/main.rs @@ -137,10 +137,10 @@ async fn main() { Arc::new(protocol_interceptor::ProtocolInterceptorFactory), ); - if let Some(d) = &args.case_dir { - if !d.is_dir() { - panic!("{} is not a directory", d.display()); - } + if let Some(d) = &args.case_dir + && !d.is_dir() + { + panic!("{} is not a directory", d.display()); } if args.jobs == 0 { args.jobs = num_cpus::get() / 2; @@ -156,7 +156,9 @@ async fn main() { || args.test_filter != ".*" { args.jobs = 1; - println!("Normalizing parallelism to 1 due to server addresses, etcd/pg/mysql setup, or test filter usage"); + println!( + "Normalizing parallelism to 1 due to server addresses, etcd/pg/mysql setup, or test filter usage" + ); } let config = ConfigBuilder::default() diff --git a/tests/runner/src/protocol_interceptor.rs b/tests/runner/src/protocol_interceptor.rs index 0f8a01e235..dc8b4239c2 100644 --- a/tests/runner/src/protocol_interceptor.rs +++ b/tests/runner/src/protocol_interceptor.rs @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use sqlness::interceptor::{Interceptor, InterceptorFactory, InterceptorRef}; use sqlness::SqlnessError; +use sqlness::interceptor::{Interceptor, InterceptorFactory, InterceptorRef}; pub const PROTOCOL_KEY: &str = "protocol"; pub const POSTGRES: &str = "postgres"; diff --git a/tests/runner/src/server_mode.rs b/tests/runner/src/server_mode.rs index b3d471da46..f26ac93792 100644 --- a/tests/runner/src/server_mode.rs +++ b/tests/runner/src/server_mode.rs @@ -20,7 +20,7 @@ use serde::Serialize; use tinytemplate::TinyTemplate; use crate::env::{Env, GreptimeDBContext}; -use crate::{util, ServerAddr}; +use crate::{ServerAddr, util}; const DEFAULT_LOG_LEVEL: &str = "--log-level=debug,hyper=warn,tower=warn,datafusion=warn,reqwest=warn,sqlparser=warn,h2=info,opendal=info"; diff --git a/tests/runner/src/util.rs b/tests/runner/src/util.rs index be8c9d4432..a887e4ceb5 100644 --- a/tests/runner/src/util.rs +++ b/tests/runner/src/util.rs @@ -182,10 +182,14 @@ pub async fn pull_binary(version: &str) { /// Pull the binary if it does not exist and `pull_version_on_need` is true. 
pub async fn maybe_pull_binary(version: &str, pull_version_on_need: bool) { let exist = Path::new(version).join(PROGRAM).is_file(); - match (exist, pull_version_on_need){ + match (exist, pull_version_on_need) { (true, _) => println!("Binary {version} exists"), - (false, false) => panic!("Binary {version} does not exist, please run with --pull-version-on-need or manually download it"), - (false, true) => { pull_binary(version).await; }, + (false, false) => panic!( + "Binary {version} does not exist, please run with --pull-version-on-need or manually download it" + ), + (false, true) => { + pull_binary(version).await; + } } }
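Note on the recurring rewrites above: moving the workspace to edition = "2024" makes `std::env::set_var` an `unsafe` function (mutating the process environment can race with concurrent reads from other threads), allows let-chains in `if` conditions, and switches rustfmt to the 2024 style edition, whose version-sorting of imports is what produces the bulk of the `use` reordering (uppercase-first comparison puts constants such as DEFAULT_CATALOG_NAME ahead of types such as Database, and numeric segments compare as numbers, so Int8Type sorts before Int16Type). The sketch below is illustrative only and not part of the patch; it is a minimal, self-contained Rust example of the first two patterns, with the environment-variable names simply mirroring the tests touched in this diff.

use std::env;

fn main() {
    // Edition 2024: `env::set_var` is `unsafe`, because mutating the process
    // environment can race with other threads reading it. Every call site in
    // the diff therefore gains an `unsafe` block.
    unsafe {
        env::set_var("TZ", "UTC");
    }

    // Edition 2024 also permits let-chains in `if` conditions, so the nested
    // `if let Ok(x) = ... { if cond { ... } }` blocks collapse into a single
    // condition, as in build_storage_types_based_on_env and the copy-to-S3 tests.
    if let Ok(bucket) = env::var("GT_S3_BUCKET")
        && !bucket.is_empty()
    {
        println!("S3 bucket configured: {bucket}");
    }
}

The same let-chain shape is what rustfmt produces when the chained condition no longer fits on one line: the binding and the extra predicate each get their own line, with the opening brace on a line of its own.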