From a56030e6a5011bcd34d1f6795ed14f10ee2945fd Mon Sep 17 00:00:00 2001
From: "Lei, HUANG" <6406592+v0y4g3r@users.noreply.github.com>
Date: Wed, 5 Mar 2025 16:22:18 +0800
Subject: [PATCH] refactor: remove cluster id field (#5610)

* chore: resolve conflicts

* chore: merge main

* test: add compatibility test for DatanodeLeaseKey with missing cluster_id

* test: add compatibility test for DatanodeLeaseKey without cluster_id

* refactor/remove-cluster-id:

- **Update `greptime-proto` Dependency**: Updated the `greptime-proto` dependency in `Cargo.lock` and `Cargo.toml` to a new revision.
- **Remove `cluster_id` Usage**: Removed the `cluster_id` field and its related logic from various files, including `cluster.rs`, `datanode.rs`, `rpc.rs`, `adapter.rs`, `client.rs`, `ask_leader.rs`, `heartbeat.rs`, `procedure.rs`, `store.rs`, `handler.rs`, `response_header_handler.rs`, `key.rs`, `datanode.rs`, `lease.rs`, `metrics.rs`, `cluster.rs`, `heartbeat.rs`, `procedure.rs`, and `store.rs`.
- **Refactor Tests**: Updated tests in `client.rs`, `response_header_handler.rs`, `store.rs`, and `service` modules to reflect the removal of `cluster_id`.

* fix: clippy

* refactor/remove-cluster-id:

**Refactor and Cleanup in Meta Server**

- **`response_header_handler.rs`**: Removed unused import of `HeartbeatResponse` and cleaned up the test function by eliminating the creation of an unused `HeartbeatResponse` object.
- **`node_lease.rs`**: Simplified parameter handling in the `HttpHandler` implementation by using an underscore for unused parameters.

* refactor/remove-cluster-id:

### Remove `TableMetadataAllocatorContext` and Refactor Code

- **Removed `TableMetadataAllocatorContext`**: Eliminated the `TableMetadataAllocatorContext` struct and its usage across multiple files, including `ddl.rs`, `create_table.rs`, `create_view.rs`, `table_meta.rs`, `test_util.rs`, `create_logical_tables.rs`, `drop_table.rs`, and `table_meta_alloc.rs`.
- **Refactored Function Signatures**: Updated function signatures to remove the `TableMetadataAllocatorContext` parameter in methods like `create`, `create_view`, and `alloc` in `table_meta.rs` and `table_meta_alloc.rs`.
- **Updated Imports**: Adjusted import statements to reflect the removal of `TableMetadataAllocatorContext` in affected files.

These changes simplify the codebase by removing an unnecessary context struct and updating related function calls.

* refactor/remove-cluster-id:

### Update `datanode.rs` to Modify Key Prefix

- **File Modified**: `src/common/meta/src/datanode.rs`
- **Key Changes**:
  - Updated `DatanodeStatKey::prefix_key` and `From` to remove the cluster ID from the key prefix.
  - Adjusted comments to reflect the changes in key prefix handling.

* reformat code

* refactor/remove-cluster-id:

### Commit Summary

- **Refactor `Pusher` Initialization**: Removed the `RequestHeader` parameter from the `Pusher::new` method across multiple files, including `handler.rs`, `test_util.rs`, and `heartbeat.rs`. This change simplifies the `Pusher` initialization process by eliminating the unnecessary parameter.
- **Update Imports**: Adjusted import statements in `handler.rs` and `test_util.rs` to remove unused `RequestHeader` references, ensuring cleaner and more efficient code.
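The key-prefix change described above boils down to hard-coding a `0` segment where the cluster id used to sit, so previously written keys keep the same shape. A minimal standalone sketch of that layout, with the struct simplified and a hypothetical `to_key` helper standing in for the patch's byte-conversion impl:

```rust
// Sketch of the post-change key layout: the cluster id field is gone, but the
// rendered key keeps a literal `0` placeholder segment for compatibility.
const DATANODE_STAT_PREFIX: &str = "__meta_datanode_stat";

struct DatanodeStatKey {
    node_id: u64,
}

impl DatanodeStatKey {
    /// Renders the key with the hard-coded `0` placeholder segment.
    fn to_key(&self) -> String {
        format!("{}-0-{}", DATANODE_STAT_PREFIX, self.node_id)
    }
}

fn main() {
    let key = DatanodeStatKey { node_id: 101 };
    assert_eq!(key.to_key(), "__meta_datanode_stat-0-101");
}
```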
* chore: update proto --- Cargo.lock | 2 +- Cargo.toml | 2 +- src/cmd/src/datanode.rs | 12 +- src/cmd/src/flownode.rs | 14 +- src/cmd/src/frontend.rs | 12 +- src/common/meta/src/cluster.rs | 49 ++----- src/common/meta/src/datanode.rs | 45 ++---- src/common/meta/src/ddl.rs | 9 +- src/common/meta/src/ddl/alter_database.rs | 13 +- .../meta/src/ddl/alter_logical_tables.rs | 5 +- src/common/meta/src/ddl/alter_table.rs | 15 +- .../src/ddl/alter_table/region_request.rs | 23 +-- src/common/meta/src/ddl/create_flow.rs | 11 +- .../meta/src/ddl/create_flow/metadata.rs | 3 +- .../meta/src/ddl/create_logical_tables.rs | 5 +- src/common/meta/src/ddl/create_table.rs | 26 ++-- src/common/meta/src/ddl/create_view.rs | 15 +- src/common/meta/src/ddl/drop_database.rs | 4 - .../meta/src/ddl/drop_database/cursor.rs | 9 +- .../meta/src/ddl/drop_database/executor.rs | 21 +-- .../meta/src/ddl/drop_database/metadata.rs | 2 - .../meta/src/ddl/drop_database/start.rs | 3 - src/common/meta/src/ddl/drop_flow.rs | 6 +- src/common/meta/src/ddl/drop_table.rs | 11 +- .../meta/src/ddl/drop_table/executor.rs | 15 +- src/common/meta/src/ddl/drop_view.rs | 6 +- src/common/meta/src/ddl/flow_meta.rs | 16 +-- src/common/meta/src/ddl/table_meta.rs | 28 +--- src/common/meta/src/ddl/test_util.rs | 18 +-- .../src/ddl/tests/alter_logical_tables.rs | 65 ++++----- src/common/meta/src/ddl/tests/alter_table.rs | 26 +--- src/common/meta/src/ddl/tests/create_flow.rs | 16 +-- .../src/ddl/tests/create_logical_tables.rs | 59 ++------ src/common/meta/src/ddl/tests/create_table.rs | 28 ++-- src/common/meta/src/ddl/tests/create_view.rs | 24 ++-- .../meta/src/ddl/tests/drop_database.rs | 23 ++- src/common/meta/src/ddl/tests/drop_flow.rs | 20 +-- src/common/meta/src/ddl/tests/drop_table.rs | 50 +++---- src/common/meta/src/ddl/tests/drop_view.rs | 16 +-- src/common/meta/src/ddl/truncate_table.rs | 8 +- src/common/meta/src/ddl/utils.rs | 4 +- src/common/meta/src/ddl_manager.rs | 133 +++++------------- src/common/meta/src/instruction.rs | 14 +- src/common/meta/src/lib.rs | 2 - src/common/meta/src/node_expiry_listener.rs | 2 +- src/common/meta/src/peer.rs | 6 +- src/common/meta/src/rpc.rs | 7 - src/common/meta/src/test_util.rs | 6 +- src/datanode/src/heartbeat/handler.rs | 2 - src/flow/src/adapter.rs | 2 - src/meta-client/examples/meta_client.rs | 4 +- src/meta-client/src/client.rs | 68 +++++---- src/meta-client/src/client/heartbeat.rs | 5 +- src/meta-client/src/client/store.rs | 4 +- src/meta-client/src/lib.rs | 11 +- src/meta-client/src/mocks.rs | 4 +- src/meta-srv/src/cluster.rs | 7 +- src/meta-srv/src/flow_meta_alloc.rs | 8 +- src/meta-srv/src/handler.rs | 18 +-- .../src/handler/collect_stats_handler.rs | 7 +- src/meta-srv/src/handler/failure_handler.rs | 1 - .../src/handler/keep_lease_handler.rs | 14 +- .../src/handler/region_lease_handler.rs | 9 +- .../src/handler/response_header_handler.rs | 16 +-- src/meta-srv/src/key.rs | 12 +- src/meta-srv/src/key/datanode.rs | 45 ++---- src/meta-srv/src/key/flownode.rs | 11 +- src/meta-srv/src/lease.rs | 34 ++--- src/meta-srv/src/metasrv.rs | 9 +- src/meta-srv/src/metrics.rs | 2 +- .../src/procedure/region_migration.rs | 27 +--- .../close_downgraded_region.rs | 2 - .../downgrade_leader_region.rs | 1 - .../src/procedure/region_migration/manager.rs | 24 +--- .../region_migration/open_candidate_region.rs | 3 - .../procedure/region_migration/test_util.rs | 5 +- .../rollback_downgraded_region.rs | 6 +- .../upgrade_candidate_region.rs | 1 - src/meta-srv/src/procedure/tests.rs | 4 +- 
src/meta-srv/src/region/failure_detector.rs | 2 +- src/meta-srv/src/region/lease_keeper.rs | 15 +- src/meta-srv/src/region/supervisor.rs | 42 ++---- src/meta-srv/src/selector.rs | 9 +- src/meta-srv/src/selector/lease_based.rs | 11 +- src/meta-srv/src/selector/load_based.rs | 40 ++---- src/meta-srv/src/selector/round_robin.rs | 24 +--- src/meta-srv/src/selector/test_utils.rs | 9 +- src/meta-srv/src/selector/weight_compute.rs | 15 +- src/meta-srv/src/service/admin/node_lease.rs | 8 +- src/meta-srv/src/service/admin/util.rs | 18 +-- src/meta-srv/src/service/cluster.rs | 12 +- src/meta-srv/src/service/heartbeat.rs | 11 +- src/meta-srv/src/service/procedure.rs | 19 +-- src/meta-srv/src/service/store.rs | 117 +++------------ src/meta-srv/src/table_meta_alloc.rs | 21 +-- src/meta-srv/src/test_util.rs | 8 +- tests-integration/src/cluster.rs | 18 ++- tests-integration/tests/region_migration.rs | 12 +- 98 files changed, 478 insertions(+), 1208 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dc70cbc8fb..cea1229542 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4701,7 +4701,7 @@ dependencies = [ [[package]] name = "greptime-proto" version = "0.1.0" -source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=072ce580502e015df1a6b03a185b60309a7c2a7a#072ce580502e015df1a6b03a185b60309a7c2a7a" +source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=d92c9ac4e90ef4abdcf5c2eaf5a164e18ba09486#d92c9ac4e90ef4abdcf5c2eaf5a164e18ba09486" dependencies = [ "prost 0.13.3", "serde", diff --git a/Cargo.toml b/Cargo.toml index de351dba90..67f9649dad 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -129,7 +129,7 @@ etcd-client = "0.14" fst = "0.4.7" futures = "0.3" futures-util = "0.3" -greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "072ce580502e015df1a6b03a185b60309a7c2a7a" } +greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "d92c9ac4e90ef4abdcf5c2eaf5a164e18ba09486" } hex = "0.4" http = "1" humantime = "2.1" diff --git a/src/cmd/src/datanode.rs b/src/cmd/src/datanode.rs index c26201e7c0..18427fef98 100644 --- a/src/cmd/src/datanode.rs +++ b/src/cmd/src/datanode.rs @@ -287,7 +287,6 @@ impl StartCommand { .await .context(StartDatanodeSnafu)?; - let cluster_id = 0; // TODO(hl): read from config let member_id = opts .node_id .context(MissingConfigSnafu { msg: "'node_id'" })?; @@ -296,13 +295,10 @@ impl StartCommand { msg: "'meta_client_options'", })?; - let meta_client = meta_client::create_meta_client( - cluster_id, - MetaClientType::Datanode { member_id }, - meta_config, - ) - .await - .context(MetaClientInitSnafu)?; + let meta_client = + meta_client::create_meta_client(MetaClientType::Datanode { member_id }, meta_config) + .await + .context(MetaClientInitSnafu)?; let meta_backend = Arc::new(MetaKvBackend { client: meta_client.clone(), diff --git a/src/cmd/src/flownode.rs b/src/cmd/src/flownode.rs index 9280202471..6bd02a6a46 100644 --- a/src/cmd/src/flownode.rs +++ b/src/cmd/src/flownode.rs @@ -241,9 +241,6 @@ impl StartCommand { let mut opts = opts.component; opts.grpc.detect_server_addr(); - // TODO(discord9): make it not optionale after cluster id is required - let cluster_id = opts.cluster_id.unwrap_or(0); - let member_id = opts .node_id .context(MissingConfigSnafu { msg: "'node_id'" })?; @@ -252,13 +249,10 @@ impl StartCommand { msg: "'meta_client_options'", })?; - let meta_client = meta_client::create_meta_client( - cluster_id, - MetaClientType::Flownode { member_id }, - meta_config, - ) - .await - 
.context(MetaClientInitSnafu)?; + let meta_client = + meta_client::create_meta_client(MetaClientType::Flownode { member_id }, meta_config) + .await + .context(MetaClientInitSnafu)?; let cache_max_capacity = meta_config.metadata_cache_max_capacity; let cache_ttl = meta_config.metadata_cache_ttl; diff --git a/src/cmd/src/frontend.rs b/src/cmd/src/frontend.rs index 3324328a8c..030783cb2f 100644 --- a/src/cmd/src/frontend.rs +++ b/src/cmd/src/frontend.rs @@ -295,14 +295,10 @@ impl StartCommand { let cache_ttl = meta_client_options.metadata_cache_ttl; let cache_tti = meta_client_options.metadata_cache_tti; - let cluster_id = 0; // (TODO: jeremy): It is currently a reserved field and has not been enabled. - let meta_client = meta_client::create_meta_client( - cluster_id, - MetaClientType::Frontend, - meta_client_options, - ) - .await - .context(MetaClientInitSnafu)?; + let meta_client = + meta_client::create_meta_client(MetaClientType::Frontend, meta_client_options) + .await + .context(MetaClientInitSnafu)?; // TODO(discord9): add helper function to ease the creation of cache registry&such let cached_meta_backend = diff --git a/src/common/meta/src/cluster.rs b/src/common/meta/src/cluster.rs index f73dcf1537..27146ea940 100644 --- a/src/common/meta/src/cluster.rs +++ b/src/common/meta/src/cluster.rs @@ -28,7 +28,6 @@ use crate::error::{ InvalidRoleSnafu, ParseNumSnafu, Result, }; use crate::peer::Peer; -use crate::ClusterId; const CLUSTER_NODE_INFO_PREFIX: &str = "__meta_cluster_node_info"; @@ -56,12 +55,9 @@ pub trait ClusterInfo { // TODO(jeremy): Other info, like region status, etc. } -/// The key of [NodeInfo] in the storage. The format is `__meta_cluster_node_info-{cluster_id}-{role}-{node_id}`. +/// The key of [NodeInfo] in the storage. The format is `__meta_cluster_node_info-0-{role}-{node_id}`. #[derive(Debug, Clone, Copy, Eq, Hash, PartialEq, Serialize, Deserialize)] pub struct NodeInfoKey { - /// The cluster id. - // todo(hl): remove cluster_id as it is not assigned anywhere. - pub cluster_id: ClusterId, /// The role of the node. It can be `[Role::Datanode]` or `[Role::Frontend]`. pub role: Role, /// The node id. 
@@ -84,24 +80,15 @@ impl NodeInfoKey { _ => peer.id, }; - Some(NodeInfoKey { - cluster_id: header.cluster_id, - role, - node_id, - }) + Some(NodeInfoKey { role, node_id }) } - pub fn key_prefix_with_cluster_id(cluster_id: u64) -> String { - format!("{}-{}-", CLUSTER_NODE_INFO_PREFIX, cluster_id) + pub fn key_prefix() -> String { + format!("{}-0-", CLUSTER_NODE_INFO_PREFIX) } - pub fn key_prefix_with_role(cluster_id: ClusterId, role: Role) -> String { - format!( - "{}-{}-{}-", - CLUSTER_NODE_INFO_PREFIX, - cluster_id, - i32::from(role) - ) + pub fn key_prefix_with_role(role: Role) -> String { + format!("{}-0-{}-", CLUSTER_NODE_INFO_PREFIX, i32::from(role)) } } @@ -193,15 +180,10 @@ impl FromStr for NodeInfoKey { let caps = CLUSTER_NODE_INFO_PREFIX_PATTERN .captures(key) .context(InvalidNodeInfoKeySnafu { key })?; - ensure!(caps.len() == 4, InvalidNodeInfoKeySnafu { key }); - let cluster_id = caps[1].to_string(); let role = caps[2].to_string(); let node_id = caps[3].to_string(); - let cluster_id: u64 = cluster_id.parse().context(ParseNumSnafu { - err_msg: format!("invalid cluster_id: {cluster_id}"), - })?; let role: i32 = role.parse().context(ParseNumSnafu { err_msg: format!("invalid role {role}"), })?; @@ -210,11 +192,7 @@ impl FromStr for NodeInfoKey { err_msg: format!("invalid node_id: {node_id}"), })?; - Ok(Self { - cluster_id, - role, - node_id, - }) + Ok(Self { role, node_id }) } } @@ -233,9 +211,8 @@ impl TryFrom> for NodeInfoKey { impl From<&NodeInfoKey> for Vec { fn from(key: &NodeInfoKey) -> Self { format!( - "{}-{}-{}-{}", + "{}-0-{}-{}", CLUSTER_NODE_INFO_PREFIX, - key.cluster_id, i32::from(key.role), key.node_id ) @@ -308,7 +285,6 @@ mod tests { #[test] fn test_node_info_key_round_trip() { let key = NodeInfoKey { - cluster_id: 1, role: Datanode, node_id: 2, }; @@ -316,7 +292,6 @@ mod tests { let key_bytes: Vec = (&key).into(); let new_key: NodeInfoKey = key_bytes.try_into().unwrap(); - assert_eq!(1, new_key.cluster_id); assert_eq!(Datanode, new_key.role); assert_eq!(2, new_key.node_id); } @@ -362,11 +337,11 @@ mod tests { #[test] fn test_node_info_key_prefix() { - let prefix = NodeInfoKey::key_prefix_with_cluster_id(1); - assert_eq!(prefix, "__meta_cluster_node_info-1-"); + let prefix = NodeInfoKey::key_prefix(); + assert_eq!(prefix, "__meta_cluster_node_info-0-"); - let prefix = NodeInfoKey::key_prefix_with_role(2, Frontend); - assert_eq!(prefix, "__meta_cluster_node_info-2-1-"); + let prefix = NodeInfoKey::key_prefix_with_role(Frontend); + assert_eq!(prefix, "__meta_cluster_node_info-0-1-"); } #[test] diff --git a/src/common/meta/src/datanode.rs b/src/common/meta/src/datanode.rs index 03226027ca..d717d1523e 100644 --- a/src/common/meta/src/datanode.rs +++ b/src/common/meta/src/datanode.rs @@ -25,8 +25,8 @@ use store_api::region_engine::{RegionRole, RegionStatistic}; use store_api::storage::RegionId; use table::metadata::TableId; +use crate::error; use crate::error::Result; -use crate::{error, ClusterId}; pub(crate) const DATANODE_LEASE_PREFIX: &str = "__meta_datanode_lease"; const INACTIVE_REGION_PREFIX: &str = "__meta_inactive_region"; @@ -48,11 +48,10 @@ lazy_static! { /// The key of the datanode stat in the storage. /// -/// The format is `__meta_datanode_stat-{cluster_id}-{node_id}`. +/// The format is `__meta_datanode_stat-0-{node_id}`. #[derive(Debug, Clone, Default, Serialize, Deserialize)] pub struct Stat { pub timestamp_millis: i64, - pub cluster_id: ClusterId, // The datanode Id. pub id: u64, // The datanode address. 
@@ -102,10 +101,7 @@ impl Stat { } pub fn stat_key(&self) -> DatanodeStatKey { - DatanodeStatKey { - cluster_id: self.cluster_id, - node_id: self.id, - } + DatanodeStatKey { node_id: self.id } } /// Returns a tuple array containing [RegionId] and [RegionRole]. @@ -145,7 +141,7 @@ impl TryFrom<&HeartbeatRequest> for Stat { } = value; match (header, peer) { - (Some(header), Some(peer)) => { + (Some(_header), Some(peer)) => { let region_stats = region_stats .iter() .map(RegionStat::from) @@ -153,7 +149,6 @@ impl TryFrom<&HeartbeatRequest> for Stat { Ok(Self { timestamp_millis: time_util::current_time_millis(), - cluster_id: header.cluster_id, // datanode id id: peer.id, // datanode address @@ -196,32 +191,24 @@ impl From<&api::v1::meta::RegionStat> for RegionStat { /// The key of the datanode stat in the memory store. /// -/// The format is `__meta_datanode_stat-{cluster_id}-{node_id}`. +/// The format is `__meta_datanode_stat-0-{node_id}`. #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)] pub struct DatanodeStatKey { - pub cluster_id: ClusterId, pub node_id: u64, } impl DatanodeStatKey { /// The key prefix. pub fn prefix_key() -> Vec { - format!("{DATANODE_STAT_PREFIX}-").into_bytes() - } - - /// The key prefix with the cluster id. - pub fn key_prefix_with_cluster_id(cluster_id: ClusterId) -> String { - format!("{DATANODE_STAT_PREFIX}-{cluster_id}-") + // todo(hl): remove cluster id in prefix + format!("{DATANODE_STAT_PREFIX}-0-").into_bytes() } } impl From for Vec { fn from(value: DatanodeStatKey) -> Self { - format!( - "{}-{}-{}", - DATANODE_STAT_PREFIX, value.cluster_id, value.node_id - ) - .into_bytes() + // todo(hl): remove cluster id in prefix + format!("{}-0-{}", DATANODE_STAT_PREFIX, value.node_id).into_bytes() } } @@ -234,20 +221,12 @@ impl FromStr for DatanodeStatKey { .context(error::InvalidStatKeySnafu { key })?; ensure!(caps.len() == 3, error::InvalidStatKeySnafu { key }); - - let cluster_id = caps[1].to_string(); let node_id = caps[2].to_string(); - let cluster_id: u64 = cluster_id.parse().context(error::ParseNumSnafu { - err_msg: format!("invalid cluster_id: {cluster_id}"), - })?; let node_id: u64 = node_id.parse().context(error::ParseNumSnafu { err_msg: format!("invalid node_id: {node_id}"), })?; - Ok(Self { - cluster_id, - node_id, - }) + Ok(Self { node_id }) } } @@ -321,7 +300,6 @@ mod tests { #[test] fn test_stat_key() { let stat = Stat { - cluster_id: 3, id: 101, region_num: 10, ..Default::default() @@ -329,14 +307,12 @@ mod tests { let stat_key = stat.stat_key(); - assert_eq!(3, stat_key.cluster_id); assert_eq!(101, stat_key.node_id); } #[test] fn test_stat_val_round_trip() { let stat = Stat { - cluster_id: 0, id: 101, region_num: 100, ..Default::default() @@ -351,7 +327,6 @@ mod tests { assert_eq!(1, stats.len()); let stat = stats.first().unwrap(); - assert_eq!(0, stat.cluster_id); assert_eq!(101, stat.id); assert_eq!(100, stat.region_num); } diff --git a/src/common/meta/src/ddl.rs b/src/common/meta/src/ddl.rs index 0753ab51fc..55a9a64c84 100644 --- a/src/common/meta/src/ddl.rs +++ b/src/common/meta/src/ddl.rs @@ -30,7 +30,7 @@ use crate::node_manager::NodeManagerRef; use crate::region_keeper::MemoryRegionKeeperRef; use crate::rpc::ddl::{SubmitDdlTaskRequest, SubmitDdlTaskResponse}; use crate::rpc::procedure::{MigrateRegionRequest, MigrateRegionResponse, ProcedureStateResponse}; -use crate::{ClusterId, DatanodeId}; +use crate::DatanodeId; pub mod alter_database; pub mod alter_logical_tables; @@ -57,7 +57,6 @@ pub mod utils; #[derive(Debug, Default)] pub struct 
ExecutorContext { - pub cluster_id: Option, pub tracing_context: Option, } @@ -90,10 +89,6 @@ pub trait ProcedureExecutor: Send + Sync { pub type ProcedureExecutorRef = Arc; -pub struct TableMetadataAllocatorContext { - pub cluster_id: ClusterId, -} - /// Metadata allocated to a table. #[derive(Default)] pub struct TableMetadata { @@ -108,7 +103,7 @@ pub struct TableMetadata { pub type RegionFailureDetectorControllerRef = Arc; -pub type DetectingRegion = (ClusterId, DatanodeId, RegionId); +pub type DetectingRegion = (DatanodeId, RegionId); /// Used for actively registering Region failure detectors. /// diff --git a/src/common/meta/src/ddl/alter_database.rs b/src/common/meta/src/ddl/alter_database.rs index 68f0f5428e..983222144c 100644 --- a/src/common/meta/src/ddl/alter_database.rs +++ b/src/common/meta/src/ddl/alter_database.rs @@ -30,7 +30,6 @@ use crate::key::DeserializedValueWithBytes; use crate::lock_key::{CatalogLock, SchemaLock}; use crate::rpc::ddl::UnsetDatabaseOption::{self}; use crate::rpc::ddl::{AlterDatabaseKind, AlterDatabaseTask, SetDatabaseOption}; -use crate::ClusterId; pub struct AlterDatabaseProcedure { pub context: DdlContext, @@ -65,14 +64,10 @@ fn build_new_schema_value( impl AlterDatabaseProcedure { pub const TYPE_NAME: &'static str = "metasrv-procedure::AlterDatabase"; - pub fn new( - cluster_id: ClusterId, - task: AlterDatabaseTask, - context: DdlContext, - ) -> Result { + pub fn new(task: AlterDatabaseTask, context: DdlContext) -> Result { Ok(Self { context, - data: AlterDatabaseData::new(task, cluster_id)?, + data: AlterDatabaseData::new(task)?, }) } @@ -183,7 +178,6 @@ enum AlterDatabaseState { /// The data of alter database procedure. #[derive(Debug, Serialize, Deserialize)] pub struct AlterDatabaseData { - cluster_id: ClusterId, state: AlterDatabaseState, kind: AlterDatabaseKind, catalog_name: String, @@ -192,9 +186,8 @@ pub struct AlterDatabaseData { } impl AlterDatabaseData { - pub fn new(task: AlterDatabaseTask, cluster_id: ClusterId) -> Result { + pub fn new(task: AlterDatabaseTask) -> Result { Ok(Self { - cluster_id, state: AlterDatabaseState::Prepare, kind: AlterDatabaseKind::try_from(task.alter_expr.kind.unwrap())?, catalog_name: task.alter_expr.catalog_name, diff --git a/src/common/meta/src/ddl/alter_logical_tables.rs b/src/common/meta/src/ddl/alter_logical_tables.rs index d9b318e9e9..ea741accf3 100644 --- a/src/common/meta/src/ddl/alter_logical_tables.rs +++ b/src/common/meta/src/ddl/alter_logical_tables.rs @@ -37,9 +37,9 @@ use crate::key::table_info::TableInfoValue; use crate::key::table_route::PhysicalTableRouteValue; use crate::key::DeserializedValueWithBytes; use crate::lock_key::{CatalogLock, SchemaLock, TableLock}; +use crate::metrics; use crate::rpc::ddl::AlterTableTask; use crate::rpc::router::find_leaders; -use crate::{metrics, ClusterId}; pub struct AlterLogicalTablesProcedure { pub context: DdlContext, @@ -50,7 +50,6 @@ impl AlterLogicalTablesProcedure { pub const TYPE_NAME: &'static str = "metasrv-procedure::AlterLogicalTables"; pub fn new( - cluster_id: ClusterId, tasks: Vec, physical_table_id: TableId, context: DdlContext, @@ -58,7 +57,6 @@ impl AlterLogicalTablesProcedure { Self { context, data: AlterTablesData { - cluster_id, state: AlterTablesState::Prepare, tasks, table_info_values: vec![], @@ -240,7 +238,6 @@ impl Procedure for AlterLogicalTablesProcedure { #[derive(Debug, Serialize, Deserialize)] pub struct AlterTablesData { - cluster_id: ClusterId, state: AlterTablesState, tasks: Vec, /// Table info values before the alter 
operation. diff --git a/src/common/meta/src/ddl/alter_table.rs b/src/common/meta/src/ddl/alter_table.rs index 55ecdba545..e18d532292 100644 --- a/src/common/meta/src/ddl/alter_table.rs +++ b/src/common/meta/src/ddl/alter_table.rs @@ -45,9 +45,9 @@ use crate::instruction::CacheIdent; use crate::key::table_info::TableInfoValue; use crate::key::{DeserializedValueWithBytes, RegionDistribution}; use crate::lock_key::{CatalogLock, SchemaLock, TableLock, TableNameLock}; +use crate::metrics; use crate::rpc::ddl::AlterTableTask; use crate::rpc::router::{find_leader_regions, find_leaders, region_distribution}; -use crate::{metrics, ClusterId}; /// The alter table procedure pub struct AlterTableProcedure { @@ -64,16 +64,11 @@ pub struct AlterTableProcedure { impl AlterTableProcedure { pub const TYPE_NAME: &'static str = "metasrv-procedure::AlterTable"; - pub fn new( - cluster_id: ClusterId, - table_id: TableId, - task: AlterTableTask, - context: DdlContext, - ) -> Result { + pub fn new(table_id: TableId, task: AlterTableTask, context: DdlContext) -> Result { task.validate()?; Ok(Self { context, - data: AlterTableData::new(task, table_id, cluster_id), + data: AlterTableData::new(task, table_id), new_table_info: None, }) } @@ -307,7 +302,6 @@ enum AlterTableState { // The serialized data of alter table. #[derive(Debug, Serialize, Deserialize)] pub struct AlterTableData { - cluster_id: ClusterId, state: AlterTableState, task: AlterTableTask, table_id: TableId, @@ -318,12 +312,11 @@ pub struct AlterTableData { } impl AlterTableData { - pub fn new(task: AlterTableTask, table_id: TableId, cluster_id: u64) -> Self { + pub fn new(task: AlterTableTask, table_id: TableId) -> Self { Self { state: AlterTableState::Prepare, task, table_id, - cluster_id, table_info_value: None, region_distribution: None, } diff --git a/src/common/meta/src/ddl/alter_table/region_request.rs b/src/common/meta/src/ddl/alter_table/region_request.rs index 7de578aced..ef3038cfba 100644 --- a/src/common/meta/src/ddl/alter_table/region_request.rs +++ b/src/common/meta/src/ddl/alter_table/region_request.rs @@ -167,10 +167,9 @@ mod tests { use crate::test_util::{new_ddl_context, MockDatanodeManager}; /// Prepares a region with schema `[ts: Timestamp, host: Tag, cpu: Field]`. 
- async fn prepare_ddl_context() -> (DdlContext, u64, TableId, RegionId, String) { + async fn prepare_ddl_context() -> (DdlContext, TableId, RegionId, String) { let datanode_manager = Arc::new(MockDatanodeManager::new(())); let ddl_context = new_ddl_context(datanode_manager); - let cluster_id = 1; let table_id = 1024; let region_id = RegionId::new(table_id, 1); let table_name = "foo"; @@ -225,19 +224,12 @@ mod tests { ) .await .unwrap(); - ( - ddl_context, - cluster_id, - table_id, - region_id, - table_name.to_string(), - ) + (ddl_context, table_id, region_id, table_name.to_string()) } #[tokio::test] async fn test_make_alter_region_request() { - let (ddl_context, cluster_id, table_id, region_id, table_name) = - prepare_ddl_context().await; + let (ddl_context, table_id, region_id, table_name) = prepare_ddl_context().await; let task = AlterTableTask { alter_table: AlterTableExpr { @@ -265,8 +257,7 @@ mod tests { }, }; - let mut procedure = - AlterTableProcedure::new(cluster_id, table_id, task, ddl_context).unwrap(); + let mut procedure = AlterTableProcedure::new(table_id, task, ddl_context).unwrap(); procedure.on_prepare().await.unwrap(); let alter_kind = procedure.make_region_alter_kind().unwrap(); let Some(Body::Alter(alter_region_request)) = procedure @@ -307,8 +298,7 @@ mod tests { #[tokio::test] async fn test_make_alter_column_type_region_request() { - let (ddl_context, cluster_id, table_id, region_id, table_name) = - prepare_ddl_context().await; + let (ddl_context, table_id, region_id, table_name) = prepare_ddl_context().await; let task = AlterTableTask { alter_table: AlterTableExpr { @@ -325,8 +315,7 @@ mod tests { }, }; - let mut procedure = - AlterTableProcedure::new(cluster_id, table_id, task, ddl_context).unwrap(); + let mut procedure = AlterTableProcedure::new(table_id, task, ddl_context).unwrap(); procedure.on_prepare().await.unwrap(); let alter_kind = procedure.make_region_alter_kind().unwrap(); let Some(Body::Alter(alter_region_request)) = procedure diff --git a/src/common/meta/src/ddl/create_flow.rs b/src/common/meta/src/ddl/create_flow.rs index db8a700059..3e6f6ff551 100644 --- a/src/common/meta/src/ddl/create_flow.rs +++ b/src/common/meta/src/ddl/create_flow.rs @@ -46,9 +46,9 @@ use crate::key::flow::flow_route::FlowRouteValue; use crate::key::table_name::TableNameKey; use crate::key::{DeserializedValueWithBytes, FlowId, FlowPartitionId}; use crate::lock_key::{CatalogLock, FlowNameLock, TableNameLock}; +use crate::metrics; use crate::peer::Peer; use crate::rpc::ddl::{CreateFlowTask, QueryContext}; -use crate::{metrics, ClusterId}; /// The procedure of flow creation. pub struct CreateFlowProcedure { @@ -60,16 +60,10 @@ impl CreateFlowProcedure { pub const TYPE_NAME: &'static str = "metasrv-procedure::CreateFlow"; /// Returns a new [CreateFlowProcedure]. - pub fn new( - cluster_id: ClusterId, - task: CreateFlowTask, - query_context: QueryContext, - context: DdlContext, - ) -> Self { + pub fn new(task: CreateFlowTask, query_context: QueryContext, context: DdlContext) -> Self { Self { context, data: CreateFlowData { - cluster_id, task, flow_id: None, peers: vec![], @@ -363,7 +357,6 @@ impl fmt::Display for FlowType { /// The serializable data. 
#[derive(Debug, Serialize, Deserialize)] pub struct CreateFlowData { - pub(crate) cluster_id: ClusterId, pub(crate) state: CreateFlowState, pub(crate) task: CreateFlowTask, pub(crate) flow_id: Option, diff --git a/src/common/meta/src/ddl/create_flow/metadata.rs b/src/common/meta/src/ddl/create_flow/metadata.rs index 40cf99ccc4..1681479d91 100644 --- a/src/common/meta/src/ddl/create_flow/metadata.rs +++ b/src/common/meta/src/ddl/create_flow/metadata.rs @@ -23,11 +23,10 @@ impl CreateFlowProcedure { pub(crate) async fn allocate_flow_id(&mut self) -> Result<()> { //TODO(weny, ruihang): We doesn't support the partitions. It's always be 1, now. let partitions = 1; - let cluster_id = self.data.cluster_id; let (flow_id, peers) = self .context .flow_metadata_allocator - .create(cluster_id, partitions) + .create(partitions) .await?; self.data.flow_id = Some(flow_id); self.data.peers = peers; diff --git a/src/common/meta/src/ddl/create_logical_tables.rs b/src/common/meta/src/ddl/create_logical_tables.rs index 7a72de63a1..59882ec491 100644 --- a/src/common/meta/src/ddl/create_logical_tables.rs +++ b/src/common/meta/src/ddl/create_logical_tables.rs @@ -36,9 +36,9 @@ use crate::ddl::DdlContext; use crate::error::{DecodeJsonSnafu, MetadataCorruptionSnafu, Result}; use crate::key::table_route::TableRouteValue; use crate::lock_key::{CatalogLock, SchemaLock, TableLock, TableNameLock}; +use crate::metrics; use crate::rpc::ddl::CreateTableTask; use crate::rpc::router::{find_leaders, RegionRoute}; -use crate::{metrics, ClusterId}; pub struct CreateLogicalTablesProcedure { pub context: DdlContext, @@ -49,7 +49,6 @@ impl CreateLogicalTablesProcedure { pub const TYPE_NAME: &'static str = "metasrv-procedure::CreateLogicalTables"; pub fn new( - cluster_id: ClusterId, tasks: Vec, physical_table_id: TableId, context: DdlContext, @@ -57,7 +56,6 @@ impl CreateLogicalTablesProcedure { Self { context, data: CreateTablesData { - cluster_id, state: CreateTablesState::Prepare, tasks, table_ids_already_exists: vec![], @@ -245,7 +243,6 @@ impl Procedure for CreateLogicalTablesProcedure { #[derive(Debug, Serialize, Deserialize)] pub struct CreateTablesData { - cluster_id: ClusterId, state: CreateTablesState, tasks: Vec, table_ids_already_exists: Vec>, diff --git a/src/common/meta/src/ddl/create_table.rs b/src/common/meta/src/ddl/create_table.rs index 1d171f595e..3bd97827df 100644 --- a/src/common/meta/src/ddl/create_table.rs +++ b/src/common/meta/src/ddl/create_table.rs @@ -37,17 +37,17 @@ use crate::ddl::utils::{ add_peer_context_if_needed, convert_region_routes_to_detecting_regions, handle_retry_error, region_storage_path, }; -use crate::ddl::{DdlContext, TableMetadata, TableMetadataAllocatorContext}; +use crate::ddl::{DdlContext, TableMetadata}; use crate::error::{self, Result}; use crate::key::table_name::TableNameKey; use crate::key::table_route::{PhysicalTableRouteValue, TableRouteValue}; use crate::lock_key::{CatalogLock, SchemaLock, TableNameLock}; +use crate::metrics; use crate::region_keeper::OperatingRegionGuard; use crate::rpc::ddl::CreateTableTask; use crate::rpc::router::{ find_leader_regions, find_leaders, operating_leader_regions, RegionRoute, }; -use crate::{metrics, ClusterId}; pub struct CreateTableProcedure { pub context: DdlContext, pub creator: TableCreator, @@ -56,10 +56,10 @@ pub struct CreateTableProcedure { impl CreateTableProcedure { pub const TYPE_NAME: &'static str = "metasrv-procedure::CreateTable"; - pub fn new(cluster_id: ClusterId, task: CreateTableTask, context: DdlContext) -> Self { + pub fn 
new(task: CreateTableTask, context: DdlContext) -> Self { Self { context, - creator: TableCreator::new(cluster_id, task), + creator: TableCreator::new(task), } } @@ -154,12 +154,7 @@ impl CreateTableProcedure { } = self .context .table_metadata_allocator - .create( - &TableMetadataAllocatorContext { - cluster_id: self.creator.data.cluster_id, - }, - &self.creator.data.task, - ) + .create(&self.creator.data.task) .await?; self.creator .set_allocated_metadata(table_id, table_route, region_wal_options); @@ -268,7 +263,6 @@ impl CreateTableProcedure { /// - Failed to create table metadata. async fn on_create_metadata(&mut self) -> Result { let table_id = self.table_id(); - let cluster_id = self.creator.data.cluster_id; let manager = &self.context.table_metadata_manager; let raw_table_info = self.table_info().clone(); @@ -276,10 +270,8 @@ impl CreateTableProcedure { let region_wal_options = self.region_wal_options()?.clone(); // Safety: the table_route must be allocated. let physical_table_route = self.table_route()?.clone(); - let detecting_regions = convert_region_routes_to_detecting_regions( - cluster_id, - &physical_table_route.region_routes, - ); + let detecting_regions = + convert_region_routes_to_detecting_regions(&physical_table_route.region_routes); let table_route = TableRouteValue::Physical(physical_table_route); manager .create_table_metadata(raw_table_info, table_route, region_wal_options) @@ -351,11 +343,10 @@ pub struct TableCreator { } impl TableCreator { - pub fn new(cluster_id: ClusterId, task: CreateTableTask) -> Self { + pub fn new(task: CreateTableTask) -> Self { Self { data: CreateTableData { state: CreateTableState::Prepare, - cluster_id, task, table_route: None, region_wal_options: None, @@ -421,7 +412,6 @@ pub struct CreateTableData { table_route: Option, /// None stands for not allocated yet. pub region_wal_options: Option>, - pub cluster_id: ClusterId, } impl CreateTableData { diff --git a/src/common/meta/src/ddl/create_view.rs b/src/common/meta/src/ddl/create_view.rs index 093311c413..349d2a84cc 100644 --- a/src/common/meta/src/ddl/create_view.rs +++ b/src/common/meta/src/ddl/create_view.rs @@ -24,13 +24,13 @@ use table::table_reference::TableReference; use crate::cache_invalidator::Context; use crate::ddl::utils::handle_retry_error; -use crate::ddl::{DdlContext, TableMetadata, TableMetadataAllocatorContext}; +use crate::ddl::{DdlContext, TableMetadata}; use crate::error::{self, Result}; use crate::instruction::CacheIdent; use crate::key::table_name::TableNameKey; use crate::lock_key::{CatalogLock, SchemaLock, TableNameLock}; +use crate::metrics; use crate::rpc::ddl::CreateViewTask; -use crate::{metrics, ClusterId}; // The procedure to execute `[CreateViewTask]`. pub struct CreateViewProcedure { @@ -41,12 +41,11 @@ pub struct CreateViewProcedure { impl CreateViewProcedure { pub const TYPE_NAME: &'static str = "metasrv-procedure::CreateView"; - pub fn new(cluster_id: ClusterId, task: CreateViewTask, context: DdlContext) -> Self { + pub fn new(task: CreateViewTask, context: DdlContext) -> Self { Self { context, data: CreateViewData { state: CreateViewState::Prepare, - cluster_id, task, need_update: false, }, @@ -144,12 +143,7 @@ impl CreateViewProcedure { let TableMetadata { table_id, .. 
} = self .context .table_metadata_allocator - .create_view( - &TableMetadataAllocatorContext { - cluster_id: self.data.cluster_id, - }, - &None, - ) + .create_view(&None) .await?; self.data.set_allocated_metadata(table_id, false); } @@ -285,7 +279,6 @@ pub enum CreateViewState { pub struct CreateViewData { pub state: CreateViewState, pub task: CreateViewTask, - pub cluster_id: ClusterId, /// Whether to update the view info. pub need_update: bool, } diff --git a/src/common/meta/src/ddl/drop_database.rs b/src/common/meta/src/ddl/drop_database.rs index 578e7744f1..ce62b7d0c3 100644 --- a/src/common/meta/src/ddl/drop_database.rs +++ b/src/common/meta/src/ddl/drop_database.rs @@ -35,7 +35,6 @@ use crate::ddl::DdlContext; use crate::error::Result; use crate::key::table_name::TableNameValue; use crate::lock_key::{CatalogLock, SchemaLock}; -use crate::ClusterId; pub struct DropDatabaseProcedure { /// The context of procedure runtime. @@ -54,7 +53,6 @@ pub(crate) enum DropTableTarget { /// Context of [DropDatabaseProcedure] execution. pub(crate) struct DropDatabaseContext { - cluster_id: ClusterId, catalog: String, schema: String, drop_if_exists: bool, @@ -87,7 +85,6 @@ impl DropDatabaseProcedure { Self { runtime_context: context, context: DropDatabaseContext { - cluster_id: 0, catalog, schema, drop_if_exists, @@ -108,7 +105,6 @@ impl DropDatabaseProcedure { Ok(Self { runtime_context, context: DropDatabaseContext { - cluster_id: 0, catalog, schema, drop_if_exists, diff --git a/src/common/meta/src/ddl/drop_database/cursor.rs b/src/common/meta/src/ddl/drop_database/cursor.rs index 3b25b42025..fcfe41f5ff 100644 --- a/src/common/meta/src/ddl/drop_database/cursor.rs +++ b/src/common/meta/src/ddl/drop_database/cursor.rs @@ -217,11 +217,10 @@ mod tests { async fn test_next_without_logical_tables() { let node_manager = Arc::new(MockDatanodeManager::new(())); let ddl_context = new_ddl_context(node_manager); - create_physical_table(&ddl_context, 0, "phy").await; + create_physical_table(&ddl_context, "phy").await; // It always starts from Logical let mut state = DropDatabaseCursor::new(DropTableTarget::Logical); let mut ctx = DropDatabaseContext { - cluster_id: 0, catalog: DEFAULT_CATALOG_NAME.to_string(), schema: DEFAULT_SCHEMA_NAME.to_string(), drop_if_exists: false, @@ -252,12 +251,11 @@ mod tests { async fn test_next_with_logical_tables() { let node_manager = Arc::new(MockDatanodeManager::new(())); let ddl_context = new_ddl_context(node_manager); - let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await; - create_logical_table(ddl_context.clone(), 0, physical_table_id, "metric_0").await; + let physical_table_id = create_physical_table(&ddl_context, "phy").await; + create_logical_table(ddl_context.clone(), physical_table_id, "metric_0").await; // It always starts from Logical let mut state = DropDatabaseCursor::new(DropTableTarget::Logical); let mut ctx = DropDatabaseContext { - cluster_id: 0, catalog: DEFAULT_CATALOG_NAME.to_string(), schema: DEFAULT_SCHEMA_NAME.to_string(), drop_if_exists: false, @@ -286,7 +284,6 @@ mod tests { let ddl_context = new_ddl_context(node_manager); let mut state = DropDatabaseCursor::new(DropTableTarget::Physical); let mut ctx = DropDatabaseContext { - cluster_id: 0, catalog: DEFAULT_CATALOG_NAME.to_string(), schema: DEFAULT_SCHEMA_NAME.to_string(), drop_if_exists: false, diff --git a/src/common/meta/src/ddl/drop_database/executor.rs b/src/common/meta/src/ddl/drop_database/executor.rs index 5b57b5cf57..f662e826e7 100644 --- 
a/src/common/meta/src/ddl/drop_database/executor.rs +++ b/src/common/meta/src/ddl/drop_database/executor.rs @@ -98,11 +98,10 @@ impl State for DropDatabaseExecutor { async fn next( &mut self, ddl_ctx: &DdlContext, - ctx: &mut DropDatabaseContext, + _ctx: &mut DropDatabaseContext, ) -> Result<(Box, Status)> { self.register_dropping_regions(ddl_ctx)?; - let executor = - DropTableExecutor::new(ctx.cluster_id, self.table_name.clone(), self.table_id, true); + let executor = DropTableExecutor::new(self.table_name.clone(), self.table_id, true); // Deletes metadata for table permanently. let table_route_value = TableRouteValue::new( self.table_id, @@ -187,7 +186,7 @@ mod tests { async fn test_next_with_physical_table() { let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler)); let ddl_context = new_ddl_context(node_manager); - let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await; + let physical_table_id = create_physical_table(&ddl_context, "phy").await; let (_, table_route) = ddl_context .table_metadata_manager .table_route_manager() @@ -203,7 +202,6 @@ mod tests { DropTableTarget::Physical, ); let mut ctx = DropDatabaseContext { - cluster_id: 0, catalog: DEFAULT_CATALOG_NAME.to_string(), schema: DEFAULT_SCHEMA_NAME.to_string(), drop_if_exists: false, @@ -216,7 +214,6 @@ mod tests { } // Execute again let mut ctx = DropDatabaseContext { - cluster_id: 0, catalog: DEFAULT_CATALOG_NAME.to_string(), schema: DEFAULT_SCHEMA_NAME.to_string(), drop_if_exists: false, @@ -239,8 +236,8 @@ mod tests { async fn test_next_logical_table() { let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler)); let ddl_context = new_ddl_context(node_manager); - let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await; - create_logical_table(ddl_context.clone(), 0, physical_table_id, "metric").await; + let physical_table_id = create_physical_table(&ddl_context, "phy").await; + create_logical_table(ddl_context.clone(), physical_table_id, "metric").await; let logical_table_id = physical_table_id + 1; let (_, table_route) = ddl_context .table_metadata_manager @@ -257,7 +254,6 @@ mod tests { DropTableTarget::Logical, ); let mut ctx = DropDatabaseContext { - cluster_id: 0, catalog: DEFAULT_CATALOG_NAME.to_string(), schema: DEFAULT_SCHEMA_NAME.to_string(), drop_if_exists: false, @@ -270,7 +266,6 @@ mod tests { } // Execute again let mut ctx = DropDatabaseContext { - cluster_id: 0, catalog: DEFAULT_CATALOG_NAME.to_string(), schema: DEFAULT_SCHEMA_NAME.to_string(), drop_if_exists: false, @@ -345,7 +340,7 @@ mod tests { async fn test_next_retryable_err() { let node_manager = Arc::new(MockDatanodeManager::new(RetryErrorDatanodeHandler)); let ddl_context = new_ddl_context(node_manager); - let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await; + let physical_table_id = create_physical_table(&ddl_context, "phy").await; let (_, table_route) = ddl_context .table_metadata_manager .table_route_manager() @@ -360,7 +355,6 @@ mod tests { DropTableTarget::Physical, ); let mut ctx = DropDatabaseContext { - cluster_id: 0, catalog: DEFAULT_CATALOG_NAME.to_string(), schema: DEFAULT_SCHEMA_NAME.to_string(), drop_if_exists: false, @@ -374,7 +368,7 @@ mod tests { async fn test_on_recovery() { let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler)); let ddl_context = new_ddl_context(node_manager); - let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await; + let physical_table_id = 
create_physical_table(&ddl_context, "phy").await; let (_, table_route) = ddl_context .table_metadata_manager .table_route_manager() @@ -390,7 +384,6 @@ mod tests { DropTableTarget::Physical, ); let mut ctx = DropDatabaseContext { - cluster_id: 0, catalog: DEFAULT_CATALOG_NAME.to_string(), schema: DEFAULT_SCHEMA_NAME.to_string(), drop_if_exists: false, diff --git a/src/common/meta/src/ddl/drop_database/metadata.rs b/src/common/meta/src/ddl/drop_database/metadata.rs index 8d338df07c..0058061460 100644 --- a/src/common/meta/src/ddl/drop_database/metadata.rs +++ b/src/common/meta/src/ddl/drop_database/metadata.rs @@ -118,7 +118,6 @@ mod tests { .unwrap(); let mut state = DropDatabaseRemoveMetadata; let mut ctx = DropDatabaseContext { - cluster_id: 0, catalog: "foo".to_string(), schema: "bar".to_string(), drop_if_exists: true, @@ -145,7 +144,6 @@ mod tests { // Schema not exists let mut state = DropDatabaseRemoveMetadata; let mut ctx = DropDatabaseContext { - cluster_id: 0, catalog: "foo".to_string(), schema: "bar".to_string(), drop_if_exists: true, diff --git a/src/common/meta/src/ddl/drop_database/start.rs b/src/common/meta/src/ddl/drop_database/start.rs index deeb8ed215..792eeac8dd 100644 --- a/src/common/meta/src/ddl/drop_database/start.rs +++ b/src/common/meta/src/ddl/drop_database/start.rs @@ -89,7 +89,6 @@ mod tests { let ddl_context = new_ddl_context(node_manager); let mut step = DropDatabaseStart; let mut ctx = DropDatabaseContext { - cluster_id: 0, catalog: "foo".to_string(), schema: "bar".to_string(), drop_if_exists: false, @@ -105,7 +104,6 @@ mod tests { let ddl_context = new_ddl_context(node_manager); let mut state = DropDatabaseStart; let mut ctx = DropDatabaseContext { - cluster_id: 0, catalog: "foo".to_string(), schema: "bar".to_string(), drop_if_exists: true, @@ -128,7 +126,6 @@ mod tests { .unwrap(); let mut state = DropDatabaseStart; let mut ctx = DropDatabaseContext { - cluster_id: 0, catalog: "foo".to_string(), schema: "bar".to_string(), drop_if_exists: false, diff --git a/src/common/meta/src/ddl/drop_flow.rs b/src/common/meta/src/ddl/drop_flow.rs index 59b813c452..38a4622aad 100644 --- a/src/common/meta/src/ddl/drop_flow.rs +++ b/src/common/meta/src/ddl/drop_flow.rs @@ -37,8 +37,8 @@ use crate::instruction::{CacheIdent, DropFlow}; use crate::key::flow::flow_info::FlowInfoValue; use crate::key::flow::flow_route::FlowRouteValue; use crate::lock_key::{CatalogLock, FlowLock}; +use crate::metrics; use crate::rpc::ddl::DropFlowTask; -use crate::{metrics, ClusterId}; /// The procedure for dropping a flow. 
pub struct DropFlowProcedure { @@ -51,12 +51,11 @@ pub struct DropFlowProcedure { impl DropFlowProcedure { pub const TYPE_NAME: &'static str = "metasrv-procedure::DropFlow"; - pub fn new(cluster_id: ClusterId, task: DropFlowTask, context: DdlContext) -> Self { + pub fn new(task: DropFlowTask, context: DdlContext) -> Self { Self { context, data: DropFlowData { state: DropFlowState::Prepare, - cluster_id, task, flow_info_value: None, flow_route_values: vec![], @@ -218,7 +217,6 @@ impl Procedure for DropFlowProcedure { #[derive(Debug, Serialize, Deserialize)] pub(crate) struct DropFlowData { state: DropFlowState, - cluster_id: ClusterId, task: DropFlowTask, pub(crate) flow_info_value: Option, pub(crate) flow_route_values: Vec, diff --git a/src/common/meta/src/ddl/drop_table.rs b/src/common/meta/src/ddl/drop_table.rs index 9f38e5450f..e68cae3382 100644 --- a/src/common/meta/src/ddl/drop_table.rs +++ b/src/common/meta/src/ddl/drop_table.rs @@ -40,10 +40,10 @@ use crate::ddl::DdlContext; use crate::error::{self, Result}; use crate::key::table_route::TableRouteValue; use crate::lock_key::{CatalogLock, SchemaLock, TableLock}; +use crate::metrics; use crate::region_keeper::OperatingRegionGuard; use crate::rpc::ddl::DropTableTask; use crate::rpc::router::{operating_leader_regions, RegionRoute}; -use crate::{metrics, ClusterId}; pub struct DropTableProcedure { /// The context of procedure runtime. @@ -59,8 +59,8 @@ pub struct DropTableProcedure { impl DropTableProcedure { pub const TYPE_NAME: &'static str = "metasrv-procedure::DropTable"; - pub fn new(cluster_id: ClusterId, task: DropTableTask, context: DdlContext) -> Self { - let data = DropTableData::new(cluster_id, task); + pub fn new(task: DropTableTask, context: DdlContext) -> Self { + let data = DropTableData::new(task); let executor = data.build_executor(); Self { context, @@ -268,7 +268,6 @@ impl Procedure for DropTableProcedure { #[derive(Debug, Serialize, Deserialize)] pub struct DropTableData { pub state: DropTableState, - pub cluster_id: ClusterId, pub task: DropTableTask, pub physical_region_routes: Vec, pub physical_table_id: Option, @@ -279,10 +278,9 @@ pub struct DropTableData { } impl DropTableData { - pub fn new(cluster_id: ClusterId, task: DropTableTask) -> Self { + pub fn new(task: DropTableTask) -> Self { Self { state: DropTableState::Prepare, - cluster_id, task, physical_region_routes: vec![], physical_table_id: None, @@ -301,7 +299,6 @@ impl DropTableData { fn build_executor(&self) -> DropTableExecutor { DropTableExecutor::new( - self.cluster_id, self.task.table_name(), self.task.table_id, self.task.drop_if_exists, diff --git a/src/common/meta/src/ddl/drop_table/executor.rs b/src/common/meta/src/ddl/drop_table/executor.rs index 7746f8da85..43ca7ce5ac 100644 --- a/src/common/meta/src/ddl/drop_table/executor.rs +++ b/src/common/meta/src/ddl/drop_table/executor.rs @@ -36,7 +36,6 @@ use crate::instruction::CacheIdent; use crate::key::table_name::TableNameKey; use crate::key::table_route::TableRouteValue; use crate::rpc::router::{find_leader_regions, find_leaders, RegionRoute}; -use crate::ClusterId; /// [Control] indicated to the caller whether to go to the next step. #[derive(Debug)] @@ -54,14 +53,8 @@ impl Control { impl DropTableExecutor { /// Returns the [DropTableExecutor]. 
- pub fn new( - cluster_id: ClusterId, - table: TableName, - table_id: TableId, - drop_if_exists: bool, - ) -> Self { + pub fn new(table: TableName, table_id: TableId, drop_if_exists: bool) -> Self { Self { - cluster_id, table, table_id, drop_if_exists, @@ -74,7 +67,6 @@ impl DropTableExecutor { /// - Invalidates the cache on the Frontend nodes. /// - Drops the regions on the Datanode nodes. pub struct DropTableExecutor { - cluster_id: ClusterId, table: TableName, table_id: TableId, drop_if_exists: bool, @@ -164,7 +156,7 @@ impl DropTableExecutor { let detecting_regions = if table_route_value.is_physical() { // Safety: checked. let regions = table_route_value.region_routes().unwrap(); - convert_region_routes_to_detecting_regions(self.cluster_id, regions) + convert_region_routes_to_detecting_regions(regions) } else { vec![] }; @@ -321,7 +313,6 @@ mod tests { let node_manager = Arc::new(MockDatanodeManager::new(())); let ctx = new_ddl_context(node_manager); let executor = DropTableExecutor::new( - 0, TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "my_table"), 1024, true, @@ -331,7 +322,6 @@ mod tests { // Drops a non-exists table let executor = DropTableExecutor::new( - 0, TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "my_table"), 1024, false, @@ -341,7 +331,6 @@ mod tests { // Drops a exists table let executor = DropTableExecutor::new( - 0, TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "my_table"), 1024, false, diff --git a/src/common/meta/src/ddl/drop_view.rs b/src/common/meta/src/ddl/drop_view.rs index b56b39eee0..d803bf68e5 100644 --- a/src/common/meta/src/ddl/drop_view.rs +++ b/src/common/meta/src/ddl/drop_view.rs @@ -31,8 +31,8 @@ use crate::error::{self, Result}; use crate::instruction::CacheIdent; use crate::key::table_name::TableNameKey; use crate::lock_key::{CatalogLock, SchemaLock, TableLock}; +use crate::metrics; use crate::rpc::ddl::DropViewTask; -use crate::{metrics, ClusterId}; /// The procedure for dropping a view. pub struct DropViewProcedure { @@ -45,12 +45,11 @@ pub struct DropViewProcedure { impl DropViewProcedure { pub const TYPE_NAME: &'static str = "metasrv-procedure::DropView"; - pub fn new(cluster_id: ClusterId, task: DropViewTask, context: DdlContext) -> Self { + pub fn new(task: DropViewTask, context: DdlContext) -> Self { Self { context, data: DropViewData { state: DropViewState::Prepare, - cluster_id, task, }, } @@ -216,7 +215,6 @@ impl Procedure for DropViewProcedure { #[derive(Debug, Serialize, Deserialize)] pub(crate) struct DropViewData { state: DropViewState, - cluster_id: ClusterId, task: DropViewTask, } diff --git a/src/common/meta/src/ddl/flow_meta.rs b/src/common/meta/src/ddl/flow_meta.rs index f92f404882..2e71c97217 100644 --- a/src/common/meta/src/ddl/flow_meta.rs +++ b/src/common/meta/src/ddl/flow_meta.rs @@ -20,7 +20,6 @@ use crate::error::Result; use crate::key::FlowId; use crate::peer::Peer; use crate::sequence::SequenceRef; -use crate::ClusterId; /// The reference of [FlowMetadataAllocator]. pub type FlowMetadataAllocatorRef = Arc; @@ -60,16 +59,9 @@ impl FlowMetadataAllocator { } /// Allocates the [FlowId] and [Peer]s. 
- pub async fn create( - &self, - cluster_id: ClusterId, - partitions: usize, - ) -> Result<(FlowId, Vec)> { + pub async fn create(&self, partitions: usize) -> Result<(FlowId, Vec)> { let flow_id = self.allocate_flow_id().await?; - let peers = self - .partition_peer_allocator - .alloc(cluster_id, partitions) - .await?; + let peers = self.partition_peer_allocator.alloc(partitions).await?; Ok((flow_id, peers)) } @@ -79,7 +71,7 @@ impl FlowMetadataAllocator { #[async_trait] pub trait PartitionPeerAllocator: Send + Sync { /// Allocates [Peer] nodes for storing partitions. - async fn alloc(&self, cluster_id: ClusterId, partitions: usize) -> Result>; + async fn alloc(&self, partitions: usize) -> Result>; } /// [PartitionPeerAllocatorRef] allocates [Peer]s for partitions. @@ -89,7 +81,7 @@ struct NoopPartitionPeerAllocator; #[async_trait] impl PartitionPeerAllocator for NoopPartitionPeerAllocator { - async fn alloc(&self, _cluster_id: ClusterId, partitions: usize) -> Result> { + async fn alloc(&self, partitions: usize) -> Result> { Ok(vec![Peer::default(); partitions]) } } diff --git a/src/common/meta/src/ddl/table_meta.rs b/src/common/meta/src/ddl/table_meta.rs index 4ce4c15894..c3ebfcd3f5 100644 --- a/src/common/meta/src/ddl/table_meta.rs +++ b/src/common/meta/src/ddl/table_meta.rs @@ -20,7 +20,7 @@ use common_telemetry::{debug, info}; use snafu::ensure; use store_api::storage::{RegionId, RegionNumber, TableId}; -use crate::ddl::{TableMetadata, TableMetadataAllocatorContext}; +use crate::ddl::TableMetadata; use crate::error::{self, Result, UnsupportedSnafu}; use crate::key::table_route::PhysicalTableRouteValue; use crate::peer::Peer; @@ -109,7 +109,6 @@ impl TableMetadataAllocator { async fn create_table_route( &self, - ctx: &TableMetadataAllocatorContext, table_id: TableId, task: &CreateTableTask, ) -> Result { @@ -121,7 +120,7 @@ impl TableMetadataAllocator { } ); - let peers = self.peer_allocator.alloc(ctx, regions).await?; + let peers = self.peer_allocator.alloc(regions).await?; let region_routes = task .partitions .iter() @@ -147,11 +146,7 @@ impl TableMetadataAllocator { } /// Create VIEW metadata - pub async fn create_view( - &self, - _ctx: &TableMetadataAllocatorContext, - table_id: &Option, - ) -> Result { + pub async fn create_view(&self, table_id: &Option) -> Result { let table_id = self.allocate_table_id(table_id).await?; Ok(TableMetadata { @@ -160,13 +155,9 @@ impl TableMetadataAllocator { }) } - pub async fn create( - &self, - ctx: &TableMetadataAllocatorContext, - task: &CreateTableTask, - ) -> Result { + pub async fn create(&self, task: &CreateTableTask) -> Result { let table_id = self.allocate_table_id(&task.create_table.table_id).await?; - let table_route = self.create_table_route(ctx, table_id, task).await?; + let table_route = self.create_table_route(table_id, task).await?; let region_wal_options = self.create_wal_options(&table_route)?; debug!( @@ -188,19 +179,14 @@ pub type PeerAllocatorRef = Arc; #[async_trait] pub trait PeerAllocator: Send + Sync { /// Allocates `regions` size [`Peer`]s. 
- async fn alloc(&self, ctx: &TableMetadataAllocatorContext, regions: usize) - -> Result>; + async fn alloc(&self, regions: usize) -> Result>; } struct NoopPeerAllocator; #[async_trait] impl PeerAllocator for NoopPeerAllocator { - async fn alloc( - &self, - _ctx: &TableMetadataAllocatorContext, - regions: usize, - ) -> Result> { + async fn alloc(&self, regions: usize) -> Result> { Ok(vec![Peer::default(); regions]) } } diff --git a/src/common/meta/src/ddl/test_util.rs b/src/common/meta/src/ddl/test_util.rs index 3a82f644e4..4d6a6c63b5 100644 --- a/src/common/meta/src/ddl/test_util.rs +++ b/src/common/meta/src/ddl/test_util.rs @@ -31,10 +31,9 @@ use crate::ddl::test_util::columns::TestColumnDefBuilder; use crate::ddl::test_util::create_table::{ build_raw_table_info_from_expr, TestCreateTableExprBuilder, }; -use crate::ddl::{DdlContext, TableMetadata, TableMetadataAllocatorContext}; +use crate::ddl::{DdlContext, TableMetadata}; use crate::key::table_route::TableRouteValue; use crate::rpc::ddl::CreateTableTask; -use crate::ClusterId; pub async fn create_physical_table_metadata( ddl_context: &DdlContext, @@ -48,11 +47,7 @@ pub async fn create_physical_table_metadata( .unwrap(); } -pub async fn create_physical_table( - ddl_context: &DdlContext, - cluster_id: ClusterId, - name: &str, -) -> TableId { +pub async fn create_physical_table(ddl_context: &DdlContext, name: &str) -> TableId { // Prepares physical table metadata. let mut create_physical_table_task = test_create_physical_table_task(name); let TableMetadata { @@ -61,10 +56,7 @@ pub async fn create_physical_table( .. } = ddl_context .table_metadata_allocator - .create( - &TableMetadataAllocatorContext { cluster_id }, - &create_physical_table_task, - ) + .create(&create_physical_table_task) .await .unwrap(); create_physical_table_task.set_table_id(table_id); @@ -80,15 +72,13 @@ pub async fn create_physical_table( pub async fn create_logical_table( ddl_context: DdlContext, - cluster_id: ClusterId, physical_table_id: TableId, table_name: &str, ) -> TableId { use std::assert_matches::assert_matches; let tasks = vec![test_create_logical_table_task(table_name)]; - let mut procedure = - CreateLogicalTablesProcedure::new(cluster_id, tasks, physical_table_id, ddl_context); + let mut procedure = CreateLogicalTablesProcedure::new(tasks, physical_table_id, ddl_context); let status = procedure.on_prepare().await.unwrap(); assert_matches!(status, Status::Executing { persist: true }); let status = procedure.on_create_metadata().await.unwrap(); diff --git a/src/common/meta/src/ddl/tests/alter_logical_tables.rs b/src/common/meta/src/ddl/tests/alter_logical_tables.rs index 03348c3930..4fa9992a17 100644 --- a/src/common/meta/src/ddl/tests/alter_logical_tables.rs +++ b/src/common/meta/src/ddl/tests/alter_logical_tables.rs @@ -86,7 +86,6 @@ fn make_alter_logical_table_rename_task( async fn test_on_prepare_check_schema() { let node_manager = Arc::new(MockDatanodeManager::new(())); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; let tasks = vec![ make_alter_logical_table_add_column_task( Some("schema1"), @@ -100,8 +99,7 @@ async fn test_on_prepare_check_schema() { ), ]; let physical_table_id = 1024u32; - let mut procedure = - AlterLogicalTablesProcedure::new(cluster_id, tasks, physical_table_id, ddl_context); + let mut procedure = AlterLogicalTablesProcedure::new(tasks, physical_table_id, ddl_context); let err = procedure.on_prepare().await.unwrap_err(); assert_matches!(err, AlterLogicalTablesInvalidArguments { .. 
}); } @@ -110,50 +108,46 @@ async fn test_on_prepare_check_schema() { async fn test_on_prepare_check_alter_kind() { let node_manager = Arc::new(MockDatanodeManager::new(())); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; let tasks = vec![make_alter_logical_table_rename_task( "schema1", "table1", "new_table1", )]; let physical_table_id = 1024u32; - let mut procedure = - AlterLogicalTablesProcedure::new(cluster_id, tasks, physical_table_id, ddl_context); + let mut procedure = AlterLogicalTablesProcedure::new(tasks, physical_table_id, ddl_context); let err = procedure.on_prepare().await.unwrap_err(); assert_matches!(err, AlterLogicalTablesInvalidArguments { .. }); } #[tokio::test] async fn test_on_prepare_different_physical_table() { - let cluster_id = 1; let node_manager = Arc::new(MockDatanodeManager::new(())); let ddl_context = new_ddl_context(node_manager); - let phy1_id = create_physical_table(&ddl_context, cluster_id, "phy1").await; - create_logical_table(ddl_context.clone(), cluster_id, phy1_id, "table1").await; - let phy2_id = create_physical_table(&ddl_context, cluster_id, "phy2").await; - create_logical_table(ddl_context.clone(), cluster_id, phy2_id, "table2").await; + let phy1_id = create_physical_table(&ddl_context, "phy1").await; + create_logical_table(ddl_context.clone(), phy1_id, "table1").await; + let phy2_id = create_physical_table(&ddl_context, "phy2").await; + create_logical_table(ddl_context.clone(), phy2_id, "table2").await; let tasks = vec![ make_alter_logical_table_add_column_task(None, "table1", vec!["column1".to_string()]), make_alter_logical_table_add_column_task(None, "table2", vec!["column2".to_string()]), ]; - let mut procedure = AlterLogicalTablesProcedure::new(cluster_id, tasks, phy1_id, ddl_context); + let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy1_id, ddl_context); let err = procedure.on_prepare().await.unwrap_err(); assert_matches!(err, AlterLogicalTablesInvalidArguments { .. }); } #[tokio::test] async fn test_on_prepare_logical_table_not_exists() { - let cluster_id = 1; let node_manager = Arc::new(MockDatanodeManager::new(())); let ddl_context = new_ddl_context(node_manager); // Creates physical table - let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await; + let phy_id = create_physical_table(&ddl_context, "phy").await; // Creates 3 logical tables - create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await; + create_logical_table(ddl_context.clone(), phy_id, "table1").await; let tasks = vec![ make_alter_logical_table_add_column_task(None, "table1", vec!["column1".to_string()]), @@ -161,23 +155,22 @@ async fn test_on_prepare_logical_table_not_exists() { make_alter_logical_table_add_column_task(None, "table2", vec!["column2".to_string()]), ]; - let mut procedure = AlterLogicalTablesProcedure::new(cluster_id, tasks, phy_id, ddl_context); + let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context); let err = procedure.on_prepare().await.unwrap_err(); assert_matches!(err, TableNotFound { .. 
}); } #[tokio::test] async fn test_on_prepare() { - let cluster_id = 1; let node_manager = Arc::new(MockDatanodeManager::new(())); let ddl_context = new_ddl_context(node_manager); // Creates physical table - let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await; + let phy_id = create_physical_table(&ddl_context, "phy").await; // Creates 3 logical tables - create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await; - create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await; - create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table3").await; + create_logical_table(ddl_context.clone(), phy_id, "table1").await; + create_logical_table(ddl_context.clone(), phy_id, "table2").await; + create_logical_table(ddl_context.clone(), phy_id, "table3").await; let tasks = vec![ make_alter_logical_table_add_column_task(None, "table1", vec!["column1".to_string()]), @@ -185,25 +178,24 @@ async fn test_on_prepare() { make_alter_logical_table_add_column_task(None, "table3", vec!["column3".to_string()]), ]; - let mut procedure = AlterLogicalTablesProcedure::new(cluster_id, tasks, phy_id, ddl_context); + let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context); let result = procedure.on_prepare().await; assert_matches!(result, Ok(Status::Executing { persist: true })); } #[tokio::test] async fn test_on_update_metadata() { - let cluster_id = 1; let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler)); let ddl_context = new_ddl_context(node_manager); // Creates physical table - let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await; + let phy_id = create_physical_table(&ddl_context, "phy").await; // Creates 3 logical tables - create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await; - create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await; - create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table3").await; - create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table4").await; - create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table5").await; + create_logical_table(ddl_context.clone(), phy_id, "table1").await; + create_logical_table(ddl_context.clone(), phy_id, "table2").await; + create_logical_table(ddl_context.clone(), phy_id, "table3").await; + create_logical_table(ddl_context.clone(), phy_id, "table4").await; + create_logical_table(ddl_context.clone(), phy_id, "table5").await; let tasks = vec![ make_alter_logical_table_add_column_task(None, "table1", vec!["new_col".to_string()]), @@ -211,7 +203,7 @@ async fn test_on_update_metadata() { make_alter_logical_table_add_column_task(None, "table3", vec!["new_col".to_string()]), ]; - let mut procedure = AlterLogicalTablesProcedure::new(cluster_id, tasks, phy_id, ddl_context); + let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context); let mut status = procedure.on_prepare().await.unwrap(); assert_matches!(status, Status::Executing { persist: true }); @@ -229,23 +221,21 @@ async fn test_on_update_metadata() { #[tokio::test] async fn test_on_part_duplicate_alter_request() { - let cluster_id = 1; let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler)); let ddl_context = new_ddl_context(node_manager); // Creates physical table - let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await; + let phy_id = create_physical_table(&ddl_context, "phy").await; // Creates 3 logical tables - 
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await; - create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await; + create_logical_table(ddl_context.clone(), phy_id, "table1").await; + create_logical_table(ddl_context.clone(), phy_id, "table2").await; let tasks = vec![ make_alter_logical_table_add_column_task(None, "table1", vec!["col_0".to_string()]), make_alter_logical_table_add_column_task(None, "table2", vec!["col_0".to_string()]), ]; - let mut procedure = - AlterLogicalTablesProcedure::new(cluster_id, tasks, phy_id, ddl_context.clone()); + let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context.clone()); let mut status = procedure.on_prepare().await.unwrap(); assert_matches!(status, Status::Executing { persist: true }); @@ -278,8 +268,7 @@ async fn test_on_part_duplicate_alter_request() { ), ]; - let mut procedure = - AlterLogicalTablesProcedure::new(cluster_id, tasks, phy_id, ddl_context.clone()); + let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context.clone()); let mut status = procedure.on_prepare().await.unwrap(); assert_matches!(status, Status::Executing { persist: true }); diff --git a/src/common/meta/src/ddl/tests/alter_table.rs b/src/common/meta/src/ddl/tests/alter_table.rs index 18294efe00..f3abfab91a 100644 --- a/src/common/meta/src/ddl/tests/alter_table.rs +++ b/src/common/meta/src/ddl/tests/alter_table.rs @@ -59,7 +59,6 @@ fn test_rename_alter_table_task(table_name: &str, new_table_name: &str) -> Alter async fn test_on_prepare_table_exists_err() { let node_manager = Arc::new(MockDatanodeManager::new(())); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; let task = test_create_table_task("foo", 1024); // Puts a value to table name key. 
ddl_context @@ -73,7 +72,7 @@ async fn test_on_prepare_table_exists_err() { .unwrap(); let task = test_rename_alter_table_task("non-exists", "foo"); - let mut procedure = AlterTableProcedure::new(cluster_id, 1024, task, ddl_context).unwrap(); + let mut procedure = AlterTableProcedure::new(1024, task, ddl_context).unwrap(); let err = procedure.on_prepare().await.unwrap_err(); assert_matches!(err.status_code(), StatusCode::TableAlreadyExists); } @@ -82,9 +81,8 @@ async fn test_on_prepare_table_exists_err() { async fn test_on_prepare_table_not_exists_err() { let node_manager = Arc::new(MockDatanodeManager::new(())); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; let task = test_rename_alter_table_task("non-exists", "foo"); - let mut procedure = AlterTableProcedure::new(cluster_id, 1024, task, ddl_context).unwrap(); + let mut procedure = AlterTableProcedure::new(1024, task, ddl_context).unwrap(); let err = procedure.on_prepare().await.unwrap_err(); assert_matches!(err.status_code(), StatusCode::TableNotFound); } @@ -95,7 +93,6 @@ async fn test_on_submit_alter_request() { let datanode_handler = DatanodeWatcher(tx); let node_manager = Arc::new(MockDatanodeManager::new(datanode_handler)); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; let table_id = 1024; let table_name = "foo"; let task = test_create_table_task(table_name, table_id); @@ -144,8 +141,7 @@ async fn test_on_submit_alter_request() { })), }, }; - let mut procedure = - AlterTableProcedure::new(cluster_id, table_id, alter_table_task, ddl_context).unwrap(); + let mut procedure = AlterTableProcedure::new(table_id, alter_table_task, ddl_context).unwrap(); procedure.on_prepare().await.unwrap(); procedure.submit_alter_region_requests().await.unwrap(); @@ -181,7 +177,6 @@ async fn test_on_submit_alter_request_with_outdated_request() { RequestOutdatedErrorDatanodeHandler, )); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; let table_id = 1024; let table_name = "foo"; let task = test_create_table_task(table_name, table_id); @@ -230,8 +225,7 @@ async fn test_on_submit_alter_request_with_outdated_request() { })), }, }; - let mut procedure = - AlterTableProcedure::new(cluster_id, table_id, alter_table_task, ddl_context).unwrap(); + let mut procedure = AlterTableProcedure::new(table_id, alter_table_task, ddl_context).unwrap(); procedure.on_prepare().await.unwrap(); procedure.submit_alter_region_requests().await.unwrap(); } @@ -240,7 +234,6 @@ async fn test_on_submit_alter_request_with_outdated_request() { async fn test_on_update_metadata_rename() { let node_manager = Arc::new(MockDatanodeManager::new(())); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; let table_name = "foo"; let new_table_name = "bar"; let table_id = 1024; @@ -257,8 +250,7 @@ async fn test_on_update_metadata_rename() { .unwrap(); let task = test_rename_alter_table_task(table_name, new_table_name); - let mut procedure = - AlterTableProcedure::new(cluster_id, table_id, task, ddl_context.clone()).unwrap(); + let mut procedure = AlterTableProcedure::new(table_id, task, ddl_context.clone()).unwrap(); procedure.on_prepare().await.unwrap(); procedure.on_update_metadata().await.unwrap(); @@ -291,7 +283,6 @@ async fn test_on_update_metadata_rename() { async fn test_on_update_metadata_add_columns() { let node_manager = Arc::new(MockDatanodeManager::new(())); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; let table_name = "foo"; let table_id = 1024; let task = 
test_create_table_task(table_name, table_id); @@ -335,8 +326,7 @@ async fn test_on_update_metadata_add_columns() { })), }, }; - let mut procedure = - AlterTableProcedure::new(cluster_id, table_id, task, ddl_context.clone()).unwrap(); + let mut procedure = AlterTableProcedure::new(table_id, task, ddl_context.clone()).unwrap(); procedure.on_prepare().await.unwrap(); procedure.submit_alter_region_requests().await.unwrap(); procedure.on_update_metadata().await.unwrap(); @@ -361,7 +351,6 @@ async fn test_on_update_metadata_add_columns() { async fn test_on_update_table_options() { let node_manager = Arc::new(MockDatanodeManager::new(())); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; let table_name = "foo"; let table_id = 1024; let task = test_create_table_task(table_name, table_id); @@ -398,8 +387,7 @@ async fn test_on_update_table_options() { })), }, }; - let mut procedure = - AlterTableProcedure::new(cluster_id, table_id, task, ddl_context.clone()).unwrap(); + let mut procedure = AlterTableProcedure::new(table_id, task, ddl_context.clone()).unwrap(); procedure.on_prepare().await.unwrap(); procedure.submit_alter_region_requests().await.unwrap(); procedure.on_update_metadata().await.unwrap(); diff --git a/src/common/meta/src/ddl/tests/create_flow.rs b/src/common/meta/src/ddl/tests/create_flow.rs index a130e0590c..4c9f86fe09 100644 --- a/src/common/meta/src/ddl/tests/create_flow.rs +++ b/src/common/meta/src/ddl/tests/create_flow.rs @@ -25,11 +25,11 @@ use crate::ddl::create_flow::CreateFlowProcedure; use crate::ddl::test_util::create_table::test_create_table_task; use crate::ddl::test_util::flownode_handler::NaiveFlownodeHandler; use crate::ddl::DdlContext; +use crate::error; use crate::key::table_route::TableRouteValue; use crate::key::FlowId; use crate::rpc::ddl::CreateFlowTask; use crate::test_util::{new_ddl_context, MockFlownodeManager}; -use crate::{error, ClusterId}; pub(crate) fn test_create_flow_task( name: &str, @@ -53,7 +53,6 @@ pub(crate) fn test_create_flow_task( #[tokio::test] async fn test_create_flow_source_table_not_found() { - let cluster_id = 1; let source_table_names = vec![TableName::new( DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, @@ -65,14 +64,13 @@ async fn test_create_flow_source_table_not_found() { let node_manager = Arc::new(MockFlownodeManager::new(NaiveFlownodeHandler)); let ddl_context = new_ddl_context(node_manager); let query_ctx = QueryContext::arc().into(); - let mut procedure = CreateFlowProcedure::new(cluster_id, task, query_ctx, ddl_context); + let mut procedure = CreateFlowProcedure::new(task, query_ctx, ddl_context); let err = procedure.on_prepare().await.unwrap_err(); assert_matches!(err, error::Error::TableNotFound { .. 
}); } pub(crate) async fn create_test_flow( ddl_context: &DdlContext, - cluster_id: ClusterId, flow_name: &str, source_table_names: Vec, sink_table_name: TableName, @@ -84,8 +82,7 @@ pub(crate) async fn create_test_flow( false, ); let query_ctx = QueryContext::arc().into(); - let mut procedure = - CreateFlowProcedure::new(cluster_id, task.clone(), query_ctx, ddl_context.clone()); + let mut procedure = CreateFlowProcedure::new(task.clone(), query_ctx, ddl_context.clone()); let output = execute_procedure_until_done(&mut procedure).await.unwrap(); let flow_id = output.downcast_ref::().unwrap(); @@ -94,7 +91,6 @@ pub(crate) async fn create_test_flow( #[tokio::test] async fn test_create_flow() { - let cluster_id = 1; let table_id = 1024; let source_table_names = vec![TableName::new( DEFAULT_CATALOG_NAME, @@ -118,7 +114,6 @@ async fn test_create_flow() { .unwrap(); let flow_id = create_test_flow( &ddl_context, - cluster_id, "my_flow", source_table_names.clone(), sink_table_name.clone(), @@ -134,8 +129,7 @@ async fn test_create_flow() { true, ); let query_ctx = QueryContext::arc().into(); - let mut procedure = - CreateFlowProcedure::new(cluster_id, task.clone(), query_ctx, ddl_context.clone()); + let mut procedure = CreateFlowProcedure::new(task.clone(), query_ctx, ddl_context.clone()); let output = execute_procedure_until_done(&mut procedure).await.unwrap(); let flow_id = output.downcast_ref::().unwrap(); assert_eq!(*flow_id, 1024); @@ -143,7 +137,7 @@ async fn test_create_flow() { // Creates again let task = test_create_flow_task("my_flow", source_table_names, sink_table_name, false); let query_ctx = QueryContext::arc().into(); - let mut procedure = CreateFlowProcedure::new(cluster_id, task.clone(), query_ctx, ddl_context); + let mut procedure = CreateFlowProcedure::new(task.clone(), query_ctx, ddl_context); let err = procedure.on_prepare().await.unwrap_err(); assert_matches!(err, error::Error::FlowAlreadyExists { .. }); } diff --git a/src/common/meta/src/ddl/tests/create_logical_tables.rs b/src/common/meta/src/ddl/tests/create_logical_tables.rs index c4f65bcac4..a331b32bb0 100644 --- a/src/common/meta/src/ddl/tests/create_logical_tables.rs +++ b/src/common/meta/src/ddl/tests/create_logical_tables.rs @@ -26,7 +26,7 @@ use crate::ddl::test_util::datanode_handler::NaiveDatanodeHandler; use crate::ddl::test_util::{ create_physical_table_metadata, test_create_logical_table_task, test_create_physical_table_task, }; -use crate::ddl::{TableMetadata, TableMetadataAllocatorContext}; +use crate::ddl::TableMetadata; use crate::error::Error; use crate::key::table_route::TableRouteValue; use crate::test_util::{new_ddl_context, MockDatanodeManager}; @@ -35,11 +35,9 @@ use crate::test_util::{new_ddl_context, MockDatanodeManager}; async fn test_on_prepare_physical_table_not_found() { let node_manager = Arc::new(MockDatanodeManager::new(())); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; let tasks = vec![test_create_logical_table_task("foo")]; let physical_table_id = 1024u32; - let mut procedure = - CreateLogicalTablesProcedure::new(cluster_id, tasks, physical_table_id, ddl_context); + let mut procedure = CreateLogicalTablesProcedure::new(tasks, physical_table_id, ddl_context); let err = procedure.on_prepare().await.unwrap_err(); assert_matches!(err, Error::TableRouteNotFound { .. 
}); } @@ -48,7 +46,6 @@ async fn test_on_prepare_physical_table_not_found() { async fn test_on_prepare() { let node_manager = Arc::new(MockDatanodeManager::new(())); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; // Prepares physical table metadata. let mut create_physical_table_task = test_create_physical_table_task("phy_table"); let TableMetadata { @@ -57,10 +54,7 @@ async fn test_on_prepare() { .. } = ddl_context .table_metadata_allocator - .create( - &TableMetadataAllocatorContext { cluster_id }, - &create_physical_table_task, - ) + .create(&create_physical_table_task) .await .unwrap(); create_physical_table_task.set_table_id(table_id); @@ -73,8 +67,7 @@ async fn test_on_prepare() { // The create logical table procedure. let tasks = vec![test_create_logical_table_task("foo")]; let physical_table_id = table_id; - let mut procedure = - CreateLogicalTablesProcedure::new(cluster_id, tasks, physical_table_id, ddl_context); + let mut procedure = CreateLogicalTablesProcedure::new(tasks, physical_table_id, ddl_context); let status = procedure.on_prepare().await.unwrap(); assert_matches!(status, Status::Executing { persist: true }); } @@ -83,7 +76,6 @@ async fn test_on_prepare() { async fn test_on_prepare_logical_table_exists_err() { let node_manager = Arc::new(MockDatanodeManager::new(())); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; // Prepares physical table metadata. let mut create_physical_table_task = test_create_physical_table_task("phy_table"); let TableMetadata { @@ -92,10 +84,7 @@ async fn test_on_prepare_logical_table_exists_err() { .. } = ddl_context .table_metadata_allocator - .create( - &TableMetadataAllocatorContext { cluster_id }, - &create_physical_table_task, - ) + .create(&create_physical_table_task) .await .unwrap(); create_physical_table_task.set_table_id(table_id); @@ -119,7 +108,7 @@ async fn test_on_prepare_logical_table_exists_err() { // The create logical table procedure. let physical_table_id = table_id; let mut procedure = - CreateLogicalTablesProcedure::new(cluster_id, vec![task], physical_table_id, ddl_context); + CreateLogicalTablesProcedure::new(vec![task], physical_table_id, ddl_context); let err = procedure.on_prepare().await.unwrap_err(); assert_matches!(err, Error::TableAlreadyExists { .. }); assert_eq!(err.status_code(), StatusCode::TableAlreadyExists); @@ -129,7 +118,6 @@ async fn test_on_prepare_logical_table_exists_err() { async fn test_on_prepare_with_create_if_table_exists() { let node_manager = Arc::new(MockDatanodeManager::new(())); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; // Prepares physical table metadata. let mut create_physical_table_task = test_create_physical_table_task("phy_table"); let TableMetadata { @@ -138,10 +126,7 @@ async fn test_on_prepare_with_create_if_table_exists() { .. 
} = ddl_context .table_metadata_allocator - .create( - &TableMetadataAllocatorContext { cluster_id }, - &create_physical_table_task, - ) + .create(&create_physical_table_task) .await .unwrap(); create_physical_table_task.set_table_id(table_id); @@ -167,7 +152,7 @@ async fn test_on_prepare_with_create_if_table_exists() { // Sets `create_if_not_exists` task.create_table.create_if_not_exists = true; let mut procedure = - CreateLogicalTablesProcedure::new(cluster_id, vec![task], physical_table_id, ddl_context); + CreateLogicalTablesProcedure::new(vec![task], physical_table_id, ddl_context); let status = procedure.on_prepare().await.unwrap(); let output = status.downcast_output_ref::>().unwrap(); assert_eq!(*output, vec![8192]); @@ -177,7 +162,6 @@ async fn test_on_prepare_with_create_if_table_exists() { async fn test_on_prepare_part_logical_tables_exist() { let node_manager = Arc::new(MockDatanodeManager::new(())); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; // Prepares physical table metadata. let mut create_physical_table_task = test_create_physical_table_task("phy_table"); let TableMetadata { @@ -186,10 +170,7 @@ async fn test_on_prepare_part_logical_tables_exist() { .. } = ddl_context .table_metadata_allocator - .create( - &TableMetadataAllocatorContext { cluster_id }, - &create_physical_table_task, - ) + .create(&create_physical_table_task) .await .unwrap(); create_physical_table_task.set_table_id(table_id); @@ -216,7 +197,6 @@ async fn test_on_prepare_part_logical_tables_exist() { task.create_table.create_if_not_exists = true; let non_exist_task = test_create_logical_table_task("non_exists"); let mut procedure = CreateLogicalTablesProcedure::new( - cluster_id, vec![task, non_exist_task], physical_table_id, ddl_context, @@ -229,7 +209,6 @@ async fn test_on_prepare_part_logical_tables_exist() { async fn test_on_create_metadata() { let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler)); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; // Prepares physical table metadata. let mut create_physical_table_task = test_create_physical_table_task("phy_table"); let TableMetadata { @@ -238,10 +217,7 @@ async fn test_on_create_metadata() { .. } = ddl_context .table_metadata_allocator - .create( - &TableMetadataAllocatorContext { cluster_id }, - &create_physical_table_task, - ) + .create(&create_physical_table_task) .await .unwrap(); create_physical_table_task.set_table_id(table_id); @@ -257,7 +233,6 @@ async fn test_on_create_metadata() { let task = test_create_logical_table_task("foo"); let yet_another_task = test_create_logical_table_task("bar"); let mut procedure = CreateLogicalTablesProcedure::new( - cluster_id, vec![task, yet_another_task], physical_table_id, ddl_context, @@ -279,7 +254,6 @@ async fn test_on_create_metadata() { async fn test_on_create_metadata_part_logical_tables_exist() { let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler)); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; // Prepares physical table metadata. let mut create_physical_table_task = test_create_physical_table_task("phy_table"); let TableMetadata { @@ -288,10 +262,7 @@ async fn test_on_create_metadata_part_logical_tables_exist() { .. 
} = ddl_context .table_metadata_allocator - .create( - &TableMetadataAllocatorContext { cluster_id }, - &create_physical_table_task, - ) + .create(&create_physical_table_task) .await .unwrap(); create_physical_table_task.set_table_id(table_id); @@ -318,7 +289,6 @@ async fn test_on_create_metadata_part_logical_tables_exist() { task.create_table.create_if_not_exists = true; let non_exist_task = test_create_logical_table_task("non_exists"); let mut procedure = CreateLogicalTablesProcedure::new( - cluster_id, vec![task, non_exist_task], physical_table_id, ddl_context, @@ -340,7 +310,6 @@ async fn test_on_create_metadata_part_logical_tables_exist() { async fn test_on_create_metadata_err() { let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler)); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; // Prepares physical table metadata. let mut create_physical_table_task = test_create_physical_table_task("phy_table"); let TableMetadata { @@ -349,10 +318,7 @@ async fn test_on_create_metadata_err() { .. } = ddl_context .table_metadata_allocator - .create( - &TableMetadataAllocatorContext { cluster_id }, - &create_physical_table_task, - ) + .create(&create_physical_table_task) .await .unwrap(); create_physical_table_task.set_table_id(table_id); @@ -368,7 +334,6 @@ async fn test_on_create_metadata_err() { let task = test_create_logical_table_task("foo"); let yet_another_task = test_create_logical_table_task("bar"); let mut procedure = CreateLogicalTablesProcedure::new( - cluster_id, vec![task.clone(), yet_another_task], physical_table_id, ddl_context.clone(), diff --git a/src/common/meta/src/ddl/tests/create_table.rs b/src/common/meta/src/ddl/tests/create_table.rs index b2756ceb40..e62329c780 100644 --- a/src/common/meta/src/ddl/tests/create_table.rs +++ b/src/common/meta/src/ddl/tests/create_table.rs @@ -87,7 +87,6 @@ pub(crate) fn test_create_table_task(name: &str) -> CreateTableTask { async fn test_on_prepare_table_exists_err() { let node_manager = Arc::new(MockDatanodeManager::new(())); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; let task = test_create_table_task("foo"); assert!(!task.create_table.create_if_not_exists); // Puts a value to table name key. @@ -100,7 +99,7 @@ async fn test_on_prepare_table_exists_err() { ) .await .unwrap(); - let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context); + let mut procedure = CreateTableProcedure::new(task, ddl_context); let err = procedure.on_prepare().await.unwrap_err(); assert_matches!(err, Error::TableAlreadyExists { .. }); assert_eq!(err.status_code(), StatusCode::TableAlreadyExists); @@ -110,7 +109,6 @@ async fn test_on_prepare_table_exists_err() { async fn test_on_prepare_with_create_if_table_exists() { let node_manager = Arc::new(MockDatanodeManager::new(())); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; let mut task = test_create_table_task("foo"); task.create_table.create_if_not_exists = true; task.table_info.ident.table_id = 1024; @@ -124,7 +122,7 @@ async fn test_on_prepare_with_create_if_table_exists() { ) .await .unwrap(); - let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context); + let mut procedure = CreateTableProcedure::new(task, ddl_context); let status = procedure.on_prepare().await.unwrap(); assert_matches!(status, Status::Done { output: Some(..) 
}); let table_id = *status.downcast_output_ref::().unwrap(); @@ -135,10 +133,9 @@ async fn test_on_prepare_with_create_if_table_exists() { async fn test_on_prepare_without_create_if_table_exists() { let node_manager = Arc::new(MockDatanodeManager::new(())); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; let mut task = test_create_table_task("foo"); task.create_table.create_if_not_exists = true; - let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context); + let mut procedure = CreateTableProcedure::new(task, ddl_context); let status = procedure.on_prepare().await.unwrap(); assert_matches!(status, Status::Executing { persist: true }); assert_eq!(procedure.table_id(), 1024); @@ -148,11 +145,10 @@ async fn test_on_prepare_without_create_if_table_exists() { async fn test_on_prepare_with_no_partition_err() { let node_manager = Arc::new(MockDatanodeManager::new(())); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; let mut task = test_create_table_task("foo"); task.partitions = vec![]; task.create_table.create_if_not_exists = true; - let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context); + let mut procedure = CreateTableProcedure::new(task, ddl_context); let err = procedure.on_prepare().await.unwrap_err(); assert_matches!(err, Error::Unexpected { .. }); assert!(err @@ -165,10 +161,9 @@ async fn test_on_datanode_create_regions_should_retry() { common_telemetry::init_default_ut_logging(); let node_manager = Arc::new(MockDatanodeManager::new(RetryErrorDatanodeHandler)); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; let task = test_create_table_task("foo"); assert!(!task.create_table.create_if_not_exists); - let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context); + let mut procedure = CreateTableProcedure::new(task, ddl_context); procedure.on_prepare().await.unwrap(); let ctx = ProcedureContext { procedure_id: ProcedureId::random(), @@ -183,10 +178,9 @@ async fn test_on_datanode_create_regions_should_not_retry() { common_telemetry::init_default_ut_logging(); let node_manager = Arc::new(MockDatanodeManager::new(UnexpectedErrorDatanodeHandler)); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; let task = test_create_table_task("foo"); assert!(!task.create_table.create_if_not_exists); - let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context); + let mut procedure = CreateTableProcedure::new(task, ddl_context); procedure.on_prepare().await.unwrap(); let ctx = ProcedureContext { procedure_id: ProcedureId::random(), @@ -201,10 +195,9 @@ async fn test_on_create_metadata_error() { common_telemetry::init_default_ut_logging(); let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler)); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; let task = test_create_table_task("foo"); assert!(!task.create_table.create_if_not_exists); - let mut procedure = CreateTableProcedure::new(cluster_id, task.clone(), ddl_context.clone()); + let mut procedure = CreateTableProcedure::new(task.clone(), ddl_context.clone()); procedure.on_prepare().await.unwrap(); let ctx = ProcedureContext { procedure_id: ProcedureId::random(), @@ -233,10 +226,9 @@ async fn test_on_create_metadata() { common_telemetry::init_default_ut_logging(); let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler)); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; let task = test_create_table_task("foo"); 
assert!(!task.create_table.create_if_not_exists); - let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context); + let mut procedure = CreateTableProcedure::new(task, ddl_context); procedure.on_prepare().await.unwrap(); let ctx = ProcedureContext { procedure_id: ProcedureId::random(), @@ -251,14 +243,12 @@ async fn test_on_create_metadata() { #[tokio::test] async fn test_memory_region_keeper_guard_dropped_on_procedure_done() { - let cluster_id = 1; - let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler)); let kv_backend = Arc::new(MemoryKvBackend::new()); let ddl_context = new_ddl_context_with_kv_backend(node_manager, kv_backend); let task = test_create_table_task("foo"); - let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context.clone()); + let mut procedure = CreateTableProcedure::new(task, ddl_context.clone()); execute_procedure_until(&mut procedure, |p| { p.creator.data.state == CreateTableState::CreateMetadata diff --git a/src/common/meta/src/ddl/tests/create_view.rs b/src/common/meta/src/ddl/tests/create_view.rs index f20022f49d..3f833333d2 100644 --- a/src/common/meta/src/ddl/tests/create_view.rs +++ b/src/common/meta/src/ddl/tests/create_view.rs @@ -97,7 +97,6 @@ pub(crate) fn test_create_view_task(name: &str) -> CreateViewTask { async fn test_on_prepare_view_exists_err() { let node_manager = Arc::new(MockDatanodeManager::new(())); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; let task = test_create_view_task("foo"); assert!(!task.create_view.create_if_not_exists); // Puts a value to table name key. @@ -113,7 +112,7 @@ async fn test_on_prepare_view_exists_err() { ) .await .unwrap(); - let mut procedure = CreateViewProcedure::new(cluster_id, task, ddl_context); + let mut procedure = CreateViewProcedure::new(task, ddl_context); let err = procedure.on_prepare().await.unwrap_err(); assert_matches!(err, Error::ViewAlreadyExists { .. }); assert_eq!(err.status_code(), StatusCode::TableAlreadyExists); @@ -123,7 +122,6 @@ async fn test_on_prepare_view_exists_err() { async fn test_on_prepare_with_create_if_view_exists() { let node_manager = Arc::new(MockDatanodeManager::new(())); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; let mut task = test_create_view_task("foo"); task.create_view.create_if_not_exists = true; task.view_info.ident.table_id = 1024; @@ -140,7 +138,7 @@ async fn test_on_prepare_with_create_if_view_exists() { ) .await .unwrap(); - let mut procedure = CreateViewProcedure::new(cluster_id, task, ddl_context); + let mut procedure = CreateViewProcedure::new(task, ddl_context); let status = procedure.on_prepare().await.unwrap(); assert_matches!(status, Status::Done { output: Some(..) 
}); let table_id = *status.downcast_output_ref::().unwrap(); @@ -151,10 +149,9 @@ async fn test_on_prepare_with_create_if_view_exists() { async fn test_on_prepare_without_create_if_table_exists() { let node_manager = Arc::new(MockDatanodeManager::new(())); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; let mut task = test_create_view_task("foo"); task.create_view.create_if_not_exists = true; - let mut procedure = CreateViewProcedure::new(cluster_id, task, ddl_context); + let mut procedure = CreateViewProcedure::new(task, ddl_context); let status = procedure.on_prepare().await.unwrap(); assert_matches!(status, Status::Executing { persist: true }); assert_eq!(procedure.view_id(), 1024); @@ -165,10 +162,9 @@ async fn test_on_create_metadata() { common_telemetry::init_default_ut_logging(); let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler)); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; let task = test_create_view_task("foo"); assert!(!task.create_view.create_if_not_exists); - let mut procedure = CreateViewProcedure::new(cluster_id, task, ddl_context); + let mut procedure = CreateViewProcedure::new(task, ddl_context); procedure.on_prepare().await.unwrap(); let ctx = ProcedureContext { procedure_id: ProcedureId::random(), @@ -185,10 +181,9 @@ async fn test_replace_view_metadata() { common_telemetry::init_default_ut_logging(); let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler)); let ddl_context = new_ddl_context(node_manager.clone()); - let cluster_id = 1; let task = test_create_view_task("foo"); assert!(!task.create_view.create_if_not_exists); - let mut procedure = CreateViewProcedure::new(cluster_id, task.clone(), ddl_context.clone()); + let mut procedure = CreateViewProcedure::new(task.clone(), ddl_context.clone()); procedure.on_prepare().await.unwrap(); let ctx = ProcedureContext { procedure_id: ProcedureId::random(), @@ -213,7 +208,7 @@ async fn test_replace_view_metadata() { let mut task = test_create_view_task("foo"); // The view already exists, prepare should fail { - let mut procedure = CreateViewProcedure::new(cluster_id, task.clone(), ddl_context.clone()); + let mut procedure = CreateViewProcedure::new(task.clone(), ddl_context.clone()); let err = procedure.on_prepare().await.unwrap_err(); assert_matches!(err, Error::ViewAlreadyExists { .. }); assert_eq!(err.status_code(), StatusCode::TableAlreadyExists); @@ -224,7 +219,7 @@ async fn test_replace_view_metadata() { task.create_view.logical_plan = vec![4, 5, 6]; task.create_view.definition = "new_definition".to_string(); - let mut procedure = CreateViewProcedure::new(cluster_id, task, ddl_context.clone()); + let mut procedure = CreateViewProcedure::new(task, ddl_context.clone()); procedure.on_prepare().await.unwrap(); let ctx = ProcedureContext { procedure_id: ProcedureId::random(), @@ -254,12 +249,11 @@ async fn test_replace_table() { common_telemetry::init_default_ut_logging(); let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler)); let ddl_context = new_ddl_context(node_manager.clone()); - let cluster_id = 1; { // Create a `foo` table. 
let task = test_create_table_task("foo"); - let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context.clone()); + let mut procedure = CreateTableProcedure::new(task, ddl_context.clone()); procedure.on_prepare().await.unwrap(); let ctx = ProcedureContext { procedure_id: ProcedureId::random(), @@ -272,7 +266,7 @@ async fn test_replace_table() { // Try to replace a view named `foo` too. let mut task = test_create_view_task("foo"); task.create_view.or_replace = true; - let mut procedure = CreateViewProcedure::new(cluster_id, task.clone(), ddl_context.clone()); + let mut procedure = CreateViewProcedure::new(task.clone(), ddl_context.clone()); let err = procedure.on_prepare().await.unwrap_err(); assert_matches!(err, Error::TableAlreadyExists { .. }); assert_eq!(err.status_code(), StatusCode::TableAlreadyExists); diff --git a/src/common/meta/src/ddl/tests/drop_database.rs b/src/common/meta/src/ddl/tests/drop_database.rs index 66a5d3e756..8118cda53d 100644 --- a/src/common/meta/src/ddl/tests/drop_database.rs +++ b/src/common/meta/src/ddl/tests/drop_database.rs @@ -31,7 +31,6 @@ use crate::test_util::{new_ddl_context, MockDatanodeManager}; #[tokio::test] async fn test_drop_database_with_logical_tables() { common_telemetry::init_default_ut_logging(); - let cluster_id = 1; let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler)); let ddl_context = new_ddl_context(node_manager); ddl_context @@ -45,11 +44,11 @@ async fn test_drop_database_with_logical_tables() { .await .unwrap(); // Creates physical table - let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await; + let phy_id = create_physical_table(&ddl_context, "phy").await; // Creates 3 logical tables - create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await; - create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await; - create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table3").await; + create_logical_table(ddl_context.clone(), phy_id, "table1").await; + create_logical_table(ddl_context.clone(), phy_id, "table2").await; + create_logical_table(ddl_context.clone(), phy_id, "table3").await; let mut procedure = DropDatabaseProcedure::new( DEFAULT_CATALOG_NAME.to_string(), @@ -80,7 +79,6 @@ async fn test_drop_database_with_logical_tables() { #[tokio::test] async fn test_drop_database_retryable_error() { common_telemetry::init_default_ut_logging(); - let cluster_id = 1; let node_manager = Arc::new(MockDatanodeManager::new(RetryErrorDatanodeHandler)); let ddl_context = new_ddl_context(node_manager); ddl_context @@ -94,11 +92,11 @@ async fn test_drop_database_retryable_error() { .await .unwrap(); // Creates physical table - let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await; + let phy_id = create_physical_table(&ddl_context, "phy").await; // Creates 3 logical tables - create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await; - create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await; - create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table3").await; + create_logical_table(ddl_context.clone(), phy_id, "table1").await; + create_logical_table(ddl_context.clone(), phy_id, "table2").await; + create_logical_table(ddl_context.clone(), phy_id, "table3").await; let mut procedure = DropDatabaseProcedure::new( DEFAULT_CATALOG_NAME.to_string(), @@ -128,7 +126,6 @@ async fn test_drop_database_retryable_error() { #[tokio::test] async fn test_drop_database_recover() { 
common_telemetry::init_default_ut_logging(); - let cluster_id = 1; let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler)); let ddl_context = new_ddl_context(node_manager); ddl_context @@ -142,9 +139,9 @@ async fn test_drop_database_recover() { .await .unwrap(); // Creates a physical table - let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await; + let phy_id = create_physical_table(&ddl_context, "phy").await; // Creates a logical tables - create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await; + create_logical_table(ddl_context.clone(), phy_id, "table1").await; let mut procedure = DropDatabaseProcedure::new( DEFAULT_CATALOG_NAME.to_string(), DEFAULT_SCHEMA_NAME.to_string(), diff --git a/src/common/meta/src/ddl/tests/drop_flow.rs b/src/common/meta/src/ddl/tests/drop_flow.rs index 97b4632a59..9afb36a7d4 100644 --- a/src/common/meta/src/ddl/tests/drop_flow.rs +++ b/src/common/meta/src/ddl/tests/drop_flow.rs @@ -40,12 +40,11 @@ fn test_drop_flow_task(flow_name: &str, flow_id: u32, drop_if_exists: bool) -> D #[tokio::test] async fn test_drop_flow_not_found() { - let cluster_id = 1; let flow_id = 1024; let node_manager = Arc::new(MockFlownodeManager::new(NaiveFlownodeHandler)); let ddl_context = new_ddl_context(node_manager); let task = test_drop_flow_task("my_flow", flow_id, false); - let mut procedure = DropFlowProcedure::new(cluster_id, task, ddl_context); + let mut procedure = DropFlowProcedure::new(task, ddl_context); let err = procedure.on_prepare().await.unwrap_err(); assert_matches!(err, error::Error::FlowNotFound { .. }); } @@ -53,7 +52,6 @@ async fn test_drop_flow_not_found() { #[tokio::test] async fn test_drop_flow() { // create a flow - let cluster_id = 1; let table_id = 1024; let source_table_names = vec![TableName::new( DEFAULT_CATALOG_NAME, @@ -75,27 +73,21 @@ async fn test_drop_flow() { ) .await .unwrap(); - let flow_id = create_test_flow( - &ddl_context, - cluster_id, - "my_flow", - source_table_names, - sink_table_name, - ) - .await; + let flow_id = + create_test_flow(&ddl_context, "my_flow", source_table_names, sink_table_name).await; // Drops the flows let task = test_drop_flow_task("my_flow", flow_id, false); - let mut procedure = DropFlowProcedure::new(cluster_id, task, ddl_context.clone()); + let mut procedure = DropFlowProcedure::new(task, ddl_context.clone()); execute_procedure_until_done(&mut procedure).await; // Drops if not exists let task = test_drop_flow_task("my_flow", flow_id, true); - let mut procedure = DropFlowProcedure::new(cluster_id, task, ddl_context.clone()); + let mut procedure = DropFlowProcedure::new(task, ddl_context.clone()); execute_procedure_until_done(&mut procedure).await; // Drops again let task = test_drop_flow_task("my_flow", flow_id, false); - let mut procedure = DropFlowProcedure::new(cluster_id, task, ddl_context); + let mut procedure = DropFlowProcedure::new(task, ddl_context); let err = procedure.on_prepare().await.unwrap_err(); assert_matches!(err, error::Error::FlowNotFound { .. 
}); } diff --git a/src/common/meta/src/ddl/tests/drop_table.rs b/src/common/meta/src/ddl/tests/drop_table.rs index c3a5f5875c..3e09f65422 100644 --- a/src/common/meta/src/ddl/tests/drop_table.rs +++ b/src/common/meta/src/ddl/tests/drop_table.rs @@ -35,7 +35,7 @@ use crate::ddl::test_util::{ create_logical_table, create_physical_table, create_physical_table_metadata, test_create_logical_table_task, test_create_physical_table_task, }; -use crate::ddl::{TableMetadata, TableMetadataAllocatorContext}; +use crate::ddl::TableMetadata; use crate::key::table_route::TableRouteValue; use crate::kv_backend::memory::MemoryKvBackend; use crate::peer::Peer; @@ -47,7 +47,6 @@ use crate::test_util::{new_ddl_context, new_ddl_context_with_kv_backend, MockDat async fn test_on_prepare_table_not_exists_err() { let node_manager = Arc::new(MockDatanodeManager::new(())); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; let table_name = "foo"; let table_id = 1024; let task = test_create_table_task(table_name, table_id); @@ -63,7 +62,7 @@ async fn test_on_prepare_table_not_exists_err() { .unwrap(); let task = new_drop_table_task("bar", table_id, false); - let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context); + let mut procedure = DropTableProcedure::new(task, ddl_context); let err = procedure.on_prepare().await.unwrap_err(); assert_eq!(err.status_code(), StatusCode::TableNotFound); } @@ -72,7 +71,6 @@ async fn test_on_prepare_table_not_exists_err() { async fn test_on_prepare_table() { let node_manager = Arc::new(MockDatanodeManager::new(())); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; let table_name = "foo"; let table_id = 1024; let task = test_create_table_task(table_name, table_id); @@ -89,13 +87,13 @@ async fn test_on_prepare_table() { let task = new_drop_table_task("bar", table_id, true); // Drop if exists - let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone()); + let mut procedure = DropTableProcedure::new(task, ddl_context.clone()); procedure.on_prepare().await.unwrap(); assert!(!procedure.rollback_supported()); let task = new_drop_table_task(table_name, table_id, false); // Drop table - let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context); + let mut procedure = DropTableProcedure::new(task, ddl_context); procedure.on_prepare().await.unwrap(); } @@ -105,7 +103,6 @@ async fn test_on_datanode_drop_regions() { let datanode_handler = DatanodeWatcher(tx); let node_manager = Arc::new(MockDatanodeManager::new(datanode_handler)); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; let table_id = 1024; let table_name = "foo"; let task = test_create_table_task(table_name, table_id); @@ -144,7 +141,7 @@ async fn test_on_datanode_drop_regions() { let task = new_drop_table_task(table_name, table_id, false); // Drop table - let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context); + let mut procedure = DropTableProcedure::new(task, ddl_context); procedure.on_prepare().await.unwrap(); procedure.on_datanode_drop_regions().await.unwrap(); @@ -179,7 +176,6 @@ async fn test_on_rollback() { let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler)); let kv_backend = Arc::new(MemoryKvBackend::new()); let ddl_context = new_ddl_context_with_kv_backend(node_manager, kv_backend.clone()); - let cluster_id = 1; // Prepares physical table metadata. 
let mut create_physical_table_task = test_create_physical_table_task("phy_table"); let TableMetadata { @@ -188,10 +184,7 @@ async fn test_on_rollback() { .. } = ddl_context .table_metadata_allocator - .create( - &TableMetadataAllocatorContext { cluster_id }, - &create_physical_table_task, - ) + .create(&create_physical_table_task) .await .unwrap(); create_physical_table_task.set_table_id(table_id); @@ -205,12 +198,8 @@ async fn test_on_rollback() { let physical_table_id = table_id; // Creates the logical table metadata. let task = test_create_logical_table_task("foo"); - let mut procedure = CreateLogicalTablesProcedure::new( - cluster_id, - vec![task], - physical_table_id, - ddl_context.clone(), - ); + let mut procedure = + CreateLogicalTablesProcedure::new(vec![task], physical_table_id, ddl_context.clone()); procedure.on_prepare().await.unwrap(); let ctx = new_test_procedure_context(); procedure.execute(&ctx).await.unwrap(); @@ -223,7 +212,7 @@ async fn test_on_rollback() { // Drops the physical table { let task = new_drop_table_task("phy_table", physical_table_id, false); - let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone()); + let mut procedure = DropTableProcedure::new(task, ddl_context.clone()); procedure.on_prepare().await.unwrap(); assert!(procedure.rollback_supported()); procedure.on_delete_metadata().await.unwrap(); @@ -238,7 +227,7 @@ async fn test_on_rollback() { // Drops the logical table let task = new_drop_table_task("foo", table_ids[0], false); - let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone()); + let mut procedure = DropTableProcedure::new(task, ddl_context.clone()); procedure.on_prepare().await.unwrap(); assert!(!procedure.rollback_supported()); } @@ -255,18 +244,15 @@ fn new_drop_table_task(table_name: &str, table_id: TableId, drop_if_exists: bool #[tokio::test] async fn test_memory_region_keeper_guard_dropped_on_procedure_done() { - let cluster_id = 1; - let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler)); let kv_backend = Arc::new(MemoryKvBackend::new()); let ddl_context = new_ddl_context_with_kv_backend(node_manager, kv_backend); - let physical_table_id = create_physical_table(&ddl_context, cluster_id, "t").await; - let logical_table_id = - create_logical_table(ddl_context.clone(), cluster_id, physical_table_id, "s").await; + let physical_table_id = create_physical_table(&ddl_context, "t").await; + let logical_table_id = create_logical_table(ddl_context.clone(), physical_table_id, "s").await; let inner_test = |task: DropTableTask| async { - let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone()); + let mut procedure = DropTableProcedure::new(task, ddl_context.clone()); execute_procedure_until(&mut procedure, |p| { p.data.state == DropTableState::InvalidateTableCache }) @@ -304,14 +290,13 @@ async fn test_from_json() { (DropTableState::DatanodeDropRegions, 1, 1), (DropTableState::DeleteTombstone, 1, 0), ] { - let cluster_id = 1; let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler)); let kv_backend = Arc::new(MemoryKvBackend::new()); let ddl_context = new_ddl_context_with_kv_backend(node_manager, kv_backend); - let physical_table_id = create_physical_table(&ddl_context, cluster_id, "t").await; + let physical_table_id = create_physical_table(&ddl_context, "t").await; let task = new_drop_table_task("t", physical_table_id, false); - let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone()); + let mut procedure = 
DropTableProcedure::new(task, ddl_context.clone()); execute_procedure_until(&mut procedure, |p| p.data.state == state).await; let data = procedure.dump().unwrap(); assert_eq!( @@ -334,14 +319,13 @@ async fn test_from_json() { let num_operating_regions = 0; let num_operating_regions_after_recovery = 0; - let cluster_id = 1; let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler)); let kv_backend = Arc::new(MemoryKvBackend::new()); let ddl_context = new_ddl_context_with_kv_backend(node_manager, kv_backend); - let physical_table_id = create_physical_table(&ddl_context, cluster_id, "t").await; + let physical_table_id = create_physical_table(&ddl_context, "t").await; let task = new_drop_table_task("t", physical_table_id, false); - let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone()); + let mut procedure = DropTableProcedure::new(task, ddl_context.clone()); execute_procedure_until_done(&mut procedure).await; let data = procedure.dump().unwrap(); assert_eq!( diff --git a/src/common/meta/src/ddl/tests/drop_view.rs b/src/common/meta/src/ddl/tests/drop_view.rs index 1e0cb66859..f6e8391e35 100644 --- a/src/common/meta/src/ddl/tests/drop_view.rs +++ b/src/common/meta/src/ddl/tests/drop_view.rs @@ -41,7 +41,6 @@ fn new_drop_view_task(view: &str, view_id: TableId, drop_if_exists: bool) -> Dro async fn test_on_prepare_view_not_exists_err() { let node_manager = Arc::new(MockDatanodeManager::new(())); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; let view_id = 1024; let mut task = test_create_view_task("foo"); task.view_info.ident.table_id = view_id; @@ -60,7 +59,7 @@ async fn test_on_prepare_view_not_exists_err() { .unwrap(); let task = new_drop_view_task("bar", view_id, false); - let mut procedure = DropViewProcedure::new(cluster_id, task, ddl_context); + let mut procedure = DropViewProcedure::new(task, ddl_context); let err = procedure.on_prepare().await.unwrap_err(); assert_eq!(err.status_code(), StatusCode::TableNotFound); } @@ -69,7 +68,6 @@ async fn test_on_prepare_view_not_exists_err() { async fn test_on_prepare_not_view_err() { let node_manager = Arc::new(MockDatanodeManager::new(())); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; let view_id = 1024; let view_name = "foo"; let task = test_create_table_task(view_name, view_id); @@ -85,7 +83,7 @@ async fn test_on_prepare_not_view_err() { .unwrap(); let task = new_drop_view_task(view_name, view_id, false); - let mut procedure = DropViewProcedure::new(cluster_id, task, ddl_context); + let mut procedure = DropViewProcedure::new(task, ddl_context); // It's not a view, expect error let err = procedure.on_prepare().await.unwrap_err(); assert_eq!(err.status_code(), StatusCode::InvalidArguments); @@ -95,7 +93,6 @@ async fn test_on_prepare_not_view_err() { async fn test_on_prepare_success() { let node_manager = Arc::new(MockDatanodeManager::new(())); let ddl_context = new_ddl_context(node_manager); - let cluster_id = 1; let view_id = 1024; let view_name = "foo"; let mut task = test_create_view_task("foo"); @@ -116,12 +113,12 @@ async fn test_on_prepare_success() { let task = new_drop_view_task("bar", view_id, true); // Drop if exists - let mut procedure = DropViewProcedure::new(cluster_id, task, ddl_context.clone()); + let mut procedure = DropViewProcedure::new(task, ddl_context.clone()); procedure.on_prepare().await.unwrap(); let task = new_drop_view_task(view_name, view_id, false); // Prepare success - let mut procedure = DropViewProcedure::new(cluster_id, 
task, ddl_context);
+    let mut procedure = DropViewProcedure::new(task, ddl_context);
     procedure.on_prepare().await.unwrap();
     assert_eq!(DropViewState::DeleteMetadata, procedure.state());
 }
@@ -130,7 +127,6 @@ async fn test_drop_view_success() {
     let node_manager = Arc::new(MockDatanodeManager::new(()));
     let ddl_context = new_ddl_context(node_manager);
-    let cluster_id = 1;
     let view_id = 1024;
     let view_name = "foo";
     let mut task = test_create_view_task("foo");
@@ -159,7 +155,7 @@ async fn test_drop_view_success() {
     let task = new_drop_view_task(view_name, view_id, false);
     // Prepare success
-    let mut procedure = DropViewProcedure::new(cluster_id, task, ddl_context.clone());
+    let mut procedure = DropViewProcedure::new(task, ddl_context.clone());
     execute_procedure_until_done(&mut procedure).await;
     assert_eq!(DropViewState::InvalidateViewCache, procedure.state());
@@ -174,7 +170,7 @@ async fn test_drop_view_success() {
     // Drop again
     let task = new_drop_view_task(view_name, view_id, false);
-    let mut procedure = DropViewProcedure::new(cluster_id, task, ddl_context);
+    let mut procedure = DropViewProcedure::new(task, ddl_context);
     let err = procedure.on_prepare().await.unwrap_err();
     assert_eq!(err.status_code(), StatusCode::TableNotFound);
 }
diff --git a/src/common/meta/src/ddl/truncate_table.rs b/src/common/meta/src/ddl/truncate_table.rs
index edc7321e09..c0608dc3b3 100644
--- a/src/common/meta/src/ddl/truncate_table.rs
+++ b/src/common/meta/src/ddl/truncate_table.rs
@@ -39,9 +39,9 @@ use crate::key::table_info::TableInfoValue;
 use crate::key::table_name::TableNameKey;
 use crate::key::DeserializedValueWithBytes;
 use crate::lock_key::{CatalogLock, SchemaLock, TableLock};
+use crate::metrics;
 use crate::rpc::ddl::TruncateTableTask;
 use crate::rpc::router::{find_leader_regions, find_leaders, RegionRoute};
-use crate::{metrics, ClusterId};
 pub struct TruncateTableProcedure {
     context: DdlContext,
@@ -91,7 +91,6 @@ impl TruncateTableProcedure {
     pub(crate) const TYPE_NAME: &'static str = "metasrv-procedure::TruncateTable";
     pub(crate) fn new(
-        cluster_id: ClusterId,
         task: TruncateTableTask,
         table_info_value: DeserializedValueWithBytes<TableInfoValue>,
         region_routes: Vec<RegionRoute>,
         context: DdlContext,
     ) -> Self {
         Self {
             context,
-            data: TruncateTableData::new(cluster_id, task, table_info_value, region_routes),
+            data: TruncateTableData::new(task, table_info_value, region_routes),
         }
     }
@@ -189,7 +188,6 @@ impl TruncateTableProcedure {
 #[derive(Debug, Serialize, Deserialize)]
 pub struct TruncateTableData {
     state: TruncateTableState,
-    cluster_id: ClusterId,
     task: TruncateTableTask,
     table_info_value: DeserializedValueWithBytes<TableInfoValue>,
     region_routes: Vec<RegionRoute>,
 }
 impl TruncateTableData {
     pub fn new(
-        cluster_id: ClusterId,
         task: TruncateTableTask,
         table_info_value: DeserializedValueWithBytes<TableInfoValue>,
         region_routes: Vec<RegionRoute>,
     ) -> Self {
         Self {
             state: TruncateTableState::Prepare,
-            cluster_id,
             task,
             table_info_value,
             region_routes,
diff --git a/src/common/meta/src/ddl/utils.rs b/src/common/meta/src/ddl/utils.rs
index f6852db753..a0973855f6 100644
--- a/src/common/meta/src/ddl/utils.rs
+++ b/src/common/meta/src/ddl/utils.rs
@@ -34,7 +34,6 @@ use crate::key::TableMetadataManagerRef;
 use crate::peer::Peer;
 use crate::rpc::ddl::CreateTableTask;
 use crate::rpc::router::RegionRoute;
-use crate::ClusterId;
 /// Adds [Peer] context if the error is unretryable.
pub fn add_peer_context_if_needed(datanode: Peer) -> impl FnOnce(Error) -> Error { @@ -144,7 +143,6 @@ pub async fn get_physical_table_id( /// Converts a list of [`RegionRoute`] to a list of [`DetectingRegion`]. pub fn convert_region_routes_to_detecting_regions( - cluster_id: ClusterId, region_routes: &[RegionRoute], ) -> Vec { region_routes @@ -153,7 +151,7 @@ pub fn convert_region_routes_to_detecting_regions( route .leader_peer .as_ref() - .map(|peer| (cluster_id, peer.id, route.region.id)) + .map(|peer| (peer.id, route.region.id)) }) .collect::>() } diff --git a/src/common/meta/src/ddl_manager.rs b/src/common/meta/src/ddl_manager.rs index bac640d401..fa01e9f700 100644 --- a/src/common/meta/src/ddl_manager.rs +++ b/src/common/meta/src/ddl_manager.rs @@ -60,7 +60,6 @@ use crate::rpc::ddl::{ use crate::rpc::procedure; use crate::rpc::procedure::{MigrateRegionRequest, MigrateRegionResponse, ProcedureStateResponse}; use crate::rpc::router::RegionRoute; -use crate::ClusterId; pub type DdlManagerRef = Arc; @@ -154,13 +153,12 @@ impl DdlManager { #[tracing::instrument(skip_all)] pub async fn submit_alter_table_task( &self, - cluster_id: ClusterId, table_id: TableId, alter_table_task: AlterTableTask, ) -> Result<(ProcedureId, Option)> { let context = self.create_context(); - let procedure = AlterTableProcedure::new(cluster_id, table_id, alter_table_task, context)?; + let procedure = AlterTableProcedure::new(table_id, alter_table_task, context)?; let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure)); @@ -171,12 +169,11 @@ impl DdlManager { #[tracing::instrument(skip_all)] pub async fn submit_create_table_task( &self, - cluster_id: ClusterId, create_table_task: CreateTableTask, ) -> Result<(ProcedureId, Option)> { let context = self.create_context(); - let procedure = CreateTableProcedure::new(cluster_id, create_table_task, context); + let procedure = CreateTableProcedure::new(create_table_task, context); let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure)); @@ -187,12 +184,11 @@ impl DdlManager { #[tracing::instrument(skip_all)] pub async fn submit_create_view_task( &self, - cluster_id: ClusterId, create_view_task: CreateViewTask, ) -> Result<(ProcedureId, Option)> { let context = self.create_context(); - let procedure = CreateViewProcedure::new(cluster_id, create_view_task, context); + let procedure = CreateViewProcedure::new(create_view_task, context); let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure)); @@ -203,18 +199,13 @@ impl DdlManager { #[tracing::instrument(skip_all)] pub async fn submit_create_logical_table_tasks( &self, - cluster_id: ClusterId, create_table_tasks: Vec, physical_table_id: TableId, ) -> Result<(ProcedureId, Option)> { let context = self.create_context(); - let procedure = CreateLogicalTablesProcedure::new( - cluster_id, - create_table_tasks, - physical_table_id, - context, - ); + let procedure = + CreateLogicalTablesProcedure::new(create_table_tasks, physical_table_id, context); let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure)); @@ -225,18 +216,13 @@ impl DdlManager { #[tracing::instrument(skip_all)] pub async fn submit_alter_logical_table_tasks( &self, - cluster_id: ClusterId, alter_table_tasks: Vec, physical_table_id: TableId, ) -> Result<(ProcedureId, Option)> { let context = self.create_context(); - let procedure = AlterLogicalTablesProcedure::new( - cluster_id, - alter_table_tasks, - physical_table_id, - context, - ); + let procedure = + 
AlterLogicalTablesProcedure::new(alter_table_tasks, physical_table_id, context); let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure)); @@ -247,12 +233,11 @@ impl DdlManager { #[tracing::instrument(skip_all)] pub async fn submit_drop_table_task( &self, - cluster_id: ClusterId, drop_table_task: DropTableTask, ) -> Result<(ProcedureId, Option)> { let context = self.create_context(); - let procedure = DropTableProcedure::new(cluster_id, drop_table_task, context); + let procedure = DropTableProcedure::new(drop_table_task, context); let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure)); @@ -263,7 +248,6 @@ impl DdlManager { #[tracing::instrument(skip_all)] pub async fn submit_create_database( &self, - _cluster_id: ClusterId, CreateDatabaseTask { catalog, schema, @@ -283,7 +267,6 @@ impl DdlManager { #[tracing::instrument(skip_all)] pub async fn submit_drop_database( &self, - _cluster_id: ClusterId, DropDatabaseTask { catalog, schema, @@ -299,11 +282,10 @@ impl DdlManager { pub async fn submit_alter_database( &self, - cluster_id: ClusterId, alter_database_task: AlterDatabaseTask, ) -> Result<(ProcedureId, Option)> { let context = self.create_context(); - let procedure = AlterDatabaseProcedure::new(cluster_id, alter_database_task, context)?; + let procedure = AlterDatabaseProcedure::new(alter_database_task, context)?; let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure)); self.submit_procedure(procedure_with_id).await @@ -313,12 +295,11 @@ impl DdlManager { #[tracing::instrument(skip_all)] pub async fn submit_create_flow_task( &self, - cluster_id: ClusterId, create_flow: CreateFlowTask, query_context: QueryContext, ) -> Result<(ProcedureId, Option)> { let context = self.create_context(); - let procedure = CreateFlowProcedure::new(cluster_id, create_flow, query_context, context); + let procedure = CreateFlowProcedure::new(create_flow, query_context, context); let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure)); self.submit_procedure(procedure_with_id).await @@ -328,11 +309,10 @@ impl DdlManager { #[tracing::instrument(skip_all)] pub async fn submit_drop_flow_task( &self, - cluster_id: ClusterId, drop_flow: DropFlowTask, ) -> Result<(ProcedureId, Option)> { let context = self.create_context(); - let procedure = DropFlowProcedure::new(cluster_id, drop_flow, context); + let procedure = DropFlowProcedure::new(drop_flow, context); let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure)); self.submit_procedure(procedure_with_id).await @@ -342,11 +322,10 @@ impl DdlManager { #[tracing::instrument(skip_all)] pub async fn submit_drop_view_task( &self, - cluster_id: ClusterId, drop_view: DropViewTask, ) -> Result<(ProcedureId, Option)> { let context = self.create_context(); - let procedure = DropViewProcedure::new(cluster_id, drop_view, context); + let procedure = DropViewProcedure::new(drop_view, context); let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure)); self.submit_procedure(procedure_with_id).await @@ -356,14 +335,12 @@ impl DdlManager { #[tracing::instrument(skip_all)] pub async fn submit_truncate_table_task( &self, - cluster_id: ClusterId, truncate_table_task: TruncateTableTask, table_info_value: DeserializedValueWithBytes, region_routes: Vec, ) -> Result<(ProcedureId, Option)> { let context = self.create_context(); let procedure = TruncateTableProcedure::new( - cluster_id, truncate_table_task, table_info_value, region_routes, @@ -397,7 +374,6 @@ impl DdlManager 
{ async fn handle_truncate_table_task( ddl_manager: &DdlManager, - cluster_id: ClusterId, truncate_table_task: TruncateTableTask, ) -> Result { let table_id = truncate_table_task.table_id; @@ -416,12 +392,7 @@ async fn handle_truncate_table_task( let table_route = table_route_value.into_inner().region_routes()?.clone(); let (id, _) = ddl_manager - .submit_truncate_table_task( - cluster_id, - truncate_table_task, - table_info_value, - table_route, - ) + .submit_truncate_table_task(truncate_table_task, table_info_value, table_route) .await?; info!("Table: {table_id} is truncated via procedure_id {id:?}"); @@ -434,7 +405,6 @@ async fn handle_truncate_table_task( async fn handle_alter_table_task( ddl_manager: &DdlManager, - cluster_id: ClusterId, alter_table_task: AlterTableTask, ) -> Result { let table_ref = alter_table_task.table_ref(); @@ -468,7 +438,7 @@ async fn handle_alter_table_task( ); let (id, _) = ddl_manager - .submit_alter_table_task(cluster_id, table_id, alter_table_task) + .submit_alter_table_task(table_id, alter_table_task) .await?; info!("Table: {table_id} is altered via procedure_id {id:?}"); @@ -481,13 +451,10 @@ async fn handle_alter_table_task( async fn handle_drop_table_task( ddl_manager: &DdlManager, - cluster_id: ClusterId, drop_table_task: DropTableTask, ) -> Result { let table_id = drop_table_task.table_id; - let (id, _) = ddl_manager - .submit_drop_table_task(cluster_id, drop_table_task) - .await?; + let (id, _) = ddl_manager.submit_drop_table_task(drop_table_task).await?; info!("Table: {table_id} is dropped via procedure_id {id:?}"); @@ -499,11 +466,10 @@ async fn handle_drop_table_task( async fn handle_create_table_task( ddl_manager: &DdlManager, - cluster_id: ClusterId, create_table_task: CreateTableTask, ) -> Result { let (id, output) = ddl_manager - .submit_create_table_task(cluster_id, create_table_task) + .submit_create_table_task(create_table_task) .await?; let procedure_id = id.to_string(); @@ -525,7 +491,6 @@ async fn handle_create_table_task( async fn handle_create_logical_table_tasks( ddl_manager: &DdlManager, - cluster_id: ClusterId, create_table_tasks: Vec, ) -> Result { ensure!( @@ -542,7 +507,7 @@ async fn handle_create_logical_table_tasks( let num_logical_tables = create_table_tasks.len(); let (id, output) = ddl_manager - .submit_create_logical_table_tasks(cluster_id, create_table_tasks, physical_table_id) + .submit_create_logical_table_tasks(create_table_tasks, physical_table_id) .await?; info!("{num_logical_tables} logical tables on physical table: {physical_table_id:?} is created via procedure_id {id:?}"); @@ -568,11 +533,10 @@ async fn handle_create_logical_table_tasks( async fn handle_create_database_task( ddl_manager: &DdlManager, - cluster_id: ClusterId, create_database_task: CreateDatabaseTask, ) -> Result { let (id, _) = ddl_manager - .submit_create_database(cluster_id, create_database_task.clone()) + .submit_create_database(create_database_task.clone()) .await?; let procedure_id = id.to_string(); @@ -589,11 +553,10 @@ async fn handle_create_database_task( async fn handle_drop_database_task( ddl_manager: &DdlManager, - cluster_id: ClusterId, drop_database_task: DropDatabaseTask, ) -> Result { let (id, _) = ddl_manager - .submit_drop_database(cluster_id, drop_database_task.clone()) + .submit_drop_database(drop_database_task.clone()) .await?; let procedure_id = id.to_string(); @@ -610,11 +573,10 @@ async fn handle_drop_database_task( async fn handle_alter_database_task( ddl_manager: &DdlManager, - cluster_id: ClusterId, alter_database_task: 
AlterDatabaseTask, ) -> Result { let (id, _) = ddl_manager - .submit_alter_database(cluster_id, alter_database_task.clone()) + .submit_alter_database(alter_database_task.clone()) .await?; let procedure_id = id.to_string(); @@ -632,11 +594,10 @@ async fn handle_alter_database_task( async fn handle_drop_flow_task( ddl_manager: &DdlManager, - cluster_id: ClusterId, drop_flow_task: DropFlowTask, ) -> Result { let (id, _) = ddl_manager - .submit_drop_flow_task(cluster_id, drop_flow_task.clone()) + .submit_drop_flow_task(drop_flow_task.clone()) .await?; let procedure_id = id.to_string(); @@ -653,11 +614,10 @@ async fn handle_drop_flow_task( async fn handle_drop_view_task( ddl_manager: &DdlManager, - cluster_id: ClusterId, drop_view_task: DropViewTask, ) -> Result { let (id, _) = ddl_manager - .submit_drop_view_task(cluster_id, drop_view_task.clone()) + .submit_drop_view_task(drop_view_task.clone()) .await?; let procedure_id = id.to_string(); @@ -675,12 +635,11 @@ async fn handle_drop_view_task( async fn handle_create_flow_task( ddl_manager: &DdlManager, - cluster_id: ClusterId, create_flow_task: CreateFlowTask, query_context: QueryContext, ) -> Result { let (id, output) = ddl_manager - .submit_create_flow_task(cluster_id, create_flow_task.clone(), query_context) + .submit_create_flow_task(create_flow_task.clone(), query_context) .await?; let procedure_id = id.to_string(); @@ -712,7 +671,6 @@ async fn handle_create_flow_task( async fn handle_alter_logical_table_tasks( ddl_manager: &DdlManager, - cluster_id: ClusterId, alter_table_tasks: Vec, ) -> Result { ensure!( @@ -733,7 +691,7 @@ async fn handle_alter_logical_table_tasks( let num_logical_tables = alter_table_tasks.len(); let (id, _) = ddl_manager - .submit_alter_logical_table_tasks(cluster_id, alter_table_tasks, physical_table_id) + .submit_alter_logical_table_tasks(alter_table_tasks, physical_table_id) .await?; info!("{num_logical_tables} logical tables on physical table: {physical_table_id:?} is altered via procedure_id {id:?}"); @@ -749,11 +707,10 @@ async fn handle_alter_logical_table_tasks( /// Handle the `[CreateViewTask]` and returns the DDL response when success. 
async fn handle_create_view_task( ddl_manager: &DdlManager, - cluster_id: ClusterId, create_view_task: CreateViewTask, ) -> Result { let (id, output) = ddl_manager - .submit_create_view_task(cluster_id, create_view_task) + .submit_create_view_task(create_view_task) .await?; let procedure_id = id.to_string(); @@ -788,55 +745,43 @@ impl ProcedureExecutor for DdlManager { .unwrap_or(TracingContext::from_current_span()) .attach(tracing::info_span!("DdlManager::submit_ddl_task")); async move { - let cluster_id = ctx.cluster_id.unwrap_or_default(); debug!("Submitting Ddl task: {:?}", request.task); match request.task { CreateTable(create_table_task) => { - handle_create_table_task(self, cluster_id, create_table_task).await - } - DropTable(drop_table_task) => { - handle_drop_table_task(self, cluster_id, drop_table_task).await + handle_create_table_task(self, create_table_task).await } + DropTable(drop_table_task) => handle_drop_table_task(self, drop_table_task).await, AlterTable(alter_table_task) => { - handle_alter_table_task(self, cluster_id, alter_table_task).await + handle_alter_table_task(self, alter_table_task).await } TruncateTable(truncate_table_task) => { - handle_truncate_table_task(self, cluster_id, truncate_table_task).await + handle_truncate_table_task(self, truncate_table_task).await } CreateLogicalTables(create_table_tasks) => { - handle_create_logical_table_tasks(self, cluster_id, create_table_tasks).await + handle_create_logical_table_tasks(self, create_table_tasks).await } AlterLogicalTables(alter_table_tasks) => { - handle_alter_logical_table_tasks(self, cluster_id, alter_table_tasks).await + handle_alter_logical_table_tasks(self, alter_table_tasks).await } DropLogicalTables(_) => todo!(), CreateDatabase(create_database_task) => { - handle_create_database_task(self, cluster_id, create_database_task).await + handle_create_database_task(self, create_database_task).await } DropDatabase(drop_database_task) => { - handle_drop_database_task(self, cluster_id, drop_database_task).await + handle_drop_database_task(self, drop_database_task).await } AlterDatabase(alter_database_task) => { - handle_alter_database_task(self, cluster_id, alter_database_task).await + handle_alter_database_task(self, alter_database_task).await } CreateFlow(create_flow_task) => { - handle_create_flow_task( - self, - cluster_id, - create_flow_task, - request.query_context.into(), - ) - .await - } - DropFlow(drop_flow_task) => { - handle_drop_flow_task(self, cluster_id, drop_flow_task).await + handle_create_flow_task(self, create_flow_task, request.query_context.into()) + .await } + DropFlow(drop_flow_task) => handle_drop_flow_task(self, drop_flow_task).await, CreateView(create_view_task) => { - handle_create_view_task(self, cluster_id, create_view_task).await - } - DropView(drop_view_task) => { - handle_drop_view_task(self, cluster_id, drop_view_task).await + handle_create_view_task(self, create_view_task).await } + DropView(drop_view_task) => handle_drop_view_task(self, drop_view_task).await, } } .trace(span) diff --git a/src/common/meta/src/instruction.rs b/src/common/meta/src/instruction.rs index 4864f7562d..8cfc06e882 100644 --- a/src/common/meta/src/instruction.rs +++ b/src/common/meta/src/instruction.rs @@ -26,11 +26,10 @@ use crate::flow_name::FlowName; use crate::key::schema_name::SchemaName; use crate::key::FlowId; use crate::peer::Peer; -use crate::{ClusterId, DatanodeId, FlownodeId}; +use crate::{DatanodeId, FlownodeId}; #[derive(Eq, Hash, PartialEq, Clone, Debug, Serialize, Deserialize)] pub struct 
RegionIdent { - pub cluster_id: ClusterId, pub datanode_id: DatanodeId, pub table_id: TableId, pub region_number: RegionNumber, @@ -47,8 +46,8 @@ impl Display for RegionIdent { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { write!( f, - "RegionIdent(datanode_id='{}.{}', table_id={}, region_number={}, engine = {})", - self.cluster_id, self.datanode_id, self.table_id, self.region_number, self.engine + "RegionIdent(datanode_id='{}', table_id={}, region_number={}, engine = {})", + self.datanode_id, self.table_id, self.region_number, self.engine ) } } @@ -262,7 +261,6 @@ mod tests { fn test_serialize_instruction() { let open_region = Instruction::OpenRegion(OpenRegion::new( RegionIdent { - cluster_id: 1, datanode_id: 2, table_id: 1024, region_number: 1, @@ -277,12 +275,11 @@ mod tests { let serialized = serde_json::to_string(&open_region).unwrap(); assert_eq!( - r#"{"OpenRegion":{"region_ident":{"cluster_id":1,"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"},"region_storage_path":"test/foo","region_options":{},"region_wal_options":{},"skip_wal_replay":false}}"#, + r#"{"OpenRegion":{"region_ident":{"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"},"region_storage_path":"test/foo","region_options":{},"region_wal_options":{},"skip_wal_replay":false}}"#, serialized ); let close_region = Instruction::CloseRegion(RegionIdent { - cluster_id: 1, datanode_id: 2, table_id: 1024, region_number: 1, @@ -292,7 +289,7 @@ mod tests { let serialized = serde_json::to_string(&close_region).unwrap(); assert_eq!( - r#"{"CloseRegion":{"cluster_id":1,"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"}}"#, + r#"{"CloseRegion":{"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"}}"#, serialized ); } @@ -307,7 +304,6 @@ mod tests { #[test] fn test_compatible_serialize_open_region() { let region_ident = RegionIdent { - cluster_id: 1, datanode_id: 2, table_id: 1024, region_number: 1, diff --git a/src/common/meta/src/lib.rs b/src/common/meta/src/lib.rs index 7479a14337..ccd00ab890 100644 --- a/src/common/meta/src/lib.rs +++ b/src/common/meta/src/lib.rs @@ -47,8 +47,6 @@ pub mod test_util; pub mod util; pub mod wal_options_allocator; -// The id of the cluster. -pub type ClusterId = u64; // The id of the datanode. pub type DatanodeId = u64; // The id of the flownode. 
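Compatibility note on the serialized forms above: dropping `cluster_id` from persisted/mailbox structs such as `RegionIdent` does not break payloads written by older nodes, because serde's derived `Deserialize` ignores unknown JSON fields unless a type opts into `deny_unknown_fields`. Below is a minimal, self-contained sketch of that behavior. It uses a simplified, hypothetical stand-in struct with the same field names as the patch above, not the real `RegionIdent` from `common_meta`, and assumes only the `serde` (with `derive`) and `serde_json` crates.

    use serde::{Deserialize, Serialize};

    // Simplified stand-in mirroring the new shape of RegionIdent (illustration
    // only; field names follow the patch above, types approximate the real ones).
    #[derive(Debug, PartialEq, Serialize, Deserialize)]
    struct RegionIdent {
        datanode_id: u64,
        table_id: u32,
        region_number: u32,
        engine: String,
    }

    fn main() {
        // A payload written by an older node that still carries `cluster_id`.
        let old_payload =
            r#"{"cluster_id":1,"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"}"#;

        // serde ignores unknown fields by default, so the legacy field is
        // silently dropped and the remaining fields deserialize unchanged.
        let ident: RegionIdent = serde_json::from_str(old_payload).unwrap();
        assert_eq!(2, ident.datanode_id);
        assert_eq!(1024, ident.table_id);
        assert_eq!(1, ident.region_number);
        assert_eq!("mito2", ident.engine);

        // Re-serializing yields the new, cluster-id-free representation.
        assert_eq!(
            r#"{"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"}"#,
            serde_json::to_string(&ident).unwrap()
        );
    }

The same property covers `PersistentContext` and the other serialized structs touched by this patch, provided their derives do not enable `deny_unknown_fields`; the sketch only demonstrates the old-payload, new-reader direction.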
diff --git a/src/common/meta/src/node_expiry_listener.rs b/src/common/meta/src/node_expiry_listener.rs index c5da2936a5..7bc69f9065 100644 --- a/src/common/meta/src/node_expiry_listener.rs +++ b/src/common/meta/src/node_expiry_listener.rs @@ -99,7 +99,7 @@ impl NodeExpiryListener { in_memory: &ResettableKvBackendRef, max_idle_time: Duration, ) -> error::Result> { - let prefix = NodeInfoKey::key_prefix_with_cluster_id(0); + let prefix = NodeInfoKey::key_prefix(); let req = RangeRequest::new().with_prefix(prefix); let current_time_millis = common_time::util::current_time_millis(); let resp = in_memory.range(req).await?; diff --git a/src/common/meta/src/peer.rs b/src/common/meta/src/peer.rs index af1739ef91..daa64e36e7 100644 --- a/src/common/meta/src/peer.rs +++ b/src/common/meta/src/peer.rs @@ -19,7 +19,7 @@ use api::v1::meta::Peer as PbPeer; use serde::{Deserialize, Serialize}; use crate::error::Error; -use crate::{ClusterId, DatanodeId, FlownodeId}; +use crate::{DatanodeId, FlownodeId}; #[derive(Debug, Default, Clone, Hash, Eq, PartialEq, Deserialize, Serialize)] pub struct Peer { @@ -72,8 +72,8 @@ impl Display for Peer { /// can query peer given a node id #[async_trait::async_trait] pub trait PeerLookupService { - async fn datanode(&self, cluster_id: ClusterId, id: DatanodeId) -> Result, Error>; - async fn flownode(&self, cluster_id: ClusterId, id: FlownodeId) -> Result, Error>; + async fn datanode(&self, id: DatanodeId) -> Result, Error>; + async fn flownode(&self, id: FlownodeId) -> Result, Error>; } pub type PeerLookupServiceRef = Arc; diff --git a/src/common/meta/src/rpc.rs b/src/common/meta/src/rpc.rs index a11c5164b8..4996df11f3 100644 --- a/src/common/meta/src/rpc.rs +++ b/src/common/meta/src/rpc.rs @@ -31,11 +31,6 @@ impl ResponseHeader { self.0.protocol_version } - #[inline] - pub fn cluster_id(&self) -> u64 { - self.0.cluster_id - } - #[inline] pub fn error_code(&self) -> i32 { match self.0.error.as_ref() { @@ -143,7 +138,6 @@ mod tests { fn test_response_header_trans() { let pb_header = PbResponseHeader { protocol_version: 101, - cluster_id: 1, error: Some(Error { code: 100, err_msg: "test".to_string(), @@ -152,7 +146,6 @@ mod tests { let header = ResponseHeader(pb_header); assert_eq!(101, header.protocol_version()); - assert_eq!(1, header.cluster_id()); assert_eq!(100, header.error_code()); assert_eq!("test".to_string(), header.error_msg()); } diff --git a/src/common/meta/src/test_util.rs b/src/common/meta/src/test_util.rs index 3ceb473108..2c4ba59c7b 100644 --- a/src/common/meta/src/test_util.rs +++ b/src/common/meta/src/test_util.rs @@ -37,7 +37,7 @@ use crate::peer::{Peer, PeerLookupService}; use crate::region_keeper::MemoryRegionKeeper; use crate::sequence::SequenceBuilder; use crate::wal_options_allocator::WalOptionsAllocator; -use crate::{ClusterId, DatanodeId, FlownodeId}; +use crate::{DatanodeId, FlownodeId}; #[async_trait::async_trait] pub trait MockDatanodeHandler: Sync + Send + Clone { @@ -189,11 +189,11 @@ pub struct NoopPeerLookupService; #[async_trait::async_trait] impl PeerLookupService for NoopPeerLookupService { - async fn datanode(&self, _cluster_id: ClusterId, id: DatanodeId) -> Result> { + async fn datanode(&self, id: DatanodeId) -> Result> { Ok(Some(Peer::empty(id))) } - async fn flownode(&self, _cluster_id: ClusterId, id: FlownodeId) -> Result> { + async fn flownode(&self, id: FlownodeId) -> Result> { Ok(Some(Peer::empty(id))) } } diff --git a/src/datanode/src/heartbeat/handler.rs b/src/datanode/src/heartbeat/handler.rs index b5c99e57ee..34b568550d 
100644 --- a/src/datanode/src/heartbeat/handler.rs +++ b/src/datanode/src/heartbeat/handler.rs @@ -235,7 +235,6 @@ mod tests { Instruction::CloseRegion(RegionIdent { table_id: region_id.table_id(), region_number: region_id.region_number(), - cluster_id: 1, datanode_id: 2, engine: MITO_ENGINE_NAME.to_string(), }) @@ -246,7 +245,6 @@ mod tests { RegionIdent { table_id: region_id.table_id(), region_number: region_id.region_number(), - cluster_id: 1, datanode_id: 2, engine: MITO_ENGINE_NAME.to_string(), }, diff --git a/src/flow/src/adapter.rs b/src/flow/src/adapter.rs index 47557752f9..8fe859ff06 100644 --- a/src/flow/src/adapter.rs +++ b/src/flow/src/adapter.rs @@ -103,7 +103,6 @@ impl Default for FlowConfig { #[serde(default)] pub struct FlownodeOptions { pub mode: Mode, - pub cluster_id: Option, pub node_id: Option, pub flow: FlowConfig, pub grpc: GrpcOptions, @@ -118,7 +117,6 @@ impl Default for FlownodeOptions { fn default() -> Self { Self { mode: servers::Mode::Standalone, - cluster_id: None, node_id: None, flow: FlowConfig::default(), grpc: GrpcOptions::default().with_bind_addr("127.0.0.1:3004"), diff --git a/src/meta-client/examples/meta_client.rs b/src/meta-client/examples/meta_client.rs index 1e4043b5d6..e365eba220 100644 --- a/src/meta-client/examples/meta_client.rs +++ b/src/meta-client/examples/meta_client.rs @@ -31,13 +31,13 @@ fn main() { #[tokio::main] async fn run() { - let id = (1000u64, 2000u64); + let id = 2000u64; let config = ChannelConfig::new() .timeout(Duration::from_secs(3)) .connect_timeout(Duration::from_secs(5)) .tcp_nodelay(true); let channel_manager = ChannelManager::with_config(config); - let mut meta_client = MetaClientBuilder::datanode_default_options(id.0, id.1) + let mut meta_client = MetaClientBuilder::datanode_default_options(id) .channel_manager(channel_manager) .build(); meta_client.start(&["127.0.0.1:3002"]).await.unwrap(); diff --git a/src/meta-client/src/client.rs b/src/meta-client/src/client.rs index ee7aebba52..38e2ee2800 100644 --- a/src/meta-client/src/client.rs +++ b/src/meta-client/src/client.rs @@ -47,7 +47,6 @@ use common_meta::rpc::store::{ DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse, }; use common_meta::rpc::KeyValue; -use common_meta::ClusterId; use common_telemetry::info; use futures::TryStreamExt; use heartbeat::Client as HeartbeatClient; @@ -61,7 +60,7 @@ use crate::error::{ Result, }; -pub type Id = (u64, u64); +pub type Id = u64; const DEFAULT_ASK_LEADER_MAX_RETRY: usize = 3; const DEFAULT_SUBMIT_DDL_MAX_RETRY: usize = 3; @@ -81,18 +80,18 @@ pub struct MetaClientBuilder { } impl MetaClientBuilder { - pub fn new(cluster_id: ClusterId, member_id: u64, role: Role) -> Self { + pub fn new(member_id: u64, role: Role) -> Self { Self { - id: (cluster_id, member_id), + id: member_id, role, ..Default::default() } } /// Returns the role of Frontend's default options. - pub fn frontend_default_options(cluster_id: ClusterId) -> Self { + pub fn frontend_default_options() -> Self { // Frontend does not need a member id. - Self::new(cluster_id, 0, Role::Frontend) + Self::new(0, Role::Frontend) .enable_store() .enable_heartbeat() .enable_procedure() @@ -100,15 +99,15 @@ impl MetaClientBuilder { } /// Returns the role of Datanode's default options. 
- pub fn datanode_default_options(cluster_id: ClusterId, member_id: u64) -> Self { - Self::new(cluster_id, member_id, Role::Datanode) + pub fn datanode_default_options(member_id: u64) -> Self { + Self::new(member_id, Role::Datanode) .enable_store() .enable_heartbeat() } /// Returns the role of Flownode's default options. - pub fn flownode_default_options(cluster_id: ClusterId, member_id: u64) -> Self { - Self::new(cluster_id, member_id, Role::Flownode) + pub fn flownode_default_options(member_id: u64) -> Self { + Self::new(member_id, Role::Flownode) .enable_store() .enable_heartbeat() .enable_procedure() @@ -273,15 +272,9 @@ impl ClusterInfo for MetaClient { let cluster_client = self.cluster_client()?; let (get_metasrv_nodes, nodes_key_prefix) = match role { - None => ( - true, - Some(NodeInfoKey::key_prefix_with_cluster_id(self.id.0)), - ), + None => (true, Some(NodeInfoKey::key_prefix())), Some(ClusterRole::Metasrv) => (true, None), - Some(role) => ( - false, - Some(NodeInfoKey::key_prefix_with_role(self.id.0, role)), - ), + Some(role) => (false, Some(NodeInfoKey::key_prefix_with_role(role))), }; let mut nodes = if get_metasrv_nodes { @@ -324,7 +317,7 @@ impl ClusterInfo for MetaClient { async fn list_region_stats(&self) -> Result> { let cluster_kv_backend = Arc::new(self.cluster_client()?); - let range_prefix = DatanodeStatKey::key_prefix_with_cluster_id(self.id.0); + let range_prefix = DatanodeStatKey::prefix_key(); let req = RangeRequest::new().with_prefix(range_prefix); let stream = PaginationStream::new(cluster_kv_backend, req, 256, decode_stats).into_stream(); @@ -555,6 +548,8 @@ impl MetaClient { #[cfg(test)] mod tests { + use std::sync::atomic::{AtomicUsize, Ordering}; + use api::v1::meta::{HeartbeatRequest, Peer}; use common_meta::kv_backend::{KvBackendRef, ResettableKvBackendRef}; use rand::Rng; @@ -624,31 +619,31 @@ mod tests { async fn test_meta_client_builder() { let urls = &["127.0.0.1:3001", "127.0.0.1:3002"]; - let mut meta_client = MetaClientBuilder::new(0, 0, Role::Datanode) + let mut meta_client = MetaClientBuilder::new(0, Role::Datanode) .enable_heartbeat() .build(); let _ = meta_client.heartbeat_client().unwrap(); assert!(meta_client.store_client().is_err()); meta_client.start(urls).await.unwrap(); - let mut meta_client = MetaClientBuilder::new(0, 0, Role::Datanode).build(); + let mut meta_client = MetaClientBuilder::new(0, Role::Datanode).build(); assert!(meta_client.heartbeat_client().is_err()); assert!(meta_client.store_client().is_err()); meta_client.start(urls).await.unwrap(); - let mut meta_client = MetaClientBuilder::new(0, 0, Role::Datanode) + let mut meta_client = MetaClientBuilder::new(0, Role::Datanode) .enable_store() .build(); assert!(meta_client.heartbeat_client().is_err()); let _ = meta_client.store_client().unwrap(); meta_client.start(urls).await.unwrap(); - let mut meta_client = MetaClientBuilder::new(1, 2, Role::Datanode) + let mut meta_client = MetaClientBuilder::new(2, Role::Datanode) .enable_heartbeat() .enable_store() .build(); - assert_eq!(1, meta_client.id().0); - assert_eq!(2, meta_client.id().1); + assert_eq!(2, meta_client.id()); + assert_eq!(2, meta_client.id()); let _ = meta_client.heartbeat_client().unwrap(); let _ = meta_client.store_client().unwrap(); meta_client.start(urls).await.unwrap(); @@ -657,7 +652,7 @@ mod tests { #[tokio::test] async fn test_not_start_heartbeat_client() { let urls = &["127.0.0.1:3001", "127.0.0.1:3002"]; - let mut meta_client = MetaClientBuilder::new(0, 0, Role::Datanode) + let mut meta_client = 
MetaClientBuilder::new(0, Role::Datanode) .enable_store() .build(); meta_client.start(urls).await.unwrap(); @@ -668,7 +663,7 @@ mod tests { #[tokio::test] async fn test_not_start_store_client() { let urls = &["127.0.0.1:3001", "127.0.0.1:3002"]; - let mut meta_client = MetaClientBuilder::new(0, 0, Role::Datanode) + let mut meta_client = MetaClientBuilder::new(0, Role::Datanode) .enable_heartbeat() .build(); @@ -688,6 +683,9 @@ mod tests { let tc = new_client("test_heartbeat").await; let (sender, mut receiver) = tc.client.heartbeat().await.unwrap(); // send heartbeats + + let request_sent = Arc::new(AtomicUsize::new(0)); + let request_sent_clone = request_sent.clone(); let _handle = tokio::spawn(async move { for _ in 0..5 { let req = HeartbeatRequest { @@ -698,14 +696,24 @@ mod tests { ..Default::default() }; sender.send(req).await.unwrap(); + request_sent_clone.fetch_add(1, Ordering::Relaxed); } }); - let _handle = tokio::spawn(async move { - while let Some(res) = receiver.message().await.unwrap() { - assert_eq!(1000, res.header.unwrap().cluster_id); + let heartbeat_count = Arc::new(AtomicUsize::new(0)); + let heartbeat_count_clone = heartbeat_count.clone(); + let handle = tokio::spawn(async move { + while let Some(_resp) = receiver.message().await.unwrap() { + heartbeat_count_clone.fetch_add(1, Ordering::Relaxed); } }); + + handle.await.unwrap(); + //+1 for the initial response + assert_eq!( + request_sent.load(Ordering::Relaxed) + 1, + heartbeat_count.load(Ordering::Relaxed) + ); } #[tokio::test] diff --git a/src/meta-client/src/client/heartbeat.rs b/src/meta-client/src/client/heartbeat.rs index 81d7597750..fd5a6da46f 100644 --- a/src/meta-client/src/client/heartbeat.rs +++ b/src/meta-client/src/client/heartbeat.rs @@ -272,7 +272,7 @@ mod test { #[tokio::test] async fn test_already_start() { - let mut client = Client::new((0, 0), Role::Datanode, ChannelManager::default(), 3); + let mut client = Client::new(0, Role::Datanode, ChannelManager::default(), 3); client .start(&["127.0.0.1:1000", "127.0.0.1:1001"]) .await @@ -288,7 +288,7 @@ mod test { #[tokio::test] async fn test_heartbeat_stream() { let (sender, mut receiver) = mpsc::channel::(100); - let sender = HeartbeatSender::new((8, 8), Role::Datanode, sender); + let sender = HeartbeatSender::new(8, Role::Datanode, sender); let _handle = tokio::spawn(async move { for _ in 0..10 { sender.send(HeartbeatRequest::default()).await.unwrap(); @@ -296,7 +296,6 @@ mod test { }); while let Some(req) = receiver.recv().await { let header = req.header.unwrap(); - assert_eq!(8, header.cluster_id); assert_eq!(8, header.member_id); } } diff --git a/src/meta-client/src/client/store.rs b/src/meta-client/src/client/store.rs index 4f0fea7e0f..3026920555 100644 --- a/src/meta-client/src/client/store.rs +++ b/src/meta-client/src/client/store.rs @@ -255,7 +255,7 @@ mod test { #[tokio::test] async fn test_already_start() { - let mut client = Client::new((0, 0), Role::Frontend, ChannelManager::default()); + let mut client = Client::new(0, Role::Frontend, ChannelManager::default()); client .start(&["127.0.0.1:1000", "127.0.0.1:1001"]) .await @@ -270,7 +270,7 @@ mod test { #[tokio::test] async fn test_start_with_duplicate_peers() { - let mut client = Client::new((0, 0), Role::Frontend, ChannelManager::default()); + let mut client = Client::new(0, Role::Frontend, ChannelManager::default()); client .start(&["127.0.0.1:1000", "127.0.0.1:1000", "127.0.0.1:1000"]) .await diff --git a/src/meta-client/src/lib.rs b/src/meta-client/src/lib.rs index 0a19539977..87eab997bc 
100644 --- a/src/meta-client/src/lib.rs +++ b/src/meta-client/src/lib.rs @@ -71,23 +71,22 @@ pub enum MetaClientType { pub type MetaClientRef = Arc; pub async fn create_meta_client( - cluster_id: u64, client_type: MetaClientType, meta_client_options: &MetaClientOptions, ) -> error::Result { info!( - "Creating {:?} instance from cluster {} with Metasrv addrs {:?}", - client_type, cluster_id, meta_client_options.metasrv_addrs + "Creating {:?} instance with Metasrv addrs {:?}", + client_type, meta_client_options.metasrv_addrs ); let mut builder = match client_type { MetaClientType::Datanode { member_id } => { - MetaClientBuilder::datanode_default_options(cluster_id, member_id) + MetaClientBuilder::datanode_default_options(member_id) } MetaClientType::Flownode { member_id } => { - MetaClientBuilder::flownode_default_options(cluster_id, member_id) + MetaClientBuilder::flownode_default_options(member_id) } - MetaClientType::Frontend => MetaClientBuilder::frontend_default_options(cluster_id), + MetaClientType::Frontend => MetaClientBuilder::frontend_default_options(), }; let base_config = ChannelConfig::new() diff --git a/src/meta-client/src/mocks.rs b/src/meta-client/src/mocks.rs index 2643c44927..294ac16ef9 100644 --- a/src/meta-client/src/mocks.rs +++ b/src/meta-client/src/mocks.rs @@ -60,8 +60,8 @@ pub async fn mock_client_with_etcdstore(addr: &str) -> (MetaClient, MockMetaCont } pub async fn mock_client_by(server_addr: String, channel_manager: ChannelManager) -> MetaClient { - let id = (1000u64, 2000u64); - let mut meta_client = MetaClientBuilder::datanode_default_options(id.0, id.1) + let id = 2000u64; + let mut meta_client = MetaClientBuilder::datanode_default_options(id) .enable_access_cluster_info() .channel_manager(channel_manager) .build(); diff --git a/src/meta-srv/src/cluster.rs b/src/meta-srv/src/cluster.rs index 9a6cecbd36..23d8cd05dc 100644 --- a/src/meta-srv/src/cluster.rs +++ b/src/meta-srv/src/cluster.rs @@ -375,13 +375,9 @@ mod tests { #[test] fn test_to_stat_kv_map() { - let stat_key = DatanodeStatKey { - cluster_id: 0, - node_id: 100, - }; + let stat_key = DatanodeStatKey { node_id: 100 }; let stat = Stat { - cluster_id: 0, id: 100, addr: "127.0.0.1:3001".to_string(), ..Default::default() @@ -400,7 +396,6 @@ mod tests { let stat_val = kv_map.get(&stat_key).unwrap(); let stat = stat_val.stats.first().unwrap(); - assert_eq!(0, stat.cluster_id); assert_eq!(100, stat.id); assert_eq!("127.0.0.1:3001", stat.addr); } diff --git a/src/meta-srv/src/flow_meta_alloc.rs b/src/meta-srv/src/flow_meta_alloc.rs index 1fac6efab1..bdfac158aa 100644 --- a/src/meta-srv/src/flow_meta_alloc.rs +++ b/src/meta-srv/src/flow_meta_alloc.rs @@ -15,7 +15,6 @@ use common_error::ext::BoxedError; use common_meta::ddl::flow_meta::PartitionPeerAllocator; use common_meta::peer::Peer; -use common_meta::ClusterId; use snafu::ResultExt; use crate::metasrv::{SelectorContext, SelectorRef}; @@ -34,14 +33,9 @@ impl FlowPeerAllocator { #[async_trait::async_trait] impl PartitionPeerAllocator for FlowPeerAllocator { - async fn alloc( - &self, - cluster_id: ClusterId, - partitions: usize, - ) -> common_meta::error::Result> { + async fn alloc(&self, partitions: usize) -> common_meta::error::Result> { self.selector .select( - cluster_id, &self.ctx, SelectorOptions { min_required_items: partitions, diff --git a/src/meta-srv/src/handler.rs b/src/meta-srv/src/handler.rs index 4eb9fef91d..6d06f328c1 100644 --- a/src/meta-srv/src/handler.rs +++ b/src/meta-srv/src/handler.rs @@ -20,8 +20,8 @@ use std::time::{Duration, Instant}; 
use api::v1::meta::mailbox_message::Payload; use api::v1::meta::{ - HeartbeatRequest, HeartbeatResponse, MailboxMessage, RegionLease, RequestHeader, - ResponseHeader, Role, PROTOCOL_VERSION, + HeartbeatRequest, HeartbeatResponse, MailboxMessage, RegionLease, ResponseHeader, Role, + PROTOCOL_VERSION, }; use check_leader_handler::CheckLeaderHandler; use collect_cluster_info_handler::{ @@ -153,13 +153,9 @@ pub struct Pusher { } impl Pusher { - pub fn new( - sender: Sender>, - req_header: &RequestHeader, - ) -> Self { + pub fn new(sender: Sender>) -> Self { let res_header = ResponseHeader { protocol_version: PROTOCOL_VERSION, - cluster_id: req_header.cluster_id, ..Default::default() }; @@ -772,7 +768,7 @@ mod tests { use std::sync::Arc; use std::time::Duration; - use api::v1::meta::{MailboxMessage, RequestHeader, Role, PROTOCOL_VERSION}; + use api::v1::meta::{MailboxMessage, Role}; use common_meta::kv_backend::memory::MemoryKvBackend; use common_meta::sequence::SequenceBuilder; use tokio::sync::mpsc; @@ -814,12 +810,8 @@ mod tests { async fn push_msg_via_mailbox() -> (MailboxRef, MailboxReceiver) { let datanode_id = 12; let (pusher_tx, mut pusher_rx) = mpsc::channel(16); - let res_header = RequestHeader { - protocol_version: PROTOCOL_VERSION, - ..Default::default() - }; let pusher_id = PusherId::new(Role::Datanode, datanode_id); - let pusher: Pusher = Pusher::new(pusher_tx, &res_header); + let pusher: Pusher = Pusher::new(pusher_tx); let handler_group = HeartbeatHandlerGroup::default(); handler_group.register_pusher(pusher_id, pusher).await; diff --git a/src/meta-srv/src/handler/collect_stats_handler.rs b/src/meta-srv/src/handler/collect_stats_handler.rs index 7b57ab9e55..20f803cb1e 100644 --- a/src/meta-srv/src/handler/collect_stats_handler.rs +++ b/src/meta-srv/src/handler/collect_stats_handler.rs @@ -262,15 +262,11 @@ mod tests { let handler = CollectStatsHandler::default(); handle_request_many_times(ctx.clone(), &handler, 1).await; - let key = DatanodeStatKey { - cluster_id: 3, - node_id: 101, - }; + let key = DatanodeStatKey { node_id: 101 }; let key: Vec = key.into(); let res = ctx.in_memory.get(&key).await.unwrap(); let kv = res.unwrap(); let key: DatanodeStatKey = kv.key.clone().try_into().unwrap(); - assert_eq!(3, key.cluster_id); assert_eq!(101, key.node_id); let val: DatanodeStatValue = kv.value.try_into().unwrap(); // first new stat must be set in kv store immediately @@ -295,7 +291,6 @@ mod tests { for i in 1..=loop_times { let mut acc = HeartbeatAccumulator { stat: Some(Stat { - cluster_id: 3, id: 101, region_num: i as _, ..Default::default() diff --git a/src/meta-srv/src/handler/failure_handler.rs b/src/meta-srv/src/handler/failure_handler.rs index ae38f887f4..203cf4af33 100644 --- a/src/meta-srv/src/handler/failure_handler.rs +++ b/src/meta-srv/src/handler/failure_handler.rs @@ -101,7 +101,6 @@ mod tests { } } acc.stat = Some(Stat { - cluster_id: 1, id: 42, region_stats: vec![new_region_stat(1), new_region_stat(2), new_region_stat(3)], timestamp_millis: 1000, diff --git a/src/meta-srv/src/handler/keep_lease_handler.rs b/src/meta-srv/src/handler/keep_lease_handler.rs index 76669cd76b..553963aaa8 100644 --- a/src/meta-srv/src/handler/keep_lease_handler.rs +++ b/src/meta-srv/src/handler/keep_lease_handler.rs @@ -38,17 +38,14 @@ impl HeartbeatHandler for DatanodeKeepLeaseHandler { _acc: &mut HeartbeatAccumulator, ) -> Result { let HeartbeatRequest { header, peer, .. 
} = req; - let Some(header) = &header else { + let Some(_header) = &header else { return Ok(HandleControl::Continue); }; let Some(peer) = &peer else { return Ok(HandleControl::Continue); }; - let key = DatanodeLeaseKey { - cluster_id: header.cluster_id, - node_id: peer.id, - }; + let key = DatanodeLeaseKey { node_id: peer.id }; let value = LeaseValue { timestamp_millis: time_util::current_time_millis(), node_addr: peer.addr.clone(), @@ -80,17 +77,14 @@ impl HeartbeatHandler for FlownodeKeepLeaseHandler { _acc: &mut HeartbeatAccumulator, ) -> Result { let HeartbeatRequest { header, peer, .. } = req; - let Some(header) = &header else { + let Some(_header) = &header else { return Ok(HandleControl::Continue); }; let Some(peer) = &peer else { return Ok(HandleControl::Continue); }; - let key = FlownodeLeaseKey { - cluster_id: header.cluster_id, - node_id: peer.id, - }; + let key = FlownodeLeaseKey { node_id: peer.id }; let value = LeaseValue { timestamp_millis: time_util::current_time_millis(), node_addr: peer.addr.clone(), diff --git a/src/meta-srv/src/handler/region_lease_handler.rs b/src/meta-srv/src/handler/region_lease_handler.rs index 98a74f67bb..64ec1f01e4 100644 --- a/src/meta-srv/src/handler/region_lease_handler.rs +++ b/src/meta-srv/src/handler/region_lease_handler.rs @@ -64,7 +64,6 @@ impl HeartbeatHandler for RegionLeaseHandler { }; let regions = stat.regions(); - let cluster_id = stat.cluster_id; let datanode_id = stat.id; let RenewRegionLeasesResponse { @@ -72,7 +71,7 @@ impl HeartbeatHandler for RegionLeaseHandler { renewed, } = self .region_lease_keeper - .renew_region_leases(cluster_id, datanode_id, ®ions) + .renew_region_leases(datanode_id, ®ions) .await?; let renewed = renewed @@ -153,7 +152,6 @@ mod test { let peer = Peer::empty(datanode_id); let follower_peer = Peer::empty(datanode_id + 1); let table_info = new_test_table_info(table_id, vec![region_number]).into(); - let cluster_id = 1; let region_routes = vec![RegionRoute { region: Region::new_test(region_id), @@ -181,7 +179,6 @@ mod test { let acc = &mut HeartbeatAccumulator::default(); acc.stat = Some(Stat { - cluster_id, id: peer.id, region_stats: vec![ new_empty_region_stat(region_id, RegionRole::Follower), @@ -215,7 +212,6 @@ mod test { let acc = &mut HeartbeatAccumulator::default(); acc.stat = Some(Stat { - cluster_id, id: follower_peer.id, region_stats: vec![ new_empty_region_stat(region_id, RegionRole::Follower), @@ -249,7 +245,6 @@ mod test { let acc = &mut HeartbeatAccumulator::default(); acc.stat = Some(Stat { - cluster_id, id: follower_peer.id, region_stats: vec![ new_empty_region_stat(region_id, RegionRole::Follower), @@ -292,7 +287,6 @@ mod test { let peer = Peer::empty(datanode_id); let follower_peer = Peer::empty(datanode_id + 1); let table_info = new_test_table_info(table_id, vec![region_number]).into(); - let cluster_id = 1; let region_routes = vec![ RegionRoute { @@ -333,7 +327,6 @@ mod test { let acc = &mut HeartbeatAccumulator::default(); acc.stat = Some(Stat { - cluster_id, id: peer.id, region_stats: vec![ new_empty_region_stat(region_id, RegionRole::Leader), diff --git a/src/meta-srv/src/handler/response_header_handler.rs b/src/meta-srv/src/handler/response_header_handler.rs index baa7e7ee29..1cd6201598 100644 --- a/src/meta-srv/src/handler/response_header_handler.rs +++ b/src/meta-srv/src/handler/response_header_handler.rs @@ -28,18 +28,15 @@ impl HeartbeatHandler for ResponseHeaderHandler { async fn handle( &self, - req: &HeartbeatRequest, + _req: &HeartbeatRequest, _ctx: &mut Context, acc: &mut 
HeartbeatAccumulator, ) -> Result { - let HeartbeatRequest { header, .. } = req; let res_header = ResponseHeader { protocol_version: PROTOCOL_VERSION, - cluster_id: header.as_ref().map_or(0, |h| h.cluster_id), ..Default::default() }; acc.header = Some(res_header); - Ok(HandleControl::Continue) } } @@ -48,7 +45,7 @@ impl HeartbeatHandler for ResponseHeaderHandler { mod tests { use std::sync::Arc; - use api::v1::meta::{HeartbeatResponse, RequestHeader}; + use api::v1::meta::RequestHeader; use common_meta::cache_invalidator::DummyCacheInvalidator; use common_meta::key::TableMetadataManager; use common_meta::kv_backend::memory::MemoryKvBackend; @@ -90,7 +87,7 @@ mod tests { }; let req = HeartbeatRequest { - header: Some(RequestHeader::new((1, 2), Role::Datanode, W3cTrace::new())), + header: Some(RequestHeader::new(2, Role::Datanode, W3cTrace::new())), ..Default::default() }; let mut acc = HeartbeatAccumulator::default(); @@ -100,12 +97,5 @@ mod tests { .handle(&req, &mut ctx, &mut acc) .await .unwrap(); - let header = std::mem::take(&mut acc.header); - let res = HeartbeatResponse { - header, - mailbox_message: acc.into_mailbox_message(), - ..Default::default() - }; - assert_eq!(1, res.header.unwrap().cluster_id); } } diff --git a/src/meta-srv/src/key.rs b/src/meta-srv/src/key.rs index 243efe69c8..aabebb8bbc 100644 --- a/src/meta-srv/src/key.rs +++ b/src/meta-srv/src/key.rs @@ -35,20 +35,12 @@ macro_rules! impl_from_str_lease_key { .context(error::InvalidLeaseKeySnafu { key })?; ensure!(caps.len() == 3, error::InvalidLeaseKeySnafu { key }); - - let cluster_id = caps[1].to_string(); let node_id = caps[2].to_string(); - let cluster_id: u64 = cluster_id.parse().context(error::ParseNumSnafu { - err_msg: format!("invalid cluster_id: {cluster_id}"), - })?; let node_id: u64 = node_id.parse().context(error::ParseNumSnafu { err_msg: format!("invalid node_id: {node_id}"), })?; - Ok(Self { - cluster_id, - node_id, - }) + Ok(Self { node_id }) } } }; @@ -73,7 +65,7 @@ macro_rules! impl_try_from_lease_key { type Error = error::Error; fn try_from(key: $key_type) -> error::Result { - Ok(format!("{}-{}-{}", $prefix, key.cluster_id, key.node_id).into_bytes()) + Ok(format!("{}-0-{}", $prefix, key.node_id).into_bytes()) } } }; diff --git a/src/meta-srv/src/key/datanode.rs b/src/meta-srv/src/key/datanode.rs index 1c4583c233..ef4c89c34d 100644 --- a/src/meta-srv/src/key/datanode.rs +++ b/src/meta-srv/src/key/datanode.rs @@ -15,7 +15,6 @@ use std::str::FromStr; use common_meta::datanode::DatanodeStatKey; -use common_meta::ClusterId; use lazy_static::lazy_static; use regex::Regex; use serde::{Deserialize, Serialize}; @@ -42,20 +41,18 @@ lazy_static! 
{ #[derive(Debug, Clone, Eq, Hash, PartialEq, Serialize, Deserialize)] pub struct DatanodeLeaseKey { - pub cluster_id: ClusterId, pub node_id: u64, } impl DatanodeLeaseKey { - pub fn prefix_key_by_cluster(cluster_id: ClusterId) -> Vec { - format!("{DATANODE_LEASE_PREFIX}-{cluster_id}-").into_bytes() + pub fn prefix_key() -> Vec { + format!("{DATANODE_LEASE_PREFIX}-0-").into_bytes() } } impl From<&DatanodeLeaseKey> for DatanodeStatKey { fn from(lease_key: &DatanodeLeaseKey) -> Self { DatanodeStatKey { - cluster_id: lease_key.cluster_id, node_id: lease_key.node_id, } } @@ -63,22 +60,21 @@ impl From<&DatanodeLeaseKey> for DatanodeStatKey { #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Serialize, Deserialize)] pub struct InactiveRegionKey { - pub cluster_id: ClusterId, pub node_id: u64, pub region_id: u64, } impl InactiveRegionKey { - pub fn get_prefix_by_cluster(cluster_id: u64) -> Vec { - format!("{}-{}-", INACTIVE_REGION_PREFIX, cluster_id).into_bytes() + pub fn get_prefix_by_cluster() -> Vec { + format!("{}-0-", INACTIVE_REGION_PREFIX).into_bytes() } } impl From for Vec { fn from(value: InactiveRegionKey) -> Self { format!( - "{}-{}-{}-{}", - INACTIVE_REGION_PREFIX, value.cluster_id, value.node_id, value.region_id + "{}-0-{}-{}", + INACTIVE_REGION_PREFIX, value.node_id, value.region_id ) .into_bytes() } @@ -97,13 +93,8 @@ impl FromStr for InactiveRegionKey { error::InvalidInactiveRegionKeySnafu { key } ); - let cluster_id = caps[1].to_string(); let node_id = caps[2].to_string(); let region_id = caps[3].to_string(); - - let cluster_id: u64 = cluster_id.parse().context(error::ParseNumSnafu { - err_msg: format!("invalid cluster_id: {cluster_id}"), - })?; let node_id: u64 = node_id.parse().context(error::ParseNumSnafu { err_msg: format!("invalid node_id: {node_id}"), })?; @@ -111,11 +102,7 @@ impl FromStr for InactiveRegionKey { err_msg: format!("invalid region_id: {region_id}"), })?; - Ok(Self { - cluster_id, - node_id, - region_id, - }) + Ok(Self { node_id, region_id }) } } @@ -135,24 +122,17 @@ mod tests { #[test] fn test_stat_key_round_trip() { - let key = DatanodeStatKey { - cluster_id: 0, - node_id: 1, - }; + let key = DatanodeStatKey { node_id: 1 }; let key_bytes: Vec = key.into(); let new_key: DatanodeStatKey = key_bytes.try_into().unwrap(); - assert_eq!(0, new_key.cluster_id); assert_eq!(1, new_key.node_id); } #[test] fn test_lease_key_round_trip() { - let key = DatanodeLeaseKey { - cluster_id: 0, - node_id: 1, - }; + let key = DatanodeLeaseKey { node_id: 1 }; let key_bytes: Vec = key.clone().try_into().unwrap(); let new_key: DatanodeLeaseKey = key_bytes.try_into().unwrap(); @@ -162,21 +142,16 @@ mod tests { #[test] fn test_lease_key_to_stat_key() { - let lease_key = DatanodeLeaseKey { - cluster_id: 1, - node_id: 101, - }; + let lease_key = DatanodeLeaseKey { node_id: 101 }; let stat_key: DatanodeStatKey = (&lease_key).into(); - assert_eq!(1, stat_key.cluster_id); assert_eq!(101, stat_key.node_id); } #[test] fn test_inactive_region_key_round_trip() { let key = InactiveRegionKey { - cluster_id: 0, node_id: 1, region_id: 2, }; diff --git a/src/meta-srv/src/key/flownode.rs b/src/meta-srv/src/key/flownode.rs index acb36cbf75..0255c36df5 100644 --- a/src/meta-srv/src/key/flownode.rs +++ b/src/meta-srv/src/key/flownode.rs @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -use common_meta::ClusterId; use lazy_static::lazy_static; use regex::Regex; use serde::{Deserialize, Serialize}; @@ -26,13 +25,12 @@ lazy_static! 
{ #[derive(Debug, Clone, Eq, Hash, PartialEq, Serialize, Deserialize)] pub struct FlownodeLeaseKey { - pub cluster_id: ClusterId, pub node_id: u64, } impl FlownodeLeaseKey { - pub fn prefix_key_by_cluster(cluster_id: ClusterId) -> Vec { - format!("{FLOWNODE_LEASE_PREFIX}-{cluster_id}-").into_bytes() + pub fn prefix_key_by_cluster() -> Vec { + format!("{FLOWNODE_LEASE_PREFIX}-0-").into_bytes() } } @@ -42,10 +40,7 @@ mod tests { #[test] fn test_lease_key_round_trip() { - let key = FlownodeLeaseKey { - cluster_id: 0, - node_id: 1, - }; + let key = FlownodeLeaseKey { node_id: 1 }; let key_bytes: Vec = key.clone().try_into().unwrap(); let new_key: FlownodeLeaseKey = key_bytes.try_into().unwrap(); diff --git a/src/meta-srv/src/lease.rs b/src/meta-srv/src/lease.rs index ef28c2ed74..063c5233c7 100644 --- a/src/meta-srv/src/lease.rs +++ b/src/meta-srv/src/lease.rs @@ -18,7 +18,7 @@ use std::hash::Hash; use common_error::ext::BoxedError; use common_meta::kv_backend::KvBackend; use common_meta::peer::{Peer, PeerLookupService}; -use common_meta::{util, ClusterId, DatanodeId, FlownodeId}; +use common_meta::{util, DatanodeId, FlownodeId}; use common_time::util as time_util; use snafu::ResultExt; @@ -35,14 +35,12 @@ fn build_lease_filter(lease_secs: u64) -> impl Fn(&LeaseValue) -> bool { /// look up [`Peer`] given [`ClusterId`] and [`DatanodeId`], will only return if it's alive under given `lease_secs` pub async fn lookup_datanode_peer( - cluster_id: ClusterId, datanode_id: DatanodeId, meta_peer_client: &MetaPeerClientRef, lease_secs: u64, ) -> Result> { let lease_filter = build_lease_filter(lease_secs); let lease_key = DatanodeLeaseKey { - cluster_id, node_id: datanode_id, }; let lease_key_bytes: Vec = lease_key.clone().try_into()?; @@ -63,29 +61,24 @@ pub async fn lookup_datanode_peer( /// Find all alive datanodes pub async fn alive_datanodes( - cluster_id: ClusterId, meta_peer_client: &MetaPeerClientRef, lease_secs: u64, ) -> Result> { let predicate = build_lease_filter(lease_secs); - filter( - DatanodeLeaseKey::prefix_key_by_cluster(cluster_id), - meta_peer_client, - |v| predicate(v), - ) + filter(DatanodeLeaseKey::prefix_key(), meta_peer_client, |v| { + predicate(v) + }) .await } /// look up [`Peer`] given [`ClusterId`] and [`DatanodeId`], only return if it's alive under given `lease_secs` pub async fn lookup_flownode_peer( - cluster_id: ClusterId, flownode_id: FlownodeId, meta_peer_client: &MetaPeerClientRef, lease_secs: u64, ) -> Result> { let lease_filter = build_lease_filter(lease_secs); let lease_key = FlownodeLeaseKey { - cluster_id, node_id: flownode_id, }; let lease_key_bytes: Vec = lease_key.clone().try_into()?; @@ -107,13 +100,12 @@ pub async fn lookup_flownode_peer( /// Find all alive flownodes pub async fn alive_flownodes( - cluster_id: ClusterId, meta_peer_client: &MetaPeerClientRef, lease_secs: u64, ) -> Result> { let predicate = build_lease_filter(lease_secs); filter( - FlownodeLeaseKey::prefix_key_by_cluster(cluster_id), + FlownodeLeaseKey::prefix_key_by_cluster(), meta_peer_client, |v| predicate(v), ) @@ -163,22 +155,14 @@ impl MetaPeerLookupService { #[async_trait::async_trait] impl PeerLookupService for MetaPeerLookupService { - async fn datanode( - &self, - cluster_id: ClusterId, - id: DatanodeId, - ) -> common_meta::error::Result> { - lookup_datanode_peer(cluster_id, id, &self.meta_peer_client, u64::MAX) + async fn datanode(&self, id: DatanodeId) -> common_meta::error::Result> { + lookup_datanode_peer(id, &self.meta_peer_client, u64::MAX) .await .map_err(BoxedError::new) 
.context(common_meta::error::ExternalSnafu) } - async fn flownode( - &self, - cluster_id: ClusterId, - id: FlownodeId, - ) -> common_meta::error::Result> { - lookup_flownode_peer(cluster_id, id, &self.meta_peer_client, u64::MAX) + async fn flownode(&self, id: FlownodeId) -> common_meta::error::Result> { + lookup_flownode_peer(id, &self.meta_peer_client, u64::MAX) .await .map_err(BoxedError::new) .context(common_meta::error::ExternalSnafu) diff --git a/src/meta-srv/src/metasrv.rs b/src/meta-srv/src/metasrv.rs index b8c29d988a..b15e8ece31 100644 --- a/src/meta-srv/src/metasrv.rs +++ b/src/meta-srv/src/metasrv.rs @@ -26,6 +26,7 @@ use common_config::Configurable; use common_greptimedb_telemetry::GreptimeDBTelemetryTask; use common_meta::cache_invalidator::CacheInvalidatorRef; use common_meta::ddl::ProcedureExecutorRef; +use common_meta::distributed_time_constants; use common_meta::key::maintenance::MaintenanceModeManagerRef; use common_meta::key::TableMetadataManagerRef; use common_meta::kv_backend::{KvBackendRef, ResettableKvBackend, ResettableKvBackendRef}; @@ -36,7 +37,6 @@ use common_meta::node_expiry_listener::NodeExpiryListener; use common_meta::peer::Peer; use common_meta::region_keeper::MemoryRegionKeeperRef; use common_meta::wal_options_allocator::WalOptionsAllocatorRef; -use common_meta::{distributed_time_constants, ClusterId}; use common_options::datanode::DatanodeClientOptions; use common_procedure::options::ProcedureConfig; use common_procedure::ProcedureManagerRef; @@ -572,13 +572,8 @@ impl Metasrv { } /// Lookup a peer by peer_id, return it only when it's alive. - pub(crate) async fn lookup_peer( - &self, - cluster_id: ClusterId, - peer_id: u64, - ) -> Result> { + pub(crate) async fn lookup_peer(&self, peer_id: u64) -> Result> { lookup_datanode_peer( - cluster_id, peer_id, &self.meta_peer_client, distributed_time_constants::DATANODE_LEASE_SECS, diff --git a/src/meta-srv/src/metrics.rs b/src/meta-srv/src/metrics.rs index 7a7bdce9a8..1ed34bcd3b 100644 --- a/src/meta-srv/src/metrics.rs +++ b/src/meta-srv/src/metrics.rs @@ -20,7 +20,7 @@ lazy_static! { pub static ref METRIC_META_KV_REQUEST_ELAPSED: HistogramVec = register_histogram_vec!( "greptime_meta_kv_request_elapsed", "meta kv request", - &["target", "op", "cluster_id"] + &["target", "op"] ) .unwrap(); /// The heartbeat connection gauge. diff --git a/src/meta-srv/src/procedure/region_migration.rs b/src/meta-srv/src/procedure/region_migration.rs index 3b27d33f22..59f476ce68 100644 --- a/src/meta-srv/src/procedure/region_migration.rs +++ b/src/meta-srv/src/procedure/region_migration.rs @@ -39,7 +39,6 @@ use common_meta::key::{DeserializedValueWithBytes, TableMetadataManagerRef}; use common_meta::lock_key::{CatalogLock, RegionLock, SchemaLock, TableLock}; use common_meta::peer::Peer; use common_meta::region_keeper::{MemoryRegionKeeperRef, OperatingRegionGuard}; -use common_meta::ClusterId; use common_procedure::error::{ Error as ProcedureError, FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu, }; @@ -70,8 +69,6 @@ pub struct PersistentContext { catalog: String, /// The table schema. schema: String, - /// The Id of the cluster. - cluster_id: ClusterId, /// The [Peer] of migration source. from_peer: Peer, /// The [Peer] of migration destination. @@ -273,12 +270,11 @@ impl Context { /// The original failure detector was removed once the procedure was triggered. /// Now, we need to register the failure detector for the failed region again. 
pub async fn register_failure_detectors(&self) { - let cluster_id = self.persistent_ctx.cluster_id; let datanode_id = self.persistent_ctx.from_peer.id; let region_id = self.persistent_ctx.region_id; self.region_failure_detector_controller - .register_failure_detectors(vec![(cluster_id, datanode_id, region_id)]) + .register_failure_detectors(vec![(datanode_id, region_id)]) .await; } @@ -287,12 +283,11 @@ impl Context { /// The original failure detectors was removed once the procedure was triggered. /// However, the `from_peer` may still send the heartbeats contains the failed region. pub async fn deregister_failure_detectors(&self) { - let cluster_id = self.persistent_ctx.cluster_id; let datanode_id = self.persistent_ctx.from_peer.id; let region_id = self.persistent_ctx.region_id; self.region_failure_detector_controller - .deregister_failure_detectors(vec![(cluster_id, datanode_id, region_id)]) + .deregister_failure_detectors(vec![(datanode_id, region_id)]) .await; } @@ -458,7 +453,6 @@ impl RegionMigrationProcedure { } = serde_json::from_str(json).context(FromJsonSnafu)?; let guard = tracker.insert_running_procedure(&RegionMigrationProcedureTask { - cluster_id: persistent_ctx.cluster_id, region_id: persistent_ctx.region_id, from_peer: persistent_ctx.from_peer.clone(), to_peer: persistent_ctx.to_peer.clone(), @@ -580,7 +574,6 @@ mod tests { use common_meta::key::test_utils::new_test_table_info; use common_meta::rpc::router::{Region, RegionRoute}; - use super::migration_end::RegionMigrationEnd; use super::update_metadata::UpdateMetadata; use super::*; use crate::handler::HeartbeatMailbox; @@ -620,7 +613,7 @@ mod tests { let procedure = RegionMigrationProcedure::new(persistent_context, context, None); let serialized = procedure.dump().unwrap(); - let expected = r#"{"persistent_ctx":{"catalog":"greptime","schema":"public","cluster_id":0,"from_peer":{"id":1,"addr":""},"to_peer":{"id":2,"addr":""},"region_id":4398046511105,"timeout":"10s"},"state":{"region_migration_state":"RegionMigrationStart"}}"#; + let expected = r#"{"persistent_ctx":{"catalog":"greptime","schema":"public","from_peer":{"id":1,"addr":""},"to_peer":{"id":2,"addr":""},"region_id":4398046511105,"timeout":"10s"},"state":{"region_migration_state":"RegionMigrationStart"}}"#; assert_eq!(expected, serialized); } @@ -628,7 +621,7 @@ mod tests { fn test_backward_compatibility() { let persistent_ctx = test_util::new_persistent_context(1, 2, RegionId::new(1024, 1)); // NOTES: Changes it will break backward compatibility. 
- let serialized = r#"{"catalog":"greptime","schema":"public","cluster_id":0,"from_peer":{"id":1,"addr":""},"to_peer":{"id":2,"addr":""},"region_id":4398046511105}"#; + let serialized = r#"{"catalog":"greptime","schema":"public","from_peer":{"id":1,"addr":""},"to_peer":{"id":2,"addr":""},"region_id":4398046511105}"#; let deserialized: PersistentContext = serde_json::from_str(serialized).unwrap(); assert_eq!(persistent_ctx, deserialized); @@ -640,15 +633,8 @@ mod tests { #[async_trait::async_trait] #[typetag::serde] impl State for MockState { - async fn next(&mut self, ctx: &mut Context) -> Result<(Box, Status)> { - let pc = &mut ctx.persistent_ctx; - - if pc.cluster_id == 2 { - Ok((Box::new(RegionMigrationEnd), Status::done())) - } else { - pc.cluster_id += 1; - Ok((Box::new(MockState), Status::executing(false))) - } + async fn next(&mut self, _ctx: &mut Context) -> Result<(Box, Status)> { + Ok((Box::new(MockState), Status::done())) } fn as_any(&self) -> &dyn Any { @@ -692,7 +678,6 @@ mod tests { for _ in 1..3 { status = Some(procedure.execute(&ctx).await.unwrap()); } - assert_eq!(procedure.context.persistent_ctx.cluster_id, 2); assert!(status.unwrap().is_done()); } diff --git a/src/meta-srv/src/procedure/region_migration/close_downgraded_region.rs b/src/meta-srv/src/procedure/region_migration/close_downgraded_region.rs index 9113607681..ca451e6238 100644 --- a/src/meta-srv/src/procedure/region_migration/close_downgraded_region.rs +++ b/src/meta-srv/src/procedure/region_migration/close_downgraded_region.rs @@ -62,7 +62,6 @@ impl CloseDowngradedRegion { async fn build_close_region_instruction(&self, ctx: &mut Context) -> Result { let pc = &ctx.persistent_ctx; let downgrade_leader_datanode_id = pc.from_peer.id; - let cluster_id = pc.cluster_id; let table_id = pc.region_id.table_id(); let region_number = pc.region_id.region_number(); let datanode_table_value = ctx.get_from_peer_datanode_table_value().await?; @@ -70,7 +69,6 @@ impl CloseDowngradedRegion { let RegionInfo { engine, .. 
} = datanode_table_value.region_info.clone(); Ok(Instruction::CloseRegion(RegionIdent { - cluster_id, datanode_id: downgrade_leader_datanode_id, table_id, region_number, diff --git a/src/meta-srv/src/procedure/region_migration/downgrade_leader_region.rs b/src/meta-srv/src/procedure/region_migration/downgrade_leader_region.rs index 51b55d2be1..d1dfcd3e05 100644 --- a/src/meta-srv/src/procedure/region_migration/downgrade_leader_region.rs +++ b/src/meta-srv/src/procedure/region_migration/downgrade_leader_region.rs @@ -294,7 +294,6 @@ mod tests { from_peer: Peer::empty(1), to_peer: Peer::empty(2), region_id: RegionId::new(1024, 1), - cluster_id: 0, timeout: Duration::from_millis(1000), } } diff --git a/src/meta-srv/src/procedure/region_migration/manager.rs b/src/meta-srv/src/procedure/region_migration/manager.rs index c61a9a4b6c..e2345559d0 100644 --- a/src/meta-srv/src/procedure/region_migration/manager.rs +++ b/src/meta-srv/src/procedure/region_migration/manager.rs @@ -22,7 +22,6 @@ use common_meta::key::table_info::TableInfoValue; use common_meta::key::table_route::TableRouteValue; use common_meta::peer::Peer; use common_meta::rpc::router::RegionRoute; -use common_meta::ClusterId; use common_procedure::{watcher, ProcedureId, ProcedureManagerRef, ProcedureWithId}; use common_telemetry::{error, info}; use snafu::{ensure, OptionExt, ResultExt}; @@ -101,7 +100,6 @@ impl Drop for RegionMigrationProcedureGuard { #[derive(Debug, Clone)] pub struct RegionMigrationProcedureTask { - pub(crate) cluster_id: ClusterId, pub(crate) region_id: RegionId, pub(crate) from_peer: Peer, pub(crate) to_peer: Peer, @@ -109,15 +107,8 @@ pub struct RegionMigrationProcedureTask { } impl RegionMigrationProcedureTask { - pub fn new( - cluster_id: ClusterId, - region_id: RegionId, - from_peer: Peer, - to_peer: Peer, - timeout: Duration, - ) -> Self { + pub fn new(region_id: RegionId, from_peer: Peer, to_peer: Peer, timeout: Duration) -> Self { Self { - cluster_id, region_id, from_peer, to_peer, @@ -130,8 +121,8 @@ impl Display for RegionMigrationProcedureTask { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, - "cluster: {}, region: {}, from_peer: {}, to_peer: {}", - self.cluster_id, self.region_id, self.from_peer, self.to_peer + "region: {}, from_peer: {}, to_peer: {}", + self.region_id, self.from_peer, self.to_peer ) } } @@ -331,7 +322,6 @@ impl RegionMigrationManager { .with_label_values(&["desc", &task.to_peer.id.to_string()]) .inc(); let RegionMigrationProcedureTask { - cluster_id, region_id, from_peer, to_peer, @@ -341,7 +331,6 @@ impl RegionMigrationManager { PersistentContext { catalog: catalog_name, schema: schema_name, - cluster_id, region_id, from_peer, to_peer, @@ -394,7 +383,6 @@ mod test { let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory); let region_id = RegionId::new(1024, 1); let task = RegionMigrationProcedureTask { - cluster_id: 1, region_id, from_peer: Peer::empty(2), to_peer: Peer::empty(1), @@ -419,7 +407,6 @@ mod test { let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory); let region_id = RegionId::new(1024, 1); let task = RegionMigrationProcedureTask { - cluster_id: 1, region_id, from_peer: Peer::empty(1), to_peer: Peer::empty(1), @@ -437,7 +424,6 @@ mod test { let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory); let region_id = RegionId::new(1024, 1); let task = RegionMigrationProcedureTask { - cluster_id: 1, region_id, from_peer: Peer::empty(1), 
to_peer: Peer::empty(2), @@ -455,7 +441,6 @@ mod test { let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory); let region_id = RegionId::new(1024, 1); let task = RegionMigrationProcedureTask { - cluster_id: 1, region_id, from_peer: Peer::empty(1), to_peer: Peer::empty(2), @@ -483,7 +468,6 @@ mod test { let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory); let region_id = RegionId::new(1024, 1); let task = RegionMigrationProcedureTask { - cluster_id: 1, region_id, from_peer: Peer::empty(1), to_peer: Peer::empty(2), @@ -515,7 +499,6 @@ mod test { let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory); let region_id = RegionId::new(1024, 1); let task = RegionMigrationProcedureTask { - cluster_id: 1, region_id, from_peer: Peer::empty(1), to_peer: Peer::empty(2), @@ -542,7 +525,6 @@ mod test { let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory); let region_id = RegionId::new(1024, 1); let task = RegionMigrationProcedureTask { - cluster_id: 1, region_id, from_peer: Peer::empty(1), to_peer: Peer::empty(2), diff --git a/src/meta-srv/src/procedure/region_migration/open_candidate_region.rs b/src/meta-srv/src/procedure/region_migration/open_candidate_region.rs index 454c0bf9c0..679dfd1355 100644 --- a/src/meta-srv/src/procedure/region_migration/open_candidate_region.rs +++ b/src/meta-srv/src/procedure/region_migration/open_candidate_region.rs @@ -62,7 +62,6 @@ impl OpenCandidateRegion { /// - Datanode Table is not found. async fn build_open_region_instruction(&self, ctx: &mut Context) -> Result { let pc = &ctx.persistent_ctx; - let cluster_id = pc.cluster_id; let table_id = pc.region_id.table_id(); let region_number = pc.region_id.region_number(); let candidate_id = pc.to_peer.id; @@ -77,7 +76,6 @@ impl OpenCandidateRegion { let open_instruction = Instruction::OpenRegion(OpenRegion::new( RegionIdent { - cluster_id, datanode_id: candidate_id, table_id, region_number, @@ -214,7 +212,6 @@ mod tests { fn new_mock_open_instruction(datanode_id: DatanodeId, region_id: RegionId) -> Instruction { Instruction::OpenRegion(OpenRegion { region_ident: RegionIdent { - cluster_id: 0, datanode_id, table_id: region_id.table_id(), region_number: region_id.region_number(), diff --git a/src/meta-srv/src/procedure/region_migration/test_util.rs b/src/meta-srv/src/procedure/region_migration/test_util.rs index 2fe55edcab..40d8325c89 100644 --- a/src/meta-srv/src/procedure/region_migration/test_util.rs +++ b/src/meta-srv/src/procedure/region_migration/test_util.rs @@ -19,7 +19,7 @@ use std::sync::Arc; use std::time::Duration; use api::v1::meta::mailbox_message::Payload; -use api::v1::meta::{HeartbeatResponse, MailboxMessage, RequestHeader}; +use api::v1::meta::{HeartbeatResponse, MailboxMessage}; use common_meta::ddl::NoopRegionFailureDetectorControl; use common_meta::instruction::{ DowngradeRegionReply, InstructionReply, SimpleReply, UpgradeRegionReply, @@ -85,7 +85,7 @@ impl MailboxContext { tx: Sender>, ) { let pusher_id = channel.pusher_id(); - let pusher = Pusher::new(tx, &RequestHeader::default()); + let pusher = Pusher::new(tx); let _ = self.pushers.insert(pusher_id.string_key(), pusher).await; } @@ -317,7 +317,6 @@ pub fn new_persistent_context(from: u64, to: u64, region_id: RegionId) -> Persis from_peer: Peer::empty(from), to_peer: Peer::empty(to), region_id, - cluster_id: 0, timeout: Duration::from_secs(10), } } diff --git 
a/src/meta-srv/src/procedure/region_migration/update_metadata/rollback_downgraded_region.rs b/src/meta-srv/src/procedure/region_migration/update_metadata/rollback_downgraded_region.rs index 0d568ab7b0..9e038bebc6 100644 --- a/src/meta-srv/src/procedure/region_migration/update_metadata/rollback_downgraded_region.rs +++ b/src/meta-srv/src/procedure/region_migration/update_metadata/rollback_downgraded_region.rs @@ -172,11 +172,7 @@ mod tests { let detecting_regions = event.into_region_failure_detectors(); assert_eq!( detecting_regions, - vec![( - ctx.persistent_ctx.cluster_id, - from_peer.id, - ctx.persistent_ctx.region_id - )] + vec![(from_peer.id, ctx.persistent_ctx.region_id)] ); let table_route = table_metadata_manager diff --git a/src/meta-srv/src/procedure/region_migration/upgrade_candidate_region.rs b/src/meta-srv/src/procedure/region_migration/upgrade_candidate_region.rs index fa989274b4..6ed8e4905b 100644 --- a/src/meta-srv/src/procedure/region_migration/upgrade_candidate_region.rs +++ b/src/meta-srv/src/procedure/region_migration/upgrade_candidate_region.rs @@ -238,7 +238,6 @@ mod tests { from_peer: Peer::empty(1), to_peer: Peer::empty(2), region_id: RegionId::new(1024, 1), - cluster_id: 0, timeout: Duration::from_millis(1000), } } diff --git a/src/meta-srv/src/procedure/tests.rs b/src/meta-srv/src/procedure/tests.rs index 5690b8fedd..2d25094c98 100644 --- a/src/meta-srv/src/procedure/tests.rs +++ b/src/meta-srv/src/procedure/tests.rs @@ -97,7 +97,6 @@ fn create_table_task(table_name: Option<&str>) -> CreateTableTask { #[test] fn test_region_request_builder() { let mut procedure = CreateTableProcedure::new( - 1, create_table_task(None), test_data::new_ddl_context(Arc::new(NodeClients::default())), ); @@ -192,7 +191,6 @@ async fn test_on_datanode_create_regions() { let node_manager = new_node_manager(®ion_server, ®ion_routes).await; let mut procedure = CreateTableProcedure::new( - 1, create_table_task(None), test_data::new_ddl_context(node_manager), ); @@ -260,7 +258,7 @@ async fn test_on_datanode_create_logical_regions() { .0; let _ = kv_backend.txn(physical_route_txn).await.unwrap(); let mut procedure = - CreateLogicalTablesProcedure::new(1, vec![task1, task2, task3], physical_table_id, ctx); + CreateLogicalTablesProcedure::new(vec![task1, task2, task3], physical_table_id, ctx); let expected_created_regions = Arc::new(Mutex::new(HashMap::from([(1, 3), (2, 3), (3, 3)]))); diff --git a/src/meta-srv/src/region/failure_detector.rs b/src/meta-srv/src/region/failure_detector.rs index 8533d27f30..a795e2e3e8 100644 --- a/src/meta-srv/src/region/failure_detector.rs +++ b/src/meta-srv/src/region/failure_detector.rs @@ -129,7 +129,7 @@ mod tests { #[test] fn test_default_failure_detector_container() { let container = RegionFailureDetector::new(Default::default()); - let detecting_region = (0, 2, RegionId::new(1, 1)); + let detecting_region = (2, RegionId::new(1, 1)); let _ = container.region_failure_detector(detecting_region); assert!(container.contains(&detecting_region)); diff --git a/src/meta-srv/src/region/lease_keeper.rs b/src/meta-srv/src/region/lease_keeper.rs index 194f3710c8..68e492406b 100644 --- a/src/meta-srv/src/region/lease_keeper.rs +++ b/src/meta-srv/src/region/lease_keeper.rs @@ -19,7 +19,7 @@ use common_meta::key::table_route::TableRouteValue; use common_meta::key::TableMetadataManagerRef; use common_meta::region_keeper::MemoryRegionKeeperRef; use common_meta::rpc::router::RegionRoute; -use common_meta::{ClusterId, DatanodeId}; +use common_meta::DatanodeId; use 
common_telemetry::warn; use snafu::ResultExt; use store_api::region_engine::RegionRole; @@ -167,7 +167,6 @@ impl RegionLeaseKeeper { /// and corresponding regions will be added to `non_exists` of [RenewRegionLeasesResponse]. pub async fn renew_region_leases( &self, - _cluster_id: ClusterId, datanode_id: DatanodeId, regions: &[(RegionId, RegionRole)], ) -> Result { @@ -282,7 +281,6 @@ mod tests { renewed, } = keeper .renew_region_leases( - 0, 1, &[ (RegionId::new(1024, 1), RegionRole::Follower), @@ -384,7 +382,7 @@ mod tests { non_exists, renewed, } = keeper - .renew_region_leases(0, 1, &[(region_id, RegionRole::Follower)]) + .renew_region_leases(1, &[(region_id, RegionRole::Follower)]) .await .unwrap(); assert!(renewed.is_empty()); @@ -397,7 +395,7 @@ mod tests { non_exists, renewed, } = keeper - .renew_region_leases(0, leader_peer_id, &[(region_id, role)]) + .renew_region_leases(leader_peer_id, &[(region_id, role)]) .await .unwrap(); @@ -411,7 +409,7 @@ mod tests { non_exists, renewed, } = keeper - .renew_region_leases(0, follower_peer_id, &[(region_id, role)]) + .renew_region_leases(follower_peer_id, &[(region_id, role)]) .await .unwrap(); @@ -432,7 +430,7 @@ mod tests { non_exists, renewed, } = keeper - .renew_region_leases(0, leader_peer_id, &[(opening_region_id, role)]) + .renew_region_leases(leader_peer_id, &[(opening_region_id, role)]) .await .unwrap(); @@ -465,7 +463,6 @@ mod tests { renewed, } = keeper .renew_region_leases( - 0, 1, &[ (region_id, RegionRole::Follower), @@ -513,7 +510,7 @@ mod tests { non_exists, renewed, } = keeper - .renew_region_leases(0, follower_peer_id, &[(region_id, role)]) + .renew_region_leases(follower_peer_id, &[(region_id, role)]) .await .unwrap(); diff --git a/src/meta-srv/src/region/supervisor.rs b/src/meta-srv/src/region/supervisor.rs index 4c3725d114..44b2a6e7f0 100644 --- a/src/meta-srv/src/region/supervisor.rs +++ b/src/meta-srv/src/region/supervisor.rs @@ -22,7 +22,7 @@ use common_meta::ddl::{DetectingRegion, RegionFailureDetectorController}; use common_meta::key::maintenance::MaintenanceModeManagerRef; use common_meta::leadership_notifier::LeadershipChangeListener; use common_meta::peer::PeerLookupServiceRef; -use common_meta::{ClusterId, DatanodeId}; +use common_meta::DatanodeId; use common_runtime::JoinHandle; use common_telemetry::{error, info, warn}; use common_time::util::current_time_millis; @@ -45,7 +45,6 @@ use crate::selector::SelectorOptions; /// and a timestamp indicating when the heartbeat was sent. #[derive(Debug)] pub(crate) struct DatanodeHeartbeat { - cluster_id: ClusterId, datanode_id: DatanodeId, // TODO(weny): Considers collecting the memtable size in regions. 
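For context on the `renew_region_leases` signature change, here is a hypothetical, synchronous sketch of the trimmed entry point with stand-in types; the real keeper consults table metadata rather than a plain list of known regions:

```rust
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum RegionRole {
    Leader,
    Follower,
}

type RegionId = u64;
type DatanodeId = u64;

struct RenewRegionLeasesResponse {
    non_exists: Vec<RegionId>,
    renewed: Vec<(RegionId, RegionRole)>,
}

// The datanode id alone identifies the caller; no cluster id parameter remains.
fn renew_region_leases(
    known_regions: &[RegionId],
    _datanode_id: DatanodeId,
    regions: &[(RegionId, RegionRole)],
) -> RenewRegionLeasesResponse {
    let (renewed, non_exists): (Vec<_>, Vec<_>) = regions
        .iter()
        .copied()
        .partition(|(region_id, _)| known_regions.contains(region_id));
    RenewRegionLeasesResponse {
        non_exists: non_exists.into_iter().map(|(id, _)| id).collect(),
        renewed,
    }
}

fn main() {
    let resp = renew_region_leases(&[1], 42, &[(1, RegionRole::Leader), (2, RegionRole::Follower)]);
    assert_eq!(resp.renewed, vec![(1, RegionRole::Leader)]);
    assert_eq!(resp.non_exists, vec![2]);
}
```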
regions: Vec, @@ -55,7 +54,6 @@ pub(crate) struct DatanodeHeartbeat { impl From<&Stat> for DatanodeHeartbeat { fn from(value: &Stat) -> Self { DatanodeHeartbeat { - cluster_id: value.cluster_id, datanode_id: value.id, regions: value.region_stats.iter().map(|x| x.id).collect(), timestamp: value.timestamp_millis, @@ -341,7 +339,7 @@ impl RegionSupervisor { } } - async fn handle_region_failures(&self, mut regions: Vec<(ClusterId, DatanodeId, RegionId)>) { + async fn handle_region_failures(&self, mut regions: Vec<(DatanodeId, RegionId)>) { if regions.is_empty() { return; } @@ -358,22 +356,19 @@ impl RegionSupervisor { } let migrating_regions = regions - .extract_if(.., |(_, _, region_id)| { + .extract_if(.., |(_, region_id)| { self.region_migration_manager.tracker().contains(*region_id) }) .collect::>(); - for (cluster_id, datanode_id, region_id) in migrating_regions { - self.failure_detector - .remove(&(cluster_id, datanode_id, region_id)); + for (datanode_id, region_id) in migrating_regions { + self.failure_detector.remove(&(datanode_id, region_id)); } warn!("Detects region failures: {:?}", regions); - for (cluster_id, datanode_id, region_id) in regions { - match self.do_failover(cluster_id, datanode_id, region_id).await { - Ok(_) => self - .failure_detector - .remove(&(cluster_id, datanode_id, region_id)), + for (datanode_id, region_id) in regions { + match self.do_failover(datanode_id, region_id).await { + Ok(_) => self.failure_detector.remove(&(datanode_id, region_id)), Err(err) => { error!(err; "Failed to execute region failover for region: {region_id}, datanode: {datanode_id}"); } @@ -388,15 +383,10 @@ impl RegionSupervisor { .context(error::MaintenanceModeManagerSnafu) } - async fn do_failover( - &self, - cluster_id: ClusterId, - datanode_id: DatanodeId, - region_id: RegionId, - ) -> Result<()> { + async fn do_failover(&self, datanode_id: DatanodeId, region_id: RegionId) -> Result<()> { let from_peer = self .peer_lookup - .datanode(cluster_id, datanode_id) + .datanode(datanode_id) .await .context(error::LookupPeerSnafu { peer_id: datanode_id, @@ -407,7 +397,6 @@ impl RegionSupervisor { let mut peers = self .selector .select( - cluster_id, &self.selector_context, SelectorOptions { min_required_items: 1, @@ -423,7 +412,6 @@ impl RegionSupervisor { return Ok(()); } let task = RegionMigrationProcedureTask { - cluster_id, region_id, from_peer, to_peer, @@ -442,7 +430,7 @@ impl RegionSupervisor { } /// Detects the failure of regions. - fn detect_region_failure(&self) -> Vec<(ClusterId, DatanodeId, RegionId)> { + fn detect_region_failure(&self) -> Vec<(DatanodeId, RegionId)> { self.failure_detector .iter() .filter_map(|e| { @@ -464,7 +452,7 @@ impl RegionSupervisor { /// Updates the state of corresponding failure detectors. 
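A minimal sketch of the new failure-detector keying, using plain std types in place of the real `RegionFailureDetector`: detectors are now tracked per `(DatanodeId, RegionId)`, matching the two-element `DetectingRegion` tuple used throughout this patch:

```rust
use std::collections::HashMap;

type DatanodeId = u64;
type RegionIdRepr = u64; // stand-in for `store_api::storage::RegionId`
type DetectingRegion = (DatanodeId, RegionIdRepr);

#[derive(Default)]
struct FailureDetectors {
    // Last heartbeat timestamp per detecting region; the real detector keeps phi-accrual state.
    last_heartbeat_millis: HashMap<DetectingRegion, i64>,
}

impl FailureDetectors {
    fn on_heartbeat(&mut self, datanode_id: DatanodeId, region_id: RegionIdRepr, now: i64) {
        // Heartbeats no longer carry a cluster id, so the key is just the pair.
        self.last_heartbeat_millis.insert((datanode_id, region_id), now);
    }

    fn remove(&mut self, region: &DetectingRegion) {
        self.last_heartbeat_millis.remove(region);
    }

    fn contains(&self, region: &DetectingRegion) -> bool {
        self.last_heartbeat_millis.contains_key(region)
    }
}

fn main() {
    let mut detectors = FailureDetectors::default();
    detectors.on_heartbeat(1, 4398046511105, 100);
    assert!(detectors.contains(&(1, 4398046511105)));
    detectors.remove(&(1, 4398046511105));
    assert!(!detectors.contains(&(1, 4398046511105)));
}
```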
fn on_heartbeat_arrived(&self, heartbeat: DatanodeHeartbeat) { for region_id in heartbeat.regions { - let detecting_region = (heartbeat.cluster_id, heartbeat.datanode_id, region_id); + let detecting_region = (heartbeat.datanode_id, region_id); let mut detector = self .failure_detector .region_failure_detector(detecting_region); @@ -537,7 +525,6 @@ pub(crate) mod tests { sender .send(Event::HeartbeatArrived(DatanodeHeartbeat { - cluster_id: 0, datanode_id: 0, regions: vec![RegionId::new(1, 1)], timestamp: 100, @@ -547,7 +534,7 @@ pub(crate) mod tests { let (tx, rx) = oneshot::channel(); sender.send(Event::Dump(tx)).await.unwrap(); let detector = rx.await.unwrap(); - assert!(detector.contains(&(0, 0, RegionId::new(1, 1)))); + assert!(detector.contains(&(0, RegionId::new(1, 1)))); // Clear up sender.send(Event::Clear).await.unwrap(); @@ -561,7 +548,6 @@ pub(crate) mod tests { (0..2000) .map(|i| DatanodeHeartbeat { timestamp: start + i * 1000 + rng.gen_range(0..100), - cluster_id: 0, datanode_id, regions: region_ids .iter() @@ -630,7 +616,7 @@ pub(crate) mod tests { let (mut supervisor, sender) = new_test_supervisor(); let controller = RegionFailureDetectorControl::new(sender.clone()); tokio::spawn(async move { supervisor.run().await }); - let detecting_region = (0, 1, RegionId::new(1, 1)); + let detecting_region = (1, RegionId::new(1, 1)); controller .register_failure_detectors(vec![detecting_region]) .await; diff --git a/src/meta-srv/src/selector.rs b/src/meta-srv/src/selector.rs index 0795bccd9b..c197f04e59 100644 --- a/src/meta-srv/src/selector.rs +++ b/src/meta-srv/src/selector.rs @@ -25,19 +25,12 @@ use serde::{Deserialize, Serialize}; use crate::error; use crate::error::Result; -pub type Namespace = u64; - #[async_trait::async_trait] pub trait Selector: Send + Sync { type Context; type Output; - async fn select( - &self, - ns: Namespace, - ctx: &Self::Context, - opts: SelectorOptions, - ) -> Result; + async fn select(&self, ctx: &Self::Context, opts: SelectorOptions) -> Result; } #[derive(Debug)] diff --git a/src/meta-srv/src/selector/lease_based.rs b/src/meta-srv/src/selector/lease_based.rs index d9af63da65..a7ce7c7321 100644 --- a/src/meta-srv/src/selector/lease_based.rs +++ b/src/meta-srv/src/selector/lease_based.rs @@ -19,7 +19,7 @@ use crate::lease; use crate::metasrv::SelectorContext; use crate::selector::common::choose_items; use crate::selector::weighted_choose::{RandomWeightedChoose, WeightedItem}; -use crate::selector::{Namespace, Selector, SelectorOptions}; +use crate::selector::{Selector, SelectorOptions}; /// Select all alive datanodes based using a random weighted choose. pub struct LeaseBasedSelector; @@ -29,15 +29,10 @@ impl Selector for LeaseBasedSelector { type Context = SelectorContext; type Output = Vec; - async fn select( - &self, - ns: Namespace, - ctx: &Self::Context, - opts: SelectorOptions, - ) -> Result { + async fn select(&self, ctx: &Self::Context, opts: SelectorOptions) -> Result { // 1. get alive datanodes. let lease_kvs = - lease::alive_datanodes(ns, &ctx.meta_peer_client, ctx.datanode_lease_secs).await?; + lease::alive_datanodes(&ctx.meta_peer_client, ctx.datanode_lease_secs).await?; // 2. compute weight array, but the weight of each item is the same. 
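To show the trimmed `Selector` trait shape in isolation, here is a toy implementor; `Peer`, `SelectorOptions`, and the `String` error type are simplified stand-ins, and the example assumes the `async-trait` and `tokio` crates:

```rust
use async_trait::async_trait;

#[derive(Debug, Clone)]
struct Peer {
    id: u64,
    addr: String,
}

#[derive(Debug)]
struct SelectorOptions {
    min_required_items: usize,
}

#[async_trait]
trait Selector: Send + Sync {
    type Context;
    type Output;

    // `select` now takes only the context and the options; the namespace argument is gone.
    async fn select(&self, ctx: &Self::Context, opts: SelectorOptions) -> Result<Self::Output, String>;
}

// Toy selector: returns the first `min_required_items` peers from the context.
struct FirstNSelector;

#[async_trait]
impl Selector for FirstNSelector {
    type Context = Vec<Peer>;
    type Output = Vec<Peer>;

    async fn select(&self, ctx: &Self::Context, opts: SelectorOptions) -> Result<Vec<Peer>, String> {
        Ok(ctx.iter().take(opts.min_required_items).cloned().collect())
    }
}

#[tokio::main]
async fn main() {
    let peers = vec![
        Peer { id: 1, addr: "node1".to_string() },
        Peer { id: 2, addr: "node2".to_string() },
    ];
    let opts = SelectorOptions { min_required_items: 1 };
    let selected = FirstNSelector.select(&peers, opts).await.unwrap();
    assert_eq!(selected.len(), 1);
    assert_eq!(selected[0].id, 1);
}
```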
let weight_array = lease_kvs diff --git a/src/meta-srv/src/selector/load_based.rs b/src/meta-srv/src/selector/load_based.rs index 8a00c7fdb7..2628990bf4 100644 --- a/src/meta-srv/src/selector/load_based.rs +++ b/src/meta-srv/src/selector/load_based.rs @@ -29,7 +29,7 @@ use crate::metasrv::SelectorContext; use crate::selector::common::choose_items; use crate::selector::weight_compute::{RegionNumsBasedWeightCompute, WeightCompute}; use crate::selector::weighted_choose::RandomWeightedChoose; -use crate::selector::{Namespace, Selector, SelectorOptions}; +use crate::selector::{Selector, SelectorOptions}; pub struct LoadBasedSelector { weight_compute: C, @@ -57,15 +57,10 @@ where type Context = SelectorContext; type Output = Vec; - async fn select( - &self, - ns: Namespace, - ctx: &Self::Context, - opts: SelectorOptions, - ) -> Result { + async fn select(&self, ctx: &Self::Context, opts: SelectorOptions) -> Result { // 1. get alive datanodes. let lease_kvs = - lease::alive_datanodes(ns, &ctx.meta_peer_client, ctx.datanode_lease_secs).await?; + lease::alive_datanodes(&ctx.meta_peer_client, ctx.datanode_lease_secs).await?; // 2. get stat kvs and filter out expired datanodes. let stat_keys = lease_kvs.keys().map(|k| k.into()).collect(); @@ -97,8 +92,8 @@ where let selected = choose_items(&opts, &mut weighted_choose)?; debug!( - "LoadBasedSelector select peers: {:?}, namespace: {}, opts: {:?}.", - selected, ns, opts, + "LoadBasedSelector select peers: {:?}, opts: {:?}.", + selected, opts, ); Ok(selected) @@ -165,33 +160,21 @@ mod tests { fn test_filter_out_expired_datanode() { let mut stat_kvs = HashMap::new(); stat_kvs.insert( - DatanodeStatKey { - cluster_id: 1, - node_id: 0, - }, + DatanodeStatKey { node_id: 0 }, DatanodeStatValue { stats: vec![] }, ); stat_kvs.insert( - DatanodeStatKey { - cluster_id: 1, - node_id: 1, - }, + DatanodeStatKey { node_id: 1 }, DatanodeStatValue { stats: vec![] }, ); stat_kvs.insert( - DatanodeStatKey { - cluster_id: 1, - node_id: 2, - }, + DatanodeStatKey { node_id: 2 }, DatanodeStatValue { stats: vec![] }, ); let mut lease_kvs = HashMap::new(); lease_kvs.insert( - DatanodeLeaseKey { - cluster_id: 1, - node_id: 1, - }, + DatanodeLeaseKey { node_id: 1 }, LeaseValue { timestamp_millis: 0, node_addr: "127.0.0.1:3002".to_string(), @@ -201,9 +184,6 @@ mod tests { let alive_stat_kvs = filter_out_expired_datanode(stat_kvs, &lease_kvs); assert_eq!(1, alive_stat_kvs.len()); - assert!(alive_stat_kvs.contains_key(&DatanodeStatKey { - cluster_id: 1, - node_id: 1 - })); + assert!(alive_stat_kvs.contains_key(&DatanodeStatKey { node_id: 1 })); } } diff --git a/src/meta-srv/src/selector/round_robin.rs b/src/meta-srv/src/selector/round_robin.rs index b50823cb02..f11a36555f 100644 --- a/src/meta-srv/src/selector/round_robin.rs +++ b/src/meta-srv/src/selector/round_robin.rs @@ -20,7 +20,7 @@ use snafu::ensure; use crate::error::{NoEnoughAvailableNodeSnafu, Result}; use crate::lease; use crate::metasrv::{SelectTarget, SelectorContext}; -use crate::selector::{Namespace, Selector, SelectorOptions}; +use crate::selector::{Selector, SelectorOptions}; /// Round-robin selector that returns the next peer in the list in sequence. /// Datanodes are ordered by their node_id. @@ -53,7 +53,6 @@ impl RoundRobinSelector { async fn get_peers( &self, - ns: Namespace, min_required_items: usize, ctx: &SelectorContext, ) -> Result> { @@ -61,8 +60,7 @@ impl RoundRobinSelector { SelectTarget::Datanode => { // 1. get alive datanodes. 
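A hypothetical reconstruction of the filtering exercised by the test above: with both key types carrying only `node_id`, stats and leases join on the datanode id alone (stand-in types and a plain `HashMap`, not the crate's definitions):

```rust
use std::collections::HashMap;

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct DatanodeStatKey {
    node_id: u64,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct DatanodeLeaseKey {
    node_id: u64,
}

// Keep only the stat entries whose datanode still holds a lease.
fn filter_out_expired(
    stats: HashMap<DatanodeStatKey, Vec<u64>>,
    leases: &HashMap<DatanodeLeaseKey, i64>,
) -> HashMap<DatanodeStatKey, Vec<u64>> {
    stats
        .into_iter()
        .filter(|(k, _)| leases.contains_key(&DatanodeLeaseKey { node_id: k.node_id }))
        .collect()
}

fn main() {
    let mut stats = HashMap::new();
    stats.insert(DatanodeStatKey { node_id: 0 }, vec![]);
    stats.insert(DatanodeStatKey { node_id: 1 }, vec![]);

    let mut leases = HashMap::new();
    leases.insert(DatanodeLeaseKey { node_id: 1 }, 0_i64);

    let alive = filter_out_expired(stats, &leases);
    assert_eq!(alive.len(), 1);
    assert!(alive.contains_key(&DatanodeStatKey { node_id: 1 }));
}
```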
let lease_kvs = - lease::alive_datanodes(ns, &ctx.meta_peer_client, ctx.datanode_lease_secs) - .await?; + lease::alive_datanodes(&ctx.meta_peer_client, ctx.datanode_lease_secs).await?; // 2. map into peers lease_kvs @@ -73,8 +71,7 @@ impl RoundRobinSelector { SelectTarget::Flownode => { // 1. get alive flownodes. let lease_kvs = - lease::alive_flownodes(ns, &ctx.meta_peer_client, ctx.flownode_lease_secs) - .await?; + lease::alive_flownodes(&ctx.meta_peer_client, ctx.flownode_lease_secs).await?; // 2. map into peers lease_kvs @@ -105,13 +102,8 @@ impl Selector for RoundRobinSelector { type Context = SelectorContext; type Output = Vec; - async fn select( - &self, - ns: Namespace, - ctx: &Self::Context, - opts: SelectorOptions, - ) -> Result> { - let peers = self.get_peers(ns, opts.min_required_items, ctx).await?; + async fn select(&self, ctx: &Self::Context, opts: SelectorOptions) -> Result> { + let peers = self.get_peers(opts.min_required_items, ctx).await?; // choose peers let mut selected = Vec::with_capacity(opts.min_required_items); for _ in 0..opts.min_required_items { @@ -135,8 +127,6 @@ mod test { async fn test_round_robin_selector() { let selector = RoundRobinSelector::default(); let ctx = create_selector_context(); - let ns = 0; - // add three nodes let peer1 = Peer { id: 2, @@ -151,11 +141,10 @@ mod test { addr: "node3".to_string(), }; let peers = vec![peer1.clone(), peer2.clone(), peer3.clone()]; - put_datanodes(ns, &ctx.meta_peer_client, peers).await; + put_datanodes(&ctx.meta_peer_client, peers).await; let peers = selector .select( - ns, &ctx, SelectorOptions { min_required_items: 4, @@ -172,7 +161,6 @@ mod test { let peers = selector .select( - ns, &ctx, SelectorOptions { min_required_items: 2, diff --git a/src/meta-srv/src/selector/test_utils.rs b/src/meta-srv/src/selector/test_utils.rs index 0c3b4e3f21..edf0f8d897 100644 --- a/src/meta-srv/src/selector/test_utils.rs +++ b/src/meta-srv/src/selector/test_utils.rs @@ -22,7 +22,7 @@ use rand::prelude::SliceRandom; use crate::cluster::MetaPeerClientBuilder; use crate::error::Result; use crate::metasrv::SelectorContext; -use crate::selector::{Namespace, Selector, SelectorOptions}; +use crate::selector::{Selector, SelectorOptions}; /// Returns [SelectorContext] for test purpose. 
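For the selection step itself, a round-robin pick over an already-fetched peer list can be as small as an atomic counter. This is only an illustration of the strategy, not the crate's `RoundRobinSelector`:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

struct RoundRobin {
    counter: AtomicUsize,
}

impl RoundRobin {
    // Returns the next item in sequence, wrapping around at the end of the list.
    fn pick<'a, T>(&self, items: &'a [T]) -> Option<&'a T> {
        if items.is_empty() {
            return None;
        }
        let idx = self.counter.fetch_add(1, Ordering::Relaxed) % items.len();
        items.get(idx)
    }
}

fn main() {
    let rr = RoundRobin { counter: AtomicUsize::new(0) };
    let peers = ["node1", "node2", "node3"];
    assert_eq!(rr.pick(&peers), Some(&"node1"));
    assert_eq!(rr.pick(&peers), Some(&"node2"));
    assert_eq!(rr.pick(&peers), Some(&"node3"));
    assert_eq!(rr.pick(&peers), Some(&"node1"));
}
```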
pub fn new_test_selector_context() -> SelectorContext { @@ -60,12 +60,7 @@ impl Selector for RandomNodeSelector { type Context = SelectorContext; type Output = Vec; - async fn select( - &self, - _ns: Namespace, - _ctx: &Self::Context, - _opts: SelectorOptions, - ) -> Result { + async fn select(&self, _ctx: &Self::Context, _opts: SelectorOptions) -> Result { let mut rng = rand::thread_rng(); let mut nodes = self.nodes.clone(); nodes.shuffle(&mut rng); diff --git a/src/meta-srv/src/selector/weight_compute.rs b/src/meta-srv/src/selector/weight_compute.rs index 16289bc3bd..cd8b0409cd 100644 --- a/src/meta-srv/src/selector/weight_compute.rs +++ b/src/meta-srv/src/selector/weight_compute.rs @@ -104,26 +104,17 @@ mod tests { #[test] fn test_weight_compute() { let mut stat_kvs: HashMap = HashMap::default(); - let stat_key = DatanodeStatKey { - cluster_id: 1, - node_id: 1, - }; + let stat_key = DatanodeStatKey { node_id: 1 }; let stat_val = DatanodeStatValue { stats: vec![mock_stat_1()], }; stat_kvs.insert(stat_key, stat_val); - let stat_key = DatanodeStatKey { - cluster_id: 1, - node_id: 2, - }; + let stat_key = DatanodeStatKey { node_id: 2 }; let stat_val = DatanodeStatValue { stats: vec![mock_stat_2()], }; stat_kvs.insert(stat_key, stat_val); - let stat_key = DatanodeStatKey { - cluster_id: 1, - node_id: 3, - }; + let stat_key = DatanodeStatKey { node_id: 3 }; let stat_val = DatanodeStatValue { stats: vec![mock_stat_3()], }; diff --git a/src/meta-srv/src/service/admin/node_lease.rs b/src/meta-srv/src/service/admin/node_lease.rs index a3736d1818..f7b4855dac 100644 --- a/src/meta-srv/src/service/admin/node_lease.rs +++ b/src/meta-srv/src/service/admin/node_lease.rs @@ -22,7 +22,7 @@ use crate::cluster::MetaPeerClientRef; use crate::error::{self, Result}; use crate::key::{DatanodeLeaseKey, LeaseValue}; use crate::lease; -use crate::service::admin::{util, HttpHandler}; +use crate::service::admin::HttpHandler; pub struct NodeLeaseHandler { pub meta_peer_client: MetaPeerClientRef, @@ -34,11 +34,9 @@ impl HttpHandler for NodeLeaseHandler { &self, _: &str, _: http::Method, - params: &HashMap, + _: &HashMap, ) -> Result> { - let cluster_id = util::extract_cluster_id(params)?; - - let leases = lease::alive_datanodes(cluster_id, &self.meta_peer_client, u64::MAX).await?; + let leases = lease::alive_datanodes(&self.meta_peer_client, u64::MAX).await?; let leases = leases .into_iter() .map(|(k, v)| HumanLease { diff --git a/src/meta-srv/src/service/admin/util.rs b/src/meta-srv/src/service/admin/util.rs index 0ea46f6702..cdabf38a63 100644 --- a/src/meta-srv/src/service/admin/util.rs +++ b/src/meta-srv/src/service/admin/util.rs @@ -12,24 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::collections::HashMap; - -use snafu::{OptionExt, ResultExt}; +use snafu::ResultExt; use tonic::codegen::http; -use crate::error::{self, MissingRequiredParameterSnafu, ParseNumSnafu, Result}; - -pub fn extract_cluster_id(params: &HashMap) -> Result { - params - .get("cluster_id") - .map(|id| id.parse::()) - .context(MissingRequiredParameterSnafu { - param: "cluster_id", - })? 
- .context(ParseNumSnafu { - err_msg: "`cluster_id` is not a valid number", - }) -} +use crate::error::{self, Result}; pub fn to_text_response(text: &str) -> Result> { http::Response::builder() diff --git a/src/meta-srv/src/service/cluster.rs b/src/meta-srv/src/service/cluster.rs index f5f5661b01..64e6eb9b6d 100644 --- a/src/meta-srv/src/service/cluster.rs +++ b/src/meta-srv/src/service/cluster.rs @@ -29,7 +29,7 @@ use crate::{error, metasrv}; impl cluster_server::Cluster for Metasrv { async fn batch_get(&self, req: Request) -> GrpcResult { if !self.is_leader() { - let is_not_leader = ResponseHeader::failed(0, Error::is_not_leader()); + let is_not_leader = ResponseHeader::failed(Error::is_not_leader()); let resp = PbBatchGetResponse { header: Some(is_not_leader), ..Default::default() @@ -46,13 +46,13 @@ impl cluster_server::Cluster for Metasrv { .await .context(error::KvBackendSnafu)?; - let resp = resp.to_proto_resp(ResponseHeader::success(0)); + let resp = resp.to_proto_resp(ResponseHeader::success()); Ok(Response::new(resp)) } async fn range(&self, req: Request) -> GrpcResult { if !self.is_leader() { - let is_not_leader = ResponseHeader::failed(0, Error::is_not_leader()); + let is_not_leader = ResponseHeader::failed(Error::is_not_leader()); let resp = PbRangeResponse { header: Some(is_not_leader), ..Default::default() @@ -69,7 +69,7 @@ impl cluster_server::Cluster for Metasrv { .await .context(error::KvBackendSnafu)?; - let resp = res.to_proto_resp(ResponseHeader::success(0)); + let resp = res.to_proto_resp(ResponseHeader::success()); Ok(Response::new(resp)) } @@ -78,7 +78,7 @@ impl cluster_server::Cluster for Metasrv { req: Request, ) -> GrpcResult { if !self.is_leader() { - let is_not_leader = ResponseHeader::failed(0, Error::is_not_leader()); + let is_not_leader = ResponseHeader::failed(Error::is_not_leader()); let resp = MetasrvPeersResponse { header: Some(is_not_leader), ..Default::default() @@ -103,7 +103,7 @@ impl cluster_server::Cluster for Metasrv { }; let resp = MetasrvPeersResponse { - header: Some(ResponseHeader::success(0)), + header: Some(ResponseHeader::success()), leader: Some(leader), followers, }; diff --git a/src/meta-srv/src/service/heartbeat.rs b/src/meta-srv/src/service/heartbeat.rs index 45adb5f57e..c450e17139 100644 --- a/src/meta-srv/src/service/heartbeat.rs +++ b/src/meta-srv/src/service/heartbeat.rs @@ -134,9 +134,7 @@ impl heartbeat_server::Heartbeat for Metasrv { } } -async fn handle_ask_leader(req: AskLeaderRequest, ctx: Context) -> Result { - let cluster_id = req.header.as_ref().map_or(0, |h| h.cluster_id); - +async fn handle_ask_leader(_req: AskLeaderRequest, ctx: Context) -> Result { let addr = match ctx.election { Some(election) => { if election.is_leader() { @@ -153,7 +151,7 @@ async fn handle_ask_leader(req: AskLeaderRequest, ctx: Context) -> Result GrpcResult { if !self.is_leader() { let resp = ProcedureStateResponse { - header: Some(ResponseHeader::failed(0, Error::is_not_leader())), + header: Some(ResponseHeader::failed(Error::is_not_leader())), ..Default::default() }; @@ -71,7 +71,7 @@ impl procedure_service_server::ProcedureService for Metasrv { async fn ddl(&self, request: Request) -> GrpcResult { if !self.is_leader() { let resp = PbDdlTaskResponse { - header: Some(ResponseHeader::failed(0, Error::is_not_leader())), + header: Some(ResponseHeader::failed(Error::is_not_leader())), ..Default::default() }; @@ -87,7 +87,6 @@ impl procedure_service_server::ProcedureService for Metasrv { } = request.into_inner(); let header = 
header.context(error::MissingRequestHeaderSnafu)?; - let cluster_id = header.cluster_id; let query_context = query_context .context(error::MissingRequiredParameterSnafu { param: "query_context", @@ -102,7 +101,6 @@ impl procedure_service_server::ProcedureService for Metasrv { .procedure_executor() .submit_ddl_task( &ExecutorContext { - cluster_id: Some(cluster_id), tracing_context: Some(header.tracing_context), }, SubmitDdlTaskRequest { @@ -123,7 +121,7 @@ impl procedure_service_server::ProcedureService for Metasrv { ) -> GrpcResult { if !self.is_leader() { let resp = MigrateRegionResponse { - header: Some(ResponseHeader::failed(0, Error::is_not_leader())), + header: Some(ResponseHeader::failed(Error::is_not_leader())), ..Default::default() }; @@ -139,22 +137,19 @@ impl procedure_service_server::ProcedureService for Metasrv { timeout_secs, } = request.into_inner(); - let header = header.context(error::MissingRequestHeaderSnafu)?; - let cluster_id = header.cluster_id; - + let _header = header.context(error::MissingRequestHeaderSnafu)?; let from_peer = self - .lookup_peer(cluster_id, from_peer) + .lookup_peer(from_peer) .await? .context(error::PeerUnavailableSnafu { peer_id: from_peer })?; let to_peer = self - .lookup_peer(cluster_id, to_peer) + .lookup_peer(to_peer) .await? .context(error::PeerUnavailableSnafu { peer_id: to_peer })?; let pid = self .region_migration_manager() .submit_procedure(RegionMigrationProcedureTask { - cluster_id, region_id: region_id.into(), from_peer, to_peer, @@ -177,7 +172,7 @@ impl procedure_service_server::ProcedureService for Metasrv { ) -> GrpcResult { if !self.is_leader() { let resp = ProcedureDetailResponse { - header: Some(ResponseHeader::failed(0, Error::is_not_leader())), + header: Some(ResponseHeader::failed(Error::is_not_leader())), ..Default::default() }; diff --git a/src/meta-srv/src/service/store.rs b/src/meta-srv/src/service/store.rs index acbc090e5c..bb410849a2 100644 --- a/src/meta-srv/src/service/store.rs +++ b/src/meta-srv/src/service/store.rs @@ -28,10 +28,10 @@ use common_meta::rpc::store::{ BatchDeleteRequest, BatchGetRequest, BatchPutRequest, CompareAndPutRequest, DeleteRangeRequest, PutRequest, RangeRequest, }; -use snafu::{OptionExt, ResultExt}; +use snafu::ResultExt; use tonic::{Request, Response}; -use crate::error::{self, MissingRequestHeaderSnafu}; +use crate::error::{self}; use crate::metasrv::Metasrv; use crate::metrics::METRIC_META_KV_REQUEST_ELAPSED; use crate::service::GrpcResult; @@ -41,15 +41,8 @@ impl store_server::Store for Metasrv { async fn range(&self, req: Request) -> GrpcResult { let req = req.into_inner(); - let cluster_id = req - .header - .as_ref() - .context(MissingRequestHeaderSnafu)? - .cluster_id; - let cluster_id_str = cluster_id.to_string(); - let _timer = METRIC_META_KV_REQUEST_ELAPSED - .with_label_values(&[self.kv_backend().name(), "range", cluster_id_str.as_str()]) + .with_label_values(&[self.kv_backend().name(), "range"]) .start_timer(); let req: RangeRequest = req.into(); @@ -60,22 +53,14 @@ impl store_server::Store for Metasrv { .await .context(error::KvBackendSnafu)?; - let res = res.to_proto_resp(ResponseHeader::success(cluster_id)); + let res = res.to_proto_resp(ResponseHeader::success()); Ok(Response::new(res)) } async fn put(&self, req: Request) -> GrpcResult { let req = req.into_inner(); - - let cluster_id = req - .header - .as_ref() - .context(MissingRequestHeaderSnafu)? 
- .cluster_id; - let cluster_id_str = cluster_id.to_string(); - let _timer = METRIC_META_KV_REQUEST_ELAPSED - .with_label_values(&[self.kv_backend().name(), "put", cluster_id_str.as_str()]) + .with_label_values(&[self.kv_backend().name(), "put"]) .start_timer(); let req: PutRequest = req.into(); @@ -86,26 +71,14 @@ impl store_server::Store for Metasrv { .await .context(error::KvBackendSnafu)?; - let res = res.to_proto_resp(ResponseHeader::success(cluster_id)); + let res = res.to_proto_resp(ResponseHeader::success()); Ok(Response::new(res)) } async fn batch_get(&self, req: Request) -> GrpcResult { let req = req.into_inner(); - - let cluster_id = req - .header - .as_ref() - .context(MissingRequestHeaderSnafu)? - .cluster_id; - let cluster_id_str = cluster_id.to_string(); - let _timer = METRIC_META_KV_REQUEST_ELAPSED - .with_label_values(&[ - self.kv_backend().name(), - "batch_get", - cluster_id_str.as_str(), - ]) + .with_label_values(&[self.kv_backend().name(), "batch_get"]) .start_timer(); let req: BatchGetRequest = req.into(); @@ -116,26 +89,15 @@ impl store_server::Store for Metasrv { .await .context(error::KvBackendSnafu)?; - let res = res.to_proto_resp(ResponseHeader::success(cluster_id)); + let res = res.to_proto_resp(ResponseHeader::success()); Ok(Response::new(res)) } async fn batch_put(&self, req: Request) -> GrpcResult { let req = req.into_inner(); - let cluster_id = req - .header - .as_ref() - .context(MissingRequestHeaderSnafu)? - .cluster_id; - let cluster_id_str = cluster_id.to_string(); - let _timer = METRIC_META_KV_REQUEST_ELAPSED - .with_label_values(&[ - self.kv_backend().name(), - "batch_pub", - cluster_id_str.as_str(), - ]) + .with_label_values(&[self.kv_backend().name(), "batch_pub"]) .start_timer(); let req: BatchPutRequest = req.into(); @@ -146,7 +108,7 @@ impl store_server::Store for Metasrv { .await .context(error::KvBackendSnafu)?; - let res = res.to_proto_resp(ResponseHeader::success(cluster_id)); + let res = res.to_proto_resp(ResponseHeader::success()); Ok(Response::new(res)) } @@ -156,19 +118,8 @@ impl store_server::Store for Metasrv { ) -> GrpcResult { let req = req.into_inner(); - let cluster_id = req - .header - .as_ref() - .context(MissingRequestHeaderSnafu)? - .cluster_id; - let cluster_id_str = cluster_id.to_string(); - let _timer = METRIC_META_KV_REQUEST_ELAPSED - .with_label_values(&[ - self.kv_backend().name(), - "batch_delete", - cluster_id_str.as_str(), - ]) + .with_label_values(&[self.kv_backend().name(), "batch_delete"]) .start_timer(); let req: BatchDeleteRequest = req.into(); @@ -179,7 +130,7 @@ impl store_server::Store for Metasrv { .await .context(error::KvBackendSnafu)?; - let res = res.to_proto_resp(ResponseHeader::success(cluster_id)); + let res = res.to_proto_resp(ResponseHeader::success()); Ok(Response::new(res)) } @@ -189,19 +140,8 @@ impl store_server::Store for Metasrv { ) -> GrpcResult { let req = req.into_inner(); - let cluster_id = req - .header - .as_ref() - .context(MissingRequestHeaderSnafu)? 
- .cluster_id; - let cluster_id_str = cluster_id.to_string(); - let _timer = METRIC_META_KV_REQUEST_ELAPSED - .with_label_values(&[ - self.kv_backend().name(), - "compare_and_put", - cluster_id_str.as_str(), - ]) + .with_label_values(&[self.kv_backend().name(), "compare_and_put"]) .start_timer(); let req: CompareAndPutRequest = req.into(); @@ -212,7 +152,7 @@ impl store_server::Store for Metasrv { .await .context(error::KvBackendSnafu)?; - let res = res.to_proto_resp(ResponseHeader::success(cluster_id)); + let res = res.to_proto_resp(ResponseHeader::success()); Ok(Response::new(res)) } @@ -222,19 +162,8 @@ impl store_server::Store for Metasrv { ) -> GrpcResult { let req = req.into_inner(); - let cluster_id = req - .header - .as_ref() - .context(MissingRequestHeaderSnafu)? - .cluster_id; - let cluster_id_str = cluster_id.to_string(); - let _timer = METRIC_META_KV_REQUEST_ELAPSED - .with_label_values(&[ - self.kv_backend().name(), - "delete_range", - cluster_id_str.as_str(), - ]) + .with_label_values(&[self.kv_backend().name(), "delete_range"]) .start_timer(); let req: DeleteRangeRequest = req.into(); @@ -245,7 +174,7 @@ impl store_server::Store for Metasrv { .await .context(error::KvBackendSnafu)?; - let res = res.to_proto_resp(ResponseHeader::success(cluster_id)); + let res = res.to_proto_resp(ResponseHeader::success()); Ok(Response::new(res)) } } @@ -276,7 +205,7 @@ mod tests { let metasrv = new_metasrv().await; let mut req = RangeRequest::default(); - req.set_header((1, 1), Role::Datanode, W3cTrace::new()); + req.set_header(1, Role::Datanode, W3cTrace::new()); let res = metasrv.range(req.into_request()).await; let _ = res.unwrap(); @@ -287,7 +216,7 @@ mod tests { let metasrv = new_metasrv().await; let mut req = PutRequest::default(); - req.set_header((1, 1), Role::Datanode, W3cTrace::new()); + req.set_header(1, Role::Datanode, W3cTrace::new()); let res = metasrv.put(req.into_request()).await; let _ = res.unwrap(); @@ -298,7 +227,7 @@ mod tests { let metasrv = new_metasrv().await; let mut req = BatchGetRequest::default(); - req.set_header((1, 1), Role::Datanode, W3cTrace::new()); + req.set_header(1, Role::Datanode, W3cTrace::new()); let res = metasrv.batch_get(req.into_request()).await; let _ = res.unwrap(); @@ -310,7 +239,7 @@ mod tests { let metasrv = new_metasrv().await; let mut req = BatchPutRequest::default(); - req.set_header((1, 1), Role::Datanode, W3cTrace::new()); + req.set_header(1, Role::Datanode, W3cTrace::new()); let res = metasrv.batch_put(req.into_request()).await; let _ = res.unwrap(); @@ -321,7 +250,7 @@ mod tests { let metasrv = new_metasrv().await; let mut req = BatchDeleteRequest::default(); - req.set_header((1, 1), Role::Datanode, W3cTrace::new()); + req.set_header(1, Role::Datanode, W3cTrace::new()); let res = metasrv.batch_delete(req.into_request()).await; let _ = res.unwrap(); @@ -332,7 +261,7 @@ mod tests { let metasrv = new_metasrv().await; let mut req = CompareAndPutRequest::default(); - req.set_header((1, 1), Role::Datanode, W3cTrace::new()); + req.set_header(1, Role::Datanode, W3cTrace::new()); let res = metasrv.compare_and_put(req.into_request()).await; let _ = res.unwrap(); @@ -343,7 +272,7 @@ mod tests { let metasrv = new_metasrv().await; let mut req = DeleteRangeRequest::default(); - req.set_header((1, 1), Role::Datanode, W3cTrace::new()); + req.set_header(1, Role::Datanode, W3cTrace::new()); let res = metasrv.delete_range(req.into_request()).await; let _ = res.unwrap(); diff --git a/src/meta-srv/src/table_meta_alloc.rs 
b/src/meta-srv/src/table_meta_alloc.rs index a8f4b44069..8578e6cd19 100644 --- a/src/meta-srv/src/table_meta_alloc.rs +++ b/src/meta-srv/src/table_meta_alloc.rs @@ -15,7 +15,6 @@ use async_trait::async_trait; use common_error::ext::BoxedError; use common_meta::ddl::table_meta::PeerAllocator; -use common_meta::ddl::TableMetadataAllocatorContext; use common_meta::error::{ExternalSnafu, Result as MetaResult}; use common_meta::peer::Peer; use snafu::{ensure, ResultExt}; @@ -36,24 +35,18 @@ impl MetasrvPeerAllocator { Self { ctx, selector } } - /// Allocates a specified number (by `regions`) of [`Peer`] instances based on the given - /// [`TableMetadataAllocatorContext`] and number of regions. The returned peers will have - /// the same length as the number of regions. + /// Allocates a specified number (by `regions`) of [`Peer`] instances based on the number of + /// regions. The returned peers will have the same length as the number of regions. /// /// This method is mainly a wrapper around the [`SelectorRef`]::`select` method. There is /// no guarantee that how the returned peers are used, like whether they are from the same /// table or not. So this method isn't idempotent. - async fn alloc( - &self, - ctx: &TableMetadataAllocatorContext, - regions: usize, - ) -> Result> { + async fn alloc(&self, regions: usize) -> Result> { ensure!(regions <= MAX_REGION_SEQ as usize, TooManyPartitionsSnafu); let mut peers = self .selector .select( - ctx.cluster_id, &self.ctx, SelectorOptions { min_required_items: regions, @@ -79,12 +72,8 @@ impl MetasrvPeerAllocator { #[async_trait] impl PeerAllocator for MetasrvPeerAllocator { - async fn alloc( - &self, - ctx: &TableMetadataAllocatorContext, - regions: usize, - ) -> MetaResult> { - self.alloc(ctx, regions) + async fn alloc(&self, regions: usize) -> MetaResult> { + self.alloc(regions) .await .map_err(BoxedError::new) .context(ExternalSnafu) diff --git a/src/meta-srv/src/test_util.rs b/src/meta-srv/src/test_util.rs index 6b9ccc99a0..b12e11fd19 100644 --- a/src/meta-srv/src/test_util.rs +++ b/src/meta-srv/src/test_util.rs @@ -17,7 +17,6 @@ use std::sync::Arc; use common_meta::kv_backend::memory::MemoryKvBackend; use common_meta::peer::Peer; use common_meta::rpc::router::{Region, RegionRoute}; -use common_meta::ClusterId; use common_time::util as time_util; use crate::cluster::{MetaPeerClientBuilder, MetaPeerClientRef}; @@ -63,15 +62,10 @@ pub(crate) fn create_selector_context() -> SelectorContext { } } -pub(crate) async fn put_datanodes( - cluster_id: ClusterId, - meta_peer_client: &MetaPeerClientRef, - datanodes: Vec, -) { +pub(crate) async fn put_datanodes(meta_peer_client: &MetaPeerClientRef, datanodes: Vec) { let backend = meta_peer_client.memory_backend(); for datanode in datanodes { let lease_key = DatanodeLeaseKey { - cluster_id, node_id: datanode.id, }; let lease_value = LeaseValue { diff --git a/tests-integration/src/cluster.rs b/tests-integration/src/cluster.rs index d0051db7c5..d1bb7c64d5 100644 --- a/tests-integration/src/cluster.rs +++ b/tests-integration/src/cluster.rs @@ -308,11 +308,10 @@ impl GreptimeDbClusterBuilder { expected_datanodes: usize, ) { for _ in 0..10 { - let alive_datanodes = - meta_srv::lease::alive_datanodes(1000, meta_peer_client, u64::MAX) - .await - .unwrap() - .len(); + let alive_datanodes = meta_srv::lease::alive_datanodes(meta_peer_client, u64::MAX) + .await + .unwrap() + .len(); if alive_datanodes == expected_datanodes { return; } @@ -322,10 +321,9 @@ impl GreptimeDbClusterBuilder { } async fn create_datanode(&self, 
opts: DatanodeOptions, metasrv: MockInfo) -> Datanode { - let mut meta_client = - MetaClientBuilder::datanode_default_options(1000, opts.node_id.unwrap()) - .channel_manager(metasrv.channel_manager) - .build(); + let mut meta_client = MetaClientBuilder::datanode_default_options(opts.node_id.unwrap()) + .channel_manager(metasrv.channel_manager) + .build(); meta_client.start(&[&metasrv.server_addr]).await.unwrap(); let meta_client = Arc::new(meta_client); @@ -357,7 +355,7 @@ impl GreptimeDbClusterBuilder { metasrv: MockInfo, datanode_clients: Arc, ) -> Arc { - let mut meta_client = MetaClientBuilder::frontend_default_options(1000) + let mut meta_client = MetaClientBuilder::frontend_default_options() .channel_manager(metasrv.channel_manager) .enable_access_cluster_info() .build(); diff --git a/tests-integration/tests/region_migration.rs b/tests-integration/tests/region_migration.rs index 3f72ee0cca..8f54e253f7 100644 --- a/tests-integration/tests/region_migration.rs +++ b/tests-integration/tests/region_migration.rs @@ -35,7 +35,7 @@ use futures::future::BoxFuture; use meta_srv::error::Result as MetaResult; use meta_srv::metasrv::SelectorContext; use meta_srv::procedure::region_migration::RegionMigrationProcedureTask; -use meta_srv::selector::{Namespace, Selector, SelectorOptions}; +use meta_srv::selector::{Selector, SelectorOptions}; use servers::query_handler::sql::SqlQueryHandler; use session::context::{QueryContext, QueryContextRef}; use store_api::storage::RegionId; @@ -169,7 +169,6 @@ pub async fn test_region_migration(store_type: StorageType, endpoints: Vec MetaResult {