refactor: remove cluster id field (#5610)

* chore: resolve conflicts

* chore: merge main

* test: add compatibility test for DatanodeLeaseKey with missing cluster_id

* test: add compatibility test for DatanodeLeaseKey without cluster_id
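
 A minimal sketch of what such a compatibility test can look like, assuming the key is serde-serialized; the struct below is a stand-in rather than the actual `DatanodeLeaseKey` definition:

 ```rust
 use serde::Deserialize;

 // Stand-in for the slimmed-down key: `cluster_id` no longer exists.
 #[derive(Debug, PartialEq, Deserialize)]
 struct DatanodeLeaseKey {
     node_id: u64,
 }

 #[test]
 fn deserialize_lease_key_written_with_cluster_id() {
     // A payload written by an older version still carries `cluster_id`.
     // serde ignores unknown fields by default, so it keeps deserializing.
     let legacy = r#"{"cluster_id":1,"node_id":42}"#;
     let key: DatanodeLeaseKey = serde_json::from_str(legacy).unwrap();
     assert_eq!(key, DatanodeLeaseKey { node_id: 42 });
 }
 ```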

* refactor/remove-cluster-id:
 - **Update `greptime-proto` Dependency**: Updated the `greptime-proto` dependency in `Cargo.lock` and `Cargo.toml` to a new revision.
 - **Remove `cluster_id` Usage**: Removed the `cluster_id` field and its related logic across the meta client and server crates, including `cluster.rs`, `datanode.rs`, `rpc.rs`, `adapter.rs`, `client.rs`, `ask_leader.rs`, `heartbeat.rs`, `procedure.rs`, `store.rs`, `handler.rs`, `response_header_handler.rs`, `key.rs`, `lease.rs`, and `metrics.rs` (several of these filenames occur in more than one crate). See the sketch after this list for the resulting call-site change.
 - **Refactor Tests**: Updated tests in `client.rs`, `response_header_handler.rs`, `store.rs`, and `service` modules to reflect the removal of `cluster_id`.
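
 A minimal sketch of the call-site effect visible in the `cmd` diffs below: `create_meta_client` loses its leading `cluster_id` argument. All types here are stand-ins for the real `meta_client` API:

 ```rust
 // Stand-ins; the real definitions live in GreptimeDB's meta_client crate.
 #[derive(Debug)]
 pub enum MetaClientType {
     Datanode { member_id: u64 },
     Flownode { member_id: u64 },
     Frontend,
 }

 #[derive(Debug, Default)]
 pub struct MetaClientOptions;

 #[derive(Debug)]
 pub struct MetaClient;

 // Before this PR the function also took a leading `cluster_id: u64`.
 pub async fn create_meta_client(
     _client_type: MetaClientType,
     _options: &MetaClientOptions,
 ) -> Result<MetaClient, String> {
     Ok(MetaClient)
 }
 ```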

* fix: clippy

* refactor/remove-cluster-id:
 **Refactor and Cleanup in Meta Server**

 - **`response_header_handler.rs`**: Removed unused import of `HeartbeatResponse` and cleaned up the test function by eliminating the creation of an unused `HeartbeatResponse` object.
 - **`node_lease.rs`**: Simplified parameter handling in the `HttpHandler` implementation by using underscores for unused parameters (illustrated below).
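
 A tiny illustration of that cleanup pattern; the trait here is a hypothetical stand-in for metasrv's HTTP handler trait, not its real signature:

 ```rust
 // Hypothetical handler trait for illustration only.
 trait HttpHandler {
     fn handle(&self, path: &str, params: &[(String, String)]) -> String;
 }

 struct NodeLeaseHandler;

 impl HttpHandler for NodeLeaseHandler {
     // Underscores mark the parameters this handler does not use,
     // silencing unused-variable lints without changing the trait.
     fn handle(&self, _path: &str, _params: &[(String, String)]) -> String {
         "ok".to_string()
     }
 }
 ```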

* refactor/remove-cluster-id:
 ### Remove `TableMetadataAllocatorContext` and Refactor Code

 - **Removed `TableMetadataAllocatorContext`**: Eliminated the `TableMetadataAllocatorContext` struct and its usage across multiple files, including `ddl.rs`, `create_table.rs`, `create_view.rs`, `table_meta.rs`, `test_util.rs`, `create_logical_tables.rs`,
 `drop_table.rs`, and `table_meta_alloc.rs`.
 - **Refactored Function Signatures**: Updated function signatures to remove the `TableMetadataAllocatorContext` parameter in methods like `create`, `create_view`, and `alloc` in `table_meta.rs` and `table_meta_alloc.rs`.
 - **Updated Imports**: Adjusted import statements to reflect the removal of `TableMetadataAllocatorContext` in affected files.

 These changes simplify the codebase by removing an unnecessary context struct and updating the related function calls; a sketch of the simplified allocator trait follows.
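
 A minimal sketch of the slimmed-down allocator trait, with the signature taken from the `table_meta.rs` diff below; the `Peer` type and the error type are simplified stand-ins:

 ```rust
 use async_trait::async_trait;

 #[derive(Debug, Clone, Default)]
 struct Peer;

 #[async_trait]
 trait PeerAllocator: Send + Sync {
     /// Allocates `regions` peers; the former
     /// `&TableMetadataAllocatorContext` parameter is gone.
     async fn alloc(&self, regions: usize) -> Result<Vec<Peer>, String>;
 }

 struct NoopPeerAllocator;

 #[async_trait]
 impl PeerAllocator for NoopPeerAllocator {
     async fn alloc(&self, regions: usize) -> Result<Vec<Peer>, String> {
         Ok(vec![Peer::default(); regions])
     }
 }
 ```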

* refactor/remove-cluster-id:
 ### Update `datanode.rs` to Modify Key Prefix

 - **File Modified**: `src/common/meta/src/datanode.rs`
 - **Key Changes**:
   - Updated `DatanodeStatKey::prefix_key` and `From<DatanodeStatKey>` to pin the cluster-ID slot in the key prefix to a literal `0` (a TODO tracks dropping it entirely), so existing keys keep their layout.
   - Adjusted comments to reflect the changes in key prefix handling; see the sketch below.
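
 A runnable sketch of the new key layout, mirroring the `datanode.rs` diff below (the struct is reduced to what the sketch needs):

 ```rust
 const DATANODE_STAT_PREFIX: &str = "__meta_datanode_stat";

 #[derive(Debug, Clone, Copy)]
 struct DatanodeStatKey {
     node_id: u64,
 }

 impl DatanodeStatKey {
     /// The scan prefix; the literal `0` keeps the legacy
     /// `{cluster_id}` slot in place so old keys still match.
     fn prefix_key() -> Vec<u8> {
         format!("{DATANODE_STAT_PREFIX}-0-").into_bytes()
     }
 }

 impl From<DatanodeStatKey> for Vec<u8> {
     fn from(value: DatanodeStatKey) -> Self {
         format!("{}-0-{}", DATANODE_STAT_PREFIX, value.node_id).into_bytes()
     }
 }

 fn main() {
     let key = DatanodeStatKey { node_id: 101 };
     let bytes: Vec<u8> = key.into();
     assert_eq!(bytes, b"__meta_datanode_stat-0-101".to_vec());
     assert!(bytes.starts_with(&DatanodeStatKey::prefix_key()));
 }
 ```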

* reformat code

* refactor/remove-cluster-id:
 ### Commit Summary

 - **Refactor `Pusher` Initialization**: Removed the `RequestHeader` parameter from the `Pusher::new` method across multiple files, including `handler.rs`, `test_util.rs`, and `heartbeat.rs`. This change simplifies `Pusher` initialization by eliminating the now-unnecessary parameter (see the sketch after this list).
 - **Update Imports**: Adjusted import statements in `handler.rs` and `test_util.rs` to remove unused `RequestHeader` references, ensuring cleaner and more efficient code.
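
 A stand-in sketch of that constructor change (these are not the real metasrv types, only the shape of the new signature):

 ```rust
 use std::sync::mpsc::Sender;

 // Stand-in for the response type pushed to a node.
 struct HeartbeatResponse;

 // Stand-in Pusher holding only the channel it pushes into.
 struct Pusher {
     sender: Sender<HeartbeatResponse>,
 }

 impl Pusher {
     // Previously: `fn new(sender: ..., header: &RequestHeader) -> Self`.
     fn new(sender: Sender<HeartbeatResponse>) -> Self {
         Self { sender }
     }
 }

 fn main() {
     let (tx, _rx) = std::sync::mpsc::channel::<HeartbeatResponse>();
     let _pusher = Pusher::new(tx); // no RequestHeader needed anymore
 }
 ```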

* chore: update proto
Author: Lei, HUANG
Date: 2025-03-05 16:22:18 +08:00 (committed by GitHub)
Parent: a71b93dd84
Commit: a56030e6a5
98 changed files with 478 additions and 1208 deletions

Cargo.lock

@@ -4701,7 +4701,7 @@ dependencies = [
[[package]]
name = "greptime-proto"
version = "0.1.0"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=072ce580502e015df1a6b03a185b60309a7c2a7a#072ce580502e015df1a6b03a185b60309a7c2a7a"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=d92c9ac4e90ef4abdcf5c2eaf5a164e18ba09486#d92c9ac4e90ef4abdcf5c2eaf5a164e18ba09486"
dependencies = [
"prost 0.13.3",
"serde",


@@ -129,7 +129,7 @@ etcd-client = "0.14"
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "072ce580502e015df1a6b03a185b60309a7c2a7a" }
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "d92c9ac4e90ef4abdcf5c2eaf5a164e18ba09486" }
hex = "0.4"
http = "1"
humantime = "2.1"


@@ -287,7 +287,6 @@ impl StartCommand {
.await
.context(StartDatanodeSnafu)?;
let cluster_id = 0; // TODO(hl): read from config
let member_id = opts
.node_id
.context(MissingConfigSnafu { msg: "'node_id'" })?;
@@ -296,13 +295,10 @@ impl StartCommand {
msg: "'meta_client_options'",
})?;
let meta_client = meta_client::create_meta_client(
cluster_id,
MetaClientType::Datanode { member_id },
meta_config,
)
.await
.context(MetaClientInitSnafu)?;
let meta_client =
meta_client::create_meta_client(MetaClientType::Datanode { member_id }, meta_config)
.await
.context(MetaClientInitSnafu)?;
let meta_backend = Arc::new(MetaKvBackend {
client: meta_client.clone(),


@@ -241,9 +241,6 @@ impl StartCommand {
let mut opts = opts.component;
opts.grpc.detect_server_addr();
// TODO(discord9): make it not optionale after cluster id is required
let cluster_id = opts.cluster_id.unwrap_or(0);
let member_id = opts
.node_id
.context(MissingConfigSnafu { msg: "'node_id'" })?;
@@ -252,13 +249,10 @@ impl StartCommand {
msg: "'meta_client_options'",
})?;
let meta_client = meta_client::create_meta_client(
cluster_id,
MetaClientType::Flownode { member_id },
meta_config,
)
.await
.context(MetaClientInitSnafu)?;
let meta_client =
meta_client::create_meta_client(MetaClientType::Flownode { member_id }, meta_config)
.await
.context(MetaClientInitSnafu)?;
let cache_max_capacity = meta_config.metadata_cache_max_capacity;
let cache_ttl = meta_config.metadata_cache_ttl;


@@ -295,14 +295,10 @@ impl StartCommand {
let cache_ttl = meta_client_options.metadata_cache_ttl;
let cache_tti = meta_client_options.metadata_cache_tti;
let cluster_id = 0; // (TODO: jeremy): It is currently a reserved field and has not been enabled.
let meta_client = meta_client::create_meta_client(
cluster_id,
MetaClientType::Frontend,
meta_client_options,
)
.await
.context(MetaClientInitSnafu)?;
let meta_client =
meta_client::create_meta_client(MetaClientType::Frontend, meta_client_options)
.await
.context(MetaClientInitSnafu)?;
// TODO(discord9): add helper function to ease the creation of cache registry&such
let cached_meta_backend =


@@ -28,7 +28,6 @@ use crate::error::{
InvalidRoleSnafu, ParseNumSnafu, Result,
};
use crate::peer::Peer;
use crate::ClusterId;
const CLUSTER_NODE_INFO_PREFIX: &str = "__meta_cluster_node_info";
@@ -56,12 +55,9 @@ pub trait ClusterInfo {
// TODO(jeremy): Other info, like region status, etc.
}
/// The key of [NodeInfo] in the storage. The format is `__meta_cluster_node_info-{cluster_id}-{role}-{node_id}`.
/// The key of [NodeInfo] in the storage. The format is `__meta_cluster_node_info-0-{role}-{node_id}`.
#[derive(Debug, Clone, Copy, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct NodeInfoKey {
/// The cluster id.
// todo(hl): remove cluster_id as it is not assigned anywhere.
pub cluster_id: ClusterId,
/// The role of the node. It can be `[Role::Datanode]` or `[Role::Frontend]`.
pub role: Role,
/// The node id.
@@ -84,24 +80,15 @@ impl NodeInfoKey {
_ => peer.id,
};
Some(NodeInfoKey {
cluster_id: header.cluster_id,
role,
node_id,
})
Some(NodeInfoKey { role, node_id })
}
pub fn key_prefix_with_cluster_id(cluster_id: u64) -> String {
format!("{}-{}-", CLUSTER_NODE_INFO_PREFIX, cluster_id)
pub fn key_prefix() -> String {
format!("{}-0-", CLUSTER_NODE_INFO_PREFIX)
}
pub fn key_prefix_with_role(cluster_id: ClusterId, role: Role) -> String {
format!(
"{}-{}-{}-",
CLUSTER_NODE_INFO_PREFIX,
cluster_id,
i32::from(role)
)
pub fn key_prefix_with_role(role: Role) -> String {
format!("{}-0-{}-", CLUSTER_NODE_INFO_PREFIX, i32::from(role))
}
}
@@ -193,15 +180,10 @@ impl FromStr for NodeInfoKey {
let caps = CLUSTER_NODE_INFO_PREFIX_PATTERN
.captures(key)
.context(InvalidNodeInfoKeySnafu { key })?;
ensure!(caps.len() == 4, InvalidNodeInfoKeySnafu { key });
let cluster_id = caps[1].to_string();
let role = caps[2].to_string();
let node_id = caps[3].to_string();
let cluster_id: u64 = cluster_id.parse().context(ParseNumSnafu {
err_msg: format!("invalid cluster_id: {cluster_id}"),
})?;
let role: i32 = role.parse().context(ParseNumSnafu {
err_msg: format!("invalid role {role}"),
})?;
@@ -210,11 +192,7 @@ impl FromStr for NodeInfoKey {
err_msg: format!("invalid node_id: {node_id}"),
})?;
Ok(Self {
cluster_id,
role,
node_id,
})
Ok(Self { role, node_id })
}
}
@@ -233,9 +211,8 @@ impl TryFrom<Vec<u8>> for NodeInfoKey {
impl From<&NodeInfoKey> for Vec<u8> {
fn from(key: &NodeInfoKey) -> Self {
format!(
"{}-{}-{}-{}",
"{}-0-{}-{}",
CLUSTER_NODE_INFO_PREFIX,
key.cluster_id,
i32::from(key.role),
key.node_id
)
@@ -308,7 +285,6 @@ mod tests {
#[test]
fn test_node_info_key_round_trip() {
let key = NodeInfoKey {
cluster_id: 1,
role: Datanode,
node_id: 2,
};
@@ -316,7 +292,6 @@ mod tests {
let key_bytes: Vec<u8> = (&key).into();
let new_key: NodeInfoKey = key_bytes.try_into().unwrap();
assert_eq!(1, new_key.cluster_id);
assert_eq!(Datanode, new_key.role);
assert_eq!(2, new_key.node_id);
}
@@ -362,11 +337,11 @@ mod tests {
#[test]
fn test_node_info_key_prefix() {
let prefix = NodeInfoKey::key_prefix_with_cluster_id(1);
assert_eq!(prefix, "__meta_cluster_node_info-1-");
let prefix = NodeInfoKey::key_prefix();
assert_eq!(prefix, "__meta_cluster_node_info-0-");
let prefix = NodeInfoKey::key_prefix_with_role(2, Frontend);
assert_eq!(prefix, "__meta_cluster_node_info-2-1-");
let prefix = NodeInfoKey::key_prefix_with_role(Frontend);
assert_eq!(prefix, "__meta_cluster_node_info-0-1-");
}
#[test]


@@ -25,8 +25,8 @@ use store_api::region_engine::{RegionRole, RegionStatistic};
use store_api::storage::RegionId;
use table::metadata::TableId;
use crate::error;
use crate::error::Result;
use crate::{error, ClusterId};
pub(crate) const DATANODE_LEASE_PREFIX: &str = "__meta_datanode_lease";
const INACTIVE_REGION_PREFIX: &str = "__meta_inactive_region";
@@ -48,11 +48,10 @@ lazy_static! {
/// The key of the datanode stat in the storage.
///
/// The format is `__meta_datanode_stat-{cluster_id}-{node_id}`.
/// The format is `__meta_datanode_stat-0-{node_id}`.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct Stat {
pub timestamp_millis: i64,
pub cluster_id: ClusterId,
// The datanode Id.
pub id: u64,
// The datanode address.
@@ -102,10 +101,7 @@ impl Stat {
}
pub fn stat_key(&self) -> DatanodeStatKey {
DatanodeStatKey {
cluster_id: self.cluster_id,
node_id: self.id,
}
DatanodeStatKey { node_id: self.id }
}
/// Returns a tuple array containing [RegionId] and [RegionRole].
@@ -145,7 +141,7 @@ impl TryFrom<&HeartbeatRequest> for Stat {
} = value;
match (header, peer) {
(Some(header), Some(peer)) => {
(Some(_header), Some(peer)) => {
let region_stats = region_stats
.iter()
.map(RegionStat::from)
@@ -153,7 +149,6 @@ impl TryFrom<&HeartbeatRequest> for Stat {
Ok(Self {
timestamp_millis: time_util::current_time_millis(),
cluster_id: header.cluster_id,
// datanode id
id: peer.id,
// datanode address
@@ -196,32 +191,24 @@ impl From<&api::v1::meta::RegionStat> for RegionStat {
/// The key of the datanode stat in the memory store.
///
/// The format is `__meta_datanode_stat-{cluster_id}-{node_id}`.
/// The format is `__meta_datanode_stat-0-{node_id}`.
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
pub struct DatanodeStatKey {
pub cluster_id: ClusterId,
pub node_id: u64,
}
impl DatanodeStatKey {
/// The key prefix.
pub fn prefix_key() -> Vec<u8> {
format!("{DATANODE_STAT_PREFIX}-").into_bytes()
}
/// The key prefix with the cluster id.
pub fn key_prefix_with_cluster_id(cluster_id: ClusterId) -> String {
format!("{DATANODE_STAT_PREFIX}-{cluster_id}-")
// todo(hl): remove cluster id in prefix
format!("{DATANODE_STAT_PREFIX}-0-").into_bytes()
}
}
impl From<DatanodeStatKey> for Vec<u8> {
fn from(value: DatanodeStatKey) -> Self {
format!(
"{}-{}-{}",
DATANODE_STAT_PREFIX, value.cluster_id, value.node_id
)
.into_bytes()
// todo(hl): remove cluster id in prefix
format!("{}-0-{}", DATANODE_STAT_PREFIX, value.node_id).into_bytes()
}
}
@@ -234,20 +221,12 @@ impl FromStr for DatanodeStatKey {
.context(error::InvalidStatKeySnafu { key })?;
ensure!(caps.len() == 3, error::InvalidStatKeySnafu { key });
let cluster_id = caps[1].to_string();
let node_id = caps[2].to_string();
let cluster_id: u64 = cluster_id.parse().context(error::ParseNumSnafu {
err_msg: format!("invalid cluster_id: {cluster_id}"),
})?;
let node_id: u64 = node_id.parse().context(error::ParseNumSnafu {
err_msg: format!("invalid node_id: {node_id}"),
})?;
Ok(Self {
cluster_id,
node_id,
})
Ok(Self { node_id })
}
}
@@ -321,7 +300,6 @@ mod tests {
#[test]
fn test_stat_key() {
let stat = Stat {
cluster_id: 3,
id: 101,
region_num: 10,
..Default::default()
@@ -329,14 +307,12 @@ mod tests {
let stat_key = stat.stat_key();
assert_eq!(3, stat_key.cluster_id);
assert_eq!(101, stat_key.node_id);
}
#[test]
fn test_stat_val_round_trip() {
let stat = Stat {
cluster_id: 0,
id: 101,
region_num: 100,
..Default::default()
@@ -351,7 +327,6 @@ mod tests {
assert_eq!(1, stats.len());
let stat = stats.first().unwrap();
assert_eq!(0, stat.cluster_id);
assert_eq!(101, stat.id);
assert_eq!(100, stat.region_num);
}


@@ -30,7 +30,7 @@ use crate::node_manager::NodeManagerRef;
use crate::region_keeper::MemoryRegionKeeperRef;
use crate::rpc::ddl::{SubmitDdlTaskRequest, SubmitDdlTaskResponse};
use crate::rpc::procedure::{MigrateRegionRequest, MigrateRegionResponse, ProcedureStateResponse};
use crate::{ClusterId, DatanodeId};
use crate::DatanodeId;
pub mod alter_database;
pub mod alter_logical_tables;
@@ -57,7 +57,6 @@ pub mod utils;
#[derive(Debug, Default)]
pub struct ExecutorContext {
pub cluster_id: Option<u64>,
pub tracing_context: Option<W3cTrace>,
}
@@ -90,10 +89,6 @@ pub trait ProcedureExecutor: Send + Sync {
pub type ProcedureExecutorRef = Arc<dyn ProcedureExecutor>;
pub struct TableMetadataAllocatorContext {
pub cluster_id: ClusterId,
}
/// Metadata allocated to a table.
#[derive(Default)]
pub struct TableMetadata {
@@ -108,7 +103,7 @@ pub struct TableMetadata {
pub type RegionFailureDetectorControllerRef = Arc<dyn RegionFailureDetectorController>;
pub type DetectingRegion = (ClusterId, DatanodeId, RegionId);
pub type DetectingRegion = (DatanodeId, RegionId);
/// Used for actively registering Region failure detectors.
///


@@ -30,7 +30,6 @@ use crate::key::DeserializedValueWithBytes;
use crate::lock_key::{CatalogLock, SchemaLock};
use crate::rpc::ddl::UnsetDatabaseOption::{self};
use crate::rpc::ddl::{AlterDatabaseKind, AlterDatabaseTask, SetDatabaseOption};
use crate::ClusterId;
pub struct AlterDatabaseProcedure {
pub context: DdlContext,
@@ -65,14 +64,10 @@ fn build_new_schema_value(
impl AlterDatabaseProcedure {
pub const TYPE_NAME: &'static str = "metasrv-procedure::AlterDatabase";
pub fn new(
cluster_id: ClusterId,
task: AlterDatabaseTask,
context: DdlContext,
) -> Result<Self> {
pub fn new(task: AlterDatabaseTask, context: DdlContext) -> Result<Self> {
Ok(Self {
context,
data: AlterDatabaseData::new(task, cluster_id)?,
data: AlterDatabaseData::new(task)?,
})
}
@@ -183,7 +178,6 @@ enum AlterDatabaseState {
/// The data of alter database procedure.
#[derive(Debug, Serialize, Deserialize)]
pub struct AlterDatabaseData {
cluster_id: ClusterId,
state: AlterDatabaseState,
kind: AlterDatabaseKind,
catalog_name: String,
@@ -192,9 +186,8 @@ pub struct AlterDatabaseData {
}
impl AlterDatabaseData {
pub fn new(task: AlterDatabaseTask, cluster_id: ClusterId) -> Result<Self> {
pub fn new(task: AlterDatabaseTask) -> Result<Self> {
Ok(Self {
cluster_id,
state: AlterDatabaseState::Prepare,
kind: AlterDatabaseKind::try_from(task.alter_expr.kind.unwrap())?,
catalog_name: task.alter_expr.catalog_name,


@@ -37,9 +37,9 @@ use crate::key::table_info::TableInfoValue;
use crate::key::table_route::PhysicalTableRouteValue;
use crate::key::DeserializedValueWithBytes;
use crate::lock_key::{CatalogLock, SchemaLock, TableLock};
use crate::metrics;
use crate::rpc::ddl::AlterTableTask;
use crate::rpc::router::find_leaders;
use crate::{metrics, ClusterId};
pub struct AlterLogicalTablesProcedure {
pub context: DdlContext,
@@ -50,7 +50,6 @@ impl AlterLogicalTablesProcedure {
pub const TYPE_NAME: &'static str = "metasrv-procedure::AlterLogicalTables";
pub fn new(
cluster_id: ClusterId,
tasks: Vec<AlterTableTask>,
physical_table_id: TableId,
context: DdlContext,
@@ -58,7 +57,6 @@ impl AlterLogicalTablesProcedure {
Self {
context,
data: AlterTablesData {
cluster_id,
state: AlterTablesState::Prepare,
tasks,
table_info_values: vec![],
@@ -240,7 +238,6 @@ impl Procedure for AlterLogicalTablesProcedure {
#[derive(Debug, Serialize, Deserialize)]
pub struct AlterTablesData {
cluster_id: ClusterId,
state: AlterTablesState,
tasks: Vec<AlterTableTask>,
/// Table info values before the alter operation.


@@ -45,9 +45,9 @@ use crate::instruction::CacheIdent;
use crate::key::table_info::TableInfoValue;
use crate::key::{DeserializedValueWithBytes, RegionDistribution};
use crate::lock_key::{CatalogLock, SchemaLock, TableLock, TableNameLock};
use crate::metrics;
use crate::rpc::ddl::AlterTableTask;
use crate::rpc::router::{find_leader_regions, find_leaders, region_distribution};
use crate::{metrics, ClusterId};
/// The alter table procedure
pub struct AlterTableProcedure {
@@ -64,16 +64,11 @@ pub struct AlterTableProcedure {
impl AlterTableProcedure {
pub const TYPE_NAME: &'static str = "metasrv-procedure::AlterTable";
pub fn new(
cluster_id: ClusterId,
table_id: TableId,
task: AlterTableTask,
context: DdlContext,
) -> Result<Self> {
pub fn new(table_id: TableId, task: AlterTableTask, context: DdlContext) -> Result<Self> {
task.validate()?;
Ok(Self {
context,
data: AlterTableData::new(task, table_id, cluster_id),
data: AlterTableData::new(task, table_id),
new_table_info: None,
})
}
@@ -307,7 +302,6 @@ enum AlterTableState {
// The serialized data of alter table.
#[derive(Debug, Serialize, Deserialize)]
pub struct AlterTableData {
cluster_id: ClusterId,
state: AlterTableState,
task: AlterTableTask,
table_id: TableId,
@@ -318,12 +312,11 @@ pub struct AlterTableData {
}
impl AlterTableData {
pub fn new(task: AlterTableTask, table_id: TableId, cluster_id: u64) -> Self {
pub fn new(task: AlterTableTask, table_id: TableId) -> Self {
Self {
state: AlterTableState::Prepare,
task,
table_id,
cluster_id,
table_info_value: None,
region_distribution: None,
}


@@ -167,10 +167,9 @@ mod tests {
use crate::test_util::{new_ddl_context, MockDatanodeManager};
/// Prepares a region with schema `[ts: Timestamp, host: Tag, cpu: Field]`.
async fn prepare_ddl_context() -> (DdlContext, u64, TableId, RegionId, String) {
async fn prepare_ddl_context() -> (DdlContext, TableId, RegionId, String) {
let datanode_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(datanode_manager);
let cluster_id = 1;
let table_id = 1024;
let region_id = RegionId::new(table_id, 1);
let table_name = "foo";
@@ -225,19 +224,12 @@ mod tests {
)
.await
.unwrap();
(
ddl_context,
cluster_id,
table_id,
region_id,
table_name.to_string(),
)
(ddl_context, table_id, region_id, table_name.to_string())
}
#[tokio::test]
async fn test_make_alter_region_request() {
let (ddl_context, cluster_id, table_id, region_id, table_name) =
prepare_ddl_context().await;
let (ddl_context, table_id, region_id, table_name) = prepare_ddl_context().await;
let task = AlterTableTask {
alter_table: AlterTableExpr {
@@ -265,8 +257,7 @@ mod tests {
},
};
let mut procedure =
AlterTableProcedure::new(cluster_id, table_id, task, ddl_context).unwrap();
let mut procedure = AlterTableProcedure::new(table_id, task, ddl_context).unwrap();
procedure.on_prepare().await.unwrap();
let alter_kind = procedure.make_region_alter_kind().unwrap();
let Some(Body::Alter(alter_region_request)) = procedure
@@ -307,8 +298,7 @@ mod tests {
#[tokio::test]
async fn test_make_alter_column_type_region_request() {
let (ddl_context, cluster_id, table_id, region_id, table_name) =
prepare_ddl_context().await;
let (ddl_context, table_id, region_id, table_name) = prepare_ddl_context().await;
let task = AlterTableTask {
alter_table: AlterTableExpr {
@@ -325,8 +315,7 @@ mod tests {
},
};
let mut procedure =
AlterTableProcedure::new(cluster_id, table_id, task, ddl_context).unwrap();
let mut procedure = AlterTableProcedure::new(table_id, task, ddl_context).unwrap();
procedure.on_prepare().await.unwrap();
let alter_kind = procedure.make_region_alter_kind().unwrap();
let Some(Body::Alter(alter_region_request)) = procedure


@@ -46,9 +46,9 @@ use crate::key::flow::flow_route::FlowRouteValue;
use crate::key::table_name::TableNameKey;
use crate::key::{DeserializedValueWithBytes, FlowId, FlowPartitionId};
use crate::lock_key::{CatalogLock, FlowNameLock, TableNameLock};
use crate::metrics;
use crate::peer::Peer;
use crate::rpc::ddl::{CreateFlowTask, QueryContext};
use crate::{metrics, ClusterId};
/// The procedure of flow creation.
pub struct CreateFlowProcedure {
@@ -60,16 +60,10 @@ impl CreateFlowProcedure {
pub const TYPE_NAME: &'static str = "metasrv-procedure::CreateFlow";
/// Returns a new [CreateFlowProcedure].
pub fn new(
cluster_id: ClusterId,
task: CreateFlowTask,
query_context: QueryContext,
context: DdlContext,
) -> Self {
pub fn new(task: CreateFlowTask, query_context: QueryContext, context: DdlContext) -> Self {
Self {
context,
data: CreateFlowData {
cluster_id,
task,
flow_id: None,
peers: vec![],
@@ -363,7 +357,6 @@ impl fmt::Display for FlowType {
/// The serializable data.
#[derive(Debug, Serialize, Deserialize)]
pub struct CreateFlowData {
pub(crate) cluster_id: ClusterId,
pub(crate) state: CreateFlowState,
pub(crate) task: CreateFlowTask,
pub(crate) flow_id: Option<FlowId>,


@@ -23,11 +23,10 @@ impl CreateFlowProcedure {
pub(crate) async fn allocate_flow_id(&mut self) -> Result<()> {
//TODO(weny, ruihang): We doesn't support the partitions. It's always be 1, now.
let partitions = 1;
let cluster_id = self.data.cluster_id;
let (flow_id, peers) = self
.context
.flow_metadata_allocator
.create(cluster_id, partitions)
.create(partitions)
.await?;
self.data.flow_id = Some(flow_id);
self.data.peers = peers;


@@ -36,9 +36,9 @@ use crate::ddl::DdlContext;
use crate::error::{DecodeJsonSnafu, MetadataCorruptionSnafu, Result};
use crate::key::table_route::TableRouteValue;
use crate::lock_key::{CatalogLock, SchemaLock, TableLock, TableNameLock};
use crate::metrics;
use crate::rpc::ddl::CreateTableTask;
use crate::rpc::router::{find_leaders, RegionRoute};
use crate::{metrics, ClusterId};
pub struct CreateLogicalTablesProcedure {
pub context: DdlContext,
@@ -49,7 +49,6 @@ impl CreateLogicalTablesProcedure {
pub const TYPE_NAME: &'static str = "metasrv-procedure::CreateLogicalTables";
pub fn new(
cluster_id: ClusterId,
tasks: Vec<CreateTableTask>,
physical_table_id: TableId,
context: DdlContext,
@@ -57,7 +56,6 @@ impl CreateLogicalTablesProcedure {
Self {
context,
data: CreateTablesData {
cluster_id,
state: CreateTablesState::Prepare,
tasks,
table_ids_already_exists: vec![],
@@ -245,7 +243,6 @@ impl Procedure for CreateLogicalTablesProcedure {
#[derive(Debug, Serialize, Deserialize)]
pub struct CreateTablesData {
cluster_id: ClusterId,
state: CreateTablesState,
tasks: Vec<CreateTableTask>,
table_ids_already_exists: Vec<Option<TableId>>,


@@ -37,17 +37,17 @@ use crate::ddl::utils::{
add_peer_context_if_needed, convert_region_routes_to_detecting_regions, handle_retry_error,
region_storage_path,
};
use crate::ddl::{DdlContext, TableMetadata, TableMetadataAllocatorContext};
use crate::ddl::{DdlContext, TableMetadata};
use crate::error::{self, Result};
use crate::key::table_name::TableNameKey;
use crate::key::table_route::{PhysicalTableRouteValue, TableRouteValue};
use crate::lock_key::{CatalogLock, SchemaLock, TableNameLock};
use crate::metrics;
use crate::region_keeper::OperatingRegionGuard;
use crate::rpc::ddl::CreateTableTask;
use crate::rpc::router::{
find_leader_regions, find_leaders, operating_leader_regions, RegionRoute,
};
use crate::{metrics, ClusterId};
pub struct CreateTableProcedure {
pub context: DdlContext,
pub creator: TableCreator,
@@ -56,10 +56,10 @@ pub struct CreateTableProcedure {
impl CreateTableProcedure {
pub const TYPE_NAME: &'static str = "metasrv-procedure::CreateTable";
pub fn new(cluster_id: ClusterId, task: CreateTableTask, context: DdlContext) -> Self {
pub fn new(task: CreateTableTask, context: DdlContext) -> Self {
Self {
context,
creator: TableCreator::new(cluster_id, task),
creator: TableCreator::new(task),
}
}
@@ -154,12 +154,7 @@ impl CreateTableProcedure {
} = self
.context
.table_metadata_allocator
.create(
&TableMetadataAllocatorContext {
cluster_id: self.creator.data.cluster_id,
},
&self.creator.data.task,
)
.create(&self.creator.data.task)
.await?;
self.creator
.set_allocated_metadata(table_id, table_route, region_wal_options);
@@ -268,7 +263,6 @@ impl CreateTableProcedure {
/// - Failed to create table metadata.
async fn on_create_metadata(&mut self) -> Result<Status> {
let table_id = self.table_id();
let cluster_id = self.creator.data.cluster_id;
let manager = &self.context.table_metadata_manager;
let raw_table_info = self.table_info().clone();
@@ -276,10 +270,8 @@ impl CreateTableProcedure {
let region_wal_options = self.region_wal_options()?.clone();
// Safety: the table_route must be allocated.
let physical_table_route = self.table_route()?.clone();
let detecting_regions = convert_region_routes_to_detecting_regions(
cluster_id,
&physical_table_route.region_routes,
);
let detecting_regions =
convert_region_routes_to_detecting_regions(&physical_table_route.region_routes);
let table_route = TableRouteValue::Physical(physical_table_route);
manager
.create_table_metadata(raw_table_info, table_route, region_wal_options)
@@ -351,11 +343,10 @@ pub struct TableCreator {
}
impl TableCreator {
pub fn new(cluster_id: ClusterId, task: CreateTableTask) -> Self {
pub fn new(task: CreateTableTask) -> Self {
Self {
data: CreateTableData {
state: CreateTableState::Prepare,
cluster_id,
task,
table_route: None,
region_wal_options: None,
@@ -421,7 +412,6 @@ pub struct CreateTableData {
table_route: Option<PhysicalTableRouteValue>,
/// None stands for not allocated yet.
pub region_wal_options: Option<HashMap<RegionNumber, String>>,
pub cluster_id: ClusterId,
}
impl CreateTableData {


@@ -24,13 +24,13 @@ use table::table_reference::TableReference;
use crate::cache_invalidator::Context;
use crate::ddl::utils::handle_retry_error;
use crate::ddl::{DdlContext, TableMetadata, TableMetadataAllocatorContext};
use crate::ddl::{DdlContext, TableMetadata};
use crate::error::{self, Result};
use crate::instruction::CacheIdent;
use crate::key::table_name::TableNameKey;
use crate::lock_key::{CatalogLock, SchemaLock, TableNameLock};
use crate::metrics;
use crate::rpc::ddl::CreateViewTask;
use crate::{metrics, ClusterId};
// The procedure to execute `[CreateViewTask]`.
pub struct CreateViewProcedure {
@@ -41,12 +41,11 @@ pub struct CreateViewProcedure {
impl CreateViewProcedure {
pub const TYPE_NAME: &'static str = "metasrv-procedure::CreateView";
pub fn new(cluster_id: ClusterId, task: CreateViewTask, context: DdlContext) -> Self {
pub fn new(task: CreateViewTask, context: DdlContext) -> Self {
Self {
context,
data: CreateViewData {
state: CreateViewState::Prepare,
cluster_id,
task,
need_update: false,
},
@@ -144,12 +143,7 @@ impl CreateViewProcedure {
let TableMetadata { table_id, .. } = self
.context
.table_metadata_allocator
.create_view(
&TableMetadataAllocatorContext {
cluster_id: self.data.cluster_id,
},
&None,
)
.create_view(&None)
.await?;
self.data.set_allocated_metadata(table_id, false);
}
@@ -285,7 +279,6 @@ pub enum CreateViewState {
pub struct CreateViewData {
pub state: CreateViewState,
pub task: CreateViewTask,
pub cluster_id: ClusterId,
/// Whether to update the view info.
pub need_update: bool,
}


@@ -35,7 +35,6 @@ use crate::ddl::DdlContext;
use crate::error::Result;
use crate::key::table_name::TableNameValue;
use crate::lock_key::{CatalogLock, SchemaLock};
use crate::ClusterId;
pub struct DropDatabaseProcedure {
/// The context of procedure runtime.
@@ -54,7 +53,6 @@ pub(crate) enum DropTableTarget {
/// Context of [DropDatabaseProcedure] execution.
pub(crate) struct DropDatabaseContext {
cluster_id: ClusterId,
catalog: String,
schema: String,
drop_if_exists: bool,
@@ -87,7 +85,6 @@ impl DropDatabaseProcedure {
Self {
runtime_context: context,
context: DropDatabaseContext {
cluster_id: 0,
catalog,
schema,
drop_if_exists,
@@ -108,7 +105,6 @@ impl DropDatabaseProcedure {
Ok(Self {
runtime_context,
context: DropDatabaseContext {
cluster_id: 0,
catalog,
schema,
drop_if_exists,


@@ -217,11 +217,10 @@ mod tests {
async fn test_next_without_logical_tables() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
create_physical_table(&ddl_context, 0, "phy").await;
create_physical_table(&ddl_context, "phy").await;
// It always starts from Logical
let mut state = DropDatabaseCursor::new(DropTableTarget::Logical);
let mut ctx = DropDatabaseContext {
cluster_id: 0,
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
drop_if_exists: false,
@@ -252,12 +251,11 @@ mod tests {
async fn test_next_with_logical_tables() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await;
create_logical_table(ddl_context.clone(), 0, physical_table_id, "metric_0").await;
let physical_table_id = create_physical_table(&ddl_context, "phy").await;
create_logical_table(ddl_context.clone(), physical_table_id, "metric_0").await;
// It always starts from Logical
let mut state = DropDatabaseCursor::new(DropTableTarget::Logical);
let mut ctx = DropDatabaseContext {
cluster_id: 0,
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
drop_if_exists: false,
@@ -286,7 +284,6 @@ mod tests {
let ddl_context = new_ddl_context(node_manager);
let mut state = DropDatabaseCursor::new(DropTableTarget::Physical);
let mut ctx = DropDatabaseContext {
cluster_id: 0,
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
drop_if_exists: false,


@@ -98,11 +98,10 @@ impl State for DropDatabaseExecutor {
async fn next(
&mut self,
ddl_ctx: &DdlContext,
ctx: &mut DropDatabaseContext,
_ctx: &mut DropDatabaseContext,
) -> Result<(Box<dyn State>, Status)> {
self.register_dropping_regions(ddl_ctx)?;
let executor =
DropTableExecutor::new(ctx.cluster_id, self.table_name.clone(), self.table_id, true);
let executor = DropTableExecutor::new(self.table_name.clone(), self.table_id, true);
// Deletes metadata for table permanently.
let table_route_value = TableRouteValue::new(
self.table_id,
@@ -187,7 +186,7 @@ mod tests {
async fn test_next_with_physical_table() {
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await;
let physical_table_id = create_physical_table(&ddl_context, "phy").await;
let (_, table_route) = ddl_context
.table_metadata_manager
.table_route_manager()
@@ -203,7 +202,6 @@ mod tests {
DropTableTarget::Physical,
);
let mut ctx = DropDatabaseContext {
cluster_id: 0,
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
drop_if_exists: false,
@@ -216,7 +214,6 @@ mod tests {
}
// Execute again
let mut ctx = DropDatabaseContext {
cluster_id: 0,
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
drop_if_exists: false,
@@ -239,8 +236,8 @@ mod tests {
async fn test_next_logical_table() {
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await;
create_logical_table(ddl_context.clone(), 0, physical_table_id, "metric").await;
let physical_table_id = create_physical_table(&ddl_context, "phy").await;
create_logical_table(ddl_context.clone(), physical_table_id, "metric").await;
let logical_table_id = physical_table_id + 1;
let (_, table_route) = ddl_context
.table_metadata_manager
@@ -257,7 +254,6 @@ mod tests {
DropTableTarget::Logical,
);
let mut ctx = DropDatabaseContext {
cluster_id: 0,
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
drop_if_exists: false,
@@ -270,7 +266,6 @@ mod tests {
}
// Execute again
let mut ctx = DropDatabaseContext {
cluster_id: 0,
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
drop_if_exists: false,
@@ -345,7 +340,7 @@ mod tests {
async fn test_next_retryable_err() {
let node_manager = Arc::new(MockDatanodeManager::new(RetryErrorDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await;
let physical_table_id = create_physical_table(&ddl_context, "phy").await;
let (_, table_route) = ddl_context
.table_metadata_manager
.table_route_manager()
@@ -360,7 +355,6 @@ mod tests {
DropTableTarget::Physical,
);
let mut ctx = DropDatabaseContext {
cluster_id: 0,
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
drop_if_exists: false,
@@ -374,7 +368,7 @@ mod tests {
async fn test_on_recovery() {
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await;
let physical_table_id = create_physical_table(&ddl_context, "phy").await;
let (_, table_route) = ddl_context
.table_metadata_manager
.table_route_manager()
@@ -390,7 +384,6 @@ mod tests {
DropTableTarget::Physical,
);
let mut ctx = DropDatabaseContext {
cluster_id: 0,
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
drop_if_exists: false,


@@ -118,7 +118,6 @@ mod tests {
.unwrap();
let mut state = DropDatabaseRemoveMetadata;
let mut ctx = DropDatabaseContext {
cluster_id: 0,
catalog: "foo".to_string(),
schema: "bar".to_string(),
drop_if_exists: true,
@@ -145,7 +144,6 @@ mod tests {
// Schema not exists
let mut state = DropDatabaseRemoveMetadata;
let mut ctx = DropDatabaseContext {
cluster_id: 0,
catalog: "foo".to_string(),
schema: "bar".to_string(),
drop_if_exists: true,


@@ -89,7 +89,6 @@ mod tests {
let ddl_context = new_ddl_context(node_manager);
let mut step = DropDatabaseStart;
let mut ctx = DropDatabaseContext {
cluster_id: 0,
catalog: "foo".to_string(),
schema: "bar".to_string(),
drop_if_exists: false,
@@ -105,7 +104,6 @@ mod tests {
let ddl_context = new_ddl_context(node_manager);
let mut state = DropDatabaseStart;
let mut ctx = DropDatabaseContext {
cluster_id: 0,
catalog: "foo".to_string(),
schema: "bar".to_string(),
drop_if_exists: true,
@@ -128,7 +126,6 @@ mod tests {
.unwrap();
let mut state = DropDatabaseStart;
let mut ctx = DropDatabaseContext {
cluster_id: 0,
catalog: "foo".to_string(),
schema: "bar".to_string(),
drop_if_exists: false,


@@ -37,8 +37,8 @@ use crate::instruction::{CacheIdent, DropFlow};
use crate::key::flow::flow_info::FlowInfoValue;
use crate::key::flow::flow_route::FlowRouteValue;
use crate::lock_key::{CatalogLock, FlowLock};
use crate::metrics;
use crate::rpc::ddl::DropFlowTask;
use crate::{metrics, ClusterId};
/// The procedure for dropping a flow.
pub struct DropFlowProcedure {
@@ -51,12 +51,11 @@ pub struct DropFlowProcedure {
impl DropFlowProcedure {
pub const TYPE_NAME: &'static str = "metasrv-procedure::DropFlow";
pub fn new(cluster_id: ClusterId, task: DropFlowTask, context: DdlContext) -> Self {
pub fn new(task: DropFlowTask, context: DdlContext) -> Self {
Self {
context,
data: DropFlowData {
state: DropFlowState::Prepare,
cluster_id,
task,
flow_info_value: None,
flow_route_values: vec![],
@@ -218,7 +217,6 @@ impl Procedure for DropFlowProcedure {
#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct DropFlowData {
state: DropFlowState,
cluster_id: ClusterId,
task: DropFlowTask,
pub(crate) flow_info_value: Option<FlowInfoValue>,
pub(crate) flow_route_values: Vec<FlowRouteValue>,


@@ -40,10 +40,10 @@ use crate::ddl::DdlContext;
use crate::error::{self, Result};
use crate::key::table_route::TableRouteValue;
use crate::lock_key::{CatalogLock, SchemaLock, TableLock};
use crate::metrics;
use crate::region_keeper::OperatingRegionGuard;
use crate::rpc::ddl::DropTableTask;
use crate::rpc::router::{operating_leader_regions, RegionRoute};
use crate::{metrics, ClusterId};
pub struct DropTableProcedure {
/// The context of procedure runtime.
@@ -59,8 +59,8 @@ pub struct DropTableProcedure {
impl DropTableProcedure {
pub const TYPE_NAME: &'static str = "metasrv-procedure::DropTable";
pub fn new(cluster_id: ClusterId, task: DropTableTask, context: DdlContext) -> Self {
let data = DropTableData::new(cluster_id, task);
pub fn new(task: DropTableTask, context: DdlContext) -> Self {
let data = DropTableData::new(task);
let executor = data.build_executor();
Self {
context,
@@ -268,7 +268,6 @@ impl Procedure for DropTableProcedure {
#[derive(Debug, Serialize, Deserialize)]
pub struct DropTableData {
pub state: DropTableState,
pub cluster_id: ClusterId,
pub task: DropTableTask,
pub physical_region_routes: Vec<RegionRoute>,
pub physical_table_id: Option<TableId>,
@@ -279,10 +278,9 @@ pub struct DropTableData {
}
impl DropTableData {
pub fn new(cluster_id: ClusterId, task: DropTableTask) -> Self {
pub fn new(task: DropTableTask) -> Self {
Self {
state: DropTableState::Prepare,
cluster_id,
task,
physical_region_routes: vec![],
physical_table_id: None,
@@ -301,7 +299,6 @@ impl DropTableData {
fn build_executor(&self) -> DropTableExecutor {
DropTableExecutor::new(
self.cluster_id,
self.task.table_name(),
self.task.table_id,
self.task.drop_if_exists,


@@ -36,7 +36,6 @@ use crate::instruction::CacheIdent;
use crate::key::table_name::TableNameKey;
use crate::key::table_route::TableRouteValue;
use crate::rpc::router::{find_leader_regions, find_leaders, RegionRoute};
use crate::ClusterId;
/// [Control] indicated to the caller whether to go to the next step.
#[derive(Debug)]
@@ -54,14 +53,8 @@ impl<T> Control<T> {
impl DropTableExecutor {
/// Returns the [DropTableExecutor].
pub fn new(
cluster_id: ClusterId,
table: TableName,
table_id: TableId,
drop_if_exists: bool,
) -> Self {
pub fn new(table: TableName, table_id: TableId, drop_if_exists: bool) -> Self {
Self {
cluster_id,
table,
table_id,
drop_if_exists,
@@ -74,7 +67,6 @@ impl DropTableExecutor {
/// - Invalidates the cache on the Frontend nodes.
/// - Drops the regions on the Datanode nodes.
pub struct DropTableExecutor {
cluster_id: ClusterId,
table: TableName,
table_id: TableId,
drop_if_exists: bool,
@@ -164,7 +156,7 @@ impl DropTableExecutor {
let detecting_regions = if table_route_value.is_physical() {
// Safety: checked.
let regions = table_route_value.region_routes().unwrap();
convert_region_routes_to_detecting_regions(self.cluster_id, regions)
convert_region_routes_to_detecting_regions(regions)
} else {
vec![]
};
@@ -321,7 +313,6 @@ mod tests {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ctx = new_ddl_context(node_manager);
let executor = DropTableExecutor::new(
0,
TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "my_table"),
1024,
true,
@@ -331,7 +322,6 @@ mod tests {
// Drops a non-exists table
let executor = DropTableExecutor::new(
0,
TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "my_table"),
1024,
false,
@@ -341,7 +331,6 @@ mod tests {
// Drops a exists table
let executor = DropTableExecutor::new(
0,
TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "my_table"),
1024,
false,


@@ -31,8 +31,8 @@ use crate::error::{self, Result};
use crate::instruction::CacheIdent;
use crate::key::table_name::TableNameKey;
use crate::lock_key::{CatalogLock, SchemaLock, TableLock};
use crate::metrics;
use crate::rpc::ddl::DropViewTask;
use crate::{metrics, ClusterId};
/// The procedure for dropping a view.
pub struct DropViewProcedure {
@@ -45,12 +45,11 @@ pub struct DropViewProcedure {
impl DropViewProcedure {
pub const TYPE_NAME: &'static str = "metasrv-procedure::DropView";
pub fn new(cluster_id: ClusterId, task: DropViewTask, context: DdlContext) -> Self {
pub fn new(task: DropViewTask, context: DdlContext) -> Self {
Self {
context,
data: DropViewData {
state: DropViewState::Prepare,
cluster_id,
task,
},
}
@@ -216,7 +215,6 @@ impl Procedure for DropViewProcedure {
#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct DropViewData {
state: DropViewState,
cluster_id: ClusterId,
task: DropViewTask,
}


@@ -20,7 +20,6 @@ use crate::error::Result;
use crate::key::FlowId;
use crate::peer::Peer;
use crate::sequence::SequenceRef;
use crate::ClusterId;
/// The reference of [FlowMetadataAllocator].
pub type FlowMetadataAllocatorRef = Arc<FlowMetadataAllocator>;
@@ -60,16 +59,9 @@ impl FlowMetadataAllocator {
}
/// Allocates the [FlowId] and [Peer]s.
pub async fn create(
&self,
cluster_id: ClusterId,
partitions: usize,
) -> Result<(FlowId, Vec<Peer>)> {
pub async fn create(&self, partitions: usize) -> Result<(FlowId, Vec<Peer>)> {
let flow_id = self.allocate_flow_id().await?;
let peers = self
.partition_peer_allocator
.alloc(cluster_id, partitions)
.await?;
let peers = self.partition_peer_allocator.alloc(partitions).await?;
Ok((flow_id, peers))
}
@@ -79,7 +71,7 @@ impl FlowMetadataAllocator {
#[async_trait]
pub trait PartitionPeerAllocator: Send + Sync {
/// Allocates [Peer] nodes for storing partitions.
async fn alloc(&self, cluster_id: ClusterId, partitions: usize) -> Result<Vec<Peer>>;
async fn alloc(&self, partitions: usize) -> Result<Vec<Peer>>;
}
/// [PartitionPeerAllocatorRef] allocates [Peer]s for partitions.
@@ -89,7 +81,7 @@ struct NoopPartitionPeerAllocator;
#[async_trait]
impl PartitionPeerAllocator for NoopPartitionPeerAllocator {
async fn alloc(&self, _cluster_id: ClusterId, partitions: usize) -> Result<Vec<Peer>> {
async fn alloc(&self, partitions: usize) -> Result<Vec<Peer>> {
Ok(vec![Peer::default(); partitions])
}
}


@@ -20,7 +20,7 @@ use common_telemetry::{debug, info};
use snafu::ensure;
use store_api::storage::{RegionId, RegionNumber, TableId};
use crate::ddl::{TableMetadata, TableMetadataAllocatorContext};
use crate::ddl::TableMetadata;
use crate::error::{self, Result, UnsupportedSnafu};
use crate::key::table_route::PhysicalTableRouteValue;
use crate::peer::Peer;
@@ -109,7 +109,6 @@ impl TableMetadataAllocator {
async fn create_table_route(
&self,
ctx: &TableMetadataAllocatorContext,
table_id: TableId,
task: &CreateTableTask,
) -> Result<PhysicalTableRouteValue> {
@@ -121,7 +120,7 @@ impl TableMetadataAllocator {
}
);
let peers = self.peer_allocator.alloc(ctx, regions).await?;
let peers = self.peer_allocator.alloc(regions).await?;
let region_routes = task
.partitions
.iter()
@@ -147,11 +146,7 @@ impl TableMetadataAllocator {
}
/// Create VIEW metadata
pub async fn create_view(
&self,
_ctx: &TableMetadataAllocatorContext,
table_id: &Option<api::v1::TableId>,
) -> Result<TableMetadata> {
pub async fn create_view(&self, table_id: &Option<api::v1::TableId>) -> Result<TableMetadata> {
let table_id = self.allocate_table_id(table_id).await?;
Ok(TableMetadata {
@@ -160,13 +155,9 @@ impl TableMetadataAllocator {
})
}
pub async fn create(
&self,
ctx: &TableMetadataAllocatorContext,
task: &CreateTableTask,
) -> Result<TableMetadata> {
pub async fn create(&self, task: &CreateTableTask) -> Result<TableMetadata> {
let table_id = self.allocate_table_id(&task.create_table.table_id).await?;
let table_route = self.create_table_route(ctx, table_id, task).await?;
let table_route = self.create_table_route(table_id, task).await?;
let region_wal_options = self.create_wal_options(&table_route)?;
debug!(
@@ -188,19 +179,14 @@ pub type PeerAllocatorRef = Arc<dyn PeerAllocator>;
#[async_trait]
pub trait PeerAllocator: Send + Sync {
/// Allocates `regions` size [`Peer`]s.
async fn alloc(&self, ctx: &TableMetadataAllocatorContext, regions: usize)
-> Result<Vec<Peer>>;
async fn alloc(&self, regions: usize) -> Result<Vec<Peer>>;
}
struct NoopPeerAllocator;
#[async_trait]
impl PeerAllocator for NoopPeerAllocator {
async fn alloc(
&self,
_ctx: &TableMetadataAllocatorContext,
regions: usize,
) -> Result<Vec<Peer>> {
async fn alloc(&self, regions: usize) -> Result<Vec<Peer>> {
Ok(vec![Peer::default(); regions])
}
}


@@ -31,10 +31,9 @@ use crate::ddl::test_util::columns::TestColumnDefBuilder;
use crate::ddl::test_util::create_table::{
build_raw_table_info_from_expr, TestCreateTableExprBuilder,
};
use crate::ddl::{DdlContext, TableMetadata, TableMetadataAllocatorContext};
use crate::ddl::{DdlContext, TableMetadata};
use crate::key::table_route::TableRouteValue;
use crate::rpc::ddl::CreateTableTask;
use crate::ClusterId;
pub async fn create_physical_table_metadata(
ddl_context: &DdlContext,
@@ -48,11 +47,7 @@ pub async fn create_physical_table_metadata(
.unwrap();
}
pub async fn create_physical_table(
ddl_context: &DdlContext,
cluster_id: ClusterId,
name: &str,
) -> TableId {
pub async fn create_physical_table(ddl_context: &DdlContext, name: &str) -> TableId {
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task(name);
let TableMetadata {
@@ -61,10 +56,7 @@ pub async fn create_physical_table(
..
} = ddl_context
.table_metadata_allocator
.create(
&TableMetadataAllocatorContext { cluster_id },
&create_physical_table_task,
)
.create(&create_physical_table_task)
.await
.unwrap();
create_physical_table_task.set_table_id(table_id);
@@ -80,15 +72,13 @@ pub async fn create_physical_table(
pub async fn create_logical_table(
ddl_context: DdlContext,
cluster_id: ClusterId,
physical_table_id: TableId,
table_name: &str,
) -> TableId {
use std::assert_matches::assert_matches;
let tasks = vec![test_create_logical_table_task(table_name)];
let mut procedure =
CreateLogicalTablesProcedure::new(cluster_id, tasks, physical_table_id, ddl_context);
let mut procedure = CreateLogicalTablesProcedure::new(tasks, physical_table_id, ddl_context);
let status = procedure.on_prepare().await.unwrap();
assert_matches!(status, Status::Executing { persist: true });
let status = procedure.on_create_metadata().await.unwrap();


@@ -86,7 +86,6 @@ fn make_alter_logical_table_rename_task(
async fn test_on_prepare_check_schema() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let tasks = vec![
make_alter_logical_table_add_column_task(
Some("schema1"),
@@ -100,8 +99,7 @@ async fn test_on_prepare_check_schema() {
),
];
let physical_table_id = 1024u32;
let mut procedure =
AlterLogicalTablesProcedure::new(cluster_id, tasks, physical_table_id, ddl_context);
let mut procedure = AlterLogicalTablesProcedure::new(tasks, physical_table_id, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, AlterLogicalTablesInvalidArguments { .. });
}
@@ -110,50 +108,46 @@ async fn test_on_prepare_check_schema() {
async fn test_on_prepare_check_alter_kind() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let tasks = vec![make_alter_logical_table_rename_task(
"schema1",
"table1",
"new_table1",
)];
let physical_table_id = 1024u32;
let mut procedure =
AlterLogicalTablesProcedure::new(cluster_id, tasks, physical_table_id, ddl_context);
let mut procedure = AlterLogicalTablesProcedure::new(tasks, physical_table_id, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, AlterLogicalTablesInvalidArguments { .. });
}
#[tokio::test]
async fn test_on_prepare_different_physical_table() {
let cluster_id = 1;
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let phy1_id = create_physical_table(&ddl_context, cluster_id, "phy1").await;
create_logical_table(ddl_context.clone(), cluster_id, phy1_id, "table1").await;
let phy2_id = create_physical_table(&ddl_context, cluster_id, "phy2").await;
create_logical_table(ddl_context.clone(), cluster_id, phy2_id, "table2").await;
let phy1_id = create_physical_table(&ddl_context, "phy1").await;
create_logical_table(ddl_context.clone(), phy1_id, "table1").await;
let phy2_id = create_physical_table(&ddl_context, "phy2").await;
create_logical_table(ddl_context.clone(), phy2_id, "table2").await;
let tasks = vec![
make_alter_logical_table_add_column_task(None, "table1", vec!["column1".to_string()]),
make_alter_logical_table_add_column_task(None, "table2", vec!["column2".to_string()]),
];
let mut procedure = AlterLogicalTablesProcedure::new(cluster_id, tasks, phy1_id, ddl_context);
let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy1_id, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, AlterLogicalTablesInvalidArguments { .. });
}
#[tokio::test]
async fn test_on_prepare_logical_table_not_exists() {
let cluster_id = 1;
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
// Creates physical table
let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
let phy_id = create_physical_table(&ddl_context, "phy").await;
// Creates 3 logical tables
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
create_logical_table(ddl_context.clone(), phy_id, "table1").await;
let tasks = vec![
make_alter_logical_table_add_column_task(None, "table1", vec!["column1".to_string()]),
@@ -161,23 +155,22 @@ async fn test_on_prepare_logical_table_not_exists() {
make_alter_logical_table_add_column_task(None, "table2", vec!["column2".to_string()]),
];
let mut procedure = AlterLogicalTablesProcedure::new(cluster_id, tasks, phy_id, ddl_context);
let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, TableNotFound { .. });
}
#[tokio::test]
async fn test_on_prepare() {
let cluster_id = 1;
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
// Creates physical table
let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
let phy_id = create_physical_table(&ddl_context, "phy").await;
// Creates 3 logical tables
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table3").await;
create_logical_table(ddl_context.clone(), phy_id, "table1").await;
create_logical_table(ddl_context.clone(), phy_id, "table2").await;
create_logical_table(ddl_context.clone(), phy_id, "table3").await;
let tasks = vec![
make_alter_logical_table_add_column_task(None, "table1", vec!["column1".to_string()]),
@@ -185,25 +178,24 @@ async fn test_on_prepare() {
make_alter_logical_table_add_column_task(None, "table3", vec!["column3".to_string()]),
];
let mut procedure = AlterLogicalTablesProcedure::new(cluster_id, tasks, phy_id, ddl_context);
let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context);
let result = procedure.on_prepare().await;
assert_matches!(result, Ok(Status::Executing { persist: true }));
}
#[tokio::test]
async fn test_on_update_metadata() {
let cluster_id = 1;
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
// Creates physical table
let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
let phy_id = create_physical_table(&ddl_context, "phy").await;
// Creates 3 logical tables
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table3").await;
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table4").await;
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table5").await;
create_logical_table(ddl_context.clone(), phy_id, "table1").await;
create_logical_table(ddl_context.clone(), phy_id, "table2").await;
create_logical_table(ddl_context.clone(), phy_id, "table3").await;
create_logical_table(ddl_context.clone(), phy_id, "table4").await;
create_logical_table(ddl_context.clone(), phy_id, "table5").await;
let tasks = vec![
make_alter_logical_table_add_column_task(None, "table1", vec!["new_col".to_string()]),
@@ -211,7 +203,7 @@ async fn test_on_update_metadata() {
make_alter_logical_table_add_column_task(None, "table3", vec!["new_col".to_string()]),
];
let mut procedure = AlterLogicalTablesProcedure::new(cluster_id, tasks, phy_id, ddl_context);
let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context);
let mut status = procedure.on_prepare().await.unwrap();
assert_matches!(status, Status::Executing { persist: true });
@@ -229,23 +221,21 @@ async fn test_on_update_metadata() {
#[tokio::test]
async fn test_on_part_duplicate_alter_request() {
let cluster_id = 1;
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
// Creates physical table
let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
let phy_id = create_physical_table(&ddl_context, "phy").await;
// Creates 3 logical tables
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;
create_logical_table(ddl_context.clone(), phy_id, "table1").await;
create_logical_table(ddl_context.clone(), phy_id, "table2").await;
let tasks = vec![
make_alter_logical_table_add_column_task(None, "table1", vec!["col_0".to_string()]),
make_alter_logical_table_add_column_task(None, "table2", vec!["col_0".to_string()]),
];
let mut procedure =
AlterLogicalTablesProcedure::new(cluster_id, tasks, phy_id, ddl_context.clone());
let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context.clone());
let mut status = procedure.on_prepare().await.unwrap();
assert_matches!(status, Status::Executing { persist: true });
@@ -278,8 +268,7 @@ async fn test_on_part_duplicate_alter_request() {
),
];
let mut procedure =
AlterLogicalTablesProcedure::new(cluster_id, tasks, phy_id, ddl_context.clone());
let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context.clone());
let mut status = procedure.on_prepare().await.unwrap();
assert_matches!(status, Status::Executing { persist: true });


@@ -59,7 +59,6 @@ fn test_rename_alter_table_task(table_name: &str, new_table_name: &str) -> Alter
async fn test_on_prepare_table_exists_err() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let task = test_create_table_task("foo", 1024);
// Puts a value to table name key.
ddl_context
@@ -73,7 +72,7 @@ async fn test_on_prepare_table_exists_err() {
.unwrap();
let task = test_rename_alter_table_task("non-exists", "foo");
let mut procedure = AlterTableProcedure::new(cluster_id, 1024, task, ddl_context).unwrap();
let mut procedure = AlterTableProcedure::new(1024, task, ddl_context).unwrap();
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err.status_code(), StatusCode::TableAlreadyExists);
}
@@ -82,9 +81,8 @@ async fn test_on_prepare_table_exists_err() {
async fn test_on_prepare_table_not_exists_err() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let task = test_rename_alter_table_task("non-exists", "foo");
let mut procedure = AlterTableProcedure::new(cluster_id, 1024, task, ddl_context).unwrap();
let mut procedure = AlterTableProcedure::new(1024, task, ddl_context).unwrap();
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err.status_code(), StatusCode::TableNotFound);
}
@@ -95,7 +93,6 @@ async fn test_on_submit_alter_request() {
let datanode_handler = DatanodeWatcher(tx);
let node_manager = Arc::new(MockDatanodeManager::new(datanode_handler));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let table_id = 1024;
let table_name = "foo";
let task = test_create_table_task(table_name, table_id);
@@ -144,8 +141,7 @@ async fn test_on_submit_alter_request() {
})),
},
};
let mut procedure =
AlterTableProcedure::new(cluster_id, table_id, alter_table_task, ddl_context).unwrap();
let mut procedure = AlterTableProcedure::new(table_id, alter_table_task, ddl_context).unwrap();
procedure.on_prepare().await.unwrap();
procedure.submit_alter_region_requests().await.unwrap();
@@ -181,7 +177,6 @@ async fn test_on_submit_alter_request_with_outdated_request() {
RequestOutdatedErrorDatanodeHandler,
));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let table_id = 1024;
let table_name = "foo";
let task = test_create_table_task(table_name, table_id);
@@ -230,8 +225,7 @@ async fn test_on_submit_alter_request_with_outdated_request() {
})),
},
};
let mut procedure =
AlterTableProcedure::new(cluster_id, table_id, alter_table_task, ddl_context).unwrap();
let mut procedure = AlterTableProcedure::new(table_id, alter_table_task, ddl_context).unwrap();
procedure.on_prepare().await.unwrap();
procedure.submit_alter_region_requests().await.unwrap();
}
@@ -240,7 +234,6 @@ async fn test_on_submit_alter_request_with_outdated_request() {
async fn test_on_update_metadata_rename() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let table_name = "foo";
let new_table_name = "bar";
let table_id = 1024;
@@ -257,8 +250,7 @@ async fn test_on_update_metadata_rename() {
.unwrap();
let task = test_rename_alter_table_task(table_name, new_table_name);
let mut procedure =
AlterTableProcedure::new(cluster_id, table_id, task, ddl_context.clone()).unwrap();
let mut procedure = AlterTableProcedure::new(table_id, task, ddl_context.clone()).unwrap();
procedure.on_prepare().await.unwrap();
procedure.on_update_metadata().await.unwrap();
@@ -291,7 +283,6 @@ async fn test_on_update_metadata_rename() {
async fn test_on_update_metadata_add_columns() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let table_name = "foo";
let table_id = 1024;
let task = test_create_table_task(table_name, table_id);
@@ -335,8 +326,7 @@ async fn test_on_update_metadata_add_columns() {
})),
},
};
let mut procedure =
AlterTableProcedure::new(cluster_id, table_id, task, ddl_context.clone()).unwrap();
let mut procedure = AlterTableProcedure::new(table_id, task, ddl_context.clone()).unwrap();
procedure.on_prepare().await.unwrap();
procedure.submit_alter_region_requests().await.unwrap();
procedure.on_update_metadata().await.unwrap();
@@ -361,7 +351,6 @@ async fn test_on_update_metadata_add_columns() {
async fn test_on_update_table_options() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let table_name = "foo";
let table_id = 1024;
let task = test_create_table_task(table_name, table_id);
@@ -398,8 +387,7 @@ async fn test_on_update_table_options() {
})),
},
};
let mut procedure =
AlterTableProcedure::new(cluster_id, table_id, task, ddl_context.clone()).unwrap();
let mut procedure = AlterTableProcedure::new(table_id, task, ddl_context.clone()).unwrap();
procedure.on_prepare().await.unwrap();
procedure.submit_alter_region_requests().await.unwrap();
procedure.on_update_metadata().await.unwrap();


@@ -25,11 +25,11 @@ use crate::ddl::create_flow::CreateFlowProcedure;
use crate::ddl::test_util::create_table::test_create_table_task;
use crate::ddl::test_util::flownode_handler::NaiveFlownodeHandler;
use crate::ddl::DdlContext;
use crate::error;
use crate::key::table_route::TableRouteValue;
use crate::key::FlowId;
use crate::rpc::ddl::CreateFlowTask;
use crate::test_util::{new_ddl_context, MockFlownodeManager};
use crate::{error, ClusterId};
pub(crate) fn test_create_flow_task(
name: &str,
@@ -53,7 +53,6 @@ pub(crate) fn test_create_flow_task(
#[tokio::test]
async fn test_create_flow_source_table_not_found() {
let cluster_id = 1;
let source_table_names = vec![TableName::new(
DEFAULT_CATALOG_NAME,
DEFAULT_SCHEMA_NAME,
@@ -65,14 +64,13 @@ async fn test_create_flow_source_table_not_found() {
let node_manager = Arc::new(MockFlownodeManager::new(NaiveFlownodeHandler));
let ddl_context = new_ddl_context(node_manager);
let query_ctx = QueryContext::arc().into();
let mut procedure = CreateFlowProcedure::new(cluster_id, task, query_ctx, ddl_context);
let mut procedure = CreateFlowProcedure::new(task, query_ctx, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, error::Error::TableNotFound { .. });
}
pub(crate) async fn create_test_flow(
ddl_context: &DdlContext,
cluster_id: ClusterId,
flow_name: &str,
source_table_names: Vec<TableName>,
sink_table_name: TableName,
@@ -84,8 +82,7 @@ pub(crate) async fn create_test_flow(
false,
);
let query_ctx = QueryContext::arc().into();
let mut procedure =
CreateFlowProcedure::new(cluster_id, task.clone(), query_ctx, ddl_context.clone());
let mut procedure = CreateFlowProcedure::new(task.clone(), query_ctx, ddl_context.clone());
let output = execute_procedure_until_done(&mut procedure).await.unwrap();
let flow_id = output.downcast_ref::<FlowId>().unwrap();
@@ -94,7 +91,6 @@ pub(crate) async fn create_test_flow(
#[tokio::test]
async fn test_create_flow() {
let cluster_id = 1;
let table_id = 1024;
let source_table_names = vec![TableName::new(
DEFAULT_CATALOG_NAME,
@@ -118,7 +114,6 @@ async fn test_create_flow() {
.unwrap();
let flow_id = create_test_flow(
&ddl_context,
cluster_id,
"my_flow",
source_table_names.clone(),
sink_table_name.clone(),
@@ -134,8 +129,7 @@ async fn test_create_flow() {
true,
);
let query_ctx = QueryContext::arc().into();
let mut procedure =
CreateFlowProcedure::new(cluster_id, task.clone(), query_ctx, ddl_context.clone());
let mut procedure = CreateFlowProcedure::new(task.clone(), query_ctx, ddl_context.clone());
let output = execute_procedure_until_done(&mut procedure).await.unwrap();
let flow_id = output.downcast_ref::<FlowId>().unwrap();
assert_eq!(*flow_id, 1024);
@@ -143,7 +137,7 @@ async fn test_create_flow() {
// Creates again
let task = test_create_flow_task("my_flow", source_table_names, sink_table_name, false);
let query_ctx = QueryContext::arc().into();
let mut procedure = CreateFlowProcedure::new(cluster_id, task.clone(), query_ctx, ddl_context);
let mut procedure = CreateFlowProcedure::new(task.clone(), query_ctx, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, error::Error::FlowAlreadyExists { .. });
}


@@ -26,7 +26,7 @@ use crate::ddl::test_util::datanode_handler::NaiveDatanodeHandler;
use crate::ddl::test_util::{
create_physical_table_metadata, test_create_logical_table_task, test_create_physical_table_task,
};
use crate::ddl::{TableMetadata, TableMetadataAllocatorContext};
use crate::ddl::TableMetadata;
use crate::error::Error;
use crate::key::table_route::TableRouteValue;
use crate::test_util::{new_ddl_context, MockDatanodeManager};
@@ -35,11 +35,9 @@ use crate::test_util::{new_ddl_context, MockDatanodeManager};
async fn test_on_prepare_physical_table_not_found() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let tasks = vec![test_create_logical_table_task("foo")];
let physical_table_id = 1024u32;
let mut procedure =
CreateLogicalTablesProcedure::new(cluster_id, tasks, physical_table_id, ddl_context);
let mut procedure = CreateLogicalTablesProcedure::new(tasks, physical_table_id, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, Error::TableRouteNotFound { .. });
}
@@ -48,7 +46,6 @@ async fn test_on_prepare_physical_table_not_found() {
async fn test_on_prepare() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task("phy_table");
let TableMetadata {
@@ -57,10 +54,7 @@ async fn test_on_prepare() {
..
} = ddl_context
.table_metadata_allocator
.create(
&TableMetadataAllocatorContext { cluster_id },
&create_physical_table_task,
)
.create(&create_physical_table_task)
.await
.unwrap();
create_physical_table_task.set_table_id(table_id);
@@ -73,8 +67,7 @@ async fn test_on_prepare() {
// The create logical table procedure.
let tasks = vec![test_create_logical_table_task("foo")];
let physical_table_id = table_id;
let mut procedure =
CreateLogicalTablesProcedure::new(cluster_id, tasks, physical_table_id, ddl_context);
let mut procedure = CreateLogicalTablesProcedure::new(tasks, physical_table_id, ddl_context);
let status = procedure.on_prepare().await.unwrap();
assert_matches!(status, Status::Executing { persist: true });
}
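As the reworked `.create(&create_physical_table_task)` calls in these hunks show, `TableMetadataAllocator::create` now takes the task alone, since `TableMetadataAllocatorContext` carried nothing but the removed cluster id. A minimal sketch with simplified stand-in types follows; the real allocator is async and also produces route metadata.

```rust
use std::cell::Cell;

// Simplified stand-ins; the real allocator also allocates table routes.
struct CreateTableTask;
struct TableMetadata {
    table_id: u32,
}

struct TableMetadataAllocator {
    next_table_id: Cell<u32>,
}

impl TableMetadataAllocator {
    // Before: create(&TableMetadataAllocatorContext { cluster_id }, &task),
    // where the context struct held only the cluster id.
    // After: the task alone is enough.
    fn create(&self, _task: &CreateTableTask) -> TableMetadata {
        let table_id = self.next_table_id.get();
        self.next_table_id.set(table_id + 1);
        TableMetadata { table_id }
    }
}

fn main() {
    let allocator = TableMetadataAllocator {
        next_table_id: Cell::new(1024),
    };
    let metadata = allocator.create(&CreateTableTask);
    assert_eq!(metadata.table_id, 1024);
}
```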
@@ -83,7 +76,6 @@ async fn test_on_prepare() {
async fn test_on_prepare_logical_table_exists_err() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task("phy_table");
let TableMetadata {
@@ -92,10 +84,7 @@ async fn test_on_prepare_logical_table_exists_err() {
..
} = ddl_context
.table_metadata_allocator
.create(
&TableMetadataAllocatorContext { cluster_id },
&create_physical_table_task,
)
.create(&create_physical_table_task)
.await
.unwrap();
create_physical_table_task.set_table_id(table_id);
@@ -119,7 +108,7 @@ async fn test_on_prepare_logical_table_exists_err() {
// The create logical table procedure.
let physical_table_id = table_id;
let mut procedure =
CreateLogicalTablesProcedure::new(cluster_id, vec![task], physical_table_id, ddl_context);
CreateLogicalTablesProcedure::new(vec![task], physical_table_id, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, Error::TableAlreadyExists { .. });
assert_eq!(err.status_code(), StatusCode::TableAlreadyExists);
@@ -129,7 +118,6 @@ async fn test_on_prepare_logical_table_exists_err() {
async fn test_on_prepare_with_create_if_table_exists() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task("phy_table");
let TableMetadata {
@@ -138,10 +126,7 @@ async fn test_on_prepare_with_create_if_table_exists() {
..
} = ddl_context
.table_metadata_allocator
.create(
&TableMetadataAllocatorContext { cluster_id },
&create_physical_table_task,
)
.create(&create_physical_table_task)
.await
.unwrap();
create_physical_table_task.set_table_id(table_id);
@@ -167,7 +152,7 @@ async fn test_on_prepare_with_create_if_table_exists() {
// Sets `create_if_not_exists`
task.create_table.create_if_not_exists = true;
let mut procedure =
CreateLogicalTablesProcedure::new(cluster_id, vec![task], physical_table_id, ddl_context);
CreateLogicalTablesProcedure::new(vec![task], physical_table_id, ddl_context);
let status = procedure.on_prepare().await.unwrap();
let output = status.downcast_output_ref::<Vec<u32>>().unwrap();
assert_eq!(*output, vec![8192]);
@@ -177,7 +162,6 @@ async fn test_on_prepare_with_create_if_table_exists() {
async fn test_on_prepare_part_logical_tables_exist() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task("phy_table");
let TableMetadata {
@@ -186,10 +170,7 @@ async fn test_on_prepare_part_logical_tables_exist() {
..
} = ddl_context
.table_metadata_allocator
.create(
&TableMetadataAllocatorContext { cluster_id },
&create_physical_table_task,
)
.create(&create_physical_table_task)
.await
.unwrap();
create_physical_table_task.set_table_id(table_id);
@@ -216,7 +197,6 @@ async fn test_on_prepare_part_logical_tables_exist() {
task.create_table.create_if_not_exists = true;
let non_exist_task = test_create_logical_table_task("non_exists");
let mut procedure = CreateLogicalTablesProcedure::new(
cluster_id,
vec![task, non_exist_task],
physical_table_id,
ddl_context,
@@ -229,7 +209,6 @@ async fn test_on_prepare_part_logical_tables_exist() {
async fn test_on_create_metadata() {
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task("phy_table");
let TableMetadata {
@@ -238,10 +217,7 @@ async fn test_on_create_metadata() {
..
} = ddl_context
.table_metadata_allocator
.create(
&TableMetadataAllocatorContext { cluster_id },
&create_physical_table_task,
)
.create(&create_physical_table_task)
.await
.unwrap();
create_physical_table_task.set_table_id(table_id);
@@ -257,7 +233,6 @@ async fn test_on_create_metadata() {
let task = test_create_logical_table_task("foo");
let yet_another_task = test_create_logical_table_task("bar");
let mut procedure = CreateLogicalTablesProcedure::new(
cluster_id,
vec![task, yet_another_task],
physical_table_id,
ddl_context,
@@ -279,7 +254,6 @@ async fn test_on_create_metadata() {
async fn test_on_create_metadata_part_logical_tables_exist() {
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task("phy_table");
let TableMetadata {
@@ -288,10 +262,7 @@ async fn test_on_create_metadata_part_logical_tables_exist() {
..
} = ddl_context
.table_metadata_allocator
.create(
&TableMetadataAllocatorContext { cluster_id },
&create_physical_table_task,
)
.create(&create_physical_table_task)
.await
.unwrap();
create_physical_table_task.set_table_id(table_id);
@@ -318,7 +289,6 @@ async fn test_on_create_metadata_part_logical_tables_exist() {
task.create_table.create_if_not_exists = true;
let non_exist_task = test_create_logical_table_task("non_exists");
let mut procedure = CreateLogicalTablesProcedure::new(
cluster_id,
vec![task, non_exist_task],
physical_table_id,
ddl_context,
@@ -340,7 +310,6 @@ async fn test_on_create_metadata_part_logical_tables_exist() {
async fn test_on_create_metadata_err() {
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task("phy_table");
let TableMetadata {
@@ -349,10 +318,7 @@ async fn test_on_create_metadata_err() {
..
} = ddl_context
.table_metadata_allocator
.create(
&TableMetadataAllocatorContext { cluster_id },
&create_physical_table_task,
)
.create(&create_physical_table_task)
.await
.unwrap();
create_physical_table_task.set_table_id(table_id);
@@ -368,7 +334,6 @@ async fn test_on_create_metadata_err() {
let task = test_create_logical_table_task("foo");
let yet_another_task = test_create_logical_table_task("bar");
let mut procedure = CreateLogicalTablesProcedure::new(
cluster_id,
vec![task.clone(), yet_another_task],
physical_table_id,
ddl_context.clone(),


@@ -87,7 +87,6 @@ pub(crate) fn test_create_table_task(name: &str) -> CreateTableTask {
async fn test_on_prepare_table_exists_err() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let task = test_create_table_task("foo");
assert!(!task.create_table.create_if_not_exists);
// Puts a value to table name key.
@@ -100,7 +99,7 @@ async fn test_on_prepare_table_exists_err() {
)
.await
.unwrap();
let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context);
let mut procedure = CreateTableProcedure::new(task, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, Error::TableAlreadyExists { .. });
assert_eq!(err.status_code(), StatusCode::TableAlreadyExists);
@@ -110,7 +109,6 @@ async fn test_on_prepare_table_exists_err() {
async fn test_on_prepare_with_create_if_table_exists() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let mut task = test_create_table_task("foo");
task.create_table.create_if_not_exists = true;
task.table_info.ident.table_id = 1024;
@@ -124,7 +122,7 @@ async fn test_on_prepare_with_create_if_table_exists() {
)
.await
.unwrap();
let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context);
let mut procedure = CreateTableProcedure::new(task, ddl_context);
let status = procedure.on_prepare().await.unwrap();
assert_matches!(status, Status::Done { output: Some(..) });
let table_id = *status.downcast_output_ref::<u32>().unwrap();
@@ -135,10 +133,9 @@ async fn test_on_prepare_with_create_if_table_exists() {
async fn test_on_prepare_without_create_if_table_exists() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let mut task = test_create_table_task("foo");
task.create_table.create_if_not_exists = true;
let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context);
let mut procedure = CreateTableProcedure::new(task, ddl_context);
let status = procedure.on_prepare().await.unwrap();
assert_matches!(status, Status::Executing { persist: true });
assert_eq!(procedure.table_id(), 1024);
@@ -148,11 +145,10 @@ async fn test_on_prepare_without_create_if_table_exists() {
async fn test_on_prepare_with_no_partition_err() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let mut task = test_create_table_task("foo");
task.partitions = vec![];
task.create_table.create_if_not_exists = true;
let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context);
let mut procedure = CreateTableProcedure::new(task, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, Error::Unexpected { .. });
assert!(err
@@ -165,10 +161,9 @@ async fn test_on_datanode_create_regions_should_retry() {
common_telemetry::init_default_ut_logging();
let node_manager = Arc::new(MockDatanodeManager::new(RetryErrorDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let task = test_create_table_task("foo");
assert!(!task.create_table.create_if_not_exists);
let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context);
let mut procedure = CreateTableProcedure::new(task, ddl_context);
procedure.on_prepare().await.unwrap();
let ctx = ProcedureContext {
procedure_id: ProcedureId::random(),
@@ -183,10 +178,9 @@ async fn test_on_datanode_create_regions_should_not_retry() {
common_telemetry::init_default_ut_logging();
let node_manager = Arc::new(MockDatanodeManager::new(UnexpectedErrorDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let task = test_create_table_task("foo");
assert!(!task.create_table.create_if_not_exists);
let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context);
let mut procedure = CreateTableProcedure::new(task, ddl_context);
procedure.on_prepare().await.unwrap();
let ctx = ProcedureContext {
procedure_id: ProcedureId::random(),
@@ -201,10 +195,9 @@ async fn test_on_create_metadata_error() {
common_telemetry::init_default_ut_logging();
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let task = test_create_table_task("foo");
assert!(!task.create_table.create_if_not_exists);
let mut procedure = CreateTableProcedure::new(cluster_id, task.clone(), ddl_context.clone());
let mut procedure = CreateTableProcedure::new(task.clone(), ddl_context.clone());
procedure.on_prepare().await.unwrap();
let ctx = ProcedureContext {
procedure_id: ProcedureId::random(),
@@ -233,10 +226,9 @@ async fn test_on_create_metadata() {
common_telemetry::init_default_ut_logging();
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let task = test_create_table_task("foo");
assert!(!task.create_table.create_if_not_exists);
let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context);
let mut procedure = CreateTableProcedure::new(task, ddl_context);
procedure.on_prepare().await.unwrap();
let ctx = ProcedureContext {
procedure_id: ProcedureId::random(),
@@ -251,14 +243,12 @@ async fn test_on_create_metadata() {
#[tokio::test]
async fn test_memory_region_keeper_guard_dropped_on_procedure_done() {
let cluster_id = 1;
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let kv_backend = Arc::new(MemoryKvBackend::new());
let ddl_context = new_ddl_context_with_kv_backend(node_manager, kv_backend);
let task = test_create_table_task("foo");
let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context.clone());
let mut procedure = CreateTableProcedure::new(task, ddl_context.clone());
execute_procedure_until(&mut procedure, |p| {
p.creator.data.state == CreateTableState::CreateMetadata


@@ -97,7 +97,6 @@ pub(crate) fn test_create_view_task(name: &str) -> CreateViewTask {
async fn test_on_prepare_view_exists_err() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let task = test_create_view_task("foo");
assert!(!task.create_view.create_if_not_exists);
// Puts a value to table name key.
@@ -113,7 +112,7 @@ async fn test_on_prepare_view_exists_err() {
)
.await
.unwrap();
let mut procedure = CreateViewProcedure::new(cluster_id, task, ddl_context);
let mut procedure = CreateViewProcedure::new(task, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, Error::ViewAlreadyExists { .. });
assert_eq!(err.status_code(), StatusCode::TableAlreadyExists);
@@ -123,7 +122,6 @@ async fn test_on_prepare_view_exists_err() {
async fn test_on_prepare_with_create_if_view_exists() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let mut task = test_create_view_task("foo");
task.create_view.create_if_not_exists = true;
task.view_info.ident.table_id = 1024;
@@ -140,7 +138,7 @@ async fn test_on_prepare_with_create_if_view_exists() {
)
.await
.unwrap();
let mut procedure = CreateViewProcedure::new(cluster_id, task, ddl_context);
let mut procedure = CreateViewProcedure::new(task, ddl_context);
let status = procedure.on_prepare().await.unwrap();
assert_matches!(status, Status::Done { output: Some(..) });
let table_id = *status.downcast_output_ref::<u32>().unwrap();
@@ -151,10 +149,9 @@ async fn test_on_prepare_with_create_if_view_exists() {
async fn test_on_prepare_without_create_if_table_exists() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let mut task = test_create_view_task("foo");
task.create_view.create_if_not_exists = true;
let mut procedure = CreateViewProcedure::new(cluster_id, task, ddl_context);
let mut procedure = CreateViewProcedure::new(task, ddl_context);
let status = procedure.on_prepare().await.unwrap();
assert_matches!(status, Status::Executing { persist: true });
assert_eq!(procedure.view_id(), 1024);
@@ -165,10 +162,9 @@ async fn test_on_create_metadata() {
common_telemetry::init_default_ut_logging();
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let task = test_create_view_task("foo");
assert!(!task.create_view.create_if_not_exists);
let mut procedure = CreateViewProcedure::new(cluster_id, task, ddl_context);
let mut procedure = CreateViewProcedure::new(task, ddl_context);
procedure.on_prepare().await.unwrap();
let ctx = ProcedureContext {
procedure_id: ProcedureId::random(),
@@ -185,10 +181,9 @@ async fn test_replace_view_metadata() {
common_telemetry::init_default_ut_logging();
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager.clone());
let cluster_id = 1;
let task = test_create_view_task("foo");
assert!(!task.create_view.create_if_not_exists);
let mut procedure = CreateViewProcedure::new(cluster_id, task.clone(), ddl_context.clone());
let mut procedure = CreateViewProcedure::new(task.clone(), ddl_context.clone());
procedure.on_prepare().await.unwrap();
let ctx = ProcedureContext {
procedure_id: ProcedureId::random(),
@@ -213,7 +208,7 @@ async fn test_replace_view_metadata() {
let mut task = test_create_view_task("foo");
// The view already exists, prepare should fail
{
let mut procedure = CreateViewProcedure::new(cluster_id, task.clone(), ddl_context.clone());
let mut procedure = CreateViewProcedure::new(task.clone(), ddl_context.clone());
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, Error::ViewAlreadyExists { .. });
assert_eq!(err.status_code(), StatusCode::TableAlreadyExists);
@@ -224,7 +219,7 @@ async fn test_replace_view_metadata() {
task.create_view.logical_plan = vec![4, 5, 6];
task.create_view.definition = "new_definition".to_string();
let mut procedure = CreateViewProcedure::new(cluster_id, task, ddl_context.clone());
let mut procedure = CreateViewProcedure::new(task, ddl_context.clone());
procedure.on_prepare().await.unwrap();
let ctx = ProcedureContext {
procedure_id: ProcedureId::random(),
@@ -254,12 +249,11 @@ async fn test_replace_table() {
common_telemetry::init_default_ut_logging();
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager.clone());
let cluster_id = 1;
{
// Create a `foo` table.
let task = test_create_table_task("foo");
let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context.clone());
let mut procedure = CreateTableProcedure::new(task, ddl_context.clone());
procedure.on_prepare().await.unwrap();
let ctx = ProcedureContext {
procedure_id: ProcedureId::random(),
@@ -272,7 +266,7 @@ async fn test_replace_table() {
// Try to replace a view named `foo` too.
let mut task = test_create_view_task("foo");
task.create_view.or_replace = true;
let mut procedure = CreateViewProcedure::new(cluster_id, task.clone(), ddl_context.clone());
let mut procedure = CreateViewProcedure::new(task.clone(), ddl_context.clone());
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, Error::TableAlreadyExists { .. });
assert_eq!(err.status_code(), StatusCode::TableAlreadyExists);


@@ -31,7 +31,6 @@ use crate::test_util::{new_ddl_context, MockDatanodeManager};
#[tokio::test]
async fn test_drop_database_with_logical_tables() {
common_telemetry::init_default_ut_logging();
let cluster_id = 1;
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
ddl_context
@@ -45,11 +44,11 @@ async fn test_drop_database_with_logical_tables() {
.await
.unwrap();
// Creates physical table
let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
let phy_id = create_physical_table(&ddl_context, "phy").await;
// Creates 3 logical tables
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table3").await;
create_logical_table(ddl_context.clone(), phy_id, "table1").await;
create_logical_table(ddl_context.clone(), phy_id, "table2").await;
create_logical_table(ddl_context.clone(), phy_id, "table3").await;
let mut procedure = DropDatabaseProcedure::new(
DEFAULT_CATALOG_NAME.to_string(),
@@ -80,7 +79,6 @@ async fn test_drop_database_with_logical_tables() {
#[tokio::test]
async fn test_drop_database_retryable_error() {
common_telemetry::init_default_ut_logging();
let cluster_id = 1;
let node_manager = Arc::new(MockDatanodeManager::new(RetryErrorDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
ddl_context
@@ -94,11 +92,11 @@ async fn test_drop_database_retryable_error() {
.await
.unwrap();
// Creates physical table
let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
let phy_id = create_physical_table(&ddl_context, "phy").await;
// Creates 3 logical tables
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table3").await;
create_logical_table(ddl_context.clone(), phy_id, "table1").await;
create_logical_table(ddl_context.clone(), phy_id, "table2").await;
create_logical_table(ddl_context.clone(), phy_id, "table3").await;
let mut procedure = DropDatabaseProcedure::new(
DEFAULT_CATALOG_NAME.to_string(),
@@ -128,7 +126,6 @@ async fn test_drop_database_retryable_error() {
#[tokio::test]
async fn test_drop_database_recover() {
common_telemetry::init_default_ut_logging();
let cluster_id = 1;
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
ddl_context
@@ -142,9 +139,9 @@ async fn test_drop_database_recover() {
.await
.unwrap();
// Creates a physical table
let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
let phy_id = create_physical_table(&ddl_context, "phy").await;
// Creates a logical table
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
create_logical_table(ddl_context.clone(), phy_id, "table1").await;
let mut procedure = DropDatabaseProcedure::new(
DEFAULT_CATALOG_NAME.to_string(),
DEFAULT_SCHEMA_NAME.to_string(),


@@ -40,12 +40,11 @@ fn test_drop_flow_task(flow_name: &str, flow_id: u32, drop_if_exists: bool) -> D
#[tokio::test]
async fn test_drop_flow_not_found() {
let cluster_id = 1;
let flow_id = 1024;
let node_manager = Arc::new(MockFlownodeManager::new(NaiveFlownodeHandler));
let ddl_context = new_ddl_context(node_manager);
let task = test_drop_flow_task("my_flow", flow_id, false);
let mut procedure = DropFlowProcedure::new(cluster_id, task, ddl_context);
let mut procedure = DropFlowProcedure::new(task, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, error::Error::FlowNotFound { .. });
}
@@ -53,7 +52,6 @@ async fn test_drop_flow_not_found() {
#[tokio::test]
async fn test_drop_flow() {
// create a flow
let cluster_id = 1;
let table_id = 1024;
let source_table_names = vec![TableName::new(
DEFAULT_CATALOG_NAME,
@@ -75,27 +73,21 @@ async fn test_drop_flow() {
)
.await
.unwrap();
let flow_id = create_test_flow(
&ddl_context,
cluster_id,
"my_flow",
source_table_names,
sink_table_name,
)
.await;
let flow_id =
create_test_flow(&ddl_context, "my_flow", source_table_names, sink_table_name).await;
// Drops the flows
let task = test_drop_flow_task("my_flow", flow_id, false);
let mut procedure = DropFlowProcedure::new(cluster_id, task, ddl_context.clone());
let mut procedure = DropFlowProcedure::new(task, ddl_context.clone());
execute_procedure_until_done(&mut procedure).await;
// Drops if not exists
let task = test_drop_flow_task("my_flow", flow_id, true);
let mut procedure = DropFlowProcedure::new(cluster_id, task, ddl_context.clone());
let mut procedure = DropFlowProcedure::new(task, ddl_context.clone());
execute_procedure_until_done(&mut procedure).await;
// Drops again
let task = test_drop_flow_task("my_flow", flow_id, false);
let mut procedure = DropFlowProcedure::new(cluster_id, task, ddl_context);
let mut procedure = DropFlowProcedure::new(task, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, error::Error::FlowNotFound { .. });
}


@@ -35,7 +35,7 @@ use crate::ddl::test_util::{
create_logical_table, create_physical_table, create_physical_table_metadata,
test_create_logical_table_task, test_create_physical_table_task,
};
use crate::ddl::{TableMetadata, TableMetadataAllocatorContext};
use crate::ddl::TableMetadata;
use crate::key::table_route::TableRouteValue;
use crate::kv_backend::memory::MemoryKvBackend;
use crate::peer::Peer;
@@ -47,7 +47,6 @@ use crate::test_util::{new_ddl_context, new_ddl_context_with_kv_backend, MockDat
async fn test_on_prepare_table_not_exists_err() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let table_name = "foo";
let table_id = 1024;
let task = test_create_table_task(table_name, table_id);
@@ -63,7 +62,7 @@ async fn test_on_prepare_table_not_exists_err() {
.unwrap();
let task = new_drop_table_task("bar", table_id, false);
let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context);
let mut procedure = DropTableProcedure::new(task, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_eq!(err.status_code(), StatusCode::TableNotFound);
}
@@ -72,7 +71,6 @@ async fn test_on_prepare_table_not_exists_err() {
async fn test_on_prepare_table() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let table_name = "foo";
let table_id = 1024;
let task = test_create_table_task(table_name, table_id);
@@ -89,13 +87,13 @@ async fn test_on_prepare_table() {
let task = new_drop_table_task("bar", table_id, true);
// Drop if exists
let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
let mut procedure = DropTableProcedure::new(task, ddl_context.clone());
procedure.on_prepare().await.unwrap();
assert!(!procedure.rollback_supported());
let task = new_drop_table_task(table_name, table_id, false);
// Drop table
let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context);
let mut procedure = DropTableProcedure::new(task, ddl_context);
procedure.on_prepare().await.unwrap();
}
@@ -105,7 +103,6 @@ async fn test_on_datanode_drop_regions() {
let datanode_handler = DatanodeWatcher(tx);
let node_manager = Arc::new(MockDatanodeManager::new(datanode_handler));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let table_id = 1024;
let table_name = "foo";
let task = test_create_table_task(table_name, table_id);
@@ -144,7 +141,7 @@ async fn test_on_datanode_drop_regions() {
let task = new_drop_table_task(table_name, table_id, false);
// Drop table
let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context);
let mut procedure = DropTableProcedure::new(task, ddl_context);
procedure.on_prepare().await.unwrap();
procedure.on_datanode_drop_regions().await.unwrap();
@@ -179,7 +176,6 @@ async fn test_on_rollback() {
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let kv_backend = Arc::new(MemoryKvBackend::new());
let ddl_context = new_ddl_context_with_kv_backend(node_manager, kv_backend.clone());
let cluster_id = 1;
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task("phy_table");
let TableMetadata {
@@ -188,10 +184,7 @@ async fn test_on_rollback() {
..
} = ddl_context
.table_metadata_allocator
.create(
&TableMetadataAllocatorContext { cluster_id },
&create_physical_table_task,
)
.create(&create_physical_table_task)
.await
.unwrap();
create_physical_table_task.set_table_id(table_id);
@@ -205,12 +198,8 @@ async fn test_on_rollback() {
let physical_table_id = table_id;
// Creates the logical table metadata.
let task = test_create_logical_table_task("foo");
let mut procedure = CreateLogicalTablesProcedure::new(
cluster_id,
vec![task],
physical_table_id,
ddl_context.clone(),
);
let mut procedure =
CreateLogicalTablesProcedure::new(vec![task], physical_table_id, ddl_context.clone());
procedure.on_prepare().await.unwrap();
let ctx = new_test_procedure_context();
procedure.execute(&ctx).await.unwrap();
@@ -223,7 +212,7 @@ async fn test_on_rollback() {
// Drops the physical table
{
let task = new_drop_table_task("phy_table", physical_table_id, false);
let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
let mut procedure = DropTableProcedure::new(task, ddl_context.clone());
procedure.on_prepare().await.unwrap();
assert!(procedure.rollback_supported());
procedure.on_delete_metadata().await.unwrap();
@@ -238,7 +227,7 @@ async fn test_on_rollback() {
// Drops the logical table
let task = new_drop_table_task("foo", table_ids[0], false);
let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
let mut procedure = DropTableProcedure::new(task, ddl_context.clone());
procedure.on_prepare().await.unwrap();
assert!(!procedure.rollback_supported());
}
@@ -255,18 +244,15 @@ fn new_drop_table_task(table_name: &str, table_id: TableId, drop_if_exists: bool
#[tokio::test]
async fn test_memory_region_keeper_guard_dropped_on_procedure_done() {
let cluster_id = 1;
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let kv_backend = Arc::new(MemoryKvBackend::new());
let ddl_context = new_ddl_context_with_kv_backend(node_manager, kv_backend);
let physical_table_id = create_physical_table(&ddl_context, cluster_id, "t").await;
let logical_table_id =
create_logical_table(ddl_context.clone(), cluster_id, physical_table_id, "s").await;
let physical_table_id = create_physical_table(&ddl_context, "t").await;
let logical_table_id = create_logical_table(ddl_context.clone(), physical_table_id, "s").await;
let inner_test = |task: DropTableTask| async {
let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
let mut procedure = DropTableProcedure::new(task, ddl_context.clone());
execute_procedure_until(&mut procedure, |p| {
p.data.state == DropTableState::InvalidateTableCache
})
@@ -304,14 +290,13 @@ async fn test_from_json() {
(DropTableState::DatanodeDropRegions, 1, 1),
(DropTableState::DeleteTombstone, 1, 0),
] {
let cluster_id = 1;
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let kv_backend = Arc::new(MemoryKvBackend::new());
let ddl_context = new_ddl_context_with_kv_backend(node_manager, kv_backend);
let physical_table_id = create_physical_table(&ddl_context, cluster_id, "t").await;
let physical_table_id = create_physical_table(&ddl_context, "t").await;
let task = new_drop_table_task("t", physical_table_id, false);
let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
let mut procedure = DropTableProcedure::new(task, ddl_context.clone());
execute_procedure_until(&mut procedure, |p| p.data.state == state).await;
let data = procedure.dump().unwrap();
assert_eq!(
@@ -334,14 +319,13 @@ async fn test_from_json() {
let num_operating_regions = 0;
let num_operating_regions_after_recovery = 0;
let cluster_id = 1;
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let kv_backend = Arc::new(MemoryKvBackend::new());
let ddl_context = new_ddl_context_with_kv_backend(node_manager, kv_backend);
let physical_table_id = create_physical_table(&ddl_context, cluster_id, "t").await;
let physical_table_id = create_physical_table(&ddl_context, "t").await;
let task = new_drop_table_task("t", physical_table_id, false);
let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
let mut procedure = DropTableProcedure::new(task, ddl_context.clone());
execute_procedure_until_done(&mut procedure).await;
let data = procedure.dump().unwrap();
assert_eq!(


@@ -41,7 +41,6 @@ fn new_drop_view_task(view: &str, view_id: TableId, drop_if_exists: bool) -> Dro
async fn test_on_prepare_view_not_exists_err() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let view_id = 1024;
let mut task = test_create_view_task("foo");
task.view_info.ident.table_id = view_id;
@@ -60,7 +59,7 @@ async fn test_on_prepare_view_not_exists_err() {
.unwrap();
let task = new_drop_view_task("bar", view_id, false);
let mut procedure = DropViewProcedure::new(cluster_id, task, ddl_context);
let mut procedure = DropViewProcedure::new(task, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_eq!(err.status_code(), StatusCode::TableNotFound);
}
@@ -69,7 +68,6 @@ async fn test_on_prepare_view_not_exists_err() {
async fn test_on_prepare_not_view_err() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let view_id = 1024;
let view_name = "foo";
let task = test_create_table_task(view_name, view_id);
@@ -85,7 +83,7 @@ async fn test_on_prepare_not_view_err() {
.unwrap();
let task = new_drop_view_task(view_name, view_id, false);
let mut procedure = DropViewProcedure::new(cluster_id, task, ddl_context);
let mut procedure = DropViewProcedure::new(task, ddl_context);
// It's not a view, expect error
let err = procedure.on_prepare().await.unwrap_err();
assert_eq!(err.status_code(), StatusCode::InvalidArguments);
@@ -95,7 +93,6 @@ async fn test_on_prepare_not_view_err() {
async fn test_on_prepare_success() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let view_id = 1024;
let view_name = "foo";
let mut task = test_create_view_task("foo");
@@ -116,12 +113,12 @@ async fn test_on_prepare_success() {
let task = new_drop_view_task("bar", view_id, true);
// Drop if exists
let mut procedure = DropViewProcedure::new(cluster_id, task, ddl_context.clone());
let mut procedure = DropViewProcedure::new(task, ddl_context.clone());
procedure.on_prepare().await.unwrap();
let task = new_drop_view_task(view_name, view_id, false);
// Prepare success
let mut procedure = DropViewProcedure::new(cluster_id, task, ddl_context);
let mut procedure = DropViewProcedure::new(task, ddl_context);
procedure.on_prepare().await.unwrap();
assert_eq!(DropViewState::DeleteMetadata, procedure.state());
}
@@ -130,7 +127,6 @@ async fn test_on_prepare_success() {
async fn test_drop_view_success() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let view_id = 1024;
let view_name = "foo";
let mut task = test_create_view_task("foo");
@@ -159,7 +155,7 @@ async fn test_drop_view_success() {
let task = new_drop_view_task(view_name, view_id, false);
// Prepare success
let mut procedure = DropViewProcedure::new(cluster_id, task, ddl_context.clone());
let mut procedure = DropViewProcedure::new(task, ddl_context.clone());
execute_procedure_until_done(&mut procedure).await;
assert_eq!(DropViewState::InvalidateViewCache, procedure.state());
@@ -174,7 +170,7 @@ async fn test_drop_view_success() {
// Drop again
let task = new_drop_view_task(view_name, view_id, false);
let mut procedure = DropViewProcedure::new(cluster_id, task, ddl_context);
let mut procedure = DropViewProcedure::new(task, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_eq!(err.status_code(), StatusCode::TableNotFound);
}


@@ -39,9 +39,9 @@ use crate::key::table_info::TableInfoValue;
use crate::key::table_name::TableNameKey;
use crate::key::DeserializedValueWithBytes;
use crate::lock_key::{CatalogLock, SchemaLock, TableLock};
use crate::metrics;
use crate::rpc::ddl::TruncateTableTask;
use crate::rpc::router::{find_leader_regions, find_leaders, RegionRoute};
use crate::{metrics, ClusterId};
pub struct TruncateTableProcedure {
context: DdlContext,
@@ -91,7 +91,6 @@ impl TruncateTableProcedure {
pub(crate) const TYPE_NAME: &'static str = "metasrv-procedure::TruncateTable";
pub(crate) fn new(
cluster_id: ClusterId,
task: TruncateTableTask,
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
region_routes: Vec<RegionRoute>,
@@ -99,7 +98,7 @@ impl TruncateTableProcedure {
) -> Self {
Self {
context,
data: TruncateTableData::new(cluster_id, task, table_info_value, region_routes),
data: TruncateTableData::new(task, table_info_value, region_routes),
}
}
@@ -189,7 +188,6 @@ impl TruncateTableProcedure {
#[derive(Debug, Serialize, Deserialize)]
pub struct TruncateTableData {
state: TruncateTableState,
cluster_id: ClusterId,
task: TruncateTableTask,
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
region_routes: Vec<RegionRoute>,
@@ -197,14 +195,12 @@ pub struct TruncateTableData {
impl TruncateTableData {
pub fn new(
cluster_id: ClusterId,
task: TruncateTableTask,
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
region_routes: Vec<RegionRoute>,
) -> Self {
Self {
state: TruncateTableState::Prepare,
cluster_id,
task,
table_info_value,
region_routes,
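Removing `cluster_id` from `TruncateTableData` also changes what gets persisted for in-flight procedures. The sketch below shows why previously stored data still loads, assuming the data is serialized as JSON through the serde derives above; the field types are simplified stand-ins, and the real struct also carries the table info value and region routes.

```rust
use serde::{Deserialize, Serialize};

// Trimmed-down stand-in for the refactored TruncateTableData.
#[derive(Debug, Serialize, Deserialize)]
struct TruncateTableData {
    state: String,
    task: String,
}

fn main() {
    // Procedure data persisted before the refactor still contains `cluster_id`.
    // serde ignores unknown fields by default (no `deny_unknown_fields` here),
    // so an old payload still deserializes into the new struct.
    let old_payload = r#"{"state":"Prepare","cluster_id":1,"task":"truncate foo"}"#;
    let data: TruncateTableData =
        serde_json::from_str(old_payload).expect("old payload should still load");
    println!("{data:?}");
}
```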


@@ -34,7 +34,6 @@ use crate::key::TableMetadataManagerRef;
use crate::peer::Peer;
use crate::rpc::ddl::CreateTableTask;
use crate::rpc::router::RegionRoute;
use crate::ClusterId;
/// Adds [Peer] context if the error is unretryable.
pub fn add_peer_context_if_needed(datanode: Peer) -> impl FnOnce(Error) -> Error {
@@ -144,7 +143,6 @@ pub async fn get_physical_table_id(
/// Converts a list of [`RegionRoute`] to a list of [`DetectingRegion`].
pub fn convert_region_routes_to_detecting_regions(
cluster_id: ClusterId,
region_routes: &[RegionRoute],
) -> Vec<DetectingRegion> {
region_routes
@@ -153,7 +151,7 @@ pub fn convert_region_routes_to_detecting_regions(
route
.leader_peer
.as_ref()
.map(|peer| (cluster_id, peer.id, route.region.id))
.map(|peer| (peer.id, route.region.id))
})
.collect::<Vec<_>>()
}
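With the cluster id gone, a `DetectingRegion` shrinks from a `(cluster_id, datanode_id, region_id)` triple to a pair. Here is a self-contained sketch of the converted helper, with simplified stand-ins for `Peer`, `RegionRoute`, and the id types.

```rust
// Simplified stand-ins for the real Peer, Region, and RegionRoute types.
struct Peer {
    id: u64,
}
struct Region {
    id: u64,
}
struct RegionRoute {
    leader_peer: Option<Peer>,
    region: Region,
}

// Previously (ClusterId, DatanodeId, RegionId); the cluster id is dropped.
type DetectingRegion = (u64, u64);

fn convert_region_routes_to_detecting_regions(routes: &[RegionRoute]) -> Vec<DetectingRegion> {
    routes
        .iter()
        .filter_map(|route| {
            route
                .leader_peer
                .as_ref()
                .map(|peer| (peer.id, route.region.id))
        })
        .collect()
}

fn main() {
    let routes = vec![
        RegionRoute {
            leader_peer: Some(Peer { id: 1 }),
            region: Region { id: 100 },
        },
        // Routes without a leader are skipped, as in the original helper.
        RegionRoute {
            leader_peer: None,
            region: Region { id: 101 },
        },
    ];
    assert_eq!(
        convert_region_routes_to_detecting_regions(&routes),
        vec![(1, 100)]
    );
}
```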


@@ -60,7 +60,6 @@ use crate::rpc::ddl::{
use crate::rpc::procedure;
use crate::rpc::procedure::{MigrateRegionRequest, MigrateRegionResponse, ProcedureStateResponse};
use crate::rpc::router::RegionRoute;
use crate::ClusterId;
pub type DdlManagerRef = Arc<DdlManager>;
@@ -154,13 +153,12 @@ impl DdlManager {
#[tracing::instrument(skip_all)]
pub async fn submit_alter_table_task(
&self,
cluster_id: ClusterId,
table_id: TableId,
alter_table_task: AlterTableTask,
) -> Result<(ProcedureId, Option<Output>)> {
let context = self.create_context();
let procedure = AlterTableProcedure::new(cluster_id, table_id, alter_table_task, context)?;
let procedure = AlterTableProcedure::new(table_id, alter_table_task, context)?;
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
@@ -171,12 +169,11 @@ impl DdlManager {
#[tracing::instrument(skip_all)]
pub async fn submit_create_table_task(
&self,
cluster_id: ClusterId,
create_table_task: CreateTableTask,
) -> Result<(ProcedureId, Option<Output>)> {
let context = self.create_context();
let procedure = CreateTableProcedure::new(cluster_id, create_table_task, context);
let procedure = CreateTableProcedure::new(create_table_task, context);
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
@@ -187,12 +184,11 @@ impl DdlManager {
#[tracing::instrument(skip_all)]
pub async fn submit_create_view_task(
&self,
cluster_id: ClusterId,
create_view_task: CreateViewTask,
) -> Result<(ProcedureId, Option<Output>)> {
let context = self.create_context();
let procedure = CreateViewProcedure::new(cluster_id, create_view_task, context);
let procedure = CreateViewProcedure::new(create_view_task, context);
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
@@ -203,18 +199,13 @@ impl DdlManager {
#[tracing::instrument(skip_all)]
pub async fn submit_create_logical_table_tasks(
&self,
cluster_id: ClusterId,
create_table_tasks: Vec<CreateTableTask>,
physical_table_id: TableId,
) -> Result<(ProcedureId, Option<Output>)> {
let context = self.create_context();
let procedure = CreateLogicalTablesProcedure::new(
cluster_id,
create_table_tasks,
physical_table_id,
context,
);
let procedure =
CreateLogicalTablesProcedure::new(create_table_tasks, physical_table_id, context);
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
@@ -225,18 +216,13 @@ impl DdlManager {
#[tracing::instrument(skip_all)]
pub async fn submit_alter_logical_table_tasks(
&self,
cluster_id: ClusterId,
alter_table_tasks: Vec<AlterTableTask>,
physical_table_id: TableId,
) -> Result<(ProcedureId, Option<Output>)> {
let context = self.create_context();
let procedure = AlterLogicalTablesProcedure::new(
cluster_id,
alter_table_tasks,
physical_table_id,
context,
);
let procedure =
AlterLogicalTablesProcedure::new(alter_table_tasks, physical_table_id, context);
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
@@ -247,12 +233,11 @@ impl DdlManager {
#[tracing::instrument(skip_all)]
pub async fn submit_drop_table_task(
&self,
cluster_id: ClusterId,
drop_table_task: DropTableTask,
) -> Result<(ProcedureId, Option<Output>)> {
let context = self.create_context();
let procedure = DropTableProcedure::new(cluster_id, drop_table_task, context);
let procedure = DropTableProcedure::new(drop_table_task, context);
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
@@ -263,7 +248,6 @@ impl DdlManager {
#[tracing::instrument(skip_all)]
pub async fn submit_create_database(
&self,
_cluster_id: ClusterId,
CreateDatabaseTask {
catalog,
schema,
@@ -283,7 +267,6 @@ impl DdlManager {
#[tracing::instrument(skip_all)]
pub async fn submit_drop_database(
&self,
_cluster_id: ClusterId,
DropDatabaseTask {
catalog,
schema,
@@ -299,11 +282,10 @@ impl DdlManager {
pub async fn submit_alter_database(
&self,
cluster_id: ClusterId,
alter_database_task: AlterDatabaseTask,
) -> Result<(ProcedureId, Option<Output>)> {
let context = self.create_context();
let procedure = AlterDatabaseProcedure::new(cluster_id, alter_database_task, context)?;
let procedure = AlterDatabaseProcedure::new(alter_database_task, context)?;
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
self.submit_procedure(procedure_with_id).await
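Every `submit_*` method in these hunks now follows the same three-step shape: build the procedure from the task and the manager's own context, wrap it with `ProcedureWithId::with_random_id`, and submit. A heavily simplified, synchronous sketch of that shape is below; the types are hypothetical stand-ins, not the real procedure framework.

```rust
// Hypothetical, heavily simplified stand-ins for the procedure framework.
struct ProcedureId(u64);
struct Output;
trait Procedure {}

struct ProcedureWithId {
    id: ProcedureId,
    _procedure: Box<dyn Procedure>,
}

impl ProcedureWithId {
    fn with_random_id(procedure: Box<dyn Procedure>) -> Self {
        // The real implementation generates a random id.
        Self {
            id: ProcedureId(42),
            _procedure: procedure,
        }
    }
}

struct DropTableTask;
struct DropTableProcedure;
impl Procedure for DropTableProcedure {}

struct DdlManager;

impl DdlManager {
    fn submit_procedure(&self, procedure: ProcedureWithId) -> (ProcedureId, Option<Output>) {
        (procedure.id, None)
    }

    // The shared post-refactor shape: no cluster id, just the task.
    fn submit_drop_table_task(&self, task: DropTableTask) -> (ProcedureId, Option<Output>) {
        let _ = task; // real code: DropTableProcedure::new(task, self.create_context())
        let procedure_with_id = ProcedureWithId::with_random_id(Box::new(DropTableProcedure));
        self.submit_procedure(procedure_with_id)
    }
}

fn main() {
    let (id, _) = DdlManager.submit_drop_table_task(DropTableTask);
    println!("submitted procedure {}", id.0);
}
```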
@@ -313,12 +295,11 @@ impl DdlManager {
#[tracing::instrument(skip_all)]
pub async fn submit_create_flow_task(
&self,
cluster_id: ClusterId,
create_flow: CreateFlowTask,
query_context: QueryContext,
) -> Result<(ProcedureId, Option<Output>)> {
let context = self.create_context();
let procedure = CreateFlowProcedure::new(cluster_id, create_flow, query_context, context);
let procedure = CreateFlowProcedure::new(create_flow, query_context, context);
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
self.submit_procedure(procedure_with_id).await
@@ -328,11 +309,10 @@ impl DdlManager {
#[tracing::instrument(skip_all)]
pub async fn submit_drop_flow_task(
&self,
cluster_id: ClusterId,
drop_flow: DropFlowTask,
) -> Result<(ProcedureId, Option<Output>)> {
let context = self.create_context();
let procedure = DropFlowProcedure::new(cluster_id, drop_flow, context);
let procedure = DropFlowProcedure::new(drop_flow, context);
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
self.submit_procedure(procedure_with_id).await
@@ -342,11 +322,10 @@ impl DdlManager {
#[tracing::instrument(skip_all)]
pub async fn submit_drop_view_task(
&self,
cluster_id: ClusterId,
drop_view: DropViewTask,
) -> Result<(ProcedureId, Option<Output>)> {
let context = self.create_context();
let procedure = DropViewProcedure::new(cluster_id, drop_view, context);
let procedure = DropViewProcedure::new(drop_view, context);
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
self.submit_procedure(procedure_with_id).await
@@ -356,14 +335,12 @@ impl DdlManager {
#[tracing::instrument(skip_all)]
pub async fn submit_truncate_table_task(
&self,
cluster_id: ClusterId,
truncate_table_task: TruncateTableTask,
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
region_routes: Vec<RegionRoute>,
) -> Result<(ProcedureId, Option<Output>)> {
let context = self.create_context();
let procedure = TruncateTableProcedure::new(
cluster_id,
truncate_table_task,
table_info_value,
region_routes,
@@ -397,7 +374,6 @@ impl DdlManager {
async fn handle_truncate_table_task(
ddl_manager: &DdlManager,
cluster_id: ClusterId,
truncate_table_task: TruncateTableTask,
) -> Result<SubmitDdlTaskResponse> {
let table_id = truncate_table_task.table_id;
@@ -416,12 +392,7 @@ async fn handle_truncate_table_task(
let table_route = table_route_value.into_inner().region_routes()?.clone();
let (id, _) = ddl_manager
.submit_truncate_table_task(
cluster_id,
truncate_table_task,
table_info_value,
table_route,
)
.submit_truncate_table_task(truncate_table_task, table_info_value, table_route)
.await?;
info!("Table: {table_id} is truncated via procedure_id {id:?}");
@@ -434,7 +405,6 @@ async fn handle_truncate_table_task(
async fn handle_alter_table_task(
ddl_manager: &DdlManager,
cluster_id: ClusterId,
alter_table_task: AlterTableTask,
) -> Result<SubmitDdlTaskResponse> {
let table_ref = alter_table_task.table_ref();
@@ -468,7 +438,7 @@ async fn handle_alter_table_task(
);
let (id, _) = ddl_manager
.submit_alter_table_task(cluster_id, table_id, alter_table_task)
.submit_alter_table_task(table_id, alter_table_task)
.await?;
info!("Table: {table_id} is altered via procedure_id {id:?}");
@@ -481,13 +451,10 @@ async fn handle_alter_table_task(
async fn handle_drop_table_task(
ddl_manager: &DdlManager,
cluster_id: ClusterId,
drop_table_task: DropTableTask,
) -> Result<SubmitDdlTaskResponse> {
let table_id = drop_table_task.table_id;
let (id, _) = ddl_manager
.submit_drop_table_task(cluster_id, drop_table_task)
.await?;
let (id, _) = ddl_manager.submit_drop_table_task(drop_table_task).await?;
info!("Table: {table_id} is dropped via procedure_id {id:?}");
@@ -499,11 +466,10 @@ async fn handle_drop_table_task(
async fn handle_create_table_task(
ddl_manager: &DdlManager,
cluster_id: ClusterId,
create_table_task: CreateTableTask,
) -> Result<SubmitDdlTaskResponse> {
let (id, output) = ddl_manager
.submit_create_table_task(cluster_id, create_table_task)
.submit_create_table_task(create_table_task)
.await?;
let procedure_id = id.to_string();
@@ -525,7 +491,6 @@ async fn handle_create_table_task(
async fn handle_create_logical_table_tasks(
ddl_manager: &DdlManager,
cluster_id: ClusterId,
create_table_tasks: Vec<CreateTableTask>,
) -> Result<SubmitDdlTaskResponse> {
ensure!(
@@ -542,7 +507,7 @@ async fn handle_create_logical_table_tasks(
let num_logical_tables = create_table_tasks.len();
let (id, output) = ddl_manager
.submit_create_logical_table_tasks(cluster_id, create_table_tasks, physical_table_id)
.submit_create_logical_table_tasks(create_table_tasks, physical_table_id)
.await?;
info!("{num_logical_tables} logical tables on physical table: {physical_table_id:?} is created via procedure_id {id:?}");
@@ -568,11 +533,10 @@ async fn handle_create_logical_table_tasks(
async fn handle_create_database_task(
ddl_manager: &DdlManager,
cluster_id: ClusterId,
create_database_task: CreateDatabaseTask,
) -> Result<SubmitDdlTaskResponse> {
let (id, _) = ddl_manager
.submit_create_database(cluster_id, create_database_task.clone())
.submit_create_database(create_database_task.clone())
.await?;
let procedure_id = id.to_string();
@@ -589,11 +553,10 @@ async fn handle_create_database_task(
async fn handle_drop_database_task(
ddl_manager: &DdlManager,
cluster_id: ClusterId,
drop_database_task: DropDatabaseTask,
) -> Result<SubmitDdlTaskResponse> {
let (id, _) = ddl_manager
.submit_drop_database(cluster_id, drop_database_task.clone())
.submit_drop_database(drop_database_task.clone())
.await?;
let procedure_id = id.to_string();
@@ -610,11 +573,10 @@ async fn handle_drop_database_task(
async fn handle_alter_database_task(
ddl_manager: &DdlManager,
cluster_id: ClusterId,
alter_database_task: AlterDatabaseTask,
) -> Result<SubmitDdlTaskResponse> {
let (id, _) = ddl_manager
.submit_alter_database(cluster_id, alter_database_task.clone())
.submit_alter_database(alter_database_task.clone())
.await?;
let procedure_id = id.to_string();
@@ -632,11 +594,10 @@ async fn handle_alter_database_task(
async fn handle_drop_flow_task(
ddl_manager: &DdlManager,
cluster_id: ClusterId,
drop_flow_task: DropFlowTask,
) -> Result<SubmitDdlTaskResponse> {
let (id, _) = ddl_manager
.submit_drop_flow_task(cluster_id, drop_flow_task.clone())
.submit_drop_flow_task(drop_flow_task.clone())
.await?;
let procedure_id = id.to_string();
@@ -653,11 +614,10 @@ async fn handle_drop_flow_task(
async fn handle_drop_view_task(
ddl_manager: &DdlManager,
cluster_id: ClusterId,
drop_view_task: DropViewTask,
) -> Result<SubmitDdlTaskResponse> {
let (id, _) = ddl_manager
.submit_drop_view_task(cluster_id, drop_view_task.clone())
.submit_drop_view_task(drop_view_task.clone())
.await?;
let procedure_id = id.to_string();
@@ -675,12 +635,11 @@ async fn handle_drop_view_task(
async fn handle_create_flow_task(
ddl_manager: &DdlManager,
cluster_id: ClusterId,
create_flow_task: CreateFlowTask,
query_context: QueryContext,
) -> Result<SubmitDdlTaskResponse> {
let (id, output) = ddl_manager
.submit_create_flow_task(cluster_id, create_flow_task.clone(), query_context)
.submit_create_flow_task(create_flow_task.clone(), query_context)
.await?;
let procedure_id = id.to_string();
@@ -712,7 +671,6 @@ async fn handle_create_flow_task(
async fn handle_alter_logical_table_tasks(
ddl_manager: &DdlManager,
cluster_id: ClusterId,
alter_table_tasks: Vec<AlterTableTask>,
) -> Result<SubmitDdlTaskResponse> {
ensure!(
@@ -733,7 +691,7 @@ async fn handle_alter_logical_table_tasks(
let num_logical_tables = alter_table_tasks.len();
let (id, _) = ddl_manager
.submit_alter_logical_table_tasks(cluster_id, alter_table_tasks, physical_table_id)
.submit_alter_logical_table_tasks(alter_table_tasks, physical_table_id)
.await?;
info!("{num_logical_tables} logical tables on physical table: {physical_table_id:?} is altered via procedure_id {id:?}");
@@ -749,11 +707,10 @@ async fn handle_alter_logical_table_tasks(
/// Handles the [`CreateViewTask`] and returns the DDL response on success.
async fn handle_create_view_task(
ddl_manager: &DdlManager,
cluster_id: ClusterId,
create_view_task: CreateViewTask,
) -> Result<SubmitDdlTaskResponse> {
let (id, output) = ddl_manager
.submit_create_view_task(cluster_id, create_view_task)
.submit_create_view_task(create_view_task)
.await?;
let procedure_id = id.to_string();
@@ -788,55 +745,43 @@ impl ProcedureExecutor for DdlManager {
.unwrap_or(TracingContext::from_current_span())
.attach(tracing::info_span!("DdlManager::submit_ddl_task"));
async move {
let cluster_id = ctx.cluster_id.unwrap_or_default();
debug!("Submitting Ddl task: {:?}", request.task);
match request.task {
CreateTable(create_table_task) => {
handle_create_table_task(self, cluster_id, create_table_task).await
}
DropTable(drop_table_task) => {
handle_drop_table_task(self, cluster_id, drop_table_task).await
handle_create_table_task(self, create_table_task).await
}
DropTable(drop_table_task) => handle_drop_table_task(self, drop_table_task).await,
AlterTable(alter_table_task) => {
handle_alter_table_task(self, cluster_id, alter_table_task).await
handle_alter_table_task(self, alter_table_task).await
}
TruncateTable(truncate_table_task) => {
handle_truncate_table_task(self, cluster_id, truncate_table_task).await
handle_truncate_table_task(self, truncate_table_task).await
}
CreateLogicalTables(create_table_tasks) => {
handle_create_logical_table_tasks(self, cluster_id, create_table_tasks).await
handle_create_logical_table_tasks(self, create_table_tasks).await
}
AlterLogicalTables(alter_table_tasks) => {
handle_alter_logical_table_tasks(self, cluster_id, alter_table_tasks).await
handle_alter_logical_table_tasks(self, alter_table_tasks).await
}
DropLogicalTables(_) => todo!(),
CreateDatabase(create_database_task) => {
handle_create_database_task(self, cluster_id, create_database_task).await
handle_create_database_task(self, create_database_task).await
}
DropDatabase(drop_database_task) => {
handle_drop_database_task(self, cluster_id, drop_database_task).await
handle_drop_database_task(self, drop_database_task).await
}
AlterDatabase(alter_database_task) => {
handle_alter_database_task(self, cluster_id, alter_database_task).await
handle_alter_database_task(self, alter_database_task).await
}
CreateFlow(create_flow_task) => {
handle_create_flow_task(
self,
cluster_id,
create_flow_task,
request.query_context.into(),
)
.await
}
DropFlow(drop_flow_task) => {
handle_drop_flow_task(self, cluster_id, drop_flow_task).await
handle_create_flow_task(self, create_flow_task, request.query_context.into())
.await
}
DropFlow(drop_flow_task) => handle_drop_flow_task(self, drop_flow_task).await,
CreateView(create_view_task) => {
handle_create_view_task(self, cluster_id, create_view_task).await
}
DropView(drop_view_task) => {
handle_drop_view_task(self, cluster_id, drop_view_task).await
handle_create_view_task(self, create_view_task).await
}
DropView(drop_view_task) => handle_drop_view_task(self, drop_view_task).await,
}
}
.trace(span)

View File

@@ -26,11 +26,10 @@ use crate::flow_name::FlowName;
use crate::key::schema_name::SchemaName;
use crate::key::FlowId;
use crate::peer::Peer;
use crate::{ClusterId, DatanodeId, FlownodeId};
use crate::{DatanodeId, FlownodeId};
#[derive(Eq, Hash, PartialEq, Clone, Debug, Serialize, Deserialize)]
pub struct RegionIdent {
pub cluster_id: ClusterId,
pub datanode_id: DatanodeId,
pub table_id: TableId,
pub region_number: RegionNumber,
@@ -47,8 +46,8 @@ impl Display for RegionIdent {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(
f,
"RegionIdent(datanode_id='{}.{}', table_id={}, region_number={}, engine = {})",
self.cluster_id, self.datanode_id, self.table_id, self.region_number, self.engine
"RegionIdent(datanode_id='{}', table_id={}, region_number={}, engine = {})",
self.datanode_id, self.table_id, self.region_number, self.engine
)
}
}
@@ -262,7 +261,6 @@ mod tests {
fn test_serialize_instruction() {
let open_region = Instruction::OpenRegion(OpenRegion::new(
RegionIdent {
cluster_id: 1,
datanode_id: 2,
table_id: 1024,
region_number: 1,
@@ -277,12 +275,11 @@ mod tests {
let serialized = serde_json::to_string(&open_region).unwrap();
assert_eq!(
r#"{"OpenRegion":{"region_ident":{"cluster_id":1,"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"},"region_storage_path":"test/foo","region_options":{},"region_wal_options":{},"skip_wal_replay":false}}"#,
r#"{"OpenRegion":{"region_ident":{"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"},"region_storage_path":"test/foo","region_options":{},"region_wal_options":{},"skip_wal_replay":false}}"#,
serialized
);
let close_region = Instruction::CloseRegion(RegionIdent {
cluster_id: 1,
datanode_id: 2,
table_id: 1024,
region_number: 1,
@@ -292,7 +289,7 @@ mod tests {
let serialized = serde_json::to_string(&close_region).unwrap();
assert_eq!(
r#"{"CloseRegion":{"cluster_id":1,"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"}}"#,
r#"{"CloseRegion":{"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"}}"#,
serialized
);
}
@@ -307,7 +304,6 @@ mod tests {
#[test]
fn test_compatible_serialize_open_region() {
let region_ident = RegionIdent {
cluster_id: 1,
datanode_id: 2,
table_id: 1024,
region_number: 1,
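
Since serde ignores unknown fields by default, instruction payloads written before this change (which still carry `cluster_id`) keep deserializing into the slimmed struct; that is the property the `test_compatible_serialize_open_region` hunk above is about. A minimal standalone sketch of the same behavior, using an illustrative mirror struct rather than the crate's actual type:

```rust
use serde::Deserialize;

// Illustrative mirror of the slimmed-down `RegionIdent`; the field types are
// assumptions chosen to keep the sketch self-contained.
#[derive(Debug, Deserialize)]
struct RegionIdent {
    datanode_id: u64,
    table_id: u32,
    region_number: u32,
    engine: String,
}

fn main() {
    // A payload produced by an older node still contains `cluster_id`;
    // serde drops the unknown field, so deserialization succeeds.
    let legacy = r#"{"cluster_id":1,"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"}"#;
    let ident: RegionIdent = serde_json::from_str(legacy).unwrap();
    assert_eq!(2, ident.datanode_id);
    assert_eq!(1024, ident.table_id);
}
```

Note that adding `#[serde(deny_unknown_fields)]` to the real struct would break this compatibility path.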

View File

@@ -47,8 +47,6 @@ pub mod test_util;
pub mod util;
pub mod wal_options_allocator;
// The id of the cluster.
pub type ClusterId = u64;
// The id of the datanode.
pub type DatanodeId = u64;
// The id of the flownode.

View File

@@ -99,7 +99,7 @@ impl NodeExpiryListener {
in_memory: &ResettableKvBackendRef,
max_idle_time: Duration,
) -> error::Result<impl Iterator<Item = NodeInfoKey>> {
let prefix = NodeInfoKey::key_prefix_with_cluster_id(0);
let prefix = NodeInfoKey::key_prefix();
let req = RangeRequest::new().with_prefix(prefix);
let current_time_millis = common_time::util::current_time_millis();
let resp = in_memory.range(req).await?;

View File

@@ -19,7 +19,7 @@ use api::v1::meta::Peer as PbPeer;
use serde::{Deserialize, Serialize};
use crate::error::Error;
use crate::{ClusterId, DatanodeId, FlownodeId};
use crate::{DatanodeId, FlownodeId};
#[derive(Debug, Default, Clone, Hash, Eq, PartialEq, Deserialize, Serialize)]
pub struct Peer {
@@ -72,8 +72,8 @@ impl Display for Peer {
/// Can query a peer given a node id.
#[async_trait::async_trait]
pub trait PeerLookupService {
async fn datanode(&self, cluster_id: ClusterId, id: DatanodeId) -> Result<Option<Peer>, Error>;
async fn flownode(&self, cluster_id: ClusterId, id: FlownodeId) -> Result<Option<Peer>, Error>;
async fn datanode(&self, id: DatanodeId) -> Result<Option<Peer>, Error>;
async fn flownode(&self, id: FlownodeId) -> Result<Option<Peer>, Error>;
}
pub type PeerLookupServiceRef = Arc<dyn PeerLookupService + Send + Sync>;

View File

@@ -31,11 +31,6 @@ impl ResponseHeader {
self.0.protocol_version
}
#[inline]
pub fn cluster_id(&self) -> u64 {
self.0.cluster_id
}
#[inline]
pub fn error_code(&self) -> i32 {
match self.0.error.as_ref() {
@@ -143,7 +138,6 @@ mod tests {
fn test_response_header_trans() {
let pb_header = PbResponseHeader {
protocol_version: 101,
cluster_id: 1,
error: Some(Error {
code: 100,
err_msg: "test".to_string(),
@@ -152,7 +146,6 @@ mod tests {
let header = ResponseHeader(pb_header);
assert_eq!(101, header.protocol_version());
assert_eq!(1, header.cluster_id());
assert_eq!(100, header.error_code());
assert_eq!("test".to_string(), header.error_msg());
}

View File

@@ -37,7 +37,7 @@ use crate::peer::{Peer, PeerLookupService};
use crate::region_keeper::MemoryRegionKeeper;
use crate::sequence::SequenceBuilder;
use crate::wal_options_allocator::WalOptionsAllocator;
use crate::{ClusterId, DatanodeId, FlownodeId};
use crate::{DatanodeId, FlownodeId};
#[async_trait::async_trait]
pub trait MockDatanodeHandler: Sync + Send + Clone {
@@ -189,11 +189,11 @@ pub struct NoopPeerLookupService;
#[async_trait::async_trait]
impl PeerLookupService for NoopPeerLookupService {
async fn datanode(&self, _cluster_id: ClusterId, id: DatanodeId) -> Result<Option<Peer>> {
async fn datanode(&self, id: DatanodeId) -> Result<Option<Peer>> {
Ok(Some(Peer::empty(id)))
}
async fn flownode(&self, _cluster_id: ClusterId, id: FlownodeId) -> Result<Option<Peer>> {
async fn flownode(&self, id: FlownodeId) -> Result<Option<Peer>> {
Ok(Some(Peer::empty(id)))
}
}

View File

@@ -235,7 +235,6 @@ mod tests {
Instruction::CloseRegion(RegionIdent {
table_id: region_id.table_id(),
region_number: region_id.region_number(),
cluster_id: 1,
datanode_id: 2,
engine: MITO_ENGINE_NAME.to_string(),
})
@@ -246,7 +245,6 @@ mod tests {
RegionIdent {
table_id: region_id.table_id(),
region_number: region_id.region_number(),
cluster_id: 1,
datanode_id: 2,
engine: MITO_ENGINE_NAME.to_string(),
},

View File

@@ -103,7 +103,6 @@ impl Default for FlowConfig {
#[serde(default)]
pub struct FlownodeOptions {
pub mode: Mode,
pub cluster_id: Option<u64>,
pub node_id: Option<u64>,
pub flow: FlowConfig,
pub grpc: GrpcOptions,
@@ -118,7 +117,6 @@ impl Default for FlownodeOptions {
fn default() -> Self {
Self {
mode: servers::Mode::Standalone,
cluster_id: None,
node_id: None,
flow: FlowConfig::default(),
grpc: GrpcOptions::default().with_bind_addr("127.0.0.1:3004"),

View File

@@ -31,13 +31,13 @@ fn main() {
#[tokio::main]
async fn run() {
let id = (1000u64, 2000u64);
let id = 2000u64;
let config = ChannelConfig::new()
.timeout(Duration::from_secs(3))
.connect_timeout(Duration::from_secs(5))
.tcp_nodelay(true);
let channel_manager = ChannelManager::with_config(config);
let mut meta_client = MetaClientBuilder::datanode_default_options(id.0, id.1)
let mut meta_client = MetaClientBuilder::datanode_default_options(id)
.channel_manager(channel_manager)
.build();
meta_client.start(&["127.0.0.1:3002"]).await.unwrap();

View File

@@ -47,7 +47,6 @@ use common_meta::rpc::store::{
DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
};
use common_meta::rpc::KeyValue;
use common_meta::ClusterId;
use common_telemetry::info;
use futures::TryStreamExt;
use heartbeat::Client as HeartbeatClient;
@@ -61,7 +60,7 @@ use crate::error::{
Result,
};
pub type Id = (u64, u64);
pub type Id = u64;
const DEFAULT_ASK_LEADER_MAX_RETRY: usize = 3;
const DEFAULT_SUBMIT_DDL_MAX_RETRY: usize = 3;
@@ -81,18 +80,18 @@ pub struct MetaClientBuilder {
}
impl MetaClientBuilder {
pub fn new(cluster_id: ClusterId, member_id: u64, role: Role) -> Self {
pub fn new(member_id: u64, role: Role) -> Self {
Self {
id: (cluster_id, member_id),
id: member_id,
role,
..Default::default()
}
}
/// Returns the role of Frontend's default options.
pub fn frontend_default_options(cluster_id: ClusterId) -> Self {
pub fn frontend_default_options() -> Self {
// Frontend does not need a member id.
Self::new(cluster_id, 0, Role::Frontend)
Self::new(0, Role::Frontend)
.enable_store()
.enable_heartbeat()
.enable_procedure()
@@ -100,15 +99,15 @@ impl MetaClientBuilder {
}
/// Returns the role of Datanode's default options.
pub fn datanode_default_options(cluster_id: ClusterId, member_id: u64) -> Self {
Self::new(cluster_id, member_id, Role::Datanode)
pub fn datanode_default_options(member_id: u64) -> Self {
Self::new(member_id, Role::Datanode)
.enable_store()
.enable_heartbeat()
}
/// Returns the role of Flownode's default options.
pub fn flownode_default_options(cluster_id: ClusterId, member_id: u64) -> Self {
Self::new(cluster_id, member_id, Role::Flownode)
pub fn flownode_default_options(member_id: u64) -> Self {
Self::new(member_id, Role::Flownode)
.enable_store()
.enable_heartbeat()
.enable_procedure()
@@ -273,15 +272,9 @@ impl ClusterInfo for MetaClient {
let cluster_client = self.cluster_client()?;
let (get_metasrv_nodes, nodes_key_prefix) = match role {
None => (
true,
Some(NodeInfoKey::key_prefix_with_cluster_id(self.id.0)),
),
None => (true, Some(NodeInfoKey::key_prefix())),
Some(ClusterRole::Metasrv) => (true, None),
Some(role) => (
false,
Some(NodeInfoKey::key_prefix_with_role(self.id.0, role)),
),
Some(role) => (false, Some(NodeInfoKey::key_prefix_with_role(role))),
};
let mut nodes = if get_metasrv_nodes {
@@ -324,7 +317,7 @@ impl ClusterInfo for MetaClient {
async fn list_region_stats(&self) -> Result<Vec<RegionStat>> {
let cluster_kv_backend = Arc::new(self.cluster_client()?);
let range_prefix = DatanodeStatKey::key_prefix_with_cluster_id(self.id.0);
let range_prefix = DatanodeStatKey::prefix_key();
let req = RangeRequest::new().with_prefix(range_prefix);
let stream =
PaginationStream::new(cluster_kv_backend, req, 256, decode_stats).into_stream();
@@ -555,6 +548,8 @@ impl MetaClient {
#[cfg(test)]
mod tests {
use std::sync::atomic::{AtomicUsize, Ordering};
use api::v1::meta::{HeartbeatRequest, Peer};
use common_meta::kv_backend::{KvBackendRef, ResettableKvBackendRef};
use rand::Rng;
@@ -624,31 +619,31 @@ mod tests {
async fn test_meta_client_builder() {
let urls = &["127.0.0.1:3001", "127.0.0.1:3002"];
let mut meta_client = MetaClientBuilder::new(0, 0, Role::Datanode)
let mut meta_client = MetaClientBuilder::new(0, Role::Datanode)
.enable_heartbeat()
.build();
let _ = meta_client.heartbeat_client().unwrap();
assert!(meta_client.store_client().is_err());
meta_client.start(urls).await.unwrap();
let mut meta_client = MetaClientBuilder::new(0, 0, Role::Datanode).build();
let mut meta_client = MetaClientBuilder::new(0, Role::Datanode).build();
assert!(meta_client.heartbeat_client().is_err());
assert!(meta_client.store_client().is_err());
meta_client.start(urls).await.unwrap();
let mut meta_client = MetaClientBuilder::new(0, 0, Role::Datanode)
let mut meta_client = MetaClientBuilder::new(0, Role::Datanode)
.enable_store()
.build();
assert!(meta_client.heartbeat_client().is_err());
let _ = meta_client.store_client().unwrap();
meta_client.start(urls).await.unwrap();
let mut meta_client = MetaClientBuilder::new(1, 2, Role::Datanode)
let mut meta_client = MetaClientBuilder::new(2, Role::Datanode)
.enable_heartbeat()
.enable_store()
.build();
assert_eq!(1, meta_client.id().0);
assert_eq!(2, meta_client.id().1);
assert_eq!(2, meta_client.id());
let _ = meta_client.heartbeat_client().unwrap();
let _ = meta_client.store_client().unwrap();
meta_client.start(urls).await.unwrap();
@@ -657,7 +652,7 @@ mod tests {
#[tokio::test]
async fn test_not_start_heartbeat_client() {
let urls = &["127.0.0.1:3001", "127.0.0.1:3002"];
let mut meta_client = MetaClientBuilder::new(0, 0, Role::Datanode)
let mut meta_client = MetaClientBuilder::new(0, Role::Datanode)
.enable_store()
.build();
meta_client.start(urls).await.unwrap();
@@ -668,7 +663,7 @@ mod tests {
#[tokio::test]
async fn test_not_start_store_client() {
let urls = &["127.0.0.1:3001", "127.0.0.1:3002"];
let mut meta_client = MetaClientBuilder::new(0, 0, Role::Datanode)
let mut meta_client = MetaClientBuilder::new(0, Role::Datanode)
.enable_heartbeat()
.build();
@@ -688,6 +683,9 @@ mod tests {
let tc = new_client("test_heartbeat").await;
let (sender, mut receiver) = tc.client.heartbeat().await.unwrap();
// send heartbeats
let request_sent = Arc::new(AtomicUsize::new(0));
let request_sent_clone = request_sent.clone();
let _handle = tokio::spawn(async move {
for _ in 0..5 {
let req = HeartbeatRequest {
@@ -698,14 +696,24 @@ mod tests {
..Default::default()
};
sender.send(req).await.unwrap();
request_sent_clone.fetch_add(1, Ordering::Relaxed);
}
});
let _handle = tokio::spawn(async move {
while let Some(res) = receiver.message().await.unwrap() {
assert_eq!(1000, res.header.unwrap().cluster_id);
let heartbeat_count = Arc::new(AtomicUsize::new(0));
let heartbeat_count_clone = heartbeat_count.clone();
let handle = tokio::spawn(async move {
while let Some(_resp) = receiver.message().await.unwrap() {
heartbeat_count_clone.fetch_add(1, Ordering::Relaxed);
}
});
handle.await.unwrap();
// +1 for the initial response.
assert_eq!(
request_sent.load(Ordering::Relaxed) + 1,
heartbeat_count.load(Ordering::Relaxed)
);
}
#[tokio::test]

View File

@@ -272,7 +272,7 @@ mod test {
#[tokio::test]
async fn test_already_start() {
let mut client = Client::new((0, 0), Role::Datanode, ChannelManager::default(), 3);
let mut client = Client::new(0, Role::Datanode, ChannelManager::default(), 3);
client
.start(&["127.0.0.1:1000", "127.0.0.1:1001"])
.await
@@ -288,7 +288,7 @@ mod test {
#[tokio::test]
async fn test_heartbeat_stream() {
let (sender, mut receiver) = mpsc::channel::<HeartbeatRequest>(100);
let sender = HeartbeatSender::new((8, 8), Role::Datanode, sender);
let sender = HeartbeatSender::new(8, Role::Datanode, sender);
let _handle = tokio::spawn(async move {
for _ in 0..10 {
sender.send(HeartbeatRequest::default()).await.unwrap();
@@ -296,7 +296,6 @@ mod test {
});
while let Some(req) = receiver.recv().await {
let header = req.header.unwrap();
assert_eq!(8, header.cluster_id);
assert_eq!(8, header.member_id);
}
}

View File

@@ -255,7 +255,7 @@ mod test {
#[tokio::test]
async fn test_already_start() {
let mut client = Client::new((0, 0), Role::Frontend, ChannelManager::default());
let mut client = Client::new(0, Role::Frontend, ChannelManager::default());
client
.start(&["127.0.0.1:1000", "127.0.0.1:1001"])
.await
@@ -270,7 +270,7 @@ mod test {
#[tokio::test]
async fn test_start_with_duplicate_peers() {
let mut client = Client::new((0, 0), Role::Frontend, ChannelManager::default());
let mut client = Client::new(0, Role::Frontend, ChannelManager::default());
client
.start(&["127.0.0.1:1000", "127.0.0.1:1000", "127.0.0.1:1000"])
.await

View File

@@ -71,23 +71,22 @@ pub enum MetaClientType {
pub type MetaClientRef = Arc<client::MetaClient>;
pub async fn create_meta_client(
cluster_id: u64,
client_type: MetaClientType,
meta_client_options: &MetaClientOptions,
) -> error::Result<MetaClientRef> {
info!(
"Creating {:?} instance from cluster {} with Metasrv addrs {:?}",
client_type, cluster_id, meta_client_options.metasrv_addrs
"Creating {:?} instance with Metasrv addrs {:?}",
client_type, meta_client_options.metasrv_addrs
);
let mut builder = match client_type {
MetaClientType::Datanode { member_id } => {
MetaClientBuilder::datanode_default_options(cluster_id, member_id)
MetaClientBuilder::datanode_default_options(member_id)
}
MetaClientType::Flownode { member_id } => {
MetaClientBuilder::flownode_default_options(cluster_id, member_id)
MetaClientBuilder::flownode_default_options(member_id)
}
MetaClientType::Frontend => MetaClientBuilder::frontend_default_options(cluster_id),
MetaClientType::Frontend => MetaClientBuilder::frontend_default_options(),
};
let base_config = ChannelConfig::new()

View File

@@ -60,8 +60,8 @@ pub async fn mock_client_with_etcdstore(addr: &str) -> (MetaClient, MockMetaCont
}
pub async fn mock_client_by(server_addr: String, channel_manager: ChannelManager) -> MetaClient {
let id = (1000u64, 2000u64);
let mut meta_client = MetaClientBuilder::datanode_default_options(id.0, id.1)
let id = 2000u64;
let mut meta_client = MetaClientBuilder::datanode_default_options(id)
.enable_access_cluster_info()
.channel_manager(channel_manager)
.build();

View File

@@ -375,13 +375,9 @@ mod tests {
#[test]
fn test_to_stat_kv_map() {
let stat_key = DatanodeStatKey {
cluster_id: 0,
node_id: 100,
};
let stat_key = DatanodeStatKey { node_id: 100 };
let stat = Stat {
cluster_id: 0,
id: 100,
addr: "127.0.0.1:3001".to_string(),
..Default::default()
@@ -400,7 +396,6 @@ mod tests {
let stat_val = kv_map.get(&stat_key).unwrap();
let stat = stat_val.stats.first().unwrap();
assert_eq!(0, stat.cluster_id);
assert_eq!(100, stat.id);
assert_eq!("127.0.0.1:3001", stat.addr);
}

View File

@@ -15,7 +15,6 @@
use common_error::ext::BoxedError;
use common_meta::ddl::flow_meta::PartitionPeerAllocator;
use common_meta::peer::Peer;
use common_meta::ClusterId;
use snafu::ResultExt;
use crate::metasrv::{SelectorContext, SelectorRef};
@@ -34,14 +33,9 @@ impl FlowPeerAllocator {
#[async_trait::async_trait]
impl PartitionPeerAllocator for FlowPeerAllocator {
async fn alloc(
&self,
cluster_id: ClusterId,
partitions: usize,
) -> common_meta::error::Result<Vec<Peer>> {
async fn alloc(&self, partitions: usize) -> common_meta::error::Result<Vec<Peer>> {
self.selector
.select(
cluster_id,
&self.ctx,
SelectorOptions {
min_required_items: partitions,

View File

@@ -20,8 +20,8 @@ use std::time::{Duration, Instant};
use api::v1::meta::mailbox_message::Payload;
use api::v1::meta::{
HeartbeatRequest, HeartbeatResponse, MailboxMessage, RegionLease, RequestHeader,
ResponseHeader, Role, PROTOCOL_VERSION,
HeartbeatRequest, HeartbeatResponse, MailboxMessage, RegionLease, ResponseHeader, Role,
PROTOCOL_VERSION,
};
use check_leader_handler::CheckLeaderHandler;
use collect_cluster_info_handler::{
@@ -153,13 +153,9 @@ pub struct Pusher {
}
impl Pusher {
pub fn new(
sender: Sender<std::result::Result<HeartbeatResponse, tonic::Status>>,
req_header: &RequestHeader,
) -> Self {
pub fn new(sender: Sender<std::result::Result<HeartbeatResponse, tonic::Status>>) -> Self {
let res_header = ResponseHeader {
protocol_version: PROTOCOL_VERSION,
cluster_id: req_header.cluster_id,
..Default::default()
};
@@ -772,7 +768,7 @@ mod tests {
use std::sync::Arc;
use std::time::Duration;
use api::v1::meta::{MailboxMessage, RequestHeader, Role, PROTOCOL_VERSION};
use api::v1::meta::{MailboxMessage, Role};
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_meta::sequence::SequenceBuilder;
use tokio::sync::mpsc;
@@ -814,12 +810,8 @@ mod tests {
async fn push_msg_via_mailbox() -> (MailboxRef, MailboxReceiver) {
let datanode_id = 12;
let (pusher_tx, mut pusher_rx) = mpsc::channel(16);
let res_header = RequestHeader {
protocol_version: PROTOCOL_VERSION,
..Default::default()
};
let pusher_id = PusherId::new(Role::Datanode, datanode_id);
let pusher: Pusher = Pusher::new(pusher_tx, &res_header);
let pusher: Pusher = Pusher::new(pusher_tx);
let handler_group = HeartbeatHandlerGroup::default();
handler_group.register_pusher(pusher_id, pusher).await;

View File

@@ -262,15 +262,11 @@ mod tests {
let handler = CollectStatsHandler::default();
handle_request_many_times(ctx.clone(), &handler, 1).await;
let key = DatanodeStatKey {
cluster_id: 3,
node_id: 101,
};
let key = DatanodeStatKey { node_id: 101 };
let key: Vec<u8> = key.into();
let res = ctx.in_memory.get(&key).await.unwrap();
let kv = res.unwrap();
let key: DatanodeStatKey = kv.key.clone().try_into().unwrap();
assert_eq!(3, key.cluster_id);
assert_eq!(101, key.node_id);
let val: DatanodeStatValue = kv.value.try_into().unwrap();
// first new stat must be set in kv store immediately
@@ -295,7 +291,6 @@ mod tests {
for i in 1..=loop_times {
let mut acc = HeartbeatAccumulator {
stat: Some(Stat {
cluster_id: 3,
id: 101,
region_num: i as _,
..Default::default()

View File

@@ -101,7 +101,6 @@ mod tests {
}
}
acc.stat = Some(Stat {
cluster_id: 1,
id: 42,
region_stats: vec![new_region_stat(1), new_region_stat(2), new_region_stat(3)],
timestamp_millis: 1000,

View File

@@ -38,17 +38,14 @@ impl HeartbeatHandler for DatanodeKeepLeaseHandler {
_acc: &mut HeartbeatAccumulator,
) -> Result<HandleControl> {
let HeartbeatRequest { header, peer, .. } = req;
let Some(header) = &header else {
let Some(_header) = &header else {
return Ok(HandleControl::Continue);
};
let Some(peer) = &peer else {
return Ok(HandleControl::Continue);
};
let key = DatanodeLeaseKey {
cluster_id: header.cluster_id,
node_id: peer.id,
};
let key = DatanodeLeaseKey { node_id: peer.id };
let value = LeaseValue {
timestamp_millis: time_util::current_time_millis(),
node_addr: peer.addr.clone(),
@@ -80,17 +77,14 @@ impl HeartbeatHandler for FlownodeKeepLeaseHandler {
_acc: &mut HeartbeatAccumulator,
) -> Result<HandleControl> {
let HeartbeatRequest { header, peer, .. } = req;
let Some(header) = &header else {
let Some(_header) = &header else {
return Ok(HandleControl::Continue);
};
let Some(peer) = &peer else {
return Ok(HandleControl::Continue);
};
let key = FlownodeLeaseKey {
cluster_id: header.cluster_id,
node_id: peer.id,
};
let key = FlownodeLeaseKey { node_id: peer.id };
let value = LeaseValue {
timestamp_millis: time_util::current_time_millis(),
node_addr: peer.addr.clone(),

View File

@@ -64,7 +64,6 @@ impl HeartbeatHandler for RegionLeaseHandler {
};
let regions = stat.regions();
let cluster_id = stat.cluster_id;
let datanode_id = stat.id;
let RenewRegionLeasesResponse {
@@ -72,7 +71,7 @@ impl HeartbeatHandler for RegionLeaseHandler {
renewed,
} = self
.region_lease_keeper
.renew_region_leases(cluster_id, datanode_id, &regions)
.renew_region_leases(datanode_id, &regions)
.await?;
let renewed = renewed
@@ -153,7 +152,6 @@ mod test {
let peer = Peer::empty(datanode_id);
let follower_peer = Peer::empty(datanode_id + 1);
let table_info = new_test_table_info(table_id, vec![region_number]).into();
let cluster_id = 1;
let region_routes = vec![RegionRoute {
region: Region::new_test(region_id),
@@ -181,7 +179,6 @@ mod test {
let acc = &mut HeartbeatAccumulator::default();
acc.stat = Some(Stat {
cluster_id,
id: peer.id,
region_stats: vec![
new_empty_region_stat(region_id, RegionRole::Follower),
@@ -215,7 +212,6 @@ mod test {
let acc = &mut HeartbeatAccumulator::default();
acc.stat = Some(Stat {
cluster_id,
id: follower_peer.id,
region_stats: vec![
new_empty_region_stat(region_id, RegionRole::Follower),
@@ -249,7 +245,6 @@ mod test {
let acc = &mut HeartbeatAccumulator::default();
acc.stat = Some(Stat {
cluster_id,
id: follower_peer.id,
region_stats: vec![
new_empty_region_stat(region_id, RegionRole::Follower),
@@ -292,7 +287,6 @@ mod test {
let peer = Peer::empty(datanode_id);
let follower_peer = Peer::empty(datanode_id + 1);
let table_info = new_test_table_info(table_id, vec![region_number]).into();
let cluster_id = 1;
let region_routes = vec![
RegionRoute {
@@ -333,7 +327,6 @@ mod test {
let acc = &mut HeartbeatAccumulator::default();
acc.stat = Some(Stat {
cluster_id,
id: peer.id,
region_stats: vec![
new_empty_region_stat(region_id, RegionRole::Leader),

View File

@@ -28,18 +28,15 @@ impl HeartbeatHandler for ResponseHeaderHandler {
async fn handle(
&self,
req: &HeartbeatRequest,
_req: &HeartbeatRequest,
_ctx: &mut Context,
acc: &mut HeartbeatAccumulator,
) -> Result<HandleControl> {
let HeartbeatRequest { header, .. } = req;
let res_header = ResponseHeader {
protocol_version: PROTOCOL_VERSION,
cluster_id: header.as_ref().map_or(0, |h| h.cluster_id),
..Default::default()
};
acc.header = Some(res_header);
Ok(HandleControl::Continue)
}
}
@@ -48,7 +45,7 @@ impl HeartbeatHandler for ResponseHeaderHandler {
mod tests {
use std::sync::Arc;
use api::v1::meta::{HeartbeatResponse, RequestHeader};
use api::v1::meta::RequestHeader;
use common_meta::cache_invalidator::DummyCacheInvalidator;
use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::memory::MemoryKvBackend;
@@ -90,7 +87,7 @@ mod tests {
};
let req = HeartbeatRequest {
header: Some(RequestHeader::new((1, 2), Role::Datanode, W3cTrace::new())),
header: Some(RequestHeader::new(2, Role::Datanode, W3cTrace::new())),
..Default::default()
};
let mut acc = HeartbeatAccumulator::default();
@@ -100,12 +97,5 @@ mod tests {
.handle(&req, &mut ctx, &mut acc)
.await
.unwrap();
let header = std::mem::take(&mut acc.header);
let res = HeartbeatResponse {
header,
mailbox_message: acc.into_mailbox_message(),
..Default::default()
};
assert_eq!(1, res.header.unwrap().cluster_id);
}
}

View File

@@ -35,20 +35,12 @@ macro_rules! impl_from_str_lease_key {
.context(error::InvalidLeaseKeySnafu { key })?;
ensure!(caps.len() == 3, error::InvalidLeaseKeySnafu { key });
let cluster_id = caps[1].to_string();
let node_id = caps[2].to_string();
let cluster_id: u64 = cluster_id.parse().context(error::ParseNumSnafu {
err_msg: format!("invalid cluster_id: {cluster_id}"),
})?;
let node_id: u64 = node_id.parse().context(error::ParseNumSnafu {
err_msg: format!("invalid node_id: {node_id}"),
})?;
Ok(Self {
cluster_id,
node_id,
})
Ok(Self { node_id })
}
}
};
@@ -73,7 +65,7 @@ macro_rules! impl_try_from_lease_key {
type Error = error::Error;
fn try_from(key: $key_type) -> error::Result<Self> {
Ok(format!("{}-{}-{}", $prefix, key.cluster_id, key.node_id).into_bytes())
Ok(format!("{}-0-{}", $prefix, key.node_id).into_bytes())
}
}
};
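
Note the literal `0` kept where the cluster id used to be interpolated: encoded keys stay byte-for-byte identical to those written when the cluster id was 0, so existing lease entries remain readable. A sketch of the layout; the prefix value below is an assumption for illustration only:

```rust
// Assumed prefix value, for illustration only.
const DATANODE_LEASE_PREFIX: &str = "__meta_datanode_lease";

// Old layout: "{prefix}-{cluster_id}-{node_id}"
// New layout: "{prefix}-0-{node_id}", i.e. the same bytes when cluster_id was 0.
fn lease_key(node_id: u64) -> Vec<u8> {
    format!("{DATANODE_LEASE_PREFIX}-0-{node_id}").into_bytes()
}

fn main() {
    assert_eq!(b"__meta_datanode_lease-0-42".to_vec(), lease_key(42));
}
```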

View File

@@ -15,7 +15,6 @@
use std::str::FromStr;
use common_meta::datanode::DatanodeStatKey;
use common_meta::ClusterId;
use lazy_static::lazy_static;
use regex::Regex;
use serde::{Deserialize, Serialize};
@@ -42,20 +41,18 @@ lazy_static! {
#[derive(Debug, Clone, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct DatanodeLeaseKey {
pub cluster_id: ClusterId,
pub node_id: u64,
}
impl DatanodeLeaseKey {
pub fn prefix_key_by_cluster(cluster_id: ClusterId) -> Vec<u8> {
format!("{DATANODE_LEASE_PREFIX}-{cluster_id}-").into_bytes()
pub fn prefix_key() -> Vec<u8> {
format!("{DATANODE_LEASE_PREFIX}-0-").into_bytes()
}
}
impl From<&DatanodeLeaseKey> for DatanodeStatKey {
fn from(lease_key: &DatanodeLeaseKey) -> Self {
DatanodeStatKey {
cluster_id: lease_key.cluster_id,
node_id: lease_key.node_id,
}
}
@@ -63,22 +60,21 @@ impl From<&DatanodeLeaseKey> for DatanodeStatKey {
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Serialize, Deserialize)]
pub struct InactiveRegionKey {
pub cluster_id: ClusterId,
pub node_id: u64,
pub region_id: u64,
}
impl InactiveRegionKey {
pub fn get_prefix_by_cluster(cluster_id: u64) -> Vec<u8> {
format!("{}-{}-", INACTIVE_REGION_PREFIX, cluster_id).into_bytes()
pub fn get_prefix_by_cluster() -> Vec<u8> {
format!("{}-0-", INACTIVE_REGION_PREFIX).into_bytes()
}
}
impl From<InactiveRegionKey> for Vec<u8> {
fn from(value: InactiveRegionKey) -> Self {
format!(
"{}-{}-{}-{}",
INACTIVE_REGION_PREFIX, value.cluster_id, value.node_id, value.region_id
"{}-0-{}-{}",
INACTIVE_REGION_PREFIX, value.node_id, value.region_id
)
.into_bytes()
}
@@ -97,13 +93,8 @@ impl FromStr for InactiveRegionKey {
error::InvalidInactiveRegionKeySnafu { key }
);
let cluster_id = caps[1].to_string();
let node_id = caps[2].to_string();
let region_id = caps[3].to_string();
let cluster_id: u64 = cluster_id.parse().context(error::ParseNumSnafu {
err_msg: format!("invalid cluster_id: {cluster_id}"),
})?;
let node_id: u64 = node_id.parse().context(error::ParseNumSnafu {
err_msg: format!("invalid node_id: {node_id}"),
})?;
@@ -111,11 +102,7 @@ impl FromStr for InactiveRegionKey {
err_msg: format!("invalid region_id: {region_id}"),
})?;
Ok(Self {
cluster_id,
node_id,
region_id,
})
Ok(Self { node_id, region_id })
}
}
@@ -135,24 +122,17 @@ mod tests {
#[test]
fn test_stat_key_round_trip() {
let key = DatanodeStatKey {
cluster_id: 0,
node_id: 1,
};
let key = DatanodeStatKey { node_id: 1 };
let key_bytes: Vec<u8> = key.into();
let new_key: DatanodeStatKey = key_bytes.try_into().unwrap();
assert_eq!(0, new_key.cluster_id);
assert_eq!(1, new_key.node_id);
}
#[test]
fn test_lease_key_round_trip() {
let key = DatanodeLeaseKey {
cluster_id: 0,
node_id: 1,
};
let key = DatanodeLeaseKey { node_id: 1 };
let key_bytes: Vec<u8> = key.clone().try_into().unwrap();
let new_key: DatanodeLeaseKey = key_bytes.try_into().unwrap();
@@ -162,21 +142,16 @@ mod tests {
#[test]
fn test_lease_key_to_stat_key() {
let lease_key = DatanodeLeaseKey {
cluster_id: 1,
node_id: 101,
};
let lease_key = DatanodeLeaseKey { node_id: 101 };
let stat_key: DatanodeStatKey = (&lease_key).into();
assert_eq!(1, stat_key.cluster_id);
assert_eq!(101, stat_key.node_id);
}
#[test]
fn test_inactive_region_key_round_trip() {
let key = InactiveRegionKey {
cluster_id: 0,
node_id: 1,
region_id: 2,
};

View File

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use common_meta::ClusterId;
use lazy_static::lazy_static;
use regex::Regex;
use serde::{Deserialize, Serialize};
@@ -26,13 +25,12 @@ lazy_static! {
#[derive(Debug, Clone, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct FlownodeLeaseKey {
pub cluster_id: ClusterId,
pub node_id: u64,
}
impl FlownodeLeaseKey {
pub fn prefix_key_by_cluster(cluster_id: ClusterId) -> Vec<u8> {
format!("{FLOWNODE_LEASE_PREFIX}-{cluster_id}-").into_bytes()
pub fn prefix_key_by_cluster() -> Vec<u8> {
format!("{FLOWNODE_LEASE_PREFIX}-0-").into_bytes()
}
}
@@ -42,10 +40,7 @@ mod tests {
#[test]
fn test_lease_key_round_trip() {
let key = FlownodeLeaseKey {
cluster_id: 0,
node_id: 1,
};
let key = FlownodeLeaseKey { node_id: 1 };
let key_bytes: Vec<u8> = key.clone().try_into().unwrap();
let new_key: FlownodeLeaseKey = key_bytes.try_into().unwrap();

View File

@@ -18,7 +18,7 @@ use std::hash::Hash;
use common_error::ext::BoxedError;
use common_meta::kv_backend::KvBackend;
use common_meta::peer::{Peer, PeerLookupService};
use common_meta::{util, ClusterId, DatanodeId, FlownodeId};
use common_meta::{util, DatanodeId, FlownodeId};
use common_time::util as time_util;
use snafu::ResultExt;
@@ -35,14 +35,12 @@ fn build_lease_filter(lease_secs: u64) -> impl Fn(&LeaseValue) -> bool {
/// Looks up the [`Peer`] of the given [`DatanodeId`]; only returns it if it's alive under the given `lease_secs`.
pub async fn lookup_datanode_peer(
cluster_id: ClusterId,
datanode_id: DatanodeId,
meta_peer_client: &MetaPeerClientRef,
lease_secs: u64,
) -> Result<Option<Peer>> {
let lease_filter = build_lease_filter(lease_secs);
let lease_key = DatanodeLeaseKey {
cluster_id,
node_id: datanode_id,
};
let lease_key_bytes: Vec<u8> = lease_key.clone().try_into()?;
@@ -63,29 +61,24 @@ pub async fn lookup_datanode_peer(
/// Find all alive datanodes
pub async fn alive_datanodes(
cluster_id: ClusterId,
meta_peer_client: &MetaPeerClientRef,
lease_secs: u64,
) -> Result<HashMap<DatanodeLeaseKey, LeaseValue>> {
let predicate = build_lease_filter(lease_secs);
filter(
DatanodeLeaseKey::prefix_key_by_cluster(cluster_id),
meta_peer_client,
|v| predicate(v),
)
filter(DatanodeLeaseKey::prefix_key(), meta_peer_client, |v| {
predicate(v)
})
.await
}
/// Looks up the [`Peer`] of the given [`FlownodeId`]; only returns it if it's alive under the given `lease_secs`.
pub async fn lookup_flownode_peer(
cluster_id: ClusterId,
flownode_id: FlownodeId,
meta_peer_client: &MetaPeerClientRef,
lease_secs: u64,
) -> Result<Option<Peer>> {
let lease_filter = build_lease_filter(lease_secs);
let lease_key = FlownodeLeaseKey {
cluster_id,
node_id: flownode_id,
};
let lease_key_bytes: Vec<u8> = lease_key.clone().try_into()?;
@@ -107,13 +100,12 @@ pub async fn lookup_flownode_peer(
/// Find all alive flownodes
pub async fn alive_flownodes(
cluster_id: ClusterId,
meta_peer_client: &MetaPeerClientRef,
lease_secs: u64,
) -> Result<HashMap<FlownodeLeaseKey, LeaseValue>> {
let predicate = build_lease_filter(lease_secs);
filter(
FlownodeLeaseKey::prefix_key_by_cluster(cluster_id),
FlownodeLeaseKey::prefix_key_by_cluster(),
meta_peer_client,
|v| predicate(v),
)
@@ -163,22 +155,14 @@ impl MetaPeerLookupService {
#[async_trait::async_trait]
impl PeerLookupService for MetaPeerLookupService {
async fn datanode(
&self,
cluster_id: ClusterId,
id: DatanodeId,
) -> common_meta::error::Result<Option<Peer>> {
lookup_datanode_peer(cluster_id, id, &self.meta_peer_client, u64::MAX)
async fn datanode(&self, id: DatanodeId) -> common_meta::error::Result<Option<Peer>> {
lookup_datanode_peer(id, &self.meta_peer_client, u64::MAX)
.await
.map_err(BoxedError::new)
.context(common_meta::error::ExternalSnafu)
}
async fn flownode(
&self,
cluster_id: ClusterId,
id: FlownodeId,
) -> common_meta::error::Result<Option<Peer>> {
lookup_flownode_peer(cluster_id, id, &self.meta_peer_client, u64::MAX)
async fn flownode(&self, id: FlownodeId) -> common_meta::error::Result<Option<Peer>> {
lookup_flownode_peer(id, &self.meta_peer_client, u64::MAX)
.await
.map_err(BoxedError::new)
.context(common_meta::error::ExternalSnafu)

View File

@@ -26,6 +26,7 @@ use common_config::Configurable;
use common_greptimedb_telemetry::GreptimeDBTelemetryTask;
use common_meta::cache_invalidator::CacheInvalidatorRef;
use common_meta::ddl::ProcedureExecutorRef;
use common_meta::distributed_time_constants;
use common_meta::key::maintenance::MaintenanceModeManagerRef;
use common_meta::key::TableMetadataManagerRef;
use common_meta::kv_backend::{KvBackendRef, ResettableKvBackend, ResettableKvBackendRef};
@@ -36,7 +37,6 @@ use common_meta::node_expiry_listener::NodeExpiryListener;
use common_meta::peer::Peer;
use common_meta::region_keeper::MemoryRegionKeeperRef;
use common_meta::wal_options_allocator::WalOptionsAllocatorRef;
use common_meta::{distributed_time_constants, ClusterId};
use common_options::datanode::DatanodeClientOptions;
use common_procedure::options::ProcedureConfig;
use common_procedure::ProcedureManagerRef;
@@ -572,13 +572,8 @@ impl Metasrv {
}
/// Looks up a peer by `peer_id`, returning it only when it's alive.
pub(crate) async fn lookup_peer(
&self,
cluster_id: ClusterId,
peer_id: u64,
) -> Result<Option<Peer>> {
pub(crate) async fn lookup_peer(&self, peer_id: u64) -> Result<Option<Peer>> {
lookup_datanode_peer(
cluster_id,
peer_id,
&self.meta_peer_client,
distributed_time_constants::DATANODE_LEASE_SECS,

View File

@@ -20,7 +20,7 @@ lazy_static! {
pub static ref METRIC_META_KV_REQUEST_ELAPSED: HistogramVec = register_histogram_vec!(
"greptime_meta_kv_request_elapsed",
"meta kv request",
&["target", "op", "cluster_id"]
&["target", "op"]
)
.unwrap();
/// The heartbeat connection gauge.

View File

@@ -39,7 +39,6 @@ use common_meta::key::{DeserializedValueWithBytes, TableMetadataManagerRef};
use common_meta::lock_key::{CatalogLock, RegionLock, SchemaLock, TableLock};
use common_meta::peer::Peer;
use common_meta::region_keeper::{MemoryRegionKeeperRef, OperatingRegionGuard};
use common_meta::ClusterId;
use common_procedure::error::{
Error as ProcedureError, FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu,
};
@@ -70,8 +69,6 @@ pub struct PersistentContext {
catalog: String,
/// The table schema.
schema: String,
/// The Id of the cluster.
cluster_id: ClusterId,
/// The [Peer] of migration source.
from_peer: Peer,
/// The [Peer] of migration destination.
@@ -273,12 +270,11 @@ impl Context {
/// The original failure detector was removed once the procedure was triggered.
/// Now, we need to register the failure detector for the failed region again.
pub async fn register_failure_detectors(&self) {
let cluster_id = self.persistent_ctx.cluster_id;
let datanode_id = self.persistent_ctx.from_peer.id;
let region_id = self.persistent_ctx.region_id;
self.region_failure_detector_controller
.register_failure_detectors(vec![(cluster_id, datanode_id, region_id)])
.register_failure_detectors(vec![(datanode_id, region_id)])
.await;
}
@@ -287,12 +283,11 @@ impl Context {
/// The original failure detectors were removed once the procedure was triggered.
/// However, the `from_peer` may still send heartbeats containing the failed region.
pub async fn deregister_failure_detectors(&self) {
let cluster_id = self.persistent_ctx.cluster_id;
let datanode_id = self.persistent_ctx.from_peer.id;
let region_id = self.persistent_ctx.region_id;
self.region_failure_detector_controller
.deregister_failure_detectors(vec![(cluster_id, datanode_id, region_id)])
.deregister_failure_detectors(vec![(datanode_id, region_id)])
.await;
}
@@ -458,7 +453,6 @@ impl RegionMigrationProcedure {
} = serde_json::from_str(json).context(FromJsonSnafu)?;
let guard = tracker.insert_running_procedure(&RegionMigrationProcedureTask {
cluster_id: persistent_ctx.cluster_id,
region_id: persistent_ctx.region_id,
from_peer: persistent_ctx.from_peer.clone(),
to_peer: persistent_ctx.to_peer.clone(),
@@ -580,7 +574,6 @@ mod tests {
use common_meta::key::test_utils::new_test_table_info;
use common_meta::rpc::router::{Region, RegionRoute};
use super::migration_end::RegionMigrationEnd;
use super::update_metadata::UpdateMetadata;
use super::*;
use crate::handler::HeartbeatMailbox;
@@ -620,7 +613,7 @@ mod tests {
let procedure = RegionMigrationProcedure::new(persistent_context, context, None);
let serialized = procedure.dump().unwrap();
let expected = r#"{"persistent_ctx":{"catalog":"greptime","schema":"public","cluster_id":0,"from_peer":{"id":1,"addr":""},"to_peer":{"id":2,"addr":""},"region_id":4398046511105,"timeout":"10s"},"state":{"region_migration_state":"RegionMigrationStart"}}"#;
let expected = r#"{"persistent_ctx":{"catalog":"greptime","schema":"public","from_peer":{"id":1,"addr":""},"to_peer":{"id":2,"addr":""},"region_id":4398046511105,"timeout":"10s"},"state":{"region_migration_state":"RegionMigrationStart"}}"#;
assert_eq!(expected, serialized);
}
@@ -628,7 +621,7 @@ mod tests {
fn test_backward_compatibility() {
let persistent_ctx = test_util::new_persistent_context(1, 2, RegionId::new(1024, 1));
// NOTES: Changes it will break backward compatibility.
let serialized = r#"{"catalog":"greptime","schema":"public","cluster_id":0,"from_peer":{"id":1,"addr":""},"to_peer":{"id":2,"addr":""},"region_id":4398046511105}"#;
let serialized = r#"{"catalog":"greptime","schema":"public","from_peer":{"id":1,"addr":""},"to_peer":{"id":2,"addr":""},"region_id":4398046511105}"#;
let deserialized: PersistentContext = serde_json::from_str(serialized).unwrap();
assert_eq!(persistent_ctx, deserialized);
@@ -640,15 +633,8 @@ mod tests {
#[async_trait::async_trait]
#[typetag::serde]
impl State for MockState {
async fn next(&mut self, ctx: &mut Context) -> Result<(Box<dyn State>, Status)> {
let pc = &mut ctx.persistent_ctx;
if pc.cluster_id == 2 {
Ok((Box::new(RegionMigrationEnd), Status::done()))
} else {
pc.cluster_id += 1;
Ok((Box::new(MockState), Status::executing(false)))
}
async fn next(&mut self, _ctx: &mut Context) -> Result<(Box<dyn State>, Status)> {
Ok((Box::new(MockState), Status::done()))
}
fn as_any(&self) -> &dyn Any {
@@ -692,7 +678,6 @@ mod tests {
for _ in 1..3 {
status = Some(procedure.execute(&ctx).await.unwrap());
}
assert_eq!(procedure.context.persistent_ctx.cluster_id, 2);
assert!(status.unwrap().is_done());
}

View File

@@ -62,7 +62,6 @@ impl CloseDowngradedRegion {
async fn build_close_region_instruction(&self, ctx: &mut Context) -> Result<Instruction> {
let pc = &ctx.persistent_ctx;
let downgrade_leader_datanode_id = pc.from_peer.id;
let cluster_id = pc.cluster_id;
let table_id = pc.region_id.table_id();
let region_number = pc.region_id.region_number();
let datanode_table_value = ctx.get_from_peer_datanode_table_value().await?;
@@ -70,7 +69,6 @@ impl CloseDowngradedRegion {
let RegionInfo { engine, .. } = datanode_table_value.region_info.clone();
Ok(Instruction::CloseRegion(RegionIdent {
cluster_id,
datanode_id: downgrade_leader_datanode_id,
table_id,
region_number,

View File

@@ -294,7 +294,6 @@ mod tests {
from_peer: Peer::empty(1),
to_peer: Peer::empty(2),
region_id: RegionId::new(1024, 1),
cluster_id: 0,
timeout: Duration::from_millis(1000),
}
}

View File

@@ -22,7 +22,6 @@ use common_meta::key::table_info::TableInfoValue;
use common_meta::key::table_route::TableRouteValue;
use common_meta::peer::Peer;
use common_meta::rpc::router::RegionRoute;
use common_meta::ClusterId;
use common_procedure::{watcher, ProcedureId, ProcedureManagerRef, ProcedureWithId};
use common_telemetry::{error, info};
use snafu::{ensure, OptionExt, ResultExt};
@@ -101,7 +100,6 @@ impl Drop for RegionMigrationProcedureGuard {
#[derive(Debug, Clone)]
pub struct RegionMigrationProcedureTask {
pub(crate) cluster_id: ClusterId,
pub(crate) region_id: RegionId,
pub(crate) from_peer: Peer,
pub(crate) to_peer: Peer,
@@ -109,15 +107,8 @@ pub struct RegionMigrationProcedureTask {
}
impl RegionMigrationProcedureTask {
pub fn new(
cluster_id: ClusterId,
region_id: RegionId,
from_peer: Peer,
to_peer: Peer,
timeout: Duration,
) -> Self {
pub fn new(region_id: RegionId, from_peer: Peer, to_peer: Peer, timeout: Duration) -> Self {
Self {
cluster_id,
region_id,
from_peer,
to_peer,
@@ -130,8 +121,8 @@ impl Display for RegionMigrationProcedureTask {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"cluster: {}, region: {}, from_peer: {}, to_peer: {}",
self.cluster_id, self.region_id, self.from_peer, self.to_peer
"region: {}, from_peer: {}, to_peer: {}",
self.region_id, self.from_peer, self.to_peer
)
}
}
@@ -331,7 +322,6 @@ impl RegionMigrationManager {
.with_label_values(&["desc", &task.to_peer.id.to_string()])
.inc();
let RegionMigrationProcedureTask {
cluster_id,
region_id,
from_peer,
to_peer,
@@ -341,7 +331,6 @@ impl RegionMigrationManager {
PersistentContext {
catalog: catalog_name,
schema: schema_name,
cluster_id,
region_id,
from_peer,
to_peer,
@@ -394,7 +383,6 @@ mod test {
let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory);
let region_id = RegionId::new(1024, 1);
let task = RegionMigrationProcedureTask {
cluster_id: 1,
region_id,
from_peer: Peer::empty(2),
to_peer: Peer::empty(1),
@@ -419,7 +407,6 @@ mod test {
let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory);
let region_id = RegionId::new(1024, 1);
let task = RegionMigrationProcedureTask {
cluster_id: 1,
region_id,
from_peer: Peer::empty(1),
to_peer: Peer::empty(1),
@@ -437,7 +424,6 @@ mod test {
let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory);
let region_id = RegionId::new(1024, 1);
let task = RegionMigrationProcedureTask {
cluster_id: 1,
region_id,
from_peer: Peer::empty(1),
to_peer: Peer::empty(2),
@@ -455,7 +441,6 @@ mod test {
let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory);
let region_id = RegionId::new(1024, 1);
let task = RegionMigrationProcedureTask {
cluster_id: 1,
region_id,
from_peer: Peer::empty(1),
to_peer: Peer::empty(2),
@@ -483,7 +468,6 @@ mod test {
let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory);
let region_id = RegionId::new(1024, 1);
let task = RegionMigrationProcedureTask {
cluster_id: 1,
region_id,
from_peer: Peer::empty(1),
to_peer: Peer::empty(2),
@@ -515,7 +499,6 @@ mod test {
let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory);
let region_id = RegionId::new(1024, 1);
let task = RegionMigrationProcedureTask {
cluster_id: 1,
region_id,
from_peer: Peer::empty(1),
to_peer: Peer::empty(2),
@@ -542,7 +525,6 @@ mod test {
let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory);
let region_id = RegionId::new(1024, 1);
let task = RegionMigrationProcedureTask {
cluster_id: 1,
region_id,
from_peer: Peer::empty(1),
to_peer: Peer::empty(2),

View File

@@ -62,7 +62,6 @@ impl OpenCandidateRegion {
/// - Datanode Table is not found.
async fn build_open_region_instruction(&self, ctx: &mut Context) -> Result<Instruction> {
let pc = &ctx.persistent_ctx;
let cluster_id = pc.cluster_id;
let table_id = pc.region_id.table_id();
let region_number = pc.region_id.region_number();
let candidate_id = pc.to_peer.id;
@@ -77,7 +76,6 @@ impl OpenCandidateRegion {
let open_instruction = Instruction::OpenRegion(OpenRegion::new(
RegionIdent {
cluster_id,
datanode_id: candidate_id,
table_id,
region_number,
@@ -214,7 +212,6 @@ mod tests {
fn new_mock_open_instruction(datanode_id: DatanodeId, region_id: RegionId) -> Instruction {
Instruction::OpenRegion(OpenRegion {
region_ident: RegionIdent {
cluster_id: 0,
datanode_id,
table_id: region_id.table_id(),
region_number: region_id.region_number(),

View File

@@ -19,7 +19,7 @@ use std::sync::Arc;
use std::time::Duration;
use api::v1::meta::mailbox_message::Payload;
use api::v1::meta::{HeartbeatResponse, MailboxMessage, RequestHeader};
use api::v1::meta::{HeartbeatResponse, MailboxMessage};
use common_meta::ddl::NoopRegionFailureDetectorControl;
use common_meta::instruction::{
DowngradeRegionReply, InstructionReply, SimpleReply, UpgradeRegionReply,
@@ -85,7 +85,7 @@ impl MailboxContext {
tx: Sender<std::result::Result<HeartbeatResponse, tonic::Status>>,
) {
let pusher_id = channel.pusher_id();
let pusher = Pusher::new(tx, &RequestHeader::default());
let pusher = Pusher::new(tx);
let _ = self.pushers.insert(pusher_id.string_key(), pusher).await;
}
@@ -317,7 +317,6 @@ pub fn new_persistent_context(from: u64, to: u64, region_id: RegionId) -> Persis
from_peer: Peer::empty(from),
to_peer: Peer::empty(to),
region_id,
cluster_id: 0,
timeout: Duration::from_secs(10),
}
}

View File

@@ -172,11 +172,7 @@ mod tests {
let detecting_regions = event.into_region_failure_detectors();
assert_eq!(
detecting_regions,
vec![(
ctx.persistent_ctx.cluster_id,
from_peer.id,
ctx.persistent_ctx.region_id
)]
vec![(from_peer.id, ctx.persistent_ctx.region_id)]
);
let table_route = table_metadata_manager

View File

@@ -238,7 +238,6 @@ mod tests {
from_peer: Peer::empty(1),
to_peer: Peer::empty(2),
region_id: RegionId::new(1024, 1),
cluster_id: 0,
timeout: Duration::from_millis(1000),
}
}

View File

@@ -97,7 +97,6 @@ fn create_table_task(table_name: Option<&str>) -> CreateTableTask {
#[test]
fn test_region_request_builder() {
let mut procedure = CreateTableProcedure::new(
1,
create_table_task(None),
test_data::new_ddl_context(Arc::new(NodeClients::default())),
);
@@ -192,7 +191,6 @@ async fn test_on_datanode_create_regions() {
let node_manager = new_node_manager(&region_server, &region_routes).await;
let mut procedure = CreateTableProcedure::new(
1,
create_table_task(None),
test_data::new_ddl_context(node_manager),
);
@@ -260,7 +258,7 @@ async fn test_on_datanode_create_logical_regions() {
.0;
let _ = kv_backend.txn(physical_route_txn).await.unwrap();
let mut procedure =
CreateLogicalTablesProcedure::new(1, vec![task1, task2, task3], physical_table_id, ctx);
CreateLogicalTablesProcedure::new(vec![task1, task2, task3], physical_table_id, ctx);
let expected_created_regions = Arc::new(Mutex::new(HashMap::from([(1, 3), (2, 3), (3, 3)])));

View File

@@ -129,7 +129,7 @@ mod tests {
#[test]
fn test_default_failure_detector_container() {
let container = RegionFailureDetector::new(Default::default());
let detecting_region = (0, 2, RegionId::new(1, 1));
let detecting_region = (2, RegionId::new(1, 1));
let _ = container.region_failure_detector(detecting_region);
assert!(container.contains(&detecting_region));

View File

@@ -19,7 +19,7 @@ use common_meta::key::table_route::TableRouteValue;
use common_meta::key::TableMetadataManagerRef;
use common_meta::region_keeper::MemoryRegionKeeperRef;
use common_meta::rpc::router::RegionRoute;
use common_meta::{ClusterId, DatanodeId};
use common_meta::DatanodeId;
use common_telemetry::warn;
use snafu::ResultExt;
use store_api::region_engine::RegionRole;
@@ -167,7 +167,6 @@ impl RegionLeaseKeeper {
/// and corresponding regions will be added to `non_exists` of [RenewRegionLeasesResponse].
pub async fn renew_region_leases(
&self,
_cluster_id: ClusterId,
datanode_id: DatanodeId,
regions: &[(RegionId, RegionRole)],
) -> Result<RenewRegionLeasesResponse> {
@@ -282,7 +281,6 @@ mod tests {
renewed,
} = keeper
.renew_region_leases(
0,
1,
&[
(RegionId::new(1024, 1), RegionRole::Follower),
@@ -384,7 +382,7 @@ mod tests {
non_exists,
renewed,
} = keeper
.renew_region_leases(0, 1, &[(region_id, RegionRole::Follower)])
.renew_region_leases(1, &[(region_id, RegionRole::Follower)])
.await
.unwrap();
assert!(renewed.is_empty());
@@ -397,7 +395,7 @@ mod tests {
non_exists,
renewed,
} = keeper
.renew_region_leases(0, leader_peer_id, &[(region_id, role)])
.renew_region_leases(leader_peer_id, &[(region_id, role)])
.await
.unwrap();
@@ -411,7 +409,7 @@ mod tests {
non_exists,
renewed,
} = keeper
.renew_region_leases(0, follower_peer_id, &[(region_id, role)])
.renew_region_leases(follower_peer_id, &[(region_id, role)])
.await
.unwrap();
@@ -432,7 +430,7 @@ mod tests {
non_exists,
renewed,
} = keeper
.renew_region_leases(0, leader_peer_id, &[(opening_region_id, role)])
.renew_region_leases(leader_peer_id, &[(opening_region_id, role)])
.await
.unwrap();
@@ -465,7 +463,6 @@ mod tests {
renewed,
} = keeper
.renew_region_leases(
0,
1,
&[
(region_id, RegionRole::Follower),
@@ -513,7 +510,7 @@ mod tests {
non_exists,
renewed,
} = keeper
.renew_region_leases(0, follower_peer_id, &[(region_id, role)])
.renew_region_leases(follower_peer_id, &[(region_id, role)])
.await
.unwrap();

View File

@@ -22,7 +22,7 @@ use common_meta::ddl::{DetectingRegion, RegionFailureDetectorController};
use common_meta::key::maintenance::MaintenanceModeManagerRef;
use common_meta::leadership_notifier::LeadershipChangeListener;
use common_meta::peer::PeerLookupServiceRef;
use common_meta::{ClusterId, DatanodeId};
use common_meta::DatanodeId;
use common_runtime::JoinHandle;
use common_telemetry::{error, info, warn};
use common_time::util::current_time_millis;
@@ -45,7 +45,6 @@ use crate::selector::SelectorOptions;
/// and a timestamp indicating when the heartbeat was sent.
#[derive(Debug)]
pub(crate) struct DatanodeHeartbeat {
cluster_id: ClusterId,
datanode_id: DatanodeId,
// TODO(weny): Considers collecting the memtable size in regions.
regions: Vec<RegionId>,
@@ -55,7 +54,6 @@ pub(crate) struct DatanodeHeartbeat {
impl From<&Stat> for DatanodeHeartbeat {
fn from(value: &Stat) -> Self {
DatanodeHeartbeat {
cluster_id: value.cluster_id,
datanode_id: value.id,
regions: value.region_stats.iter().map(|x| x.id).collect(),
timestamp: value.timestamp_millis,
@@ -341,7 +339,7 @@ impl RegionSupervisor {
}
}
async fn handle_region_failures(&self, mut regions: Vec<(ClusterId, DatanodeId, RegionId)>) {
async fn handle_region_failures(&self, mut regions: Vec<(DatanodeId, RegionId)>) {
if regions.is_empty() {
return;
}
@@ -358,22 +356,19 @@ impl RegionSupervisor {
}
let migrating_regions = regions
.extract_if(.., |(_, _, region_id)| {
.extract_if(.., |(_, region_id)| {
self.region_migration_manager.tracker().contains(*region_id)
})
.collect::<Vec<_>>();
for (cluster_id, datanode_id, region_id) in migrating_regions {
self.failure_detector
.remove(&(cluster_id, datanode_id, region_id));
for (datanode_id, region_id) in migrating_regions {
self.failure_detector.remove(&(datanode_id, region_id));
}
warn!("Detects region failures: {:?}", regions);
for (cluster_id, datanode_id, region_id) in regions {
match self.do_failover(cluster_id, datanode_id, region_id).await {
Ok(_) => self
.failure_detector
.remove(&(cluster_id, datanode_id, region_id)),
for (datanode_id, region_id) in regions {
match self.do_failover(datanode_id, region_id).await {
Ok(_) => self.failure_detector.remove(&(datanode_id, region_id)),
Err(err) => {
error!(err; "Failed to execute region failover for region: {region_id}, datanode: {datanode_id}");
}
@@ -388,15 +383,10 @@ impl RegionSupervisor {
.context(error::MaintenanceModeManagerSnafu)
}
async fn do_failover(
&self,
cluster_id: ClusterId,
datanode_id: DatanodeId,
region_id: RegionId,
) -> Result<()> {
async fn do_failover(&self, datanode_id: DatanodeId, region_id: RegionId) -> Result<()> {
let from_peer = self
.peer_lookup
.datanode(cluster_id, datanode_id)
.datanode(datanode_id)
.await
.context(error::LookupPeerSnafu {
peer_id: datanode_id,
@@ -407,7 +397,6 @@ impl RegionSupervisor {
let mut peers = self
.selector
.select(
cluster_id,
&self.selector_context,
SelectorOptions {
min_required_items: 1,
@@ -423,7 +412,6 @@ impl RegionSupervisor {
return Ok(());
}
let task = RegionMigrationProcedureTask {
cluster_id,
region_id,
from_peer,
to_peer,
@@ -442,7 +430,7 @@ impl RegionSupervisor {
}
/// Detects the failure of regions.
fn detect_region_failure(&self) -> Vec<(ClusterId, DatanodeId, RegionId)> {
fn detect_region_failure(&self) -> Vec<(DatanodeId, RegionId)> {
self.failure_detector
.iter()
.filter_map(|e| {
@@ -464,7 +452,7 @@ impl RegionSupervisor {
/// Updates the state of corresponding failure detectors.
fn on_heartbeat_arrived(&self, heartbeat: DatanodeHeartbeat) {
for region_id in heartbeat.regions {
let detecting_region = (heartbeat.cluster_id, heartbeat.datanode_id, region_id);
let detecting_region = (heartbeat.datanode_id, region_id);
let mut detector = self
.failure_detector
.region_failure_detector(detecting_region);
@@ -537,7 +525,6 @@ pub(crate) mod tests {
sender
.send(Event::HeartbeatArrived(DatanodeHeartbeat {
cluster_id: 0,
datanode_id: 0,
regions: vec![RegionId::new(1, 1)],
timestamp: 100,
@@ -547,7 +534,7 @@ pub(crate) mod tests {
let (tx, rx) = oneshot::channel();
sender.send(Event::Dump(tx)).await.unwrap();
let detector = rx.await.unwrap();
assert!(detector.contains(&(0, 0, RegionId::new(1, 1))));
assert!(detector.contains(&(0, RegionId::new(1, 1))));
// Clear up
sender.send(Event::Clear).await.unwrap();
@@ -561,7 +548,6 @@ pub(crate) mod tests {
(0..2000)
.map(|i| DatanodeHeartbeat {
timestamp: start + i * 1000 + rng.gen_range(0..100),
cluster_id: 0,
datanode_id,
regions: region_ids
.iter()
@@ -630,7 +616,7 @@ pub(crate) mod tests {
let (mut supervisor, sender) = new_test_supervisor();
let controller = RegionFailureDetectorControl::new(sender.clone());
tokio::spawn(async move { supervisor.run().await });
let detecting_region = (0, 1, RegionId::new(1, 1));
let detecting_region = (1, RegionId::new(1, 1));
controller
.register_failure_detectors(vec![detecting_region])
.await;
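Taken together, the hunks above shrink the failure-detector key from a (ClusterId, DatanodeId, RegionId) triple to a (DatanodeId, RegionId) pair. A minimal sketch of the new key shape, assuming DatanodeId is the u64 alias used elsewhere in the tree and RegionId comes from store_api:

use store_api::storage::RegionId;

// Assumed alias; the real one lives in common_meta.
type DatanodeId = u64;

// The detecting-region key after this change: no cluster id.
type DetectingRegion = (DatanodeId, RegionId);

fn main() {
    let key: DetectingRegion = (1, RegionId::new(1024, 1));
    // Keys of this shape index the failure detectors, and are what
    // register_failure_detectors / remove now receive.
    println!("{key:?}");
}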


@@ -25,19 +25,12 @@ use serde::{Deserialize, Serialize};
use crate::error;
use crate::error::Result;
pub type Namespace = u64;
#[async_trait::async_trait]
pub trait Selector: Send + Sync {
type Context;
type Output;
async fn select(
&self,
ns: Namespace,
ctx: &Self::Context,
opts: SelectorOptions,
) -> Result<Self::Output>;
async fn select(&self, ctx: &Self::Context, opts: SelectorOptions) -> Result<Self::Output>;
}
#[derive(Debug)]
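With the Namespace parameter gone, implementing Selector takes only a context and SelectorOptions. A hedged sketch of a toy implementation against the new shape; the FixedSelector type is hypothetical, only the trait signature and paths come from the hunks in this diff:

use meta_srv::error::Result;
use meta_srv::selector::{Selector, SelectorOptions};

/// Hypothetical selector that returns a fixed set of node ids.
struct FixedSelector {
    nodes: Vec<u64>,
}

#[async_trait::async_trait]
impl Selector for FixedSelector {
    type Context = ();
    type Output = Vec<u64>;

    // Note: no leading `ns: Namespace` argument anymore.
    async fn select(&self, _ctx: &Self::Context, opts: SelectorOptions) -> Result<Self::Output> {
        Ok(self.nodes.iter().copied().take(opts.min_required_items).collect())
    }
}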


@@ -19,7 +19,7 @@ use crate::lease;
use crate::metasrv::SelectorContext;
use crate::selector::common::choose_items;
use crate::selector::weighted_choose::{RandomWeightedChoose, WeightedItem};
use crate::selector::{Namespace, Selector, SelectorOptions};
use crate::selector::{Selector, SelectorOptions};
/// Selects all alive datanodes using a random weighted choose.
pub struct LeaseBasedSelector;
@@ -29,15 +29,10 @@ impl Selector for LeaseBasedSelector {
type Context = SelectorContext;
type Output = Vec<Peer>;
async fn select(
&self,
ns: Namespace,
ctx: &Self::Context,
opts: SelectorOptions,
) -> Result<Self::Output> {
async fn select(&self, ctx: &Self::Context, opts: SelectorOptions) -> Result<Self::Output> {
// 1. get alive datanodes.
let lease_kvs =
lease::alive_datanodes(ns, &ctx.meta_peer_client, ctx.datanode_lease_secs).await?;
lease::alive_datanodes(&ctx.meta_peer_client, ctx.datanode_lease_secs).await?;
// 2. compute weight array, but the weight of each item is the same.
let weight_array = lease_kvs


@@ -29,7 +29,7 @@ use crate::metasrv::SelectorContext;
use crate::selector::common::choose_items;
use crate::selector::weight_compute::{RegionNumsBasedWeightCompute, WeightCompute};
use crate::selector::weighted_choose::RandomWeightedChoose;
use crate::selector::{Namespace, Selector, SelectorOptions};
use crate::selector::{Selector, SelectorOptions};
pub struct LoadBasedSelector<C> {
weight_compute: C,
@@ -57,15 +57,10 @@ where
type Context = SelectorContext;
type Output = Vec<Peer>;
async fn select(
&self,
ns: Namespace,
ctx: &Self::Context,
opts: SelectorOptions,
) -> Result<Self::Output> {
async fn select(&self, ctx: &Self::Context, opts: SelectorOptions) -> Result<Self::Output> {
// 1. get alive datanodes.
let lease_kvs =
lease::alive_datanodes(ns, &ctx.meta_peer_client, ctx.datanode_lease_secs).await?;
lease::alive_datanodes(&ctx.meta_peer_client, ctx.datanode_lease_secs).await?;
// 2. get stat kvs and filter out expired datanodes.
let stat_keys = lease_kvs.keys().map(|k| k.into()).collect();
@@ -97,8 +92,8 @@ where
let selected = choose_items(&opts, &mut weighted_choose)?;
debug!(
"LoadBasedSelector select peers: {:?}, namespace: {}, opts: {:?}.",
selected, ns, opts,
"LoadBasedSelector select peers: {:?}, opts: {:?}.",
selected, opts,
);
Ok(selected)
@@ -165,33 +160,21 @@ mod tests {
fn test_filter_out_expired_datanode() {
let mut stat_kvs = HashMap::new();
stat_kvs.insert(
DatanodeStatKey {
cluster_id: 1,
node_id: 0,
},
DatanodeStatKey { node_id: 0 },
DatanodeStatValue { stats: vec![] },
);
stat_kvs.insert(
DatanodeStatKey {
cluster_id: 1,
node_id: 1,
},
DatanodeStatKey { node_id: 1 },
DatanodeStatValue { stats: vec![] },
);
stat_kvs.insert(
DatanodeStatKey {
cluster_id: 1,
node_id: 2,
},
DatanodeStatKey { node_id: 2 },
DatanodeStatValue { stats: vec![] },
);
let mut lease_kvs = HashMap::new();
lease_kvs.insert(
DatanodeLeaseKey {
cluster_id: 1,
node_id: 1,
},
DatanodeLeaseKey { node_id: 1 },
LeaseValue {
timestamp_millis: 0,
node_addr: "127.0.0.1:3002".to_string(),
@@ -201,9 +184,6 @@ mod tests {
let alive_stat_kvs = filter_out_expired_datanode(stat_kvs, &lease_kvs);
assert_eq!(1, alive_stat_kvs.len());
assert!(alive_stat_kvs.contains_key(&DatanodeStatKey {
cluster_id: 1,
node_id: 1
}));
assert!(alive_stat_kvs.contains_key(&DatanodeStatKey { node_id: 1 }));
}
}
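The stat and lease keys lose their cluster_id field too, so fixtures collapse to single-field struct literals. A small sketch of the new key construction; the import path is an assumption (in-tree the stat types live in common_meta's datanode module):

use std::collections::HashMap;

use common_meta::datanode::{DatanodeStatKey, DatanodeStatValue}; // assumed path

fn main() {
    let mut stat_kvs: HashMap<DatanodeStatKey, DatanodeStatValue> = HashMap::new();
    // The key is now just the node id; there is no cluster id to thread through.
    stat_kvs.insert(
        DatanodeStatKey { node_id: 1 },
        DatanodeStatValue { stats: vec![] },
    );
    assert!(stat_kvs.contains_key(&DatanodeStatKey { node_id: 1 }));
}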


@@ -20,7 +20,7 @@ use snafu::ensure;
use crate::error::{NoEnoughAvailableNodeSnafu, Result};
use crate::lease;
use crate::metasrv::{SelectTarget, SelectorContext};
use crate::selector::{Namespace, Selector, SelectorOptions};
use crate::selector::{Selector, SelectorOptions};
/// Round-robin selector that returns the next peer in the list in sequence.
/// Datanodes are ordered by their node_id.
@@ -53,7 +53,6 @@ impl RoundRobinSelector {
async fn get_peers(
&self,
ns: Namespace,
min_required_items: usize,
ctx: &SelectorContext,
) -> Result<Vec<Peer>> {
@@ -61,8 +60,7 @@ impl RoundRobinSelector {
SelectTarget::Datanode => {
// 1. get alive datanodes.
let lease_kvs =
lease::alive_datanodes(ns, &ctx.meta_peer_client, ctx.datanode_lease_secs)
.await?;
lease::alive_datanodes(&ctx.meta_peer_client, ctx.datanode_lease_secs).await?;
// 2. map into peers
lease_kvs
@@ -73,8 +71,7 @@ impl RoundRobinSelector {
SelectTarget::Flownode => {
// 1. get alive flownodes.
let lease_kvs =
lease::alive_flownodes(ns, &ctx.meta_peer_client, ctx.flownode_lease_secs)
.await?;
lease::alive_flownodes(&ctx.meta_peer_client, ctx.flownode_lease_secs).await?;
// 2. map into peers
lease_kvs
@@ -105,13 +102,8 @@ impl Selector for RoundRobinSelector {
type Context = SelectorContext;
type Output = Vec<Peer>;
async fn select(
&self,
ns: Namespace,
ctx: &Self::Context,
opts: SelectorOptions,
) -> Result<Vec<Peer>> {
let peers = self.get_peers(ns, opts.min_required_items, ctx).await?;
async fn select(&self, ctx: &Self::Context, opts: SelectorOptions) -> Result<Vec<Peer>> {
let peers = self.get_peers(opts.min_required_items, ctx).await?;
// choose peers
let mut selected = Vec::with_capacity(opts.min_required_items);
for _ in 0..opts.min_required_items {
@@ -135,8 +127,6 @@ mod test {
async fn test_round_robin_selector() {
let selector = RoundRobinSelector::default();
let ctx = create_selector_context();
let ns = 0;
// add three nodes
let peer1 = Peer {
id: 2,
@@ -151,11 +141,10 @@ mod test {
addr: "node3".to_string(),
};
let peers = vec![peer1.clone(), peer2.clone(), peer3.clone()];
put_datanodes(ns, &ctx.meta_peer_client, peers).await;
put_datanodes(&ctx.meta_peer_client, peers).await;
let peers = selector
.select(
ns,
&ctx,
SelectorOptions {
min_required_items: 4,
@@ -172,7 +161,6 @@ mod test {
let peers = selector
.select(
ns,
&ctx,
SelectorOptions {
min_required_items: 2,
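Both lease lookups now start from the peer client alone. A sketch of the updated call shapes, using only signatures visible in the hunks above (the peers_of wrapper is hypothetical):

use meta_srv::error::Result;
use meta_srv::metasrv::SelectorContext;

async fn peers_of(ctx: &SelectorContext) -> Result<()> {
    // Previously: lease::alive_datanodes(ns, &ctx.meta_peer_client, ...)
    let datanodes =
        meta_srv::lease::alive_datanodes(&ctx.meta_peer_client, ctx.datanode_lease_secs).await?;
    let flownodes =
        meta_srv::lease::alive_flownodes(&ctx.meta_peer_client, ctx.flownode_lease_secs).await?;
    // The selectors then map the lease kvs into peers, as above.
    let _ = (datanodes, flownodes);
    Ok(())
}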


@@ -22,7 +22,7 @@ use rand::prelude::SliceRandom;
use crate::cluster::MetaPeerClientBuilder;
use crate::error::Result;
use crate::metasrv::SelectorContext;
use crate::selector::{Namespace, Selector, SelectorOptions};
use crate::selector::{Selector, SelectorOptions};
/// Returns [SelectorContext] for test purpose.
pub fn new_test_selector_context() -> SelectorContext {
@@ -60,12 +60,7 @@ impl Selector for RandomNodeSelector {
type Context = SelectorContext;
type Output = Vec<Peer>;
async fn select(
&self,
_ns: Namespace,
_ctx: &Self::Context,
_opts: SelectorOptions,
) -> Result<Self::Output> {
async fn select(&self, _ctx: &Self::Context, _opts: SelectorOptions) -> Result<Self::Output> {
let mut rng = rand::thread_rng();
let mut nodes = self.nodes.clone();
nodes.shuffle(&mut rng);


@@ -104,26 +104,17 @@ mod tests {
#[test]
fn test_weight_compute() {
let mut stat_kvs: HashMap<DatanodeStatKey, DatanodeStatValue> = HashMap::default();
let stat_key = DatanodeStatKey {
cluster_id: 1,
node_id: 1,
};
let stat_key = DatanodeStatKey { node_id: 1 };
let stat_val = DatanodeStatValue {
stats: vec![mock_stat_1()],
};
stat_kvs.insert(stat_key, stat_val);
let stat_key = DatanodeStatKey {
cluster_id: 1,
node_id: 2,
};
let stat_key = DatanodeStatKey { node_id: 2 };
let stat_val = DatanodeStatValue {
stats: vec![mock_stat_2()],
};
stat_kvs.insert(stat_key, stat_val);
let stat_key = DatanodeStatKey {
cluster_id: 1,
node_id: 3,
};
let stat_key = DatanodeStatKey { node_id: 3 };
let stat_val = DatanodeStatValue {
stats: vec![mock_stat_3()],
};


@@ -22,7 +22,7 @@ use crate::cluster::MetaPeerClientRef;
use crate::error::{self, Result};
use crate::key::{DatanodeLeaseKey, LeaseValue};
use crate::lease;
use crate::service::admin::{util, HttpHandler};
use crate::service::admin::HttpHandler;
pub struct NodeLeaseHandler {
pub meta_peer_client: MetaPeerClientRef,
@@ -34,11 +34,9 @@ impl HttpHandler for NodeLeaseHandler {
&self,
_: &str,
_: http::Method,
params: &HashMap<String, String>,
_: &HashMap<String, String>,
) -> Result<http::Response<String>> {
let cluster_id = util::extract_cluster_id(params)?;
let leases = lease::alive_datanodes(cluster_id, &self.meta_peer_client, u64::MAX).await?;
let leases = lease::alive_datanodes(&self.meta_peer_client, u64::MAX).await?;
let leases = leases
.into_iter()
.map(|(k, v)| HumanLease {


@@ -12,24 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use snafu::{OptionExt, ResultExt};
use snafu::ResultExt;
use tonic::codegen::http;
use crate::error::{self, MissingRequiredParameterSnafu, ParseNumSnafu, Result};
pub fn extract_cluster_id(params: &HashMap<String, String>) -> Result<u64> {
params
.get("cluster_id")
.map(|id| id.parse::<u64>())
.context(MissingRequiredParameterSnafu {
param: "cluster_id",
})?
.context(ParseNumSnafu {
err_msg: "`cluster_id` is not a valid number",
})
}
use crate::error::{self, Result};
pub fn to_text_response(text: &str) -> Result<http::Response<String>> {
http::Response::builder()


@@ -29,7 +29,7 @@ use crate::{error, metasrv};
impl cluster_server::Cluster for Metasrv {
async fn batch_get(&self, req: Request<PbBatchGetRequest>) -> GrpcResult<PbBatchGetResponse> {
if !self.is_leader() {
let is_not_leader = ResponseHeader::failed(0, Error::is_not_leader());
let is_not_leader = ResponseHeader::failed(Error::is_not_leader());
let resp = PbBatchGetResponse {
header: Some(is_not_leader),
..Default::default()
@@ -46,13 +46,13 @@ impl cluster_server::Cluster for Metasrv {
.await
.context(error::KvBackendSnafu)?;
let resp = resp.to_proto_resp(ResponseHeader::success(0));
let resp = resp.to_proto_resp(ResponseHeader::success());
Ok(Response::new(resp))
}
async fn range(&self, req: Request<PbRangeRequest>) -> GrpcResult<PbRangeResponse> {
if !self.is_leader() {
let is_not_leader = ResponseHeader::failed(0, Error::is_not_leader());
let is_not_leader = ResponseHeader::failed(Error::is_not_leader());
let resp = PbRangeResponse {
header: Some(is_not_leader),
..Default::default()
@@ -69,7 +69,7 @@ impl cluster_server::Cluster for Metasrv {
.await
.context(error::KvBackendSnafu)?;
let resp = res.to_proto_resp(ResponseHeader::success(0));
let resp = res.to_proto_resp(ResponseHeader::success());
Ok(Response::new(resp))
}
@@ -78,7 +78,7 @@ impl cluster_server::Cluster for Metasrv {
req: Request<MetasrvPeersRequest>,
) -> GrpcResult<MetasrvPeersResponse> {
if !self.is_leader() {
let is_not_leader = ResponseHeader::failed(0, Error::is_not_leader());
let is_not_leader = ResponseHeader::failed(Error::is_not_leader());
let resp = MetasrvPeersResponse {
header: Some(is_not_leader),
..Default::default()
@@ -103,7 +103,7 @@ impl cluster_server::Cluster for Metasrv {
};
let resp = MetasrvPeersResponse {
header: Some(ResponseHeader::success(0)),
header: Some(ResponseHeader::success()),
leader: Some(leader),
followers,
};
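On the service side, ResponseHeader construction no longer carries a cluster id. A minimal sketch of the two constructors as they are used throughout these handlers (the import path is an assumption):

use api::v1::meta::{Error, ResponseHeader}; // assumed path

fn headers() {
    // Success: previously ResponseHeader::success(cluster_id).
    let ok = ResponseHeader::success();
    // Failure: previously ResponseHeader::failed(0, err).
    let not_leader = ResponseHeader::failed(Error::is_not_leader());
    let _ = (ok, not_leader);
}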


@@ -134,9 +134,7 @@ impl heartbeat_server::Heartbeat for Metasrv {
}
}
async fn handle_ask_leader(req: AskLeaderRequest, ctx: Context) -> Result<AskLeaderResponse> {
let cluster_id = req.header.as_ref().map_or(0, |h| h.cluster_id);
async fn handle_ask_leader(_req: AskLeaderRequest, ctx: Context) -> Result<AskLeaderResponse> {
let addr = match ctx.election {
Some(election) => {
if election.is_leader() {
@@ -153,7 +151,7 @@ async fn handle_ask_leader(req: AskLeaderRequest, ctx: Context) -> Result<AskLea
addr,
});
let header = Some(ResponseHeader::success(cluster_id));
let header = Some(ResponseHeader::success());
Ok(AskLeaderResponse { header, leader })
}
@@ -179,7 +177,7 @@ async fn register_pusher(
let role = header.role();
let id = get_node_id(header);
let pusher_id = PusherId::new(role, id);
let pusher = Pusher::new(sender, header);
let pusher = Pusher::new(sender);
handler_group.register_pusher(pusher_id, pusher).await;
pusher_id
}
@@ -213,12 +211,11 @@ mod tests {
.unwrap();
let req = AskLeaderRequest {
header: Some(RequestHeader::new((1, 1), Role::Datanode, W3cTrace::new())),
header: Some(RequestHeader::new(1, Role::Datanode, W3cTrace::new())),
};
let res = metasrv.ask_leader(req.into_request()).await.unwrap();
let res = res.into_inner();
assert_eq!(1, res.header.unwrap().cluster_id);
assert_eq!(metasrv.options().bind_addr, res.leader.unwrap().addr);
}
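Request headers mirror the change: the first argument collapses from a (cluster_id, member_id) pair to a single member id. A sketch with assumed import paths:

use api::v1::meta::{RequestHeader, Role}; // assumed path
use common_telemetry::tracing_context::W3cTrace; // assumed path

fn datanode_header() -> RequestHeader {
    // Previously: RequestHeader::new((1, 1), Role::Datanode, W3cTrace::new())
    RequestHeader::new(1, Role::Datanode, W3cTrace::new())
}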


@@ -41,7 +41,7 @@ impl procedure_service_server::ProcedureService for Metasrv {
) -> GrpcResult<ProcedureStateResponse> {
if !self.is_leader() {
let resp = ProcedureStateResponse {
header: Some(ResponseHeader::failed(0, Error::is_not_leader())),
header: Some(ResponseHeader::failed(Error::is_not_leader())),
..Default::default()
};
@@ -71,7 +71,7 @@ impl procedure_service_server::ProcedureService for Metasrv {
async fn ddl(&self, request: Request<PbDdlTaskRequest>) -> GrpcResult<PbDdlTaskResponse> {
if !self.is_leader() {
let resp = PbDdlTaskResponse {
header: Some(ResponseHeader::failed(0, Error::is_not_leader())),
header: Some(ResponseHeader::failed(Error::is_not_leader())),
..Default::default()
};
@@ -87,7 +87,6 @@ impl procedure_service_server::ProcedureService for Metasrv {
} = request.into_inner();
let header = header.context(error::MissingRequestHeaderSnafu)?;
let cluster_id = header.cluster_id;
let query_context = query_context
.context(error::MissingRequiredParameterSnafu {
param: "query_context",
@@ -102,7 +101,6 @@ impl procedure_service_server::ProcedureService for Metasrv {
.procedure_executor()
.submit_ddl_task(
&ExecutorContext {
cluster_id: Some(cluster_id),
tracing_context: Some(header.tracing_context),
},
SubmitDdlTaskRequest {
@@ -123,7 +121,7 @@ impl procedure_service_server::ProcedureService for Metasrv {
) -> GrpcResult<MigrateRegionResponse> {
if !self.is_leader() {
let resp = MigrateRegionResponse {
header: Some(ResponseHeader::failed(0, Error::is_not_leader())),
header: Some(ResponseHeader::failed(Error::is_not_leader())),
..Default::default()
};
@@ -139,22 +137,19 @@ impl procedure_service_server::ProcedureService for Metasrv {
timeout_secs,
} = request.into_inner();
let header = header.context(error::MissingRequestHeaderSnafu)?;
let cluster_id = header.cluster_id;
let _header = header.context(error::MissingRequestHeaderSnafu)?;
let from_peer = self
.lookup_peer(cluster_id, from_peer)
.lookup_peer(from_peer)
.await?
.context(error::PeerUnavailableSnafu { peer_id: from_peer })?;
let to_peer = self
.lookup_peer(cluster_id, to_peer)
.lookup_peer(to_peer)
.await?
.context(error::PeerUnavailableSnafu { peer_id: to_peer })?;
let pid = self
.region_migration_manager()
.submit_procedure(RegionMigrationProcedureTask {
cluster_id,
region_id: region_id.into(),
from_peer,
to_peer,
@@ -177,7 +172,7 @@ impl procedure_service_server::ProcedureService for Metasrv {
) -> GrpcResult<ProcedureDetailResponse> {
if !self.is_leader() {
let resp = ProcedureDetailResponse {
header: Some(ResponseHeader::failed(0, Error::is_not_leader())),
header: Some(ResponseHeader::failed(Error::is_not_leader())),
..Default::default()
};
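DDL submission follows suit: ExecutorContext now carries only the tracing context. A sketch of building it from an incoming header, with the import path and the tracing-context type (the W3C key-value map) as assumptions:

use std::collections::HashMap;

use common_meta::ddl::ExecutorContext; // assumed path

fn executor_ctx(tracing_context: HashMap<String, String>) -> ExecutorContext {
    ExecutorContext {
        // cluster_id: Some(..) is gone; the tracing context is all that remains.
        tracing_context: Some(tracing_context),
    }
}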


@@ -28,10 +28,10 @@ use common_meta::rpc::store::{
BatchDeleteRequest, BatchGetRequest, BatchPutRequest, CompareAndPutRequest, DeleteRangeRequest,
PutRequest, RangeRequest,
};
use snafu::{OptionExt, ResultExt};
use snafu::ResultExt;
use tonic::{Request, Response};
use crate::error::{self, MissingRequestHeaderSnafu};
use crate::error::{self};
use crate::metasrv::Metasrv;
use crate::metrics::METRIC_META_KV_REQUEST_ELAPSED;
use crate::service::GrpcResult;
@@ -41,15 +41,8 @@ impl store_server::Store for Metasrv {
async fn range(&self, req: Request<PbRangeRequest>) -> GrpcResult<PbRangeResponse> {
let req = req.into_inner();
let cluster_id = req
.header
.as_ref()
.context(MissingRequestHeaderSnafu)?
.cluster_id;
let cluster_id_str = cluster_id.to_string();
let _timer = METRIC_META_KV_REQUEST_ELAPSED
.with_label_values(&[self.kv_backend().name(), "range", cluster_id_str.as_str()])
.with_label_values(&[self.kv_backend().name(), "range"])
.start_timer();
let req: RangeRequest = req.into();
@@ -60,22 +53,14 @@ impl store_server::Store for Metasrv {
.await
.context(error::KvBackendSnafu)?;
let res = res.to_proto_resp(ResponseHeader::success(cluster_id));
let res = res.to_proto_resp(ResponseHeader::success());
Ok(Response::new(res))
}
async fn put(&self, req: Request<PbPutRequest>) -> GrpcResult<PbPutResponse> {
let req = req.into_inner();
let cluster_id = req
.header
.as_ref()
.context(MissingRequestHeaderSnafu)?
.cluster_id;
let cluster_id_str = cluster_id.to_string();
let _timer = METRIC_META_KV_REQUEST_ELAPSED
.with_label_values(&[self.kv_backend().name(), "put", cluster_id_str.as_str()])
.with_label_values(&[self.kv_backend().name(), "put"])
.start_timer();
let req: PutRequest = req.into();
@@ -86,26 +71,14 @@ impl store_server::Store for Metasrv {
.await
.context(error::KvBackendSnafu)?;
let res = res.to_proto_resp(ResponseHeader::success(cluster_id));
let res = res.to_proto_resp(ResponseHeader::success());
Ok(Response::new(res))
}
async fn batch_get(&self, req: Request<PbBatchGetRequest>) -> GrpcResult<PbBatchGetResponse> {
let req = req.into_inner();
let cluster_id = req
.header
.as_ref()
.context(MissingRequestHeaderSnafu)?
.cluster_id;
let cluster_id_str = cluster_id.to_string();
let _timer = METRIC_META_KV_REQUEST_ELAPSED
.with_label_values(&[
self.kv_backend().name(),
"batch_get",
cluster_id_str.as_str(),
])
.with_label_values(&[self.kv_backend().name(), "batch_get"])
.start_timer();
let req: BatchGetRequest = req.into();
@@ -116,26 +89,15 @@ impl store_server::Store for Metasrv {
.await
.context(error::KvBackendSnafu)?;
let res = res.to_proto_resp(ResponseHeader::success(cluster_id));
let res = res.to_proto_resp(ResponseHeader::success());
Ok(Response::new(res))
}
async fn batch_put(&self, req: Request<PbBatchPutRequest>) -> GrpcResult<PbBatchPutResponse> {
let req = req.into_inner();
let cluster_id = req
.header
.as_ref()
.context(MissingRequestHeaderSnafu)?
.cluster_id;
let cluster_id_str = cluster_id.to_string();
let _timer = METRIC_META_KV_REQUEST_ELAPSED
.with_label_values(&[
self.kv_backend().name(),
"batch_pub",
cluster_id_str.as_str(),
])
.with_label_values(&[self.kv_backend().name(), "batch_pub"])
.start_timer();
let req: BatchPutRequest = req.into();
@@ -146,7 +108,7 @@ impl store_server::Store for Metasrv {
.await
.context(error::KvBackendSnafu)?;
let res = res.to_proto_resp(ResponseHeader::success(cluster_id));
let res = res.to_proto_resp(ResponseHeader::success());
Ok(Response::new(res))
}
@@ -156,19 +118,8 @@ impl store_server::Store for Metasrv {
) -> GrpcResult<PbBatchDeleteResponse> {
let req = req.into_inner();
let cluster_id = req
.header
.as_ref()
.context(MissingRequestHeaderSnafu)?
.cluster_id;
let cluster_id_str = cluster_id.to_string();
let _timer = METRIC_META_KV_REQUEST_ELAPSED
.with_label_values(&[
self.kv_backend().name(),
"batch_delete",
cluster_id_str.as_str(),
])
.with_label_values(&[self.kv_backend().name(), "batch_delete"])
.start_timer();
let req: BatchDeleteRequest = req.into();
@@ -179,7 +130,7 @@ impl store_server::Store for Metasrv {
.await
.context(error::KvBackendSnafu)?;
let res = res.to_proto_resp(ResponseHeader::success(cluster_id));
let res = res.to_proto_resp(ResponseHeader::success());
Ok(Response::new(res))
}
@@ -189,19 +140,8 @@ impl store_server::Store for Metasrv {
) -> GrpcResult<PbCompareAndPutResponse> {
let req = req.into_inner();
let cluster_id = req
.header
.as_ref()
.context(MissingRequestHeaderSnafu)?
.cluster_id;
let cluster_id_str = cluster_id.to_string();
let _timer = METRIC_META_KV_REQUEST_ELAPSED
.with_label_values(&[
self.kv_backend().name(),
"compare_and_put",
cluster_id_str.as_str(),
])
.with_label_values(&[self.kv_backend().name(), "compare_and_put"])
.start_timer();
let req: CompareAndPutRequest = req.into();
@@ -212,7 +152,7 @@ impl store_server::Store for Metasrv {
.await
.context(error::KvBackendSnafu)?;
let res = res.to_proto_resp(ResponseHeader::success(cluster_id));
let res = res.to_proto_resp(ResponseHeader::success());
Ok(Response::new(res))
}
@@ -222,19 +162,8 @@ impl store_server::Store for Metasrv {
) -> GrpcResult<PbDeleteRangeResponse> {
let req = req.into_inner();
let cluster_id = req
.header
.as_ref()
.context(MissingRequestHeaderSnafu)?
.cluster_id;
let cluster_id_str = cluster_id.to_string();
let _timer = METRIC_META_KV_REQUEST_ELAPSED
.with_label_values(&[
self.kv_backend().name(),
"delete_range",
cluster_id_str.as_str(),
])
.with_label_values(&[self.kv_backend().name(), "delete_range"])
.start_timer();
let req: DeleteRangeRequest = req.into();
@@ -245,7 +174,7 @@ impl store_server::Store for Metasrv {
.await
.context(error::KvBackendSnafu)?;
let res = res.to_proto_resp(ResponseHeader::success(cluster_id));
let res = res.to_proto_resp(ResponseHeader::success());
Ok(Response::new(res))
}
}
@@ -276,7 +205,7 @@ mod tests {
let metasrv = new_metasrv().await;
let mut req = RangeRequest::default();
req.set_header((1, 1), Role::Datanode, W3cTrace::new());
req.set_header(1, Role::Datanode, W3cTrace::new());
let res = metasrv.range(req.into_request()).await;
let _ = res.unwrap();
@@ -287,7 +216,7 @@ mod tests {
let metasrv = new_metasrv().await;
let mut req = PutRequest::default();
req.set_header((1, 1), Role::Datanode, W3cTrace::new());
req.set_header(1, Role::Datanode, W3cTrace::new());
let res = metasrv.put(req.into_request()).await;
let _ = res.unwrap();
@@ -298,7 +227,7 @@ mod tests {
let metasrv = new_metasrv().await;
let mut req = BatchGetRequest::default();
req.set_header((1, 1), Role::Datanode, W3cTrace::new());
req.set_header(1, Role::Datanode, W3cTrace::new());
let res = metasrv.batch_get(req.into_request()).await;
let _ = res.unwrap();
@@ -310,7 +239,7 @@ mod tests {
let metasrv = new_metasrv().await;
let mut req = BatchPutRequest::default();
req.set_header((1, 1), Role::Datanode, W3cTrace::new());
req.set_header(1, Role::Datanode, W3cTrace::new());
let res = metasrv.batch_put(req.into_request()).await;
let _ = res.unwrap();
@@ -321,7 +250,7 @@ mod tests {
let metasrv = new_metasrv().await;
let mut req = BatchDeleteRequest::default();
req.set_header((1, 1), Role::Datanode, W3cTrace::new());
req.set_header(1, Role::Datanode, W3cTrace::new());
let res = metasrv.batch_delete(req.into_request()).await;
let _ = res.unwrap();
@@ -332,7 +261,7 @@ mod tests {
let metasrv = new_metasrv().await;
let mut req = CompareAndPutRequest::default();
req.set_header((1, 1), Role::Datanode, W3cTrace::new());
req.set_header(1, Role::Datanode, W3cTrace::new());
let res = metasrv.compare_and_put(req.into_request()).await;
let _ = res.unwrap();
@@ -343,7 +272,7 @@ mod tests {
let metasrv = new_metasrv().await;
let mut req = DeleteRangeRequest::default();
req.set_header((1, 1), Role::Datanode, W3cTrace::new());
req.set_header(1, Role::Datanode, W3cTrace::new());
let res = metasrv.delete_range(req.into_request()).await;
let _ = res.unwrap();
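The KV request tests all follow one pattern after the change. A compact sketch; RangeRequest comes from common_meta::rpc::store as imported at the top of this file, while the Role and W3cTrace paths are assumptions:

use api::v1::meta::Role; // assumed path
use common_meta::rpc::store::RangeRequest;
use common_telemetry::tracing_context::W3cTrace; // assumed path

fn make_request() -> RangeRequest {
    let mut req = RangeRequest::default();
    // Previously: req.set_header((1, 1), Role::Datanode, W3cTrace::new())
    req.set_header(1, Role::Datanode, W3cTrace::new());
    req
}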


@@ -15,7 +15,6 @@
use async_trait::async_trait;
use common_error::ext::BoxedError;
use common_meta::ddl::table_meta::PeerAllocator;
use common_meta::ddl::TableMetadataAllocatorContext;
use common_meta::error::{ExternalSnafu, Result as MetaResult};
use common_meta::peer::Peer;
use snafu::{ensure, ResultExt};
@@ -36,24 +35,18 @@ impl MetasrvPeerAllocator {
Self { ctx, selector }
}
/// Allocates a specified number (by `regions`) of [`Peer`] instances based on the given
/// [`TableMetadataAllocatorContext`] and number of regions. The returned peers will have
/// the same length as the number of regions.
/// Allocates a specified number (by `regions`) of [`Peer`] instances based on the number of
/// regions. The returned peers will have the same length as the number of regions.
///
/// This method is mainly a wrapper around the [`SelectorRef`]::`select` method. There is
/// no guarantee about how the returned peers will be used, e.g., whether they belong to the
/// same table or not. So this method isn't idempotent.
async fn alloc(
&self,
ctx: &TableMetadataAllocatorContext,
regions: usize,
) -> Result<Vec<Peer>> {
async fn alloc(&self, regions: usize) -> Result<Vec<Peer>> {
ensure!(regions <= MAX_REGION_SEQ as usize, TooManyPartitionsSnafu);
let mut peers = self
.selector
.select(
ctx.cluster_id,
&self.ctx,
SelectorOptions {
min_required_items: regions,
@@ -79,12 +72,8 @@ impl MetasrvPeerAllocator {
#[async_trait]
impl PeerAllocator for MetasrvPeerAllocator {
async fn alloc(
&self,
ctx: &TableMetadataAllocatorContext,
regions: usize,
) -> MetaResult<Vec<Peer>> {
self.alloc(ctx, regions)
async fn alloc(&self, regions: usize) -> MetaResult<Vec<Peer>> {
self.alloc(regions)
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)
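PeerAllocator loses its context parameter entirely. A toy implementation against the slimmed trait; StaticAllocator is hypothetical, while the trait shape and imports are the ones shown in the hunks above:

use async_trait::async_trait;
use common_meta::ddl::table_meta::PeerAllocator;
use common_meta::error::Result as MetaResult;
use common_meta::peer::Peer;

struct StaticAllocator {
    pool: Vec<Peer>,
}

#[async_trait]
impl PeerAllocator for StaticAllocator {
    // One peer per region; no TableMetadataAllocatorContext to thread through.
    async fn alloc(&self, regions: usize) -> MetaResult<Vec<Peer>> {
        Ok(self.pool.iter().cloned().cycle().take(regions).collect())
    }
}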


@@ -17,7 +17,6 @@ use std::sync::Arc;
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_meta::peer::Peer;
use common_meta::rpc::router::{Region, RegionRoute};
use common_meta::ClusterId;
use common_time::util as time_util;
use crate::cluster::{MetaPeerClientBuilder, MetaPeerClientRef};
@@ -63,15 +62,10 @@ pub(crate) fn create_selector_context() -> SelectorContext {
}
}
pub(crate) async fn put_datanodes(
cluster_id: ClusterId,
meta_peer_client: &MetaPeerClientRef,
datanodes: Vec<Peer>,
) {
pub(crate) async fn put_datanodes(meta_peer_client: &MetaPeerClientRef, datanodes: Vec<Peer>) {
let backend = meta_peer_client.memory_backend();
for datanode in datanodes {
let lease_key = DatanodeLeaseKey {
cluster_id,
node_id: datanode.id,
};
let lease_value = LeaseValue {


@@ -308,11 +308,10 @@ impl GreptimeDbClusterBuilder {
expected_datanodes: usize,
) {
for _ in 0..10 {
let alive_datanodes =
meta_srv::lease::alive_datanodes(1000, meta_peer_client, u64::MAX)
.await
.unwrap()
.len();
let alive_datanodes = meta_srv::lease::alive_datanodes(meta_peer_client, u64::MAX)
.await
.unwrap()
.len();
if alive_datanodes == expected_datanodes {
return;
}
@@ -322,10 +321,9 @@ impl GreptimeDbClusterBuilder {
}
async fn create_datanode(&self, opts: DatanodeOptions, metasrv: MockInfo) -> Datanode {
let mut meta_client =
MetaClientBuilder::datanode_default_options(1000, opts.node_id.unwrap())
.channel_manager(metasrv.channel_manager)
.build();
let mut meta_client = MetaClientBuilder::datanode_default_options(opts.node_id.unwrap())
.channel_manager(metasrv.channel_manager)
.build();
meta_client.start(&[&metasrv.server_addr]).await.unwrap();
let meta_client = Arc::new(meta_client);
@@ -357,7 +355,7 @@ impl GreptimeDbClusterBuilder {
metasrv: MockInfo,
datanode_clients: Arc<NodeClients>,
) -> Arc<FeInstance> {
let mut meta_client = MetaClientBuilder::frontend_default_options(1000)
let mut meta_client = MetaClientBuilder::frontend_default_options()
.channel_manager(metasrv.channel_manager)
.enable_access_cluster_info()
.build();
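Client construction drops the cluster id as well: builders are keyed by role, plus a node id for datanodes. A sketch of the updated shapes; the meta_client crate path is an assumption, the builder methods are the ones in the hunk above:

use meta_client::MetaClientBuilder; // assumed path

async fn build_clients(node_id: u64, server_addr: &str) {
    // Previously: MetaClientBuilder::datanode_default_options(1000, node_id)
    let mut dn_client = MetaClientBuilder::datanode_default_options(node_id).build();
    dn_client.start(&[server_addr]).await.unwrap();

    // Previously: MetaClientBuilder::frontend_default_options(1000)
    let mut fe_client = MetaClientBuilder::frontend_default_options()
        .enable_access_cluster_info()
        .build();
    fe_client.start(&[server_addr]).await.unwrap();
}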


@@ -35,7 +35,7 @@ use futures::future::BoxFuture;
use meta_srv::error::Result as MetaResult;
use meta_srv::metasrv::SelectorContext;
use meta_srv::procedure::region_migration::RegionMigrationProcedureTask;
use meta_srv::selector::{Namespace, Selector, SelectorOptions};
use meta_srv::selector::{Selector, SelectorOptions};
use servers::query_handler::sql::SqlQueryHandler;
use session::context::{QueryContext, QueryContextRef};
use store_api::storage::RegionId;
@@ -169,7 +169,6 @@ pub async fn test_region_migration(store_type: StorageType, endpoints: Vec<Strin
// Trigger region migration.
let procedure = region_migration_manager
.submit_procedure(RegionMigrationProcedureTask::new(
0,
region_id,
peer_factory(from_peer_id),
peer_factory(to_peer_id),
@@ -216,7 +215,6 @@ pub async fn test_region_migration(store_type: StorageType, endpoints: Vec<Strin
// Triggers again.
let procedure = region_migration_manager
.submit_procedure(RegionMigrationProcedureTask::new(
0,
region_id,
peer_factory(from_peer_id),
peer_factory(to_peer_id),
@@ -473,7 +471,6 @@ pub async fn test_region_migration_by_sql(store_type: StorageType, endpoints: Ve
// Triggers again.
let procedure = region_migration_manager
.submit_procedure(RegionMigrationProcedureTask::new(
0,
region_id,
peer_factory(from_peer_id),
peer_factory(to_peer_id),
@@ -578,7 +575,6 @@ pub async fn test_region_migration_multiple_regions(
// Trigger region migration.
let procedure = region_migration_manager
.submit_procedure(RegionMigrationProcedureTask::new(
0,
region_id,
peer_factory(from_peer_id),
peer_factory(to_peer_id),
@@ -625,7 +621,6 @@ pub async fn test_region_migration_multiple_regions(
// Triggers again.
let procedure = region_migration_manager
.submit_procedure(RegionMigrationProcedureTask::new(
0,
region_id,
peer_factory(from_peer_id),
peer_factory(to_peer_id),
@@ -715,7 +710,6 @@ pub async fn test_region_migration_all_regions(store_type: StorageType, endpoint
// Trigger region migration.
let procedure = region_migration_manager
.submit_procedure(RegionMigrationProcedureTask::new(
0,
region_id,
peer_factory(from_peer_id),
peer_factory(to_peer_id),
@@ -763,7 +757,6 @@ pub async fn test_region_migration_all_regions(store_type: StorageType, endpoint
// Triggers again.
let procedure = region_migration_manager
.submit_procedure(RegionMigrationProcedureTask::new(
0,
region_id,
peer_factory(from_peer_id),
peer_factory(to_peer_id),
@@ -842,7 +835,6 @@ pub async fn test_region_migration_incorrect_from_peer(
// Trigger region migration.
let err = region_migration_manager
.submit_procedure(RegionMigrationProcedureTask::new(
0,
region_id,
peer_factory(5),
peer_factory(1),
@@ -925,7 +917,6 @@ pub async fn test_region_migration_incorrect_region_id(
// Trigger region migration.
let err = region_migration_manager
.submit_procedure(RegionMigrationProcedureTask::new(
0,
region_id,
peer_factory(2),
peer_factory(1),
@@ -957,7 +948,6 @@ impl Selector for ConstNodeSelector {
async fn select(
&self,
_ns: Namespace,
_ctx: &Self::Context,
_opts: SelectorOptions,
) -> MetaResult<Self::Output> {