Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2026-01-05 12:52:57 +00:00)

Compare commits: 54 commits, `recording_` ... `feat/sst-c`
Commit SHA1s:

d5760a7348, bc9614e22c, 7dd9e98ff6, fb6b7f7801, 87d7c316df, c80a73bc20,
dd9d13e7df, 79d249f5fa, 63bc544514, 30c29539a3, 359da62d9e, c9f4b36360,
85c346b16a, 738c23beb0, 8aadd1e59a, cbd58291da, e522e8959b, 7183a93e5a,
8c538622e2, 142dacb2c8, 371afc458f, 0751cd74c0, ec34e8739a, b650743785,
80a8b2e1bd, ec8a15cadd, f929d751a5, fad3621a7a, 87723effc7, 62a333ad09,
6ad186a13e, 77dee84a75, a57e263e5a, 8796ddaf31, 7fa3fbdfef, 457d2a620c,
9f14edbb28, cb3fad0c2d, 2d1e7c2441, 9860bca986, 3a83c33a48, 373bd59b07,
c8db4b286d, 56c8c0651f, 448e588fa7, f4cbf1d776, b35eefcf45, 408dd55a2f,
e463942a5b, 0124a0d156, e23628a4e0, 1d637cad51, a56030e6a5, a71b93dd84
```diff
@@ -41,7 +41,14 @@ runs:
         username: ${{ inputs.dockerhub-image-registry-username }}
         password: ${{ inputs.dockerhub-image-registry-token }}

-    - name: Build and push dev-builder-ubuntu image
+    - name: Set up qemu for multi-platform builds
+      uses: docker/setup-qemu-action@v3
+      with:
+        platforms: linux/amd64,linux/arm64
+        # The latest version will lead to segmentation fault.
+        image: tonistiigi/binfmt:qemu-v7.0.0-28
+
+    - name: Build and push dev-builder-ubuntu image # Build image for amd64 and arm64 platform.
       shell: bash
       if: ${{ inputs.build-dev-builder-ubuntu == 'true' }}
       run: |
@@ -52,7 +59,7 @@ runs:
           IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
           DEV_BUILDER_IMAGE_TAG=${{ inputs.version }}

-    - name: Build and push dev-builder-centos image
+    - name: Build and push dev-builder-centos image # Only build image for amd64 platform.
       shell: bash
       if: ${{ inputs.build-dev-builder-centos == 'true' }}
       run: |
@@ -69,8 +76,7 @@ runs:
       run: |
         make dev-builder \
           BASE_IMAGE=android \
           BUILDX_MULTI_PLATFORM_BUILD=amd64 \
           IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
           IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
-          DEV_BUILDER_IMAGE_TAG=${{ inputs.version }} && \
-
-        docker push ${{ inputs.dockerhub-image-registry }}/${{ inputs.dockerhub-image-namespace }}/dev-builder-android:${{ inputs.version }}
+          DEV_BUILDER_IMAGE_TAG=${{ inputs.version }}
```
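The pinned `tonistiigi/binfmt` image registers QEMU handlers with the kernel so that arm64 build stages can run on amd64 runners. Outside CI, an equivalent one-off registration might look like the sketch below; the tag matches the pin above, and the architecture list is an assumption:

```bash
# Register QEMU binfmt handlers for cross-platform Docker builds.
# The tag is pinned because, per the workflow comment above, the
# latest binfmt image leads to a segmentation fault.
docker run --privileged --rm tonistiigi/binfmt:qemu-v7.0.0-28 --install amd64,arm64
```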
Cargo.lock (generated, 102 changed lines)
```diff
@@ -1594,7 +1594,7 @@ dependencies = [
  "bitflags 1.3.2",
  "strsim 0.8.0",
  "textwrap 0.11.0",
- "unicode-width",
+ "unicode-width 0.1.14",
  "vec_map",
 ]
@@ -1876,7 +1876,7 @@ checksum = "b34115915337defe99b2aff5c2ce6771e5fbc4079f4b506301f5cf394c8452f7"
 dependencies = [
  "strum 0.26.3",
  "strum_macros 0.26.4",
- "unicode-width",
+ "unicode-width 0.1.14",
 ]

 [[package]]
@@ -2469,6 +2469,7 @@ dependencies = [
  "encode_unicode",
  "lazy_static",
  "libc",
+ "unicode-width 0.1.14",
  "windows-sys 0.52.0",
 ]
@@ -4645,7 +4646,7 @@ version = "0.2.21"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "14dbbfd5c71d70241ecf9e6f13737f7b5ce823821063188d7e46c41d371eebd5"
 dependencies = [
- "unicode-width",
+ "unicode-width 0.1.14",
 ]

 [[package]]
@@ -4701,7 +4702,7 @@ dependencies = [
 [[package]]
 name = "greptime-proto"
 version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=072ce580502e015df1a6b03a185b60309a7c2a7a#072ce580502e015df1a6b03a185b60309a7c2a7a"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=d92c9ac4e90ef4abdcf5c2eaf5a164e18ba09486#d92c9ac4e90ef4abdcf5c2eaf5a164e18ba09486"
 dependencies = [
  "prost 0.13.3",
  "serde",
@@ -5599,6 +5600,19 @@ dependencies = [
  "serde",
 ]

+[[package]]
+name = "indicatif"
+version = "0.17.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "183b3088984b400f4cfac3620d5e076c84da5364016b4f49473de574b2586235"
+dependencies = [
+ "console",
+ "number_prefix",
+ "portable-atomic",
+ "unicode-width 0.2.0",
+ "web-time 1.1.0",
+]
+
 [[package]]
 name = "inferno"
 version = "0.11.21"
@@ -5628,6 +5642,25 @@ dependencies = [
  "snafu 0.7.5",
 ]

+[[package]]
+name = "ingester"
+version = "0.13.0"
+dependencies = [
+ "clap 4.5.19",
+ "common-telemetry",
+ "common-time",
+ "datanode",
+ "meta-client",
+ "mito2",
+ "object-store",
+ "reqwest",
+ "serde",
+ "serde_json",
+ "sst-convert",
+ "tokio",
+ "toml 0.8.19",
+]
+
 [[package]]
 name = "inotify"
 version = "0.9.6"
@@ -7517,6 +7550,12 @@ dependencies = [
  "libc",
 ]

+[[package]]
+name = "number_prefix"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3"
+
 [[package]]
 name = "objc"
 version = "0.2.7"
@@ -7973,7 +8012,7 @@ version = "0.1.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d2ad9b889f1b12e0b9ee24db044b5129150d5eada288edc800f789928dc8c0e3"
 dependencies = [
- "unicode-width",
+ "unicode-width 0.1.14",
 ]

 [[package]]
@@ -8069,6 +8108,19 @@ dependencies = [
  "zstd-sys",
 ]

+[[package]]
+name = "parquet_opendal"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4140ae96f37c170f8d684a544711fabdac1d94adcbd97e8b033329bd37f40446"
+dependencies = [
+ "async-trait",
+ "bytes",
+ "futures",
+ "opendal",
+ "parquet",
+]
+
 [[package]]
 name = "parse-zoneinfo"
 version = "0.3.1"
@@ -10056,7 +10108,7 @@ dependencies = [
  "radix_trie",
  "scopeguard",
  "unicode-segmentation",
- "unicode-width",
+ "unicode-width 0.1.14",
  "utf8parse",
  "winapi",
 ]
@@ -11203,6 +11255,36 @@ dependencies = [
  "url",
 ]

+[[package]]
+name = "sst-convert"
+version = "0.13.0"
+dependencies = [
+ "api",
+ "arrow-array",
+ "async-trait",
+ "catalog",
+ "common-error",
+ "common-macro",
+ "common-meta",
+ "common-recordbatch",
+ "common-telemetry",
+ "datanode",
+ "datatypes",
+ "futures",
+ "futures-util",
+ "indicatif",
+ "meta-client",
+ "metric-engine",
+ "mito2",
+ "object-store",
+ "parquet",
+ "parquet_opendal",
+ "prost 0.13.3",
+ "snafu 0.8.5",
+ "store-api",
+ "table",
+]
+
 [[package]]
 name = "stable_deref_trait"
 version = "1.2.0"
@@ -11935,7 +12017,7 @@ version = "0.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
 dependencies = [
- "unicode-width",
+ "unicode-width 0.1.14",
 ]

 [[package]]
@@ -13038,6 +13120,12 @@ version = "0.1.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af"

+[[package]]
+name = "unicode-width"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd"
+
 [[package]]
 name = "unicode-xid"
 version = "0.2.6"
```
Cargo.toml

```diff
@@ -41,6 +41,7 @@ members = [
     "src/flow",
     "src/frontend",
     "src/index",
+    "src/ingester",
     "src/log-query",
     "src/log-store",
     "src/meta-client",
@@ -58,6 +59,7 @@ members = [
     "src/servers",
     "src/session",
     "src/sql",
+    "src/sst-convert",
     "src/store-api",
     "src/table",
     "tests-fuzz",
@@ -129,7 +131,7 @@ etcd-client = "0.14"
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "072ce580502e015df1a6b03a185b60309a7c2a7a" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "d92c9ac4e90ef4abdcf5c2eaf5a164e18ba09486" }
 hex = "0.4"
 http = "1"
 humantime = "2.1"
@@ -271,6 +273,7 @@ query = { path = "src/query" }
 servers = { path = "src/servers" }
 session = { path = "src/session" }
 sql = { path = "src/sql" }
+sst-convert = { path = "src/sst-convert" }
 store-api = { path = "src/store-api" }
 substrait = { path = "src/common/substrait" }
 table = { path = "src/table" }
```
Makefile (4 changed lines)

```diff
@@ -8,7 +8,7 @@ CARGO_BUILD_OPTS := --locked
 IMAGE_REGISTRY ?= docker.io
 IMAGE_NAMESPACE ?= greptime
 IMAGE_TAG ?= latest
-DEV_BUILDER_IMAGE_TAG ?= 2024-12-25-9d0fa5d5-20250124085746
+DEV_BUILDER_IMAGE_TAG ?= 2024-12-25-a71b93dd-20250305072908
 BUILDX_MULTI_PLATFORM_BUILD ?= false
 BUILDX_BUILDER_NAME ?= gtbuilder
 BASE_IMAGE ?= ubuntu
@@ -60,6 +60,8 @@ ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), all)
   BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64,linux/arm64 --push
+else ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), amd64)
+  BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64 --push
 else ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), arm64)
   BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/arm64 --push
 else
   BUILDX_MULTI_PLATFORM_BUILD_OPTS := -o type=docker
 endif
```
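With the new `amd64` branch, a single-platform dev-builder image can be built and pushed with an invocation like the following (a sketch; the registry, namespace, and tag values are just the Makefile defaults shown above):

```bash
# amd64-only dev-builder build, pushed via buildx.
make dev-builder \
    BASE_IMAGE=ubuntu \
    BUILDX_MULTI_PLATFORM_BUILD=amd64 \
    IMAGE_REGISTRY=docker.io \
    IMAGE_NAMESPACE=greptime \
    DEV_BUILDER_IMAGE_TAG=2024-12-25-a71b93dd-20250305072908
```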
chore.md (new file, 76 lines)
# log

## first create table

```bash
mysql --host=127.0.0.1 --port=19195 --database=public
```

```sql
CREATE DATABASE IF NOT EXISTS `cluster1`;
USE `cluster1`;
CREATE TABLE IF NOT EXISTS `app1` (
    `greptime_timestamp` TimestampNanosecond NOT NULL TIME INDEX,
    `app` STRING NULL INVERTED INDEX,
    `cluster` STRING NULL INVERTED INDEX,
    `message` STRING NULL,
    `region` STRING NULL,
    `cloud-provider` STRING NULL,
    `environment` STRING NULL,
    `product` STRING NULL,
    `sub-product` STRING NULL,
    `service` STRING NULL
) WITH (
    append_mode = 'true',
    'compaction.type' = 'twcs',
    'compaction.twcs.max_output_file_size' = '500MB',
    'compaction.twcs.max_active_window_files' = '16',
    'compaction.twcs.max_active_window_runs' = '4',
    'compaction.twcs.max_inactive_window_files' = '4',
    'compaction.twcs.max_inactive_window_runs' = '2'
);

select count(*) from app1;

SELECT * FROM app1 ORDER BY greptime_timestamp DESC LIMIT 10\G
```

## then ingest

```bash
RUST_LOG="debug" cargo run --bin=ingester -- --input-dir="/home/discord9/greptimedb/parquet_store_bk/" --parquet-dir="parquet_store/" --cfg="ingester.toml" --db-http-addr="http://127.0.0.1:4000/v1/sst/ingest_json"
```

# metrics

```bash
mysql --host=127.0.0.1 --port=19195 --database=public < public.greptime_physical_table-create-tables.sql
```

## then ingest

```bash
RUST_LOG="debug" cargo run --bin=ingester -- --input-dir="/home/discord9/greptimedb/parquet_store_bk/" --remote-write-dir="metrics_parquet/" --cfg="ingester.toml" --db-http-addr="http://127.0.0.1:4000/v1/sst/ingest_json"

# perf it
cargo build --release --bin=ingester
samply record target/release/ingester --input-dir="/home/discord9/greptimedb/parquet_store_bk/" --remote-write-dir="metrics_parquet/" --cfg="ingester.toml" --db-http-addr="http://127.0.0.1:4000/v1/sst/ingest_json"
```

## check data

```sql
select count(*) from greptime_physical_table;
+----------+
| count(*) |
+----------+
|    36200 |
+----------+
1 row in set (0.06 sec)

select count(*) from storage_operation_errors_total;
+----------+
| count(*) |
+----------+
|       10 |
+----------+
1 row in set (0.03 sec)
```

# with oss

The steps are the same; the only difference is the storage config in `ingester.toml`.
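For reference, the `[storage]` section for OSS might look like the sketch below. The field names follow GreptimeDB's documented object-storage options; all values are placeholders, and this exact block is not part of the diff:

```toml
[storage]
data_home = "/tmp/greptimedb-cluster/datanode0"
type = "Oss"
bucket = "my-bucket"            # placeholder
root = "/greptimedb"            # placeholder
access_key_id = "<key id>"      # placeholder
access_key_secret = "<secret>"  # placeholder
endpoint = "<oss endpoint>"     # placeholder
```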
File diff suppressed because it is too large.
ingester.toml (new file, 35 lines)
```toml
## The metasrv client options.
[meta_client]
## The addresses of the metasrv.
metasrv_addrs = ["127.0.0.1:3002", "127.0.0.1:3003"]

## Operation timeout.
timeout = "3s"

## Heartbeat timeout.
heartbeat_timeout = "500ms"

## DDL timeout.
ddl_timeout = "10s"

## Connect server timeout.
connect_timeout = "1s"

## `TCP_NODELAY` option for accepted connections.
tcp_nodelay = true

## The configuration about the cache of the metadata.
metadata_cache_max_capacity = 100000

## TTL of the metadata cache.
metadata_cache_ttl = "10m"

## TTI of the metadata cache.
metadata_cache_tti = "5m"

## The data storage options.
[storage]
## The working home directory.
data_home = "/tmp/greptimedb-cluster/datanode0"
type = "File"

[mito]
```
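The `ingester` crate lists `serde` and `toml` among its dependencies in the Cargo.lock hunk above, so the config is presumably deserialized at startup. A minimal sketch of loading such a file follows; the struct and field names here are illustrative, not the crate's actual types:

```rust
use serde::Deserialize;

// Illustrative shapes only; unknown TOML fields are ignored by default.
#[derive(Debug, Deserialize)]
struct MetaClientConfig {
    metasrv_addrs: Vec<String>,
    timeout: String,
}

#[derive(Debug, Deserialize)]
struct StorageConfig {
    data_home: String,
    #[serde(rename = "type")]
    kind: String,
}

#[derive(Debug, Deserialize)]
struct IngesterConfig {
    meta_client: MetaClientConfig,
    storage: StorageConfig,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Parse ingester.toml into the typed config.
    let raw = std::fs::read_to_string("ingester.toml")?;
    let cfg: IngesterConfig = toml::from_str(&raw)?;
    println!("metasrv endpoints: {:?}", cfg.meta_client.metasrv_addrs);
    println!("storage: {} at {}", cfg.storage.kind, cfg.storage.data_home);
    Ok(())
}
```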
The remaining hunks remove the `cluster_id` plumbing from the CLI start commands, the `common-meta` cluster keys, and the DDL procedures.

```diff
@@ -287,7 +287,6 @@ impl StartCommand {
             .await
             .context(StartDatanodeSnafu)?;

-        let cluster_id = 0; // TODO(hl): read from config
         let member_id = opts
             .node_id
             .context(MissingConfigSnafu { msg: "'node_id'" })?;
@@ -296,13 +295,10 @@ impl StartCommand {
                 msg: "'meta_client_options'",
             })?;

-        let meta_client = meta_client::create_meta_client(
-            cluster_id,
-            MetaClientType::Datanode { member_id },
-            meta_config,
-        )
-        .await
-        .context(MetaClientInitSnafu)?;
+        let meta_client =
+            meta_client::create_meta_client(MetaClientType::Datanode { member_id }, meta_config)
+                .await
+                .context(MetaClientInitSnafu)?;

         let meta_backend = Arc::new(MetaKvBackend {
             client: meta_client.clone(),
```
```diff
@@ -241,9 +241,6 @@ impl StartCommand {
         let mut opts = opts.component;
         opts.grpc.detect_server_addr();

-        // TODO(discord9): make it not optionale after cluster id is required
-        let cluster_id = opts.cluster_id.unwrap_or(0);
-
         let member_id = opts
             .node_id
             .context(MissingConfigSnafu { msg: "'node_id'" })?;
@@ -252,13 +249,10 @@ impl StartCommand {
                 msg: "'meta_client_options'",
             })?;

-        let meta_client = meta_client::create_meta_client(
-            cluster_id,
-            MetaClientType::Flownode { member_id },
-            meta_config,
-        )
-        .await
-        .context(MetaClientInitSnafu)?;
+        let meta_client =
+            meta_client::create_meta_client(MetaClientType::Flownode { member_id }, meta_config)
+                .await
+                .context(MetaClientInitSnafu)?;

         let cache_max_capacity = meta_config.metadata_cache_max_capacity;
         let cache_ttl = meta_config.metadata_cache_ttl;
```
```diff
@@ -295,14 +295,10 @@ impl StartCommand {
         let cache_ttl = meta_client_options.metadata_cache_ttl;
         let cache_tti = meta_client_options.metadata_cache_tti;

-        let cluster_id = 0; // (TODO: jeremy): It is currently a reserved field and has not been enabled.
-        let meta_client = meta_client::create_meta_client(
-            cluster_id,
-            MetaClientType::Frontend,
-            meta_client_options,
-        )
-        .await
-        .context(MetaClientInitSnafu)?;
+        let meta_client =
+            meta_client::create_meta_client(MetaClientType::Frontend, meta_client_options)
+                .await
+                .context(MetaClientInitSnafu)?;

         // TODO(discord9): add helper function to ease the creation of cache registry&such
         let cached_meta_backend =
```
```diff
@@ -28,7 +28,6 @@ use crate::error::{
     InvalidRoleSnafu, ParseNumSnafu, Result,
 };
 use crate::peer::Peer;
-use crate::ClusterId;

 const CLUSTER_NODE_INFO_PREFIX: &str = "__meta_cluster_node_info";

@@ -56,12 +55,9 @@ pub trait ClusterInfo {
     // TODO(jeremy): Other info, like region status, etc.
 }

-/// The key of [NodeInfo] in the storage. The format is `__meta_cluster_node_info-{cluster_id}-{role}-{node_id}`.
+/// The key of [NodeInfo] in the storage. The format is `__meta_cluster_node_info-0-{role}-{node_id}`.
 #[derive(Debug, Clone, Copy, Eq, Hash, PartialEq, Serialize, Deserialize)]
 pub struct NodeInfoKey {
-    /// The cluster id.
-    // todo(hl): remove cluster_id as it is not assigned anywhere.
-    pub cluster_id: ClusterId,
     /// The role of the node. It can be `[Role::Datanode]` or `[Role::Frontend]`.
     pub role: Role,
     /// The node id.
@@ -84,24 +80,15 @@ impl NodeInfoKey {
             _ => peer.id,
         };

-        Some(NodeInfoKey {
-            cluster_id: header.cluster_id,
-            role,
-            node_id,
-        })
+        Some(NodeInfoKey { role, node_id })
     }

-    pub fn key_prefix_with_cluster_id(cluster_id: u64) -> String {
-        format!("{}-{}-", CLUSTER_NODE_INFO_PREFIX, cluster_id)
+    pub fn key_prefix() -> String {
+        format!("{}-0-", CLUSTER_NODE_INFO_PREFIX)
     }

-    pub fn key_prefix_with_role(cluster_id: ClusterId, role: Role) -> String {
-        format!(
-            "{}-{}-{}-",
-            CLUSTER_NODE_INFO_PREFIX,
-            cluster_id,
-            i32::from(role)
-        )
+    pub fn key_prefix_with_role(role: Role) -> String {
+        format!("{}-0-{}-", CLUSTER_NODE_INFO_PREFIX, i32::from(role))
     }
 }

@@ -193,15 +180,10 @@ impl FromStr for NodeInfoKey {
         let caps = CLUSTER_NODE_INFO_PREFIX_PATTERN
             .captures(key)
             .context(InvalidNodeInfoKeySnafu { key })?;

         ensure!(caps.len() == 4, InvalidNodeInfoKeySnafu { key });

-        let cluster_id = caps[1].to_string();
         let role = caps[2].to_string();
         let node_id = caps[3].to_string();
-        let cluster_id: u64 = cluster_id.parse().context(ParseNumSnafu {
-            err_msg: format!("invalid cluster_id: {cluster_id}"),
-        })?;
         let role: i32 = role.parse().context(ParseNumSnafu {
             err_msg: format!("invalid role {role}"),
         })?;
@@ -210,11 +192,7 @@ impl FromStr for NodeInfoKey {
             err_msg: format!("invalid node_id: {node_id}"),
         })?;

-        Ok(Self {
-            cluster_id,
-            role,
-            node_id,
-        })
+        Ok(Self { role, node_id })
     }
 }

@@ -233,9 +211,8 @@ impl TryFrom<Vec<u8>> for NodeInfoKey {
 impl From<&NodeInfoKey> for Vec<u8> {
     fn from(key: &NodeInfoKey) -> Self {
         format!(
-            "{}-{}-{}-{}",
+            "{}-0-{}-{}",
             CLUSTER_NODE_INFO_PREFIX,
-            key.cluster_id,
             i32::from(key.role),
             key.node_id
         )
@@ -308,7 +285,6 @@ mod tests {
     #[test]
     fn test_node_info_key_round_trip() {
         let key = NodeInfoKey {
-            cluster_id: 1,
             role: Datanode,
             node_id: 2,
         };
@@ -316,7 +292,6 @@ mod tests {
         let key_bytes: Vec<u8> = (&key).into();
         let new_key: NodeInfoKey = key_bytes.try_into().unwrap();

-        assert_eq!(1, new_key.cluster_id);
         assert_eq!(Datanode, new_key.role);
         assert_eq!(2, new_key.node_id);
     }
@@ -362,11 +337,11 @@ mod tests {

     #[test]
     fn test_node_info_key_prefix() {
-        let prefix = NodeInfoKey::key_prefix_with_cluster_id(1);
-        assert_eq!(prefix, "__meta_cluster_node_info-1-");
+        let prefix = NodeInfoKey::key_prefix();
+        assert_eq!(prefix, "__meta_cluster_node_info-0-");

-        let prefix = NodeInfoKey::key_prefix_with_role(2, Frontend);
-        assert_eq!(prefix, "__meta_cluster_node_info-2-1-");
+        let prefix = NodeInfoKey::key_prefix_with_role(Frontend);
+        assert_eq!(prefix, "__meta_cluster_node_info-0-1-");
     }

     #[test]
```
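The net effect of the key changes above: the struct drops its `cluster_id` field, but the on-disk format keeps a literal `0` in the segment where the cluster id used to be, so existing keys remain parseable. A standalone sketch of that encoding, simplified from the types in the diff (`role` is reduced to a plain `i32` here):

```rust
// Simplified sketch of the new NodeInfoKey encoding.
const CLUSTER_NODE_INFO_PREFIX: &str = "__meta_cluster_node_info";

struct NodeInfoKey {
    role: i32,
    node_id: u64,
}

impl NodeInfoKey {
    fn encode(&self) -> String {
        // The second segment is a hard-coded `0` where cluster_id used to go.
        format!("{}-0-{}-{}", CLUSTER_NODE_INFO_PREFIX, self.role, self.node_id)
    }
}

fn main() {
    let key = NodeInfoKey { role: 1, node_id: 2 };
    assert_eq!(key.encode(), "__meta_cluster_node_info-0-1-2");
}
```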
```diff
@@ -25,8 +25,8 @@ use store_api::region_engine::{RegionRole, RegionStatistic};
 use store_api::storage::RegionId;
 use table::metadata::TableId;

+use crate::error;
 use crate::error::Result;
-use crate::{error, ClusterId};

 pub(crate) const DATANODE_LEASE_PREFIX: &str = "__meta_datanode_lease";
 const INACTIVE_REGION_PREFIX: &str = "__meta_inactive_region";
@@ -48,11 +48,10 @@ lazy_static! {

 /// The key of the datanode stat in the storage.
 ///
-/// The format is `__meta_datanode_stat-{cluster_id}-{node_id}`.
+/// The format is `__meta_datanode_stat-0-{node_id}`.
 #[derive(Debug, Clone, Default, Serialize, Deserialize)]
 pub struct Stat {
     pub timestamp_millis: i64,
-    pub cluster_id: ClusterId,
     // The datanode Id.
     pub id: u64,
     // The datanode address.
@@ -102,10 +101,7 @@ impl Stat {
     }

     pub fn stat_key(&self) -> DatanodeStatKey {
-        DatanodeStatKey {
-            cluster_id: self.cluster_id,
-            node_id: self.id,
-        }
+        DatanodeStatKey { node_id: self.id }
     }

     /// Returns a tuple array containing [RegionId] and [RegionRole].
@@ -145,7 +141,7 @@ impl TryFrom<&HeartbeatRequest> for Stat {
         } = value;

         match (header, peer) {
-            (Some(header), Some(peer)) => {
+            (Some(_header), Some(peer)) => {
                 let region_stats = region_stats
                     .iter()
                     .map(RegionStat::from)
@@ -153,7 +149,6 @@ impl TryFrom<&HeartbeatRequest> for Stat {

                 Ok(Self {
                     timestamp_millis: time_util::current_time_millis(),
-                    cluster_id: header.cluster_id,
                     // datanode id
                     id: peer.id,
                     // datanode address
@@ -196,32 +191,24 @@ impl From<&api::v1::meta::RegionStat> for RegionStat {

 /// The key of the datanode stat in the memory store.
 ///
-/// The format is `__meta_datanode_stat-{cluster_id}-{node_id}`.
+/// The format is `__meta_datanode_stat-0-{node_id}`.
 #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
 pub struct DatanodeStatKey {
-    pub cluster_id: ClusterId,
     pub node_id: u64,
 }

 impl DatanodeStatKey {
     /// The key prefix.
     pub fn prefix_key() -> Vec<u8> {
-        format!("{DATANODE_STAT_PREFIX}-").into_bytes()
-    }
-
-    /// The key prefix with the cluster id.
-    pub fn key_prefix_with_cluster_id(cluster_id: ClusterId) -> String {
-        format!("{DATANODE_STAT_PREFIX}-{cluster_id}-")
+        // todo(hl): remove cluster id in prefix
+        format!("{DATANODE_STAT_PREFIX}-0-").into_bytes()
     }
 }

 impl From<DatanodeStatKey> for Vec<u8> {
     fn from(value: DatanodeStatKey) -> Self {
-        format!(
-            "{}-{}-{}",
-            DATANODE_STAT_PREFIX, value.cluster_id, value.node_id
-        )
-        .into_bytes()
+        // todo(hl): remove cluster id in prefix
+        format!("{}-0-{}", DATANODE_STAT_PREFIX, value.node_id).into_bytes()
     }
 }

@@ -234,20 +221,12 @@ impl FromStr for DatanodeStatKey {
             .context(error::InvalidStatKeySnafu { key })?;

         ensure!(caps.len() == 3, error::InvalidStatKeySnafu { key });

-        let cluster_id = caps[1].to_string();
         let node_id = caps[2].to_string();
-        let cluster_id: u64 = cluster_id.parse().context(error::ParseNumSnafu {
-            err_msg: format!("invalid cluster_id: {cluster_id}"),
-        })?;
         let node_id: u64 = node_id.parse().context(error::ParseNumSnafu {
             err_msg: format!("invalid node_id: {node_id}"),
         })?;

-        Ok(Self {
-            cluster_id,
-            node_id,
-        })
+        Ok(Self { node_id })
     }
 }

@@ -321,7 +300,6 @@ mod tests {
     #[test]
     fn test_stat_key() {
         let stat = Stat {
-            cluster_id: 3,
             id: 101,
             region_num: 10,
             ..Default::default()
@@ -329,14 +307,12 @@ mod tests {

         let stat_key = stat.stat_key();

-        assert_eq!(3, stat_key.cluster_id);
         assert_eq!(101, stat_key.node_id);
     }

     #[test]
     fn test_stat_val_round_trip() {
         let stat = Stat {
-            cluster_id: 0,
             id: 101,
             region_num: 100,
             ..Default::default()
@@ -351,7 +327,6 @@ mod tests {
         assert_eq!(1, stats.len());

         let stat = stats.first().unwrap();
-        assert_eq!(0, stat.cluster_id);
         assert_eq!(101, stat.id);
         assert_eq!(100, stat.region_num);
     }
```
```diff
@@ -30,7 +30,7 @@ use crate::node_manager::NodeManagerRef;
 use crate::region_keeper::MemoryRegionKeeperRef;
 use crate::rpc::ddl::{SubmitDdlTaskRequest, SubmitDdlTaskResponse};
 use crate::rpc::procedure::{MigrateRegionRequest, MigrateRegionResponse, ProcedureStateResponse};
-use crate::{ClusterId, DatanodeId};
+use crate::DatanodeId;

 pub mod alter_database;
 pub mod alter_logical_tables;
@@ -57,7 +57,6 @@ pub mod utils;

 #[derive(Debug, Default)]
 pub struct ExecutorContext {
-    pub cluster_id: Option<u64>,
     pub tracing_context: Option<W3cTrace>,
 }

@@ -90,10 +89,6 @@ pub trait ProcedureExecutor: Send + Sync {

 pub type ProcedureExecutorRef = Arc<dyn ProcedureExecutor>;

-pub struct TableMetadataAllocatorContext {
-    pub cluster_id: ClusterId,
-}
-
 /// Metadata allocated to a table.
 #[derive(Default)]
 pub struct TableMetadata {
@@ -108,7 +103,7 @@ pub struct TableMetadata {

 pub type RegionFailureDetectorControllerRef = Arc<dyn RegionFailureDetectorController>;

-pub type DetectingRegion = (ClusterId, DatanodeId, RegionId);
+pub type DetectingRegion = (DatanodeId, RegionId);

 /// Used for actively registering Region failure detectors.
 ///
```
```diff
@@ -30,7 +30,6 @@ use crate::key::DeserializedValueWithBytes;
 use crate::lock_key::{CatalogLock, SchemaLock};
 use crate::rpc::ddl::UnsetDatabaseOption::{self};
 use crate::rpc::ddl::{AlterDatabaseKind, AlterDatabaseTask, SetDatabaseOption};
-use crate::ClusterId;

 pub struct AlterDatabaseProcedure {
     pub context: DdlContext,
@@ -65,14 +64,10 @@ fn build_new_schema_value(
 impl AlterDatabaseProcedure {
     pub const TYPE_NAME: &'static str = "metasrv-procedure::AlterDatabase";

-    pub fn new(
-        cluster_id: ClusterId,
-        task: AlterDatabaseTask,
-        context: DdlContext,
-    ) -> Result<Self> {
+    pub fn new(task: AlterDatabaseTask, context: DdlContext) -> Result<Self> {
         Ok(Self {
             context,
-            data: AlterDatabaseData::new(task, cluster_id)?,
+            data: AlterDatabaseData::new(task)?,
         })
     }
@@ -183,7 +178,6 @@ enum AlterDatabaseState {
 /// The data of alter database procedure.
 #[derive(Debug, Serialize, Deserialize)]
 pub struct AlterDatabaseData {
-    cluster_id: ClusterId,
     state: AlterDatabaseState,
     kind: AlterDatabaseKind,
     catalog_name: String,
@@ -192,9 +186,8 @@ pub struct AlterDatabaseData {
 }

 impl AlterDatabaseData {
-    pub fn new(task: AlterDatabaseTask, cluster_id: ClusterId) -> Result<Self> {
+    pub fn new(task: AlterDatabaseTask) -> Result<Self> {
         Ok(Self {
-            cluster_id,
             state: AlterDatabaseState::Prepare,
             kind: AlterDatabaseKind::try_from(task.alter_expr.kind.unwrap())?,
             catalog_name: task.alter_expr.catalog_name,
```
```diff
@@ -37,9 +37,9 @@ use crate::key::table_info::TableInfoValue;
 use crate::key::table_route::PhysicalTableRouteValue;
 use crate::key::DeserializedValueWithBytes;
 use crate::lock_key::{CatalogLock, SchemaLock, TableLock};
+use crate::metrics;
 use crate::rpc::ddl::AlterTableTask;
 use crate::rpc::router::find_leaders;
-use crate::{metrics, ClusterId};

 pub struct AlterLogicalTablesProcedure {
     pub context: DdlContext,
@@ -50,7 +50,6 @@ impl AlterLogicalTablesProcedure {
     pub const TYPE_NAME: &'static str = "metasrv-procedure::AlterLogicalTables";

     pub fn new(
-        cluster_id: ClusterId,
         tasks: Vec<AlterTableTask>,
         physical_table_id: TableId,
         context: DdlContext,
@@ -58,7 +57,6 @@ impl AlterLogicalTablesProcedure {
         Self {
             context,
             data: AlterTablesData {
-                cluster_id,
                 state: AlterTablesState::Prepare,
                 tasks,
                 table_info_values: vec![],
@@ -240,7 +238,6 @@ impl Procedure for AlterLogicalTablesProcedure {

 #[derive(Debug, Serialize, Deserialize)]
 pub struct AlterTablesData {
-    cluster_id: ClusterId,
     state: AlterTablesState,
     tasks: Vec<AlterTableTask>,
     /// Table info values before the alter operation.
```
```diff
@@ -45,9 +45,9 @@ use crate::instruction::CacheIdent;
 use crate::key::table_info::TableInfoValue;
 use crate::key::{DeserializedValueWithBytes, RegionDistribution};
 use crate::lock_key::{CatalogLock, SchemaLock, TableLock, TableNameLock};
+use crate::metrics;
 use crate::rpc::ddl::AlterTableTask;
 use crate::rpc::router::{find_leader_regions, find_leaders, region_distribution};
-use crate::{metrics, ClusterId};

 /// The alter table procedure
 pub struct AlterTableProcedure {
@@ -64,16 +64,11 @@ pub struct AlterTableProcedure {
 impl AlterTableProcedure {
     pub const TYPE_NAME: &'static str = "metasrv-procedure::AlterTable";

-    pub fn new(
-        cluster_id: ClusterId,
-        table_id: TableId,
-        task: AlterTableTask,
-        context: DdlContext,
-    ) -> Result<Self> {
+    pub fn new(table_id: TableId, task: AlterTableTask, context: DdlContext) -> Result<Self> {
         task.validate()?;
         Ok(Self {
             context,
-            data: AlterTableData::new(task, table_id, cluster_id),
+            data: AlterTableData::new(task, table_id),
             new_table_info: None,
         })
     }
@@ -307,7 +302,6 @@ enum AlterTableState {
 // The serialized data of alter table.
 #[derive(Debug, Serialize, Deserialize)]
 pub struct AlterTableData {
-    cluster_id: ClusterId,
     state: AlterTableState,
     task: AlterTableTask,
     table_id: TableId,
@@ -318,12 +312,11 @@ pub struct AlterTableData {
 }

 impl AlterTableData {
-    pub fn new(task: AlterTableTask, table_id: TableId, cluster_id: u64) -> Self {
+    pub fn new(task: AlterTableTask, table_id: TableId) -> Self {
         Self {
             state: AlterTableState::Prepare,
             task,
             table_id,
-            cluster_id,
             table_info_value: None,
             region_distribution: None,
         }
```
```diff
@@ -167,10 +167,9 @@ mod tests {
     use crate::test_util::{new_ddl_context, MockDatanodeManager};

     /// Prepares a region with schema `[ts: Timestamp, host: Tag, cpu: Field]`.
-    async fn prepare_ddl_context() -> (DdlContext, u64, TableId, RegionId, String) {
+    async fn prepare_ddl_context() -> (DdlContext, TableId, RegionId, String) {
         let datanode_manager = Arc::new(MockDatanodeManager::new(()));
         let ddl_context = new_ddl_context(datanode_manager);
-        let cluster_id = 1;
         let table_id = 1024;
         let region_id = RegionId::new(table_id, 1);
         let table_name = "foo";
@@ -225,19 +224,12 @@ mod tests {
         )
         .await
         .unwrap();
-        (
-            ddl_context,
-            cluster_id,
-            table_id,
-            region_id,
-            table_name.to_string(),
-        )
+        (ddl_context, table_id, region_id, table_name.to_string())
     }

     #[tokio::test]
     async fn test_make_alter_region_request() {
-        let (ddl_context, cluster_id, table_id, region_id, table_name) =
-            prepare_ddl_context().await;
+        let (ddl_context, table_id, region_id, table_name) = prepare_ddl_context().await;

         let task = AlterTableTask {
             alter_table: AlterTableExpr {
@@ -265,8 +257,7 @@ mod tests {
             },
         };

-        let mut procedure =
-            AlterTableProcedure::new(cluster_id, table_id, task, ddl_context).unwrap();
+        let mut procedure = AlterTableProcedure::new(table_id, task, ddl_context).unwrap();
         procedure.on_prepare().await.unwrap();
         let alter_kind = procedure.make_region_alter_kind().unwrap();
         let Some(Body::Alter(alter_region_request)) = procedure
@@ -307,8 +298,7 @@ mod tests {

     #[tokio::test]
     async fn test_make_alter_column_type_region_request() {
-        let (ddl_context, cluster_id, table_id, region_id, table_name) =
-            prepare_ddl_context().await;
+        let (ddl_context, table_id, region_id, table_name) = prepare_ddl_context().await;

         let task = AlterTableTask {
             alter_table: AlterTableExpr {
@@ -325,8 +315,7 @@ mod tests {
             },
         };

-        let mut procedure =
-            AlterTableProcedure::new(cluster_id, table_id, task, ddl_context).unwrap();
+        let mut procedure = AlterTableProcedure::new(table_id, task, ddl_context).unwrap();
         procedure.on_prepare().await.unwrap();
         let alter_kind = procedure.make_region_alter_kind().unwrap();
         let Some(Body::Alter(alter_region_request)) = procedure
```
```diff
@@ -46,9 +46,9 @@ use crate::key::flow::flow_route::FlowRouteValue;
 use crate::key::table_name::TableNameKey;
 use crate::key::{DeserializedValueWithBytes, FlowId, FlowPartitionId};
 use crate::lock_key::{CatalogLock, FlowNameLock, TableNameLock};
+use crate::metrics;
 use crate::peer::Peer;
 use crate::rpc::ddl::{CreateFlowTask, QueryContext};
-use crate::{metrics, ClusterId};

 /// The procedure of flow creation.
 pub struct CreateFlowProcedure {
@@ -60,16 +60,10 @@ impl CreateFlowProcedure {
     pub const TYPE_NAME: &'static str = "metasrv-procedure::CreateFlow";

     /// Returns a new [CreateFlowProcedure].
-    pub fn new(
-        cluster_id: ClusterId,
-        task: CreateFlowTask,
-        query_context: QueryContext,
-        context: DdlContext,
-    ) -> Self {
+    pub fn new(task: CreateFlowTask, query_context: QueryContext, context: DdlContext) -> Self {
         Self {
             context,
             data: CreateFlowData {
-                cluster_id,
                 task,
                 flow_id: None,
                 peers: vec![],
@@ -363,7 +357,6 @@ impl fmt::Display for FlowType {
 /// The serializable data.
 #[derive(Debug, Serialize, Deserialize)]
 pub struct CreateFlowData {
-    pub(crate) cluster_id: ClusterId,
     pub(crate) state: CreateFlowState,
     pub(crate) task: CreateFlowTask,
     pub(crate) flow_id: Option<FlowId>,
@@ -23,11 +23,10 @@ impl CreateFlowProcedure {
     pub(crate) async fn allocate_flow_id(&mut self) -> Result<()> {
         //TODO(weny, ruihang): We doesn't support the partitions. It's always be 1, now.
         let partitions = 1;
-        let cluster_id = self.data.cluster_id;
         let (flow_id, peers) = self
             .context
             .flow_metadata_allocator
-            .create(cluster_id, partitions)
+            .create(partitions)
             .await?;
         self.data.flow_id = Some(flow_id);
         self.data.peers = peers;
```
```diff
@@ -36,9 +36,9 @@ use crate::ddl::DdlContext;
 use crate::error::{DecodeJsonSnafu, MetadataCorruptionSnafu, Result};
 use crate::key::table_route::TableRouteValue;
 use crate::lock_key::{CatalogLock, SchemaLock, TableLock, TableNameLock};
+use crate::metrics;
 use crate::rpc::ddl::CreateTableTask;
 use crate::rpc::router::{find_leaders, RegionRoute};
-use crate::{metrics, ClusterId};

 pub struct CreateLogicalTablesProcedure {
     pub context: DdlContext,
@@ -49,7 +49,6 @@ impl CreateLogicalTablesProcedure {
     pub const TYPE_NAME: &'static str = "metasrv-procedure::CreateLogicalTables";

     pub fn new(
-        cluster_id: ClusterId,
         tasks: Vec<CreateTableTask>,
         physical_table_id: TableId,
         context: DdlContext,
@@ -57,7 +56,6 @@ impl CreateLogicalTablesProcedure {
         Self {
             context,
             data: CreateTablesData {
-                cluster_id,
                 state: CreateTablesState::Prepare,
                 tasks,
                 table_ids_already_exists: vec![],
@@ -245,7 +243,6 @@ impl Procedure for CreateLogicalTablesProcedure {

 #[derive(Debug, Serialize, Deserialize)]
 pub struct CreateTablesData {
-    cluster_id: ClusterId,
     state: CreateTablesState,
     tasks: Vec<CreateTableTask>,
     table_ids_already_exists: Vec<Option<TableId>>,
@@ -37,17 +37,17 @@ use crate::ddl::utils::{
     add_peer_context_if_needed, convert_region_routes_to_detecting_regions, handle_retry_error,
     region_storage_path,
 };
-use crate::ddl::{DdlContext, TableMetadata, TableMetadataAllocatorContext};
+use crate::ddl::{DdlContext, TableMetadata};
 use crate::error::{self, Result};
 use crate::key::table_name::TableNameKey;
 use crate::key::table_route::{PhysicalTableRouteValue, TableRouteValue};
 use crate::lock_key::{CatalogLock, SchemaLock, TableNameLock};
+use crate::metrics;
 use crate::region_keeper::OperatingRegionGuard;
 use crate::rpc::ddl::CreateTableTask;
 use crate::rpc::router::{
     find_leader_regions, find_leaders, operating_leader_regions, RegionRoute,
 };
-use crate::{metrics, ClusterId};

 pub struct CreateTableProcedure {
     pub context: DdlContext,
     pub creator: TableCreator,
@@ -56,10 +56,10 @@ pub struct CreateTableProcedure {
 impl CreateTableProcedure {
     pub const TYPE_NAME: &'static str = "metasrv-procedure::CreateTable";

-    pub fn new(cluster_id: ClusterId, task: CreateTableTask, context: DdlContext) -> Self {
+    pub fn new(task: CreateTableTask, context: DdlContext) -> Self {
         Self {
             context,
-            creator: TableCreator::new(cluster_id, task),
+            creator: TableCreator::new(task),
         }
     }
@@ -154,12 +154,7 @@ impl CreateTableProcedure {
         } = self
             .context
             .table_metadata_allocator
-            .create(
-                &TableMetadataAllocatorContext {
-                    cluster_id: self.creator.data.cluster_id,
-                },
-                &self.creator.data.task,
-            )
+            .create(&self.creator.data.task)
             .await?;
         self.creator
             .set_allocated_metadata(table_id, table_route, region_wal_options);
@@ -268,7 +263,6 @@ impl CreateTableProcedure {
     /// - Failed to create table metadata.
     async fn on_create_metadata(&mut self) -> Result<Status> {
         let table_id = self.table_id();
-        let cluster_id = self.creator.data.cluster_id;
         let manager = &self.context.table_metadata_manager;

         let raw_table_info = self.table_info().clone();
@@ -276,10 +270,8 @@ impl CreateTableProcedure {
         let region_wal_options = self.region_wal_options()?.clone();
         // Safety: the table_route must be allocated.
         let physical_table_route = self.table_route()?.clone();
-        let detecting_regions = convert_region_routes_to_detecting_regions(
-            cluster_id,
-            &physical_table_route.region_routes,
-        );
+        let detecting_regions =
+            convert_region_routes_to_detecting_regions(&physical_table_route.region_routes);
         let table_route = TableRouteValue::Physical(physical_table_route);
         manager
             .create_table_metadata(raw_table_info, table_route, region_wal_options)
@@ -351,11 +343,10 @@ pub struct TableCreator {
 }

 impl TableCreator {
-    pub fn new(cluster_id: ClusterId, task: CreateTableTask) -> Self {
+    pub fn new(task: CreateTableTask) -> Self {
         Self {
             data: CreateTableData {
                 state: CreateTableState::Prepare,
-                cluster_id,
                 task,
                 table_route: None,
                 region_wal_options: None,
@@ -421,7 +412,6 @@ pub struct CreateTableData {
     table_route: Option<PhysicalTableRouteValue>,
     /// None stands for not allocated yet.
     pub region_wal_options: Option<HashMap<RegionNumber, String>>,
-    pub cluster_id: ClusterId,
 }

 impl CreateTableData {
```
```diff
@@ -24,13 +24,13 @@ use table::table_reference::TableReference;

 use crate::cache_invalidator::Context;
 use crate::ddl::utils::handle_retry_error;
-use crate::ddl::{DdlContext, TableMetadata, TableMetadataAllocatorContext};
+use crate::ddl::{DdlContext, TableMetadata};
 use crate::error::{self, Result};
 use crate::instruction::CacheIdent;
 use crate::key::table_name::TableNameKey;
 use crate::lock_key::{CatalogLock, SchemaLock, TableNameLock};
+use crate::metrics;
 use crate::rpc::ddl::CreateViewTask;
-use crate::{metrics, ClusterId};

 // The procedure to execute `[CreateViewTask]`.
 pub struct CreateViewProcedure {
@@ -41,12 +41,11 @@ pub struct CreateViewProcedure {
 impl CreateViewProcedure {
     pub const TYPE_NAME: &'static str = "metasrv-procedure::CreateView";

-    pub fn new(cluster_id: ClusterId, task: CreateViewTask, context: DdlContext) -> Self {
+    pub fn new(task: CreateViewTask, context: DdlContext) -> Self {
         Self {
             context,
             data: CreateViewData {
                 state: CreateViewState::Prepare,
-                cluster_id,
                 task,
                 need_update: false,
             },
@@ -144,12 +143,7 @@ impl CreateViewProcedure {
             let TableMetadata { table_id, .. } = self
                 .context
                 .table_metadata_allocator
-                .create_view(
-                    &TableMetadataAllocatorContext {
-                        cluster_id: self.data.cluster_id,
-                    },
-                    &None,
-                )
+                .create_view(&None)
                 .await?;
             self.data.set_allocated_metadata(table_id, false);
         }
@@ -285,7 +279,6 @@ pub enum CreateViewState {
 pub struct CreateViewData {
     pub state: CreateViewState,
     pub task: CreateViewTask,
-    pub cluster_id: ClusterId,
     /// Whether to update the view info.
     pub need_update: bool,
 }
```
```diff
@@ -35,7 +35,6 @@ use crate::ddl::DdlContext;
 use crate::error::Result;
 use crate::key::table_name::TableNameValue;
 use crate::lock_key::{CatalogLock, SchemaLock};
-use crate::ClusterId;

 pub struct DropDatabaseProcedure {
     /// The context of procedure runtime.
@@ -54,7 +53,6 @@ pub(crate) enum DropTableTarget {

 /// Context of [DropDatabaseProcedure] execution.
 pub(crate) struct DropDatabaseContext {
-    cluster_id: ClusterId,
     catalog: String,
     schema: String,
     drop_if_exists: bool,
@@ -87,7 +85,6 @@ impl DropDatabaseProcedure {
         Self {
             runtime_context: context,
             context: DropDatabaseContext {
-                cluster_id: 0,
                 catalog,
                 schema,
                 drop_if_exists,
@@ -108,7 +105,6 @@ impl DropDatabaseProcedure {
         Ok(Self {
             runtime_context,
             context: DropDatabaseContext {
-                cluster_id: 0,
                 catalog,
                 schema,
                 drop_if_exists,
```
```diff
@@ -217,11 +217,10 @@ mod tests {
     async fn test_next_without_logical_tables() {
         let node_manager = Arc::new(MockDatanodeManager::new(()));
         let ddl_context = new_ddl_context(node_manager);
-        create_physical_table(&ddl_context, 0, "phy").await;
+        create_physical_table(&ddl_context, "phy").await;
         // It always starts from Logical
         let mut state = DropDatabaseCursor::new(DropTableTarget::Logical);
         let mut ctx = DropDatabaseContext {
-            cluster_id: 0,
             catalog: DEFAULT_CATALOG_NAME.to_string(),
             schema: DEFAULT_SCHEMA_NAME.to_string(),
             drop_if_exists: false,
@@ -252,12 +251,11 @@ mod tests {
     async fn test_next_with_logical_tables() {
         let node_manager = Arc::new(MockDatanodeManager::new(()));
         let ddl_context = new_ddl_context(node_manager);
-        let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await;
-        create_logical_table(ddl_context.clone(), 0, physical_table_id, "metric_0").await;
+        let physical_table_id = create_physical_table(&ddl_context, "phy").await;
+        create_logical_table(ddl_context.clone(), physical_table_id, "metric_0").await;
         // It always starts from Logical
         let mut state = DropDatabaseCursor::new(DropTableTarget::Logical);
         let mut ctx = DropDatabaseContext {
-            cluster_id: 0,
             catalog: DEFAULT_CATALOG_NAME.to_string(),
             schema: DEFAULT_SCHEMA_NAME.to_string(),
             drop_if_exists: false,
@@ -286,7 +284,6 @@ mod tests {
         let ddl_context = new_ddl_context(node_manager);
         let mut state = DropDatabaseCursor::new(DropTableTarget::Physical);
         let mut ctx = DropDatabaseContext {
-            cluster_id: 0,
             catalog: DEFAULT_CATALOG_NAME.to_string(),
             schema: DEFAULT_SCHEMA_NAME.to_string(),
             drop_if_exists: false,
```
```diff
@@ -98,11 +98,10 @@ impl State for DropDatabaseExecutor {
     async fn next(
         &mut self,
         ddl_ctx: &DdlContext,
-        ctx: &mut DropDatabaseContext,
+        _ctx: &mut DropDatabaseContext,
     ) -> Result<(Box<dyn State>, Status)> {
         self.register_dropping_regions(ddl_ctx)?;
-        let executor =
-            DropTableExecutor::new(ctx.cluster_id, self.table_name.clone(), self.table_id, true);
+        let executor = DropTableExecutor::new(self.table_name.clone(), self.table_id, true);
         // Deletes metadata for table permanently.
         let table_route_value = TableRouteValue::new(
             self.table_id,
@@ -187,7 +186,7 @@ mod tests {
     async fn test_next_with_physical_table() {
         let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
         let ddl_context = new_ddl_context(node_manager);
-        let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await;
+        let physical_table_id = create_physical_table(&ddl_context, "phy").await;
         let (_, table_route) = ddl_context
             .table_metadata_manager
             .table_route_manager()
@@ -203,7 +202,6 @@ mod tests {
             DropTableTarget::Physical,
         );
         let mut ctx = DropDatabaseContext {
-            cluster_id: 0,
             catalog: DEFAULT_CATALOG_NAME.to_string(),
             schema: DEFAULT_SCHEMA_NAME.to_string(),
             drop_if_exists: false,
@@ -216,7 +214,6 @@ mod tests {
         }
         // Execute again
         let mut ctx = DropDatabaseContext {
-            cluster_id: 0,
             catalog: DEFAULT_CATALOG_NAME.to_string(),
             schema: DEFAULT_SCHEMA_NAME.to_string(),
             drop_if_exists: false,
@@ -239,8 +236,8 @@ mod tests {
     async fn test_next_logical_table() {
         let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
         let ddl_context = new_ddl_context(node_manager);
-        let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await;
-        create_logical_table(ddl_context.clone(), 0, physical_table_id, "metric").await;
+        let physical_table_id = create_physical_table(&ddl_context, "phy").await;
+        create_logical_table(ddl_context.clone(), physical_table_id, "metric").await;
         let logical_table_id = physical_table_id + 1;
         let (_, table_route) = ddl_context
             .table_metadata_manager
@@ -257,7 +254,6 @@ mod tests {
             DropTableTarget::Logical,
         );
         let mut ctx = DropDatabaseContext {
-            cluster_id: 0,
             catalog: DEFAULT_CATALOG_NAME.to_string(),
             schema: DEFAULT_SCHEMA_NAME.to_string(),
             drop_if_exists: false,
@@ -270,7 +266,6 @@ mod tests {
         }
         // Execute again
         let mut ctx = DropDatabaseContext {
-            cluster_id: 0,
             catalog: DEFAULT_CATALOG_NAME.to_string(),
             schema: DEFAULT_SCHEMA_NAME.to_string(),
             drop_if_exists: false,
@@ -345,7 +340,7 @@ mod tests {
     async fn test_next_retryable_err() {
         let node_manager = Arc::new(MockDatanodeManager::new(RetryErrorDatanodeHandler));
         let ddl_context = new_ddl_context(node_manager);
-        let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await;
+        let physical_table_id = create_physical_table(&ddl_context, "phy").await;
         let (_, table_route) = ddl_context
             .table_metadata_manager
             .table_route_manager()
@@ -360,7 +355,6 @@ mod tests {
             DropTableTarget::Physical,
         );
         let mut ctx = DropDatabaseContext {
-            cluster_id: 0,
             catalog: DEFAULT_CATALOG_NAME.to_string(),
             schema: DEFAULT_SCHEMA_NAME.to_string(),
             drop_if_exists: false,
@@ -374,7 +368,7 @@ mod tests {
     async fn test_on_recovery() {
         let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
         let ddl_context = new_ddl_context(node_manager);
-        let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await;
+        let physical_table_id = create_physical_table(&ddl_context, "phy").await;
         let (_, table_route) = ddl_context
             .table_metadata_manager
             .table_route_manager()
@@ -390,7 +384,6 @@ mod tests {
             DropTableTarget::Physical,
         );
         let mut ctx = DropDatabaseContext {
-            cluster_id: 0,
             catalog: DEFAULT_CATALOG_NAME.to_string(),
             schema: DEFAULT_SCHEMA_NAME.to_string(),
             drop_if_exists: false,
```
```diff
@@ -118,7 +118,6 @@ mod tests {
         .unwrap();
         let mut state = DropDatabaseRemoveMetadata;
         let mut ctx = DropDatabaseContext {
-            cluster_id: 0,
             catalog: "foo".to_string(),
             schema: "bar".to_string(),
             drop_if_exists: true,
@@ -145,7 +144,6 @@ mod tests {
         // Schema not exists
         let mut state = DropDatabaseRemoveMetadata;
         let mut ctx = DropDatabaseContext {
-            cluster_id: 0,
             catalog: "foo".to_string(),
             schema: "bar".to_string(),
             drop_if_exists: true,
```
```diff
@@ -89,7 +89,6 @@ mod tests {
         let ddl_context = new_ddl_context(node_manager);
         let mut step = DropDatabaseStart;
         let mut ctx = DropDatabaseContext {
-            cluster_id: 0,
             catalog: "foo".to_string(),
             schema: "bar".to_string(),
             drop_if_exists: false,
@@ -105,7 +104,6 @@ mod tests {
         let ddl_context = new_ddl_context(node_manager);
         let mut state = DropDatabaseStart;
         let mut ctx = DropDatabaseContext {
-            cluster_id: 0,
             catalog: "foo".to_string(),
             schema: "bar".to_string(),
             drop_if_exists: true,
@@ -128,7 +126,6 @@ mod tests {
         .unwrap();
         let mut state = DropDatabaseStart;
         let mut ctx = DropDatabaseContext {
-            cluster_id: 0,
             catalog: "foo".to_string(),
             schema: "bar".to_string(),
             drop_if_exists: false,
```
```diff
@@ -37,8 +37,8 @@ use crate::instruction::{CacheIdent, DropFlow};
 use crate::key::flow::flow_info::FlowInfoValue;
 use crate::key::flow::flow_route::FlowRouteValue;
 use crate::lock_key::{CatalogLock, FlowLock};
+use crate::metrics;
 use crate::rpc::ddl::DropFlowTask;
-use crate::{metrics, ClusterId};

 /// The procedure for dropping a flow.
 pub struct DropFlowProcedure {
@@ -51,12 +51,11 @@ pub struct DropFlowProcedure {
 impl DropFlowProcedure {
     pub const TYPE_NAME: &'static str = "metasrv-procedure::DropFlow";

-    pub fn new(cluster_id: ClusterId, task: DropFlowTask, context: DdlContext) -> Self {
+    pub fn new(task: DropFlowTask, context: DdlContext) -> Self {
         Self {
             context,
             data: DropFlowData {
                 state: DropFlowState::Prepare,
-                cluster_id,
                 task,
                 flow_info_value: None,
                 flow_route_values: vec![],
@@ -218,7 +217,6 @@ impl Procedure for DropFlowProcedure {
 #[derive(Debug, Serialize, Deserialize)]
 pub(crate) struct DropFlowData {
     state: DropFlowState,
-    cluster_id: ClusterId,
     task: DropFlowTask,
     pub(crate) flow_info_value: Option<FlowInfoValue>,
     pub(crate) flow_route_values: Vec<FlowRouteValue>,
```
```diff
@@ -40,10 +40,10 @@ use crate::ddl::DdlContext;
 use crate::error::{self, Result};
 use crate::key::table_route::TableRouteValue;
 use crate::lock_key::{CatalogLock, SchemaLock, TableLock};
+use crate::metrics;
 use crate::region_keeper::OperatingRegionGuard;
 use crate::rpc::ddl::DropTableTask;
 use crate::rpc::router::{operating_leader_regions, RegionRoute};
-use crate::{metrics, ClusterId};

 pub struct DropTableProcedure {
     /// The context of procedure runtime.
@@ -59,8 +59,8 @@ impl DropTableProcedure {
     pub const TYPE_NAME: &'static str = "metasrv-procedure::DropTable";

-    pub fn new(cluster_id: ClusterId, task: DropTableTask, context: DdlContext) -> Self {
-        let data = DropTableData::new(cluster_id, task);
+    pub fn new(task: DropTableTask, context: DdlContext) -> Self {
+        let data = DropTableData::new(task);
         let executor = data.build_executor();
         Self {
             context,
@@ -268,7 +268,6 @@ impl Procedure for DropTableProcedure {
 #[derive(Debug, Serialize, Deserialize)]
 pub struct DropTableData {
     pub state: DropTableState,
-    pub cluster_id: ClusterId,
     pub task: DropTableTask,
     pub physical_region_routes: Vec<RegionRoute>,
     pub physical_table_id: Option<TableId>,
@@ -279,10 +278,9 @@ pub struct DropTableData {
 }

 impl DropTableData {
-    pub fn new(cluster_id: ClusterId, task: DropTableTask) -> Self {
+    pub fn new(task: DropTableTask) -> Self {
         Self {
             state: DropTableState::Prepare,
-            cluster_id,
             task,
             physical_region_routes: vec![],
             physical_table_id: None,
@@ -301,7 +299,6 @@ impl DropTableData {

     fn build_executor(&self) -> DropTableExecutor {
         DropTableExecutor::new(
-            self.cluster_id,
             self.task.table_name(),
             self.task.table_id,
             self.task.drop_if_exists,
```
```diff
@@ -36,7 +36,6 @@ use crate::instruction::CacheIdent;
 use crate::key::table_name::TableNameKey;
 use crate::key::table_route::TableRouteValue;
 use crate::rpc::router::{find_leader_regions, find_leaders, RegionRoute};
-use crate::ClusterId;

 /// [Control] indicated to the caller whether to go to the next step.
 #[derive(Debug)]
@@ -54,14 +53,8 @@ impl<T> Control<T> {

 impl DropTableExecutor {
     /// Returns the [DropTableExecutor].
-    pub fn new(
-        cluster_id: ClusterId,
-        table: TableName,
-        table_id: TableId,
-        drop_if_exists: bool,
-    ) -> Self {
+    pub fn new(table: TableName, table_id: TableId, drop_if_exists: bool) -> Self {
         Self {
-            cluster_id,
             table,
             table_id,
             drop_if_exists,
@@ -74,7 +67,6 @@ impl DropTableExecutor {
 /// - Invalidates the cache on the Frontend nodes.
 /// - Drops the regions on the Datanode nodes.
 pub struct DropTableExecutor {
-    cluster_id: ClusterId,
     table: TableName,
     table_id: TableId,
     drop_if_exists: bool,
@@ -164,7 +156,7 @@ impl DropTableExecutor {
         let detecting_regions = if table_route_value.is_physical() {
             // Safety: checked.
             let regions = table_route_value.region_routes().unwrap();
-            convert_region_routes_to_detecting_regions(self.cluster_id, regions)
+            convert_region_routes_to_detecting_regions(regions)
         } else {
             vec![]
         };
@@ -321,7 +313,6 @@ mod tests {
         let node_manager = Arc::new(MockDatanodeManager::new(()));
         let ctx = new_ddl_context(node_manager);
         let executor = DropTableExecutor::new(
-            0,
             TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "my_table"),
             1024,
             true,
@@ -331,7 +322,6 @@ mod tests {

         // Drops a non-exists table
         let executor = DropTableExecutor::new(
-            0,
             TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "my_table"),
             1024,
             false,
@@ -341,7 +331,6 @@ mod tests {

         // Drops a exists table
         let executor = DropTableExecutor::new(
-            0,
             TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "my_table"),
             1024,
             false,
```
@@ -31,8 +31,8 @@ use crate::error::{self, Result};
 use crate::instruction::CacheIdent;
 use crate::key::table_name::TableNameKey;
 use crate::lock_key::{CatalogLock, SchemaLock, TableLock};
+use crate::metrics;
 use crate::rpc::ddl::DropViewTask;
-use crate::{metrics, ClusterId};
 
 /// The procedure for dropping a view.
 pub struct DropViewProcedure {
@@ -45,12 +45,11 @@ pub struct DropViewProcedure {
 impl DropViewProcedure {
     pub const TYPE_NAME: &'static str = "metasrv-procedure::DropView";
 
-    pub fn new(cluster_id: ClusterId, task: DropViewTask, context: DdlContext) -> Self {
+    pub fn new(task: DropViewTask, context: DdlContext) -> Self {
         Self {
             context,
             data: DropViewData {
                 state: DropViewState::Prepare,
-                cluster_id,
                 task,
             },
         }
@@ -216,7 +215,6 @@ impl Procedure for DropViewProcedure {
 #[derive(Debug, Serialize, Deserialize)]
 pub(crate) struct DropViewData {
     state: DropViewState,
-    cluster_id: ClusterId,
     task: DropViewTask,
 }

@@ -20,7 +20,6 @@ use crate::error::Result;
 use crate::key::FlowId;
 use crate::peer::Peer;
 use crate::sequence::SequenceRef;
-use crate::ClusterId;
 
 /// The reference of [FlowMetadataAllocator].
 pub type FlowMetadataAllocatorRef = Arc<FlowMetadataAllocator>;
@@ -60,16 +59,9 @@ impl FlowMetadataAllocator {
     }
 
     /// Allocates the [FlowId] and [Peer]s.
-    pub async fn create(
-        &self,
-        cluster_id: ClusterId,
-        partitions: usize,
-    ) -> Result<(FlowId, Vec<Peer>)> {
+    pub async fn create(&self, partitions: usize) -> Result<(FlowId, Vec<Peer>)> {
         let flow_id = self.allocate_flow_id().await?;
-        let peers = self
-            .partition_peer_allocator
-            .alloc(cluster_id, partitions)
-            .await?;
+        let peers = self.partition_peer_allocator.alloc(partitions).await?;
 
         Ok((flow_id, peers))
     }
@@ -79,7 +71,7 @@ impl FlowMetadataAllocator {
 #[async_trait]
 pub trait PartitionPeerAllocator: Send + Sync {
     /// Allocates [Peer] nodes for storing partitions.
-    async fn alloc(&self, cluster_id: ClusterId, partitions: usize) -> Result<Vec<Peer>>;
+    async fn alloc(&self, partitions: usize) -> Result<Vec<Peer>>;
 }
 
 /// [PartitionPeerAllocatorRef] allocates [Peer]s for partitions.
@@ -89,7 +81,7 @@ struct NoopPartitionPeerAllocator;
 
 #[async_trait]
 impl PartitionPeerAllocator for NoopPartitionPeerAllocator {
-    async fn alloc(&self, _cluster_id: ClusterId, partitions: usize) -> Result<Vec<Peer>> {
+    async fn alloc(&self, partitions: usize) -> Result<Vec<Peer>> {
         Ok(vec![Peer::default(); partitions])
     }
 }

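Downstream implementors of the allocator trait only see the signature change. A minimal sketch of a custom implementation under the new trait; the StaticPeerAllocator type and its fixed peer list are hypothetical, not part of this patch:

    struct StaticPeerAllocator {
        // Hypothetical: a fixed, non-empty peer list to hand out.
        peers: Vec<Peer>,
    }

    #[async_trait]
    impl PartitionPeerAllocator for StaticPeerAllocator {
        // Hands out the configured peers round-robin, one per partition.
        async fn alloc(&self, partitions: usize) -> Result<Vec<Peer>> {
            Ok((0..partitions)
                .map(|i| self.peers[i % self.peers.len()].clone())
                .collect())
        }
    }
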
@@ -20,7 +20,7 @@ use common_telemetry::{debug, info};
 use snafu::ensure;
 use store_api::storage::{RegionId, RegionNumber, TableId};
 
-use crate::ddl::{TableMetadata, TableMetadataAllocatorContext};
+use crate::ddl::TableMetadata;
 use crate::error::{self, Result, UnsupportedSnafu};
 use crate::key::table_route::PhysicalTableRouteValue;
 use crate::peer::Peer;
@@ -109,7 +109,6 @@ impl TableMetadataAllocator {
 
     async fn create_table_route(
         &self,
-        ctx: &TableMetadataAllocatorContext,
         table_id: TableId,
         task: &CreateTableTask,
     ) -> Result<PhysicalTableRouteValue> {
@@ -121,7 +120,7 @@ impl TableMetadataAllocator {
             }
         );
 
-        let peers = self.peer_allocator.alloc(ctx, regions).await?;
+        let peers = self.peer_allocator.alloc(regions).await?;
         let region_routes = task
             .partitions
             .iter()
@@ -147,11 +146,7 @@ impl TableMetadataAllocator {
     }
 
     /// Create VIEW metadata
-    pub async fn create_view(
-        &self,
-        _ctx: &TableMetadataAllocatorContext,
-        table_id: &Option<api::v1::TableId>,
-    ) -> Result<TableMetadata> {
+    pub async fn create_view(&self, table_id: &Option<api::v1::TableId>) -> Result<TableMetadata> {
         let table_id = self.allocate_table_id(table_id).await?;
 
         Ok(TableMetadata {
@@ -160,13 +155,9 @@ impl TableMetadataAllocator {
         })
     }
 
-    pub async fn create(
-        &self,
-        ctx: &TableMetadataAllocatorContext,
-        task: &CreateTableTask,
-    ) -> Result<TableMetadata> {
+    pub async fn create(&self, task: &CreateTableTask) -> Result<TableMetadata> {
         let table_id = self.allocate_table_id(&task.create_table.table_id).await?;
-        let table_route = self.create_table_route(ctx, table_id, task).await?;
+        let table_route = self.create_table_route(table_id, task).await?;
         let region_wal_options = self.create_wal_options(&table_route)?;
 
         debug!(
@@ -188,19 +179,14 @@ pub type PeerAllocatorRef = Arc<dyn PeerAllocator>;
 #[async_trait]
 pub trait PeerAllocator: Send + Sync {
     /// Allocates `regions` size [`Peer`]s.
-    async fn alloc(&self, ctx: &TableMetadataAllocatorContext, regions: usize)
-        -> Result<Vec<Peer>>;
+    async fn alloc(&self, regions: usize) -> Result<Vec<Peer>>;
 }
 
 struct NoopPeerAllocator;
 
 #[async_trait]
 impl PeerAllocator for NoopPeerAllocator {
-    async fn alloc(
-        &self,
-        _ctx: &TableMetadataAllocatorContext,
-        regions: usize,
-    ) -> Result<Vec<Peer>> {
+    async fn alloc(&self, regions: usize) -> Result<Vec<Peer>> {
         Ok(vec![Peer::default(); regions])
     }
 }

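Call sites of the table metadata allocator shed the TableMetadataAllocatorContext wrapper along with the id. A minimal before/after sketch, assuming a `ddl_context: DdlContext` and a prepared `task: CreateTableTask` as in the tests below:

    // Before:
    //     .create(&TableMetadataAllocatorContext { cluster_id }, &task)
    // After: the allocator is called with the task alone.
    let TableMetadata { table_id, table_route, .. } = ddl_context
        .table_metadata_allocator
        .create(&task)
        .await?;
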
@@ -31,10 +31,9 @@ use crate::ddl::test_util::columns::TestColumnDefBuilder;
 use crate::ddl::test_util::create_table::{
     build_raw_table_info_from_expr, TestCreateTableExprBuilder,
 };
-use crate::ddl::{DdlContext, TableMetadata, TableMetadataAllocatorContext};
+use crate::ddl::{DdlContext, TableMetadata};
 use crate::key::table_route::TableRouteValue;
 use crate::rpc::ddl::CreateTableTask;
-use crate::ClusterId;
 
 pub async fn create_physical_table_metadata(
     ddl_context: &DdlContext,
@@ -48,11 +47,7 @@ pub async fn create_physical_table_metadata(
     .unwrap();
 }
 
-pub async fn create_physical_table(
-    ddl_context: &DdlContext,
-    cluster_id: ClusterId,
-    name: &str,
-) -> TableId {
+pub async fn create_physical_table(ddl_context: &DdlContext, name: &str) -> TableId {
     // Prepares physical table metadata.
     let mut create_physical_table_task = test_create_physical_table_task(name);
     let TableMetadata {
@@ -61,10 +56,7 @@ pub async fn create_physical_table(
         ..
     } = ddl_context
         .table_metadata_allocator
-        .create(
-            &TableMetadataAllocatorContext { cluster_id },
-            &create_physical_table_task,
-        )
+        .create(&create_physical_table_task)
         .await
         .unwrap();
     create_physical_table_task.set_table_id(table_id);
@@ -80,15 +72,13 @@ pub async fn create_physical_table(
 
 pub async fn create_logical_table(
     ddl_context: DdlContext,
-    cluster_id: ClusterId,
     physical_table_id: TableId,
     table_name: &str,
 ) -> TableId {
     use std::assert_matches::assert_matches;
 
     let tasks = vec![test_create_logical_table_task(table_name)];
-    let mut procedure =
-        CreateLogicalTablesProcedure::new(cluster_id, tasks, physical_table_id, ddl_context);
+    let mut procedure = CreateLogicalTablesProcedure::new(tasks, physical_table_id, ddl_context);
     let status = procedure.on_prepare().await.unwrap();
     assert_matches!(status, Status::Executing { persist: true });
     let status = procedure.on_create_metadata().await.unwrap();

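With the trimmed helpers, a test stands up a physical table and its logical tables with no cluster bookkeeping at all; a minimal sketch, assuming a `ddl_context` built via `new_ddl_context`:

    let phy_id = create_physical_table(&ddl_context, "phy").await;
    create_logical_table(ddl_context.clone(), phy_id, "table1").await;
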
@@ -86,7 +86,6 @@ fn make_alter_logical_table_rename_task(
 async fn test_on_prepare_check_schema() {
     let node_manager = Arc::new(MockDatanodeManager::new(()));
     let ddl_context = new_ddl_context(node_manager);
-    let cluster_id = 1;
     let tasks = vec![
         make_alter_logical_table_add_column_task(
             Some("schema1"),
@@ -100,8 +99,7 @@ async fn test_on_prepare_check_schema() {
         ),
     ];
     let physical_table_id = 1024u32;
-    let mut procedure =
-        AlterLogicalTablesProcedure::new(cluster_id, tasks, physical_table_id, ddl_context);
+    let mut procedure = AlterLogicalTablesProcedure::new(tasks, physical_table_id, ddl_context);
     let err = procedure.on_prepare().await.unwrap_err();
     assert_matches!(err, AlterLogicalTablesInvalidArguments { .. });
 }
@@ -110,50 +108,46 @@ async fn test_on_prepare_check_schema() {
 async fn test_on_prepare_check_alter_kind() {
     let node_manager = Arc::new(MockDatanodeManager::new(()));
     let ddl_context = new_ddl_context(node_manager);
-    let cluster_id = 1;
     let tasks = vec![make_alter_logical_table_rename_task(
         "schema1",
         "table1",
         "new_table1",
     )];
     let physical_table_id = 1024u32;
-    let mut procedure =
-        AlterLogicalTablesProcedure::new(cluster_id, tasks, physical_table_id, ddl_context);
+    let mut procedure = AlterLogicalTablesProcedure::new(tasks, physical_table_id, ddl_context);
     let err = procedure.on_prepare().await.unwrap_err();
     assert_matches!(err, AlterLogicalTablesInvalidArguments { .. });
 }
 
 #[tokio::test]
 async fn test_on_prepare_different_physical_table() {
-    let cluster_id = 1;
     let node_manager = Arc::new(MockDatanodeManager::new(()));
     let ddl_context = new_ddl_context(node_manager);
 
-    let phy1_id = create_physical_table(&ddl_context, cluster_id, "phy1").await;
-    create_logical_table(ddl_context.clone(), cluster_id, phy1_id, "table1").await;
-    let phy2_id = create_physical_table(&ddl_context, cluster_id, "phy2").await;
-    create_logical_table(ddl_context.clone(), cluster_id, phy2_id, "table2").await;
+    let phy1_id = create_physical_table(&ddl_context, "phy1").await;
+    create_logical_table(ddl_context.clone(), phy1_id, "table1").await;
+    let phy2_id = create_physical_table(&ddl_context, "phy2").await;
+    create_logical_table(ddl_context.clone(), phy2_id, "table2").await;
 
     let tasks = vec![
         make_alter_logical_table_add_column_task(None, "table1", vec!["column1".to_string()]),
         make_alter_logical_table_add_column_task(None, "table2", vec!["column2".to_string()]),
     ];
 
-    let mut procedure = AlterLogicalTablesProcedure::new(cluster_id, tasks, phy1_id, ddl_context);
+    let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy1_id, ddl_context);
     let err = procedure.on_prepare().await.unwrap_err();
     assert_matches!(err, AlterLogicalTablesInvalidArguments { .. });
 }
 
 #[tokio::test]
 async fn test_on_prepare_logical_table_not_exists() {
-    let cluster_id = 1;
     let node_manager = Arc::new(MockDatanodeManager::new(()));
     let ddl_context = new_ddl_context(node_manager);
 
     // Creates physical table
-    let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
+    let phy_id = create_physical_table(&ddl_context, "phy").await;
     // Creates 3 logical tables
-    create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
+    create_logical_table(ddl_context.clone(), phy_id, "table1").await;
 
     let tasks = vec![
         make_alter_logical_table_add_column_task(None, "table1", vec!["column1".to_string()]),
@@ -161,23 +155,22 @@ async fn test_on_prepare_logical_table_not_exists() {
         make_alter_logical_table_add_column_task(None, "table2", vec!["column2".to_string()]),
     ];
 
-    let mut procedure = AlterLogicalTablesProcedure::new(cluster_id, tasks, phy_id, ddl_context);
+    let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context);
     let err = procedure.on_prepare().await.unwrap_err();
     assert_matches!(err, TableNotFound { .. });
 }
 
 #[tokio::test]
 async fn test_on_prepare() {
-    let cluster_id = 1;
     let node_manager = Arc::new(MockDatanodeManager::new(()));
     let ddl_context = new_ddl_context(node_manager);
 
     // Creates physical table
-    let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
+    let phy_id = create_physical_table(&ddl_context, "phy").await;
     // Creates 3 logical tables
-    create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
-    create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;
-    create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table3").await;
+    create_logical_table(ddl_context.clone(), phy_id, "table1").await;
+    create_logical_table(ddl_context.clone(), phy_id, "table2").await;
+    create_logical_table(ddl_context.clone(), phy_id, "table3").await;
 
     let tasks = vec![
         make_alter_logical_table_add_column_task(None, "table1", vec!["column1".to_string()]),
@@ -185,25 +178,24 @@ async fn test_on_prepare() {
         make_alter_logical_table_add_column_task(None, "table3", vec!["column3".to_string()]),
     ];
 
-    let mut procedure = AlterLogicalTablesProcedure::new(cluster_id, tasks, phy_id, ddl_context);
+    let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context);
     let result = procedure.on_prepare().await;
     assert_matches!(result, Ok(Status::Executing { persist: true }));
 }
 
 #[tokio::test]
 async fn test_on_update_metadata() {
-    let cluster_id = 1;
     let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
     let ddl_context = new_ddl_context(node_manager);
 
     // Creates physical table
-    let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
+    let phy_id = create_physical_table(&ddl_context, "phy").await;
     // Creates 3 logical tables
-    create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
-    create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;
-    create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table3").await;
-    create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table4").await;
-    create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table5").await;
+    create_logical_table(ddl_context.clone(), phy_id, "table1").await;
+    create_logical_table(ddl_context.clone(), phy_id, "table2").await;
+    create_logical_table(ddl_context.clone(), phy_id, "table3").await;
+    create_logical_table(ddl_context.clone(), phy_id, "table4").await;
+    create_logical_table(ddl_context.clone(), phy_id, "table5").await;
 
     let tasks = vec![
         make_alter_logical_table_add_column_task(None, "table1", vec!["new_col".to_string()]),
@@ -211,7 +203,7 @@ async fn test_on_update_metadata() {
         make_alter_logical_table_add_column_task(None, "table3", vec!["new_col".to_string()]),
     ];
 
-    let mut procedure = AlterLogicalTablesProcedure::new(cluster_id, tasks, phy_id, ddl_context);
+    let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context);
     let mut status = procedure.on_prepare().await.unwrap();
     assert_matches!(status, Status::Executing { persist: true });
 
@@ -229,23 +221,21 @@ async fn test_on_update_metadata() {
 
 #[tokio::test]
 async fn test_on_part_duplicate_alter_request() {
-    let cluster_id = 1;
     let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
     let ddl_context = new_ddl_context(node_manager);
 
     // Creates physical table
-    let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
+    let phy_id = create_physical_table(&ddl_context, "phy").await;
     // Creates 3 logical tables
-    create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
-    create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;
+    create_logical_table(ddl_context.clone(), phy_id, "table1").await;
+    create_logical_table(ddl_context.clone(), phy_id, "table2").await;
 
     let tasks = vec![
         make_alter_logical_table_add_column_task(None, "table1", vec!["col_0".to_string()]),
         make_alter_logical_table_add_column_task(None, "table2", vec!["col_0".to_string()]),
     ];
 
-    let mut procedure =
-        AlterLogicalTablesProcedure::new(cluster_id, tasks, phy_id, ddl_context.clone());
+    let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context.clone());
     let mut status = procedure.on_prepare().await.unwrap();
     assert_matches!(status, Status::Executing { persist: true });
 
@@ -278,8 +268,7 @@ async fn test_on_part_duplicate_alter_request() {
         ),
     ];
 
-    let mut procedure =
-        AlterLogicalTablesProcedure::new(cluster_id, tasks, phy_id, ddl_context.clone());
+    let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context.clone());
     let mut status = procedure.on_prepare().await.unwrap();
     assert_matches!(status, Status::Executing { persist: true });

@@ -59,7 +59,6 @@ fn test_rename_alter_table_task(table_name: &str, new_table_name: &str) -> Alter
 async fn test_on_prepare_table_exists_err() {
     let node_manager = Arc::new(MockDatanodeManager::new(()));
     let ddl_context = new_ddl_context(node_manager);
-    let cluster_id = 1;
     let task = test_create_table_task("foo", 1024);
     // Puts a value to table name key.
     ddl_context
@@ -73,7 +72,7 @@ async fn test_on_prepare_table_exists_err() {
     .unwrap();
 
     let task = test_rename_alter_table_task("non-exists", "foo");
-    let mut procedure = AlterTableProcedure::new(cluster_id, 1024, task, ddl_context).unwrap();
+    let mut procedure = AlterTableProcedure::new(1024, task, ddl_context).unwrap();
     let err = procedure.on_prepare().await.unwrap_err();
     assert_matches!(err.status_code(), StatusCode::TableAlreadyExists);
 }
@@ -82,9 +81,8 @@ async fn test_on_prepare_table_exists_err() {
 async fn test_on_prepare_table_not_exists_err() {
     let node_manager = Arc::new(MockDatanodeManager::new(()));
     let ddl_context = new_ddl_context(node_manager);
-    let cluster_id = 1;
     let task = test_rename_alter_table_task("non-exists", "foo");
-    let mut procedure = AlterTableProcedure::new(cluster_id, 1024, task, ddl_context).unwrap();
+    let mut procedure = AlterTableProcedure::new(1024, task, ddl_context).unwrap();
     let err = procedure.on_prepare().await.unwrap_err();
     assert_matches!(err.status_code(), StatusCode::TableNotFound);
 }
@@ -95,7 +93,6 @@ async fn test_on_submit_alter_request() {
     let datanode_handler = DatanodeWatcher(tx);
     let node_manager = Arc::new(MockDatanodeManager::new(datanode_handler));
     let ddl_context = new_ddl_context(node_manager);
-    let cluster_id = 1;
     let table_id = 1024;
     let table_name = "foo";
     let task = test_create_table_task(table_name, table_id);
@@ -144,8 +141,7 @@ async fn test_on_submit_alter_request() {
             })),
         },
     };
-    let mut procedure =
-        AlterTableProcedure::new(cluster_id, table_id, alter_table_task, ddl_context).unwrap();
+    let mut procedure = AlterTableProcedure::new(table_id, alter_table_task, ddl_context).unwrap();
     procedure.on_prepare().await.unwrap();
     procedure.submit_alter_region_requests().await.unwrap();
 
@@ -181,7 +177,6 @@ async fn test_on_submit_alter_request_with_outdated_request() {
         RequestOutdatedErrorDatanodeHandler,
     ));
     let ddl_context = new_ddl_context(node_manager);
-    let cluster_id = 1;
     let table_id = 1024;
     let table_name = "foo";
     let task = test_create_table_task(table_name, table_id);
@@ -230,8 +225,7 @@ async fn test_on_submit_alter_request_with_outdated_request() {
             })),
         },
     };
-    let mut procedure =
-        AlterTableProcedure::new(cluster_id, table_id, alter_table_task, ddl_context).unwrap();
+    let mut procedure = AlterTableProcedure::new(table_id, alter_table_task, ddl_context).unwrap();
     procedure.on_prepare().await.unwrap();
     procedure.submit_alter_region_requests().await.unwrap();
 }
@@ -240,7 +234,6 @@ async fn test_on_submit_alter_request_with_outdated_request() {
 async fn test_on_update_metadata_rename() {
     let node_manager = Arc::new(MockDatanodeManager::new(()));
     let ddl_context = new_ddl_context(node_manager);
-    let cluster_id = 1;
     let table_name = "foo";
     let new_table_name = "bar";
     let table_id = 1024;
@@ -257,8 +250,7 @@ async fn test_on_update_metadata_rename() {
     .unwrap();
 
     let task = test_rename_alter_table_task(table_name, new_table_name);
-    let mut procedure =
-        AlterTableProcedure::new(cluster_id, table_id, task, ddl_context.clone()).unwrap();
+    let mut procedure = AlterTableProcedure::new(table_id, task, ddl_context.clone()).unwrap();
     procedure.on_prepare().await.unwrap();
     procedure.on_update_metadata().await.unwrap();
 
@@ -291,7 +283,6 @@ async fn test_on_update_metadata_rename() {
 async fn test_on_update_metadata_add_columns() {
     let node_manager = Arc::new(MockDatanodeManager::new(()));
     let ddl_context = new_ddl_context(node_manager);
-    let cluster_id = 1;
     let table_name = "foo";
     let table_id = 1024;
     let task = test_create_table_task(table_name, table_id);
@@ -335,8 +326,7 @@ async fn test_on_update_metadata_add_columns() {
             })),
         },
     };
-    let mut procedure =
-        AlterTableProcedure::new(cluster_id, table_id, task, ddl_context.clone()).unwrap();
+    let mut procedure = AlterTableProcedure::new(table_id, task, ddl_context.clone()).unwrap();
     procedure.on_prepare().await.unwrap();
     procedure.submit_alter_region_requests().await.unwrap();
     procedure.on_update_metadata().await.unwrap();
@@ -361,7 +351,6 @@ async fn test_on_update_metadata_add_columns() {
 async fn test_on_update_table_options() {
     let node_manager = Arc::new(MockDatanodeManager::new(()));
     let ddl_context = new_ddl_context(node_manager);
-    let cluster_id = 1;
     let table_name = "foo";
     let table_id = 1024;
     let task = test_create_table_task(table_name, table_id);
@@ -398,8 +387,7 @@ async fn test_on_update_table_options() {
             })),
         },
     };
-    let mut procedure =
-        AlterTableProcedure::new(cluster_id, table_id, task, ddl_context.clone()).unwrap();
+    let mut procedure = AlterTableProcedure::new(table_id, task, ddl_context.clone()).unwrap();
     procedure.on_prepare().await.unwrap();
     procedure.submit_alter_region_requests().await.unwrap();
     procedure.on_update_metadata().await.unwrap();

@@ -25,11 +25,11 @@ use crate::ddl::create_flow::CreateFlowProcedure;
 use crate::ddl::test_util::create_table::test_create_table_task;
 use crate::ddl::test_util::flownode_handler::NaiveFlownodeHandler;
 use crate::ddl::DdlContext;
+use crate::error;
 use crate::key::table_route::TableRouteValue;
 use crate::key::FlowId;
 use crate::rpc::ddl::CreateFlowTask;
 use crate::test_util::{new_ddl_context, MockFlownodeManager};
-use crate::{error, ClusterId};
 
 pub(crate) fn test_create_flow_task(
     name: &str,
@@ -53,7 +53,6 @@ pub(crate) fn test_create_flow_task(
 
 #[tokio::test]
 async fn test_create_flow_source_table_not_found() {
-    let cluster_id = 1;
     let source_table_names = vec![TableName::new(
         DEFAULT_CATALOG_NAME,
         DEFAULT_SCHEMA_NAME,
@@ -65,14 +64,13 @@ async fn test_create_flow_source_table_not_found() {
     let node_manager = Arc::new(MockFlownodeManager::new(NaiveFlownodeHandler));
     let ddl_context = new_ddl_context(node_manager);
     let query_ctx = QueryContext::arc().into();
-    let mut procedure = CreateFlowProcedure::new(cluster_id, task, query_ctx, ddl_context);
+    let mut procedure = CreateFlowProcedure::new(task, query_ctx, ddl_context);
     let err = procedure.on_prepare().await.unwrap_err();
     assert_matches!(err, error::Error::TableNotFound { .. });
 }
 
 pub(crate) async fn create_test_flow(
     ddl_context: &DdlContext,
-    cluster_id: ClusterId,
     flow_name: &str,
     source_table_names: Vec<TableName>,
     sink_table_name: TableName,
@@ -84,8 +82,7 @@ pub(crate) async fn create_test_flow(
         false,
     );
     let query_ctx = QueryContext::arc().into();
-    let mut procedure =
-        CreateFlowProcedure::new(cluster_id, task.clone(), query_ctx, ddl_context.clone());
+    let mut procedure = CreateFlowProcedure::new(task.clone(), query_ctx, ddl_context.clone());
     let output = execute_procedure_until_done(&mut procedure).await.unwrap();
     let flow_id = output.downcast_ref::<FlowId>().unwrap();
 
@@ -94,7 +91,6 @@ pub(crate) async fn create_test_flow(
 
 #[tokio::test]
 async fn test_create_flow() {
-    let cluster_id = 1;
     let table_id = 1024;
     let source_table_names = vec![TableName::new(
         DEFAULT_CATALOG_NAME,
@@ -118,7 +114,6 @@ async fn test_create_flow() {
     .unwrap();
     let flow_id = create_test_flow(
         &ddl_context,
-        cluster_id,
         "my_flow",
         source_table_names.clone(),
         sink_table_name.clone(),
@@ -134,8 +129,7 @@ async fn test_create_flow() {
         true,
     );
     let query_ctx = QueryContext::arc().into();
-    let mut procedure =
-        CreateFlowProcedure::new(cluster_id, task.clone(), query_ctx, ddl_context.clone());
+    let mut procedure = CreateFlowProcedure::new(task.clone(), query_ctx, ddl_context.clone());
     let output = execute_procedure_until_done(&mut procedure).await.unwrap();
     let flow_id = output.downcast_ref::<FlowId>().unwrap();
     assert_eq!(*flow_id, 1024);
@@ -143,7 +137,7 @@ async fn test_create_flow() {
     // Creates again
     let task = test_create_flow_task("my_flow", source_table_names, sink_table_name, false);
     let query_ctx = QueryContext::arc().into();
-    let mut procedure = CreateFlowProcedure::new(cluster_id, task.clone(), query_ctx, ddl_context);
+    let mut procedure = CreateFlowProcedure::new(task.clone(), query_ctx, ddl_context);
     let err = procedure.on_prepare().await.unwrap_err();
     assert_matches!(err, error::Error::FlowAlreadyExists { .. });
 }

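CreateFlowProcedure keeps its query-context argument; only the cluster id is dropped. A minimal sketch of the new call shape, mirroring the tests above:

    let query_ctx = QueryContext::arc().into();
    let mut procedure = CreateFlowProcedure::new(task, query_ctx, ddl_context);
    let status = procedure.on_prepare().await.unwrap();
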
@@ -26,7 +26,7 @@ use crate::ddl::test_util::datanode_handler::NaiveDatanodeHandler;
 use crate::ddl::test_util::{
     create_physical_table_metadata, test_create_logical_table_task, test_create_physical_table_task,
 };
-use crate::ddl::{TableMetadata, TableMetadataAllocatorContext};
+use crate::ddl::TableMetadata;
 use crate::error::Error;
 use crate::key::table_route::TableRouteValue;
 use crate::test_util::{new_ddl_context, MockDatanodeManager};
@@ -35,11 +35,9 @@ use crate::test_util::{new_ddl_context, MockDatanodeManager};
 async fn test_on_prepare_physical_table_not_found() {
     let node_manager = Arc::new(MockDatanodeManager::new(()));
     let ddl_context = new_ddl_context(node_manager);
-    let cluster_id = 1;
     let tasks = vec![test_create_logical_table_task("foo")];
     let physical_table_id = 1024u32;
-    let mut procedure =
-        CreateLogicalTablesProcedure::new(cluster_id, tasks, physical_table_id, ddl_context);
+    let mut procedure = CreateLogicalTablesProcedure::new(tasks, physical_table_id, ddl_context);
     let err = procedure.on_prepare().await.unwrap_err();
     assert_matches!(err, Error::TableRouteNotFound { .. });
 }
@@ -48,7 +46,6 @@ async fn test_on_prepare_physical_table_not_found() {
 async fn test_on_prepare() {
     let node_manager = Arc::new(MockDatanodeManager::new(()));
     let ddl_context = new_ddl_context(node_manager);
-    let cluster_id = 1;
     // Prepares physical table metadata.
     let mut create_physical_table_task = test_create_physical_table_task("phy_table");
     let TableMetadata {
@@ -57,10 +54,7 @@ async fn test_on_prepare() {
         ..
     } = ddl_context
         .table_metadata_allocator
-        .create(
-            &TableMetadataAllocatorContext { cluster_id },
-            &create_physical_table_task,
-        )
+        .create(&create_physical_table_task)
         .await
         .unwrap();
     create_physical_table_task.set_table_id(table_id);
@@ -73,8 +67,7 @@ async fn test_on_prepare() {
     // The create logical table procedure.
     let tasks = vec![test_create_logical_table_task("foo")];
     let physical_table_id = table_id;
-    let mut procedure =
-        CreateLogicalTablesProcedure::new(cluster_id, tasks, physical_table_id, ddl_context);
+    let mut procedure = CreateLogicalTablesProcedure::new(tasks, physical_table_id, ddl_context);
     let status = procedure.on_prepare().await.unwrap();
     assert_matches!(status, Status::Executing { persist: true });
 }
@@ -83,7 +76,6 @@ async fn test_on_prepare() {
 async fn test_on_prepare_logical_table_exists_err() {
     let node_manager = Arc::new(MockDatanodeManager::new(()));
     let ddl_context = new_ddl_context(node_manager);
-    let cluster_id = 1;
     // Prepares physical table metadata.
     let mut create_physical_table_task = test_create_physical_table_task("phy_table");
     let TableMetadata {
@@ -92,10 +84,7 @@ async fn test_on_prepare_logical_table_exists_err() {
         ..
     } = ddl_context
         .table_metadata_allocator
-        .create(
-            &TableMetadataAllocatorContext { cluster_id },
-            &create_physical_table_task,
-        )
+        .create(&create_physical_table_task)
         .await
         .unwrap();
     create_physical_table_task.set_table_id(table_id);
@@ -119,7 +108,7 @@ async fn test_on_prepare_logical_table_exists_err() {
     // The create logical table procedure.
     let physical_table_id = table_id;
     let mut procedure =
-        CreateLogicalTablesProcedure::new(cluster_id, vec![task], physical_table_id, ddl_context);
+        CreateLogicalTablesProcedure::new(vec![task], physical_table_id, ddl_context);
     let err = procedure.on_prepare().await.unwrap_err();
     assert_matches!(err, Error::TableAlreadyExists { .. });
     assert_eq!(err.status_code(), StatusCode::TableAlreadyExists);
@@ -129,7 +118,6 @@ async fn test_on_prepare_logical_table_exists_err() {
 async fn test_on_prepare_with_create_if_table_exists() {
     let node_manager = Arc::new(MockDatanodeManager::new(()));
     let ddl_context = new_ddl_context(node_manager);
-    let cluster_id = 1;
     // Prepares physical table metadata.
     let mut create_physical_table_task = test_create_physical_table_task("phy_table");
     let TableMetadata {
@@ -138,10 +126,7 @@ async fn test_on_prepare_with_create_if_table_exists() {
         ..
     } = ddl_context
         .table_metadata_allocator
-        .create(
-            &TableMetadataAllocatorContext { cluster_id },
-            &create_physical_table_task,
-        )
+        .create(&create_physical_table_task)
         .await
         .unwrap();
     create_physical_table_task.set_table_id(table_id);
@@ -167,7 +152,7 @@ async fn test_on_prepare_with_create_if_table_exists() {
     // Sets `create_if_not_exists`
     task.create_table.create_if_not_exists = true;
     let mut procedure =
-        CreateLogicalTablesProcedure::new(cluster_id, vec![task], physical_table_id, ddl_context);
+        CreateLogicalTablesProcedure::new(vec![task], physical_table_id, ddl_context);
     let status = procedure.on_prepare().await.unwrap();
     let output = status.downcast_output_ref::<Vec<u32>>().unwrap();
     assert_eq!(*output, vec![8192]);
@@ -177,7 +162,6 @@ async fn test_on_prepare_with_create_if_table_exists() {
 async fn test_on_prepare_part_logical_tables_exist() {
     let node_manager = Arc::new(MockDatanodeManager::new(()));
     let ddl_context = new_ddl_context(node_manager);
-    let cluster_id = 1;
     // Prepares physical table metadata.
     let mut create_physical_table_task = test_create_physical_table_task("phy_table");
     let TableMetadata {
@@ -186,10 +170,7 @@ async fn test_on_prepare_part_logical_tables_exist() {
         ..
     } = ddl_context
         .table_metadata_allocator
-        .create(
-            &TableMetadataAllocatorContext { cluster_id },
-            &create_physical_table_task,
-        )
+        .create(&create_physical_table_task)
         .await
         .unwrap();
     create_physical_table_task.set_table_id(table_id);
@@ -216,7 +197,6 @@ async fn test_on_prepare_part_logical_tables_exist() {
     task.create_table.create_if_not_exists = true;
     let non_exist_task = test_create_logical_table_task("non_exists");
     let mut procedure = CreateLogicalTablesProcedure::new(
-        cluster_id,
         vec![task, non_exist_task],
         physical_table_id,
         ddl_context,
@@ -229,7 +209,6 @@ async fn test_on_prepare_part_logical_tables_exist() {
 async fn test_on_create_metadata() {
     let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
     let ddl_context = new_ddl_context(node_manager);
-    let cluster_id = 1;
     // Prepares physical table metadata.
     let mut create_physical_table_task = test_create_physical_table_task("phy_table");
     let TableMetadata {
@@ -238,10 +217,7 @@ async fn test_on_create_metadata() {
         ..
     } = ddl_context
         .table_metadata_allocator
-        .create(
-            &TableMetadataAllocatorContext { cluster_id },
-            &create_physical_table_task,
-        )
+        .create(&create_physical_table_task)
         .await
         .unwrap();
     create_physical_table_task.set_table_id(table_id);
@@ -257,7 +233,6 @@ async fn test_on_create_metadata() {
     let task = test_create_logical_table_task("foo");
     let yet_another_task = test_create_logical_table_task("bar");
     let mut procedure = CreateLogicalTablesProcedure::new(
-        cluster_id,
         vec![task, yet_another_task],
         physical_table_id,
         ddl_context,
@@ -279,7 +254,6 @@ async fn test_on_create_metadata() {
 async fn test_on_create_metadata_part_logical_tables_exist() {
     let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
     let ddl_context = new_ddl_context(node_manager);
-    let cluster_id = 1;
     // Prepares physical table metadata.
     let mut create_physical_table_task = test_create_physical_table_task("phy_table");
     let TableMetadata {
@@ -288,10 +262,7 @@ async fn test_on_create_metadata_part_logical_tables_exist() {
         ..
     } = ddl_context
         .table_metadata_allocator
-        .create(
-            &TableMetadataAllocatorContext { cluster_id },
-            &create_physical_table_task,
-        )
+        .create(&create_physical_table_task)
         .await
         .unwrap();
     create_physical_table_task.set_table_id(table_id);
@@ -318,7 +289,6 @@ async fn test_on_create_metadata_part_logical_tables_exist() {
     task.create_table.create_if_not_exists = true;
     let non_exist_task = test_create_logical_table_task("non_exists");
     let mut procedure = CreateLogicalTablesProcedure::new(
-        cluster_id,
         vec![task, non_exist_task],
         physical_table_id,
         ddl_context,
@@ -340,7 +310,6 @@ async fn test_on_create_metadata_part_logical_tables_exist() {
 async fn test_on_create_metadata_err() {
     let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
     let ddl_context = new_ddl_context(node_manager);
-    let cluster_id = 1;
     // Prepares physical table metadata.
     let mut create_physical_table_task = test_create_physical_table_task("phy_table");
     let TableMetadata {
@@ -349,10 +318,7 @@ async fn test_on_create_metadata_err() {
         ..
     } = ddl_context
         .table_metadata_allocator
-        .create(
-            &TableMetadataAllocatorContext { cluster_id },
-            &create_physical_table_task,
-        )
+        .create(&create_physical_table_task)
         .await
         .unwrap();
     create_physical_table_task.set_table_id(table_id);
@@ -368,7 +334,6 @@ async fn test_on_create_metadata_err() {
     let task = test_create_logical_table_task("foo");
     let yet_another_task = test_create_logical_table_task("bar");
     let mut procedure = CreateLogicalTablesProcedure::new(
-        cluster_id,
         vec![task.clone(), yet_another_task],
         physical_table_id,
         ddl_context.clone(),

@@ -87,7 +87,6 @@ pub(crate) fn test_create_table_task(name: &str) -> CreateTableTask {
 async fn test_on_prepare_table_exists_err() {
     let node_manager = Arc::new(MockDatanodeManager::new(()));
     let ddl_context = new_ddl_context(node_manager);
-    let cluster_id = 1;
     let task = test_create_table_task("foo");
     assert!(!task.create_table.create_if_not_exists);
     // Puts a value to table name key.
@@ -100,7 +99,7 @@ async fn test_on_prepare_table_exists_err() {
     )
     .await
     .unwrap();
-    let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context);
+    let mut procedure = CreateTableProcedure::new(task, ddl_context);
     let err = procedure.on_prepare().await.unwrap_err();
     assert_matches!(err, Error::TableAlreadyExists { .. });
     assert_eq!(err.status_code(), StatusCode::TableAlreadyExists);
@@ -110,7 +109,6 @@ async fn test_on_prepare_table_exists_err() {
 async fn test_on_prepare_with_create_if_table_exists() {
     let node_manager = Arc::new(MockDatanodeManager::new(()));
     let ddl_context = new_ddl_context(node_manager);
-    let cluster_id = 1;
     let mut task = test_create_table_task("foo");
     task.create_table.create_if_not_exists = true;
     task.table_info.ident.table_id = 1024;
@@ -124,7 +122,7 @@ async fn test_on_prepare_with_create_if_table_exists() {
     )
     .await
     .unwrap();
-    let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context);
+    let mut procedure = CreateTableProcedure::new(task, ddl_context);
     let status = procedure.on_prepare().await.unwrap();
     assert_matches!(status, Status::Done { output: Some(..) });
     let table_id = *status.downcast_output_ref::<u32>().unwrap();
@@ -135,10 +133,9 @@ async fn test_on_prepare_with_create_if_table_exists() {
 async fn test_on_prepare_without_create_if_table_exists() {
     let node_manager = Arc::new(MockDatanodeManager::new(()));
     let ddl_context = new_ddl_context(node_manager);
-    let cluster_id = 1;
     let mut task = test_create_table_task("foo");
     task.create_table.create_if_not_exists = true;
-    let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context);
+    let mut procedure = CreateTableProcedure::new(task, ddl_context);
     let status = procedure.on_prepare().await.unwrap();
     assert_matches!(status, Status::Executing { persist: true });
     assert_eq!(procedure.table_id(), 1024);
@@ -148,11 +145,10 @@ async fn test_on_prepare_without_create_if_table_exists() {
 async fn test_on_prepare_with_no_partition_err() {
     let node_manager = Arc::new(MockDatanodeManager::new(()));
     let ddl_context = new_ddl_context(node_manager);
-    let cluster_id = 1;
     let mut task = test_create_table_task("foo");
     task.partitions = vec![];
     task.create_table.create_if_not_exists = true;
-    let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context);
+    let mut procedure = CreateTableProcedure::new(task, ddl_context);
     let err = procedure.on_prepare().await.unwrap_err();
     assert_matches!(err, Error::Unexpected { .. });
     assert!(err
@@ -165,10 +161,9 @@ async fn test_on_datanode_create_regions_should_retry() {
     common_telemetry::init_default_ut_logging();
     let node_manager = Arc::new(MockDatanodeManager::new(RetryErrorDatanodeHandler));
     let ddl_context = new_ddl_context(node_manager);
-    let cluster_id = 1;
     let task = test_create_table_task("foo");
     assert!(!task.create_table.create_if_not_exists);
-    let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context);
+    let mut procedure = CreateTableProcedure::new(task, ddl_context);
     procedure.on_prepare().await.unwrap();
     let ctx = ProcedureContext {
         procedure_id: ProcedureId::random(),
@@ -183,10 +178,9 @@ async fn test_on_datanode_create_regions_should_not_retry() {
     common_telemetry::init_default_ut_logging();
     let node_manager = Arc::new(MockDatanodeManager::new(UnexpectedErrorDatanodeHandler));
     let ddl_context = new_ddl_context(node_manager);
-    let cluster_id = 1;
     let task = test_create_table_task("foo");
     assert!(!task.create_table.create_if_not_exists);
-    let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context);
+    let mut procedure = CreateTableProcedure::new(task, ddl_context);
     procedure.on_prepare().await.unwrap();
     let ctx = ProcedureContext {
         procedure_id: ProcedureId::random(),
@@ -201,10 +195,9 @@ async fn test_on_create_metadata_error() {
     common_telemetry::init_default_ut_logging();
     let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
     let ddl_context = new_ddl_context(node_manager);
-    let cluster_id = 1;
     let task = test_create_table_task("foo");
     assert!(!task.create_table.create_if_not_exists);
-    let mut procedure = CreateTableProcedure::new(cluster_id, task.clone(), ddl_context.clone());
+    let mut procedure = CreateTableProcedure::new(task.clone(), ddl_context.clone());
     procedure.on_prepare().await.unwrap();
     let ctx = ProcedureContext {
         procedure_id: ProcedureId::random(),
@@ -233,10 +226,9 @@ async fn test_on_create_metadata() {
     common_telemetry::init_default_ut_logging();
     let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
     let ddl_context = new_ddl_context(node_manager);
-    let cluster_id = 1;
     let task = test_create_table_task("foo");
     assert!(!task.create_table.create_if_not_exists);
-    let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context);
+    let mut procedure = CreateTableProcedure::new(task, ddl_context);
     procedure.on_prepare().await.unwrap();
     let ctx = ProcedureContext {
         procedure_id: ProcedureId::random(),
@@ -251,14 +243,12 @@ async fn test_on_create_metadata() {
 
 #[tokio::test]
 async fn test_memory_region_keeper_guard_dropped_on_procedure_done() {
-    let cluster_id = 1;
-
     let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
     let kv_backend = Arc::new(MemoryKvBackend::new());
     let ddl_context = new_ddl_context_with_kv_backend(node_manager, kv_backend);
 
     let task = test_create_table_task("foo");
-    let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context.clone());
+    let mut procedure = CreateTableProcedure::new(task, ddl_context.clone());
 
     execute_procedure_until(&mut procedure, |p| {
         p.creator.data.state == CreateTableState::CreateMetadata

@@ -97,7 +97,6 @@ pub(crate) fn test_create_view_task(name: &str) -> CreateViewTask {
 async fn test_on_prepare_view_exists_err() {
     let node_manager = Arc::new(MockDatanodeManager::new(()));
     let ddl_context = new_ddl_context(node_manager);
-    let cluster_id = 1;
     let task = test_create_view_task("foo");
     assert!(!task.create_view.create_if_not_exists);
     // Puts a value to table name key.
@@ -113,7 +112,7 @@ async fn test_on_prepare_view_exists_err() {
     )
     .await
     .unwrap();
-    let mut procedure = CreateViewProcedure::new(cluster_id, task, ddl_context);
+    let mut procedure = CreateViewProcedure::new(task, ddl_context);
     let err = procedure.on_prepare().await.unwrap_err();
     assert_matches!(err, Error::ViewAlreadyExists { .. });
     assert_eq!(err.status_code(), StatusCode::TableAlreadyExists);
@@ -123,7 +122,6 @@ async fn test_on_prepare_view_exists_err() {
 async fn test_on_prepare_with_create_if_view_exists() {
     let node_manager = Arc::new(MockDatanodeManager::new(()));
     let ddl_context = new_ddl_context(node_manager);
-    let cluster_id = 1;
     let mut task = test_create_view_task("foo");
     task.create_view.create_if_not_exists = true;
     task.view_info.ident.table_id = 1024;
@@ -140,7 +138,7 @@ async fn test_on_prepare_with_create_if_view_exists() {
     )
     .await
     .unwrap();
-    let mut procedure = CreateViewProcedure::new(cluster_id, task, ddl_context);
+    let mut procedure = CreateViewProcedure::new(task, ddl_context);
     let status = procedure.on_prepare().await.unwrap();
     assert_matches!(status, Status::Done { output: Some(..) });
     let table_id = *status.downcast_output_ref::<u32>().unwrap();
@@ -151,10 +149,9 @@ async fn test_on_prepare_with_create_if_view_exists() {
 async fn test_on_prepare_without_create_if_table_exists() {
     let node_manager = Arc::new(MockDatanodeManager::new(()));
     let ddl_context = new_ddl_context(node_manager);
-    let cluster_id = 1;
     let mut task = test_create_view_task("foo");
     task.create_view.create_if_not_exists = true;
-    let mut procedure = CreateViewProcedure::new(cluster_id, task, ddl_context);
+    let mut procedure = CreateViewProcedure::new(task, ddl_context);
     let status = procedure.on_prepare().await.unwrap();
     assert_matches!(status, Status::Executing { persist: true });
     assert_eq!(procedure.view_id(), 1024);
@@ -165,10 +162,9 @@ async fn test_on_create_metadata() {
     common_telemetry::init_default_ut_logging();
     let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
     let ddl_context = new_ddl_context(node_manager);
-    let cluster_id = 1;
     let task = test_create_view_task("foo");
     assert!(!task.create_view.create_if_not_exists);
-    let mut procedure = CreateViewProcedure::new(cluster_id, task, ddl_context);
+    let mut procedure = CreateViewProcedure::new(task, ddl_context);
     procedure.on_prepare().await.unwrap();
     let ctx = ProcedureContext {
         procedure_id: ProcedureId::random(),
@@ -185,10 +181,9 @@ async fn test_replace_view_metadata() {
     common_telemetry::init_default_ut_logging();
     let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
     let ddl_context = new_ddl_context(node_manager.clone());
-    let cluster_id = 1;
     let task = test_create_view_task("foo");
     assert!(!task.create_view.create_if_not_exists);
-    let mut procedure = CreateViewProcedure::new(cluster_id, task.clone(), ddl_context.clone());
+    let mut procedure = CreateViewProcedure::new(task.clone(), ddl_context.clone());
     procedure.on_prepare().await.unwrap();
     let ctx = ProcedureContext {
         procedure_id: ProcedureId::random(),
@@ -213,7 +208,7 @@ async fn test_replace_view_metadata() {
     let mut task = test_create_view_task("foo");
     // The view already exists, prepare should fail
     {
-        let mut procedure = CreateViewProcedure::new(cluster_id, task.clone(), ddl_context.clone());
+        let mut procedure = CreateViewProcedure::new(task.clone(), ddl_context.clone());
         let err = procedure.on_prepare().await.unwrap_err();
         assert_matches!(err, Error::ViewAlreadyExists { .. });
         assert_eq!(err.status_code(), StatusCode::TableAlreadyExists);
@@ -224,7 +219,7 @@ async fn test_replace_view_metadata() {
     task.create_view.logical_plan = vec![4, 5, 6];
     task.create_view.definition = "new_definition".to_string();
 
-    let mut procedure = CreateViewProcedure::new(cluster_id, task, ddl_context.clone());
+    let mut procedure = CreateViewProcedure::new(task, ddl_context.clone());
     procedure.on_prepare().await.unwrap();
     let ctx = ProcedureContext {
         procedure_id: ProcedureId::random(),
@@ -254,12 +249,11 @@ async fn test_replace_table() {
     common_telemetry::init_default_ut_logging();
     let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
     let ddl_context = new_ddl_context(node_manager.clone());
-    let cluster_id = 1;
 
     {
         // Create a `foo` table.
         let task = test_create_table_task("foo");
-        let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context.clone());
+        let mut procedure = CreateTableProcedure::new(task, ddl_context.clone());
         procedure.on_prepare().await.unwrap();
         let ctx = ProcedureContext {
             procedure_id: ProcedureId::random(),
@@ -272,7 +266,7 @@ async fn test_replace_table() {
     // Try to replace a view named `foo` too.
     let mut task = test_create_view_task("foo");
     task.create_view.or_replace = true;
-    let mut procedure = CreateViewProcedure::new(cluster_id, task.clone(), ddl_context.clone());
+    let mut procedure = CreateViewProcedure::new(task.clone(), ddl_context.clone());
     let err = procedure.on_prepare().await.unwrap_err();
     assert_matches!(err, Error::TableAlreadyExists { .. });
     assert_eq!(err.status_code(), StatusCode::TableAlreadyExists);

@@ -31,7 +31,6 @@ use crate::test_util::{new_ddl_context, MockDatanodeManager};
 #[tokio::test]
 async fn test_drop_database_with_logical_tables() {
     common_telemetry::init_default_ut_logging();
-    let cluster_id = 1;
     let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
     let ddl_context = new_ddl_context(node_manager);
     ddl_context
@@ -45,11 +44,11 @@ async fn test_drop_database_with_logical_tables() {
     .await
     .unwrap();
     // Creates physical table
-    let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
+    let phy_id = create_physical_table(&ddl_context, "phy").await;
     // Creates 3 logical tables
-    create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
-    create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;
-    create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table3").await;
+    create_logical_table(ddl_context.clone(), phy_id, "table1").await;
+    create_logical_table(ddl_context.clone(), phy_id, "table2").await;
+    create_logical_table(ddl_context.clone(), phy_id, "table3").await;
 
     let mut procedure = DropDatabaseProcedure::new(
         DEFAULT_CATALOG_NAME.to_string(),
@@ -80,7 +79,6 @@ async fn test_drop_database_with_logical_tables() {
 #[tokio::test]
 async fn test_drop_database_retryable_error() {
     common_telemetry::init_default_ut_logging();
-    let cluster_id = 1;
     let node_manager = Arc::new(MockDatanodeManager::new(RetryErrorDatanodeHandler));
     let ddl_context = new_ddl_context(node_manager);
     ddl_context
@@ -94,11 +92,11 @@ async fn test_drop_database_retryable_error() {
     .await
     .unwrap();
     // Creates physical table
-    let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
+    let phy_id = create_physical_table(&ddl_context, "phy").await;
     // Creates 3 logical tables
-    create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
-    create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;
-    create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table3").await;
+    create_logical_table(ddl_context.clone(), phy_id, "table1").await;
+    create_logical_table(ddl_context.clone(), phy_id, "table2").await;
+    create_logical_table(ddl_context.clone(), phy_id, "table3").await;
 
     let mut procedure = DropDatabaseProcedure::new(
         DEFAULT_CATALOG_NAME.to_string(),
@@ -128,7 +126,6 @@ async fn test_drop_database_retryable_error() {
 #[tokio::test]
 async fn test_drop_database_recover() {
     common_telemetry::init_default_ut_logging();
-    let cluster_id = 1;
     let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
     let ddl_context = new_ddl_context(node_manager);
     ddl_context
@@ -142,9 +139,9 @@ async fn test_drop_database_recover() {
     .await
     .unwrap();
     // Creates a physical table
-    let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
+    let phy_id = create_physical_table(&ddl_context, "phy").await;
     // Creates a logical tables
-    create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
+    create_logical_table(ddl_context.clone(), phy_id, "table1").await;
     let mut procedure = DropDatabaseProcedure::new(
         DEFAULT_CATALOG_NAME.to_string(),
         DEFAULT_SCHEMA_NAME.to_string(),

@@ -40,12 +40,11 @@ fn test_drop_flow_task(flow_name: &str, flow_id: u32, drop_if_exists: bool) -> D
 
 #[tokio::test]
 async fn test_drop_flow_not_found() {
-    let cluster_id = 1;
     let flow_id = 1024;
     let node_manager = Arc::new(MockFlownodeManager::new(NaiveFlownodeHandler));
     let ddl_context = new_ddl_context(node_manager);
     let task = test_drop_flow_task("my_flow", flow_id, false);
-    let mut procedure = DropFlowProcedure::new(cluster_id, task, ddl_context);
+    let mut procedure = DropFlowProcedure::new(task, ddl_context);
     let err = procedure.on_prepare().await.unwrap_err();
     assert_matches!(err, error::Error::FlowNotFound { .. });
 }
@@ -53,7 +52,6 @@ async fn test_drop_flow_not_found() {
 #[tokio::test]
 async fn test_drop_flow() {
     // create a flow
-    let cluster_id = 1;
     let table_id = 1024;
     let source_table_names = vec![TableName::new(
         DEFAULT_CATALOG_NAME,
@@ -75,27 +73,21 @@ async fn test_drop_flow() {
     )
     .await
     .unwrap();
-    let flow_id = create_test_flow(
-        &ddl_context,
-        cluster_id,
-        "my_flow",
-        source_table_names,
-        sink_table_name,
-    )
-    .await;
+    let flow_id =
+        create_test_flow(&ddl_context, "my_flow", source_table_names, sink_table_name).await;
     // Drops the flows
     let task = test_drop_flow_task("my_flow", flow_id, false);
-    let mut procedure = DropFlowProcedure::new(cluster_id, task, ddl_context.clone());
+    let mut procedure = DropFlowProcedure::new(task, ddl_context.clone());
     execute_procedure_until_done(&mut procedure).await;
 
     // Drops if not exists
     let task = test_drop_flow_task("my_flow", flow_id, true);
-    let mut procedure = DropFlowProcedure::new(cluster_id, task, ddl_context.clone());
+    let mut procedure = DropFlowProcedure::new(task, ddl_context.clone());
    execute_procedure_until_done(&mut procedure).await;
 
     // Drops again
     let task = test_drop_flow_task("my_flow", flow_id, false);
-    let mut procedure = DropFlowProcedure::new(cluster_id, task, ddl_context);
+    let mut procedure = DropFlowProcedure::new(task, ddl_context);
     let err = procedure.on_prepare().await.unwrap_err();
     assert_matches!(err, error::Error::FlowNotFound { .. });
 }

@@ -35,7 +35,7 @@ use crate::ddl::test_util::{
    create_logical_table, create_physical_table, create_physical_table_metadata,
    test_create_logical_table_task, test_create_physical_table_task,
};
use crate::ddl::{TableMetadata, TableMetadataAllocatorContext};
use crate::ddl::TableMetadata;
use crate::key::table_route::TableRouteValue;
use crate::kv_backend::memory::MemoryKvBackend;
use crate::peer::Peer;
@@ -47,7 +47,6 @@ use crate::test_util::{new_ddl_context, new_ddl_context_with_kv_backend, MockDat
async fn test_on_prepare_table_not_exists_err() {
    let node_manager = Arc::new(MockDatanodeManager::new(()));
    let ddl_context = new_ddl_context(node_manager);
    let cluster_id = 1;
    let table_name = "foo";
    let table_id = 1024;
    let task = test_create_table_task(table_name, table_id);
@@ -63,7 +62,7 @@ async fn test_on_prepare_table_not_exists_err() {
        .unwrap();

    let task = new_drop_table_task("bar", table_id, false);
    let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context);
    let mut procedure = DropTableProcedure::new(task, ddl_context);
    let err = procedure.on_prepare().await.unwrap_err();
    assert_eq!(err.status_code(), StatusCode::TableNotFound);
}
@@ -72,7 +71,6 @@ async fn test_on_prepare_table_not_exists_err() {
async fn test_on_prepare_table() {
    let node_manager = Arc::new(MockDatanodeManager::new(()));
    let ddl_context = new_ddl_context(node_manager);
    let cluster_id = 1;
    let table_name = "foo";
    let table_id = 1024;
    let task = test_create_table_task(table_name, table_id);
@@ -89,13 +87,13 @@ async fn test_on_prepare_table() {

    let task = new_drop_table_task("bar", table_id, true);
    // Drop if exists
    let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
    let mut procedure = DropTableProcedure::new(task, ddl_context.clone());
    procedure.on_prepare().await.unwrap();
    assert!(!procedure.rollback_supported());

    let task = new_drop_table_task(table_name, table_id, false);
    // Drop table
    let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context);
    let mut procedure = DropTableProcedure::new(task, ddl_context);
    procedure.on_prepare().await.unwrap();
}

@@ -105,7 +103,6 @@ async fn test_on_datanode_drop_regions() {
    let datanode_handler = DatanodeWatcher(tx);
    let node_manager = Arc::new(MockDatanodeManager::new(datanode_handler));
    let ddl_context = new_ddl_context(node_manager);
    let cluster_id = 1;
    let table_id = 1024;
    let table_name = "foo";
    let task = test_create_table_task(table_name, table_id);
@@ -144,7 +141,7 @@ async fn test_on_datanode_drop_regions() {

    let task = new_drop_table_task(table_name, table_id, false);
    // Drop table
    let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context);
    let mut procedure = DropTableProcedure::new(task, ddl_context);
    procedure.on_prepare().await.unwrap();
    procedure.on_datanode_drop_regions().await.unwrap();

@@ -179,7 +176,6 @@ async fn test_on_rollback() {
    let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
    let kv_backend = Arc::new(MemoryKvBackend::new());
    let ddl_context = new_ddl_context_with_kv_backend(node_manager, kv_backend.clone());
    let cluster_id = 1;
    // Prepares physical table metadata.
    let mut create_physical_table_task = test_create_physical_table_task("phy_table");
    let TableMetadata {
@@ -188,10 +184,7 @@ async fn test_on_rollback() {
        ..
    } = ddl_context
        .table_metadata_allocator
        .create(
            &TableMetadataAllocatorContext { cluster_id },
            &create_physical_table_task,
        )
        .create(&create_physical_table_task)
        .await
        .unwrap();
    create_physical_table_task.set_table_id(table_id);
@@ -205,12 +198,8 @@ async fn test_on_rollback() {
    let physical_table_id = table_id;
    // Creates the logical table metadata.
    let task = test_create_logical_table_task("foo");
    let mut procedure = CreateLogicalTablesProcedure::new(
        cluster_id,
        vec![task],
        physical_table_id,
        ddl_context.clone(),
    );
    let mut procedure =
        CreateLogicalTablesProcedure::new(vec![task], physical_table_id, ddl_context.clone());
    procedure.on_prepare().await.unwrap();
    let ctx = new_test_procedure_context();
    procedure.execute(&ctx).await.unwrap();
@@ -223,7 +212,7 @@ async fn test_on_rollback() {
    // Drops the physical table
    {
        let task = new_drop_table_task("phy_table", physical_table_id, false);
        let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
        let mut procedure = DropTableProcedure::new(task, ddl_context.clone());
        procedure.on_prepare().await.unwrap();
        assert!(procedure.rollback_supported());
        procedure.on_delete_metadata().await.unwrap();
@@ -238,7 +227,7 @@ async fn test_on_rollback() {

    // Drops the logical table
    let task = new_drop_table_task("foo", table_ids[0], false);
    let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
    let mut procedure = DropTableProcedure::new(task, ddl_context.clone());
    procedure.on_prepare().await.unwrap();
    assert!(!procedure.rollback_supported());
}
@@ -255,18 +244,15 @@ fn new_drop_table_task(table_name: &str, table_id: TableId, drop_if_exists: bool

#[tokio::test]
async fn test_memory_region_keeper_guard_dropped_on_procedure_done() {
    let cluster_id = 1;

    let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
    let kv_backend = Arc::new(MemoryKvBackend::new());
    let ddl_context = new_ddl_context_with_kv_backend(node_manager, kv_backend);

    let physical_table_id = create_physical_table(&ddl_context, cluster_id, "t").await;
    let logical_table_id =
        create_logical_table(ddl_context.clone(), cluster_id, physical_table_id, "s").await;
    let physical_table_id = create_physical_table(&ddl_context, "t").await;
    let logical_table_id = create_logical_table(ddl_context.clone(), physical_table_id, "s").await;

    let inner_test = |task: DropTableTask| async {
        let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
        let mut procedure = DropTableProcedure::new(task, ddl_context.clone());
        execute_procedure_until(&mut procedure, |p| {
            p.data.state == DropTableState::InvalidateTableCache
        })
@@ -304,14 +290,13 @@ async fn test_from_json() {
        (DropTableState::DatanodeDropRegions, 1, 1),
        (DropTableState::DeleteTombstone, 1, 0),
    ] {
        let cluster_id = 1;
        let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
        let kv_backend = Arc::new(MemoryKvBackend::new());
        let ddl_context = new_ddl_context_with_kv_backend(node_manager, kv_backend);

        let physical_table_id = create_physical_table(&ddl_context, cluster_id, "t").await;
        let physical_table_id = create_physical_table(&ddl_context, "t").await;
        let task = new_drop_table_task("t", physical_table_id, false);
        let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
        let mut procedure = DropTableProcedure::new(task, ddl_context.clone());
        execute_procedure_until(&mut procedure, |p| p.data.state == state).await;
        let data = procedure.dump().unwrap();
        assert_eq!(
@@ -334,14 +319,13 @@ async fn test_from_json() {

    let num_operating_regions = 0;
    let num_operating_regions_after_recovery = 0;
    let cluster_id = 1;
    let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
    let kv_backend = Arc::new(MemoryKvBackend::new());
    let ddl_context = new_ddl_context_with_kv_backend(node_manager, kv_backend);

    let physical_table_id = create_physical_table(&ddl_context, cluster_id, "t").await;
    let physical_table_id = create_physical_table(&ddl_context, "t").await;
    let task = new_drop_table_task("t", physical_table_id, false);
    let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
    let mut procedure = DropTableProcedure::new(task, ddl_context.clone());
    execute_procedure_until_done(&mut procedure).await;
    let data = procedure.dump().unwrap();
    assert_eq!(

@@ -41,7 +41,6 @@ fn new_drop_view_task(view: &str, view_id: TableId, drop_if_exists: bool) -> Dro
async fn test_on_prepare_view_not_exists_err() {
    let node_manager = Arc::new(MockDatanodeManager::new(()));
    let ddl_context = new_ddl_context(node_manager);
    let cluster_id = 1;
    let view_id = 1024;
    let mut task = test_create_view_task("foo");
    task.view_info.ident.table_id = view_id;
@@ -60,7 +59,7 @@ async fn test_on_prepare_view_not_exists_err() {
        .unwrap();

    let task = new_drop_view_task("bar", view_id, false);
    let mut procedure = DropViewProcedure::new(cluster_id, task, ddl_context);
    let mut procedure = DropViewProcedure::new(task, ddl_context);
    let err = procedure.on_prepare().await.unwrap_err();
    assert_eq!(err.status_code(), StatusCode::TableNotFound);
}
@@ -69,7 +68,6 @@ async fn test_on_prepare_view_not_exists_err() {
async fn test_on_prepare_not_view_err() {
    let node_manager = Arc::new(MockDatanodeManager::new(()));
    let ddl_context = new_ddl_context(node_manager);
    let cluster_id = 1;
    let view_id = 1024;
    let view_name = "foo";
    let task = test_create_table_task(view_name, view_id);
@@ -85,7 +83,7 @@ async fn test_on_prepare_not_view_err() {
        .unwrap();

    let task = new_drop_view_task(view_name, view_id, false);
    let mut procedure = DropViewProcedure::new(cluster_id, task, ddl_context);
    let mut procedure = DropViewProcedure::new(task, ddl_context);
    // It's not a view, expect error
    let err = procedure.on_prepare().await.unwrap_err();
    assert_eq!(err.status_code(), StatusCode::InvalidArguments);
@@ -95,7 +93,6 @@ async fn test_on_prepare_not_view_err() {
async fn test_on_prepare_success() {
    let node_manager = Arc::new(MockDatanodeManager::new(()));
    let ddl_context = new_ddl_context(node_manager);
    let cluster_id = 1;
    let view_id = 1024;
    let view_name = "foo";
    let mut task = test_create_view_task("foo");
@@ -116,12 +113,12 @@ async fn test_on_prepare_success() {

    let task = new_drop_view_task("bar", view_id, true);
    // Drop if exists
    let mut procedure = DropViewProcedure::new(cluster_id, task, ddl_context.clone());
    let mut procedure = DropViewProcedure::new(task, ddl_context.clone());
    procedure.on_prepare().await.unwrap();

    let task = new_drop_view_task(view_name, view_id, false);
    // Prepare success
    let mut procedure = DropViewProcedure::new(cluster_id, task, ddl_context);
    let mut procedure = DropViewProcedure::new(task, ddl_context);
    procedure.on_prepare().await.unwrap();
    assert_eq!(DropViewState::DeleteMetadata, procedure.state());
}
@@ -130,7 +127,6 @@ async fn test_on_prepare_success() {
async fn test_drop_view_success() {
    let node_manager = Arc::new(MockDatanodeManager::new(()));
    let ddl_context = new_ddl_context(node_manager);
    let cluster_id = 1;
    let view_id = 1024;
    let view_name = "foo";
    let mut task = test_create_view_task("foo");
@@ -159,7 +155,7 @@ async fn test_drop_view_success() {

    let task = new_drop_view_task(view_name, view_id, false);
    // Prepare success
    let mut procedure = DropViewProcedure::new(cluster_id, task, ddl_context.clone());
    let mut procedure = DropViewProcedure::new(task, ddl_context.clone());
    execute_procedure_until_done(&mut procedure).await;
    assert_eq!(DropViewState::InvalidateViewCache, procedure.state());

@@ -174,7 +170,7 @@ async fn test_drop_view_success() {

    // Drop again
    let task = new_drop_view_task(view_name, view_id, false);
    let mut procedure = DropViewProcedure::new(cluster_id, task, ddl_context);
    let mut procedure = DropViewProcedure::new(task, ddl_context);
    let err = procedure.on_prepare().await.unwrap_err();
    assert_eq!(err.status_code(), StatusCode::TableNotFound);
}

@@ -39,9 +39,9 @@ use crate::key::table_info::TableInfoValue;
use crate::key::table_name::TableNameKey;
use crate::key::DeserializedValueWithBytes;
use crate::lock_key::{CatalogLock, SchemaLock, TableLock};
use crate::metrics;
use crate::rpc::ddl::TruncateTableTask;
use crate::rpc::router::{find_leader_regions, find_leaders, RegionRoute};
use crate::{metrics, ClusterId};

pub struct TruncateTableProcedure {
    context: DdlContext,
@@ -91,7 +91,6 @@ impl TruncateTableProcedure {
    pub(crate) const TYPE_NAME: &'static str = "metasrv-procedure::TruncateTable";

    pub(crate) fn new(
        cluster_id: ClusterId,
        task: TruncateTableTask,
        table_info_value: DeserializedValueWithBytes<TableInfoValue>,
        region_routes: Vec<RegionRoute>,
@@ -99,7 +98,7 @@ impl TruncateTableProcedure {
    ) -> Self {
        Self {
            context,
            data: TruncateTableData::new(cluster_id, task, table_info_value, region_routes),
            data: TruncateTableData::new(task, table_info_value, region_routes),
        }
    }

@@ -189,7 +188,6 @@ impl TruncateTableProcedure {
#[derive(Debug, Serialize, Deserialize)]
pub struct TruncateTableData {
    state: TruncateTableState,
    cluster_id: ClusterId,
    task: TruncateTableTask,
    table_info_value: DeserializedValueWithBytes<TableInfoValue>,
    region_routes: Vec<RegionRoute>,
@@ -197,14 +195,12 @@ pub struct TruncateTableData {

impl TruncateTableData {
    pub fn new(
        cluster_id: ClusterId,
        task: TruncateTableTask,
        table_info_value: DeserializedValueWithBytes<TableInfoValue>,
        region_routes: Vec<RegionRoute>,
    ) -> Self {
        Self {
            state: TruncateTableState::Prepare,
            cluster_id,
            task,
            table_info_value,
            region_routes,

@@ -34,7 +34,6 @@ use crate::key::TableMetadataManagerRef;
use crate::peer::Peer;
use crate::rpc::ddl::CreateTableTask;
use crate::rpc::router::RegionRoute;
use crate::ClusterId;

/// Adds [Peer] context if the error is unretryable.
pub fn add_peer_context_if_needed(datanode: Peer) -> impl FnOnce(Error) -> Error {
@@ -144,7 +143,6 @@ pub async fn get_physical_table_id(

/// Converts a list of [`RegionRoute`] to a list of [`DetectingRegion`].
pub fn convert_region_routes_to_detecting_regions(
    cluster_id: ClusterId,
    region_routes: &[RegionRoute],
) -> Vec<DetectingRegion> {
    region_routes
@@ -153,7 +151,7 @@ pub fn convert_region_routes_to_detecting_regions(
            route
                .leader_peer
                .as_ref()
                .map(|peer| (cluster_id, peer.id, route.region.id))
                .map(|peer| (peer.id, route.region.id))
        })
        .collect::<Vec<_>>()
}
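With the cluster scope gone, a detecting region is identified by the datanode and region alone. A minimal, self-contained sketch of the new mapping, assuming `DetectingRegion` is the `(datanode_id, region_id)` pair the closure above produces; the route representation here is a hypothetical stand-in for `RegionRoute`:

// Standalone illustration only; mirrors the mapping closure above.
type DatanodeId = u64;
type RegionId = u64;
type DetectingRegion = (DatanodeId, RegionId);

fn detecting_regions(routes: &[(Option<DatanodeId>, RegionId)]) -> Vec<DetectingRegion> {
    routes
        .iter()
        .filter_map(|(leader, region)| leader.map(|datanode| (datanode, *region)))
        .collect()
}

Routes without a leader peer yield `None` and are skipped, which matches what the surrounding collect in the crate function appears to do.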
@@ -60,7 +60,6 @@ use crate::rpc::ddl::{
use crate::rpc::procedure;
use crate::rpc::procedure::{MigrateRegionRequest, MigrateRegionResponse, ProcedureStateResponse};
use crate::rpc::router::RegionRoute;
use crate::ClusterId;

pub type DdlManagerRef = Arc<DdlManager>;

@@ -154,13 +153,12 @@ impl DdlManager {
    #[tracing::instrument(skip_all)]
    pub async fn submit_alter_table_task(
        &self,
        cluster_id: ClusterId,
        table_id: TableId,
        alter_table_task: AlterTableTask,
    ) -> Result<(ProcedureId, Option<Output>)> {
        let context = self.create_context();

        let procedure = AlterTableProcedure::new(cluster_id, table_id, alter_table_task, context)?;
        let procedure = AlterTableProcedure::new(table_id, alter_table_task, context)?;

        let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));

@@ -171,12 +169,11 @@ impl DdlManager {
    #[tracing::instrument(skip_all)]
    pub async fn submit_create_table_task(
        &self,
        cluster_id: ClusterId,
        create_table_task: CreateTableTask,
    ) -> Result<(ProcedureId, Option<Output>)> {
        let context = self.create_context();

        let procedure = CreateTableProcedure::new(cluster_id, create_table_task, context);
        let procedure = CreateTableProcedure::new(create_table_task, context);

        let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));

@@ -187,12 +184,11 @@ impl DdlManager {
    #[tracing::instrument(skip_all)]
    pub async fn submit_create_view_task(
        &self,
        cluster_id: ClusterId,
        create_view_task: CreateViewTask,
    ) -> Result<(ProcedureId, Option<Output>)> {
        let context = self.create_context();

        let procedure = CreateViewProcedure::new(cluster_id, create_view_task, context);
        let procedure = CreateViewProcedure::new(create_view_task, context);

        let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));

@@ -203,18 +199,13 @@ impl DdlManager {
    #[tracing::instrument(skip_all)]
    pub async fn submit_create_logical_table_tasks(
        &self,
        cluster_id: ClusterId,
        create_table_tasks: Vec<CreateTableTask>,
        physical_table_id: TableId,
    ) -> Result<(ProcedureId, Option<Output>)> {
        let context = self.create_context();

        let procedure = CreateLogicalTablesProcedure::new(
            cluster_id,
            create_table_tasks,
            physical_table_id,
            context,
        );
        let procedure =
            CreateLogicalTablesProcedure::new(create_table_tasks, physical_table_id, context);

        let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));

@@ -225,18 +216,13 @@ impl DdlManager {
    #[tracing::instrument(skip_all)]
    pub async fn submit_alter_logical_table_tasks(
        &self,
        cluster_id: ClusterId,
        alter_table_tasks: Vec<AlterTableTask>,
        physical_table_id: TableId,
    ) -> Result<(ProcedureId, Option<Output>)> {
        let context = self.create_context();

        let procedure = AlterLogicalTablesProcedure::new(
            cluster_id,
            alter_table_tasks,
            physical_table_id,
            context,
        );
        let procedure =
            AlterLogicalTablesProcedure::new(alter_table_tasks, physical_table_id, context);

        let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));

@@ -247,12 +233,11 @@ impl DdlManager {
    #[tracing::instrument(skip_all)]
    pub async fn submit_drop_table_task(
        &self,
        cluster_id: ClusterId,
        drop_table_task: DropTableTask,
    ) -> Result<(ProcedureId, Option<Output>)> {
        let context = self.create_context();

        let procedure = DropTableProcedure::new(cluster_id, drop_table_task, context);
        let procedure = DropTableProcedure::new(drop_table_task, context);

        let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));

@@ -263,7 +248,6 @@ impl DdlManager {
    #[tracing::instrument(skip_all)]
    pub async fn submit_create_database(
        &self,
        _cluster_id: ClusterId,
        CreateDatabaseTask {
            catalog,
            schema,
@@ -283,7 +267,6 @@ impl DdlManager {
    #[tracing::instrument(skip_all)]
    pub async fn submit_drop_database(
        &self,
        _cluster_id: ClusterId,
        DropDatabaseTask {
            catalog,
            schema,
@@ -299,11 +282,10 @@ impl DdlManager {

    pub async fn submit_alter_database(
        &self,
        cluster_id: ClusterId,
        alter_database_task: AlterDatabaseTask,
    ) -> Result<(ProcedureId, Option<Output>)> {
        let context = self.create_context();
        let procedure = AlterDatabaseProcedure::new(cluster_id, alter_database_task, context)?;
        let procedure = AlterDatabaseProcedure::new(alter_database_task, context)?;
        let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));

        self.submit_procedure(procedure_with_id).await
@@ -313,12 +295,11 @@ impl DdlManager {
    #[tracing::instrument(skip_all)]
    pub async fn submit_create_flow_task(
        &self,
        cluster_id: ClusterId,
        create_flow: CreateFlowTask,
        query_context: QueryContext,
    ) -> Result<(ProcedureId, Option<Output>)> {
        let context = self.create_context();
        let procedure = CreateFlowProcedure::new(cluster_id, create_flow, query_context, context);
        let procedure = CreateFlowProcedure::new(create_flow, query_context, context);
        let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));

        self.submit_procedure(procedure_with_id).await
@@ -328,11 +309,10 @@ impl DdlManager {
    #[tracing::instrument(skip_all)]
    pub async fn submit_drop_flow_task(
        &self,
        cluster_id: ClusterId,
        drop_flow: DropFlowTask,
    ) -> Result<(ProcedureId, Option<Output>)> {
        let context = self.create_context();
        let procedure = DropFlowProcedure::new(cluster_id, drop_flow, context);
        let procedure = DropFlowProcedure::new(drop_flow, context);
        let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));

        self.submit_procedure(procedure_with_id).await
@@ -342,11 +322,10 @@ impl DdlManager {
    #[tracing::instrument(skip_all)]
    pub async fn submit_drop_view_task(
        &self,
        cluster_id: ClusterId,
        drop_view: DropViewTask,
    ) -> Result<(ProcedureId, Option<Output>)> {
        let context = self.create_context();
        let procedure = DropViewProcedure::new(cluster_id, drop_view, context);
        let procedure = DropViewProcedure::new(drop_view, context);
        let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));

        self.submit_procedure(procedure_with_id).await
@@ -356,14 +335,12 @@ impl DdlManager {
    #[tracing::instrument(skip_all)]
    pub async fn submit_truncate_table_task(
        &self,
        cluster_id: ClusterId,
        truncate_table_task: TruncateTableTask,
        table_info_value: DeserializedValueWithBytes<TableInfoValue>,
        region_routes: Vec<RegionRoute>,
    ) -> Result<(ProcedureId, Option<Output>)> {
        let context = self.create_context();
        let procedure = TruncateTableProcedure::new(
            cluster_id,
            truncate_table_task,
            table_info_value,
            region_routes,
@@ -397,7 +374,6 @@ impl DdlManager {

async fn handle_truncate_table_task(
    ddl_manager: &DdlManager,
    cluster_id: ClusterId,
    truncate_table_task: TruncateTableTask,
) -> Result<SubmitDdlTaskResponse> {
    let table_id = truncate_table_task.table_id;
@@ -416,12 +392,7 @@ async fn handle_truncate_table_task(
    let table_route = table_route_value.into_inner().region_routes()?.clone();

    let (id, _) = ddl_manager
        .submit_truncate_table_task(
            cluster_id,
            truncate_table_task,
            table_info_value,
            table_route,
        )
        .submit_truncate_table_task(truncate_table_task, table_info_value, table_route)
        .await?;

    info!("Table: {table_id} is truncated via procedure_id {id:?}");
@@ -434,7 +405,6 @@ async fn handle_truncate_table_task(

async fn handle_alter_table_task(
    ddl_manager: &DdlManager,
    cluster_id: ClusterId,
    alter_table_task: AlterTableTask,
) -> Result<SubmitDdlTaskResponse> {
    let table_ref = alter_table_task.table_ref();
@@ -468,7 +438,7 @@ async fn handle_alter_table_task(
    );

    let (id, _) = ddl_manager
        .submit_alter_table_task(cluster_id, table_id, alter_table_task)
        .submit_alter_table_task(table_id, alter_table_task)
        .await?;

    info!("Table: {table_id} is altered via procedure_id {id:?}");
@@ -481,13 +451,10 @@ async fn handle_alter_table_task(

async fn handle_drop_table_task(
    ddl_manager: &DdlManager,
    cluster_id: ClusterId,
    drop_table_task: DropTableTask,
) -> Result<SubmitDdlTaskResponse> {
    let table_id = drop_table_task.table_id;
    let (id, _) = ddl_manager
        .submit_drop_table_task(cluster_id, drop_table_task)
        .await?;
    let (id, _) = ddl_manager.submit_drop_table_task(drop_table_task).await?;

    info!("Table: {table_id} is dropped via procedure_id {id:?}");

@@ -499,11 +466,10 @@ async fn handle_drop_table_task(

async fn handle_create_table_task(
    ddl_manager: &DdlManager,
    cluster_id: ClusterId,
    create_table_task: CreateTableTask,
) -> Result<SubmitDdlTaskResponse> {
    let (id, output) = ddl_manager
        .submit_create_table_task(cluster_id, create_table_task)
        .submit_create_table_task(create_table_task)
        .await?;

    let procedure_id = id.to_string();
@@ -525,7 +491,6 @@ async fn handle_create_table_task(

async fn handle_create_logical_table_tasks(
    ddl_manager: &DdlManager,
    cluster_id: ClusterId,
    create_table_tasks: Vec<CreateTableTask>,
) -> Result<SubmitDdlTaskResponse> {
    ensure!(
@@ -542,7 +507,7 @@ async fn handle_create_logical_table_tasks(
    let num_logical_tables = create_table_tasks.len();

    let (id, output) = ddl_manager
        .submit_create_logical_table_tasks(cluster_id, create_table_tasks, physical_table_id)
        .submit_create_logical_table_tasks(create_table_tasks, physical_table_id)
        .await?;

    info!("{num_logical_tables} logical tables on physical table: {physical_table_id:?} is created via procedure_id {id:?}");
@@ -568,11 +533,10 @@ async fn handle_create_logical_table_tasks(

async fn handle_create_database_task(
    ddl_manager: &DdlManager,
    cluster_id: ClusterId,
    create_database_task: CreateDatabaseTask,
) -> Result<SubmitDdlTaskResponse> {
    let (id, _) = ddl_manager
        .submit_create_database(cluster_id, create_database_task.clone())
        .submit_create_database(create_database_task.clone())
        .await?;

    let procedure_id = id.to_string();
@@ -589,11 +553,10 @@ async fn handle_create_database_task(

async fn handle_drop_database_task(
    ddl_manager: &DdlManager,
    cluster_id: ClusterId,
    drop_database_task: DropDatabaseTask,
) -> Result<SubmitDdlTaskResponse> {
    let (id, _) = ddl_manager
        .submit_drop_database(cluster_id, drop_database_task.clone())
        .submit_drop_database(drop_database_task.clone())
        .await?;

    let procedure_id = id.to_string();
@@ -610,11 +573,10 @@ async fn handle_drop_database_task(

async fn handle_alter_database_task(
    ddl_manager: &DdlManager,
    cluster_id: ClusterId,
    alter_database_task: AlterDatabaseTask,
) -> Result<SubmitDdlTaskResponse> {
    let (id, _) = ddl_manager
        .submit_alter_database(cluster_id, alter_database_task.clone())
        .submit_alter_database(alter_database_task.clone())
        .await?;

    let procedure_id = id.to_string();
@@ -632,11 +594,10 @@ async fn handle_alter_database_task(

async fn handle_drop_flow_task(
    ddl_manager: &DdlManager,
    cluster_id: ClusterId,
    drop_flow_task: DropFlowTask,
) -> Result<SubmitDdlTaskResponse> {
    let (id, _) = ddl_manager
        .submit_drop_flow_task(cluster_id, drop_flow_task.clone())
        .submit_drop_flow_task(drop_flow_task.clone())
        .await?;

    let procedure_id = id.to_string();
@@ -653,11 +614,10 @@ async fn handle_drop_flow_task(

async fn handle_drop_view_task(
    ddl_manager: &DdlManager,
    cluster_id: ClusterId,
    drop_view_task: DropViewTask,
) -> Result<SubmitDdlTaskResponse> {
    let (id, _) = ddl_manager
        .submit_drop_view_task(cluster_id, drop_view_task.clone())
        .submit_drop_view_task(drop_view_task.clone())
        .await?;

    let procedure_id = id.to_string();
@@ -675,12 +635,11 @@ async fn handle_drop_view_task(

async fn handle_create_flow_task(
    ddl_manager: &DdlManager,
    cluster_id: ClusterId,
    create_flow_task: CreateFlowTask,
    query_context: QueryContext,
) -> Result<SubmitDdlTaskResponse> {
    let (id, output) = ddl_manager
        .submit_create_flow_task(cluster_id, create_flow_task.clone(), query_context)
        .submit_create_flow_task(create_flow_task.clone(), query_context)
        .await?;

    let procedure_id = id.to_string();
@@ -712,7 +671,6 @@ async fn handle_create_flow_task(

async fn handle_alter_logical_table_tasks(
    ddl_manager: &DdlManager,
    cluster_id: ClusterId,
    alter_table_tasks: Vec<AlterTableTask>,
) -> Result<SubmitDdlTaskResponse> {
    ensure!(
@@ -733,7 +691,7 @@ async fn handle_alter_logical_table_tasks(
    let num_logical_tables = alter_table_tasks.len();

    let (id, _) = ddl_manager
        .submit_alter_logical_table_tasks(cluster_id, alter_table_tasks, physical_table_id)
        .submit_alter_logical_table_tasks(alter_table_tasks, physical_table_id)
        .await?;

    info!("{num_logical_tables} logical tables on physical table: {physical_table_id:?} is altered via procedure_id {id:?}");
@@ -749,11 +707,10 @@ async fn handle_alter_logical_table_tasks(
/// Handles the `[CreateViewTask]` and returns the DDL response on success.
async fn handle_create_view_task(
    ddl_manager: &DdlManager,
    cluster_id: ClusterId,
    create_view_task: CreateViewTask,
) -> Result<SubmitDdlTaskResponse> {
    let (id, output) = ddl_manager
        .submit_create_view_task(cluster_id, create_view_task)
        .submit_create_view_task(create_view_task)
        .await?;

    let procedure_id = id.to_string();
@@ -788,55 +745,43 @@ impl ProcedureExecutor for DdlManager {
            .unwrap_or(TracingContext::from_current_span())
            .attach(tracing::info_span!("DdlManager::submit_ddl_task"));
        async move {
            let cluster_id = ctx.cluster_id.unwrap_or_default();
            debug!("Submitting Ddl task: {:?}", request.task);
            match request.task {
                CreateTable(create_table_task) => {
                    handle_create_table_task(self, cluster_id, create_table_task).await
                }
                DropTable(drop_table_task) => {
                    handle_drop_table_task(self, cluster_id, drop_table_task).await
                    handle_create_table_task(self, create_table_task).await
                }
                DropTable(drop_table_task) => handle_drop_table_task(self, drop_table_task).await,
                AlterTable(alter_table_task) => {
                    handle_alter_table_task(self, cluster_id, alter_table_task).await
                    handle_alter_table_task(self, alter_table_task).await
                }
                TruncateTable(truncate_table_task) => {
                    handle_truncate_table_task(self, cluster_id, truncate_table_task).await
                    handle_truncate_table_task(self, truncate_table_task).await
                }
                CreateLogicalTables(create_table_tasks) => {
                    handle_create_logical_table_tasks(self, cluster_id, create_table_tasks).await
                    handle_create_logical_table_tasks(self, create_table_tasks).await
                }
                AlterLogicalTables(alter_table_tasks) => {
                    handle_alter_logical_table_tasks(self, cluster_id, alter_table_tasks).await
                    handle_alter_logical_table_tasks(self, alter_table_tasks).await
                }
                DropLogicalTables(_) => todo!(),
                CreateDatabase(create_database_task) => {
                    handle_create_database_task(self, cluster_id, create_database_task).await
                    handle_create_database_task(self, create_database_task).await
                }
                DropDatabase(drop_database_task) => {
                    handle_drop_database_task(self, cluster_id, drop_database_task).await
                    handle_drop_database_task(self, drop_database_task).await
                }
                AlterDatabase(alter_database_task) => {
                    handle_alter_database_task(self, cluster_id, alter_database_task).await
                    handle_alter_database_task(self, alter_database_task).await
                }
                CreateFlow(create_flow_task) => {
                    handle_create_flow_task(
                        self,
                        cluster_id,
                        create_flow_task,
                        request.query_context.into(),
                    )
                    .await
                }
                DropFlow(drop_flow_task) => {
                    handle_drop_flow_task(self, cluster_id, drop_flow_task).await
                    handle_create_flow_task(self, create_flow_task, request.query_context.into())
                        .await
                }
                DropFlow(drop_flow_task) => handle_drop_flow_task(self, drop_flow_task).await,
                CreateView(create_view_task) => {
                    handle_create_view_task(self, cluster_id, create_view_task).await
                }
                DropView(drop_view_task) => {
                    handle_drop_view_task(self, cluster_id, drop_view_task).await
                    handle_create_view_task(self, create_view_task).await
                }
                DropView(drop_view_task) => handle_drop_view_task(self, drop_view_task).await,
            }
        }
        .trace(span)

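The upshot for callers is mechanical: every submit_* entry point and handle_* helper loses its leading cluster_id argument, and the executor no longer reads ctx.cluster_id at all. A hedged call-site sketch against the new signatures shown above (crate-internal types, so not compilable standalone; the task value is assumed to be already built):

// Sketch only: mirrors submit_drop_table_task's new signature above.
async fn drop_table_example(ddl_manager: &DdlManager, task: DropTableTask) {
    // No cluster_id: the task alone identifies what to drop.
    let (procedure_id, _output) = ddl_manager.submit_drop_table_task(task).await.unwrap();
    common_telemetry::info!("submitted drop table procedure: {procedure_id:?}");
}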
@@ -26,11 +26,10 @@ use crate::flow_name::FlowName;
use crate::key::schema_name::SchemaName;
use crate::key::FlowId;
use crate::peer::Peer;
use crate::{ClusterId, DatanodeId, FlownodeId};
use crate::{DatanodeId, FlownodeId};

#[derive(Eq, Hash, PartialEq, Clone, Debug, Serialize, Deserialize)]
pub struct RegionIdent {
    pub cluster_id: ClusterId,
    pub datanode_id: DatanodeId,
    pub table_id: TableId,
    pub region_number: RegionNumber,
@@ -47,8 +46,8 @@ impl Display for RegionIdent {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "RegionIdent(datanode_id='{}.{}', table_id={}, region_number={}, engine = {})",
            self.cluster_id, self.datanode_id, self.table_id, self.region_number, self.engine
            "RegionIdent(datanode_id='{}', table_id={}, region_number={}, engine = {})",
            self.datanode_id, self.table_id, self.region_number, self.engine
        )
    }
}
@@ -262,7 +261,6 @@ mod tests {
    fn test_serialize_instruction() {
        let open_region = Instruction::OpenRegion(OpenRegion::new(
            RegionIdent {
                cluster_id: 1,
                datanode_id: 2,
                table_id: 1024,
                region_number: 1,
@@ -277,12 +275,11 @@ mod tests {
        let serialized = serde_json::to_string(&open_region).unwrap();

        assert_eq!(
            r#"{"OpenRegion":{"region_ident":{"cluster_id":1,"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"},"region_storage_path":"test/foo","region_options":{},"region_wal_options":{},"skip_wal_replay":false}}"#,
            r#"{"OpenRegion":{"region_ident":{"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"},"region_storage_path":"test/foo","region_options":{},"region_wal_options":{},"skip_wal_replay":false}}"#,
            serialized
        );

        let close_region = Instruction::CloseRegion(RegionIdent {
            cluster_id: 1,
            datanode_id: 2,
            table_id: 1024,
            region_number: 1,
@@ -292,7 +289,7 @@ mod tests {
        let serialized = serde_json::to_string(&close_region).unwrap();

        assert_eq!(
            r#"{"CloseRegion":{"cluster_id":1,"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"}}"#,
            r#"{"CloseRegion":{"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"}}"#,
            serialized
        );
    }
@@ -307,7 +304,6 @@ mod tests {
    #[test]
    fn test_compatible_serialize_open_region() {
        let region_ident = RegionIdent {
            cluster_id: 1,
            datanode_id: 2,
            table_id: 1024,
            region_number: 1,

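The compatibility test above (test_compatible_serialize_open_region) covers the wire concern this change raises: payloads written by older nodes still carry cluster_id. A self-contained sketch of why such payloads keep deserializing, using a trimmed-down stand-in for RegionIdent (serde skips unknown JSON fields unless deny_unknown_fields is set):

use serde::Deserialize;

// Trimmed-down stand-in for the new RegionIdent, for illustration only.
#[derive(Debug, Deserialize)]
struct RegionIdent {
    datanode_id: u64,
    table_id: u32,
    region_number: u32,
    engine: String,
}

fn main() {
    // A payload from an old node that still carries `cluster_id`.
    let old = r#"{"cluster_id":1,"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"}"#;
    // The unknown `cluster_id` field is silently skipped.
    let ident: RegionIdent = serde_json::from_str(old).unwrap();
    assert_eq!(ident.datanode_id, 2);
}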
@@ -47,8 +47,6 @@ pub mod test_util;
pub mod util;
pub mod wal_options_allocator;

// The id of the cluster.
pub type ClusterId = u64;
// The id of the datanode.
pub type DatanodeId = u64;
// The id of the flownode.

@@ -99,7 +99,7 @@ impl NodeExpiryListener {
        in_memory: &ResettableKvBackendRef,
        max_idle_time: Duration,
    ) -> error::Result<impl Iterator<Item = NodeInfoKey>> {
        let prefix = NodeInfoKey::key_prefix_with_cluster_id(0);
        let prefix = NodeInfoKey::key_prefix();
        let req = RangeRequest::new().with_prefix(prefix);
        let current_time_millis = common_time::util::current_time_millis();
        let resp = in_memory.range(req).await?;

@@ -19,7 +19,7 @@ use api::v1::meta::Peer as PbPeer;
use serde::{Deserialize, Serialize};

use crate::error::Error;
use crate::{ClusterId, DatanodeId, FlownodeId};
use crate::{DatanodeId, FlownodeId};

#[derive(Debug, Default, Clone, Hash, Eq, PartialEq, Deserialize, Serialize)]
pub struct Peer {
@@ -72,8 +72,8 @@ impl Display for Peer {
/// Looks up a peer given a node id.
#[async_trait::async_trait]
pub trait PeerLookupService {
    async fn datanode(&self, cluster_id: ClusterId, id: DatanodeId) -> Result<Option<Peer>, Error>;
    async fn flownode(&self, cluster_id: ClusterId, id: FlownodeId) -> Result<Option<Peer>, Error>;
    async fn datanode(&self, id: DatanodeId) -> Result<Option<Peer>, Error>;
    async fn flownode(&self, id: FlownodeId) -> Result<Option<Peer>, Error>;
}

pub type PeerLookupServiceRef = Arc<dyn PeerLookupService + Send + Sync>;

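For implementors the change is mechanical: drop the cluster_id parameter. A self-contained sketch of a map-backed lookup against the simplified trait shape (the trait and types here are local stand-ins, not the crate's own):

use std::collections::HashMap;

type DatanodeId = u64;

#[derive(Clone, Debug)]
struct Peer {
    id: u64,
    addr: String,
}

#[async_trait::async_trait]
trait PeerLookupService {
    // The node id alone now identifies the peer; no cluster scoping.
    async fn datanode(&self, id: DatanodeId) -> Option<Peer>;
}

struct MapLookup(HashMap<DatanodeId, Peer>);

#[async_trait::async_trait]
impl PeerLookupService for MapLookup {
    async fn datanode(&self, id: DatanodeId) -> Option<Peer> {
        self.0.get(&id).cloned()
    }
}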
@@ -31,11 +31,6 @@ impl ResponseHeader {
        self.0.protocol_version
    }

    #[inline]
    pub fn cluster_id(&self) -> u64 {
        self.0.cluster_id
    }

    #[inline]
    pub fn error_code(&self) -> i32 {
        match self.0.error.as_ref() {
@@ -143,7 +138,6 @@ mod tests {
    fn test_response_header_trans() {
        let pb_header = PbResponseHeader {
            protocol_version: 101,
            cluster_id: 1,
            error: Some(Error {
                code: 100,
                err_msg: "test".to_string(),
@@ -152,7 +146,6 @@

        let header = ResponseHeader(pb_header);
        assert_eq!(101, header.protocol_version());
        assert_eq!(1, header.cluster_id());
        assert_eq!(100, header.error_code());
        assert_eq!("test".to_string(), header.error_msg());
    }

@@ -37,7 +37,7 @@ use crate::peer::{Peer, PeerLookupService};
use crate::region_keeper::MemoryRegionKeeper;
use crate::sequence::SequenceBuilder;
use crate::wal_options_allocator::WalOptionsAllocator;
use crate::{ClusterId, DatanodeId, FlownodeId};
use crate::{DatanodeId, FlownodeId};

#[async_trait::async_trait]
pub trait MockDatanodeHandler: Sync + Send + Clone {
@@ -189,11 +189,11 @@ pub struct NoopPeerLookupService;

#[async_trait::async_trait]
impl PeerLookupService for NoopPeerLookupService {
    async fn datanode(&self, _cluster_id: ClusterId, id: DatanodeId) -> Result<Option<Peer>> {
    async fn datanode(&self, id: DatanodeId) -> Result<Option<Peer>> {
        Ok(Some(Peer::empty(id)))
    }

    async fn flownode(&self, _cluster_id: ClusterId, id: FlownodeId) -> Result<Option<Peer>> {
    async fn flownode(&self, id: FlownodeId) -> Result<Option<Peer>> {
        Ok(Some(Peer::empty(id)))
    }
}

@@ -235,7 +235,6 @@ mod tests {
        Instruction::CloseRegion(RegionIdent {
            table_id: region_id.table_id(),
            region_number: region_id.region_number(),
            cluster_id: 1,
            datanode_id: 2,
            engine: MITO_ENGINE_NAME.to_string(),
        })
@@ -246,7 +245,6 @@ mod tests {
            RegionIdent {
                table_id: region_id.table_id(),
                region_number: region_id.region_number(),
                cluster_id: 1,
                datanode_id: 2,
                engine: MITO_ENGINE_NAME.to_string(),
            },

@@ -25,6 +25,6 @@ pub mod heartbeat;
pub mod metrics;
pub mod region_server;
pub mod service;
mod store;
pub mod store;
#[cfg(any(test, feature = "testing"))]
pub mod tests;

@@ -15,7 +15,7 @@
//! object storage utilities

mod azblob;
mod fs;
pub mod fs;
mod gcs;
mod oss;
mod s3;

@@ -24,7 +24,8 @@ use crate::config::FileConfig;
use crate::error::{self, Result};
use crate::store;

pub(crate) async fn new_fs_object_store(
/// A helper function to create a file system object store.
pub async fn new_fs_object_store(
    data_home: &str,
    _file_config: &FileConfig,
) -> Result<ObjectStore> {

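Since `store` (and `store::fs` with it) is now public and `new_fs_object_store` is exported, code outside the datanode crate, such as the new ingester below, can build a local-filesystem object store directly. A hedged sketch; it assumes `FileConfig` implements `Default`, which this diff does not show:

use datanode::config::FileConfig;
use datanode::store::fs::new_fs_object_store;
use object_store::ObjectStore;

async fn open_local_store(data_home: &str) -> ObjectStore {
    // The file config parameter is currently unused by the helper
    // (note the leading underscore in its signature above).
    new_fs_object_store(data_home, &FileConfig::default())
        .await
        .expect("failed to create fs object store")
}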
@@ -103,7 +103,6 @@ impl Default for FlowConfig {
#[serde(default)]
pub struct FlownodeOptions {
    pub mode: Mode,
    pub cluster_id: Option<u64>,
    pub node_id: Option<u64>,
    pub flow: FlowConfig,
    pub grpc: GrpcOptions,
@@ -118,7 +117,6 @@ impl Default for FlownodeOptions {
    fn default() -> Self {
        Self {
            mode: servers::Mode::Standalone,
            cluster_id: None,
            node_id: None,
            flow: FlowConfig::default(),
            grpc: GrpcOptions::default().with_bind_addr("127.0.0.1:3004"),

@@ -35,11 +35,10 @@ use servers::error::{
    CatalogSnafu, CollectRecordbatchSnafu, DataFusionSnafu, Result as ServerResult,
    TableNotFoundSnafu,
};
use servers::http::jaeger::QueryTraceParams;
use servers::http::jaeger::{QueryTraceParams, FIND_TRACES_COLS};
use servers::otlp::trace::{
    DURATION_NANO_COLUMN, SERVICE_NAME_COLUMN, SPAN_ATTRIBUTES_COLUMN, SPAN_ID_COLUMN,
    SPAN_KIND_COLUMN, SPAN_KIND_PREFIX, SPAN_NAME_COLUMN, TIMESTAMP_COLUMN, TRACE_ID_COLUMN,
    TRACE_TABLE_NAME,
    DURATION_NANO_COLUMN, SERVICE_NAME_COLUMN, SPAN_ATTRIBUTES_COLUMN, SPAN_KIND_COLUMN,
    SPAN_KIND_PREFIX, SPAN_NAME_COLUMN, TIMESTAMP_COLUMN, TRACE_ID_COLUMN, TRACE_TABLE_NAME,
};
use servers::query_handler::JaegerQueryHandler;
use session::context::QueryContextRef;
@@ -102,16 +101,9 @@ impl JaegerQueryHandler for Instance {
    }

    async fn get_trace(&self, ctx: QueryContextRef, trace_id: &str) -> ServerResult<Output> {
        // It's equivalent to `SELECT trace_id, timestamp, duration_nano, service_name, span_name, span_id, span_attributes FROM {db}.{trace_table} WHERE trace_id = '{trace_id}'`.
        let selects = vec![
            col(TRACE_ID_COLUMN),
            col(TIMESTAMP_COLUMN),
            col(DURATION_NANO_COLUMN),
            col(SERVICE_NAME_COLUMN),
            col(SPAN_NAME_COLUMN),
            col(SPAN_ID_COLUMN),
            col(SPAN_ATTRIBUTES_COLUMN),
        ];
        // It's equivalent to `SELECT trace_id, timestamp, duration_nano, service_name, span_name, span_id, span_attributes, resource_attributes, parent_span_id
        // FROM {db}.{trace_table} WHERE trace_id = '{trace_id}'`.
        let selects: Vec<Expr> = FIND_TRACES_COLS.clone();

        let filters = vec![col(TRACE_ID_COLUMN).eq(lit(trace_id))];

@@ -133,15 +125,7 @@ impl JaegerQueryHandler for Instance {
        ctx: QueryContextRef,
        query_params: QueryTraceParams,
    ) -> ServerResult<Output> {
        let selects = vec![
            col(TRACE_ID_COLUMN),
            col(TIMESTAMP_COLUMN),
            col(DURATION_NANO_COLUMN),
            col(SERVICE_NAME_COLUMN),
            col(SPAN_NAME_COLUMN),
            col(SPAN_ID_COLUMN),
            col(SPAN_ATTRIBUTES_COLUMN),
        ];
        let selects: Vec<Expr> = FIND_TRACES_COLS.clone();

        let mut filters = vec![];

23
src/ingester/Cargo.toml
Normal file
@@ -0,0 +1,23 @@
[package]
name = "ingester"
version.workspace = true
edition.workspace = true
license.workspace = true

[dependencies]
clap.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
datanode.workspace = true
meta-client.workspace = true
mito2.workspace = true
object-store.workspace = true
reqwest.workspace = true
serde.workspace = true
serde_json.workspace = true
sst-convert.workspace = true
tokio.workspace = true
toml.workspace = true

[lints]
workspace = true
294
src/ingester/src/main.rs
Normal file
@@ -0,0 +1,294 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use clap::Parser;
use common_telemetry::info;
use common_time::timestamp::TimeUnit;
use datanode::config::StorageConfig;
use meta_client::MetaClientOptions;
use mito2::config::MitoConfig;
use mito2::sst::file::IndexType;
use mito2::sst::parquet::SstInfo;
use serde::{Deserialize, Serialize};
use sst_convert::converter::{InputFile, InputFileType, SstConverterBuilder};
use tokio::sync::oneshot;

#[derive(Parser, Debug)]
#[command(version, about = "Greptime Ingester", long_about = None)]
struct Args {
    /// Input directory
    #[arg(short, long)]
    input_dir: String,
    /// Directory of input parquet files, relative to input_dir
    #[arg(short, long)]
    parquet_dir: Option<String>,
    /// Directory of input json files, relative to input_dir
    #[arg(short, long)]
    remote_write_dir: Option<String>,
    /// Config file
    #[arg(short, long)]
    cfg: String,
    /// DB HTTP address
    #[arg(short, long)]
    db_http_addr: String,

    /// Output path for the converted SST files.
    /// If it is not None, the converted SST files will be written to the specified path
    /// in the `input_store`.
    /// This is for debugging purposes.
    #[arg(short, long)]
    sst_output_path: Option<String>,
}

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
struct IngesterConfig {
    meta_client: MetaClientOptions,
    storage: StorageConfig,
    mito: MitoConfig,
}

pub const APP_NAME: &str = "greptime-ingester";

#[tokio::main]
async fn main() {
    let _guard = common_telemetry::init_global_logging(
        APP_NAME,
        &Default::default(),
        &Default::default(),
        None,
    );

    let args = Args::parse();

    let cfg_file = std::fs::read_to_string(&args.cfg).expect("Failed to read config file");
    let cfg: IngesterConfig = toml::from_str(&cfg_file).expect("Failed to parse config");

    let sst_builder = {
        let mut builder = SstConverterBuilder::new_fs(args.input_dir)
            .with_meta_options(cfg.meta_client)
            .with_storage_config(cfg.storage)
            .with_config(cfg.mito);

        if let Some(output_path) = args.sst_output_path {
            builder = builder.with_output_path(output_path);
        }

        builder
    };

    let sst_converter = sst_builder
        .clone()
        .build()
        .await
        .expect("Failed to build sst converter");

    let input_store = sst_converter.input_store.clone();

    if let Some(parquet_dir) = args.parquet_dir {
        // Uses opendal to list parquet files in the given input object store.
        let all_parquets = input_store
            .list(&parquet_dir)
            .await
            .expect("Failed to list parquet files");
        info!("Listed all files in parquet directory: {:?}", all_parquets);
        let all_parquets = all_parquets
            .iter()
            .filter(|parquet| parquet.name().ends_with(".parquet") && parquet.metadata().is_file())
            .collect::<Vec<_>>();

        let input_files = all_parquets
            .iter()
            .map(|parquet| {
                let full_table_name = parquet.name().split("-").next().unwrap();
                let (catalog_name, schema_name, table_name) = extract_name(full_table_name);

                info!(
                    "catalog: {}, schema: {}, table: {}",
                    catalog_name, schema_name, table_name
                );

                InputFile {
                    catalog: catalog_name.to_string(),
                    schema: schema_name.to_string(),
                    table: table_name.to_string(),
                    path: parquet.path().to_string(),
                    file_type: InputFileType::Parquet,
                }
            })
            .collect::<Vec<_>>();

        convert_and_send(&input_files, sst_builder.clone(), &args.db_http_addr).await;
    }

    if let Some(remote_write_dir) = args.remote_write_dir {
        // Uses opendal to list remote write files in the given input object store.
        let all_parquets = input_store
            .list(&remote_write_dir)
            .await
            .expect("Failed to list parquet files");

        let all_parquets = all_parquets
            .iter()
            .filter(|parquet| parquet.name().ends_with(".parquet") && parquet.metadata().is_file())
            .collect::<Vec<_>>();

        let input_files = all_parquets
            .iter()
            .map(|parquet| {
                let full_table_name = parquet.name().split("-").next().unwrap();
                let (catalog_name, schema_name, table_name) = extract_name(full_table_name);

                info!(
                    "catalog: {}, schema: {}, table: {}",
                    catalog_name, schema_name, table_name
                );
                InputFile {
                    catalog: catalog_name.to_string(),
                    schema: schema_name.to_string(),
                    table: table_name.to_string(),
                    path: parquet.path().to_string(),
                    file_type: InputFileType::RemoteWrite,
                }
            })
            .collect::<Vec<_>>();

        convert_and_send(&input_files, sst_builder.clone(), &args.db_http_addr).await;
    }
}

async fn convert_and_send(
    input_files: &[InputFile],
    sst_builder: SstConverterBuilder,
    db_http_addr: &str,
) {
    let table_names = input_files
        .iter()
        .map(|f| (f.schema.clone(), f.table.clone()))
        .collect::<Vec<_>>();
    let mut rxs = Vec::new();

    // Spawn a task for each input file
    info!("Spawning tasks for {} input files", input_files.len());
    for input_file in input_files.iter() {
        let (tx, rx) = oneshot::channel();
        let sst_builder = sst_builder.clone();
        let input_file = (*input_file).clone();
        tokio::task::spawn(async move {
            let mut sst_converter = sst_builder
                .build()
                .await
                .expect("Failed to build sst converter");
            let sst_info = sst_converter
                .convert_one(&input_file)
                .await
                .expect("Failed to convert parquet files");
            tx.send(sst_info).unwrap();
        });
        rxs.push(rx);
    }

    let mut sst_infos = Vec::new();
    for rx in rxs {
        sst_infos.push(rx.await.unwrap());
    }

    info!("Converted {} input files", sst_infos.len());

    let ingest_reqs = table_names
        .iter()
        .zip(sst_infos.iter())
        .flat_map(|(schema_name, sst_info)| {
            sst_info
                .ssts
                .iter()
                .map(|sst| to_ingest_sst_req(&schema_name.0, &schema_name.1, sst))
                .collect::<Vec<_>>()
        })
        .collect::<Vec<_>>();

    // Send ingest requests to the DB.
    send_ingest_requests(db_http_addr, ingest_reqs)
        .await
        .unwrap();
}

fn extract_name(full_table_name: &str) -> (String, String, String) {
    let mut names = full_table_name.split('.').rev();
    let table_name = names.next().unwrap();
    let schema_name = names.next().unwrap_or("public");
    let catalog_name = names.next().unwrap_or("greptime");
    (
        catalog_name.to_string(),
        schema_name.to_string(),
        table_name.to_string(),
    )
}

async fn send_ingest_requests(
    addr: &str,
    reqs: Vec<ClientIngestSstRequest>,
) -> Result<(), Box<dyn std::error::Error>> {
    let client = reqwest::Client::new();
    for req in reqs {
        info!("ingesting sst: {req:?}");
        let req = client.post(addr).json(&req);
        let resp = req.send().await?;
        info!("ingest response: {resp:?}");
    }
    Ok(())
}

#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct ClientIngestSstRequest {
    schema: Option<String>,
    table: String,
    pub(crate) file_id: String,
    pub(crate) min_ts: i64,
    pub(crate) max_ts: i64,
    pub(crate) file_size: u64,
    pub(crate) rows: u32,
    pub(crate) row_groups: u32,
    /// Available indexes of the file.
    pub available_indexes: Vec<IndexType>,
    /// Size of the index file.
    pub index_file_size: u64,
    pub time_unit: u32,
}

fn to_ingest_sst_req(
    schema_name: &str,
    table_name: &str,
    sst_info: &SstInfo,
) -> ClientIngestSstRequest {
    let index_file_size = sst_info.index_metadata.file_size;
    let available_indexes = sst_info.index_metadata.build_available_indexes();
    ClientIngestSstRequest {
        schema: Some(schema_name.to_string()),
        table: table_name.to_string(),
        file_id: sst_info.file_id.to_string(),
        min_ts: sst_info.time_range.0.value(),
        max_ts: sst_info.time_range.1.value(),
        file_size: sst_info.file_size,
        rows: sst_info.num_rows as _,
        row_groups: sst_info.num_row_groups as _,
        available_indexes: available_indexes.to_vec(),
        index_file_size,
        time_unit: match sst_info.time_range.0.unit() {
            TimeUnit::Second => 0,
            TimeUnit::Millisecond => 3,
            TimeUnit::Microsecond => 6,
            TimeUnit::Nanosecond => 9,
        },
    }
}

@@ -31,13 +31,13 @@ fn main() {

 #[tokio::main]
 async fn run() {
-    let id = (1000u64, 2000u64);
+    let id = 2000u64;
     let config = ChannelConfig::new()
         .timeout(Duration::from_secs(3))
         .connect_timeout(Duration::from_secs(5))
         .tcp_nodelay(true);
     let channel_manager = ChannelManager::with_config(config);
-    let mut meta_client = MetaClientBuilder::datanode_default_options(id.0, id.1)
+    let mut meta_client = MetaClientBuilder::datanode_default_options(id)
         .channel_manager(channel_manager)
         .build();
     meta_client.start(&["127.0.0.1:3002"]).await.unwrap();

@@ -47,7 +47,6 @@ use common_meta::rpc::store::{
     DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
 };
 use common_meta::rpc::KeyValue;
-use common_meta::ClusterId;
 use common_telemetry::info;
 use futures::TryStreamExt;
 use heartbeat::Client as HeartbeatClient;

@@ -61,7 +60,7 @@ use crate::error::{
     Result,
 };

-pub type Id = (u64, u64);
+pub type Id = u64;

 const DEFAULT_ASK_LEADER_MAX_RETRY: usize = 3;
 const DEFAULT_SUBMIT_DDL_MAX_RETRY: usize = 3;
@@ -81,18 +80,18 @@ pub struct MetaClientBuilder {
 }

 impl MetaClientBuilder {
-    pub fn new(cluster_id: ClusterId, member_id: u64, role: Role) -> Self {
+    pub fn new(member_id: u64, role: Role) -> Self {
         Self {
-            id: (cluster_id, member_id),
+            id: member_id,
             role,
             ..Default::default()
         }
     }

     /// Returns the role of Frontend's default options.
-    pub fn frontend_default_options(cluster_id: ClusterId) -> Self {
+    pub fn frontend_default_options() -> Self {
         // Frontend does not need a member id.
-        Self::new(cluster_id, 0, Role::Frontend)
+        Self::new(0, Role::Frontend)
             .enable_store()
             .enable_heartbeat()
             .enable_procedure()

@@ -100,15 +99,15 @@ impl MetaClientBuilder {
     }

     /// Returns the role of Datanode's default options.
-    pub fn datanode_default_options(cluster_id: ClusterId, member_id: u64) -> Self {
-        Self::new(cluster_id, member_id, Role::Datanode)
+    pub fn datanode_default_options(member_id: u64) -> Self {
+        Self::new(member_id, Role::Datanode)
             .enable_store()
             .enable_heartbeat()
     }

     /// Returns the role of Flownode's default options.
-    pub fn flownode_default_options(cluster_id: ClusterId, member_id: u64) -> Self {
-        Self::new(cluster_id, member_id, Role::Flownode)
+    pub fn flownode_default_options(member_id: u64) -> Self {
+        Self::new(member_id, Role::Flownode)
             .enable_store()
             .enable_heartbeat()
             .enable_procedure()
@@ -273,15 +272,9 @@ impl ClusterInfo for MetaClient {
         let cluster_client = self.cluster_client()?;

         let (get_metasrv_nodes, nodes_key_prefix) = match role {
-            None => (
-                true,
-                Some(NodeInfoKey::key_prefix_with_cluster_id(self.id.0)),
-            ),
+            None => (true, Some(NodeInfoKey::key_prefix())),
             Some(ClusterRole::Metasrv) => (true, None),
-            Some(role) => (
-                false,
-                Some(NodeInfoKey::key_prefix_with_role(self.id.0, role)),
-            ),
+            Some(role) => (false, Some(NodeInfoKey::key_prefix_with_role(role))),
         };

         let mut nodes = if get_metasrv_nodes {

@@ -324,7 +317,7 @@ impl ClusterInfo for MetaClient {

     async fn list_region_stats(&self) -> Result<Vec<RegionStat>> {
         let cluster_kv_backend = Arc::new(self.cluster_client()?);
-        let range_prefix = DatanodeStatKey::key_prefix_with_cluster_id(self.id.0);
+        let range_prefix = DatanodeStatKey::prefix_key();
         let req = RangeRequest::new().with_prefix(range_prefix);
         let stream =
             PaginationStream::new(cluster_kv_backend, req, 256, decode_stats).into_stream();

@@ -555,6 +548,8 @@ impl MetaClient {

 #[cfg(test)]
 mod tests {
+    use std::sync::atomic::{AtomicUsize, Ordering};
+
     use api::v1::meta::{HeartbeatRequest, Peer};
     use common_meta::kv_backend::{KvBackendRef, ResettableKvBackendRef};
     use rand::Rng;
@@ -624,31 +619,31 @@ mod tests {
     async fn test_meta_client_builder() {
         let urls = &["127.0.0.1:3001", "127.0.0.1:3002"];

-        let mut meta_client = MetaClientBuilder::new(0, 0, Role::Datanode)
+        let mut meta_client = MetaClientBuilder::new(0, Role::Datanode)
             .enable_heartbeat()
             .build();
         let _ = meta_client.heartbeat_client().unwrap();
         assert!(meta_client.store_client().is_err());
         meta_client.start(urls).await.unwrap();

-        let mut meta_client = MetaClientBuilder::new(0, 0, Role::Datanode).build();
+        let mut meta_client = MetaClientBuilder::new(0, Role::Datanode).build();
         assert!(meta_client.heartbeat_client().is_err());
         assert!(meta_client.store_client().is_err());
         meta_client.start(urls).await.unwrap();

-        let mut meta_client = MetaClientBuilder::new(0, 0, Role::Datanode)
+        let mut meta_client = MetaClientBuilder::new(0, Role::Datanode)
             .enable_store()
             .build();
         assert!(meta_client.heartbeat_client().is_err());
         let _ = meta_client.store_client().unwrap();
         meta_client.start(urls).await.unwrap();

-        let mut meta_client = MetaClientBuilder::new(1, 2, Role::Datanode)
+        let mut meta_client = MetaClientBuilder::new(2, Role::Datanode)
             .enable_heartbeat()
             .enable_store()
             .build();
-        assert_eq!(1, meta_client.id().0);
-        assert_eq!(2, meta_client.id().1);
+        assert_eq!(2, meta_client.id());
+        assert_eq!(2, meta_client.id());
         let _ = meta_client.heartbeat_client().unwrap();
         let _ = meta_client.store_client().unwrap();
         meta_client.start(urls).await.unwrap();

@@ -657,7 +652,7 @@ mod tests {
     #[tokio::test]
     async fn test_not_start_heartbeat_client() {
         let urls = &["127.0.0.1:3001", "127.0.0.1:3002"];
-        let mut meta_client = MetaClientBuilder::new(0, 0, Role::Datanode)
+        let mut meta_client = MetaClientBuilder::new(0, Role::Datanode)
             .enable_store()
             .build();
         meta_client.start(urls).await.unwrap();

@@ -668,7 +663,7 @@ mod tests {
     #[tokio::test]
     async fn test_not_start_store_client() {
         let urls = &["127.0.0.1:3001", "127.0.0.1:3002"];
-        let mut meta_client = MetaClientBuilder::new(0, 0, Role::Datanode)
+        let mut meta_client = MetaClientBuilder::new(0, Role::Datanode)
             .enable_heartbeat()
             .build();

@@ -688,6 +683,9 @@ mod tests {
         let tc = new_client("test_heartbeat").await;
         let (sender, mut receiver) = tc.client.heartbeat().await.unwrap();
         // send heartbeats
+
+        let request_sent = Arc::new(AtomicUsize::new(0));
+        let request_sent_clone = request_sent.clone();
         let _handle = tokio::spawn(async move {
             for _ in 0..5 {
                 let req = HeartbeatRequest {
@@ -698,14 +696,24 @@ mod tests {
                     ..Default::default()
                 };
                 sender.send(req).await.unwrap();
+                request_sent_clone.fetch_add(1, Ordering::Relaxed);
             }
         });

-        let _handle = tokio::spawn(async move {
-            while let Some(res) = receiver.message().await.unwrap() {
-                assert_eq!(1000, res.header.unwrap().cluster_id);
+        let heartbeat_count = Arc::new(AtomicUsize::new(0));
+        let heartbeat_count_clone = heartbeat_count.clone();
+        let handle = tokio::spawn(async move {
+            while let Some(_resp) = receiver.message().await.unwrap() {
+                heartbeat_count_clone.fetch_add(1, Ordering::Relaxed);
             }
         });

+        handle.await.unwrap();
+        //+1 for the initial response
+        assert_eq!(
+            request_sent.load(Ordering::Relaxed) + 1,
+            heartbeat_count.load(Ordering::Relaxed)
+        );
     }

     #[tokio::test]

@@ -272,7 +272,7 @@ mod test {

     #[tokio::test]
     async fn test_already_start() {
-        let mut client = Client::new((0, 0), Role::Datanode, ChannelManager::default(), 3);
+        let mut client = Client::new(0, Role::Datanode, ChannelManager::default(), 3);
         client
             .start(&["127.0.0.1:1000", "127.0.0.1:1001"])
             .await

@@ -288,7 +288,7 @@ mod test {
     #[tokio::test]
     async fn test_heartbeat_stream() {
         let (sender, mut receiver) = mpsc::channel::<HeartbeatRequest>(100);
-        let sender = HeartbeatSender::new((8, 8), Role::Datanode, sender);
+        let sender = HeartbeatSender::new(8, Role::Datanode, sender);
         let _handle = tokio::spawn(async move {
             for _ in 0..10 {
                 sender.send(HeartbeatRequest::default()).await.unwrap();

@@ -296,7 +296,6 @@ mod test {
         });
         while let Some(req) = receiver.recv().await {
             let header = req.header.unwrap();
-            assert_eq!(8, header.cluster_id);
             assert_eq!(8, header.member_id);
         }
     }

@@ -255,7 +255,7 @@ mod test {

     #[tokio::test]
     async fn test_already_start() {
-        let mut client = Client::new((0, 0), Role::Frontend, ChannelManager::default());
+        let mut client = Client::new(0, Role::Frontend, ChannelManager::default());
         client
             .start(&["127.0.0.1:1000", "127.0.0.1:1001"])
             .await

@@ -270,7 +270,7 @@ mod test {

     #[tokio::test]
     async fn test_start_with_duplicate_peers() {
-        let mut client = Client::new((0, 0), Role::Frontend, ChannelManager::default());
+        let mut client = Client::new(0, Role::Frontend, ChannelManager::default());
         client
             .start(&["127.0.0.1:1000", "127.0.0.1:1000", "127.0.0.1:1000"])
             .await
@@ -71,23 +71,22 @@ pub enum MetaClientType {
 pub type MetaClientRef = Arc<client::MetaClient>;

 pub async fn create_meta_client(
-    cluster_id: u64,
     client_type: MetaClientType,
     meta_client_options: &MetaClientOptions,
 ) -> error::Result<MetaClientRef> {
     info!(
-        "Creating {:?} instance from cluster {} with Metasrv addrs {:?}",
-        client_type, cluster_id, meta_client_options.metasrv_addrs
+        "Creating {:?} instance with Metasrv addrs {:?}",
+        client_type, meta_client_options.metasrv_addrs
     );

     let mut builder = match client_type {
         MetaClientType::Datanode { member_id } => {
-            MetaClientBuilder::datanode_default_options(cluster_id, member_id)
+            MetaClientBuilder::datanode_default_options(member_id)
         }
         MetaClientType::Flownode { member_id } => {
-            MetaClientBuilder::flownode_default_options(cluster_id, member_id)
+            MetaClientBuilder::flownode_default_options(member_id)
         }
-        MetaClientType::Frontend => MetaClientBuilder::frontend_default_options(cluster_id),
+        MetaClientType::Frontend => MetaClientBuilder::frontend_default_options(),
     };

     let base_config = ChannelConfig::new()

@@ -60,8 +60,8 @@ pub async fn mock_client_with_etcdstore(addr: &str) -> (MetaClient, MockMetaCont
 }

 pub async fn mock_client_by(server_addr: String, channel_manager: ChannelManager) -> MetaClient {
-    let id = (1000u64, 2000u64);
-    let mut meta_client = MetaClientBuilder::datanode_default_options(id.0, id.1)
+    let id = 2000u64;
+    let mut meta_client = MetaClientBuilder::datanode_default_options(id)
         .enable_access_cluster_info()
         .channel_manager(channel_manager)
         .build();

@@ -375,13 +375,9 @@ mod tests {

     #[test]
     fn test_to_stat_kv_map() {
-        let stat_key = DatanodeStatKey {
-            cluster_id: 0,
-            node_id: 100,
-        };
+        let stat_key = DatanodeStatKey { node_id: 100 };

         let stat = Stat {
-            cluster_id: 0,
             id: 100,
             addr: "127.0.0.1:3001".to_string(),
             ..Default::default()

@@ -400,7 +396,6 @@ mod tests {
         let stat_val = kv_map.get(&stat_key).unwrap();
         let stat = stat_val.stats.first().unwrap();

-        assert_eq!(0, stat.cluster_id);
         assert_eq!(100, stat.id);
         assert_eq!("127.0.0.1:3001", stat.addr);
     }
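// Sketch only (not part of the diff): how a node would obtain its meta client
// through `create_meta_client` given the signature above; `example_wiring` is
// a hypothetical helper, and `MetaClientOptions` is assumed to carry the
// Metasrv addresses.
async fn example_wiring(opts: &MetaClientOptions) -> error::Result<MetaClientRef> {
    create_meta_client(MetaClientType::Datanode { member_id: 42 }, opts).await
}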
@@ -15,7 +15,6 @@
 use common_error::ext::BoxedError;
 use common_meta::ddl::flow_meta::PartitionPeerAllocator;
 use common_meta::peer::Peer;
-use common_meta::ClusterId;
 use snafu::ResultExt;

 use crate::metasrv::{SelectorContext, SelectorRef};

@@ -34,14 +33,9 @@ impl FlowPeerAllocator {

 #[async_trait::async_trait]
 impl PartitionPeerAllocator for FlowPeerAllocator {
-    async fn alloc(
-        &self,
-        cluster_id: ClusterId,
-        partitions: usize,
-    ) -> common_meta::error::Result<Vec<Peer>> {
+    async fn alloc(&self, partitions: usize) -> common_meta::error::Result<Vec<Peer>> {
         self.selector
             .select(
-                cluster_id,
                 &self.ctx,
                 SelectorOptions {
                     min_required_items: partitions,

@@ -20,8 +20,8 @@ use std::time::{Duration, Instant};

 use api::v1::meta::mailbox_message::Payload;
 use api::v1::meta::{
-    HeartbeatRequest, HeartbeatResponse, MailboxMessage, RegionLease, RequestHeader,
-    ResponseHeader, Role, PROTOCOL_VERSION,
+    HeartbeatRequest, HeartbeatResponse, MailboxMessage, RegionLease, ResponseHeader, Role,
+    PROTOCOL_VERSION,
 };
 use check_leader_handler::CheckLeaderHandler;
 use collect_cluster_info_handler::{

@@ -153,13 +153,9 @@ pub struct Pusher {
 }

 impl Pusher {
-    pub fn new(
-        sender: Sender<std::result::Result<HeartbeatResponse, tonic::Status>>,
-        req_header: &RequestHeader,
-    ) -> Self {
+    pub fn new(sender: Sender<std::result::Result<HeartbeatResponse, tonic::Status>>) -> Self {
         let res_header = ResponseHeader {
             protocol_version: PROTOCOL_VERSION,
-            cluster_id: req_header.cluster_id,
             ..Default::default()
         };

@@ -772,7 +768,7 @@ mod tests {
     use std::sync::Arc;
     use std::time::Duration;

-    use api::v1::meta::{MailboxMessage, RequestHeader, Role, PROTOCOL_VERSION};
+    use api::v1::meta::{MailboxMessage, Role};
     use common_meta::kv_backend::memory::MemoryKvBackend;
     use common_meta::sequence::SequenceBuilder;
     use tokio::sync::mpsc;

@@ -814,12 +810,8 @@ mod tests {
     async fn push_msg_via_mailbox() -> (MailboxRef, MailboxReceiver) {
         let datanode_id = 12;
         let (pusher_tx, mut pusher_rx) = mpsc::channel(16);
-        let res_header = RequestHeader {
-            protocol_version: PROTOCOL_VERSION,
-            ..Default::default()
-        };
         let pusher_id = PusherId::new(Role::Datanode, datanode_id);
-        let pusher: Pusher = Pusher::new(pusher_tx, &res_header);
+        let pusher: Pusher = Pusher::new(pusher_tx);
         let handler_group = HeartbeatHandlerGroup::default();
         handler_group.register_pusher(pusher_id, pusher).await;
@@ -262,15 +262,11 @@ mod tests {
         let handler = CollectStatsHandler::default();
         handle_request_many_times(ctx.clone(), &handler, 1).await;

-        let key = DatanodeStatKey {
-            cluster_id: 3,
-            node_id: 101,
-        };
+        let key = DatanodeStatKey { node_id: 101 };
         let key: Vec<u8> = key.into();
         let res = ctx.in_memory.get(&key).await.unwrap();
         let kv = res.unwrap();
         let key: DatanodeStatKey = kv.key.clone().try_into().unwrap();
-        assert_eq!(3, key.cluster_id);
         assert_eq!(101, key.node_id);
         let val: DatanodeStatValue = kv.value.try_into().unwrap();
         // first new stat must be set in kv store immediately

@@ -295,7 +291,6 @@ mod tests {
         for i in 1..=loop_times {
             let mut acc = HeartbeatAccumulator {
                 stat: Some(Stat {
-                    cluster_id: 3,
                     id: 101,
                     region_num: i as _,
                     ..Default::default()

@@ -101,7 +101,6 @@ mod tests {
         }
     }
     acc.stat = Some(Stat {
-        cluster_id: 1,
         id: 42,
         region_stats: vec![new_region_stat(1), new_region_stat(2), new_region_stat(3)],
         timestamp_millis: 1000,

@@ -38,17 +38,14 @@ impl HeartbeatHandler for DatanodeKeepLeaseHandler {
         _acc: &mut HeartbeatAccumulator,
     ) -> Result<HandleControl> {
         let HeartbeatRequest { header, peer, .. } = req;
-        let Some(header) = &header else {
+        let Some(_header) = &header else {
             return Ok(HandleControl::Continue);
         };
         let Some(peer) = &peer else {
             return Ok(HandleControl::Continue);
         };

-        let key = DatanodeLeaseKey {
-            cluster_id: header.cluster_id,
-            node_id: peer.id,
-        };
+        let key = DatanodeLeaseKey { node_id: peer.id };
         let value = LeaseValue {
             timestamp_millis: time_util::current_time_millis(),
             node_addr: peer.addr.clone(),

@@ -80,17 +77,14 @@ impl HeartbeatHandler for FlownodeKeepLeaseHandler {
         _acc: &mut HeartbeatAccumulator,
     ) -> Result<HandleControl> {
         let HeartbeatRequest { header, peer, .. } = req;
-        let Some(header) = &header else {
+        let Some(_header) = &header else {
             return Ok(HandleControl::Continue);
         };
         let Some(peer) = &peer else {
             return Ok(HandleControl::Continue);
         };

-        let key = FlownodeLeaseKey {
-            cluster_id: header.cluster_id,
-            node_id: peer.id,
-        };
+        let key = FlownodeLeaseKey { node_id: peer.id };
         let value = LeaseValue {
             timestamp_millis: time_util::current_time_millis(),
             node_addr: peer.addr.clone(),
@@ -64,7 +64,6 @@ impl HeartbeatHandler for RegionLeaseHandler {
         };

         let regions = stat.regions();
-        let cluster_id = stat.cluster_id;
         let datanode_id = stat.id;

         let RenewRegionLeasesResponse {

@@ -72,7 +71,7 @@ impl HeartbeatHandler for RegionLeaseHandler {
             renewed,
         } = self
             .region_lease_keeper
-            .renew_region_leases(cluster_id, datanode_id, &regions)
+            .renew_region_leases(datanode_id, &regions)
             .await?;

         let renewed = renewed

@@ -153,7 +152,6 @@ mod test {
         let peer = Peer::empty(datanode_id);
         let follower_peer = Peer::empty(datanode_id + 1);
         let table_info = new_test_table_info(table_id, vec![region_number]).into();
-        let cluster_id = 1;

         let region_routes = vec![RegionRoute {
             region: Region::new_test(region_id),

@@ -181,7 +179,6 @@ mod test {
         let acc = &mut HeartbeatAccumulator::default();

         acc.stat = Some(Stat {
-            cluster_id,
             id: peer.id,
             region_stats: vec![
                 new_empty_region_stat(region_id, RegionRole::Follower),

@@ -215,7 +212,6 @@ mod test {
         let acc = &mut HeartbeatAccumulator::default();

         acc.stat = Some(Stat {
-            cluster_id,
             id: follower_peer.id,
             region_stats: vec![
                 new_empty_region_stat(region_id, RegionRole::Follower),

@@ -249,7 +245,6 @@ mod test {
         let acc = &mut HeartbeatAccumulator::default();

         acc.stat = Some(Stat {
-            cluster_id,
             id: follower_peer.id,
             region_stats: vec![
                 new_empty_region_stat(region_id, RegionRole::Follower),

@@ -292,7 +287,6 @@ mod test {
         let peer = Peer::empty(datanode_id);
         let follower_peer = Peer::empty(datanode_id + 1);
         let table_info = new_test_table_info(table_id, vec![region_number]).into();
-        let cluster_id = 1;

         let region_routes = vec![
             RegionRoute {

@@ -333,7 +327,6 @@ mod test {
         let acc = &mut HeartbeatAccumulator::default();

         acc.stat = Some(Stat {
-            cluster_id,
             id: peer.id,
             region_stats: vec![
                 new_empty_region_stat(region_id, RegionRole::Leader),

@@ -28,18 +28,15 @@ impl HeartbeatHandler for ResponseHeaderHandler {

     async fn handle(
         &self,
-        req: &HeartbeatRequest,
+        _req: &HeartbeatRequest,
         _ctx: &mut Context,
         acc: &mut HeartbeatAccumulator,
     ) -> Result<HandleControl> {
-        let HeartbeatRequest { header, .. } = req;
         let res_header = ResponseHeader {
             protocol_version: PROTOCOL_VERSION,
-            cluster_id: header.as_ref().map_or(0, |h| h.cluster_id),
             ..Default::default()
         };
         acc.header = Some(res_header);

         Ok(HandleControl::Continue)
     }
 }

@@ -48,7 +45,7 @@ impl HeartbeatHandler for ResponseHeaderHandler {
 mod tests {
     use std::sync::Arc;

-    use api::v1::meta::{HeartbeatResponse, RequestHeader};
+    use api::v1::meta::RequestHeader;
     use common_meta::cache_invalidator::DummyCacheInvalidator;
     use common_meta::key::TableMetadataManager;
     use common_meta::kv_backend::memory::MemoryKvBackend;

@@ -90,7 +87,7 @@ mod tests {
         };

         let req = HeartbeatRequest {
-            header: Some(RequestHeader::new((1, 2), Role::Datanode, W3cTrace::new())),
+            header: Some(RequestHeader::new(2, Role::Datanode, W3cTrace::new())),
             ..Default::default()
         };
         let mut acc = HeartbeatAccumulator::default();

@@ -100,12 +97,5 @@ mod tests {
             .handle(&req, &mut ctx, &mut acc)
             .await
             .unwrap();
-        let header = std::mem::take(&mut acc.header);
-        let res = HeartbeatResponse {
-            header,
-            mailbox_message: acc.into_mailbox_message(),
-            ..Default::default()
-        };
-        assert_eq!(1, res.header.unwrap().cluster_id);
     }
 }
@@ -35,20 +35,12 @@ macro_rules! impl_from_str_lease_key {
             .context(error::InvalidLeaseKeySnafu { key })?;

         ensure!(caps.len() == 3, error::InvalidLeaseKeySnafu { key });

-        let cluster_id = caps[1].to_string();
         let node_id = caps[2].to_string();
-        let cluster_id: u64 = cluster_id.parse().context(error::ParseNumSnafu {
-            err_msg: format!("invalid cluster_id: {cluster_id}"),
-        })?;
         let node_id: u64 = node_id.parse().context(error::ParseNumSnafu {
             err_msg: format!("invalid node_id: {node_id}"),
         })?;

-        Ok(Self {
-            cluster_id,
-            node_id,
-        })
+        Ok(Self { node_id })
     }
 }
 };

@@ -73,7 +65,7 @@ macro_rules! impl_try_from_lease_key {
     type Error = error::Error;

     fn try_from(key: $key_type) -> error::Result<Self> {
-        Ok(format!("{}-{}-{}", $prefix, key.cluster_id, key.node_id).into_bytes())
+        Ok(format!("{}-0-{}", $prefix, key.node_id).into_bytes())
     }
 }
 };
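// Sketch only (not part of the diff): the encoded lease keys keep a literal "0"
// segment where the cluster id used to sit, so the stored key format stays
// parseable, e.g.
//   encode: format!("{}-0-{}", $prefix, key.node_id)
//   decode: the regex still captures two groups; only the node_id group is kept.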
@@ -15,7 +15,6 @@
 use std::str::FromStr;

 use common_meta::datanode::DatanodeStatKey;
-use common_meta::ClusterId;
 use lazy_static::lazy_static;
 use regex::Regex;
 use serde::{Deserialize, Serialize};

@@ -42,20 +41,18 @@ lazy_static! {

 #[derive(Debug, Clone, Eq, Hash, PartialEq, Serialize, Deserialize)]
 pub struct DatanodeLeaseKey {
-    pub cluster_id: ClusterId,
     pub node_id: u64,
 }

 impl DatanodeLeaseKey {
-    pub fn prefix_key_by_cluster(cluster_id: ClusterId) -> Vec<u8> {
-        format!("{DATANODE_LEASE_PREFIX}-{cluster_id}-").into_bytes()
+    pub fn prefix_key() -> Vec<u8> {
+        format!("{DATANODE_LEASE_PREFIX}-0-").into_bytes()
     }
 }

 impl From<&DatanodeLeaseKey> for DatanodeStatKey {
     fn from(lease_key: &DatanodeLeaseKey) -> Self {
         DatanodeStatKey {
-            cluster_id: lease_key.cluster_id,
             node_id: lease_key.node_id,
         }
     }

@@ -63,22 +60,21 @@ impl From<&DatanodeLeaseKey> for DatanodeStatKey {

 #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Serialize, Deserialize)]
 pub struct InactiveRegionKey {
-    pub cluster_id: ClusterId,
     pub node_id: u64,
     pub region_id: u64,
 }

 impl InactiveRegionKey {
-    pub fn get_prefix_by_cluster(cluster_id: u64) -> Vec<u8> {
-        format!("{}-{}-", INACTIVE_REGION_PREFIX, cluster_id).into_bytes()
+    pub fn get_prefix_by_cluster() -> Vec<u8> {
+        format!("{}-0-", INACTIVE_REGION_PREFIX).into_bytes()
     }
 }

 impl From<InactiveRegionKey> for Vec<u8> {
     fn from(value: InactiveRegionKey) -> Self {
         format!(
-            "{}-{}-{}-{}",
-            INACTIVE_REGION_PREFIX, value.cluster_id, value.node_id, value.region_id
+            "{}-0-{}-{}",
+            INACTIVE_REGION_PREFIX, value.node_id, value.region_id
         )
         .into_bytes()
     }

@@ -97,13 +93,8 @@ impl FromStr for InactiveRegionKey {
             error::InvalidInactiveRegionKeySnafu { key }
         );

-        let cluster_id = caps[1].to_string();
         let node_id = caps[2].to_string();
         let region_id = caps[3].to_string();

-        let cluster_id: u64 = cluster_id.parse().context(error::ParseNumSnafu {
-            err_msg: format!("invalid cluster_id: {cluster_id}"),
-        })?;
         let node_id: u64 = node_id.parse().context(error::ParseNumSnafu {
             err_msg: format!("invalid node_id: {node_id}"),
         })?;

@@ -111,11 +102,7 @@ impl FromStr for InactiveRegionKey {
             err_msg: format!("invalid region_id: {region_id}"),
         })?;

-        Ok(Self {
-            cluster_id,
-            node_id,
-            region_id,
-        })
+        Ok(Self { node_id, region_id })
     }
 }

@@ -135,24 +122,17 @@ mod tests {

     #[test]
     fn test_stat_key_round_trip() {
-        let key = DatanodeStatKey {
-            cluster_id: 0,
-            node_id: 1,
-        };
+        let key = DatanodeStatKey { node_id: 1 };

         let key_bytes: Vec<u8> = key.into();
         let new_key: DatanodeStatKey = key_bytes.try_into().unwrap();

-        assert_eq!(0, new_key.cluster_id);
         assert_eq!(1, new_key.node_id);
     }

     #[test]
     fn test_lease_key_round_trip() {
-        let key = DatanodeLeaseKey {
-            cluster_id: 0,
-            node_id: 1,
-        };
+        let key = DatanodeLeaseKey { node_id: 1 };

         let key_bytes: Vec<u8> = key.clone().try_into().unwrap();
         let new_key: DatanodeLeaseKey = key_bytes.try_into().unwrap();

@@ -162,21 +142,16 @@ mod tests {

     #[test]
     fn test_lease_key_to_stat_key() {
-        let lease_key = DatanodeLeaseKey {
-            cluster_id: 1,
-            node_id: 101,
-        };
+        let lease_key = DatanodeLeaseKey { node_id: 101 };

         let stat_key: DatanodeStatKey = (&lease_key).into();

-        assert_eq!(1, stat_key.cluster_id);
         assert_eq!(101, stat_key.node_id);
     }

     #[test]
     fn test_inactive_region_key_round_trip() {
         let key = InactiveRegionKey {
-            cluster_id: 0,
             node_id: 1,
             region_id: 2,
         };
@@ -12,7 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use common_meta::ClusterId;
 use lazy_static::lazy_static;
 use regex::Regex;
 use serde::{Deserialize, Serialize};

@@ -26,13 +25,12 @@ lazy_static! {

 #[derive(Debug, Clone, Eq, Hash, PartialEq, Serialize, Deserialize)]
 pub struct FlownodeLeaseKey {
-    pub cluster_id: ClusterId,
     pub node_id: u64,
 }

 impl FlownodeLeaseKey {
-    pub fn prefix_key_by_cluster(cluster_id: ClusterId) -> Vec<u8> {
-        format!("{FLOWNODE_LEASE_PREFIX}-{cluster_id}-").into_bytes()
+    pub fn prefix_key_by_cluster() -> Vec<u8> {
+        format!("{FLOWNODE_LEASE_PREFIX}-0-").into_bytes()
     }
 }

@@ -42,10 +40,7 @@ mod tests {

     #[test]
     fn test_lease_key_round_trip() {
-        let key = FlownodeLeaseKey {
-            cluster_id: 0,
-            node_id: 1,
-        };
+        let key = FlownodeLeaseKey { node_id: 1 };

         let key_bytes: Vec<u8> = key.clone().try_into().unwrap();
         let new_key: FlownodeLeaseKey = key_bytes.try_into().unwrap();
@@ -18,7 +18,7 @@ use std::hash::Hash;
 use common_error::ext::BoxedError;
 use common_meta::kv_backend::KvBackend;
 use common_meta::peer::{Peer, PeerLookupService};
-use common_meta::{util, ClusterId, DatanodeId, FlownodeId};
+use common_meta::{util, DatanodeId, FlownodeId};
 use common_time::util as time_util;
 use snafu::ResultExt;

@@ -35,14 +35,12 @@ fn build_lease_filter(lease_secs: u64) -> impl Fn(&LeaseValue) -> bool {

 /// look up [`Peer`] given [`ClusterId`] and [`DatanodeId`], will only return if it's alive under given `lease_secs`
 pub async fn lookup_datanode_peer(
-    cluster_id: ClusterId,
     datanode_id: DatanodeId,
     meta_peer_client: &MetaPeerClientRef,
     lease_secs: u64,
 ) -> Result<Option<Peer>> {
     let lease_filter = build_lease_filter(lease_secs);
     let lease_key = DatanodeLeaseKey {
-        cluster_id,
         node_id: datanode_id,
     };
     let lease_key_bytes: Vec<u8> = lease_key.clone().try_into()?;

@@ -63,29 +61,24 @@ pub async fn lookup_datanode_peer(

 /// Find all alive datanodes
 pub async fn alive_datanodes(
-    cluster_id: ClusterId,
     meta_peer_client: &MetaPeerClientRef,
     lease_secs: u64,
 ) -> Result<HashMap<DatanodeLeaseKey, LeaseValue>> {
     let predicate = build_lease_filter(lease_secs);
-    filter(
-        DatanodeLeaseKey::prefix_key_by_cluster(cluster_id),
-        meta_peer_client,
-        |v| predicate(v),
-    )
+    filter(DatanodeLeaseKey::prefix_key(), meta_peer_client, |v| {
+        predicate(v)
+    })
     .await
 }

 /// look up [`Peer`] given [`ClusterId`] and [`DatanodeId`], only return if it's alive under given `lease_secs`
 pub async fn lookup_flownode_peer(
-    cluster_id: ClusterId,
     flownode_id: FlownodeId,
     meta_peer_client: &MetaPeerClientRef,
     lease_secs: u64,
 ) -> Result<Option<Peer>> {
     let lease_filter = build_lease_filter(lease_secs);
     let lease_key = FlownodeLeaseKey {
-        cluster_id,
         node_id: flownode_id,
     };
     let lease_key_bytes: Vec<u8> = lease_key.clone().try_into()?;

@@ -107,13 +100,12 @@ pub async fn lookup_flownode_peer(

 /// Find all alive flownodes
 pub async fn alive_flownodes(
-    cluster_id: ClusterId,
     meta_peer_client: &MetaPeerClientRef,
     lease_secs: u64,
 ) -> Result<HashMap<FlownodeLeaseKey, LeaseValue>> {
     let predicate = build_lease_filter(lease_secs);
     filter(
-        FlownodeLeaseKey::prefix_key_by_cluster(cluster_id),
+        FlownodeLeaseKey::prefix_key_by_cluster(),
         meta_peer_client,
         |v| predicate(v),
     )

@@ -163,22 +155,14 @@ impl MetaPeerLookupService {

 #[async_trait::async_trait]
 impl PeerLookupService for MetaPeerLookupService {
-    async fn datanode(
-        &self,
-        cluster_id: ClusterId,
-        id: DatanodeId,
-    ) -> common_meta::error::Result<Option<Peer>> {
-        lookup_datanode_peer(cluster_id, id, &self.meta_peer_client, u64::MAX)
+    async fn datanode(&self, id: DatanodeId) -> common_meta::error::Result<Option<Peer>> {
+        lookup_datanode_peer(id, &self.meta_peer_client, u64::MAX)
             .await
             .map_err(BoxedError::new)
             .context(common_meta::error::ExternalSnafu)
     }
-    async fn flownode(
-        &self,
-        cluster_id: ClusterId,
-        id: FlownodeId,
-    ) -> common_meta::error::Result<Option<Peer>> {
-        lookup_flownode_peer(cluster_id, id, &self.meta_peer_client, u64::MAX)
+    async fn flownode(&self, id: FlownodeId) -> common_meta::error::Result<Option<Peer>> {
+        lookup_flownode_peer(id, &self.meta_peer_client, u64::MAX)
             .await
             .map_err(BoxedError::new)
             .context(common_meta::error::ExternalSnafu)
@@ -26,6 +26,7 @@ use common_config::Configurable;
 use common_greptimedb_telemetry::GreptimeDBTelemetryTask;
 use common_meta::cache_invalidator::CacheInvalidatorRef;
 use common_meta::ddl::ProcedureExecutorRef;
+use common_meta::distributed_time_constants;
 use common_meta::key::maintenance::MaintenanceModeManagerRef;
 use common_meta::key::TableMetadataManagerRef;
 use common_meta::kv_backend::{KvBackendRef, ResettableKvBackend, ResettableKvBackendRef};

@@ -36,7 +37,6 @@ use common_meta::node_expiry_listener::NodeExpiryListener;
 use common_meta::peer::Peer;
 use common_meta::region_keeper::MemoryRegionKeeperRef;
 use common_meta::wal_options_allocator::WalOptionsAllocatorRef;
-use common_meta::{distributed_time_constants, ClusterId};
 use common_options::datanode::DatanodeClientOptions;
 use common_procedure::options::ProcedureConfig;
 use common_procedure::ProcedureManagerRef;

@@ -572,13 +572,8 @@ impl Metasrv {
     }

     /// Lookup a peer by peer_id, return it only when it's alive.
-    pub(crate) async fn lookup_peer(
-        &self,
-        cluster_id: ClusterId,
-        peer_id: u64,
-    ) -> Result<Option<Peer>> {
+    pub(crate) async fn lookup_peer(&self, peer_id: u64) -> Result<Option<Peer>> {
         lookup_datanode_peer(
-            cluster_id,
             peer_id,
             &self.meta_peer_client,
             distributed_time_constants::DATANODE_LEASE_SECS,

@@ -20,7 +20,7 @@ lazy_static! {
     pub static ref METRIC_META_KV_REQUEST_ELAPSED: HistogramVec = register_histogram_vec!(
         "greptime_meta_kv_request_elapsed",
         "meta kv request",
-        &["target", "op", "cluster_id"]
+        &["target", "op"]
     )
     .unwrap();
     /// The heartbeat connection gauge.
@@ -39,7 +39,6 @@ use common_meta::key::{DeserializedValueWithBytes, TableMetadataManagerRef};
 use common_meta::lock_key::{CatalogLock, RegionLock, SchemaLock, TableLock};
 use common_meta::peer::Peer;
 use common_meta::region_keeper::{MemoryRegionKeeperRef, OperatingRegionGuard};
-use common_meta::ClusterId;
 use common_procedure::error::{
     Error as ProcedureError, FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu,
 };

@@ -70,8 +69,6 @@ pub struct PersistentContext {
     catalog: String,
     /// The table schema.
     schema: String,
-    /// The Id of the cluster.
-    cluster_id: ClusterId,
     /// The [Peer] of migration source.
     from_peer: Peer,
     /// The [Peer] of migration destination.

@@ -273,12 +270,11 @@ impl Context {
     /// The original failure detector was removed once the procedure was triggered.
     /// Now, we need to register the failure detector for the failed region again.
     pub async fn register_failure_detectors(&self) {
-        let cluster_id = self.persistent_ctx.cluster_id;
         let datanode_id = self.persistent_ctx.from_peer.id;
         let region_id = self.persistent_ctx.region_id;

         self.region_failure_detector_controller
-            .register_failure_detectors(vec![(cluster_id, datanode_id, region_id)])
+            .register_failure_detectors(vec![(datanode_id, region_id)])
            .await;
     }

@@ -287,12 +283,11 @@ impl Context {
     /// The original failure detectors was removed once the procedure was triggered.
     /// However, the `from_peer` may still send the heartbeats contains the failed region.
     pub async fn deregister_failure_detectors(&self) {
-        let cluster_id = self.persistent_ctx.cluster_id;
         let datanode_id = self.persistent_ctx.from_peer.id;
         let region_id = self.persistent_ctx.region_id;

         self.region_failure_detector_controller
-            .deregister_failure_detectors(vec![(cluster_id, datanode_id, region_id)])
+            .deregister_failure_detectors(vec![(datanode_id, region_id)])
             .await;
     }

@@ -458,7 +453,6 @@ impl RegionMigrationProcedure {
         } = serde_json::from_str(json).context(FromJsonSnafu)?;

         let guard = tracker.insert_running_procedure(&RegionMigrationProcedureTask {
-            cluster_id: persistent_ctx.cluster_id,
             region_id: persistent_ctx.region_id,
             from_peer: persistent_ctx.from_peer.clone(),
             to_peer: persistent_ctx.to_peer.clone(),

@@ -580,7 +574,6 @@ mod tests {
     use common_meta::key::test_utils::new_test_table_info;
     use common_meta::rpc::router::{Region, RegionRoute};

-    use super::migration_end::RegionMigrationEnd;
     use super::update_metadata::UpdateMetadata;
     use super::*;
     use crate::handler::HeartbeatMailbox;

@@ -620,7 +613,7 @@ mod tests {
         let procedure = RegionMigrationProcedure::new(persistent_context, context, None);

         let serialized = procedure.dump().unwrap();
-        let expected = r#"{"persistent_ctx":{"catalog":"greptime","schema":"public","cluster_id":0,"from_peer":{"id":1,"addr":""},"to_peer":{"id":2,"addr":""},"region_id":4398046511105,"timeout":"10s"},"state":{"region_migration_state":"RegionMigrationStart"}}"#;
+        let expected = r#"{"persistent_ctx":{"catalog":"greptime","schema":"public","from_peer":{"id":1,"addr":""},"to_peer":{"id":2,"addr":""},"region_id":4398046511105,"timeout":"10s"},"state":{"region_migration_state":"RegionMigrationStart"}}"#;
         assert_eq!(expected, serialized);
     }

@@ -628,7 +621,7 @@ mod tests {
     fn test_backward_compatibility() {
         let persistent_ctx = test_util::new_persistent_context(1, 2, RegionId::new(1024, 1));
         // NOTES: Changes it will break backward compatibility.
-        let serialized = r#"{"catalog":"greptime","schema":"public","cluster_id":0,"from_peer":{"id":1,"addr":""},"to_peer":{"id":2,"addr":""},"region_id":4398046511105}"#;
+        let serialized = r#"{"catalog":"greptime","schema":"public","from_peer":{"id":1,"addr":""},"to_peer":{"id":2,"addr":""},"region_id":4398046511105}"#;
         let deserialized: PersistentContext = serde_json::from_str(serialized).unwrap();

         assert_eq!(persistent_ctx, deserialized);

@@ -640,15 +633,8 @@ mod tests {
     #[async_trait::async_trait]
     #[typetag::serde]
     impl State for MockState {
-        async fn next(&mut self, ctx: &mut Context) -> Result<(Box<dyn State>, Status)> {
-            let pc = &mut ctx.persistent_ctx;
-
-            if pc.cluster_id == 2 {
-                Ok((Box::new(RegionMigrationEnd), Status::done()))
-            } else {
-                pc.cluster_id += 1;
-                Ok((Box::new(MockState), Status::executing(false)))
-            }
+        async fn next(&mut self, _ctx: &mut Context) -> Result<(Box<dyn State>, Status)> {
+            Ok((Box::new(MockState), Status::done()))
         }

         fn as_any(&self) -> &dyn Any {

@@ -692,7 +678,6 @@ mod tests {
         for _ in 1..3 {
             status = Some(procedure.execute(&ctx).await.unwrap());
         }
-        assert_eq!(procedure.context.persistent_ctx.cluster_id, 2);
         assert!(status.unwrap().is_done());
     }
@@ -62,7 +62,6 @@ impl CloseDowngradedRegion {
     async fn build_close_region_instruction(&self, ctx: &mut Context) -> Result<Instruction> {
         let pc = &ctx.persistent_ctx;
         let downgrade_leader_datanode_id = pc.from_peer.id;
-        let cluster_id = pc.cluster_id;
         let table_id = pc.region_id.table_id();
         let region_number = pc.region_id.region_number();
         let datanode_table_value = ctx.get_from_peer_datanode_table_value().await?;

@@ -70,7 +69,6 @@ impl CloseDowngradedRegion {
         let RegionInfo { engine, .. } = datanode_table_value.region_info.clone();

         Ok(Instruction::CloseRegion(RegionIdent {
-            cluster_id,
             datanode_id: downgrade_leader_datanode_id,
             table_id,
             region_number,

@@ -294,7 +294,6 @@ mod tests {
             from_peer: Peer::empty(1),
             to_peer: Peer::empty(2),
             region_id: RegionId::new(1024, 1),
-            cluster_id: 0,
             timeout: Duration::from_millis(1000),
         }
     }

@@ -22,7 +22,6 @@ use common_meta::key::table_info::TableInfoValue;
 use common_meta::key::table_route::TableRouteValue;
 use common_meta::peer::Peer;
 use common_meta::rpc::router::RegionRoute;
-use common_meta::ClusterId;
 use common_procedure::{watcher, ProcedureId, ProcedureManagerRef, ProcedureWithId};
 use common_telemetry::{error, info};
 use snafu::{ensure, OptionExt, ResultExt};

@@ -101,7 +100,6 @@ impl Drop for RegionMigrationProcedureGuard {

 #[derive(Debug, Clone)]
 pub struct RegionMigrationProcedureTask {
-    pub(crate) cluster_id: ClusterId,
     pub(crate) region_id: RegionId,
     pub(crate) from_peer: Peer,
     pub(crate) to_peer: Peer,

@@ -109,15 +107,8 @@ pub struct RegionMigrationProcedureTask {
 }

 impl RegionMigrationProcedureTask {
-    pub fn new(
-        cluster_id: ClusterId,
-        region_id: RegionId,
-        from_peer: Peer,
-        to_peer: Peer,
-        timeout: Duration,
-    ) -> Self {
+    pub fn new(region_id: RegionId, from_peer: Peer, to_peer: Peer, timeout: Duration) -> Self {
         Self {
-            cluster_id,
             region_id,
             from_peer,
             to_peer,

@@ -130,8 +121,8 @@ impl Display for RegionMigrationProcedureTask {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         write!(
             f,
-            "cluster: {}, region: {}, from_peer: {}, to_peer: {}",
-            self.cluster_id, self.region_id, self.from_peer, self.to_peer
+            "region: {}, from_peer: {}, to_peer: {}",
+            self.region_id, self.from_peer, self.to_peer
         )
     }
 }

@@ -331,7 +322,6 @@ impl RegionMigrationManager {
             .with_label_values(&["desc", &task.to_peer.id.to_string()])
             .inc();
         let RegionMigrationProcedureTask {
-            cluster_id,
             region_id,
             from_peer,
             to_peer,

@@ -341,7 +331,6 @@ impl RegionMigrationManager {
             PersistentContext {
                 catalog: catalog_name,
                 schema: schema_name,
-                cluster_id,
                 region_id,
                 from_peer,
                 to_peer,

@@ -394,7 +383,6 @@ mod test {
         let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory);
         let region_id = RegionId::new(1024, 1);
         let task = RegionMigrationProcedureTask {
-            cluster_id: 1,
             region_id,
             from_peer: Peer::empty(2),
             to_peer: Peer::empty(1),

@@ -419,7 +407,6 @@ mod test {
         let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory);
         let region_id = RegionId::new(1024, 1);
         let task = RegionMigrationProcedureTask {
-            cluster_id: 1,
             region_id,
             from_peer: Peer::empty(1),
             to_peer: Peer::empty(1),

@@ -437,7 +424,6 @@ mod test {
         let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory);
         let region_id = RegionId::new(1024, 1);
         let task = RegionMigrationProcedureTask {
-            cluster_id: 1,
             region_id,
             from_peer: Peer::empty(1),
             to_peer: Peer::empty(2),

@@ -455,7 +441,6 @@ mod test {
         let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory);
         let region_id = RegionId::new(1024, 1);
         let task = RegionMigrationProcedureTask {
-            cluster_id: 1,
             region_id,
             from_peer: Peer::empty(1),
             to_peer: Peer::empty(2),

@@ -483,7 +468,6 @@ mod test {
         let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory);
         let region_id = RegionId::new(1024, 1);
         let task = RegionMigrationProcedureTask {
-            cluster_id: 1,
             region_id,
             from_peer: Peer::empty(1),
             to_peer: Peer::empty(2),

@@ -515,7 +499,6 @@ mod test {
         let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory);
         let region_id = RegionId::new(1024, 1);
         let task = RegionMigrationProcedureTask {
-            cluster_id: 1,
             region_id,
             from_peer: Peer::empty(1),
             to_peer: Peer::empty(2),

@@ -542,7 +525,6 @@ mod test {
         let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory);
         let region_id = RegionId::new(1024, 1);
         let task = RegionMigrationProcedureTask {
-            cluster_id: 1,
             region_id,
             from_peer: Peer::empty(1),
             to_peer: Peer::empty(2),
@@ -62,7 +62,6 @@ impl OpenCandidateRegion {
     /// - Datanode Table is not found.
     async fn build_open_region_instruction(&self, ctx: &mut Context) -> Result<Instruction> {
         let pc = &ctx.persistent_ctx;
-        let cluster_id = pc.cluster_id;
         let table_id = pc.region_id.table_id();
         let region_number = pc.region_id.region_number();
         let candidate_id = pc.to_peer.id;

@@ -77,7 +76,6 @@ impl OpenCandidateRegion {

         let open_instruction = Instruction::OpenRegion(OpenRegion::new(
             RegionIdent {
-                cluster_id,
                 datanode_id: candidate_id,
                 table_id,
                 region_number,

@@ -214,7 +212,6 @@ mod tests {
     fn new_mock_open_instruction(datanode_id: DatanodeId, region_id: RegionId) -> Instruction {
         Instruction::OpenRegion(OpenRegion {
             region_ident: RegionIdent {
-                cluster_id: 0,
                 datanode_id,
                 table_id: region_id.table_id(),
                 region_number: region_id.region_number(),

@@ -19,7 +19,7 @@ use std::sync::Arc;
 use std::time::Duration;

 use api::v1::meta::mailbox_message::Payload;
-use api::v1::meta::{HeartbeatResponse, MailboxMessage, RequestHeader};
+use api::v1::meta::{HeartbeatResponse, MailboxMessage};
 use common_meta::ddl::NoopRegionFailureDetectorControl;
 use common_meta::instruction::{
     DowngradeRegionReply, InstructionReply, SimpleReply, UpgradeRegionReply,

@@ -85,7 +85,7 @@ impl MailboxContext {
         tx: Sender<std::result::Result<HeartbeatResponse, tonic::Status>>,
     ) {
         let pusher_id = channel.pusher_id();
-        let pusher = Pusher::new(tx, &RequestHeader::default());
+        let pusher = Pusher::new(tx);
         let _ = self.pushers.insert(pusher_id.string_key(), pusher).await;
     }

@@ -317,7 +317,6 @@ pub fn new_persistent_context(from: u64, to: u64, region_id: RegionId) -> Persis
         from_peer: Peer::empty(from),
         to_peer: Peer::empty(to),
         region_id,
-        cluster_id: 0,
         timeout: Duration::from_secs(10),
     }
 }

@@ -172,11 +172,7 @@ mod tests {
         let detecting_regions = event.into_region_failure_detectors();
         assert_eq!(
             detecting_regions,
-            vec![(
-                ctx.persistent_ctx.cluster_id,
-                from_peer.id,
-                ctx.persistent_ctx.region_id
-            )]
+            vec![(from_peer.id, ctx.persistent_ctx.region_id)]
         );

         let table_route = table_metadata_manager

@@ -238,7 +238,6 @@ mod tests {
             from_peer: Peer::empty(1),
             to_peer: Peer::empty(2),
             region_id: RegionId::new(1024, 1),
-            cluster_id: 0,
             timeout: Duration::from_millis(1000),
         }
     }

@@ -97,7 +97,6 @@ fn create_table_task(table_name: Option<&str>) -> CreateTableTask {
     #[test]
     fn test_region_request_builder() {
         let mut procedure = CreateTableProcedure::new(
-            1,
             create_table_task(None),
             test_data::new_ddl_context(Arc::new(NodeClients::default())),
         );

@@ -192,7 +191,6 @@ async fn test_on_datanode_create_regions() {
     let node_manager = new_node_manager(&region_server, &region_routes).await;

     let mut procedure = CreateTableProcedure::new(
-        1,
         create_table_task(None),
         test_data::new_ddl_context(node_manager),
     );

@@ -260,7 +258,7 @@ async fn test_on_datanode_create_logical_regions() {
         .0;
     let _ = kv_backend.txn(physical_route_txn).await.unwrap();
     let mut procedure =
-        CreateLogicalTablesProcedure::new(1, vec![task1, task2, task3], physical_table_id, ctx);
+        CreateLogicalTablesProcedure::new(vec![task1, task2, task3], physical_table_id, ctx);

     let expected_created_regions = Arc::new(Mutex::new(HashMap::from([(1, 3), (2, 3), (3, 3)])));
@@ -129,7 +129,7 @@ mod tests {
     #[test]
     fn test_default_failure_detector_container() {
         let container = RegionFailureDetector::new(Default::default());
-        let detecting_region = (0, 2, RegionId::new(1, 1));
+        let detecting_region = (2, RegionId::new(1, 1));
         let _ = container.region_failure_detector(detecting_region);
         assert!(container.contains(&detecting_region));

@@ -19,7 +19,7 @@ use common_meta::key::table_route::TableRouteValue;
 use common_meta::key::TableMetadataManagerRef;
 use common_meta::region_keeper::MemoryRegionKeeperRef;
 use common_meta::rpc::router::RegionRoute;
-use common_meta::{ClusterId, DatanodeId};
+use common_meta::DatanodeId;
 use common_telemetry::warn;
 use snafu::ResultExt;
 use store_api::region_engine::RegionRole;

@@ -167,7 +167,6 @@ impl RegionLeaseKeeper {
     /// and corresponding regions will be added to `non_exists` of [RenewRegionLeasesResponse].
     pub async fn renew_region_leases(
         &self,
-        _cluster_id: ClusterId,
         datanode_id: DatanodeId,
         regions: &[(RegionId, RegionRole)],
     ) -> Result<RenewRegionLeasesResponse> {

@@ -282,7 +281,6 @@ mod tests {
             renewed,
         } = keeper
             .renew_region_leases(
-                0,
                 1,
                 &[
                     (RegionId::new(1024, 1), RegionRole::Follower),

@@ -384,7 +382,7 @@ mod tests {
             non_exists,
             renewed,
         } = keeper
-            .renew_region_leases(0, 1, &[(region_id, RegionRole::Follower)])
+            .renew_region_leases(1, &[(region_id, RegionRole::Follower)])
             .await
             .unwrap();
         assert!(renewed.is_empty());

@@ -397,7 +395,7 @@ mod tests {
             non_exists,
             renewed,
         } = keeper
-            .renew_region_leases(0, leader_peer_id, &[(region_id, role)])
+            .renew_region_leases(leader_peer_id, &[(region_id, role)])
             .await
             .unwrap();

@@ -411,7 +409,7 @@ mod tests {
             non_exists,
             renewed,
         } = keeper
-            .renew_region_leases(0, follower_peer_id, &[(region_id, role)])
+            .renew_region_leases(follower_peer_id, &[(region_id, role)])
             .await
             .unwrap();

@@ -432,7 +430,7 @@ mod tests {
             non_exists,
             renewed,
         } = keeper
-            .renew_region_leases(0, leader_peer_id, &[(opening_region_id, role)])
+            .renew_region_leases(leader_peer_id, &[(opening_region_id, role)])
             .await
             .unwrap();

@@ -465,7 +463,6 @@ mod tests {
             renewed,
         } = keeper
             .renew_region_leases(
-                0,
                 1,
                 &[
                     (region_id, RegionRole::Follower),

@@ -513,7 +510,7 @@ mod tests {
             non_exists,
             renewed,
         } = keeper
-            .renew_region_leases(0, follower_peer_id, &[(region_id, role)])
+            .renew_region_leases(follower_peer_id, &[(region_id, role)])
             .await
             .unwrap();
@@ -22,7 +22,7 @@ use common_meta::ddl::{DetectingRegion, RegionFailureDetectorController};
|
||||
use common_meta::key::maintenance::MaintenanceModeManagerRef;
|
||||
use common_meta::leadership_notifier::LeadershipChangeListener;
|
||||
use common_meta::peer::PeerLookupServiceRef;
|
||||
use common_meta::{ClusterId, DatanodeId};
|
||||
use common_meta::DatanodeId;
|
||||
use common_runtime::JoinHandle;
|
||||
use common_telemetry::{error, info, warn};
|
||||
use common_time::util::current_time_millis;
|
||||
@@ -45,7 +45,6 @@ use crate::selector::SelectorOptions;
|
||||
/// and a timestamp indicating when the heartbeat was sent.
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct DatanodeHeartbeat {
|
||||
cluster_id: ClusterId,
|
||||
datanode_id: DatanodeId,
|
||||
// TODO(weny): Considers collecting the memtable size in regions.
|
||||
regions: Vec<RegionId>,
|
||||
@@ -55,7 +54,6 @@ pub(crate) struct DatanodeHeartbeat {
|
||||
 impl From<&Stat> for DatanodeHeartbeat {
     fn from(value: &Stat) -> Self {
         DatanodeHeartbeat {
-            cluster_id: value.cluster_id,
             datanode_id: value.id,
             regions: value.region_stats.iter().map(|x| x.id).collect(),
             timestamp: value.timestamp_millis,
@@ -341,7 +339,7 @@ impl RegionSupervisor {
         }
     }
 
-    async fn handle_region_failures(&self, mut regions: Vec<(ClusterId, DatanodeId, RegionId)>) {
+    async fn handle_region_failures(&self, mut regions: Vec<(DatanodeId, RegionId)>) {
         if regions.is_empty() {
             return;
         }
@@ -358,22 +356,19 @@ impl RegionSupervisor {
         }
 
         let migrating_regions = regions
-            .extract_if(.., |(_, _, region_id)| {
+            .extract_if(.., |(_, region_id)| {
                 self.region_migration_manager.tracker().contains(*region_id)
             })
             .collect::<Vec<_>>();
 
-        for (cluster_id, datanode_id, region_id) in migrating_regions {
-            self.failure_detector
-                .remove(&(cluster_id, datanode_id, region_id));
+        for (datanode_id, region_id) in migrating_regions {
+            self.failure_detector.remove(&(datanode_id, region_id));
         }
 
         warn!("Detects region failures: {:?}", regions);
-        for (cluster_id, datanode_id, region_id) in regions {
-            match self.do_failover(cluster_id, datanode_id, region_id).await {
-                Ok(_) => self
-                    .failure_detector
-                    .remove(&(cluster_id, datanode_id, region_id)),
+        for (datanode_id, region_id) in regions {
+            match self.do_failover(datanode_id, region_id).await {
+                Ok(_) => self.failure_detector.remove(&(datanode_id, region_id)),
                 Err(err) => {
                     error!(err; "Failed to execute region failover for region: {region_id}, datanode: {datanode_id}");
                 }
@@ -388,15 +383,10 @@ impl RegionSupervisor {
             .context(error::MaintenanceModeManagerSnafu)
     }
 
-    async fn do_failover(
-        &self,
-        cluster_id: ClusterId,
-        datanode_id: DatanodeId,
-        region_id: RegionId,
-    ) -> Result<()> {
+    async fn do_failover(&self, datanode_id: DatanodeId, region_id: RegionId) -> Result<()> {
         let from_peer = self
             .peer_lookup
-            .datanode(cluster_id, datanode_id)
+            .datanode(datanode_id)
             .await
             .context(error::LookupPeerSnafu {
                 peer_id: datanode_id,
@@ -407,7 +397,6 @@ impl RegionSupervisor {
         let mut peers = self
             .selector
             .select(
-                cluster_id,
                 &self.selector_context,
                 SelectorOptions {
                     min_required_items: 1,
@@ -423,7 +412,6 @@ impl RegionSupervisor {
             return Ok(());
         }
         let task = RegionMigrationProcedureTask {
-            cluster_id,
             region_id,
             from_peer,
             to_peer,
@@ -442,7 +430,7 @@ impl RegionSupervisor {
     }
 
     /// Detects the failure of regions.
-    fn detect_region_failure(&self) -> Vec<(ClusterId, DatanodeId, RegionId)> {
+    fn detect_region_failure(&self) -> Vec<(DatanodeId, RegionId)> {
         self.failure_detector
             .iter()
             .filter_map(|e| {
@@ -464,7 +452,7 @@ impl RegionSupervisor {
     /// Updates the state of corresponding failure detectors.
     fn on_heartbeat_arrived(&self, heartbeat: DatanodeHeartbeat) {
         for region_id in heartbeat.regions {
-            let detecting_region = (heartbeat.cluster_id, heartbeat.datanode_id, region_id);
+            let detecting_region = (heartbeat.datanode_id, region_id);
             let mut detector = self
                 .failure_detector
                 .region_failure_detector(detecting_region);
@@ -537,7 +525,6 @@ pub(crate) mod tests {
 
         sender
             .send(Event::HeartbeatArrived(DatanodeHeartbeat {
-                cluster_id: 0,
                 datanode_id: 0,
                 regions: vec![RegionId::new(1, 1)],
                 timestamp: 100,
@@ -547,7 +534,7 @@ pub(crate) mod tests {
         let (tx, rx) = oneshot::channel();
         sender.send(Event::Dump(tx)).await.unwrap();
         let detector = rx.await.unwrap();
-        assert!(detector.contains(&(0, 0, RegionId::new(1, 1))));
+        assert!(detector.contains(&(0, RegionId::new(1, 1))));
 
         // Clear up
         sender.send(Event::Clear).await.unwrap();
@@ -561,7 +548,6 @@ pub(crate) mod tests {
         (0..2000)
             .map(|i| DatanodeHeartbeat {
                 timestamp: start + i * 1000 + rng.gen_range(0..100),
-                cluster_id: 0,
                 datanode_id,
                 regions: region_ids
                     .iter()
@@ -630,7 +616,7 @@ pub(crate) mod tests {
         let (mut supervisor, sender) = new_test_supervisor();
        let controller = RegionFailureDetectorControl::new(sender.clone());
         tokio::spawn(async move { supervisor.run().await });
-        let detecting_region = (0, 1, RegionId::new(1, 1));
+        let detecting_region = (1, RegionId::new(1, 1));
         controller
             .register_failure_detectors(vec![detecting_region])
             .await;
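The hunks above remove the cluster-id dimension from the region supervisor: the detecting-region key shrinks from (ClusterId, DatanodeId, RegionId) to (DatanodeId, RegionId). As a minimal, self-contained sketch of that key shape (the types below are simplified stand-ins, not GreptimeDB's actual RegionId or failure-detector definitions):

// Hedged sketch: DatanodeId and RegionId are stand-in aliases here.
use std::collections::HashMap;

type DatanodeId = u64;
type RegionId = u64;

// The detecting-region key no longer carries a cluster id.
type DetectingRegion = (DatanodeId, RegionId);

#[derive(Default)]
struct RegionFailureDetector {
    // Placeholder per-region detector state.
    detectors: HashMap<DetectingRegion, ()>,
}

impl RegionFailureDetector {
    fn register(&mut self, region: DetectingRegion) {
        self.detectors.entry(region).or_insert(());
    }

    fn remove(&mut self, region: &DetectingRegion) {
        self.detectors.remove(region);
    }

    fn contains(&self, region: &DetectingRegion) -> bool {
        self.detectors.contains_key(region)
    }
}

fn main() {
    let mut detector = RegionFailureDetector::default();
    detector.register((1, 42));
    assert!(detector.contains(&(1, 42)));
    detector.remove(&(1, 42));
    assert!(!detector.contains(&(1, 42)));
}

The same two-element key is what the updated tests assert on, e.g. detector.contains(&(0, RegionId::new(1, 1))).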
@@ -25,19 +25,12 @@ use serde::{Deserialize, Serialize};
 use crate::error;
 use crate::error::Result;
 
-pub type Namespace = u64;
-
 #[async_trait::async_trait]
 pub trait Selector: Send + Sync {
     type Context;
     type Output;
 
-    async fn select(
-        &self,
-        ns: Namespace,
-        ctx: &Self::Context,
-        opts: SelectorOptions,
-    ) -> Result<Self::Output>;
+    async fn select(&self, ctx: &Self::Context, opts: SelectorOptions) -> Result<Self::Output>;
 }
 
 #[derive(Debug)]
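With the Namespace alias gone, Selector::select takes only a context and options. A self-contained sketch of the slimmed-down trait with one toy implementation, assuming the async-trait and futures crates; SelectorContext, Peer, and the error type here are simplified stand-ins, not the real metasrv types:

// Hedged sketch of the new two-argument `select` signature.
use async_trait::async_trait;

struct SelectorContext;

#[derive(Clone, Debug)]
struct Peer {
    id: u64,
    addr: String,
}

#[derive(Debug)]
struct SelectorOptions {
    min_required_items: usize,
}

type Result<T> = std::result::Result<T, String>;

#[async_trait]
trait Selector: Send + Sync {
    type Context;
    type Output;

    // The former `ns: Namespace` parameter is gone.
    async fn select(&self, ctx: &Self::Context, opts: SelectorOptions) -> Result<Self::Output>;
}

struct FixedSelector(Vec<Peer>);

#[async_trait]
impl Selector for FixedSelector {
    type Context = SelectorContext;
    type Output = Vec<Peer>;

    async fn select(&self, _ctx: &Self::Context, opts: SelectorOptions) -> Result<Self::Output> {
        Ok(self.0.iter().take(opts.min_required_items).cloned().collect())
    }
}

fn main() {
    let selector = FixedSelector(vec![Peer { id: 1, addr: "node1".to_string() }]);
    let peers = futures::executor::block_on(
        selector.select(&SelectorContext, SelectorOptions { min_required_items: 1 }),
    )
    .unwrap();
    println!("selected: {:?}", peers);
}

Every implementor below (lease-based, load-based, round-robin, random) changes in the same mechanical way.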
@@ -19,7 +19,7 @@ use crate::lease;
 use crate::metasrv::SelectorContext;
 use crate::selector::common::choose_items;
 use crate::selector::weighted_choose::{RandomWeightedChoose, WeightedItem};
-use crate::selector::{Namespace, Selector, SelectorOptions};
+use crate::selector::{Selector, SelectorOptions};
 
 /// Select all alive datanodes based using a random weighted choose.
 pub struct LeaseBasedSelector;
@@ -29,15 +29,10 @@ impl Selector for LeaseBasedSelector {
     type Context = SelectorContext;
     type Output = Vec<Peer>;
 
-    async fn select(
-        &self,
-        ns: Namespace,
-        ctx: &Self::Context,
-        opts: SelectorOptions,
-    ) -> Result<Self::Output> {
+    async fn select(&self, ctx: &Self::Context, opts: SelectorOptions) -> Result<Self::Output> {
         // 1. get alive datanodes.
         let lease_kvs =
-            lease::alive_datanodes(ns, &ctx.meta_peer_client, ctx.datanode_lease_secs).await?;
+            lease::alive_datanodes(&ctx.meta_peer_client, ctx.datanode_lease_secs).await?;
 
         // 2. compute weight array, but the weight of each item is the same.
         let weight_array = lease_kvs
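For readers unfamiliar with weighted choosing, here is a hedged illustration of the underlying idea using the rand crate's choose_weighted (rand 0.8); this is not GreptimeDB's RandomWeightedChoose, and with equal weights, as LeaseBasedSelector uses, every alive datanode is equally likely to be picked:

// Hedged sketch of random weighted choice; WeightedItem is a stand-in struct.
use rand::seq::SliceRandom;

struct WeightedItem<T> {
    item: T,
    weight: usize,
}

fn main() {
    let candidates = vec![
        WeightedItem { item: "node1", weight: 1 },
        WeightedItem { item: "node2", weight: 1 },
        WeightedItem { item: "node3", weight: 1 },
    ];
    let mut rng = rand::thread_rng();
    // Equal weights: a uniform random pick over the alive datanodes.
    let chosen = candidates.choose_weighted(&mut rng, |c| c.weight).unwrap();
    println!("selected: {}", chosen.item);
}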
@@ -29,7 +29,7 @@ use crate::metasrv::SelectorContext;
 use crate::selector::common::choose_items;
 use crate::selector::weight_compute::{RegionNumsBasedWeightCompute, WeightCompute};
 use crate::selector::weighted_choose::RandomWeightedChoose;
-use crate::selector::{Namespace, Selector, SelectorOptions};
+use crate::selector::{Selector, SelectorOptions};
 
 pub struct LoadBasedSelector<C> {
     weight_compute: C,
@@ -57,15 +57,10 @@ where
     type Context = SelectorContext;
     type Output = Vec<Peer>;
 
-    async fn select(
-        &self,
-        ns: Namespace,
-        ctx: &Self::Context,
-        opts: SelectorOptions,
-    ) -> Result<Self::Output> {
+    async fn select(&self, ctx: &Self::Context, opts: SelectorOptions) -> Result<Self::Output> {
         // 1. get alive datanodes.
         let lease_kvs =
-            lease::alive_datanodes(ns, &ctx.meta_peer_client, ctx.datanode_lease_secs).await?;
+            lease::alive_datanodes(&ctx.meta_peer_client, ctx.datanode_lease_secs).await?;
 
         // 2. get stat kvs and filter out expired datanodes.
         let stat_keys = lease_kvs.keys().map(|k| k.into()).collect();
@@ -97,8 +92,8 @@ where
         let selected = choose_items(&opts, &mut weighted_choose)?;
 
         debug!(
-            "LoadBasedSelector select peers: {:?}, namespace: {}, opts: {:?}.",
-            selected, ns, opts,
+            "LoadBasedSelector select peers: {:?}, opts: {:?}.",
+            selected, opts,
         );
 
         Ok(selected)
@@ -165,33 +160,21 @@ mod tests {
     fn test_filter_out_expired_datanode() {
         let mut stat_kvs = HashMap::new();
         stat_kvs.insert(
-            DatanodeStatKey {
-                cluster_id: 1,
-                node_id: 0,
-            },
+            DatanodeStatKey { node_id: 0 },
             DatanodeStatValue { stats: vec![] },
         );
         stat_kvs.insert(
-            DatanodeStatKey {
-                cluster_id: 1,
-                node_id: 1,
-            },
+            DatanodeStatKey { node_id: 1 },
             DatanodeStatValue { stats: vec![] },
         );
         stat_kvs.insert(
-            DatanodeStatKey {
-                cluster_id: 1,
-                node_id: 2,
-            },
+            DatanodeStatKey { node_id: 2 },
             DatanodeStatValue { stats: vec![] },
         );
 
         let mut lease_kvs = HashMap::new();
         lease_kvs.insert(
-            DatanodeLeaseKey {
-                cluster_id: 1,
-                node_id: 1,
-            },
+            DatanodeLeaseKey { node_id: 1 },
             LeaseValue {
                 timestamp_millis: 0,
                 node_addr: "127.0.0.1:3002".to_string(),
@@ -201,9 +184,6 @@ mod tests {
         let alive_stat_kvs = filter_out_expired_datanode(stat_kvs, &lease_kvs);
 
         assert_eq!(1, alive_stat_kvs.len());
-        assert!(alive_stat_kvs.contains_key(&DatanodeStatKey {
-            cluster_id: 1,
-            node_id: 1
-        }));
+        assert!(alive_stat_kvs.contains_key(&DatanodeStatKey { node_id: 1 }));
     }
 }
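The test above exercises filter_out_expired_datanode with the new single-field keys. A standalone sketch of the same filtering idea, with simplified stand-in key and value types (the real function and structs live in the metasrv crate):

// Hedged sketch: keep only stats whose datanode still holds a lease.
use std::collections::HashMap;

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct DatanodeStatKey {
    node_id: u64,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct DatanodeLeaseKey {
    node_id: u64,
}

fn filter_out_expired_datanode<V, L>(
    mut stat_kvs: HashMap<DatanodeStatKey, V>,
    lease_kvs: &HashMap<DatanodeLeaseKey, L>,
) -> HashMap<DatanodeStatKey, V> {
    stat_kvs.retain(|k, _| lease_kvs.contains_key(&DatanodeLeaseKey { node_id: k.node_id }));
    stat_kvs
}

fn main() {
    let mut stat_kvs = HashMap::new();
    stat_kvs.insert(DatanodeStatKey { node_id: 0 }, ());
    stat_kvs.insert(DatanodeStatKey { node_id: 1 }, ());

    let mut lease_kvs = HashMap::new();
    lease_kvs.insert(DatanodeLeaseKey { node_id: 1 }, ());

    let alive = filter_out_expired_datanode(stat_kvs, &lease_kvs);
    assert_eq!(1, alive.len());
    assert!(alive.contains_key(&DatanodeStatKey { node_id: 1 }));
}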
@@ -20,7 +20,7 @@ use snafu::ensure;
 use crate::error::{NoEnoughAvailableNodeSnafu, Result};
 use crate::lease;
 use crate::metasrv::{SelectTarget, SelectorContext};
-use crate::selector::{Namespace, Selector, SelectorOptions};
+use crate::selector::{Selector, SelectorOptions};
 
 /// Round-robin selector that returns the next peer in the list in sequence.
 /// Datanodes are ordered by their node_id.
@@ -53,7 +53,6 @@ impl RoundRobinSelector {
 
     async fn get_peers(
         &self,
-        ns: Namespace,
         min_required_items: usize,
         ctx: &SelectorContext,
     ) -> Result<Vec<Peer>> {
@@ -61,8 +60,7 @@ impl RoundRobinSelector {
             SelectTarget::Datanode => {
                 // 1. get alive datanodes.
                 let lease_kvs =
-                    lease::alive_datanodes(ns, &ctx.meta_peer_client, ctx.datanode_lease_secs)
-                        .await?;
+                    lease::alive_datanodes(&ctx.meta_peer_client, ctx.datanode_lease_secs).await?;
 
                 // 2. map into peers
                 lease_kvs
@@ -73,8 +71,7 @@ impl RoundRobinSelector {
             SelectTarget::Flownode => {
                 // 1. get alive flownodes.
                 let lease_kvs =
-                    lease::alive_flownodes(ns, &ctx.meta_peer_client, ctx.flownode_lease_secs)
-                        .await?;
+                    lease::alive_flownodes(&ctx.meta_peer_client, ctx.flownode_lease_secs).await?;
 
                 // 2. map into peers
                 lease_kvs
@@ -105,13 +102,8 @@ impl Selector for RoundRobinSelector {
     type Context = SelectorContext;
     type Output = Vec<Peer>;
 
-    async fn select(
-        &self,
-        ns: Namespace,
-        ctx: &Self::Context,
-        opts: SelectorOptions,
-    ) -> Result<Vec<Peer>> {
-        let peers = self.get_peers(ns, opts.min_required_items, ctx).await?;
+    async fn select(&self, ctx: &Self::Context, opts: SelectorOptions) -> Result<Vec<Peer>> {
+        let peers = self.get_peers(opts.min_required_items, ctx).await?;
         // choose peers
         let mut selected = Vec::with_capacity(opts.min_required_items);
         for _ in 0..opts.min_required_items {
@@ -135,8 +127,6 @@ mod test {
     async fn test_round_robin_selector() {
         let selector = RoundRobinSelector::default();
         let ctx = create_selector_context();
-        let ns = 0;
-
         // add three nodes
         let peer1 = Peer {
             id: 2,
@@ -151,11 +141,10 @@ mod test {
             addr: "node3".to_string(),
         };
         let peers = vec![peer1.clone(), peer2.clone(), peer3.clone()];
-        put_datanodes(ns, &ctx.meta_peer_client, peers).await;
+        put_datanodes(&ctx.meta_peer_client, peers).await;
 
         let peers = selector
             .select(
-                ns,
                 &ctx,
                 SelectorOptions {
                     min_required_items: 4,
@@ -172,7 +161,6 @@ mod test {
 
         let peers = selector
             .select(
-                ns,
                 &ctx,
                 SelectorOptions {
                     min_required_items: 2,
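A minimal sketch of the round-robin picking idea (a counter taken modulo the candidate count); per its doc comment, the real RoundRobinSelector additionally orders datanodes by node_id, and judging by the NoEnoughAvailableNodeSnafu import it reports an error rather than returning None when no peers are available:

// Hedged sketch: std-only round-robin over a slice of candidates.
use std::sync::atomic::{AtomicUsize, Ordering};

struct RoundRobin {
    counter: AtomicUsize,
}

impl RoundRobin {
    fn next<'a, T>(&self, items: &'a [T]) -> Option<&'a T> {
        if items.is_empty() {
            return None;
        }
        // Wrap around once the counter passes the end of the list.
        let i = self.counter.fetch_add(1, Ordering::Relaxed) % items.len();
        items.get(i)
    }
}

fn main() {
    let rr = RoundRobin { counter: AtomicUsize::new(0) };
    let peers = ["node1", "node2", "node3"];
    let picked: Vec<_> = (0..4).map(|_| *rr.next(&peers).unwrap()).collect();
    assert_eq!(picked, vec!["node1", "node2", "node3", "node1"]);
}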
@@ -22,7 +22,7 @@ use rand::prelude::SliceRandom;
 use crate::cluster::MetaPeerClientBuilder;
 use crate::error::Result;
 use crate::metasrv::SelectorContext;
-use crate::selector::{Namespace, Selector, SelectorOptions};
+use crate::selector::{Selector, SelectorOptions};
 
 /// Returns [SelectorContext] for test purpose.
 pub fn new_test_selector_context() -> SelectorContext {
@@ -60,12 +60,7 @@ impl Selector for RandomNodeSelector {
     type Context = SelectorContext;
     type Output = Vec<Peer>;
 
-    async fn select(
-        &self,
-        _ns: Namespace,
-        _ctx: &Self::Context,
-        _opts: SelectorOptions,
-    ) -> Result<Self::Output> {
+    async fn select(&self, _ctx: &Self::Context, _opts: SelectorOptions) -> Result<Self::Output> {
         let mut rng = rand::thread_rng();
         let mut nodes = self.nodes.clone();
         nodes.shuffle(&mut rng);
@@ -104,26 +104,17 @@ mod tests {
     #[test]
     fn test_weight_compute() {
         let mut stat_kvs: HashMap<DatanodeStatKey, DatanodeStatValue> = HashMap::default();
-        let stat_key = DatanodeStatKey {
-            cluster_id: 1,
-            node_id: 1,
-        };
+        let stat_key = DatanodeStatKey { node_id: 1 };
         let stat_val = DatanodeStatValue {
             stats: vec![mock_stat_1()],
         };
         stat_kvs.insert(stat_key, stat_val);
-        let stat_key = DatanodeStatKey {
-            cluster_id: 1,
-            node_id: 2,
-        };
+        let stat_key = DatanodeStatKey { node_id: 2 };
         let stat_val = DatanodeStatValue {
             stats: vec![mock_stat_2()],
         };
         stat_kvs.insert(stat_key, stat_val);
-        let stat_key = DatanodeStatKey {
-            cluster_id: 1,
-            node_id: 3,
-        };
+        let stat_key = DatanodeStatKey { node_id: 3 };
         let stat_val = DatanodeStatValue {
             stats: vec![mock_stat_3()],
         };
@@ -22,7 +22,7 @@ use crate::cluster::MetaPeerClientRef;
 use crate::error::{self, Result};
 use crate::key::{DatanodeLeaseKey, LeaseValue};
 use crate::lease;
-use crate::service::admin::{util, HttpHandler};
+use crate::service::admin::HttpHandler;
 
 pub struct NodeLeaseHandler {
     pub meta_peer_client: MetaPeerClientRef,
@@ -34,11 +34,9 @@ impl HttpHandler for NodeLeaseHandler {
         &self,
         _: &str,
         _: http::Method,
-        params: &HashMap<String, String>,
+        _: &HashMap<String, String>,
     ) -> Result<http::Response<String>> {
-        let cluster_id = util::extract_cluster_id(params)?;
-
-        let leases = lease::alive_datanodes(cluster_id, &self.meta_peer_client, u64::MAX).await?;
+        let leases = lease::alive_datanodes(&self.meta_peer_client, u64::MAX).await?;
         let leases = leases
             .into_iter()
             .map(|(k, v)| HumanLease {
Some files were not shown because too many files have changed in this diff.