Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-23 06:30:05 +00:00
Compare commits: `async_deco` ... `feat/serie` (99 commits; author and date columns were not captured)

f3a02effa7, 52f9fc25ba, 214a16565a, 21790a607e, b33d8c1bad, 916e1c2d9e, 96ba00d175, 7173401732, 17c797a6d0, c44ba1aa69,
843d33f9d0, b74e2a7d9b, 4a79c1527d, b7a6ff9cc3, 609e228852, c16bae32c4, ee4fe9d273, 6e6e335a81, 981d51785b, cf1eda28aa,
cf1440fc32, 21a209f7ba, 917510ffd0, 7b48ef1e97, ac0f9ab575, f2907bb009, 1695919ee7, eab702cc02, dd63068df6, f73b61e767,
2acecd3620, f797de3497, d53afa849d, 3aebfc1716, dbb79c9671, 054056fcbb, aa486db8b7, 4ef9afd8d8, f9221e9e66, 6c26fe9c80,
33c9fb737c, 68ce796771, d701c18150, d3a60d8821, 5d688c6565, 41aee1f1b7, c5b55fd8cf, 8051dbbc31, 2d3192984d, bef45ed0e8,
a9e990768d, 7e1ba49d3d, 737558ef53, dbc25dd8da, 76a58a07e1, c2ba7fb16c, 09ef24fd75, 9b7b012620, 898e0bd828, 2b4ed43692,
8f2ae4e136, 0cd219a5d2, 2b2ea5bf72, e107bd5529, a31f0e255b, 40b52f3b13, f13a43647a, 7bcb01d269, e81213728b, d88482b996,
3b547d9d13, 278553fc3f, a36901a653, c4ac242c69, 9f9307de73, c77ce958a3, 5ad2d8b3b8, 2724c3c142, 4eb0771afe, a0739a96e4,
77ccf1eac8, 1dc4a196bf, 2431cd3bdf, cd730e0486, a19441bed8, 162e3b8620, 83642dab87, 46070958c9, eea8b1c730, 1ab4ddab8d,
9e63018198, 594bec8c36, 1586732d20, 16fddd97a7, 2260782c12, 09dacc8e9b, dec439db2b, dc76571166, 3e17f8c426
@@ -47,7 +47,6 @@ runs:
shell: pwsh
run: make test sqlness-test
env:
RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
RUST_BACKTRACE: 1
SQLNESS_OPTS: "--preserve-state"

@@ -8,7 +8,7 @@ inputs:
default: 2
description: "Number of Datanode replicas"
meta-replicas:
default: 1
default: 2
description: "Number of Metasrv replicas"
image-registry:
default: "docker.io"
.github/workflows/develop.yml (vendored, 5 changes)
@@ -576,9 +576,12 @@ jobs:
- name: "Remote WAL"
opts: "-w kafka -k 127.0.0.1:9092"
kafka: true
- name: "Pg Kvbackend"
- name: "PostgreSQL KvBackend"
opts: "--setup-pg"
kafka: false
- name: "MySQL Kvbackend"
opts: "--setup-mysql"
kafka: false
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
.github/workflows/nightly-ci.yml (vendored, 1 change)
@@ -107,7 +107,6 @@ jobs:
CARGO_BUILD_RUSTFLAGS: "-C linker=lld-link"
RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0
RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
.github/workflows/release.yml (vendored, 2 changes)
@@ -91,7 +91,7 @@ env:
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nightly-20230313;
NIGHTLY_RELEASE_PREFIX: nightly
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
NEXT_RELEASE_VERSION: v0.13.0
NEXT_RELEASE_VERSION: v0.14.0

jobs:
allocate-runners:
.gitignore (vendored, 3 changes)
@@ -54,3 +54,6 @@ tests-fuzz/corpus/
# Nix
.direnv
.envrc

## default data home
greptimedb_data

Cargo.lock (generated, 1291 changes): file diff suppressed because it is too large.
Cargo.toml (56 changes)
@@ -29,6 +29,7 @@ members = [
|
||||
"src/common/query",
|
||||
"src/common/recordbatch",
|
||||
"src/common/runtime",
|
||||
"src/common/session",
|
||||
"src/common/substrait",
|
||||
"src/common/telemetry",
|
||||
"src/common/test-util",
|
||||
@@ -67,7 +68,7 @@ members = [
|
||||
resolver = "2"
|
||||
|
||||
[workspace.package]
|
||||
version = "0.13.0"
|
||||
version = "0.14.0"
|
||||
edition = "2021"
|
||||
license = "Apache-2.0"
|
||||
|
||||
@@ -88,7 +89,7 @@ rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
|
||||
#
|
||||
# See for more details: https://github.com/rust-lang/cargo/issues/11329
|
||||
ahash = { version = "0.8", features = ["compile-time-rng"] }
|
||||
aquamarine = "0.3"
|
||||
aquamarine = "0.6"
|
||||
arrow = { version = "53.0.0", features = ["prettyprint"] }
|
||||
arrow-array = { version = "53.0.0", default-features = false, features = ["chrono-tz"] }
|
||||
arrow-flight = "53.0"
|
||||
@@ -99,9 +100,9 @@ async-trait = "0.1"
|
||||
# Remember to update axum-extra, axum-macros when updating axum
|
||||
axum = "0.8"
|
||||
axum-extra = "0.10"
|
||||
axum-macros = "0.4"
|
||||
axum-macros = "0.5"
|
||||
backon = "1"
|
||||
base64 = "0.21"
|
||||
base64 = "0.22"
|
||||
bigdecimal = "0.4.2"
|
||||
bitflags = "2.4.1"
|
||||
bytemuck = "1.12"
|
||||
@@ -111,7 +112,7 @@ chrono-tz = "0.10.1"
|
||||
clap = { version = "4.4", features = ["derive"] }
|
||||
config = "0.13.0"
|
||||
crossbeam-utils = "0.8"
|
||||
dashmap = "5.4"
|
||||
dashmap = "6.1"
|
||||
datafusion = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
|
||||
datafusion-common = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
|
||||
datafusion-expr = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
|
||||
@@ -121,32 +122,31 @@ datafusion-physical-expr = { git = "https://github.com/apache/datafusion.git", r
|
||||
datafusion-physical-plan = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
|
||||
datafusion-sql = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
|
||||
datafusion-substrait = { git = "https://github.com/apache/datafusion.git", rev = "2464703c84c400a09cc59277018813f0e797bb4e" }
|
||||
deadpool = "0.10"
|
||||
deadpool-postgres = "0.12"
|
||||
derive_builder = "0.12"
|
||||
deadpool = "0.12"
|
||||
deadpool-postgres = "0.14"
|
||||
derive_builder = "0.20"
|
||||
dotenv = "0.15"
|
||||
etcd-client = "0.14"
|
||||
flate2 = { version = "1.1.0", default-features = false, features = ["zlib-rs"] }
|
||||
fst = "0.4.7"
|
||||
futures = "0.3"
|
||||
futures-util = "0.3"
|
||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "c5419bbd20cb42e568ec325a4d71a3c94cc327e1" }
|
||||
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "dd4a1996982534636734674db66e44464b0c0d83" }
|
||||
hex = "0.4"
|
||||
http = "1"
|
||||
humantime = "2.1"
|
||||
humantime-serde = "1.1"
|
||||
hyper = "1.1"
|
||||
hyper-util = "0.1"
|
||||
itertools = "0.10"
|
||||
itertools = "0.14"
|
||||
jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "8c8d2fc294a39f3ff08909d60f718639cfba3875", default-features = false }
|
||||
lazy_static = "1.4"
|
||||
local-ip-address = "0.6"
|
||||
loki-proto = { git = "https://github.com/GreptimeTeam/loki-proto.git", rev = "1434ecf23a2654025d86188fb5205e7a74b225d3" }
|
||||
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "5618e779cf2bb4755b499c630fba4c35e91898cb" }
|
||||
mockall = "0.11.4"
|
||||
mockall = "0.13"
|
||||
moka = "0.12"
|
||||
nalgebra = "0.33"
|
||||
notify = "6.1"
|
||||
notify = "8.0"
|
||||
num_cpus = "1.16"
|
||||
once_cell = "1.18"
|
||||
opentelemetry-proto = { version = "0.27", features = [
|
||||
@@ -164,8 +164,8 @@ prometheus = { version = "0.13.3", features = ["process"] }
|
||||
promql-parser = { version = "0.5", features = ["ser"] }
|
||||
prost = "0.13"
|
||||
raft-engine = { version = "0.4.1", default-features = false }
|
||||
rand = "0.8"
|
||||
ratelimit = "0.9"
|
||||
rand = "0.9"
|
||||
ratelimit = "0.10"
|
||||
regex = "1.8"
|
||||
regex-automata = "0.4"
|
||||
reqwest = { version = "0.12", default-features = false, features = [
|
||||
@@ -177,33 +177,37 @@ reqwest = { version = "0.12", default-features = false, features = [
|
||||
rskafka = { git = "https://github.com/influxdata/rskafka.git", rev = "75535b5ad9bae4a5dbb582c82e44dfd81ec10105", features = [
|
||||
"transport-tls",
|
||||
] }
|
||||
rstest = "0.21"
|
||||
rstest = "0.25"
|
||||
rstest_reuse = "0.7"
|
||||
rust_decimal = "1.33"
|
||||
rustc-hash = "2.0"
|
||||
rustls = { version = "0.23.20", default-features = false } # override by patch, see [patch.crates-io]
|
||||
# It is worth noting that we should try to avoid using aws-lc-rs until it can be compiled on various platforms.
|
||||
rustls = { version = "0.23.25", default-features = false }
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = { version = "1.0", features = ["float_roundtrip"] }
|
||||
serde_with = "3"
|
||||
shadow-rs = "0.38"
|
||||
shadow-rs = "1.1"
|
||||
simd-json = "0.15"
|
||||
similar-asserts = "1.6.0"
|
||||
smallvec = { version = "1", features = ["serde"] }
|
||||
snafu = "0.8"
|
||||
sqlx = { version = "0.8", features = [
|
||||
"runtime-tokio-rustls",
|
||||
"mysql",
|
||||
"postgres",
|
||||
"chrono",
|
||||
] }
|
||||
sysinfo = "0.30"
|
||||
sysinfo = "0.33"
|
||||
# on branch v0.52.x
|
||||
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "71dd86058d2af97b9925093d40c4e03360403170", features = [
|
||||
"visitor",
|
||||
"serde",
|
||||
] } # on branch v0.44.x
|
||||
strum = { version = "0.25", features = ["derive"] }
|
||||
strum = { version = "0.27", features = ["derive"] }
|
||||
tempfile = "3"
|
||||
tokio = { version = "1.40", features = ["full"] }
|
||||
tokio-postgres = "0.7"
|
||||
tokio-rustls = { version = "0.26.0", default-features = false } # override by patch, see [patch.crates-io]
|
||||
tokio-rustls = { version = "0.26.2", default-features = false }
|
||||
tokio-stream = "0.1"
|
||||
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
|
||||
toml = "0.8.8"
|
||||
@@ -246,6 +250,7 @@ common-procedure-test = { path = "src/common/procedure-test" }
|
||||
common-query = { path = "src/common/query" }
|
||||
common-recordbatch = { path = "src/common/recordbatch" }
|
||||
common-runtime = { path = "src/common/runtime" }
|
||||
common-session = { path = "src/common/session" }
|
||||
common-telemetry = { path = "src/common/telemetry" }
|
||||
common-test-util = { path = "src/common/test-util" }
|
||||
common-time = { path = "src/common/time" }
|
||||
@@ -278,15 +283,6 @@ store-api = { path = "src/store-api" }
|
||||
substrait = { path = "src/common/substrait" }
|
||||
table = { path = "src/table" }
|
||||
|
||||
[patch.crates-io]
|
||||
# change all rustls dependencies to use our fork to default to `ring` to make it "just work"
|
||||
hyper-rustls = { git = "https://github.com/GreptimeTeam/hyper-rustls", rev = "a951e03" } # version = "0.27.5" with ring patch
|
||||
rustls = { git = "https://github.com/GreptimeTeam/rustls", rev = "34fd0c6" } # version = "0.23.20" with ring patch
|
||||
tokio-rustls = { git = "https://github.com/GreptimeTeam/tokio-rustls", rev = "4604ca6" } # version = "0.26.0" with ring patch
|
||||
# This is commented, since we are not using aws-lc-sys, if we need to use it, we need to uncomment this line or use a release after this commit, or it wouldn't compile with gcc < 8.1
|
||||
# see https://github.com/aws/aws-lc-rs/pull/526
|
||||
# aws-lc-sys = { git ="https://github.com/aws/aws-lc-rs", rev = "556558441e3494af4b156ae95ebc07ebc2fd38aa" }
|
||||
|
||||
[workspace.dependencies.meter-macros]
|
||||
git = "https://github.com/GreptimeTeam/greptime-meter.git"
|
||||
rev = "5618e779cf2bb4755b499c630fba4c35e91898cb"
|
||||
|
||||
README.md (14 changes)
@@ -6,7 +6,7 @@
</picture>
</p>

<h2 align="center">Unified & Cost-Effective Time Series Database for Metrics, Logs, and Events</h2>
<h2 align="center">Unified & Cost-Effective Observability Database for Metrics, Logs, and Events</h2>

<div align="center">
<h3 align="center">
@@ -62,15 +62,19 @@

## Introduction

**GreptimeDB** is an open-source unified & cost-effective time-series database for **Metrics**, **Logs**, and **Events** (also **Traces** in plan). You can gain real-time insights from Edge to Cloud at Any Scale.
**GreptimeDB** is an open-source unified & cost-effective observability database for **Metrics**, **Logs**, and **Events** (also **Traces** in plan). You can gain real-time insights from Edge to Cloud at Any Scale.

## News

**[GreptimeDB tops JSONBench's billion-record cold run test!](https://greptime.com/blogs/2025-03-18-jsonbench-greptimedb-performance)**

## Why GreptimeDB

Our core developers have been building time-series data platforms for years. Based on our best practices, GreptimeDB was born to give you:
Our core developers have been building observability data platforms for years. Based on our best practices, GreptimeDB was born to give you:

* **Unified Processing of Metrics, Logs, and Events**

GreptimeDB unifies time series data processing by treating all data - whether metrics, logs, or events - as timestamped events with context. Users can analyze this data using either [SQL](https://docs.greptime.com/user-guide/query-data/sql) or [PromQL](https://docs.greptime.com/user-guide/query-data/promql) and leverage stream processing ([Flow](https://docs.greptime.com/user-guide/flow-computation/overview)) to enable continuous aggregation. [Read more](https://docs.greptime.com/user-guide/concepts/data-model).
GreptimeDB unifies observability data processing by treating all data - whether metrics, logs, or events - as timestamped events with context. Users can analyze this data using either [SQL](https://docs.greptime.com/user-guide/query-data/sql) or [PromQL](https://docs.greptime.com/user-guide/query-data/promql) and leverage stream processing ([Flow](https://docs.greptime.com/user-guide/flow-computation/overview)) to enable continuous aggregation. [Read more](https://docs.greptime.com/user-guide/concepts/data-model).

* **Cloud-native Distributed Database**

@@ -112,7 +116,7 @@ Start a GreptimeDB container with:

```shell
docker run -p 127.0.0.1:4000-4003:4000-4003 \
-v "$(pwd)/greptimedb:/tmp/greptimedb" \
-v "$(pwd)/greptimedb:/greptimedb_data" \
--name greptime --rm \
greptime/greptimedb:latest standalone start \
--http-addr 0.0.0.0:4000 \
@@ -12,7 +12,6 @@
|
||||
|
||||
| Key | Type | Default | Descriptions |
|
||||
| --- | -----| ------- | ----------- |
|
||||
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
|
||||
| `default_timezone` | String | Unset | The default timezone of the server. |
|
||||
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
|
||||
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
|
||||
@@ -24,7 +23,7 @@
|
||||
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
|
||||
| `http` | -- | -- | The HTTP server options. |
|
||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||
| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
|
||||
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
|
||||
@@ -86,10 +85,6 @@
|
||||
| `wal.create_topic_timeout` | String | `30s` | Above which a topic creation operation will be cancelled.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.backoff_base` | Integer | `2` | The exponential backoff rate, i.e. next backoff = base * current backoff.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
|
||||
| `metadata_store` | -- | -- | Metadata storage options. |
|
||||
| `metadata_store.file_size` | String | `64MB` | The size of the metadata store log file. |
|
||||
@@ -98,10 +93,11 @@
|
||||
| `procedure` | -- | -- | Procedure storage options. |
|
||||
| `procedure.max_retry_times` | Integer | `3` | Procedure max retry time. |
|
||||
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
|
||||
| `procedure.max_running_procedures` | Integer | `128` | Max running procedures.<br/>The maximum number of procedures that can be running at the same time.<br/>If the number of running procedures exceeds this limit, the procedure will be rejected. |
|
||||
| `flow` | -- | -- | flow engine options. |
|
||||
| `flow.num_workers` | Integer | `0` | The number of flow workers in the flownode.<br/>If not set (or set to 0), the number of CPU cores divided by 2 is used. |
|
||||
| `storage` | -- | -- | The data storage options. |
|
||||
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
|
||||
| `storage.data_home` | String | `./greptimedb_data/` | The working home directory. |
|
||||
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
||||
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
|
||||
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
|
||||
@@ -181,7 +177,7 @@
|
||||
| `region_engine.metric` | -- | -- | Metric engine options. |
|
||||
| `region_engine.metric.experimental_sparse_primary_key_encoding` | Bool | `false` | Whether to enable the experimental sparse primary key encoding. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
@@ -222,7 +218,7 @@
|
||||
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
|
||||
| `http` | -- | -- | The HTTP server options. |
|
||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||
| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
|
||||
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
|
||||
@@ -279,7 +275,7 @@
|
||||
| `datanode.client.connect_timeout` | String | `10s` | -- |
|
||||
| `datanode.client.tcp_nodelay` | Bool | `true` | -- |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
@@ -308,7 +304,7 @@
|
||||
|
||||
| Key | Type | Default | Descriptions |
|
||||
| --- | -----| ------- | ----------- |
|
||||
| `data_home` | String | `/tmp/metasrv/` | The working home directory. |
|
||||
| `data_home` | String | `./greptimedb_data/metasrv/` | The working home directory. |
|
||||
| `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
|
||||
| `server_addr` | String | `127.0.0.1:3002` | The communication server address for the frontend and datanode to connect to metasrv.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `bind_addr`. |
|
||||
| `store_addrs` | Array | -- | Store server address default to etcd store.<br/>For postgres store, the format is:<br/>"password=password dbname=postgres user=postgres host=localhost port=5432"<br/>For etcd store, the format is:<br/>"127.0.0.1:2379" |
|
||||
@@ -328,6 +324,7 @@
|
||||
| `procedure.max_retry_times` | Integer | `12` | Procedure max retry time. |
|
||||
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
|
||||
| `procedure.max_metadata_value_size` | String | `1500KiB` | Auto-split large values.<br/>GreptimeDB procedures use etcd as the default metadata storage backend.<br/>The maximum size of any etcd request is 1.5 MiB.<br/>1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of key).<br/>Comment out `max_metadata_value_size` to disable splitting large values (no limit). |
|
||||
| `procedure.max_running_procedures` | Integer | `128` | Max running procedures.<br/>The maximum number of procedures that can be running at the same time.<br/>If the number of running procedures exceeds this limit, the procedure will be rejected. |
|
||||
| `failure_detector` | -- | -- | -- |
|
||||
| `failure_detector.threshold` | Float | `8.0` | The threshold value used by the failure detector to determine failure conditions. |
|
||||
| `failure_detector.min_std_deviation` | String | `100ms` | The minimum standard deviation of the heartbeat intervals, used to calculate acceptable variations. |
|
||||
@@ -347,12 +344,8 @@
|
||||
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>Only accepts strings that match the following regular expression pattern:<br/>[a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*<br/>i.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1. |
|
||||
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition. |
|
||||
| `wal.create_topic_timeout` | String | `30s` | Above which a topic creation operation will be cancelled. |
|
||||
| `wal.backoff_init` | String | `500ms` | The initial backoff for kafka clients. |
|
||||
| `wal.backoff_max` | String | `10s` | The maximum backoff for kafka clients. |
|
||||
| `wal.backoff_base` | Integer | `2` | Exponential backoff rate, i.e. next backoff = base * current backoff. |
|
||||
| `wal.backoff_deadline` | String | `5mins` | Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
@@ -381,7 +374,6 @@
|
||||
|
||||
| Key | Type | Default | Descriptions |
|
||||
| --- | -----| ------- | ----------- |
|
||||
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
|
||||
| `node_id` | Integer | Unset | The datanode identifier and should be unique in the cluster. |
|
||||
| `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode start if it can't receive leases in the heartbeat from metasrv. |
|
||||
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
|
||||
@@ -390,7 +382,7 @@
|
||||
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
|
||||
| `http` | -- | -- | The HTTP server options. |
|
||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||
| `grpc` | -- | -- | The gRPC server options. |
|
||||
| `grpc.bind_addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
|
||||
@@ -434,15 +426,11 @@
|
||||
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.backoff_base` | Integer | `2` | The exponential backoff rate, i.e. next backoff = base * current backoff.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.create_index` | Bool | `true` | Whether to enable WAL index creation.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.dump_index_interval` | String | `60s` | The interval for dumping WAL indexes.<br/>**It's only used when the provider is `kafka`**. |
|
||||
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
|
||||
| `storage` | -- | -- | The data storage options. |
|
||||
| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
|
||||
| `storage.data_home` | String | `./greptimedb_data/` | The working home directory. |
|
||||
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
||||
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
|
||||
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
|
||||
@@ -522,7 +510,7 @@
|
||||
| `region_engine.metric` | -- | -- | Metric engine options. |
|
||||
| `region_engine.metric.experimental_sparse_primary_key_encoding` | Bool | `false` | Whether to enable the experimental sparse primary key encoding. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
@@ -551,7 +539,6 @@
|
||||
|
||||
| Key | Type | Default | Descriptions |
|
||||
| --- | -----| ------- | ----------- |
|
||||
| `mode` | String | `distributed` | The running mode of the flownode. It can be `standalone` or `distributed`. |
|
||||
| `node_id` | Integer | Unset | The flownode identifier and should be unique in the cluster. |
|
||||
| `flow` | -- | -- | flow engine options. |
|
||||
| `flow.num_workers` | Integer | `0` | The number of flow workers in the flownode.<br/>If not set (or set to 0), the number of CPU cores divided by 2 is used. |
|
||||
@@ -563,7 +550,7 @@
|
||||
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
|
||||
| `http` | -- | -- | The HTTP server options. |
|
||||
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
|
||||
| `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
|
||||
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
|
||||
| `meta_client` | -- | -- | The metasrv client options. |
|
||||
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
|
||||
@@ -579,7 +566,7 @@
|
||||
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
|
||||
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
|
||||
| `logging` | -- | -- | The logging options. |
|
||||
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
|
||||
@@ -1,6 +1,3 @@
|
||||
## The running mode of the datanode. It can be `standalone` or `distributed`.
|
||||
mode = "standalone"
|
||||
|
||||
## The datanode identifier and should be unique in the cluster.
|
||||
## @toml2docs:none-default
|
||||
node_id = 42
|
||||
@@ -27,7 +24,7 @@ max_concurrent_queries = 0
|
||||
## The address to bind the HTTP server.
|
||||
addr = "127.0.0.1:4000"
|
||||
## HTTP request timeout. Set to 0 to disable timeout.
|
||||
timeout = "30s"
|
||||
timeout = "0s"
|
||||
## HTTP request body limit.
|
||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||
## Set to 0 to disable limit.
|
||||
@@ -119,7 +116,7 @@ provider = "raft_engine"
|
||||
## The directory to store the WAL files.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
## @toml2docs:none-default
|
||||
dir = "/tmp/greptimedb/wal"
|
||||
dir = "./greptimedb_data/wal"
|
||||
|
||||
## The size of the WAL segment file.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
@@ -169,22 +166,6 @@ max_batch_bytes = "1MB"
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
consumer_wait_timeout = "100ms"
|
||||
|
||||
## The initial backoff delay.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
backoff_init = "500ms"
|
||||
|
||||
## The maximum backoff delay.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
backoff_max = "10s"
|
||||
|
||||
## The exponential backoff rate, i.e. next backoff = base * current backoff.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
backoff_base = 2
|
||||
|
||||
## The deadline of retries.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
backoff_deadline = "5mins"
|
||||
|
||||
## Whether to enable WAL index creation.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
create_index = true
|
||||
@@ -265,7 +246,7 @@ overwrite_entry_start_id = false
|
||||
## The data storage options.
|
||||
[storage]
|
||||
## The working home directory.
|
||||
data_home = "/tmp/greptimedb/"
|
||||
data_home = "./greptimedb_data/"
|
||||
|
||||
## The storage type used to store the data.
|
||||
## - `File`: the data is stored in the local file system.
|
||||
@@ -618,7 +599,7 @@ experimental_sparse_primary_key_encoding = false
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
dir = "./greptimedb_data/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
## @toml2docs:none-default
|
||||
|
||||
@@ -1,6 +1,3 @@
|
||||
## The running mode of the flownode. It can be `standalone` or `distributed`.
|
||||
mode = "distributed"
|
||||
|
||||
## The flownode identifier and should be unique in the cluster.
|
||||
## @toml2docs:none-default
|
||||
node_id = 14
|
||||
@@ -30,7 +27,7 @@ max_send_message_size = "512MB"
|
||||
## The address to bind the HTTP server.
|
||||
addr = "127.0.0.1:4000"
|
||||
## HTTP request timeout. Set to 0 to disable timeout.
|
||||
timeout = "30s"
|
||||
timeout = "0s"
|
||||
## HTTP request body limit.
|
||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||
## Set to 0 to disable limit.
|
||||
@@ -76,7 +73,7 @@ retry_interval = "3s"
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
dir = "./greptimedb_data/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
## @toml2docs:none-default
|
||||
@@ -121,4 +118,3 @@ sample_ratio = 1.0
|
||||
## The tokio console address.
|
||||
## @toml2docs:none-default
|
||||
#+ tokio_console_addr = "127.0.0.1"
|
||||
|
||||
|
||||
@@ -26,7 +26,7 @@ retry_interval = "3s"
|
||||
## The address to bind the HTTP server.
|
||||
addr = "127.0.0.1:4000"
|
||||
## HTTP request timeout. Set to 0 to disable timeout.
|
||||
timeout = "30s"
|
||||
timeout = "0s"
|
||||
## HTTP request body limit.
|
||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||
## Set to 0 to disable limit.
|
||||
@@ -189,7 +189,7 @@ tcp_nodelay = true
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
dir = "./greptimedb_data/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
## @toml2docs:none-default
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
## The working home directory.
|
||||
data_home = "/tmp/metasrv/"
|
||||
data_home = "./greptimedb_data/metasrv/"
|
||||
|
||||
## The bind address of metasrv.
|
||||
bind_addr = "127.0.0.1:3002"
|
||||
@@ -79,6 +79,11 @@ retry_delay = "500ms"
|
||||
## Comment out `max_metadata_value_size` to disable splitting large values (no limit).
|
||||
max_metadata_value_size = "1500KiB"
|
||||
|
||||
## Max running procedures.
|
||||
## The maximum number of procedures that can be running at the same time.
|
||||
## If the number of running procedures exceeds this limit, the procedure will be rejected.
|
||||
max_running_procedures = 128
|
||||
|
||||
# Failure detectors options.
|
||||
[failure_detector]
|
||||
|
||||
@@ -144,17 +149,6 @@ replication_factor = 1
|
||||
|
||||
## Above which a topic creation operation will be cancelled.
|
||||
create_topic_timeout = "30s"
|
||||
## The initial backoff for kafka clients.
|
||||
backoff_init = "500ms"
|
||||
|
||||
## The maximum backoff for kafka clients.
|
||||
backoff_max = "10s"
|
||||
|
||||
## Exponential backoff rate, i.e. next backoff = base * current backoff.
|
||||
backoff_base = 2
|
||||
|
||||
## Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate.
|
||||
backoff_deadline = "5mins"
|
||||
|
||||
# The Kafka SASL configuration.
|
||||
# **It's only used when the provider is `kafka`**.
|
||||
@@ -177,7 +171,7 @@ backoff_deadline = "5mins"
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
dir = "./greptimedb_data/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
## @toml2docs:none-default
|
||||
|
||||
@@ -1,6 +1,3 @@
|
||||
## The running mode of the datanode. It can be `standalone` or `distributed`.
|
||||
mode = "standalone"
|
||||
|
||||
## The default timezone of the server.
|
||||
## @toml2docs:none-default
|
||||
default_timezone = "UTC"
|
||||
@@ -34,7 +31,7 @@ max_concurrent_queries = 0
|
||||
## The address to bind the HTTP server.
|
||||
addr = "127.0.0.1:4000"
|
||||
## HTTP request timeout. Set to 0 to disable timeout.
|
||||
timeout = "30s"
|
||||
timeout = "0s"
|
||||
## HTTP request body limit.
|
||||
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
|
||||
## Set to 0 to disable limit.
|
||||
@@ -164,7 +161,7 @@ provider = "raft_engine"
|
||||
## The directory to store the WAL files.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
## @toml2docs:none-default
|
||||
dir = "/tmp/greptimedb/wal"
|
||||
dir = "./greptimedb_data/wal"
|
||||
|
||||
## The size of the WAL segment file.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
@@ -242,22 +239,6 @@ max_batch_bytes = "1MB"
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
consumer_wait_timeout = "100ms"
|
||||
|
||||
## The initial backoff delay.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
backoff_init = "500ms"
|
||||
|
||||
## The maximum backoff delay.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
backoff_max = "10s"
|
||||
|
||||
## The exponential backoff rate, i.e. next backoff = base * current backoff.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
backoff_base = 2
|
||||
|
||||
## The deadline of retries.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
backoff_deadline = "5mins"
|
||||
|
||||
## Ignore missing entries during read WAL.
|
||||
## **It's only used when the provider is `kafka`**.
|
||||
##
|
||||
@@ -302,6 +283,10 @@ purge_interval = "1m"
|
||||
max_retry_times = 3
|
||||
## Initial retry delay of procedures, increases exponentially
|
||||
retry_delay = "500ms"
|
||||
## Max running procedures.
|
||||
## The maximum number of procedures that can be running at the same time.
|
||||
## If the number of running procedures exceeds this limit, the procedure will be rejected.
|
||||
max_running_procedures = 128
|
||||
|
||||
## flow engine options.
|
||||
[flow]
|
||||
@@ -352,7 +337,7 @@ retry_delay = "500ms"
|
||||
## The data storage options.
|
||||
[storage]
|
||||
## The working home directory.
|
||||
data_home = "/tmp/greptimedb/"
|
||||
data_home = "./greptimedb_data/"
|
||||
|
||||
## The storage type used to store the data.
|
||||
## - `File`: the data is stored in the local file system.
|
||||
@@ -705,7 +690,7 @@ experimental_sparse_primary_key_encoding = false
|
||||
## The logging options.
|
||||
[logging]
|
||||
## The directory to store the log files. If set to empty, logs will not be written to files.
|
||||
dir = "/tmp/greptimedb/logs"
|
||||
dir = "./greptimedb_data/logs"
|
||||
|
||||
## The log level. Can be `info`/`debug`/`warn`/`error`.
|
||||
## @toml2docs:none-default
|
||||
|
||||
@@ -25,7 +25,7 @@ services:
|
||||
- --initial-cluster-state=new
|
||||
- *etcd_initial_cluster_token
|
||||
volumes:
|
||||
- /tmp/greptimedb-cluster-docker-compose/etcd0:/var/lib/etcd
|
||||
- ./greptimedb-cluster-docker-compose/etcd0:/var/lib/etcd
|
||||
healthcheck:
|
||||
test: [ "CMD", "etcdctl", "--endpoints=http://etcd0:2379", "endpoint", "health" ]
|
||||
interval: 5s
|
||||
@@ -68,12 +68,13 @@ services:
|
||||
- datanode
|
||||
- start
|
||||
- --node-id=0
|
||||
- --data-home=/greptimedb_data
|
||||
- --rpc-bind-addr=0.0.0.0:3001
|
||||
- --rpc-server-addr=datanode0:3001
|
||||
- --metasrv-addrs=metasrv:3002
|
||||
- --http-addr=0.0.0.0:5000
|
||||
volumes:
|
||||
- /tmp/greptimedb-cluster-docker-compose/datanode0:/tmp/greptimedb
|
||||
- ./greptimedb-cluster-docker-compose/datanode0:/greptimedb_data
|
||||
healthcheck:
|
||||
test: [ "CMD", "curl", "-fv", "http://datanode0:5000/health" ]
|
||||
interval: 5s
|
||||
|
||||
@@ -3,7 +3,7 @@

This document introduces how to write fuzz tests in GreptimeDB.

## What is a fuzz test

A fuzz test is a tool that leverages deterministic random generation to assist in finding bugs. The goal of fuzz tests is to identify inputs generated by the fuzzer that cause system panics, crashes, or unexpected behaviors. We use [cargo-fuzz](https://github.com/rust-fuzz/cargo-fuzz) to run our fuzz test targets.

## Why we need them

- Find bugs by leveraging random generation

@@ -13,7 +13,7 @@ Fuzz test is tool that leverage deterministic random generation to assist in fin

All fuzz test-related resources are located in the `/tests-fuzz` directory.
There are two types of resources: (1) fundamental components and (2) test targets.

### Fundamental components

They are located in the `/tests-fuzz/src` directory. The fundamental components define how to generate SQLs (including dialects for different protocols) and validate execution results (e.g., column attribute validation), etc.

### Test targets

@@ -21,25 +21,25 @@ They are located in the `/tests-fuzz/targets` directory, with each file represen

Figure 1 illustrates how the fundamental components of the fuzz test provide the ability to generate random SQLs. It utilizes a Random Number Generator (Rng) to generate the Intermediate Representation (IR), then employs a DialectTranslator to produce specified dialects for different protocols. Finally, the fuzz tests send the generated SQL via the specified protocol and verify that the execution results meet expectations.
```
                    Rng
                     |
                     |
                     v
               ExprGenerator
                     |
                     |
                     v
    Intermediate representation (IR)
                     |
                     |
+--------------------+---------------------+
|                    |                     |
v                    v                     v
MySQLTranslator  PostgreSQLTranslator  OtherDialectTranslator
|                    |                     |
|                    |                     |
v                    v                     v
SQL(MySQL Dialect)  .....                 .....
|
|
v
```

@@ -133,4 +133,4 @@ fuzz_target!(|input: FuzzInput| {

```
cargo fuzz run <fuzz-target> --fuzz-dir tests-fuzz
```

For more details, please refer to this [document](/tests-fuzz/README.md).
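To make the flow above concrete, here is a minimal sketch of what a cargo-fuzz target following the Rng -> IR -> dialect pipeline might look like. `FuzzInput`, the generator, and the translator steps are hypothetical stand-ins for the real components in `/tests-fuzz/src`, not the actual API:

```rust
// Sketch only: a cargo-fuzz style target following the Rng -> IR -> dialect
// flow described above. Assumes the `libfuzzer-sys`, `arbitrary`, and `rand`
// crates as typically used by cargo-fuzz; the pipeline steps are placeholders.
#![no_main]

use libfuzzer_sys::fuzz_target;
use rand::{rngs::StdRng, SeedableRng};

#[derive(Debug, arbitrary::Arbitrary)]
struct FuzzInput {
    // Seed for the deterministic random generator.
    seed: u64,
}

fuzz_target!(|input: FuzzInput| {
    // Deterministic Rng seeded from the fuzzer-provided input.
    let _rng = StdRng::seed_from_u64(input.seed);

    // A real target would then, roughly:
    // 1. generate the intermediate representation (IR) of a statement with
    //    an expression generator driven by the Rng;
    // 2. translate the IR into a concrete dialect (e.g. MySQL);
    // 3. execute the SQL over the chosen protocol and validate the result
    //    against the IR.
});
```

A real target replaces the commented steps with the concrete generator and translator types from `/tests-fuzz/src` and asserts that the execution result matches the expectations encoded in the IR.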
docs/rfcs/2025-02-06-remote-wal-purge.md (new file, 77 lines)
@@ -0,0 +1,77 @@
---
Feature Name: Remote WAL Purge
Tracking Issue: https://github.com/GreptimeTeam/greptimedb/issues/5474
Date: 2025-02-06
Author: "Yuhan Wang <profsyb@gmail.com>"
---

# Summary

This RFC proposes a method for purging remote WAL in the database.

# Motivation

Currently only local WAL entries are purged when flushing; nothing purges the remote WAL.

# Details

```mermaid
sequenceDiagram
    Region0->>Kafka: Last entry id of the topic in use
    Region0->>WALPruner: Heartbeat with last entry id
    WALPruner->>+WALPruner: Time Loop
    WALPruner->>+ProcedureManager: Submit purge procedure
    ProcedureManager->>Region0: Flush request
    ProcedureManager->>Kafka: Prune WAL entries
    Region0->>Region0: Flush
```

## Steps

### Before purge

Before purging remote WAL, metasrv needs to know:

1. `last_entry_id` of each region.
2. `kafka_topic_last_entry_id`, the last entry id of the topic in use. It can be lazily updated and is needed when a region has an empty memtable.
3. The Kafka topic that each region uses.

These states are maintained through:
1. Heartbeat: the datanode sends `last_entry_id` to metasrv in its heartbeat. For regions with an empty memtable, `last_entry_id` should equal `kafka_topic_last_entry_id`.
2. Metasrv maintains a topic-region map to know which region uses which topic.

`kafka_topic_last_entry_id` is maintained by the region itself. A region updates the value after `k` heartbeats if its memtable is empty.
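A minimal sketch of the bookkeeping described above, using hypothetical type names (`RegionWalState`, `WalPruneState`); the actual metasrv structures may differ:

```rust
// Sketch only: hypothetical types illustrating the state described above,
// not the actual metasrv implementation.
use std::collections::HashMap;

type RegionId = u64;
type TopicName = String;
type EntryId = u64;

/// Reported by each region via heartbeat.
struct RegionWalState {
    /// Last WAL entry id whose data the region has flushed.
    last_entry_id: EntryId,
    /// Last entry id of the topic, reported when the memtable is empty.
    topic_last_entry_id: Option<EntryId>,
}

/// Kept by metasrv (the WALPruner) to drive the purge procedure.
struct WalPruneState {
    /// Which regions share which Kafka topic.
    topic_regions: HashMap<TopicName, Vec<RegionId>>,
    /// Latest heartbeat state per region.
    region_states: HashMap<RegionId, RegionWalState>,
    /// Persisted in the kvbackend after each purge.
    last_purged_entry_id: HashMap<TopicName, EntryId>,
}
```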

### Purge procedure

We can handle locking better by reusing the existing procedure framework; the flow is quite similar to the region migration procedure.

After a period of time, metasrv submits a purge procedure to the ProcedureManager. The purge applies to all topics.

The procedure is divided into the following stages:

1. Preparation:
    - Retrieve the `last_entry_id` of each region from the kvbackend.
    - Choose regions with a relatively small `last_entry_id` as candidate regions, which means we need to send a flush request to these regions.
2. Communication:
    - Send flush requests to the candidate regions.
3. Purge:
    - Choose the proper entry id to delete up to for each topic; it should be the smallest `last_entry_id - 1` among all regions using the topic (see the sketch below).
    - Delete the legacy entries in Kafka.
    - Store the `last_purged_entry_id` in the kvbackend. It should be locked to prevent other regions from replaying the purged entries.
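A minimal sketch of the per-topic prune-point computation described in the Purge stage, reusing the hypothetical types from the sketch above:

```rust
// Sketch only: how a per-topic prune point could be derived from the region
// states above. Names are hypothetical, not the real implementation.
fn prunable_entry_id(state: &WalPruneState, topic: &str) -> Option<EntryId> {
    let regions = state.topic_regions.get(topic)?;
    // A topic can only be pruned up to the smallest last_entry_id among the
    // regions sharing it; later entries may still need to be replayed.
    let min_last_entry_id = regions
        .iter()
        .filter_map(|r| state.region_states.get(r))
        .map(|s| s.last_entry_id)
        .min()?;
    // Everything up to and including `min_last_entry_id - 1` is safe to delete.
    min_last_entry_id.checked_sub(1)
}
```

The subtraction is checked so that a topic whose smallest `last_entry_id` is 0 is simply skipped.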

### After purge

After the purge, some regions may have a `last_entry_id` smaller than the entries we just deleted. That is legal, since we only delete entries that are no longer needed.

When restarting, a region should query the `last_purged_entry_id` from metasrv and replay from `min(last_entry_id, last_purged_entry_id)`.

### Error handling

No persisted state is needed, since all states are maintained in the kvbackend.

Retry when retrieving metadata from the kvbackend fails.

# Alternatives

The purge schedule could depend on the size of the WAL entries instead of a fixed period of time, which may be more efficient.
@@ -4782,7 +4782,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "${DS_PROMETHEUS}"
|
||||
},
|
||||
"description": "Current counts for stalled write requests by instance\n\nWrite stalls when memtable is full and pending for flush\n\n",
|
||||
"description": "Ingestion size by row counts.",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@@ -4844,7 +4844,7 @@
|
||||
"x": 12,
|
||||
"y": 138
|
||||
},
|
||||
"id": 221,
|
||||
"id": 277,
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [],
|
||||
@@ -4864,14 +4864,14 @@
|
||||
"uid": "${DS_PROMETHEUS}"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "sum by(pod) (greptime_mito_write_stall_total{pod=~\"$datanode\"})",
|
||||
"expr": "rate(greptime_mito_write_rows_total{pod=~\"$datanode\"}[$__rate_interval])",
|
||||
"instant": false,
|
||||
"legendFormat": "{{pod}}",
|
||||
"range": true,
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Write Stall per Instance",
|
||||
"title": "Write Rows per Instance",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
@@ -4976,7 +4976,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "${DS_PROMETHEUS}"
|
||||
},
|
||||
"description": "Cache size by instance.\n",
|
||||
"description": "Current counts for stalled write requests by instance\n\nWrite stalls when memtable is full and pending for flush\n\n",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@@ -5028,7 +5028,7 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"unit": "decbytes"
|
||||
"unit": "none"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
@@ -5038,7 +5038,7 @@
|
||||
"x": 12,
|
||||
"y": 146
|
||||
},
|
||||
"id": 229,
|
||||
"id": 221,
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [],
|
||||
@@ -5058,14 +5058,14 @@
|
||||
"uid": "${DS_PROMETHEUS}"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "greptime_mito_cache_bytes{pod=~\"$datanode\"}",
|
||||
"expr": "sum by(pod) (greptime_mito_write_stall_total{pod=~\"$datanode\"})",
|
||||
"instant": false,
|
||||
"legendFormat": "{{pod}}-{{type}}",
|
||||
"legendFormat": "{{pod}}",
|
||||
"range": true,
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Cached Bytes per Instance",
|
||||
"title": "Write Stall per Instance",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
@@ -5172,7 +5172,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "${DS_PROMETHEUS}"
|
||||
},
|
||||
"description": "P99 latency of each type of reads by instance",
|
||||
"description": "Cache size by instance.\n",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@@ -5224,7 +5224,7 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"unit": "s"
|
||||
"unit": "decbytes"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
@@ -5234,17 +5234,13 @@
|
||||
"x": 12,
|
||||
"y": 154
|
||||
},
|
||||
"id": 228,
|
||||
"id": 229,
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [
|
||||
"lastNotNull"
|
||||
],
|
||||
"calcs": [],
|
||||
"displayMode": "table",
|
||||
"placement": "bottom",
|
||||
"showLegend": true,
|
||||
"sortBy": "Last *",
|
||||
"sortDesc": true
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
@@ -5258,14 +5254,14 @@
|
||||
"uid": "${DS_PROMETHEUS}"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "histogram_quantile(0.99, sum by(pod, le, stage) (rate(greptime_mito_read_stage_elapsed_bucket{pod=~\"$datanode\"}[$__rate_interval])))",
|
||||
"expr": "greptime_mito_cache_bytes{pod=~\"$datanode\"}",
|
||||
"instant": false,
|
||||
"legendFormat": "{{pod}}-{{stage}}-p99",
|
||||
"legendFormat": "{{pod}}-{{type}}",
|
||||
"range": true,
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Read Stage P99 per Instance",
|
||||
"title": "Cached Bytes per Instance",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
@@ -5317,7 +5313,8 @@
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green"
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
@@ -5370,7 +5367,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "${DS_PROMETHEUS}"
|
||||
},
|
||||
"description": "Latency of compaction task, at p99",
|
||||
"description": "P99 latency of each type of reads by instance",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@@ -5414,7 +5411,8 @@
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green"
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
@@ -5432,7 +5430,7 @@
|
||||
"x": 12,
|
||||
"y": 162
|
||||
},
|
||||
"id": 230,
|
||||
"id": 228,
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [
|
||||
@@ -5440,7 +5438,9 @@
|
||||
],
|
||||
"displayMode": "table",
|
||||
"placement": "bottom",
|
||||
"showLegend": true
|
||||
"showLegend": true,
|
||||
"sortBy": "Last *",
|
||||
"sortDesc": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
@@ -5454,14 +5454,14 @@
|
||||
"uid": "${DS_PROMETHEUS}"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "histogram_quantile(0.99, sum by(pod, le) (rate(greptime_mito_compaction_total_elapsed_bucket{pod=~\"$datanode\"}[$__rate_interval])))",
|
||||
"expr": "histogram_quantile(0.99, sum by(pod, le, stage) (rate(greptime_mito_read_stage_elapsed_bucket{pod=~\"$datanode\"}[$__rate_interval])))",
|
||||
"instant": false,
|
||||
"legendFormat": "[{{pod}}]-compaction-p99",
|
||||
"legendFormat": "{{pod}}-{{stage}}-p99",
|
||||
"range": true,
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Compaction P99 per Instance",
|
||||
"title": "Read Stage P99 per Instance",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
@@ -5570,7 +5570,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "${DS_PROMETHEUS}"
|
||||
},
|
||||
"description": "Compaction latency by stage",
|
||||
"description": "Latency of compaction task, at p99",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@@ -5632,7 +5632,7 @@
|
||||
"x": 12,
|
||||
"y": 170
|
||||
},
|
||||
"id": 232,
|
||||
"id": 230,
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [
|
||||
@@ -5654,9 +5654,9 @@
|
||||
"uid": "${DS_PROMETHEUS}"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "histogram_quantile(0.99, sum by(pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{pod=~\"$datanode\"}[$__rate_interval])))",
|
||||
"expr": "histogram_quantile(0.99, sum by(pod, le) (rate(greptime_mito_compaction_total_elapsed_bucket{pod=~\"$datanode\"}[$__rate_interval])))",
|
||||
"instant": false,
|
||||
"legendFormat": "{{pod}}-{{stage}}-p99",
|
||||
"legendFormat": "[{{pod}}]-compaction-p99",
|
||||
"range": true,
|
||||
"refId": "A"
|
||||
}
|
||||
@@ -5794,7 +5794,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "${DS_PROMETHEUS}"
|
||||
},
|
||||
"description": "Write-ahead log operations latency at p99",
|
||||
"description": "Compaction latency by stage",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@@ -5856,13 +5856,13 @@
|
||||
"x": 12,
|
||||
"y": 178
|
||||
},
|
||||
"id": 269,
|
||||
"id": 232,
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [
|
||||
"lastNotNull"
|
||||
],
|
||||
"displayMode": "list",
|
||||
"displayMode": "table",
|
||||
"placement": "bottom",
|
||||
"showLegend": true
|
||||
},
|
||||
@@ -5878,14 +5878,14 @@
|
||||
"uid": "${DS_PROMETHEUS}"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "histogram_quantile(0.99, sum by(le,logstore,optype,pod) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))",
|
||||
"expr": "histogram_quantile(0.99, sum by(pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{pod=~\"$datanode\"}[$__rate_interval])))",
|
||||
"instant": false,
|
||||
"legendFormat": "{{pod}}-{{logstore}}-{{optype}}-p99",
|
||||
"legendFormat": "{{pod}}-{{stage}}-p99",
|
||||
"range": true,
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Log Store op duration seconds",
|
||||
"title": "Compaction P99 per Instance",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
@@ -5993,7 +5993,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "${DS_PROMETHEUS}"
|
||||
},
|
||||
"description": "Ongoing compaction task count",
|
||||
"description": "Write-ahead log operations latency at p99",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@@ -6045,7 +6045,7 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"unit": "none"
|
||||
"unit": "s"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
@@ -6055,13 +6055,13 @@
|
||||
"x": 12,
|
||||
"y": 186
|
||||
},
|
||||
"id": 271,
|
||||
"id": 269,
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [
|
||||
"lastNotNull"
|
||||
],
|
||||
"displayMode": "table",
|
||||
"displayMode": "list",
|
||||
"placement": "bottom",
|
||||
"showLegend": true
|
||||
},
|
||||
@@ -6078,14 +6078,14 @@
|
||||
"uid": "${DS_PROMETHEUS}"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "greptime_mito_inflight_compaction_count",
|
||||
"expr": "histogram_quantile(0.99, sum by(le,logstore,optype,pod) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))",
|
||||
"instant": false,
|
||||
"legendFormat": "{{pod}}",
|
||||
"legendFormat": "{{pod}}-{{logstore}}-{{optype}}-p99",
|
||||
"range": true,
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Inflight Compaction",
|
||||
"title": "Log Store op duration seconds",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
@@ -6188,6 +6188,105 @@
|
||||
"title": "Inflight Flush",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "${DS_PROMETHEUS}"
|
||||
},
|
||||
"description": "Ongoing compaction task count",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "palette-classic"
|
||||
},
|
||||
"custom": {
|
||||
"axisBorderShow": false,
|
||||
"axisCenteredZero": false,
|
||||
"axisColorMode": "text",
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"drawStyle": "points",
|
||||
"fillOpacity": 0,
|
||||
"gradientMode": "none",
|
||||
"hideFrom": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": false
|
||||
},
|
||||
"insertNulls": false,
|
||||
"lineInterpolation": "linear",
|
||||
"lineWidth": 1,
|
||||
"pointSize": 5,
|
||||
"scaleDistribution": {
|
||||
"type": "linear"
|
||||
},
|
||||
"showPoints": "auto",
|
||||
"spanNulls": false,
|
||||
"stacking": {
|
||||
"group": "A",
|
||||
"mode": "none"
|
||||
},
|
||||
"thresholdsStyle": {
|
||||
"mode": "off"
|
||||
}
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green"
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
},
|
||||
"unit": "none"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 194
|
||||
},
|
||||
"id": 271,
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [
|
||||
"lastNotNull"
|
||||
],
|
||||
"displayMode": "table",
|
||||
"placement": "bottom",
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.1.3",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "${DS_PROMETHEUS}"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "greptime_mito_inflight_compaction_count",
|
||||
"instant": false,
|
||||
"legendFormat": "{{pod}}",
|
||||
"range": true,
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Inflight Compaction",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"collapsed": false,
|
||||
"gridPos": {
|
||||
|
||||
@@ -15,10 +15,13 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use datatypes::schema::{
|
||||
ColumnDefaultConstraint, ColumnSchema, FulltextAnalyzer, FulltextOptions, SkippingIndexOptions,
|
||||
SkippingIndexType, COMMENT_KEY, FULLTEXT_KEY, INVERTED_INDEX_KEY, SKIPPING_INDEX_KEY,
|
||||
ColumnDefaultConstraint, ColumnSchema, FulltextAnalyzer, FulltextBackend, FulltextOptions,
|
||||
SkippingIndexOptions, SkippingIndexType, COMMENT_KEY, FULLTEXT_KEY, INVERTED_INDEX_KEY,
|
||||
SKIPPING_INDEX_KEY,
|
||||
};
|
||||
use greptime_proto::v1::{
|
||||
Analyzer, FulltextBackend as PbFulltextBackend, SkippingIndexType as PbSkippingIndexType,
|
||||
};
|
||||
use greptime_proto::v1::{Analyzer, SkippingIndexType as PbSkippingIndexType};
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{self, Result};
|
||||
@@ -142,13 +145,21 @@ pub fn options_from_inverted() -> ColumnOptions {
|
||||
}
|
||||
|
||||
/// Tries to construct a `FulltextAnalyzer` from the given analyzer.
|
||||
pub fn as_fulltext_option(analyzer: Analyzer) -> FulltextAnalyzer {
|
||||
pub fn as_fulltext_option_analyzer(analyzer: Analyzer) -> FulltextAnalyzer {
|
||||
match analyzer {
|
||||
Analyzer::English => FulltextAnalyzer::English,
|
||||
Analyzer::Chinese => FulltextAnalyzer::Chinese,
|
||||
}
|
||||
}
|
||||
|
||||
/// Tries to construct a `FulltextBackend` from the given backend.
|
||||
pub fn as_fulltext_option_backend(backend: PbFulltextBackend) -> FulltextBackend {
|
||||
match backend {
|
||||
PbFulltextBackend::Bloom => FulltextBackend::Bloom,
|
||||
PbFulltextBackend::Tantivy => FulltextBackend::Tantivy,
|
||||
}
|
||||
}
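For illustration, the two converters added above can be combined when translating a gRPC column definition into schema options. This small sketch only reuses names from the hunk; the wrapper function itself is not part of the change.

use datatypes::schema::{FulltextAnalyzer, FulltextBackend};
use greptime_proto::v1::{Analyzer, FulltextBackend as PbFulltextBackend};

// Illustrative wrapper (not in the diff): map the gRPC enums onto schema options
// using the two helpers defined above in this module.
fn fulltext_options_from_proto(
    analyzer: Analyzer,
    backend: PbFulltextBackend,
) -> (FulltextAnalyzer, FulltextBackend) {
    (
        as_fulltext_option_analyzer(analyzer),
        as_fulltext_option_backend(backend),
    )
}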
|
||||
|
||||
/// Tries to construct a `SkippingIndexType` from the given skipping index type.
|
||||
pub fn as_skipping_index_type(skipping_index_type: PbSkippingIndexType) -> SkippingIndexType {
|
||||
match skipping_index_type {
|
||||
@@ -160,7 +171,7 @@ pub fn as_skipping_index_type(skipping_index_type: PbSkippingIndexType) -> Skipp
|
||||
mod tests {
|
||||
|
||||
use datatypes::data_type::ConcreteDataType;
|
||||
use datatypes::schema::FulltextAnalyzer;
|
||||
use datatypes::schema::{FulltextAnalyzer, FulltextBackend};
|
||||
|
||||
use super::*;
|
||||
use crate::v1::ColumnDataType;
|
||||
@@ -219,13 +230,14 @@ mod tests {
|
||||
enable: true,
|
||||
analyzer: FulltextAnalyzer::English,
|
||||
case_sensitive: false,
|
||||
backend: FulltextBackend::Bloom,
|
||||
})
|
||||
.unwrap();
|
||||
schema.set_inverted_index(true);
|
||||
let options = options_from_column_schema(&schema).unwrap();
|
||||
assert_eq!(
|
||||
options.options.get(FULLTEXT_GRPC_KEY).unwrap(),
|
||||
"{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false}"
|
||||
"{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false,\"backend\":\"bloom\"}"
|
||||
);
|
||||
assert_eq!(
|
||||
options.options.get(INVERTED_INDEX_GRPC_KEY).unwrap(),
|
||||
@@ -239,11 +251,12 @@ mod tests {
|
||||
enable: true,
|
||||
analyzer: FulltextAnalyzer::English,
|
||||
case_sensitive: false,
|
||||
backend: FulltextBackend::Bloom,
|
||||
};
|
||||
let options = options_from_fulltext(&fulltext).unwrap().unwrap();
|
||||
assert_eq!(
|
||||
options.options.get(FULLTEXT_GRPC_KEY).unwrap(),
|
||||
"{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false}"
|
||||
"{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false,\"backend\":\"bloom\"}"
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
@@ -19,7 +19,7 @@ mod information_memory_table;
|
||||
pub mod key_column_usage;
|
||||
mod partitions;
|
||||
mod procedure_info;
|
||||
mod region_peers;
|
||||
pub mod region_peers;
|
||||
mod region_statistics;
|
||||
mod runtime_metrics;
|
||||
pub mod schemata;
|
||||
|
||||
@@ -56,6 +56,8 @@ pub const TABLE_CATALOG: &str = "table_catalog";
|
||||
pub const TABLE_SCHEMA: &str = "table_schema";
|
||||
pub const TABLE_NAME: &str = "table_name";
|
||||
pub const COLUMN_NAME: &str = "column_name";
|
||||
pub const REGION_ID: &str = "region_id";
|
||||
pub const PEER_ID: &str = "peer_id";
|
||||
const ORDINAL_POSITION: &str = "ordinal_position";
|
||||
const CHARACTER_MAXIMUM_LENGTH: &str = "character_maximum_length";
|
||||
const CHARACTER_OCTET_LENGTH: &str = "character_octet_length";
|
||||
|
||||
@@ -21,6 +21,7 @@ use common_error::ext::BoxedError;
|
||||
use common_meta::rpc::router::RegionRoute;
|
||||
use common_recordbatch::adapter::RecordBatchStreamAdapter;
|
||||
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
|
||||
use datafusion::common::HashMap;
|
||||
use datafusion::execution::TaskContext;
|
||||
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
|
||||
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
|
||||
@@ -43,16 +44,22 @@ use crate::kvbackend::KvBackendCatalogManager;
|
||||
use crate::system_schema::information_schema::{InformationTable, Predicates};
|
||||
use crate::CatalogManager;
|
||||
|
||||
const REGION_ID: &str = "region_id";
|
||||
const PEER_ID: &str = "peer_id";
|
||||
pub const TABLE_CATALOG: &str = "table_catalog";
|
||||
pub const TABLE_SCHEMA: &str = "table_schema";
|
||||
pub const TABLE_NAME: &str = "table_name";
|
||||
pub const REGION_ID: &str = "region_id";
|
||||
pub const PEER_ID: &str = "peer_id";
|
||||
const PEER_ADDR: &str = "peer_addr";
|
||||
const IS_LEADER: &str = "is_leader";
|
||||
pub const IS_LEADER: &str = "is_leader";
|
||||
const STATUS: &str = "status";
|
||||
const DOWN_SECONDS: &str = "down_seconds";
|
||||
const INIT_CAPACITY: usize = 42;
|
||||
|
||||
/// The `REGION_PEERS` table provides information about the region distribution and routes. Including fields:
|
||||
///
|
||||
/// - `table_catalog`: the table catalog name
|
||||
/// - `table_schema`: the table schema name
|
||||
/// - `table_name`: the table name
|
||||
/// - `region_id`: the region id
|
||||
/// - `peer_id`: the region storage datanode peer id
|
||||
/// - `peer_addr`: the region storage datanode gRPC peer address
|
||||
@@ -77,6 +84,9 @@ impl InformationSchemaRegionPeers {
|
||||
|
||||
pub(crate) fn schema() -> SchemaRef {
|
||||
Arc::new(Schema::new(vec![
|
||||
ColumnSchema::new(TABLE_CATALOG, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(TABLE_NAME, ConcreteDataType::string_datatype(), false),
|
||||
ColumnSchema::new(REGION_ID, ConcreteDataType::uint64_datatype(), false),
|
||||
ColumnSchema::new(PEER_ID, ConcreteDataType::uint64_datatype(), true),
|
||||
ColumnSchema::new(PEER_ADDR, ConcreteDataType::string_datatype(), true),
|
||||
@@ -134,6 +144,9 @@ struct InformationSchemaRegionPeersBuilder {
|
||||
catalog_name: String,
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
|
||||
table_catalogs: StringVectorBuilder,
|
||||
table_schemas: StringVectorBuilder,
|
||||
table_names: StringVectorBuilder,
|
||||
region_ids: UInt64VectorBuilder,
|
||||
peer_ids: UInt64VectorBuilder,
|
||||
peer_addrs: StringVectorBuilder,
|
||||
@@ -152,6 +165,9 @@ impl InformationSchemaRegionPeersBuilder {
|
||||
schema,
|
||||
catalog_name,
|
||||
catalog_manager,
|
||||
table_catalogs: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
table_schemas: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
table_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
region_ids: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
peer_ids: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
peer_addrs: StringVectorBuilder::with_capacity(INIT_CAPACITY),
|
||||
@@ -177,24 +193,28 @@ impl InformationSchemaRegionPeersBuilder {
|
||||
let predicates = Predicates::from_scan_request(&request);
|
||||
|
||||
for schema_name in catalog_manager.schema_names(&catalog_name, None).await? {
|
||||
let table_id_stream = catalog_manager
|
||||
let table_stream = catalog_manager
|
||||
.tables(&catalog_name, &schema_name, None)
|
||||
.try_filter_map(|t| async move {
|
||||
let table_info = t.table_info();
|
||||
if table_info.table_type == TableType::Temporary {
|
||||
Ok(None)
|
||||
} else {
|
||||
Ok(Some(table_info.ident.table_id))
|
||||
Ok(Some((
|
||||
table_info.ident.table_id,
|
||||
table_info.name.to_string(),
|
||||
)))
|
||||
}
|
||||
});
|
||||
|
||||
const BATCH_SIZE: usize = 128;
|
||||
|
||||
// Split table ids into chunks
|
||||
let mut table_id_chunks = pin!(table_id_stream.ready_chunks(BATCH_SIZE));
|
||||
// Split tables into chunks
|
||||
let mut table_chunks = pin!(table_stream.ready_chunks(BATCH_SIZE));
|
||||
|
||||
while let Some(table_ids) = table_id_chunks.next().await {
|
||||
let table_ids = table_ids.into_iter().collect::<Result<Vec<_>>>()?;
|
||||
while let Some(tables) = table_chunks.next().await {
|
||||
let tables = tables.into_iter().collect::<Result<HashMap<_, _>>>()?;
|
||||
let table_ids = tables.keys().cloned().collect::<Vec<_>>();
|
||||
|
||||
let table_routes = if let Some(partition_manager) = &partition_manager {
|
||||
partition_manager
|
||||
@@ -206,7 +226,16 @@ impl InformationSchemaRegionPeersBuilder {
|
||||
};
|
||||
|
||||
for (table_id, routes) in table_routes {
|
||||
self.add_region_peers(&predicates, table_id, &routes);
|
||||
// Safety: table_id is guaranteed to be in the map
|
||||
let table_name = tables.get(&table_id).unwrap();
|
||||
self.add_region_peers(
|
||||
&catalog_name,
|
||||
&schema_name,
|
||||
table_name,
|
||||
&predicates,
|
||||
table_id,
|
||||
&routes,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -216,6 +245,9 @@ impl InformationSchemaRegionPeersBuilder {
|
||||
|
||||
fn add_region_peers(
|
||||
&mut self,
|
||||
table_catalog: &str,
|
||||
table_schema: &str,
|
||||
table_name: &str,
|
||||
predicates: &Predicates,
|
||||
table_id: TableId,
|
||||
routes: &[RegionRoute],
|
||||
@@ -231,13 +263,20 @@ impl InformationSchemaRegionPeersBuilder {
|
||||
Some("ALIVE".to_string())
|
||||
};
|
||||
|
||||
let row = [(REGION_ID, &Value::from(region_id))];
|
||||
let row = [
|
||||
(TABLE_CATALOG, &Value::from(table_catalog)),
|
||||
(TABLE_SCHEMA, &Value::from(table_schema)),
|
||||
(TABLE_NAME, &Value::from(table_name)),
|
||||
(REGION_ID, &Value::from(region_id)),
|
||||
];
|
||||
|
||||
if !predicates.eval(&row) {
|
||||
return;
|
||||
}
|
||||
|
||||
// TODO(dennis): adds followers.
|
||||
self.table_catalogs.push(Some(table_catalog));
|
||||
self.table_schemas.push(Some(table_schema));
|
||||
self.table_names.push(Some(table_name));
|
||||
self.region_ids.push(Some(region_id));
|
||||
self.peer_ids.push(peer_id);
|
||||
self.peer_addrs.push(peer_addr.as_deref());
|
||||
@@ -245,11 +284,26 @@ impl InformationSchemaRegionPeersBuilder {
|
||||
self.statuses.push(state.as_deref());
|
||||
self.down_seconds
|
||||
.push(route.leader_down_millis().map(|m| m / 1000));
|
||||
|
||||
for follower in &route.follower_peers {
|
||||
self.table_catalogs.push(Some(table_catalog));
|
||||
self.table_schemas.push(Some(table_schema));
|
||||
self.table_names.push(Some(table_name));
|
||||
self.region_ids.push(Some(region_id));
|
||||
self.peer_ids.push(Some(follower.id));
|
||||
self.peer_addrs.push(Some(follower.addr.as_str()));
|
||||
self.is_leaders.push(Some("No"));
|
||||
self.statuses.push(None);
|
||||
self.down_seconds.push(None);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn finish(&mut self) -> Result<RecordBatch> {
|
||||
let columns: Vec<VectorRef> = vec![
|
||||
Arc::new(self.table_catalogs.finish()),
|
||||
Arc::new(self.table_schemas.finish()),
|
||||
Arc::new(self.table_names.finish()),
|
||||
Arc::new(self.region_ids.finish()),
|
||||
Arc::new(self.peer_ids.finish()),
|
||||
Arc::new(self.peer_addrs.finish()),
|
||||
|
||||
@@ -177,7 +177,7 @@ fn create_table_info(table_id: TableId, table_name: TableName) -> RawTableInfo {
|
||||
|
||||
fn create_region_routes(regions: Vec<RegionNumber>) -> Vec<RegionRoute> {
|
||||
let mut region_routes = Vec::with_capacity(100);
|
||||
let mut rng = rand::thread_rng();
|
||||
let mut rng = rand::rng();
|
||||
|
||||
for region_id in regions.into_iter().map(u64::from) {
|
||||
region_routes.push(RegionRoute {
|
||||
@@ -188,7 +188,7 @@ fn create_region_routes(regions: Vec<RegionNumber>) -> Vec<RegionRoute> {
|
||||
attrs: BTreeMap::new(),
|
||||
},
|
||||
leader_peer: Some(Peer {
|
||||
id: rng.gen_range(0..10),
|
||||
id: rng.random_range(0..10),
|
||||
addr: String::new(),
|
||||
}),
|
||||
follower_peers: vec![],
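The hunk above tracks the rand 0.9 API renames. A minimal standalone sketch of the new calls, assuming rand 0.9 and a hypothetical helper name:

use rand::Rng;

// `rand::rng()` replaces the old `rand::thread_rng()`, and
// `Rng::random_range` replaces the old `gen_range`.
fn pick_peer_id() -> u64 {
    let mut rng = rand::rng();
    rng.random_range(0..10)
}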
|
||||
|
||||
@@ -16,7 +16,6 @@
|
||||
|
||||
mod client;
|
||||
pub mod client_manager;
|
||||
#[cfg(feature = "testing")]
|
||||
mod database;
|
||||
pub mod error;
|
||||
pub mod flow;
|
||||
@@ -34,7 +33,6 @@ pub use common_recordbatch::{RecordBatches, SendableRecordBatchStream};
|
||||
use snafu::OptionExt;
|
||||
|
||||
pub use self::client::Client;
|
||||
#[cfg(feature = "testing")]
|
||||
pub use self::database::Database;
|
||||
pub use self::error::{Error, Result};
|
||||
use crate::error::{IllegalDatabaseResponseSnafu, ServerSnafu};
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
// limitations under the License.
|
||||
|
||||
use enum_dispatch::enum_dispatch;
|
||||
use rand::seq::SliceRandom;
|
||||
use rand::seq::IndexedRandom;
|
||||
|
||||
#[enum_dispatch]
|
||||
pub trait LoadBalance {
|
||||
@@ -37,7 +37,7 @@ pub struct Random;
|
||||
|
||||
impl LoadBalance for Random {
|
||||
fn get_peer<'a>(&self, peers: &'a [String]) -> Option<&'a String> {
|
||||
peers.choose(&mut rand::thread_rng())
|
||||
peers.choose(&mut rand::rng())
|
||||
}
|
||||
}
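Similarly, random slice selection now goes through the `IndexedRandom` trait instead of `SliceRandom`. A small sketch with a made-up peer list, assuming rand 0.9:

use rand::seq::IndexedRandom;

// `choose` picks one element at random; it returns `None` only for an empty slice.
fn demo_choose() -> Option<String> {
    let peers = vec!["10.0.0.1:4001".to_string(), "10.0.0.2:4001".to_string()];
    peers.choose(&mut rand::rng()).cloned()
}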
|
||||
|
||||
|
||||
@@ -30,7 +30,7 @@ use datanode::datanode::{Datanode, DatanodeBuilder};
|
||||
use datanode::service::DatanodeServiceBuilder;
|
||||
use meta_client::{MetaClientOptions, MetaClientType};
|
||||
use servers::Mode;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use tracing_appender::non_blocking::WorkerGuard;
|
||||
|
||||
use crate::error::{
|
||||
@@ -223,15 +223,14 @@ impl StartCommand {
|
||||
.get_or_insert_with(MetaClientOptions::default)
|
||||
.metasrv_addrs
|
||||
.clone_from(metasrv_addrs);
|
||||
opts.mode = Mode::Distributed;
|
||||
}
|
||||
|
||||
if let (Mode::Distributed, None) = (&opts.mode, &opts.node_id) {
|
||||
return MissingConfigSnafu {
|
||||
msg: "Missing node id option",
|
||||
ensure!(
|
||||
opts.node_id.is_some(),
|
||||
MissingConfigSnafu {
|
||||
msg: "Missing node id option"
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
);
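The explicit early return is rewritten with snafu's `ensure!` macro. A self-contained sketch of that pattern, with a made-up error enum standing in for the real one:

use snafu::{ensure, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Missing config: {msg}"))]
    MissingConfig { msg: String },
}

// `ensure!` bails out with the given error context when the condition is false,
// replacing the `return MissingConfigSnafu { .. }.fail()` form shown above.
fn check_node_id(node_id: Option<u64>) -> Result<(), Error> {
    ensure!(
        node_id.is_some(),
        MissingConfigSnafu {
            msg: "Missing node id option",
        }
    );
    Ok(())
}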
|
||||
|
||||
if let Some(data_home) = &self.data_home {
|
||||
opts.storage.data_home.clone_from(data_home);
|
||||
@@ -295,10 +294,13 @@ impl StartCommand {
|
||||
msg: "'meta_client_options'",
|
||||
})?;
|
||||
|
||||
let meta_client =
|
||||
meta_client::create_meta_client(MetaClientType::Datanode { member_id }, meta_config)
|
||||
.await
|
||||
.context(MetaClientInitSnafu)?;
|
||||
let meta_client = meta_client::create_meta_client(
|
||||
MetaClientType::Datanode { member_id },
|
||||
meta_config,
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.context(MetaClientInitSnafu)?;
|
||||
|
||||
let meta_backend = Arc::new(MetaKvBackend {
|
||||
client: meta_client.clone(),
|
||||
@@ -311,7 +313,7 @@ impl StartCommand {
|
||||
.build(),
|
||||
);
|
||||
|
||||
let mut datanode = DatanodeBuilder::new(opts.clone(), plugins)
|
||||
let mut datanode = DatanodeBuilder::new(opts.clone(), plugins, Mode::Distributed)
|
||||
.with_meta_client(meta_client)
|
||||
.with_kv_backend(meta_backend)
|
||||
.with_cache_registry(layered_cache_registry)
|
||||
@@ -333,6 +335,7 @@ impl StartCommand {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::assert_matches::assert_matches;
|
||||
use std::io::Write;
|
||||
use std::time::Duration;
|
||||
|
||||
@@ -340,7 +343,6 @@ mod tests {
|
||||
use common_test_util::temp_dir::create_named_temp_file;
|
||||
use datanode::config::{FileConfig, GcsConfig, ObjectStoreConfig, S3Config};
|
||||
use servers::heartbeat_options::HeartbeatOptions;
|
||||
use servers::Mode;
|
||||
|
||||
use super::*;
|
||||
use crate::options::GlobalOptions;
|
||||
@@ -406,7 +408,7 @@ mod tests {
|
||||
sync_write = false
|
||||
|
||||
[storage]
|
||||
data_home = "/tmp/greptimedb/"
|
||||
data_home = "./greptimedb_data/"
|
||||
type = "File"
|
||||
|
||||
[[storage.providers]]
|
||||
@@ -420,7 +422,7 @@ mod tests {
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
dir = "./greptimedb_data/test/logs"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
@@ -467,7 +469,7 @@ mod tests {
|
||||
assert_eq!(10000, ddl_timeout.as_millis());
|
||||
assert_eq!(3000, timeout.as_millis());
|
||||
assert!(tcp_nodelay);
|
||||
assert_eq!("/tmp/greptimedb/", options.storage.data_home);
|
||||
assert_eq!("./greptimedb_data/", options.storage.data_home);
|
||||
assert!(matches!(
|
||||
&options.storage.store,
|
||||
ObjectStoreConfig::File(FileConfig { .. })
|
||||
@@ -483,27 +485,14 @@ mod tests {
|
||||
));
|
||||
|
||||
assert_eq!("debug", options.logging.level.unwrap());
|
||||
assert_eq!("/tmp/greptimedb/test/logs".to_string(), options.logging.dir);
|
||||
assert_eq!(
|
||||
"./greptimedb_data/test/logs".to_string(),
|
||||
options.logging.dir
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_try_from_cmd() {
|
||||
let opt = StartCommand::default()
|
||||
.load_options(&GlobalOptions::default())
|
||||
.unwrap()
|
||||
.component;
|
||||
assert_eq!(Mode::Standalone, opt.mode);
|
||||
|
||||
let opt = (StartCommand {
|
||||
node_id: Some(42),
|
||||
metasrv_addrs: Some(vec!["127.0.0.1:3002".to_string()]),
|
||||
..Default::default()
|
||||
})
|
||||
.load_options(&GlobalOptions::default())
|
||||
.unwrap()
|
||||
.component;
|
||||
assert_eq!(Mode::Distributed, opt.mode);
|
||||
|
||||
assert!((StartCommand {
|
||||
metasrv_addrs: Some(vec!["127.0.0.1:3002".to_string()]),
|
||||
..Default::default()
|
||||
@@ -522,11 +511,23 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_load_log_options_from_cli() {
|
||||
let cmd = StartCommand::default();
|
||||
let mut cmd = StartCommand::default();
|
||||
|
||||
let result = cmd.load_options(&GlobalOptions {
|
||||
log_dir: Some("./greptimedb_data/test/logs".to_string()),
|
||||
log_level: Some("debug".to_string()),
|
||||
|
||||
#[cfg(feature = "tokio-console")]
|
||||
tokio_console_addr: None,
|
||||
});
|
||||
// Missing node_id.
|
||||
assert_matches!(result, Err(crate::error::Error::MissingConfig { .. }));
|
||||
|
||||
cmd.node_id = Some(42);
|
||||
|
||||
let options = cmd
|
||||
.load_options(&GlobalOptions {
|
||||
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
|
||||
log_dir: Some("./greptimedb_data/test/logs".to_string()),
|
||||
log_level: Some("debug".to_string()),
|
||||
|
||||
#[cfg(feature = "tokio-console")]
|
||||
@@ -536,7 +537,7 @@ mod tests {
|
||||
.component;
|
||||
|
||||
let logging_opt = options.logging;
|
||||
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
|
||||
assert_eq!("./greptimedb_data/test/logs", logging_opt.dir);
|
||||
assert_eq!("debug", logging_opt.level.as_ref().unwrap());
|
||||
}
|
||||
|
||||
@@ -565,11 +566,11 @@ mod tests {
|
||||
|
||||
[storage]
|
||||
type = "File"
|
||||
data_home = "/tmp/greptimedb/"
|
||||
data_home = "./greptimedb_data/"
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
dir = "./greptimedb_data/test/logs"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
|
||||
@@ -100,6 +100,13 @@ pub enum Error {
|
||||
source: flow::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Servers error"))]
|
||||
Servers {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: servers::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to start frontend"))]
|
||||
StartFrontend {
|
||||
#[snafu(implicit)]
|
||||
@@ -365,6 +372,7 @@ impl ErrorExt for Error {
|
||||
Error::ShutdownFrontend { source, .. } => source.status_code(),
|
||||
Error::StartMetaServer { source, .. } => source.status_code(),
|
||||
Error::ShutdownMetaServer { source, .. } => source.status_code(),
|
||||
Error::Servers { source, .. } => source.status_code(),
|
||||
Error::BuildMetaServer { source, .. } => source.status_code(),
|
||||
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
|
||||
Error::BuildCli { source, .. } => source.status_code(),
|
||||
|
||||
@@ -34,8 +34,7 @@ use common_telemetry::logging::TracingOptions;
|
||||
use common_version::{short_version, version};
|
||||
use flow::{FlownodeBuilder, FlownodeInstance, FrontendInvoker};
|
||||
use meta_client::{MetaClientOptions, MetaClientType};
|
||||
use servers::Mode;
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use tracing_appender::non_blocking::WorkerGuard;
|
||||
|
||||
use crate::error::{
|
||||
@@ -203,7 +202,6 @@ impl StartCommand {
|
||||
.get_or_insert_with(MetaClientOptions::default)
|
||||
.metasrv_addrs
|
||||
.clone_from(metasrv_addrs);
|
||||
opts.mode = Mode::Distributed;
|
||||
}
|
||||
|
||||
if let Some(http_addr) = &self.http_addr {
|
||||
@@ -214,12 +212,12 @@ impl StartCommand {
|
||||
opts.http.timeout = Duration::from_secs(http_timeout);
|
||||
}
|
||||
|
||||
if let (Mode::Distributed, None) = (&opts.mode, &opts.node_id) {
|
||||
return MissingConfigSnafu {
|
||||
msg: "Missing node id option",
|
||||
ensure!(
|
||||
opts.node_id.is_some(),
|
||||
MissingConfigSnafu {
|
||||
msg: "Missing node id option"
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -249,10 +247,13 @@ impl StartCommand {
|
||||
msg: "'meta_client_options'",
|
||||
})?;
|
||||
|
||||
let meta_client =
|
||||
meta_client::create_meta_client(MetaClientType::Flownode { member_id }, meta_config)
|
||||
.await
|
||||
.context(MetaClientInitSnafu)?;
|
||||
let meta_client = meta_client::create_meta_client(
|
||||
MetaClientType::Flownode { member_id },
|
||||
meta_config,
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.context(MetaClientInitSnafu)?;
|
||||
|
||||
let cache_max_capacity = meta_config.metadata_cache_max_capacity;
|
||||
let cache_ttl = meta_config.metadata_cache_ttl;
|
||||
|
||||
@@ -32,28 +32,25 @@ use common_telemetry::info;
|
||||
use common_telemetry::logging::TracingOptions;
|
||||
use common_time::timezone::set_default_timezone;
|
||||
use common_version::{short_version, version};
|
||||
use frontend::frontend::Frontend;
|
||||
use frontend::heartbeat::HeartbeatTask;
|
||||
use frontend::instance::builder::FrontendBuilder;
|
||||
use frontend::instance::{FrontendInstance, Instance as FeInstance};
|
||||
use frontend::server::Services;
|
||||
use meta_client::{MetaClientOptions, MetaClientType};
|
||||
use query::stats::StatementStatistics;
|
||||
use servers::export_metrics::ExportMetricsTask;
|
||||
use servers::tls::{TlsMode, TlsOption};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use tracing_appender::non_blocking::WorkerGuard;
|
||||
|
||||
use crate::error::{
|
||||
self, InitTimezoneSnafu, LoadLayeredConfigSnafu, MetaClientInitSnafu, MissingConfigSnafu,
|
||||
Result, StartFrontendSnafu,
|
||||
};
|
||||
use crate::error::{self, Result};
|
||||
use crate::options::{GlobalOptions, GreptimeOptions};
|
||||
use crate::{log_versions, App};
|
||||
|
||||
type FrontendOptions = GreptimeOptions<frontend::frontend::FrontendOptions>;
|
||||
|
||||
pub struct Instance {
|
||||
frontend: FeInstance,
|
||||
|
||||
frontend: Frontend,
|
||||
// Keep the logging guard to prevent the worker from being dropped.
|
||||
_guard: Vec<WorkerGuard>,
|
||||
}
|
||||
@@ -61,20 +58,17 @@ pub struct Instance {
|
||||
pub const APP_NAME: &str = "greptime-frontend";
|
||||
|
||||
impl Instance {
|
||||
pub fn new(frontend: FeInstance, guard: Vec<WorkerGuard>) -> Self {
|
||||
Self {
|
||||
frontend,
|
||||
_guard: guard,
|
||||
}
|
||||
pub fn new(frontend: Frontend, _guard: Vec<WorkerGuard>) -> Self {
|
||||
Self { frontend, _guard }
|
||||
}
|
||||
|
||||
pub fn mut_inner(&mut self) -> &mut FeInstance {
|
||||
&mut self.frontend
|
||||
}
|
||||
|
||||
pub fn inner(&self) -> &FeInstance {
|
||||
pub fn inner(&self) -> &Frontend {
|
||||
&self.frontend
|
||||
}
|
||||
|
||||
pub fn mut_inner(&mut self) -> &mut Frontend {
|
||||
&mut self.frontend
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
@@ -84,11 +78,15 @@ impl App for Instance {
|
||||
}
|
||||
|
||||
async fn start(&mut self) -> Result<()> {
|
||||
plugins::start_frontend_plugins(self.frontend.plugins().clone())
|
||||
let plugins = self.frontend.instance.plugins().clone();
|
||||
plugins::start_frontend_plugins(plugins)
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
.context(error::StartFrontendSnafu)?;
|
||||
|
||||
self.frontend.start().await.context(StartFrontendSnafu)
|
||||
self.frontend
|
||||
.start()
|
||||
.await
|
||||
.context(error::StartFrontendSnafu)
|
||||
}
|
||||
|
||||
async fn stop(&self) -> Result<()> {
|
||||
@@ -178,7 +176,7 @@ impl StartCommand {
|
||||
self.config_file.as_deref(),
|
||||
self.env_prefix.as_ref(),
|
||||
)
|
||||
.context(LoadLayeredConfigSnafu)?;
|
||||
.context(error::LoadLayeredConfigSnafu)?;
|
||||
|
||||
self.merge_with_cli_options(global_options, &mut opts)?;
|
||||
|
||||
@@ -283,22 +281,28 @@ impl StartCommand {
|
||||
let mut plugins = Plugins::new();
|
||||
plugins::setup_frontend_plugins(&mut plugins, &plugin_opts, &opts)
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
.context(error::StartFrontendSnafu)?;
|
||||
|
||||
set_default_timezone(opts.default_timezone.as_deref()).context(InitTimezoneSnafu)?;
|
||||
set_default_timezone(opts.default_timezone.as_deref()).context(error::InitTimezoneSnafu)?;
|
||||
|
||||
let meta_client_options = opts.meta_client.as_ref().context(MissingConfigSnafu {
|
||||
msg: "'meta_client'",
|
||||
})?;
|
||||
let meta_client_options = opts
|
||||
.meta_client
|
||||
.as_ref()
|
||||
.context(error::MissingConfigSnafu {
|
||||
msg: "'meta_client'",
|
||||
})?;
|
||||
|
||||
let cache_max_capacity = meta_client_options.metadata_cache_max_capacity;
|
||||
let cache_ttl = meta_client_options.metadata_cache_ttl;
|
||||
let cache_tti = meta_client_options.metadata_cache_tti;
|
||||
|
||||
let meta_client =
|
||||
meta_client::create_meta_client(MetaClientType::Frontend, meta_client_options)
|
||||
.await
|
||||
.context(MetaClientInitSnafu)?;
|
||||
let meta_client = meta_client::create_meta_client(
|
||||
MetaClientType::Frontend,
|
||||
meta_client_options,
|
||||
Some(&plugins),
|
||||
)
|
||||
.await
|
||||
.context(error::MetaClientInitSnafu)?;
|
||||
|
||||
// TODO(discord9): add helper function to ease the creation of cache registry&such
|
||||
let cached_meta_backend =
|
||||
@@ -345,6 +349,7 @@ impl StartCommand {
|
||||
opts.heartbeat.clone(),
|
||||
Arc::new(executor),
|
||||
);
|
||||
let heartbeat_task = Some(heartbeat_task);
|
||||
|
||||
// frontend to datanode need not timeout.
|
||||
// Some queries are expected to take long time.
|
||||
@@ -356,7 +361,7 @@ impl StartCommand {
|
||||
};
|
||||
let client = NodeClients::new(channel_config);
|
||||
|
||||
let mut instance = FrontendBuilder::new(
|
||||
let instance = FrontendBuilder::new(
|
||||
opts.clone(),
|
||||
cached_meta_backend.clone(),
|
||||
layered_cache_registry.clone(),
|
||||
@@ -367,20 +372,27 @@ impl StartCommand {
|
||||
)
|
||||
.with_plugin(plugins.clone())
|
||||
.with_local_cache_invalidator(layered_cache_registry)
|
||||
.with_heartbeat_task(heartbeat_task)
|
||||
.try_build()
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
.context(error::StartFrontendSnafu)?;
|
||||
let instance = Arc::new(instance);
|
||||
|
||||
let servers = Services::new(opts, Arc::new(instance.clone()), plugins)
|
||||
let export_metrics_task = ExportMetricsTask::try_new(&opts.export_metrics, Some(&plugins))
|
||||
.context(error::ServersSnafu)?;
|
||||
|
||||
let servers = Services::new(opts, instance.clone(), plugins)
|
||||
.build()
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
instance
|
||||
.build_servers(servers)
|
||||
.context(StartFrontendSnafu)?;
|
||||
.context(error::StartFrontendSnafu)?;
|
||||
|
||||
Ok(Instance::new(instance, guard))
|
||||
let frontend = Frontend {
|
||||
instance,
|
||||
servers,
|
||||
heartbeat_task,
|
||||
export_metrics_task,
|
||||
};
|
||||
|
||||
Ok(Instance::new(frontend, guard))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -440,7 +452,7 @@ mod tests {
|
||||
|
||||
[http]
|
||||
addr = "127.0.0.1:4000"
|
||||
timeout = "30s"
|
||||
timeout = "0s"
|
||||
body_limit = "2GB"
|
||||
|
||||
[opentsdb]
|
||||
@@ -448,7 +460,7 @@ mod tests {
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
dir = "./greptimedb_data/test/logs"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
@@ -461,12 +473,15 @@ mod tests {
|
||||
let fe_opts = command.load_options(&Default::default()).unwrap().component;
|
||||
|
||||
assert_eq!("127.0.0.1:4000".to_string(), fe_opts.http.addr);
|
||||
assert_eq!(Duration::from_secs(30), fe_opts.http.timeout);
|
||||
assert_eq!(Duration::from_secs(0), fe_opts.http.timeout);
|
||||
|
||||
assert_eq!(ReadableSize::gb(2), fe_opts.http.body_limit);
|
||||
|
||||
assert_eq!("debug", fe_opts.logging.level.as_ref().unwrap());
|
||||
assert_eq!("/tmp/greptimedb/test/logs".to_string(), fe_opts.logging.dir);
|
||||
assert_eq!(
|
||||
"./greptimedb_data/test/logs".to_string(),
|
||||
fe_opts.logging.dir
|
||||
);
|
||||
assert!(!fe_opts.opentsdb.enable);
|
||||
}
|
||||
|
||||
@@ -505,7 +520,7 @@ mod tests {
|
||||
|
||||
let options = cmd
|
||||
.load_options(&GlobalOptions {
|
||||
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
|
||||
log_dir: Some("./greptimedb_data/test/logs".to_string()),
|
||||
log_level: Some("debug".to_string()),
|
||||
|
||||
#[cfg(feature = "tokio-console")]
|
||||
@@ -515,7 +530,7 @@ mod tests {
|
||||
.component;
|
||||
|
||||
let logging_opt = options.logging;
|
||||
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
|
||||
assert_eq!("./greptimedb_data/test/logs", logging_opt.dir);
|
||||
assert_eq!("debug", logging_opt.level.as_ref().unwrap());
|
||||
}
|
||||
|
||||
|
||||
@@ -337,7 +337,7 @@ mod tests {
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
dir = "./greptimedb_data/test/logs"
|
||||
|
||||
[failure_detector]
|
||||
threshold = 8.0
|
||||
@@ -358,7 +358,10 @@ mod tests {
|
||||
assert_eq!(vec!["127.0.0.1:2379".to_string()], options.store_addrs);
|
||||
assert_eq!(SelectorType::LeaseBased, options.selector);
|
||||
assert_eq!("debug", options.logging.level.as_ref().unwrap());
|
||||
assert_eq!("/tmp/greptimedb/test/logs".to_string(), options.logging.dir);
|
||||
assert_eq!(
|
||||
"./greptimedb_data/test/logs".to_string(),
|
||||
options.logging.dir
|
||||
);
|
||||
assert_eq!(8.0, options.failure_detector.threshold);
|
||||
assert_eq!(
|
||||
100.0,
|
||||
@@ -396,7 +399,7 @@ mod tests {
|
||||
|
||||
let options = cmd
|
||||
.load_options(&GlobalOptions {
|
||||
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
|
||||
log_dir: Some("./greptimedb_data/test/logs".to_string()),
|
||||
log_level: Some("debug".to_string()),
|
||||
|
||||
#[cfg(feature = "tokio-console")]
|
||||
@@ -406,7 +409,7 @@ mod tests {
|
||||
.component;
|
||||
|
||||
let logging_opt = options.logging;
|
||||
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
|
||||
assert_eq!("./greptimedb_data/test/logs", logging_opt.dir);
|
||||
assert_eq!("debug", logging_opt.level.as_ref().unwrap());
|
||||
}
|
||||
|
||||
@@ -424,7 +427,7 @@ mod tests {
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
dir = "./greptimedb_data/test/logs"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
|
||||
@@ -42,6 +42,7 @@ use common_meta::kv_backend::KvBackendRef;
|
||||
use common_meta::node_manager::NodeManagerRef;
|
||||
use common_meta::peer::Peer;
|
||||
use common_meta::region_keeper::MemoryRegionKeeper;
|
||||
use common_meta::region_registry::LeaderRegionRegistry;
|
||||
use common_meta::sequence::SequenceBuilder;
|
||||
use common_meta::wal_options_allocator::{build_wal_options_allocator, WalOptionsAllocatorRef};
|
||||
use common_procedure::{ProcedureInfo, ProcedureManagerRef};
|
||||
@@ -55,9 +56,9 @@ use datanode::datanode::{Datanode, DatanodeBuilder};
|
||||
use datanode::region_server::RegionServer;
|
||||
use file_engine::config::EngineConfig as FileEngineConfig;
|
||||
use flow::{FlowConfig, FlowWorkerManager, FlownodeBuilder, FlownodeOptions, FrontendInvoker};
|
||||
use frontend::frontend::FrontendOptions;
|
||||
use frontend::frontend::{Frontend, FrontendOptions};
|
||||
use frontend::instance::builder::FrontendBuilder;
|
||||
use frontend::instance::{FrontendInstance, Instance as FeInstance, StandaloneDatanodeManager};
|
||||
use frontend::instance::{Instance as FeInstance, StandaloneDatanodeManager};
|
||||
use frontend::server::Services;
|
||||
use frontend::service_config::{
|
||||
InfluxdbOptions, JaegerOptions, MysqlOptions, OpentsdbOptions, PostgresOptions,
|
||||
@@ -67,7 +68,7 @@ use meta_srv::metasrv::{FLOW_ID_SEQ, TABLE_ID_SEQ};
|
||||
use mito2::config::MitoConfig;
|
||||
use query::stats::StatementStatistics;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use servers::export_metrics::ExportMetricsOption;
|
||||
use servers::export_metrics::{ExportMetricsOption, ExportMetricsTask};
|
||||
use servers::grpc::GrpcOptions;
|
||||
use servers::http::HttpOptions;
|
||||
use servers::tls::{TlsMode, TlsOption};
|
||||
@@ -76,15 +77,9 @@ use snafu::ResultExt;
|
||||
use tokio::sync::{broadcast, RwLock};
|
||||
use tracing_appender::non_blocking::WorkerGuard;
|
||||
|
||||
use crate::error::{
|
||||
BuildCacheRegistrySnafu, BuildWalOptionsAllocatorSnafu, CreateDirSnafu, IllegalConfigSnafu,
|
||||
InitDdlManagerSnafu, InitMetadataSnafu, InitTimezoneSnafu, LoadLayeredConfigSnafu, OtherSnafu,
|
||||
Result, ShutdownDatanodeSnafu, ShutdownFlownodeSnafu, ShutdownFrontendSnafu,
|
||||
StartDatanodeSnafu, StartFlownodeSnafu, StartFrontendSnafu, StartProcedureManagerSnafu,
|
||||
StartWalOptionsAllocatorSnafu, StopProcedureManagerSnafu,
|
||||
};
|
||||
use crate::error::Result;
|
||||
use crate::options::{GlobalOptions, GreptimeOptions};
|
||||
use crate::{log_versions, App};
|
||||
use crate::{error, log_versions, App};
|
||||
|
||||
pub const APP_NAME: &str = "greptime-standalone";
|
||||
|
||||
@@ -132,7 +127,6 @@ impl SubCommand {
|
||||
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(default)]
|
||||
pub struct StandaloneOptions {
|
||||
pub mode: Mode,
|
||||
pub enable_telemetry: bool,
|
||||
pub default_timezone: Option<String>,
|
||||
pub http: HttpOptions,
|
||||
@@ -162,7 +156,6 @@ pub struct StandaloneOptions {
|
||||
impl Default for StandaloneOptions {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
mode: Mode::Standalone,
|
||||
enable_telemetry: true,
|
||||
default_timezone: None,
|
||||
http: HttpOptions::default(),
|
||||
@@ -243,7 +236,6 @@ impl StandaloneOptions {
|
||||
grpc: cloned_opts.grpc,
|
||||
init_regions_in_background: cloned_opts.init_regions_in_background,
|
||||
init_regions_parallelism: cloned_opts.init_regions_parallelism,
|
||||
mode: Mode::Standalone,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
@@ -251,13 +243,12 @@ impl StandaloneOptions {
|
||||
|
||||
pub struct Instance {
|
||||
datanode: Datanode,
|
||||
frontend: FeInstance,
|
||||
frontend: Frontend,
|
||||
// TODO(discord9): wrapped it in flownode instance instead
|
||||
flow_worker_manager: Arc<FlowWorkerManager>,
|
||||
flow_shutdown: broadcast::Sender<()>,
|
||||
procedure_manager: ProcedureManagerRef,
|
||||
wal_options_allocator: WalOptionsAllocatorRef,
|
||||
|
||||
// Keep the logging guard to prevent the worker from being dropped.
|
||||
_guard: Vec<WorkerGuard>,
|
||||
}
|
||||
@@ -281,21 +272,26 @@ impl App for Instance {
|
||||
self.procedure_manager
|
||||
.start()
|
||||
.await
|
||||
.context(StartProcedureManagerSnafu)?;
|
||||
.context(error::StartProcedureManagerSnafu)?;
|
||||
|
||||
self.wal_options_allocator
|
||||
.start()
|
||||
.await
|
||||
.context(StartWalOptionsAllocatorSnafu)?;
|
||||
.context(error::StartWalOptionsAllocatorSnafu)?;
|
||||
|
||||
plugins::start_frontend_plugins(self.frontend.plugins().clone())
|
||||
plugins::start_frontend_plugins(self.frontend.instance.plugins().clone())
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
.context(error::StartFrontendSnafu)?;
|
||||
|
||||
self.frontend
|
||||
.start()
|
||||
.await
|
||||
.context(error::StartFrontendSnafu)?;
|
||||
|
||||
self.frontend.start().await.context(StartFrontendSnafu)?;
|
||||
self.flow_worker_manager
|
||||
.clone()
|
||||
.run_background(Some(self.flow_shutdown.subscribe()));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -303,17 +299,18 @@ impl App for Instance {
|
||||
self.frontend
|
||||
.shutdown()
|
||||
.await
|
||||
.context(ShutdownFrontendSnafu)?;
|
||||
.context(error::ShutdownFrontendSnafu)?;
|
||||
|
||||
self.procedure_manager
|
||||
.stop()
|
||||
.await
|
||||
.context(StopProcedureManagerSnafu)?;
|
||||
.context(error::StopProcedureManagerSnafu)?;
|
||||
|
||||
self.datanode
|
||||
.shutdown()
|
||||
.await
|
||||
.context(ShutdownDatanodeSnafu)?;
|
||||
.context(error::ShutdownDatanodeSnafu)?;
|
||||
|
||||
self.flow_shutdown
|
||||
.send(())
|
||||
.map_err(|_e| {
|
||||
@@ -322,7 +319,8 @@ impl App for Instance {
|
||||
}
|
||||
.build()
|
||||
})
|
||||
.context(ShutdownFlownodeSnafu)?;
|
||||
.context(error::ShutdownFlownodeSnafu)?;
|
||||
|
||||
info!("Datanode instance stopped.");
|
||||
|
||||
Ok(())
|
||||
@@ -368,7 +366,7 @@ impl StartCommand {
|
||||
self.config_file.as_deref(),
|
||||
self.env_prefix.as_ref(),
|
||||
)
|
||||
.context(LoadLayeredConfigSnafu)?;
|
||||
.context(error::LoadLayeredConfigSnafu)?;
|
||||
|
||||
self.merge_with_cli_options(global_options, &mut opts.component)?;
|
||||
|
||||
@@ -381,9 +379,6 @@ impl StartCommand {
|
||||
global_options: &GlobalOptions,
|
||||
opts: &mut StandaloneOptions,
|
||||
) -> Result<()> {
|
||||
// Should always be standalone mode.
|
||||
opts.mode = Mode::Standalone;
|
||||
|
||||
if let Some(dir) = &global_options.log_dir {
|
||||
opts.logging.dir.clone_from(dir);
|
||||
}
|
||||
@@ -415,7 +410,7 @@ impl StartCommand {
|
||||
// frontend grpc addr conflict with datanode default grpc addr
|
||||
let datanode_grpc_addr = DatanodeOptions::default().grpc.bind_addr;
|
||||
if addr.eq(&datanode_grpc_addr) {
|
||||
return IllegalConfigSnafu {
|
||||
return error::IllegalConfigSnafu {
|
||||
msg: format!(
|
||||
"gRPC listen address conflicts with datanode reserved gRPC addr: {datanode_grpc_addr}",
|
||||
),
|
||||
@@ -474,18 +469,19 @@ impl StartCommand {
|
||||
|
||||
plugins::setup_frontend_plugins(&mut plugins, &plugin_opts, &fe_opts)
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
.context(error::StartFrontendSnafu)?;
|
||||
|
||||
plugins::setup_datanode_plugins(&mut plugins, &plugin_opts, &dn_opts)
|
||||
.await
|
||||
.context(StartDatanodeSnafu)?;
|
||||
.context(error::StartDatanodeSnafu)?;
|
||||
|
||||
set_default_timezone(fe_opts.default_timezone.as_deref()).context(InitTimezoneSnafu)?;
|
||||
set_default_timezone(fe_opts.default_timezone.as_deref())
|
||||
.context(error::InitTimezoneSnafu)?;
|
||||
|
||||
let data_home = &dn_opts.storage.data_home;
|
||||
// Ensure the data_home directory exists.
|
||||
fs::create_dir_all(path::Path::new(data_home))
|
||||
.context(CreateDirSnafu { dir: data_home })?;
|
||||
.context(error::CreateDirSnafu { dir: data_home })?;
|
||||
|
||||
let metadata_dir = metadata_store_dir(data_home);
|
||||
let (kv_backend, procedure_manager) = FeInstance::try_build_standalone_components(
|
||||
@@ -494,7 +490,7 @@ impl StartCommand {
|
||||
opts.procedure,
|
||||
)
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
.context(error::StartFrontendSnafu)?;
|
||||
|
||||
// Builds cache registry
|
||||
let layered_cache_builder = LayeredCacheRegistryBuilder::default();
|
||||
@@ -503,16 +499,16 @@ impl StartCommand {
|
||||
with_default_composite_cache_registry(
|
||||
layered_cache_builder.add_cache_registry(fundamental_cache_registry),
|
||||
)
|
||||
.context(BuildCacheRegistrySnafu)?
|
||||
.context(error::BuildCacheRegistrySnafu)?
|
||||
.build(),
|
||||
);
|
||||
|
||||
let datanode = DatanodeBuilder::new(dn_opts, plugins.clone())
|
||||
let datanode = DatanodeBuilder::new(dn_opts, plugins.clone(), Mode::Standalone)
|
||||
.with_kv_backend(kv_backend.clone())
|
||||
.with_cache_registry(layered_cache_registry.clone())
|
||||
.build()
|
||||
.await
|
||||
.context(StartDatanodeSnafu)?;
|
||||
.context(error::StartDatanodeSnafu)?;
|
||||
|
||||
let information_extension = Arc::new(StandaloneInformationExtension::new(
|
||||
datanode.region_server(),
|
||||
@@ -545,7 +541,7 @@ impl StartCommand {
|
||||
.build()
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(OtherSnafu)?,
|
||||
.context(error::OtherSnafu)?,
|
||||
);
|
||||
|
||||
// set the ref to query for the local flow state
|
||||
@@ -576,7 +572,7 @@ impl StartCommand {
|
||||
let kafka_options = opts.wal.clone().into();
|
||||
let wal_options_allocator = build_wal_options_allocator(&kafka_options, kv_backend.clone())
|
||||
.await
|
||||
.context(BuildWalOptionsAllocatorSnafu)?;
|
||||
.context(error::BuildWalOptionsAllocatorSnafu)?;
|
||||
let wal_options_allocator = Arc::new(wal_options_allocator);
|
||||
let table_meta_allocator = Arc::new(TableMetadataAllocator::new(
|
||||
table_id_sequence,
|
||||
@@ -597,8 +593,8 @@ impl StartCommand {
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut frontend = FrontendBuilder::new(
|
||||
fe_opts,
|
||||
let fe_instance = FrontendBuilder::new(
|
||||
fe_opts.clone(),
|
||||
kv_backend.clone(),
|
||||
layered_cache_registry.clone(),
|
||||
catalog_manager.clone(),
|
||||
@@ -609,7 +605,8 @@ impl StartCommand {
|
||||
.with_plugin(plugins.clone())
|
||||
.try_build()
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
.context(error::StartFrontendSnafu)?;
|
||||
let fe_instance = Arc::new(fe_instance);
|
||||
|
||||
let flow_worker_manager = flownode.flow_worker_manager();
|
||||
// flow server need to be able to use frontend to write insert requests back
|
||||
@@ -622,18 +619,25 @@ impl StartCommand {
|
||||
node_manager,
|
||||
)
|
||||
.await
|
||||
.context(StartFlownodeSnafu)?;
|
||||
.context(error::StartFlownodeSnafu)?;
|
||||
flow_worker_manager.set_frontend_invoker(invoker).await;
|
||||
|
||||
let (tx, _rx) = broadcast::channel(1);
|
||||
|
||||
let servers = Services::new(opts, Arc::new(frontend.clone()), plugins)
|
||||
let export_metrics_task = ExportMetricsTask::try_new(&opts.export_metrics, Some(&plugins))
|
||||
.context(error::ServersSnafu)?;
|
||||
|
||||
let servers = Services::new(opts, fe_instance.clone(), plugins)
|
||||
.build()
|
||||
.await
|
||||
.context(StartFrontendSnafu)?;
|
||||
frontend
|
||||
.build_servers(servers)
|
||||
.context(StartFrontendSnafu)?;
|
||||
.context(error::StartFrontendSnafu)?;
|
||||
|
||||
let frontend = Frontend {
|
||||
instance: fe_instance,
|
||||
servers,
|
||||
heartbeat_task: None,
|
||||
export_metrics_task,
|
||||
};
|
||||
|
||||
Ok(Instance {
|
||||
datanode,
|
||||
@@ -661,6 +665,7 @@ impl StartCommand {
|
||||
node_manager,
|
||||
cache_invalidator,
|
||||
memory_region_keeper: Arc::new(MemoryRegionKeeper::default()),
|
||||
leader_region_registry: Arc::new(LeaderRegionRegistry::default()),
|
||||
table_metadata_manager,
|
||||
table_metadata_allocator,
|
||||
flow_metadata_manager,
|
||||
@@ -670,7 +675,7 @@ impl StartCommand {
|
||||
procedure_manager,
|
||||
true,
|
||||
)
|
||||
.context(InitDdlManagerSnafu)?,
|
||||
.context(error::InitDdlManagerSnafu)?,
|
||||
);
|
||||
|
||||
Ok(procedure_executor)
|
||||
@@ -684,7 +689,7 @@ impl StartCommand {
|
||||
table_metadata_manager
|
||||
.init()
|
||||
.await
|
||||
.context(InitMetadataSnafu)?;
|
||||
.context(error::InitMetadataSnafu)?;
|
||||
|
||||
Ok(table_metadata_manager)
|
||||
}
|
||||
@@ -778,6 +783,7 @@ impl InformationExtension for StandaloneInformationExtension {
|
||||
manifest_size: region_stat.manifest_size,
|
||||
sst_size: region_stat.sst_size,
|
||||
index_size: region_stat.index_size,
|
||||
region_manifest: region_stat.manifest.into(),
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
@@ -852,7 +858,7 @@ mod tests {
|
||||
|
||||
[wal]
|
||||
provider = "raft_engine"
|
||||
dir = "/tmp/greptimedb/test/wal"
|
||||
dir = "./greptimedb_data/test/wal"
|
||||
file_size = "1GB"
|
||||
purge_threshold = "50GB"
|
||||
purge_interval = "10m"
|
||||
@@ -860,7 +866,7 @@ mod tests {
|
||||
sync_write = false
|
||||
|
||||
[storage]
|
||||
data_home = "/tmp/greptimedb/"
|
||||
data_home = "./greptimedb_data/"
|
||||
type = "File"
|
||||
|
||||
[[storage.providers]]
|
||||
@@ -892,7 +898,7 @@ mod tests {
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
dir = "./greptimedb_data/test/logs"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
let cmd = StartCommand {
|
||||
@@ -922,7 +928,10 @@ mod tests {
|
||||
let DatanodeWalConfig::RaftEngine(raft_engine_config) = dn_opts.wal else {
|
||||
unreachable!()
|
||||
};
|
||||
assert_eq!("/tmp/greptimedb/test/wal", raft_engine_config.dir.unwrap());
|
||||
assert_eq!(
|
||||
"./greptimedb_data/test/wal",
|
||||
raft_engine_config.dir.unwrap()
|
||||
);
|
||||
|
||||
assert!(matches!(
|
||||
&dn_opts.storage.store,
|
||||
@@ -946,7 +955,7 @@ mod tests {
|
||||
}
|
||||
|
||||
assert_eq!("debug", logging_opts.level.as_ref().unwrap());
|
||||
assert_eq!("/tmp/greptimedb/test/logs".to_string(), logging_opts.dir);
|
||||
assert_eq!("./greptimedb_data/test/logs".to_string(), logging_opts.dir);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -958,7 +967,7 @@ mod tests {
|
||||
|
||||
let opts = cmd
|
||||
.load_options(&GlobalOptions {
|
||||
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
|
||||
log_dir: Some("./greptimedb_data/test/logs".to_string()),
|
||||
log_level: Some("debug".to_string()),
|
||||
|
||||
#[cfg(feature = "tokio-console")]
|
||||
@@ -967,7 +976,7 @@ mod tests {
|
||||
.unwrap()
|
||||
.component;
|
||||
|
||||
assert_eq!("/tmp/greptimedb/test/logs", opts.logging.dir);
|
||||
assert_eq!("./greptimedb_data/test/logs", opts.logging.dir);
|
||||
assert_eq!("debug", opts.logging.level.unwrap());
|
||||
}
|
||||
|
||||
@@ -1051,7 +1060,6 @@ mod tests {
|
||||
let options =
|
||||
StandaloneOptions::load_layered_options(None, "GREPTIMEDB_STANDALONE").unwrap();
|
||||
let default_options = StandaloneOptions::default();
|
||||
assert_eq!(options.mode, default_options.mode);
|
||||
assert_eq!(options.enable_telemetry, default_options.enable_telemetry);
|
||||
assert_eq!(options.http, default_options.http);
|
||||
assert_eq!(options.grpc, default_options.grpc);
|
||||
|
||||
@@ -56,13 +56,13 @@ fn test_load_datanode_example_config() {
|
||||
metadata_cache_tti: Duration::from_secs(300),
|
||||
}),
|
||||
wal: DatanodeWalConfig::RaftEngine(RaftEngineConfig {
|
||||
dir: Some("/tmp/greptimedb/wal".to_string()),
|
||||
dir: Some("./greptimedb_data/wal".to_string()),
|
||||
sync_period: Some(Duration::from_secs(10)),
|
||||
recovery_parallelism: 2,
|
||||
..Default::default()
|
||||
}),
|
||||
storage: StorageConfig {
|
||||
data_home: "/tmp/greptimedb/".to_string(),
|
||||
data_home: "./greptimedb_data/".to_string(),
|
||||
..Default::default()
|
||||
},
|
||||
region_engine: vec![
|
||||
@@ -159,17 +159,17 @@ fn test_load_metasrv_example_config() {
|
||||
let expected = GreptimeOptions::<MetasrvOptions> {
|
||||
component: MetasrvOptions {
|
||||
selector: SelectorType::default(),
|
||||
data_home: "/tmp/metasrv/".to_string(),
|
||||
data_home: "./greptimedb_data/metasrv/".to_string(),
|
||||
server_addr: "127.0.0.1:3002".to_string(),
|
||||
logging: LoggingOptions {
|
||||
dir: "/tmp/greptimedb/logs".to_string(),
|
||||
dir: "./greptimedb_data/logs".to_string(),
|
||||
level: Some("info".to_string()),
|
||||
otlp_endpoint: Some(DEFAULT_OTLP_ENDPOINT.to_string()),
|
||||
tracing_sample_ratio: Some(Default::default()),
|
||||
slow_query: SlowQueryOptions {
|
||||
enable: false,
|
||||
threshold: Some(Duration::from_secs(10)),
|
||||
sample_ratio: Some(1.0),
|
||||
threshold: None,
|
||||
sample_ratio: None,
|
||||
},
|
||||
..Default::default()
|
||||
},
|
||||
@@ -202,7 +202,7 @@ fn test_load_standalone_example_config() {
|
||||
component: StandaloneOptions {
|
||||
default_timezone: Some("UTC".to_string()),
|
||||
wal: DatanodeWalConfig::RaftEngine(RaftEngineConfig {
|
||||
dir: Some("/tmp/greptimedb/wal".to_string()),
|
||||
dir: Some("./greptimedb_data/wal".to_string()),
|
||||
sync_period: Some(Duration::from_secs(10)),
|
||||
recovery_parallelism: 2,
|
||||
..Default::default()
|
||||
@@ -219,7 +219,7 @@ fn test_load_standalone_example_config() {
|
||||
}),
|
||||
],
|
||||
storage: StorageConfig {
|
||||
data_home: "/tmp/greptimedb/".to_string(),
|
||||
data_home: "./greptimedb_data/".to_string(),
|
||||
..Default::default()
|
||||
},
|
||||
logging: LoggingOptions {
|
||||
|
||||
@@ -135,5 +135,14 @@ pub fn is_readonly_schema(schema: &str) -> bool {
|
||||
pub const TRACE_ID_COLUMN: &str = "trace_id";
|
||||
pub const SPAN_ID_COLUMN: &str = "span_id";
|
||||
pub const SPAN_NAME_COLUMN: &str = "span_name";
|
||||
pub const SERVICE_NAME_COLUMN: &str = "service_name";
|
||||
pub const PARENT_SPAN_ID_COLUMN: &str = "parent_span_id";
|
||||
pub const TRACE_TABLE_NAME: &str = "opentelemetry_traces";
|
||||
pub const TRACE_TABLE_NAME_SESSION_KEY: &str = "trace_table_name";
|
||||
// ---- End of special table and fields ----
|
||||
|
||||
/// Generate the trace services table name from the trace table name by adding `_services` suffix.
|
||||
pub fn trace_services_table_name(trace_table_name: &str) -> String {
|
||||
format!("{}_services", trace_table_name)
|
||||
}
|
||||
// ---- End of special table and fields ----
|
||||
|
||||
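For illustration, a minimal SQL sketch of the naming rule in action, assuming the default trace table name and that the derived services table has already been created by the trace ingestion path:

-- For the default trace table 'opentelemetry_traces', the derived name is
-- 'opentelemetry_traces_services'.
SELECT service_name FROM opentelemetry_traces_services;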
@@ -161,7 +161,7 @@ mod tests {
|
||||
|
||||
[wal]
|
||||
provider = "raft_engine"
|
||||
dir = "/tmp/greptimedb/wal"
|
||||
dir = "./greptimedb_data/wal"
|
||||
file_size = "1GB"
|
||||
purge_threshold = "50GB"
|
||||
purge_interval = "10m"
|
||||
@@ -170,7 +170,7 @@ mod tests {
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
dir = "./greptimedb_data/test/logs"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
@@ -246,7 +246,7 @@ mod tests {
|
||||
let DatanodeWalConfig::RaftEngine(raft_engine_config) = opts.wal else {
|
||||
unreachable!()
|
||||
};
|
||||
assert_eq!(raft_engine_config.dir.unwrap(), "/tmp/greptimedb/wal");
|
||||
assert_eq!(raft_engine_config.dir.unwrap(), "./greptimedb_data/wal");
|
||||
|
||||
// Should be default values.
|
||||
assert_eq!(opts.node_id, None);
|
||||
|
||||
@@ -39,6 +39,7 @@ geohash = { version = "0.13", optional = true }
|
||||
h3o = { version = "0.6", optional = true }
|
||||
hyperloglogplus = "0.4"
|
||||
jsonb.workspace = true
|
||||
memchr = "2.7"
|
||||
nalgebra.workspace = true
|
||||
num = "0.4"
|
||||
num-traits = "0.2"
|
||||
|
||||
@@ -12,15 +12,19 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod add_region_follower;
|
||||
mod flush_compact_region;
|
||||
mod flush_compact_table;
|
||||
mod migrate_region;
|
||||
mod remove_region_follower;
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use add_region_follower::AddRegionFollowerFunction;
|
||||
use flush_compact_region::{CompactRegionFunction, FlushRegionFunction};
|
||||
use flush_compact_table::{CompactTableFunction, FlushTableFunction};
|
||||
use migrate_region::MigrateRegionFunction;
|
||||
use remove_region_follower::RemoveRegionFollowerFunction;
|
||||
|
||||
use crate::flush_flow::FlushFlowFunction;
|
||||
use crate::function_registry::FunctionRegistry;
|
||||
@@ -32,6 +36,8 @@ impl AdminFunction {
|
||||
/// Register all table functions to [`FunctionRegistry`].
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register_async(Arc::new(MigrateRegionFunction));
|
||||
registry.register_async(Arc::new(AddRegionFollowerFunction));
|
||||
registry.register_async(Arc::new(RemoveRegionFollowerFunction));
|
||||
registry.register_async(Arc::new(FlushRegionFunction));
|
||||
registry.register_async(Arc::new(CompactRegionFunction));
|
||||
registry.register_async(Arc::new(FlushTableFunction));
|
||||
|
||||
129
src/common/function/src/admin/add_region_follower.rs
Normal file
@@ -0,0 +1,129 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_macro::admin_fn;
|
||||
use common_meta::rpc::procedure::AddRegionFollowerRequest;
|
||||
use common_query::error::{
|
||||
InvalidFuncArgsSnafu, MissingProcedureServiceHandlerSnafu, Result,
|
||||
UnsupportedInputDataTypeSnafu,
|
||||
};
|
||||
use common_query::prelude::{Signature, TypeSignature, Volatility};
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::value::{Value, ValueRef};
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::handlers::ProcedureServiceHandlerRef;
|
||||
use crate::helper::cast_u64;
|
||||
|
||||
/// A function to add a follower to a region.
|
||||
/// Only available in cluster mode.
|
||||
///
|
||||
/// - `add_region_follower(region_id, peer_id)`.
|
||||
///
|
||||
/// The parameters:
|
||||
/// - `region_id`: the region id
|
||||
/// - `peer_id`: the peer id
|
||||
#[admin_fn(
|
||||
name = AddRegionFollowerFunction,
|
||||
display_name = add_region_follower,
|
||||
sig_fn = signature,
|
||||
ret = uint64
|
||||
)]
|
||||
pub(crate) async fn add_region_follower(
|
||||
procedure_service_handler: &ProcedureServiceHandlerRef,
|
||||
_ctx: &QueryContextRef,
|
||||
params: &[ValueRef<'_>],
|
||||
) -> Result<Value> {
|
||||
ensure!(
|
||||
params.len() == 2,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly 2, have: {}",
|
||||
params.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
let Some(region_id) = cast_u64(¶ms[0])? else {
|
||||
return UnsupportedInputDataTypeSnafu {
|
||||
function: "add_region_follower",
|
||||
datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
|
||||
}
|
||||
.fail();
|
||||
};
|
||||
let Some(peer_id) = cast_u64(¶ms[1])? else {
|
||||
return UnsupportedInputDataTypeSnafu {
|
||||
function: "add_region_follower",
|
||||
datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
|
||||
}
|
||||
.fail();
|
||||
};
|
||||
|
||||
procedure_service_handler
|
||||
.add_region_follower(AddRegionFollowerRequest { region_id, peer_id })
|
||||
.await?;
|
||||
|
||||
Ok(Value::from(0u64))
|
||||
}
|
||||
|
||||
fn signature() -> Signature {
|
||||
Signature::one_of(
|
||||
vec![
|
||||
// add_region_follower(region_id, peer)
|
||||
TypeSignature::Uniform(2, ConcreteDataType::numerics()),
|
||||
],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::prelude::TypeSignature;
|
||||
use datatypes::vectors::{UInt64Vector, VectorRef};
|
||||
|
||||
use super::*;
|
||||
use crate::function::{AsyncFunction, FunctionContext};
|
||||
|
||||
#[test]
|
||||
fn test_add_region_follower_misc() {
|
||||
let f = AddRegionFollowerFunction;
|
||||
assert_eq!("add_region_follower", f.name());
|
||||
assert_eq!(
|
||||
ConcreteDataType::uint64_datatype(),
|
||||
f.return_type(&[]).unwrap()
|
||||
);
|
||||
assert!(matches!(f.signature(),
|
||||
Signature {
|
||||
type_signature: TypeSignature::OneOf(sigs),
|
||||
volatility: Volatility::Immutable
|
||||
} if sigs.len() == 1));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_add_region_follower() {
|
||||
let f = AddRegionFollowerFunction;
|
||||
let args = vec![1, 1];
|
||||
let args = args
|
||||
.into_iter()
|
||||
.map(|arg| Arc::new(UInt64Vector::from_slice([arg])) as _)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
|
||||
let expect: VectorRef = Arc::new(UInt64Vector::from_slice([0u64]));
|
||||
assert_eq!(result, expect);
|
||||
}
|
||||
}
|
||||
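A usage sketch for the new admin function, assuming it is invoked through the same ADMIN statement used by the other registered admin functions; the region id and peer id below are placeholder values:

-- Add peer 2 as a follower of region 4398046511104 (example ids).
ADMIN add_region_follower(4398046511104, 2);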
129
src/common/function/src/admin/remove_region_follower.rs
Normal file
@@ -0,0 +1,129 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_macro::admin_fn;
|
||||
use common_meta::rpc::procedure::RemoveRegionFollowerRequest;
|
||||
use common_query::error::{
|
||||
InvalidFuncArgsSnafu, MissingProcedureServiceHandlerSnafu, Result,
|
||||
UnsupportedInputDataTypeSnafu,
|
||||
};
|
||||
use common_query::prelude::{Signature, TypeSignature, Volatility};
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::value::{Value, ValueRef};
|
||||
use session::context::QueryContextRef;
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::handlers::ProcedureServiceHandlerRef;
|
||||
use crate::helper::cast_u64;
|
||||
|
||||
/// A function to remove a follower from a region.
|
||||
/// Only available in cluster mode.
|
||||
///
|
||||
/// - `remove_region_follower(region_id, peer_id)`.
|
||||
///
|
||||
/// The parameters:
|
||||
/// - `region_id`: the region id
|
||||
/// - `peer_id`: the peer id
|
||||
#[admin_fn(
|
||||
name = RemoveRegionFollowerFunction,
|
||||
display_name = remove_region_follower,
|
||||
sig_fn = signature,
|
||||
ret = uint64
|
||||
)]
|
||||
pub(crate) async fn remove_region_follower(
|
||||
procedure_service_handler: &ProcedureServiceHandlerRef,
|
||||
_ctx: &QueryContextRef,
|
||||
params: &[ValueRef<'_>],
|
||||
) -> Result<Value> {
|
||||
ensure!(
|
||||
params.len() == 2,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly 2, have: {}",
|
||||
params.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
let Some(region_id) = cast_u64(¶ms[0])? else {
|
||||
return UnsupportedInputDataTypeSnafu {
|
||||
function: "add_region_follower",
|
||||
datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
|
||||
}
|
||||
.fail();
|
||||
};
|
||||
let Some(peer_id) = cast_u64(¶ms[1])? else {
|
||||
return UnsupportedInputDataTypeSnafu {
|
||||
function: "add_region_follower",
|
||||
datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
|
||||
}
|
||||
.fail();
|
||||
};
|
||||
|
||||
procedure_service_handler
|
||||
.remove_region_follower(RemoveRegionFollowerRequest { region_id, peer_id })
|
||||
.await?;
|
||||
|
||||
Ok(Value::from(0u64))
|
||||
}
|
||||
|
||||
fn signature() -> Signature {
|
||||
Signature::one_of(
|
||||
vec![
|
||||
// remove_region_follower(region_id, peer_id)
|
||||
TypeSignature::Uniform(2, ConcreteDataType::numerics()),
|
||||
],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::prelude::TypeSignature;
|
||||
use datatypes::vectors::{UInt64Vector, VectorRef};
|
||||
|
||||
use super::*;
|
||||
use crate::function::{AsyncFunction, FunctionContext};
|
||||
|
||||
#[test]
|
||||
fn test_remove_region_follower_misc() {
|
||||
let f = RemoveRegionFollowerFunction;
|
||||
assert_eq!("remove_region_follower", f.name());
|
||||
assert_eq!(
|
||||
ConcreteDataType::uint64_datatype(),
|
||||
f.return_type(&[]).unwrap()
|
||||
);
|
||||
assert!(matches!(f.signature(),
|
||||
Signature {
|
||||
type_signature: TypeSignature::OneOf(sigs),
|
||||
volatility: Volatility::Immutable
|
||||
} if sigs.len() == 1));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_remove_region_follower() {
|
||||
let f = RemoveRegionFollowerFunction;
|
||||
let args = vec![1, 1];
|
||||
let args = args
|
||||
.into_iter()
|
||||
.map(|arg| Arc::new(UInt64Vector::from_slice([arg])) as _)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
|
||||
let expect: VectorRef = Arc::new(UInt64Vector::from_slice([0u64]));
|
||||
assert_eq!(result, expect);
|
||||
}
|
||||
}
|
||||
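The counterpart removal function follows the same calling convention; again the ADMIN statement syntax and the ids below are assumptions:

-- Remove peer 2 as a follower of region 4398046511104 (example ids).
ADMIN remove_region_follower(4398046511104, 2);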
@@ -27,6 +27,7 @@ use crate::scalars::hll_count::HllCalcFunction;
|
||||
use crate::scalars::ip::IpFunctions;
|
||||
use crate::scalars::json::JsonFunction;
|
||||
use crate::scalars::matches::MatchesFunction;
|
||||
use crate::scalars::matches_term::MatchesTermFunction;
|
||||
use crate::scalars::math::MathFunction;
|
||||
use crate::scalars::timestamp::TimestampFunction;
|
||||
use crate::scalars::uddsketch_calc::UddSketchCalcFunction;
|
||||
@@ -116,6 +117,7 @@ pub static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| {
|
||||
|
||||
// Full text search function
|
||||
MatchesFunction::register(&function_registry);
|
||||
MatchesTermFunction::register(&function_registry);
|
||||
|
||||
// System and administration functions
|
||||
SystemFunction::register(&function_registry);
|
||||
|
||||
@@ -16,7 +16,10 @@ use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use common_base::AffectedRows;
|
||||
use common_meta::rpc::procedure::{MigrateRegionRequest, ProcedureStateResponse};
|
||||
use common_meta::rpc::procedure::{
|
||||
AddRegionFollowerRequest, MigrateRegionRequest, ProcedureStateResponse,
|
||||
RemoveRegionFollowerRequest,
|
||||
};
|
||||
use common_query::error::Result;
|
||||
use common_query::Output;
|
||||
use session::context::QueryContextRef;
|
||||
@@ -63,6 +66,12 @@ pub trait ProcedureServiceHandler: Send + Sync {
|
||||
|
||||
/// Query the procedure' state by its id
|
||||
async fn query_procedure_state(&self, pid: &str) -> Result<ProcedureStateResponse>;
|
||||
|
||||
/// Add a region follower to a region.
|
||||
async fn add_region_follower(&self, request: AddRegionFollowerRequest) -> Result<()>;
|
||||
|
||||
/// Remove a region follower from a region.
|
||||
async fn remove_region_follower(&self, request: RemoveRegionFollowerRequest) -> Result<()>;
|
||||
}
|
||||
|
||||
/// This flow service handler is only use for flush flow for now.
|
||||
|
||||
@@ -19,6 +19,7 @@ pub mod expression;
|
||||
pub mod geo;
|
||||
pub mod json;
|
||||
pub mod matches;
|
||||
pub mod matches_term;
|
||||
pub mod math;
|
||||
pub mod vector;
|
||||
|
||||
|
||||
375
src/common/function/src/scalars/matches_term.rs
Normal file
@@ -0,0 +1,375 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::{fmt, iter};
|
||||
|
||||
use common_query::error::{InvalidFuncArgsSnafu, Result};
|
||||
use common_query::prelude::Volatility;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::vectors::{BooleanVector, BooleanVectorBuilder, MutableVector, VectorRef};
|
||||
use memchr::memmem;
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::function_registry::FunctionRegistry;
|
||||
|
||||
/// Exact term/phrase matching function for text columns.
|
||||
///
|
||||
/// This function checks if a text column contains exact term/phrase matches
|
||||
/// with non-alphanumeric boundaries. Designed for:
|
||||
/// - Whole-word matching (e.g. "cat" in "cat!" but not in "category")
|
||||
/// - Phrase matching (e.g. "hello world" in "note:hello world!")
|
||||
///
|
||||
/// # Signature
|
||||
/// `matches_term(text: String, term: String) -> Boolean`
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `text` - String column to search
|
||||
/// * `term` - Search term/phrase
|
||||
///
|
||||
/// # Returns
|
||||
/// BooleanVector where each element indicates if the corresponding text
|
||||
/// contains an exact match of the term, following these rules:
|
||||
/// 1. Exact substring match found (case-sensitive)
|
||||
/// 2. Match boundaries are either:
|
||||
/// - Start/end of text
|
||||
/// - Any non-alphanumeric character (including spaces, hyphens, punctuation, etc.)
|
||||
///
|
||||
/// # Examples
|
||||
/// ```
|
||||
/// -- SQL examples --
|
||||
/// -- Match phrase with space --
|
||||
/// SELECT matches_term(column, 'hello world') FROM table;
|
||||
/// -- Text: "warning:hello world!" => true
|
||||
/// -- Text: "hello-world" => false (hyphen instead of space)
|
||||
/// -- Text: "hello world2023" => false (ending with numbers)
|
||||
///
|
||||
/// -- Match multiple words with boundaries --
|
||||
/// SELECT matches_term(column, 'critical error') FROM logs;
|
||||
/// -- Match in: "ERROR:critical error!"
|
||||
/// -- No match: "critical_errors"
|
||||
///
|
||||
/// -- Empty string handling --
|
||||
/// SELECT matches_term(column, '') FROM table;
|
||||
/// -- Text: "" => true
|
||||
/// -- Text: "any" => false
|
||||
///
|
||||
/// -- Case sensitivity --
|
||||
/// SELECT matches_term(column, 'Cat') FROM table;
|
||||
/// -- Text: "Cat" => true
|
||||
/// -- Text: "cat" => false
|
||||
/// ```
|
||||
pub struct MatchesTermFunction;
|
||||
|
||||
impl MatchesTermFunction {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register(Arc::new(MatchesTermFunction));
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for MatchesTermFunction {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "MATCHES_TERM")
|
||||
}
|
||||
}
|
||||
|
||||
impl Function for MatchesTermFunction {
|
||||
fn name(&self) -> &str {
|
||||
"matches_term"
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::boolean_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> common_query::prelude::Signature {
|
||||
common_query::prelude::Signature::exact(
|
||||
vec![
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::string_datatype(),
|
||||
],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: &FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 2,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly 2, have: {}",
|
||||
columns.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
let text_column = &columns[0];
|
||||
if text_column.is_empty() {
|
||||
return Ok(Arc::new(BooleanVector::from(Vec::<bool>::with_capacity(0))));
|
||||
}
|
||||
|
||||
let term_column = &columns[1];
|
||||
let compiled_finder = if term_column.is_const() {
|
||||
let term = term_column.get_ref(0).as_string().unwrap();
|
||||
match term {
|
||||
None => {
|
||||
return Ok(Arc::new(BooleanVector::from_iter(
|
||||
iter::repeat(None).take(text_column.len()),
|
||||
)));
|
||||
}
|
||||
Some(term) => Some(MatchesTermFinder::new(term)),
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let len = text_column.len();
|
||||
let mut result = BooleanVectorBuilder::with_capacity(len);
|
||||
for i in 0..len {
|
||||
let text = text_column.get_ref(i).as_string().unwrap();
|
||||
let Some(text) = text else {
|
||||
result.push_null();
|
||||
continue;
|
||||
};
|
||||
|
||||
let contains = match &compiled_finder {
|
||||
Some(finder) => finder.find(text),
|
||||
None => {
|
||||
let term = match term_column.get_ref(i).as_string().unwrap() {
|
||||
None => {
|
||||
result.push_null();
|
||||
continue;
|
||||
}
|
||||
Some(term) => term,
|
||||
};
|
||||
MatchesTermFinder::new(term).find(text)
|
||||
}
|
||||
};
|
||||
result.push(Some(contains));
|
||||
}
|
||||
|
||||
Ok(result.to_vector())
|
||||
}
|
||||
}
|
||||
|
||||
/// A compiled finder for `matches_term` function that holds the compiled term
|
||||
/// and its metadata for efficient matching.
|
||||
///
|
||||
/// A term is considered matched when:
|
||||
/// 1. The exact sequence appears in the text
|
||||
/// 2. It is either:
|
||||
/// - At the start/end of text with adjacent non-alphanumeric character
|
||||
/// - Surrounded by non-alphanumeric characters
|
||||
///
|
||||
/// # Examples
|
||||
/// ```
|
||||
/// let finder = MatchesTermFinder::new("cat");
|
||||
/// assert!(finder.find("cat!")); // Term at end with punctuation
|
||||
/// assert!(finder.find("dog,cat")); // Term preceded by comma
|
||||
/// assert!(!finder.find("category")); // Partial match rejected
|
||||
///
|
||||
/// let finder = MatchesTermFinder::new("world");
|
||||
/// assert!(finder.find("hello-world")); // Hyphen boundary
|
||||
/// ```
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct MatchesTermFinder {
|
||||
finder: memmem::Finder<'static>,
|
||||
term: String,
|
||||
starts_with_non_alnum: bool,
|
||||
ends_with_non_alnum: bool,
|
||||
}
|
||||
|
||||
impl MatchesTermFinder {
|
||||
/// Create a new `MatchesTermFinder` for the given term.
|
||||
pub fn new(term: &str) -> Self {
|
||||
let starts_with_non_alnum = term.chars().next().is_some_and(|c| !c.is_alphanumeric());
|
||||
let ends_with_non_alnum = term.chars().last().is_some_and(|c| !c.is_alphanumeric());
|
||||
|
||||
Self {
|
||||
finder: memmem::Finder::new(term).into_owned(),
|
||||
term: term.to_string(),
|
||||
starts_with_non_alnum,
|
||||
ends_with_non_alnum,
|
||||
}
|
||||
}
|
||||
|
||||
/// Find the term in the text.
|
||||
pub fn find(&self, text: &str) -> bool {
|
||||
if self.term.is_empty() {
|
||||
return text.is_empty();
|
||||
}
|
||||
|
||||
if text.len() < self.term.len() {
|
||||
return false;
|
||||
}
|
||||
|
||||
let mut pos = 0;
|
||||
while let Some(found_pos) = self.finder.find(text[pos..].as_bytes()) {
|
||||
let actual_pos = pos + found_pos;
|
||||
|
||||
let prev_ok = self.starts_with_non_alnum
|
||||
|| text[..actual_pos]
|
||||
.chars()
|
||||
.last()
|
||||
.map(|c| !c.is_alphanumeric())
|
||||
.unwrap_or(true);
|
||||
|
||||
if prev_ok {
|
||||
let next_pos = actual_pos + self.finder.needle().len();
|
||||
let next_ok = self.ends_with_non_alnum
|
||||
|| text[next_pos..]
|
||||
.chars()
|
||||
.next()
|
||||
.map(|c| !c.is_alphanumeric())
|
||||
.unwrap_or(true);
|
||||
|
||||
if next_ok {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(next_char) = text[actual_pos..].chars().next() {
|
||||
pos = actual_pos + next_char.len_utf8();
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn matches_term_example() {
|
||||
let finder = MatchesTermFinder::new("hello world");
|
||||
assert!(finder.find("warning:hello world!"));
|
||||
assert!(!finder.find("hello-world"));
|
||||
assert!(!finder.find("hello world2023"));
|
||||
|
||||
let finder = MatchesTermFinder::new("critical error");
|
||||
assert!(finder.find("ERROR:critical error!"));
|
||||
assert!(!finder.find("critical_errors"));
|
||||
|
||||
let finder = MatchesTermFinder::new("");
|
||||
assert!(finder.find(""));
|
||||
assert!(!finder.find("any"));
|
||||
|
||||
let finder = MatchesTermFinder::new("Cat");
|
||||
assert!(finder.find("Cat"));
|
||||
assert!(!finder.find("cat"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn matches_term_with_punctuation() {
|
||||
assert!(MatchesTermFinder::new("cat").find("cat!"));
|
||||
assert!(MatchesTermFinder::new("dog").find("!dog"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn matches_phrase_with_boundaries() {
|
||||
assert!(MatchesTermFinder::new("hello-world").find("hello-world"));
|
||||
assert!(MatchesTermFinder::new("'foo bar'").find("test: 'foo bar'"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn matches_at_text_boundaries() {
|
||||
assert!(MatchesTermFinder::new("start").find("start..."));
|
||||
assert!(MatchesTermFinder::new("end").find("...end"));
|
||||
}
|
||||
|
||||
// Negative cases
|
||||
#[test]
|
||||
fn rejects_partial_matches() {
|
||||
assert!(!MatchesTermFinder::new("cat").find("category"));
|
||||
assert!(!MatchesTermFinder::new("boot").find("rebooted"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn rejects_missing_term() {
|
||||
assert!(!MatchesTermFinder::new("foo").find("hello world"));
|
||||
}
|
||||
|
||||
// Edge cases
|
||||
#[test]
|
||||
fn handles_empty_inputs() {
|
||||
assert!(!MatchesTermFinder::new("test").find(""));
|
||||
assert!(!MatchesTermFinder::new("").find("text"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn different_unicode_boundaries() {
|
||||
assert!(MatchesTermFinder::new("café").find("café>"));
|
||||
assert!(!MatchesTermFinder::new("café").find("口café>"));
|
||||
assert!(!MatchesTermFinder::new("café").find("café口"));
|
||||
assert!(!MatchesTermFinder::new("café").find("cafémore"));
|
||||
assert!(MatchesTermFinder::new("русский").find("русский!"));
|
||||
assert!(MatchesTermFinder::new("русский").find("русский!"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn case_sensitive_matching() {
|
||||
assert!(!MatchesTermFinder::new("cat").find("Cat"));
|
||||
assert!(MatchesTermFinder::new("CaT").find("CaT"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn numbers_in_term() {
|
||||
assert!(MatchesTermFinder::new("v1.0").find("v1.0!"));
|
||||
assert!(!MatchesTermFinder::new("v1.0").find("v1.0a"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn adjacent_alphanumeric_fails() {
|
||||
assert!(!MatchesTermFinder::new("cat").find("cat5"));
|
||||
assert!(!MatchesTermFinder::new("dog").find("dogcat"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn empty_term_text() {
|
||||
assert!(!MatchesTermFinder::new("").find("text"));
|
||||
assert!(MatchesTermFinder::new("").find(""));
|
||||
assert!(!MatchesTermFinder::new("text").find(""));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn leading_non_alphanumeric() {
|
||||
assert!(MatchesTermFinder::new("/cat").find("dog/cat"));
|
||||
assert!(MatchesTermFinder::new("dog/").find("dog/cat"));
|
||||
assert!(MatchesTermFinder::new("dog/cat").find("dog/cat"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn continues_searching_after_boundary_mismatch() {
|
||||
assert!(!MatchesTermFinder::new("log").find("bloglog!"));
|
||||
assert!(MatchesTermFinder::new("log").find("bloglog log"));
|
||||
assert!(MatchesTermFinder::new("log").find("alogblog_log!"));
|
||||
|
||||
assert!(MatchesTermFinder::new("error").find("errorlog_error_case"));
|
||||
assert!(MatchesTermFinder::new("test").find("atestbtestc_test_end"));
|
||||
assert!(MatchesTermFinder::new("data").find("database_data_store"));
|
||||
assert!(!MatchesTermFinder::new("data").find("database_datastore"));
|
||||
assert!(MatchesTermFinder::new("log.txt").find("catalog.txt_log.txt!"));
|
||||
assert!(!MatchesTermFinder::new("log.txt").find("catalog.txtlog.txt!"));
|
||||
assert!(MatchesTermFinder::new("data-set").find("bigdata-set_data-set!"));
|
||||
|
||||
assert!(MatchesTermFinder::new("中文").find("这是中文测试,中文!"));
|
||||
assert!(MatchesTermFinder::new("error").find("错误errorerror日志_error!"));
|
||||
}
|
||||
}
|
||||
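Beyond the unit tests above, a hedged sketch of how matches_term would typically be applied as a row filter; the table and column names are hypothetical:

-- Keep rows whose message contains the whole term 'error' (case-sensitive,
-- with non-alphanumeric boundaries on both sides).
SELECT * FROM app_logs WHERE matches_term(message, 'error');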
@@ -24,9 +24,11 @@ pub(crate) mod sum;
|
||||
mod vector_add;
|
||||
mod vector_dim;
|
||||
mod vector_div;
|
||||
mod vector_kth_elem;
|
||||
mod vector_mul;
|
||||
mod vector_norm;
|
||||
mod vector_sub;
|
||||
mod vector_subvector;
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
@@ -56,6 +58,8 @@ impl VectorFunction {
|
||||
registry.register(Arc::new(vector_div::VectorDivFunction));
|
||||
registry.register(Arc::new(vector_norm::VectorNormFunction));
|
||||
registry.register(Arc::new(vector_dim::VectorDimFunction));
|
||||
registry.register(Arc::new(vector_kth_elem::VectorKthElemFunction));
|
||||
registry.register(Arc::new(vector_subvector::VectorSubvectorFunction));
|
||||
registry.register(Arc::new(elem_sum::ElemSumFunction));
|
||||
registry.register(Arc::new(elem_product::ElemProductFunction));
|
||||
}
|
||||
|
||||
211
src/common/function/src/scalars/vector/vector_kth_elem.rs
Normal file
@@ -0,0 +1,211 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::fmt::Display;
|
||||
|
||||
use common_query::error::{InvalidFuncArgsSnafu, Result};
|
||||
use common_query::prelude::Signature;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::vectors::{Float32VectorBuilder, MutableVector, VectorRef};
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::helper;
|
||||
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const};
|
||||
|
||||
const NAME: &str = "vec_kth_elem";
|
||||
|
||||
/// Returns the k-th (0-based index) element of the vector.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```sql
|
||||
/// SELECT vec_kth_elem("[2, 4, 6]",1) as result;
|
||||
///
|
||||
/// +---------+
|
||||
/// | result |
|
||||
/// +---------+
|
||||
/// | 4 |
|
||||
/// +---------+
|
||||
///
|
||||
/// ```
|
||||
///
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct VectorKthElemFunction;
|
||||
|
||||
impl Function for VectorKthElemFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(
|
||||
&self,
|
||||
_input_types: &[ConcreteDataType],
|
||||
) -> common_query::error::Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::float32_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
helper::one_of_sigs2(
|
||||
vec![
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::binary_datatype(),
|
||||
],
|
||||
vec![ConcreteDataType::int64_datatype()],
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: &FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 2,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly two, have: {}",
|
||||
columns.len()
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
let arg0 = &columns[0];
|
||||
let arg1 = &columns[1];
|
||||
|
||||
let len = arg0.len();
|
||||
let mut result = Float32VectorBuilder::with_capacity(len);
|
||||
if len == 0 {
|
||||
return Ok(result.to_vector());
|
||||
};
|
||||
|
||||
let arg0_const = as_veclit_if_const(arg0)?;
|
||||
|
||||
for i in 0..len {
|
||||
let arg0 = match arg0_const.as_ref() {
|
||||
Some(arg0) => Some(Cow::Borrowed(arg0.as_ref())),
|
||||
None => as_veclit(arg0.get_ref(i))?,
|
||||
};
|
||||
let Some(arg0) = arg0 else {
|
||||
result.push_null();
|
||||
continue;
|
||||
};
|
||||
|
||||
let arg1 = arg1.get(i).as_f64_lossy();
|
||||
let Some(arg1) = arg1 else {
|
||||
result.push_null();
|
||||
continue;
|
||||
};
|
||||
|
||||
ensure!(
|
||||
arg1 >= 0.0 && arg1.fract() == 0.0,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"Invalid argument: k must be a non-negative integer, but got k = {}.",
|
||||
arg1
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
let k = arg1 as usize;
|
||||
|
||||
ensure!(
|
||||
k < arg0.len(),
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"Out of range: k must be in the range [0, {}], but got k = {}.",
|
||||
arg0.len() - 1,
|
||||
k
|
||||
),
|
||||
}
|
||||
);
|
||||
|
||||
let value = arg0[k];
|
||||
|
||||
result.push(Some(value));
|
||||
}
|
||||
Ok(result.to_vector())
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for VectorKthElemFunction {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", NAME.to_ascii_uppercase())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error;
|
||||
use datatypes::vectors::{Int64Vector, StringVector};
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_vec_kth_elem() {
|
||||
let func = VectorKthElemFunction;
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0,2.0,3.0]".to_string()),
|
||||
Some("[4.0,5.0,6.0]".to_string()),
|
||||
Some("[7.0,8.0,9.0]".to_string()),
|
||||
None,
|
||||
]));
|
||||
let input1 = Arc::new(Int64Vector::from(vec![Some(0), Some(2), None, Some(1)]));
|
||||
|
||||
let result = func
|
||||
.eval(&FunctionContext::default(), &[input0, input1])
|
||||
.unwrap();
|
||||
|
||||
let result = result.as_ref();
|
||||
assert_eq!(result.len(), 4);
|
||||
assert_eq!(result.get_ref(0).as_f32().unwrap(), Some(1.0));
|
||||
assert_eq!(result.get_ref(1).as_f32().unwrap(), Some(6.0));
|
||||
assert!(result.get_ref(2).is_null());
|
||||
assert!(result.get_ref(3).is_null());
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![Some("[1.0,2.0,3.0]".to_string())]));
|
||||
let input1 = Arc::new(Int64Vector::from(vec![Some(3)]));
|
||||
|
||||
let err = func
|
||||
.eval(&FunctionContext::default(), &[input0, input1])
|
||||
.unwrap_err();
|
||||
match err {
|
||||
error::Error::InvalidFuncArgs { err_msg, .. } => {
|
||||
assert_eq!(
|
||||
err_msg,
|
||||
format!("Out of range: k must be in the range [0, 2], but got k = 3.")
|
||||
)
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![Some("[1.0,2.0,3.0]".to_string())]));
|
||||
let input1 = Arc::new(Int64Vector::from(vec![Some(-1)]));
|
||||
|
||||
let err = func
|
||||
.eval(&FunctionContext::default(), &[input0, input1])
|
||||
.unwrap_err();
|
||||
match err {
|
||||
error::Error::InvalidFuncArgs { err_msg, .. } => {
|
||||
assert_eq!(
|
||||
err_msg,
|
||||
format!("Invalid argument: k must be a non-negative integer, but got k = -1.")
|
||||
)
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
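A short sketch of calling the new function on a column rather than a literal (hypothetical table and column; k must be a non-negative integer smaller than the vector length, otherwise the query fails as exercised in the tests above):

SELECT vec_kth_elem(embedding, 0) AS first_elem FROM docs;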
240
src/common/function/src/scalars/vector/vector_subvector.rs
Normal file
@@ -0,0 +1,240 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::fmt::Display;
|
||||
|
||||
use common_query::error::{InvalidFuncArgsSnafu, Result};
|
||||
use common_query::prelude::{Signature, TypeSignature};
|
||||
use datafusion_expr::Volatility;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::scalars::ScalarVectorBuilder;
|
||||
use datatypes::vectors::{BinaryVectorBuilder, MutableVector, VectorRef};
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const, veclit_to_binlit};
|
||||
|
||||
const NAME: &str = "vec_subvector";
|
||||
|
||||
/// Returns a subvector from start (included) to end (excluded) index.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```sql
|
||||
/// SELECT vec_to_string(vec_subvector("[1, 2, 3, 4, 5]", 1, 3)) as result;
|
||||
///
|
||||
/// +---------+
|
||||
/// | result |
|
||||
/// +---------+
|
||||
/// | [2, 3] |
|
||||
/// +---------+
|
||||
///
|
||||
/// ```
|
||||
///
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct VectorSubvectorFunction;
|
||||
|
||||
impl Function for VectorSubvectorFunction {
|
||||
fn name(&self) -> &str {
|
||||
NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::binary_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
Signature::one_of(
|
||||
vec![
|
||||
TypeSignature::Exact(vec![
|
||||
ConcreteDataType::string_datatype(),
|
||||
ConcreteDataType::int64_datatype(),
|
||||
ConcreteDataType::int64_datatype(),
|
||||
]),
|
||||
TypeSignature::Exact(vec![
|
||||
ConcreteDataType::binary_datatype(),
|
||||
ConcreteDataType::int64_datatype(),
|
||||
ConcreteDataType::int64_datatype(),
|
||||
]),
|
||||
],
|
||||
Volatility::Immutable,
|
||||
)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: &FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
ensure!(
|
||||
columns.len() == 3,
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The length of the args is not correct, expect exactly three, have: {}",
|
||||
columns.len()
|
||||
)
|
||||
}
|
||||
);
|
||||
|
||||
let arg0 = &columns[0];
|
||||
let arg1 = &columns[1];
|
||||
let arg2 = &columns[2];
|
||||
|
||||
ensure!(
|
||||
arg0.len() == arg1.len() && arg1.len() == arg2.len(),
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"The lengths of the vector are not aligned, args 0: {}, args 1: {}, args 2: {}",
|
||||
arg0.len(),
|
||||
arg1.len(),
|
||||
arg2.len()
|
||||
)
|
||||
}
|
||||
);
|
||||
|
||||
let len = arg0.len();
|
||||
let mut result = BinaryVectorBuilder::with_capacity(len);
|
||||
if len == 0 {
|
||||
return Ok(result.to_vector());
|
||||
}
|
||||
|
||||
let arg0_const = as_veclit_if_const(arg0)?;
|
||||
|
||||
for i in 0..len {
|
||||
let arg0 = match arg0_const.as_ref() {
|
||||
Some(arg0) => Some(Cow::Borrowed(arg0.as_ref())),
|
||||
None => as_veclit(arg0.get_ref(i))?,
|
||||
};
|
||||
let arg1 = arg1.get(i).as_i64();
|
||||
let arg2 = arg2.get(i).as_i64();
|
||||
let (Some(arg0), Some(arg1), Some(arg2)) = (arg0, arg1, arg2) else {
|
||||
result.push_null();
|
||||
continue;
|
||||
};
|
||||
|
||||
ensure!(
|
||||
0 <= arg1 && arg1 <= arg2 && arg2 as usize <= arg0.len(),
|
||||
InvalidFuncArgsSnafu {
|
||||
err_msg: format!(
|
||||
"Invalid start and end indices: start={}, end={}, vec_len={}",
|
||||
arg1,
|
||||
arg2,
|
||||
arg0.len()
|
||||
)
|
||||
}
|
||||
);
|
||||
|
||||
let subvector = &arg0[arg1 as usize..arg2 as usize];
|
||||
let binlit = veclit_to_binlit(subvector);
|
||||
result.push(Some(&binlit));
|
||||
}
|
||||
|
||||
Ok(result.to_vector())
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for VectorSubvectorFunction {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", NAME.to_ascii_uppercase())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error::Error;
|
||||
use datatypes::vectors::{Int64Vector, StringVector};
|
||||
|
||||
use super::*;
|
||||
use crate::function::FunctionContext;
|
||||
#[test]
|
||||
fn test_subvector() {
|
||||
let func = VectorSubvectorFunction;
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0, 2.0, 3.0, 4.0, 5.0]".to_string()),
|
||||
Some("[6.0, 7.0, 8.0, 9.0, 10.0]".to_string()),
|
||||
None,
|
||||
Some("[11.0, 12.0, 13.0]".to_string()),
|
||||
]));
|
||||
let input1 = Arc::new(Int64Vector::from(vec![Some(1), Some(0), Some(0), Some(1)]));
|
||||
let input2 = Arc::new(Int64Vector::from(vec![Some(3), Some(5), Some(2), Some(3)]));
|
||||
|
||||
let result = func
|
||||
.eval(&FunctionContext::default(), &[input0, input1, input2])
|
||||
.unwrap();
|
||||
|
||||
let result = result.as_ref();
|
||||
assert_eq!(result.len(), 4);
|
||||
assert_eq!(
|
||||
result.get_ref(0).as_binary().unwrap(),
|
||||
Some(veclit_to_binlit(&[2.0, 3.0]).as_slice())
|
||||
);
|
||||
assert_eq!(
|
||||
result.get_ref(1).as_binary().unwrap(),
|
||||
Some(veclit_to_binlit(&[6.0, 7.0, 8.0, 9.0, 10.0]).as_slice())
|
||||
);
|
||||
assert!(result.get_ref(2).is_null());
|
||||
assert_eq!(
|
||||
result.get_ref(3).as_binary().unwrap(),
|
||||
Some(veclit_to_binlit(&[12.0, 13.0]).as_slice())
|
||||
);
|
||||
}
|
||||
#[test]
|
||||
fn test_subvector_error() {
|
||||
let func = VectorSubvectorFunction;
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0, 2.0, 3.0]".to_string()),
|
||||
Some("[4.0, 5.0, 6.0]".to_string()),
|
||||
]));
|
||||
let input1 = Arc::new(Int64Vector::from(vec![Some(1), Some(2)]));
|
||||
let input2 = Arc::new(Int64Vector::from(vec![Some(3)]));
|
||||
|
||||
let result = func.eval(&FunctionContext::default(), &[input0, input1, input2]);
|
||||
|
||||
match result {
|
||||
Err(Error::InvalidFuncArgs { err_msg, .. }) => {
|
||||
assert_eq!(
|
||||
err_msg,
|
||||
"The lengths of the vector are not aligned, args 0: 2, args 1: 2, args 2: 1"
|
||||
)
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_subvector_invalid_indices() {
|
||||
let func = VectorSubvectorFunction;
|
||||
|
||||
let input0 = Arc::new(StringVector::from(vec![
|
||||
Some("[1.0, 2.0, 3.0]".to_string()),
|
||||
Some("[4.0, 5.0, 6.0]".to_string()),
|
||||
]));
|
||||
let input1 = Arc::new(Int64Vector::from(vec![Some(1), Some(3)]));
|
||||
let input2 = Arc::new(Int64Vector::from(vec![Some(3), Some(4)]));
|
||||
|
||||
let result = func.eval(&FunctionContext::default(), &[input0, input1, input2]);
|
||||
|
||||
match result {
|
||||
Err(Error::InvalidFuncArgs { err_msg, .. }) => {
|
||||
assert_eq!(
|
||||
err_msg,
|
||||
"Invalid start and end indices: start=3, end=4, vec_len=3"
|
||||
)
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
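A sketch chaining the two new vector functions, which works because vec_subvector returns the binary vector encoding that vec_kth_elem also accepts; the table and column are hypothetical:

-- Slice elements [1, 3) of each embedding, then read the first element of the slice.
SELECT vec_kth_elem(vec_subvector(embedding, 1, 3), 0) FROM docs;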
@@ -35,7 +35,10 @@ impl FunctionState {
|
||||
use api::v1::meta::ProcedureStatus;
|
||||
use async_trait::async_trait;
|
||||
use common_base::AffectedRows;
|
||||
use common_meta::rpc::procedure::{MigrateRegionRequest, ProcedureStateResponse};
|
||||
use common_meta::rpc::procedure::{
|
||||
AddRegionFollowerRequest, MigrateRegionRequest, ProcedureStateResponse,
|
||||
RemoveRegionFollowerRequest,
|
||||
};
|
||||
use common_query::error::Result;
|
||||
use common_query::Output;
|
||||
use session::context::QueryContextRef;
|
||||
@@ -66,6 +69,17 @@ impl FunctionState {
|
||||
..Default::default()
|
||||
})
|
||||
}
|
||||
|
||||
async fn add_region_follower(&self, _request: AddRegionFollowerRequest) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn remove_region_follower(
|
||||
&self,
|
||||
_request: RemoveRegionFollowerRequest,
|
||||
) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
|
||||
@@ -22,7 +22,9 @@ mod version;
|
||||
use std::sync::Arc;
|
||||
|
||||
use build::BuildFunction;
|
||||
use database::{CurrentSchemaFunction, DatabaseFunction, SessionUserFunction};
|
||||
use database::{
|
||||
CurrentSchemaFunction, DatabaseFunction, ReadPreferenceFunction, SessionUserFunction,
|
||||
};
|
||||
use pg_catalog::PGCatalogFunction;
|
||||
use procedure_state::ProcedureStateFunction;
|
||||
use timezone::TimezoneFunction;
|
||||
@@ -39,6 +41,7 @@ impl SystemFunction {
|
||||
registry.register(Arc::new(CurrentSchemaFunction));
|
||||
registry.register(Arc::new(DatabaseFunction));
|
||||
registry.register(Arc::new(SessionUserFunction));
|
||||
registry.register(Arc::new(ReadPreferenceFunction));
|
||||
registry.register(Arc::new(TimezoneFunction));
|
||||
registry.register_async(Arc::new(ProcedureStateFunction));
|
||||
PGCatalogFunction::register(registry);
|
||||
|
||||
@@ -30,9 +30,12 @@ pub struct DatabaseFunction;
|
||||
pub struct CurrentSchemaFunction;
|
||||
pub struct SessionUserFunction;
|
||||
|
||||
pub struct ReadPreferenceFunction;
|
||||
|
||||
const DATABASE_FUNCTION_NAME: &str = "database";
|
||||
const CURRENT_SCHEMA_FUNCTION_NAME: &str = "current_schema";
|
||||
const SESSION_USER_FUNCTION_NAME: &str = "session_user";
|
||||
const READ_PREFERENCE_FUNCTION_NAME: &str = "read_preference";
|
||||
|
||||
impl Function for DatabaseFunction {
|
||||
fn name(&self) -> &str {
|
||||
@@ -94,6 +97,26 @@ impl Function for SessionUserFunction {
|
||||
}
|
||||
}
|
||||
|
||||
impl Function for ReadPreferenceFunction {
|
||||
fn name(&self) -> &str {
|
||||
READ_PREFERENCE_FUNCTION_NAME
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::string_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
Signature::nullary(Volatility::Immutable)
|
||||
}
|
||||
|
||||
fn eval(&self, func_ctx: &FunctionContext, _columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
let read_preference = func_ctx.query_ctx.read_preference();
|
||||
|
||||
Ok(Arc::new(StringVector::from_slice(&[read_preference.as_ref()])) as _)
|
||||
}
|
||||
}
|
||||
|
||||
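A minimal usage sketch for the new nullary function; the returned value depends on the session's configured read preference:

SELECT read_preference();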
impl fmt::Display for DatabaseFunction {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "DATABASE")
|
||||
@@ -112,6 +135,12 @@ impl fmt::Display for SessionUserFunction {
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for ReadPreferenceFunction {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "READ_PREFERENCE")
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
@@ -15,11 +15,13 @@
|
||||
use api::helper::ColumnDataTypeWrapper;
|
||||
use api::v1::add_column_location::LocationType;
|
||||
use api::v1::alter_table_expr::Kind;
|
||||
use api::v1::column_def::{as_fulltext_option, as_skipping_index_type};
|
||||
use api::v1::column_def::{
|
||||
as_fulltext_option_analyzer, as_fulltext_option_backend, as_skipping_index_type,
|
||||
};
|
||||
use api::v1::{
|
||||
column_def, AddColumnLocation as Location, AlterTableExpr, Analyzer, CreateTableExpr,
|
||||
DropColumns, ModifyColumnTypes, RenameTable, SemanticType,
|
||||
SkippingIndexType as PbSkippingIndexType,
|
||||
DropColumns, FulltextBackend as PbFulltextBackend, ModifyColumnTypes, RenameTable,
|
||||
SemanticType, SkippingIndexType as PbSkippingIndexType,
|
||||
};
|
||||
use common_query::AddColumnLocation;
|
||||
use datatypes::schema::{ColumnSchema, FulltextOptions, RawSchema, SkippingIndexOptions};
|
||||
@@ -126,11 +128,15 @@ pub fn alter_expr_to_request(table_id: TableId, expr: AlterTableExpr) -> Result<
|
||||
column_name: f.column_name.clone(),
|
||||
options: FulltextOptions {
|
||||
enable: f.enable,
|
||||
analyzer: as_fulltext_option(
|
||||
analyzer: as_fulltext_option_analyzer(
|
||||
Analyzer::try_from(f.analyzer)
|
||||
.context(InvalidSetFulltextOptionRequestSnafu)?,
|
||||
),
|
||||
case_sensitive: f.case_sensitive,
|
||||
backend: as_fulltext_option_backend(
|
||||
PbFulltextBackend::try_from(f.backend)
|
||||
.context(InvalidSetFulltextOptionRequestSnafu)?,
|
||||
),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
@@ -25,7 +25,7 @@ async fn do_bench_channel_manager() {
|
||||
let m_clone = m.clone();
|
||||
let join = tokio::spawn(async move {
|
||||
for _ in 0..10000 {
|
||||
let idx = rand::random::<usize>() % 100;
|
||||
let idx = rand::random::<u32>() % 100;
|
||||
let ret = m_clone.get(format!("{idx}"));
|
||||
let _ = ret.unwrap();
|
||||
}
|
||||
|
||||
@@ -27,6 +27,7 @@ use crate::error::{
|
||||
DecodeJsonSnafu, EncodeJsonSnafu, Error, FromUtf8Snafu, InvalidNodeInfoKeySnafu,
|
||||
InvalidRoleSnafu, ParseNumSnafu, Result,
|
||||
};
|
||||
use crate::key::flow::flow_state::FlowStat;
|
||||
use crate::peer::Peer;
|
||||
|
||||
const CLUSTER_NODE_INFO_PREFIX: &str = "__meta_cluster_node_info";
|
||||
@@ -52,6 +53,9 @@ pub trait ClusterInfo {
|
||||
/// List all region stats in the cluster.
|
||||
async fn list_region_stats(&self) -> std::result::Result<Vec<RegionStat>, Self::Error>;
|
||||
|
||||
/// List all flow stats in the cluster.
|
||||
async fn list_flow_stats(&self) -> std::result::Result<Option<FlowStat>, Self::Error>;
|
||||
|
||||
// TODO(jeremy): Other info, like region status, etc.
|
||||
}
|
||||
|
||||
|
||||
@@ -92,6 +92,22 @@ pub struct RegionStat {
|
||||
pub sst_size: u64,
|
||||
/// The size of the SST index files in bytes.
|
||||
pub index_size: u64,
|
||||
/// The manifest info of the region.
|
||||
pub region_manifest: RegionManifestInfo,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
|
||||
pub enum RegionManifestInfo {
|
||||
Mito {
|
||||
manifest_version: u64,
|
||||
flushed_entry_id: u64,
|
||||
},
|
||||
Metric {
|
||||
data_manifest_version: u64,
|
||||
data_flushed_entry_id: u64,
|
||||
metadata_manifest_version: u64,
|
||||
metadata_flushed_entry_id: u64,
|
||||
},
|
||||
}
|
||||
|
||||
impl Stat {
|
||||
@@ -165,6 +181,31 @@ impl TryFrom<&HeartbeatRequest> for Stat {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<store_api::region_engine::RegionManifestInfo> for RegionManifestInfo {
|
||||
fn from(value: store_api::region_engine::RegionManifestInfo) -> Self {
|
||||
match value {
|
||||
store_api::region_engine::RegionManifestInfo::Mito {
|
||||
manifest_version,
|
||||
flushed_entry_id,
|
||||
} => RegionManifestInfo::Mito {
|
||||
manifest_version,
|
||||
flushed_entry_id,
|
||||
},
|
||||
store_api::region_engine::RegionManifestInfo::Metric {
|
||||
data_manifest_version,
|
||||
data_flushed_entry_id,
|
||||
metadata_manifest_version,
|
||||
metadata_flushed_entry_id,
|
||||
} => RegionManifestInfo::Metric {
|
||||
data_manifest_version,
|
||||
data_flushed_entry_id,
|
||||
metadata_manifest_version,
|
||||
metadata_flushed_entry_id,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&api::v1::meta::RegionStat> for RegionStat {
|
||||
fn from(value: &api::v1::meta::RegionStat) -> Self {
|
||||
let region_stat = value
|
||||
@@ -185,6 +226,7 @@ impl From<&api::v1::meta::RegionStat> for RegionStat {
|
||||
manifest_size: region_stat.manifest_size,
|
||||
sst_size: region_stat.sst_size,
|
||||
index_size: region_stat.index_size,
|
||||
region_manifest: region_stat.manifest.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,14 +22,18 @@ use store_api::storage::{RegionId, RegionNumber, TableId};
|
||||
use crate::cache_invalidator::CacheInvalidatorRef;
|
||||
use crate::ddl::flow_meta::FlowMetadataAllocatorRef;
|
||||
use crate::ddl::table_meta::TableMetadataAllocatorRef;
|
||||
use crate::error::Result;
|
||||
use crate::error::{Result, UnsupportedSnafu};
|
||||
use crate::key::flow::FlowMetadataManagerRef;
|
||||
use crate::key::table_route::PhysicalTableRouteValue;
|
||||
use crate::key::TableMetadataManagerRef;
|
||||
use crate::node_manager::NodeManagerRef;
|
||||
use crate::region_keeper::MemoryRegionKeeperRef;
|
||||
use crate::region_registry::LeaderRegionRegistryRef;
|
||||
use crate::rpc::ddl::{SubmitDdlTaskRequest, SubmitDdlTaskResponse};
|
||||
use crate::rpc::procedure::{MigrateRegionRequest, MigrateRegionResponse, ProcedureStateResponse};
|
||||
use crate::rpc::procedure::{
|
||||
AddRegionFollowerRequest, MigrateRegionRequest, MigrateRegionResponse, ProcedureStateResponse,
|
||||
RemoveRegionFollowerRequest,
|
||||
};
|
||||
use crate::DatanodeId;
|
||||
|
||||
pub mod alter_database;
|
||||
@@ -70,6 +74,30 @@ pub trait ProcedureExecutor: Send + Sync {
|
||||
request: SubmitDdlTaskRequest,
|
||||
) -> Result<SubmitDdlTaskResponse>;
|
||||
|
||||
/// Add a region follower
|
||||
async fn add_region_follower(
|
||||
&self,
|
||||
_ctx: &ExecutorContext,
|
||||
_request: AddRegionFollowerRequest,
|
||||
) -> Result<()> {
|
||||
UnsupportedSnafu {
|
||||
operation: "add_region_follower",
|
||||
}
|
||||
.fail()
|
||||
}
|
||||
|
||||
/// Remove a region follower
|
||||
async fn remove_region_follower(
|
||||
&self,
|
||||
_ctx: &ExecutorContext,
|
||||
_request: RemoveRegionFollowerRequest,
|
||||
) -> Result<()> {
|
||||
UnsupportedSnafu {
|
||||
operation: "remove_region_follower",
|
||||
}
|
||||
.fail()
|
||||
}
|
||||
|
||||
/// Submit a region migration task
|
||||
async fn migrate_region(
|
||||
&self,
|
||||
@@ -137,6 +165,8 @@ pub struct DdlContext {
|
||||
pub cache_invalidator: CacheInvalidatorRef,
|
||||
/// Keep tracking operating regions.
|
||||
pub memory_region_keeper: MemoryRegionKeeperRef,
|
||||
/// The leader region registry.
|
||||
pub leader_region_registry: LeaderRegionRegistryRef,
|
||||
/// Table metadata manager.
|
||||
pub table_metadata_manager: TableMetadataManagerRef,
|
||||
/// Allocator for table metadata.
|
||||
|
||||
@@ -22,30 +22,31 @@ use std::vec;
|
||||
use api::v1::alter_table_expr::Kind;
|
||||
use api::v1::RenameTable;
|
||||
use async_trait::async_trait;
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_procedure::error::{FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu};
|
||||
use common_procedure::{
|
||||
Context as ProcedureContext, Error as ProcedureError, LockKey, Procedure, Status, StringKey,
|
||||
Context as ProcedureContext, ContextProvider, Error as ProcedureError, LockKey, PoisonKey,
|
||||
PoisonKeys, Procedure, ProcedureId, Status, StringKey,
|
||||
};
|
||||
use common_telemetry::{debug, error, info};
|
||||
use futures::future;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::ResultExt;
|
||||
use snafu::{ensure, ResultExt};
|
||||
use store_api::storage::RegionId;
|
||||
use strum::AsRefStr;
|
||||
use table::metadata::{RawTableInfo, TableId, TableInfo};
|
||||
use table::table_reference::TableReference;
|
||||
|
||||
use crate::cache_invalidator::Context;
|
||||
use crate::ddl::utils::add_peer_context_if_needed;
|
||||
use crate::ddl::utils::{add_peer_context_if_needed, handle_multiple_results, MultipleResults};
|
||||
use crate::ddl::DdlContext;
|
||||
use crate::error::{Error, Result};
|
||||
use crate::error::{AbortProcedureSnafu, Error, NoLeaderSnafu, PutPoisonSnafu, Result};
|
||||
use crate::instruction::CacheIdent;
|
||||
use crate::key::table_info::TableInfoValue;
|
||||
use crate::key::{DeserializedValueWithBytes, RegionDistribution};
|
||||
use crate::lock_key::{CatalogLock, SchemaLock, TableLock, TableNameLock};
|
||||
use crate::metrics;
|
||||
use crate::poison_key::table_poison_key;
|
||||
use crate::rpc::ddl::AlterTableTask;
|
||||
use crate::rpc::router::{find_leader_regions, find_leaders, region_distribution};
|
||||
|
||||
@@ -104,7 +105,27 @@ impl AlterTableProcedure {
|
||||
Ok(Status::executing(true))
|
||||
}
|
||||
|
||||
pub async fn submit_alter_region_requests(&mut self) -> Result<Status> {
|
||||
fn table_poison_key(&self) -> PoisonKey {
|
||||
table_poison_key(self.data.table_id())
|
||||
}
|
||||
|
||||
async fn put_poison(
|
||||
&self,
|
||||
ctx_provider: &dyn ContextProvider,
|
||||
procedure_id: ProcedureId,
|
||||
) -> Result<()> {
|
||||
let poison_key = self.table_poison_key();
|
||||
ctx_provider
|
||||
.try_put_poison(&poison_key, procedure_id)
|
||||
.await
|
||||
.context(PutPoisonSnafu)
|
||||
}
|
||||
|
||||
pub async fn submit_alter_region_requests(
|
||||
&mut self,
|
||||
procedure_id: ProcedureId,
|
||||
ctx_provider: &dyn ContextProvider,
|
||||
) -> Result<Status> {
|
||||
let table_id = self.data.table_id();
|
||||
let (_, physical_table_route) = self
|
||||
.context
|
||||
@@ -127,6 +148,9 @@ impl AlterTableProcedure {
|
||||
alter_kind,
|
||||
);
|
||||
|
||||
ensure!(!leaders.is_empty(), NoLeaderSnafu { table_id });
|
||||
// Puts the poison before submitting alter region requests to datanodes.
|
||||
self.put_poison(ctx_provider, procedure_id).await?;
|
||||
for datanode in leaders {
|
||||
let requester = self.context.node_manager.datanode(&datanode).await;
|
||||
let regions = find_leader_regions(&physical_table_route.region_routes, &datanode);
|
||||
@@ -140,28 +164,51 @@ impl AlterTableProcedure {
|
||||
let requester = requester.clone();
|
||||
|
||||
alter_region_tasks.push(async move {
|
||||
if let Err(err) = requester.handle(request).await {
|
||||
if err.status_code() != StatusCode::RequestOutdated {
|
||||
// Treat request outdated as success.
|
||||
// The engine will throw this code when the schema version not match.
|
||||
// As this procedure has locked the table, the only reason for this error
|
||||
// is procedure is succeeded before and is retrying.
|
||||
return Err(add_peer_context_if_needed(datanode)(err));
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
requester
|
||||
.handle(request)
|
||||
.await
|
||||
.map_err(add_peer_context_if_needed(datanode))
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
future::join_all(alter_region_tasks)
|
||||
let results = future::join_all(alter_region_tasks)
|
||||
.await
|
||||
.into_iter()
|
||||
.collect::<Result<Vec<_>>>()?;
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
self.data.state = AlterTableState::UpdateMetadata;
|
||||
match handle_multiple_results(results) {
|
||||
MultipleResults::PartialRetryable(error) => {
|
||||
// Just returns the error and waits for the next retry.
|
||||
Err(error)
|
||||
}
|
||||
MultipleResults::PartialNonRetryable(error) => {
|
||||
error!(error; "Partial non-retryable errors occurred during alter table, table {}, table_id: {}", self.data.table_ref(), self.data.table_id());
|
||||
// No retry will be done.
|
||||
Ok(Status::poisoned(
|
||||
Some(self.table_poison_key()),
|
||||
ProcedureError::external(error),
|
||||
))
|
||||
}
|
||||
MultipleResults::AllRetryable(error) => {
|
||||
// Just returns the error and waits for the next retry.
|
||||
Err(error)
|
||||
}
|
||||
MultipleResults::Ok => {
|
||||
self.data.state = AlterTableState::UpdateMetadata;
|
||||
Ok(Status::executing_with_clean_poisons(true))
|
||||
}
|
||||
MultipleResults::AllNonRetryable(error) => {
|
||||
error!(error; "All alter requests returned non-retryable errors for table {}, table_id: {}", self.data.table_ref(), self.data.table_id());
|
||||
// It assumes the metadata on the datanode is unchanged.
|
||||
// Case: The alter region request is sent but not applied. (e.g., InvalidArgument)
|
||||
|
||||
Ok(Status::executing(true))
|
||||
let err = BoxedError::new(error);
|
||||
Err(err).context(AbortProcedureSnafu {
|
||||
clean_poisons: true,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Update table metadata.
|
||||
@@ -250,10 +297,12 @@ impl Procedure for AlterTableProcedure {
|
||||
Self::TYPE_NAME
|
||||
}
|
||||
|
||||
async fn execute(&mut self, _ctx: &ProcedureContext) -> ProcedureResult<Status> {
|
||||
async fn execute(&mut self, ctx: &ProcedureContext) -> ProcedureResult<Status> {
|
||||
let error_handler = |e: Error| {
|
||||
if e.is_retry_later() {
|
||||
ProcedureError::retry_later(e)
|
||||
} else if e.need_clean_poisons() {
|
||||
ProcedureError::external_and_clean_poisons(e)
|
||||
} else {
|
||||
ProcedureError::external(e)
|
||||
}
|
||||
@@ -269,7 +318,10 @@ impl Procedure for AlterTableProcedure {
|
||||
|
||||
match state {
|
||||
AlterTableState::Prepare => self.on_prepare().await,
|
||||
AlterTableState::SubmitAlterRegionRequests => self.submit_alter_region_requests().await,
|
||||
AlterTableState::SubmitAlterRegionRequests => {
|
||||
self.submit_alter_region_requests(ctx.procedure_id, ctx.provider.as_ref())
|
||||
.await
|
||||
}
|
||||
AlterTableState::UpdateMetadata => self.on_update_metadata().await,
|
||||
AlterTableState::InvalidateTableCache => self.on_broadcast().await,
|
||||
}
|
||||
@@ -285,6 +337,10 @@ impl Procedure for AlterTableProcedure {
|
||||
|
||||
LockKey::new(key)
|
||||
}
|
||||
|
||||
fn poison_keys(&self) -> PoisonKeys {
|
||||
PoisonKeys::new(vec![self.table_poison_key()])
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, AsRefStr)]
|
||||
|
||||
@@ -299,7 +299,9 @@ impl Procedure for CreateTableProcedure {
|
||||
.creator
|
||||
.register_opening_regions(&self.context, &x.region_routes)
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)?;
|
||||
.context(ExternalSnafu {
|
||||
clean_poisons: false,
|
||||
})?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
|
||||
@@ -130,7 +130,9 @@ impl Procedure for DropDatabaseProcedure {
|
||||
self.state
|
||||
.recover(&self.runtime_context)
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)
|
||||
.context(ExternalSnafu {
|
||||
clean_poisons: false,
|
||||
})
|
||||
}
|
||||
|
||||
async fn execute(&mut self, _ctx: &ProcedureContext) -> ProcedureResult<Status> {
|
||||
|
||||
@@ -200,7 +200,9 @@ impl Procedure for DropTableProcedure {
|
||||
if register_operating_regions {
|
||||
self.register_dropping_regions()
|
||||
.map_err(BoxedError::new)
|
||||
.context(ExternalSnafu)?;
|
||||
.context(ExternalSnafu {
|
||||
clean_poisons: false,
|
||||
})?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
|
||||
@@ -35,7 +35,9 @@ use crate::error::{self, Result};
|
||||
use crate::instruction::CacheIdent;
|
||||
use crate::key::table_name::TableNameKey;
|
||||
use crate::key::table_route::TableRouteValue;
|
||||
use crate::rpc::router::{find_leader_regions, find_leaders, RegionRoute};
|
||||
use crate::rpc::router::{
|
||||
find_leader_regions, find_leaders, operating_leader_regions, RegionRoute,
|
||||
};
|
||||
|
||||
/// [Control] indicated to the caller whether to go to the next step.
|
||||
#[derive(Debug)]
|
||||
@@ -250,6 +252,11 @@ impl DropTableExecutor {
|
||||
.into_iter()
|
||||
.collect::<Result<Vec<_>>>()?;
|
||||
|
||||
// Deletes the leader region from registry.
|
||||
let region_ids = operating_leader_regions(region_routes);
|
||||
ctx.leader_region_registry
|
||||
.batch_delete(region_ids.into_iter().map(|(region_id, _)| region_id));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -98,13 +98,14 @@ impl TableMetadataAllocator {
|
||||
fn create_wal_options(
|
||||
&self,
|
||||
table_route: &PhysicalTableRouteValue,
|
||||
skip_wal: bool,
|
||||
) -> Result<HashMap<RegionNumber, String>> {
|
||||
let region_numbers = table_route
|
||||
.region_routes
|
||||
.iter()
|
||||
.map(|route| route.region.id.region_number())
|
||||
.collect();
|
||||
allocate_region_wal_options(region_numbers, &self.wal_options_allocator)
|
||||
allocate_region_wal_options(region_numbers, &self.wal_options_allocator, skip_wal)
|
||||
}
|
||||
|
||||
async fn create_table_route(
|
||||
@@ -158,7 +159,9 @@ impl TableMetadataAllocator {
|
||||
pub async fn create(&self, task: &CreateTableTask) -> Result<TableMetadata> {
|
||||
let table_id = self.allocate_table_id(&task.create_table.table_id).await?;
|
||||
let table_route = self.create_table_route(table_id, task).await?;
|
||||
let region_wal_options = self.create_wal_options(&table_route)?;
|
||||
|
||||
let region_wal_options =
|
||||
self.create_wal_options(&table_route, task.table_info.meta.options.skip_wal)?;
|
||||
|
||||
debug!(
|
||||
"Allocated region wal options {:?} for table {}",
|
||||
|
||||
@@ -80,7 +80,13 @@ pub async fn create_logical_table(
|
||||
let tasks = vec![test_create_logical_table_task(table_name)];
|
||||
let mut procedure = CreateLogicalTablesProcedure::new(tasks, physical_table_id, ddl_context);
|
||||
let status = procedure.on_prepare().await.unwrap();
|
||||
assert_matches!(status, Status::Executing { persist: true });
|
||||
assert_matches!(
|
||||
status,
|
||||
Status::Executing {
|
||||
persist: true,
|
||||
clean_poisons: false
|
||||
}
|
||||
);
|
||||
let status = procedure.on_create_metadata().await.unwrap();
|
||||
assert_matches!(status, Status::Done { .. });
|
||||
|
||||
|
||||
@@ -171,3 +171,74 @@ impl MockDatanodeHandler for NaiveDatanodeHandler {
|
||||
unreachable!()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct PartialSuccessDatanodeHandler {
|
||||
pub retryable: bool,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl MockDatanodeHandler for PartialSuccessDatanodeHandler {
|
||||
async fn handle(&self, peer: &Peer, _request: RegionRequest) -> Result<RegionResponse> {
|
||||
let success = peer.id % 2 == 0;
|
||||
if success {
|
||||
Ok(RegionResponse::new(0))
|
||||
} else if self.retryable {
|
||||
Err(Error::RetryLater {
|
||||
source: BoxedError::new(
|
||||
error::UnexpectedSnafu {
|
||||
err_msg: "retry later",
|
||||
}
|
||||
.build(),
|
||||
),
|
||||
})
|
||||
} else {
|
||||
error::UnexpectedSnafu {
|
||||
err_msg: "mock error",
|
||||
}
|
||||
.fail()
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle_query(
|
||||
&self,
|
||||
_peer: &Peer,
|
||||
_request: QueryRequest,
|
||||
) -> Result<SendableRecordBatchStream> {
|
||||
unreachable!()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct AllFailureDatanodeHandler {
|
||||
pub retryable: bool,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl MockDatanodeHandler for AllFailureDatanodeHandler {
|
||||
async fn handle(&self, _peer: &Peer, _request: RegionRequest) -> Result<RegionResponse> {
|
||||
if self.retryable {
|
||||
Err(Error::RetryLater {
|
||||
source: BoxedError::new(
|
||||
error::UnexpectedSnafu {
|
||||
err_msg: "retry later",
|
||||
}
|
||||
.build(),
|
||||
),
|
||||
})
|
||||
} else {
|
||||
error::UnexpectedSnafu {
|
||||
err_msg: "mock error",
|
||||
}
|
||||
.fail()
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle_query(
|
||||
&self,
|
||||
_peer: &Peer,
|
||||
_request: QueryRequest,
|
||||
) -> Result<SendableRecordBatchStream> {
|
||||
unreachable!()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -180,7 +180,13 @@ async fn test_on_prepare() {
|
||||
|
||||
let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context);
|
||||
let result = procedure.on_prepare().await;
|
||||
assert_matches!(result, Ok(Status::Executing { persist: true }));
|
||||
assert_matches!(
|
||||
result,
|
||||
Ok(Status::Executing {
|
||||
persist: true,
|
||||
clean_poisons: false
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -205,7 +211,13 @@ async fn test_on_update_metadata() {
|
||||
|
||||
let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context);
|
||||
let mut status = procedure.on_prepare().await.unwrap();
|
||||
assert_matches!(status, Status::Executing { persist: true });
|
||||
assert_matches!(
|
||||
status,
|
||||
Status::Executing {
|
||||
persist: true,
|
||||
clean_poisons: false
|
||||
}
|
||||
);
|
||||
|
||||
let ctx = common_procedure::Context {
|
||||
procedure_id: ProcedureId::random(),
|
||||
@@ -213,10 +225,22 @@ async fn test_on_update_metadata() {
|
||||
};
|
||||
// on_submit_alter_region_requests
|
||||
status = procedure.execute(&ctx).await.unwrap();
|
||||
assert_matches!(status, Status::Executing { persist: true });
|
||||
assert_matches!(
|
||||
status,
|
||||
Status::Executing {
|
||||
persist: true,
|
||||
clean_poisons: false
|
||||
}
|
||||
);
|
||||
// on_update_metadata
|
||||
status = procedure.execute(&ctx).await.unwrap();
|
||||
assert_matches!(status, Status::Executing { persist: true });
|
||||
assert_matches!(
|
||||
status,
|
||||
Status::Executing {
|
||||
persist: true,
|
||||
clean_poisons: false
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -237,7 +261,13 @@ async fn test_on_part_duplicate_alter_request() {
|
||||
|
||||
let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context.clone());
|
||||
let mut status = procedure.on_prepare().await.unwrap();
|
||||
assert_matches!(status, Status::Executing { persist: true });
|
||||
assert_matches!(
|
||||
status,
|
||||
Status::Executing {
|
||||
persist: true,
|
||||
clean_poisons: false
|
||||
}
|
||||
);
|
||||
|
||||
let ctx = common_procedure::Context {
|
||||
procedure_id: ProcedureId::random(),
|
||||
@@ -245,10 +275,22 @@ async fn test_on_part_duplicate_alter_request() {
|
||||
};
|
||||
// on_submit_alter_region_requests
|
||||
status = procedure.execute(&ctx).await.unwrap();
|
||||
assert_matches!(status, Status::Executing { persist: true });
|
||||
assert_matches!(
|
||||
status,
|
||||
Status::Executing {
|
||||
persist: true,
|
||||
clean_poisons: false
|
||||
}
|
||||
);
|
||||
// on_update_metadata
|
||||
status = procedure.execute(&ctx).await.unwrap();
|
||||
assert_matches!(status, Status::Executing { persist: true });
|
||||
assert_matches!(
|
||||
status,
|
||||
Status::Executing {
|
||||
persist: true,
|
||||
clean_poisons: false
|
||||
}
|
||||
);
|
||||
|
||||
// re-alter
|
||||
let tasks = vec![
|
||||
@@ -270,7 +312,13 @@ async fn test_on_part_duplicate_alter_request() {
|
||||
|
||||
let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context.clone());
|
||||
let mut status = procedure.on_prepare().await.unwrap();
|
||||
assert_matches!(status, Status::Executing { persist: true });
|
||||
assert_matches!(
|
||||
status,
|
||||
Status::Executing {
|
||||
persist: true,
|
||||
clean_poisons: false
|
||||
}
|
||||
);
|
||||
|
||||
let ctx = common_procedure::Context {
|
||||
procedure_id: ProcedureId::random(),
|
||||
@@ -278,10 +326,22 @@ async fn test_on_part_duplicate_alter_request() {
|
||||
};
|
||||
// on_submit_alter_region_requests
|
||||
status = procedure.execute(&ctx).await.unwrap();
|
||||
assert_matches!(status, Status::Executing { persist: true });
|
||||
assert_matches!(
|
||||
status,
|
||||
Status::Executing {
|
||||
persist: true,
|
||||
clean_poisons: false
|
||||
}
|
||||
);
|
||||
// on_update_metadata
|
||||
status = procedure.execute(&ctx).await.unwrap();
|
||||
assert_matches!(status, Status::Executing { persist: true });
|
||||
assert_matches!(
|
||||
status,
|
||||
Status::Executing {
|
||||
persist: true,
|
||||
clean_poisons: false
|
||||
}
|
||||
);
|
||||
|
||||
let table_name_keys = vec![
|
||||
TableNameKey::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "table1"),
|
||||
|
||||
@@ -25,6 +25,9 @@ use api::v1::{
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_error::ext::ErrorExt;
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_procedure::store::poison_store::PoisonStore;
|
||||
use common_procedure::{ProcedureId, Status};
|
||||
use common_procedure_test::MockContextProvider;
|
||||
use store_api::storage::RegionId;
|
||||
use table::requests::TTL_KEY;
|
||||
use tokio::sync::mpsc::{self};
|
||||
@@ -33,16 +36,46 @@ use crate::ddl::alter_table::AlterTableProcedure;
|
||||
use crate::ddl::test_util::alter_table::TestAlterTableExprBuilder;
|
||||
use crate::ddl::test_util::create_table::test_create_table_task;
|
||||
use crate::ddl::test_util::datanode_handler::{
|
||||
DatanodeWatcher, RequestOutdatedErrorDatanodeHandler,
|
||||
AllFailureDatanodeHandler, DatanodeWatcher, PartialSuccessDatanodeHandler,
|
||||
RequestOutdatedErrorDatanodeHandler,
|
||||
};
|
||||
use crate::error::Error;
|
||||
use crate::key::datanode_table::DatanodeTableKey;
|
||||
use crate::key::table_name::TableNameKey;
|
||||
use crate::key::table_route::TableRouteValue;
|
||||
use crate::node_manager::NodeManagerRef;
|
||||
use crate::peer::Peer;
|
||||
use crate::poison_key::table_poison_key;
|
||||
use crate::rpc::ddl::AlterTableTask;
|
||||
use crate::rpc::router::{Region, RegionRoute};
|
||||
use crate::test_util::{new_ddl_context, MockDatanodeManager};
|
||||
|
||||
fn prepare_table_route(table_id: u32) -> TableRouteValue {
|
||||
TableRouteValue::physical(vec![
|
||||
RegionRoute {
|
||||
region: Region::new_test(RegionId::new(table_id, 1)),
|
||||
leader_peer: Some(Peer::empty(1)),
|
||||
follower_peers: vec![Peer::empty(5)],
|
||||
leader_state: None,
|
||||
leader_down_since: None,
|
||||
},
|
||||
RegionRoute {
|
||||
region: Region::new_test(RegionId::new(table_id, 2)),
|
||||
leader_peer: Some(Peer::empty(2)),
|
||||
follower_peers: vec![Peer::empty(4)],
|
||||
leader_state: None,
|
||||
leader_down_since: None,
|
||||
},
|
||||
RegionRoute {
|
||||
region: Region::new_test(RegionId::new(table_id, 3)),
|
||||
leader_peer: Some(Peer::empty(3)),
|
||||
follower_peers: vec![],
|
||||
leader_state: None,
|
||||
leader_down_since: None,
|
||||
},
|
||||
])
|
||||
}
|
||||
|
||||
fn test_rename_alter_table_task(table_name: &str, new_table_name: &str) -> AlterTableTask {
|
||||
let builder = TestAlterTableExprBuilder::default()
|
||||
.table_name(table_name)
|
||||
@@ -101,29 +134,7 @@ async fn test_on_submit_alter_request() {
|
||||
.table_metadata_manager
|
||||
.create_table_metadata(
|
||||
task.table_info.clone(),
|
||||
TableRouteValue::physical(vec![
|
||||
RegionRoute {
|
||||
region: Region::new_test(RegionId::new(table_id, 1)),
|
||||
leader_peer: Some(Peer::empty(1)),
|
||||
follower_peers: vec![Peer::empty(5)],
|
||||
leader_state: None,
|
||||
leader_down_since: None,
|
||||
},
|
||||
RegionRoute {
|
||||
region: Region::new_test(RegionId::new(table_id, 2)),
|
||||
leader_peer: Some(Peer::empty(2)),
|
||||
follower_peers: vec![Peer::empty(4)],
|
||||
leader_state: None,
|
||||
leader_down_since: None,
|
||||
},
|
||||
RegionRoute {
|
||||
region: Region::new_test(RegionId::new(table_id, 3)),
|
||||
leader_peer: Some(Peer::empty(3)),
|
||||
follower_peers: vec![],
|
||||
leader_state: None,
|
||||
leader_down_since: None,
|
||||
},
|
||||
]),
|
||||
prepare_table_route(table_id),
|
||||
HashMap::new(),
|
||||
)
|
||||
.await
|
||||
@@ -141,9 +152,15 @@ async fn test_on_submit_alter_request() {
|
||||
})),
|
||||
},
|
||||
};
|
||||
let mut procedure = AlterTableProcedure::new(table_id, alter_table_task, ddl_context).unwrap();
|
||||
let procedure_id = ProcedureId::random();
|
||||
let provider = Arc::new(MockContextProvider::default());
|
||||
let mut procedure =
|
||||
AlterTableProcedure::new(table_id, alter_table_task, ddl_context.clone()).unwrap();
|
||||
procedure.on_prepare().await.unwrap();
|
||||
procedure.submit_alter_region_requests().await.unwrap();
|
||||
procedure
|
||||
.submit_alter_region_requests(procedure_id, provider.as_ref())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let check = |peer: Peer,
|
||||
request: RegionRequest,
|
||||
@@ -185,29 +202,7 @@ async fn test_on_submit_alter_request_with_outdated_request() {
|
||||
.table_metadata_manager
|
||||
.create_table_metadata(
|
||||
task.table_info.clone(),
|
||||
TableRouteValue::physical(vec![
|
||||
RegionRoute {
|
||||
region: Region::new_test(RegionId::new(table_id, 1)),
|
||||
leader_peer: Some(Peer::empty(1)),
|
||||
follower_peers: vec![Peer::empty(5)],
|
||||
leader_state: None,
|
||||
leader_down_since: None,
|
||||
},
|
||||
RegionRoute {
|
||||
region: Region::new_test(RegionId::new(table_id, 2)),
|
||||
leader_peer: Some(Peer::empty(2)),
|
||||
follower_peers: vec![Peer::empty(4)],
|
||||
leader_state: None,
|
||||
leader_down_since: None,
|
||||
},
|
||||
RegionRoute {
|
||||
region: Region::new_test(RegionId::new(table_id, 3)),
|
||||
leader_peer: Some(Peer::empty(3)),
|
||||
follower_peers: vec![],
|
||||
leader_state: None,
|
||||
leader_down_since: None,
|
||||
},
|
||||
]),
|
||||
prepare_table_route(table_id),
|
||||
HashMap::new(),
|
||||
)
|
||||
.await
|
||||
@@ -225,9 +220,15 @@ async fn test_on_submit_alter_request_with_outdated_request() {
|
||||
})),
|
||||
},
|
||||
};
|
||||
let procedure_id = ProcedureId::random();
|
||||
let provider = Arc::new(MockContextProvider::default());
|
||||
let mut procedure = AlterTableProcedure::new(table_id, alter_table_task, ddl_context).unwrap();
|
||||
procedure.on_prepare().await.unwrap();
|
||||
procedure.submit_alter_region_requests().await.unwrap();
|
||||
let err = procedure
|
||||
.submit_alter_region_requests(procedure_id, provider.as_ref())
|
||||
.await
|
||||
.unwrap_err();
|
||||
assert!(!err.is_retry_later());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -326,9 +327,14 @@ async fn test_on_update_metadata_add_columns() {
|
||||
})),
|
||||
},
|
||||
};
|
||||
let procedure_id = ProcedureId::random();
|
||||
let provider = Arc::new(MockContextProvider::default());
|
||||
let mut procedure = AlterTableProcedure::new(table_id, task, ddl_context.clone()).unwrap();
|
||||
procedure.on_prepare().await.unwrap();
|
||||
procedure.submit_alter_region_requests().await.unwrap();
|
||||
procedure
|
||||
.submit_alter_region_requests(procedure_id, provider.as_ref())
|
||||
.await
|
||||
.unwrap();
|
||||
procedure.on_update_metadata().await.unwrap();
|
||||
|
||||
let table_info = ddl_context
|
||||
@@ -387,9 +393,14 @@ async fn test_on_update_table_options() {
|
||||
})),
|
||||
},
|
||||
};
|
||||
let procedure_id = ProcedureId::random();
|
||||
let provider = Arc::new(MockContextProvider::default());
|
||||
let mut procedure = AlterTableProcedure::new(table_id, task, ddl_context.clone()).unwrap();
|
||||
procedure.on_prepare().await.unwrap();
|
||||
procedure.submit_alter_region_requests().await.unwrap();
|
||||
procedure
|
||||
.submit_alter_region_requests(procedure_id, provider.as_ref())
|
||||
.await
|
||||
.unwrap();
|
||||
procedure.on_update_metadata().await.unwrap();
|
||||
|
||||
let table_info = ddl_context
|
||||
@@ -417,3 +428,156 @@ async fn test_on_update_table_options() {
|
||||
HashMap::from(&table_info.meta.options)
|
||||
);
|
||||
}
|
||||
|
||||
async fn prepare_alter_table_procedure(
|
||||
node_manager: NodeManagerRef,
|
||||
) -> (AlterTableProcedure, ProcedureId) {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let ddl_context = new_ddl_context(node_manager);
|
||||
let table_id = 1024;
|
||||
let table_name = "foo";
|
||||
let task = test_create_table_task(table_name, table_id);
|
||||
// Puts a value to table name key.
|
||||
ddl_context
|
||||
.table_metadata_manager
|
||||
.create_table_metadata(
|
||||
task.table_info.clone(),
|
||||
prepare_table_route(table_id),
|
||||
HashMap::new(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let alter_table_task = AlterTableTask {
|
||||
alter_table: AlterTableExpr {
|
||||
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
table_name: table_name.to_string(),
|
||||
kind: Some(Kind::DropColumns(DropColumns {
|
||||
drop_columns: vec![DropColumn {
|
||||
name: "cpu".to_string(),
|
||||
}],
|
||||
})),
|
||||
},
|
||||
};
|
||||
let procedure_id = ProcedureId::random();
|
||||
let mut procedure =
|
||||
AlterTableProcedure::new(table_id, alter_table_task, ddl_context.clone()).unwrap();
|
||||
procedure.on_prepare().await.unwrap();
|
||||
(procedure, procedure_id)
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_on_submit_alter_request_with_partial_success_retryable() {
|
||||
let node_manager = Arc::new(MockDatanodeManager::new(PartialSuccessDatanodeHandler {
|
||||
retryable: true,
|
||||
}));
|
||||
let provider = Arc::new(MockContextProvider::default());
|
||||
let (mut procedure, procedure_id) = prepare_alter_table_procedure(node_manager).await;
|
||||
let result = procedure
|
||||
.submit_alter_region_requests(procedure_id, provider.as_ref())
|
||||
.await
|
||||
.unwrap_err();
|
||||
assert!(result.is_retry_later());
|
||||
|
||||
// Submits again
|
||||
let result = procedure
|
||||
.submit_alter_region_requests(procedure_id, provider.as_ref())
|
||||
.await
|
||||
.unwrap_err();
|
||||
assert!(result.is_retry_later());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_on_submit_alter_request_with_partial_success_non_retryable() {
|
||||
let node_manager = Arc::new(MockDatanodeManager::new(PartialSuccessDatanodeHandler {
|
||||
retryable: false,
|
||||
}));
|
||||
let provider = Arc::new(MockContextProvider::default());
|
||||
let (mut procedure, procedure_id) = prepare_alter_table_procedure(node_manager).await;
|
||||
let result = procedure
|
||||
.submit_alter_region_requests(procedure_id, provider.as_ref())
|
||||
.await
|
||||
.unwrap();
|
||||
assert_matches!(result, Status::Poisoned { .. });
|
||||
|
||||
// submits again
|
||||
let result = procedure
|
||||
.submit_alter_region_requests(procedure_id, provider.as_ref())
|
||||
.await
|
||||
.unwrap();
|
||||
assert_matches!(result, Status::Poisoned { .. });
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_on_submit_alter_request_with_all_failure_retryable() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let node_manager = Arc::new(MockDatanodeManager::new(AllFailureDatanodeHandler {
|
||||
retryable: true,
|
||||
}));
|
||||
let provider = Arc::new(MockContextProvider::default());
|
||||
let (mut procedure, procedure_id) = prepare_alter_table_procedure(node_manager).await;
|
||||
let err = procedure
|
||||
.submit_alter_region_requests(procedure_id, provider.as_ref())
|
||||
.await
|
||||
.unwrap_err();
|
||||
assert!(err.is_retry_later());
|
||||
// submits again
|
||||
let err = procedure
|
||||
.submit_alter_region_requests(procedure_id, provider.as_ref())
|
||||
.await
|
||||
.unwrap_err();
|
||||
assert!(err.is_retry_later());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_on_submit_alter_request_with_all_failure_non_retryable() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let node_manager = Arc::new(MockDatanodeManager::new(AllFailureDatanodeHandler {
|
||||
retryable: false,
|
||||
}));
|
||||
let provider = Arc::new(MockContextProvider::default());
|
||||
let (mut procedure, procedure_id) = prepare_alter_table_procedure(node_manager).await;
|
||||
let err = procedure
|
||||
.submit_alter_region_requests(procedure_id, provider.as_ref())
|
||||
.await
|
||||
.unwrap_err();
|
||||
assert_matches!(err, Error::AbortProcedure { .. });
|
||||
assert!(!err.is_retry_later());
|
||||
assert!(err.need_clean_poisons());
|
||||
|
||||
// submits again
|
||||
let err = procedure
|
||||
.submit_alter_region_requests(procedure_id, provider.as_ref())
|
||||
.await
|
||||
.unwrap_err();
|
||||
assert_matches!(err, Error::AbortProcedure { .. });
|
||||
assert!(!err.is_retry_later());
|
||||
assert!(err.need_clean_poisons());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_on_submit_alter_request_with_exist_poison() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let node_manager = Arc::new(MockDatanodeManager::new(AllFailureDatanodeHandler {
|
||||
retryable: false,
|
||||
}));
|
||||
let provider = Arc::new(MockContextProvider::default());
|
||||
let (mut procedure, procedure_id) = prepare_alter_table_procedure(node_manager).await;
|
||||
|
||||
let table_id = 1024;
|
||||
let key = table_poison_key(table_id).to_string();
|
||||
let another_procedure_id = ProcedureId::random();
|
||||
provider
|
||||
.poison_manager()
|
||||
.try_put_poison(key, another_procedure_id.to_string())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
procedure.on_prepare().await.unwrap();
|
||||
let err = procedure
|
||||
.submit_alter_region_requests(procedure_id, provider.as_ref())
|
||||
.await
|
||||
.unwrap_err();
|
||||
assert_matches!(err, Error::PutPoison { .. });
|
||||
}
|
||||
|
||||
@@ -69,7 +69,13 @@ async fn test_on_prepare() {
|
||||
let physical_table_id = table_id;
|
||||
let mut procedure = CreateLogicalTablesProcedure::new(tasks, physical_table_id, ddl_context);
|
||||
let status = procedure.on_prepare().await.unwrap();
|
||||
assert_matches!(status, Status::Executing { persist: true });
|
||||
assert_matches!(
|
||||
status,
|
||||
Status::Executing {
|
||||
persist: true,
|
||||
clean_poisons: false
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -202,7 +208,13 @@ async fn test_on_prepare_part_logical_tables_exist() {
|
||||
ddl_context,
|
||||
);
|
||||
let status = procedure.on_prepare().await.unwrap();
|
||||
assert_matches!(status, Status::Executing { persist: true });
|
||||
assert_matches!(
|
||||
status,
|
||||
Status::Executing {
|
||||
persist: true,
|
||||
clean_poisons: false
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -238,7 +250,13 @@ async fn test_on_create_metadata() {
|
||||
ddl_context,
|
||||
);
|
||||
let status = procedure.on_prepare().await.unwrap();
|
||||
assert_matches!(status, Status::Executing { persist: true });
|
||||
assert_matches!(
|
||||
status,
|
||||
Status::Executing {
|
||||
persist: true,
|
||||
clean_poisons: false
|
||||
}
|
||||
);
|
||||
let ctx = ProcedureContext {
|
||||
procedure_id: ProcedureId::random(),
|
||||
provider: Arc::new(MockContextProvider::default()),
|
||||
@@ -294,7 +312,13 @@ async fn test_on_create_metadata_part_logical_tables_exist() {
|
||||
ddl_context,
|
||||
);
|
||||
let status = procedure.on_prepare().await.unwrap();
|
||||
assert_matches!(status, Status::Executing { persist: true });
|
||||
assert_matches!(
|
||||
status,
|
||||
Status::Executing {
|
||||
persist: true,
|
||||
clean_poisons: false
|
||||
}
|
||||
);
|
||||
let ctx = ProcedureContext {
|
||||
procedure_id: ProcedureId::random(),
|
||||
provider: Arc::new(MockContextProvider::default()),
|
||||
@@ -339,7 +363,13 @@ async fn test_on_create_metadata_err() {
|
||||
ddl_context.clone(),
|
||||
);
|
||||
let status = procedure.on_prepare().await.unwrap();
|
||||
assert_matches!(status, Status::Executing { persist: true });
|
||||
assert_matches!(
|
||||
status,
|
||||
Status::Executing {
|
||||
persist: true,
|
||||
clean_poisons: false
|
||||
}
|
||||
);
|
||||
let ctx = ProcedureContext {
|
||||
procedure_id: ProcedureId::random(),
|
||||
provider: Arc::new(MockContextProvider::default()),
|
||||
|
||||
@@ -137,7 +137,13 @@ async fn test_on_prepare_without_create_if_table_exists() {
|
||||
task.create_table.create_if_not_exists = true;
|
||||
let mut procedure = CreateTableProcedure::new(task, ddl_context);
|
||||
let status = procedure.on_prepare().await.unwrap();
|
||||
assert_matches!(status, Status::Executing { persist: true });
|
||||
assert_matches!(
|
||||
status,
|
||||
Status::Executing {
|
||||
persist: true,
|
||||
clean_poisons: false
|
||||
}
|
||||
);
|
||||
assert_eq!(procedure.table_id(), 1024);
|
||||
}
|
||||
|
||||
|
||||
@@ -153,7 +153,13 @@ async fn test_on_prepare_without_create_if_table_exists() {
|
||||
task.create_view.create_if_not_exists = true;
|
||||
let mut procedure = CreateViewProcedure::new(task, ddl_context);
|
||||
let status = procedure.on_prepare().await.unwrap();
|
||||
assert_matches!(status, Status::Executing { persist: true });
|
||||
assert_matches!(
|
||||
status,
|
||||
Status::Executing {
|
||||
persist: true,
|
||||
clean_poisons: false
|
||||
}
|
||||
);
|
||||
assert_eq!(procedure.view_id(), 1024);
|
||||
}
|
||||
|
||||
|
||||
@@ -13,10 +13,12 @@
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::fmt::Debug;
|
||||
|
||||
use common_catalog::consts::METRIC_ENGINE;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_procedure::error::Error as ProcedureError;
|
||||
use common_telemetry::{error, warn};
|
||||
use common_wal::options::WalOptions;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use store_api::metric_engine_consts::LOGICAL_TABLE_METADATA_KEY;
|
||||
@@ -38,6 +40,7 @@ use crate::rpc::router::RegionRoute;
|
||||
/// Adds [Peer] context if the error is unretryable.
|
||||
pub fn add_peer_context_if_needed(datanode: Peer) -> impl FnOnce(Error) -> Error {
|
||||
move |err| {
|
||||
error!(err; "Failed to operate datanode, peer: {}", datanode);
|
||||
if !err.is_retry_later() {
|
||||
return Err::<(), BoxedError>(BoxedError::new(err))
|
||||
.context(OperateDatanodeSnafu { peer: datanode })
|
||||
@@ -182,6 +185,85 @@ pub fn extract_region_wal_options(
|
||||
Ok(region_wal_options)
|
||||
}
|
||||
|
||||
/// The result of multiple operations.
|
||||
///
|
||||
/// - Ok: all operations are successful.
|
||||
/// - PartialRetryable: some operations succeeded and every failure is retryable, so the result is retryable.
|
||||
/// - PartialNonRetryable: some (but not all) operations failed with a non-retryable error, so the result is non-retryable.
|
||||
/// - AllRetryable: all operations are retryable.
|
||||
/// - AllNonRetryable: all operations failed with non-retryable errors.
|
||||
pub enum MultipleResults {
|
||||
Ok,
|
||||
PartialRetryable(Error),
|
||||
PartialNonRetryable(Error),
|
||||
AllRetryable(Error),
|
||||
AllNonRetryable(Error),
|
||||
}
|
||||
|
||||
/// Handles the results of alter region requests.
|
||||
///
|
||||
/// For partial success, we need to check if the errors are retryable.
|
||||
/// If all the errors are retryable, we return a retryable error.
|
||||
/// Otherwise, we return the first non-retryable error.
|
||||
pub fn handle_multiple_results<T: Debug>(results: Vec<Result<T>>) -> MultipleResults {
|
||||
if results.is_empty() {
|
||||
return MultipleResults::Ok;
|
||||
}
|
||||
let num_results = results.len();
|
||||
let mut retryable_results = Vec::new();
|
||||
let mut non_retryable_results = Vec::new();
|
||||
let mut ok_results = Vec::new();
|
||||
|
||||
for result in results {
|
||||
match result {
|
||||
Ok(_) => ok_results.push(result),
|
||||
Err(err) => {
|
||||
if err.is_retry_later() {
|
||||
retryable_results.push(err);
|
||||
} else {
|
||||
non_retryable_results.push(err);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
common_telemetry::debug!(
|
||||
"retryable_results: {}, non_retryable_results: {}, ok_results: {}",
|
||||
retryable_results.len(),
|
||||
non_retryable_results.len(),
|
||||
ok_results.len()
|
||||
);
|
||||
|
||||
if retryable_results.len() == num_results {
|
||||
return MultipleResults::AllRetryable(retryable_results.into_iter().next().unwrap());
|
||||
} else if non_retryable_results.len() == num_results {
|
||||
warn!("all non retryable results: {}", non_retryable_results.len());
|
||||
for err in &non_retryable_results {
|
||||
error!(err; "non retryable error");
|
||||
}
|
||||
return MultipleResults::AllNonRetryable(non_retryable_results.into_iter().next().unwrap());
|
||||
} else if ok_results.len() == num_results {
|
||||
return MultipleResults::Ok;
|
||||
} else if !retryable_results.is_empty()
|
||||
&& !ok_results.is_empty()
|
||||
&& non_retryable_results.is_empty()
|
||||
{
|
||||
return MultipleResults::PartialRetryable(retryable_results.into_iter().next().unwrap());
|
||||
}
|
||||
|
||||
warn!(
|
||||
"partial non retryable results: {}, retryable results: {}, ok results: {}",
|
||||
non_retryable_results.len(),
|
||||
retryable_results.len(),
|
||||
ok_results.len()
|
||||
);
|
||||
for err in &non_retryable_results {
|
||||
error!(err; "non retryable error");
|
||||
}
|
||||
// non_retryable_results.len() > 0
|
||||
MultipleResults::PartialNonRetryable(non_retryable_results.into_iter().next().unwrap())
|
||||
}
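
For reference, a minimal standalone sketch of the classification rule implemented above, written against a toy error type rather than the crate's `Error` (all names below are illustrative, not part of this change):

// Toy stand-ins for the crate's retryable / non-retryable error split.
#[derive(Debug)]
enum ToyError {
    RetryLater,
    Fatal,
}

#[derive(Debug, PartialEq)]
enum Outcome {
    Ok,
    PartialRetryable,
    PartialNonRetryable,
    AllRetryable,
    AllNonRetryable,
}

fn classify(results: &[Result<(), ToyError>]) -> Outcome {
    let total = results.len();
    if total == 0 {
        // Matches `handle_multiple_results`: an empty batch counts as success.
        return Outcome::Ok;
    }
    let ok = results.iter().filter(|r| r.is_ok()).count();
    let retryable = results
        .iter()
        .filter(|r| matches!(r, Err(ToyError::RetryLater)))
        .count();
    let non_retryable = total - ok - retryable;

    if retryable == total {
        Outcome::AllRetryable
    } else if non_retryable == total {
        Outcome::AllNonRetryable
    } else if ok == total {
        Outcome::Ok
    } else if non_retryable == 0 {
        Outcome::PartialRetryable
    } else {
        Outcome::PartialNonRetryable
    }
}

fn main() {
    // Successes plus only retryable failures: the whole batch can be retried.
    let retryable_mix = [Ok(()), Err(ToyError::RetryLater), Ok(())];
    assert_eq!(classify(&retryable_mix), Outcome::PartialRetryable);

    // Any non-retryable failure in a mixed batch poisons the result.
    let fatal_mix = [Ok(()), Err(ToyError::Fatal)];
    assert_eq!(classify(&fatal_mix), Outcome::PartialNonRetryable);
}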
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
@@ -834,6 +834,7 @@ mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_procedure::local::LocalManager;
|
||||
use common_procedure::test_util::InMemoryPoisonStore;
|
||||
|
||||
use super::DdlManager;
|
||||
use crate::cache_invalidator::DummyCacheInvalidator;
|
||||
@@ -850,6 +851,7 @@ mod tests {
|
||||
use crate::node_manager::{DatanodeRef, FlownodeRef, NodeManager};
|
||||
use crate::peer::Peer;
|
||||
use crate::region_keeper::MemoryRegionKeeper;
|
||||
use crate::region_registry::LeaderRegionRegistry;
|
||||
use crate::sequence::SequenceBuilder;
|
||||
use crate::state_store::KvStateStore;
|
||||
use crate::wal_options_allocator::WalOptionsAllocator;
|
||||
@@ -882,7 +884,12 @@ mod tests {
|
||||
));
|
||||
|
||||
let state_store = Arc::new(KvStateStore::new(kv_backend.clone()));
|
||||
let procedure_manager = Arc::new(LocalManager::new(Default::default(), state_store));
|
||||
let poison_manager = Arc::new(InMemoryPoisonStore::default());
|
||||
let procedure_manager = Arc::new(LocalManager::new(
|
||||
Default::default(),
|
||||
state_store,
|
||||
poison_manager,
|
||||
));
|
||||
|
||||
let _ = DdlManager::try_new(
|
||||
DdlContext {
|
||||
@@ -893,6 +900,7 @@ mod tests {
|
||||
flow_metadata_manager,
|
||||
flow_metadata_allocator,
|
||||
memory_region_keeper: Arc::new(MemoryRegionKeeper::default()),
|
||||
leader_region_registry: Arc::new(LeaderRegionRegistry::default()),
|
||||
region_failure_detector_controller: Arc::new(NoopRegionFailureDetectorControl),
|
||||
},
|
||||
procedure_manager.clone(),
|
||||
|
||||
@@ -449,6 +449,14 @@ pub enum Error {
|
||||
#[snafu(display("Retry later"))]
|
||||
RetryLater { source: BoxedError },
|
||||
|
||||
#[snafu(display("Abort procedure"))]
|
||||
AbortProcedure {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
clean_poisons: bool,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Failed to encode a wal options to json string, wal_options: {:?}",
|
||||
wal_options
|
||||
@@ -748,6 +756,33 @@ pub enum Error {
|
||||
#[snafu(source)]
|
||||
error: serde_json::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("No leader found for table_id: {}", table_id))]
|
||||
NoLeader {
|
||||
table_id: TableId,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display(
|
||||
"Procedure poison key already exists with a different value, key: {}, value: {}",
|
||||
key,
|
||||
value
|
||||
))]
|
||||
ProcedurePoisonConflict {
|
||||
key: String,
|
||||
value: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to put poison, table metadata may be corrupted"))]
|
||||
PutPoison {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
#[snafu(source)]
|
||||
source: common_procedure::error::Error,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -766,7 +801,8 @@ impl ErrorExt for Error {
|
||||
| SerializeToJson { .. }
|
||||
| DeserializeFromJson { .. } => StatusCode::Internal,
|
||||
|
||||
ValueNotExist { .. } => StatusCode::Unexpected,
|
||||
NoLeader { .. } => StatusCode::TableUnavailable,
|
||||
ValueNotExist { .. } | ProcedurePoisonConflict { .. } => StatusCode::Unexpected,
|
||||
|
||||
Unsupported { .. } => StatusCode::Unsupported,
|
||||
|
||||
@@ -837,7 +873,9 @@ impl ErrorExt for Error {
|
||||
OperateDatanode { source, .. } => source.status_code(),
|
||||
Table { source, .. } => source.status_code(),
|
||||
RetryLater { source, .. } => source.status_code(),
|
||||
AbortProcedure { source, .. } => source.status_code(),
|
||||
ConvertAlterTableRequest { source, .. } => source.status_code(),
|
||||
PutPoison { source, .. } => source.status_code(),
|
||||
|
||||
ParseProcedureId { .. }
|
||||
| InvalidNumTopics { .. }
|
||||
@@ -908,6 +946,11 @@ impl Error {
|
||||
matches!(self, Error::RetryLater { .. })
|
||||
}
|
||||
|
||||
/// Determines whether the procedure's poisons need to be cleaned up.
|
||||
pub fn need_clean_poisons(&self) -> bool {
|
||||
matches!(self, Error::AbortProcedure { clean_poisons, .. } if *clean_poisons)
|
||||
}
|
||||
|
||||
/// Returns true if the response exceeds the size limit.
|
||||
pub fn is_exceeded_size_limit(&self) -> bool {
|
||||
match self {
|
||||
|
||||
@@ -192,6 +192,12 @@ pub struct DropFlow {
|
||||
pub flownode_ids: Vec<FlownodeId>,
|
||||
}
|
||||
|
||||
/// Flushes a batch of regions.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
pub struct FlushRegions {
|
||||
pub region_ids: Vec<RegionId>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, Display, PartialEq)]
|
||||
pub enum Instruction {
|
||||
/// Opens a region.
|
||||
@@ -208,6 +214,8 @@ pub enum Instruction {
|
||||
DowngradeRegion(DowngradeRegion),
|
||||
/// Invalidates batch cache.
|
||||
InvalidateCaches(Vec<CacheIdent>),
|
||||
/// Flushes regions.
|
||||
FlushRegion(FlushRegions),
|
||||
}
|
||||
|
||||
/// The reply of [UpgradeRegion].
|
||||
|
||||
@@ -57,7 +57,10 @@
|
||||
//! - This key is mainly used in constructing the view in Datanode and Frontend.
|
||||
//!
|
||||
//! 12. Kafka topic key: `__topic_name/kafka/{topic_name}`
|
||||
//! - The key is used to mark existing topics in kafka for WAL.
|
||||
//! - The key is used to track existing topics in Kafka.
|
||||
//! - The value is a [TopicNameValue](crate::key::topic_name::TopicNameValue) struct; it contains the `pruned_entry_id` which represents
|
||||
//! the highest entry id that has been pruned from the remote WAL.
|
||||
//! - When a region uses this topic, it should start replaying entries from `pruned_entry_id + 1` (minimum available entry id).
|
||||
//!
|
||||
//! 13. Topic name to region map key `__topic_region/{topic_name}/{region_id}`
|
||||
//! - Mapping {topic_name} to {region_id}
|
||||
@@ -137,6 +140,7 @@ use table::metadata::{RawTableInfo, TableId};
|
||||
use table::table_name::TableName;
|
||||
use table_info::{TableInfoKey, TableInfoManager, TableInfoValue};
|
||||
use table_name::{TableNameKey, TableNameManager, TableNameValue};
|
||||
use topic_name::TopicNameManager;
|
||||
use topic_region::{TopicRegionKey, TopicRegionManager};
|
||||
use view_info::{ViewInfoKey, ViewInfoManager, ViewInfoValue};
|
||||
|
||||
@@ -156,6 +160,7 @@ use crate::kv_backend::txn::{Txn, TxnOp};
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::rpc::router::{region_distribution, LeaderState, RegionRoute};
|
||||
use crate::rpc::store::BatchDeleteRequest;
|
||||
use crate::state_store::PoisonValue;
|
||||
use crate::DatanodeId;
|
||||
|
||||
pub const NAME_PATTERN: &str = r"[a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*";
|
||||
@@ -308,6 +313,7 @@ pub struct TableMetadataManager {
|
||||
schema_manager: SchemaManager,
|
||||
table_route_manager: TableRouteManager,
|
||||
tombstone_manager: TombstoneManager,
|
||||
topic_name_manager: TopicNameManager,
|
||||
topic_region_manager: TopicRegionManager,
|
||||
kv_backend: KvBackendRef,
|
||||
}
|
||||
@@ -459,6 +465,7 @@ impl TableMetadataManager {
|
||||
schema_manager: SchemaManager::new(kv_backend.clone()),
|
||||
table_route_manager: TableRouteManager::new(kv_backend.clone()),
|
||||
tombstone_manager: TombstoneManager::new(kv_backend.clone()),
|
||||
topic_name_manager: TopicNameManager::new(kv_backend.clone()),
|
||||
topic_region_manager: TopicRegionManager::new(kv_backend.clone()),
|
||||
kv_backend,
|
||||
}
|
||||
@@ -512,6 +519,14 @@ impl TableMetadataManager {
|
||||
&self.table_route_manager
|
||||
}
|
||||
|
||||
pub fn topic_name_manager(&self) -> &TopicNameManager {
|
||||
&self.topic_name_manager
|
||||
}
|
||||
|
||||
pub fn topic_region_manager(&self) -> &TopicRegionManager {
|
||||
&self.topic_region_manager
|
||||
}
|
||||
|
||||
#[cfg(feature = "testing")]
|
||||
pub fn kv_backend(&self) -> &KvBackendRef {
|
||||
&self.kv_backend
|
||||
@@ -1320,7 +1335,8 @@ impl_metadata_value! {
|
||||
TableFlowValue,
|
||||
NodeAddressValue,
|
||||
SchemaNameValue,
|
||||
FlowStateValue
|
||||
FlowStateValue,
|
||||
PoisonValue
|
||||
}
|
||||
|
||||
impl_optional_metadata_value! {
|
||||
@@ -1471,7 +1487,8 @@ mod tests {
|
||||
new_test_table_info(region_routes.iter().map(|r| r.region.id.region_number())).into();
|
||||
let wal_allocator = WalOptionsAllocator::RaftEngine;
|
||||
let regions = (0..16).collect();
|
||||
let region_wal_options = allocate_region_wal_options(regions, &wal_allocator).unwrap();
|
||||
let region_wal_options =
|
||||
allocate_region_wal_options(regions, &wal_allocator, false).unwrap();
|
||||
create_physical_table_metadata(
|
||||
&table_metadata_manager,
|
||||
table_info.clone(),
|
||||
|
||||
@@ -15,11 +15,14 @@
|
||||
use std::fmt::{self, Display};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{OptionExt, ResultExt};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
|
||||
use crate::error::{DecodeJsonSnafu, Error, InvalidMetadataSnafu, Result};
|
||||
use crate::ensure_values;
|
||||
use crate::error::{self, DecodeJsonSnafu, Error, InvalidMetadataSnafu, Result, UnexpectedSnafu};
|
||||
use crate::key::txn_helper::TxnOpGetResponseSet;
|
||||
use crate::key::{
|
||||
MetadataKey, KAFKA_TOPIC_KEY_PATTERN, KAFKA_TOPIC_KEY_PREFIX, LEGACY_TOPIC_KEY_PREFIX,
|
||||
DeserializedValueWithBytes, MetadataKey, MetadataValue, KAFKA_TOPIC_KEY_PATTERN,
|
||||
KAFKA_TOPIC_KEY_PREFIX, LEGACY_TOPIC_KEY_PREFIX,
|
||||
};
|
||||
use crate::kv_backend::txn::{Txn, TxnOp};
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
@@ -31,8 +34,32 @@ pub struct TopicNameKey<'a> {
|
||||
pub topic: &'a str,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct TopicNameValue;
|
||||
/// The value associated with a topic name key.
|
||||
///
|
||||
/// The `pruned_entry_id` is the highest entry id that has been pruned from the remote WAL.
|
||||
/// When a region uses this topic, it should start replaying entries from `pruned_entry_id + 1` (minimum available entry id).
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone)]
|
||||
pub struct TopicNameValue {
|
||||
pub pruned_entry_id: u64,
|
||||
}
|
||||
|
||||
impl TopicNameValue {
|
||||
pub fn new(pruned_entry_id: u64) -> Self {
|
||||
Self { pruned_entry_id }
|
||||
}
|
||||
}
|
||||
|
||||
impl MetadataValue for TopicNameValue {
|
||||
fn try_from_raw_value(raw_value: &[u8]) -> Result<Self> {
|
||||
let value = serde_json::from_slice::<TopicNameValue>(raw_value).context(DecodeJsonSnafu)?;
|
||||
Ok(value)
|
||||
}
|
||||
|
||||
fn try_as_raw_value(&self) -> Result<Vec<u8>> {
|
||||
let raw_value = serde_json::to_vec(self).context(DecodeJsonSnafu)?;
|
||||
Ok(raw_value)
|
||||
}
|
||||
}
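
A small illustrative test, not part of this change, that exercises the round trip above together with the `pruned_entry_id + 1` replay rule from the doc comment:

#[cfg(test)]
mod topic_name_value_sketch {
    use super::*;

    #[test]
    fn round_trip_and_replay_start() {
        let value = TopicNameValue::new(41);
        let raw = value.try_as_raw_value().unwrap();
        let decoded = TopicNameValue::try_from_raw_value(&raw).unwrap();
        assert_eq!(decoded.pruned_entry_id, 41);
        // Entries up to and including 41 were pruned; replay resumes at 42.
        assert_eq!(decoded.pruned_entry_id + 1, 42);
    }
}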
|
||||
|
||||
impl<'a> TopicNameKey<'a> {
|
||||
pub fn new(topic: &'a str) -> Self {
|
||||
@@ -114,13 +141,16 @@ impl TopicNameManager {
|
||||
{
|
||||
let topics =
|
||||
serde_json::from_slice::<Vec<String>>(&kv.value).context(DecodeJsonSnafu)?;
|
||||
let mut reqs = topics
|
||||
.iter()
|
||||
.map(|topic| {
|
||||
let key = TopicNameKey::new(topic);
|
||||
TxnOp::Put(key.to_bytes(), vec![])
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
let mut reqs = Vec::with_capacity(topics.len() + 1);
|
||||
for topic in topics {
|
||||
let topic_name_key = TopicNameKey::new(&topic);
|
||||
let topic_name_value = TopicNameValue::new(0);
|
||||
let put_req = TxnOp::Put(
|
||||
topic_name_key.to_bytes(),
|
||||
topic_name_value.try_as_raw_value()?,
|
||||
);
|
||||
reqs.push(put_req);
|
||||
}
|
||||
let delete_req = TxnOp::Delete(LEGACY_TOPIC_KEY_PREFIX.as_bytes().to_vec());
|
||||
reqs.push(delete_req);
|
||||
let txn = Txn::new().and_then(reqs);
|
||||
@@ -129,7 +159,7 @@ impl TopicNameManager {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Range query for topics.
|
||||
/// Range query for topics. Only the keys are returned.
|
||||
/// Caution: this method returns the keys (as `String`s) rather than the values of the range query, since the topics are stored in the keys.
|
||||
pub async fn range(&self) -> Result<Vec<String>> {
|
||||
let prefix = TopicNameKey::range_start_key();
|
||||
@@ -142,25 +172,72 @@ impl TopicNameManager {
|
||||
.collect::<Result<Vec<String>>>()
|
||||
}
|
||||
|
||||
/// Put topics into kvbackend.
|
||||
/// Puts topics into the kv backend; the `pruned_entry_id` is set to 0 by default.
|
||||
pub async fn batch_put(&self, topic_name_keys: Vec<TopicNameKey<'_>>) -> Result<()> {
|
||||
let mut kvs = Vec::with_capacity(topic_name_keys.len());
|
||||
let topic_name_value = TopicNameValue::new(0);
|
||||
for topic_name_key in &topic_name_keys {
|
||||
let kv = KeyValue {
|
||||
key: topic_name_key.to_bytes(),
|
||||
value: topic_name_value.clone().try_as_raw_value()?,
|
||||
};
|
||||
kvs.push(kv);
|
||||
}
|
||||
let req = BatchPutRequest {
|
||||
kvs: topic_name_keys
|
||||
.iter()
|
||||
.map(|key| KeyValue {
|
||||
key: key.to_bytes(),
|
||||
value: vec![],
|
||||
})
|
||||
.collect(),
|
||||
kvs,
|
||||
prev_kv: false,
|
||||
};
|
||||
self.kv_backend.batch_put(req).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get value for a specific topic.
|
||||
pub async fn get(
|
||||
&self,
|
||||
topic: &str,
|
||||
) -> Result<Option<DeserializedValueWithBytes<TopicNameValue>>> {
|
||||
let key = TopicNameKey::new(topic);
|
||||
let raw_key = key.to_bytes();
|
||||
self.kv_backend
|
||||
.get(&raw_key)
|
||||
.await?
|
||||
.map(|x| DeserializedValueWithBytes::from_inner_slice(&x.value))
|
||||
.transpose()
|
||||
}
|
||||
|
||||
/// Update the topic name key and value in the kv backend.
|
||||
pub async fn update(
|
||||
&self,
|
||||
topic: &str,
|
||||
pruned_entry_id: u64,
|
||||
prev: Option<DeserializedValueWithBytes<TopicNameValue>>,
|
||||
) -> Result<()> {
|
||||
let key = TopicNameKey::new(topic);
|
||||
let raw_key = key.to_bytes();
|
||||
let value = TopicNameValue::new(pruned_entry_id);
|
||||
let new_raw_value = value.try_as_raw_value()?;
|
||||
let raw_value = prev.map(|v| v.get_raw_bytes()).unwrap_or_default();
|
||||
|
||||
let txn = Txn::compare_and_put(raw_key.clone(), raw_value, new_raw_value.clone());
|
||||
let mut r = self.kv_backend.txn(txn).await?;
|
||||
|
||||
if !r.succeeded {
|
||||
let mut set = TxnOpGetResponseSet::from(&mut r.responses);
|
||||
let raw_value = TxnOpGetResponseSet::filter(raw_key)(&mut set)
|
||||
.context(UnexpectedSnafu {
|
||||
err_msg: "Reads the empty topic name value in comparing operation while updating TopicNameValue",
|
||||
})?;
|
||||
|
||||
let op_name = "updating TopicNameValue";
|
||||
ensure_values!(raw_value, new_raw_value, op_name);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::assert_matches::assert_matches;
|
||||
use std::sync::Arc;
|
||||
|
||||
use super::*;
|
||||
@@ -204,7 +281,19 @@ mod tests {
|
||||
let topics = manager.range().await.unwrap();
|
||||
assert_eq!(topics, all_topics);
|
||||
|
||||
let topics = manager.range().await.unwrap();
|
||||
assert_eq!(topics, all_topics);
|
||||
for topic in &topics {
|
||||
let value = manager.get(topic).await.unwrap().unwrap();
|
||||
assert_eq!(value.pruned_entry_id, 0);
|
||||
manager.update(topic, 1, Some(value.clone())).await.unwrap();
|
||||
let new_value = manager.get(topic).await.unwrap().unwrap();
|
||||
assert_eq!(new_value.pruned_entry_id, 1);
|
||||
// Update twice, nothing changed
|
||||
manager.update(topic, 1, Some(value.clone())).await.unwrap();
|
||||
let new_value = manager.get(topic).await.unwrap().unwrap();
|
||||
assert_eq!(new_value.pruned_entry_id, 1);
|
||||
// Bad CAS, expects an error
|
||||
let err = manager.update(topic, 3, Some(value)).await.unwrap_err();
|
||||
assert_matches!(err, error::Error::Unexpected { .. });
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -224,6 +224,7 @@ impl TopicRegionManager {
|
||||
Some((region_id, kafka.topic.as_str()))
|
||||
}
|
||||
Some(WalOptions::RaftEngine) => None,
|
||||
Some(WalOptions::Noop) => None,
|
||||
None => None,
|
||||
},
|
||||
)
|
||||
|
||||
@@ -155,21 +155,21 @@ impl<'a> MySqlTemplateFactory<'a> {
|
||||
table_name: table_name.to_string(),
|
||||
create_table_statement: format!(
|
||||
// Cannot be more than 3072 bytes in PRIMARY KEY
|
||||
"CREATE TABLE IF NOT EXISTS {table_name}(k VARBINARY(3072) PRIMARY KEY, v BLOB);",
|
||||
"CREATE TABLE IF NOT EXISTS `{table_name}`(k VARBINARY(3072) PRIMARY KEY, v BLOB);",
|
||||
),
|
||||
range_template: RangeTemplate {
|
||||
point: format!("SELECT k, v FROM {table_name} WHERE k = ?"),
|
||||
range: format!("SELECT k, v FROM {table_name} WHERE k >= ? AND k < ? ORDER BY k"),
|
||||
full: format!("SELECT k, v FROM {table_name} ? ORDER BY k"),
|
||||
left_bounded: format!("SELECT k, v FROM {table_name} WHERE k >= ? ORDER BY k"),
|
||||
prefix: format!("SELECT k, v FROM {table_name} WHERE k LIKE ? ORDER BY k"),
|
||||
point: format!("SELECT k, v FROM `{table_name}` WHERE k = ?"),
|
||||
range: format!("SELECT k, v FROM `{table_name}` WHERE k >= ? AND k < ? ORDER BY k"),
|
||||
full: format!("SELECT k, v FROM `{table_name}` ? ORDER BY k"),
|
||||
left_bounded: format!("SELECT k, v FROM `{table_name}` WHERE k >= ? ORDER BY k"),
|
||||
prefix: format!("SELECT k, v FROM `{table_name}` WHERE k LIKE ? ORDER BY k"),
|
||||
},
|
||||
delete_template: RangeTemplate {
|
||||
point: format!("DELETE FROM {table_name} WHERE k = ?;"),
|
||||
range: format!("DELETE FROM {table_name} WHERE k >= ? AND k < ?;"),
|
||||
full: format!("DELETE FROM {table_name}"),
|
||||
left_bounded: format!("DELETE FROM {table_name} WHERE k >= ?;"),
|
||||
prefix: format!("DELETE FROM {table_name} WHERE k LIKE ?;"),
|
||||
point: format!("DELETE FROM `{table_name}` WHERE k = ?;"),
|
||||
range: format!("DELETE FROM `{table_name}` WHERE k >= ? AND k < ?;"),
|
||||
full: format!("DELETE FROM `{table_name}`"),
|
||||
left_bounded: format!("DELETE FROM `{table_name}` WHERE k >= ?;"),
|
||||
prefix: format!("DELETE FROM `{table_name}` WHERE k LIKE ?;"),
|
||||
},
|
||||
}
|
||||
}
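
A minimal sketch, not part of this change, of why the backtick quoting matters — assuming a hypothetical metadata table name containing a hyphen, which is not valid in an unquoted MySQL identifier:

fn main() {
    // Hypothetical table name; unquoted it would be a MySQL syntax error.
    let table_name = "greptime-metakv";
    let point = format!("SELECT k, v FROM `{table_name}` WHERE k = ?");
    assert_eq!(point, "SELECT k, v FROM `greptime-metakv` WHERE k = ?");
}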
|
||||
@@ -189,14 +189,17 @@ impl MySqlTemplateSet {
|
||||
fn generate_batch_get_query(&self, key_len: usize) -> String {
|
||||
let table_name = &self.table_name;
|
||||
let in_clause = mysql_generate_in_placeholders(1, key_len).join(", ");
|
||||
format!("SELECT k, v FROM {table_name} WHERE k in ({});", in_clause)
|
||||
format!(
|
||||
"SELECT k, v FROM `{table_name}` WHERE k in ({});",
|
||||
in_clause
|
||||
)
|
||||
}
|
||||
|
||||
/// Generates the sql for batch delete.
|
||||
fn generate_batch_delete_query(&self, key_len: usize) -> String {
|
||||
let table_name = &self.table_name;
|
||||
let in_clause = mysql_generate_in_placeholders(1, key_len).join(", ");
|
||||
format!("DELETE FROM {table_name} WHERE k in ({});", in_clause)
|
||||
format!("DELETE FROM `{table_name}` WHERE k in ({});", in_clause)
|
||||
}
|
||||
|
||||
/// Generates the sql for batch upsert.
|
||||
@@ -212,9 +215,9 @@ impl MySqlTemplateSet {
|
||||
let values_clause = values_placeholders.join(", ");
|
||||
|
||||
(
|
||||
format!(r#"SELECT k, v FROM {table_name} WHERE k IN ({in_clause})"#,),
|
||||
format!(r#"SELECT k, v FROM `{table_name}` WHERE k IN ({in_clause})"#,),
|
||||
format!(
|
||||
r#"INSERT INTO {table_name} (k, v) VALUES {values_clause} ON DUPLICATE KEY UPDATE v = VALUES(v);"#,
|
||||
r#"INSERT INTO `{table_name}` (k, v) VALUES {values_clause} ON DUPLICATE KEY UPDATE v = VALUES(v);"#,
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
@@ -157,21 +157,25 @@ impl<'a> PgSqlTemplateFactory<'a> {
|
||||
PgSqlTemplateSet {
|
||||
table_name: table_name.to_string(),
|
||||
create_table_statement: format!(
|
||||
"CREATE TABLE IF NOT EXISTS {table_name}(k bytea PRIMARY KEY, v bytea)",
|
||||
"CREATE TABLE IF NOT EXISTS \"{table_name}\"(k bytea PRIMARY KEY, v bytea)",
|
||||
),
|
||||
range_template: RangeTemplate {
|
||||
point: format!("SELECT k, v FROM {table_name} WHERE k = $1"),
|
||||
range: format!("SELECT k, v FROM {table_name} WHERE k >= $1 AND k < $2 ORDER BY k"),
|
||||
full: format!("SELECT k, v FROM {table_name} $1 ORDER BY k"),
|
||||
left_bounded: format!("SELECT k, v FROM {table_name} WHERE k >= $1 ORDER BY k"),
|
||||
prefix: format!("SELECT k, v FROM {table_name} WHERE k LIKE $1 ORDER BY k"),
|
||||
point: format!("SELECT k, v FROM \"{table_name}\" WHERE k = $1"),
|
||||
range: format!(
|
||||
"SELECT k, v FROM \"{table_name}\" WHERE k >= $1 AND k < $2 ORDER BY k"
|
||||
),
|
||||
full: format!("SELECT k, v FROM \"{table_name}\" $1 ORDER BY k"),
|
||||
left_bounded: format!("SELECT k, v FROM \"{table_name}\" WHERE k >= $1 ORDER BY k"),
|
||||
prefix: format!("SELECT k, v FROM \"{table_name}\" WHERE k LIKE $1 ORDER BY k"),
|
||||
},
|
||||
delete_template: RangeTemplate {
|
||||
point: format!("DELETE FROM {table_name} WHERE k = $1 RETURNING k,v;"),
|
||||
range: format!("DELETE FROM {table_name} WHERE k >= $1 AND k < $2 RETURNING k,v;"),
|
||||
full: format!("DELETE FROM {table_name} RETURNING k,v"),
|
||||
left_bounded: format!("DELETE FROM {table_name} WHERE k >= $1 RETURNING k,v;"),
|
||||
prefix: format!("DELETE FROM {table_name} WHERE k LIKE $1 RETURNING k,v;"),
|
||||
point: format!("DELETE FROM \"{table_name}\" WHERE k = $1 RETURNING k,v;"),
|
||||
range: format!(
|
||||
"DELETE FROM \"{table_name}\" WHERE k >= $1 AND k < $2 RETURNING k,v;"
|
||||
),
|
||||
full: format!("DELETE FROM \"{table_name}\" RETURNING k,v"),
|
||||
left_bounded: format!("DELETE FROM \"{table_name}\" WHERE k >= $1 RETURNING k,v;"),
|
||||
prefix: format!("DELETE FROM \"{table_name}\" WHERE k LIKE $1 RETURNING k,v;"),
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -191,7 +195,10 @@ impl PgSqlTemplateSet {
|
||||
fn generate_batch_get_query(&self, key_len: usize) -> String {
|
||||
let table_name = &self.table_name;
|
||||
let in_clause = pg_generate_in_placeholders(1, key_len).join(", ");
|
||||
format!("SELECT k, v FROM {table_name} WHERE k in ({});", in_clause)
|
||||
format!(
|
||||
"SELECT k, v FROM \"{table_name}\" WHERE k in ({});",
|
||||
in_clause
|
||||
)
|
||||
}
|
||||
|
||||
/// Generates the sql for batch delete.
|
||||
@@ -199,7 +206,7 @@ impl PgSqlTemplateSet {
|
||||
let table_name = &self.table_name;
|
||||
let in_clause = pg_generate_in_placeholders(1, key_len).join(", ");
|
||||
format!(
|
||||
"DELETE FROM {table_name} WHERE k in ({}) RETURNING k,v;",
|
||||
"DELETE FROM \"{table_name}\" WHERE k in ({}) RETURNING k,v;",
|
||||
in_clause
|
||||
)
|
||||
}
|
||||
@@ -220,9 +227,9 @@ impl PgSqlTemplateSet {
|
||||
format!(
|
||||
r#"
|
||||
WITH prev AS (
|
||||
SELECT k,v FROM {table_name} WHERE k IN ({in_clause})
|
||||
SELECT k,v FROM "{table_name}" WHERE k IN ({in_clause})
|
||||
), update AS (
|
||||
INSERT INTO {table_name} (k, v) VALUES
|
||||
INSERT INTO "{table_name}" (k, v) VALUES
|
||||
{values_clause}
|
||||
ON CONFLICT (
|
||||
k
|
||||
|
||||
@@ -37,8 +37,10 @@ pub mod metrics;
pub mod node_expiry_listener;
pub mod node_manager;
pub mod peer;
pub mod poison_key;
pub mod range_stream;
pub mod region_keeper;
pub mod region_registry;
pub mod rpc;
pub mod sequence;
pub mod state_store;

@@ -27,6 +27,7 @@ const TABLE_NAME_LOCK_PREFIX: &str = "__table_name_lock";
const FLOW_NAME_LOCK_PREFIX: &str = "__flow_name_lock";
const REGION_LOCK_PREFIX: &str = "__region_lock";
const FLOW_LOCK_PREFIX: &str = "__flow_lock";
const REMOTE_WAL_LOCK_PREFIX: &str = "__remote_wal_lock";

/// [CatalogLock] acquires the lock on the tenant level.
pub enum CatalogLock<'a> {
@@ -231,6 +232,31 @@ impl From<FlowLock> for StringKey {
}
}

/// [RemoteWalLock] acquires the lock on the remote wal topic level.
pub enum RemoteWalLock {
Read(String),
Write(String),
}

impl Display for RemoteWalLock {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let key = match self {
RemoteWalLock::Read(s) => s,
RemoteWalLock::Write(s) => s,
};
write!(f, "{}/{}", REMOTE_WAL_LOCK_PREFIX, key)
}
}

impl From<RemoteWalLock> for StringKey {
fn from(value: RemoteWalLock) -> Self {
match value {
RemoteWalLock::Write(_) => StringKey::Exclusive(value.to_string()),
RemoteWalLock::Read(_) => StringKey::Share(value.to_string()),
}
}
}

#[cfg(test)]
mod tests {
use common_procedure::StringKey;
@@ -308,5 +334,16 @@ mod tests {
string_key,
StringKey::Exclusive(format!("{}/{}", FLOW_LOCK_PREFIX, flow_id))
);
// The remote wal lock
let string_key: StringKey = RemoteWalLock::Read("foo".to_string()).into();
assert_eq!(
string_key,
StringKey::Share(format!("{}/{}", REMOTE_WAL_LOCK_PREFIX, "foo"))
);
let string_key: StringKey = RemoteWalLock::Write("foo".to_string()).into();
assert_eq!(
string_key,
StringKey::Exclusive(format!("{}/{}", REMOTE_WAL_LOCK_PREFIX, "foo"))
);
}
}

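For illustration, a minimal sketch of how a procedure might turn the new RemoteWalLock into its lock key; the topic name is made up, and the use of LockKey::new over a single StringKey is an assumption, only the Read/Share and Write/Exclusive conversions come from the code above.

use common_procedure::{LockKey, StringKey};

// Hypothetical helper: readers of a remote WAL topic take a shared lock,
// while a pruning procedure takes an exclusive lock on the same
// "__remote_wal_lock/<topic>" key, so they contend on one resource.
fn remote_wal_lock_key(topic: &str, exclusive: bool) -> LockKey {
    let key: StringKey = if exclusive {
        RemoteWalLock::Write(topic.to_string()).into()
    } else {
        RemoteWalLock::Read(topic.to_string()).into()
    };
    // Assumes `LockKey::new` accepts an iterator of `StringKey`s.
    LockKey::new(vec![key])
}
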
@@ -12,63 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt::{Display, Formatter};
use std::sync::Arc;

use api::v1::meta::Peer as PbPeer;
use serde::{Deserialize, Serialize};
pub use api::v1::meta::Peer;

use crate::error::Error;
use crate::{DatanodeId, FlownodeId};

#[derive(Debug, Default, Clone, Hash, Eq, PartialEq, Deserialize, Serialize)]
pub struct Peer {
/// Node identifier. Unique in a cluster.
pub id: u64,
pub addr: String,
}

impl From<PbPeer> for Peer {
fn from(p: PbPeer) -> Self {
Self {
id: p.id,
addr: p.addr,
}
}
}

impl From<Peer> for PbPeer {
fn from(p: Peer) -> Self {
Self {
id: p.id,
addr: p.addr,
}
}
}

impl Peer {
pub fn new(id: u64, addr: impl Into<String>) -> Self {
Self {
id,
addr: addr.into(),
}
}

#[cfg(any(test, feature = "testing"))]
pub fn empty(id: u64) -> Self {
Self {
id,
addr: String::new(),
}
}
}

impl Display for Peer {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "peer-{}({})", self.id, self.addr)
}
}

/// Can query a peer given a node id.
#[async_trait::async_trait]
pub trait PeerLookupService {

src/common/meta/src/poison_key.rs (new file, 22 lines)
@@ -0,0 +1,22 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use common_procedure::PoisonKey;
use store_api::storage::TableId;

/// Returns the poison key for the table.
pub fn table_poison_key(table_id: TableId) -> PoisonKey {
let key = format!("table/{}", table_id);
PoisonKey::new(&key)
}

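A small usage sketch follows; table_poison_key is the helper defined above, while PoisonKeys::new and the concrete table id are assumptions for illustration only.

use common_meta::poison_key::table_poison_key;
use common_procedure::{PoisonKey, PoisonKeys};

// Declares the poison key a table-level procedure may set if it fails while
// the table's regions could be left in an inconsistent state.
fn poison_keys_for_table(table_id: u32) -> PoisonKeys {
    let key: PoisonKey = table_poison_key(table_id);
    // Assumes `PoisonKeys::new` accepts an iterator of keys.
    PoisonKeys::new(vec![key])
}
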
src/common/meta/src/region_registry.rs (new file, 186 lines)
@@ -0,0 +1,186 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::hash_map::Entry;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::{Arc, RwLock};
|
||||
|
||||
use common_telemetry::warn;
|
||||
use store_api::storage::RegionId;
|
||||
|
||||
use crate::datanode::RegionManifestInfo;
|
||||
|
||||
/// Represents information about a leader region in the cluster.
|
||||
/// Contains the datanode id where the leader is located,
|
||||
/// and the current manifest version.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
|
||||
pub struct LeaderRegion {
|
||||
pub datanode_id: u64,
|
||||
pub manifest: LeaderRegionManifestInfo,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
|
||||
pub enum LeaderRegionManifestInfo {
|
||||
Mito {
|
||||
manifest_version: u64,
|
||||
flushed_entry_id: u64,
|
||||
},
|
||||
Metric {
|
||||
data_manifest_version: u64,
|
||||
data_flushed_entry_id: u64,
|
||||
metadata_manifest_version: u64,
|
||||
metadata_flushed_entry_id: u64,
|
||||
},
|
||||
}
|
||||
|
||||
impl From<RegionManifestInfo> for LeaderRegionManifestInfo {
|
||||
fn from(value: RegionManifestInfo) -> Self {
|
||||
match value {
|
||||
RegionManifestInfo::Mito {
|
||||
manifest_version,
|
||||
flushed_entry_id,
|
||||
} => LeaderRegionManifestInfo::Mito {
|
||||
manifest_version,
|
||||
flushed_entry_id,
|
||||
},
|
||||
RegionManifestInfo::Metric {
|
||||
data_manifest_version,
|
||||
data_flushed_entry_id,
|
||||
metadata_manifest_version,
|
||||
metadata_flushed_entry_id,
|
||||
} => LeaderRegionManifestInfo::Metric {
|
||||
data_manifest_version,
|
||||
data_flushed_entry_id,
|
||||
metadata_manifest_version,
|
||||
metadata_flushed_entry_id,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl LeaderRegionManifestInfo {
|
||||
/// Returns the manifest version of the leader region.
|
||||
pub fn manifest_version(&self) -> u64 {
|
||||
match self {
|
||||
LeaderRegionManifestInfo::Mito {
|
||||
manifest_version, ..
|
||||
} => *manifest_version,
|
||||
LeaderRegionManifestInfo::Metric {
|
||||
data_manifest_version,
|
||||
..
|
||||
} => *data_manifest_version,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the flushed entry id of the leader region.
|
||||
pub fn flushed_entry_id(&self) -> u64 {
|
||||
match self {
|
||||
LeaderRegionManifestInfo::Mito {
|
||||
flushed_entry_id, ..
|
||||
} => *flushed_entry_id,
|
||||
LeaderRegionManifestInfo::Metric {
|
||||
data_flushed_entry_id,
|
||||
..
|
||||
} => *data_flushed_entry_id,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the minimum flushed entry id of the leader region.
|
||||
/// It is used to determine the minimum flushed entry id that can be pruned in remote wal.
|
||||
pub fn min_flushed_entry_id(&self) -> u64 {
|
||||
match self {
|
||||
LeaderRegionManifestInfo::Mito {
|
||||
flushed_entry_id, ..
|
||||
} => *flushed_entry_id,
|
||||
LeaderRegionManifestInfo::Metric {
|
||||
data_flushed_entry_id,
|
||||
metadata_flushed_entry_id,
|
||||
..
|
||||
} => (*data_flushed_entry_id).min(*metadata_flushed_entry_id),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub type LeaderRegionRegistryRef = Arc<LeaderRegionRegistry>;
|
||||
|
||||
/// Registry that maintains a mapping of all leader regions in the cluster.
|
||||
/// Tracks which datanode is hosting the leader for each region and the corresponding
|
||||
/// manifest version.
|
||||
#[derive(Default)]
|
||||
pub struct LeaderRegionRegistry {
|
||||
inner: RwLock<HashMap<RegionId, LeaderRegion>>,
|
||||
}
|
||||
|
||||
impl LeaderRegionRegistry {
|
||||
/// Creates a new empty leader region registry.
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
inner: RwLock::new(HashMap::new()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Gets the leader region for the given region ids.
|
||||
pub fn batch_get<I: Iterator<Item = RegionId>>(
|
||||
&self,
|
||||
region_ids: I,
|
||||
) -> HashMap<RegionId, LeaderRegion> {
|
||||
let inner = self.inner.read().unwrap();
|
||||
region_ids
|
||||
.into_iter()
|
||||
.flat_map(|region_id| {
|
||||
inner
|
||||
.get(®ion_id)
|
||||
.map(|leader_region| (region_id, *leader_region))
|
||||
})
|
||||
.collect::<HashMap<_, _>>()
|
||||
}
|
||||
|
||||
/// Puts the leader regions into the registry.
|
||||
pub fn batch_put(&self, key_values: Vec<(RegionId, LeaderRegion)>) {
|
||||
let mut inner = self.inner.write().unwrap();
|
||||
for (region_id, leader_region) in key_values {
|
||||
match inner.entry(region_id) {
|
||||
Entry::Vacant(entry) => {
|
||||
entry.insert(leader_region);
|
||||
}
|
||||
Entry::Occupied(mut entry) => {
|
||||
let manifest_version = entry.get().manifest.manifest_version();
|
||||
if manifest_version > leader_region.manifest.manifest_version() {
|
||||
warn!(
|
||||
"Received a leader region with a smaller manifest version than the existing one, ignore it. region: {}, existing_manifest_version: {}, new_manifest_version: {}",
|
||||
region_id,
|
||||
manifest_version,
|
||||
leader_region.manifest.manifest_version()
|
||||
);
|
||||
} else {
|
||||
entry.insert(leader_region);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn batch_delete<I: Iterator<Item = RegionId>>(&self, region_ids: I) {
|
||||
let mut inner = self.inner.write().unwrap();
|
||||
for region_id in region_ids {
|
||||
inner.remove(®ion_id);
|
||||
}
|
||||
}
|
||||
|
||||
/// Resets the registry to an empty state.
|
||||
pub fn reset(&self) {
|
||||
let mut inner = self.inner.write().unwrap();
|
||||
inner.clear();
|
||||
}
|
||||
}
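Below is a brief usage sketch of the registry defined above, using only the APIs shown; the region id, datanode id, and version numbers are illustrative.

use store_api::storage::RegionId;

fn leader_region_registry_example() {
    let registry = LeaderRegionRegistry::new();
    let region_id = RegionId::new(1024, 1);

    // Record the current leader datanode and its manifest state for one region.
    registry.batch_put(vec![(
        region_id,
        LeaderRegion {
            datanode_id: 2,
            manifest: LeaderRegionManifestInfo::Mito {
                manifest_version: 7,
                flushed_entry_id: 100,
            },
        },
    )]);

    // Look the region up again; a later batch_put with a smaller manifest
    // version would have been ignored with a warning.
    let current = registry.batch_get(std::iter::once(region_id));
    assert_eq!(current[&region_id].manifest.manifest_version(), 7);
}
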
|
||||
@@ -1240,6 +1240,7 @@ impl From<QueryContext> for PbQueryContext {
extensions,
channel: channel as u32,
snapshot_seqs: None,
explain: None,
}
}
}
|
||||
@@ -20,6 +20,7 @@ use api::v1::meta::{
ProcedureMeta as PbProcedureMeta, ProcedureStateResponse as PbProcedureStateResponse,
ProcedureStatus as PbProcedureStatus,
};
use common_error::ext::ErrorExt;
use common_procedure::{ProcedureId, ProcedureInfo, ProcedureState};
use snafu::ResultExt;

@@ -73,14 +74,15 @@ pub fn procedure_state_to_pb_state(state: &ProcedureState) -> (PbProcedureStatus
match state {
ProcedureState::Running => (PbProcedureStatus::Running, String::default()),
ProcedureState::Done { .. } => (PbProcedureStatus::Done, String::default()),
ProcedureState::Retrying { error } => (PbProcedureStatus::Retrying, error.to_string()),
ProcedureState::Failed { error } => (PbProcedureStatus::Failed, error.to_string()),
ProcedureState::Retrying { error } => (PbProcedureStatus::Retrying, error.output_msg()),
ProcedureState::Failed { error } => (PbProcedureStatus::Failed, error.output_msg()),
ProcedureState::PrepareRollback { error } => {
(PbProcedureStatus::PrepareRollback, error.to_string())
(PbProcedureStatus::PrepareRollback, error.output_msg())
}
ProcedureState::RollingBack { error } => {
(PbProcedureStatus::RollingBack, error.to_string())
(PbProcedureStatus::RollingBack, error.output_msg())
}
ProcedureState::Poisoned { error, .. } => (PbProcedureStatus::Poisoned, error.output_msg()),
}
}
||||
|
||||
@@ -14,16 +14,23 @@
|
||||
|
||||
use async_trait::async_trait;
|
||||
use common_error::ext::BoxedError;
|
||||
use common_procedure::error::{DeleteStatesSnafu, ListStateSnafu, PutStateSnafu};
|
||||
use common_procedure::error::{
|
||||
DeletePoisonSnafu, DeleteStatesSnafu, GetPoisonSnafu, ListStateSnafu, PutPoisonSnafu,
|
||||
PutStateSnafu, Result as ProcedureResult,
|
||||
};
|
||||
use common_procedure::store::poison_store::PoisonStore;
|
||||
use common_procedure::store::state_store::{KeySet, KeyValueStream, StateStore};
|
||||
use common_procedure::store::util::multiple_value_stream;
|
||||
use common_procedure::Result as ProcedureResult;
|
||||
use futures::future::try_join_all;
|
||||
use futures::StreamExt;
|
||||
use itertools::Itertools;
|
||||
use snafu::ResultExt;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::error::{ProcedurePoisonConflictSnafu, Result, UnexpectedSnafu};
|
||||
use crate::key::txn_helper::TxnOpGetResponseSet;
|
||||
use crate::key::{DeserializedValueWithBytes, MetadataValue};
|
||||
use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp};
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::range_stream::PaginationStream;
|
||||
use crate::rpc::store::{BatchDeleteRequest, PutRequest, RangeRequest};
|
||||
@@ -32,11 +39,16 @@ use crate::rpc::KeyValue;
|
||||
const DELIMITER: &str = "/";
|
||||
|
||||
const PROCEDURE_PREFIX: &str = "/__procedure__/";
|
||||
const PROCEDURE_POISON_KEY_PREFIX: &str = "/__procedure_poison/";
|
||||
|
||||
fn with_prefix(key: &str) -> String {
|
||||
format!("{PROCEDURE_PREFIX}{key}")
|
||||
}
|
||||
|
||||
fn with_poison_prefix(key: &str) -> String {
|
||||
format!("{}{}", PROCEDURE_POISON_KEY_PREFIX, key)
|
||||
}
|
||||
|
||||
fn strip_prefix(key: &str) -> String {
|
||||
key.trim_start_matches(PROCEDURE_PREFIX).to_string()
|
||||
}
|
||||
@@ -207,8 +219,168 @@ impl StateStore for KvStateStore {
|
||||
}
|
||||
}
|
||||
|
||||
/// The value of the poison key.
|
||||
///
|
||||
/// Each poison value contains a unique token that identifies the procedure.
|
||||
/// While multiple procedures may use the same poison key (representing the same resource),
|
||||
/// each procedure will have a distinct token value to differentiate its ownership.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct PoisonValue {
|
||||
token: String,
|
||||
}
|
||||
|
||||
type PoisonDecodeResult = Result<Option<DeserializedValueWithBytes<PoisonValue>>>;
|
||||
|
||||
impl KvStateStore {
|
||||
/// Builds a create poison transaction,
|
||||
/// it expects the `__procedure_poison/{key}` key to be unoccupied.
|
||||
fn build_create_poison_txn(
|
||||
&self,
|
||||
key: &str,
|
||||
value: &PoisonValue,
|
||||
) -> Result<(
|
||||
Txn,
|
||||
impl FnOnce(&mut TxnOpGetResponseSet) -> PoisonDecodeResult,
|
||||
)> {
|
||||
let key = key.as_bytes().to_vec();
|
||||
let value = value.try_as_raw_value()?;
|
||||
let txn = Txn::put_if_not_exists(key.clone(), value);
|
||||
|
||||
Ok((
|
||||
txn,
|
||||
TxnOpGetResponseSet::decode_with(TxnOpGetResponseSet::filter(key)),
|
||||
))
|
||||
}
|
||||
|
||||
/// Builds a delete poison transaction,
|
||||
/// it expects the `__procedure_poison/{key}` key to be occupied.
|
||||
fn build_delete_poison_txn(
|
||||
&self,
|
||||
key: &str,
|
||||
value: PoisonValue,
|
||||
) -> Result<(
|
||||
Txn,
|
||||
impl FnOnce(&mut TxnOpGetResponseSet) -> PoisonDecodeResult,
|
||||
)> {
|
||||
let key = key.as_bytes().to_vec();
|
||||
let value = value.try_as_raw_value()?;
|
||||
|
||||
let txn = Txn::new()
|
||||
.when(vec![Compare::with_value(
|
||||
key.clone(),
|
||||
CompareOp::Equal,
|
||||
value,
|
||||
)])
|
||||
.and_then(vec![TxnOp::Delete(key.clone())])
|
||||
.or_else(vec![TxnOp::Get(key.clone())]);
|
||||
|
||||
Ok((
|
||||
txn,
|
||||
TxnOpGetResponseSet::decode_with(TxnOpGetResponseSet::filter(key)),
|
||||
))
|
||||
}
|
||||
|
||||
async fn get_poison_inner(&self, key: &str) -> Result<Option<PoisonValue>> {
|
||||
let key = with_poison_prefix(key);
|
||||
let value = self.kv_backend.get(key.as_bytes()).await?;
|
||||
value
|
||||
.map(|v| PoisonValue::try_from_raw_value(&v.value))
|
||||
.transpose()
|
||||
}
|
||||
|
||||
/// Put the poison.
|
||||
///
|
||||
/// If the poison was already put by another procedure, an error is returned.
|
||||
async fn set_poison_inner(&self, key: &str, token: &str) -> Result<()> {
|
||||
let key = with_poison_prefix(key);
|
||||
let (txn, on_failure) = self.build_create_poison_txn(
|
||||
&key,
|
||||
&PoisonValue {
|
||||
token: token.to_string(),
|
||||
},
|
||||
)?;
|
||||
|
||||
let mut resp = self.kv_backend.txn(txn).await?;
|
||||
|
||||
if !resp.succeeded {
|
||||
let mut set = TxnOpGetResponseSet::from(&mut resp.responses);
|
||||
let remote_value = on_failure(&mut set)?
|
||||
.context(UnexpectedSnafu {
|
||||
err_msg: "Reads the empty poison value in comparing operation of the put consistency poison",
|
||||
})?
|
||||
.into_inner();
|
||||
|
||||
ensure!(
|
||||
remote_value.token == token,
|
||||
ProcedurePoisonConflictSnafu {
|
||||
key: &key,
|
||||
value: &remote_value.token,
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Deletes the poison.
|
||||
///
|
||||
/// If the poison was not put by this procedure, an error is returned.
|
||||
async fn delete_poison_inner(&self, key: &str, token: &str) -> Result<()> {
|
||||
let key = with_poison_prefix(key);
|
||||
let (txn, on_failure) = self.build_delete_poison_txn(
|
||||
&key,
|
||||
PoisonValue {
|
||||
token: token.to_string(),
|
||||
},
|
||||
)?;
|
||||
|
||||
let mut resp = self.kv_backend.txn(txn).await?;
|
||||
|
||||
if !resp.succeeded {
|
||||
let mut set = TxnOpGetResponseSet::from(&mut resp.responses);
|
||||
let remote_value = on_failure(&mut set)?;
|
||||
|
||||
ensure!(
|
||||
remote_value.is_none(),
|
||||
ProcedurePoisonConflictSnafu {
|
||||
key: &key,
|
||||
value: &remote_value.unwrap().into_inner().token,
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl PoisonStore for KvStateStore {
|
||||
async fn try_put_poison(&self, key: String, token: String) -> ProcedureResult<()> {
|
||||
self.set_poison_inner(&key, &token)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(PutPoisonSnafu { key, token })
|
||||
}
|
||||
|
||||
async fn delete_poison(&self, key: String, token: String) -> ProcedureResult<()> {
|
||||
self.delete_poison_inner(&key, &token)
|
||||
.await
|
||||
.map_err(BoxedError::new)
|
||||
.context(DeletePoisonSnafu { key, token })
|
||||
}
|
||||
|
||||
async fn get_poison(&self, key: &str) -> ProcedureResult<Option<String>> {
|
||||
self.get_poison_inner(key)
|
||||
.await
|
||||
.map(|v| v.map(|v| v.token))
|
||||
.map_err(BoxedError::new)
|
||||
.context(GetPoisonSnafu { key })
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::assert_matches::assert_matches;
|
||||
use std::env;
|
||||
use std::sync::Arc;
|
||||
|
||||
@@ -219,6 +391,7 @@ mod tests {
|
||||
use uuid::Uuid;
|
||||
|
||||
use super::*;
|
||||
use crate::error::Error;
|
||||
use crate::kv_backend::chroot::ChrootKvBackend;
|
||||
use crate::kv_backend::etcd::EtcdStore;
|
||||
use crate::kv_backend::memory::MemoryKvBackend;
|
||||
@@ -290,13 +463,13 @@ mod tests {
|
||||
num_per_range: u32,
|
||||
max_bytes: u32,
|
||||
) {
|
||||
let num_cases = rand::thread_rng().gen_range(1..=8);
|
||||
let num_cases = rand::rng().random_range(1..=8);
|
||||
common_telemetry::info!("num_cases: {}", num_cases);
|
||||
let mut cases = Vec::with_capacity(num_cases);
|
||||
for i in 0..num_cases {
|
||||
let size = rand::thread_rng().gen_range(size_limit..=max_bytes);
|
||||
let size = rand::rng().random_range(size_limit..=max_bytes);
|
||||
let mut large_value = vec![0u8; size as usize];
|
||||
rand::thread_rng().fill_bytes(&mut large_value);
|
||||
rand::rng().fill_bytes(&mut large_value);
|
||||
|
||||
// Starts from `a`.
|
||||
let prefix = format!("{}/", std::char::from_u32(97 + i as u32).unwrap());
|
||||
@@ -354,8 +527,8 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_meta_state_store_split_value() {
|
||||
let size_limit = rand::thread_rng().gen_range(128..=512);
|
||||
let page_size = rand::thread_rng().gen_range(1..10);
|
||||
let size_limit = rand::rng().random_range(128..=512);
|
||||
let page_size = rand::rng().random_range(1..10);
|
||||
let kv_backend = Arc::new(MemoryKvBackend::new());
|
||||
test_meta_state_store_split_value_with_size_limit(kv_backend, size_limit, page_size, 8192)
|
||||
.await;
|
||||
@@ -388,7 +561,7 @@ mod tests {
|
||||
// However, some KvBackends, the `ChrootKvBackend`, will add the prefix to `key`;
|
||||
// we don't know the exact size of the key.
|
||||
let size_limit = 1536 * 1024 - key_size;
|
||||
let page_size = rand::thread_rng().gen_range(1..10);
|
||||
let page_size = rand::rng().random_range(1..10);
|
||||
test_meta_state_store_split_value_with_size_limit(
|
||||
kv_backend,
|
||||
size_limit,
|
||||
@@ -397,4 +570,73 @@ mod tests {
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_poison() {
|
||||
let mem_kv = Arc::new(MemoryKvBackend::default());
|
||||
let poison_manager = KvStateStore::new(mem_kv.clone());
|
||||
|
||||
let key = "table/1";
|
||||
|
||||
let token = "expected_token";
|
||||
|
||||
poison_manager.set_poison_inner(key, token).await.unwrap();
|
||||
|
||||
// Put again, should be ok.
|
||||
poison_manager.set_poison_inner(key, token).await.unwrap();
|
||||
|
||||
// Delete, should be ok.
|
||||
poison_manager
|
||||
.delete_poison_inner(key, token)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Delete again, should be ok.
|
||||
poison_manager
|
||||
.delete_poison_inner(key, token)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_consistency_poison_failed() {
|
||||
let mem_kv = Arc::new(MemoryKvBackend::default());
|
||||
let poison_manager = KvStateStore::new(mem_kv.clone());
|
||||
|
||||
let key = "table/1";
|
||||
|
||||
let token = "expected_token";
|
||||
let token2 = "expected_token2";
|
||||
|
||||
poison_manager.set_poison_inner(key, token).await.unwrap();
|
||||
|
||||
let err = poison_manager
|
||||
.set_poison_inner(key, token2)
|
||||
.await
|
||||
.unwrap_err();
|
||||
assert_matches!(err, Error::ProcedurePoisonConflict { .. });
|
||||
|
||||
let err = poison_manager
|
||||
.delete_poison_inner(key, token2)
|
||||
.await
|
||||
.unwrap_err();
|
||||
assert_matches!(err, Error::ProcedurePoisonConflict { .. });
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialize_deserialize() {
|
||||
let key = "table/1";
|
||||
let value = PoisonValue {
|
||||
token: "expected_token".to_string(),
|
||||
};
|
||||
|
||||
let serialized_key = with_poison_prefix(key).as_bytes().to_vec();
|
||||
let serialized_value = value.try_as_raw_value().unwrap();
|
||||
|
||||
let expected_key = "/__procedure_poison/table/1";
|
||||
let expected_value = r#"{"token":"expected_token"}"#;
|
||||
|
||||
assert_eq!(expected_key.as_bytes(), serialized_key);
|
||||
assert_eq!(expected_value.as_bytes(), serialized_value);
|
||||
}
|
||||
}
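To tie the pieces together, here is a hedged sketch of using the PoisonStore implementation on top of a kv backend; it mirrors the tests above, and the key and token values are illustrative.

use std::sync::Arc;

use common_procedure::store::poison_store::PoisonStore;

// Imports of MemoryKvBackend and KvStateStore are the same as in the tests above.
async fn poison_guard_example() -> common_procedure::Result<()> {
    let kv_backend = Arc::new(MemoryKvBackend::default());
    let store = KvStateStore::new(kv_backend);

    // The first procedure to put the poison owns it; re-putting with the same
    // token is idempotent.
    store
        .try_put_poison("table/1024".to_string(), "procedure-a".to_string())
        .await?;

    // A different token conflicts until the owner deletes its poison.
    assert!(store
        .try_put_poison("table/1024".to_string(), "procedure-b".to_string())
        .await
        .is_err());

    store
        .delete_poison("table/1024".to_string(), "procedure-a".to_string())
        .await
}
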
|
||||
|
||||
@@ -35,6 +35,7 @@ use crate::node_manager::{
|
||||
};
|
||||
use crate::peer::{Peer, PeerLookupService};
|
||||
use crate::region_keeper::MemoryRegionKeeper;
|
||||
use crate::region_registry::LeaderRegionRegistry;
|
||||
use crate::sequence::SequenceBuilder;
|
||||
use crate::wal_options_allocator::WalOptionsAllocator;
|
||||
use crate::{DatanodeId, FlownodeId};
|
||||
@@ -177,6 +178,7 @@ pub fn new_ddl_context_with_kv_backend(
|
||||
node_manager,
|
||||
cache_invalidator: Arc::new(DummyCacheInvalidator),
|
||||
memory_region_keeper: Arc::new(MemoryRegionKeeper::new()),
|
||||
leader_region_registry: Arc::new(LeaderRegionRegistry::default()),
|
||||
table_metadata_allocator,
|
||||
table_metadata_manager,
|
||||
flow_metadata_allocator,
|
||||
|
||||
@@ -30,7 +30,7 @@ use crate::error::{EncodeWalOptionsSnafu, InvalidTopicNamePrefixSnafu, Result};
|
||||
use crate::key::NAME_PATTERN_REGEX;
|
||||
use crate::kv_backend::KvBackendRef;
|
||||
use crate::leadership_notifier::LeadershipChangeListener;
|
||||
use crate::wal_options_allocator::topic_creator::build_kafka_topic_creator;
|
||||
pub use crate::wal_options_allocator::topic_creator::build_kafka_topic_creator;
|
||||
use crate::wal_options_allocator::topic_pool::KafkaTopicPool;
|
||||
|
||||
/// Allocates wal options in region granularity.
|
||||
@@ -53,21 +53,12 @@ impl WalOptionsAllocator {
|
||||
}
|
||||
}
|
||||
|
||||
/// Allocates a wal options for a region.
|
||||
pub fn alloc(&self) -> Result<WalOptions> {
|
||||
match self {
|
||||
Self::RaftEngine => Ok(WalOptions::RaftEngine),
|
||||
Self::Kafka(topic_manager) => {
|
||||
let topic = topic_manager.select()?;
|
||||
Ok(WalOptions::Kafka(KafkaWalOptions {
|
||||
topic: topic.clone(),
|
||||
}))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Allocates a batch of wal options where each wal options goes to a region.
|
||||
pub fn alloc_batch(&self, num_regions: usize) -> Result<Vec<WalOptions>> {
|
||||
/// If skip_wal is true, the wal options will be set to Noop regardless of the allocator type.
|
||||
pub fn alloc_batch(&self, num_regions: usize, skip_wal: bool) -> Result<Vec<WalOptions>> {
|
||||
if skip_wal {
|
||||
return Ok(vec![WalOptions::Noop; num_regions]);
|
||||
}
|
||||
match self {
|
||||
WalOptionsAllocator::RaftEngine => Ok(vec![WalOptions::RaftEngine; num_regions]),
|
||||
WalOptionsAllocator::Kafka(topic_manager) => {
|
||||
@@ -130,9 +121,10 @@ pub async fn build_wal_options_allocator(
|
||||
pub fn allocate_region_wal_options(
|
||||
regions: Vec<RegionNumber>,
|
||||
wal_options_allocator: &WalOptionsAllocator,
|
||||
skip_wal: bool,
|
||||
) -> Result<HashMap<RegionNumber, String>> {
|
||||
let wal_options = wal_options_allocator
|
||||
.alloc_batch(regions.len())?
|
||||
.alloc_batch(regions.len(), skip_wal)?
|
||||
.into_iter()
|
||||
.map(|wal_options| {
|
||||
serde_json::to_string(&wal_options).context(EncodeWalOptionsSnafu { wal_options })
|
||||
@@ -177,7 +169,7 @@ mod tests {
|
||||
|
||||
let num_regions = 32;
|
||||
let regions = (0..num_regions).collect::<Vec<_>>();
|
||||
let got = allocate_region_wal_options(regions.clone(), &allocator).unwrap();
|
||||
let got = allocate_region_wal_options(regions.clone(), &allocator, false).unwrap();
|
||||
|
||||
let encoded_wal_options = serde_json::to_string(&WalOptions::RaftEngine).unwrap();
|
||||
let expected = regions
|
||||
@@ -237,7 +229,7 @@ mod tests {
|
||||
|
||||
let num_regions = 32;
|
||||
let regions = (0..num_regions).collect::<Vec<_>>();
|
||||
let got = allocate_region_wal_options(regions.clone(), &allocator).unwrap();
|
||||
let got = allocate_region_wal_options(regions.clone(), &allocator, false).unwrap();
|
||||
|
||||
// Check the allocated wal options contain the expected topics.
|
||||
let expected = (0..num_regions)
|
||||
@@ -253,4 +245,18 @@ mod tests {
|
||||
})
|
||||
.await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_allocator_with_skip_wal() {
|
||||
let allocator = WalOptionsAllocator::RaftEngine;
|
||||
allocator.start().await.unwrap();
|
||||
|
||||
let num_regions = 32;
|
||||
let regions = (0..num_regions).collect::<Vec<_>>();
|
||||
let got = allocate_region_wal_options(regions.clone(), &allocator, true).unwrap();
|
||||
assert_eq!(got.len(), num_regions as usize);
|
||||
for wal_options in got.values() {
|
||||
assert_eq!(wal_options, &"{\"wal.provider\":\"noop\"}");
|
||||
}
|
||||
}
|
||||
}
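As a quick illustration of the new skip_wal flag, the sketch below allocates per-region options both ways; it relies only on the alloc_batch signature and the WalOptions variants shown above.

// With `skip_wal`, every region gets the Noop provider regardless of the
// allocator kind; otherwise the allocator's own provider is used.
let allocator = WalOptionsAllocator::RaftEngine;
let with_wal = allocator.alloc_batch(4, false)?;
let without_wal = allocator.alloc_batch(4, true)?;
assert!(matches!(with_wal[0], WalOptions::RaftEngine));
assert!(matches!(without_wal[0], WalOptions::Noop));
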
|
||||
|
||||
@@ -39,7 +39,7 @@ impl RoundRobinTopicSelector {
|
||||
// The cursor in the round-robin selector is not persisted, which may break the round-robin strategy across crashes.
|
||||
// Introducing a shuffling strategy may help mitigate this issue.
|
||||
pub fn with_shuffle() -> Self {
|
||||
let offset = rand::thread_rng().gen_range(0..64);
|
||||
let offset = rand::rng().random_range(0..64);
|
||||
Self {
|
||||
cursor: AtomicUsize::new(offset),
|
||||
}
|
||||
|
||||
@@ -12,14 +12,16 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_telemetry::{error, info};
|
||||
use common_wal::config::kafka::common::DEFAULT_BACKOFF_CONFIG;
|
||||
use common_wal::config::kafka::MetasrvKafkaConfig;
|
||||
use rskafka::client::error::Error as RsKafkaError;
|
||||
use rskafka::client::error::ProtocolError::TopicAlreadyExists;
|
||||
use rskafka::client::partition::{Compression, UnknownTopicHandling};
|
||||
use rskafka::client::{Client, ClientBuilder};
|
||||
use rskafka::record::Record;
|
||||
use rskafka::BackoffConfig;
|
||||
use snafu::ResultExt;
|
||||
|
||||
use crate::error::{
|
||||
@@ -32,9 +34,11 @@ use crate::error::{
|
||||
// The `DEFAULT_PARTITION` refers to the index of the partition.
|
||||
const DEFAULT_PARTITION: i32 = 0;
|
||||
|
||||
type KafkaClientRef = Arc<Client>;
|
||||
|
||||
/// Creates topics in kafka.
|
||||
pub struct KafkaTopicCreator {
|
||||
client: Client,
|
||||
client: KafkaClientRef,
|
||||
/// The number of partitions per topic.
|
||||
num_partitions: i32,
|
||||
/// The replication factor of each topic.
|
||||
@@ -44,6 +48,10 @@ pub struct KafkaTopicCreator {
|
||||
}
|
||||
|
||||
impl KafkaTopicCreator {
|
||||
pub fn client(&self) -> &KafkaClientRef {
|
||||
&self.client
|
||||
}
|
||||
|
||||
async fn create_topic(&self, topic: &String, client: &Client) -> Result<()> {
|
||||
let controller = client
|
||||
.controller_client()
|
||||
@@ -127,16 +135,10 @@ impl KafkaTopicCreator {
|
||||
|
||||
pub async fn build_kafka_topic_creator(config: &MetasrvKafkaConfig) -> Result<KafkaTopicCreator> {
|
||||
// Builds a Kafka controller client for creating topics.
|
||||
let backoff_config = BackoffConfig {
|
||||
init_backoff: config.backoff.init,
|
||||
max_backoff: config.backoff.max,
|
||||
base: config.backoff.base as f64,
|
||||
deadline: config.backoff.deadline,
|
||||
};
|
||||
let broker_endpoints = common_wal::resolve_to_ipv4(&config.connection.broker_endpoints)
|
||||
.await
|
||||
.context(ResolveKafkaEndpointSnafu)?;
|
||||
let mut builder = ClientBuilder::new(broker_endpoints).backoff_config(backoff_config);
|
||||
let mut builder = ClientBuilder::new(broker_endpoints).backoff_config(DEFAULT_BACKOFF_CONFIG);
|
||||
if let Some(sasl) = &config.connection.sasl {
|
||||
builder = builder.sasl_config(sasl.config.clone().into_sasl_config());
|
||||
};
|
||||
@@ -151,7 +153,7 @@ pub async fn build_kafka_topic_creator(config: &MetasrvKafkaConfig) -> Result<Ka
|
||||
})?;
|
||||
|
||||
Ok(KafkaTopicCreator {
|
||||
client,
|
||||
client: Arc::new(client),
|
||||
num_partitions: config.kafka_topic.num_partitions,
|
||||
replication_factor: config.kafka_topic.replication_factor,
|
||||
create_topic_timeout: config.kafka_topic.create_topic_timeout.as_millis() as i32,
|
||||
|
||||
@@ -9,4 +9,5 @@ workspace = true
|
||||
|
||||
[dependencies]
|
||||
async-trait.workspace = true
|
||||
common-procedure.workspace = true
|
||||
common-procedure = { workspace = true, features = ["testing"] }
|
||||
snafu.workspace = true
|
||||
|
||||
@@ -18,21 +18,32 @@ use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use common_procedure::store::poison_store::PoisonStore;
|
||||
use common_procedure::test_util::InMemoryPoisonStore;
|
||||
use common_procedure::{
|
||||
Context, ContextProvider, Output, Procedure, ProcedureId, ProcedureState, ProcedureWithId,
|
||||
Result, Status,
|
||||
Context, ContextProvider, Output, PoisonKey, Procedure, ProcedureId, ProcedureState,
|
||||
ProcedureWithId, Result, Status,
|
||||
};
|
||||
|
||||
/// A Mock [ContextProvider].
|
||||
#[derive(Default)]
|
||||
pub struct MockContextProvider {
|
||||
states: HashMap<ProcedureId, ProcedureState>,
|
||||
poison_manager: InMemoryPoisonStore,
|
||||
}
|
||||
|
||||
impl MockContextProvider {
|
||||
/// Returns a new provider.
|
||||
pub fn new(states: HashMap<ProcedureId, ProcedureState>) -> MockContextProvider {
|
||||
MockContextProvider { states }
|
||||
MockContextProvider {
|
||||
states,
|
||||
poison_manager: InMemoryPoisonStore::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a reference to the poison manager.
|
||||
pub fn poison_manager(&self) -> &InMemoryPoisonStore {
|
||||
&self.poison_manager
|
||||
}
|
||||
}
|
||||
|
||||
@@ -41,6 +52,12 @@ impl ContextProvider for MockContextProvider {
|
||||
async fn procedure_state(&self, procedure_id: ProcedureId) -> Result<Option<ProcedureState>> {
|
||||
Ok(self.states.get(&procedure_id).cloned())
|
||||
}
|
||||
|
||||
async fn try_put_poison(&self, key: &PoisonKey, procedure_id: ProcedureId) -> Result<()> {
|
||||
self.poison_manager
|
||||
.try_put_poison(key.to_string(), procedure_id.to_string())
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
/// Executes a procedure until it returns [Status::Done].
|
||||
@@ -61,6 +78,7 @@ pub async fn execute_procedure_until_done(procedure: &mut dyn Procedure) -> Opti
|
||||
"Executing subprocedure is unsupported"
|
||||
),
|
||||
Status::Done { output } => return output,
|
||||
Status::Poisoned { .. } => return None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -88,6 +106,7 @@ pub async fn execute_procedure_once(
|
||||
false
|
||||
}
|
||||
Status::Done { .. } => true,
|
||||
Status::Poisoned { .. } => false,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -109,6 +128,7 @@ pub async fn execute_until_suspended_or_done(
|
||||
Status::Executing { .. } => (),
|
||||
Status::Suspended { subprocedures, .. } => return Some(subprocedures),
|
||||
Status::Done { .. } => break,
|
||||
Status::Poisoned { .. } => unreachable!(),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -21,14 +21,21 @@ use common_macro::stack_trace_debug;
|
||||
use snafu::{Location, Snafu};
|
||||
|
||||
use crate::procedure::ProcedureId;
|
||||
use crate::PoisonKey;
|
||||
|
||||
/// Procedure error.
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
#[stack_trace_debug]
|
||||
pub enum Error {
|
||||
#[snafu(display("Failed to execute procedure due to external error"))]
|
||||
External { source: BoxedError },
|
||||
#[snafu(display(
|
||||
"Failed to execute procedure due to external error, clean poisons: {}",
|
||||
clean_poisons
|
||||
))]
|
||||
External {
|
||||
source: BoxedError,
|
||||
clean_poisons: bool,
|
||||
},
|
||||
|
||||
#[snafu(display("Loader {} is already registered", name))]
|
||||
LoaderConflict {
|
||||
@@ -58,6 +65,13 @@ pub enum Error {
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Too many running procedures, max: {}", max_running_procedures))]
|
||||
TooManyRunningProcedures {
|
||||
max_running_procedures: usize,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to put state, key: '{key}'"))]
|
||||
PutState {
|
||||
key: String,
|
||||
@@ -66,6 +80,32 @@ pub enum Error {
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to put poison, key: '{key}', token: '{token}'"))]
|
||||
PutPoison {
|
||||
key: String,
|
||||
token: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to get poison, key: '{key}'"))]
|
||||
GetPoison {
|
||||
key: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to delete poison, key: '{key}', token: '{token}'"))]
|
||||
DeletePoison {
|
||||
key: String,
|
||||
token: String,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
source: BoxedError,
|
||||
},
|
||||
|
||||
#[snafu(display("Failed to delete {}", key))]
|
||||
DeleteState {
|
||||
key: String,
|
||||
@@ -175,6 +215,21 @@ pub enum Error {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Procedure not found, procedure_id: {}", procedure_id))]
|
||||
ProcedureNotFound {
|
||||
procedure_id: ProcedureId,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Poison key not defined, key: '{key}', procedure_id: '{procedure_id}'"))]
|
||||
PoisonKeyNotDefined {
|
||||
key: PoisonKey,
|
||||
procedure_id: ProcedureId,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -185,14 +240,19 @@ impl ErrorExt for Error {
|
||||
Error::External { source, .. }
|
||||
| Error::PutState { source, .. }
|
||||
| Error::DeleteStates { source, .. }
|
||||
| Error::ListState { source, .. } => source.status_code(),
|
||||
| Error::ListState { source, .. }
|
||||
| Error::PutPoison { source, .. }
|
||||
| Error::DeletePoison { source, .. }
|
||||
| Error::GetPoison { source, .. } => source.status_code(),
|
||||
|
||||
Error::ToJson { .. }
|
||||
| Error::DeleteState { .. }
|
||||
| Error::FromJson { .. }
|
||||
| Error::WaitWatcher { .. }
|
||||
| Error::RetryLater { .. }
|
||||
| Error::RollbackProcedureRecovered { .. } => StatusCode::Internal,
|
||||
| Error::RollbackProcedureRecovered { .. }
|
||||
| Error::TooManyRunningProcedures { .. }
|
||||
| Error::PoisonKeyNotDefined { .. } => StatusCode::Internal,
|
||||
|
||||
Error::RetryTimesExceeded { .. }
|
||||
| Error::RollbackTimesExceeded { .. }
|
||||
@@ -204,7 +264,8 @@ impl ErrorExt for Error {
|
||||
}
|
||||
Error::ProcedurePanic { .. }
|
||||
| Error::ParseSegmentKey { .. }
|
||||
| Error::Unexpected { .. } => StatusCode::Unexpected,
|
||||
| Error::Unexpected { .. }
|
||||
| &Error::ProcedureNotFound { .. } => StatusCode::Unexpected,
|
||||
Error::ProcedureExec { source, .. } => source.status_code(),
|
||||
Error::StartRemoveOutdatedMetaTask { source, .. }
|
||||
| Error::StopRemoveOutdatedMetaTask { source, .. } => source.status_code(),
|
||||
@@ -221,6 +282,15 @@ impl Error {
|
||||
pub fn external<E: ErrorExt + Send + Sync + 'static>(err: E) -> Error {
|
||||
Error::External {
|
||||
source: BoxedError::new(err),
|
||||
clean_poisons: false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new [Error::External] error from source `err` and clean poisons.
|
||||
pub fn external_and_clean_poisons<E: ErrorExt + Send + Sync + 'static>(err: E) -> Error {
|
||||
Error::External {
|
||||
source: BoxedError::new(err),
|
||||
clean_poisons: true,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -236,6 +306,11 @@ impl Error {
|
||||
matches!(self, Error::RetryLater { .. })
|
||||
}
|
||||
|
||||
/// Determine whether it needs to clean poisons.
|
||||
pub fn need_clean_poisons(&self) -> bool {
|
||||
matches!(self, Error::External { clean_poisons, .. } if *clean_poisons)
|
||||
}
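To make the distinction concrete, a tiny sketch of the two constructors above; some_external_error() stands in for any error type implementing ErrorExt.

let keep = Error::external(some_external_error());
let clean = Error::external_and_clean_poisons(some_external_error());

// Only the second variant asks the framework to clean the poisons that the
// failing procedure created.
assert!(!keep.need_clean_poisons());
assert!(clean.need_clean_poisons());
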
|
||||
|
||||
/// Creates a new [Error::RetryLater] or [Error::External] error from source `err` according
|
||||
/// to its [StatusCode].
|
||||
pub fn from_error_ext<E: ErrorExt + Send + Sync + 'static>(err: E) -> Self {
|
||||
|
||||
@@ -23,10 +23,13 @@ mod procedure;
|
||||
pub mod store;
|
||||
pub mod watcher;
|
||||
|
||||
#[cfg(any(test, feature = "testing"))]
|
||||
pub mod test_util;
|
||||
|
||||
pub use crate::error::{Error, Result};
|
||||
pub use crate::procedure::{
|
||||
BoxedProcedure, BoxedProcedureLoader, Context, ContextProvider, LockKey, Output, ParseIdError,
|
||||
Procedure, ProcedureId, ProcedureInfo, ProcedureManager, ProcedureManagerRef, ProcedureState,
|
||||
ProcedureWithId, Status, StringKey,
|
||||
PoisonKey, PoisonKeys, Procedure, ProcedureId, ProcedureInfo, ProcedureManager,
|
||||
ProcedureManagerRef, ProcedureState, ProcedureWithId, Status, StringKey,
|
||||
};
|
||||
pub use crate::watcher::Watcher;
|
||||
|
||||
@@ -15,7 +15,8 @@
|
||||
mod runner;
|
||||
mod rwlock;
|
||||
|
||||
use std::collections::{HashMap, VecDeque};
|
||||
use std::collections::hash_map::Entry;
|
||||
use std::collections::{HashMap, HashSet, VecDeque};
|
||||
use std::sync::atomic::{AtomicBool, AtomicI64, Ordering};
|
||||
use std::sync::{Arc, Mutex, RwLock};
|
||||
use std::time::{Duration, Instant};
|
||||
@@ -25,21 +26,23 @@ use backon::ExponentialBuilder;
|
||||
use common_runtime::{RepeatedTask, TaskFunction};
|
||||
use common_telemetry::tracing_context::{FutureExt, TracingContext};
|
||||
use common_telemetry::{error, info, tracing};
|
||||
use snafu::{ensure, ResultExt};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use tokio::sync::watch::{self, Receiver, Sender};
|
||||
use tokio::sync::{Mutex as TokioMutex, Notify};
|
||||
|
||||
use self::rwlock::KeyRwLock;
|
||||
use crate::error::{
|
||||
self, DuplicateProcedureSnafu, Error, LoaderConflictSnafu, ManagerNotStartSnafu, Result,
|
||||
StartRemoveOutdatedMetaTaskSnafu, StopRemoveOutdatedMetaTaskSnafu,
|
||||
self, DuplicateProcedureSnafu, Error, LoaderConflictSnafu, ManagerNotStartSnafu,
|
||||
PoisonKeyNotDefinedSnafu, ProcedureNotFoundSnafu, Result, StartRemoveOutdatedMetaTaskSnafu,
|
||||
StopRemoveOutdatedMetaTaskSnafu, TooManyRunningProceduresSnafu,
|
||||
};
|
||||
use crate::local::runner::Runner;
|
||||
use crate::procedure::{BoxedProcedureLoader, InitProcedureState, ProcedureInfo};
|
||||
use crate::procedure::{BoxedProcedureLoader, InitProcedureState, PoisonKeys, ProcedureInfo};
|
||||
use crate::store::poison_store::PoisonStoreRef;
|
||||
use crate::store::{ProcedureMessage, ProcedureMessages, ProcedureStore, StateStoreRef};
|
||||
use crate::{
|
||||
BoxedProcedure, ContextProvider, LockKey, ProcedureId, ProcedureManager, ProcedureState,
|
||||
ProcedureWithId, Watcher,
|
||||
BoxedProcedure, ContextProvider, LockKey, PoisonKey, ProcedureId, ProcedureManager,
|
||||
ProcedureState, ProcedureWithId, Watcher,
|
||||
};
|
||||
|
||||
/// The expired time of a procedure's metadata.
|
||||
@@ -65,6 +68,8 @@ pub(crate) struct ProcedureMeta {
|
||||
child_notify: Notify,
|
||||
/// Lock required by this procedure.
|
||||
lock_key: LockKey,
|
||||
/// Poison keys that may cause this procedure to become poisoned during execution.
|
||||
poison_keys: PoisonKeys,
|
||||
/// Sender to notify the procedure state.
|
||||
state_sender: Sender<ProcedureState>,
|
||||
/// Receiver to watch the procedure state.
|
||||
@@ -83,6 +88,7 @@ impl ProcedureMeta {
|
||||
procedure_state: ProcedureState,
|
||||
parent_id: Option<ProcedureId>,
|
||||
lock_key: LockKey,
|
||||
poison_keys: PoisonKeys,
|
||||
type_name: &str,
|
||||
) -> ProcedureMeta {
|
||||
let (state_sender, state_receiver) = watch::channel(procedure_state);
|
||||
@@ -91,6 +97,7 @@ impl ProcedureMeta {
|
||||
parent_id,
|
||||
child_notify: Notify::new(),
|
||||
lock_key,
|
||||
poison_keys,
|
||||
state_sender,
|
||||
state_receiver,
|
||||
children: Mutex::new(Vec::new()),
|
||||
@@ -147,7 +154,6 @@ type ProcedureMetaRef = Arc<ProcedureMeta>;
|
||||
/// Procedure loaded from store.
|
||||
struct LoadedProcedure {
|
||||
procedure: BoxedProcedure,
|
||||
parent_id: Option<ProcedureId>,
|
||||
step: u32,
|
||||
}
|
||||
|
||||
@@ -157,12 +163,13 @@ pub(crate) struct ManagerContext {
|
||||
loaders: Mutex<HashMap<String, BoxedProcedureLoader>>,
|
||||
key_lock: KeyRwLock<String>,
|
||||
procedures: RwLock<HashMap<ProcedureId, ProcedureMetaRef>>,
|
||||
/// Messages loaded from the procedure store.
|
||||
messages: Mutex<HashMap<ProcedureId, ProcedureMessage>>,
|
||||
running_procedures: Mutex<HashSet<ProcedureId>>,
|
||||
/// Ids and finished time of finished procedures.
|
||||
finished_procedures: Mutex<VecDeque<(ProcedureId, Instant)>>,
|
||||
/// Running flag.
|
||||
running: Arc<AtomicBool>,
|
||||
/// Poison manager.
|
||||
poison_manager: PoisonStoreRef,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
@@ -170,18 +177,41 @@ impl ContextProvider for ManagerContext {
|
||||
async fn procedure_state(&self, procedure_id: ProcedureId) -> Result<Option<ProcedureState>> {
|
||||
Ok(self.state(procedure_id))
|
||||
}
|
||||
|
||||
async fn try_put_poison(&self, key: &PoisonKey, procedure_id: ProcedureId) -> Result<()> {
|
||||
{
|
||||
// validate the procedure exists
|
||||
let procedures = self.procedures.read().unwrap();
|
||||
let procedure = procedures
|
||||
.get(&procedure_id)
|
||||
.context(ProcedureNotFoundSnafu { procedure_id })?;
|
||||
|
||||
// validate the poison key is defined
|
||||
ensure!(
|
||||
procedure.poison_keys.contains(key),
|
||||
PoisonKeyNotDefinedSnafu {
|
||||
key: key.clone(),
|
||||
procedure_id
|
||||
}
|
||||
);
|
||||
}
|
||||
let key = key.to_string();
|
||||
let procedure_id = procedure_id.to_string();
|
||||
self.poison_manager.try_put_poison(key, procedure_id).await
|
||||
}
|
||||
}
|
||||
|
||||
impl ManagerContext {
|
||||
/// Returns a new [ManagerContext].
|
||||
fn new() -> ManagerContext {
|
||||
fn new(poison_manager: PoisonStoreRef) -> ManagerContext {
|
||||
ManagerContext {
|
||||
key_lock: KeyRwLock::new(),
|
||||
loaders: Mutex::new(HashMap::new()),
|
||||
procedures: RwLock::new(HashMap::new()),
|
||||
messages: Mutex::new(HashMap::new()),
|
||||
running_procedures: Mutex::new(HashSet::new()),
|
||||
finished_procedures: Mutex::new(VecDeque::new()),
|
||||
running: Arc::new(AtomicBool::new(false)),
|
||||
poison_manager,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -210,18 +240,27 @@ impl ManagerContext {
|
||||
procedures.contains_key(&procedure_id)
|
||||
}
|
||||
|
||||
/// Returns the number of running procedures.
|
||||
fn num_running_procedures(&self) -> usize {
|
||||
self.running_procedures.lock().unwrap().len()
|
||||
}
|
||||
|
||||
/// Tries to insert the `procedure` into the context if there is no procedure
|
||||
/// with same [ProcedureId].
|
||||
///
|
||||
/// Returns `false` if there is already a procedure using the same [ProcedureId].
|
||||
fn try_insert_procedure(&self, meta: ProcedureMetaRef) -> bool {
|
||||
let procedure_id = meta.id;
|
||||
let mut procedures = self.procedures.write().unwrap();
|
||||
if procedures.contains_key(&meta.id) {
|
||||
return false;
|
||||
match procedures.entry(procedure_id) {
|
||||
Entry::Occupied(_) => return false,
|
||||
Entry::Vacant(vacant_entry) => {
|
||||
vacant_entry.insert(meta);
|
||||
}
|
||||
}
|
||||
|
||||
let old = procedures.insert(meta.id, meta);
|
||||
debug_assert!(old.is_none());
|
||||
let mut running_procedures = self.running_procedures.lock().unwrap();
|
||||
running_procedures.insert(procedure_id);
|
||||
|
||||
true
|
||||
}
|
||||
@@ -264,16 +303,6 @@ impl ManagerContext {
|
||||
}
|
||||
}
|
||||
|
||||
/// Load procedure with specific `procedure_id` from cached [ProcedureMessage]s.
|
||||
fn load_one_procedure(&self, procedure_id: ProcedureId) -> Option<LoadedProcedure> {
|
||||
let message = {
|
||||
let messages = self.messages.lock().unwrap();
|
||||
messages.get(&procedure_id).cloned()?
|
||||
};
|
||||
|
||||
self.load_one_procedure_from_message(procedure_id, &message)
|
||||
}
|
||||
|
||||
/// Load procedure from specific [ProcedureMessage].
|
||||
fn load_one_procedure_from_message(
|
||||
&self,
|
||||
@@ -301,7 +330,6 @@ impl ManagerContext {
|
||||
|
||||
Some(LoadedProcedure {
|
||||
procedure,
|
||||
parent_id: message.parent_id,
|
||||
step: message.step,
|
||||
})
|
||||
}
|
||||
@@ -350,23 +378,19 @@ impl ManagerContext {
|
||||
}
|
||||
}
|
||||
|
||||
/// Remove cached [ProcedureMessage] by ids.
|
||||
fn remove_messages(&self, procedure_ids: &[ProcedureId]) {
|
||||
let mut messages = self.messages.lock().unwrap();
|
||||
for procedure_id in procedure_ids {
|
||||
let _ = messages.remove(procedure_id);
|
||||
}
|
||||
}
|
||||
|
||||
/// Clean resources of finished procedures.
|
||||
fn on_procedures_finish(&self, procedure_ids: &[ProcedureId]) {
|
||||
self.remove_messages(procedure_ids);
|
||||
|
||||
// Since users need to query the procedure state, we can't remove the
|
||||
// meta of the procedure directly.
|
||||
let now = Instant::now();
|
||||
let mut finished_procedures = self.finished_procedures.lock().unwrap();
|
||||
finished_procedures.extend(procedure_ids.iter().map(|id| (*id, now)));
|
||||
|
||||
// Remove the procedures from the running set.
|
||||
let mut running_procedures = self.running_procedures.lock().unwrap();
|
||||
for procedure_id in procedure_ids {
|
||||
running_procedures.remove(procedure_id);
|
||||
}
|
||||
}
|
||||
|
||||
/// Remove metadata of outdated procedures.
|
||||
@@ -410,6 +434,7 @@ pub struct ManagerConfig {
|
||||
pub retry_delay: Duration,
|
||||
pub remove_outdated_meta_task_interval: Duration,
|
||||
pub remove_outdated_meta_ttl: Duration,
|
||||
pub max_running_procedures: usize,
|
||||
}
|
||||
|
||||
impl Default for ManagerConfig {
|
||||
@@ -420,6 +445,7 @@ impl Default for ManagerConfig {
|
||||
retry_delay: Duration::from_millis(500),
|
||||
remove_outdated_meta_task_interval: Duration::from_secs(60 * 10),
|
||||
remove_outdated_meta_ttl: META_TTL,
|
||||
max_running_procedures: 128,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -437,8 +463,12 @@ pub struct LocalManager {
|
||||
|
||||
impl LocalManager {
|
||||
/// Create a new [LocalManager] with specific `config`.
|
||||
pub fn new(config: ManagerConfig, state_store: StateStoreRef) -> LocalManager {
|
||||
let manager_ctx = Arc::new(ManagerContext::new());
|
||||
pub fn new(
|
||||
config: ManagerConfig,
|
||||
state_store: StateStoreRef,
|
||||
poison_store: PoisonStoreRef,
|
||||
) -> LocalManager {
|
||||
let manager_ctx = Arc::new(ManagerContext::new(poison_store));
|
||||
|
||||
LocalManager {
|
||||
manager_ctx,
|
||||
@@ -476,6 +506,7 @@ impl LocalManager {
|
||||
procedure_state,
|
||||
None,
|
||||
procedure.lock_key(),
|
||||
procedure.poison_keys(),
|
||||
procedure.type_name(),
|
||||
));
|
||||
let runner = Runner {
|
||||
@@ -492,6 +523,13 @@ impl LocalManager {
|
||||
|
||||
let watcher = meta.state_receiver.clone();
|
||||
|
||||
ensure!(
|
||||
self.manager_ctx.num_running_procedures() < self.config.max_running_procedures,
|
||||
TooManyRunningProceduresSnafu {
|
||||
max_running_procedures: self.config.max_running_procedures,
|
||||
}
|
||||
);
|
||||
|
||||
// Inserts meta into the manager before actually spawning the runner.
|
||||
ensure!(
|
||||
self.manager_ctx.try_insert_procedure(meta),
|
||||
@@ -718,6 +756,7 @@ pub(crate) mod test_util {
|
||||
ProcedureState::Running,
|
||||
None,
|
||||
LockKey::default(),
|
||||
PoisonKeys::default(),
|
||||
"ProcedureAdapter",
|
||||
)
|
||||
}
|
||||
@@ -741,11 +780,17 @@ mod tests {
|
||||
use super::*;
|
||||
use crate::error::{self, Error};
|
||||
use crate::store::state_store::ObjectStateStore;
|
||||
use crate::test_util::InMemoryPoisonStore;
|
||||
use crate::{Context, Procedure, Status};
|
||||
|
||||
fn new_test_manager_context() -> ManagerContext {
|
||||
let poison_manager = Arc::new(InMemoryPoisonStore::default());
|
||||
ManagerContext::new(poison_manager)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_manager_context() {
|
||||
let ctx = ManagerContext::new();
|
||||
let ctx = new_test_manager_context();
|
||||
let meta = Arc::new(test_util::procedure_meta_for_test());
|
||||
|
||||
assert!(!ctx.contains_procedure(meta.id));
|
||||
@@ -761,7 +806,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_manager_context_insert_duplicate() {
|
||||
let ctx = ManagerContext::new();
|
||||
let ctx = new_test_manager_context();
|
||||
let meta = Arc::new(test_util::procedure_meta_for_test());
|
||||
|
||||
assert!(ctx.try_insert_procedure(meta.clone()));
|
||||
@@ -783,7 +828,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_procedures_in_tree() {
|
||||
let ctx = ManagerContext::new();
|
||||
let ctx = new_test_manager_context();
|
||||
let root = Arc::new(test_util::procedure_meta_for_test());
|
||||
assert!(ctx.try_insert_procedure(root.clone()));
|
||||
|
||||
@@ -807,6 +852,7 @@ mod tests {
|
||||
struct ProcedureToLoad {
|
||||
content: String,
|
||||
lock_key: LockKey,
|
||||
poison_keys: PoisonKeys,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
@@ -826,6 +872,10 @@ mod tests {
|
||||
fn lock_key(&self) -> LockKey {
|
||||
self.lock_key.clone()
|
||||
}
|
||||
|
||||
fn poison_keys(&self) -> PoisonKeys {
|
||||
self.poison_keys.clone()
|
||||
}
|
||||
}
|
||||
|
||||
impl ProcedureToLoad {
|
||||
@@ -833,6 +883,7 @@ mod tests {
|
||||
ProcedureToLoad {
|
||||
content: content.to_string(),
|
||||
lock_key: LockKey::default(),
|
||||
poison_keys: PoisonKeys::default(),
}
}

@@ -855,7 +906,8 @@ mod tests {
..Default::default()
};
let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
let manager = LocalManager::new(config, state_store);
let poison_manager = Arc::new(InMemoryPoisonStore::new());
let manager = LocalManager::new(config, state_store, poison_manager);
manager.manager_ctx.start();

manager
@@ -879,7 +931,8 @@ mod tests {
..Default::default()
};
let state_store = Arc::new(ObjectStateStore::new(object_store.clone()));
let manager = LocalManager::new(config, state_store);
let poison_manager = Arc::new(InMemoryPoisonStore::new());
let manager = LocalManager::new(config, state_store, poison_manager);
manager.manager_ctx.start();

manager
@@ -932,7 +985,8 @@ mod tests {
..Default::default()
};
let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
let manager = LocalManager::new(config, state_store);
let poison_manager = Arc::new(InMemoryPoisonStore::new());
let manager = LocalManager::new(config, state_store, poison_manager);
manager.manager_ctx.start();

let procedure_id = ProcedureId::random();
@@ -983,7 +1037,8 @@ mod tests {
..Default::default()
};
let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
let manager = LocalManager::new(config, state_store);
let poison_manager = Arc::new(InMemoryPoisonStore::new());
let manager = LocalManager::new(config, state_store, poison_manager);
manager.manager_ctx.start();

#[derive(Debug)]
@@ -1022,6 +1077,10 @@ mod tests {
fn lock_key(&self) -> LockKey {
LockKey::single_exclusive("test.submit")
}

fn poison_keys(&self) -> PoisonKeys {
PoisonKeys::default()
}
}

let check_procedure = |procedure| async {
@@ -1059,7 +1118,8 @@ mod tests {
..Default::default()
};
let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
let manager = LocalManager::new(config, state_store);
let poison_manager = Arc::new(InMemoryPoisonStore::new());
let manager = LocalManager::new(config, state_store, poison_manager);

let mut procedure = ProcedureToLoad::new("submit");
procedure.lock_key = LockKey::single_exclusive("test.submit");
@@ -1086,7 +1146,8 @@ mod tests {
..Default::default()
};
let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
let manager = LocalManager::new(config, state_store);
let poison_manager = Arc::new(InMemoryPoisonStore::new());
let manager = LocalManager::new(config, state_store, poison_manager);

manager.start().await.unwrap();
manager.stop().await.unwrap();
@@ -1119,9 +1180,11 @@ mod tests {
retry_delay: Duration::from_millis(500),
remove_outdated_meta_task_interval: Duration::from_millis(1),
remove_outdated_meta_ttl: Duration::from_millis(1),
max_running_procedures: 128,
};
let state_store = Arc::new(ObjectStateStore::new(object_store.clone()));
let manager = LocalManager::new(config, state_store);
let poison_manager = Arc::new(InMemoryPoisonStore::new());
let manager = LocalManager::new(config, state_store, poison_manager);
manager.manager_ctx.set_running();

let mut procedure = ProcedureToLoad::new("submit");
@@ -1191,11 +1254,76 @@ mod tests {
.is_none());
}

#[tokio::test]
async fn test_too_many_running_procedures() {
let dir = create_temp_dir("too_many_running_procedures");
let config = ManagerConfig {
parent_path: "data/".to_string(),
max_retry_times: 3,
retry_delay: Duration::from_millis(500),
max_running_procedures: 1,
..Default::default()
};
let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
let poison_manager = Arc::new(InMemoryPoisonStore::new());
let manager = LocalManager::new(config, state_store, poison_manager);
manager.manager_ctx.set_running();

manager
.manager_ctx
.running_procedures
.lock()
.unwrap()
.insert(ProcedureId::random());
manager.start().await.unwrap();

// Submit a new procedure should fail.
let mut procedure = ProcedureToLoad::new("submit");
procedure.lock_key = LockKey::single_exclusive("test.submit");
let procedure_id = ProcedureId::random();
let err = manager
.submit(ProcedureWithId {
id: procedure_id,
procedure: Box::new(procedure),
})
.await
.unwrap_err();
assert!(matches!(err, Error::TooManyRunningProcedures { .. }));

manager
.manager_ctx
.running_procedures
.lock()
.unwrap()
.clear();

// Submit a new procedure should succeed.
let mut procedure = ProcedureToLoad::new("submit");
procedure.lock_key = LockKey::single_exclusive("test.submit");
assert!(manager
.submit(ProcedureWithId {
id: procedure_id,
procedure: Box::new(procedure),
})
.await
.is_ok());
assert!(manager
.procedure_state(procedure_id)
.await
.unwrap()
.is_some());
// Wait for the procedure done.
let mut watcher = manager.procedure_watcher(procedure_id).unwrap();
watcher.changed().await.unwrap();
assert!(watcher.borrow().is_done());
}

#[derive(Debug)]
struct ProcedureToRecover {
content: String,
lock_key: LockKey,
notify: Option<Arc<Notify>>,
poison_keys: PoisonKeys,
}

#[async_trait]
@@ -1220,6 +1348,10 @@ mod tests {
self.notify.as_ref().unwrap().notify_one();
Ok(())
}

fn poison_keys(&self) -> PoisonKeys {
self.poison_keys.clone()
}
}

impl ProcedureToRecover {
@@ -1227,6 +1359,7 @@ mod tests {
ProcedureToRecover {
content: content.to_string(),
lock_key: LockKey::default(),
poison_keys: PoisonKeys::default(),
notify: None,
}
}
@@ -1236,6 +1369,7 @@ mod tests {
let procedure = ProcedureToRecover {
content: json.to_string(),
lock_key: LockKey::default(),
poison_keys: PoisonKeys::default(),
notify: Some(notify.clone()),
};
Ok(Box::new(procedure) as _)
@@ -1256,7 +1390,8 @@ mod tests {
..Default::default()
};
let state_store = Arc::new(ObjectStateStore::new(object_store.clone()));
let manager = LocalManager::new(config, state_store);
let poison_manager = Arc::new(InMemoryPoisonStore::new());
let manager = LocalManager::new(config, state_store, poison_manager);
manager.manager_ctx.start();

let notify = Arc::new(Notify::new());

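The hunks above all make the same mechanical change: `LocalManager::new` now takes a poison store as a third argument. A minimal sketch of the new construction path, written in the same crate-internal style as the tests in this diff (the helper name is made up; only the types and the three-argument constructor come from the hunks above):

```rust
// Sketch only: mirrors the updated test setup above (crate-internal paths).
use std::sync::Arc;
use std::time::Duration;

use crate::local::{LocalManager, ManagerConfig};
use crate::store::state_store::ObjectStateStore;
use crate::test_util::InMemoryPoisonStore;

fn new_test_manager(object_store: object_store::ObjectStore) -> LocalManager {
    let config = ManagerConfig {
        max_retry_times: 3,
        retry_delay: Duration::from_millis(500),
        ..Default::default()
    };
    let state_store = Arc::new(ObjectStateStore::new(object_store));
    // New third argument: the poison store used to fence off poisoned resources.
    let poison_manager = Arc::new(InMemoryPoisonStore::new());
    LocalManager::new(config, state_store, poison_manager)
}
```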
@@ -207,7 +207,7 @@ impl Runner {
if let Some(d) = retry.next() {
let millis = d.as_millis() as u64;
// Add random noise to the retry delay to avoid retry storms.
let noise = rand::thread_rng().gen_range(0..(millis / 4) + 1);
let noise = rand::rng().random_range(0..(millis / 4) + 1);
let d = d.add(Duration::from_millis(noise));

self.wait_on_err(d, retry_times).await;
@@ -238,11 +238,34 @@ impl Runner {
}
ProcedureState::Done { .. } => return,
ProcedureState::Failed { .. } => return,
ProcedureState::Poisoned { .. } => return,
}
self.execute_once(ctx).await;
}
}

async fn clean_poisons(&mut self) -> Result<()> {
let mut error = None;
for key in self.meta.poison_keys.iter() {
let key = key.to_string();
if let Err(e) = self
.manager_ctx
.poison_manager
.delete_poison(key, self.meta.id.to_string())
.await
{
error!(e; "Failed to clean poisons for procedure: {}", self.meta.id);
error = Some(e);
}
}

// returns the last error if any.
if let Some(e) = error {
return Err(e);
}
Ok(())
}

async fn rollback(&mut self, ctx: &Context, err: Arc<Error>) {
if self.procedure.rollback_supported() {
if let Err(e) = self.procedure.rollback(ctx).await {
@@ -255,7 +278,7 @@ impl Runner {
}

async fn prepare_rollback(&mut self, err: Arc<Error>) {
if let Err(e) = self.write_procedure_state(err.to_string()).await {
if let Err(e) = self.write_rollback_procedure_state(err.to_string()).await {
self.meta
.set_state(ProcedureState::prepare_rollback(Arc::new(e)));
return;
@@ -288,26 +311,48 @@ impl Runner {
return;
}

// Cleans poisons before persist.
if status.need_clean_poisons() {
if let Err(e) = self.clean_poisons().await {
error!(e; "Failed to clean poison for procedure: {}", self.meta.id);
self.meta.set_state(ProcedureState::retrying(Arc::new(e)));
return;
}
}

if status.need_persist() {
if let Err(err) = self.persist_procedure().await {
self.meta.set_state(ProcedureState::retrying(Arc::new(err)));
if let Err(e) = self.persist_procedure().await {
error!(e; "Failed to persist procedure: {}", self.meta.id);
self.meta.set_state(ProcedureState::retrying(Arc::new(e)));
return;
}
}

match status {
Status::Executing { .. } => (),
Status::Executing { .. } => {}
Status::Suspended { subprocedures, .. } => {
self.on_suspended(subprocedures).await;
}
Status::Done { output } => {
if let Err(e) = self.commit_procedure().await {
error!(e; "Failed to commit procedure: {}", self.meta.id);
self.meta.set_state(ProcedureState::retrying(Arc::new(e)));
return;
}

self.done(output);
}
Status::Poisoned { error, keys } => {
error!(
error;
"Procedure {}-{} is poisoned, keys: {:?}",
self.procedure.type_name(),
self.meta.id,
keys,
);
self.meta
.set_state(ProcedureState::poisoned(keys, Arc::new(error)));
}
}
}
Err(e) => {
@@ -327,6 +372,14 @@ impl Runner {
return;
}

if e.need_clean_poisons() {
if let Err(e) = self.clean_poisons().await {
error!(e; "Failed to clean poison for procedure: {}", self.meta.id);
self.meta.set_state(ProcedureState::retrying(Arc::new(e)));
return;
}
}

if e.is_retry_later() {
self.meta.set_state(ProcedureState::retrying(Arc::new(e)));
return;
@@ -339,7 +392,9 @@ impl Runner {
}
ProcedureState::PrepareRollback { error } => self.prepare_rollback(error).await,
ProcedureState::RollingBack { error } => self.rollback(ctx, error).await,
ProcedureState::Failed { .. } | ProcedureState::Done { .. } => (),
ProcedureState::Failed { .. }
| ProcedureState::Done { .. }
| ProcedureState::Poisoned { .. } => (),
}
}

@@ -348,30 +403,21 @@ impl Runner {
&self,
procedure_id: ProcedureId,
procedure_state: ProcedureState,
mut procedure: BoxedProcedure,
procedure: BoxedProcedure,
) {
if self.manager_ctx.contains_procedure(procedure_id) {
// If the parent has already submitted this procedure, don't submit it again.
return;
}

let mut step = 0;
if let Some(loaded_procedure) = self.manager_ctx.load_one_procedure(procedure_id) {
// Try to load procedure state from the message to avoid re-run the subprocedure
// from initial state.
assert_eq!(self.meta.id, loaded_procedure.parent_id.unwrap());

// Use the dumped procedure from the procedure store.
procedure = loaded_procedure.procedure;
// Update step number.
step = loaded_procedure.step;
}
let step = 0;

let meta = Arc::new(ProcedureMeta::new(
procedure_id,
procedure_state,
Some(self.meta.id),
procedure.lock_key(),
procedure.poison_keys(),
procedure.type_name(),
));
let runner = Runner {
@@ -494,7 +540,7 @@ impl Runner {
Ok(())
}

async fn write_procedure_state(&mut self, error: String) -> Result<()> {
async fn write_rollback_procedure_state(&mut self, error: String) -> Result<()> {
// Persists procedure state
let type_name = self.procedure.type_name().to_string();
let data = self.procedure.dump()?;
@@ -549,8 +595,10 @@ mod tests {

use super::*;
use crate::local::test_util;
use crate::procedure::PoisonKeys;
use crate::store::proc_path;
use crate::{ContextProvider, Error, LockKey, Procedure};
use crate::test_util::InMemoryPoisonStore;
use crate::{ContextProvider, Error, LockKey, PoisonKey, Procedure};

const ROOT_ID: &str = "9f805a1f-05f7-490c-9f91-bd56e3cc54c1";

@@ -562,7 +610,9 @@ mod tests {
Runner {
meta,
procedure,
manager_ctx: Arc::new(ManagerContext::new()),
manager_ctx: Arc::new(ManagerContext::new(
Arc::new(InMemoryPoisonStore::default()),
)),
step: 0,
exponential_builder: ExponentialBuilder::default(),
store,
@@ -587,6 +637,16 @@ mod tests {
assert_eq!(files, files_in_dir);
}

fn context_with_provider(
procedure_id: ProcedureId,
provider: Arc<dyn ContextProvider>,
) -> Context {
Context {
procedure_id,
provider,
}
}

fn context_without_provider(procedure_id: ProcedureId) -> Context {
struct MockProvider;

@@ -598,6 +658,14 @@ mod tests {
) -> Result<Option<ProcedureState>> {
unimplemented!()
}

async fn try_put_poison(
&self,
_key: &PoisonKey,
_procedure_id: ProcedureId,
) -> Result<()> {
unimplemented!()
}
}

Context {
@@ -611,6 +679,7 @@ mod tests {
struct ProcedureAdapter<F> {
data: String,
lock_key: LockKey,
poison_keys: PoisonKeys,
exec_fn: F,
rollback_fn: Option<RollbackFn>,
}
@@ -620,6 +689,7 @@ mod tests {
let mut meta = test_util::procedure_meta_for_test();
meta.id = ProcedureId::parse_str(uuid).unwrap();
meta.lock_key = self.lock_key.clone();
meta.poison_keys = self.poison_keys.clone();

Arc::new(meta)
}
@@ -657,6 +727,10 @@ mod tests {
fn lock_key(&self) -> LockKey {
self.lock_key.clone()
}

fn poison_keys(&self) -> PoisonKeys {
self.poison_keys.clone()
}
}

async fn execute_once_normal(persist: bool, first_files: &[&str], second_files: &[&str]) {
@@ -665,7 +739,7 @@ mod tests {
times += 1;
async move {
if times == 1 {
Ok(Status::Executing { persist })
Ok(Status::executing(persist))
} else {
Ok(Status::done())
}
@@ -675,6 +749,7 @@ mod tests {
let normal = ProcedureAdapter {
data: "normal".to_string(),
lock_key: LockKey::single_exclusive("catalog.schema.table"),
poison_keys: PoisonKeys::default(),
exec_fn,
rollback_fn: None,
};
@@ -739,6 +814,7 @@ mod tests {
let suspend = ProcedureAdapter {
data: "suspend".to_string(),
lock_key: LockKey::single_exclusive("catalog.schema.table"),
poison_keys: PoisonKeys::default(),
exec_fn,
rollback_fn: None,
};
@@ -763,7 +839,7 @@ mod tests {
async move {
if times == 1 {
time::sleep(Duration::from_millis(200)).await;
Ok(Status::Executing { persist: true })
Ok(Status::executing(true))
} else {
Ok(Status::done())
}
@@ -773,6 +849,7 @@ mod tests {
let child = ProcedureAdapter {
data: "child".to_string(),
lock_key: LockKey::new_exclusive(keys.iter().map(|k| k.to_string())),
poison_keys: PoisonKeys::default(),
exec_fn,
rollback_fn: None,
};
@@ -842,6 +919,7 @@ mod tests {
let parent = ProcedureAdapter {
data: "parent".to_string(),
lock_key: LockKey::single_exclusive("catalog.schema.table"),
poison_keys: PoisonKeys::default(),
exec_fn,
rollback_fn: None,
};
@@ -853,7 +931,8 @@ mod tests {
let object_store = test_util::new_object_store(&dir);
let procedure_store = Arc::new(ProcedureStore::from_object_store(object_store.clone()));
let mut runner = new_runner(meta.clone(), Box::new(parent), procedure_store.clone());
let manager_ctx = Arc::new(ManagerContext::new());
let poison_manager = Arc::new(InMemoryPoisonStore::default());
let manager_ctx = Arc::new(ManagerContext::new(poison_manager));
manager_ctx.start();
// Manually add this procedure to the manager ctx.
assert!(manager_ctx.try_insert_procedure(meta));
@@ -885,10 +964,11 @@ mod tests {

#[tokio::test]
async fn test_running_is_stopped() {
let exec_fn = move |_| async move { Ok(Status::Executing { persist: true }) }.boxed();
let exec_fn = move |_| async move { Ok(Status::executing(true)) }.boxed();
let normal = ProcedureAdapter {
data: "normal".to_string(),
lock_key: LockKey::single_exclusive("catalog.schema.table"),
poison_keys: PoisonKeys::default(),
exec_fn,
rollback_fn: None,
};
@@ -933,6 +1013,7 @@ mod tests {
let normal = ProcedureAdapter {
data: "fail".to_string(),
lock_key: LockKey::single_exclusive("catalog.schema.table"),
poison_keys: PoisonKeys::default(),
exec_fn,
rollback_fn: None,
};
@@ -959,6 +1040,7 @@ mod tests {
let fail = ProcedureAdapter {
data: "fail".to_string(),
lock_key: LockKey::single_exclusive("catalog.schema.table"),
poison_keys: PoisonKeys::default(),
exec_fn,
rollback_fn: None,
};
@@ -995,6 +1077,7 @@ mod tests {
let fail = ProcedureAdapter {
data: "fail".to_string(),
lock_key: LockKey::single_exclusive("catalog.schema.table"),
poison_keys: PoisonKeys::default(),
exec_fn,
rollback_fn: Some(Box::new(rollback_fn)),
};
@@ -1046,6 +1129,7 @@ mod tests {
let retry_later = ProcedureAdapter {
data: "retry_later".to_string(),
lock_key: LockKey::single_exclusive("catalog.schema.table"),
poison_keys: PoisonKeys::default(),
exec_fn,
rollback_fn: None,
};
@@ -1082,6 +1166,7 @@ mod tests {
let exceed_max_retry_later = ProcedureAdapter {
data: "exceed_max_retry_later".to_string(),
lock_key: LockKey::single_exclusive("catalog.schema.table"),
poison_keys: PoisonKeys::default(),
exec_fn,
rollback_fn: None,
};
@@ -1117,6 +1202,7 @@ mod tests {
let exceed_max_retry_later = ProcedureAdapter {
data: "exceed_max_rollback".to_string(),
lock_key: LockKey::single_exclusive("catalog.schema.table"),
poison_keys: PoisonKeys::default(),
exec_fn,
rollback_fn: Some(Box::new(rollback_fn)),
};
@@ -1159,6 +1245,7 @@ mod tests {
let retry_later = ProcedureAdapter {
data: "rollback_after_retry_fail".to_string(),
lock_key: LockKey::single_exclusive("catalog.schema.table"),
poison_keys: PoisonKeys::default(),
exec_fn,
rollback_fn: Some(Box::new(rollback_fn)),
};
@@ -1203,6 +1290,7 @@ mod tests {
let fail = ProcedureAdapter {
data: "fail".to_string(),
lock_key: LockKey::single_exclusive("catalog.schema.table.region-0"),
poison_keys: PoisonKeys::default(),
exec_fn,
rollback_fn: None,
};
@@ -1238,6 +1326,7 @@ mod tests {
let parent = ProcedureAdapter {
data: "parent".to_string(),
lock_key: LockKey::single_exclusive("catalog.schema.table"),
poison_keys: PoisonKeys::default(),
exec_fn,
rollback_fn: None,
};
@@ -1248,7 +1337,8 @@ mod tests {
let object_store = test_util::new_object_store(&dir);
let procedure_store = Arc::new(ProcedureStore::from_object_store(object_store.clone()));
let mut runner = new_runner(meta.clone(), Box::new(parent), procedure_store);
let manager_ctx = Arc::new(ManagerContext::new());
let poison_manager = Arc::new(InMemoryPoisonStore::default());
let manager_ctx = Arc::new(ManagerContext::new(poison_manager));
manager_ctx.start();
// Manually add this procedure to the manager ctx.
assert!(manager_ctx.try_insert_procedure(meta.clone()));
@@ -1261,4 +1351,327 @@ mod tests {
let err = meta.state().error().unwrap().output_msg();
assert!(err.contains("subprocedure failed"), "{err}");
}

#[tokio::test]
async fn test_execute_with_clean_poisons() {
common_telemetry::init_default_ut_logging();
let mut times = 0;
let poison_key = PoisonKey::new("table/1024");
let moved_poison_key = poison_key.clone();
let exec_fn = move |ctx: Context| {
times += 1;
let poison_key = moved_poison_key.clone();
async move {
if times == 1 {
// Put the poison to the context.
ctx.provider
.try_put_poison(&poison_key, ctx.procedure_id)
.await
.unwrap();

Ok(Status::executing(true))
} else {
Ok(Status::executing_with_clean_poisons(true))
}
}
.boxed()
};
let poison = ProcedureAdapter {
data: "poison".to_string(),
lock_key: LockKey::single_exclusive("catalog.schema.table"),
poison_keys: PoisonKeys::new(vec![poison_key.clone()]),
exec_fn,
rollback_fn: None,
};

let dir = create_temp_dir("clean_poisons");
let meta = poison.new_meta(ROOT_ID);

let object_store = test_util::new_object_store(&dir);
let procedure_store = Arc::new(ProcedureStore::from_object_store(object_store.clone()));
let mut runner = new_runner(meta.clone(), Box::new(poison), procedure_store.clone());

// Use the manager ctx as the context provider.
let ctx = context_with_provider(
meta.id,
runner.manager_ctx.clone() as Arc<dyn ContextProvider>,
);
// Manually add this procedure to the manager ctx.
runner
.manager_ctx
.procedures
.write()
.unwrap()
.insert(meta.id, runner.meta.clone());

runner.manager_ctx.start();
runner.execute_once(&ctx).await;
let state = runner.meta.state();
assert!(state.is_running(), "{state:?}");

let procedure_id = runner
.manager_ctx
.poison_manager
.get_poison(&poison_key.to_string())
.await
.unwrap();
// poison key should exist.
assert!(procedure_id.is_some());

runner.execute_once(&ctx).await;
let state = runner.meta.state();
assert!(state.is_running(), "{state:?}");

let procedure_id = runner
.manager_ctx
.poison_manager
.get_poison(&poison_key.to_string())
.await
.unwrap();
// poison key should be deleted.
assert!(procedure_id.is_none());
}

#[tokio::test]
async fn test_execute_error_with_clean_poisons() {
common_telemetry::init_default_ut_logging();
let mut times = 0;
let poison_key = PoisonKey::new("table/1024");
let moved_poison_key = poison_key.clone();
let exec_fn = move |ctx: Context| {
times += 1;
let poison_key = moved_poison_key.clone();
async move {
if times == 1 {
// Put the poison to the context.
ctx.provider
.try_put_poison(&poison_key, ctx.procedure_id)
.await
.unwrap();

Ok(Status::executing(true))
} else {
Err(Error::external_and_clean_poisons(MockError::new(
StatusCode::Unexpected,
)))
}
}
.boxed()
};
let poison = ProcedureAdapter {
data: "poison".to_string(),
lock_key: LockKey::single_exclusive("catalog.schema.table"),
poison_keys: PoisonKeys::new(vec![poison_key.clone()]),
exec_fn,
rollback_fn: None,
};

let dir = create_temp_dir("error_with_clean_poisons");
let meta = poison.new_meta(ROOT_ID);

let object_store = test_util::new_object_store(&dir);
let procedure_store = Arc::new(ProcedureStore::from_object_store(object_store.clone()));
let mut runner = new_runner(meta.clone(), Box::new(poison), procedure_store.clone());

// Use the manager ctx as the context provider.
let ctx = context_with_provider(
meta.id,
runner.manager_ctx.clone() as Arc<dyn ContextProvider>,
);
// Manually add this procedure to the manager ctx.
runner
.manager_ctx
.procedures
.write()
.unwrap()
.insert(meta.id, runner.meta.clone());

runner.manager_ctx.start();
runner.execute_once(&ctx).await;
let state = runner.meta.state();
assert!(state.is_running(), "{state:?}");

let procedure_id = runner
.manager_ctx
.poison_manager
.get_poison(&poison_key.to_string())
.await
.unwrap();
// poison key should exist.
assert!(procedure_id.is_some());

runner.execute_once(&ctx).await;
let state = runner.meta.state();
assert!(state.is_prepare_rollback(), "{state:?}");

let procedure_id = runner
.manager_ctx
.poison_manager
.get_poison(&poison_key.to_string())
.await
.unwrap();
// poison key should be deleted.
assert!(procedure_id.is_none());
}

#[tokio::test]
async fn test_execute_failed_after_set_poison() {
let mut times = 0;
let poison_key = PoisonKey::new("table/1024");
let moved_poison_key = poison_key.clone();
let exec_fn = move |ctx: Context| {
times += 1;
let poison_key = moved_poison_key.clone();
async move {
if times == 1 {
Ok(Status::executing(true))
} else {
// Put the poison to the context.
ctx.provider
.try_put_poison(&poison_key, ctx.procedure_id)
.await
.unwrap();
Err(Error::external(MockError::new(StatusCode::Unexpected)))
}
}
.boxed()
};
let poison = ProcedureAdapter {
data: "poison".to_string(),
lock_key: LockKey::single_exclusive("catalog.schema.table"),
poison_keys: PoisonKeys::new(vec![poison_key.clone()]),
exec_fn,
rollback_fn: None,
};

let dir = create_temp_dir("poison");
let meta = poison.new_meta(ROOT_ID);

let object_store = test_util::new_object_store(&dir);
let procedure_store = Arc::new(ProcedureStore::from_object_store(object_store.clone()));
let mut runner = new_runner(meta.clone(), Box::new(poison), procedure_store.clone());

// Use the manager ctx as the context provider.
let ctx = context_with_provider(
meta.id,
runner.manager_ctx.clone() as Arc<dyn ContextProvider>,
);
// Manually add this procedure to the manager ctx.
runner
.manager_ctx
.procedures
.write()
.unwrap()
.insert(meta.id, runner.meta.clone());

runner.manager_ctx.start();
runner.execute_once(&ctx).await;
let state = runner.meta.state();
assert!(state.is_running(), "{state:?}");

runner.execute_once(&ctx).await;
let state = runner.meta.state();
assert!(state.is_prepare_rollback(), "{state:?}");
assert!(meta.state().is_prepare_rollback());

runner.execute_once(&ctx).await;
let state = runner.meta.state();
assert!(state.is_failed(), "{state:?}");
assert!(meta.state().is_failed());

// Check the poison is set.
let procedure_id = runner
.manager_ctx
.poison_manager
.get_poison(&poison_key.to_string())
.await
.unwrap()
.unwrap();

// If the procedure is poisoned, the poison key shouldn't be deleted.
assert_eq!(&procedure_id.to_string(), ROOT_ID);
}

#[tokio::test]
async fn test_execute_poisoned() {
let mut times = 0;
let poison_key = PoisonKey::new("table/1024");
let moved_poison_key = poison_key.clone();
let exec_fn = move |ctx: Context| {
times += 1;
let poison_key = moved_poison_key.clone();
async move {
if times == 1 {
Ok(Status::executing(true))
} else {
// Put the poison to the context.
ctx.provider
.try_put_poison(&poison_key, ctx.procedure_id)
.await
.unwrap();
Ok(Status::Poisoned {
keys: PoisonKeys::new(vec![poison_key.clone()]),
error: Error::external(MockError::new(StatusCode::Unexpected)),
})
}
}
.boxed()
};
let poison = ProcedureAdapter {
data: "poison".to_string(),
lock_key: LockKey::single_exclusive("catalog.schema.table"),
poison_keys: PoisonKeys::new(vec![poison_key.clone()]),
exec_fn,
rollback_fn: None,
};

let dir = create_temp_dir("poison");
let meta = poison.new_meta(ROOT_ID);

let object_store = test_util::new_object_store(&dir);
let procedure_store = Arc::new(ProcedureStore::from_object_store(object_store.clone()));
let mut runner = new_runner(meta.clone(), Box::new(poison), procedure_store.clone());

// Use the manager ctx as the context provider.
let ctx = context_with_provider(
meta.id,
runner.manager_ctx.clone() as Arc<dyn ContextProvider>,
);
// Manually add this procedure to the manager ctx.
runner
.manager_ctx
.procedures
.write()
.unwrap()
.insert(meta.id, runner.meta.clone());

runner.manager_ctx.start();
runner.execute_once(&ctx).await;
let state = runner.meta.state();
assert!(state.is_running(), "{state:?}");

runner.execute_once(&ctx).await;
let state = runner.meta.state();
assert!(state.is_poisoned(), "{state:?}");
assert!(meta.state().is_poisoned());
check_files(
&object_store,
&procedure_store,
ctx.procedure_id,
&["0000000000.step"],
)
.await;

// Check the poison is set.
let procedure_id = runner
.manager_ctx
.poison_manager
.get_poison(&poison_key.to_string())
.await
.unwrap()
.unwrap();

// If the procedure is poisoned, the poison key shouldn't be deleted.
assert_eq!(procedure_id, ROOT_ID);
}
}

@@ -29,6 +29,8 @@ pub struct ProcedureConfig {
pub retry_delay: Duration,
/// `None` stands for no limit.
pub max_metadata_value_size: Option<ReadableSize>,
/// Max running procedures.
pub max_running_procedures: usize,
}

impl Default for ProcedureConfig {
@@ -37,6 +39,7 @@ impl Default for ProcedureConfig {
max_retry_times: 3,
retry_delay: Duration::from_millis(500),
max_metadata_value_size: None,
max_running_procedures: 128,
}
}
}

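The new `max_running_procedures` field caps how many procedures the manager will run concurrently; submissions beyond the cap fail with the `TooManyRunningProcedures` error exercised in the test earlier in this diff. A hedged sketch of filling in the config (the module path and the value 64 are assumptions; the other fields mirror the defaults shown in the hunk above):

```rust
use std::time::Duration;

use common_procedure::options::ProcedureConfig; // path assumed; the diff only shows the struct

fn tuned_procedure_config() -> ProcedureConfig {
    ProcedureConfig {
        max_retry_times: 3,
        retry_delay: Duration::from_millis(500),
        // `None` stands for no limit on metadata value size.
        max_metadata_value_size: None,
        // New field: allow at most 64 concurrently running procedures.
        max_running_procedures: 64,
    }
}
```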
@@ -14,6 +14,7 @@

use std::any::Any;
use std::fmt;
use std::fmt::Display;
use std::str::FromStr;
use std::sync::Arc;

@@ -35,6 +36,8 @@ pub enum Status {
Executing {
/// Whether the framework needs to persist the procedure.
persist: bool,
/// Whether the framework needs to clean the poisons.
clean_poisons: bool,
},
/// The procedure has suspended itself and is waiting for subprocedures.
Suspended {
@@ -42,14 +45,40 @@ pub enum Status {
/// Whether the framework needs to persist the procedure.
persist: bool,
},
/// The procedure is poisoned.
Poisoned {
/// The keys that caused the procedure to be poisoned.
keys: PoisonKeys,
/// The error that caused the procedure to be poisoned.
error: Error,
},
/// The procedure is done.
Done { output: Option<Output> },
}

impl Status {
/// Returns a [Status::Poisoned] with given `keys` and `error`.
pub fn poisoned(keys: impl IntoIterator<Item = PoisonKey>, error: Error) -> Status {
Status::Poisoned {
keys: PoisonKeys::new(keys),
error,
}
}

/// Returns a [Status::Executing] with given `persist` flag.
pub fn executing(persist: bool) -> Status {
Status::Executing { persist }
Status::Executing {
persist,
clean_poisons: false,
}
}

/// Returns a [Status::Executing] with given `persist` flag and clean poisons.
pub fn executing_with_clean_poisons(persist: bool) -> Status {
Status::Executing {
persist,
clean_poisons: true,
}
}

/// Returns a [Status::Done] without output.
@@ -86,11 +115,20 @@ impl Status {

/// Returns `true` if the procedure needs the framework to persist its intermediate state.
pub fn need_persist(&self) -> bool {
// If the procedure is done, the framework doesn't need to persist the procedure
// anymore. It only needs to mark the procedure as committed.
match self {
Status::Executing { persist } | Status::Suspended { persist, .. } => *persist,
Status::Done { .. } => false,
// If the procedure is done/poisoned, the framework doesn't need to persist the procedure
// anymore. It only needs to mark the procedure as committed.
Status::Executing { persist, .. } | Status::Suspended { persist, .. } => *persist,
Status::Done { .. } | Status::Poisoned { .. } => false,
}
}

/// Returns `true` if the framework needs to clean the poisons.
pub fn need_clean_poisons(&self) -> bool {
match self {
Status::Executing { clean_poisons, .. } => *clean_poisons,
Status::Done { .. } => true,
_ => false,
}
}
}
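A hedged sketch of how a procedure step might use the new constructors: `Status::executing_with_clean_poisons(true)` asks the runner to delete this procedure's poison keys before persisting the step, while plain `Status::executing(true)` leaves them in place. The function name and flag are illustrative; `Status` is imported from the crate root, as in the crate's own test imports:

```rust
use common_procedure::Status; // root re-export, as used in the crate's tests

fn next_status(resource_repaired: bool) -> Status {
    if resource_repaired {
        // Consistent again: the runner deletes this procedure's poison keys
        // (see `clean_poisons` in the runner hunks above) before persisting.
        Status::executing_with_clean_poisons(true)
    } else {
        // Keep any poison in place and just persist the intermediate state.
        Status::executing(true)
    }
}
```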
@@ -100,6 +138,12 @@ impl Status {
pub trait ContextProvider: Send + Sync {
/// Query the procedure state.
async fn procedure_state(&self, procedure_id: ProcedureId) -> Result<Option<ProcedureState>>;

/// Try to put a poison key for a procedure.
///
/// This method is used to mark a resource as being operated on by a procedure.
/// If the poison key already exists with a different value, the operation will fail.
async fn try_put_poison(&self, key: &PoisonKey, procedure_id: ProcedureId) -> Result<()>;
}

/// Reference-counted pointer to [ContextProvider].
@@ -147,6 +191,11 @@ pub trait Procedure: Send {

/// Returns the [LockKey] that this procedure needs to acquire.
fn lock_key(&self) -> LockKey;

/// Returns the [PoisonKeys] that may cause this procedure to become poisoned during execution.
fn poison_keys(&self) -> PoisonKeys {
PoisonKeys::default()
}
}

#[async_trait]
@@ -174,6 +223,54 @@ impl<T: Procedure + ?Sized> Procedure for Box<T> {
fn lock_key(&self) -> LockKey {
(**self).lock_key()
}

fn poison_keys(&self) -> PoisonKeys {
(**self).poison_keys()
}
}

#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct PoisonKey(String);

impl Display for PoisonKey {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.0)
}
}

impl PoisonKey {
/// Creates a new [PoisonKey] from a [String].
pub fn new(key: impl Into<String>) -> Self {
Self(key.into())
}
}

/// A collection of [PoisonKey]s.
///
/// This type is used to represent the keys that may cause the procedure to become poisoned during execution.
#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Default)]
pub struct PoisonKeys(SmallVec<[PoisonKey; 2]>);

impl PoisonKeys {
/// Creates a new [PoisonKeys] from a [String].
pub fn single(key: impl Into<String>) -> Self {
Self(smallvec![PoisonKey::new(key)])
}

/// Creates a new [PoisonKeys] from an iterator of [PoisonKey]s.
pub fn new(keys: impl IntoIterator<Item = PoisonKey>) -> Self {
Self(keys.into_iter().collect())
}

/// Returns `true` if the [PoisonKeys] contains the given [PoisonKey].
pub fn contains(&self, key: &PoisonKey) -> bool {
self.0.contains(key)
}

/// Returns an iterator over the [PoisonKey]s.
pub fn iter(&self) -> impl Iterator<Item = &PoisonKey> {
self.0.iter()
}
}

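A hedged sketch of how a concrete procedure might declare its poison keys alongside its lock key, using only the constructors shown above; the helper name, table id, and key scheme are made up for illustration, and the import paths follow the ones used elsewhere in this diff:

```rust
use common_procedure::procedure::PoisonKeys;
use common_procedure::{LockKey, PoisonKey};

// Hypothetical helper for a procedure that operates on one table.
fn keys_for_table(table_id: u32) -> (LockKey, PoisonKeys) {
    let lock_key = LockKey::single_exclusive(format!("catalog.schema.table-{table_id}"));
    // The same resource is also guarded by a poison key; if the procedure hits an
    // unrecoverable error, the key stays behind and blocks new operations until
    // the table is manually repaired.
    let poison_keys = PoisonKeys::new(vec![PoisonKey::new(format!("table/{table_id}"))]);
    (lock_key, poison_keys)
}
```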
#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
@@ -325,6 +422,8 @@ pub enum ProcedureState {
RollingBack { error: Arc<Error> },
/// The procedure is failed and cannot proceed anymore.
Failed { error: Arc<Error> },
/// The procedure is poisoned.
Poisoned { keys: PoisonKeys, error: Arc<Error> },
}

impl ProcedureState {
@@ -348,6 +447,11 @@ impl ProcedureState {
ProcedureState::Retrying { error }
}

/// Returns a [ProcedureState] with poisoned state.
pub fn poisoned(keys: PoisonKeys, error: Arc<Error>) -> ProcedureState {
ProcedureState::Poisoned { keys, error }
}

/// Returns true if the procedure state is running.
pub fn is_running(&self) -> bool {
matches!(self, ProcedureState::Running)
@@ -358,6 +462,11 @@ impl ProcedureState {
matches!(self, ProcedureState::Done { .. })
}

/// Returns true if the procedure state is poisoned.
pub fn is_poisoned(&self) -> bool {
matches!(self, ProcedureState::Poisoned { .. })
}

/// Returns true if the procedure state failed.
pub fn is_failed(&self) -> bool {
matches!(self, ProcedureState::Failed { .. })
@@ -384,6 +493,7 @@ impl ProcedureState {
ProcedureState::Failed { error } => Some(error),
ProcedureState::Retrying { error } => Some(error),
ProcedureState::RollingBack { error } => Some(error),
ProcedureState::Poisoned { error, .. } => Some(error),
_ => None,
}
}
@@ -397,6 +507,7 @@ impl ProcedureState {
ProcedureState::Failed { .. } => "Failed",
ProcedureState::PrepareRollback { .. } => "PrepareRollback",
ProcedureState::RollingBack { .. } => "RollingBack",
ProcedureState::Poisoned { .. } => "Poisoned",
}
}
}
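A hedged sketch of how a caller observing procedure state might react to the new variant; the function, the output channel, and the crate-root import are assumptions, and the Debug formatting relies on the `"{state:?}"` usage seen in the tests above:

```rust
use common_procedure::ProcedureState; // root re-export assumed

// Illustrative handling of a reported procedure state.
fn report(state: &ProcedureState) {
    match state {
        ProcedureState::Poisoned { keys, error } => {
            // Manual intervention is required; the listed resources stay
            // fenced off until their poison keys are removed.
            eprintln!("procedure poisoned on {keys:?}: {error}");
        }
        ProcedureState::Failed { error } => eprintln!("procedure failed: {error}"),
        other => println!("procedure state: {other:?}"),
    }
}
```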
@@ -470,12 +581,18 @@ mod tests {

#[test]
fn test_status() {
let status = Status::Executing { persist: false };
let status = Status::executing(false);
assert!(!status.need_persist());

let status = Status::Executing { persist: true };
let status = Status::executing(true);
assert!(status.need_persist());

let status = Status::executing_with_clean_poisons(false);
assert!(status.need_clean_poisons());

let status = Status::executing_with_clean_poisons(true);
assert!(status.need_clean_poisons());

let status = Status::Suspended {
subprocedures: Vec::new(),
persist: false,
@@ -490,6 +607,7 @@ mod tests {

let status = Status::done();
assert!(!status.need_persist());
assert!(status.need_clean_poisons());
}

#[test]

@@ -24,6 +24,7 @@ use crate::error::{Result, ToJsonSnafu};
pub(crate) use crate::store::state_store::StateStoreRef;
use crate::ProcedureId;

pub mod poison_store;
pub mod state_store;
pub mod util;

@@ -341,6 +342,7 @@ mod tests {

use object_store::ObjectStore;

use crate::procedure::PoisonKeys;
use crate::store::state_store::ObjectStateStore;
use crate::BoxedProcedure;

@@ -503,6 +505,10 @@ mod tests {
fn lock_key(&self) -> LockKey {
LockKey::default()
}

fn poison_keys(&self) -> PoisonKeys {
PoisonKeys::default()
}
}

#[tokio::test]

src/common/procedure/src/store/poison_store.rs (new file, 59 lines)
@@ -0,0 +1,59 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use async_trait::async_trait;

use crate::error::Result;

pub type PoisonStoreRef = Arc<dyn PoisonStore>;

/// Poison store.
///
/// This trait is used to manage the state of operations on resources, particularly
/// when an operation encounters an unrecoverable error, potentially leading to
/// metadata inconsistency. In such cases, manual intervention is required to
/// resolve the issue before any further operations can be performed on the resource.
///
/// ## Behavior:
/// - **Insertion**: When an operation begins on a resource, a "poison" key is inserted
/// into the state store to indicate the operation is in progress.
/// - **Deletion**: If the operation completes successfully, or it can otherwise be
/// guaranteed that the resource is back in a consistent state, the poison key is
/// removed from the state store.
/// - **Failure Handling**:
///   - If the operation fails in a way that may leave the metadata inconsistent,
///     the poison key remains in the state store.
///   - The presence of this key indicates that the resource has encountered an
///     unrecoverable error and the metadata may be inconsistent.
///   - New operations on the same resource are rejected until the resource is
///     manually recovered and the poison key is removed.
#[async_trait]
pub trait PoisonStore: Send + Sync {
/// Try to put the poison key.
///
/// If the poison key already exists with a different value, the operation will fail.
async fn try_put_poison(&self, key: String, token: String) -> Result<()>;

/// Delete the poison key.
///
/// If the poison key exists with a different value, the operation will fail.
async fn delete_poison(&self, key: String, token: String) -> Result<()>;

/// Get the poison key.
///
/// If the poison key does not exist, the operation will return `None`.
async fn get_poison(&self, key: &str) -> Result<Option<String>>;
}
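A hedged sketch of the intended lifecycle against any `PoisonStore` implementation (the in-memory one added in `test_util.rs` below works): the token is the owning procedure id, a successful operation deletes its own key, and a failed one leaves the key behind so later attempts are rejected. The wrapper function, the example key, and `do_the_risky_work` are illustrative only:

```rust
use common_procedure::error::Result;
use common_procedure::store::poison_store::PoisonStoreRef;

// Illustrative wrapper around one "guarded" operation on a resource.
async fn run_guarded_operation(store: PoisonStoreRef, procedure_id: &str) -> Result<()> {
    let key = "table/1024".to_string();

    // Mark the resource as being operated on; this fails if another procedure
    // already poisoned it with a different token.
    store.try_put_poison(key.clone(), procedure_id.to_string()).await?;

    let succeeded = do_the_risky_work().await; // hypothetical work
    if succeeded {
        // Resource is consistent again, so lift the fence.
        store.delete_poison(key, procedure_id.to_string()).await?;
    }
    // On failure the key stays behind; `get_poison` reports the owning
    // procedure until an operator repairs the resource and removes the key.
    Ok(())
}

async fn do_the_risky_work() -> bool {
    true
}
```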
src/common/procedure/src/test_util.rs (new file, 85 lines)
@@ -0,0 +1,85 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::sync::{Arc, RwLock};

use snafu::ensure;

use super::*;
use crate::error;
use crate::store::poison_store::PoisonStore;

/// A poison store that uses an in-memory map to store the poison state.
#[derive(Debug, Default)]
pub struct InMemoryPoisonStore {
map: Arc<RwLock<HashMap<String, String>>>,
}

impl InMemoryPoisonStore {
/// Create a new in-memory poison manager.
pub fn new() -> Self {
Self::default()
}
}

#[async_trait::async_trait]
impl PoisonStore for InMemoryPoisonStore {
async fn try_put_poison(&self, key: String, token: String) -> Result<()> {
let mut map = self.map.write().unwrap();
match map.entry(key) {
Entry::Vacant(v) => {
v.insert(token.to_string());
}
Entry::Occupied(o) => {
let value = o.get();
ensure!(
value == &token,
error::UnexpectedSnafu {
err_msg: format!("The poison is already set by other token {}", value)
}
);
}
}
Ok(())
}

async fn delete_poison(&self, key: String, token: String) -> Result<()> {
let mut map = self.map.write().unwrap();
match map.entry(key) {
Entry::Vacant(_) => {
// do nothing
}
Entry::Occupied(o) => {
let value = o.get();
ensure!(
value == &token,
error::UnexpectedSnafu {
err_msg: format!("The poison is not set by the token {}", value)
}
);

o.remove();
}
}
Ok(())
}

async fn get_poison(&self, key: &str) -> Result<Option<String>> {
let map = self.map.read().unwrap();
let key = key.to_string();
Ok(map.get(&key).cloned())
}
}
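A hedged usage sketch of the new in-memory store, mirroring what the runner tests above assert: putting with the same token is idempotent, a different token is rejected for both put and delete, and deleting with the owning token clears the key. The test name and the `common_procedure` paths are assumptions; the behavior follows directly from the implementation above:

```rust
#[tokio::test]
async fn in_memory_poison_store_round_trip() {
    use common_procedure::store::poison_store::PoisonStore;
    use common_procedure::test_util::InMemoryPoisonStore;

    let store = InMemoryPoisonStore::new();

    // First owner can set the poison and read it back.
    store.try_put_poison("table/1024".to_string(), "proc-a".to_string()).await.unwrap();
    assert_eq!(store.get_poison("table/1024").await.unwrap(), Some("proc-a".to_string()));

    // A different token must not overwrite or delete the poison.
    assert!(store.try_put_poison("table/1024".to_string(), "proc-b".to_string()).await.is_err());
    assert!(store.delete_poison("table/1024".to_string(), "proc-b".to_string()).await.is_err());

    // The owning token can clear it once the resource is consistent again.
    store.delete_poison("table/1024".to_string(), "proc-a".to_string()).await.unwrap();
    assert_eq!(store.get_poison("table/1024").await.unwrap(), None);
}
```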
@@ -43,6 +43,10 @@ pub async fn wait(watcher: &mut Watcher) -> Result<Option<Output>> {
ProcedureState::PrepareRollback { error } => {
debug!("commit rollback, source: {}", error)
}
ProcedureState::Poisoned { error, .. } => {
debug!("poisoned, source: {}", error);
return Err(error.clone()).context(ProcedureExecSnafu);
}
}
}
}
@@ -61,7 +65,9 @@ mod tests {
use super::*;
use crate::error::Error;
use crate::local::{test_util, LocalManager, ManagerConfig};
use crate::procedure::PoisonKeys;
use crate::store::state_store::ObjectStateStore;
use crate::test_util::InMemoryPoisonStore;
use crate::{
Context, LockKey, Procedure, ProcedureId, ProcedureManager, ProcedureWithId, Status,
};
@@ -76,7 +82,8 @@ mod tests {
..Default::default()
};
let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
let manager = LocalManager::new(config, state_store);
let poison_manager = Arc::new(InMemoryPoisonStore::default());
let manager = LocalManager::new(config, state_store, poison_manager);
manager.start().await.unwrap();

#[derive(Debug)]
@@ -106,6 +113,10 @@ mod tests {
fn lock_key(&self) -> LockKey {
LockKey::single_exclusive("test.submit")
}

fn poison_keys(&self) -> PoisonKeys {
PoisonKeys::default()
}
}

let procedure_id = ProcedureId::random();

Some files were not shown because too many files have changed in this diff.