Mirror of https://github.com/GreptimeTeam/greptimedb.git (synced 2025-12-25 15:40:02 +00:00)

Compare commits: v0.15.4 ... flow/faste (78 commits)
Commit SHA1s in this range:

- 67a60646b4
- 1c3bde7e4e
- e045a0dbdf
- 2f765c8fd4
- d99cd98c01
- a858f55257
- 916967ea59
- c58d8aa94a
- eeb061ca74
- f7282fde28
- a4bd11fb9c
- 6dc9e8ddb4
- af03e89139
- e7a64b7dc0
- 29739b556e
- 77e50d0e08
- c2f1447345
- 30f7955d2b
- 3508fddd74
- 351c741c70
- bb43d604a4
- 9576bcb9ae
- dc17e6e517
- 563d25ee04
- 7d17782fd5
- c5360601f5
- 9b5baa965c
- 76a5145def
- 7b2703760b
- 81ea172ce4
- f7c363f969
- 5f2daae087
- b1b0d0136f
- 599f289f59
- 385f12a62e
- 6b90e2b6b4
- a4f3e96e96
- 2b0f27da51
- e0382eeb7c
- 4aa6add8dc
- 645988975e
- a203909de3
- 616e76941a
- bc42d35c2a
- 524bdfff22
- 6bed0b6ba0
- dec8c52b18
- 753a7e1a24
- 6684200fce
- 5fcb97724f
- ff559b2688
- 8473a34fc9
- df0ebf0378
- 4a665fd27b
- b4d6441716
- bdd50a2263
- f87b12b2aa
- 07eec083b9
- 4737285275
- 55f5e09885
- 9ab36e9a6f
- 4bb5d00a4b
- 1d07864b29
- 9be75361a4
- 9c1df68a5f
- 0209461155
- e728cb33fb
- cde7e11983
- 944b4b3e49
- 7953b090c0
- 7aa9af5ba6
- 7a9444c85b
- bb12be3310
- 24019334ee
- 116d5cf82b
- 90a3894564
- 39d3e0651d
- a49edc6ca6
@@ -12,3 +12,6 @@ fetch = true
checkout = true
list_files = true
internal_use_git2 = false
+
+[env]
+CARGO_WORKSPACE_DIR = { value = "", relative = true }
.github/scripts/check-version.sh (vendored, new executable file, 42 lines)
@@ -0,0 +1,42 @@
#!/bin/bash

# Get current version
CURRENT_VERSION=$1
if [ -z "$CURRENT_VERSION" ]; then
    echo "Error: Failed to get current version"
    exit 1
fi

# Get the latest version from GitHub Releases
API_RESPONSE=$(curl -s "https://api.github.com/repos/GreptimeTeam/greptimedb/releases/latest")

if [ -z "$API_RESPONSE" ] || [ "$(echo "$API_RESPONSE" | jq -r '.message')" = "Not Found" ]; then
    echo "Error: Failed to fetch latest version from GitHub"
    exit 1
fi

# Get the latest version
LATEST_VERSION=$(echo "$API_RESPONSE" | jq -r '.tag_name')

if [ -z "$LATEST_VERSION" ] || [ "$LATEST_VERSION" = "null" ]; then
    echo "Error: No valid version found in GitHub releases"
    exit 1
fi

# Cleaned up version number format (removed possible 'v' prefix and -nightly suffix)
CLEAN_CURRENT=$(echo "$CURRENT_VERSION" | sed 's/^v//' | sed 's/-nightly-.*//')
CLEAN_LATEST=$(echo "$LATEST_VERSION" | sed 's/^v//' | sed 's/-nightly-.*//')

echo "Current version: $CLEAN_CURRENT"
echo "Latest release version: $CLEAN_LATEST"

# Use sort -V to compare versions
HIGHER_VERSION=$(printf "%s\n%s" "$CLEAN_CURRENT" "$CLEAN_LATEST" | sort -V | tail -n1)

if [ "$HIGHER_VERSION" = "$CLEAN_CURRENT" ]; then
    echo "Current version ($CLEAN_CURRENT) is NEWER than or EQUAL to latest ($CLEAN_LATEST)"
    echo "should-push-latest-tag=true" >> $GITHUB_OUTPUT
else
    echo "Current version ($CLEAN_CURRENT) is OLDER than latest ($CLEAN_LATEST)"
    echo "should-push-latest-tag=false" >> $GITHUB_OUTPUT
fi
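Since the script only needs `curl`, `jq`, and GNU coreutils, it can be dry-run outside of GitHub Actions. The sketch below is illustrative only: the version argument is a made-up value, and `GITHUB_OUTPUT` (normally a file provided by the Actions runner) is pointed at a temporary file.

```bash
# Illustrative local dry run of the new check-version.sh (not part of the diff above).
export GITHUB_OUTPUT="$(mktemp)"           # stand-in for the file the runner provides
./.github/scripts/check-version.sh "v0.16.0-nightly-20250101"   # hypothetical version string
cat "$GITHUB_OUTPUT"                       # should-push-latest-tag=true or =false

# The two building blocks the script relies on:
echo "v0.16.0-nightly-20250101" | sed 's/^v//' | sed 's/-nightly-.*//'   # -> 0.16.0
printf "0.15.4\n0.16.0\n" | sort -V | tail -n1                           # -> 0.16.0 (higher version)
```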
.github/workflows/release.yml (vendored, 13 changed lines)
@@ -110,6 +110,8 @@ jobs:

# The 'version' use as the global tag name of the release workflow.
version: ${{ steps.create-version.outputs.version }}
+
+should-push-latest-tag: ${{ steps.check-version.outputs.should-push-latest-tag }}
steps:
  - name: Checkout
    uses: actions/checkout@v4
@@ -135,6 +137,11 @@ jobs:
GITHUB_REF_NAME: ${{ github.ref_name }}
NIGHTLY_RELEASE_PREFIX: ${{ env.NIGHTLY_RELEASE_PREFIX }}

+- name: Check version
+  id: check-version
+  run: |
+    ./.github/scripts/check-version.sh "${{ steps.create-version.outputs.version }}"
+
- name: Allocate linux-amd64 runner
  if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
  uses: ./.github/actions/start-runner
@@ -314,7 +321,7 @@ jobs:
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
version: ${{ needs.allocate-runners.outputs.version }}
-push-latest-tag: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
+push-latest-tag: ${{ needs.allocate-runners.outputs.should-push-latest-tag == 'true' && github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}

- name: Set build image result
  id: set-build-image-result
@@ -332,7 +339,7 @@ jobs:
build-windows-artifacts,
release-images-to-dockerhub,
]
-runs-on: ubuntu-latest
+runs-on: ubuntu-latest-16-cores
# When we push to ACR, it's easy to fail due to some unknown network issues.
# However, we don't want to fail the whole workflow because of this.
# The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
@@ -361,7 +368,7 @@ jobs:
dev-mode: false
upload-to-s3: true
update-version-info: true
-push-latest-tag: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
+push-latest-tag: ${{ needs.allocate-runners.outputs.should-push-latest-tag == 'true' && github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}

publish-github-release:
  name: Create GitHub release and upload artifacts
.github/workflows/semantic-pull-request.yml (vendored, 10 changed lines)
@@ -11,17 +11,17 @@ concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true

-permissions:
-  issues: write
-  contents: write
-  pull-requests: write

jobs:
  check:
    runs-on: ubuntu-latest
+    permissions:
+      pull-requests: write # Add permissions to modify PRs
+      issues: write
    timeout-minutes: 10
    steps:
      - uses: actions/checkout@v4
        with:
          persist-credentials: false
      - uses: ./.github/actions/setup-cyborg
      - name: Check Pull Request
        working-directory: cyborg
Cargo.lock (generated, 315 changed lines)
@@ -211,7 +211,7 @@ checksum = "d301b3b94cb4b2f23d7917810addbbaff90738e0ca2be692bd027e70d7e0330c"
|
||||
|
||||
[[package]]
|
||||
name = "api"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"common-base",
|
||||
"common-decimal",
|
||||
@@ -944,7 +944,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "auth"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -1586,7 +1586,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "cache"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"catalog",
|
||||
"common-error",
|
||||
@@ -1602,6 +1602,17 @@ version = "1.0.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "acbc26382d871df4b7442e3df10a9402bf3cf5e55cbd66f12be38861425f0564"
|
||||
|
||||
[[package]]
|
||||
name = "cargo-manifest"
|
||||
version = "0.19.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a1d8af896b707212cd0e99c112a78c9497dd32994192a463ed2f7419d29bd8c6"
|
||||
dependencies = [
|
||||
"serde",
|
||||
"thiserror 2.0.12",
|
||||
"toml 0.8.19",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cast"
|
||||
version = "0.3.0"
|
||||
@@ -1610,7 +1621,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
|
||||
|
||||
[[package]]
|
||||
name = "catalog"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow 54.2.1",
|
||||
@@ -1948,7 +1959,7 @@ checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97"
|
||||
|
||||
[[package]]
|
||||
name = "cli"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"async-stream",
|
||||
"async-trait",
|
||||
@@ -1975,7 +1986,6 @@ dependencies = [
|
||||
"common-version",
|
||||
"common-wal",
|
||||
"datatypes",
|
||||
"either",
|
||||
"etcd-client",
|
||||
"futures",
|
||||
"humantime",
|
||||
@@ -1986,14 +1996,14 @@ dependencies = [
|
||||
"operator",
|
||||
"query",
|
||||
"rand 0.9.0",
|
||||
"reqwest",
|
||||
"reqwest 0.12.9",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"servers",
|
||||
"session",
|
||||
"snafu 0.8.5",
|
||||
"store-api",
|
||||
"substrait 0.15.0",
|
||||
"substrait 0.16.0",
|
||||
"table",
|
||||
"tempfile",
|
||||
"tokio",
|
||||
@@ -2002,7 +2012,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "client"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arc-swap",
|
||||
@@ -2032,7 +2042,7 @@ dependencies = [
|
||||
"rand 0.9.0",
|
||||
"serde_json",
|
||||
"snafu 0.8.5",
|
||||
"substrait 0.15.0",
|
||||
"substrait 0.16.0",
|
||||
"substrait 0.37.3",
|
||||
"tokio",
|
||||
"tokio-stream",
|
||||
@@ -2073,7 +2083,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "cmd"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"auth",
|
||||
@@ -2103,7 +2113,6 @@ dependencies = [
|
||||
"common-wal",
|
||||
"datanode",
|
||||
"datatypes",
|
||||
"either",
|
||||
"etcd-client",
|
||||
"file-engine",
|
||||
"flow",
|
||||
@@ -2118,13 +2127,14 @@ dependencies = [
|
||||
"mito2",
|
||||
"moka",
|
||||
"nu-ansi-term",
|
||||
"object-store",
|
||||
"plugins",
|
||||
"prometheus",
|
||||
"prost 0.13.5",
|
||||
"query",
|
||||
"rand 0.9.0",
|
||||
"regex",
|
||||
"reqwest",
|
||||
"reqwest 0.12.9",
|
||||
"rexpect",
|
||||
"serde",
|
||||
"serde_json",
|
||||
@@ -2134,7 +2144,7 @@ dependencies = [
|
||||
"snafu 0.8.5",
|
||||
"stat",
|
||||
"store-api",
|
||||
"substrait 0.15.0",
|
||||
"substrait 0.16.0",
|
||||
"table",
|
||||
"temp-env",
|
||||
"tempfile",
|
||||
@@ -2181,7 +2191,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"
|
||||
|
||||
[[package]]
|
||||
name = "common-base"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"anymap2",
|
||||
"async-trait",
|
||||
@@ -2203,11 +2213,11 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-catalog"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
|
||||
[[package]]
|
||||
name = "common-config"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"common-base",
|
||||
"common-error",
|
||||
@@ -2220,6 +2230,7 @@ dependencies = [
|
||||
"humantime-serde",
|
||||
"meta-client",
|
||||
"num_cpus",
|
||||
"object-store",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"serde_with",
|
||||
@@ -2232,7 +2243,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-datasource"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"arrow 54.2.1",
|
||||
"arrow-schema 54.3.1",
|
||||
@@ -2269,7 +2280,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-decimal"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"bigdecimal 0.4.8",
|
||||
"common-error",
|
||||
@@ -2282,7 +2293,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-error"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"common-macro",
|
||||
"http 1.1.0",
|
||||
@@ -2293,7 +2304,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-frontend"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"common-error",
|
||||
@@ -2309,7 +2320,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-function"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"ahash 0.8.11",
|
||||
"api",
|
||||
@@ -2332,6 +2343,7 @@ dependencies = [
|
||||
"datafusion",
|
||||
"datafusion-common",
|
||||
"datafusion-expr",
|
||||
"datafusion-functions-aggregate-common",
|
||||
"datatypes",
|
||||
"derive_more",
|
||||
"geo",
|
||||
@@ -2362,7 +2374,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-greptimedb-telemetry"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"common-runtime",
|
||||
@@ -2370,7 +2382,7 @@ dependencies = [
|
||||
"common-test-util",
|
||||
"common-version",
|
||||
"hyper 0.14.30",
|
||||
"reqwest",
|
||||
"reqwest 0.12.9",
|
||||
"serde",
|
||||
"tempfile",
|
||||
"tokio",
|
||||
@@ -2379,7 +2391,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-grpc"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow-flight",
|
||||
@@ -2411,7 +2423,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-grpc-expr"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"common-base",
|
||||
@@ -2430,7 +2442,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-macro"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"common-query",
|
||||
@@ -2444,7 +2456,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-mem-prof"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"common-error",
|
||||
@@ -2460,7 +2472,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-meta"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"anymap2",
|
||||
"api",
|
||||
@@ -2525,7 +2537,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-options"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"common-grpc",
|
||||
"humantime-serde",
|
||||
@@ -2534,11 +2546,11 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-plugins"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
|
||||
[[package]]
|
||||
name = "common-pprof"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"common-error",
|
||||
"common-macro",
|
||||
@@ -2550,7 +2562,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-procedure"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"async-stream",
|
||||
"async-trait",
|
||||
@@ -2577,7 +2589,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-procedure-test"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"common-procedure",
|
||||
@@ -2586,7 +2598,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-query"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -2612,7 +2624,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-recordbatch"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"common-error",
|
||||
@@ -2632,7 +2644,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-runtime"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"clap 4.5.19",
|
||||
@@ -2662,17 +2674,36 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-session"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"strum 0.27.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "common-sql"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"common-base",
|
||||
"common-datasource",
|
||||
"common-decimal",
|
||||
"common-error",
|
||||
"common-macro",
|
||||
"common-time",
|
||||
"datafusion-sql",
|
||||
"datatypes",
|
||||
"hex",
|
||||
"jsonb",
|
||||
"snafu 0.8.5",
|
||||
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "common-telemetry"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"backtrace",
|
||||
"common-error",
|
||||
"common-version",
|
||||
"console-subscriber",
|
||||
"greptime-proto",
|
||||
"humantime-serde",
|
||||
@@ -2696,7 +2727,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-test-util"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"client",
|
||||
"common-grpc",
|
||||
@@ -2709,7 +2740,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-time"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"arrow 54.2.1",
|
||||
"chrono",
|
||||
@@ -2727,9 +2758,10 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-version"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"build-data",
|
||||
"cargo-manifest",
|
||||
"const_format",
|
||||
"serde",
|
||||
"shadow-rs",
|
||||
@@ -2737,7 +2769,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-wal"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"common-base",
|
||||
"common-error",
|
||||
@@ -2760,7 +2792,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "common-workload"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"common-telemetry",
|
||||
@@ -3716,7 +3748,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "datanode"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow-flight",
|
||||
@@ -3762,14 +3794,14 @@ dependencies = [
|
||||
"prometheus",
|
||||
"prost 0.13.5",
|
||||
"query",
|
||||
"reqwest",
|
||||
"reqwest 0.12.9",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"servers",
|
||||
"session",
|
||||
"snafu 0.8.5",
|
||||
"store-api",
|
||||
"substrait 0.15.0",
|
||||
"substrait 0.16.0",
|
||||
"table",
|
||||
"tokio",
|
||||
"toml 0.8.19",
|
||||
@@ -3778,7 +3810,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "datatypes"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"arrow 54.2.1",
|
||||
"arrow-array 54.2.1",
|
||||
@@ -4198,9 +4230,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "either"
|
||||
version = "1.13.0"
|
||||
version = "1.15.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0"
|
||||
checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719"
|
||||
dependencies = [
|
||||
"serde",
|
||||
]
|
||||
@@ -4438,7 +4470,7 @@ checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6"
|
||||
|
||||
[[package]]
|
||||
name = "file-engine"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -4575,7 +4607,7 @@ checksum = "8bf7cc16383c4b8d58b9905a8509f02926ce3058053c056376248d958c9df1e8"
|
||||
|
||||
[[package]]
|
||||
name = "flow"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow 54.2.1",
|
||||
@@ -4640,7 +4672,7 @@ dependencies = [
|
||||
"sql",
|
||||
"store-api",
|
||||
"strum 0.27.1",
|
||||
"substrait 0.15.0",
|
||||
"substrait 0.16.0",
|
||||
"table",
|
||||
"tokio",
|
||||
"tonic 0.12.3",
|
||||
@@ -4695,10 +4727,11 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa"
|
||||
|
||||
[[package]]
|
||||
name = "frontend"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arc-swap",
|
||||
"async-stream",
|
||||
"async-trait",
|
||||
"auth",
|
||||
"bytes",
|
||||
@@ -4754,7 +4787,7 @@ dependencies = [
|
||||
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
|
||||
"store-api",
|
||||
"strfmt",
|
||||
"substrait 0.15.0",
|
||||
"substrait 0.16.0",
|
||||
"table",
|
||||
"tokio",
|
||||
"tokio-util",
|
||||
@@ -5144,7 +5177,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "greptime-proto"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=82fe5c6282f623c185b86f03e898ee8952e50cf9#82fe5c6282f623c185b86f03e898ee8952e50cf9"
|
||||
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=ceb1af4fa9309ce65bda0367db7b384df2bb4d4f#ceb1af4fa9309ce65bda0367db7b384df2bb4d4f"
|
||||
dependencies = [
|
||||
"prost 0.13.5",
|
||||
"serde",
|
||||
@@ -5915,7 +5948,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "index"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"asynchronous-codec",
|
||||
@@ -6695,7 +6728,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"windows-targets 0.48.5",
|
||||
"windows-targets 0.52.6",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -6800,7 +6833,7 @@ checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
|
||||
|
||||
[[package]]
|
||||
name = "log-query"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"chrono",
|
||||
"common-error",
|
||||
@@ -6812,7 +6845,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "log-store"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"async-stream",
|
||||
"async-trait",
|
||||
@@ -7110,7 +7143,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "meta-client"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -7138,7 +7171,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "meta-srv"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -7168,6 +7201,7 @@ dependencies = [
|
||||
"deadpool",
|
||||
"deadpool-postgres",
|
||||
"derive_builder 0.20.1",
|
||||
"either",
|
||||
"etcd-client",
|
||||
"futures",
|
||||
"h2 0.3.26",
|
||||
@@ -7229,7 +7263,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "metric-engine"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"aquamarine",
|
||||
@@ -7239,6 +7273,7 @@ dependencies = [
|
||||
"common-base",
|
||||
"common-error",
|
||||
"common-macro",
|
||||
"common-meta",
|
||||
"common-query",
|
||||
"common-recordbatch",
|
||||
"common-runtime",
|
||||
@@ -7319,7 +7354,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "mito-codec"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"bytes",
|
||||
@@ -7342,7 +7377,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "mito2"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"aquamarine",
|
||||
@@ -8092,18 +8127,25 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "object-store"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"bytes",
|
||||
"common-base",
|
||||
"common-error",
|
||||
"common-macro",
|
||||
"common-telemetry",
|
||||
"common-test-util",
|
||||
"futures",
|
||||
"humantime-serde",
|
||||
"lazy_static",
|
||||
"md5",
|
||||
"moka",
|
||||
"opendal",
|
||||
"prometheus",
|
||||
"reqwest 0.12.9",
|
||||
"serde",
|
||||
"snafu 0.8.5",
|
||||
"tokio",
|
||||
"uuid",
|
||||
]
|
||||
@@ -8238,7 +8280,7 @@ dependencies = [
|
||||
"prometheus",
|
||||
"quick-xml 0.36.2",
|
||||
"reqsign",
|
||||
"reqwest",
|
||||
"reqwest 0.12.9",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"sha2",
|
||||
@@ -8310,6 +8352,19 @@ dependencies = [
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "opentelemetry-http"
|
||||
version = "0.10.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7f51189ce8be654f9b5f7e70e49967ed894e84a06fc35c6c042e64ac1fc5399e"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"bytes",
|
||||
"http 0.2.12",
|
||||
"opentelemetry 0.21.0",
|
||||
"reqwest 0.11.27",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "opentelemetry-otlp"
|
||||
version = "0.14.0"
|
||||
@@ -8320,10 +8375,12 @@ dependencies = [
|
||||
"futures-core",
|
||||
"http 0.2.12",
|
||||
"opentelemetry 0.21.0",
|
||||
"opentelemetry-http",
|
||||
"opentelemetry-proto 0.4.0",
|
||||
"opentelemetry-semantic-conventions",
|
||||
"opentelemetry_sdk 0.21.2",
|
||||
"prost 0.11.9",
|
||||
"reqwest 0.11.27",
|
||||
"thiserror 1.0.64",
|
||||
"tokio",
|
||||
"tonic 0.9.2",
|
||||
@@ -8406,7 +8463,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "operator"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"ahash 0.8.11",
|
||||
"api",
|
||||
@@ -8431,6 +8488,7 @@ dependencies = [
|
||||
"common-query",
|
||||
"common-recordbatch",
|
||||
"common-runtime",
|
||||
"common-sql",
|
||||
"common-telemetry",
|
||||
"common-test-util",
|
||||
"common-time",
|
||||
@@ -8461,7 +8519,7 @@ dependencies = [
|
||||
"sql",
|
||||
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
|
||||
"store-api",
|
||||
"substrait 0.15.0",
|
||||
"substrait 0.16.0",
|
||||
"table",
|
||||
"tokio",
|
||||
"tokio-util",
|
||||
@@ -8728,7 +8786,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "partition"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -9016,7 +9074,7 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
|
||||
|
||||
[[package]]
|
||||
name = "pipeline"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"ahash 0.8.11",
|
||||
"api",
|
||||
@@ -9159,7 +9217,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "plugins"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"auth",
|
||||
"clap 4.5.19",
|
||||
@@ -9472,7 +9530,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "promql"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"ahash 0.8.11",
|
||||
"async-trait",
|
||||
@@ -9496,8 +9554,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "promql-parser"
|
||||
version = "0.5.1"
|
||||
source = "git+https://github.com/GreptimeTeam/promql-parser.git?rev=0410e8b459dda7cb222ce9596f8bf3971bd07bd2#0410e8b459dda7cb222ce9596f8bf3971bd07bd2"
|
||||
version = "0.6.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "328fe69c2443ec4f8e6c33ea925dde04a1026e6c95928e89ed02343944cac9bf"
|
||||
dependencies = [
|
||||
"cfgrammar",
|
||||
"chrono",
|
||||
@@ -9754,7 +9813,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "puffin"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"async-compression 0.4.13",
|
||||
"async-trait",
|
||||
@@ -9796,7 +9855,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "query"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"ahash 0.8.11",
|
||||
"api",
|
||||
@@ -9862,7 +9921,7 @@ dependencies = [
|
||||
"sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
|
||||
"statrs",
|
||||
"store-api",
|
||||
"substrait 0.15.0",
|
||||
"substrait 0.16.0",
|
||||
"table",
|
||||
"tokio",
|
||||
"tokio-stream",
|
||||
@@ -10310,7 +10369,7 @@ dependencies = [
|
||||
"percent-encoding",
|
||||
"quick-xml 0.35.0",
|
||||
"rand 0.8.5",
|
||||
"reqwest",
|
||||
"reqwest 0.12.9",
|
||||
"rsa",
|
||||
"rust-ini 0.21.1",
|
||||
"serde",
|
||||
@@ -10319,6 +10378,42 @@ dependencies = [
|
||||
"sha2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "reqwest"
|
||||
version = "0.11.27"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62"
|
||||
dependencies = [
|
||||
"base64 0.21.7",
|
||||
"bytes",
|
||||
"encoding_rs",
|
||||
"futures-core",
|
||||
"futures-util",
|
||||
"h2 0.3.26",
|
||||
"http 0.2.12",
|
||||
"http-body 0.4.6",
|
||||
"hyper 0.14.30",
|
||||
"ipnet",
|
||||
"js-sys",
|
||||
"log",
|
||||
"mime",
|
||||
"once_cell",
|
||||
"percent-encoding",
|
||||
"pin-project-lite",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"serde_urlencoded",
|
||||
"sync_wrapper 0.1.2",
|
||||
"system-configuration",
|
||||
"tokio",
|
||||
"tower-service",
|
||||
"url",
|
||||
"wasm-bindgen",
|
||||
"wasm-bindgen-futures",
|
||||
"web-sys",
|
||||
"winreg",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "reqwest"
|
||||
version = "0.12.9"
|
||||
@@ -11148,7 +11243,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "servers"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"ahash 0.8.11",
|
||||
"api",
|
||||
@@ -11181,6 +11276,7 @@ dependencies = [
|
||||
"common-recordbatch",
|
||||
"common-runtime",
|
||||
"common-session",
|
||||
"common-sql",
|
||||
"common-telemetry",
|
||||
"common-test-util",
|
||||
"common-time",
|
||||
@@ -11234,7 +11330,7 @@ dependencies = [
|
||||
"quoted-string",
|
||||
"rand 0.9.0",
|
||||
"regex",
|
||||
"reqwest",
|
||||
"reqwest 0.12.9",
|
||||
"rust-embed",
|
||||
"rustls",
|
||||
"rustls-pemfile",
|
||||
@@ -11269,7 +11365,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "session"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arc-swap",
|
||||
@@ -11608,7 +11704,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "sql"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"chrono",
|
||||
@@ -11619,6 +11715,7 @@ dependencies = [
|
||||
"common-error",
|
||||
"common-macro",
|
||||
"common-query",
|
||||
"common-sql",
|
||||
"common-time",
|
||||
"datafusion",
|
||||
"datafusion-common",
|
||||
@@ -11663,7 +11760,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "sqlness-runner"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"clap 4.5.19",
|
||||
@@ -11678,7 +11775,7 @@ dependencies = [
|
||||
"local-ip-address",
|
||||
"mysql",
|
||||
"num_cpus",
|
||||
"reqwest",
|
||||
"reqwest 0.12.9",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"sha2",
|
||||
@@ -11963,7 +12060,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "stat"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"nix 0.30.1",
|
||||
]
|
||||
@@ -11989,7 +12086,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "store-api"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"aquamarine",
|
||||
@@ -12150,7 +12247,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "substrait"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"bytes",
|
||||
@@ -12328,9 +12425,30 @@ dependencies = [
|
||||
"nom",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "system-configuration"
|
||||
version = "0.5.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7"
|
||||
dependencies = [
|
||||
"bitflags 1.3.2",
|
||||
"core-foundation",
|
||||
"system-configuration-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "system-configuration-sys"
|
||||
version = "0.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9"
|
||||
dependencies = [
|
||||
"core-foundation-sys",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "table"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"async-trait",
|
||||
@@ -12591,7 +12709,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"
|
||||
|
||||
[[package]]
|
||||
name = "tests-fuzz"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"arbitrary",
|
||||
"async-trait",
|
||||
@@ -12618,7 +12736,7 @@ dependencies = [
|
||||
"paste",
|
||||
"rand 0.9.0",
|
||||
"rand_chacha 0.9.0",
|
||||
"reqwest",
|
||||
"reqwest 0.12.9",
|
||||
"schemars",
|
||||
"serde",
|
||||
"serde_json",
|
||||
@@ -12635,7 +12753,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tests-integration"
|
||||
version = "0.15.0"
|
||||
version = "0.16.0"
|
||||
dependencies = [
|
||||
"api",
|
||||
"arrow-flight",
|
||||
@@ -12702,7 +12820,7 @@ dependencies = [
|
||||
"sql",
|
||||
"sqlx",
|
||||
"store-api",
|
||||
"substrait 0.15.0",
|
||||
"substrait 0.16.0",
|
||||
"table",
|
||||
"tempfile",
|
||||
"time",
|
||||
@@ -13072,6 +13190,7 @@ version = "0.8.19"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e"
|
||||
dependencies = [
|
||||
"indexmap 2.9.0",
|
||||
"serde",
|
||||
"serde_spanned",
|
||||
"toml_datetime",
|
||||
@@ -14501,6 +14620,16 @@ dependencies = [
|
||||
"memchr",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "winreg"
|
||||
version = "0.50.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"windows-sys 0.48.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wit-bindgen-rt"
|
||||
version = "0.39.0"
|
||||
|
||||
Cargo.toml (14 changed lines)
@@ -30,6 +30,7 @@ members = [
"src/common/recordbatch",
"src/common/runtime",
"src/common/session",
+"src/common/sql",
"src/common/stat",
"src/common/substrait",
"src/common/telemetry",
@@ -71,11 +72,13 @@ members = [
resolver = "2"

[workspace.package]
-version = "0.15.0"
+version = "0.16.0"
edition = "2021"
license = "Apache-2.0"

[workspace.lints]
clippy.print_stdout = "warn"
clippy.print_stderr = "warn"
clippy.dbg_macro = "warn"
clippy.implicit_clone = "warn"
clippy.result_large_err = "allow"
@@ -121,6 +124,7 @@ datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
+datafusion-functions-aggregate-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
@@ -130,11 +134,12 @@ deadpool = "0.12"
deadpool-postgres = "0.14"
derive_builder = "0.20"
dotenv = "0.15"
+either = "1.15"
etcd-client = "0.14"
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "82fe5c6282f623c185b86f03e898ee8952e50cf9" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "ceb1af4fa9309ce65bda0367db7b384df2bb4d4f" }
hex = "0.4"
http = "1"
humantime = "2.1"
@@ -167,9 +172,7 @@ parquet = { version = "54.2", default-features = false, features = ["arrow", "as
paste = "1.0"
pin-project = "1.0"
prometheus = { version = "0.13.3", features = ["process"] }
-promql-parser = { git = "https://github.com/GreptimeTeam/promql-parser.git", rev = "0410e8b459dda7cb222ce9596f8bf3971bd07bd2", features = [
-"ser",
-] }
+promql-parser = { version = "0.6", features = ["ser"] }
prost = { version = "0.13", features = ["no-recursion-limit"] }
raft-engine = { version = "0.4.1", default-features = false }
rand = "0.9"
@@ -258,6 +261,7 @@ common-query = { path = "src/common/query" }
common-recordbatch = { path = "src/common/recordbatch" }
common-runtime = { path = "src/common/runtime" }
common-session = { path = "src/common/session" }
+common-sql = { path = "src/common/sql" }
common-telemetry = { path = "src/common/telemetry" }
common-test-util = { path = "src/common/test-util" }
common-time = { path = "src/common/time" }
@@ -75,9 +75,9 @@
| --------- | ----------- |
| [Unified Observability Data](https://docs.greptime.com/user-guide/concepts/why-greptimedb) | Store metrics, logs, and traces as timestamped, contextual wide events. Query via [SQL](https://docs.greptime.com/user-guide/query-data/sql), [PromQL](https://docs.greptime.com/user-guide/query-data/promql), and [streaming](https://docs.greptime.com/user-guide/flow-computation/overview). |
| [High Performance & Cost Effective](https://docs.greptime.com/user-guide/manage-data/data-index) | Written in Rust, with a distributed query engine, [rich indexing](https://docs.greptime.com/user-guide/manage-data/data-index), and optimized columnar storage, delivering sub-second responses at PB scale. |
-| [Cloud-Native Architecture](https://docs.greptime.com/user-guide/concepts/architecture) | Designed for [Kubernetes](https://docs.greptime.com/user-guide/deployments/deploy-on-kubernetes/greptimedb-operator-management), with compute/storage separation, native object storage (AWS S3, Azure Blob, etc.) and seamless cross-cloud access. |
+| [Cloud-Native Architecture](https://docs.greptime.com/user-guide/concepts/architecture) | Designed for [Kubernetes](https://docs.greptime.com/user-guide/deployments-administration/deploy-on-kubernetes/greptimedb-operator-management), with compute/storage separation, native object storage (AWS S3, Azure Blob, etc.) and seamless cross-cloud access. |
| [Developer-Friendly](https://docs.greptime.com/user-guide/protocols/overview) | Access via SQL/PromQL interfaces, REST API, MySQL/PostgreSQL protocols, and popular ingestion [protocols](https://docs.greptime.com/user-guide/protocols/overview). |
-| [Flexible Deployment](https://docs.greptime.com/user-guide/deployments/overview) | Deploy anywhere: edge (including ARM/[Android](https://docs.greptime.com/user-guide/deployments/run-on-android)) or cloud, with unified APIs and efficient data sync. |
+| [Flexible Deployment](https://docs.greptime.com/user-guide/deployments-administration/overview) | Deploy anywhere: edge (including ARM/[Android](https://docs.greptime.com/user-guide/deployments-administration/run-on-android)) or cloud, with unified APIs and efficient data sync. |

Learn more in [Why GreptimeDB](https://docs.greptime.com/user-guide/concepts/why-greptimedb) and [Observability 2.0 and the Database for It](https://greptime.com/blogs/2025-04-25-greptimedb-observability2-new-database).
@@ -185,10 +185,11 @@
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4318` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
|
||||
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `slow_query` | -- | -- | The slow query log options. |
|
||||
@@ -288,10 +289,11 @@
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4318` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
|
||||
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `slow_query` | -- | -- | The slow query log options. |
|
||||
@@ -323,6 +325,7 @@
|
||||
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
|
||||
| `use_memory_store` | Bool | `false` | Store data in memory. |
|
||||
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
|
||||
| `region_failure_detector_initialization_delay` | String | `10m` | The delay before starting region failure detection.<br/>This delay helps prevent Metasrv from triggering unnecessary region failovers before all Datanodes are fully started.<br/>Especially useful when the cluster is not deployed with GreptimeDB Operator and maintenance mode is not enabled. |
|
||||
| `allow_region_failover_on_local_wal` | Bool | `false` | Whether to allow region failover on local WAL.<br/>**This option is not recommended to be set to true, because it may lead to data loss during failover.** |
|
||||
| `node_max_idle_time` | String | `24hours` | Max allowed idle time before removing node info from metasrv memory. |
|
||||
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. Enabled by default. |
|
||||
@@ -370,10 +373,11 @@
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4318` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
|
||||
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `export_metrics` | -- | -- | The metasrv can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
|
||||
@@ -432,8 +436,8 @@
|
||||
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
|
||||
| `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.file_size` | String | `128MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.purge_threshold` | String | `1GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.purge_interval` | String | `1m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.purge_threshold` | String | `1GB` | The threshold of the WAL size to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.purge_interval` | String | `1m` | The interval to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
|
||||
@@ -534,10 +538,11 @@
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4318` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
|
||||
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
|
||||
@@ -584,11 +589,14 @@
|
||||
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
|
||||
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
|
||||
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
|
||||
| `logging.otlp_endpoint` | String | `http://localhost:4318` | The OTLP tracing endpoint. |
|
||||
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
|
||||
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
|
||||
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
|
||||
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
|
||||
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
|
||||
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
|
||||
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
|
||||
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
|
||||
| `query` | -- | -- | -- |
|
||||
| `query.parallelism` | Integer | `1` | Parallelism of the query engine for query sent by flownode.<br/>Default to 1, so it won't use too much cpu or memory |
|
||||
|
||||
@@ -129,11 +129,11 @@ dir = "./greptimedb_data/wal"
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
file_size = "128MB"
|
||||
|
||||
## The threshold of the WAL size to trigger a flush.
|
||||
## The threshold of the WAL size to trigger a purge.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
purge_threshold = "1GB"
|
||||
|
||||
## The interval to trigger a flush.
|
||||
## The interval to trigger a purge.
|
||||
## **It's only used when the provider is `raft_engine`**.
|
||||
purge_interval = "1m"
|
||||
|
||||
@@ -629,7 +629,7 @@ level = "info"
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
otlp_endpoint = "http://localhost:4317"
|
||||
otlp_endpoint = "http://localhost:4318"
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
@@ -640,6 +640,9 @@ log_format = "text"
|
||||
## The maximum amount of log files.
|
||||
max_log_files = 720
|
||||
|
||||
## The OTLP tracing export protocol. Can be `grpc`/`http`.
|
||||
otlp_export_protocol = "http"
|
||||
|
||||
## The percentage of tracing will be sampled and exported.
|
||||
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
||||
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
|
||||
|
||||
@@ -83,7 +83,7 @@ level = "info"
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
otlp_endpoint = "http://localhost:4317"
|
||||
otlp_endpoint = "http://localhost:4318"
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
@@ -94,6 +94,9 @@ log_format = "text"
|
||||
## The maximum amount of log files.
|
||||
max_log_files = 720
|
||||
|
||||
## The OTLP tracing export protocol. Can be `grpc`/`http`.
|
||||
otlp_export_protocol = "http"
|
||||
|
||||
## The percentage of tracing will be sampled and exported.
|
||||
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
||||
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
|
||||
@@ -105,3 +108,8 @@ default_ratio = 1.0
|
||||
## The tokio console address.
|
||||
## @toml2docs:none-default
|
||||
#+ tokio_console_addr = "127.0.0.1"
|
||||
|
||||
[query]
|
||||
## Parallelism of the query engine for query sent by flownode.
|
||||
## Default to 1, so it won't use too much cpu or memory
|
||||
parallelism = 1
|
||||
|
||||
@@ -218,7 +218,7 @@ level = "info"
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
otlp_endpoint = "http://localhost:4317"
|
||||
otlp_endpoint = "http://localhost:4318"
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
@@ -229,6 +229,9 @@ log_format = "text"
|
||||
## The maximum amount of log files.
|
||||
max_log_files = 720
|
||||
|
||||
## The OTLP tracing export protocol. Can be `grpc`/`http`.
|
||||
otlp_export_protocol = "http"
|
||||
|
||||
## The percentage of tracing will be sampled and exported.
|
||||
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
||||
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
|
||||
|
||||
@@ -43,6 +43,11 @@ use_memory_store = false
|
||||
## - Using shared storage (e.g., s3).
|
||||
enable_region_failover = false
|
||||
|
||||
## The delay before starting region failure detection.
|
||||
## This delay helps prevent Metasrv from triggering unnecessary region failovers before all Datanodes are fully started.
|
||||
## Especially useful when the cluster is not deployed with GreptimeDB Operator and maintenance mode is not enabled.
|
||||
region_failure_detector_initialization_delay = '10m'
|
||||
|
||||
## Whether to allow region failover on local WAL.
|
||||
## **This option is not recommended to be set to true, because it may lead to data loss during failover.**
|
||||
allow_region_failover_on_local_wal = false
|
||||
@@ -220,7 +225,7 @@ level = "info"
|
||||
enable_otlp_tracing = false
|
||||
|
||||
## The OTLP tracing endpoint.
|
||||
otlp_endpoint = "http://localhost:4317"
|
||||
otlp_endpoint = "http://localhost:4318"
|
||||
|
||||
## Whether to append logs to stdout.
|
||||
append_stdout = true
|
||||
@@ -231,6 +236,9 @@ log_format = "text"
|
||||
## The maximum amount of log files.
|
||||
max_log_files = 720
|
||||
|
||||
## The OTLP tracing export protocol. Can be `grpc`/`http`.
|
||||
otlp_export_protocol = "http"
|
||||
|
||||
## The percentage of tracing will be sampled and exported.
|
||||
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
|
||||
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
|
||||
|
||||
@@ -720,7 +720,7 @@ level = "info"
enable_otlp_tracing = false

## The OTLP tracing endpoint.
otlp_endpoint = "http://localhost:4317"
otlp_endpoint = "http://localhost:4318"

## Whether to append logs to stdout.
append_stdout = true
@@ -731,6 +731,9 @@ log_format = "text"
## The maximum amount of log files.
max_log_files = 720

## The OTLP tracing export protocol. Can be `grpc`/`http`.
otlp_export_protocol = "http"

## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0

@@ -55,12 +55,25 @@ async function main() {
  await client.rest.issues.addLabels({
    owner, repo, issue_number: number, labels: [labelDocsRequired],
  })

  // Get available assignees for the docs repo
  const assigneesResponse = await docsClient.rest.issues.listAssignees({
    owner: 'GreptimeTeam',
    repo: 'docs',
  })
  const validAssignees = assigneesResponse.data.map(assignee => assignee.login)
  core.info(`Available assignees: ${validAssignees.join(', ')}`)

  // Check if the actor is a valid assignee, otherwise fallback to fengjiachun
  const assignee = validAssignees.includes(actor) ? actor : 'fengjiachun'
  core.info(`Assigning issue to: ${assignee}`)

  await docsClient.rest.issues.create({
    owner: 'GreptimeTeam',
    repo: 'docs',
    title: `Update docs for ${title}`,
    body: `A document change request is generated from ${html_url}`,
    assignee: actor,
    assignee: assignee,
  }).then((res) => {
    core.info(`Created issue ${res.data}`)
  })

@@ -48,4 +48,4 @@ Please refer to [SQL query](./query.sql) for GreptimeDB and Clickhouse, and [que

## Addition
- You can tune GreptimeDB's configuration to get better performance.
- You can setup GreptimeDB to use S3 as storage, see [here](https://docs.greptime.com/user-guide/deployments/configuration#storage-options).
- You can setup GreptimeDB to use S3 as storage, see [here](https://docs.greptime.com/user-guide/deployments-administration/configuration#storage-options).

@@ -83,7 +83,7 @@ If you use the [Helm Chart](https://github.com/GreptimeTeam/helm-charts) to depl
- `monitoring.enabled=true`: Deploys a standalone GreptimeDB instance dedicated to monitoring the cluster;
- `grafana.enabled=true`: Deploys Grafana and automatically imports the monitoring dashboard;

The standalone GreptimeDB instance will collect metrics from your cluster, and the dashboard will be available in the Grafana UI. For detailed deployment instructions, please refer to our [Kubernetes deployment guide](https://docs.greptime.com/user-guide/deployments-administration/deploy-on-kubernetes/getting-started).
The standalone GreptimeDB instance will collect metrics from your cluster, and the dashboard will be available in the Grafana UI. For detailed deployment instructions, please refer to our [Kubernetes deployment guide](https://docs.greptime.com/user-guide/deployments-administration-administration/deploy-on-kubernetes/getting-started).

### Self-host Prometheus and import dashboards manually


@@ -31,8 +31,10 @@ excludes = [
    "src/operator/src/expr_helper/trigger.rs",
    "src/sql/src/statements/create/trigger.rs",
    "src/sql/src/statements/show/trigger.rs",
    "src/sql/src/statements/drop/trigger.rs",
    "src/sql/src/parsers/create_parser/trigger.rs",
    "src/sql/src/parsers/show_parser/trigger.rs",
    "src/mito2/src/extension.rs",
]

[properties]

@@ -22,6 +22,7 @@ use greptime_proto::v1::region::RegionResponse as RegionResponseV1;
pub struct RegionResponse {
    pub affected_rows: AffectedRows,
    pub extensions: HashMap<String, Vec<u8>>,
    pub metadata: Vec<u8>,
}

impl RegionResponse {
@@ -29,6 +30,7 @@ impl RegionResponse {
        Self {
            affected_rows: region_response.affected_rows as _,
            extensions: region_response.extensions,
            metadata: region_response.metadata,
        }
    }

@@ -37,6 +39,16 @@ impl RegionResponse {
        Self {
            affected_rows,
            extensions: Default::default(),
            metadata: Vec::new(),
        }
    }

    /// Creates one response with metadata.
    pub fn from_metadata(metadata: Vec<u8>) -> Self {
        Self {
            affected_rows: 0,
            extensions: Default::default(),
            metadata,
        }
    }
}

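A minimal usage sketch (not part of the patch) of the `from_metadata` constructor added above; the byte values are placeholders:

// Sketch: a response that carries only serialized metadata.
let resp = RegionResponse::from_metadata(vec![0xDE, 0xAD]);
assert_eq!(resp.affected_rows, 0);
assert!(resp.extensions.is_empty());
assert_eq!(resp.metadata, vec![0xDE, 0xAD]);
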
@@ -226,18 +226,20 @@ mod tests {
        assert!(options.is_none());

        let mut schema = ColumnSchema::new("test", ConcreteDataType::string_datatype(), true)
            .with_fulltext_options(FulltextOptions {
                enable: true,
                analyzer: FulltextAnalyzer::English,
                case_sensitive: false,
                backend: FulltextBackend::Bloom,
            })
            .with_fulltext_options(FulltextOptions::new_unchecked(
                true,
                FulltextAnalyzer::English,
                false,
                FulltextBackend::Bloom,
                10240,
                0.01,
            ))
            .unwrap();
        schema.set_inverted_index(true);
        let options = options_from_column_schema(&schema).unwrap();
        assert_eq!(
            options.options.get(FULLTEXT_GRPC_KEY).unwrap(),
            "{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false,\"backend\":\"bloom\"}"
            "{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false,\"backend\":\"bloom\",\"granularity\":10240,\"false-positive-rate-in-10000\":100}"
        );
        assert_eq!(
            options.options.get(INVERTED_INDEX_GRPC_KEY).unwrap(),
@@ -247,16 +249,18 @@ mod tests {

    #[test]
    fn test_options_with_fulltext() {
        let fulltext = FulltextOptions {
            enable: true,
            analyzer: FulltextAnalyzer::English,
            case_sensitive: false,
            backend: FulltextBackend::Bloom,
        };
        let fulltext = FulltextOptions::new_unchecked(
            true,
            FulltextAnalyzer::English,
            false,
            FulltextBackend::Bloom,
            10240,
            0.01,
        );
        let options = options_from_fulltext(&fulltext).unwrap().unwrap();
        assert_eq!(
            options.options.get(FULLTEXT_GRPC_KEY).unwrap(),
            "{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false,\"backend\":\"bloom\"}"
            "{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false,\"backend\":\"bloom\",\"granularity\":10240,\"false-positive-rate-in-10000\":100}"
        );
    }


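A side note on the new serialized fields asserted above: the test passes `0.01` to `FulltextOptions::new_unchecked` and expects `"false-positive-rate-in-10000":100`, which suggests the rate is stored in units of 1/10000. A small sketch of that assumed conversion (not code from the patch):

// Assumption inferred from the asserted JSON: the rate is serialized as rate * 10000.
let rate: f64 = 0.01;
let false_positive_rate_in_10000 = (rate * 10000.0) as u32;
assert_eq!(false_positive_rate_in_10000, 100);
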
@@ -5,6 +5,7 @@ edition.workspace = true
license.workspace = true

[features]
enterprise = []
testing = []

[lints]

@@ -14,9 +14,11 @@

pub use client::{CachedKvBackend, CachedKvBackendBuilder, MetaKvBackend};

mod builder;
mod client;
mod manager;
mod table_cache;

pub use builder::KvBackendCatalogManagerBuilder;
pub use manager::KvBackendCatalogManager;
pub use table_cache::{new_table_cache, TableCache, TableCacheRef};

src/catalog/src/kvbackend/builder.rs (new file, 131 lines)
@@ -0,0 +1,131 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use common_catalog::consts::DEFAULT_CATALOG_NAME;
use common_meta::cache::LayeredCacheRegistryRef;
use common_meta::key::flow::FlowMetadataManager;
use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::KvBackendRef;
use common_procedure::ProcedureManagerRef;
use moka::sync::Cache;
use partition::manager::PartitionRuleManager;

#[cfg(feature = "enterprise")]
use crate::information_schema::InformationSchemaTableFactoryRef;
use crate::information_schema::{InformationExtensionRef, InformationSchemaProvider};
use crate::kvbackend::manager::{SystemCatalog, CATALOG_CACHE_MAX_CAPACITY};
use crate::kvbackend::KvBackendCatalogManager;
use crate::process_manager::ProcessManagerRef;
use crate::system_schema::pg_catalog::PGCatalogProvider;

pub struct KvBackendCatalogManagerBuilder {
    information_extension: InformationExtensionRef,
    backend: KvBackendRef,
    cache_registry: LayeredCacheRegistryRef,
    procedure_manager: Option<ProcedureManagerRef>,
    process_manager: Option<ProcessManagerRef>,
    #[cfg(feature = "enterprise")]
    extra_information_table_factories:
        std::collections::HashMap<String, InformationSchemaTableFactoryRef>,
}

impl KvBackendCatalogManagerBuilder {
    pub fn new(
        information_extension: InformationExtensionRef,
        backend: KvBackendRef,
        cache_registry: LayeredCacheRegistryRef,
    ) -> Self {
        Self {
            information_extension,
            backend,
            cache_registry,
            procedure_manager: None,
            process_manager: None,
            #[cfg(feature = "enterprise")]
            extra_information_table_factories: std::collections::HashMap::new(),
        }
    }

    pub fn with_procedure_manager(mut self, procedure_manager: ProcedureManagerRef) -> Self {
        self.procedure_manager = Some(procedure_manager);
        self
    }

    pub fn with_process_manager(mut self, process_manager: ProcessManagerRef) -> Self {
        self.process_manager = Some(process_manager);
        self
    }

    /// Sets the extra information tables.
    #[cfg(feature = "enterprise")]
    pub fn with_extra_information_table_factories(
        mut self,
        factories: std::collections::HashMap<String, InformationSchemaTableFactoryRef>,
    ) -> Self {
        self.extra_information_table_factories = factories;
        self
    }

    pub fn build(self) -> Arc<KvBackendCatalogManager> {
        let Self {
            information_extension,
            backend,
            cache_registry,
            procedure_manager,
            process_manager,
            #[cfg(feature = "enterprise")]
            extra_information_table_factories,
        } = self;
        Arc::new_cyclic(|me| KvBackendCatalogManager {
            information_extension,
            partition_manager: Arc::new(PartitionRuleManager::new(
                backend.clone(),
                cache_registry
                    .get()
                    .expect("Failed to get table_route_cache"),
            )),
            table_metadata_manager: Arc::new(TableMetadataManager::new(backend.clone())),
            system_catalog: SystemCatalog {
                catalog_manager: me.clone(),
                catalog_cache: Cache::new(CATALOG_CACHE_MAX_CAPACITY),
                pg_catalog_cache: Cache::new(CATALOG_CACHE_MAX_CAPACITY),
                information_schema_provider: {
                    let provider = InformationSchemaProvider::new(
                        DEFAULT_CATALOG_NAME.to_string(),
                        me.clone(),
                        Arc::new(FlowMetadataManager::new(backend.clone())),
                        process_manager.clone(),
                        backend.clone(),
                    );
                    #[cfg(feature = "enterprise")]
                    let provider = provider
                        .with_extra_table_factories(extra_information_table_factories.clone());
                    Arc::new(provider)
                },
                pg_catalog_provider: Arc::new(PGCatalogProvider::new(
                    DEFAULT_CATALOG_NAME.to_string(),
                    me.clone(),
                )),
                backend,
                process_manager,
                #[cfg(feature = "enterprise")]
                extra_information_table_factories,
            },
            cache_registry,
            procedure_manager,
        })
    }
}
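A hedged sketch of how the new builder replaces the old `KvBackendCatalogManager::new` call sites changed later in this patch set; `information_extension`, `backend`, `cache_registry`, and `process_manager` are assumed to be constructed elsewhere:

// Sketch: building the catalog manager through the new builder API.
let catalog_manager = KvBackendCatalogManagerBuilder::new(
    information_extension,
    backend.clone(),
    cache_registry.clone(),
)
// Optional: only wired up in Standalone mode, per the manager's field comment.
// .with_procedure_manager(procedure_manager.clone())
.with_process_manager(process_manager.clone())
.build();
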
@@ -28,17 +28,18 @@ use common_meta::cache::{
|
||||
use common_meta::key::catalog_name::CatalogNameKey;
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
use common_meta::key::schema_name::SchemaNameKey;
|
||||
use common_meta::key::table_info::TableInfoValue;
|
||||
use common_meta::key::table_info::{TableInfoManager, TableInfoValue};
|
||||
use common_meta::key::table_name::TableNameKey;
|
||||
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
|
||||
use common_meta::key::TableMetadataManagerRef;
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use common_procedure::ProcedureManagerRef;
|
||||
use futures_util::stream::BoxStream;
|
||||
use futures_util::{StreamExt, TryStreamExt};
|
||||
use moka::sync::Cache;
|
||||
use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
|
||||
use partition::manager::PartitionRuleManagerRef;
|
||||
use session::context::{Channel, QueryContext};
|
||||
use snafu::prelude::*;
|
||||
use store_api::metric_engine_consts::METRIC_ENGINE_NAME;
|
||||
use table::dist_table::DistTable;
|
||||
use table::metadata::TableId;
|
||||
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
|
||||
@@ -51,6 +52,8 @@ use crate::error::{
|
||||
CacheNotFoundSnafu, GetTableCacheSnafu, InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu,
|
||||
ListSchemasSnafu, ListTablesSnafu, Result, TableMetadataManagerSnafu,
|
||||
};
|
||||
#[cfg(feature = "enterprise")]
|
||||
use crate::information_schema::InformationSchemaTableFactoryRef;
|
||||
use crate::information_schema::{InformationExtensionRef, InformationSchemaProvider};
|
||||
use crate::kvbackend::TableCacheRef;
|
||||
use crate::process_manager::ProcessManagerRef;
|
||||
@@ -66,60 +69,22 @@ use crate::CatalogManager;
|
||||
#[derive(Clone)]
|
||||
pub struct KvBackendCatalogManager {
|
||||
/// Provides the extension methods for the `information_schema` tables
|
||||
information_extension: InformationExtensionRef,
|
||||
pub(super) information_extension: InformationExtensionRef,
|
||||
/// Manages partition rules.
|
||||
partition_manager: PartitionRuleManagerRef,
|
||||
pub(super) partition_manager: PartitionRuleManagerRef,
|
||||
/// Manages table metadata.
|
||||
table_metadata_manager: TableMetadataManagerRef,
|
||||
pub(super) table_metadata_manager: TableMetadataManagerRef,
|
||||
/// A sub-CatalogManager that handles system tables
|
||||
system_catalog: SystemCatalog,
|
||||
pub(super) system_catalog: SystemCatalog,
|
||||
/// Cache registry for all caches.
|
||||
cache_registry: LayeredCacheRegistryRef,
|
||||
pub(super) cache_registry: LayeredCacheRegistryRef,
|
||||
/// Only available in `Standalone` mode.
|
||||
procedure_manager: Option<ProcedureManagerRef>,
|
||||
pub(super) procedure_manager: Option<ProcedureManagerRef>,
|
||||
}
|
||||
|
||||
const CATALOG_CACHE_MAX_CAPACITY: u64 = 128;
|
||||
pub(super) const CATALOG_CACHE_MAX_CAPACITY: u64 = 128;
|
||||
|
||||
impl KvBackendCatalogManager {
|
||||
pub fn new(
|
||||
information_extension: InformationExtensionRef,
|
||||
backend: KvBackendRef,
|
||||
cache_registry: LayeredCacheRegistryRef,
|
||||
procedure_manager: Option<ProcedureManagerRef>,
|
||||
process_manager: Option<ProcessManagerRef>,
|
||||
) -> Arc<Self> {
|
||||
Arc::new_cyclic(|me| Self {
|
||||
information_extension,
|
||||
partition_manager: Arc::new(PartitionRuleManager::new(
|
||||
backend.clone(),
|
||||
cache_registry
|
||||
.get()
|
||||
.expect("Failed to get table_route_cache"),
|
||||
)),
|
||||
table_metadata_manager: Arc::new(TableMetadataManager::new(backend.clone())),
|
||||
system_catalog: SystemCatalog {
|
||||
catalog_manager: me.clone(),
|
||||
catalog_cache: Cache::new(CATALOG_CACHE_MAX_CAPACITY),
|
||||
pg_catalog_cache: Cache::new(CATALOG_CACHE_MAX_CAPACITY),
|
||||
information_schema_provider: Arc::new(InformationSchemaProvider::new(
|
||||
DEFAULT_CATALOG_NAME.to_string(),
|
||||
me.clone(),
|
||||
Arc::new(FlowMetadataManager::new(backend.clone())),
|
||||
process_manager.clone(),
|
||||
)),
|
||||
pg_catalog_provider: Arc::new(PGCatalogProvider::new(
|
||||
DEFAULT_CATALOG_NAME.to_string(),
|
||||
me.clone(),
|
||||
)),
|
||||
backend,
|
||||
process_manager,
|
||||
},
|
||||
cache_registry,
|
||||
procedure_manager,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn view_info_cache(&self) -> Result<ViewInfoCacheRef> {
|
||||
self.cache_registry.get().context(CacheNotFoundSnafu {
|
||||
name: "view_info_cache",
|
||||
@@ -142,6 +107,61 @@ impl KvBackendCatalogManager {
|
||||
pub fn procedure_manager(&self) -> Option<ProcedureManagerRef> {
|
||||
self.procedure_manager.clone()
|
||||
}
|
||||
|
||||
// Override logical table's partition key indices with physical table's.
|
||||
async fn override_logical_table_partition_key_indices(
|
||||
table_route_cache: &TableRouteCacheRef,
|
||||
table_info_manager: &TableInfoManager,
|
||||
table: TableRef,
|
||||
) -> Result<TableRef> {
|
||||
// If the table is not a metric table, return the table directly.
|
||||
if table.table_info().meta.engine != METRIC_ENGINE_NAME {
|
||||
return Ok(table);
|
||||
}
|
||||
|
||||
if let Some(table_route_value) = table_route_cache
|
||||
.get(table.table_info().table_id())
|
||||
.await
|
||||
.context(TableMetadataManagerSnafu)?
|
||||
&& let TableRoute::Logical(logical_route) = &*table_route_value
|
||||
&& let Some(physical_table_info_value) = table_info_manager
|
||||
.get(logical_route.physical_table_id())
|
||||
.await
|
||||
.context(TableMetadataManagerSnafu)?
|
||||
{
|
||||
let mut new_table_info = (*table.table_info()).clone();
|
||||
|
||||
// Remap partition key indices from physical table to logical table
|
||||
new_table_info.meta.partition_key_indices = physical_table_info_value
|
||||
.table_info
|
||||
.meta
|
||||
.partition_key_indices
|
||||
.iter()
|
||||
.filter_map(|&physical_index| {
|
||||
// Get the column name from the physical table using the physical index
|
||||
physical_table_info_value
|
||||
.table_info
|
||||
.meta
|
||||
.schema
|
||||
.column_schemas
|
||||
.get(physical_index)
|
||||
.and_then(|physical_column| {
|
||||
// Find the corresponding index in the logical table schema
|
||||
new_table_info
|
||||
.meta
|
||||
.schema
|
||||
.column_index_by_name(physical_column.name.as_str())
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
|
||||
let new_table = DistTable::table(Arc::new(new_table_info));
|
||||
|
||||
return Ok(new_table);
|
||||
}
|
||||
|
||||
Ok(table)
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
@@ -268,10 +288,7 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
let table_cache: TableCacheRef = self.cache_registry.get().context(CacheNotFoundSnafu {
|
||||
name: "table_cache",
|
||||
})?;
|
||||
let table_route_cache: TableRouteCacheRef =
|
||||
self.cache_registry.get().context(CacheNotFoundSnafu {
|
||||
name: "table_route_cache",
|
||||
})?;
|
||||
|
||||
let table = table_cache
|
||||
.get_by_ref(&TableName {
|
||||
catalog_name: catalog_name.to_string(),
|
||||
@@ -281,55 +298,18 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
.await
|
||||
.context(GetTableCacheSnafu)?;
|
||||
|
||||
// Override logical table's partition key indices with physical table's.
|
||||
if let Some(table) = &table
|
||||
&& let Some(table_route_value) = table_route_cache
|
||||
.get(table.table_info().table_id())
|
||||
.await
|
||||
.context(TableMetadataManagerSnafu)?
|
||||
&& let TableRoute::Logical(logical_route) = &*table_route_value
|
||||
&& let Some(physical_table_info_value) = self
|
||||
.table_metadata_manager
|
||||
.table_info_manager()
|
||||
.get(logical_route.physical_table_id())
|
||||
.await
|
||||
.context(TableMetadataManagerSnafu)?
|
||||
{
|
||||
let mut new_table_info = (*table.table_info()).clone();
|
||||
// Gather all column names from the logical table
|
||||
let logical_column_names: std::collections::HashSet<_> = new_table_info
|
||||
.meta
|
||||
.schema
|
||||
.column_schemas()
|
||||
.iter()
|
||||
.map(|col| &col.name)
|
||||
.collect();
|
||||
|
||||
// Only preserve partition key indices where the corresponding columns exist in logical table
|
||||
new_table_info.meta.partition_key_indices = physical_table_info_value
|
||||
.table_info
|
||||
.meta
|
||||
.partition_key_indices
|
||||
.iter()
|
||||
.filter(|&&index| {
|
||||
if let Some(physical_column) = physical_table_info_value
|
||||
.table_info
|
||||
.meta
|
||||
.schema
|
||||
.column_schemas
|
||||
.get(index)
|
||||
{
|
||||
logical_column_names.contains(&physical_column.name)
|
||||
} else {
|
||||
false
|
||||
}
|
||||
})
|
||||
.cloned()
|
||||
.collect();
|
||||
|
||||
let new_table = DistTable::table(Arc::new(new_table_info));
|
||||
|
||||
return Ok(Some(new_table));
|
||||
if let Some(table) = table {
|
||||
let table_route_cache: TableRouteCacheRef =
|
||||
self.cache_registry.get().context(CacheNotFoundSnafu {
|
||||
name: "table_route_cache",
|
||||
})?;
|
||||
return Self::override_logical_table_partition_key_indices(
|
||||
&table_route_cache,
|
||||
self.table_metadata_manager.table_info_manager(),
|
||||
table,
|
||||
)
|
||||
.await
|
||||
.map(Some);
|
||||
}
|
||||
|
||||
if channel == Channel::Postgres {
|
||||
@@ -342,7 +322,7 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
}
|
||||
}
|
||||
|
||||
Ok(table)
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
async fn tables_by_ids(
|
||||
@@ -394,8 +374,20 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
let catalog = catalog.to_string();
|
||||
let schema = schema.to_string();
|
||||
let semaphore = Arc::new(Semaphore::new(CONCURRENCY));
|
||||
let table_route_cache: Result<TableRouteCacheRef> =
|
||||
self.cache_registry.get().context(CacheNotFoundSnafu {
|
||||
name: "table_route_cache",
|
||||
});
|
||||
|
||||
common_runtime::spawn_global(async move {
|
||||
let table_route_cache = match table_route_cache {
|
||||
Ok(table_route_cache) => table_route_cache,
|
||||
Err(e) => {
|
||||
let _ = tx.send(Err(e)).await;
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
let table_id_stream = metadata_manager
|
||||
.table_name_manager()
|
||||
.tables(&catalog, &schema)
|
||||
@@ -422,6 +414,7 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
let metadata_manager = metadata_manager.clone();
|
||||
let tx = tx.clone();
|
||||
let semaphore = semaphore.clone();
|
||||
let table_route_cache = table_route_cache.clone();
|
||||
common_runtime::spawn_global(async move {
|
||||
// we don't explicitly close the semaphore so just ignore the potential error.
|
||||
let _ = semaphore.acquire().await;
|
||||
@@ -439,6 +432,16 @@ impl CatalogManager for KvBackendCatalogManager {
|
||||
};
|
||||
|
||||
for table in table_info_values.into_values().map(build_table) {
|
||||
let table = if let Ok(table) = table {
|
||||
Self::override_logical_table_partition_key_indices(
|
||||
&table_route_cache,
|
||||
metadata_manager.table_info_manager(),
|
||||
table,
|
||||
)
|
||||
.await
|
||||
} else {
|
||||
table
|
||||
};
|
||||
if tx.send(table).await.is_err() {
|
||||
return;
|
||||
}
|
||||
@@ -468,16 +471,19 @@ fn build_table(table_info_value: TableInfoValue) -> Result<TableRef> {
|
||||
/// - information_schema.{tables}
|
||||
/// - pg_catalog.{tables}
|
||||
#[derive(Clone)]
|
||||
struct SystemCatalog {
|
||||
catalog_manager: Weak<KvBackendCatalogManager>,
|
||||
catalog_cache: Cache<String, Arc<InformationSchemaProvider>>,
|
||||
pg_catalog_cache: Cache<String, Arc<PGCatalogProvider>>,
|
||||
pub(super) struct SystemCatalog {
|
||||
pub(super) catalog_manager: Weak<KvBackendCatalogManager>,
|
||||
pub(super) catalog_cache: Cache<String, Arc<InformationSchemaProvider>>,
|
||||
pub(super) pg_catalog_cache: Cache<String, Arc<PGCatalogProvider>>,
|
||||
|
||||
// system_schema_provider for default catalog
|
||||
information_schema_provider: Arc<InformationSchemaProvider>,
|
||||
pg_catalog_provider: Arc<PGCatalogProvider>,
|
||||
backend: KvBackendRef,
|
||||
process_manager: Option<ProcessManagerRef>,
|
||||
pub(super) information_schema_provider: Arc<InformationSchemaProvider>,
|
||||
pub(super) pg_catalog_provider: Arc<PGCatalogProvider>,
|
||||
pub(super) backend: KvBackendRef,
|
||||
pub(super) process_manager: Option<ProcessManagerRef>,
|
||||
#[cfg(feature = "enterprise")]
|
||||
pub(super) extra_information_table_factories:
|
||||
std::collections::HashMap<String, InformationSchemaTableFactoryRef>,
|
||||
}
|
||||
|
||||
impl SystemCatalog {
|
||||
@@ -541,12 +547,17 @@ impl SystemCatalog {
|
||||
if schema == INFORMATION_SCHEMA_NAME {
|
||||
let information_schema_provider =
|
||||
self.catalog_cache.get_with_by_ref(catalog, move || {
|
||||
Arc::new(InformationSchemaProvider::new(
|
||||
let provider = InformationSchemaProvider::new(
|
||||
catalog.to_string(),
|
||||
self.catalog_manager.clone(),
|
||||
Arc::new(FlowMetadataManager::new(self.backend.clone())),
|
||||
self.process_manager.clone(),
|
||||
))
|
||||
self.backend.clone(),
|
||||
);
|
||||
#[cfg(feature = "enterprise")]
|
||||
let provider = provider
|
||||
.with_extra_table_factories(self.extra_information_table_factories.clone());
|
||||
Arc::new(provider)
|
||||
});
|
||||
information_schema_provider.table(table_name)
|
||||
} else if schema == PG_CATALOG_NAME && channel == Channel::Postgres {
|
||||
|
||||
@@ -352,11 +352,13 @@ impl MemoryCatalogManager {
|
||||
}
|
||||
|
||||
fn create_catalog_entry(self: &Arc<Self>, catalog: String) -> SchemaEntries {
|
||||
let backend = Arc::new(MemoryKvBackend::new());
|
||||
let information_schema_provider = InformationSchemaProvider::new(
|
||||
catalog,
|
||||
Arc::downgrade(self) as Weak<dyn CatalogManager>,
|
||||
Arc::new(FlowMetadataManager::new(Arc::new(MemoryKvBackend::new()))),
|
||||
Arc::new(FlowMetadataManager::new(backend.clone())),
|
||||
None, // we don't need ProcessManager on regions server.
|
||||
backend,
|
||||
);
|
||||
let information_schema = information_schema_provider.tables().clone();
|
||||
|
||||
|
||||
@@ -21,7 +21,7 @@ use std::sync::{Arc, RwLock};
|
||||
use api::v1::frontend::{KillProcessRequest, ListProcessRequest, ProcessInfo};
|
||||
use common_base::cancellation::CancellationHandle;
|
||||
use common_frontend::selector::{FrontendSelector, MetaClientSelector};
|
||||
use common_telemetry::{debug, info};
|
||||
use common_telemetry::{debug, info, warn};
|
||||
use common_time::util::current_time_millis;
|
||||
use meta_client::MetaClientRef;
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
@@ -141,14 +141,20 @@ impl ProcessManager {
|
||||
.await
|
||||
.context(error::InvokeFrontendSnafu)?;
|
||||
for mut f in frontends {
|
||||
processes.extend(
|
||||
f.list_process(ListProcessRequest {
|
||||
let result = f
|
||||
.list_process(ListProcessRequest {
|
||||
catalog: catalog.unwrap_or_default().to_string(),
|
||||
})
|
||||
.await
|
||||
.context(error::InvokeFrontendSnafu)?
|
||||
.processes,
|
||||
);
|
||||
.context(error::InvokeFrontendSnafu);
|
||||
match result {
|
||||
Ok(resp) => {
|
||||
processes.extend(resp.processes);
|
||||
}
|
||||
Err(e) => {
|
||||
warn!(e; "Skipping failing node: {:?}", f)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
processes.extend(self.local_processes(catalog)?);
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
pub mod information_schema;
|
||||
mod memory_table;
|
||||
pub mod pg_catalog;
|
||||
mod predicate;
|
||||
pub mod predicate;
|
||||
mod utils;
|
||||
|
||||
use std::collections::HashMap;
|
||||
@@ -96,7 +96,7 @@ trait SystemSchemaProviderInner {
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) trait SystemTable {
|
||||
pub trait SystemTable {
|
||||
fn table_id(&self) -> TableId;
|
||||
|
||||
fn table_name(&self) -> &'static str;
|
||||
@@ -110,7 +110,7 @@ pub(crate) trait SystemTable {
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) type SystemTableRef = Arc<dyn SystemTable + Send + Sync>;
|
||||
pub type SystemTableRef = Arc<dyn SystemTable + Send + Sync>;
|
||||
|
||||
struct SystemTableDataSource {
|
||||
table: SystemTableRef,
|
||||
|
||||
@@ -19,7 +19,7 @@ mod information_memory_table;
|
||||
pub mod key_column_usage;
|
||||
mod partitions;
|
||||
mod procedure_info;
|
||||
mod process_list;
|
||||
pub mod process_list;
|
||||
pub mod region_peers;
|
||||
mod region_statistics;
|
||||
mod runtime_metrics;
|
||||
@@ -38,6 +38,7 @@ use common_meta::cluster::NodeInfo;
|
||||
use common_meta::datanode::RegionStat;
|
||||
use common_meta::key::flow::flow_state::FlowStat;
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use common_procedure::ProcedureInfo;
|
||||
use common_recordbatch::SendableRecordBatchStream;
|
||||
use datatypes::schema::SchemaRef;
|
||||
@@ -112,6 +113,25 @@ macro_rules! setup_memory_table {
|
||||
};
|
||||
}
|
||||
|
||||
#[cfg(feature = "enterprise")]
|
||||
pub struct MakeInformationTableRequest {
|
||||
pub catalog_name: String,
|
||||
pub catalog_manager: Weak<dyn CatalogManager>,
|
||||
pub kv_backend: KvBackendRef,
|
||||
}
|
||||
|
||||
/// A factory trait for making information schema tables.
|
||||
///
|
||||
/// This trait allows for extensibility of the information schema by providing
|
||||
/// a way to dynamically create custom information schema tables.
|
||||
#[cfg(feature = "enterprise")]
|
||||
pub trait InformationSchemaTableFactory {
|
||||
fn make_information_table(&self, req: MakeInformationTableRequest) -> SystemTableRef;
|
||||
}
|
||||
|
||||
#[cfg(feature = "enterprise")]
|
||||
pub type InformationSchemaTableFactoryRef = Arc<dyn InformationSchemaTableFactory + Send + Sync>;
|
||||
|
||||
/// The `information_schema` tables info provider.
|
||||
pub struct InformationSchemaProvider {
|
||||
catalog_name: String,
|
||||
@@ -119,6 +139,10 @@ pub struct InformationSchemaProvider {
|
||||
process_manager: Option<ProcessManagerRef>,
|
||||
flow_metadata_manager: Arc<FlowMetadataManager>,
|
||||
tables: HashMap<String, TableRef>,
|
||||
#[allow(dead_code)]
|
||||
kv_backend: KvBackendRef,
|
||||
#[cfg(feature = "enterprise")]
|
||||
extra_table_factories: HashMap<String, InformationSchemaTableFactoryRef>,
|
||||
}
|
||||
|
||||
impl SystemSchemaProvider for InformationSchemaProvider {
|
||||
@@ -128,6 +152,7 @@ impl SystemSchemaProvider for InformationSchemaProvider {
|
||||
&self.tables
|
||||
}
|
||||
}
|
||||
|
||||
impl SystemSchemaProviderInner for InformationSchemaProvider {
|
||||
fn catalog_name(&self) -> &str {
|
||||
&self.catalog_name
|
||||
@@ -215,7 +240,22 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
|
||||
.process_manager
|
||||
.as_ref()
|
||||
.map(|p| Arc::new(InformationSchemaProcessList::new(p.clone())) as _),
|
||||
_ => None,
|
||||
table_name => {
|
||||
#[cfg(feature = "enterprise")]
|
||||
return self.extra_table_factories.get(table_name).map(|factory| {
|
||||
let req = MakeInformationTableRequest {
|
||||
catalog_name: self.catalog_name.clone(),
|
||||
catalog_manager: self.catalog_manager.clone(),
|
||||
kv_backend: self.kv_backend.clone(),
|
||||
};
|
||||
factory.make_information_table(req)
|
||||
});
|
||||
#[cfg(not(feature = "enterprise"))]
|
||||
{
|
||||
let _ = table_name;
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -226,6 +266,7 @@ impl InformationSchemaProvider {
|
||||
catalog_manager: Weak<dyn CatalogManager>,
|
||||
flow_metadata_manager: Arc<FlowMetadataManager>,
|
||||
process_manager: Option<ProcessManagerRef>,
|
||||
kv_backend: KvBackendRef,
|
||||
) -> Self {
|
||||
let mut provider = Self {
|
||||
catalog_name,
|
||||
@@ -233,6 +274,9 @@ impl InformationSchemaProvider {
|
||||
flow_metadata_manager,
|
||||
process_manager,
|
||||
tables: HashMap::new(),
|
||||
kv_backend,
|
||||
#[cfg(feature = "enterprise")]
|
||||
extra_table_factories: HashMap::new(),
|
||||
};
|
||||
|
||||
provider.build_tables();
|
||||
@@ -240,6 +284,16 @@ impl InformationSchemaProvider {
|
||||
provider
|
||||
}
|
||||
|
||||
#[cfg(feature = "enterprise")]
|
||||
pub(crate) fn with_extra_table_factories(
|
||||
mut self,
|
||||
factories: HashMap<String, InformationSchemaTableFactoryRef>,
|
||||
) -> Self {
|
||||
self.extra_table_factories = factories;
|
||||
self.build_tables();
|
||||
self
|
||||
}
|
||||
|
||||
fn build_tables(&mut self) {
|
||||
let mut tables = HashMap::new();
|
||||
|
||||
@@ -290,16 +344,19 @@ impl InformationSchemaProvider {
|
||||
if let Some(process_list) = self.build_table(PROCESS_LIST) {
|
||||
tables.insert(PROCESS_LIST.to_string(), process_list);
|
||||
}
|
||||
#[cfg(feature = "enterprise")]
|
||||
for name in self.extra_table_factories.keys() {
|
||||
tables.insert(name.to_string(), self.build_table(name).expect(name));
|
||||
}
|
||||
// Add memory tables
|
||||
for name in MEMORY_TABLES.iter() {
|
||||
tables.insert((*name).to_string(), self.build_table(name).expect(name));
|
||||
}
|
||||
|
||||
self.tables = tables;
|
||||
}
|
||||
}
|
||||
|
||||
trait InformationTable {
|
||||
pub trait InformationTable {
|
||||
fn table_id(&self) -> TableId;
|
||||
|
||||
fn table_name(&self) -> &'static str;
|
||||
|
||||
@@ -39,14 +39,14 @@ use crate::process_manager::ProcessManagerRef;
|
||||
use crate::system_schema::information_schema::InformationTable;
|
||||
|
||||
/// Column names of `information_schema.process_list`
|
||||
const ID: &str = "id";
|
||||
const CATALOG: &str = "catalog";
|
||||
const SCHEMAS: &str = "schemas";
|
||||
const QUERY: &str = "query";
|
||||
const CLIENT: &str = "client";
|
||||
const FRONTEND: &str = "frontend";
|
||||
const START_TIMESTAMP: &str = "start_timestamp";
|
||||
const ELAPSED_TIME: &str = "elapsed_time";
|
||||
pub const ID: &str = "id";
|
||||
pub const CATALOG: &str = "catalog";
|
||||
pub const SCHEMAS: &str = "schemas";
|
||||
pub const QUERY: &str = "query";
|
||||
pub const CLIENT: &str = "client";
|
||||
pub const FRONTEND: &str = "frontend";
|
||||
pub const START_TIMESTAMP: &str = "start_timestamp";
|
||||
pub const ELAPSED_TIME: &str = "elapsed_time";
|
||||
|
||||
/// `information_schema.process_list` table implementation that tracks running
|
||||
/// queries in current cluster.
|
||||
|
||||
@@ -48,3 +48,4 @@ pub const FLOWS: &str = "flows";
|
||||
pub const PROCEDURE_INFO: &str = "procedure_info";
|
||||
pub const REGION_STATISTICS: &str = "region_statistics";
|
||||
pub const PROCESS_LIST: &str = "process_list";
|
||||
pub const TRIGGER_LIST: &str = "trigger_list";
|
||||
|
||||
@@ -207,6 +207,7 @@ mod tests {
|
||||
use session::context::QueryContext;
|
||||
|
||||
use super::*;
|
||||
use crate::kvbackend::KvBackendCatalogManagerBuilder;
|
||||
use crate::memory::MemoryCatalogManager;
|
||||
|
||||
#[test]
|
||||
@@ -323,13 +324,13 @@ mod tests {
|
||||
.build(),
|
||||
);
|
||||
|
||||
let catalog_manager = KvBackendCatalogManager::new(
|
||||
let catalog_manager = KvBackendCatalogManagerBuilder::new(
|
||||
Arc::new(NoopInformationExtension),
|
||||
backend.clone(),
|
||||
layered_cache_registry,
|
||||
None,
|
||||
None,
|
||||
);
|
||||
)
|
||||
.build();
|
||||
|
||||
let table_metadata_manager = TableMetadataManager::new(backend);
|
||||
let mut view_info = common_meta::key::test_utils::new_test_table_info(1024, vec![]);
|
||||
view_info.table_type = TableType::View;
|
||||
|
||||
@@ -43,7 +43,6 @@ common-time.workspace = true
|
||||
common-version.workspace = true
|
||||
common-wal.workspace = true
|
||||
datatypes.workspace = true
|
||||
either = "1.8"
|
||||
etcd-client.workspace = true
|
||||
futures.workspace = true
|
||||
humantime.workspace = true
|
||||
|
||||
@@ -160,6 +160,7 @@ fn create_table_info(table_id: TableId, table_name: TableName) -> RawTableInfo {
|
||||
options: Default::default(),
|
||||
region_numbers: (1..=100).collect(),
|
||||
partition_key_indices: vec![],
|
||||
column_ids: vec![],
|
||||
};
|
||||
|
||||
RawTableInfo {
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#![allow(clippy::print_stdout)]
|
||||
mod bench;
|
||||
mod data;
|
||||
mod database;
|
||||
|
||||
@@ -241,7 +241,6 @@ impl RepairTool {
|
||||
let alter_table_request = alter_table::make_alter_region_request_for_peer(
|
||||
logical_table_id,
|
||||
&alter_table_expr,
|
||||
full_table_metadata.table_info.ident.version,
|
||||
peer,
|
||||
physical_region_routes,
|
||||
)?;
|
||||
|
||||
@@ -66,7 +66,6 @@ pub fn generate_alter_table_expr_for_all_columns(
|
||||
pub fn make_alter_region_request_for_peer(
|
||||
logical_table_id: TableId,
|
||||
alter_table_expr: &AlterTableExpr,
|
||||
schema_version: u64,
|
||||
peer: &Peer,
|
||||
region_routes: &[RegionRoute],
|
||||
) -> Result<RegionRequest> {
|
||||
@@ -74,7 +73,7 @@ pub fn make_alter_region_request_for_peer(
|
||||
let mut requests = Vec::with_capacity(regions_on_this_peer.len());
|
||||
for region_number in ®ions_on_this_peer {
|
||||
let region_id = RegionId::new(logical_table_id, *region_number);
|
||||
let request = make_alter_region_request(region_id, alter_table_expr, schema_version);
|
||||
let request = make_alter_region_request(region_id, alter_table_expr);
|
||||
requests.push(request);
|
||||
}
|
||||
|
||||
|
||||
@@ -301,7 +301,6 @@ struct MetaInfoTool {
|
||||
|
||||
#[async_trait]
|
||||
impl Tool for MetaInfoTool {
|
||||
#[allow(clippy::print_stdout)]
|
||||
async fn do_work(&self) -> std::result::Result<(), BoxedError> {
|
||||
let result = MetadataSnapshotManager::info(
|
||||
&self.inner,
|
||||
|
||||
@@ -31,7 +31,7 @@ use base64::prelude::BASE64_STANDARD;
|
||||
use base64::Engine;
|
||||
use common_catalog::build_db_string;
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_grpc::flight::do_put::DoPutResponse;
|
||||
use common_grpc::flight::{FlightDecoder, FlightMessage};
|
||||
use common_query::Output;
|
||||
@@ -48,7 +48,7 @@ use tonic::transport::Channel;
|
||||
|
||||
use crate::error::{
|
||||
ConvertFlightDataSnafu, Error, FlightGetSnafu, IllegalFlightMessagesSnafu,
|
||||
InvalidTonicMetadataValueSnafu, ServerSnafu,
|
||||
InvalidTonicMetadataValueSnafu,
|
||||
};
|
||||
use crate::{error, from_grpc_response, Client, Result};
|
||||
|
||||
@@ -196,12 +196,22 @@ impl Database {
|
||||
|
||||
/// Retry if connection fails, max_retries is the max number of retries, so the total wait time
|
||||
/// is `max_retries * GRPC_CONN_TIMEOUT`
|
||||
pub async fn handle_with_retry(&self, request: Request, max_retries: u32) -> Result<u32> {
|
||||
pub async fn handle_with_retry(
|
||||
&self,
|
||||
request: Request,
|
||||
max_retries: u32,
|
||||
hints: &[(&str, &str)],
|
||||
) -> Result<u32> {
|
||||
let mut client = make_database_client(&self.client)?.inner;
|
||||
let mut retries = 0;
|
||||
|
||||
let request = self.to_rpc_request(request);
|
||||
|
||||
loop {
|
||||
let raw_response = client.handle(request.clone()).await;
|
||||
let mut tonic_request = tonic::Request::new(request.clone());
|
||||
let metadata = tonic_request.metadata_mut();
|
||||
Self::put_hints(metadata, hints)?;
|
||||
let raw_response = client.handle(tonic_request).await;
|
||||
match (raw_response, retries < max_retries) {
|
||||
(Ok(resp), _) => return from_grpc_response(resp.into_inner()),
|
||||
(Err(err), true) => {
|
||||
@@ -292,21 +302,16 @@ impl Database {
|
||||
let response = client.mut_inner().do_get(request).await.or_else(|e| {
|
||||
let tonic_code = e.code();
|
||||
let e: Error = e.into();
|
||||
let code = e.status_code();
|
||||
let msg = e.to_string();
|
||||
let error =
|
||||
Err(BoxedError::new(ServerSnafu { code, msg }.build())).with_context(|_| {
|
||||
FlightGetSnafu {
|
||||
addr: client.addr().to_string(),
|
||||
tonic_code,
|
||||
}
|
||||
});
|
||||
error!(
|
||||
"Failed to do Flight get, addr: {}, code: {}, source: {:?}",
|
||||
client.addr(),
|
||||
tonic_code,
|
||||
error
|
||||
e
|
||||
);
|
||||
let error = Err(BoxedError::new(e)).with_context(|_| FlightGetSnafu {
|
||||
addr: client.addr().to_string(),
|
||||
tonic_code,
|
||||
});
|
||||
error
|
||||
})?;
|
||||
|
||||
@@ -436,8 +441,11 @@ mod tests {
|
||||
|
||||
use api::v1::auth_header::AuthScheme;
|
||||
use api::v1::{AuthHeader, Basic};
|
||||
use common_error::status_code::StatusCode;
|
||||
use tonic::{Code, Status};
|
||||
|
||||
use super::*;
|
||||
use crate::error::TonicSnafu;
|
||||
|
||||
#[test]
|
||||
fn test_flight_ctx() {
|
||||
@@ -460,4 +468,19 @@ mod tests {
|
||||
})
|
||||
)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_from_tonic_status() {
|
||||
let expected = TonicSnafu {
|
||||
code: StatusCode::Internal,
|
||||
msg: "blabla".to_string(),
|
||||
tonic_code: Code::Internal,
|
||||
}
|
||||
.build();
|
||||
|
||||
let status = Status::new(Code::Internal, "blabla");
|
||||
let actual: Error = status.into();
|
||||
|
||||
assert_eq!(expected.to_string(), actual.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -14,13 +14,13 @@
|
||||
|
||||
use std::any::Any;
|
||||
|
||||
use common_error::define_from_tonic_status;
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::status_code::{convert_tonic_code_to_status_code, StatusCode};
|
||||
use common_error::{GREPTIME_DB_HEADER_ERROR_CODE, GREPTIME_DB_HEADER_ERROR_MSG};
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_macro::stack_trace_debug;
|
||||
use snafu::{location, Location, Snafu};
|
||||
use tonic::metadata::errors::InvalidMetadataValue;
|
||||
use tonic::{Code, Status};
|
||||
use tonic::Code;
|
||||
|
||||
#[derive(Snafu)]
|
||||
#[snafu(visibility(pub))]
|
||||
@@ -124,6 +124,15 @@ pub enum Error {
|
||||
location: Location,
|
||||
source: datatypes::error::Error,
|
||||
},
|
||||
|
||||
#[snafu(display("{}", msg))]
|
||||
Tonic {
|
||||
code: StatusCode,
|
||||
msg: String,
|
||||
tonic_code: Code,
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -135,7 +144,7 @@ impl ErrorExt for Error {
|
||||
| Error::MissingField { .. }
|
||||
| Error::IllegalDatabaseResponse { .. } => StatusCode::Internal,
|
||||
|
||||
Error::Server { code, .. } => *code,
|
||||
Error::Server { code, .. } | Error::Tonic { code, .. } => *code,
|
||||
Error::FlightGet { source, .. }
|
||||
| Error::RegionServer { source, .. }
|
||||
| Error::FlowServer { source, .. } => source.status_code(),
|
||||
@@ -153,34 +162,7 @@ impl ErrorExt for Error {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Status> for Error {
|
||||
fn from(e: Status) -> Self {
|
||||
fn get_metadata_value(e: &Status, key: &str) -> Option<String> {
|
||||
e.metadata()
|
||||
.get(key)
|
||||
.and_then(|v| String::from_utf8(v.as_bytes().to_vec()).ok())
|
||||
}
|
||||
|
||||
let code = get_metadata_value(&e, GREPTIME_DB_HEADER_ERROR_CODE).and_then(|s| {
|
||||
if let Ok(code) = s.parse::<u32>() {
|
||||
StatusCode::from_u32(code)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
});
|
||||
let tonic_code = e.code();
|
||||
let code = code.unwrap_or_else(|| convert_tonic_code_to_status_code(tonic_code));
|
||||
|
||||
let msg = get_metadata_value(&e, GREPTIME_DB_HEADER_ERROR_MSG)
|
||||
.unwrap_or_else(|| e.message().to_string());
|
||||
|
||||
Self::Server {
|
||||
code,
|
||||
msg,
|
||||
location: location!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
define_from_tonic_status!(Error, Tonic);
|
||||
|
||||
impl Error {
|
||||
pub fn should_retry(&self) -> bool {
|
||||
|
||||
@@ -21,7 +21,7 @@ use arc_swap::ArcSwapOption;
|
||||
use arrow_flight::Ticket;
|
||||
use async_stream::stream;
|
||||
use async_trait::async_trait;
|
||||
use common_error::ext::{BoxedError, ErrorExt};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_error::status_code::StatusCode;
|
||||
use common_grpc::flight::{FlightDecoder, FlightMessage};
|
||||
use common_meta::error::{self as meta_error, Result as MetaResult};
|
||||
@@ -107,24 +107,18 @@ impl RegionRequester {
|
||||
.mut_inner()
|
||||
.do_get(ticket)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
.or_else(|e| {
|
||||
let tonic_code = e.code();
|
||||
let e: error::Error = e.into();
|
||||
let code = e.status_code();
|
||||
let msg = e.to_string();
|
||||
let error = ServerSnafu { code, msg }
|
||||
.fail::<()>()
|
||||
.map_err(BoxedError::new)
|
||||
.with_context(|_| FlightGetSnafu {
|
||||
tonic_code,
|
||||
addr: flight_client.addr().to_string(),
|
||||
})
|
||||
.unwrap_err();
|
||||
error!(
|
||||
e; "Failed to do Flight get, addr: {}, code: {}",
|
||||
flight_client.addr(),
|
||||
tonic_code
|
||||
);
|
||||
let error = Err(BoxedError::new(e)).with_context(|_| FlightGetSnafu {
|
||||
addr: flight_client.addr().to_string(),
|
||||
tonic_code,
|
||||
});
|
||||
error
|
||||
})?;
|
||||
|
||||
|
||||
@@ -16,7 +16,7 @@ default = [
|
||||
"meta-srv/pg_kvbackend",
|
||||
"meta-srv/mysql_kvbackend",
|
||||
]
|
||||
enterprise = ["common-meta/enterprise", "frontend/enterprise", "meta-srv/enterprise"]
|
||||
enterprise = ["common-meta/enterprise", "frontend/enterprise", "meta-srv/enterprise", "catalog/enterprise"]
|
||||
tokio-console = ["common-telemetry/tokio-console"]
|
||||
|
||||
[lints]
|
||||
@@ -52,7 +52,6 @@ common-version.workspace = true
|
||||
common-wal.workspace = true
|
||||
datanode.workspace = true
|
||||
datatypes.workspace = true
|
||||
either = "1.8"
|
||||
etcd-client.workspace = true
|
||||
file-engine.workspace = true
|
||||
flow.workspace = true
|
||||
@@ -67,6 +66,7 @@ metric-engine.workspace = true
|
||||
mito2.workspace = true
|
||||
moka.workspace = true
|
||||
nu-ansi-term = "0.46"
|
||||
object-store.workspace = true
|
||||
plugins.workspace = true
|
||||
prometheus.workspace = true
|
||||
prost.workspace = true
|
||||
|
||||
@@ -20,11 +20,11 @@ use cmd::error::{InitTlsProviderSnafu, Result};
|
||||
use cmd::options::GlobalOptions;
|
||||
use cmd::{cli, datanode, flownode, frontend, metasrv, standalone, App};
|
||||
use common_base::Plugins;
|
||||
use common_version::version;
|
||||
use common_version::{verbose_version, version};
|
||||
use servers::install_ring_crypto_provider;
|
||||
|
||||
#[derive(Parser)]
|
||||
#[command(name = "greptime", author, version, long_version = version(), about)]
|
||||
#[command(name = "greptime", author, version, long_version = verbose_version(), about)]
|
||||
#[command(propagate_version = true)]
|
||||
pub(crate) struct Command {
|
||||
#[clap(subcommand)]
|
||||
@@ -143,10 +143,8 @@ async fn start(cli: Command) -> Result<()> {
|
||||
}
|
||||
|
||||
fn setup_human_panic() {
|
||||
human_panic::setup_panic!(
|
||||
human_panic::Metadata::new("GreptimeDB", env!("CARGO_PKG_VERSION"))
|
||||
.homepage("https://github.com/GreptimeTeam/greptimedb/discussions")
|
||||
);
|
||||
human_panic::setup_panic!(human_panic::Metadata::new("GreptimeDB", version())
|
||||
.homepage("https://github.com/GreptimeTeam/greptimedb/discussions"));
|
||||
|
||||
common_telemetry::set_panic_hook();
|
||||
}
|
||||
|
||||
@@ -280,7 +280,7 @@ mod tests {
|
||||
|
||||
use common_config::ENV_VAR_SEP;
|
||||
use common_test_util::temp_dir::create_named_temp_file;
|
||||
use datanode::config::{FileConfig, GcsConfig, ObjectStoreConfig, S3Config};
|
||||
use object_store::config::{FileConfig, GcsConfig, ObjectStoreConfig, S3Config};
|
||||
use servers::heartbeat_options::HeartbeatOptions;
|
||||
|
||||
use super::*;
|
||||
|
||||
@@ -19,7 +19,7 @@ use catalog::kvbackend::MetaKvBackend;
|
||||
use common_base::Plugins;
|
||||
use common_meta::cache::LayeredCacheRegistryBuilder;
|
||||
use common_telemetry::info;
|
||||
use common_version::{short_version, version};
|
||||
use common_version::{short_version, verbose_version};
|
||||
use datanode::datanode::DatanodeBuilder;
|
||||
use datanode::service::DatanodeServiceBuilder;
|
||||
use meta_client::MetaClientType;
|
||||
@@ -67,7 +67,7 @@ impl InstanceBuilder {
|
||||
None,
|
||||
);
|
||||
|
||||
log_versions(version(), short_version(), APP_NAME);
|
||||
log_versions(verbose_version(), short_version(), APP_NAME);
|
||||
create_resource_limit_metrics(APP_NAME);
|
||||
|
||||
plugins::setup_datanode_plugins(plugins, &opts.plugins, dn_opts)
|
||||
|
||||
@@ -18,7 +18,7 @@ use std::time::Duration;
|
||||
|
||||
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
|
||||
use catalog::information_extension::DistributedInformationExtension;
|
||||
use catalog::kvbackend::{CachedKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
|
||||
use catalog::kvbackend::{CachedKvBackendBuilder, KvBackendCatalogManagerBuilder, MetaKvBackend};
|
||||
use clap::Parser;
|
||||
use client::client_manager::NodeClients;
|
||||
use common_base::Plugins;
|
||||
@@ -32,7 +32,7 @@ use common_meta::key::flow::FlowMetadataManager;
|
||||
use common_meta::key::TableMetadataManager;
|
||||
use common_telemetry::info;
|
||||
use common_telemetry::logging::{TracingOptions, DEFAULT_LOGGING_DIR};
|
||||
use common_version::{short_version, version};
|
||||
use common_version::{short_version, verbose_version};
|
||||
use flow::{
|
||||
get_flow_auth_options, FlownodeBuilder, FlownodeInstance, FlownodeServiceBuilder,
|
||||
FrontendClient, FrontendInvoker,
|
||||
@@ -279,7 +279,7 @@ impl StartCommand {
|
||||
None,
|
||||
);
|
||||
|
||||
log_versions(version(), short_version(), APP_NAME);
|
||||
log_versions(verbose_version(), short_version(), APP_NAME);
|
||||
create_resource_limit_metrics(APP_NAME);
|
||||
|
||||
info!("Flownode start command: {:#?}", self);
|
||||
@@ -342,13 +342,12 @@ impl StartCommand {
|
||||
|
||||
let information_extension =
|
||||
Arc::new(DistributedInformationExtension::new(meta_client.clone()));
|
||||
let catalog_manager = KvBackendCatalogManager::new(
|
||||
let catalog_manager = KvBackendCatalogManagerBuilder::new(
|
||||
information_extension,
|
||||
cached_meta_backend.clone(),
|
||||
layered_cache_registry.clone(),
|
||||
None,
|
||||
None,
|
||||
);
|
||||
)
|
||||
.build();
|
||||
|
||||
let table_metadata_manager =
|
||||
Arc::new(TableMetadataManager::new(cached_meta_backend.clone()));
|
||||
@@ -371,8 +370,11 @@ impl StartCommand {
|
||||
|
||||
let flow_metadata_manager = Arc::new(FlowMetadataManager::new(cached_meta_backend.clone()));
|
||||
let flow_auth_header = get_flow_auth_options(&opts).context(StartFlownodeSnafu)?;
|
||||
let frontend_client =
|
||||
FrontendClient::from_meta_client(meta_client.clone(), flow_auth_header);
|
||||
let frontend_client = FrontendClient::from_meta_client(
|
||||
meta_client.clone(),
|
||||
flow_auth_header,
|
||||
opts.query.clone(),
|
||||
);
|
||||
let frontend_client = Arc::new(frontend_client);
|
||||
let flownode_builder = FlownodeBuilder::new(
|
||||
opts.clone(),
|
||||
|
||||
@@ -19,7 +19,7 @@ use std::time::Duration;
|
||||
use async_trait::async_trait;
|
||||
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
|
||||
use catalog::information_extension::DistributedInformationExtension;
|
||||
use catalog::kvbackend::{CachedKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
|
||||
use catalog::kvbackend::{CachedKvBackendBuilder, KvBackendCatalogManagerBuilder, MetaKvBackend};
|
||||
use catalog::process_manager::ProcessManager;
|
||||
use clap::Parser;
|
||||
use client::client_manager::NodeClients;
|
||||
@@ -33,7 +33,7 @@ use common_meta::heartbeat::handler::HandlerGroupExecutor;
|
||||
use common_telemetry::info;
|
||||
use common_telemetry::logging::{TracingOptions, DEFAULT_LOGGING_DIR};
|
||||
use common_time::timezone::set_default_timezone;
|
||||
use common_version::{short_version, version};
|
||||
use common_version::{short_version, verbose_version};
|
||||
use frontend::frontend::Frontend;
|
||||
use frontend::heartbeat::HeartbeatTask;
|
||||
use frontend::instance::builder::FrontendBuilder;
|
||||
@@ -102,7 +102,7 @@ impl App for Instance {
|
||||
#[derive(Parser)]
|
||||
pub struct Command {
|
||||
#[clap(subcommand)]
|
||||
subcmd: SubCommand,
|
||||
pub subcmd: SubCommand,
|
||||
}
|
||||
|
||||
impl Command {
|
||||
@@ -116,7 +116,7 @@ impl Command {
|
||||
}
|
||||
|
||||
#[derive(Parser)]
|
||||
enum SubCommand {
|
||||
pub enum SubCommand {
|
||||
Start(StartCommand),
|
||||
}
|
||||
|
||||
@@ -153,7 +153,7 @@ pub struct StartCommand {
|
||||
#[clap(long)]
|
||||
postgres_addr: Option<String>,
|
||||
#[clap(short, long)]
|
||||
config_file: Option<String>,
|
||||
pub config_file: Option<String>,
|
||||
#[clap(short, long)]
|
||||
influxdb_enable: Option<bool>,
|
||||
#[clap(long, value_delimiter = ',', num_args = 1..)]
|
||||
@@ -169,7 +169,7 @@ pub struct StartCommand {
|
||||
#[clap(long)]
|
||||
disable_dashboard: Option<bool>,
|
||||
#[clap(long, default_value = "GREPTIMEDB_FRONTEND")]
|
||||
env_prefix: String,
|
||||
pub env_prefix: String,
|
||||
}
|
||||
|
||||
impl StartCommand {
|
||||
@@ -282,7 +282,7 @@ impl StartCommand {
|
||||
opts.component.slow_query.as_ref(),
|
||||
);
|
||||
|
||||
log_versions(version(), short_version(), APP_NAME);
|
||||
log_versions(verbose_version(), short_version(), APP_NAME);
|
||||
create_resource_limit_metrics(APP_NAME);
|
||||
|
||||
info!("Frontend start command: {:#?}", self);
|
||||
@@ -350,13 +350,20 @@ impl StartCommand {
|
||||
addrs::resolve_addr(&opts.grpc.bind_addr, Some(&opts.grpc.server_addr)),
|
||||
Some(meta_client.clone()),
|
||||
));
|
||||
let catalog_manager = KvBackendCatalogManager::new(
|
||||
|
||||
let builder = KvBackendCatalogManagerBuilder::new(
|
||||
information_extension,
|
||||
cached_meta_backend.clone(),
|
||||
layered_cache_registry.clone(),
|
||||
None,
|
||||
Some(process_manager.clone()),
|
||||
);
|
||||
)
|
||||
.with_process_manager(process_manager.clone());
|
||||
#[cfg(feature = "enterprise")]
|
||||
let builder = if let Some(factories) = plugins.get() {
|
||||
builder.with_extra_information_table_factories(factories)
|
||||
} else {
|
||||
builder
|
||||
};
|
||||
let catalog_manager = builder.build();
|
||||
|
||||
let executor = HandlerGroupExecutor::new(vec![
|
||||
Arc::new(ParseMailboxMessageHandler),
|
||||
|
||||
@@ -112,7 +112,7 @@ pub trait App: Send {
|
||||
pub fn log_versions(version: &str, short_version: &str, app: &str) {
|
||||
// Report app version as gauge.
|
||||
APP_VERSION
|
||||
.with_label_values(&[env!("CARGO_PKG_VERSION"), short_version, app])
|
||||
.with_label_values(&[common_version::version(), short_version, app])
|
||||
.inc();
|
||||
|
||||
// Log version and argument flags.
|
||||
|
||||
@@ -22,7 +22,7 @@ use common_base::Plugins;
|
||||
use common_config::Configurable;
|
||||
use common_telemetry::info;
|
||||
use common_telemetry::logging::{TracingOptions, DEFAULT_LOGGING_DIR};
|
||||
use common_version::{short_version, version};
|
||||
use common_version::{short_version, verbose_version};
|
||||
use meta_srv::bootstrap::MetasrvInstance;
|
||||
use meta_srv::metasrv::BackendImpl;
|
||||
use snafu::ResultExt;
|
||||
@@ -54,6 +54,10 @@ impl Instance {
|
||||
pub fn get_inner(&self) -> &MetasrvInstance {
|
||||
&self.instance
|
||||
}
|
||||
|
||||
pub fn mut_inner(&mut self) -> &mut MetasrvInstance {
|
||||
&mut self.instance
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
@@ -320,7 +324,7 @@ impl StartCommand {
|
||||
None,
|
||||
);
|
||||
|
||||
log_versions(version(), short_version(), APP_NAME);
|
||||
log_versions(verbose_version(), short_version(), APP_NAME);
|
||||
create_resource_limit_metrics(APP_NAME);
|
||||
|
||||
info!("Metasrv start command: {:#?}", self);
|
||||
@@ -336,12 +340,12 @@ impl StartCommand {
|
||||
.await
|
||||
.context(StartMetaServerSnafu)?;
|
||||
|
||||
let builder = meta_srv::bootstrap::metasrv_builder(&opts, plugins.clone(), None)
|
||||
let builder = meta_srv::bootstrap::metasrv_builder(&opts, plugins, None)
|
||||
.await
|
||||
.context(error::BuildMetaServerSnafu)?;
|
||||
let metasrv = builder.build().await.context(error::BuildMetaServerSnafu)?;
|
||||
|
||||
let instance = MetasrvInstance::new(opts, plugins, metasrv)
|
||||
let instance = MetasrvInstance::new(metasrv)
|
||||
.await
|
||||
.context(error::BuildMetaServerSnafu)?;
|
||||
|
||||
|
||||
@@ -20,7 +20,7 @@ use std::{fs, path};
|
||||
use async_trait::async_trait;
|
||||
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
|
||||
use catalog::information_schema::InformationExtension;
|
||||
use catalog::kvbackend::KvBackendCatalogManager;
|
||||
use catalog::kvbackend::KvBackendCatalogManagerBuilder;
|
||||
use catalog::process_manager::ProcessManager;
|
||||
use clap::Parser;
|
||||
use client::api::v1::meta::RegionRole;
|
||||
@@ -30,20 +30,16 @@ use common_catalog::consts::{MIN_USER_FLOW_ID, MIN_USER_TABLE_ID};
|
||||
use common_config::{metadata_store_dir, Configurable, KvBackendConfig};
|
||||
use common_error::ext::BoxedError;
|
||||
use common_meta::cache::LayeredCacheRegistryBuilder;
|
||||
use common_meta::cache_invalidator::CacheInvalidatorRef;
|
||||
use common_meta::cluster::{NodeInfo, NodeStatus};
|
||||
use common_meta::datanode::RegionStat;
|
||||
use common_meta::ddl::flow_meta::{FlowMetadataAllocator, FlowMetadataAllocatorRef};
|
||||
use common_meta::ddl::table_meta::{TableMetadataAllocator, TableMetadataAllocatorRef};
|
||||
use common_meta::ddl::flow_meta::FlowMetadataAllocator;
|
||||
use common_meta::ddl::table_meta::TableMetadataAllocator;
|
||||
use common_meta::ddl::{DdlContext, NoopRegionFailureDetectorControl, ProcedureExecutorRef};
|
||||
use common_meta::ddl_manager::DdlManager;
|
||||
#[cfg(feature = "enterprise")]
|
||||
use common_meta::ddl_manager::TriggerDdlManagerRef;
|
||||
use common_meta::key::flow::flow_state::FlowStat;
|
||||
use common_meta::key::flow::{FlowMetadataManager, FlowMetadataManagerRef};
|
||||
use common_meta::key::flow::FlowMetadataManager;
|
||||
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
|
||||
use common_meta::kv_backend::KvBackendRef;
|
||||
use common_meta::node_manager::NodeManagerRef;
|
||||
use common_meta::peer::Peer;
|
||||
use common_meta::region_keeper::MemoryRegionKeeper;
|
||||
use common_meta::region_registry::LeaderRegionRegistry;
|
||||
@@ -55,7 +51,7 @@ use common_telemetry::logging::{
|
||||
LoggingOptions, SlowQueryOptions, TracingOptions, DEFAULT_LOGGING_DIR,
|
||||
};
|
||||
use common_time::timezone::set_default_timezone;
|
||||
use common_version::{short_version, version};
|
||||
use common_version::{short_version, verbose_version};
|
||||
use common_wal::config::DatanodeWalConfig;
|
||||
use datanode::config::{DatanodeOptions, ProcedureConfig, RegionEngineConfig, StorageConfig};
|
||||
use datanode::datanode::{Datanode, DatanodeBuilder};
|
||||
@@ -261,15 +257,34 @@ pub struct Instance {
|
||||
flownode: FlownodeInstance,
|
||||
procedure_manager: ProcedureManagerRef,
|
||||
wal_options_allocator: WalOptionsAllocatorRef,
|
||||
|
||||
// The components of the standalone instance, kept here to make it easier to extend
|
||||
// functionality built on top of them.
|
||||
#[cfg(feature = "enterprise")]
|
||||
components: Components,
|
||||
|
||||
// Keep the logging guard to prevent the worker from being dropped.
|
||||
_guard: Vec<WorkerGuard>,
|
||||
}
|
||||
|
||||
#[cfg(feature = "enterprise")]
|
||||
pub struct Components {
|
||||
pub plugins: Plugins,
|
||||
pub kv_backend: KvBackendRef,
|
||||
pub frontend_client: Arc<FrontendClient>,
|
||||
pub catalog_manager: catalog::CatalogManagerRef,
|
||||
}
|
||||
|
||||
impl Instance {
|
||||
/// Find the socket addr of a server by its `name`.
|
||||
pub fn server_addr(&self, name: &str) -> Option<SocketAddr> {
|
||||
self.frontend.server_handlers().addr(name)
|
||||
}
|
||||
|
||||
#[cfg(feature = "enterprise")]
|
||||
pub fn components(&self) -> &Components {
|
||||
&self.components
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
@@ -470,7 +485,7 @@ impl StartCommand {
|
||||
opts.component.slow_query.as_ref(),
|
||||
);
|
||||
|
||||
log_versions(version(), short_version(), APP_NAME);
|
||||
log_versions(verbose_version(), short_version(), APP_NAME);
|
||||
create_resource_limit_metrics(APP_NAME);
|
||||
|
||||
info!("Standalone start command: {:#?}", self);
|
||||
@@ -529,13 +544,20 @@ impl StartCommand {
|
||||
));
|
||||
|
||||
let process_manager = Arc::new(ProcessManager::new(opts.grpc.server_addr.clone(), None));
|
||||
let catalog_manager = KvBackendCatalogManager::new(
|
||||
let builder = KvBackendCatalogManagerBuilder::new(
|
||||
information_extension.clone(),
|
||||
kv_backend.clone(),
|
||||
layered_cache_registry.clone(),
|
||||
Some(procedure_manager.clone()),
|
||||
Some(process_manager.clone()),
|
||||
);
|
||||
)
|
||||
.with_procedure_manager(procedure_manager.clone())
|
||||
.with_process_manager(process_manager.clone());
|
||||
#[cfg(feature = "enterprise")]
|
||||
let builder = if let Some(factories) = plugins.get() {
|
||||
builder.with_extra_information_table_factories(factories)
|
||||
} else {
|
||||
builder
|
||||
};
|
||||
let catalog_manager = builder.build();
|
||||
|
||||
let table_metadata_manager =
|
||||
Self::create_table_metadata_manager(kv_backend.clone()).await?;
|
||||
@@ -549,14 +571,15 @@ impl StartCommand {
|
||||
// Standalone mode does not go through gRPC; it only obtains a handle to the frontend
|
||||
// gRPC client without actually making a connection.
|
||||
let (frontend_client, frontend_instance_handler) =
|
||||
FrontendClient::from_empty_grpc_handler();
|
||||
FrontendClient::from_empty_grpc_handler(opts.query.clone());
|
||||
let frontend_client = Arc::new(frontend_client);
|
||||
let flow_builder = FlownodeBuilder::new(
|
||||
flownode_options,
|
||||
plugins.clone(),
|
||||
table_metadata_manager.clone(),
|
||||
catalog_manager.clone(),
|
||||
flow_metadata_manager.clone(),
|
||||
Arc::new(frontend_client.clone()),
|
||||
frontend_client.clone(),
|
||||
);
|
||||
let flownode = flow_builder
|
||||
.build()
|
||||
@@ -594,28 +617,36 @@ impl StartCommand {
|
||||
.await
|
||||
.context(error::BuildWalOptionsAllocatorSnafu)?;
|
||||
let wal_options_allocator = Arc::new(wal_options_allocator);
|
||||
let table_meta_allocator = Arc::new(TableMetadataAllocator::new(
|
||||
let table_metadata_allocator = Arc::new(TableMetadataAllocator::new(
|
||||
table_id_sequence,
|
||||
wal_options_allocator.clone(),
|
||||
));
|
||||
let flow_meta_allocator = Arc::new(FlowMetadataAllocator::with_noop_peer_allocator(
|
||||
let flow_metadata_allocator = Arc::new(FlowMetadataAllocator::with_noop_peer_allocator(
|
||||
flow_id_sequence,
|
||||
));
|
||||
|
||||
let ddl_context = DdlContext {
|
||||
node_manager: node_manager.clone(),
|
||||
cache_invalidator: layered_cache_registry.clone(),
|
||||
memory_region_keeper: Arc::new(MemoryRegionKeeper::default()),
|
||||
leader_region_registry: Arc::new(LeaderRegionRegistry::default()),
|
||||
table_metadata_manager: table_metadata_manager.clone(),
|
||||
table_metadata_allocator: table_metadata_allocator.clone(),
|
||||
flow_metadata_manager: flow_metadata_manager.clone(),
|
||||
flow_metadata_allocator: flow_metadata_allocator.clone(),
|
||||
region_failure_detector_controller: Arc::new(NoopRegionFailureDetectorControl),
|
||||
};
|
||||
let procedure_manager_c = procedure_manager.clone();
|
||||
|
||||
let ddl_manager = DdlManager::try_new(ddl_context, procedure_manager_c, true)
|
||||
.context(error::InitDdlManagerSnafu)?;
|
||||
#[cfg(feature = "enterprise")]
|
||||
let trigger_ddl_manager: Option<TriggerDdlManagerRef> = plugins.get();
|
||||
let ddl_task_executor = Self::create_ddl_task_executor(
|
||||
procedure_manager.clone(),
|
||||
node_manager.clone(),
|
||||
layered_cache_registry.clone(),
|
||||
table_metadata_manager,
|
||||
table_meta_allocator,
|
||||
flow_metadata_manager,
|
||||
flow_meta_allocator,
|
||||
#[cfg(feature = "enterprise")]
|
||||
trigger_ddl_manager,
|
||||
)
|
||||
.await?;
|
||||
let ddl_manager = {
|
||||
let trigger_ddl_manager: Option<common_meta::ddl_manager::TriggerDdlManagerRef> =
|
||||
plugins.get();
|
||||
ddl_manager.with_trigger_ddl_manager(trigger_ddl_manager)
|
||||
};
|
||||
let ddl_task_executor: ProcedureExecutorRef = Arc::new(ddl_manager);
|
||||
|
||||
let fe_instance = FrontendBuilder::new(
|
||||
fe_opts.clone(),
|
||||
@@ -658,7 +689,7 @@ impl StartCommand {
|
||||
let export_metrics_task = ExportMetricsTask::try_new(&opts.export_metrics, Some(&plugins))
|
||||
.context(error::ServersSnafu)?;
|
||||
|
||||
let servers = Services::new(opts, fe_instance.clone(), plugins)
|
||||
let servers = Services::new(opts, fe_instance.clone(), plugins.clone())
|
||||
.build()
|
||||
.context(error::StartFrontendSnafu)?;
|
||||
|
||||
@@ -669,51 +700,26 @@ impl StartCommand {
|
||||
export_metrics_task,
|
||||
};
|
||||
|
||||
#[cfg(feature = "enterprise")]
|
||||
let components = Components {
|
||||
plugins,
|
||||
kv_backend,
|
||||
frontend_client,
|
||||
catalog_manager,
|
||||
};
|
||||
|
||||
Ok(Instance {
|
||||
datanode,
|
||||
frontend,
|
||||
flownode,
|
||||
procedure_manager,
|
||||
wal_options_allocator,
|
||||
#[cfg(feature = "enterprise")]
|
||||
components,
|
||||
_guard: guard,
|
||||
})
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub async fn create_ddl_task_executor(
|
||||
procedure_manager: ProcedureManagerRef,
|
||||
node_manager: NodeManagerRef,
|
||||
cache_invalidator: CacheInvalidatorRef,
|
||||
table_metadata_manager: TableMetadataManagerRef,
|
||||
table_metadata_allocator: TableMetadataAllocatorRef,
|
||||
flow_metadata_manager: FlowMetadataManagerRef,
|
||||
flow_metadata_allocator: FlowMetadataAllocatorRef,
|
||||
#[cfg(feature = "enterprise")] trigger_ddl_manager: Option<TriggerDdlManagerRef>,
|
||||
) -> Result<ProcedureExecutorRef> {
|
||||
let procedure_executor: ProcedureExecutorRef = Arc::new(
|
||||
DdlManager::try_new(
|
||||
DdlContext {
|
||||
node_manager,
|
||||
cache_invalidator,
|
||||
memory_region_keeper: Arc::new(MemoryRegionKeeper::default()),
|
||||
leader_region_registry: Arc::new(LeaderRegionRegistry::default()),
|
||||
table_metadata_manager,
|
||||
table_metadata_allocator,
|
||||
flow_metadata_manager,
|
||||
flow_metadata_allocator,
|
||||
region_failure_detector_controller: Arc::new(NoopRegionFailureDetectorControl),
|
||||
},
|
||||
procedure_manager,
|
||||
true,
|
||||
#[cfg(feature = "enterprise")]
|
||||
trigger_ddl_manager,
|
||||
)
|
||||
.context(error::InitDdlManagerSnafu)?,
|
||||
);
|
||||
|
||||
Ok(procedure_executor)
|
||||
}
|
||||
|
||||
pub async fn create_table_metadata_manager(
|
||||
kv_backend: KvBackendRef,
|
||||
) -> Result<TableMetadataManagerRef> {
|
||||
@@ -849,7 +855,7 @@ mod tests {
|
||||
use common_config::ENV_VAR_SEP;
|
||||
use common_test_util::temp_dir::create_named_temp_file;
|
||||
use common_wal::config::DatanodeWalConfig;
|
||||
use datanode::config::{FileConfig, GcsConfig};
|
||||
use object_store::config::{FileConfig, GcsConfig};
|
||||
|
||||
use super::*;
|
||||
use crate::options::GlobalOptions;
|
||||
@@ -968,15 +974,15 @@ mod tests {
|
||||
|
||||
assert!(matches!(
|
||||
&dn_opts.storage.store,
|
||||
datanode::config::ObjectStoreConfig::File(FileConfig { .. })
|
||||
object_store::config::ObjectStoreConfig::File(FileConfig { .. })
|
||||
));
|
||||
assert_eq!(dn_opts.storage.providers.len(), 2);
|
||||
assert!(matches!(
|
||||
dn_opts.storage.providers[0],
|
||||
datanode::config::ObjectStoreConfig::Gcs(GcsConfig { .. })
|
||||
object_store::config::ObjectStoreConfig::Gcs(GcsConfig { .. })
|
||||
));
|
||||
match &dn_opts.storage.providers[1] {
|
||||
datanode::config::ObjectStoreConfig::S3(s3_config) => {
|
||||
object_store::config::ObjectStoreConfig::S3(s3_config) => {
|
||||
assert_eq!(
|
||||
"SecretBox<alloc::string::String>([REDACTED])".to_string(),
|
||||
format!("{:?}", s3_config.access_key_id)
|
||||
|
||||
@@ -18,17 +18,19 @@ use cmd::options::GreptimeOptions;
|
||||
use cmd::standalone::StandaloneOptions;
|
||||
use common_config::{Configurable, DEFAULT_DATA_HOME};
|
||||
use common_options::datanode::{ClientOptions, DatanodeClientOptions};
|
||||
use common_telemetry::logging::{LoggingOptions, DEFAULT_LOGGING_DIR, DEFAULT_OTLP_ENDPOINT};
|
||||
use common_telemetry::logging::{LoggingOptions, DEFAULT_LOGGING_DIR, DEFAULT_OTLP_HTTP_ENDPOINT};
|
||||
use common_wal::config::raft_engine::RaftEngineConfig;
|
||||
use common_wal::config::DatanodeWalConfig;
|
||||
use datanode::config::{DatanodeOptions, RegionEngineConfig, StorageConfig};
|
||||
use file_engine::config::EngineConfig as FileEngineConfig;
|
||||
use flow::FlownodeOptions;
|
||||
use frontend::frontend::FrontendOptions;
|
||||
use meta_client::MetaClientOptions;
|
||||
use meta_srv::metasrv::MetasrvOptions;
|
||||
use meta_srv::selector::SelectorType;
|
||||
use metric_engine::config::EngineConfig as MetricEngineConfig;
|
||||
use mito2::config::MitoConfig;
|
||||
use query::options::QueryOptions;
|
||||
use servers::export_metrics::ExportMetricsOption;
|
||||
use servers::grpc::GrpcOptions;
|
||||
use servers::http::HttpOptions;
|
||||
@@ -81,7 +83,7 @@ fn test_load_datanode_example_config() {
|
||||
logging: LoggingOptions {
|
||||
level: Some("info".to_string()),
|
||||
dir: format!("{}/{}", DEFAULT_DATA_HOME, DEFAULT_LOGGING_DIR),
|
||||
otlp_endpoint: Some(DEFAULT_OTLP_ENDPOINT.to_string()),
|
||||
otlp_endpoint: Some(DEFAULT_OTLP_HTTP_ENDPOINT.to_string()),
|
||||
tracing_sample_ratio: Some(Default::default()),
|
||||
..Default::default()
|
||||
},
|
||||
@@ -124,7 +126,7 @@ fn test_load_frontend_example_config() {
|
||||
logging: LoggingOptions {
|
||||
level: Some("info".to_string()),
|
||||
dir: format!("{}/{}", DEFAULT_DATA_HOME, DEFAULT_LOGGING_DIR),
|
||||
otlp_endpoint: Some(DEFAULT_OTLP_ENDPOINT.to_string()),
|
||||
otlp_endpoint: Some(DEFAULT_OTLP_HTTP_ENDPOINT.to_string()),
|
||||
tracing_sample_ratio: Some(Default::default()),
|
||||
..Default::default()
|
||||
},
|
||||
@@ -172,7 +174,7 @@ fn test_load_metasrv_example_config() {
|
||||
logging: LoggingOptions {
|
||||
dir: format!("{}/{}", DEFAULT_DATA_HOME, DEFAULT_LOGGING_DIR),
|
||||
level: Some("info".to_string()),
|
||||
otlp_endpoint: Some(DEFAULT_OTLP_ENDPOINT.to_string()),
|
||||
otlp_endpoint: Some(DEFAULT_OTLP_HTTP_ENDPOINT.to_string()),
|
||||
tracing_sample_ratio: Some(Default::default()),
|
||||
..Default::default()
|
||||
},
|
||||
@@ -195,6 +197,57 @@ fn test_load_metasrv_example_config() {
|
||||
similar_asserts::assert_eq!(options, expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_load_flownode_example_config() {
|
||||
let example_config = common_test_util::find_workspace_path("config/flownode.example.toml");
|
||||
let options =
|
||||
GreptimeOptions::<FlownodeOptions>::load_layered_options(example_config.to_str(), "")
|
||||
.unwrap();
|
||||
let expected = GreptimeOptions::<FlownodeOptions> {
|
||||
component: FlownodeOptions {
|
||||
node_id: Some(14),
|
||||
flow: Default::default(),
|
||||
grpc: GrpcOptions {
|
||||
bind_addr: "127.0.0.1:6800".to_string(),
|
||||
server_addr: "127.0.0.1:6800".to_string(),
|
||||
runtime_size: 2,
|
||||
..Default::default()
|
||||
},
|
||||
logging: LoggingOptions {
|
||||
dir: format!("{}/{}", DEFAULT_DATA_HOME, DEFAULT_LOGGING_DIR),
|
||||
level: Some("info".to_string()),
|
||||
otlp_endpoint: Some(DEFAULT_OTLP_HTTP_ENDPOINT.to_string()),
|
||||
otlp_export_protocol: Some(common_telemetry::logging::OtlpExportProtocol::Http),
|
||||
tracing_sample_ratio: Some(Default::default()),
|
||||
..Default::default()
|
||||
},
|
||||
tracing: Default::default(),
|
||||
heartbeat: Default::default(),
|
||||
// flownode deliberately uses a lower query parallelism
|
||||
// to avoid overwhelming the frontend with too many queries
|
||||
query: QueryOptions { parallelism: 1 },
|
||||
meta_client: Some(MetaClientOptions {
|
||||
metasrv_addrs: vec!["127.0.0.1:3002".to_string()],
|
||||
timeout: Duration::from_secs(3),
|
||||
heartbeat_timeout: Duration::from_millis(500),
|
||||
ddl_timeout: Duration::from_secs(10),
|
||||
connect_timeout: Duration::from_secs(1),
|
||||
tcp_nodelay: true,
|
||||
metadata_cache_max_capacity: 100000,
|
||||
metadata_cache_ttl: Duration::from_secs(600),
|
||||
metadata_cache_tti: Duration::from_secs(300),
|
||||
}),
|
||||
http: HttpOptions {
|
||||
addr: "127.0.0.1:4000".to_string(),
|
||||
..Default::default()
|
||||
},
|
||||
user_provider: None,
|
||||
},
|
||||
..Default::default()
|
||||
};
|
||||
similar_asserts::assert_eq!(options, expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_load_standalone_example_config() {
|
||||
let example_config = common_test_util::find_workspace_path("config/standalone.example.toml");
|
||||
@@ -229,7 +282,7 @@ fn test_load_standalone_example_config() {
|
||||
logging: LoggingOptions {
|
||||
level: Some("info".to_string()),
|
||||
dir: format!("{}/{}", DEFAULT_DATA_HOME, DEFAULT_LOGGING_DIR),
|
||||
otlp_endpoint: Some(DEFAULT_OTLP_ENDPOINT.to_string()),
|
||||
otlp_endpoint: Some(DEFAULT_OTLP_HTTP_ENDPOINT.to_string()),
|
||||
tracing_sample_ratio: Some(Default::default()),
|
||||
..Default::default()
|
||||
},
|
||||
|
||||
@@ -78,7 +78,7 @@ pub const INFORMATION_SCHEMA_ROUTINES_TABLE_ID: u32 = 21;
|
||||
pub const INFORMATION_SCHEMA_SCHEMA_PRIVILEGES_TABLE_ID: u32 = 22;
|
||||
/// id for information_schema.TABLE_PRIVILEGES
|
||||
pub const INFORMATION_SCHEMA_TABLE_PRIVILEGES_TABLE_ID: u32 = 23;
|
||||
/// id for information_schema.TRIGGERS
|
||||
/// id for information_schema.TRIGGERS (for mysql)
|
||||
pub const INFORMATION_SCHEMA_TRIGGERS_TABLE_ID: u32 = 24;
|
||||
/// id for information_schema.GLOBAL_STATUS
|
||||
pub const INFORMATION_SCHEMA_GLOBAL_STATUS_TABLE_ID: u32 = 25;
|
||||
@@ -104,6 +104,8 @@ pub const INFORMATION_SCHEMA_PROCEDURE_INFO_TABLE_ID: u32 = 34;
|
||||
pub const INFORMATION_SCHEMA_REGION_STATISTICS_TABLE_ID: u32 = 35;
|
||||
/// id for information_schema.process_list
|
||||
pub const INFORMATION_SCHEMA_PROCESS_LIST_TABLE_ID: u32 = 36;
|
||||
/// id for information_schema.trigger_list (for greptimedb trigger)
|
||||
pub const INFORMATION_SCHEMA_TRIGGER_TABLE_ID: u32 = 37;
|
||||
|
||||
// ----- End of information_schema tables -----
|
||||
|
||||
|
||||
@@ -14,6 +14,7 @@ common-macro.workspace = true
|
||||
config.workspace = true
|
||||
humantime-serde.workspace = true
|
||||
num_cpus.workspace = true
|
||||
object-store.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
serde_with.workspace = true
|
||||
|
||||
@@ -106,7 +106,7 @@ mod tests {
|
||||
use common_telemetry::logging::LoggingOptions;
|
||||
use common_test_util::temp_dir::create_named_temp_file;
|
||||
use common_wal::config::DatanodeWalConfig;
|
||||
use datanode::config::{ObjectStoreConfig, StorageConfig};
|
||||
use datanode::config::StorageConfig;
|
||||
use meta_client::MetaClientOptions;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
@@ -212,7 +212,7 @@ mod tests {
|
||||
|
||||
// Check the configs from environment variables.
|
||||
match &opts.storage.store {
|
||||
ObjectStoreConfig::S3(s3_config) => {
|
||||
object_store::config::ObjectStoreConfig::S3(s3_config) => {
|
||||
assert_eq!(s3_config.bucket, "mybucket".to_string());
|
||||
}
|
||||
_ => panic!("unexpected store type"),
|
||||
|
||||
@@ -119,6 +119,11 @@ pub enum StatusCode {
|
||||
FlowAlreadyExists = 8000,
|
||||
FlowNotFound = 8001,
|
||||
// ====== End of flow related status code =====
|
||||
|
||||
// ====== Begin of trigger related status code =====
|
||||
TriggerAlreadyExists = 9000,
|
||||
TriggerNotFound = 9001,
|
||||
// ====== End of trigger related status code =====
|
||||
}
|
||||
|
||||
impl StatusCode {
|
||||
@@ -155,6 +160,8 @@ impl StatusCode {
|
||||
| StatusCode::RegionNotFound
|
||||
| StatusCode::FlowAlreadyExists
|
||||
| StatusCode::FlowNotFound
|
||||
| StatusCode::TriggerAlreadyExists
|
||||
| StatusCode::TriggerNotFound
|
||||
| StatusCode::RegionReadonly
|
||||
| StatusCode::TableColumnNotFound
|
||||
| StatusCode::TableColumnExists
|
||||
@@ -198,6 +205,8 @@ impl StatusCode {
|
||||
| StatusCode::PlanQuery
|
||||
| StatusCode::FlowAlreadyExists
|
||||
| StatusCode::FlowNotFound
|
||||
| StatusCode::TriggerAlreadyExists
|
||||
| StatusCode::TriggerNotFound
|
||||
| StatusCode::RegionNotReady
|
||||
| StatusCode::RegionBusy
|
||||
| StatusCode::RegionReadonly
|
||||
@@ -230,6 +239,48 @@ impl fmt::Display for StatusCode {
|
||||
}
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! define_from_tonic_status {
|
||||
($Error: ty, $Variant: ident) => {
|
||||
impl From<tonic::Status> for $Error {
|
||||
fn from(e: tonic::Status) -> Self {
|
||||
use snafu::location;
|
||||
|
||||
fn metadata_value(e: &tonic::Status, key: &str) -> Option<String> {
|
||||
e.metadata()
|
||||
.get(key)
|
||||
.and_then(|v| String::from_utf8(v.as_bytes().to_vec()).ok())
|
||||
}
|
||||
|
||||
let code = metadata_value(&e, $crate::GREPTIME_DB_HEADER_ERROR_CODE)
|
||||
.and_then(|s| {
|
||||
if let Ok(code) = s.parse::<u32>() {
|
||||
StatusCode::from_u32(code)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| match e.code() {
|
||||
tonic::Code::Cancelled => StatusCode::Cancelled,
|
||||
tonic::Code::DeadlineExceeded => StatusCode::DeadlineExceeded,
|
||||
_ => StatusCode::Internal,
|
||||
});
|
||||
|
||||
let msg = metadata_value(&e, $crate::GREPTIME_DB_HEADER_ERROR_MSG)
|
||||
.unwrap_or_else(|| e.message().to_string());
|
||||
|
||||
// TODO(LFC): Make the error variant defined automatically.
|
||||
Self::$Variant {
|
||||
code,
|
||||
msg,
|
||||
tonic_code: e.code(),
|
||||
location: location!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
}
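
// A hedged usage sketch for the macro above (the `External` variant and the enum shape
// are assumptions for illustration, not taken from this change): the caller defines a
// variant carrying the decoded status and then invokes the macro to generate the
// `From<tonic::Status>` impl.
//
// #[derive(Debug, snafu::Snafu)]
// pub enum Error {
//     #[snafu(display("External error, code: {code}, msg: {msg}"))]
//     External {
//         code: StatusCode,
//         msg: String,
//         tonic_code: tonic::Code,
//         #[snafu(implicit)]
//         location: snafu::Location,
//     },
// }
//
// define_from_tonic_status!(Error, External);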
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! define_into_tonic_status {
|
||||
($Error: ty) => {
|
||||
@@ -281,12 +332,14 @@ pub fn status_to_tonic_code(status_code: StatusCode) -> Code {
|
||||
| StatusCode::TableColumnExists
|
||||
| StatusCode::RegionAlreadyExists
|
||||
| StatusCode::DatabaseAlreadyExists
|
||||
| StatusCode::TriggerAlreadyExists
|
||||
| StatusCode::FlowAlreadyExists => Code::AlreadyExists,
|
||||
StatusCode::TableNotFound
|
||||
| StatusCode::RegionNotFound
|
||||
| StatusCode::TableColumnNotFound
|
||||
| StatusCode::DatabaseNotFound
|
||||
| StatusCode::UserNotFound
|
||||
| StatusCode::TriggerNotFound
|
||||
| StatusCode::FlowNotFound => Code::NotFound,
|
||||
StatusCode::TableUnavailable
|
||||
| StatusCode::StorageUnavailable
|
||||
@@ -304,15 +357,6 @@ pub fn status_to_tonic_code(status_code: StatusCode) -> Code {
|
||||
}
|
||||
}
|
||||
|
||||
/// Converts tonic [Code] to [StatusCode].
|
||||
pub fn convert_tonic_code_to_status_code(code: Code) -> StatusCode {
|
||||
match code {
|
||||
Code::Cancelled => StatusCode::Cancelled,
|
||||
Code::DeadlineExceeded => StatusCode::DeadlineExceeded,
|
||||
_ => StatusCode::Internal,
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use strum::IntoEnumIterator;
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt::Debug;
|
||||
use std::time::Duration;
|
||||
|
||||
use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
|
||||
@@ -30,7 +31,7 @@ use crate::error::{MetaSnafu, Result};
|
||||
pub type FrontendClientPtr = Box<dyn FrontendClient>;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
pub trait FrontendClient: Send {
|
||||
pub trait FrontendClient: Send + Debug {
|
||||
async fn list_process(&mut self, req: ListProcessRequest) -> Result<ListProcessResponse>;
|
||||
|
||||
async fn kill_process(&mut self, req: KillProcessRequest) -> Result<KillProcessResponse>;
|
||||
|
||||
@@ -33,6 +33,7 @@ common-version.workspace = true
|
||||
datafusion.workspace = true
|
||||
datafusion-common.workspace = true
|
||||
datafusion-expr.workspace = true
|
||||
datafusion-functions-aggregate-common.workspace = true
|
||||
datatypes.workspace = true
|
||||
derive_more = { version = "1", default-features = false, features = ["display"] }
|
||||
geo = { version = "0.29", optional = true }
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
// limitations under the License.
|
||||
|
||||
pub mod approximate;
|
||||
pub mod count_hash;
|
||||
#[cfg(feature = "geo")]
|
||||
pub mod geo;
|
||||
pub mod vector;
|
||||
|
||||
src/common/function/src/aggrs/count_hash.rs (new file, 647 lines)
@@ -0,0 +1,647 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! `CountHash` / `count_hash` is a hash-based approximate distinct count function.
|
||||
//!
|
||||
//! It is a variant of `CountDistinct` that uses a hash function to approximate the
|
||||
//! distinct count.
|
||||
//! It is designed to be more efficient than `CountDistinct` for large datasets,
|
||||
//! but it is not as accurate, because hash values may collide.
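//!
//! A minimal sketch of the underlying idea (illustrative only; `approx_distinct` is a
//! made-up helper name, not part of this module):
//!
//! ```ignore
//! use std::collections::hash_map::RandomState;
//! use std::collections::HashSet;
//! use std::hash::{BuildHasher, Hash};
//!
//! fn approx_distinct<T: Hash>(values: &[T]) -> usize {
//!     let hasher = RandomState::new();
//!     let mut seen: HashSet<u64> = HashSet::new();
//!     for v in values {
//!         // Two distinct values may collide on the same hash, slightly undercounting.
//!         seen.insert(hasher.hash_one(v));
//!     }
//!     seen.len()
//! }
//! ```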
|
||||
|
||||
use std::collections::HashSet;
|
||||
use std::fmt::Debug;
|
||||
use std::sync::Arc;
|
||||
|
||||
use ahash::RandomState;
|
||||
use datafusion_common::cast::as_list_array;
|
||||
use datafusion_common::error::Result;
|
||||
use datafusion_common::hash_utils::create_hashes;
|
||||
use datafusion_common::utils::SingleRowListArrayBuilder;
|
||||
use datafusion_common::{internal_err, not_impl_err, ScalarValue};
|
||||
use datafusion_expr::function::{AccumulatorArgs, StateFieldsArgs};
|
||||
use datafusion_expr::utils::{format_state_name, AggregateOrderSensitivity};
|
||||
use datafusion_expr::{
|
||||
Accumulator, AggregateUDF, AggregateUDFImpl, EmitTo, GroupsAccumulator, ReversedUDAF,
|
||||
SetMonotonicity, Signature, TypeSignature, Volatility,
|
||||
};
|
||||
use datafusion_functions_aggregate_common::aggregate::groups_accumulator::nulls::filtered_null_mask;
|
||||
use datatypes::arrow;
|
||||
use datatypes::arrow::array::{
|
||||
Array, ArrayRef, AsArray, BooleanArray, Int64Array, ListArray, UInt64Array,
|
||||
};
|
||||
use datatypes::arrow::buffer::{OffsetBuffer, ScalarBuffer};
|
||||
use datatypes::arrow::datatypes::{DataType, Field};
|
||||
|
||||
use crate::function_registry::FunctionRegistry;
|
||||
|
||||
type HashValueType = u64;
|
||||
|
||||
// read from /dev/urandom 4047821dc6144e4b2abddf23ad4171126a52eeecd26eff2191cf673b965a7875
|
||||
const RANDOM_SEED_0: u64 = 0x4047821dc6144e4b;
|
||||
const RANDOM_SEED_1: u64 = 0x2abddf23ad417112;
|
||||
const RANDOM_SEED_2: u64 = 0x6a52eeecd26eff21;
|
||||
const RANDOM_SEED_3: u64 = 0x91cf673b965a7875;
|
||||
|
||||
impl CountHash {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register_aggr(CountHash::udf_impl());
|
||||
}
|
||||
|
||||
pub fn udf_impl() -> AggregateUDF {
|
||||
AggregateUDF::new_from_impl(CountHash {
|
||||
signature: Signature::one_of(
|
||||
vec![TypeSignature::VariadicAny, TypeSignature::Nullary],
|
||||
Volatility::Immutable,
|
||||
),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct CountHash {
|
||||
signature: Signature,
|
||||
}
|
||||
|
||||
impl AggregateUDFImpl for CountHash {
|
||||
fn as_any(&self) -> &dyn std::any::Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn name(&self) -> &str {
|
||||
"count_hash"
|
||||
}
|
||||
|
||||
fn signature(&self) -> &Signature {
|
||||
&self.signature
|
||||
}
|
||||
|
||||
fn return_type(&self, _arg_types: &[DataType]) -> Result<DataType> {
|
||||
Ok(DataType::Int64)
|
||||
}
|
||||
|
||||
fn is_nullable(&self) -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
fn state_fields(&self, args: StateFieldsArgs) -> Result<Vec<Field>> {
|
||||
Ok(vec![Field::new_list(
|
||||
format_state_name(args.name, "count_hash"),
|
||||
Field::new_list_field(DataType::UInt64, true),
|
||||
// For the count_hash accumulator, a null list item stands for an
|
||||
// empty value set (i.e., only NULL values so far for that group).
|
||||
true,
|
||||
)])
|
||||
}
|
||||
|
||||
fn accumulator(&self, acc_args: AccumulatorArgs) -> Result<Box<dyn Accumulator>> {
|
||||
if acc_args.exprs.len() > 1 {
|
||||
return not_impl_err!("count_hash with multiple arguments");
|
||||
}
|
||||
|
||||
Ok(Box::new(CountHashAccumulator {
|
||||
values: HashSet::default(),
|
||||
random_state: RandomState::with_seeds(
|
||||
RANDOM_SEED_0,
|
||||
RANDOM_SEED_1,
|
||||
RANDOM_SEED_2,
|
||||
RANDOM_SEED_3,
|
||||
),
|
||||
batch_hashes: vec![],
|
||||
}))
|
||||
}
|
||||
|
||||
fn aliases(&self) -> &[String] {
|
||||
&[]
|
||||
}
|
||||
|
||||
fn groups_accumulator_supported(&self, _args: AccumulatorArgs) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn create_groups_accumulator(
|
||||
&self,
|
||||
args: AccumulatorArgs,
|
||||
) -> Result<Box<dyn GroupsAccumulator>> {
|
||||
if args.exprs.len() > 1 {
|
||||
return not_impl_err!("count_hash with multiple arguments");
|
||||
}
|
||||
|
||||
Ok(Box::new(CountHashGroupAccumulator::new()))
|
||||
}
|
||||
|
||||
fn reverse_expr(&self) -> ReversedUDAF {
|
||||
ReversedUDAF::Identical
|
||||
}
|
||||
|
||||
fn order_sensitivity(&self) -> AggregateOrderSensitivity {
|
||||
AggregateOrderSensitivity::Insensitive
|
||||
}
|
||||
|
||||
fn default_value(&self, _data_type: &DataType) -> Result<ScalarValue> {
|
||||
Ok(ScalarValue::Int64(Some(0)))
|
||||
}
|
||||
|
||||
fn set_monotonicity(&self, _data_type: &DataType) -> SetMonotonicity {
|
||||
SetMonotonicity::Increasing
|
||||
}
|
||||
}
|
||||
|
||||
/// GroupsAccumulator for `count_hash` aggregate function
|
||||
#[derive(Debug)]
|
||||
pub struct CountHashGroupAccumulator {
|
||||
/// One HashSet per group to track distinct values
|
||||
distinct_sets: Vec<HashSet<HashValueType, RandomState>>,
|
||||
random_state: RandomState,
|
||||
batch_hashes: Vec<HashValueType>,
|
||||
}
|
||||
|
||||
impl Default for CountHashGroupAccumulator {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl CountHashGroupAccumulator {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
distinct_sets: vec![],
|
||||
random_state: RandomState::with_seeds(
|
||||
RANDOM_SEED_0,
|
||||
RANDOM_SEED_1,
|
||||
RANDOM_SEED_2,
|
||||
RANDOM_SEED_3,
|
||||
),
|
||||
batch_hashes: vec![],
|
||||
}
|
||||
}
|
||||
|
||||
fn ensure_sets(&mut self, total_num_groups: usize) {
|
||||
if self.distinct_sets.len() < total_num_groups {
|
||||
self.distinct_sets
|
||||
.resize_with(total_num_groups, HashSet::default);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl GroupsAccumulator for CountHashGroupAccumulator {
|
||||
fn update_batch(
|
||||
&mut self,
|
||||
values: &[ArrayRef],
|
||||
group_indices: &[usize],
|
||||
opt_filter: Option<&BooleanArray>,
|
||||
total_num_groups: usize,
|
||||
) -> Result<()> {
|
||||
assert_eq!(values.len(), 1, "count_hash expects a single argument");
|
||||
self.ensure_sets(total_num_groups);
|
||||
|
||||
let array = &values[0];
|
||||
self.batch_hashes.clear();
|
||||
self.batch_hashes.resize(array.len(), 0);
|
||||
let hashes = create_hashes(
|
||||
&[ArrayRef::clone(array)],
|
||||
&self.random_state,
|
||||
&mut self.batch_hashes,
|
||||
)?;
|
||||
|
||||
// Use a pattern similar to accumulate_indices to process rows
|
||||
// that are not null and pass the filter
|
||||
let nulls = array.logical_nulls();
|
||||
|
||||
match (nulls.as_ref(), opt_filter) {
|
||||
(None, None) => {
|
||||
// No nulls, no filter - process all rows
|
||||
for (row_idx, &group_idx) in group_indices.iter().enumerate() {
|
||||
self.distinct_sets[group_idx].insert(hashes[row_idx]);
|
||||
}
|
||||
}
|
||||
(Some(nulls), None) => {
|
||||
// Has nulls, no filter
|
||||
for (row_idx, (&group_idx, is_valid)) in
|
||||
group_indices.iter().zip(nulls.iter()).enumerate()
|
||||
{
|
||||
if is_valid {
|
||||
self.distinct_sets[group_idx].insert(hashes[row_idx]);
|
||||
}
|
||||
}
|
||||
}
|
||||
(None, Some(filter)) => {
|
||||
// No nulls, has filter
|
||||
for (row_idx, (&group_idx, filter_value)) in
|
||||
group_indices.iter().zip(filter.iter()).enumerate()
|
||||
{
|
||||
if let Some(true) = filter_value {
|
||||
self.distinct_sets[group_idx].insert(hashes[row_idx]);
|
||||
}
|
||||
}
|
||||
}
|
||||
(Some(nulls), Some(filter)) => {
|
||||
// Has nulls and filter
|
||||
let iter = filter
|
||||
.iter()
|
||||
.zip(group_indices.iter())
|
||||
.zip(nulls.iter())
|
||||
.enumerate();
|
||||
|
||||
for (row_idx, ((filter_value, &group_idx), is_valid)) in iter {
|
||||
if is_valid && filter_value == Some(true) {
|
||||
self.distinct_sets[group_idx].insert(hashes[row_idx]);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn evaluate(&mut self, emit_to: EmitTo) -> Result<ArrayRef> {
|
||||
let distinct_sets: Vec<HashSet<u64, RandomState>> =
|
||||
emit_to.take_needed(&mut self.distinct_sets);
|
||||
|
||||
let counts = distinct_sets
|
||||
.iter()
|
||||
.map(|set| set.len() as i64)
|
||||
.collect::<Vec<_>>();
|
||||
Ok(Arc::new(Int64Array::from(counts)))
|
||||
}
|
||||
|
||||
fn merge_batch(
|
||||
&mut self,
|
||||
values: &[ArrayRef],
|
||||
group_indices: &[usize],
|
||||
_opt_filter: Option<&BooleanArray>,
|
||||
total_num_groups: usize,
|
||||
) -> Result<()> {
|
||||
assert_eq!(
|
||||
values.len(),
|
||||
1,
|
||||
"count_hash merge expects a single state array"
|
||||
);
|
||||
self.ensure_sets(total_num_groups);
|
||||
|
||||
let list_array = as_list_array(&values[0])?;
|
||||
|
||||
// For each group in the incoming batch
|
||||
for (i, &group_idx) in group_indices.iter().enumerate() {
|
||||
if i < list_array.len() {
|
||||
let inner_array = list_array.value(i);
|
||||
let inner_array = inner_array.as_any().downcast_ref::<UInt64Array>().unwrap();
|
||||
// Add each value to our set for this group
|
||||
for j in 0..inner_array.len() {
|
||||
if !inner_array.is_null(j) {
|
||||
self.distinct_sets[group_idx].insert(inner_array.value(j));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn state(&mut self, emit_to: EmitTo) -> Result<Vec<ArrayRef>> {
|
||||
let distinct_sets: Vec<HashSet<u64, RandomState>> =
|
||||
emit_to.take_needed(&mut self.distinct_sets);
|
||||
|
||||
let mut offsets = Vec::with_capacity(distinct_sets.len() + 1);
|
||||
offsets.push(0);
|
||||
let mut curr_len = 0i32;
|
||||
|
||||
let mut value_iter = distinct_sets
|
||||
.into_iter()
|
||||
.flat_map(|set| {
|
||||
// build offset
|
||||
curr_len += set.len() as i32;
|
||||
offsets.push(curr_len);
|
||||
// convert into iter
|
||||
set.into_iter()
|
||||
})
|
||||
.peekable();
|
||||
let data_array: ArrayRef = if value_iter.peek().is_none() {
|
||||
arrow::array::new_empty_array(&DataType::UInt64) as _
|
||||
} else {
|
||||
Arc::new(UInt64Array::from_iter_values(value_iter))
|
||||
};
|
||||
let offset_buffer = OffsetBuffer::new(ScalarBuffer::from(offsets));
|
||||
|
||||
let list_array = ListArray::new(
|
||||
Arc::new(Field::new_list_field(DataType::UInt64, true)),
|
||||
offset_buffer,
|
||||
data_array,
|
||||
None,
|
||||
);
|
||||
|
||||
Ok(vec![Arc::new(list_array) as _])
|
||||
}
|
||||
|
||||
fn convert_to_state(
|
||||
&self,
|
||||
values: &[ArrayRef],
|
||||
opt_filter: Option<&BooleanArray>,
|
||||
) -> Result<Vec<ArrayRef>> {
|
||||
// For a single hash value per row, create a list array with that value
|
||||
assert_eq!(values.len(), 1, "count_hash expects a single argument");
|
||||
let values = ArrayRef::clone(&values[0]);
|
||||
|
||||
let offsets = OffsetBuffer::new(ScalarBuffer::from_iter(0..values.len() as i32 + 1));
|
||||
let nulls = filtered_null_mask(opt_filter, &values);
|
||||
let list_array = ListArray::new(
|
||||
Arc::new(Field::new_list_field(DataType::UInt64, true)),
|
||||
offsets,
|
||||
values,
|
||||
nulls,
|
||||
);
|
||||
|
||||
Ok(vec![Arc::new(list_array)])
|
||||
}
|
||||
|
||||
fn supports_convert_to_state(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn size(&self) -> usize {
|
||||
// Base size of the struct
|
||||
let mut size = size_of::<Self>();
|
||||
|
||||
// Size of the vector holding the HashSets
|
||||
size += size_of::<Vec<HashSet<HashValueType, RandomState>>>()
|
||||
+ self.distinct_sets.capacity() * size_of::<HashSet<HashValueType, RandomState>>();
|
||||
|
||||
// Estimate HashSet contents size more efficiently
|
||||
// Instead of iterating through all values which is expensive, use an approximation
|
||||
for set in &self.distinct_sets {
|
||||
// Base size of the HashSet
|
||||
size += set.capacity() * size_of::<HashValueType>();
|
||||
}
|
||||
|
||||
size
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct CountHashAccumulator {
|
||||
values: HashSet<HashValueType, RandomState>,
|
||||
random_state: RandomState,
|
||||
batch_hashes: Vec<HashValueType>,
|
||||
}
|
||||
|
||||
impl CountHashAccumulator {
|
||||
// Estimates the in-memory size for the fixed-size hash values: the accumulator itself
|
||||
// plus the capacity of the hash set.
|
||||
fn fixed_size(&self) -> usize {
|
||||
size_of_val(self) + (size_of::<HashValueType>() * self.values.capacity())
|
||||
}
|
||||
}
|
||||
|
||||
impl Accumulator for CountHashAccumulator {
|
||||
/// Returns the distinct values seen so far as (one element) ListArray.
|
||||
fn state(&mut self) -> Result<Vec<ScalarValue>> {
|
||||
let values = self.values.iter().cloned().collect::<Vec<_>>();
|
||||
let arr = Arc::new(UInt64Array::from(values)) as _;
|
||||
let list_scalar = SingleRowListArrayBuilder::new(arr).build_list_scalar();
|
||||
Ok(vec![list_scalar])
|
||||
}
|
||||
|
||||
fn update_batch(&mut self, values: &[ArrayRef]) -> Result<()> {
|
||||
if values.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let arr = &values[0];
|
||||
if arr.data_type() == &DataType::Null {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
self.batch_hashes.clear();
|
||||
self.batch_hashes.resize(arr.len(), 0);
|
||||
let hashes = create_hashes(
|
||||
&[ArrayRef::clone(arr)],
|
||||
&self.random_state,
|
||||
&mut self.batch_hashes,
|
||||
)?;
|
||||
for hash in hashes.as_slice() {
|
||||
self.values.insert(*hash);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Merges multiple sets of distinct values into the current set.
|
||||
///
|
||||
/// The input to this function is a `ListArray` with **multiple** rows,
|
||||
/// where each row contains the values from a partial aggregate's phase (e.g.
|
||||
/// the result of calling `Self::state` on multiple accumulators).
|
||||
fn merge_batch(&mut self, states: &[ArrayRef]) -> Result<()> {
|
||||
if states.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
assert_eq!(states.len(), 1, "count_hash states must be singleton!");
|
||||
let array = &states[0];
|
||||
let list_array = array.as_list::<i32>();
|
||||
for inner_array in list_array.iter() {
|
||||
let Some(inner_array) = inner_array else {
|
||||
return internal_err!(
|
||||
"Intermediate results of count_hash should always be non null"
|
||||
);
|
||||
};
|
||||
let hash_array = inner_array.as_any().downcast_ref::<UInt64Array>().unwrap();
|
||||
for i in 0..hash_array.len() {
|
||||
self.values.insert(hash_array.value(i));
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn evaluate(&mut self) -> Result<ScalarValue> {
|
||||
Ok(ScalarValue::Int64(Some(self.values.len() as i64)))
|
||||
}
|
||||
|
||||
fn size(&self) -> usize {
|
||||
self.fixed_size()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use datatypes::arrow::array::{Array, BooleanArray, Int32Array, Int64Array};
|
||||
|
||||
use super::*;
|
||||
|
||||
fn create_test_accumulator() -> CountHashAccumulator {
|
||||
CountHashAccumulator {
|
||||
values: HashSet::default(),
|
||||
random_state: RandomState::with_seeds(
|
||||
RANDOM_SEED_0,
|
||||
RANDOM_SEED_1,
|
||||
RANDOM_SEED_2,
|
||||
RANDOM_SEED_3,
|
||||
),
|
||||
batch_hashes: vec![],
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_count_hash_accumulator() -> Result<()> {
|
||||
let mut acc = create_test_accumulator();
|
||||
|
||||
// Test with some data
|
||||
let array = Arc::new(Int32Array::from(vec![
|
||||
Some(1),
|
||||
Some(2),
|
||||
Some(3),
|
||||
Some(1),
|
||||
Some(2),
|
||||
None,
|
||||
])) as ArrayRef;
|
||||
acc.update_batch(&[array])?;
|
||||
let result = acc.evaluate()?;
|
||||
assert_eq!(result, ScalarValue::Int64(Some(4)));
|
||||
|
||||
// Test with empty data
|
||||
let mut acc = create_test_accumulator();
|
||||
let array = Arc::new(Int32Array::from(vec![] as Vec<Option<i32>>)) as ArrayRef;
|
||||
acc.update_batch(&[array])?;
|
||||
let result = acc.evaluate()?;
|
||||
assert_eq!(result, ScalarValue::Int64(Some(0)));
|
||||
|
||||
// Test with only nulls
|
||||
let mut acc = create_test_accumulator();
|
||||
let array = Arc::new(Int32Array::from(vec![None, None, None])) as ArrayRef;
|
||||
acc.update_batch(&[array])?;
|
||||
let result = acc.evaluate()?;
|
||||
assert_eq!(result, ScalarValue::Int64(Some(1)));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_count_hash_accumulator_merge() -> Result<()> {
|
||||
// Accumulator 1
|
||||
let mut acc1 = create_test_accumulator();
|
||||
let array1 = Arc::new(Int32Array::from(vec![Some(1), Some(2), Some(3)])) as ArrayRef;
|
||||
acc1.update_batch(&[array1])?;
|
||||
let state1 = acc1.state()?;
|
||||
|
||||
// Accumulator 2
|
||||
let mut acc2 = create_test_accumulator();
|
||||
let array2 = Arc::new(Int32Array::from(vec![Some(3), Some(4), Some(5)])) as ArrayRef;
|
||||
acc2.update_batch(&[array2])?;
|
||||
let state2 = acc2.state()?;
|
||||
|
||||
// Merge state1 and state2 into a new accumulator
|
||||
let mut acc_merged = create_test_accumulator();
|
||||
let state_array1 = state1[0].to_array()?;
|
||||
let state_array2 = state2[0].to_array()?;
|
||||
|
||||
acc_merged.merge_batch(&[state_array1])?;
|
||||
acc_merged.merge_batch(&[state_array2])?;
|
||||
|
||||
let result = acc_merged.evaluate()?;
|
||||
// Distinct values are {1, 2, 3, 4, 5}, so count is 5
|
||||
assert_eq!(result, ScalarValue::Int64(Some(5)));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn create_test_group_accumulator() -> CountHashGroupAccumulator {
|
||||
CountHashGroupAccumulator::new()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_count_hash_group_accumulator() -> Result<()> {
|
||||
let mut acc = create_test_group_accumulator();
|
||||
let values = Arc::new(Int32Array::from(vec![1, 2, 1, 3, 2, 4, 5])) as ArrayRef;
|
||||
let group_indices = vec![0, 1, 0, 0, 1, 2, 0];
|
||||
let total_num_groups = 3;
|
||||
|
||||
acc.update_batch(&[values], &group_indices, None, total_num_groups)?;
|
||||
|
||||
let result_array = acc.evaluate(EmitTo::All)?;
|
||||
let result = result_array.as_any().downcast_ref::<Int64Array>().unwrap();
|
||||
|
||||
// Group 0: {1, 3, 5} -> 3
|
||||
// Group 1: {2} -> 1
|
||||
// Group 2: {4} -> 1
|
||||
assert_eq!(result.value(0), 3);
|
||||
assert_eq!(result.value(1), 1);
|
||||
assert_eq!(result.value(2), 1);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_count_hash_group_accumulator_with_filter() -> Result<()> {
|
||||
let mut acc = create_test_group_accumulator();
|
||||
let values = Arc::new(Int32Array::from(vec![1, 2, 3, 4, 5, 6])) as ArrayRef;
|
||||
let group_indices = vec![0, 0, 1, 1, 2, 2];
|
||||
let filter = BooleanArray::from(vec![true, false, true, true, false, true]);
|
||||
let total_num_groups = 3;
|
||||
|
||||
acc.update_batch(&[values], &group_indices, Some(&filter), total_num_groups)?;
|
||||
|
||||
let result_array = acc.evaluate(EmitTo::All)?;
|
||||
let result = result_array.as_any().downcast_ref::<Int64Array>().unwrap();
|
||||
|
||||
// Group 0: {1} (2 is filtered out) -> 1
|
||||
// Group 1: {3, 4} -> 2
|
||||
// Group 2: {6} (5 is filtered out) -> 1
|
||||
assert_eq!(result.value(0), 1);
|
||||
assert_eq!(result.value(1), 2);
|
||||
assert_eq!(result.value(2), 1);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_count_hash_group_accumulator_merge() -> Result<()> {
|
||||
// Accumulator 1
|
||||
let mut acc1 = create_test_group_accumulator();
|
||||
let values1 = Arc::new(Int32Array::from(vec![1, 2, 3, 4])) as ArrayRef;
|
||||
let group_indices1 = vec![0, 0, 1, 1];
|
||||
acc1.update_batch(&[values1], &group_indices1, None, 2)?;
|
||||
// acc1 state: group 0 -> {1, 2}, group 1 -> {3, 4}
|
||||
let state1 = acc1.state(EmitTo::All)?;
|
||||
|
||||
// Accumulator 2
|
||||
let mut acc2 = create_test_group_accumulator();
|
||||
let values2 = Arc::new(Int32Array::from(vec![5, 6, 1, 3])) as ArrayRef;
|
||||
// Merge into different group indices
|
||||
let group_indices2 = vec![2, 2, 0, 1];
|
||||
acc2.update_batch(&[values2], &group_indices2, None, 3)?;
|
||||
// acc2 state: group 0 -> {1}, group 1 -> {3}, group 2 -> {5, 6}
|
||||
|
||||
// Merge state from acc1 into acc2
|
||||
// We will merge acc1's group 0 into acc2's group 0
|
||||
// and acc1's group 1 into acc2's group 2
|
||||
let merge_group_indices = vec![0, 2];
|
||||
acc2.merge_batch(&state1, &merge_group_indices, None, 3)?;
|
||||
|
||||
let result_array = acc2.evaluate(EmitTo::All)?;
|
||||
let result = result_array.as_any().downcast_ref::<Int64Array>().unwrap();
|
||||
|
||||
// Final state of acc2:
|
||||
// Group 0: {1} U {1, 2} -> {1, 2}, count = 2
|
||||
// Group 1: {3}, count = 1
|
||||
// Group 2: {5, 6} U {3, 4} -> {3, 4, 5, 6}, count = 4
|
||||
assert_eq!(result.value(0), 2);
|
||||
assert_eq!(result.value(1), 1);
|
||||
assert_eq!(result.value(2), 4);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_size() {
|
||||
let acc = create_test_group_accumulator();
|
||||
// Just test that it doesn't crash and returns a positive value.
|
||||
assert!(acc.size() > 0);
|
||||
}
|
||||
}
|
||||
@@ -21,6 +21,7 @@ use once_cell::sync::Lazy;
|
||||
|
||||
use crate::admin::AdminFunction;
|
||||
use crate::aggrs::approximate::ApproximateFunction;
|
||||
use crate::aggrs::count_hash::CountHash;
|
||||
use crate::aggrs::vector::VectorFunction as VectorAggrFunction;
|
||||
use crate::function::{AsyncFunctionRef, Function, FunctionRef};
|
||||
use crate::function_factory::ScalarFunctionFactory;
|
||||
@@ -144,6 +145,9 @@ pub static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| {
|
||||
// Approximate functions
|
||||
ApproximateFunction::register(&function_registry);
|
||||
|
||||
// CountHash function
|
||||
CountHash::register(&function_registry);
|
||||
|
||||
Arc::new(function_registry)
|
||||
});
|
||||
|
||||
|
||||
@@ -14,7 +14,6 @@
|
||||
|
||||
pub mod clamp;
|
||||
mod modulo;
|
||||
mod pow;
|
||||
mod rate;
|
||||
|
||||
use std::fmt;
|
||||
@@ -26,7 +25,6 @@ use datafusion::error::DataFusionError;
|
||||
use datafusion::logical_expr::Volatility;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::vectors::VectorRef;
|
||||
pub use pow::PowFunction;
|
||||
pub use rate::RateFunction;
|
||||
use snafu::ResultExt;
|
||||
|
||||
@@ -39,7 +37,6 @@ pub(crate) struct MathFunction;
|
||||
impl MathFunction {
|
||||
pub fn register(registry: &FunctionRegistry) {
|
||||
registry.register_scalar(ModuloFunction);
|
||||
registry.register_scalar(PowFunction);
|
||||
registry.register_scalar(RateFunction);
|
||||
registry.register_scalar(RangeFunction);
|
||||
registry.register_scalar(ClampFunction);
|
||||
|
||||
@@ -1,120 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt;
|
||||
use std::sync::Arc;
|
||||
|
||||
use common_query::error::Result;
|
||||
use common_query::prelude::{Signature, Volatility};
|
||||
use datatypes::data_type::DataType;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::types::LogicalPrimitiveType;
|
||||
use datatypes::vectors::VectorRef;
|
||||
use datatypes::with_match_primitive_type_id;
|
||||
use num::traits::Pow;
|
||||
use num_traits::AsPrimitive;
|
||||
|
||||
use crate::function::{Function, FunctionContext};
|
||||
use crate::scalars::expression::{scalar_binary_op, EvalContext};
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct PowFunction;
|
||||
|
||||
impl Function for PowFunction {
|
||||
fn name(&self) -> &str {
|
||||
"pow"
|
||||
}
|
||||
|
||||
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
|
||||
Ok(ConcreteDataType::float64_datatype())
|
||||
}
|
||||
|
||||
fn signature(&self) -> Signature {
|
||||
Signature::uniform(2, ConcreteDataType::numerics(), Volatility::Immutable)
|
||||
}
|
||||
|
||||
fn eval(&self, _func_ctx: &FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
with_match_primitive_type_id!(columns[0].data_type().logical_type_id(), |$S| {
|
||||
with_match_primitive_type_id!(columns[1].data_type().logical_type_id(), |$T| {
|
||||
let col = scalar_binary_op::<<$S as LogicalPrimitiveType>::Native, <$T as LogicalPrimitiveType>::Native, f64, _>(&columns[0], &columns[1], scalar_pow, &mut EvalContext::default())?;
|
||||
Ok(Arc::new(col))
|
||||
},{
|
||||
unreachable!()
|
||||
})
|
||||
},{
|
||||
unreachable!()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn scalar_pow<S, T>(value: Option<S>, base: Option<T>, _ctx: &mut EvalContext) -> Option<f64>
|
||||
where
|
||||
S: AsPrimitive<f64>,
|
||||
T: AsPrimitive<f64>,
|
||||
{
|
||||
match (value, base) {
|
||||
(Some(value), Some(base)) => Some(value.as_().pow(base.as_())),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for PowFunction {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "POW")
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use common_query::prelude::TypeSignature;
|
||||
use datatypes::value::Value;
|
||||
use datatypes::vectors::{Float32Vector, Int8Vector};
|
||||
|
||||
use super::*;
|
||||
use crate::function::FunctionContext;
|
||||
#[test]
|
||||
fn test_pow_function() {
|
||||
let pow = PowFunction;
|
||||
|
||||
assert_eq!("pow", pow.name());
|
||||
assert_eq!(
|
||||
ConcreteDataType::float64_datatype(),
|
||||
pow.return_type(&[]).unwrap()
|
||||
);
|
||||
|
||||
assert!(matches!(pow.signature(),
|
||||
Signature {
|
||||
type_signature: TypeSignature::Uniform(2, valid_types),
|
||||
volatility: Volatility::Immutable
|
||||
} if valid_types == ConcreteDataType::numerics()
|
||||
));
|
||||
|
||||
let values = vec![1.0, 2.0, 3.0];
|
||||
let bases = vec![0i8, -1i8, 3i8];
|
||||
|
||||
let args: Vec<VectorRef> = vec![
|
||||
Arc::new(Float32Vector::from_vec(values.clone())),
|
||||
Arc::new(Int8Vector::from_vec(bases.clone())),
|
||||
];
|
||||
|
||||
let vector = pow.eval(&FunctionContext::default(), &args).unwrap();
|
||||
assert_eq!(3, vector.len());
|
||||
|
||||
for i in 0..3 {
|
||||
let p: f64 = (values[i] as f64).pow(bases[i] as f64);
|
||||
assert!(matches!(vector.get(i), Value::Float64(v) if v == p));
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -12,8 +12,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt;
|
||||
use std::sync::Arc;
|
||||
use std::{env, fmt};
|
||||
|
||||
use common_query::error::Result;
|
||||
use common_query::prelude::{Signature, Volatility};
|
||||
@@ -47,7 +47,7 @@ impl Function for PGVersionFunction {
|
||||
fn eval(&self, _func_ctx: &FunctionContext, _columns: &[VectorRef]) -> Result<VectorRef> {
|
||||
let result = StringVector::from(vec![format!(
|
||||
"PostgreSQL 16.3 GreptimeDB {}",
|
||||
env!("CARGO_PKG_VERSION")
|
||||
common_version::version()
|
||||
)]);
|
||||
Ok(Arc::new(result))
|
||||
}
|
||||
|
||||
@@ -12,8 +12,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fmt;
|
||||
use std::sync::Arc;
|
||||
use std::{env, fmt};
|
||||
|
||||
use common_query::error::Result;
|
||||
use common_query::prelude::{Signature, Volatility};
|
||||
@@ -52,13 +52,13 @@ impl Function for VersionFunction {
|
||||
"{}-greptimedb-{}",
|
||||
std::env::var("GREPTIMEDB_MYSQL_SERVER_VERSION")
|
||||
.unwrap_or_else(|_| "8.4.2".to_string()),
|
||||
env!("CARGO_PKG_VERSION")
|
||||
common_version::version()
|
||||
)
|
||||
}
|
||||
Channel::Postgres => {
|
||||
format!("16.3-greptimedb-{}", env!("CARGO_PKG_VERSION"))
|
||||
format!("16.3-greptimedb-{}", common_version::version())
|
||||
}
|
||||
_ => env!("CARGO_PKG_VERSION").to_string(),
|
||||
_ => common_version::version().to_string(),
|
||||
};
|
||||
let result = StringVector::from(vec![version]);
|
||||
Ok(Arc::new(result))
|
||||
|
||||
@@ -34,7 +34,7 @@ use table::requests::{
|
||||
};
|
||||
|
||||
use crate::error::{
|
||||
InvalidColumnDefSnafu, InvalidSetFulltextOptionRequestSnafu,
|
||||
InvalidColumnDefSnafu, InvalidIndexOptionSnafu, InvalidSetFulltextOptionRequestSnafu,
|
||||
InvalidSetSkippingIndexOptionRequestSnafu, InvalidSetTableOptionRequestSnafu,
|
||||
InvalidUnsetTableOptionRequestSnafu, MissingAlterIndexOptionSnafu, MissingFieldSnafu,
|
||||
MissingTimestampColumnSnafu, Result, UnknownLocationTypeSnafu,
|
||||
@@ -126,18 +126,21 @@ pub fn alter_expr_to_request(table_id: TableId, expr: AlterTableExpr) -> Result<
|
||||
api::v1::set_index::Options::Fulltext(f) => AlterKind::SetIndex {
|
||||
options: SetIndexOptions::Fulltext {
|
||||
column_name: f.column_name.clone(),
|
||||
options: FulltextOptions {
|
||||
enable: f.enable,
|
||||
analyzer: as_fulltext_option_analyzer(
|
||||
options: FulltextOptions::new(
|
||||
f.enable,
|
||||
as_fulltext_option_analyzer(
|
||||
Analyzer::try_from(f.analyzer)
|
||||
.context(InvalidSetFulltextOptionRequestSnafu)?,
|
||||
),
|
||||
case_sensitive: f.case_sensitive,
|
||||
backend: as_fulltext_option_backend(
|
||||
f.case_sensitive,
|
||||
as_fulltext_option_backend(
|
||||
PbFulltextBackend::try_from(f.backend)
|
||||
.context(InvalidSetFulltextOptionRequestSnafu)?,
|
||||
),
|
||||
},
|
||||
f.granularity as u32,
|
||||
f.false_positive_rate,
|
||||
)
|
||||
.context(InvalidIndexOptionSnafu)?,
|
||||
},
|
||||
},
|
||||
api::v1::set_index::Options::Inverted(i) => AlterKind::SetIndex {
|
||||
@@ -148,13 +151,15 @@ pub fn alter_expr_to_request(table_id: TableId, expr: AlterTableExpr) -> Result<
|
||||
api::v1::set_index::Options::Skipping(s) => AlterKind::SetIndex {
|
||||
options: SetIndexOptions::Skipping {
|
||||
column_name: s.column_name,
|
||||
options: SkippingIndexOptions {
|
||||
granularity: s.granularity as u32,
|
||||
index_type: as_skipping_index_type(
|
||||
options: SkippingIndexOptions::new(
|
||||
s.granularity as u32,
|
||||
s.false_positive_rate,
|
||||
as_skipping_index_type(
|
||||
PbSkippingIndexType::try_from(s.skipping_index_type)
|
||||
.context(InvalidSetSkippingIndexOptionRequestSnafu)?,
|
||||
),
|
||||
},
|
||||
)
|
||||
.context(InvalidIndexOptionSnafu)?,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
@@ -153,6 +153,14 @@ pub enum Error {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
},
|
||||
|
||||
#[snafu(display("Invalid index option"))]
|
||||
InvalidIndexOption {
|
||||
#[snafu(implicit)]
|
||||
location: Location,
|
||||
#[snafu(source)]
|
||||
error: datatypes::error::Error,
|
||||
},
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
@@ -180,7 +188,8 @@ impl ErrorExt for Error {
|
||||
| Error::InvalidUnsetTableOptionRequest { .. }
|
||||
| Error::InvalidSetFulltextOptionRequest { .. }
|
||||
| Error::InvalidSetSkippingIndexOptionRequest { .. }
|
||||
| Error::MissingAlterIndexOption { .. } => StatusCode::InvalidArguments,
|
||||
| Error::MissingAlterIndexOption { .. }
|
||||
| Error::InvalidIndexOption { .. } => StatusCode::InvalidArguments,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -50,7 +50,6 @@ pub mod drop_flow;
|
||||
pub mod drop_table;
|
||||
pub mod drop_view;
|
||||
pub mod flow_meta;
|
||||
mod physical_table_metadata;
|
||||
pub mod table_meta;
|
||||
#[cfg(any(test, feature = "testing"))]
|
||||
pub mod test_util;
|
||||
|
||||
@@ -12,32 +12,32 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod check;
|
||||
mod metadata;
|
||||
mod region_request;
|
||||
mod table_cache_keys;
|
||||
mod executor;
|
||||
mod update_metadata;
|
||||
mod validator;
|
||||
|
||||
use api::region::RegionResponse;
|
||||
use async_trait::async_trait;
|
||||
use common_catalog::format_full_table_name;
|
||||
use common_procedure::error::{FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu};
|
||||
use common_procedure::{Context, LockKey, Procedure, Status};
|
||||
use common_telemetry::{error, info, warn};
|
||||
use futures_util::future;
|
||||
pub use region_request::make_alter_region_request;
|
||||
use common_telemetry::{debug, error, info, warn};
|
||||
pub use executor::make_alter_region_request;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{ensure, ResultExt};
|
||||
use snafu::ResultExt;
|
||||
use store_api::metadata::ColumnMetadata;
|
||||
use store_api::metric_engine_consts::ALTER_PHYSICAL_EXTENSION_KEY;
|
||||
use strum::AsRefStr;
|
||||
use table::metadata::TableId;
|
||||
|
||||
use crate::ddl::utils::{
|
||||
add_peer_context_if_needed, map_to_procedure_error, sync_follower_regions,
|
||||
use crate::cache_invalidator::Context as CacheContext;
|
||||
use crate::ddl::alter_logical_tables::executor::AlterLogicalTablesExecutor;
|
||||
use crate::ddl::alter_logical_tables::validator::{
|
||||
retain_unskipped, AlterLogicalTableValidator, ValidatorResult,
|
||||
};
|
||||
use crate::ddl::utils::{extract_column_metadatas, map_to_procedure_error, sync_follower_regions};
|
||||
use crate::ddl::DdlContext;
|
||||
use crate::error::{DecodeJsonSnafu, MetadataCorruptionSnafu, Result};
|
||||
use crate::error::Result;
|
||||
use crate::instruction::CacheIdent;
|
||||
use crate::key::table_info::TableInfoValue;
|
||||
use crate::key::table_route::PhysicalTableRouteValue;
|
||||
@@ -45,13 +45,38 @@ use crate::key::DeserializedValueWithBytes;
|
||||
use crate::lock_key::{CatalogLock, SchemaLock, TableLock};
|
||||
use crate::metrics;
|
||||
use crate::rpc::ddl::AlterTableTask;
|
||||
use crate::rpc::router::{find_leaders, RegionRoute};
|
||||
use crate::rpc::router::RegionRoute;
|
||||
|
||||
pub struct AlterLogicalTablesProcedure {
|
||||
pub context: DdlContext,
|
||||
pub data: AlterTablesData,
|
||||
}
|
||||
|
||||
/// Builds the validator from the [`AlterTablesData`].
|
||||
fn build_validator_from_alter_table_data<'a>(
|
||||
data: &'a AlterTablesData,
|
||||
) -> AlterLogicalTableValidator<'a> {
|
||||
let phsycial_table_id = data.physical_table_id;
|
||||
let alters = data
|
||||
.tasks
|
||||
.iter()
|
||||
.map(|task| &task.alter_table)
|
||||
.collect::<Vec<_>>();
|
||||
AlterLogicalTableValidator::new(phsycial_table_id, alters)
|
||||
}
|
||||
|
||||
/// Builds the executor from the [`AlterTablesData`].
|
||||
fn build_executor_from_alter_expr<'a>(data: &'a AlterTablesData) -> AlterLogicalTablesExecutor<'a> {
|
||||
debug_assert_eq!(data.tasks.len(), data.table_info_values.len());
|
||||
let alters = data
|
||||
.tasks
|
||||
.iter()
|
||||
.zip(data.table_info_values.iter())
|
||||
.map(|(task, table_info)| (table_info.table_info.ident.table_id, &task.alter_table))
|
||||
.collect::<Vec<_>>();
|
||||
AlterLogicalTablesExecutor::new(alters)
|
||||
}
|
||||
|
||||
impl AlterLogicalTablesProcedure {
|
||||
pub const TYPE_NAME: &'static str = "metasrv-procedure::AlterLogicalTables";
|
||||
|
||||
@@ -81,35 +106,44 @@ impl AlterLogicalTablesProcedure {
|
||||
}
|
||||
|
||||
pub(crate) async fn on_prepare(&mut self) -> Result<Status> {
|
||||
// Checks all the tasks
|
||||
self.check_input_tasks()?;
|
||||
// Fills the table info values
|
||||
self.fill_table_info_values().await?;
|
||||
// Checks the physical table, must after [fill_table_info_values]
|
||||
self.check_physical_table().await?;
|
||||
// Fills the physical table info
|
||||
self.fill_physical_table_info().await?;
|
||||
// Filter the finished tasks
|
||||
let finished_tasks = self.check_finished_tasks()?;
|
||||
let already_finished_count = finished_tasks
|
||||
.iter()
|
||||
.map(|x| if *x { 1 } else { 0 })
|
||||
.sum::<usize>();
|
||||
let apply_tasks_count = self.data.tasks.len();
|
||||
if already_finished_count == apply_tasks_count {
|
||||
let validator = build_validator_from_alter_table_data(&self.data);
|
||||
let ValidatorResult {
|
||||
num_skipped,
|
||||
skip_alter,
|
||||
table_info_values,
|
||||
physical_table_info,
|
||||
physical_table_route,
|
||||
} = validator
|
||||
.validate(&self.context.table_metadata_manager)
|
||||
.await?;
|
||||
|
||||
let num_tasks = self.data.tasks.len();
|
||||
if num_skipped == num_tasks {
|
||||
info!("All the alter tasks are finished, will skip the procedure.");
|
||||
let cache_ident_keys = AlterLogicalTablesExecutor::build_cache_ident_keys(
|
||||
&physical_table_info,
|
||||
&table_info_values
|
||||
.iter()
|
||||
.map(|v| v.get_inner_ref())
|
||||
.collect::<Vec<_>>(),
|
||||
);
|
||||
self.data.table_cache_keys_to_invalidate = cache_ident_keys;
|
||||
// Re-invalidate the table cache
|
||||
self.data.state = AlterTablesState::InvalidateTableCache;
|
||||
return Ok(Status::executing(true));
|
||||
} else if already_finished_count > 0 {
|
||||
} else if num_skipped > 0 {
|
||||
info!(
|
||||
"There are {} alter tasks, {} of them were already finished.",
|
||||
apply_tasks_count, already_finished_count
|
||||
num_tasks, num_skipped
|
||||
);
|
||||
}
|
||||
self.filter_task(&finished_tasks)?;
|
||||
|
||||
// Next state
|
||||
// Updates the procedure state.
|
||||
retain_unskipped(&mut self.data.tasks, &skip_alter);
|
||||
self.data.physical_table_info = Some(physical_table_info);
|
||||
self.data.physical_table_route = Some(physical_table_route);
|
||||
self.data.table_info_values = table_info_values;
|
||||
debug_assert_eq!(self.data.tasks.len(), self.data.table_info_values.len());
|
||||
self.data.state = AlterTablesState::SubmitAlterRegionRequests;
|
||||
Ok(Status::executing(true))
|
||||
}
|
||||
@@ -117,57 +151,21 @@ impl AlterLogicalTablesProcedure {
|
||||
pub(crate) async fn on_submit_alter_region_requests(&mut self) -> Result<Status> {
|
||||
// Safety: we have checked the state in on_prepare
|
||||
let physical_table_route = &self.data.physical_table_route.as_ref().unwrap();
|
||||
let leaders = find_leaders(&physical_table_route.region_routes);
|
||||
let mut alter_region_tasks = Vec::with_capacity(leaders.len());
|
||||
let executor = build_executor_from_alter_expr(&self.data);
|
||||
let mut results = executor
|
||||
.on_alter_regions(
|
||||
&self.context.node_manager,
|
||||
&physical_table_route.region_routes,
|
||||
)
|
||||
.await?;
|
||||
|
||||
for peer in leaders {
|
||||
let requester = self.context.node_manager.datanode(&peer).await;
|
||||
let request = self.make_request(&peer, &physical_table_route.region_routes)?;
|
||||
|
||||
alter_region_tasks.push(async move {
|
||||
requester
|
||||
.handle(request)
|
||||
.await
|
||||
.map_err(add_peer_context_if_needed(peer))
|
||||
});
|
||||
}
|
||||
|
||||
let mut results = future::join_all(alter_region_tasks)
|
||||
.await
|
||||
.into_iter()
|
||||
.collect::<Result<Vec<_>>>()?;
|
||||
|
||||
// Collects responses from datanodes.
|
||||
let phy_raw_schemas = results
|
||||
.iter_mut()
|
||||
.map(|res| res.extensions.remove(ALTER_PHYSICAL_EXTENSION_KEY))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
if phy_raw_schemas.is_empty() {
|
||||
self.submit_sync_region_requests(results, &physical_table_route.region_routes)
|
||||
.await;
|
||||
self.data.state = AlterTablesState::UpdateMetadata;
|
||||
return Ok(Status::executing(true));
|
||||
}
|
||||
|
||||
// Verify all the physical schemas are the same
|
||||
// Safety: previous check ensures this vec is not empty
|
||||
let first = phy_raw_schemas.first().unwrap();
|
||||
ensure!(
|
||||
phy_raw_schemas.iter().all(|x| x == first),
|
||||
MetadataCorruptionSnafu {
|
||||
err_msg: "The physical schemas from datanodes are not the same."
|
||||
}
|
||||
);
|
||||
|
||||
// Decodes the physical raw schemas
|
||||
if let Some(phy_raw_schema) = first {
|
||||
self.data.physical_columns =
|
||||
ColumnMetadata::decode_list(phy_raw_schema).context(DecodeJsonSnafu)?;
|
||||
if let Some(column_metadatas) =
|
||||
extract_column_metadatas(&mut results, ALTER_PHYSICAL_EXTENSION_KEY)?
|
||||
{
|
||||
self.data.physical_columns = column_metadatas;
|
||||
} else {
|
||||
warn!("altering logical table result doesn't contains extension key `{ALTER_PHYSICAL_EXTENSION_KEY}`,leaving the physical table's schema unchanged");
|
||||
}
|
||||
|
||||
self.submit_sync_region_requests(results, &physical_table_route.region_routes)
|
||||
.await;
|
||||
self.data.state = AlterTablesState::UpdateMetadata;
|
||||
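Both the removed inline code and the new extract_column_metadatas helper enforce that every datanode reports the same physical schema before it is applied. A rough standalone sketch of that agreement check; the Response type and function name are illustrative, not the real RegionResponse API:

use std::collections::HashMap;

struct Response {
    extensions: HashMap<String, Vec<u8>>,
}

// Pull the same extension key out of every response and require the payloads
// to agree before handing one of them back for decoding.
fn extract_agreed_payload(
    responses: &mut [Response],
    key: &str,
) -> Result<Option<Vec<u8>>, String> {
    let payloads: Vec<Option<Vec<u8>>> = responses
        .iter_mut()
        .map(|r| r.extensions.remove(key))
        .collect();
    if payloads.is_empty() {
        return Ok(None);
    }
    let first = payloads[0].clone();
    if payloads.iter().any(|p| *p != first) {
        return Err("physical schemas from datanodes are not the same".to_string());
    }
    Ok(first)
}

fn main() {
    let mut responses = vec![
        Response { extensions: HashMap::from([("schema".to_string(), vec![1, 2])]) },
        Response { extensions: HashMap::from([("schema".to_string(), vec![1, 2])]) },
    ];
    assert_eq!(extract_agreed_payload(&mut responses, "schema").unwrap(), Some(vec![1, 2]));
}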
@@ -183,7 +181,7 @@ impl AlterLogicalTablesProcedure {
|
||||
if let Err(err) = sync_follower_regions(
|
||||
&self.context,
|
||||
self.data.physical_table_id,
|
||||
results,
|
||||
&results,
|
||||
region_routes,
|
||||
table_info.meta.engine.as_str(),
|
||||
)
|
||||
@@ -200,7 +198,18 @@ impl AlterLogicalTablesProcedure {
|
||||
self.update_physical_table_metadata().await?;
|
||||
self.update_logical_tables_metadata().await?;
|
||||
|
||||
self.data.build_cache_keys_to_invalidate();
|
||||
let logical_table_info_values = self
|
||||
.data
|
||||
.table_info_values
|
||||
.iter()
|
||||
.map(|v| v.get_inner_ref())
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let cache_ident_keys = AlterLogicalTablesExecutor::build_cache_ident_keys(
|
||||
self.data.physical_table_info.as_ref().unwrap(),
|
||||
&logical_table_info_values,
|
||||
);
|
||||
self.data.table_cache_keys_to_invalidate = cache_ident_keys;
|
||||
self.data.clear_metadata_fields();
|
||||
|
||||
self.data.state = AlterTablesState::InvalidateTableCache;
|
||||
@@ -210,9 +219,16 @@ impl AlterLogicalTablesProcedure {
|
||||
pub(crate) async fn on_invalidate_table_cache(&mut self) -> Result<Status> {
|
||||
let to_invalidate = &self.data.table_cache_keys_to_invalidate;
|
||||
|
||||
let ctx = CacheContext {
|
||||
subject: Some(format!(
|
||||
"Invalidate table cache by altering logical tables, physical_table_id: {}",
|
||||
self.data.physical_table_id,
|
||||
)),
|
||||
};
|
||||
|
||||
self.context
|
||||
.cache_invalidator
|
||||
.invalidate(&Default::default(), to_invalidate)
|
||||
.invalidate(&ctx, to_invalidate)
|
||||
.await?;
|
||||
Ok(Status::done())
|
||||
}
|
||||
@@ -232,6 +248,10 @@ impl Procedure for AlterLogicalTablesProcedure {
|
||||
let _timer = metrics::METRIC_META_PROCEDURE_ALTER_TABLE
|
||||
.with_label_values(&[step])
|
||||
.start_timer();
|
||||
debug!(
|
||||
"Executing alter logical tables procedure, state: {:?}",
|
||||
state
|
||||
);
|
||||
|
||||
match state {
|
||||
AlterTablesState::Prepare => self.on_prepare().await,
|
||||
|
||||
@@ -1,136 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashSet;
|
||||
|
||||
use api::v1::alter_table_expr::Kind;
|
||||
use snafu::{ensure, OptionExt};
|
||||
|
||||
use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;
|
||||
use crate::error::{AlterLogicalTablesInvalidArgumentsSnafu, Result};
|
||||
use crate::key::table_info::TableInfoValue;
|
||||
use crate::key::table_route::TableRouteValue;
|
||||
use crate::rpc::ddl::AlterTableTask;
|
||||
|
||||
impl AlterLogicalTablesProcedure {
|
||||
pub(crate) fn check_input_tasks(&self) -> Result<()> {
|
||||
self.check_schema()?;
|
||||
self.check_alter_kind()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) async fn check_physical_table(&self) -> Result<()> {
|
||||
let table_route_manager = self.context.table_metadata_manager.table_route_manager();
|
||||
let table_ids = self
|
||||
.data
|
||||
.table_info_values
|
||||
.iter()
|
||||
.map(|v| v.table_info.ident.table_id)
|
||||
.collect::<Vec<_>>();
|
||||
let table_routes = table_route_manager
|
||||
.table_route_storage()
|
||||
.batch_get(&table_ids)
|
||||
.await?;
|
||||
let physical_table_id = self.data.physical_table_id;
|
||||
let is_same_physical_table = table_routes.iter().all(|r| {
|
||||
if let Some(TableRouteValue::Logical(r)) = r {
|
||||
r.physical_table_id() == physical_table_id
|
||||
} else {
|
||||
false
|
||||
}
|
||||
});
|
||||
|
||||
ensure!(
|
||||
is_same_physical_table,
|
||||
AlterLogicalTablesInvalidArgumentsSnafu {
|
||||
err_msg: "All the tasks should have the same physical table id"
|
||||
}
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) fn check_finished_tasks(&self) -> Result<Vec<bool>> {
|
||||
let task = &self.data.tasks;
|
||||
let table_info_values = &self.data.table_info_values;
|
||||
|
||||
Ok(task
|
||||
.iter()
|
||||
.zip(table_info_values.iter())
|
||||
.map(|(task, table)| Self::check_finished_task(task, table))
|
||||
.collect())
|
||||
}
|
||||
|
||||
// Checks if the schemas of the tasks are the same
|
||||
fn check_schema(&self) -> Result<()> {
|
||||
let is_same_schema = self.data.tasks.windows(2).all(|pair| {
|
||||
pair[0].alter_table.catalog_name == pair[1].alter_table.catalog_name
|
||||
&& pair[0].alter_table.schema_name == pair[1].alter_table.schema_name
|
||||
});
|
||||
|
||||
ensure!(
|
||||
is_same_schema,
|
||||
AlterLogicalTablesInvalidArgumentsSnafu {
|
||||
err_msg: "Schemas of the tasks are not the same"
|
||||
}
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn check_alter_kind(&self) -> Result<()> {
|
||||
for task in &self.data.tasks {
|
||||
let kind = task.alter_table.kind.as_ref().context(
|
||||
AlterLogicalTablesInvalidArgumentsSnafu {
|
||||
err_msg: "Alter kind is missing",
|
||||
},
|
||||
)?;
|
||||
let Kind::AddColumns(_) = kind else {
|
||||
return AlterLogicalTablesInvalidArgumentsSnafu {
|
||||
err_msg: "Only support add columns operation",
|
||||
}
|
||||
.fail();
|
||||
};
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn check_finished_task(task: &AlterTableTask, table: &TableInfoValue) -> bool {
|
||||
let columns = table
|
||||
.table_info
|
||||
.meta
|
||||
.schema
|
||||
.column_schemas
|
||||
.iter()
|
||||
.map(|c| &c.name)
|
||||
.collect::<HashSet<_>>();
|
||||
|
||||
let Some(kind) = task.alter_table.kind.as_ref() else {
|
||||
return true; // Never get here since we have checked it in `check_alter_kind`
|
||||
};
|
||||
let Kind::AddColumns(add_columns) = kind else {
|
||||
return true; // Never get here since we have checked it in `check_alter_kind`
|
||||
};
|
||||
|
||||
// We only check that all columns have been finished. That is to say,
|
||||
// if one part is finished but another part is not, it will be considered
|
||||
// unfinished.
|
||||
add_columns
|
||||
.add_columns
|
||||
.iter()
|
||||
.map(|add_column| add_column.column_def.as_ref().map(|c| &c.name))
|
||||
.all(|column| column.map(|c| columns.contains(c)).unwrap_or(false))
|
||||
}
|
||||
}
|
||||
216 src/common/meta/src/ddl/alter_logical_tables/executor.rs Normal file
@@ -0,0 +1,216 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use api::region::RegionResponse;
|
||||
use api::v1::alter_table_expr::Kind;
|
||||
use api::v1::region::{
|
||||
alter_request, region_request, AddColumn, AddColumns, AlterRequest, AlterRequests,
|
||||
RegionColumnDef, RegionRequest, RegionRequestHeader,
|
||||
};
|
||||
use api::v1::{self, AlterTableExpr};
|
||||
use common_telemetry::tracing_context::TracingContext;
|
||||
use common_telemetry::{debug, warn};
|
||||
use futures::future;
|
||||
use store_api::metadata::ColumnMetadata;
|
||||
use store_api::storage::{RegionId, RegionNumber, TableId};
|
||||
|
||||
use crate::ddl::utils::{add_peer_context_if_needed, raw_table_info};
|
||||
use crate::error::Result;
|
||||
use crate::instruction::CacheIdent;
|
||||
use crate::key::table_info::TableInfoValue;
|
||||
use crate::key::{DeserializedValueWithBytes, RegionDistribution, TableMetadataManagerRef};
|
||||
use crate::node_manager::NodeManagerRef;
|
||||
use crate::rpc::router::{find_leaders, region_distribution, RegionRoute};
|
||||
|
||||
/// [AlterLogicalTablesExecutor] performs:
|
||||
/// - Alters logical regions on the datanodes.
|
||||
/// - Updates table metadata for alter table operation.
|
||||
pub struct AlterLogicalTablesExecutor<'a> {
|
||||
/// The alter table expressions.
|
||||
///
|
||||
/// The first element is the logical table id, the second element is the alter table expression.
|
||||
alters: Vec<(TableId, &'a AlterTableExpr)>,
|
||||
}
|
||||
|
||||
impl<'a> AlterLogicalTablesExecutor<'a> {
|
||||
pub fn new(alters: Vec<(TableId, &'a AlterTableExpr)>) -> Self {
|
||||
Self { alters }
|
||||
}
|
||||
|
||||
/// Alters logical regions on the datanodes.
|
||||
pub(crate) async fn on_alter_regions(
|
||||
&self,
|
||||
node_manager: &NodeManagerRef,
|
||||
region_routes: &[RegionRoute],
|
||||
) -> Result<Vec<RegionResponse>> {
|
||||
let region_distribution = region_distribution(region_routes);
|
||||
let leaders = find_leaders(region_routes)
|
||||
.into_iter()
|
||||
.map(|p| (p.id, p))
|
||||
.collect::<HashMap<_, _>>();
|
||||
let mut alter_region_tasks = Vec::with_capacity(leaders.len());
|
||||
for (datanode_id, region_role_set) in region_distribution {
|
||||
if region_role_set.leader_regions.is_empty() {
|
||||
continue;
|
||||
}
|
||||
// Safety: must exists.
|
||||
let peer = leaders.get(&datanode_id).unwrap();
|
||||
let requester = node_manager.datanode(peer).await;
|
||||
let requests = self.make_alter_region_request(®ion_role_set.leader_regions);
|
||||
let requester = requester.clone();
|
||||
let peer = peer.clone();
|
||||
|
||||
debug!("Sending alter region requests to datanode {}", peer);
|
||||
alter_region_tasks.push(async move {
|
||||
requester
|
||||
.handle(make_request(requests))
|
||||
.await
|
||||
.map_err(add_peer_context_if_needed(peer))
|
||||
});
|
||||
}
|
||||
|
||||
future::join_all(alter_region_tasks)
|
||||
.await
|
||||
.into_iter()
|
||||
.collect::<Result<Vec<_>>>()
|
||||
}
|
||||
|
||||
fn make_alter_region_request(&self, region_numbers: &[RegionNumber]) -> AlterRequests {
|
||||
let mut requests = Vec::with_capacity(region_numbers.len() * self.alters.len());
|
||||
for (table_id, alter) in self.alters.iter() {
|
||||
for region_number in region_numbers {
|
||||
let region_id = RegionId::new(*table_id, *region_number);
|
||||
let request = make_alter_region_request(region_id, alter);
|
||||
requests.push(request);
|
||||
}
|
||||
}
|
||||
|
||||
AlterRequests { requests }
|
||||
}
|
||||
|
||||
/// Updates table metadata for alter table operation.
|
||||
///
|
||||
/// ## Panic:
|
||||
/// - If the region distribution is not set when updating table metadata.
|
||||
pub(crate) async fn on_alter_metadata(
|
||||
physical_table_id: TableId,
|
||||
table_metadata_manager: &TableMetadataManagerRef,
|
||||
current_table_info_value: &DeserializedValueWithBytes<TableInfoValue>,
|
||||
region_distribution: RegionDistribution,
|
||||
physical_columns: &[ColumnMetadata],
|
||||
) -> Result<()> {
|
||||
if physical_columns.is_empty() {
|
||||
warn!("No physical columns found, leaving the physical table's schema unchanged when altering logical tables");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let table_ref = current_table_info_value.table_ref();
|
||||
let table_id = physical_table_id;
|
||||
|
||||
// Generates new table info
|
||||
let old_raw_table_info = current_table_info_value.table_info.clone();
|
||||
let new_raw_table_info =
|
||||
raw_table_info::build_new_physical_table_info(old_raw_table_info, physical_columns);
|
||||
|
||||
debug!(
|
||||
"Starting update table: {} metadata, table_id: {}, new table info: {:?}",
|
||||
table_ref, table_id, new_raw_table_info
|
||||
);
|
||||
|
||||
table_metadata_manager
|
||||
.update_table_info(
|
||||
current_table_info_value,
|
||||
Some(region_distribution),
|
||||
new_raw_table_info,
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Builds the cache ident keys for the alter logical tables.
|
||||
///
|
||||
/// The cache ident keys are:
|
||||
/// - The table id of the logical tables.
|
||||
/// - The table name of the logical tables.
|
||||
/// - The table id of the physical table.
|
||||
pub(crate) fn build_cache_ident_keys(
|
||||
physical_table_info: &TableInfoValue,
|
||||
logical_table_info_values: &[&TableInfoValue],
|
||||
) -> Vec<CacheIdent> {
|
||||
let mut cache_keys = Vec::with_capacity(logical_table_info_values.len() * 2 + 2);
|
||||
cache_keys.extend(logical_table_info_values.iter().flat_map(|table| {
|
||||
vec![
|
||||
CacheIdent::TableId(table.table_info.ident.table_id),
|
||||
CacheIdent::TableName(table.table_name()),
|
||||
]
|
||||
}));
|
||||
cache_keys.push(CacheIdent::TableId(
|
||||
physical_table_info.table_info.ident.table_id,
|
||||
));
|
||||
cache_keys.push(CacheIdent::TableName(physical_table_info.table_name()));
|
||||
|
||||
cache_keys
|
||||
}
|
||||
}
|
||||
|
||||
fn make_request(alter_requests: AlterRequests) -> RegionRequest {
|
||||
RegionRequest {
|
||||
header: Some(RegionRequestHeader {
|
||||
tracing_context: TracingContext::from_current_span().to_w3c(),
|
||||
..Default::default()
|
||||
}),
|
||||
body: Some(region_request::Body::Alters(alter_requests)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Makes an alter region request.
|
||||
pub fn make_alter_region_request(
|
||||
region_id: RegionId,
|
||||
alter_table_expr: &AlterTableExpr,
|
||||
) -> AlterRequest {
|
||||
let region_id = region_id.as_u64();
|
||||
let kind = match &alter_table_expr.kind {
|
||||
Some(Kind::AddColumns(add_columns)) => Some(alter_request::Kind::AddColumns(
|
||||
to_region_add_columns(add_columns),
|
||||
)),
|
||||
_ => unreachable!(), // Safety: we have checked the kind in check_input_tasks
|
||||
};
|
||||
|
||||
AlterRequest {
|
||||
region_id,
|
||||
schema_version: 0,
|
||||
kind,
|
||||
}
|
||||
}
|
||||
|
||||
fn to_region_add_columns(add_columns: &v1::AddColumns) -> AddColumns {
|
||||
let add_columns = add_columns
|
||||
.add_columns
|
||||
.iter()
|
||||
.map(|add_column| {
|
||||
let region_column_def = RegionColumnDef {
|
||||
column_def: add_column.column_def.clone(),
|
||||
..Default::default() // other fields are not used in alter logical table
|
||||
};
|
||||
AddColumn {
|
||||
column_def: Some(region_column_def),
|
||||
..Default::default() // other fields are not used in alter logical table
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
AddColumns { add_columns }
|
||||
}
|
||||
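In on_alter_regions above, requests are batched per datanode: the region distribution is grouped by leader peer so each datanode receives a single batched request covering every affected region it leads. A simplified sketch of that grouping step, using stand-in types rather than the real RegionRoute/Peer:

use std::collections::HashMap;

#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct DatanodeId(u64);

struct Route {
    region: u32,
    leader: DatanodeId,
}

// One entry per datanode that leads at least one region; each entry collects
// all region numbers that belong in that datanode's batched request.
fn group_regions_by_leader(routes: &[Route]) -> HashMap<DatanodeId, Vec<u32>> {
    let mut grouped: HashMap<DatanodeId, Vec<u32>> = HashMap::new();
    for route in routes {
        grouped.entry(route.leader.clone()).or_default().push(route.region);
    }
    grouped
}

fn main() {
    let routes = vec![
        Route { region: 0, leader: DatanodeId(1) },
        Route { region: 1, leader: DatanodeId(2) },
        Route { region: 2, leader: DatanodeId(1) },
    ];
    let grouped = group_regions_by_leader(&routes);
    // Datanode 1 gets regions [0, 2] in one request; datanode 2 gets [1].
    assert_eq!(grouped[&DatanodeId(1)], vec![0, 2]);
    assert_eq!(grouped[&DatanodeId(2)], vec![1]);
}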
@@ -1,158 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_catalog::format_full_table_name;
|
||||
use snafu::OptionExt;
|
||||
use table::metadata::TableId;
|
||||
|
||||
use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;
|
||||
use crate::error::{
|
||||
AlterLogicalTablesInvalidArgumentsSnafu, Result, TableInfoNotFoundSnafu, TableNotFoundSnafu,
|
||||
TableRouteNotFoundSnafu,
|
||||
};
|
||||
use crate::key::table_info::TableInfoValue;
|
||||
use crate::key::table_name::TableNameKey;
|
||||
use crate::key::table_route::TableRouteValue;
|
||||
use crate::key::DeserializedValueWithBytes;
|
||||
use crate::rpc::ddl::AlterTableTask;
|
||||
|
||||
impl AlterLogicalTablesProcedure {
|
||||
pub(crate) fn filter_task(&mut self, finished_tasks: &[bool]) -> Result<()> {
|
||||
debug_assert_eq!(finished_tasks.len(), self.data.tasks.len());
|
||||
debug_assert_eq!(finished_tasks.len(), self.data.table_info_values.len());
|
||||
self.data.tasks = self
|
||||
.data
|
||||
.tasks
|
||||
.drain(..)
|
||||
.zip(finished_tasks.iter())
|
||||
.filter_map(|(task, finished)| if *finished { None } else { Some(task) })
|
||||
.collect();
|
||||
self.data.table_info_values = self
|
||||
.data
|
||||
.table_info_values
|
||||
.drain(..)
|
||||
.zip(finished_tasks.iter())
|
||||
.filter_map(|(table_info_value, finished)| {
|
||||
if *finished {
|
||||
None
|
||||
} else {
|
||||
Some(table_info_value)
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) async fn fill_physical_table_info(&mut self) -> Result<()> {
|
||||
let (physical_table_info, physical_table_route) = self
|
||||
.context
|
||||
.table_metadata_manager
|
||||
.get_full_table_info(self.data.physical_table_id)
|
||||
.await?;
|
||||
|
||||
let physical_table_info = physical_table_info.with_context(|| TableInfoNotFoundSnafu {
|
||||
table: format!("table id - {}", self.data.physical_table_id),
|
||||
})?;
|
||||
let physical_table_route = physical_table_route
|
||||
.context(TableRouteNotFoundSnafu {
|
||||
table_id: self.data.physical_table_id,
|
||||
})?
|
||||
.into_inner();
|
||||
|
||||
self.data.physical_table_info = Some(physical_table_info);
|
||||
let TableRouteValue::Physical(physical_table_route) = physical_table_route else {
|
||||
return AlterLogicalTablesInvalidArgumentsSnafu {
|
||||
err_msg: format!(
|
||||
"expected a physical table but got a logical table: {:?}",
|
||||
self.data.physical_table_id
|
||||
),
|
||||
}
|
||||
.fail();
|
||||
};
|
||||
self.data.physical_table_route = Some(physical_table_route);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) async fn fill_table_info_values(&mut self) -> Result<()> {
|
||||
let table_ids = self.get_all_table_ids().await?;
|
||||
let table_info_values = self.get_all_table_info_values(&table_ids).await?;
|
||||
debug_assert_eq!(table_info_values.len(), self.data.tasks.len());
|
||||
self.data.table_info_values = table_info_values;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get_all_table_info_values(
|
||||
&self,
|
||||
table_ids: &[TableId],
|
||||
) -> Result<Vec<DeserializedValueWithBytes<TableInfoValue>>> {
|
||||
let table_info_manager = self.context.table_metadata_manager.table_info_manager();
|
||||
let mut table_info_map = table_info_manager.batch_get_raw(table_ids).await?;
|
||||
let mut table_info_values = Vec::with_capacity(table_ids.len());
|
||||
for (table_id, task) in table_ids.iter().zip(self.data.tasks.iter()) {
|
||||
let table_info_value =
|
||||
table_info_map
|
||||
.remove(table_id)
|
||||
.with_context(|| TableInfoNotFoundSnafu {
|
||||
table: extract_table_name(task),
|
||||
})?;
|
||||
table_info_values.push(table_info_value);
|
||||
}
|
||||
|
||||
Ok(table_info_values)
|
||||
}
|
||||
|
||||
async fn get_all_table_ids(&self) -> Result<Vec<TableId>> {
|
||||
let table_name_manager = self.context.table_metadata_manager.table_name_manager();
|
||||
let table_name_keys = self
|
||||
.data
|
||||
.tasks
|
||||
.iter()
|
||||
.map(|task| extract_table_name_key(task))
|
||||
.collect();
|
||||
|
||||
let table_name_values = table_name_manager.batch_get(table_name_keys).await?;
|
||||
let mut table_ids = Vec::with_capacity(table_name_values.len());
|
||||
for (value, task) in table_name_values.into_iter().zip(self.data.tasks.iter()) {
|
||||
let table_id = value
|
||||
.with_context(|| TableNotFoundSnafu {
|
||||
table_name: extract_table_name(task),
|
||||
})?
|
||||
.table_id();
|
||||
table_ids.push(table_id);
|
||||
}
|
||||
|
||||
Ok(table_ids)
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn extract_table_name(task: &AlterTableTask) -> String {
|
||||
format_full_table_name(
|
||||
&task.alter_table.catalog_name,
|
||||
&task.alter_table.schema_name,
|
||||
&task.alter_table.table_name,
|
||||
)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn extract_table_name_key(task: &AlterTableTask) -> TableNameKey {
|
||||
TableNameKey::new(
|
||||
&task.alter_table.catalog_name,
|
||||
&task.alter_table.schema_name,
|
||||
&task.alter_table.table_name,
|
||||
)
|
||||
}
|
||||
@@ -1,113 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use api::v1::alter_table_expr::Kind;
|
||||
use api::v1::region::{
|
||||
alter_request, region_request, AddColumn, AddColumns, AlterRequest, AlterRequests,
|
||||
RegionColumnDef, RegionRequest, RegionRequestHeader,
|
||||
};
|
||||
use api::v1::{self, AlterTableExpr};
|
||||
use common_telemetry::tracing_context::TracingContext;
|
||||
use store_api::storage::RegionId;
|
||||
|
||||
use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;
|
||||
use crate::error::Result;
|
||||
use crate::peer::Peer;
|
||||
use crate::rpc::router::{find_leader_regions, RegionRoute};
|
||||
|
||||
impl AlterLogicalTablesProcedure {
|
||||
pub(crate) fn make_request(
|
||||
&self,
|
||||
peer: &Peer,
|
||||
region_routes: &[RegionRoute],
|
||||
) -> Result<RegionRequest> {
|
||||
let alter_requests = self.make_alter_region_requests(peer, region_routes)?;
|
||||
let request = RegionRequest {
|
||||
header: Some(RegionRequestHeader {
|
||||
tracing_context: TracingContext::from_current_span().to_w3c(),
|
||||
..Default::default()
|
||||
}),
|
||||
body: Some(region_request::Body::Alters(alter_requests)),
|
||||
};
|
||||
|
||||
Ok(request)
|
||||
}
|
||||
|
||||
fn make_alter_region_requests(
|
||||
&self,
|
||||
peer: &Peer,
|
||||
region_routes: &[RegionRoute],
|
||||
) -> Result<AlterRequests> {
|
||||
let tasks = &self.data.tasks;
|
||||
let regions_on_this_peer = find_leader_regions(region_routes, peer);
|
||||
let mut requests = Vec::with_capacity(tasks.len() * regions_on_this_peer.len());
|
||||
for (task, table) in self
|
||||
.data
|
||||
.tasks
|
||||
.iter()
|
||||
.zip(self.data.table_info_values.iter())
|
||||
{
|
||||
for region_number in ®ions_on_this_peer {
|
||||
let region_id = RegionId::new(table.table_info.ident.table_id, *region_number);
|
||||
let request = make_alter_region_request(
|
||||
region_id,
|
||||
&task.alter_table,
|
||||
table.table_info.ident.version,
|
||||
);
|
||||
requests.push(request);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(AlterRequests { requests })
|
||||
}
|
||||
}
|
||||
|
||||
/// Makes an alter region request.
|
||||
pub fn make_alter_region_request(
|
||||
region_id: RegionId,
|
||||
alter_table_expr: &AlterTableExpr,
|
||||
schema_version: u64,
|
||||
) -> AlterRequest {
|
||||
let region_id = region_id.as_u64();
|
||||
let kind = match &alter_table_expr.kind {
|
||||
Some(Kind::AddColumns(add_columns)) => Some(alter_request::Kind::AddColumns(
|
||||
to_region_add_columns(add_columns),
|
||||
)),
|
||||
_ => unreachable!(), // Safety: we have checked the kind in check_input_tasks
|
||||
};
|
||||
|
||||
AlterRequest {
|
||||
region_id,
|
||||
schema_version,
|
||||
kind,
|
||||
}
|
||||
}
|
||||
|
||||
fn to_region_add_columns(add_columns: &v1::AddColumns) -> AddColumns {
|
||||
let add_columns = add_columns
|
||||
.add_columns
|
||||
.iter()
|
||||
.map(|add_column| {
|
||||
let region_column_def = RegionColumnDef {
|
||||
column_def: add_column.column_def.clone(),
|
||||
..Default::default() // other fields are not used in alter logical table
|
||||
};
|
||||
AddColumn {
|
||||
column_def: Some(region_column_def),
|
||||
..Default::default() // other fields are not used in alter logical table
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
AddColumns { add_columns }
|
||||
}
|
||||
@@ -1,50 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use table::metadata::RawTableInfo;
|
||||
use table::table_name::TableName;
|
||||
|
||||
use crate::ddl::alter_logical_tables::AlterTablesData;
|
||||
use crate::instruction::CacheIdent;
|
||||
|
||||
impl AlterTablesData {
|
||||
pub(crate) fn build_cache_keys_to_invalidate(&mut self) {
|
||||
let mut cache_keys = self
|
||||
.table_info_values
|
||||
.iter()
|
||||
.flat_map(|table| {
|
||||
vec![
|
||||
CacheIdent::TableId(table.table_info.ident.table_id),
|
||||
CacheIdent::TableName(extract_table_name(&table.table_info)),
|
||||
]
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
cache_keys.push(CacheIdent::TableId(self.physical_table_id));
|
||||
// Safety: physical_table_info already filled in previous steps
|
||||
let physical_table_info = &self.physical_table_info.as_ref().unwrap().table_info;
|
||||
cache_keys.push(CacheIdent::TableName(extract_table_name(
|
||||
physical_table_info,
|
||||
)));
|
||||
|
||||
self.table_cache_keys_to_invalidate = cache_keys;
|
||||
}
|
||||
}
|
||||
|
||||
fn extract_table_name(table_info: &RawTableInfo) -> TableName {
|
||||
TableName::new(
|
||||
&table_info.catalog_name,
|
||||
&table_info.schema_name,
|
||||
&table_info.name,
|
||||
)
|
||||
}
|
||||
@@ -13,41 +13,35 @@
|
||||
// limitations under the License.
|
||||
|
||||
use common_grpc_expr::alter_expr_to_request;
|
||||
use common_telemetry::warn;
|
||||
use itertools::Itertools;
|
||||
use snafu::ResultExt;
|
||||
use table::metadata::{RawTableInfo, TableInfo};
|
||||
|
||||
use crate::ddl::alter_logical_tables::executor::AlterLogicalTablesExecutor;
|
||||
use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;
|
||||
use crate::ddl::physical_table_metadata;
|
||||
use crate::error;
|
||||
use crate::error::{ConvertAlterTableRequestSnafu, Result};
|
||||
use crate::key::table_info::TableInfoValue;
|
||||
use crate::key::DeserializedValueWithBytes;
|
||||
use crate::rpc::ddl::AlterTableTask;
|
||||
use crate::rpc::router::region_distribution;
|
||||
|
||||
impl AlterLogicalTablesProcedure {
|
||||
pub(crate) async fn update_physical_table_metadata(&mut self) -> Result<()> {
|
||||
if self.data.physical_columns.is_empty() {
|
||||
warn!("No physical columns found, leaving the physical table's schema unchanged when altering logical tables");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Safety: must exist.
|
||||
let physical_table_info = self.data.physical_table_info.as_ref().unwrap();
|
||||
let physical_table_route = self.data.physical_table_route.as_ref().unwrap();
|
||||
let region_distribution = region_distribution(&physical_table_route.region_routes);
|
||||
|
||||
// Generates new table info
|
||||
let old_raw_table_info = physical_table_info.table_info.clone();
|
||||
let new_raw_table_info = physical_table_metadata::build_new_physical_table_info(
|
||||
old_raw_table_info,
|
||||
// Updates physical table's metadata.
|
||||
AlterLogicalTablesExecutor::on_alter_metadata(
|
||||
self.data.physical_table_id,
|
||||
&self.context.table_metadata_manager,
|
||||
physical_table_info,
|
||||
region_distribution,
|
||||
&self.data.physical_columns,
|
||||
);
|
||||
|
||||
// Updates physical table's metadata, and we don't need to touch per-region settings.
|
||||
self.context
|
||||
.table_metadata_manager
|
||||
.update_table_info(physical_table_info, None, new_raw_table_info)
|
||||
.await?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
284 src/common/meta/src/ddl/alter_logical_tables/validator.rs Normal file
@@ -0,0 +1,284 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashSet;
|
||||
|
||||
use api::v1::alter_table_expr::Kind;
|
||||
use api::v1::AlterTableExpr;
|
||||
use snafu::{ensure, OptionExt};
|
||||
use store_api::storage::TableId;
|
||||
use table::table_reference::TableReference;
|
||||
|
||||
use crate::ddl::utils::table_id::get_all_table_ids_by_names;
|
||||
use crate::ddl::utils::table_info::get_all_table_info_values_by_table_ids;
|
||||
use crate::error::{
|
||||
AlterLogicalTablesInvalidArgumentsSnafu, Result, TableInfoNotFoundSnafu,
|
||||
TableRouteNotFoundSnafu,
|
||||
};
|
||||
use crate::key::table_info::TableInfoValue;
|
||||
use crate::key::table_route::{PhysicalTableRouteValue, TableRouteManager, TableRouteValue};
|
||||
use crate::key::{DeserializedValueWithBytes, TableMetadataManagerRef};
|
||||
|
||||
/// [AlterLogicalTableValidator] validates the alter logical expressions.
|
||||
pub struct AlterLogicalTableValidator<'a> {
|
||||
physical_table_id: TableId,
|
||||
alters: Vec<&'a AlterTableExpr>,
|
||||
}
|
||||
|
||||
impl<'a> AlterLogicalTableValidator<'a> {
|
||||
pub fn new(physical_table_id: TableId, alters: Vec<&'a AlterTableExpr>) -> Self {
|
||||
Self {
|
||||
physical_table_id,
|
||||
alters,
|
||||
}
|
||||
}
|
||||
|
||||
/// Validates all alter table expressions have the same schema and catalog.
|
||||
fn validate_schema(&self) -> Result<()> {
|
||||
let is_same_schema = self.alters.windows(2).all(|pair| {
|
||||
pair[0].catalog_name == pair[1].catalog_name
|
||||
&& pair[0].schema_name == pair[1].schema_name
|
||||
});
|
||||
|
||||
ensure!(
|
||||
is_same_schema,
|
||||
AlterLogicalTablesInvalidArgumentsSnafu {
|
||||
err_msg: "Schemas of the alter table expressions are not the same"
|
||||
}
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Validates that all alter table expressions are of the supported kind.
|
||||
/// Currently only supports `AddColumns` operations.
|
||||
fn validate_alter_kind(&self) -> Result<()> {
|
||||
for alter in &self.alters {
|
||||
let kind = alter
|
||||
.kind
|
||||
.as_ref()
|
||||
.context(AlterLogicalTablesInvalidArgumentsSnafu {
|
||||
err_msg: "Alter kind is missing",
|
||||
})?;
|
||||
|
||||
let Kind::AddColumns(_) = kind else {
|
||||
return AlterLogicalTablesInvalidArgumentsSnafu {
|
||||
err_msg: "Only support add columns operation",
|
||||
}
|
||||
.fail();
|
||||
};
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn table_names(&self) -> Vec<TableReference> {
|
||||
self.alters
|
||||
.iter()
|
||||
.map(|alter| {
|
||||
TableReference::full(&alter.catalog_name, &alter.schema_name, &alter.table_name)
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Validates that the physical table info and route exist.
|
||||
///
|
||||
/// This method performs the following validations:
|
||||
/// 1. Retrieves the full table info and route for the given physical table id
|
||||
/// 2. Ensures the table info and table route exists
|
||||
/// 3. Verifies that the table route is actually a physical table route, not a logical one
|
||||
///
|
||||
/// Returns a tuple containing the validated table info and physical table route.
|
||||
async fn validate_physical_table(
|
||||
&self,
|
||||
table_metadata_manager: &TableMetadataManagerRef,
|
||||
) -> Result<(
|
||||
DeserializedValueWithBytes<TableInfoValue>,
|
||||
PhysicalTableRouteValue,
|
||||
)> {
|
||||
let (table_info, table_route) = table_metadata_manager
|
||||
.get_full_table_info(self.physical_table_id)
|
||||
.await?;
|
||||
|
||||
let table_info = table_info.with_context(|| TableInfoNotFoundSnafu {
|
||||
table: format!("table id - {}", self.physical_table_id),
|
||||
})?;
|
||||
|
||||
let physical_table_route = table_route
|
||||
.context(TableRouteNotFoundSnafu {
|
||||
table_id: self.physical_table_id,
|
||||
})?
|
||||
.into_inner();
|
||||
|
||||
let TableRouteValue::Physical(table_route) = physical_table_route else {
|
||||
return AlterLogicalTablesInvalidArgumentsSnafu {
|
||||
err_msg: format!(
|
||||
"expected a physical table but got a logical table: {:?}",
|
||||
self.physical_table_id
|
||||
),
|
||||
}
|
||||
.fail();
|
||||
};
|
||||
|
||||
Ok((table_info, table_route))
|
||||
}
|
||||
|
||||
/// Validates that all logical table routes have the same physical table id.
|
||||
///
|
||||
/// This method performs the following validations:
|
||||
/// 1. Retrieves table routes for all the given table ids.
|
||||
/// 2. Ensures that all retrieved routes are logical table routes (not physical)
|
||||
/// 3. Verifies that all logical table routes reference the same physical table id.
|
||||
/// 4. Returns an error if any route is not logical or references a different physical table.
|
||||
async fn validate_logical_table_routes(
|
||||
&self,
|
||||
table_route_manager: &TableRouteManager,
|
||||
table_ids: &[TableId],
|
||||
) -> Result<()> {
|
||||
let table_routes = table_route_manager
|
||||
.table_route_storage()
|
||||
.batch_get(table_ids)
|
||||
.await?;
|
||||
|
||||
let physical_table_id = self.physical_table_id;
|
||||
|
||||
let is_same_physical_table = table_routes.iter().all(|r| {
|
||||
if let Some(TableRouteValue::Logical(r)) = r {
|
||||
r.physical_table_id() == physical_table_id
|
||||
} else {
|
||||
false
|
||||
}
|
||||
});
|
||||
|
||||
ensure!(
|
||||
is_same_physical_table,
|
||||
AlterLogicalTablesInvalidArgumentsSnafu {
|
||||
err_msg: "All the tasks should have the same physical table id"
|
||||
}
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Validates the alter logical expressions.
|
||||
///
|
||||
/// This method performs the following validations:
|
||||
/// 1. Validates that all alter table expressions have the same schema and catalog.
|
||||
/// 2. Validates that all alter table expressions are of the supported kind.
|
||||
/// 3. Validates that the physical table info and route exist.
|
||||
/// 4. Validates that all logical table routes have the same physical table id.
|
||||
///
|
||||
/// Returns a [ValidatorResult] containing the validation results.
|
||||
pub async fn validate(
|
||||
&self,
|
||||
table_metadata_manager: &TableMetadataManagerRef,
|
||||
) -> Result<ValidatorResult> {
|
||||
self.validate_schema()?;
|
||||
self.validate_alter_kind()?;
|
||||
let (physical_table_info, physical_table_route) =
|
||||
self.validate_physical_table(table_metadata_manager).await?;
|
||||
let table_names = self.table_names();
|
||||
let table_ids =
|
||||
get_all_table_ids_by_names(table_metadata_manager.table_name_manager(), &table_names)
|
||||
.await?;
|
||||
let mut table_info_values = get_all_table_info_values_by_table_ids(
|
||||
table_metadata_manager.table_info_manager(),
|
||||
&table_ids,
|
||||
&table_names,
|
||||
)
|
||||
.await?;
|
||||
self.validate_logical_table_routes(
|
||||
table_metadata_manager.table_route_manager(),
|
||||
&table_ids,
|
||||
)
|
||||
.await?;
|
||||
let skip_alter = self
|
||||
.alters
|
||||
.iter()
|
||||
.zip(table_info_values.iter())
|
||||
.map(|(task, table)| skip_alter_logical_region(task, table))
|
||||
.collect::<Vec<_>>();
|
||||
retain_unskipped(&mut table_info_values, &skip_alter);
|
||||
let num_skipped = skip_alter.iter().filter(|&&x| x).count();
|
||||
|
||||
Ok(ValidatorResult {
|
||||
num_skipped,
|
||||
skip_alter,
|
||||
table_info_values,
|
||||
physical_table_info,
|
||||
physical_table_route,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// The result of the validator.
pub(crate) struct ValidatorResult {
    pub(crate) num_skipped: usize,
    pub(crate) skip_alter: Vec<bool>,
    pub(crate) table_info_values: Vec<DeserializedValueWithBytes<TableInfoValue>>,
    pub(crate) physical_table_info: DeserializedValueWithBytes<TableInfoValue>,
    pub(crate) physical_table_route: PhysicalTableRouteValue,
}

/// Retains the elements that are not skipped.
pub(crate) fn retain_unskipped<T>(target: &mut Vec<T>, skipped: &[bool]) {
    debug_assert_eq!(target.len(), skipped.len());
    let mut iter = skipped.iter();
    target.retain(|_| !iter.next().unwrap());
}

/// Returns true if the logical region does not need to be altered.
fn skip_alter_logical_region(alter: &AlterTableExpr, table: &TableInfoValue) -> bool {
    let existing_columns = table
        .table_info
        .meta
        .schema
        .column_schemas
        .iter()
        .map(|c| &c.name)
        .collect::<HashSet<_>>();

    let Some(kind) = alter.kind.as_ref() else {
        return true; // Never get here since we have checked it in `validate_alter_kind`
    };
    let Kind::AddColumns(add_columns) = kind else {
        return true; // Never get here since we have checked it in `validate_alter_kind`
    };

    // We only check that all columns have been finished. That is to say,
    // if one part is finished but another part is not, it will be considered
    // unfinished.
    add_columns
        .add_columns
        .iter()
        .map(|add_column| add_column.column_def.as_ref().map(|c| &c.name))
        .all(|column| {
            column
                .map(|c| existing_columns.contains(c))
                .unwrap_or(false)
        })
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_retain_unskipped() {
        let mut target = vec![1, 2, 3, 4, 5];
        let skipped = vec![false, true, false, true, false];
        retain_unskipped(&mut target, &skipped);
        assert_eq!(target, vec![1, 3, 5]);
    }
}

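retain_unskipped threads an external iterator over a boolean mask through Vec::retain, which is how the procedure keeps its tasks and table_info_values vectors aligned after dropping already-finished alters. A standalone sketch of the same pattern applied to two parallel vectors; the names below are invented for illustration:

fn retain_by_mask<T>(items: &mut Vec<T>, skip: &[bool]) {
    assert_eq!(items.len(), skip.len());
    let mut mask = skip.iter();
    // `retain` visits elements in order, so advancing the mask iterator once
    // per element keeps the two sequences in lockstep.
    items.retain(|_| !*mask.next().unwrap());
}

fn main() {
    let mut tasks = vec!["t1", "t2", "t3"];
    let mut infos = vec![10, 20, 30];
    let skip = vec![false, true, false];
    // Applying the same mask to both parallel vectors keeps them aligned.
    retain_by_mask(&mut tasks, &skip);
    retain_by_mask(&mut infos, &skip);
    assert_eq!(tasks, vec!["t1", "t3"]);
    assert_eq!(infos, vec![10, 30]);
}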
@@ -12,10 +12,9 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod check;
|
||||
mod executor;
|
||||
mod metadata;
|
||||
mod region_request;
|
||||
mod update_metadata;
|
||||
|
||||
use std::vec;
|
||||
|
||||
@@ -29,30 +28,29 @@ use common_procedure::{
|
||||
Context as ProcedureContext, ContextProvider, Error as ProcedureError, LockKey, PoisonKey,
|
||||
PoisonKeys, Procedure, ProcedureId, Status, StringKey,
|
||||
};
|
||||
use common_telemetry::{debug, error, info};
|
||||
use futures::future::{self};
|
||||
use common_telemetry::{error, info, warn};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{ensure, ResultExt};
|
||||
use store_api::storage::RegionId;
|
||||
use store_api::metadata::ColumnMetadata;
|
||||
use store_api::metric_engine_consts::TABLE_COLUMN_METADATA_EXTENSION_KEY;
|
||||
use strum::AsRefStr;
|
||||
use table::metadata::{RawTableInfo, TableId, TableInfo};
|
||||
use table::table_reference::TableReference;
|
||||
|
||||
use crate::cache_invalidator::Context;
|
||||
use crate::ddl::alter_table::executor::AlterTableExecutor;
|
||||
use crate::ddl::utils::{
|
||||
add_peer_context_if_needed, handle_multiple_results, map_to_procedure_error,
|
||||
extract_column_metadatas, handle_multiple_results, map_to_procedure_error,
|
||||
sync_follower_regions, MultipleResults,
|
||||
};
|
||||
use crate::ddl::DdlContext;
|
||||
use crate::error::{AbortProcedureSnafu, NoLeaderSnafu, PutPoisonSnafu, Result, RetryLaterSnafu};
|
||||
use crate::instruction::CacheIdent;
|
||||
use crate::key::table_info::TableInfoValue;
|
||||
use crate::key::{DeserializedValueWithBytes, RegionDistribution};
|
||||
use crate::lock_key::{CatalogLock, SchemaLock, TableLock, TableNameLock};
|
||||
use crate::metrics;
|
||||
use crate::poison_key::table_poison_key;
|
||||
use crate::rpc::ddl::AlterTableTask;
|
||||
use crate::rpc::router::{find_leader_regions, find_leaders, region_distribution, RegionRoute};
|
||||
use crate::rpc::router::{find_leaders, region_distribution, RegionRoute};
|
||||
|
||||
/// The alter table procedure
|
||||
pub struct AlterTableProcedure {
|
||||
@@ -64,6 +62,24 @@ pub struct AlterTableProcedure {
|
||||
/// If we recover the procedure from json, then the table info value is not cached.
|
||||
/// But we already validated it in the prepare step.
|
||||
new_table_info: Option<TableInfo>,
|
||||
/// The alter table executor.
|
||||
executor: AlterTableExecutor,
|
||||
}
|
||||
|
||||
/// Builds the executor from the [`AlterTableData`].
|
||||
///
|
||||
/// # Panics
|
||||
/// - If the alter kind is not set.
|
||||
fn build_executor_from_alter_expr(alter_data: &AlterTableData) -> AlterTableExecutor {
|
||||
let table_name = alter_data.table_ref().into();
|
||||
let table_id = alter_data.table_id;
|
||||
let alter_kind = alter_data.task.alter_table.kind.as_ref().unwrap();
|
||||
let new_table_name = if let Kind::RenameTable(RenameTable { new_table_name }) = alter_kind {
|
||||
Some(new_table_name.to_string())
|
||||
} else {
|
||||
None
|
||||
};
|
||||
AlterTableExecutor::new(table_name, table_id, new_table_name)
|
||||
}
|
||||
|
||||
impl AlterTableProcedure {
|
||||
@@ -71,33 +87,42 @@ impl AlterTableProcedure {
|
||||
|
||||
pub fn new(table_id: TableId, task: AlterTableTask, context: DdlContext) -> Result<Self> {
|
||||
task.validate()?;
|
||||
let data = AlterTableData::new(task, table_id);
|
||||
let executor = build_executor_from_alter_expr(&data);
|
||||
Ok(Self {
|
||||
context,
|
||||
data: AlterTableData::new(task, table_id),
|
||||
data,
|
||||
new_table_info: None,
|
||||
executor,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn from_json(json: &str, context: DdlContext) -> ProcedureResult<Self> {
|
||||
let data: AlterTableData = serde_json::from_str(json).context(FromJsonSnafu)?;
|
||||
let executor = build_executor_from_alter_expr(&data);
|
||||
|
||||
Ok(AlterTableProcedure {
|
||||
context,
|
||||
data,
|
||||
new_table_info: None,
|
||||
executor,
|
||||
})
|
||||
}
|
||||
|
||||
// Checks whether the table exists.
|
||||
pub(crate) async fn on_prepare(&mut self) -> Result<Status> {
|
||||
self.check_alter().await?;
|
||||
self.executor
|
||||
.on_prepare(&self.context.table_metadata_manager)
|
||||
.await?;
|
||||
self.fill_table_info().await?;
|
||||
|
||||
// Validates the request and builds the new table info.
|
||||
// We need to build the new table info here because we should ensure the alteration
|
||||
// is valid in `UpdateMeta` state as we already altered the region.
|
||||
// Safety: `fill_table_info()` already set it.
|
||||
// Safety: filled in `fill_table_info`.
|
||||
let table_info_value = self.data.table_info_value.as_ref().unwrap();
|
||||
self.new_table_info = Some(self.build_new_table_info(&table_info_value.table_info)?);
|
||||
let new_table_info = AlterTableExecutor::validate_alter_table_expr(
|
||||
&table_info_value.table_info,
|
||||
self.data.task.alter_table.clone(),
|
||||
)?;
|
||||
self.new_table_info = Some(new_table_info);
|
||||
|
||||
// Safety: Checked in `AlterTableProcedure::new`.
|
||||
let alter_kind = self.data.task.alter_table.kind.as_ref().unwrap();
|
||||
@@ -140,9 +165,7 @@ impl AlterTableProcedure {
|
||||
|
||||
self.data.region_distribution =
|
||||
Some(region_distribution(&physical_table_route.region_routes));
|
||||
|
||||
let leaders = find_leaders(&physical_table_route.region_routes);
|
||||
let mut alter_region_tasks = Vec::with_capacity(leaders.len());
|
||||
let alter_kind = self.make_region_alter_kind()?;
|
||||
|
||||
info!(
|
||||
@@ -155,31 +178,14 @@ impl AlterTableProcedure {
|
||||
ensure!(!leaders.is_empty(), NoLeaderSnafu { table_id });
|
||||
// Puts the poison before submitting alter region requests to datanodes.
|
||||
self.put_poison(ctx_provider, procedure_id).await?;
|
||||
for datanode in leaders {
|
||||
let requester = self.context.node_manager.datanode(&datanode).await;
|
||||
let regions = find_leader_regions(&physical_table_route.region_routes, &datanode);
|
||||
|
||||
for region in regions {
|
||||
let region_id = RegionId::new(table_id, region);
|
||||
let request = self.make_alter_region_request(region_id, alter_kind.clone())?;
|
||||
debug!("Submitting {request:?} to {datanode}");
|
||||
|
||||
let datanode = datanode.clone();
|
||||
let requester = requester.clone();
|
||||
|
||||
alter_region_tasks.push(async move {
|
||||
requester
|
||||
.handle(request)
|
||||
.await
|
||||
.map_err(add_peer_context_if_needed(datanode))
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
let results = future::join_all(alter_region_tasks)
|
||||
.await
|
||||
.into_iter()
|
||||
.collect::<Vec<_>>();
|
||||
let results = self
|
||||
.executor
|
||||
.on_alter_regions(
|
||||
&self.context.node_manager,
|
||||
&physical_table_route.region_routes,
|
||||
alter_kind,
|
||||
)
|
||||
.await;
|
||||
|
||||
match handle_multiple_results(results) {
|
||||
MultipleResults::PartialRetryable(error) => {
|
||||
@@ -202,9 +208,9 @@ impl AlterTableProcedure {
|
||||
})
|
||||
}
|
||||
MultipleResults::Ok(results) => {
|
||||
self.submit_sync_region_requests(results, &physical_table_route.region_routes)
|
||||
self.submit_sync_region_requests(&results, &physical_table_route.region_routes)
|
||||
.await;
|
||||
self.data.state = AlterTableState::UpdateMetadata;
|
||||
self.handle_alter_region_response(results)?;
|
||||
Ok(Status::executing_with_clean_poisons(true))
|
||||
}
|
||||
MultipleResults::AllNonRetryable(error) => {
|
||||
@@ -220,9 +226,22 @@ impl AlterTableProcedure {
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_alter_region_response(&mut self, mut results: Vec<RegionResponse>) -> Result<()> {
|
||||
self.data.state = AlterTableState::UpdateMetadata;
|
||||
if let Some(column_metadatas) =
|
||||
extract_column_metadatas(&mut results, TABLE_COLUMN_METADATA_EXTENSION_KEY)?
|
||||
{
|
||||
self.data.column_metadatas = column_metadatas;
|
||||
} else {
|
||||
warn!("altering table result doesn't contains extension key `{TABLE_COLUMN_METADATA_EXTENSION_KEY}`,leaving the table's column metadata unchanged");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn submit_sync_region_requests(
|
||||
&mut self,
|
||||
results: Vec<RegionResponse>,
|
||||
results: &[RegionResponse],
|
||||
region_routes: &[RegionRoute],
|
||||
) {
|
||||
// Safety: filled in `prepare` step.
|
||||
@@ -244,39 +263,34 @@ impl AlterTableProcedure {
|
||||
pub(crate) async fn on_update_metadata(&mut self) -> Result<Status> {
|
||||
let table_id = self.data.table_id();
|
||||
let table_ref = self.data.table_ref();
|
||||
// Safety: checked before.
|
||||
// Safety: filled in `fill_table_info`.
|
||||
let table_info_value = self.data.table_info_value.as_ref().unwrap();
|
||||
// Safety: Checked in `AlterTableProcedure::new`.
|
||||
let alter_kind = self.data.task.alter_table.kind.as_ref().unwrap();
|
||||
|
||||
// Gets the table info from the cache or builds it.
|
||||
let new_info = match &self.new_table_info {
|
||||
let new_info = match &self.new_table_info {
|
||||
Some(cached) => cached.clone(),
|
||||
None => self.build_new_table_info(&table_info_value.table_info)
|
||||
None => AlterTableExecutor::validate_alter_table_expr(
|
||||
&table_info_value.table_info,
|
||||
self.data.task.alter_table.clone(),
|
||||
)
|
||||
.inspect_err(|e| {
|
||||
// We already check the table info in the prepare step so this should not happen.
|
||||
error!(e; "Unable to build info for table {} in update metadata step, table_id: {}", table_ref, table_id);
|
||||
})?,
|
||||
};
|
||||
|
||||
debug!(
|
||||
"Starting update table: {} metadata, new table info {:?}",
|
||||
table_ref.to_string(),
|
||||
new_info
|
||||
);
|
||||
|
||||
// Safety: Checked in `AlterTableProcedure::new`.
|
||||
let alter_kind = self.data.task.alter_table.kind.as_ref().unwrap();
|
||||
if let Kind::RenameTable(RenameTable { new_table_name }) = alter_kind {
|
||||
self.on_update_metadata_for_rename(new_table_name.to_string(), table_info_value)
|
||||
.await?;
|
||||
} else {
|
||||
// region distribution is set in submit_alter_region_requests
|
||||
let region_distribution = self.data.region_distribution.as_ref().unwrap().clone();
|
||||
self.on_update_metadata_for_alter(
|
||||
new_info.into(),
|
||||
region_distribution,
|
||||
// Safety: region distribution is set in `submit_alter_region_requests`.
|
||||
self.executor
|
||||
.on_alter_metadata(
|
||||
&self.context.table_metadata_manager,
|
||||
table_info_value,
|
||||
self.data.region_distribution.as_ref(),
|
||||
new_info.into(),
|
||||
&self.data.column_metadatas,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
|
||||
info!("Updated table metadata for table {table_ref}, table_id: {table_id}, kind: {alter_kind:?}");
|
||||
self.data.state = AlterTableState::InvalidateTableCache;
|
||||
@@ -285,18 +299,9 @@ impl AlterTableProcedure {
|
||||
|
||||
/// Broadcasts the invalidating table cache instructions.
|
||||
async fn on_broadcast(&mut self) -> Result<Status> {
|
||||
let cache_invalidator = &self.context.cache_invalidator;
|
||||
|
||||
cache_invalidator
|
||||
.invalidate(
|
||||
&Context::default(),
|
||||
&[
|
||||
CacheIdent::TableId(self.data.table_id()),
|
||||
CacheIdent::TableName(self.data.table_ref().into()),
|
||||
],
|
||||
)
|
||||
self.executor
|
||||
.invalidate_table_cache(&self.context.cache_invalidator)
|
||||
.await?;
|
||||
|
||||
Ok(Status::done())
|
||||
}
|
||||
|
||||
@@ -318,6 +323,16 @@ impl AlterTableProcedure {
|
||||
|
||||
lock_key
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn data(&self) -> &AlterTableData {
|
||||
&self.data
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn mut_data(&mut self) -> &mut AlterTableData {
|
||||
&mut self.data
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
@@ -380,6 +395,8 @@ pub struct AlterTableData {
|
||||
state: AlterTableState,
|
||||
task: AlterTableTask,
|
||||
table_id: TableId,
|
||||
#[serde(default)]
|
||||
column_metadatas: Vec<ColumnMetadata>,
|
||||
/// Table info value before alteration.
|
||||
table_info_value: Option<DeserializedValueWithBytes<TableInfoValue>>,
|
||||
/// Region distribution for table in case we need to update region options.
|
||||
@@ -392,6 +409,7 @@ impl AlterTableData {
|
||||
state: AlterTableState::Prepare,
|
||||
task,
|
||||
table_id,
|
||||
column_metadatas: vec![],
|
||||
table_info_value: None,
|
||||
region_distribution: None,
|
||||
}
|
||||
@@ -410,4 +428,14 @@ impl AlterTableData {
|
||||
.as_ref()
|
||||
.map(|value| &value.table_info)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn column_metadatas(&self) -> &[ColumnMetadata] {
|
||||
&self.column_metadatas
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn set_column_metadatas(&mut self, column_metadatas: Vec<ColumnMetadata>) {
|
||||
self.column_metadatas = column_metadatas;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,62 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use api::v1::alter_table_expr::Kind;
|
||||
use api::v1::RenameTable;
|
||||
use common_catalog::format_full_table_name;
|
||||
use snafu::ensure;
|
||||
|
||||
use crate::ddl::alter_table::AlterTableProcedure;
|
||||
use crate::error::{self, Result};
|
||||
use crate::key::table_name::TableNameKey;
|
||||
|
||||
impl AlterTableProcedure {
|
||||
/// Checks:
|
||||
/// - The new table name doesn't exist (rename).
|
||||
/// - Table exists.
|
||||
pub(crate) async fn check_alter(&self) -> Result<()> {
|
||||
let alter_expr = &self.data.task.alter_table;
|
||||
let catalog = &alter_expr.catalog_name;
|
||||
let schema = &alter_expr.schema_name;
|
||||
let table_name = &alter_expr.table_name;
|
||||
// Safety: Checked in `AlterTableProcedure::new`.
|
||||
let alter_kind = self.data.task.alter_table.kind.as_ref().unwrap();
|
||||
|
||||
let manager = &self.context.table_metadata_manager;
|
||||
if let Kind::RenameTable(RenameTable { new_table_name }) = alter_kind {
|
||||
let new_table_name_key = TableNameKey::new(catalog, schema, new_table_name);
|
||||
let exists = manager
|
||||
.table_name_manager()
|
||||
.exists(new_table_name_key)
|
||||
.await?;
|
||||
ensure!(
|
||||
!exists,
|
||||
error::TableAlreadyExistsSnafu {
|
||||
table_name: format_full_table_name(catalog, schema, new_table_name),
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
let table_name_key = TableNameKey::new(catalog, schema, table_name);
|
||||
let exists = manager.table_name_manager().exists(table_name_key).await?;
|
||||
ensure!(
|
||||
exists,
|
||||
error::TableNotFoundSnafu {
|
||||
table_name: format_full_table_name(catalog, schema, &alter_expr.table_name),
|
||||
}
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
308 src/common/meta/src/ddl/alter_table/executor.rs Normal file
@@ -0,0 +1,308 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use api::region::RegionResponse;
|
||||
use api::v1::region::region_request::Body;
|
||||
use api::v1::region::{alter_request, AlterRequest, RegionRequest, RegionRequestHeader};
|
||||
use api::v1::AlterTableExpr;
|
||||
use common_catalog::format_full_table_name;
|
||||
use common_grpc_expr::alter_expr_to_request;
|
||||
use common_telemetry::debug;
|
||||
use common_telemetry::tracing_context::TracingContext;
|
||||
use futures::future;
|
||||
use snafu::{ensure, ResultExt};
|
||||
use store_api::metadata::ColumnMetadata;
|
||||
use store_api::storage::{RegionId, TableId};
|
||||
use table::metadata::{RawTableInfo, TableInfo};
|
||||
use table::requests::AlterKind;
|
||||
use table::table_name::TableName;
|
||||
|
||||
use crate::cache_invalidator::{CacheInvalidatorRef, Context};
|
||||
use crate::ddl::utils::{add_peer_context_if_needed, raw_table_info};
|
||||
use crate::error::{self, Result, UnexpectedSnafu};
|
||||
use crate::instruction::CacheIdent;
|
||||
use crate::key::table_info::TableInfoValue;
|
||||
use crate::key::table_name::TableNameKey;
|
||||
use crate::key::{DeserializedValueWithBytes, RegionDistribution, TableMetadataManagerRef};
|
||||
use crate::node_manager::NodeManagerRef;
|
||||
use crate::rpc::router::{find_leaders, region_distribution, RegionRoute};
|
||||
|
||||
/// [AlterTableExecutor] performs:
|
||||
/// - Alters the metadata of the table.
|
||||
/// - Alters regions on the datanode nodes.
|
||||
pub struct AlterTableExecutor {
|
||||
table: TableName,
|
||||
table_id: TableId,
|
||||
/// The new table name if the alter kind is rename table.
|
||||
new_table_name: Option<String>,
|
||||
}
|
||||
|
||||
impl AlterTableExecutor {
|
||||
/// Creates a new [`AlterTableExecutor`].
|
||||
pub fn new(table: TableName, table_id: TableId, new_table_name: Option<String>) -> Self {
|
||||
Self {
|
||||
table,
|
||||
table_id,
|
||||
new_table_name,
|
||||
}
|
||||
}
|
||||
|
||||
/// Prepares to alter the table.
|
||||
///
|
||||
/// ## Checks:
|
||||
/// - The new table name doesn't exist (rename).
|
||||
/// - Table exists.
|
||||
pub(crate) async fn on_prepare(
|
||||
&self,
|
||||
table_metadata_manager: &TableMetadataManagerRef,
|
||||
) -> Result<()> {
|
||||
let catalog = &self.table.catalog_name;
|
||||
let schema = &self.table.schema_name;
|
||||
let table_name = &self.table.table_name;
|
||||
|
||||
let manager = table_metadata_manager;
|
||||
if let Some(new_table_name) = &self.new_table_name {
|
||||
let new_table_name_key = TableNameKey::new(catalog, schema, new_table_name);
|
||||
let exists = manager
|
||||
.table_name_manager()
|
||||
.exists(new_table_name_key)
|
||||
.await?;
|
||||
ensure!(
|
||||
!exists,
|
||||
error::TableAlreadyExistsSnafu {
|
||||
table_name: format_full_table_name(catalog, schema, new_table_name),
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
let table_name_key = TableNameKey::new(catalog, schema, table_name);
|
||||
let exists = manager.table_name_manager().exists(table_name_key).await?;
|
||||
ensure!(
|
||||
exists,
|
||||
error::TableNotFoundSnafu {
|
||||
table_name: format_full_table_name(catalog, schema, table_name),
|
||||
}
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Validates the alter table expression and builds the new table info.
|
||||
///
|
||||
/// This validation is performed early to ensure the alteration is valid before
|
||||
/// proceeding to the `on_alter_metadata` state, where regions have already been altered.
|
||||
/// Building the new table info here allows us to catch any issues with the
|
||||
/// alteration before committing metadata changes.
|
||||
pub(crate) fn validate_alter_table_expr(
|
||||
table_info: &RawTableInfo,
|
||||
alter_table_expr: AlterTableExpr,
|
||||
) -> Result<TableInfo> {
|
||||
build_new_table_info(table_info, alter_table_expr)
|
||||
}
|
||||
|
||||
/// Updates table metadata for alter table operation.
pub(crate) async fn on_alter_metadata(
&self,
table_metadata_manager: &TableMetadataManagerRef,
current_table_info_value: &DeserializedValueWithBytes<TableInfoValue>,
region_distribution: Option<&RegionDistribution>,
mut raw_table_info: RawTableInfo,
column_metadatas: &[ColumnMetadata],
) -> Result<()> {
let table_ref = self.table.table_ref();
let table_id = self.table_id;

if let Some(new_table_name) = &self.new_table_name {
debug!(
"Starting update table: {} metadata, table_id: {}, new table info: {:?}, new table name: {}",
table_ref, table_id, raw_table_info, new_table_name
);

table_metadata_manager
.rename_table(current_table_info_value, new_table_name.to_string())
.await?;
} else {
debug!(
"Starting update table: {} metadata, table_id: {}, new table info: {:?}",
table_ref, table_id, raw_table_info
);

ensure!(
region_distribution.is_some(),
UnexpectedSnafu {
err_msg: "region distribution is not set when updating table metadata",
}
);

if !column_metadatas.is_empty() {
raw_table_info::update_table_info_column_ids(&mut raw_table_info, column_metadatas);
}
table_metadata_manager
.update_table_info(
current_table_info_value,
region_distribution.cloned(),
raw_table_info,
)
.await?;
}

Ok(())
}
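For reference, `update_table_info_column_ids` lives in `crate::ddl::utils::raw_table_info` and is not shown in this diff. Judging from the `column_ids` assertions in the tests further down, a plausible sketch (an assumption, not the actual implementation) is:

// Hypothetical sketch: mirror the datanode-reported column ids into the table info.
fn update_table_info_column_ids_sketch(
    table_info: &mut RawTableInfo,
    column_metadatas: &[ColumnMetadata],
) {
    table_info.meta.column_ids = column_metadatas.iter().map(|c| c.column_id).collect();
}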
|
||||
|
||||
/// Alters regions on the datanode nodes.
pub(crate) async fn on_alter_regions(
&self,
node_manager: &NodeManagerRef,
region_routes: &[RegionRoute],
kind: Option<alter_request::Kind>,
) -> Vec<Result<RegionResponse>> {
let region_distribution = region_distribution(region_routes);
let leaders = find_leaders(region_routes)
.into_iter()
.map(|p| (p.id, p))
.collect::<HashMap<_, _>>();
let total_num_region = region_distribution
.values()
.map(|r| r.leader_regions.len())
.sum::<usize>();
let mut alter_region_tasks = Vec::with_capacity(total_num_region);
for (datanode_id, region_role_set) in region_distribution {
if region_role_set.leader_regions.is_empty() {
continue;
}
// Safety: must exist.
let peer = leaders.get(&datanode_id).unwrap();
let requester = node_manager.datanode(peer).await;

for region_id in region_role_set.leader_regions {
let region_id = RegionId::new(self.table_id, region_id);
let request = make_alter_region_request(region_id, kind.clone());

let requester = requester.clone();
let peer = peer.clone();

alter_region_tasks.push(async move {
requester
.handle(request)
.await
.map_err(add_peer_context_if_needed(peer))
});
}
}

future::join_all(alter_region_tasks)
.await
.into_iter()
.collect::<Vec<_>>()
}
|
||||
|
||||
/// Invalidates cache for the table.
|
||||
pub(crate) async fn invalidate_table_cache(
|
||||
&self,
|
||||
cache_invalidator: &CacheInvalidatorRef,
|
||||
) -> Result<()> {
|
||||
let ctx = Context {
|
||||
subject: Some(format!(
|
||||
"Invalidate table cache by altering table {}, table_id: {}",
|
||||
self.table.table_ref(),
|
||||
self.table_id,
|
||||
)),
|
||||
};
|
||||
|
||||
cache_invalidator
|
||||
.invalidate(
|
||||
&ctx,
|
||||
&[
|
||||
CacheIdent::TableName(self.table.clone()),
|
||||
CacheIdent::TableId(self.table_id),
|
||||
],
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Makes alter region request.
pub(crate) fn make_alter_region_request(
region_id: RegionId,
kind: Option<alter_request::Kind>,
) -> RegionRequest {
RegionRequest {
header: Some(RegionRequestHeader {
tracing_context: TracingContext::from_current_span().to_w3c(),
..Default::default()
}),
body: Some(Body::Alter(AlterRequest {
region_id: region_id.as_u64(),
kind,
..Default::default()
})),
}
}
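A minimal usage sketch of the free function above; `table_id`, `region_number`, and `alter_kind` are placeholders for values the procedure already holds, not identifiers from this diff:

// Hypothetical usage: one request per region, sharing the same alter kind.
let region_id = RegionId::new(table_id, region_number);
let request = make_alter_region_request(region_id, alter_kind.clone());
assert!(matches!(request.body, Some(Body::Alter(_))));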
|
||||
|
||||
/// Builds new table info after alteration.
///
/// This function creates a new table info by applying the alter table expression
/// to the existing table info. For add column operations, it increments the
/// `next_column_id` by the number of columns being added, which may result in gaps
/// in the column id sequence.
fn build_new_table_info(
table_info: &RawTableInfo,
alter_table_expr: AlterTableExpr,
) -> Result<TableInfo> {
let table_info =
TableInfo::try_from(table_info.clone()).context(error::ConvertRawTableInfoSnafu)?;
let schema_name = &table_info.schema_name;
let catalog_name = &table_info.catalog_name;
let table_name = &table_info.name;
let table_id = table_info.ident.table_id;
let request = alter_expr_to_request(table_id, alter_table_expr)
.context(error::ConvertAlterTableRequestSnafu)?;

let new_meta = table_info
.meta
.builder_with_alter_kind(table_name, &request.alter_kind)
.context(error::TableSnafu)?
.build()
.with_context(|_| error::BuildTableMetaSnafu {
table_name: format_full_table_name(catalog_name, schema_name, table_name),
})?;

let mut new_info = table_info.clone();
new_info.meta = new_meta;
new_info.ident.version = table_info.ident.version + 1;
match request.alter_kind {
AlterKind::AddColumns { columns } => {
// Bumps the column id for the new columns.
// It may bump more than the actual number of columns added if there are
// existing columns, but it's fine.
new_info.meta.next_column_id += columns.len() as u32;
}
AlterKind::RenameTable { new_table_name } => {
new_info.name = new_table_name.to_string();
}
AlterKind::DropColumns { .. }
| AlterKind::ModifyColumnTypes { .. }
| AlterKind::SetTableOptions { .. }
| AlterKind::UnsetTableOptions { .. }
| AlterKind::SetIndex { .. }
| AlterKind::UnsetIndex { .. }
| AlterKind::DropDefaults { .. } => {}
}

Ok(new_info)
}
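As a concrete illustration of the bookkeeping above (hypothetical values, not a test from this commit): a table at schema version 3 with `next_column_id` 5 that receives an AddColumns expression with two columns comes back with version 4 and `next_column_id` 7, even if one of those columns already existed, which is why gaps in the column id sequence can appear.

// Hypothetical values; `raw_table_info` and `add_two_columns_expr` are assumed inputs.
let new_info = build_new_table_info(&raw_table_info, add_two_columns_expr)?;
assert_eq!(new_info.ident.version, 4);
assert_eq!(new_info.meta.next_column_id, 7);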
|
||||
@@ -15,43 +15,16 @@
|
||||
use std::collections::HashSet;
|
||||
|
||||
use api::v1::alter_table_expr::Kind;
|
||||
use api::v1::region::region_request::Body;
|
||||
use api::v1::region::{
|
||||
alter_request, AddColumn, AddColumns, AlterRequest, DropColumn, DropColumns, RegionColumnDef,
|
||||
RegionRequest, RegionRequestHeader,
|
||||
alter_request, AddColumn, AddColumns, DropColumn, DropColumns, RegionColumnDef,
|
||||
};
|
||||
use common_telemetry::tracing_context::TracingContext;
|
||||
use snafu::OptionExt;
|
||||
use store_api::storage::RegionId;
|
||||
use table::metadata::RawTableInfo;
|
||||
|
||||
use crate::ddl::alter_table::AlterTableProcedure;
|
||||
use crate::error::{InvalidProtoMsgSnafu, Result};
|
||||
|
||||
impl AlterTableProcedure {
|
||||
/// Makes an alter region request from an existing alter kind.
/// The region alter request always adds columns if they do not exist.
|
||||
pub(crate) fn make_alter_region_request(
|
||||
&self,
|
||||
region_id: RegionId,
|
||||
kind: Option<alter_request::Kind>,
|
||||
) -> Result<RegionRequest> {
|
||||
// Safety: checked
|
||||
let table_info = self.data.table_info().unwrap();
|
||||
|
||||
Ok(RegionRequest {
|
||||
header: Some(RegionRequestHeader {
|
||||
tracing_context: TracingContext::from_current_span().to_w3c(),
|
||||
..Default::default()
|
||||
}),
|
||||
body: Some(Body::Alter(AlterRequest {
|
||||
region_id: region_id.as_u64(),
|
||||
schema_version: table_info.ident.version,
|
||||
kind,
|
||||
})),
|
||||
})
|
||||
}
|
||||
|
||||
/// Makes an alter kind proto that all regions can reuse.
/// The region alter request always adds columns if they do not exist.
|
||||
pub(crate) fn make_region_alter_kind(&self) -> Result<Option<alter_request::Kind>> {
|
||||
@@ -155,6 +128,7 @@ mod tests {
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use store_api::storage::{RegionId, TableId};
|
||||
|
||||
use crate::ddl::alter_table::executor::make_alter_region_request;
|
||||
use crate::ddl::alter_table::AlterTableProcedure;
|
||||
use crate::ddl::test_util::columns::TestColumnDefBuilder;
|
||||
use crate::ddl::test_util::create_table::{
|
||||
@@ -261,15 +235,13 @@ mod tests {
|
||||
let mut procedure = AlterTableProcedure::new(table_id, task, ddl_context).unwrap();
|
||||
procedure.on_prepare().await.unwrap();
|
||||
let alter_kind = procedure.make_region_alter_kind().unwrap();
|
||||
let Some(Body::Alter(alter_region_request)) = procedure
|
||||
.make_alter_region_request(region_id, alter_kind)
|
||||
.unwrap()
|
||||
.body
|
||||
let Some(Body::Alter(alter_region_request)) =
|
||||
make_alter_region_request(region_id, alter_kind).body
|
||||
else {
|
||||
unreachable!()
|
||||
};
|
||||
assert_eq!(alter_region_request.region_id, region_id.as_u64());
|
||||
assert_eq!(alter_region_request.schema_version, 1);
|
||||
assert_eq!(alter_region_request.schema_version, 0);
|
||||
assert_eq!(
|
||||
alter_region_request.kind,
|
||||
Some(region::alter_request::Kind::AddColumns(
|
||||
@@ -319,15 +291,13 @@ mod tests {
|
||||
let mut procedure = AlterTableProcedure::new(table_id, task, ddl_context).unwrap();
|
||||
procedure.on_prepare().await.unwrap();
|
||||
let alter_kind = procedure.make_region_alter_kind().unwrap();
|
||||
let Some(Body::Alter(alter_region_request)) = procedure
|
||||
.make_alter_region_request(region_id, alter_kind)
|
||||
.unwrap()
|
||||
.body
|
||||
let Some(Body::Alter(alter_region_request)) =
|
||||
make_alter_region_request(region_id, alter_kind).body
|
||||
else {
|
||||
unreachable!()
|
||||
};
|
||||
assert_eq!(alter_region_request.region_id, region_id.as_u64());
|
||||
assert_eq!(alter_region_request.schema_version, 1);
|
||||
assert_eq!(alter_region_request.schema_version, 0);
|
||||
assert_eq!(
|
||||
alter_region_request.kind,
|
||||
Some(region::alter_request::Kind::ModifyColumnTypes(
|
||||
|
||||
@@ -1,103 +0,0 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_grpc_expr::alter_expr_to_request;
|
||||
use snafu::ResultExt;
|
||||
use table::metadata::{RawTableInfo, TableInfo};
|
||||
use table::requests::AlterKind;
|
||||
|
||||
use crate::ddl::alter_table::AlterTableProcedure;
|
||||
use crate::error::{self, Result};
|
||||
use crate::key::table_info::TableInfoValue;
|
||||
use crate::key::{DeserializedValueWithBytes, RegionDistribution};
|
||||
|
||||
impl AlterTableProcedure {
|
||||
/// Builds new table info after alteration.
|
||||
/// It bumps the column id of the table by the number of the add column requests.
|
||||
/// So there may be holes in the column id sequence.
|
||||
pub(crate) fn build_new_table_info(&self, table_info: &RawTableInfo) -> Result<TableInfo> {
|
||||
let table_info =
|
||||
TableInfo::try_from(table_info.clone()).context(error::ConvertRawTableInfoSnafu)?;
|
||||
let table_ref = self.data.table_ref();
|
||||
let alter_expr = self.data.task.alter_table.clone();
|
||||
let request = alter_expr_to_request(self.data.table_id(), alter_expr)
|
||||
.context(error::ConvertAlterTableRequestSnafu)?;
|
||||
|
||||
let new_meta = table_info
|
||||
.meta
|
||||
.builder_with_alter_kind(table_ref.table, &request.alter_kind)
|
||||
.context(error::TableSnafu)?
|
||||
.build()
|
||||
.with_context(|_| error::BuildTableMetaSnafu {
|
||||
table_name: table_ref.table,
|
||||
})?;
|
||||
|
||||
let mut new_info = table_info.clone();
|
||||
new_info.meta = new_meta;
|
||||
new_info.ident.version = table_info.ident.version + 1;
|
||||
match request.alter_kind {
|
||||
AlterKind::AddColumns { columns } => {
|
||||
// Bumps the column id for the new columns.
|
||||
// It may bump more than the actual number of columns added if there are
|
||||
// existing columns, but it's fine.
|
||||
new_info.meta.next_column_id += columns.len() as u32;
|
||||
}
|
||||
AlterKind::RenameTable { new_table_name } => {
|
||||
new_info.name = new_table_name.to_string();
|
||||
}
|
||||
AlterKind::DropColumns { .. }
|
||||
| AlterKind::ModifyColumnTypes { .. }
|
||||
| AlterKind::SetTableOptions { .. }
|
||||
| AlterKind::UnsetTableOptions { .. }
|
||||
| AlterKind::SetIndex { .. }
|
||||
| AlterKind::UnsetIndex { .. }
|
||||
| AlterKind::DropDefaults { .. } => {}
|
||||
}
|
||||
|
||||
Ok(new_info)
|
||||
}
|
||||
|
||||
/// Updates table metadata for rename table operation.
|
||||
pub(crate) async fn on_update_metadata_for_rename(
|
||||
&self,
|
||||
new_table_name: String,
|
||||
current_table_info_value: &DeserializedValueWithBytes<TableInfoValue>,
|
||||
) -> Result<()> {
|
||||
let table_metadata_manager = &self.context.table_metadata_manager;
|
||||
table_metadata_manager
|
||||
.rename_table(current_table_info_value, new_table_name)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Updates table metadata for alter table operation.
|
||||
pub(crate) async fn on_update_metadata_for_alter(
|
||||
&self,
|
||||
new_table_info: RawTableInfo,
|
||||
region_distribution: RegionDistribution,
|
||||
current_table_info_value: &DeserializedValueWithBytes<TableInfoValue>,
|
||||
) -> Result<()> {
|
||||
let table_metadata_manager = &self.context.table_metadata_manager;
|
||||
table_metadata_manager
|
||||
.update_table_info(
|
||||
current_table_info_value,
|
||||
Some(region_distribution),
|
||||
new_table_info,
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -167,6 +167,25 @@ impl CreateFlowProcedure {
|
||||
}
|
||||
|
||||
self.collect_source_tables().await?;
|
||||
|
||||
// Validate that source and sink tables are not the same
|
||||
let sink_table_name = &self.data.task.sink_table_name;
|
||||
if self
|
||||
.data
|
||||
.task
|
||||
.source_table_names
|
||||
.iter()
|
||||
.any(|source| source == sink_table_name)
|
||||
{
|
||||
return error::UnsupportedSnafu {
|
||||
operation: format!(
|
||||
"Creating flow with source and sink table being the same: {}",
|
||||
sink_table_name
|
||||
),
|
||||
}
|
||||
.fail();
|
||||
}
|
||||
|
||||
if self.data.flow_id.is_none() {
|
||||
self.allocate_flow_id().await?;
|
||||
}
|
||||
|
||||
@@ -27,7 +27,7 @@ use common_telemetry::{debug, error, warn};
|
||||
use futures::future;
|
||||
pub use region_request::create_region_request_builder;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{ensure, ResultExt};
|
||||
use snafu::ResultExt;
|
||||
use store_api::metadata::ColumnMetadata;
|
||||
use store_api::metric_engine_consts::ALTER_PHYSICAL_EXTENSION_KEY;
|
||||
use store_api::storage::{RegionId, RegionNumber};
|
||||
@@ -35,10 +35,11 @@ use strum::AsRefStr;
|
||||
use table::metadata::{RawTableInfo, TableId};
|
||||
|
||||
use crate::ddl::utils::{
|
||||
add_peer_context_if_needed, map_to_procedure_error, sync_follower_regions,
|
||||
add_peer_context_if_needed, extract_column_metadatas, map_to_procedure_error,
|
||||
sync_follower_regions,
|
||||
};
|
||||
use crate::ddl::DdlContext;
|
||||
use crate::error::{DecodeJsonSnafu, MetadataCorruptionSnafu, Result};
|
||||
use crate::error::Result;
|
||||
use crate::key::table_route::TableRouteValue;
|
||||
use crate::lock_key::{CatalogLock, SchemaLock, TableLock, TableNameLock};
|
||||
use crate::metrics;
|
||||
@@ -166,47 +167,23 @@ impl CreateLogicalTablesProcedure {
|
||||
.into_iter()
|
||||
.collect::<Result<Vec<_>>>()?;
|
||||
|
||||
// Collects response from datanodes.
|
||||
let phy_raw_schemas = results
|
||||
.iter_mut()
|
||||
.map(|res| res.extensions.remove(ALTER_PHYSICAL_EXTENSION_KEY))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
if phy_raw_schemas.is_empty() {
|
||||
self.submit_sync_region_requests(results, region_routes)
|
||||
.await;
|
||||
self.data.state = CreateTablesState::CreateMetadata;
|
||||
return Ok(Status::executing(false));
|
||||
}
|
||||
|
||||
// Verify all the physical schemas are the same
|
||||
// Safety: previous check ensures this vec is not empty
|
||||
let first = phy_raw_schemas.first().unwrap();
|
||||
ensure!(
|
||||
phy_raw_schemas.iter().all(|x| x == first),
|
||||
MetadataCorruptionSnafu {
|
||||
err_msg: "The physical schemas from datanodes are not the same."
|
||||
}
|
||||
);
|
||||
|
||||
// Decodes the physical raw schemas
|
||||
if let Some(phy_raw_schemas) = first {
|
||||
self.data.physical_columns =
|
||||
ColumnMetadata::decode_list(phy_raw_schemas).context(DecodeJsonSnafu)?;
|
||||
if let Some(column_metadatas) =
|
||||
extract_column_metadatas(&mut results, ALTER_PHYSICAL_EXTENSION_KEY)?
|
||||
{
|
||||
self.data.physical_columns = column_metadatas;
|
||||
} else {
|
||||
warn!("creating logical table result doesn't contains extension key `{ALTER_PHYSICAL_EXTENSION_KEY}`,leaving the physical table's schema unchanged");
|
||||
}
|
||||
|
||||
self.submit_sync_region_requests(results, region_routes)
|
||||
self.submit_sync_region_requests(&results, region_routes)
|
||||
.await;
|
||||
self.data.state = CreateTablesState::CreateMetadata;
|
||||
|
||||
Ok(Status::executing(true))
|
||||
}
|
||||
|
||||
async fn submit_sync_region_requests(
|
||||
&self,
|
||||
results: Vec<RegionResponse>,
|
||||
results: &[RegionResponse],
|
||||
region_routes: &[RegionRoute],
|
||||
) {
|
||||
if let Err(err) = sync_follower_regions(
|
||||
|
||||
@@ -22,7 +22,7 @@ use table::table_name::TableName;
|
||||
|
||||
use crate::cache_invalidator::Context;
|
||||
use crate::ddl::create_logical_tables::CreateLogicalTablesProcedure;
|
||||
use crate::ddl::physical_table_metadata;
|
||||
use crate::ddl::utils::raw_table_info;
|
||||
use crate::error::{Result, TableInfoNotFoundSnafu};
|
||||
use crate::instruction::CacheIdent;
|
||||
|
||||
@@ -47,7 +47,7 @@ impl CreateLogicalTablesProcedure {
|
||||
// Generates new table info
|
||||
let raw_table_info = physical_table_info.deref().table_info.clone();
|
||||
|
||||
let new_table_info = physical_table_metadata::build_new_physical_table_info(
|
||||
let new_table_info = raw_table_info::build_new_physical_table_info(
|
||||
raw_table_info,
|
||||
&self.data.physical_columns,
|
||||
);
|
||||
|
||||
@@ -22,20 +22,23 @@ use common_procedure::error::{
|
||||
ExternalSnafu, FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu,
|
||||
};
|
||||
use common_procedure::{Context as ProcedureContext, LockKey, Procedure, Status};
|
||||
use common_telemetry::info;
|
||||
use common_telemetry::tracing_context::TracingContext;
|
||||
use common_telemetry::{info, warn};
|
||||
use futures::future::join_all;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use snafu::{ensure, OptionExt, ResultExt};
|
||||
use store_api::metadata::ColumnMetadata;
|
||||
use store_api::metric_engine_consts::TABLE_COLUMN_METADATA_EXTENSION_KEY;
|
||||
use store_api::storage::{RegionId, RegionNumber};
|
||||
use strum::AsRefStr;
|
||||
use table::metadata::{RawTableInfo, TableId};
|
||||
use table::table_reference::TableReference;
|
||||
|
||||
use crate::ddl::create_table_template::{build_template, CreateRequestBuilder};
|
||||
use crate::ddl::utils::raw_table_info::update_table_info_column_ids;
|
||||
use crate::ddl::utils::{
|
||||
add_peer_context_if_needed, convert_region_routes_to_detecting_regions, map_to_procedure_error,
|
||||
region_storage_path,
|
||||
add_peer_context_if_needed, convert_region_routes_to_detecting_regions,
|
||||
extract_column_metadatas, map_to_procedure_error, region_storage_path,
|
||||
};
|
||||
use crate::ddl::{DdlContext, TableMetadata};
|
||||
use crate::error::{self, Result};
|
||||
@@ -243,14 +246,21 @@ impl CreateTableProcedure {
|
||||
}
|
||||
}
|
||||
|
||||
join_all(create_region_tasks)
|
||||
self.creator.data.state = CreateTableState::CreateMetadata;
|
||||
|
||||
let mut results = join_all(create_region_tasks)
|
||||
.await
|
||||
.into_iter()
|
||||
.collect::<Result<Vec<_>>>()?;
|
||||
|
||||
self.creator.data.state = CreateTableState::CreateMetadata;
|
||||
if let Some(column_metadatas) =
|
||||
extract_column_metadatas(&mut results, TABLE_COLUMN_METADATA_EXTENSION_KEY)?
|
||||
{
|
||||
self.creator.data.column_metadatas = column_metadatas;
|
||||
} else {
|
||||
warn!("creating table result doesn't contains extension key `{TABLE_COLUMN_METADATA_EXTENSION_KEY}`,leaving the table's column metadata unchanged");
|
||||
}
|
||||
|
||||
// TODO(weny): Add more tests.
|
||||
Ok(Status::executing(true))
|
||||
}
|
||||
|
||||
@@ -262,7 +272,10 @@ impl CreateTableProcedure {
|
||||
let table_id = self.table_id();
|
||||
let manager = &self.context.table_metadata_manager;
|
||||
|
||||
let raw_table_info = self.table_info().clone();
|
||||
let mut raw_table_info = self.table_info().clone();
|
||||
if !self.creator.data.column_metadatas.is_empty() {
|
||||
update_table_info_column_ids(&mut raw_table_info, &self.creator.data.column_metadatas);
|
||||
}
|
||||
// Safety: the region_wal_options must be allocated.
|
||||
let region_wal_options = self.region_wal_options()?.clone();
|
||||
// Safety: the table_route must be allocated.
|
||||
@@ -346,6 +359,7 @@ impl TableCreator {
|
||||
Self {
|
||||
data: CreateTableData {
|
||||
state: CreateTableState::Prepare,
|
||||
column_metadatas: vec![],
|
||||
task,
|
||||
table_route: None,
|
||||
region_wal_options: None,
|
||||
@@ -407,6 +421,8 @@ pub enum CreateTableState {
|
||||
pub struct CreateTableData {
|
||||
pub state: CreateTableState,
|
||||
pub task: CreateTableTask,
|
||||
#[serde(default)]
|
||||
pub column_metadatas: Vec<ColumnMetadata>,
|
||||
/// None stands for not allocated yet.
|
||||
table_route: Option<PhysicalTableRouteValue>,
|
||||
/// None stands for not allocated yet.
|
||||
|
||||
@@ -185,11 +185,15 @@ impl DropTableExecutor {
|
||||
.await
|
||||
}
|
||||
|
||||
/// Invalidates frontend caches
|
||||
/// Invalidates caches for the table.
|
||||
pub async fn invalidate_table_cache(&self, ctx: &DdlContext) -> Result<()> {
|
||||
let cache_invalidator = &ctx.cache_invalidator;
|
||||
let ctx = Context {
|
||||
subject: Some("Invalidate table cache by dropping table".to_string()),
|
||||
subject: Some(format!(
|
||||
"Invalidate table cache by dropping table {}, table_id: {}",
|
||||
self.table.table_ref(),
|
||||
self.table_id,
|
||||
)),
|
||||
};
|
||||
|
||||
cache_invalidator
|
||||
|
||||
@@ -122,6 +122,7 @@ impl TableMetadataAllocator {
|
||||
);
|
||||
|
||||
let peers = self.peer_allocator.alloc(regions).await?;
|
||||
debug!("Allocated peers {:?} for table {}", peers, table_id);
|
||||
let region_routes = task
|
||||
.partitions
|
||||
.iter()
|
||||
|
||||
@@ -24,7 +24,14 @@ use std::collections::HashMap;
|
||||
use api::v1::meta::Partition;
|
||||
use api::v1::{ColumnDataType, SemanticType};
|
||||
use common_procedure::Status;
|
||||
use store_api::metric_engine_consts::{LOGICAL_TABLE_METADATA_KEY, METRIC_ENGINE_NAME};
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::schema::ColumnSchema;
|
||||
use store_api::metadata::ColumnMetadata;
|
||||
use store_api::metric_engine_consts::{
|
||||
DATA_SCHEMA_TABLE_ID_COLUMN_NAME, DATA_SCHEMA_TSID_COLUMN_NAME, LOGICAL_TABLE_METADATA_KEY,
|
||||
METRIC_ENGINE_NAME,
|
||||
};
|
||||
use store_api::storage::consts::ReservedColumnId;
|
||||
use table::metadata::{RawTableInfo, TableId};
|
||||
|
||||
use crate::ddl::create_logical_tables::CreateLogicalTablesProcedure;
|
||||
@@ -146,6 +153,7 @@ pub fn test_create_logical_table_task(name: &str) -> CreateTableTask {
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a physical table task with a single region.
|
||||
pub fn test_create_physical_table_task(name: &str) -> CreateTableTask {
|
||||
let create_table = TestCreateTableExprBuilder::default()
|
||||
.column_defs([
|
||||
@@ -182,3 +190,95 @@ pub fn test_create_physical_table_task(name: &str) -> CreateTableTask {
|
||||
table_info,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a column metadata list with tag fields.
|
||||
pub fn test_column_metadatas(tag_fields: &[&str]) -> Vec<ColumnMetadata> {
|
||||
let mut output = Vec::with_capacity(tag_fields.len() + 4);
|
||||
output.extend([
|
||||
ColumnMetadata {
|
||||
column_schema: ColumnSchema::new(
|
||||
"ts",
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
false,
|
||||
),
|
||||
semantic_type: SemanticType::Timestamp,
|
||||
column_id: 0,
|
||||
},
|
||||
ColumnMetadata {
|
||||
column_schema: ColumnSchema::new("value", ConcreteDataType::float64_datatype(), false),
|
||||
semantic_type: SemanticType::Field,
|
||||
column_id: 1,
|
||||
},
|
||||
ColumnMetadata {
|
||||
column_schema: ColumnSchema::new(
|
||||
DATA_SCHEMA_TABLE_ID_COLUMN_NAME,
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
false,
|
||||
),
|
||||
semantic_type: SemanticType::Tag,
|
||||
column_id: ReservedColumnId::table_id(),
|
||||
},
|
||||
ColumnMetadata {
|
||||
column_schema: ColumnSchema::new(
|
||||
DATA_SCHEMA_TSID_COLUMN_NAME,
|
||||
ConcreteDataType::float64_datatype(),
|
||||
false,
|
||||
),
|
||||
semantic_type: SemanticType::Tag,
|
||||
column_id: ReservedColumnId::tsid(),
|
||||
},
|
||||
]);
|
||||
|
||||
for (i, name) in tag_fields.iter().enumerate() {
|
||||
output.push(ColumnMetadata {
|
||||
column_schema: ColumnSchema::new(
|
||||
name.to_string(),
|
||||
ConcreteDataType::string_datatype(),
|
||||
true,
|
||||
),
|
||||
semantic_type: SemanticType::Tag,
|
||||
column_id: (i + 2) as u32,
|
||||
});
|
||||
}
|
||||
|
||||
output
|
||||
}
|
||||
|
||||
/// Asserts the column names.
|
||||
pub fn assert_column_name(table_info: &RawTableInfo, expected_column_names: &[&str]) {
|
||||
assert_eq!(
|
||||
table_info
|
||||
.meta
|
||||
.schema
|
||||
.column_schemas
|
||||
.iter()
|
||||
.map(|c| c.name.to_string())
|
||||
.collect::<Vec<_>>(),
|
||||
expected_column_names
|
||||
);
|
||||
}
|
||||
|
||||
/// Asserts the column metadatas
|
||||
pub fn assert_column_name_and_id(column_metadatas: &[ColumnMetadata], expected: &[(&str, u32)]) {
|
||||
assert_eq!(expected.len(), column_metadatas.len());
|
||||
for (name, id) in expected {
|
||||
let column_metadata = column_metadatas
|
||||
.iter()
|
||||
.find(|c| c.column_id == *id)
|
||||
.unwrap();
|
||||
assert_eq!(column_metadata.column_schema.name, *name);
|
||||
}
|
||||
}
|
||||
|
||||
/// Gets the raw table info.
|
||||
pub async fn get_raw_table_info(ddl_context: &DdlContext, table_id: TableId) -> RawTableInfo {
|
||||
ddl_context
|
||||
.table_metadata_manager
|
||||
.table_info_manager()
|
||||
.get(table_id)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap()
|
||||
.into_inner()
|
||||
.table_info
|
||||
}
|
||||
|
||||
@@ -132,6 +132,7 @@ pub fn build_raw_table_info_from_expr(expr: &CreateTableExpr) -> RawTableInfo {
|
||||
options: TableOptions::try_from_iter(&expr.table_options).unwrap(),
|
||||
created_on: DateTime::default(),
|
||||
partition_key_indices: vec![],
|
||||
column_ids: vec![],
|
||||
},
|
||||
table_type: TableType::Base,
|
||||
}
|
||||
|
||||
@@ -12,6 +12,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::region::RegionResponse;
|
||||
use api::v1::region::RegionRequest;
|
||||
use common_error::ext::{BoxedError, ErrorExt, StackError};
|
||||
@@ -32,6 +34,7 @@ impl MockDatanodeHandler for () {
|
||||
Ok(RegionResponse {
|
||||
affected_rows: 0,
|
||||
extensions: Default::default(),
|
||||
metadata: Vec::new(),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -44,10 +47,13 @@ impl MockDatanodeHandler for () {
|
||||
}
|
||||
}
|
||||
|
||||
type RegionRequestHandler =
|
||||
Arc<dyn Fn(Peer, RegionRequest) -> Result<RegionResponse> + Send + Sync>;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct DatanodeWatcher {
|
||||
sender: mpsc::Sender<(Peer, RegionRequest)>,
|
||||
handler: Option<fn(Peer, RegionRequest) -> Result<RegionResponse>>,
|
||||
handler: Option<RegionRequestHandler>,
|
||||
}
|
||||
|
||||
impl DatanodeWatcher {
|
||||
@@ -60,9 +66,9 @@ impl DatanodeWatcher {
|
||||
|
||||
pub fn with_handler(
|
||||
mut self,
|
||||
user_handler: fn(Peer, RegionRequest) -> Result<RegionResponse>,
|
||||
user_handler: impl Fn(Peer, RegionRequest) -> Result<RegionResponse> + Send + Sync + 'static,
|
||||
) -> Self {
|
||||
self.handler = Some(user_handler);
|
||||
self.handler = Some(Arc::new(user_handler));
|
||||
self
|
||||
}
|
||||
}
|
||||
@@ -75,7 +81,7 @@ impl MockDatanodeHandler for DatanodeWatcher {
|
||||
.send((peer.clone(), request.clone()))
|
||||
.await
|
||||
.unwrap();
|
||||
if let Some(handler) = self.handler {
|
||||
if let Some(handler) = self.handler.as_ref() {
|
||||
handler(peer.clone(), request)
|
||||
} else {
|
||||
Ok(RegionResponse::new(0))
|
||||
|
||||
@@ -23,17 +23,20 @@ use api::v1::{ColumnDataType, SemanticType};
|
||||
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
|
||||
use common_procedure::{Procedure, ProcedureId, Status};
|
||||
use common_procedure_test::MockContextProvider;
|
||||
use store_api::metric_engine_consts::MANIFEST_INFO_EXTENSION_KEY;
|
||||
use store_api::metadata::ColumnMetadata;
|
||||
use store_api::metric_engine_consts::{ALTER_PHYSICAL_EXTENSION_KEY, MANIFEST_INFO_EXTENSION_KEY};
|
||||
use store_api::region_engine::RegionManifestInfo;
|
||||
use store_api::storage::consts::ReservedColumnId;
|
||||
use store_api::storage::RegionId;
|
||||
use tokio::sync::mpsc;
|
||||
|
||||
use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;
|
||||
use crate::ddl::test_util::alter_table::TestAlterTableExprBuilder;
|
||||
use crate::ddl::test_util::columns::TestColumnDefBuilder;
|
||||
use crate::ddl::test_util::datanode_handler::{DatanodeWatcher, NaiveDatanodeHandler};
|
||||
use crate::ddl::test_util::datanode_handler::DatanodeWatcher;
|
||||
use crate::ddl::test_util::{
|
||||
create_logical_table, create_physical_table, create_physical_table_metadata,
|
||||
assert_column_name, create_logical_table, create_physical_table,
|
||||
create_physical_table_metadata, get_raw_table_info, test_column_metadatas,
|
||||
test_create_physical_table_task,
|
||||
};
|
||||
use crate::error::Error::{AlterLogicalTablesInvalidArguments, TableNotFound};
|
||||
@@ -96,6 +99,52 @@ fn make_alter_logical_table_rename_task(
|
||||
}
|
||||
}
|
||||
|
||||
fn make_alters_request_handler(
|
||||
column_metadatas: Vec<ColumnMetadata>,
|
||||
) -> impl Fn(Peer, RegionRequest) -> Result<RegionResponse> {
|
||||
move |_peer: Peer, request: RegionRequest| {
|
||||
if let region_request::Body::Alters(_) = request.body.unwrap() {
|
||||
let mut response = RegionResponse::new(0);
|
||||
// Default region id for physical table.
|
||||
let region_id = RegionId::new(1000, 1);
|
||||
response.extensions.insert(
|
||||
MANIFEST_INFO_EXTENSION_KEY.to_string(),
|
||||
RegionManifestInfo::encode_list(&[(
|
||||
region_id,
|
||||
RegionManifestInfo::metric(1, 0, 2, 0),
|
||||
)])
|
||||
.unwrap(),
|
||||
);
|
||||
response.extensions.insert(
|
||||
ALTER_PHYSICAL_EXTENSION_KEY.to_string(),
|
||||
ColumnMetadata::encode_list(&column_metadatas).unwrap(),
|
||||
);
|
||||
return Ok(response);
|
||||
}
|
||||
Ok(RegionResponse::new(0))
|
||||
}
|
||||
}
|
||||
|
||||
fn assert_alters_request(
|
||||
peer: Peer,
|
||||
request: RegionRequest,
|
||||
expected_peer_id: u64,
|
||||
expected_region_ids: &[RegionId],
|
||||
) {
|
||||
assert_eq!(peer.id, expected_peer_id,);
|
||||
let Some(region_request::Body::Alters(req)) = request.body else {
|
||||
unreachable!();
|
||||
};
|
||||
for (i, region_id) in expected_region_ids.iter().enumerate() {
|
||||
assert_eq!(
|
||||
req.requests[i].region_id,
|
||||
*region_id,
|
||||
"actual region id: {}",
|
||||
RegionId::from_u64(req.requests[i].region_id)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_on_prepare_check_schema() {
|
||||
let node_manager = Arc::new(MockDatanodeManager::new(()));
|
||||
@@ -205,15 +254,20 @@ async fn test_on_prepare() {
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_on_update_metadata() {
|
||||
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let (tx, mut rx) = mpsc::channel(8);
|
||||
let test_column_metadatas = test_column_metadatas(&["new_col", "mew_col"]);
|
||||
let datanode_handler =
|
||||
DatanodeWatcher::new(tx).with_handler(make_alters_request_handler(test_column_metadatas));
|
||||
let node_manager = Arc::new(MockDatanodeManager::new(datanode_handler));
|
||||
let ddl_context = new_ddl_context(node_manager);
|
||||
|
||||
// Creates physical table
|
||||
let phy_id = create_physical_table(&ddl_context, "phy").await;
|
||||
// Creates 3 logical tables
|
||||
create_logical_table(ddl_context.clone(), phy_id, "table1").await;
|
||||
create_logical_table(ddl_context.clone(), phy_id, "table2").await;
|
||||
create_logical_table(ddl_context.clone(), phy_id, "table3").await;
|
||||
let logical_table1_id = create_logical_table(ddl_context.clone(), phy_id, "table1").await;
|
||||
let logical_table2_id = create_logical_table(ddl_context.clone(), phy_id, "table2").await;
|
||||
let logical_table3_id = create_logical_table(ddl_context.clone(), phy_id, "table3").await;
|
||||
create_logical_table(ddl_context.clone(), phy_id, "table4").await;
|
||||
create_logical_table(ddl_context.clone(), phy_id, "table5").await;
|
||||
|
||||
@@ -223,7 +277,7 @@ async fn test_on_update_metadata() {
|
||||
make_alter_logical_table_add_column_task(None, "table3", vec!["new_col".to_string()]),
|
||||
];
|
||||
|
||||
let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context);
|
||||
let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context.clone());
|
||||
let mut status = procedure.on_prepare().await.unwrap();
|
||||
assert_matches!(
|
||||
status,
|
||||
@@ -255,18 +309,52 @@ async fn test_on_update_metadata() {
|
||||
clean_poisons: false
|
||||
}
|
||||
);
|
||||
let (peer, request) = rx.try_recv().unwrap();
|
||||
rx.try_recv().unwrap_err();
|
||||
assert_alters_request(
|
||||
peer,
|
||||
request,
|
||||
0,
|
||||
&[
|
||||
RegionId::new(logical_table1_id, 0),
|
||||
RegionId::new(logical_table2_id, 0),
|
||||
RegionId::new(logical_table3_id, 0),
|
||||
],
|
||||
);
|
||||
|
||||
let table_info = get_raw_table_info(&ddl_context, phy_id).await;
|
||||
assert_column_name(
|
||||
&table_info,
|
||||
&["ts", "value", "__table_id", "__tsid", "new_col", "mew_col"],
|
||||
);
|
||||
assert_eq!(
|
||||
table_info.meta.column_ids,
|
||||
vec![
|
||||
0,
|
||||
1,
|
||||
ReservedColumnId::table_id(),
|
||||
ReservedColumnId::tsid(),
|
||||
2,
|
||||
3
|
||||
]
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_on_part_duplicate_alter_request() {
|
||||
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
|
||||
let ddl_context = new_ddl_context(node_manager);
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let (tx, mut rx) = mpsc::channel(8);
|
||||
let column_metadatas = test_column_metadatas(&["col_0"]);
|
||||
let handler =
|
||||
DatanodeWatcher::new(tx).with_handler(make_alters_request_handler(column_metadatas));
|
||||
let node_manager = Arc::new(MockDatanodeManager::new(handler));
|
||||
let mut ddl_context = new_ddl_context(node_manager);
|
||||
|
||||
// Creates physical table
|
||||
let phy_id = create_physical_table(&ddl_context, "phy").await;
|
||||
// Creates 3 logical tables
|
||||
create_logical_table(ddl_context.clone(), phy_id, "table1").await;
|
||||
create_logical_table(ddl_context.clone(), phy_id, "table2").await;
|
||||
let logical_table1_id = create_logical_table(ddl_context.clone(), phy_id, "table1").await;
|
||||
let logical_table2_id = create_logical_table(ddl_context.clone(), phy_id, "table2").await;
|
||||
|
||||
let tasks = vec![
|
||||
make_alter_logical_table_add_column_task(None, "table1", vec!["col_0".to_string()]),
|
||||
@@ -305,6 +393,40 @@ async fn test_on_part_duplicate_alter_request() {
|
||||
clean_poisons: false
|
||||
}
|
||||
);
|
||||
let (peer, request) = rx.try_recv().unwrap();
|
||||
rx.try_recv().unwrap_err();
|
||||
assert_alters_request(
|
||||
peer,
|
||||
request,
|
||||
0,
|
||||
&[
|
||||
RegionId::new(logical_table1_id, 0),
|
||||
RegionId::new(logical_table2_id, 0),
|
||||
],
|
||||
);
|
||||
|
||||
let table_info = get_raw_table_info(&ddl_context, phy_id).await;
|
||||
assert_column_name(
|
||||
&table_info,
|
||||
&["ts", "value", "__table_id", "__tsid", "col_0"],
|
||||
);
|
||||
assert_eq!(
|
||||
table_info.meta.column_ids,
|
||||
vec![
|
||||
0,
|
||||
1,
|
||||
ReservedColumnId::table_id(),
|
||||
ReservedColumnId::tsid(),
|
||||
2
|
||||
]
|
||||
);
|
||||
|
||||
let (tx, mut rx) = mpsc::channel(8);
|
||||
let column_metadatas = test_column_metadatas(&["col_0", "new_col_1", "new_col_2"]);
|
||||
let handler =
|
||||
DatanodeWatcher::new(tx).with_handler(make_alters_request_handler(column_metadatas));
|
||||
let node_manager = Arc::new(MockDatanodeManager::new(handler));
|
||||
ddl_context.node_manager = node_manager;
|
||||
|
||||
// re-alter
|
||||
let tasks = vec![
|
||||
@@ -357,6 +479,44 @@ async fn test_on_part_duplicate_alter_request() {
|
||||
}
|
||||
);
|
||||
|
||||
let (peer, request) = rx.try_recv().unwrap();
|
||||
rx.try_recv().unwrap_err();
|
||||
assert_alters_request(
|
||||
peer,
|
||||
request,
|
||||
0,
|
||||
&[
|
||||
RegionId::new(logical_table1_id, 0),
|
||||
RegionId::new(logical_table2_id, 0),
|
||||
],
|
||||
);
|
||||
|
||||
let table_info = get_raw_table_info(&ddl_context, phy_id).await;
|
||||
assert_column_name(
|
||||
&table_info,
|
||||
&[
|
||||
"ts",
|
||||
"value",
|
||||
"__table_id",
|
||||
"__tsid",
|
||||
"col_0",
|
||||
"new_col_1",
|
||||
"new_col_2",
|
||||
],
|
||||
);
|
||||
assert_eq!(
|
||||
table_info.meta.column_ids,
|
||||
vec![
|
||||
0,
|
||||
1,
|
||||
ReservedColumnId::table_id(),
|
||||
ReservedColumnId::tsid(),
|
||||
2,
|
||||
3,
|
||||
4,
|
||||
]
|
||||
);
|
||||
|
||||
let table_name_keys = vec![
|
||||
TableNameKey::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "table1"),
|
||||
TableNameKey::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "table2"),
|
||||
@@ -422,27 +582,13 @@ async fn test_on_part_duplicate_alter_request() {
|
||||
);
|
||||
}
|
||||
|
||||
fn alters_request_handler(_peer: Peer, request: RegionRequest) -> Result<RegionResponse> {
|
||||
if let region_request::Body::Alters(_) = request.body.unwrap() {
|
||||
let mut response = RegionResponse::new(0);
|
||||
// Default region id for physical table.
|
||||
let region_id = RegionId::new(1000, 1);
|
||||
response.extensions.insert(
|
||||
MANIFEST_INFO_EXTENSION_KEY.to_string(),
|
||||
RegionManifestInfo::encode_list(&[(region_id, RegionManifestInfo::metric(1, 0, 2, 0))])
|
||||
.unwrap(),
|
||||
);
|
||||
return Ok(response);
|
||||
}
|
||||
|
||||
Ok(RegionResponse::new(0))
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_on_submit_alter_region_request() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let (tx, mut rx) = mpsc::channel(8);
|
||||
let handler = DatanodeWatcher::new(tx).with_handler(alters_request_handler);
|
||||
let column_metadatas = test_column_metadatas(&["new_col", "mew_col"]);
|
||||
let handler =
|
||||
DatanodeWatcher::new(tx).with_handler(make_alters_request_handler(column_metadatas));
|
||||
let node_manager = Arc::new(MockDatanodeManager::new(handler));
|
||||
let ddl_context = new_ddl_context(node_manager);
|
||||
|
||||
|
||||
@@ -30,7 +30,12 @@ use common_error::status_code::StatusCode;
|
||||
use common_procedure::store::poison_store::PoisonStore;
|
||||
use common_procedure::{ProcedureId, Status};
|
||||
use common_procedure_test::MockContextProvider;
|
||||
use store_api::metric_engine_consts::MANIFEST_INFO_EXTENSION_KEY;
|
||||
use datatypes::prelude::ConcreteDataType;
|
||||
use datatypes::schema::ColumnSchema;
|
||||
use store_api::metadata::ColumnMetadata;
|
||||
use store_api::metric_engine_consts::{
|
||||
MANIFEST_INFO_EXTENSION_KEY, TABLE_COLUMN_METADATA_EXTENSION_KEY,
|
||||
};
|
||||
use store_api::region_engine::RegionManifestInfo;
|
||||
use store_api::storage::RegionId;
|
||||
use table::requests::TTL_KEY;
|
||||
@@ -43,6 +48,7 @@ use crate::ddl::test_util::datanode_handler::{
|
||||
AllFailureDatanodeHandler, DatanodeWatcher, PartialSuccessDatanodeHandler,
|
||||
RequestOutdatedErrorDatanodeHandler,
|
||||
};
|
||||
use crate::ddl::test_util::{assert_column_name, assert_column_name_and_id};
|
||||
use crate::error::{Error, Result};
|
||||
use crate::key::datanode_table::DatanodeTableKey;
|
||||
use crate::key::table_name::TableNameKey;
|
||||
@@ -179,6 +185,30 @@ fn alter_request_handler(_peer: Peer, request: RegionRequest) -> Result<RegionRe
|
||||
RegionManifestInfo::encode_list(&[(region_id, RegionManifestInfo::mito(1, 1))])
|
||||
.unwrap(),
|
||||
);
|
||||
response.extensions.insert(
|
||||
TABLE_COLUMN_METADATA_EXTENSION_KEY.to_string(),
|
||||
ColumnMetadata::encode_list(&[
|
||||
ColumnMetadata {
|
||||
column_schema: ColumnSchema::new(
|
||||
"ts",
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
false,
|
||||
),
|
||||
semantic_type: SemanticType::Timestamp,
|
||||
column_id: 0,
|
||||
},
|
||||
ColumnMetadata {
|
||||
column_schema: ColumnSchema::new(
|
||||
"host",
|
||||
ConcreteDataType::float64_datatype(),
|
||||
false,
|
||||
),
|
||||
semantic_type: SemanticType::Tag,
|
||||
column_id: 1,
|
||||
},
|
||||
])
|
||||
.unwrap(),
|
||||
);
|
||||
return Ok(response);
|
||||
}
|
||||
|
||||
@@ -187,6 +217,7 @@ fn alter_request_handler(_peer: Peer, request: RegionRequest) -> Result<RegionRe
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_on_submit_alter_request() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let (tx, mut rx) = mpsc::channel(8);
|
||||
let datanode_handler = DatanodeWatcher::new(tx).with_handler(alter_request_handler);
|
||||
let node_manager = Arc::new(MockDatanodeManager::new(datanode_handler));
|
||||
@@ -234,6 +265,8 @@ async fn test_on_submit_alter_request() {
|
||||
assert_sync_request(peer, request, 4, RegionId::new(table_id, 2), 1);
|
||||
let (peer, request) = results.remove(0);
|
||||
assert_sync_request(peer, request, 5, RegionId::new(table_id, 1), 1);
|
||||
let column_metadatas = procedure.data().column_metadatas();
|
||||
assert_column_name_and_id(column_metadatas, &[("ts", 0), ("host", 1)]);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -378,6 +411,7 @@ async fn test_on_update_metadata_rename() {
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_on_update_metadata_add_columns() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let node_manager = Arc::new(MockDatanodeManager::new(()));
|
||||
let ddl_context = new_ddl_context(node_manager);
|
||||
let table_name = "foo";
|
||||
@@ -431,6 +465,34 @@ async fn test_on_update_metadata_add_columns() {
|
||||
.submit_alter_region_requests(procedure_id, provider.as_ref())
|
||||
.await
|
||||
.unwrap();
|
||||
// Returned column metadatas is empty.
|
||||
assert!(procedure.data().column_metadatas().is_empty());
|
||||
procedure.mut_data().set_column_metadatas(vec![
|
||||
ColumnMetadata {
|
||||
column_schema: ColumnSchema::new(
|
||||
"ts",
|
||||
ConcreteDataType::timestamp_millisecond_datatype(),
|
||||
false,
|
||||
),
|
||||
semantic_type: SemanticType::Timestamp,
|
||||
column_id: 0,
|
||||
},
|
||||
ColumnMetadata {
|
||||
column_schema: ColumnSchema::new("host", ConcreteDataType::float64_datatype(), false),
|
||||
semantic_type: SemanticType::Tag,
|
||||
column_id: 1,
|
||||
},
|
||||
ColumnMetadata {
|
||||
column_schema: ColumnSchema::new("cpu", ConcreteDataType::float64_datatype(), false),
|
||||
semantic_type: SemanticType::Tag,
|
||||
column_id: 2,
|
||||
},
|
||||
ColumnMetadata {
|
||||
column_schema: ColumnSchema::new("my_tag3", ConcreteDataType::string_datatype(), true),
|
||||
semantic_type: SemanticType::Tag,
|
||||
column_id: 3,
|
||||
},
|
||||
]);
|
||||
procedure.on_update_metadata().await.unwrap();
|
||||
|
||||
let table_info = ddl_context
|
||||
@@ -447,6 +509,8 @@ async fn test_on_update_metadata_add_columns() {
|
||||
table_info.meta.schema.column_schemas.len() as u32,
|
||||
table_info.meta.next_column_id
|
||||
);
|
||||
assert_column_name(&table_info, &["ts", "host", "cpu", "my_tag3"]);
|
||||
assert_eq!(table_info.meta.column_ids, vec![0, 1, 2, 3]);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
|
||||
@@ -141,3 +141,41 @@ async fn test_create_flow() {
    let err = procedure.on_prepare().await.unwrap_err();
    assert_matches!(err, error::Error::FlowAlreadyExists { .. });
}

#[tokio::test]
async fn test_create_flow_same_source_and_sink_table() {
    let table_id = 1024;
    let table_name = TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "same_table");

    // Use the same table for both source and sink
    let source_table_names = vec![table_name.clone()];
    let sink_table_name = table_name.clone();

    let node_manager = Arc::new(MockFlownodeManager::new(NaiveFlownodeHandler));
    let ddl_context = new_ddl_context(node_manager);

    // Create the table first so it exists
    let task = test_create_table_task("same_table", table_id);
    ddl_context
        .table_metadata_manager
        .create_table_metadata(
            task.table_info.clone(),
            TableRouteValue::physical(vec![]),
            HashMap::new(),
        )
        .await
        .unwrap();

    // Try to create a flow with same source and sink table - should fail
    let task = test_create_flow_task("my_flow", source_table_names, sink_table_name, false);
    let query_ctx = QueryContext::arc().into();
    let mut procedure = CreateFlowProcedure::new(task, query_ctx, ddl_context);
    let err = procedure.on_prepare().await.unwrap_err();
    assert_matches!(err, error::Error::Unsupported { .. });

    // Verify the error message contains information about the same table
    if let error::Error::Unsupported { operation, .. } = &err {
        assert!(operation.contains("source and sink table being the same"));
        assert!(operation.contains("same_table"));
    }
}

@@ -23,15 +23,18 @@ use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_procedure::{Context as ProcedureContext, Procedure, ProcedureId, Status};
use common_procedure_test::MockContextProvider;
use store_api::metric_engine_consts::MANIFEST_INFO_EXTENSION_KEY;
use store_api::metadata::ColumnMetadata;
use store_api::metric_engine_consts::{ALTER_PHYSICAL_EXTENSION_KEY, MANIFEST_INFO_EXTENSION_KEY};
use store_api::region_engine::RegionManifestInfo;
use store_api::storage::consts::ReservedColumnId;
use store_api::storage::RegionId;
use tokio::sync::mpsc;

use crate::ddl::create_logical_tables::CreateLogicalTablesProcedure;
use crate::ddl::test_util::datanode_handler::{DatanodeWatcher, NaiveDatanodeHandler};
use crate::ddl::test_util::{
    create_physical_table_metadata, test_create_logical_table_task, test_create_physical_table_task,
    assert_column_name, create_physical_table_metadata, get_raw_table_info, test_column_metadatas,
    test_create_logical_table_task, test_create_physical_table_task,
};
use crate::ddl::TableMetadata;
use crate::error::{Error, Result};
@@ -39,6 +42,54 @@ use crate::key::table_route::{PhysicalTableRouteValue, TableRouteValue};
use crate::rpc::router::{Region, RegionRoute};
use crate::test_util::{new_ddl_context, MockDatanodeManager};

fn make_creates_request_handler(
    column_metadatas: Vec<ColumnMetadata>,
) -> impl Fn(Peer, RegionRequest) -> Result<RegionResponse> {
    move |_peer, request| {
        let _ = _peer;
        if let region_request::Body::Creates(_) = request.body.unwrap() {
            let mut response = RegionResponse::new(0);
            // Default region id for physical table.
            let region_id = RegionId::new(1024, 1);
            response.extensions.insert(
                MANIFEST_INFO_EXTENSION_KEY.to_string(),
                RegionManifestInfo::encode_list(&[(
                    region_id,
                    RegionManifestInfo::metric(1, 0, 2, 0),
                )])
                .unwrap(),
            );
            response.extensions.insert(
                ALTER_PHYSICAL_EXTENSION_KEY.to_string(),
                ColumnMetadata::encode_list(&column_metadatas).unwrap(),
            );
            return Ok(response);
        }

        Ok(RegionResponse::new(0))
    }
}

fn assert_creates_request(
    peer: Peer,
    request: RegionRequest,
    expected_peer_id: u64,
    expected_region_ids: &[RegionId],
) {
    assert_eq!(peer.id, expected_peer_id,);
    let Some(region_request::Body::Creates(req)) = request.body else {
        unreachable!();
    };
    for (i, region_id) in expected_region_ids.iter().enumerate() {
        assert_eq!(
            req.requests[i].region_id,
            *region_id,
            "actual region id: {}",
            RegionId::from_u64(req.requests[i].region_id)
        );
    }
}

#[tokio::test]
async fn test_on_prepare_physical_table_not_found() {
    let node_manager = Arc::new(MockDatanodeManager::new(()));
@@ -227,7 +278,12 @@ async fn test_on_prepare_part_logical_tables_exist() {

#[tokio::test]
async fn test_on_create_metadata() {
    let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
    common_telemetry::init_default_ut_logging();
    let (tx, mut rx) = mpsc::channel(8);
    let column_metadatas = test_column_metadatas(&["host", "cpu"]);
    let datanode_handler =
        DatanodeWatcher::new(tx).with_handler(make_creates_request_handler(column_metadatas));
    let node_manager = Arc::new(MockDatanodeManager::new(datanode_handler));
    let ddl_context = new_ddl_context(node_manager);
    // Prepares physical table metadata.
    let mut create_physical_table_task = test_create_physical_table_task("phy_table");
@@ -255,7 +311,7 @@ async fn test_on_create_metadata() {
    let mut procedure = CreateLogicalTablesProcedure::new(
        vec![task, yet_another_task],
        physical_table_id,
        ddl_context,
        ddl_context.clone(),
    );
    let status = procedure.on_prepare().await.unwrap();
    assert_matches!(
@@ -274,11 +330,42 @@ async fn test_on_create_metadata() {
    let status = procedure.execute(&ctx).await.unwrap();
    let table_ids = status.downcast_output_ref::<Vec<u32>>().unwrap();
    assert_eq!(*table_ids, vec![1025, 1026]);

    let (peer, request) = rx.try_recv().unwrap();
    rx.try_recv().unwrap_err();
    assert_creates_request(
        peer,
        request,
        0,
        &[RegionId::new(1025, 0), RegionId::new(1026, 0)],
    );

    let table_info = get_raw_table_info(&ddl_context, table_id).await;
    assert_column_name(
        &table_info,
        &["ts", "value", "__table_id", "__tsid", "host", "cpu"],
    );
    assert_eq!(
        table_info.meta.column_ids,
        vec![
            0,
            1,
            ReservedColumnId::table_id(),
            ReservedColumnId::tsid(),
            2,
            3
        ]
    );
}

#[tokio::test]
async fn test_on_create_metadata_part_logical_tables_exist() {
    let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
    common_telemetry::init_default_ut_logging();
    let (tx, mut rx) = mpsc::channel(8);
    let column_metadatas = test_column_metadatas(&["host", "cpu"]);
    let datanode_handler =
        DatanodeWatcher::new(tx).with_handler(make_creates_request_handler(column_metadatas));
    let node_manager = Arc::new(MockDatanodeManager::new(datanode_handler));
    let ddl_context = new_ddl_context(node_manager);
    // Prepares physical table metadata.
    let mut create_physical_table_task = test_create_physical_table_task("phy_table");
@@ -317,7 +404,7 @@ async fn test_on_create_metadata_part_logical_tables_exist() {
    let mut procedure = CreateLogicalTablesProcedure::new(
        vec![task, non_exist_task],
        physical_table_id,
        ddl_context,
        ddl_context.clone(),
    );
    let status = procedure.on_prepare().await.unwrap();
    assert_matches!(
@@ -336,6 +423,27 @@ async fn test_on_create_metadata_part_logical_tables_exist() {
    let status = procedure.execute(&ctx).await.unwrap();
    let table_ids = status.downcast_output_ref::<Vec<u32>>().unwrap();
    assert_eq!(*table_ids, vec![8192, 1025]);

    let (peer, request) = rx.try_recv().unwrap();
    rx.try_recv().unwrap_err();
    assert_creates_request(peer, request, 0, &[RegionId::new(1025, 0)]);

    let table_info = get_raw_table_info(&ddl_context, table_id).await;
    assert_column_name(
        &table_info,
        &["ts", "value", "__table_id", "__tsid", "host", "cpu"],
    );
    assert_eq!(
        table_info.meta.column_ids,
        vec![
            0,
            1,
            ReservedColumnId::table_id(),
            ReservedColumnId::tsid(),
            2,
            3
        ]
    );
}

#[tokio::test]
@@ -399,27 +507,13 @@ async fn test_on_create_metadata_err() {
    assert!(!error.is_retry_later());
}

fn creates_request_handler(_peer: Peer, request: RegionRequest) -> Result<RegionResponse> {
    if let region_request::Body::Creates(_) = request.body.unwrap() {
        let mut response = RegionResponse::new(0);
        // Default region id for physical table.
        let region_id = RegionId::new(1024, 1);
        response.extensions.insert(
            MANIFEST_INFO_EXTENSION_KEY.to_string(),
            RegionManifestInfo::encode_list(&[(region_id, RegionManifestInfo::metric(1, 0, 2, 0))])
                .unwrap(),
        );
        return Ok(response);
    }

    Ok(RegionResponse::new(0))
}

#[tokio::test]
async fn test_on_submit_create_request() {
    common_telemetry::init_default_ut_logging();
    let (tx, mut rx) = mpsc::channel(8);
    let handler = DatanodeWatcher::new(tx).with_handler(creates_request_handler);
    let column_metadatas = test_column_metadatas(&["host", "cpu"]);
    let handler =
        DatanodeWatcher::new(tx).with_handler(make_creates_request_handler(column_metadatas));
    let node_manager = Arc::new(MockDatanodeManager::new(handler));
    let ddl_context = new_ddl_context(node_manager);
    let mut create_physical_table_task = test_create_physical_table_task("phy_table");

@@ -16,7 +16,9 @@ use std::assert_matches::assert_matches;
use std::collections::HashMap;
use std::sync::Arc;

use api::v1::meta::Partition;
use api::region::RegionResponse;
use api::v1::meta::{Partition, Peer};
use api::v1::region::{region_request, RegionRequest};
use api::v1::{ColumnDataType, SemanticType};
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
@@ -24,7 +26,12 @@ use common_procedure::{Context as ProcedureContext, Procedure, ProcedureId, Stat
use common_procedure_test::{
    execute_procedure_until, execute_procedure_until_done, MockContextProvider,
};
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::ColumnSchema;
use store_api::metadata::ColumnMetadata;
use store_api::metric_engine_consts::TABLE_COLUMN_METADATA_EXTENSION_KEY;
use store_api::storage::RegionId;
use tokio::sync::mpsc;

use crate::ddl::create_table::{CreateTableProcedure, CreateTableState};
use crate::ddl::test_util::columns::TestColumnDefBuilder;
@@ -32,14 +39,73 @@ use crate::ddl::test_util::create_table::{
    build_raw_table_info_from_expr, TestCreateTableExprBuilder,
};
use crate::ddl::test_util::datanode_handler::{
    NaiveDatanodeHandler, RetryErrorDatanodeHandler, UnexpectedErrorDatanodeHandler,
    DatanodeWatcher, NaiveDatanodeHandler, RetryErrorDatanodeHandler,
    UnexpectedErrorDatanodeHandler,
};
use crate::error::Error;
use crate::ddl::test_util::{assert_column_name, get_raw_table_info};
use crate::error::{Error, Result};
use crate::key::table_route::TableRouteValue;
use crate::kv_backend::memory::MemoryKvBackend;
use crate::rpc::ddl::CreateTableTask;
use crate::test_util::{new_ddl_context, new_ddl_context_with_kv_backend, MockDatanodeManager};

fn create_request_handler(_peer: Peer, request: RegionRequest) -> Result<RegionResponse> {
    let _ = _peer;
    if let region_request::Body::Create(_) = request.body.unwrap() {
        let mut response = RegionResponse::new(0);

        response.extensions.insert(
            TABLE_COLUMN_METADATA_EXTENSION_KEY.to_string(),
            ColumnMetadata::encode_list(&[
                ColumnMetadata {
                    column_schema: ColumnSchema::new(
                        "ts",
                        ConcreteDataType::timestamp_millisecond_datatype(),
                        false,
                    ),
                    semantic_type: SemanticType::Timestamp,
                    column_id: 0,
                },
                ColumnMetadata {
                    column_schema: ColumnSchema::new(
                        "host",
                        ConcreteDataType::float64_datatype(),
                        false,
                    ),
                    semantic_type: SemanticType::Tag,
                    column_id: 1,
                },
                ColumnMetadata {
                    column_schema: ColumnSchema::new(
                        "cpu",
                        ConcreteDataType::float64_datatype(),
                        false,
                    ),
                    semantic_type: SemanticType::Tag,
                    column_id: 2,
                },
            ])
            .unwrap(),
        );
        return Ok(response);
    }

    Ok(RegionResponse::new(0))
}

fn assert_create_request(
    peer: Peer,
    request: RegionRequest,
    expected_peer_id: u64,
    expected_region_id: RegionId,
) {
    assert_eq!(peer.id, expected_peer_id);
    let Some(region_request::Body::Create(req)) = request.body else {
        unreachable!();
    };
    assert_eq!(req.region_id, expected_region_id);
}

pub(crate) fn test_create_table_task(name: &str) -> CreateTableTask {
    let create_table = TestCreateTableExprBuilder::default()
        .column_defs([
@@ -230,11 +296,13 @@ async fn test_on_create_metadata_error() {
#[tokio::test]
async fn test_on_create_metadata() {
    common_telemetry::init_default_ut_logging();
    let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
    let (tx, mut rx) = mpsc::channel(8);
    let datanode_handler = DatanodeWatcher::new(tx).with_handler(create_request_handler);
    let node_manager = Arc::new(MockDatanodeManager::new(datanode_handler));
    let ddl_context = new_ddl_context(node_manager);
    let task = test_create_table_task("foo");
    assert!(!task.create_table.create_if_not_exists);
    let mut procedure = CreateTableProcedure::new(task, ddl_context);
    let mut procedure = CreateTableProcedure::new(task, ddl_context.clone());
    procedure.on_prepare().await.unwrap();
    let ctx = ProcedureContext {
        procedure_id: ProcedureId::random(),
@@ -243,8 +311,16 @@ async fn test_on_create_metadata() {
    procedure.execute(&ctx).await.unwrap();
    // Triggers procedure to create table metadata
    let status = procedure.execute(&ctx).await.unwrap();
    let table_id = status.downcast_output_ref::<u32>().unwrap();
    assert_eq!(*table_id, 1024);
    let table_id = *status.downcast_output_ref::<u32>().unwrap();
    assert_eq!(table_id, 1024);

    let (peer, request) = rx.try_recv().unwrap();
    rx.try_recv().unwrap_err();
    assert_create_request(peer, request, 0, RegionId::new(table_id, 0));

    let table_info = get_raw_table_info(&ddl_context, table_id).await;
    assert_column_name(&table_info, &["ts", "host", "cpu"]);
    assert_eq!(table_info.meta.column_ids, vec![0, 1, 2]);
}

#[tokio::test]

@@ -12,6 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.

pub(crate) mod raw_table_info;
pub(crate) mod table_id;
pub(crate) mod table_info;

use std::collections::HashMap;
use std::fmt::Debug;

@@ -29,6 +33,7 @@ use common_telemetry::{error, info, warn};
use common_wal::options::WalOptions;
use futures::future::join_all;
use snafu::{ensure, OptionExt, ResultExt};
use store_api::metadata::ColumnMetadata;
use store_api::metric_engine_consts::{LOGICAL_TABLE_METADATA_KEY, MANIFEST_INFO_EXTENSION_KEY};
use store_api::region_engine::RegionManifestInfo;
use store_api::storage::{RegionId, RegionNumber};
@@ -37,8 +42,8 @@ use table::table_reference::TableReference;

use crate::ddl::{DdlContext, DetectingRegion};
use crate::error::{
    self, Error, OperateDatanodeSnafu, ParseWalOptionsSnafu, Result, TableNotFoundSnafu,
    UnsupportedSnafu,
    self, DecodeJsonSnafu, Error, MetadataCorruptionSnafu, OperateDatanodeSnafu,
    ParseWalOptionsSnafu, Result, TableNotFoundSnafu, UnsupportedSnafu,
};
use crate::key::datanode_table::DatanodeTableValue;
use crate::key::table_name::TableNameKey;
@@ -314,11 +319,23 @@ pub fn parse_manifest_infos_from_extensions(
    Ok(data_manifest_version)
}

/// Parses column metadatas from extensions.
pub fn parse_column_metadatas(
    extensions: &HashMap<String, Vec<u8>>,
    key: &str,
) -> Result<Vec<ColumnMetadata>> {
    let value = extensions.get(key).context(error::UnexpectedSnafu {
        err_msg: format!("column metadata extension not found: {}", key),
    })?;
    let column_metadatas = ColumnMetadata::decode_list(value).context(error::SerdeJsonSnafu {})?;
    Ok(column_metadatas)
}

/// Sync follower regions on datanodes.
pub async fn sync_follower_regions(
    context: &DdlContext,
    table_id: TableId,
    results: Vec<RegionResponse>,
    results: &[RegionResponse],
    region_routes: &[RegionRoute],
    engine: &str,
) -> Result<()> {
@@ -331,7 +348,7 @@ pub async fn sync_follower_regions(
    }

    let results = results
        .into_iter()
        .iter()
        .map(|response| parse_manifest_infos_from_extensions(&response.extensions))
        .collect::<Result<Vec<_>>>()?
        .into_iter()
@@ -418,6 +435,38 @@ pub async fn sync_follower_regions(
    Ok(())
}

/// Extracts column metadatas from extensions.
pub fn extract_column_metadatas(
    results: &mut [RegionResponse],
    key: &str,
) -> Result<Option<Vec<ColumnMetadata>>> {
    let schemas = results
        .iter_mut()
        .map(|r| r.extensions.remove(key))
        .collect::<Vec<_>>();

    if schemas.is_empty() {
        return Ok(None);
    }

    // Verify all the physical schemas are the same
    // Safety: previous check ensures this vec is not empty
    let first = schemas.first().unwrap();
    ensure!(
        schemas.iter().all(|x| x == first),
        MetadataCorruptionSnafu {
            err_msg: "The table column metadata schemas from datanodes are not the same."
        }
    );

    if let Some(first) = first {
        let column_metadatas = ColumnMetadata::decode_list(first).context(DecodeJsonSnafu)?;
        Ok(Some(column_metadatas))
    } else {
        Ok(None)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

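A minimal usage sketch for the new extract_column_metadatas helper above (illustrative only, not part of the diff; written as if it lived in this module so the helper resolves, and using only imports that appear elsewhere in this changeset):

use api::region::RegionResponse;
use api::v1::SemanticType;
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::ColumnSchema;
use store_api::metadata::ColumnMetadata;
use store_api::metric_engine_consts::ALTER_PHYSICAL_EXTENSION_KEY;

fn sketch_extract_column_metadatas() {
    // Two datanodes reply with the same encoded schema under the extension key.
    let column_metadatas = vec![ColumnMetadata {
        column_schema: ColumnSchema::new("host", ConcreteDataType::string_datatype(), false),
        semantic_type: SemanticType::Tag,
        column_id: 1,
    }];
    let encoded = ColumnMetadata::encode_list(&column_metadatas).unwrap();
    let mut responses = vec![RegionResponse::new(0), RegionResponse::new(0)];
    for response in &mut responses {
        response
            .extensions
            .insert(ALTER_PHYSICAL_EXTENSION_KEY.to_string(), encoded.clone());
    }
    // Identical replies decode to Some(metadata); diverging replies would instead
    // trip the MetadataCorruption check inside extract_column_metadatas.
    let extracted = extract_column_metadatas(&mut responses, ALTER_PHYSICAL_EXTENSION_KEY)
        .unwrap()
        .unwrap();
    assert_eq!(extracted[0].column_schema.name, "host");
}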
@@ -12,9 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashSet;
use std::collections::{HashMap, HashSet};

use api::v1::SemanticType;
use common_telemetry::debug;
use common_telemetry::tracing::warn;
use store_api::metadata::ColumnMetadata;
use table::metadata::RawTableInfo;

@@ -23,6 +25,10 @@ pub(crate) fn build_new_physical_table_info(
    mut raw_table_info: RawTableInfo,
    physical_columns: &[ColumnMetadata],
) -> RawTableInfo {
    debug!(
        "building new physical table info for table: {}, table_id: {}",
        raw_table_info.name, raw_table_info.ident.table_id
    );
    let existing_columns = raw_table_info
        .meta
        .schema
@@ -36,6 +42,8 @@ pub(crate) fn build_new_physical_table_info(
    let time_index = &mut raw_table_info.meta.schema.timestamp_index;
    let columns = &mut raw_table_info.meta.schema.column_schemas;
    columns.clear();
    let column_ids = &mut raw_table_info.meta.column_ids;
    column_ids.clear();

    for (idx, col) in physical_columns.iter().enumerate() {
        match col.semantic_type {
@@ -50,6 +58,7 @@ pub(crate) fn build_new_physical_table_info(
        }

        columns.push(col.column_schema.clone());
        column_ids.push(col.column_id);
    }

    if let Some(time_index) = *time_index {
@@ -58,3 +67,54 @@ pub(crate) fn build_new_physical_table_info(

    raw_table_info
}

/// Updates the column IDs in the table info based on the provided column metadata.
///
/// This function validates that the column metadata matches the existing table schema
/// before updating the column ids. If the column metadata doesn't match the table schema,
/// the table info remains unchanged.
pub(crate) fn update_table_info_column_ids(
    raw_table_info: &mut RawTableInfo,
    column_metadatas: &[ColumnMetadata],
) {
    let mut table_column_names = raw_table_info
        .meta
        .schema
        .column_schemas
        .iter()
        .map(|c| c.name.as_str())
        .collect::<Vec<_>>();
    table_column_names.sort_unstable();

    let mut column_names = column_metadatas
        .iter()
        .map(|c| c.column_schema.name.as_str())
        .collect::<Vec<_>>();
    column_names.sort_unstable();

    if table_column_names != column_names {
        warn!(
            "Column metadata doesn't match the table schema for table {}, table_id: {}, column in table: {:?}, column in metadata: {:?}",
            raw_table_info.name,
            raw_table_info.ident.table_id,
            table_column_names,
            column_names,
        );
        return;
    }

    let name_to_id = column_metadatas
        .iter()
        .map(|c| (c.column_schema.name.clone(), c.column_id))
        .collect::<HashMap<_, _>>();

    let schema = &raw_table_info.meta.schema.column_schemas;
    let mut column_ids = Vec::with_capacity(schema.len());
    for column_schema in schema {
        if let Some(id) = name_to_id.get(&column_schema.name) {
            column_ids.push(*id);
        }
    }

    raw_table_info.meta.column_ids = column_ids;
}
src/common/meta/src/ddl/utils/table_id.rs (new file, 46 lines)
@@ -0,0 +1,46 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use snafu::OptionExt;
use store_api::storage::TableId;
use table::table_reference::TableReference;

use crate::error::{Result, TableNotFoundSnafu};
use crate::key::table_name::{TableNameKey, TableNameManager};

/// Get all the table ids from the table names.
///
/// Returns an error if any table does not exist.
pub(crate) async fn get_all_table_ids_by_names<'a>(
    table_name_manager: &TableNameManager,
    table_names: &[TableReference<'a>],
) -> Result<Vec<TableId>> {
    let table_name_keys = table_names
        .iter()
        .map(TableNameKey::from)
        .collect::<Vec<_>>();
    let table_name_values = table_name_manager.batch_get(table_name_keys).await?;
    let mut table_ids = Vec::with_capacity(table_name_values.len());
    for (value, table_name) in table_name_values.into_iter().zip(table_names) {
        let value = value
            .with_context(|| TableNotFoundSnafu {
                table_name: table_name.to_string(),
            })?
            .table_id();

        table_ids.push(value);
    }

    Ok(table_ids)
}
src/common/meta/src/ddl/utils/table_info.rs (new file, 44 lines)
@@ -0,0 +1,44 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use snafu::OptionExt;
use store_api::storage::TableId;
use table::table_reference::TableReference;

use crate::error::{Result, TableInfoNotFoundSnafu};
use crate::key::table_info::{TableInfoManager, TableInfoValue};
use crate::key::DeserializedValueWithBytes;

/// Get all table info values by table ids.
///
/// Returns an error if any table does not exist.
pub(crate) async fn get_all_table_info_values_by_table_ids<'a>(
    table_info_manager: &TableInfoManager,
    table_ids: &[TableId],
    table_names: &[TableReference<'a>],
) -> Result<Vec<DeserializedValueWithBytes<TableInfoValue>>> {
    let mut table_info_map = table_info_manager.batch_get_raw(table_ids).await?;
    let mut table_info_values = Vec::with_capacity(table_ids.len());
    for (table_id, table_name) in table_ids.iter().zip(table_names) {
        let table_info_value =
            table_info_map
                .remove(table_id)
                .with_context(|| TableInfoNotFoundSnafu {
                    table: table_name.to_string(),
                })?;
        table_info_values.push(table_info_value);
    }

    Ok(table_info_values)
}
@@ -50,7 +50,11 @@ use crate::key::{DeserializedValueWithBytes, TableMetadataManagerRef};
#[cfg(feature = "enterprise")]
use crate::rpc::ddl::trigger::CreateTriggerTask;
#[cfg(feature = "enterprise")]
use crate::rpc::ddl::trigger::DropTriggerTask;
#[cfg(feature = "enterprise")]
use crate::rpc::ddl::DdlTask::CreateTrigger;
#[cfg(feature = "enterprise")]
use crate::rpc::ddl::DdlTask::DropTrigger;
use crate::rpc::ddl::DdlTask::{
    AlterDatabase, AlterLogicalTables, AlterTable, CreateDatabase, CreateFlow, CreateLogicalTables,
    CreateTable, CreateView, DropDatabase, DropFlow, DropLogicalTables, DropTable, DropView,
@@ -91,6 +95,14 @@ pub trait TriggerDdlManager: Send + Sync {
        query_context: QueryContext,
    ) -> Result<SubmitDdlTaskResponse>;

    async fn drop_trigger(
        &self,
        drop_trigger_task: DropTriggerTask,
        procedure_manager: ProcedureManagerRef,
        ddl_context: DdlContext,
        query_context: QueryContext,
    ) -> Result<SubmitDdlTaskResponse>;

    fn as_any(&self) -> &dyn std::any::Any;
}

@@ -125,13 +137,12 @@ impl DdlManager {
        ddl_context: DdlContext,
        procedure_manager: ProcedureManagerRef,
        register_loaders: bool,
        #[cfg(feature = "enterprise")] trigger_ddl_manager: Option<TriggerDdlManagerRef>,
    ) -> Result<Self> {
        let manager = Self {
            ddl_context,
            procedure_manager,
            #[cfg(feature = "enterprise")]
            trigger_ddl_manager,
            trigger_ddl_manager: None,
        };
        if register_loaders {
            manager.register_loaders()?;
@@ -139,6 +150,15 @@ impl DdlManager {
        Ok(manager)
    }

    #[cfg(feature = "enterprise")]
    pub fn with_trigger_ddl_manager(
        mut self,
        trigger_ddl_manager: Option<TriggerDdlManagerRef>,
    ) -> Self {
        self.trigger_ddl_manager = trigger_ddl_manager;
        self
    }

    /// Returns the [TableMetadataManagerRef].
    pub fn table_metadata_manager(&self) -> &TableMetadataManagerRef {
        &self.ddl_context.table_metadata_manager
@@ -640,6 +660,28 @@ async fn handle_drop_flow_task(
    })
}

#[cfg(feature = "enterprise")]
async fn handle_drop_trigger_task(
    ddl_manager: &DdlManager,
    drop_trigger_task: DropTriggerTask,
    query_context: QueryContext,
) -> Result<SubmitDdlTaskResponse> {
    let Some(m) = ddl_manager.trigger_ddl_manager.as_ref() else {
        return UnsupportedSnafu {
            operation: "drop trigger",
        }
        .fail();
    };

    m.drop_trigger(
        drop_trigger_task,
        ddl_manager.procedure_manager.clone(),
        ddl_manager.ddl_context.clone(),
        query_context,
    )
    .await
}

async fn handle_drop_view_task(
    ddl_manager: &DdlManager,
    drop_view_task: DropViewTask,
@@ -827,6 +869,11 @@ impl ProcedureExecutor for DdlManager {
                handle_create_flow_task(self, create_flow_task, request.query_context.into())
                    .await
            }
            DropFlow(drop_flow_task) => handle_drop_flow_task(self, drop_flow_task).await,
            CreateView(create_view_task) => {
                handle_create_view_task(self, create_view_task).await
            }
            DropView(drop_view_task) => handle_drop_view_task(self, drop_view_task).await,
            #[cfg(feature = "enterprise")]
            CreateTrigger(create_trigger_task) => {
                handle_create_trigger_task(
@@ -836,11 +883,11 @@ impl ProcedureExecutor for DdlManager {
                )
                .await
            }
            DropFlow(drop_flow_task) => handle_drop_flow_task(self, drop_flow_task).await,
            CreateView(create_view_task) => {
                handle_create_view_task(self, create_view_task).await
            #[cfg(feature = "enterprise")]
            DropTrigger(drop_trigger_task) => {
                handle_drop_trigger_task(self, drop_trigger_task, request.query_context.into())
                    .await
            }
            DropView(drop_view_task) => handle_drop_view_task(self, drop_view_task).await,
        }
    }
    .trace(span)
@@ -948,6 +995,7 @@ mod tests {
            Default::default(),
            state_store,
            poison_manager,
            None,
        ));

        let _ = DdlManager::try_new(
@@ -964,8 +1012,6 @@ mod tests {
            },
            procedure_manager.clone(),
            true,
            #[cfg(feature = "enterprise")]
            None,
        );

        let expected_loaders = vec![

@@ -100,8 +100,8 @@
pub mod catalog_name;
pub mod datanode_table;
pub mod flow;
pub mod maintenance;
pub mod node_address;
pub mod runtime_switch;
mod schema_metadata_manager;
pub mod schema_name;
pub mod table_info;
@@ -164,7 +164,9 @@ use crate::state_store::PoisonValue;
use crate::DatanodeId;

pub const NAME_PATTERN: &str = r"[a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*";
pub const MAINTENANCE_KEY: &str = "__maintenance";
pub const LEGACY_MAINTENANCE_KEY: &str = "__maintenance";
pub const MAINTENANCE_KEY: &str = "__switches/maintenance";
pub const PAUSE_PROCEDURE_KEY: &str = "__switches/pause_procedure";

pub const DATANODE_TABLE_KEY_PREFIX: &str = "__dn_table";
pub const TABLE_INFO_KEY_PREFIX: &str = "__table_info";

@@ -1,86 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use crate::error::Result;
use crate::key::MAINTENANCE_KEY;
use crate::kv_backend::KvBackendRef;
use crate::rpc::store::PutRequest;

pub type MaintenanceModeManagerRef = Arc<MaintenanceModeManager>;

/// The maintenance mode manager.
///
/// Used to enable or disable maintenance mode.
#[derive(Clone)]
pub struct MaintenanceModeManager {
    kv_backend: KvBackendRef,
}

impl MaintenanceModeManager {
    pub fn new(kv_backend: KvBackendRef) -> Self {
        Self { kv_backend }
    }

    /// Enables maintenance mode.
    pub async fn set_maintenance_mode(&self) -> Result<()> {
        let req = PutRequest {
            key: Vec::from(MAINTENANCE_KEY),
            value: vec![],
            prev_kv: false,
        };
        self.kv_backend.put(req).await?;
        Ok(())
    }

    /// Unsets maintenance mode.
    pub async fn unset_maintenance_mode(&self) -> Result<()> {
        self.kv_backend
            .delete(MAINTENANCE_KEY.as_bytes(), false)
            .await?;
        Ok(())
    }

    /// Returns true if maintenance mode is enabled.
    pub async fn maintenance_mode(&self) -> Result<bool> {
        self.kv_backend.exists(MAINTENANCE_KEY.as_bytes()).await
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use crate::key::maintenance::MaintenanceModeManager;
    use crate::kv_backend::memory::MemoryKvBackend;

    #[tokio::test]
    async fn test_maintenance_mode_manager() {
        let maintenance_mode_manager = Arc::new(MaintenanceModeManager::new(Arc::new(
            MemoryKvBackend::new(),
        )));
        assert!(!maintenance_mode_manager.maintenance_mode().await.unwrap());
        maintenance_mode_manager
            .set_maintenance_mode()
            .await
            .unwrap();
        assert!(maintenance_mode_manager.maintenance_mode().await.unwrap());
        maintenance_mode_manager
            .unset_maintenance_mode()
            .await
            .unwrap();
        assert!(!maintenance_mode_manager.maintenance_mode().await.unwrap());
    }
}
src/common/meta/src/key/runtime_switch.rs (new file, 224 lines)
@@ -0,0 +1,224 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;
use std::time::Duration;

use common_error::ext::BoxedError;
use common_procedure::local::PauseAware;
use moka::future::Cache;
use snafu::ResultExt;

use crate::error::{GetCacheSnafu, Result};
use crate::key::{LEGACY_MAINTENANCE_KEY, MAINTENANCE_KEY, PAUSE_PROCEDURE_KEY};
use crate::kv_backend::KvBackendRef;
use crate::rpc::store::{BatchDeleteRequest, PutRequest};

pub type RuntimeSwitchManagerRef = Arc<RuntimeSwitchManager>;

/// The runtime switch manager.
///
/// Used to enable or disable runtime switches.
#[derive(Clone)]
pub struct RuntimeSwitchManager {
    kv_backend: KvBackendRef,
    cache: Cache<Vec<u8>, Option<Vec<u8>>>,
}

#[async_trait::async_trait]
impl PauseAware for RuntimeSwitchManager {
    async fn is_paused(&self) -> std::result::Result<bool, BoxedError> {
        self.is_procedure_paused().await.map_err(BoxedError::new)
    }
}

const CACHE_TTL: Duration = Duration::from_secs(10);
const MAX_CAPACITY: u64 = 32;

impl RuntimeSwitchManager {
    pub fn new(kv_backend: KvBackendRef) -> Self {
        let cache = Cache::builder()
            .time_to_live(CACHE_TTL)
            .max_capacity(MAX_CAPACITY)
            .build();
        Self { kv_backend, cache }
    }

    async fn put_key(&self, key: &str) -> Result<()> {
        let req = PutRequest {
            key: Vec::from(key),
            value: vec![],
            prev_kv: false,
        };
        self.kv_backend.put(req).await?;
        self.cache.invalidate(key.as_bytes()).await;
        Ok(())
    }

    async fn delete_keys(&self, keys: &[&str]) -> Result<()> {
        let req = BatchDeleteRequest::new()
            .with_keys(keys.iter().map(|x| x.as_bytes().to_vec()).collect());
        self.kv_backend.batch_delete(req).await?;
        for key in keys {
            self.cache.invalidate(key.as_bytes()).await;
        }
        Ok(())
    }

    /// Returns true if the key exists.
    async fn exists(&self, key: &str) -> Result<bool> {
        let key = key.as_bytes().to_vec();
        let kv_backend = self.kv_backend.clone();
        let value = self
            .cache
            .try_get_with(key.clone(), async move {
                kv_backend.get(&key).await.map(|v| v.map(|v| v.value))
            })
            .await
            .context(GetCacheSnafu)?;

        Ok(value.is_some())
    }

    /// Enables maintenance mode.
    pub async fn set_maintenance_mode(&self) -> Result<()> {
        self.put_key(MAINTENANCE_KEY).await
    }

    /// Unsets maintenance mode.
    pub async fn unset_maintenance_mode(&self) -> Result<()> {
        self.delete_keys(&[MAINTENANCE_KEY, LEGACY_MAINTENANCE_KEY])
            .await
    }

    /// Returns true if maintenance mode is enabled.
    pub async fn maintenance_mode(&self) -> Result<bool> {
        let exists = self.exists(MAINTENANCE_KEY).await?;
        if exists {
            return Ok(true);
        }

        let exists = self.exists(LEGACY_MAINTENANCE_KEY).await?;
        if exists {
            return Ok(true);
        }

        Ok(false)
    }

    // Pauses handling of incoming procedure requests.
    pub async fn pasue_procedure(&self) -> Result<()> {
        self.put_key(PAUSE_PROCEDURE_KEY).await
    }

    /// Resumes processing of incoming procedure requests.
    pub async fn resume_procedure(&self) -> Result<()> {
        self.delete_keys(&[PAUSE_PROCEDURE_KEY]).await
    }

    /// Returns true if the system is currently pausing incoming procedure requests.
    pub async fn is_procedure_paused(&self) -> Result<bool> {
        self.exists(PAUSE_PROCEDURE_KEY).await
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use crate::key::runtime_switch::RuntimeSwitchManager;
    use crate::key::{LEGACY_MAINTENANCE_KEY, MAINTENANCE_KEY};
    use crate::kv_backend::memory::MemoryKvBackend;
    use crate::kv_backend::KvBackend;
    use crate::rpc::store::PutRequest;

    #[tokio::test]
    async fn test_runtime_switch_manager_basic() {
        let runtime_switch_manager =
            Arc::new(RuntimeSwitchManager::new(Arc::new(MemoryKvBackend::new())));
        runtime_switch_manager
            .put_key(MAINTENANCE_KEY)
            .await
            .unwrap();
        let v = runtime_switch_manager
            .cache
            .get(MAINTENANCE_KEY.as_bytes())
            .await;
        assert!(v.is_none());
        runtime_switch_manager
            .exists(MAINTENANCE_KEY)
            .await
            .unwrap();
        let v = runtime_switch_manager
            .cache
            .get(MAINTENANCE_KEY.as_bytes())
            .await;
        assert!(v.is_some());
        runtime_switch_manager
            .delete_keys(&[MAINTENANCE_KEY])
            .await
            .unwrap();
        let v = runtime_switch_manager
            .cache
            .get(MAINTENANCE_KEY.as_bytes())
            .await;
        assert!(v.is_none());
    }

    #[tokio::test]
    async fn test_runtime_switch_manager() {
        let runtime_switch_manager =
            Arc::new(RuntimeSwitchManager::new(Arc::new(MemoryKvBackend::new())));
        assert!(!runtime_switch_manager.maintenance_mode().await.unwrap());
        runtime_switch_manager.set_maintenance_mode().await.unwrap();
        assert!(runtime_switch_manager.maintenance_mode().await.unwrap());
        runtime_switch_manager
            .unset_maintenance_mode()
            .await
            .unwrap();
        assert!(!runtime_switch_manager.maintenance_mode().await.unwrap());
    }

    #[tokio::test]
    async fn test_runtime_switch_manager_with_legacy_key() {
        let kv_backend = Arc::new(MemoryKvBackend::new());
        kv_backend
            .put(PutRequest {
                key: Vec::from(LEGACY_MAINTENANCE_KEY),
                value: vec![],
                prev_kv: false,
            })
            .await
            .unwrap();
        let runtime_switch_manager = Arc::new(RuntimeSwitchManager::new(kv_backend));
        assert!(runtime_switch_manager.maintenance_mode().await.unwrap());
        runtime_switch_manager
            .unset_maintenance_mode()
            .await
            .unwrap();
        assert!(!runtime_switch_manager.maintenance_mode().await.unwrap());
        runtime_switch_manager.set_maintenance_mode().await.unwrap();
        assert!(runtime_switch_manager.maintenance_mode().await.unwrap());
    }

    #[tokio::test]
    async fn test_pasue_procedure() {
        let runtime_switch_manager =
            Arc::new(RuntimeSwitchManager::new(Arc::new(MemoryKvBackend::new())));
        runtime_switch_manager.pasue_procedure().await.unwrap();
        assert!(runtime_switch_manager.is_procedure_paused().await.unwrap());
        runtime_switch_manager.resume_procedure().await.unwrap();
        assert!(!runtime_switch_manager.is_procedure_paused().await.unwrap());
    }
}
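One design note on the new RuntimeSwitchManager above: exists() reads go through a moka cache with a 10-second TTL (CACHE_TTL), so a switch flipped by another process may take up to that long to be observed through cached reads, while local put_key and delete_keys invalidate the cache immediately. A minimal sketch (illustrative only, not part of the diff) of consuming the manager through the PauseAware trait it implements:

use std::sync::Arc;

use common_error::ext::BoxedError;
use common_procedure::local::PauseAware;

use crate::key::runtime_switch::RuntimeSwitchManager;

// Callers that only care about pausing can hold the manager behind the PauseAware
// trait; is_paused() simply forwards to is_procedure_paused().
async fn should_defer_submission(
    manager: &Arc<RuntimeSwitchManager>,
) -> std::result::Result<bool, BoxedError> {
    let pause_aware: &dyn PauseAware = manager.as_ref();
    pause_aware.is_paused().await
}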
Some files were not shown because too many files have changed in this diff.