Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-25 23:49:58 +00:00.

Compare commits: 29 commits, flow/faste... → v0.15.2
| SHA1 |
|---|
| 941906dc74 |
| cbf251d0f0 |
| 1519379262 |
| 4bfe02ec7f |
| ecacf1333e |
| 92fa33c250 |
| 8b2d1a3753 |
| 13401c94e0 |
| fd637dae47 |
| 69fac19770 |
| 6435b97314 |
| 726e3909fe |
| 00d759e828 |
| 0042ea6462 |
| d06450715f |
| 8612bb066f |
| 467593d329 |
| 9e4ae070b2 |
| d8261dda51 |
| 7ab9b335a1 |
| 60835afb47 |
| aba5bf7431 |
| 7897fe8dbe |
| cc8ec706a1 |
| 7c688718db |
| 8a0e554e5a |
| 80fae1c559 |
| c37c4df20d |
| f712c1b356 |
.github/scripts/check-version.sh (vendored, 42 lines removed)

@@ -1,42 +0,0 @@
-#!/bin/bash
-
-# Get current version
-CURRENT_VERSION=$1
-if [ -z "$CURRENT_VERSION" ]; then
-    echo "Error: Failed to get current version"
-    exit 1
-fi
-
-# Get the latest version from GitHub Releases
-API_RESPONSE=$(curl -s "https://api.github.com/repos/GreptimeTeam/greptimedb/releases/latest")
-
-if [ -z "$API_RESPONSE" ] || [ "$(echo "$API_RESPONSE" | jq -r '.message')" = "Not Found" ]; then
-    echo "Error: Failed to fetch latest version from GitHub"
-    exit 1
-fi
-
-# Get the latest version
-LATEST_VERSION=$(echo "$API_RESPONSE" | jq -r '.tag_name')
-
-if [ -z "$LATEST_VERSION" ] || [ "$LATEST_VERSION" = "null" ]; then
-    echo "Error: No valid version found in GitHub releases"
-    exit 1
-fi
-
-# Cleaned up version number format (removed possible 'v' prefix and -nightly suffix)
-CLEAN_CURRENT=$(echo "$CURRENT_VERSION" | sed 's/^v//' | sed 's/-nightly-.*//')
-CLEAN_LATEST=$(echo "$LATEST_VERSION" | sed 's/^v//' | sed 's/-nightly-.*//')
-
-echo "Current version: $CLEAN_CURRENT"
-echo "Latest release version: $CLEAN_LATEST"
-
-# Use sort -V to compare versions
-HIGHER_VERSION=$(printf "%s\n%s" "$CLEAN_CURRENT" "$CLEAN_LATEST" | sort -V | tail -n1)
-
-if [ "$HIGHER_VERSION" = "$CLEAN_CURRENT" ]; then
-    echo "Current version ($CLEAN_CURRENT) is NEWER than or EQUAL to latest ($CLEAN_LATEST)"
-    echo "should-push-latest-tag=true" >> $GITHUB_OUTPUT
-else
-    echo "Current version ($CLEAN_CURRENT) is OLDER than latest ($CLEAN_LATEST)"
-    echo "should-push-latest-tag=false" >> $GITHUB_OUTPUT
-fi
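The heart of the deleted script is the `sed` normalization plus the `sort -V` comparison. A minimal standalone sketch of that logic; the sample version strings are illustrative, not taken from the workflow:

```bash
#!/bin/bash
# Sketch of check-version.sh's comparison logic with hardcoded inputs.
normalize() { echo "$1" | sed 's/^v//' | sed 's/-nightly-.*//'; }

CUR=$(normalize "v0.15.2-nightly-20250101")   # -> 0.15.2
LATEST=$(normalize "v0.16.0")                 # -> 0.16.0

# sort -V orders version strings numerically; the last line is the higher
# one, so an equal pair also lands on the "newer or equal" branch.
HIGHER=$(printf "%s\n%s" "$CUR" "$LATEST" | sort -V | tail -n1)
if [ "$HIGHER" = "$CUR" ]; then
  echo "current >= latest"
else
  echo "current < latest"   # printed for the sample inputs above
fi
```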
.github/workflows/release.yml (vendored, 13 lines changed)

@@ -110,8 +110,6 @@ jobs:
      # The 'version' use as the global tag name of the release workflow.
      version: ${{ steps.create-version.outputs.version }}
-     should-push-latest-tag: ${{ steps.check-version.outputs.should-push-latest-tag }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
@@ -137,11 +135,6 @@ jobs:
          GITHUB_REF_NAME: ${{ github.ref_name }}
          NIGHTLY_RELEASE_PREFIX: ${{ env.NIGHTLY_RELEASE_PREFIX }}
-     - name: Check version
-       id: check-version
-       run: |
-         ./.github/scripts/check-version.sh "${{ steps.create-version.outputs.version }}"
      - name: Allocate linux-amd64 runner
        if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
        uses: ./.github/actions/start-runner
@@ -321,7 +314,7 @@ jobs:
          image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
          image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
          version: ${{ needs.allocate-runners.outputs.version }}
-         push-latest-tag: ${{ needs.allocate-runners.outputs.should-push-latest-tag == 'true' && github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
+         push-latest-tag: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
      - name: Set build image result
        id: set-build-image-result
@@ -339,7 +332,7 @@ jobs:
        build-windows-artifacts,
        release-images-to-dockerhub,
      ]
-    runs-on: ubuntu-latest-16-cores
+    runs-on: ubuntu-latest
    # When we push to ACR, it's easy to fail due to some unknown network issues.
    # However, we don't want to fail the whole workflow because of this.
    # The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
@@ -368,7 +361,7 @@ jobs:
        dev-mode: false
        upload-to-s3: true
        update-version-info: true
-       push-latest-tag: ${{ needs.allocate-runners.outputs.should-push-latest-tag == 'true' && github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
+       push-latest-tag: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}

  publish-github-release:
    name: Create GitHub release and upload artifacts
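With the check-version step gone, `push-latest-tag` reduces to the inline expression above. A rough shell rendering of that gate; the variable values are hypothetical stand-ins for the GitHub context:

```bash
#!/bin/bash
# Illustrative values standing in for github.ref_type / ref_name / event_name.
REF_TYPE="tag"
REF_NAME="v0.15.2"
EVENT_NAME="push"

# Mirrors: ref_type == 'tag' && !contains(ref_name, 'nightly') && event_name != 'schedule'
if [ "$REF_TYPE" = "tag" ] && [[ "$REF_NAME" != *nightly* ]] && [ "$EVENT_NAME" != "schedule" ]; then
  echo "push-latest-tag=true"
else
  echo "push-latest-tag=false"
fi
```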
.github/workflows/semantic-pull-request.yml (vendored, 10 lines changed)

@@ -11,17 +11,17 @@ concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

permissions:
  issues: write
  contents: write
  pull-requests: write

jobs:
  check:
    runs-on: ubuntu-latest
    permissions:
      pull-requests: write # Add permissions to modify PRs
      issues: write
    timeout-minutes: 10
    steps:
      - uses: actions/checkout@v4
        with:
          persist-credentials: false
      - uses: ./.github/actions/setup-cyborg
      - name: Check Pull Request
        working-directory: cyborg
Cargo.lock (generated, 306 lines changed)

@@ -211,7 +211,7 @@ checksum = "d301b3b94cb4b2f23d7917810addbbaff90738e0ca2be692bd027e70d7e0330c"

[[package]]
name = "api"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "common-base",
 "common-decimal",
@@ -944,7 +944,7 @@ dependencies = [

[[package]]
name = "auth"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "api",
 "async-trait",
@@ -1586,7 +1586,7 @@ dependencies = [

[[package]]
name = "cache"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "catalog",
 "common-error",
@@ -1621,7 +1621,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"

[[package]]
name = "catalog"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "api",
 "arrow 54.2.1",
@@ -1959,7 +1959,7 @@ checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97"

[[package]]
name = "cli"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "async-stream",
 "async-trait",
@@ -1986,6 +1986,7 @@ dependencies = [
 "common-version",
 "common-wal",
 "datatypes",
+ "either",
 "etcd-client",
 "futures",
 "humantime",
@@ -1996,14 +1997,14 @@ dependencies = [
 "operator",
 "query",
 "rand 0.9.0",
- "reqwest 0.12.9",
+ "reqwest",
 "serde",
 "serde_json",
 "servers",
 "session",
 "snafu 0.8.5",
 "store-api",
- "substrait 0.16.0",
+ "substrait 0.15.2",
 "table",
 "tempfile",
 "tokio",
@@ -2012,7 +2013,7 @@ dependencies = [

[[package]]
name = "client"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "api",
 "arc-swap",
@@ -2042,7 +2043,7 @@ dependencies = [
 "rand 0.9.0",
 "serde_json",
 "snafu 0.8.5",
- "substrait 0.16.0",
+ "substrait 0.15.2",
 "substrait 0.37.3",
 "tokio",
 "tokio-stream",
@@ -2083,7 +2084,7 @@ dependencies = [

[[package]]
name = "cmd"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "async-trait",
 "auth",
@@ -2113,6 +2114,7 @@ dependencies = [
 "common-wal",
 "datanode",
 "datatypes",
+ "either",
 "etcd-client",
 "file-engine",
 "flow",
@@ -2127,14 +2129,13 @@ dependencies = [
 "mito2",
 "moka",
 "nu-ansi-term",
- "object-store",
 "plugins",
 "prometheus",
 "prost 0.13.5",
 "query",
 "rand 0.9.0",
 "regex",
- "reqwest 0.12.9",
+ "reqwest",
 "rexpect",
 "serde",
 "serde_json",
@@ -2144,7 +2145,7 @@ dependencies = [
 "snafu 0.8.5",
 "stat",
 "store-api",
- "substrait 0.16.0",
+ "substrait 0.15.2",
 "table",
 "temp-env",
 "tempfile",
@@ -2191,7 +2192,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"

[[package]]
name = "common-base"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "anymap2",
 "async-trait",
@@ -2213,11 +2214,11 @@ dependencies = [

[[package]]
name = "common-catalog"
-version = "0.16.0"
+version = "0.15.2"

[[package]]
name = "common-config"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "common-base",
 "common-error",
@@ -2230,7 +2231,6 @@ dependencies = [
 "humantime-serde",
 "meta-client",
 "num_cpus",
- "object-store",
 "serde",
 "serde_json",
 "serde_with",
@@ -2243,7 +2243,7 @@ dependencies = [

[[package]]
name = "common-datasource"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "arrow 54.2.1",
 "arrow-schema 54.3.1",
@@ -2280,7 +2280,7 @@ dependencies = [

[[package]]
name = "common-decimal"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "bigdecimal 0.4.8",
 "common-error",
@@ -2293,7 +2293,7 @@ dependencies = [

[[package]]
name = "common-error"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "common-macro",
 "http 1.1.0",
@@ -2304,7 +2304,7 @@ dependencies = [

[[package]]
name = "common-frontend"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "async-trait",
 "common-error",
@@ -2320,7 +2320,7 @@ dependencies = [

[[package]]
name = "common-function"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "ahash 0.8.11",
 "api",
@@ -2343,7 +2343,6 @@ dependencies = [
 "datafusion",
 "datafusion-common",
 "datafusion-expr",
- "datafusion-functions-aggregate-common",
 "datatypes",
 "derive_more",
 "geo",
@@ -2374,7 +2373,7 @@ dependencies = [

[[package]]
name = "common-greptimedb-telemetry"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "async-trait",
 "common-runtime",
@@ -2382,7 +2381,7 @@ dependencies = [
 "common-test-util",
 "common-version",
 "hyper 0.14.30",
- "reqwest 0.12.9",
+ "reqwest",
 "serde",
 "tempfile",
 "tokio",
@@ -2391,7 +2390,7 @@ dependencies = [

[[package]]
name = "common-grpc"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "api",
 "arrow-flight",
@@ -2423,7 +2422,7 @@ dependencies = [

[[package]]
name = "common-grpc-expr"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "api",
 "common-base",
@@ -2442,7 +2441,7 @@ dependencies = [

[[package]]
name = "common-macro"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "arc-swap",
 "common-query",
@@ -2456,7 +2455,7 @@ dependencies = [

[[package]]
name = "common-mem-prof"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "anyhow",
 "common-error",
@@ -2472,7 +2471,7 @@ dependencies = [

[[package]]
name = "common-meta"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "anymap2",
 "api",
@@ -2537,7 +2536,7 @@ dependencies = [

[[package]]
name = "common-options"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "common-grpc",
 "humantime-serde",
@@ -2546,11 +2545,11 @@ dependencies = [

[[package]]
name = "common-plugins"
-version = "0.16.0"
+version = "0.15.2"

[[package]]
name = "common-pprof"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "common-error",
 "common-macro",
@@ -2562,7 +2561,7 @@ dependencies = [

[[package]]
name = "common-procedure"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "async-stream",
 "async-trait",
@@ -2589,7 +2588,7 @@ dependencies = [

[[package]]
name = "common-procedure-test"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "async-trait",
 "common-procedure",
@@ -2598,7 +2597,7 @@ dependencies = [

[[package]]
name = "common-query"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "api",
 "async-trait",
@@ -2624,7 +2623,7 @@ dependencies = [

[[package]]
name = "common-recordbatch"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "arc-swap",
 "common-error",
@@ -2644,7 +2643,7 @@ dependencies = [

[[package]]
name = "common-runtime"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "async-trait",
 "clap 4.5.19",
@@ -2674,32 +2673,14 @@ dependencies = [

[[package]]
name = "common-session"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "strum 0.27.1",
]

-[[package]]
-name = "common-sql"
-version = "0.16.0"
-dependencies = [
- "common-base",
- "common-datasource",
- "common-decimal",
- "common-error",
- "common-macro",
- "common-time",
- "datafusion-sql",
- "datatypes",
- "hex",
- "jsonb",
- "snafu 0.8.5",
- "sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
-]
-
[[package]]
name = "common-telemetry"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "backtrace",
 "common-error",
@@ -2727,7 +2708,7 @@ dependencies = [

[[package]]
name = "common-test-util"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "client",
 "common-grpc",
@@ -2740,7 +2721,7 @@ dependencies = [

[[package]]
name = "common-time"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "arrow 54.2.1",
 "chrono",
@@ -2758,7 +2739,7 @@ dependencies = [

[[package]]
name = "common-version"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "build-data",
 "cargo-manifest",
@@ -2769,7 +2750,7 @@ dependencies = [

[[package]]
name = "common-wal"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "common-base",
 "common-error",
@@ -2792,7 +2773,7 @@ dependencies = [

[[package]]
name = "common-workload"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "api",
 "common-telemetry",
@@ -3748,7 +3729,7 @@ dependencies = [

[[package]]
name = "datanode"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "api",
 "arrow-flight",
@@ -3794,14 +3775,14 @@ dependencies = [
 "prometheus",
 "prost 0.13.5",
 "query",
- "reqwest 0.12.9",
+ "reqwest",
 "serde",
 "serde_json",
 "servers",
 "session",
 "snafu 0.8.5",
 "store-api",
- "substrait 0.16.0",
+ "substrait 0.15.2",
 "table",
 "tokio",
 "toml 0.8.19",
@@ -3810,7 +3791,7 @@ dependencies = [

[[package]]
name = "datatypes"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "arrow 54.2.1",
 "arrow-array 54.2.1",
@@ -4230,9 +4211,9 @@ dependencies = [

[[package]]
name = "either"
-version = "1.15.0"
+version = "1.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719"
+checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0"
dependencies = [
 "serde",
]
@@ -4470,7 +4451,7 @@ checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6"

[[package]]
name = "file-engine"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "api",
 "async-trait",
@@ -4607,7 +4588,7 @@ checksum = "8bf7cc16383c4b8d58b9905a8509f02926ce3058053c056376248d958c9df1e8"

[[package]]
name = "flow"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "api",
 "arrow 54.2.1",
@@ -4672,7 +4653,7 @@ dependencies = [
 "sql",
 "store-api",
 "strum 0.27.1",
- "substrait 0.16.0",
+ "substrait 0.15.2",
 "table",
 "tokio",
 "tonic 0.12.3",
@@ -4727,7 +4708,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa"

[[package]]
name = "frontend"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "api",
 "arc-swap",
@@ -4787,7 +4768,7 @@ dependencies = [
 "sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
 "store-api",
 "strfmt",
- "substrait 0.16.0",
+ "substrait 0.15.2",
 "table",
 "tokio",
 "tokio-util",
@@ -5177,7 +5158,7 @@ dependencies = [

[[package]]
name = "greptime-proto"
version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=ceb1af4fa9309ce65bda0367db7b384df2bb4d4f#ceb1af4fa9309ce65bda0367db7b384df2bb4d4f"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=96c733f8472284d3c83a4c011dc6de9cf830c353#96c733f8472284d3c83a4c011dc6de9cf830c353"
dependencies = [
 "prost 0.13.5",
 "serde",
@@ -5948,7 +5929,7 @@ dependencies = [

[[package]]
name = "index"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "async-trait",
 "asynchronous-codec",
@@ -6728,7 +6709,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4"
dependencies = [
 "cfg-if",
- "windows-targets 0.52.6",
+ "windows-targets 0.48.5",
]

[[package]]
@@ -6833,7 +6814,7 @@ checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"

[[package]]
name = "log-query"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "chrono",
 "common-error",
@@ -6845,7 +6826,7 @@ dependencies = [

[[package]]
name = "log-store"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "async-stream",
 "async-trait",
@@ -7143,7 +7124,7 @@ dependencies = [

[[package]]
name = "meta-client"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "api",
 "async-trait",
@@ -7171,7 +7152,7 @@ dependencies = [

[[package]]
name = "meta-srv"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "api",
 "async-trait",
@@ -7201,7 +7182,6 @@ dependencies = [
 "deadpool",
 "deadpool-postgres",
 "derive_builder 0.20.1",
- "either",
 "etcd-client",
 "futures",
 "h2 0.3.26",
@@ -7263,7 +7243,7 @@ dependencies = [

[[package]]
name = "metric-engine"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "api",
 "aquamarine",
@@ -7273,7 +7253,6 @@ dependencies = [
 "common-base",
 "common-error",
 "common-macro",
- "common-meta",
 "common-query",
 "common-recordbatch",
 "common-runtime",
@@ -7354,7 +7333,7 @@ dependencies = [

[[package]]
name = "mito-codec"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "api",
 "bytes",
@@ -7377,7 +7356,7 @@ dependencies = [

[[package]]
name = "mito2"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "api",
 "aquamarine",
@@ -8127,25 +8106,18 @@ dependencies = [

[[package]]
name = "object-store"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "anyhow",
 "bytes",
 "common-base",
 "common-error",
 "common-macro",
 "common-telemetry",
 "common-test-util",
 "futures",
 "humantime-serde",
 "lazy_static",
 "md5",
 "moka",
 "opendal",
 "prometheus",
 "reqwest 0.12.9",
 "serde",
 "snafu 0.8.5",
 "tokio",
 "uuid",
]
@@ -8280,7 +8252,7 @@ dependencies = [
 "prometheus",
 "quick-xml 0.36.2",
 "reqsign",
- "reqwest 0.12.9",
+ "reqwest",
 "serde",
 "serde_json",
 "sha2",
@@ -8352,19 +8324,6 @@ dependencies = [
 "tracing",
]

-[[package]]
-name = "opentelemetry-http"
-version = "0.10.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7f51189ce8be654f9b5f7e70e49967ed894e84a06fc35c6c042e64ac1fc5399e"
-dependencies = [
- "async-trait",
- "bytes",
- "http 0.2.12",
- "opentelemetry 0.21.0",
- "reqwest 0.11.27",
-]
-
[[package]]
name = "opentelemetry-otlp"
version = "0.14.0"
@@ -8375,12 +8334,10 @@ dependencies = [
 "futures-core",
 "http 0.2.12",
 "opentelemetry 0.21.0",
- "opentelemetry-http",
 "opentelemetry-proto 0.4.0",
 "opentelemetry-semantic-conventions",
 "opentelemetry_sdk 0.21.2",
 "prost 0.11.9",
- "reqwest 0.11.27",
 "thiserror 1.0.64",
 "tokio",
 "tonic 0.9.2",
@@ -8463,7 +8420,7 @@ dependencies = [

[[package]]
name = "operator"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "ahash 0.8.11",
 "api",
@@ -8488,7 +8445,6 @@ dependencies = [
 "common-query",
 "common-recordbatch",
 "common-runtime",
- "common-sql",
 "common-telemetry",
 "common-test-util",
 "common-time",
@@ -8519,7 +8475,7 @@ dependencies = [
 "sql",
 "sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
 "store-api",
- "substrait 0.16.0",
+ "substrait 0.15.2",
 "table",
 "tokio",
 "tokio-util",
@@ -8786,7 +8742,7 @@ dependencies = [

[[package]]
name = "partition"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "api",
 "async-trait",
@@ -9074,7 +9030,7 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"

[[package]]
name = "pipeline"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "ahash 0.8.11",
 "api",
@@ -9217,7 +9173,7 @@ dependencies = [

[[package]]
name = "plugins"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "auth",
 "clap 4.5.19",
@@ -9530,7 +9486,7 @@ dependencies = [

[[package]]
name = "promql"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "ahash 0.8.11",
 "async-trait",
@@ -9554,9 +9510,8 @@ dependencies = [

[[package]]
name = "promql-parser"
-version = "0.6.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "328fe69c2443ec4f8e6c33ea925dde04a1026e6c95928e89ed02343944cac9bf"
+version = "0.5.1"
+source = "git+https://github.com/GreptimeTeam/promql-parser.git?rev=0410e8b459dda7cb222ce9596f8bf3971bd07bd2#0410e8b459dda7cb222ce9596f8bf3971bd07bd2"
dependencies = [
 "cfgrammar",
 "chrono",
@@ -9627,7 +9582,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be769465445e8c1474e9c5dac2018218498557af32d9ed057325ec9a41ae81bf"
dependencies = [
 "heck 0.5.0",
- "itertools 0.14.0",
+ "itertools 0.11.0",
 "log",
 "multimap",
 "once_cell",
@@ -9673,7 +9628,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d"
dependencies = [
 "anyhow",
- "itertools 0.14.0",
+ "itertools 0.11.0",
 "proc-macro2",
 "quote",
 "syn 2.0.100",
@@ -9813,7 +9768,7 @@ dependencies = [

[[package]]
name = "puffin"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "async-compression 0.4.13",
 "async-trait",
@@ -9855,7 +9810,7 @@ dependencies = [

[[package]]
name = "query"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "ahash 0.8.11",
 "api",
@@ -9921,7 +9876,7 @@ dependencies = [
 "sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
 "statrs",
 "store-api",
- "substrait 0.16.0",
+ "substrait 0.15.2",
 "table",
 "tokio",
 "tokio-stream",
@@ -10369,7 +10324,7 @@ dependencies = [
 "percent-encoding",
 "quick-xml 0.35.0",
 "rand 0.8.5",
- "reqwest 0.12.9",
+ "reqwest",
 "rsa",
 "rust-ini 0.21.1",
 "serde",
@@ -10378,42 +10333,6 @@ dependencies = [
 "sha2",
]

-[[package]]
-name = "reqwest"
-version = "0.11.27"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62"
-dependencies = [
- "base64 0.21.7",
- "bytes",
- "encoding_rs",
- "futures-core",
- "futures-util",
- "h2 0.3.26",
- "http 0.2.12",
- "http-body 0.4.6",
- "hyper 0.14.30",
- "ipnet",
- "js-sys",
- "log",
- "mime",
- "once_cell",
- "percent-encoding",
- "pin-project-lite",
- "serde",
- "serde_json",
- "serde_urlencoded",
- "sync_wrapper 0.1.2",
- "system-configuration",
- "tokio",
- "tower-service",
- "url",
- "wasm-bindgen",
- "wasm-bindgen-futures",
- "web-sys",
- "winreg",
-]
-
[[package]]
name = "reqwest"
version = "0.12.9"
@@ -11243,7 +11162,7 @@ dependencies = [

[[package]]
name = "servers"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "ahash 0.8.11",
 "api",
@@ -11276,7 +11195,6 @@ dependencies = [
 "common-recordbatch",
 "common-runtime",
 "common-session",
- "common-sql",
 "common-telemetry",
 "common-test-util",
 "common-time",
@@ -11330,7 +11248,7 @@ dependencies = [
 "quoted-string",
 "rand 0.9.0",
 "regex",
- "reqwest 0.12.9",
+ "reqwest",
 "rust-embed",
 "rustls",
 "rustls-pemfile",
@@ -11365,7 +11283,7 @@ dependencies = [

[[package]]
name = "session"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "api",
 "arc-swap",
@@ -11704,7 +11622,7 @@ dependencies = [

[[package]]
name = "sql"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "api",
 "chrono",
@@ -11715,7 +11633,6 @@ dependencies = [
 "common-error",
 "common-macro",
 "common-query",
- "common-sql",
 "common-time",
 "datafusion",
 "datafusion-common",
@@ -11760,7 +11677,7 @@ dependencies = [

[[package]]
name = "sqlness-runner"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "async-trait",
 "clap 4.5.19",
@@ -11775,7 +11692,7 @@ dependencies = [
 "local-ip-address",
 "mysql",
 "num_cpus",
- "reqwest 0.12.9",
+ "reqwest",
 "serde",
 "serde_json",
 "sha2",
@@ -12060,7 +11977,7 @@ dependencies = [

[[package]]
name = "stat"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "nix 0.30.1",
]
@@ -12086,7 +12003,7 @@ dependencies = [

[[package]]
name = "store-api"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "api",
 "aquamarine",
@@ -12247,7 +12164,7 @@ dependencies = [

[[package]]
name = "substrait"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "async-trait",
 "bytes",
@@ -12425,30 +12342,9 @@ dependencies = [
 "nom",
]

-[[package]]
-name = "system-configuration"
-version = "0.5.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7"
-dependencies = [
- "bitflags 1.3.2",
- "core-foundation",
- "system-configuration-sys",
-]
-
-[[package]]
-name = "system-configuration-sys"
-version = "0.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9"
-dependencies = [
- "core-foundation-sys",
- "libc",
-]
-
[[package]]
name = "table"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "api",
 "async-trait",
@@ -12709,7 +12605,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"

[[package]]
name = "tests-fuzz"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "arbitrary",
 "async-trait",
@@ -12736,7 +12632,7 @@ dependencies = [
 "paste",
 "rand 0.9.0",
 "rand_chacha 0.9.0",
- "reqwest 0.12.9",
+ "reqwest",
 "schemars",
 "serde",
 "serde_json",
@@ -12753,7 +12649,7 @@ dependencies = [

[[package]]
name = "tests-integration"
-version = "0.16.0"
+version = "0.15.2"
dependencies = [
 "api",
 "arrow-flight",
@@ -12820,7 +12716,7 @@ dependencies = [
 "sql",
 "sqlx",
 "store-api",
- "substrait 0.16.0",
+ "substrait 0.15.2",
 "table",
 "tempfile",
 "time",
@@ -14302,7 +14198,7 @@ version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
dependencies = [
- "windows-sys 0.59.0",
+ "windows-sys 0.48.0",
]

[[package]]
@@ -14620,16 +14516,6 @@ dependencies = [
 "memchr",
]

-[[package]]
-name = "winreg"
-version = "0.50.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1"
-dependencies = [
- "cfg-if",
- "windows-sys 0.48.0",
-]
-
[[package]]
name = "wit-bindgen-rt"
version = "0.39.0"
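The lockfile above still carries two `substrait` versions (the workspace crate at 0.15.2 and the upstream crate at 0.37.3), while the duplicate `reqwest` 0.11 entry disappears, which is why the `"reqwest 0.12.9"` disambiguators collapse to plain `"reqwest"`. A couple of one-liners for inspecting that kind of duplication, assuming they are run from the repository root:

```bash
# List crates that resolve to more than one version in the dependency graph.
cargo tree --duplicates

# Confirm which reqwest versions the lockfile still records.
grep -A1 'name = "reqwest"' Cargo.lock
```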
Cargo.toml (14 lines changed)

@@ -30,7 +30,6 @@ members = [
    "src/common/recordbatch",
    "src/common/runtime",
    "src/common/session",
-    "src/common/sql",
    "src/common/stat",
    "src/common/substrait",
    "src/common/telemetry",
@@ -72,13 +71,11 @@ members = [
resolver = "2"

[workspace.package]
-version = "0.16.0"
+version = "0.15.2"
edition = "2021"
license = "Apache-2.0"

[workspace.lints]
clippy.print_stdout = "warn"
clippy.print_stderr = "warn"
clippy.dbg_macro = "warn"
clippy.implicit_clone = "warn"
clippy.result_large_err = "allow"
@@ -124,7 +121,6 @@ datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
-datafusion-functions-aggregate-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "12c0381babd52c681043957e9d6ee083a03f7646" }
@@ -134,12 +130,11 @@ deadpool = "0.12"
deadpool-postgres = "0.14"
derive_builder = "0.20"
dotenv = "0.15"
-either = "1.15"
etcd-client = "0.14"
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "ceb1af4fa9309ce65bda0367db7b384df2bb4d4f" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "96c733f8472284d3c83a4c011dc6de9cf830c353" }
hex = "0.4"
http = "1"
humantime = "2.1"
@@ -172,7 +167,9 @@ parquet = { version = "54.2", default-features = false, features = ["arrow", "as
paste = "1.0"
pin-project = "1.0"
prometheus = { version = "0.13.3", features = ["process"] }
-promql-parser = { version = "0.6", features = ["ser"] }
+promql-parser = { git = "https://github.com/GreptimeTeam/promql-parser.git", rev = "0410e8b459dda7cb222ce9596f8bf3971bd07bd2", features = [
+    "ser",
+] }
prost = { version = "0.13", features = ["no-recursion-limit"] }
raft-engine = { version = "0.4.1", default-features = false }
rand = "0.9"
@@ -261,7 +258,6 @@ common-query = { path = "src/common/query" }
common-recordbatch = { path = "src/common/recordbatch" }
common-runtime = { path = "src/common/runtime" }
common-session = { path = "src/common/session" }
-common-sql = { path = "src/common/sql" }
common-telemetry = { path = "src/common/telemetry" }
common-test-util = { path = "src/common/test-util" }
common-time = { path = "src/common/time" }
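After pinning `promql-parser` and `greptime-proto` to git revisions, the lockfile should record those exact revs. A quick way to verify and, if the rev in Cargo.toml is later bumped, to re-resolve just that dependency (commands assume the repository root):

```bash
# Check the recorded source for the pinned dependency.
grep -A3 'name = "promql-parser"' Cargo.lock

# Re-resolve a single dependency after changing its rev in Cargo.toml.
cargo update -p promql-parser
```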
@@ -75,9 +75,9 @@
| --------- | ----------- |
| [Unified Observability Data](https://docs.greptime.com/user-guide/concepts/why-greptimedb) | Store metrics, logs, and traces as timestamped, contextual wide events. Query via [SQL](https://docs.greptime.com/user-guide/query-data/sql), [PromQL](https://docs.greptime.com/user-guide/query-data/promql), and [streaming](https://docs.greptime.com/user-guide/flow-computation/overview). |
| [High Performance & Cost Effective](https://docs.greptime.com/user-guide/manage-data/data-index) | Written in Rust, with a distributed query engine, [rich indexing](https://docs.greptime.com/user-guide/manage-data/data-index), and optimized columnar storage, delivering sub-second responses at PB scale. |
-| [Cloud-Native Architecture](https://docs.greptime.com/user-guide/concepts/architecture) | Designed for [Kubernetes](https://docs.greptime.com/user-guide/deployments-administration/deploy-on-kubernetes/greptimedb-operator-management), with compute/storage separation, native object storage (AWS S3, Azure Blob, etc.) and seamless cross-cloud access. |
+| [Cloud-Native Architecture](https://docs.greptime.com/user-guide/concepts/architecture) | Designed for [Kubernetes](https://docs.greptime.com/user-guide/deployments/deploy-on-kubernetes/greptimedb-operator-management), with compute/storage separation, native object storage (AWS S3, Azure Blob, etc.) and seamless cross-cloud access. |
| [Developer-Friendly](https://docs.greptime.com/user-guide/protocols/overview) | Access via SQL/PromQL interfaces, REST API, MySQL/PostgreSQL protocols, and popular ingestion [protocols](https://docs.greptime.com/user-guide/protocols/overview). |
-| [Flexible Deployment](https://docs.greptime.com/user-guide/deployments-administration/overview) | Deploy anywhere: edge (including ARM/[Android](https://docs.greptime.com/user-guide/deployments-administration/run-on-android)) or cloud, with unified APIs and efficient data sync. |
+| [Flexible Deployment](https://docs.greptime.com/user-guide/deployments/overview) | Deploy anywhere: edge (including ARM/[Android](https://docs.greptime.com/user-guide/deployments/run-on-android)) or cloud, with unified APIs and efficient data sync. |

Learn more in [Why GreptimeDB](https://docs.greptime.com/user-guide/concepts/why-greptimedb) and [Observability 2.0 and the Database for It](https://greptime.com/blogs/2025-04-25-greptimedb-observability2-new-database).
@@ -185,11 +185,10 @@
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
-| `logging.otlp_endpoint` | String | `http://localhost:4318` | The OTLP tracing endpoint. |
+| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
-| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `slow_query` | -- | -- | The slow query log options. |

@@ -289,11 +288,10 @@
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
-| `logging.otlp_endpoint` | String | `http://localhost:4318` | The OTLP tracing endpoint. |
+| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
-| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `slow_query` | -- | -- | The slow query log options. |

@@ -325,7 +323,6 @@
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
| `use_memory_store` | Bool | `false` | Store data in memory. |
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
-| `region_failure_detector_initialization_delay` | String | `10m` | The delay before starting region failure detection.<br/>This delay helps prevent Metasrv from triggering unnecessary region failovers before all Datanodes are fully started.<br/>Especially useful when the cluster is not deployed with GreptimeDB Operator and maintenance mode is not enabled. |
| `allow_region_failover_on_local_wal` | Bool | `false` | Whether to allow region failover on local WAL.<br/>**This option is not recommended to be set to true, because it may lead to data loss during failover.** |
| `node_max_idle_time` | String | `24hours` | Max allowed idle time before removing node info from metasrv memory. |
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. Enabled by default. |

@@ -373,11 +370,10 @@
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
-| `logging.otlp_endpoint` | String | `http://localhost:4318` | The OTLP tracing endpoint. |
+| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
-| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | The metasrv can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |

@@ -436,8 +432,8 @@
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
| `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.file_size` | String | `128MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
-| `wal.purge_threshold` | String | `1GB` | The threshold of the WAL size to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. |
-| `wal.purge_interval` | String | `1m` | The interval to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. |
+| `wal.purge_threshold` | String | `1GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
+| `wal.purge_interval` | String | `1m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |

@@ -538,11 +534,10 @@
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
-| `logging.otlp_endpoint` | String | `http://localhost:4318` | The OTLP tracing endpoint. |
+| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
-| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |

@@ -589,14 +584,11 @@
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
-| `logging.otlp_endpoint` | String | `http://localhost:4318` | The OTLP tracing endpoint. |
+| `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
-| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
| `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `query` | -- | -- | -- |
| `query.parallelism` | Integer | `1` | Parallelism of the query engine for query sent by flownode.<br/>Default to 1, so it won't use too much cpu or memory |
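The recurring `otlp_endpoint` change tracks the OTLP protocol defaults: 4317 is the conventional OTLP/gRPC port and 4318 the OTLP/HTTP port, which is presumably why the endpoint moves together with the removal of the `otlp_export_protocol` option. A small sketch to check which of the two a local collector is actually listening on (`ss` availability assumed):

```bash
# Show local TCP listeners on the conventional OTLP ports.
ss -ltn '( sport = :4317 or sport = :4318 )'
```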
@@ -129,11 +129,11 @@ dir = "./greptimedb_data/wal"
## **It's only used when the provider is `raft_engine`**.
file_size = "128MB"

-## The threshold of the WAL size to trigger a purge.
+## The threshold of the WAL size to trigger a flush.
## **It's only used when the provider is `raft_engine`**.
purge_threshold = "1GB"

-## The interval to trigger a purge.
+## The interval to trigger a flush.
## **It's only used when the provider is `raft_engine`**.
purge_interval = "1m"

@@ -629,7 +629,7 @@ level = "info"
enable_otlp_tracing = false

## The OTLP tracing endpoint.
-otlp_endpoint = "http://localhost:4318"
+otlp_endpoint = "http://localhost:4317"

## Whether to append logs to stdout.
append_stdout = true

@@ -640,9 +640,6 @@ log_format = "text"
## The maximum amount of log files.
max_log_files = 720

-## The OTLP tracing export protocol. Can be `grpc`/`http`.
-otlp_export_protocol = "http"
-
## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0

@@ -83,7 +83,7 @@ level = "info"
enable_otlp_tracing = false

## The OTLP tracing endpoint.
-otlp_endpoint = "http://localhost:4318"
+otlp_endpoint = "http://localhost:4317"

## Whether to append logs to stdout.
append_stdout = true

@@ -94,9 +94,6 @@ log_format = "text"
## The maximum amount of log files.
max_log_files = 720

-## The OTLP tracing export protocol. Can be `grpc`/`http`.
-otlp_export_protocol = "http"
-
## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0

@@ -108,8 +105,3 @@ default_ratio = 1.0
## The tokio console address.
## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1"
-
-[query]
-## Parallelism of the query engine for query sent by flownode.
-## Default to 1, so it won't use too much cpu or memory
-parallelism = 1

@@ -218,7 +218,7 @@ level = "info"
enable_otlp_tracing = false

## The OTLP tracing endpoint.
-otlp_endpoint = "http://localhost:4318"
+otlp_endpoint = "http://localhost:4317"

## Whether to append logs to stdout.
append_stdout = true

@@ -229,9 +229,6 @@ log_format = "text"
## The maximum amount of log files.
max_log_files = 720

-## The OTLP tracing export protocol. Can be `grpc`/`http`.
-otlp_export_protocol = "http"
-
## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0

@@ -43,11 +43,6 @@ use_memory_store = false
## - Using shared storage (e.g., s3).
enable_region_failover = false

-## The delay before starting region failure detection.
-## This delay helps prevent Metasrv from triggering unnecessary region failovers before all Datanodes are fully started.
-## Especially useful when the cluster is not deployed with GreptimeDB Operator and maintenance mode is not enabled.
-region_failure_detector_initialization_delay = '10m'
-
## Whether to allow region failover on local WAL.
## **This option is not recommended to be set to true, because it may lead to data loss during failover.**
allow_region_failover_on_local_wal = false

@@ -225,7 +220,7 @@ level = "info"
enable_otlp_tracing = false

## The OTLP tracing endpoint.
-otlp_endpoint = "http://localhost:4318"
+otlp_endpoint = "http://localhost:4317"

## Whether to append logs to stdout.
append_stdout = true

@@ -236,9 +231,6 @@ log_format = "text"
## The maximum amount of log files.
max_log_files = 720

-## The OTLP tracing export protocol. Can be `grpc`/`http`.
-otlp_export_protocol = "http"
-
## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0

@@ -720,7 +720,7 @@ level = "info"
enable_otlp_tracing = false

## The OTLP tracing endpoint.
-otlp_endpoint = "http://localhost:4318"
+otlp_endpoint = "http://localhost:4317"

## Whether to append logs to stdout.
append_stdout = true

@@ -731,9 +731,6 @@ log_format = "text"
## The maximum amount of log files.
max_log_files = 720

-## The OTLP tracing export protocol. Can be `grpc`/`http`.
-otlp_export_protocol = "http"
-
## The percentage of tracing will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
@@ -55,25 +55,12 @@ async function main() {
  await client.rest.issues.addLabels({
    owner, repo, issue_number: number, labels: [labelDocsRequired],
  })

- // Get available assignees for the docs repo
- const assigneesResponse = await docsClient.rest.issues.listAssignees({
-   owner: 'GreptimeTeam',
-   repo: 'docs',
- })
- const validAssignees = assigneesResponse.data.map(assignee => assignee.login)
- core.info(`Available assignees: ${validAssignees.join(', ')}`)
-
- // Check if the actor is a valid assignee, otherwise fallback to fengjiachun
- const assignee = validAssignees.includes(actor) ? actor : 'fengjiachun'
- core.info(`Assigning issue to: ${assignee}`)
-
  await docsClient.rest.issues.create({
    owner: 'GreptimeTeam',
    repo: 'docs',
    title: `Update docs for ${title}`,
    body: `A document change request is generated from ${html_url}`,
-   assignee: assignee,
+   assignee: actor,
  }).then((res) => {
    core.info(`Created issue ${res.data}`)
  })
@@ -48,4 +48,4 @@ Please refer to [SQL query](./query.sql) for GreptimeDB and Clickhouse, and [que

## Addition
- You can tune GreptimeDB's configuration to get better performance.
- You can set up GreptimeDB to use S3 as storage, see [here](https://docs.greptime.com/user-guide/deployments-administration/configuration#storage-options).
- You can set up GreptimeDB to use S3 as storage, see [here](https://docs.greptime.com/user-guide/deployments/configuration#storage-options).
@@ -83,7 +83,7 @@ If you use the [Helm Chart](https://github.com/GreptimeTeam/helm-charts) to depl
- `monitoring.enabled=true`: Deploys a standalone GreptimeDB instance dedicated to monitoring the cluster;
- `grafana.enabled=true`: Deploys Grafana and automatically imports the monitoring dashboard;

The standalone GreptimeDB instance will collect metrics from your cluster, and the dashboard will be available in the Grafana UI. For detailed deployment instructions, please refer to our [Kubernetes deployment guide](https://docs.greptime.com/user-guide/deployments-administration-administration/deploy-on-kubernetes/getting-started).
The standalone GreptimeDB instance will collect metrics from your cluster, and the dashboard will be available in the Grafana UI. For detailed deployment instructions, please refer to our [Kubernetes deployment guide](https://docs.greptime.com/user-guide/deployments-administration/deploy-on-kubernetes/getting-started).

### Self-host Prometheus and import dashboards manually

@@ -31,10 +31,8 @@ excludes = [
    "src/operator/src/expr_helper/trigger.rs",
    "src/sql/src/statements/create/trigger.rs",
    "src/sql/src/statements/show/trigger.rs",
    "src/sql/src/statements/drop/trigger.rs",
    "src/sql/src/parsers/create_parser/trigger.rs",
    "src/sql/src/parsers/show_parser/trigger.rs",
    "src/mito2/src/extension.rs",
]

[properties]
@@ -22,7 +22,6 @@ use greptime_proto::v1::region::RegionResponse as RegionResponseV1;
pub struct RegionResponse {
    pub affected_rows: AffectedRows,
    pub extensions: HashMap<String, Vec<u8>>,
    pub metadata: Vec<u8>,
}

impl RegionResponse {
@@ -30,7 +29,6 @@ impl RegionResponse {
        Self {
            affected_rows: region_response.affected_rows as _,
            extensions: region_response.extensions,
            metadata: region_response.metadata,
        }
    }

@@ -39,16 +37,6 @@ impl RegionResponse {
        Self {
            affected_rows,
            extensions: Default::default(),
            metadata: Vec::new(),
        }
    }

    /// Creates one response with metadata.
    pub fn from_metadata(metadata: Vec<u8>) -> Self {
        Self {
            affected_rows: 0,
            extensions: Default::default(),
            metadata,
        }
    }
}
@@ -5,7 +5,6 @@ edition.workspace = true
license.workspace = true

[features]
enterprise = []
testing = []

[lints]
@@ -14,11 +14,9 @@

pub use client::{CachedKvBackend, CachedKvBackendBuilder, MetaKvBackend};

mod builder;
mod client;
mod manager;
mod table_cache;

pub use builder::KvBackendCatalogManagerBuilder;
pub use manager::KvBackendCatalogManager;
pub use table_cache::{new_table_cache, TableCache, TableCacheRef};
@@ -1,131 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use common_catalog::consts::DEFAULT_CATALOG_NAME;
use common_meta::cache::LayeredCacheRegistryRef;
use common_meta::key::flow::FlowMetadataManager;
use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::KvBackendRef;
use common_procedure::ProcedureManagerRef;
use moka::sync::Cache;
use partition::manager::PartitionRuleManager;

#[cfg(feature = "enterprise")]
use crate::information_schema::InformationSchemaTableFactoryRef;
use crate::information_schema::{InformationExtensionRef, InformationSchemaProvider};
use crate::kvbackend::manager::{SystemCatalog, CATALOG_CACHE_MAX_CAPACITY};
use crate::kvbackend::KvBackendCatalogManager;
use crate::process_manager::ProcessManagerRef;
use crate::system_schema::pg_catalog::PGCatalogProvider;

pub struct KvBackendCatalogManagerBuilder {
    information_extension: InformationExtensionRef,
    backend: KvBackendRef,
    cache_registry: LayeredCacheRegistryRef,
    procedure_manager: Option<ProcedureManagerRef>,
    process_manager: Option<ProcessManagerRef>,
    #[cfg(feature = "enterprise")]
    extra_information_table_factories:
        std::collections::HashMap<String, InformationSchemaTableFactoryRef>,
}

impl KvBackendCatalogManagerBuilder {
    pub fn new(
        information_extension: InformationExtensionRef,
        backend: KvBackendRef,
        cache_registry: LayeredCacheRegistryRef,
    ) -> Self {
        Self {
            information_extension,
            backend,
            cache_registry,
            procedure_manager: None,
            process_manager: None,
            #[cfg(feature = "enterprise")]
            extra_information_table_factories: std::collections::HashMap::new(),
        }
    }

    pub fn with_procedure_manager(mut self, procedure_manager: ProcedureManagerRef) -> Self {
        self.procedure_manager = Some(procedure_manager);
        self
    }

    pub fn with_process_manager(mut self, process_manager: ProcessManagerRef) -> Self {
        self.process_manager = Some(process_manager);
        self
    }

    /// Sets the extra information tables.
    #[cfg(feature = "enterprise")]
    pub fn with_extra_information_table_factories(
        mut self,
        factories: std::collections::HashMap<String, InformationSchemaTableFactoryRef>,
    ) -> Self {
        self.extra_information_table_factories = factories;
        self
    }

    pub fn build(self) -> Arc<KvBackendCatalogManager> {
        let Self {
            information_extension,
            backend,
            cache_registry,
            procedure_manager,
            process_manager,
            #[cfg(feature = "enterprise")]
            extra_information_table_factories,
        } = self;
        Arc::new_cyclic(|me| KvBackendCatalogManager {
            information_extension,
            partition_manager: Arc::new(PartitionRuleManager::new(
                backend.clone(),
                cache_registry
                    .get()
                    .expect("Failed to get table_route_cache"),
            )),
            table_metadata_manager: Arc::new(TableMetadataManager::new(backend.clone())),
            system_catalog: SystemCatalog {
                catalog_manager: me.clone(),
                catalog_cache: Cache::new(CATALOG_CACHE_MAX_CAPACITY),
                pg_catalog_cache: Cache::new(CATALOG_CACHE_MAX_CAPACITY),
                information_schema_provider: {
                    let provider = InformationSchemaProvider::new(
                        DEFAULT_CATALOG_NAME.to_string(),
                        me.clone(),
                        Arc::new(FlowMetadataManager::new(backend.clone())),
                        process_manager.clone(),
                        backend.clone(),
                    );
                    #[cfg(feature = "enterprise")]
                    let provider = provider
                        .with_extra_table_factories(extra_information_table_factories.clone());
                    Arc::new(provider)
                },
                pg_catalog_provider: Arc::new(PGCatalogProvider::new(
                    DEFAULT_CATALOG_NAME.to_string(),
                    me.clone(),
                )),
                backend,
                process_manager,
                #[cfg(feature = "enterprise")]
                extra_information_table_factories,
            },
            cache_registry,
            procedure_manager,
        })
    }
}
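For reference, the builder above replaces the positional `Option` arguments of `KvBackendCatalogManager::new` with explicit setters. A usage sketch based on the call sites visible later in this diff:

```rust
// Sketch: constructing the catalog manager via the builder.
let catalog_manager = KvBackendCatalogManagerBuilder::new(
    information_extension,
    backend.clone(),
    cache_registry.clone(),
)
.with_procedure_manager(procedure_manager.clone())
.with_process_manager(process_manager.clone())
.build();
```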
@@ -30,13 +30,13 @@ use common_meta::key::flow::FlowMetadataManager;
use common_meta::key::schema_name::SchemaNameKey;
use common_meta::key::table_info::{TableInfoManager, TableInfoValue};
use common_meta::key::table_name::TableNameKey;
use common_meta::key::TableMetadataManagerRef;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef;
use common_procedure::ProcedureManagerRef;
use futures_util::stream::BoxStream;
use futures_util::{StreamExt, TryStreamExt};
use moka::sync::Cache;
use partition::manager::PartitionRuleManagerRef;
use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
use session::context::{Channel, QueryContext};
use snafu::prelude::*;
use store_api::metric_engine_consts::METRIC_ENGINE_NAME;
@@ -52,8 +52,6 @@ use crate::error::{
    CacheNotFoundSnafu, GetTableCacheSnafu, InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu,
    ListSchemasSnafu, ListTablesSnafu, Result, TableMetadataManagerSnafu,
};
#[cfg(feature = "enterprise")]
use crate::information_schema::InformationSchemaTableFactoryRef;
use crate::information_schema::{InformationExtensionRef, InformationSchemaProvider};
use crate::kvbackend::TableCacheRef;
use crate::process_manager::ProcessManagerRef;
@@ -69,22 +67,60 @@ use crate::CatalogManager;
#[derive(Clone)]
pub struct KvBackendCatalogManager {
    /// Provides the extension methods for the `information_schema` tables
    pub(super) information_extension: InformationExtensionRef,
    information_extension: InformationExtensionRef,
    /// Manages partition rules.
    pub(super) partition_manager: PartitionRuleManagerRef,
    partition_manager: PartitionRuleManagerRef,
    /// Manages table metadata.
    pub(super) table_metadata_manager: TableMetadataManagerRef,
    table_metadata_manager: TableMetadataManagerRef,
    /// A sub-CatalogManager that handles system tables
    pub(super) system_catalog: SystemCatalog,
    system_catalog: SystemCatalog,
    /// Cache registry for all caches.
    pub(super) cache_registry: LayeredCacheRegistryRef,
    cache_registry: LayeredCacheRegistryRef,
    /// Only available in `Standalone` mode.
    pub(super) procedure_manager: Option<ProcedureManagerRef>,
    procedure_manager: Option<ProcedureManagerRef>,
}

pub(super) const CATALOG_CACHE_MAX_CAPACITY: u64 = 128;
const CATALOG_CACHE_MAX_CAPACITY: u64 = 128;

impl KvBackendCatalogManager {
    pub fn new(
        information_extension: InformationExtensionRef,
        backend: KvBackendRef,
        cache_registry: LayeredCacheRegistryRef,
        procedure_manager: Option<ProcedureManagerRef>,
        process_manager: Option<ProcessManagerRef>,
    ) -> Arc<Self> {
        Arc::new_cyclic(|me| Self {
            information_extension,
            partition_manager: Arc::new(PartitionRuleManager::new(
                backend.clone(),
                cache_registry
                    .get()
                    .expect("Failed to get table_route_cache"),
            )),
            table_metadata_manager: Arc::new(TableMetadataManager::new(backend.clone())),
            system_catalog: SystemCatalog {
                catalog_manager: me.clone(),
                catalog_cache: Cache::new(CATALOG_CACHE_MAX_CAPACITY),
                pg_catalog_cache: Cache::new(CATALOG_CACHE_MAX_CAPACITY),
                information_schema_provider: Arc::new(InformationSchemaProvider::new(
                    DEFAULT_CATALOG_NAME.to_string(),
                    me.clone(),
                    Arc::new(FlowMetadataManager::new(backend.clone())),
                    process_manager.clone(),
                )),
                pg_catalog_provider: Arc::new(PGCatalogProvider::new(
                    DEFAULT_CATALOG_NAME.to_string(),
                    me.clone(),
                )),
                backend,
                process_manager,
            },
            cache_registry,
            procedure_manager,
        })
    }

    pub fn view_info_cache(&self) -> Result<ViewInfoCacheRef> {
        self.cache_registry.get().context(CacheNotFoundSnafu {
            name: "view_info_cache",
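Both constructors rely on `Arc::new_cyclic` so that `SystemCatalog` can hold a `Weak` back-reference to the manager that owns it. A minimal, self-contained illustration of the pattern:

```rust
use std::sync::{Arc, Weak};

struct Manager {
    // Weak back-reference: upgrading yields the owning Arc without
    // creating a strong reference cycle that would leak the allocation.
    me: Weak<Manager>,
}

fn main() {
    let manager: Arc<Manager> = Arc::new_cyclic(|me| Manager { me: me.clone() });
    assert!(manager.me.upgrade().is_some());
}
```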
@@ -471,19 +507,16 @@ fn build_table(table_info_value: TableInfoValue) -> Result<TableRef> {
/// - information_schema.{tables}
/// - pg_catalog.{tables}
#[derive(Clone)]
pub(super) struct SystemCatalog {
    pub(super) catalog_manager: Weak<KvBackendCatalogManager>,
    pub(super) catalog_cache: Cache<String, Arc<InformationSchemaProvider>>,
    pub(super) pg_catalog_cache: Cache<String, Arc<PGCatalogProvider>>,
struct SystemCatalog {
    catalog_manager: Weak<KvBackendCatalogManager>,
    catalog_cache: Cache<String, Arc<InformationSchemaProvider>>,
    pg_catalog_cache: Cache<String, Arc<PGCatalogProvider>>,

    // system_schema_provider for default catalog
    pub(super) information_schema_provider: Arc<InformationSchemaProvider>,
    pub(super) pg_catalog_provider: Arc<PGCatalogProvider>,
    pub(super) backend: KvBackendRef,
    pub(super) process_manager: Option<ProcessManagerRef>,
    #[cfg(feature = "enterprise")]
    pub(super) extra_information_table_factories:
        std::collections::HashMap<String, InformationSchemaTableFactoryRef>,
    information_schema_provider: Arc<InformationSchemaProvider>,
    pg_catalog_provider: Arc<PGCatalogProvider>,
    backend: KvBackendRef,
    process_manager: Option<ProcessManagerRef>,
}
impl SystemCatalog {
@@ -547,17 +580,12 @@ impl SystemCatalog {
        if schema == INFORMATION_SCHEMA_NAME {
            let information_schema_provider =
                self.catalog_cache.get_with_by_ref(catalog, move || {
                    let provider = InformationSchemaProvider::new(
                    Arc::new(InformationSchemaProvider::new(
                        catalog.to_string(),
                        self.catalog_manager.clone(),
                        Arc::new(FlowMetadataManager::new(self.backend.clone())),
                        self.process_manager.clone(),
                        self.backend.clone(),
                    );
                    #[cfg(feature = "enterprise")]
                    let provider = provider
                        .with_extra_table_factories(self.extra_information_table_factories.clone());
                    Arc::new(provider)
                    ))
                });
            information_schema_provider.table(table_name)
        } else if schema == PG_CATALOG_NAME && channel == Channel::Postgres {
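The per-catalog provider lookup above uses moka's `get_with_by_ref`, which builds the value at most once per key on first access. A tiny standalone illustration of that behavior:

```rust
use moka::sync::Cache;

fn main() {
    let cache: Cache<String, u64> = Cache::new(128);
    // The closure runs only for the first access to "catalog_a";
    // later lookups return the cached value.
    let v = cache.get_with_by_ref("catalog_a", || 42);
    assert_eq!(v, 42);
}
```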
@@ -352,13 +352,11 @@ impl MemoryCatalogManager {
    }

    fn create_catalog_entry(self: &Arc<Self>, catalog: String) -> SchemaEntries {
        let backend = Arc::new(MemoryKvBackend::new());
        let information_schema_provider = InformationSchemaProvider::new(
            catalog,
            Arc::downgrade(self) as Weak<dyn CatalogManager>,
            Arc::new(FlowMetadataManager::new(backend.clone())),
            Arc::new(FlowMetadataManager::new(Arc::new(MemoryKvBackend::new()))),
            None, // we don't need a ProcessManager on the region server.
            backend,
        );
        let information_schema = information_schema_provider.tables().clone();
@@ -15,7 +15,7 @@
pub mod information_schema;
mod memory_table;
pub mod pg_catalog;
pub mod predicate;
mod predicate;
mod utils;

use std::collections::HashMap;
@@ -96,7 +96,7 @@ trait SystemSchemaProviderInner {
    }
}

pub trait SystemTable {
pub(crate) trait SystemTable {
    fn table_id(&self) -> TableId;

    fn table_name(&self) -> &'static str;
@@ -110,7 +110,7 @@ pub trait SystemTable {
    }
}

pub type SystemTableRef = Arc<dyn SystemTable + Send + Sync>;
pub(crate) type SystemTableRef = Arc<dyn SystemTable + Send + Sync>;

struct SystemTableDataSource {
    table: SystemTableRef,
@@ -19,7 +19,7 @@ mod information_memory_table;
pub mod key_column_usage;
mod partitions;
mod procedure_info;
pub mod process_list;
mod process_list;
pub mod region_peers;
mod region_statistics;
mod runtime_metrics;
@@ -38,7 +38,6 @@ use common_meta::cluster::NodeInfo;
use common_meta::datanode::RegionStat;
use common_meta::key::flow::flow_state::FlowStat;
use common_meta::key::flow::FlowMetadataManager;
use common_meta::kv_backend::KvBackendRef;
use common_procedure::ProcedureInfo;
use common_recordbatch::SendableRecordBatchStream;
use datatypes::schema::SchemaRef;
@@ -113,25 +112,6 @@ macro_rules! setup_memory_table {
    };
}

#[cfg(feature = "enterprise")]
pub struct MakeInformationTableRequest {
    pub catalog_name: String,
    pub catalog_manager: Weak<dyn CatalogManager>,
    pub kv_backend: KvBackendRef,
}

/// A factory trait for making information schema tables.
///
/// This trait allows for extensibility of the information schema by providing
/// a way to dynamically create custom information schema tables.
#[cfg(feature = "enterprise")]
pub trait InformationSchemaTableFactory {
    fn make_information_table(&self, req: MakeInformationTableRequest) -> SystemTableRef;
}

#[cfg(feature = "enterprise")]
pub type InformationSchemaTableFactoryRef = Arc<dyn InformationSchemaTableFactory + Send + Sync>;

/// The `information_schema` tables info provider.
pub struct InformationSchemaProvider {
    catalog_name: String,
@@ -139,10 +119,6 @@ pub struct InformationSchemaProvider {
    process_manager: Option<ProcessManagerRef>,
    flow_metadata_manager: Arc<FlowMetadataManager>,
    tables: HashMap<String, TableRef>,
    #[allow(dead_code)]
    kv_backend: KvBackendRef,
    #[cfg(feature = "enterprise")]
    extra_table_factories: HashMap<String, InformationSchemaTableFactoryRef>,
}
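To make the factory hook concrete, here is a hypothetical enterprise-side implementation sketched against the trait above (`MyInfoTable` is illustrative and not part of the codebase):

```rust
#[cfg(feature = "enterprise")]
struct MyInfoTableFactory;

#[cfg(feature = "enterprise")]
impl InformationSchemaTableFactory for MyInfoTableFactory {
    fn make_information_table(&self, req: MakeInformationTableRequest) -> SystemTableRef {
        // Build a custom SystemTable from the request's catalog context.
        // MyInfoTable is a placeholder for a real SystemTable implementation.
        Arc::new(MyInfoTable::new(req.catalog_name, req.kv_backend))
    }
}
```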
impl SystemSchemaProvider for InformationSchemaProvider {
@@ -152,7 +128,6 @@ impl SystemSchemaProvider for InformationSchemaProvider {
        &self.tables
    }
}

impl SystemSchemaProviderInner for InformationSchemaProvider {
    fn catalog_name(&self) -> &str {
        &self.catalog_name
@@ -240,22 +215,7 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
                .process_manager
                .as_ref()
                .map(|p| Arc::new(InformationSchemaProcessList::new(p.clone())) as _),
            table_name => {
                #[cfg(feature = "enterprise")]
                return self.extra_table_factories.get(table_name).map(|factory| {
                    let req = MakeInformationTableRequest {
                        catalog_name: self.catalog_name.clone(),
                        catalog_manager: self.catalog_manager.clone(),
                        kv_backend: self.kv_backend.clone(),
                    };
                    factory.make_information_table(req)
                });
                #[cfg(not(feature = "enterprise"))]
                {
                    let _ = table_name;
                    None
                }
            }
            _ => None,
        }
    }
}
@@ -266,7 +226,6 @@ impl InformationSchemaProvider {
        catalog_manager: Weak<dyn CatalogManager>,
        flow_metadata_manager: Arc<FlowMetadataManager>,
        process_manager: Option<ProcessManagerRef>,
        kv_backend: KvBackendRef,
    ) -> Self {
        let mut provider = Self {
            catalog_name,
@@ -274,9 +233,6 @@ impl InformationSchemaProvider {
            flow_metadata_manager,
            process_manager,
            tables: HashMap::new(),
            kv_backend,
            #[cfg(feature = "enterprise")]
            extra_table_factories: HashMap::new(),
        };

        provider.build_tables();
@@ -284,16 +240,6 @@ impl InformationSchemaProvider {
        provider
    }

    #[cfg(feature = "enterprise")]
    pub(crate) fn with_extra_table_factories(
        mut self,
        factories: HashMap<String, InformationSchemaTableFactoryRef>,
    ) -> Self {
        self.extra_table_factories = factories;
        self.build_tables();
        self
    }

    fn build_tables(&mut self) {
        let mut tables = HashMap::new();

@@ -344,19 +290,16 @@ impl InformationSchemaProvider {
        if let Some(process_list) = self.build_table(PROCESS_LIST) {
            tables.insert(PROCESS_LIST.to_string(), process_list);
        }
        #[cfg(feature = "enterprise")]
        for name in self.extra_table_factories.keys() {
            tables.insert(name.to_string(), self.build_table(name).expect(name));
        }
        // Add memory tables
        for name in MEMORY_TABLES.iter() {
            tables.insert((*name).to_string(), self.build_table(name).expect(name));
        }

        self.tables = tables;
    }
}

pub trait InformationTable {
trait InformationTable {
    fn table_id(&self) -> TableId;

    fn table_name(&self) -> &'static str;
@@ -39,14 +39,14 @@ use crate::process_manager::ProcessManagerRef;
use crate::system_schema::information_schema::InformationTable;

/// Column names of `information_schema.process_list`
pub const ID: &str = "id";
pub const CATALOG: &str = "catalog";
pub const SCHEMAS: &str = "schemas";
pub const QUERY: &str = "query";
pub const CLIENT: &str = "client";
pub const FRONTEND: &str = "frontend";
pub const START_TIMESTAMP: &str = "start_timestamp";
pub const ELAPSED_TIME: &str = "elapsed_time";
const ID: &str = "id";
const CATALOG: &str = "catalog";
const SCHEMAS: &str = "schemas";
const QUERY: &str = "query";
const CLIENT: &str = "client";
const FRONTEND: &str = "frontend";
const START_TIMESTAMP: &str = "start_timestamp";
const ELAPSED_TIME: &str = "elapsed_time";

/// `information_schema.process_list` table implementation that tracks running
/// queries in the current cluster.
@@ -48,4 +48,3 @@ pub const FLOWS: &str = "flows";
pub const PROCEDURE_INFO: &str = "procedure_info";
pub const REGION_STATISTICS: &str = "region_statistics";
pub const PROCESS_LIST: &str = "process_list";
pub const TRIGGER_LIST: &str = "trigger_list";
@@ -207,7 +207,6 @@ mod tests {
    use session::context::QueryContext;

    use super::*;
    use crate::kvbackend::KvBackendCatalogManagerBuilder;
    use crate::memory::MemoryCatalogManager;

    #[test]
@@ -324,13 +323,13 @@ mod tests {
                .build(),
        );

        let catalog_manager = KvBackendCatalogManagerBuilder::new(
        let catalog_manager = KvBackendCatalogManager::new(
            Arc::new(NoopInformationExtension),
            backend.clone(),
            layered_cache_registry,
        )
        .build();

            None,
            None,
        );
        let table_metadata_manager = TableMetadataManager::new(backend);
        let mut view_info = common_meta::key::test_utils::new_test_table_info(1024, vec![]);
        view_info.table_type = TableType::View;
@@ -43,6 +43,7 @@ common-time.workspace = true
common-version.workspace = true
common-wal.workspace = true
datatypes.workspace = true
either = "1.8"
etcd-client.workspace = true
futures.workspace = true
humantime.workspace = true

@@ -160,7 +160,6 @@ fn create_table_info(table_id: TableId, table_name: TableName) -> RawTableInfo {
        options: Default::default(),
        region_numbers: (1..=100).collect(),
        partition_key_indices: vec![],
        column_ids: vec![],
    };

    RawTableInfo {

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#![allow(clippy::print_stdout)]
mod bench;
mod data;
mod database;
@@ -241,6 +241,7 @@ impl RepairTool {
        let alter_table_request = alter_table::make_alter_region_request_for_peer(
            logical_table_id,
            &alter_table_expr,
            full_table_metadata.table_info.ident.version,
            peer,
            physical_region_routes,
        )?;

@@ -66,6 +66,7 @@ pub fn generate_alter_table_expr_for_all_columns(
pub fn make_alter_region_request_for_peer(
    logical_table_id: TableId,
    alter_table_expr: &AlterTableExpr,
    schema_version: u64,
    peer: &Peer,
    region_routes: &[RegionRoute],
) -> Result<RegionRequest> {
@@ -73,7 +74,7 @@ pub fn make_alter_region_request_for_peer(
    let mut requests = Vec::with_capacity(regions_on_this_peer.len());
    for region_number in &regions_on_this_peer {
        let region_id = RegionId::new(logical_table_id, *region_number);
        let request = make_alter_region_request(region_id, alter_table_expr);
        let request = make_alter_region_request(region_id, alter_table_expr, schema_version);
        requests.push(request);
    }
@@ -301,6 +301,7 @@ struct MetaInfoTool {

#[async_trait]
impl Tool for MetaInfoTool {
    #[allow(clippy::print_stdout)]
    async fn do_work(&self) -> std::result::Result<(), BoxedError> {
        let result = MetadataSnapshotManager::info(
            &self.inner,

@@ -31,7 +31,7 @@ use base64::prelude::BASE64_STANDARD;
use base64::Engine;
use common_catalog::build_db_string;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::ext::BoxedError;
use common_error::ext::{BoxedError, ErrorExt};
use common_grpc::flight::do_put::DoPutResponse;
use common_grpc::flight::{FlightDecoder, FlightMessage};
use common_query::Output;
@@ -48,7 +48,7 @@ use tonic::transport::Channel;

use crate::error::{
    ConvertFlightDataSnafu, Error, FlightGetSnafu, IllegalFlightMessagesSnafu,
    InvalidTonicMetadataValueSnafu,
    InvalidTonicMetadataValueSnafu, ServerSnafu,
};
use crate::{error, from_grpc_response, Client, Result};
@@ -196,22 +196,12 @@ impl Database {

    /// Retries if the connection fails. `max_retries` is the maximum number of retries,
    /// so the total wait time is at most `max_retries * GRPC_CONN_TIMEOUT`.
    pub async fn handle_with_retry(
        &self,
        request: Request,
        max_retries: u32,
        hints: &[(&str, &str)],
    ) -> Result<u32> {
    pub async fn handle_with_retry(&self, request: Request, max_retries: u32) -> Result<u32> {
        let mut client = make_database_client(&self.client)?.inner;
        let mut retries = 0;

        let request = self.to_rpc_request(request);

        loop {
            let mut tonic_request = tonic::Request::new(request.clone());
            let metadata = tonic_request.metadata_mut();
            Self::put_hints(metadata, hints)?;
            let raw_response = client.handle(tonic_request).await;
            let raw_response = client.handle(request.clone()).await;
            match (raw_response, retries < max_retries) {
                (Ok(resp), _) => return from_grpc_response(resp.into_inner()),
                (Err(err), true) => {
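The retry loop above is bounded: each attempt is capped by `GRPC_CONN_TIMEOUT`, so the worst case is roughly `max_retries * GRPC_CONN_TIMEOUT` (for example, 3 retries with a 5-second timeout cap the wait at about 15 seconds). The generic shape of such a loop, as a hedged sketch rather than the actual client code:

```rust
// Generic bounded-retry sketch (illustrative only).
async fn with_retry<T, E, F, Fut>(mut attempt: F, max_retries: u32) -> Result<T, E>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<T, E>>,
{
    let mut retries = 0;
    loop {
        match attempt().await {
            Ok(value) => return Ok(value),
            // Retry while the budget lasts; a real client would log the error
            // and usually reconnect or back off before the next attempt.
            Err(_) if retries < max_retries => retries += 1,
            Err(e) => return Err(e),
        }
    }
}
```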
@@ -302,16 +292,21 @@ impl Database {
        let response = client.mut_inner().do_get(request).await.or_else(|e| {
            let tonic_code = e.code();
            let e: Error = e.into();
            let code = e.status_code();
            let msg = e.to_string();
            let error =
                Err(BoxedError::new(ServerSnafu { code, msg }.build())).with_context(|_| {
                    FlightGetSnafu {
                        addr: client.addr().to_string(),
                        tonic_code,
                    }
                });
            error!(
                "Failed to do Flight get, addr: {}, code: {}, source: {:?}",
                client.addr(),
                tonic_code,
                e
                error
            );
            let error = Err(BoxedError::new(e)).with_context(|_| FlightGetSnafu {
                addr: client.addr().to_string(),
                tonic_code,
            });
            error
        })?;
@@ -441,11 +436,8 @@ mod tests {

    use api::v1::auth_header::AuthScheme;
    use api::v1::{AuthHeader, Basic};
    use common_error::status_code::StatusCode;
    use tonic::{Code, Status};

    use super::*;
    use crate::error::TonicSnafu;

    #[test]
    fn test_flight_ctx() {
@@ -468,19 +460,4 @@ mod tests {
            })
        )
    }

    #[test]
    fn test_from_tonic_status() {
        let expected = TonicSnafu {
            code: StatusCode::Internal,
            msg: "blabla".to_string(),
            tonic_code: Code::Internal,
        }
        .build();

        let status = Status::new(Code::Internal, "blabla");
        let actual: Error = status.into();

        assert_eq!(expected.to_string(), actual.to_string());
    }
}
@@ -14,13 +14,13 @@

use std::any::Any;

use common_error::define_from_tonic_status;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_error::status_code::{convert_tonic_code_to_status_code, StatusCode};
use common_error::{GREPTIME_DB_HEADER_ERROR_CODE, GREPTIME_DB_HEADER_ERROR_MSG};
use common_macro::stack_trace_debug;
use snafu::{location, Location, Snafu};
use tonic::metadata::errors::InvalidMetadataValue;
use tonic::Code;
use tonic::{Code, Status};
#[derive(Snafu)]
#[snafu(visibility(pub))]
@@ -124,15 +124,6 @@ pub enum Error {
        location: Location,
        source: datatypes::error::Error,
    },

    #[snafu(display("{}", msg))]
    Tonic {
        code: StatusCode,
        msg: String,
        tonic_code: Code,
        #[snafu(implicit)]
        location: Location,
    },
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -144,7 +135,7 @@ impl ErrorExt for Error {
            | Error::MissingField { .. }
            | Error::IllegalDatabaseResponse { .. } => StatusCode::Internal,

            Error::Server { code, .. } | Error::Tonic { code, .. } => *code,
            Error::Server { code, .. } => *code,
            Error::FlightGet { source, .. }
            | Error::RegionServer { source, .. }
            | Error::FlowServer { source, .. } => source.status_code(),
@@ -162,7 +153,34 @@ impl ErrorExt for Error {
        }
    }
define_from_tonic_status!(Error, Tonic);
impl From<Status> for Error {
    fn from(e: Status) -> Self {
        fn get_metadata_value(e: &Status, key: &str) -> Option<String> {
            e.metadata()
                .get(key)
                .and_then(|v| String::from_utf8(v.as_bytes().to_vec()).ok())
        }

        let code = get_metadata_value(&e, GREPTIME_DB_HEADER_ERROR_CODE).and_then(|s| {
            if let Ok(code) = s.parse::<u32>() {
                StatusCode::from_u32(code)
            } else {
                None
            }
        });
        let tonic_code = e.code();
        let code = code.unwrap_or_else(|| convert_tonic_code_to_status_code(tonic_code));

        let msg = get_metadata_value(&e, GREPTIME_DB_HEADER_ERROR_MSG)
            .unwrap_or_else(|| e.message().to_string());

        Self::Server {
            code,
            msg,
            location: location!(),
        }
    }
}
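A hedged round-trip sketch of the conversion above: a tonic `Status` carrying GreptimeDB's error headers decodes back into `Error::Server` with the original code, while a bare `Status` falls back to `convert_tonic_code_to_status_code`. The numeric code below is illustrative only, and it assumes the header-key constants from `common_error` are `&'static str` values:

```rust
use tonic::{Code, Status};

fn demo() {
    let mut status = Status::new(Code::Internal, "something failed");
    status.metadata_mut().insert(
        GREPTIME_DB_HEADER_ERROR_CODE,
        "1003".parse().expect("ascii metadata value"), // illustrative code
    );
    let err: Error = status.into();
    // `err` is Error::Server; its StatusCode came from the header rather than
    // from the generic tonic -> StatusCode fallback mapping.
    let _ = err;
}
```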
impl Error {
    pub fn should_retry(&self) -> bool {
@@ -21,7 +21,7 @@ use arc_swap::ArcSwapOption;
use arrow_flight::Ticket;
use async_stream::stream;
use async_trait::async_trait;
use common_error::ext::BoxedError;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_grpc::flight::{FlightDecoder, FlightMessage};
use common_meta::error::{self as meta_error, Result as MetaResult};
@@ -107,18 +107,24 @@ impl RegionRequester {
            .mut_inner()
            .do_get(ticket)
            .await
            .or_else(|e| {
            .map_err(|e| {
                let tonic_code = e.code();
                let e: error::Error = e.into();
                let code = e.status_code();
                let msg = e.to_string();
                let error = ServerSnafu { code, msg }
                    .fail::<()>()
                    .map_err(BoxedError::new)
                    .with_context(|_| FlightGetSnafu {
                        tonic_code,
                        addr: flight_client.addr().to_string(),
                    })
                    .unwrap_err();
                error!(
                    e; "Failed to do Flight get, addr: {}, code: {}",
                    flight_client.addr(),
                    tonic_code
                );
                let error = Err(BoxedError::new(e)).with_context(|_| FlightGetSnafu {
                    addr: flight_client.addr().to_string(),
                    tonic_code,
                });
                error
            })?;
@@ -16,7 +16,7 @@ default = [
    "meta-srv/pg_kvbackend",
    "meta-srv/mysql_kvbackend",
]
enterprise = ["common-meta/enterprise", "frontend/enterprise", "meta-srv/enterprise", "catalog/enterprise"]
enterprise = ["common-meta/enterprise", "frontend/enterprise", "meta-srv/enterprise"]
tokio-console = ["common-telemetry/tokio-console"]

[lints]
@@ -52,6 +52,7 @@ common-version.workspace = true
common-wal.workspace = true
datanode.workspace = true
datatypes.workspace = true
either = "1.8"
etcd-client.workspace = true
file-engine.workspace = true
flow.workspace = true
@@ -66,7 +67,6 @@ metric-engine.workspace = true
mito2.workspace = true
moka.workspace = true
nu-ansi-term = "0.46"
object-store.workspace = true
plugins.workspace = true
prometheus.workspace = true
prost.workspace = true
@@ -280,7 +280,7 @@ mod tests {

    use common_config::ENV_VAR_SEP;
    use common_test_util::temp_dir::create_named_temp_file;
    use object_store::config::{FileConfig, GcsConfig, ObjectStoreConfig, S3Config};
    use datanode::config::{FileConfig, GcsConfig, ObjectStoreConfig, S3Config};
    use servers::heartbeat_options::HeartbeatOptions;

    use super::*;
@@ -18,7 +18,7 @@ use std::time::Duration;

use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
use catalog::information_extension::DistributedInformationExtension;
use catalog::kvbackend::{CachedKvBackendBuilder, KvBackendCatalogManagerBuilder, MetaKvBackend};
use catalog::kvbackend::{CachedKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
use clap::Parser;
use client::client_manager::NodeClients;
use common_base::Plugins;
@@ -342,12 +342,13 @@ impl StartCommand {

        let information_extension =
            Arc::new(DistributedInformationExtension::new(meta_client.clone()));
        let catalog_manager = KvBackendCatalogManagerBuilder::new(
        let catalog_manager = KvBackendCatalogManager::new(
            information_extension,
            cached_meta_backend.clone(),
            layered_cache_registry.clone(),
        )
        .build();
            None,
            None,
        );

        let table_metadata_manager =
            Arc::new(TableMetadataManager::new(cached_meta_backend.clone()));
@@ -370,11 +371,8 @@ impl StartCommand {

        let flow_metadata_manager = Arc::new(FlowMetadataManager::new(cached_meta_backend.clone()));
        let flow_auth_header = get_flow_auth_options(&opts).context(StartFlownodeSnafu)?;
        let frontend_client = FrontendClient::from_meta_client(
            meta_client.clone(),
            flow_auth_header,
            opts.query.clone(),
        );
        let frontend_client =
            FrontendClient::from_meta_client(meta_client.clone(), flow_auth_header);
        let frontend_client = Arc::new(frontend_client);
        let flownode_builder = FlownodeBuilder::new(
            opts.clone(),
@@ -19,7 +19,7 @@ use std::time::Duration;
use async_trait::async_trait;
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
use catalog::information_extension::DistributedInformationExtension;
use catalog::kvbackend::{CachedKvBackendBuilder, KvBackendCatalogManagerBuilder, MetaKvBackend};
use catalog::kvbackend::{CachedKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
use catalog::process_manager::ProcessManager;
use clap::Parser;
use client::client_manager::NodeClients;
@@ -102,7 +102,7 @@ impl App for Instance {
#[derive(Parser)]
pub struct Command {
    #[clap(subcommand)]
    pub subcmd: SubCommand,
    subcmd: SubCommand,
}

impl Command {
@@ -116,7 +116,7 @@ impl Command {
}

#[derive(Parser)]
pub enum SubCommand {
enum SubCommand {
    Start(StartCommand),
}
@@ -153,7 +153,7 @@ pub struct StartCommand {
    #[clap(long)]
    postgres_addr: Option<String>,
    #[clap(short, long)]
    pub config_file: Option<String>,
    config_file: Option<String>,
    #[clap(short, long)]
    influxdb_enable: Option<bool>,
    #[clap(long, value_delimiter = ',', num_args = 1..)]
@@ -169,7 +169,7 @@ pub struct StartCommand {
    #[clap(long)]
    disable_dashboard: Option<bool>,
    #[clap(long, default_value = "GREPTIMEDB_FRONTEND")]
    pub env_prefix: String,
    env_prefix: String,
}

impl StartCommand {
@@ -350,20 +350,13 @@ impl StartCommand {
            addrs::resolve_addr(&opts.grpc.bind_addr, Some(&opts.grpc.server_addr)),
            Some(meta_client.clone()),
        ));

        let builder = KvBackendCatalogManagerBuilder::new(
        let catalog_manager = KvBackendCatalogManager::new(
            information_extension,
            cached_meta_backend.clone(),
            layered_cache_registry.clone(),
        )
        .with_process_manager(process_manager.clone());
        #[cfg(feature = "enterprise")]
        let builder = if let Some(factories) = plugins.get() {
            builder.with_extra_information_table_factories(factories)
        } else {
            builder
        };
        let catalog_manager = builder.build();
            None,
            Some(process_manager.clone()),
        );

        let executor = HandlerGroupExecutor::new(vec![
            Arc::new(ParseMailboxMessageHandler),
@@ -54,10 +54,6 @@ impl Instance {
    pub fn get_inner(&self) -> &MetasrvInstance {
        &self.instance
    }

    pub fn mut_inner(&mut self) -> &mut MetasrvInstance {
        &mut self.instance
    }
}

#[async_trait]
@@ -340,12 +336,12 @@ impl StartCommand {
            .await
            .context(StartMetaServerSnafu)?;

        let builder = meta_srv::bootstrap::metasrv_builder(&opts, plugins, None)
        let builder = meta_srv::bootstrap::metasrv_builder(&opts, plugins.clone(), None)
            .await
            .context(error::BuildMetaServerSnafu)?;
        let metasrv = builder.build().await.context(error::BuildMetaServerSnafu)?;

        let instance = MetasrvInstance::new(metasrv)
        let instance = MetasrvInstance::new(opts, plugins, metasrv)
            .await
            .context(error::BuildMetaServerSnafu)?;
@@ -20,7 +20,7 @@ use std::{fs, path};
use async_trait::async_trait;
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
use catalog::information_schema::InformationExtension;
use catalog::kvbackend::KvBackendCatalogManagerBuilder;
use catalog::kvbackend::KvBackendCatalogManager;
use catalog::process_manager::ProcessManager;
use clap::Parser;
use client::api::v1::meta::RegionRole;
@@ -257,34 +257,15 @@ pub struct Instance {
    flownode: FlownodeInstance,
    procedure_manager: ProcedureManagerRef,
    wal_options_allocator: WalOptionsAllocatorRef,

    // The components of standalone, kept to make it easier to extend
    // the instance based on them.
    #[cfg(feature = "enterprise")]
    components: Components,

    // Keep the logging guard to prevent the worker from being dropped.
    _guard: Vec<WorkerGuard>,
}

#[cfg(feature = "enterprise")]
pub struct Components {
    pub plugins: Plugins,
    pub kv_backend: KvBackendRef,
    pub frontend_client: Arc<FrontendClient>,
    pub catalog_manager: catalog::CatalogManagerRef,
}

impl Instance {
    /// Find the socket addr of a server by its `name`.
    pub fn server_addr(&self, name: &str) -> Option<SocketAddr> {
        self.frontend.server_handlers().addr(name)
    }

    #[cfg(feature = "enterprise")]
    pub fn components(&self) -> &Components {
        &self.components
    }
}

#[async_trait]
@@ -544,20 +525,13 @@ impl StartCommand {
        ));

        let process_manager = Arc::new(ProcessManager::new(opts.grpc.server_addr.clone(), None));
        let builder = KvBackendCatalogManagerBuilder::new(
        let catalog_manager = KvBackendCatalogManager::new(
            information_extension.clone(),
            kv_backend.clone(),
            layered_cache_registry.clone(),
        )
        .with_procedure_manager(procedure_manager.clone())
        .with_process_manager(process_manager.clone());
        #[cfg(feature = "enterprise")]
        let builder = if let Some(factories) = plugins.get() {
            builder.with_extra_information_table_factories(factories)
        } else {
            builder
        };
        let catalog_manager = builder.build();
            Some(procedure_manager.clone()),
            Some(process_manager.clone()),
        );

        let table_metadata_manager =
            Self::create_table_metadata_manager(kv_backend.clone()).await?;
@@ -571,15 +545,14 @@ impl StartCommand {
        // Standalone does not use gRPC, but gets a handle to the frontend gRPC client
        // without actually making a connection.
        let (frontend_client, frontend_instance_handler) =
            FrontendClient::from_empty_grpc_handler(opts.query.clone());
        let frontend_client = Arc::new(frontend_client);
            FrontendClient::from_empty_grpc_handler();
        let flow_builder = FlownodeBuilder::new(
            flownode_options,
            plugins.clone(),
            table_metadata_manager.clone(),
            catalog_manager.clone(),
            flow_metadata_manager.clone(),
            frontend_client.clone(),
            Arc::new(frontend_client.clone()),
        );
        let flownode = flow_builder
            .build()
@@ -689,7 +662,7 @@ impl StartCommand {
        let export_metrics_task = ExportMetricsTask::try_new(&opts.export_metrics, Some(&plugins))
            .context(error::ServersSnafu)?;

        let servers = Services::new(opts, fe_instance.clone(), plugins.clone())
        let servers = Services::new(opts, fe_instance.clone(), plugins)
            .build()
            .context(error::StartFrontendSnafu)?;

@@ -700,22 +673,12 @@ impl StartCommand {
            export_metrics_task,
        };

        #[cfg(feature = "enterprise")]
        let components = Components {
            plugins,
            kv_backend,
            frontend_client,
            catalog_manager,
        };

        Ok(Instance {
            datanode,
            frontend,
            flownode,
            procedure_manager,
            wal_options_allocator,
            #[cfg(feature = "enterprise")]
            components,
            _guard: guard,
        })
    }
@@ -855,7 +818,7 @@ mod tests {
    use common_config::ENV_VAR_SEP;
    use common_test_util::temp_dir::create_named_temp_file;
    use common_wal::config::DatanodeWalConfig;
    use object_store::config::{FileConfig, GcsConfig};
    use datanode::config::{FileConfig, GcsConfig};

    use super::*;
    use crate::options::GlobalOptions;
@@ -974,15 +937,15 @@ mod tests {

        assert!(matches!(
            &dn_opts.storage.store,
            object_store::config::ObjectStoreConfig::File(FileConfig { .. })
            datanode::config::ObjectStoreConfig::File(FileConfig { .. })
        ));
        assert_eq!(dn_opts.storage.providers.len(), 2);
        assert!(matches!(
            dn_opts.storage.providers[0],
            object_store::config::ObjectStoreConfig::Gcs(GcsConfig { .. })
            datanode::config::ObjectStoreConfig::Gcs(GcsConfig { .. })
        ));
        match &dn_opts.storage.providers[1] {
            object_store::config::ObjectStoreConfig::S3(s3_config) => {
            datanode::config::ObjectStoreConfig::S3(s3_config) => {
                assert_eq!(
                    "SecretBox<alloc::string::String>([REDACTED])".to_string(),
                    format!("{:?}", s3_config.access_key_id)
@@ -18,19 +18,17 @@ use cmd::options::GreptimeOptions;
use cmd::standalone::StandaloneOptions;
use common_config::{Configurable, DEFAULT_DATA_HOME};
use common_options::datanode::{ClientOptions, DatanodeClientOptions};
use common_telemetry::logging::{LoggingOptions, DEFAULT_LOGGING_DIR, DEFAULT_OTLP_HTTP_ENDPOINT};
use common_telemetry::logging::{LoggingOptions, DEFAULT_LOGGING_DIR, DEFAULT_OTLP_ENDPOINT};
use common_wal::config::raft_engine::RaftEngineConfig;
use common_wal::config::DatanodeWalConfig;
use datanode::config::{DatanodeOptions, RegionEngineConfig, StorageConfig};
use file_engine::config::EngineConfig as FileEngineConfig;
use flow::FlownodeOptions;
use frontend::frontend::FrontendOptions;
use meta_client::MetaClientOptions;
use meta_srv::metasrv::MetasrvOptions;
use meta_srv::selector::SelectorType;
use metric_engine::config::EngineConfig as MetricEngineConfig;
use mito2::config::MitoConfig;
use query::options::QueryOptions;
use servers::export_metrics::ExportMetricsOption;
use servers::grpc::GrpcOptions;
use servers::http::HttpOptions;
@@ -83,7 +81,7 @@ fn test_load_datanode_example_config()
            logging: LoggingOptions {
                level: Some("info".to_string()),
                dir: format!("{}/{}", DEFAULT_DATA_HOME, DEFAULT_LOGGING_DIR),
                otlp_endpoint: Some(DEFAULT_OTLP_HTTP_ENDPOINT.to_string()),
                otlp_endpoint: Some(DEFAULT_OTLP_ENDPOINT.to_string()),
                tracing_sample_ratio: Some(Default::default()),
                ..Default::default()
            },
@@ -126,7 +124,7 @@ fn test_load_frontend_example_config() {
            logging: LoggingOptions {
                level: Some("info".to_string()),
                dir: format!("{}/{}", DEFAULT_DATA_HOME, DEFAULT_LOGGING_DIR),
                otlp_endpoint: Some(DEFAULT_OTLP_HTTP_ENDPOINT.to_string()),
                otlp_endpoint: Some(DEFAULT_OTLP_ENDPOINT.to_string()),
                tracing_sample_ratio: Some(Default::default()),
                ..Default::default()
            },
@@ -174,7 +172,7 @@ fn test_load_metasrv_example_config() {
            logging: LoggingOptions {
                dir: format!("{}/{}", DEFAULT_DATA_HOME, DEFAULT_LOGGING_DIR),
                level: Some("info".to_string()),
                otlp_endpoint: Some(DEFAULT_OTLP_HTTP_ENDPOINT.to_string()),
                otlp_endpoint: Some(DEFAULT_OTLP_ENDPOINT.to_string()),
                tracing_sample_ratio: Some(Default::default()),
                ..Default::default()
            },
@@ -197,57 +195,6 @@ fn test_load_metasrv_example_config() {
    similar_asserts::assert_eq!(options, expected);
}

#[test]
fn test_load_flownode_example_config() {
    let example_config = common_test_util::find_workspace_path("config/flownode.example.toml");
    let options =
        GreptimeOptions::<FlownodeOptions>::load_layered_options(example_config.to_str(), "")
            .unwrap();
    let expected = GreptimeOptions::<FlownodeOptions> {
        component: FlownodeOptions {
            node_id: Some(14),
            flow: Default::default(),
            grpc: GrpcOptions {
                bind_addr: "127.0.0.1:6800".to_string(),
                server_addr: "127.0.0.1:6800".to_string(),
                runtime_size: 2,
                ..Default::default()
            },
            logging: LoggingOptions {
                dir: format!("{}/{}", DEFAULT_DATA_HOME, DEFAULT_LOGGING_DIR),
                level: Some("info".to_string()),
                otlp_endpoint: Some(DEFAULT_OTLP_HTTP_ENDPOINT.to_string()),
                otlp_export_protocol: Some(common_telemetry::logging::OtlpExportProtocol::Http),
                tracing_sample_ratio: Some(Default::default()),
                ..Default::default()
            },
            tracing: Default::default(),
            heartbeat: Default::default(),
            // flownode deliberately uses a lower query parallelism
            // to avoid overwhelming the frontend with too many queries
            query: QueryOptions { parallelism: 1 },
            meta_client: Some(MetaClientOptions {
                metasrv_addrs: vec!["127.0.0.1:3002".to_string()],
                timeout: Duration::from_secs(3),
                heartbeat_timeout: Duration::from_millis(500),
                ddl_timeout: Duration::from_secs(10),
                connect_timeout: Duration::from_secs(1),
                tcp_nodelay: true,
                metadata_cache_max_capacity: 100000,
                metadata_cache_ttl: Duration::from_secs(600),
                metadata_cache_tti: Duration::from_secs(300),
            }),
            http: HttpOptions {
                addr: "127.0.0.1:4000".to_string(),
                ..Default::default()
            },
            user_provider: None,
        },
        ..Default::default()
    };
    similar_asserts::assert_eq!(options, expected);
}

#[test]
fn test_load_standalone_example_config() {
    let example_config = common_test_util::find_workspace_path("config/standalone.example.toml");
@@ -282,7 +229,7 @@ fn test_load_standalone_example_config() {
            logging: LoggingOptions {
                level: Some("info".to_string()),
                dir: format!("{}/{}", DEFAULT_DATA_HOME, DEFAULT_LOGGING_DIR),
                otlp_endpoint: Some(DEFAULT_OTLP_HTTP_ENDPOINT.to_string()),
                otlp_endpoint: Some(DEFAULT_OTLP_ENDPOINT.to_string()),
                tracing_sample_ratio: Some(Default::default()),
                ..Default::default()
            },
@@ -78,7 +78,7 @@ pub const INFORMATION_SCHEMA_ROUTINES_TABLE_ID: u32 = 21;
pub const INFORMATION_SCHEMA_SCHEMA_PRIVILEGES_TABLE_ID: u32 = 22;
/// id for information_schema.TABLE_PRIVILEGES
pub const INFORMATION_SCHEMA_TABLE_PRIVILEGES_TABLE_ID: u32 = 23;
/// id for information_schema.TRIGGERS (for mysql)
/// id for information_schema.TRIGGERS
pub const INFORMATION_SCHEMA_TRIGGERS_TABLE_ID: u32 = 24;
/// id for information_schema.GLOBAL_STATUS
pub const INFORMATION_SCHEMA_GLOBAL_STATUS_TABLE_ID: u32 = 25;
@@ -104,8 +104,6 @@ pub const INFORMATION_SCHEMA_PROCEDURE_INFO_TABLE_ID: u32 = 34;
pub const INFORMATION_SCHEMA_REGION_STATISTICS_TABLE_ID: u32 = 35;
/// id for information_schema.process_list
pub const INFORMATION_SCHEMA_PROCESS_LIST_TABLE_ID: u32 = 36;
/// id for information_schema.trigger_list (for greptimedb trigger)
pub const INFORMATION_SCHEMA_TRIGGER_TABLE_ID: u32 = 37;

// ----- End of information_schema tables -----
@@ -14,7 +14,6 @@ common-macro.workspace = true
config.workspace = true
humantime-serde.workspace = true
num_cpus.workspace = true
object-store.workspace = true
serde.workspace = true
serde_json.workspace = true
serde_with.workspace = true
@@ -106,7 +106,7 @@ mod tests {
    use common_telemetry::logging::LoggingOptions;
    use common_test_util::temp_dir::create_named_temp_file;
    use common_wal::config::DatanodeWalConfig;
    use datanode::config::StorageConfig;
    use datanode::config::{ObjectStoreConfig, StorageConfig};
    use meta_client::MetaClientOptions;
    use serde::{Deserialize, Serialize};

@@ -212,7 +212,7 @@ mod tests {

        // Check the configs from environment variables.
        match &opts.storage.store {
            object_store::config::ObjectStoreConfig::S3(s3_config) => {
            ObjectStoreConfig::S3(s3_config) => {
                assert_eq!(s3_config.bucket, "mybucket".to_string());
            }
            _ => panic!("unexpected store type"),
@@ -119,11 +119,6 @@ pub enum StatusCode {
    FlowAlreadyExists = 8000,
    FlowNotFound = 8001,
    // ====== End of flow related status code =====

    // ====== Begin of trigger related status code =====
    TriggerAlreadyExists = 9000,
    TriggerNotFound = 9001,
    // ====== End of trigger related status code =====
}

impl StatusCode {
@@ -160,8 +155,6 @@ impl StatusCode {
            | StatusCode::RegionNotFound
            | StatusCode::FlowAlreadyExists
            | StatusCode::FlowNotFound
            | StatusCode::TriggerAlreadyExists
            | StatusCode::TriggerNotFound
            | StatusCode::RegionReadonly
            | StatusCode::TableColumnNotFound
            | StatusCode::TableColumnExists
@@ -205,8 +198,6 @@ impl StatusCode {
            | StatusCode::PlanQuery
            | StatusCode::FlowAlreadyExists
            | StatusCode::FlowNotFound
            | StatusCode::TriggerAlreadyExists
            | StatusCode::TriggerNotFound
            | StatusCode::RegionNotReady
            | StatusCode::RegionBusy
            | StatusCode::RegionReadonly
@@ -239,48 +230,6 @@ impl fmt::Display for StatusCode {
    }
}

#[macro_export]
macro_rules! define_from_tonic_status {
    ($Error: ty, $Variant: ident) => {
        impl From<tonic::Status> for $Error {
            fn from(e: tonic::Status) -> Self {
                use snafu::location;

                fn metadata_value(e: &tonic::Status, key: &str) -> Option<String> {
                    e.metadata()
                        .get(key)
                        .and_then(|v| String::from_utf8(v.as_bytes().to_vec()).ok())
                }

                let code = metadata_value(&e, $crate::GREPTIME_DB_HEADER_ERROR_CODE)
                    .and_then(|s| {
                        if let Ok(code) = s.parse::<u32>() {
                            StatusCode::from_u32(code)
                        } else {
                            None
                        }
                    })
                    .unwrap_or_else(|| match e.code() {
                        tonic::Code::Cancelled => StatusCode::Cancelled,
                        tonic::Code::DeadlineExceeded => StatusCode::DeadlineExceeded,
                        _ => StatusCode::Internal,
                    });

                let msg = metadata_value(&e, $crate::GREPTIME_DB_HEADER_ERROR_MSG)
                    .unwrap_or_else(|| e.message().to_string());

                // TODO(LFC): Make the error variant defined automatically.
                Self::$Variant {
                    code,
                    msg,
                    tonic_code: e.code(),
                    location: location!(),
                }
            }
        }
    };
}

#[macro_export]
macro_rules! define_into_tonic_status {
    ($Error: ty) => {
@@ -332,14 +281,12 @@ pub fn status_to_tonic_code(status_code: StatusCode) -> Code {
        | StatusCode::TableColumnExists
        | StatusCode::RegionAlreadyExists
        | StatusCode::DatabaseAlreadyExists
        | StatusCode::TriggerAlreadyExists
        | StatusCode::FlowAlreadyExists => Code::AlreadyExists,
        StatusCode::TableNotFound
        | StatusCode::RegionNotFound
        | StatusCode::TableColumnNotFound
        | StatusCode::DatabaseNotFound
        | StatusCode::UserNotFound
        | StatusCode::TriggerNotFound
        | StatusCode::FlowNotFound => Code::NotFound,
        StatusCode::TableUnavailable
        | StatusCode::StorageUnavailable
@@ -357,6 +304,15 @@ pub fn status_to_tonic_code(status_code: StatusCode) -> Code {
    }
}

/// Converts tonic [Code] to [StatusCode].
pub fn convert_tonic_code_to_status_code(code: Code) -> StatusCode {
    match code {
        Code::Cancelled => StatusCode::Cancelled,
        Code::DeadlineExceeded => StatusCode::DeadlineExceeded,
        _ => StatusCode::Internal,
    }
}
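A couple of worked examples of the fallback mapping just defined (only `Cancelled` and `DeadlineExceeded` survive the translation; every other tonic code collapses to `Internal`):

```rust
#[test]
fn tonic_code_fallback_examples() {
    assert_eq!(
        convert_tonic_code_to_status_code(Code::Cancelled),
        StatusCode::Cancelled
    );
    // Any other tonic code, e.g. NotFound, maps to the generic Internal.
    assert_eq!(
        convert_tonic_code_to_status_code(Code::NotFound),
        StatusCode::Internal
    );
}
```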
#[cfg(test)]
mod tests {
    use strum::IntoEnumIterator;
@@ -33,7 +33,6 @@ common-version.workspace = true
datafusion.workspace = true
datafusion-common.workspace = true
datafusion-expr.workspace = true
datafusion-functions-aggregate-common.workspace = true
datatypes.workspace = true
derive_more = { version = "1", default-features = false, features = ["display"] }
geo = { version = "0.29", optional = true }

@@ -13,7 +13,6 @@
// limitations under the License.

pub mod approximate;
pub mod count_hash;
#[cfg(feature = "geo")]
pub mod geo;
pub mod vector;
@@ -1,647 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! `CountHash` / `count_hash` is a hash-based approximate distinct count function.
//!
//! It is a variant of `CountDistinct` that uses a hash function to approximate the
//! distinct count.
//! It is designed to be more efficient than `CountDistinct` for large datasets,
//! but it is not as accurate, since distinct values may collide on the same hash.
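// Conceptual sketch of the trade-off described above (illustrative only, not
// part of this file): counting distinct hashes under-counts whenever two
// distinct values collide on the same u64.
fn _approx_distinct_sketch<T: std::hash::Hash>(values: &[T]) -> usize {
    use std::collections::{hash_map::RandomState, HashSet};
    use std::hash::BuildHasher;

    let state = RandomState::new();
    let hashes: HashSet<u64> = values.iter().map(|v| state.hash_one(v)).collect();
    hashes.len() // a lower bound on the true distinct count when collisions occur
}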
use std::collections::HashSet;
use std::fmt::Debug;
use std::sync::Arc;

use ahash::RandomState;
use datafusion_common::cast::as_list_array;
use datafusion_common::error::Result;
use datafusion_common::hash_utils::create_hashes;
use datafusion_common::utils::SingleRowListArrayBuilder;
use datafusion_common::{internal_err, not_impl_err, ScalarValue};
use datafusion_expr::function::{AccumulatorArgs, StateFieldsArgs};
use datafusion_expr::utils::{format_state_name, AggregateOrderSensitivity};
use datafusion_expr::{
    Accumulator, AggregateUDF, AggregateUDFImpl, EmitTo, GroupsAccumulator, ReversedUDAF,
    SetMonotonicity, Signature, TypeSignature, Volatility,
};
use datafusion_functions_aggregate_common::aggregate::groups_accumulator::nulls::filtered_null_mask;
use datatypes::arrow;
use datatypes::arrow::array::{
    Array, ArrayRef, AsArray, BooleanArray, Int64Array, ListArray, UInt64Array,
};
use datatypes::arrow::buffer::{OffsetBuffer, ScalarBuffer};
use datatypes::arrow::datatypes::{DataType, Field};

use crate::function_registry::FunctionRegistry;

type HashValueType = u64;

// read from /dev/urandom 4047821dc6144e4b2abddf23ad4171126a52eeecd26eff2191cf673b965a7875
const RANDOM_SEED_0: u64 = 0x4047821dc6144e4b;
const RANDOM_SEED_1: u64 = 0x2abddf23ad417112;
const RANDOM_SEED_2: u64 = 0x6a52eeecd26eff21;
const RANDOM_SEED_3: u64 = 0x91cf673b965a7875;
impl CountHash {
    pub fn register(registry: &FunctionRegistry) {
        registry.register_aggr(CountHash::udf_impl());
    }

    pub fn udf_impl() -> AggregateUDF {
        AggregateUDF::new_from_impl(CountHash {
            signature: Signature::one_of(
                vec![TypeSignature::VariadicAny, TypeSignature::Nullary],
                Volatility::Immutable,
            ),
        })
    }
}
#[derive(Debug, Clone)]
pub struct CountHash {
    signature: Signature,
}

impl AggregateUDFImpl for CountHash {
    fn as_any(&self) -> &dyn std::any::Any {
        self
    }

    fn name(&self) -> &str {
        "count_hash"
    }

    fn signature(&self) -> &Signature {
        &self.signature
    }

    fn return_type(&self, _arg_types: &[DataType]) -> Result<DataType> {
        Ok(DataType::Int64)
    }

    fn is_nullable(&self) -> bool {
        false
    }

    fn state_fields(&self, args: StateFieldsArgs) -> Result<Vec<Field>> {
        Ok(vec![Field::new_list(
            format_state_name(args.name, "count_hash"),
            Field::new_list_field(DataType::UInt64, true),
            // For count_hash accumulator, a null list item stands for an
            // empty value set (i.e., all NULL values so far for that group).
            true,
        )])
    }

    fn accumulator(&self, acc_args: AccumulatorArgs) -> Result<Box<dyn Accumulator>> {
        if acc_args.exprs.len() > 1 {
            return not_impl_err!("count_hash with multiple arguments");
        }

        Ok(Box::new(CountHashAccumulator {
            values: HashSet::default(),
            random_state: RandomState::with_seeds(
                RANDOM_SEED_0,
                RANDOM_SEED_1,
                RANDOM_SEED_2,
                RANDOM_SEED_3,
            ),
            batch_hashes: vec![],
        }))
    }

    fn aliases(&self) -> &[String] {
        &[]
    }

    fn groups_accumulator_supported(&self, _args: AccumulatorArgs) -> bool {
        true
    }

    fn create_groups_accumulator(
        &self,
        args: AccumulatorArgs,
    ) -> Result<Box<dyn GroupsAccumulator>> {
        if args.exprs.len() > 1 {
            return not_impl_err!("count_hash with multiple arguments");
        }

        Ok(Box::new(CountHashGroupAccumulator::new()))
    }

    fn reverse_expr(&self) -> ReversedUDAF {
        ReversedUDAF::Identical
    }

    fn order_sensitivity(&self) -> AggregateOrderSensitivity {
        AggregateOrderSensitivity::Insensitive
    }

    fn default_value(&self, _data_type: &DataType) -> Result<ScalarValue> {
        Ok(ScalarValue::Int64(Some(0)))
    }

    fn set_monotonicity(&self, _data_type: &DataType) -> SetMonotonicity {
        SetMonotonicity::Increasing
    }
}
/// GroupsAccumulator for `count_hash` aggregate function
#[derive(Debug)]
pub struct CountHashGroupAccumulator {
    /// One HashSet per group to track distinct values
    distinct_sets: Vec<HashSet<HashValueType, RandomState>>,
    random_state: RandomState,
    batch_hashes: Vec<HashValueType>,
}

impl Default for CountHashGroupAccumulator {
    fn default() -> Self {
        Self::new()
    }
}

impl CountHashGroupAccumulator {
    pub fn new() -> Self {
        Self {
            distinct_sets: vec![],
            random_state: RandomState::with_seeds(
                RANDOM_SEED_0,
                RANDOM_SEED_1,
                RANDOM_SEED_2,
                RANDOM_SEED_3,
            ),
            batch_hashes: vec![],
        }
    }

    fn ensure_sets(&mut self, total_num_groups: usize) {
        if self.distinct_sets.len() < total_num_groups {
            self.distinct_sets
                .resize_with(total_num_groups, HashSet::default);
        }
    }
}
impl GroupsAccumulator for CountHashGroupAccumulator {
    fn update_batch(
        &mut self,
        values: &[ArrayRef],
        group_indices: &[usize],
        opt_filter: Option<&BooleanArray>,
        total_num_groups: usize,
    ) -> Result<()> {
        assert_eq!(values.len(), 1, "count_hash expects a single argument");
        self.ensure_sets(total_num_groups);

        let array = &values[0];
        self.batch_hashes.clear();
        self.batch_hashes.resize(array.len(), 0);
        let hashes = create_hashes(
            &[ArrayRef::clone(array)],
            &self.random_state,
            &mut self.batch_hashes,
        )?;

        // Use a pattern similar to accumulate_indices to process rows
        // that are not null and pass the filter
        let nulls = array.logical_nulls();

        match (nulls.as_ref(), opt_filter) {
            (None, None) => {
                // No nulls, no filter - process all rows
                for (row_idx, &group_idx) in group_indices.iter().enumerate() {
                    self.distinct_sets[group_idx].insert(hashes[row_idx]);
                }
            }
            (Some(nulls), None) => {
                // Has nulls, no filter
                for (row_idx, (&group_idx, is_valid)) in
                    group_indices.iter().zip(nulls.iter()).enumerate()
                {
                    if is_valid {
                        self.distinct_sets[group_idx].insert(hashes[row_idx]);
                    }
                }
            }
            (None, Some(filter)) => {
                // No nulls, has filter
                for (row_idx, (&group_idx, filter_value)) in
                    group_indices.iter().zip(filter.iter()).enumerate()
                {
                    if let Some(true) = filter_value {
                        self.distinct_sets[group_idx].insert(hashes[row_idx]);
                    }
                }
            }
            (Some(nulls), Some(filter)) => {
                // Has nulls and filter
                let iter = filter
                    .iter()
                    .zip(group_indices.iter())
                    .zip(nulls.iter())
                    .enumerate();

                for (row_idx, ((filter_value, &group_idx), is_valid)) in iter {
                    if is_valid && filter_value == Some(true) {
                        self.distinct_sets[group_idx].insert(hashes[row_idx]);
                    }
                }
            }
        }

        Ok(())
    }

    fn evaluate(&mut self, emit_to: EmitTo) -> Result<ArrayRef> {
        let distinct_sets: Vec<HashSet<u64, RandomState>> =
            emit_to.take_needed(&mut self.distinct_sets);

        let counts = distinct_sets
            .iter()
            .map(|set| set.len() as i64)
            .collect::<Vec<_>>();
        Ok(Arc::new(Int64Array::from(counts)))
    }

    fn merge_batch(
        &mut self,
        values: &[ArrayRef],
        group_indices: &[usize],
        _opt_filter: Option<&BooleanArray>,
        total_num_groups: usize,
    ) -> Result<()> {
        assert_eq!(
            values.len(),
            1,
            "count_hash merge expects a single state array"
        );
        self.ensure_sets(total_num_groups);

        let list_array = as_list_array(&values[0])?;

        // For each group in the incoming batch
        for (i, &group_idx) in group_indices.iter().enumerate() {
            if i < list_array.len() {
                let inner_array = list_array.value(i);
                let inner_array = inner_array.as_any().downcast_ref::<UInt64Array>().unwrap();
                // Add each value to our set for this group
                for j in 0..inner_array.len() {
                    if !inner_array.is_null(j) {
                        self.distinct_sets[group_idx].insert(inner_array.value(j));
                    }
                }
            }
        }

        Ok(())
    }

    fn state(&mut self, emit_to: EmitTo) -> Result<Vec<ArrayRef>> {
        let distinct_sets: Vec<HashSet<u64, RandomState>> =
            emit_to.take_needed(&mut self.distinct_sets);

        let mut offsets = Vec::with_capacity(distinct_sets.len() + 1);
        offsets.push(0);
        let mut curr_len = 0i32;

        let mut value_iter = distinct_sets
            .into_iter()
            .flat_map(|set| {
                // build offset
                curr_len += set.len() as i32;
                offsets.push(curr_len);
                // convert into iter
                set.into_iter()
            })
            .peekable();
        let data_array: ArrayRef = if value_iter.peek().is_none() {
            arrow::array::new_empty_array(&DataType::UInt64) as _
        } else {
            Arc::new(UInt64Array::from_iter_values(value_iter))
        };
        let offset_buffer = OffsetBuffer::new(ScalarBuffer::from(offsets));

        let list_array = ListArray::new(
            Arc::new(Field::new_list_field(DataType::UInt64, true)),
            offset_buffer,
            data_array,
            None,
        );

        Ok(vec![Arc::new(list_array) as _])
    }

    fn convert_to_state(
        &self,
        values: &[ArrayRef],
        opt_filter: Option<&BooleanArray>,
    ) -> Result<Vec<ArrayRef>> {
        // For a single hash value per row, create a list array with that value
        assert_eq!(values.len(), 1, "count_hash expects a single argument");
        let values = ArrayRef::clone(&values[0]);

        let offsets = OffsetBuffer::new(ScalarBuffer::from_iter(0..values.len() as i32 + 1));
        let nulls = filtered_null_mask(opt_filter, &values);
        let list_array = ListArray::new(
            Arc::new(Field::new_list_field(DataType::UInt64, true)),
            offsets,
            values,
            nulls,
        );

        Ok(vec![Arc::new(list_array)])
    }

    fn supports_convert_to_state(&self) -> bool {
        true
    }

    fn size(&self) -> usize {
        // Base size of the struct
        let mut size = size_of::<Self>();

        // Size of the vector holding the HashSets
        size += size_of::<Vec<HashSet<HashValueType, RandomState>>>()
            + self.distinct_sets.capacity() * size_of::<HashSet<HashValueType, RandomState>>();

        // Estimate HashSet contents size more efficiently:
        // instead of iterating through all values, which is expensive, use an approximation
        for set in &self.distinct_sets {
            // Approximate size of the HashSet's buckets
            size += set.capacity() * size_of::<HashValueType>();
        }

        size
    }
}
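// Sketch of the list layout `state` produces above: with two groups whose
// sets are {10, 11} and {12}, the offsets are [0, 2, 3] and the flat values
// are [10, 11, 12]; group i owns values[offsets[i]..offsets[i + 1]].
#[cfg(test)]
mod state_layout_sketch {
    use std::sync::Arc;

    use datatypes::arrow::array::{Array, ArrayRef, ListArray, UInt64Array};
    use datatypes::arrow::buffer::{OffsetBuffer, ScalarBuffer};
    use datatypes::arrow::datatypes::{DataType, Field};

    #[test]
    fn two_groups() {
        let values: ArrayRef = Arc::new(UInt64Array::from(vec![10u64, 11, 12]));
        let offsets = OffsetBuffer::new(ScalarBuffer::from(vec![0i32, 2, 3]));
        let field = Arc::new(Field::new_list_field(DataType::UInt64, true));
        let list = ListArray::new(field, offsets, values, None);
        assert_eq!(list.len(), 2);
        assert_eq!(list.value(0).len(), 2); // group 0 holds {10, 11}
        assert_eq!(list.value(1).len(), 1); // group 1 holds {12}
    }
}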
#[derive(Debug)]
struct CountHashAccumulator {
    values: HashSet<HashValueType, RandomState>,
    random_state: RandomState,
    batch_hashes: Vec<HashValueType>,
}

impl CountHashAccumulator {
    // calculating the size for fixed length values, taking first batch size *
    // number of batches.
    fn fixed_size(&self) -> usize {
        size_of_val(self) + (size_of::<HashValueType>() * self.values.capacity())
    }
}

impl Accumulator for CountHashAccumulator {
    /// Returns the distinct values seen so far as (one element) ListArray.
    fn state(&mut self) -> Result<Vec<ScalarValue>> {
        let values = self.values.iter().cloned().collect::<Vec<_>>();
        let arr = Arc::new(UInt64Array::from(values)) as _;
        let list_scalar = SingleRowListArrayBuilder::new(arr).build_list_scalar();
        Ok(vec![list_scalar])
    }

    fn update_batch(&mut self, values: &[ArrayRef]) -> Result<()> {
        if values.is_empty() {
            return Ok(());
        }

        let arr = &values[0];
        if arr.data_type() == &DataType::Null {
            return Ok(());
        }

        self.batch_hashes.clear();
        self.batch_hashes.resize(arr.len(), 0);
        let hashes = create_hashes(
            &[ArrayRef::clone(arr)],
            &self.random_state,
            &mut self.batch_hashes,
        )?;
        for hash in hashes.as_slice() {
            self.values.insert(*hash);
        }
        Ok(())
    }

    /// Merges multiple sets of distinct values into the current set.
    ///
    /// The input to this function is a `ListArray` with **multiple** rows,
    /// where each row contains the values from a partial aggregate phase (e.g.
    /// the result of calling `Self::state` on multiple accumulators).
    fn merge_batch(&mut self, states: &[ArrayRef]) -> Result<()> {
        if states.is_empty() {
            return Ok(());
        }
        assert_eq!(states.len(), 1, "count_hash states must be singleton!");
        let array = &states[0];
        let list_array = array.as_list::<i32>();
        for inner_array in list_array.iter() {
            let Some(inner_array) = inner_array else {
                return internal_err!(
                    "Intermediate results of count_hash should always be non null"
                );
            };
            let hash_array = inner_array.as_any().downcast_ref::<UInt64Array>().unwrap();
            for i in 0..hash_array.len() {
                self.values.insert(hash_array.value(i));
            }
        }
        Ok(())
    }

    fn evaluate(&mut self) -> Result<ScalarValue> {
        Ok(ScalarValue::Int64(Some(self.values.len() as i64)))
    }

    fn size(&self) -> usize {
        self.fixed_size()
    }
}
#[cfg(test)]
mod tests {
    use datatypes::arrow::array::{Array, BooleanArray, Int32Array, Int64Array};

    use super::*;

    fn create_test_accumulator() -> CountHashAccumulator {
        CountHashAccumulator {
            values: HashSet::default(),
            random_state: RandomState::with_seeds(
                RANDOM_SEED_0,
                RANDOM_SEED_1,
                RANDOM_SEED_2,
                RANDOM_SEED_3,
            ),
            batch_hashes: vec![],
        }
    }

    #[test]
    fn test_count_hash_accumulator() -> Result<()> {
        let mut acc = create_test_accumulator();

        // Test with some data
        let array = Arc::new(Int32Array::from(vec![
            Some(1),
            Some(2),
            Some(3),
            Some(1),
            Some(2),
            None,
        ])) as ArrayRef;
        acc.update_batch(&[array])?;
        let result = acc.evaluate()?;
        assert_eq!(result, ScalarValue::Int64(Some(4)));

        // Test with empty data
        let mut acc = create_test_accumulator();
        let array = Arc::new(Int32Array::from(vec![] as Vec<Option<i32>>)) as ArrayRef;
        acc.update_batch(&[array])?;
        let result = acc.evaluate()?;
        assert_eq!(result, ScalarValue::Int64(Some(0)));

        // Test with only nulls
        let mut acc = create_test_accumulator();
        let array = Arc::new(Int32Array::from(vec![None, None, None])) as ArrayRef;
        acc.update_batch(&[array])?;
        let result = acc.evaluate()?;
        assert_eq!(result, ScalarValue::Int64(Some(1)));

        Ok(())
    }

    #[test]
    fn test_count_hash_accumulator_merge() -> Result<()> {
        // Accumulator 1
        let mut acc1 = create_test_accumulator();
        let array1 = Arc::new(Int32Array::from(vec![Some(1), Some(2), Some(3)])) as ArrayRef;
        acc1.update_batch(&[array1])?;
        let state1 = acc1.state()?;

        // Accumulator 2
        let mut acc2 = create_test_accumulator();
        let array2 = Arc::new(Int32Array::from(vec![Some(3), Some(4), Some(5)])) as ArrayRef;
        acc2.update_batch(&[array2])?;
        let state2 = acc2.state()?;

        // Merge state1 and state2 into a new accumulator
        let mut acc_merged = create_test_accumulator();
        let state_array1 = state1[0].to_array()?;
        let state_array2 = state2[0].to_array()?;

        acc_merged.merge_batch(&[state_array1])?;
        acc_merged.merge_batch(&[state_array2])?;

        let result = acc_merged.evaluate()?;
        // Distinct values are {1, 2, 3, 4, 5}, so count is 5
        assert_eq!(result, ScalarValue::Int64(Some(5)));

        Ok(())
    }

    fn create_test_group_accumulator() -> CountHashGroupAccumulator {
        CountHashGroupAccumulator::new()
    }

    #[test]
    fn test_count_hash_group_accumulator() -> Result<()> {
        let mut acc = create_test_group_accumulator();
        let values = Arc::new(Int32Array::from(vec![1, 2, 1, 3, 2, 4, 5])) as ArrayRef;
        let group_indices = vec![0, 1, 0, 0, 1, 2, 0];
        let total_num_groups = 3;

        acc.update_batch(&[values], &group_indices, None, total_num_groups)?;

        let result_array = acc.evaluate(EmitTo::All)?;
        let result = result_array.as_any().downcast_ref::<Int64Array>().unwrap();

        // Group 0: {1, 3, 5} -> 3
        // Group 1: {2} -> 1
        // Group 2: {4} -> 1
        assert_eq!(result.value(0), 3);
        assert_eq!(result.value(1), 1);
        assert_eq!(result.value(2), 1);

        Ok(())
    }

    #[test]
    fn test_count_hash_group_accumulator_with_filter() -> Result<()> {
        let mut acc = create_test_group_accumulator();
        let values = Arc::new(Int32Array::from(vec![1, 2, 3, 4, 5, 6])) as ArrayRef;
        let group_indices = vec![0, 0, 1, 1, 2, 2];
        let filter = BooleanArray::from(vec![true, false, true, true, false, true]);
        let total_num_groups = 3;

        acc.update_batch(&[values], &group_indices, Some(&filter), total_num_groups)?;

        let result_array = acc.evaluate(EmitTo::All)?;
        let result = result_array.as_any().downcast_ref::<Int64Array>().unwrap();

        // Group 0: {1} (2 is filtered out) -> 1
        // Group 1: {3, 4} -> 2
        // Group 2: {6} (5 is filtered out) -> 1
        assert_eq!(result.value(0), 1);
        assert_eq!(result.value(1), 2);
        assert_eq!(result.value(2), 1);

        Ok(())
    }

    #[test]
    fn test_count_hash_group_accumulator_merge() -> Result<()> {
        // Accumulator 1
        let mut acc1 = create_test_group_accumulator();
        let values1 = Arc::new(Int32Array::from(vec![1, 2, 3, 4])) as ArrayRef;
        let group_indices1 = vec![0, 0, 1, 1];
        acc1.update_batch(&[values1], &group_indices1, None, 2)?;
        // acc1 state: group 0 -> {1, 2}, group 1 -> {3, 4}
        let state1 = acc1.state(EmitTo::All)?;

        // Accumulator 2
        let mut acc2 = create_test_group_accumulator();
        let values2 = Arc::new(Int32Array::from(vec![5, 6, 1, 3])) as ArrayRef;
        // Merge into different group indices
        let group_indices2 = vec![2, 2, 0, 1];
        acc2.update_batch(&[values2], &group_indices2, None, 3)?;
        // acc2 state: group 0 -> {1}, group 1 -> {3}, group 2 -> {5, 6}

        // Merge state from acc1 into acc2
        // We will merge acc1's group 0 into acc2's group 0
        // and acc1's group 1 into acc2's group 2
        let merge_group_indices = vec![0, 2];
        acc2.merge_batch(&state1, &merge_group_indices, None, 3)?;

        let result_array = acc2.evaluate(EmitTo::All)?;
        let result = result_array.as_any().downcast_ref::<Int64Array>().unwrap();

        // Final state of acc2:
        // Group 0: {1} U {1, 2} -> {1, 2}, count = 2
        // Group 1: {3}, count = 1
        // Group 2: {5, 6} U {3, 4} -> {3, 4, 5, 6}, count = 4
        assert_eq!(result.value(0), 2);
        assert_eq!(result.value(1), 1);
        assert_eq!(result.value(2), 4);

        Ok(())
    }

    #[test]
    fn test_size() {
        let acc = create_test_group_accumulator();
        // Just test it doesn't crash and returns a value.
        assert!(acc.size() > 0);
    }
}
@@ -21,7 +21,6 @@ use once_cell::sync::Lazy;

use crate::admin::AdminFunction;
use crate::aggrs::approximate::ApproximateFunction;
use crate::aggrs::count_hash::CountHash;
use crate::aggrs::vector::VectorFunction as VectorAggrFunction;
use crate::function::{AsyncFunctionRef, Function, FunctionRef};
use crate::function_factory::ScalarFunctionFactory;

@@ -145,9 +144,6 @@ pub static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| {
    // Approximate functions
    ApproximateFunction::register(&function_registry);

    // CountHash function
    CountHash::register(&function_registry);

    Arc::new(function_registry)
});
@@ -14,6 +14,7 @@

pub mod clamp;
mod modulo;
mod pow;
mod rate;

use std::fmt;

@@ -25,6 +26,7 @@ use datafusion::error::DataFusionError;
use datafusion::logical_expr::Volatility;
use datatypes::prelude::ConcreteDataType;
use datatypes::vectors::VectorRef;
pub use pow::PowFunction;
pub use rate::RateFunction;
use snafu::ResultExt;

@@ -37,6 +39,7 @@ pub(crate) struct MathFunction;
impl MathFunction {
    pub fn register(registry: &FunctionRegistry) {
        registry.register_scalar(ModuloFunction);
        registry.register_scalar(PowFunction);
        registry.register_scalar(RateFunction);
        registry.register_scalar(RangeFunction);
        registry.register_scalar(ClampFunction);
120
src/common/function/src/scalars/math/pow.rs
Normal file
@@ -0,0 +1,120 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt;
use std::sync::Arc;

use common_query::error::Result;
use common_query::prelude::{Signature, Volatility};
use datatypes::data_type::DataType;
use datatypes::prelude::ConcreteDataType;
use datatypes::types::LogicalPrimitiveType;
use datatypes::vectors::VectorRef;
use datatypes::with_match_primitive_type_id;
use num::traits::Pow;
use num_traits::AsPrimitive;

use crate::function::{Function, FunctionContext};
use crate::scalars::expression::{scalar_binary_op, EvalContext};

#[derive(Clone, Debug, Default)]
pub struct PowFunction;

impl Function for PowFunction {
    fn name(&self) -> &str {
        "pow"
    }

    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
        Ok(ConcreteDataType::float64_datatype())
    }

    fn signature(&self) -> Signature {
        Signature::uniform(2, ConcreteDataType::numerics(), Volatility::Immutable)
    }

    fn eval(&self, _func_ctx: &FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
        with_match_primitive_type_id!(columns[0].data_type().logical_type_id(), |$S| {
            with_match_primitive_type_id!(columns[1].data_type().logical_type_id(), |$T| {
                let col = scalar_binary_op::<<$S as LogicalPrimitiveType>::Native, <$T as LogicalPrimitiveType>::Native, f64, _>(&columns[0], &columns[1], scalar_pow, &mut EvalContext::default())?;
                Ok(Arc::new(col))
            },{
                unreachable!()
            })
        },{
            unreachable!()
        })
    }
}

#[inline]
fn scalar_pow<S, T>(value: Option<S>, base: Option<T>, _ctx: &mut EvalContext) -> Option<f64>
where
    S: AsPrimitive<f64>,
    T: AsPrimitive<f64>,
{
    match (value, base) {
        (Some(value), Some(base)) => Some(value.as_().pow(base.as_())),
        _ => None,
    }
}
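// A quick sanity check of the helper's semantics: a missing operand (SQL
// NULL) propagates as None, mirroring NULL-in, NULL-out arithmetic.
#[cfg(test)]
mod scalar_pow_sketch {
    use super::*;

    #[test]
    fn null_propagates() {
        let mut ctx = EvalContext::default();
        assert_eq!(scalar_pow(Some(2.0f64), Some(3i8), &mut ctx), Some(8.0));
        assert_eq!(scalar_pow(None::<f64>, Some(3i8), &mut ctx), None);
    }
}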
impl fmt::Display for PowFunction {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "POW")
    }
}
#[cfg(test)]
mod tests {
    use common_query::prelude::TypeSignature;
    use datatypes::value::Value;
    use datatypes::vectors::{Float32Vector, Int8Vector};

    use super::*;
    use crate::function::FunctionContext;

    #[test]
    fn test_pow_function() {
        let pow = PowFunction;

        assert_eq!("pow", pow.name());
        assert_eq!(
            ConcreteDataType::float64_datatype(),
            pow.return_type(&[]).unwrap()
        );

        assert!(matches!(pow.signature(),
            Signature {
                type_signature: TypeSignature::Uniform(2, valid_types),
                volatility: Volatility::Immutable
            } if valid_types == ConcreteDataType::numerics()
        ));

        let values = vec![1.0, 2.0, 3.0];
        let bases = vec![0i8, -1i8, 3i8];

        let args: Vec<VectorRef> = vec![
            Arc::new(Float32Vector::from_vec(values.clone())),
            Arc::new(Int8Vector::from_vec(bases.clone())),
        ];

        let vector = pow.eval(&FunctionContext::default(), &args).unwrap();
        assert_eq!(3, vector.len());

        for i in 0..3 {
            let p: f64 = (values[i] as f64).pow(bases[i] as f64);
            assert!(matches!(vector.get(i), Value::Float64(v) if v == p));
        }
    }
}
@@ -50,6 +50,7 @@ pub mod drop_flow;
pub mod drop_table;
pub mod drop_view;
pub mod flow_meta;
mod physical_table_metadata;
pub mod table_meta;
#[cfg(any(test, feature = "testing"))]
pub mod test_util;
@@ -12,32 +12,32 @@
// See the License for the specific language governing permissions and
// limitations under the License.

mod executor;
mod check;
mod metadata;
mod region_request;
mod table_cache_keys;
mod update_metadata;
mod validator;

use api::region::RegionResponse;
use async_trait::async_trait;
use common_catalog::format_full_table_name;
use common_procedure::error::{FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu};
use common_procedure::{Context, LockKey, Procedure, Status};
use common_telemetry::{debug, error, info, warn};
pub use executor::make_alter_region_request;
use common_telemetry::{error, info, warn};
use futures_util::future;
pub use region_request::make_alter_region_request;
use serde::{Deserialize, Serialize};
use snafu::ResultExt;
use snafu::{ensure, ResultExt};
use store_api::metadata::ColumnMetadata;
use store_api::metric_engine_consts::ALTER_PHYSICAL_EXTENSION_KEY;
use strum::AsRefStr;
use table::metadata::TableId;

use crate::cache_invalidator::Context as CacheContext;
use crate::ddl::alter_logical_tables::executor::AlterLogicalTablesExecutor;
use crate::ddl::alter_logical_tables::validator::{
    retain_unskipped, AlterLogicalTableValidator, ValidatorResult,
use crate::ddl::utils::{
    add_peer_context_if_needed, map_to_procedure_error, sync_follower_regions,
};
use crate::ddl::utils::{extract_column_metadatas, map_to_procedure_error, sync_follower_regions};
use crate::ddl::DdlContext;
use crate::error::Result;
use crate::error::{DecodeJsonSnafu, MetadataCorruptionSnafu, Result};
use crate::instruction::CacheIdent;
use crate::key::table_info::TableInfoValue;
use crate::key::table_route::PhysicalTableRouteValue;
@@ -45,38 +45,13 @@ use crate::key::DeserializedValueWithBytes;
use crate::lock_key::{CatalogLock, SchemaLock, TableLock};
use crate::metrics;
use crate::rpc::ddl::AlterTableTask;
use crate::rpc::router::RegionRoute;
use crate::rpc::router::{find_leaders, RegionRoute};

pub struct AlterLogicalTablesProcedure {
    pub context: DdlContext,
    pub data: AlterTablesData,
}

/// Builds the validator from the [`AlterTablesData`].
fn build_validator_from_alter_table_data<'a>(
    data: &'a AlterTablesData,
) -> AlterLogicalTableValidator<'a> {
    let physical_table_id = data.physical_table_id;
    let alters = data
        .tasks
        .iter()
        .map(|task| &task.alter_table)
        .collect::<Vec<_>>();
    AlterLogicalTableValidator::new(physical_table_id, alters)
}

/// Builds the executor from the [`AlterTablesData`].
fn build_executor_from_alter_expr<'a>(data: &'a AlterTablesData) -> AlterLogicalTablesExecutor<'a> {
    debug_assert_eq!(data.tasks.len(), data.table_info_values.len());
    let alters = data
        .tasks
        .iter()
        .zip(data.table_info_values.iter())
        .map(|(task, table_info)| (table_info.table_info.ident.table_id, &task.alter_table))
        .collect::<Vec<_>>();
    AlterLogicalTablesExecutor::new(alters)
}

impl AlterLogicalTablesProcedure {
    pub const TYPE_NAME: &'static str = "metasrv-procedure::AlterLogicalTables";
@@ -106,44 +81,35 @@ impl AlterLogicalTablesProcedure {
    }

    pub(crate) async fn on_prepare(&mut self) -> Result<Status> {
        let validator = build_validator_from_alter_table_data(&self.data);
        let ValidatorResult {
            num_skipped,
            skip_alter,
            table_info_values,
            physical_table_info,
            physical_table_route,
        } = validator
            .validate(&self.context.table_metadata_manager)
            .await?;

        let num_tasks = self.data.tasks.len();
        if num_skipped == num_tasks {
        // Checks all the tasks
        self.check_input_tasks()?;
        // Fills the table info values
        self.fill_table_info_values().await?;
        // Checks the physical table; must run after [fill_table_info_values]
        self.check_physical_table().await?;
        // Fills the physical table info
        self.fill_physical_table_info().await?;
        // Filter the finished tasks
        let finished_tasks = self.check_finished_tasks()?;
        let already_finished_count = finished_tasks
            .iter()
            .map(|x| if *x { 1 } else { 0 })
            .sum::<usize>();
        let apply_tasks_count = self.data.tasks.len();
        if already_finished_count == apply_tasks_count {
            info!("All the alter tasks are finished, will skip the procedure.");
            let cache_ident_keys = AlterLogicalTablesExecutor::build_cache_ident_keys(
                &physical_table_info,
                &table_info_values
                    .iter()
                    .map(|v| v.get_inner_ref())
                    .collect::<Vec<_>>(),
            );
            self.data.table_cache_keys_to_invalidate = cache_ident_keys;
            // Re-invalidate the table cache
            self.data.state = AlterTablesState::InvalidateTableCache;
            return Ok(Status::executing(true));
        } else if num_skipped > 0 {
        } else if already_finished_count > 0 {
            info!(
                "There are {} alter tasks, {} of them were already finished.",
                num_tasks, num_skipped
                apply_tasks_count, already_finished_count
            );
        }
        self.filter_task(&finished_tasks)?;

        // Updates the procedure state.
        retain_unskipped(&mut self.data.tasks, &skip_alter);
        self.data.physical_table_info = Some(physical_table_info);
        self.data.physical_table_route = Some(physical_table_route);
        self.data.table_info_values = table_info_values;
        debug_assert_eq!(self.data.tasks.len(), self.data.table_info_values.len());
        // Next state
        self.data.state = AlterTablesState::SubmitAlterRegionRequests;
        Ok(Status::executing(true))
    }
@@ -151,21 +117,57 @@ impl AlterLogicalTablesProcedure {
    pub(crate) async fn on_submit_alter_region_requests(&mut self) -> Result<Status> {
        // Safety: we have checked the state in on_prepare
        let physical_table_route = &self.data.physical_table_route.as_ref().unwrap();
        let executor = build_executor_from_alter_expr(&self.data);
        let mut results = executor
            .on_alter_regions(
                &self.context.node_manager,
                &physical_table_route.region_routes,
            )
            .await?;
        let leaders = find_leaders(&physical_table_route.region_routes);
        let mut alter_region_tasks = Vec::with_capacity(leaders.len());

        if let Some(column_metadatas) =
            extract_column_metadatas(&mut results, ALTER_PHYSICAL_EXTENSION_KEY)?
        {
            self.data.physical_columns = column_metadatas;
        for peer in leaders {
            let requester = self.context.node_manager.datanode(&peer).await;
            let request = self.make_request(&peer, &physical_table_route.region_routes)?;

            alter_region_tasks.push(async move {
                requester
                    .handle(request)
                    .await
                    .map_err(add_peer_context_if_needed(peer))
            });
        }

        let mut results = future::join_all(alter_region_tasks)
            .await
            .into_iter()
            .collect::<Result<Vec<_>>>()?;

        // Collects responses from datanodes.
        let phy_raw_schemas = results
            .iter_mut()
            .map(|res| res.extensions.remove(ALTER_PHYSICAL_EXTENSION_KEY))
            .collect::<Vec<_>>();

        if phy_raw_schemas.is_empty() {
            self.submit_sync_region_requests(results, &physical_table_route.region_routes)
                .await;
            self.data.state = AlterTablesState::UpdateMetadata;
            return Ok(Status::executing(true));
        }

        // Verify all the physical schemas are the same
        // Safety: previous check ensures this vec is not empty
        let first = phy_raw_schemas.first().unwrap();
        ensure!(
            phy_raw_schemas.iter().all(|x| x == first),
            MetadataCorruptionSnafu {
                err_msg: "The physical schemas from datanodes are not the same."
            }
        );

        // Decodes the physical raw schemas
        if let Some(phy_raw_schema) = first {
            self.data.physical_columns =
                ColumnMetadata::decode_list(phy_raw_schema).context(DecodeJsonSnafu)?;
        } else {
            warn!("altering logical table result doesn't contain extension key `{ALTER_PHYSICAL_EXTENSION_KEY}`, leaving the physical table's schema unchanged");
        }

        self.submit_sync_region_requests(results, &physical_table_route.region_routes)
            .await;
        self.data.state = AlterTablesState::UpdateMetadata;
@@ -181,7 +183,7 @@ impl AlterLogicalTablesProcedure {
        if let Err(err) = sync_follower_regions(
            &self.context,
            self.data.physical_table_id,
            &results,
            results,
            region_routes,
            table_info.meta.engine.as_str(),
        )
@@ -198,18 +200,7 @@ impl AlterLogicalTablesProcedure {
        self.update_physical_table_metadata().await?;
        self.update_logical_tables_metadata().await?;

        let logical_table_info_values = self
            .data
            .table_info_values
            .iter()
            .map(|v| v.get_inner_ref())
            .collect::<Vec<_>>();

        let cache_ident_keys = AlterLogicalTablesExecutor::build_cache_ident_keys(
            self.data.physical_table_info.as_ref().unwrap(),
            &logical_table_info_values,
        );
        self.data.table_cache_keys_to_invalidate = cache_ident_keys;
        self.data.build_cache_keys_to_invalidate();
        self.data.clear_metadata_fields();

        self.data.state = AlterTablesState::InvalidateTableCache;
@@ -219,16 +210,9 @@ impl AlterLogicalTablesProcedure {
    pub(crate) async fn on_invalidate_table_cache(&mut self) -> Result<Status> {
        let to_invalidate = &self.data.table_cache_keys_to_invalidate;

        let ctx = CacheContext {
            subject: Some(format!(
                "Invalidate table cache by altering logical tables, physical_table_id: {}",
                self.data.physical_table_id,
            )),
        };

        self.context
            .cache_invalidator
            .invalidate(&ctx, to_invalidate)
            .invalidate(&Default::default(), to_invalidate)
            .await?;
        Ok(Status::done())
    }
@@ -248,10 +232,6 @@ impl Procedure for AlterLogicalTablesProcedure {
        let _timer = metrics::METRIC_META_PROCEDURE_ALTER_TABLE
            .with_label_values(&[step])
            .start_timer();
        debug!(
            "Executing alter logical tables procedure, state: {:?}",
            state
        );

        match state {
            AlterTablesState::Prepare => self.on_prepare().await,
136
src/common/meta/src/ddl/alter_logical_tables/check.rs
Normal file
@@ -0,0 +1,136 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashSet;

use api::v1::alter_table_expr::Kind;
use snafu::{ensure, OptionExt};

use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;
use crate::error::{AlterLogicalTablesInvalidArgumentsSnafu, Result};
use crate::key::table_info::TableInfoValue;
use crate::key::table_route::TableRouteValue;
use crate::rpc::ddl::AlterTableTask;

impl AlterLogicalTablesProcedure {
    pub(crate) fn check_input_tasks(&self) -> Result<()> {
        self.check_schema()?;
        self.check_alter_kind()?;
        Ok(())
    }

    pub(crate) async fn check_physical_table(&self) -> Result<()> {
        let table_route_manager = self.context.table_metadata_manager.table_route_manager();
        let table_ids = self
            .data
            .table_info_values
            .iter()
            .map(|v| v.table_info.ident.table_id)
            .collect::<Vec<_>>();
        let table_routes = table_route_manager
            .table_route_storage()
            .batch_get(&table_ids)
            .await?;
        let physical_table_id = self.data.physical_table_id;
        let is_same_physical_table = table_routes.iter().all(|r| {
            if let Some(TableRouteValue::Logical(r)) = r {
                r.physical_table_id() == physical_table_id
            } else {
                false
            }
        });

        ensure!(
            is_same_physical_table,
            AlterLogicalTablesInvalidArgumentsSnafu {
                err_msg: "All the tasks should have the same physical table id"
            }
        );

        Ok(())
    }

    pub(crate) fn check_finished_tasks(&self) -> Result<Vec<bool>> {
        let task = &self.data.tasks;
        let table_info_values = &self.data.table_info_values;

        Ok(task
            .iter()
            .zip(table_info_values.iter())
            .map(|(task, table)| Self::check_finished_task(task, table))
            .collect())
    }

    // Checks if the schemas of the tasks are the same
    fn check_schema(&self) -> Result<()> {
        let is_same_schema = self.data.tasks.windows(2).all(|pair| {
            pair[0].alter_table.catalog_name == pair[1].alter_table.catalog_name
                && pair[0].alter_table.schema_name == pair[1].alter_table.schema_name
        });

        ensure!(
            is_same_schema,
            AlterLogicalTablesInvalidArgumentsSnafu {
                err_msg: "Schemas of the tasks are not the same"
            }
        );

        Ok(())
    }

    fn check_alter_kind(&self) -> Result<()> {
        for task in &self.data.tasks {
            let kind = task.alter_table.kind.as_ref().context(
                AlterLogicalTablesInvalidArgumentsSnafu {
                    err_msg: "Alter kind is missing",
                },
            )?;
            let Kind::AddColumns(_) = kind else {
                return AlterLogicalTablesInvalidArgumentsSnafu {
                    err_msg: "Only support add columns operation",
                }
                .fail();
            };
        }

        Ok(())
    }

    fn check_finished_task(task: &AlterTableTask, table: &TableInfoValue) -> bool {
        let columns = table
            .table_info
            .meta
            .schema
            .column_schemas
            .iter()
            .map(|c| &c.name)
            .collect::<HashSet<_>>();

        let Some(kind) = task.alter_table.kind.as_ref() else {
            return true; // Never get here since we have checked it in `check_alter_kind`
        };
        let Kind::AddColumns(add_columns) = kind else {
            return true; // Never get here since we have checked it in `check_alter_kind`
        };

        // A task only counts as finished when all of its columns exist. That is,
        // if some of its columns were added but others were not, the task is
        // still considered unfinished.
        add_columns
            .add_columns
            .iter()
            .map(|add_column| add_column.column_def.as_ref().map(|c| &c.name))
            .all(|column| column.map(|c| columns.contains(c)).unwrap_or(false))
    }
}
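// Sketch of the "finished" rule in `check_finished_task` above: a task counts
// as finished only when every column it would add already exists in the schema.
#[cfg(test)]
mod finished_rule_sketch {
    use std::collections::HashSet;

    #[test]
    fn partially_applied_task_is_unfinished() {
        let existing: HashSet<&str> = ["host", "ts", "cpu"].into_iter().collect();
        let to_add = ["cpu", "mem"];
        // "cpu" exists but "mem" does not, so the task must still run.
        assert!(!to_add.iter().all(|c| existing.contains(c)));
    }
}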
@@ -1,216 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;

use api::region::RegionResponse;
use api::v1::alter_table_expr::Kind;
use api::v1::region::{
    alter_request, region_request, AddColumn, AddColumns, AlterRequest, AlterRequests,
    RegionColumnDef, RegionRequest, RegionRequestHeader,
};
use api::v1::{self, AlterTableExpr};
use common_telemetry::tracing_context::TracingContext;
use common_telemetry::{debug, warn};
use futures::future;
use store_api::metadata::ColumnMetadata;
use store_api::storage::{RegionId, RegionNumber, TableId};

use crate::ddl::utils::{add_peer_context_if_needed, raw_table_info};
use crate::error::Result;
use crate::instruction::CacheIdent;
use crate::key::table_info::TableInfoValue;
use crate::key::{DeserializedValueWithBytes, RegionDistribution, TableMetadataManagerRef};
use crate::node_manager::NodeManagerRef;
use crate::rpc::router::{find_leaders, region_distribution, RegionRoute};

/// [AlterLogicalTablesExecutor] performs:
/// - Alters logical regions on the datanodes.
/// - Updates table metadata for alter table operation.
pub struct AlterLogicalTablesExecutor<'a> {
    /// The alter table expressions.
    ///
    /// The first element is the logical table id, the second element is the alter table expression.
    alters: Vec<(TableId, &'a AlterTableExpr)>,
}

impl<'a> AlterLogicalTablesExecutor<'a> {
    pub fn new(alters: Vec<(TableId, &'a AlterTableExpr)>) -> Self {
        Self { alters }
    }

    /// Alters logical regions on the datanodes.
    pub(crate) async fn on_alter_regions(
        &self,
        node_manager: &NodeManagerRef,
        region_routes: &[RegionRoute],
    ) -> Result<Vec<RegionResponse>> {
        let region_distribution = region_distribution(region_routes);
        let leaders = find_leaders(region_routes)
            .into_iter()
            .map(|p| (p.id, p))
            .collect::<HashMap<_, _>>();
        let mut alter_region_tasks = Vec::with_capacity(leaders.len());
        for (datanode_id, region_role_set) in region_distribution {
            if region_role_set.leader_regions.is_empty() {
                continue;
            }
            // Safety: must exist.
            let peer = leaders.get(&datanode_id).unwrap();
            let requester = node_manager.datanode(peer).await;
            let requests = self.make_alter_region_request(&region_role_set.leader_regions);
            let requester = requester.clone();
            let peer = peer.clone();

            debug!("Sending alter region requests to datanode {}", peer);
            alter_region_tasks.push(async move {
                requester
                    .handle(make_request(requests))
                    .await
                    .map_err(add_peer_context_if_needed(peer))
            });
        }

        future::join_all(alter_region_tasks)
            .await
            .into_iter()
            .collect::<Result<Vec<_>>>()
    }

    fn make_alter_region_request(&self, region_numbers: &[RegionNumber]) -> AlterRequests {
        let mut requests = Vec::with_capacity(region_numbers.len() * self.alters.len());
        for (table_id, alter) in self.alters.iter() {
            for region_number in region_numbers {
                let region_id = RegionId::new(*table_id, *region_number);
                let request = make_alter_region_request(region_id, alter);
                requests.push(request);
            }
        }

        AlterRequests { requests }
    }

    /// Updates table metadata for alter table operation.
    ///
    /// ## Panic:
    /// - If the region distribution is not set when updating table metadata.
    pub(crate) async fn on_alter_metadata(
        physical_table_id: TableId,
        table_metadata_manager: &TableMetadataManagerRef,
        current_table_info_value: &DeserializedValueWithBytes<TableInfoValue>,
        region_distribution: RegionDistribution,
        physical_columns: &[ColumnMetadata],
    ) -> Result<()> {
        if physical_columns.is_empty() {
            warn!("No physical columns found, leaving the physical table's schema unchanged when altering logical tables");
            return Ok(());
        }

        let table_ref = current_table_info_value.table_ref();
        let table_id = physical_table_id;

        // Generates new table info
        let old_raw_table_info = current_table_info_value.table_info.clone();
        let new_raw_table_info =
            raw_table_info::build_new_physical_table_info(old_raw_table_info, physical_columns);

        debug!(
            "Starting update table: {} metadata, table_id: {}, new table info: {:?}",
            table_ref, table_id, new_raw_table_info
        );

        table_metadata_manager
            .update_table_info(
                current_table_info_value,
                Some(region_distribution),
                new_raw_table_info,
            )
            .await?;

        Ok(())
    }

    /// Builds the cache ident keys for the alter logical tables.
    ///
    /// The cache ident keys are:
    /// - The table id of the logical tables.
    /// - The table name of the logical tables.
    /// - The table id of the physical table.
    pub(crate) fn build_cache_ident_keys(
        physical_table_info: &TableInfoValue,
        logical_table_info_values: &[&TableInfoValue],
    ) -> Vec<CacheIdent> {
        let mut cache_keys = Vec::with_capacity(logical_table_info_values.len() * 2 + 2);
        cache_keys.extend(logical_table_info_values.iter().flat_map(|table| {
            vec![
                CacheIdent::TableId(table.table_info.ident.table_id),
                CacheIdent::TableName(table.table_name()),
            ]
        }));
        cache_keys.push(CacheIdent::TableId(
            physical_table_info.table_info.ident.table_id,
        ));
        cache_keys.push(CacheIdent::TableName(physical_table_info.table_name()));

        cache_keys
    }
}

fn make_request(alter_requests: AlterRequests) -> RegionRequest {
    RegionRequest {
        header: Some(RegionRequestHeader {
            tracing_context: TracingContext::from_current_span().to_w3c(),
            ..Default::default()
        }),
        body: Some(region_request::Body::Alters(alter_requests)),
    }
}

/// Makes an alter region request.
pub fn make_alter_region_request(
    region_id: RegionId,
    alter_table_expr: &AlterTableExpr,
) -> AlterRequest {
    let region_id = region_id.as_u64();
    let kind = match &alter_table_expr.kind {
        Some(Kind::AddColumns(add_columns)) => Some(alter_request::Kind::AddColumns(
            to_region_add_columns(add_columns),
        )),
        _ => unreachable!(), // Safety: we have checked the kind in check_input_tasks
    };

    AlterRequest {
        region_id,
        schema_version: 0,
        kind,
    }
}

fn to_region_add_columns(add_columns: &v1::AddColumns) -> AddColumns {
    let add_columns = add_columns
        .add_columns
        .iter()
        .map(|add_column| {
            let region_column_def = RegionColumnDef {
                column_def: add_column.column_def.clone(),
                ..Default::default() // other fields are not used in alter logical table
            };
            AddColumn {
                column_def: Some(region_column_def),
                ..Default::default() // other fields are not used in alter logical table
            }
        })
        .collect();
    AddColumns { add_columns }
}
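// Sketch of the fan-out in `make_alter_region_request` above: each
// (logical table, region number) pair yields one request, so a batch alter
// touching T tables over R regions produces T * R region requests.
#[cfg(test)]
mod fanout_sketch {
    use store_api::storage::RegionId;

    #[test]
    fn one_request_per_table_region_pair() {
        let table_ids = [1024u32, 1025];
        let region_numbers = [0u32, 1, 2];
        let region_ids: Vec<RegionId> = table_ids
            .iter()
            .flat_map(|t| region_numbers.iter().map(|r| RegionId::new(*t, *r)))
            .collect();
        assert_eq!(region_ids.len(), table_ids.len() * region_numbers.len());
    }
}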
158
src/common/meta/src/ddl/alter_logical_tables/metadata.rs
Normal file
@@ -0,0 +1,158 @@
|
||||
// Copyright 2023 Greptime Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use common_catalog::format_full_table_name;
|
||||
use snafu::OptionExt;
|
||||
use table::metadata::TableId;
|
||||
|
||||
use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;
|
||||
use crate::error::{
|
||||
AlterLogicalTablesInvalidArgumentsSnafu, Result, TableInfoNotFoundSnafu, TableNotFoundSnafu,
|
||||
TableRouteNotFoundSnafu,
|
||||
};
|
||||
use crate::key::table_info::TableInfoValue;
|
||||
use crate::key::table_name::TableNameKey;
|
||||
use crate::key::table_route::TableRouteValue;
|
||||
use crate::key::DeserializedValueWithBytes;
|
||||
use crate::rpc::ddl::AlterTableTask;
|
||||
|
||||
impl AlterLogicalTablesProcedure {
|
||||
pub(crate) fn filter_task(&mut self, finished_tasks: &[bool]) -> Result<()> {
|
||||
debug_assert_eq!(finished_tasks.len(), self.data.tasks.len());
|
||||
debug_assert_eq!(finished_tasks.len(), self.data.table_info_values.len());
|
||||
self.data.tasks = self
|
||||
.data
|
||||
.tasks
|
||||
.drain(..)
|
||||
.zip(finished_tasks.iter())
|
||||
.filter_map(|(task, finished)| if *finished { None } else { Some(task) })
|
||||
.collect();
|
||||
self.data.table_info_values = self
|
||||
.data
|
||||
.table_info_values
|
||||
.drain(..)
|
||||
.zip(finished_tasks.iter())
|
||||
.filter_map(|(table_info_value, finished)| {
|
||||
if *finished {
|
||||
None
|
||||
} else {
|
||||
Some(table_info_value)
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) async fn fill_physical_table_info(&mut self) -> Result<()> {
|
||||
let (physical_table_info, physical_table_route) = self
|
||||
.context
|
||||
.table_metadata_manager
|
||||
.get_full_table_info(self.data.physical_table_id)
|
||||
.await?;
|
||||
|
||||
let physical_table_info = physical_table_info.with_context(|| TableInfoNotFoundSnafu {
|
||||
table: format!("table id - {}", self.data.physical_table_id),
|
||||
})?;
|
||||
let physical_table_route = physical_table_route
|
||||
.context(TableRouteNotFoundSnafu {
|
||||
table_id: self.data.physical_table_id,
|
||||
})?
|
||||
.into_inner();
|
||||
|
||||
self.data.physical_table_info = Some(physical_table_info);
|
||||
let TableRouteValue::Physical(physical_table_route) = physical_table_route else {
|
||||
return AlterLogicalTablesInvalidArgumentsSnafu {
|
||||
err_msg: format!(
|
||||
"expected a physical table but got a logical table: {:?}",
|
||||
self.data.physical_table_id
|
||||
),
|
||||
}
|
||||
.fail();
|
||||
};
|
||||
self.data.physical_table_route = Some(physical_table_route);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
    pub(crate) async fn fill_table_info_values(&mut self) -> Result<()> {
        let table_ids = self.get_all_table_ids().await?;
        let table_info_values = self.get_all_table_info_values(&table_ids).await?;
        debug_assert_eq!(table_info_values.len(), self.data.tasks.len());
        self.data.table_info_values = table_info_values;

        Ok(())
    }

    async fn get_all_table_info_values(
        &self,
        table_ids: &[TableId],
    ) -> Result<Vec<DeserializedValueWithBytes<TableInfoValue>>> {
        let table_info_manager = self.context.table_metadata_manager.table_info_manager();
        let mut table_info_map = table_info_manager.batch_get_raw(table_ids).await?;
        let mut table_info_values = Vec::with_capacity(table_ids.len());
        for (table_id, task) in table_ids.iter().zip(self.data.tasks.iter()) {
            let table_info_value =
                table_info_map
                    .remove(table_id)
                    .with_context(|| TableInfoNotFoundSnafu {
                        table: extract_table_name(task),
                    })?;
            table_info_values.push(table_info_value);
        }

        Ok(table_info_values)
    }

    async fn get_all_table_ids(&self) -> Result<Vec<TableId>> {
        let table_name_manager = self.context.table_metadata_manager.table_name_manager();
        let table_name_keys = self
            .data
            .tasks
            .iter()
            .map(|task| extract_table_name_key(task))
            .collect();

        let table_name_values = table_name_manager.batch_get(table_name_keys).await?;
        let mut table_ids = Vec::with_capacity(table_name_values.len());
        for (value, task) in table_name_values.into_iter().zip(self.data.tasks.iter()) {
            let table_id = value
                .with_context(|| TableNotFoundSnafu {
                    table_name: extract_table_name(task),
                })?
                .table_id();
            table_ids.push(table_id);
        }

        Ok(table_ids)
    }
}

#[inline]
fn extract_table_name(task: &AlterTableTask) -> String {
    format_full_table_name(
        &task.alter_table.catalog_name,
        &task.alter_table.schema_name,
        &task.alter_table.table_name,
    )
}

#[inline]
fn extract_table_name_key(task: &AlterTableTask) -> TableNameKey {
    TableNameKey::new(
        &task.alter_table.catalog_name,
        &task.alter_table.schema_name,
        &task.alter_table.table_name,
    )
}
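Both `get_all_table_ids` and `get_all_table_info_values` follow the same shape: one batched metadata read, then a zip back onto the originating tasks so each miss can name the specific table that failed. A minimal sketch of that shape, with a `HashMap` standing in for the batched KV backend and `String` for the snafu error types (all names here are illustrative):

```rust
use std::collections::HashMap;

// One batched lookup, then per-item error context on each miss.
fn batch_resolve_ids(store: &HashMap<String, u32>, names: &[&str]) -> Result<Vec<u32>, String> {
    let mut ids = Vec::with_capacity(names.len());
    for name in names {
        // Mirrors TableNotFoundSnafu: the error carries the failing table's name.
        let id = store
            .get(*name)
            .ok_or_else(|| format!("table not found: {name}"))?;
        ids.push(*id);
    }
    Ok(ids)
}

fn main() {
    let store = HashMap::from([("t1".to_string(), 11), ("t2".to_string(), 12)]);
    assert_eq!(batch_resolve_ids(&store, &["t1", "t2"]), Ok(vec![11, 12]));
    assert!(batch_resolve_ids(&store, &["t3"]).is_err());
}
```

The design choice is one round trip to the metadata store regardless of how many logical tables an alter batch touches, at the cost of re-associating results by position afterwards.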
113 src/common/meta/src/ddl/alter_logical_tables/region_request.rs (Normal file)
@@ -0,0 +1,113 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use api::v1::alter_table_expr::Kind;
use api::v1::region::{
    alter_request, region_request, AddColumn, AddColumns, AlterRequest, AlterRequests,
    RegionColumnDef, RegionRequest, RegionRequestHeader,
};
use api::v1::{self, AlterTableExpr};
use common_telemetry::tracing_context::TracingContext;
use store_api::storage::RegionId;

use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;
use crate::error::Result;
use crate::peer::Peer;
use crate::rpc::router::{find_leader_regions, RegionRoute};

impl AlterLogicalTablesProcedure {
    pub(crate) fn make_request(
        &self,
        peer: &Peer,
        region_routes: &[RegionRoute],
    ) -> Result<RegionRequest> {
        let alter_requests = self.make_alter_region_requests(peer, region_routes)?;
        let request = RegionRequest {
            header: Some(RegionRequestHeader {
                tracing_context: TracingContext::from_current_span().to_w3c(),
                ..Default::default()
            }),
            body: Some(region_request::Body::Alters(alter_requests)),
        };

        Ok(request)
    }

    fn make_alter_region_requests(
        &self,
        peer: &Peer,
        region_routes: &[RegionRoute],
    ) -> Result<AlterRequests> {
        let tasks = &self.data.tasks;
        let regions_on_this_peer = find_leader_regions(region_routes, peer);
        let mut requests = Vec::with_capacity(tasks.len() * regions_on_this_peer.len());
        for (task, table) in self
            .data
            .tasks
            .iter()
            .zip(self.data.table_info_values.iter())
        {
            for region_number in &regions_on_this_peer {
                let region_id = RegionId::new(table.table_info.ident.table_id, *region_number);
                let request = make_alter_region_request(
                    region_id,
                    &task.alter_table,
                    table.table_info.ident.version,
                );
                requests.push(request);
            }
        }

        Ok(AlterRequests { requests })
    }
}

/// Makes an alter region request.
pub fn make_alter_region_request(
    region_id: RegionId,
    alter_table_expr: &AlterTableExpr,
    schema_version: u64,
) -> AlterRequest {
    let region_id = region_id.as_u64();
    let kind = match &alter_table_expr.kind {
        Some(Kind::AddColumns(add_columns)) => Some(alter_request::Kind::AddColumns(
            to_region_add_columns(add_columns),
        )),
        _ => unreachable!(), // Safety: we have checked the kind in check_input_tasks
    };

    AlterRequest {
        region_id,
        schema_version,
        kind,
    }
}

fn to_region_add_columns(add_columns: &v1::AddColumns) -> AddColumns {
    let add_columns = add_columns
        .add_columns
        .iter()
        .map(|add_column| {
            let region_column_def = RegionColumnDef {
                column_def: add_column.column_def.clone(),
                ..Default::default() // other fields are not used in alter logical table
            };
            AddColumn {
                column_def: Some(region_column_def),
                ..Default::default() // other fields are not used in alter logical table
            }
        })
        .collect();
    AddColumns { add_columns }
}
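`make_alter_region_requests` emits one request per (logical table, leader region on this peer) pair, which is why the capacity hint is `tasks.len() * regions_on_this_peer.len()`. A stand-alone sketch of that fan-out with plain structs in place of the generated protobuf types; the `RegionId` packing as `table_id << 32 | region_number` is our reading of `store_api` and should be treated as an assumption:

```rust
// Illustrative stand-in for the protobuf AlterRequest.
#[derive(Debug)]
struct AlterRequest {
    region_id: u64,
    schema_version: u64,
}

fn make_requests(tables: &[(u64, u64)], regions_on_peer: &[u32]) -> Vec<AlterRequest> {
    let mut requests = Vec::with_capacity(tables.len() * regions_on_peer.len());
    for (table_id, version) in tables {
        for region in regions_on_peer {
            // Assumed RegionId layout: (table_id, region_number) packed into a u64.
            let region_id = (*table_id << 32) | *region as u64;
            requests.push(AlterRequest {
                region_id,
                schema_version: *version,
            });
        }
    }
    requests
}

fn main() {
    let reqs = make_requests(&[(1024, 3), (1025, 7)], &[0, 1]);
    assert_eq!(reqs.len(), 4); // 2 tables x 2 regions on this peer
}
```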
@@ -0,0 +1,50 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use table::metadata::RawTableInfo;
use table::table_name::TableName;

use crate::ddl::alter_logical_tables::AlterTablesData;
use crate::instruction::CacheIdent;

impl AlterTablesData {
    pub(crate) fn build_cache_keys_to_invalidate(&mut self) {
        let mut cache_keys = self
            .table_info_values
            .iter()
            .flat_map(|table| {
                vec![
                    CacheIdent::TableId(table.table_info.ident.table_id),
                    CacheIdent::TableName(extract_table_name(&table.table_info)),
                ]
            })
            .collect::<Vec<_>>();
        cache_keys.push(CacheIdent::TableId(self.physical_table_id));
        // Safety: physical_table_info already filled in previous steps
        let physical_table_info = &self.physical_table_info.as_ref().unwrap().table_info;
        cache_keys.push(CacheIdent::TableName(extract_table_name(
            physical_table_info,
        )));

        self.table_cache_keys_to_invalidate = cache_keys;
    }
}

fn extract_table_name(table_info: &RawTableInfo) -> TableName {
    TableName::new(
        &table_info.catalog_name,
        &table_info.schema_name,
        &table_info.name,
    )
}
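Each altered logical table contributes two invalidation keys (by id and by name), and the shared physical table is appended once at the end since its schema may also have changed. A minimal sketch of that key-building shape, with a plain enum standing in for `CacheIdent`:

```rust
// Illustrative stand-in for CacheIdent.
#[derive(Debug, PartialEq)]
enum CacheKey {
    Id(u32),
    Name(String),
}

fn build_keys(logical: &[(u32, &str)], physical: (u32, &str)) -> Vec<CacheKey> {
    let mut keys = logical
        .iter()
        .flat_map(|(id, name)| vec![CacheKey::Id(*id), CacheKey::Name(name.to_string())])
        .collect::<Vec<_>>();
    // The physical table is invalidated as well: its schema may have grown new columns.
    keys.push(CacheKey::Id(physical.0));
    keys.push(CacheKey::Name(physical.1.to_string()));
    keys
}

fn main() {
    let keys = build_keys(&[(11, "t1"), (12, "t2")], (10, "phy"));
    assert_eq!(keys.len(), 6); // 2 per logical table + 2 for the physical table
}
```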
@@ -13,35 +13,41 @@
// limitations under the License.

use common_grpc_expr::alter_expr_to_request;
use common_telemetry::warn;
use itertools::Itertools;
use snafu::ResultExt;
use table::metadata::{RawTableInfo, TableInfo};

-use crate::ddl::alter_logical_tables::executor::AlterLogicalTablesExecutor;
use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;
+use crate::ddl::physical_table_metadata;
use crate::error;
use crate::error::{ConvertAlterTableRequestSnafu, Result};
use crate::key::table_info::TableInfoValue;
use crate::key::DeserializedValueWithBytes;
use crate::rpc::ddl::AlterTableTask;
use crate::rpc::router::region_distribution;

impl AlterLogicalTablesProcedure {
    pub(crate) async fn update_physical_table_metadata(&mut self) -> Result<()> {
        if self.data.physical_columns.is_empty() {
            warn!("No physical columns found, leaving the physical table's schema unchanged when altering logical tables");
            return Ok(());
        }

        // Safety: must exist.
        let physical_table_info = self.data.physical_table_info.as_ref().unwrap();
-        let physical_table_route = self.data.physical_table_route.as_ref().unwrap();
-        let region_distribution = region_distribution(&physical_table_route.region_routes);
-
-        // Updates physical table's metadata.
-        AlterLogicalTablesExecutor::on_alter_metadata(
-            self.data.physical_table_id,
-            &self.context.table_metadata_manager,
-            physical_table_info,
-            region_distribution,
-        )
-        .await?;
+        // Generates new table info
+        let old_raw_table_info = physical_table_info.table_info.clone();
+        let new_raw_table_info = physical_table_metadata::build_new_physical_table_info(
+            old_raw_table_info,
+            &self.data.physical_columns,
+        );
+
+        // Updates physical table's metadata, and we don't need to touch per-region settings.
+        self.context
+            .table_metadata_manager
+            .update_table_info(physical_table_info, None, new_raw_table_info)
+            .await?;

        Ok(())
    }

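The replacement code above is a plain read-modify-write: build a new table info from the old one plus the columns collected from datanodes, then persist it in a single `update_table_info` call. A minimal sketch of that shape under stated assumptions; the stub type and the exact version-bump behavior of `build_new_physical_table_info` are ours, not confirmed from the source:

```rust
// Illustrative stand-in for RawTableInfo.
#[derive(Clone, Debug, PartialEq)]
struct TableInfoStub {
    columns: Vec<String>,
    version: u64,
}

// Assumed behavior: merge in any physical columns not yet present, advance the version.
fn build_new_info(old: TableInfoStub, physical_columns: &[String]) -> TableInfoStub {
    let mut new_info = old;
    for col in physical_columns {
        if !new_info.columns.contains(col) {
            new_info.columns.push(col.clone());
        }
    }
    new_info.version += 1;
    new_info
}

fn main() {
    let old = TableInfoStub { columns: vec!["ts".into()], version: 3 };
    let new_info = build_new_info(old, &["ts".into(), "cpu".into()]);
    assert_eq!(new_info.columns, vec!["ts".to_string(), "cpu".to_string()]);
    assert_eq!(new_info.version, 4);
}
```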
@@ -1,284 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashSet;

use api::v1::alter_table_expr::Kind;
use api::v1::AlterTableExpr;
use snafu::{ensure, OptionExt};
use store_api::storage::TableId;
use table::table_reference::TableReference;

use crate::ddl::utils::table_id::get_all_table_ids_by_names;
use crate::ddl::utils::table_info::get_all_table_info_values_by_table_ids;
use crate::error::{
    AlterLogicalTablesInvalidArgumentsSnafu, Result, TableInfoNotFoundSnafu,
    TableRouteNotFoundSnafu,
};
use crate::key::table_info::TableInfoValue;
use crate::key::table_route::{PhysicalTableRouteValue, TableRouteManager, TableRouteValue};
use crate::key::{DeserializedValueWithBytes, TableMetadataManagerRef};

/// [AlterLogicalTableValidator] validates the alter logical expressions.
pub struct AlterLogicalTableValidator<'a> {
    physical_table_id: TableId,
    alters: Vec<&'a AlterTableExpr>,
}

impl<'a> AlterLogicalTableValidator<'a> {
    pub fn new(physical_table_id: TableId, alters: Vec<&'a AlterTableExpr>) -> Self {
        Self {
            physical_table_id,
            alters,
        }
    }

    /// Validates all alter table expressions have the same schema and catalog.
    fn validate_schema(&self) -> Result<()> {
        let is_same_schema = self.alters.windows(2).all(|pair| {
            pair[0].catalog_name == pair[1].catalog_name
                && pair[0].schema_name == pair[1].schema_name
        });

        ensure!(
            is_same_schema,
            AlterLogicalTablesInvalidArgumentsSnafu {
                err_msg: "Schemas of the alter table expressions are not the same"
            }
        );

        Ok(())
    }

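`validate_schema` only compares adjacent pairs via `windows(2)`, which suffices because equality is transitive: all elements are equal iff every adjacent pair is. A stand-alone sketch of the check:

```rust
// Pairwise adjacent comparison implies global uniformity for an equivalence relation.
fn all_same_schema(schemas: &[(&str, &str)]) -> bool {
    schemas.windows(2).all(|pair| pair[0] == pair[1])
}

fn main() {
    assert!(all_same_schema(&[("greptime", "public"), ("greptime", "public")]));
    assert!(!all_same_schema(&[("greptime", "public"), ("greptime", "other")]));
    // Vacuously true for zero or one expression, matching the validator's intent.
    assert!(all_same_schema(&[]));
}
```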
    /// Validates that all alter table expressions are of the supported kind.
    /// Currently only supports `AddColumns` operations.
    fn validate_alter_kind(&self) -> Result<()> {
        for alter in &self.alters {
            let kind = alter
                .kind
                .as_ref()
                .context(AlterLogicalTablesInvalidArgumentsSnafu {
                    err_msg: "Alter kind is missing",
                })?;

            let Kind::AddColumns(_) = kind else {
                return AlterLogicalTablesInvalidArgumentsSnafu {
                    err_msg: "Only support add columns operation",
                }
                .fail();
            };
        }

        Ok(())
    }

    fn table_names(&self) -> Vec<TableReference> {
        self.alters
            .iter()
            .map(|alter| {
                TableReference::full(&alter.catalog_name, &alter.schema_name, &alter.table_name)
            })
            .collect()
    }

    /// Validates that the physical table info and route exist.
    ///
    /// This method performs the following validations:
    /// 1. Retrieves the full table info and route for the given physical table id
    /// 2. Ensures the table info and table route exist
    /// 3. Verifies that the table route is actually a physical table route, not a logical one
    ///
    /// Returns a tuple containing the validated table info and physical table route.
    async fn validate_physical_table(
        &self,
        table_metadata_manager: &TableMetadataManagerRef,
    ) -> Result<(
        DeserializedValueWithBytes<TableInfoValue>,
        PhysicalTableRouteValue,
    )> {
        let (table_info, table_route) = table_metadata_manager
            .get_full_table_info(self.physical_table_id)
            .await?;

        let table_info = table_info.with_context(|| TableInfoNotFoundSnafu {
            table: format!("table id - {}", self.physical_table_id),
        })?;

        let physical_table_route = table_route
            .context(TableRouteNotFoundSnafu {
                table_id: self.physical_table_id,
            })?
            .into_inner();

        let TableRouteValue::Physical(table_route) = physical_table_route else {
            return AlterLogicalTablesInvalidArgumentsSnafu {
                err_msg: format!(
                    "expected a physical table but got a logical table: {:?}",
                    self.physical_table_id
                ),
            }
            .fail();
        };

        Ok((table_info, table_route))
    }

    /// Validates that all logical table routes have the same physical table id.
    ///
    /// This method performs the following validations:
    /// 1. Retrieves table routes for all the given table ids.
    /// 2. Ensures that all retrieved routes are logical table routes (not physical)
    /// 3. Verifies that all logical table routes reference the same physical table id.
    /// 4. Returns an error if any route is not logical or references a different physical table.
    async fn validate_logical_table_routes(
        &self,
        table_route_manager: &TableRouteManager,
        table_ids: &[TableId],
    ) -> Result<()> {
        let table_routes = table_route_manager
            .table_route_storage()
            .batch_get(table_ids)
            .await?;

        let physical_table_id = self.physical_table_id;

        let is_same_physical_table = table_routes.iter().all(|r| {
            if let Some(TableRouteValue::Logical(r)) = r {
                r.physical_table_id() == physical_table_id
            } else {
                false
            }
        });

        ensure!(
            is_same_physical_table,
            AlterLogicalTablesInvalidArgumentsSnafu {
                err_msg: "All the tasks should have the same physical table id"
            }
        );

        Ok(())
    }

    /// Validates the alter logical expressions.
    ///
    /// This method performs the following validations:
    /// 1. Validates that all alter table expressions have the same schema and catalog.
    /// 2. Validates that all alter table expressions are of the supported kind.
    /// 3. Validates that the physical table info and route exist.
    /// 4. Validates that all logical table routes have the same physical table id.
    ///
    /// Returns a [ValidatorResult] containing the validation results.
    pub async fn validate(
        &self,
        table_metadata_manager: &TableMetadataManagerRef,
    ) -> Result<ValidatorResult> {
        self.validate_schema()?;
        self.validate_alter_kind()?;
        let (physical_table_info, physical_table_route) =
            self.validate_physical_table(table_metadata_manager).await?;
        let table_names = self.table_names();
        let table_ids =
            get_all_table_ids_by_names(table_metadata_manager.table_name_manager(), &table_names)
                .await?;
        let mut table_info_values = get_all_table_info_values_by_table_ids(
            table_metadata_manager.table_info_manager(),
            &table_ids,
            &table_names,
        )
        .await?;
        self.validate_logical_table_routes(
            table_metadata_manager.table_route_manager(),
            &table_ids,
        )
        .await?;
        let skip_alter = self
            .alters
            .iter()
            .zip(table_info_values.iter())
            .map(|(task, table)| skip_alter_logical_region(task, table))
            .collect::<Vec<_>>();
        retain_unskipped(&mut table_info_values, &skip_alter);
        let num_skipped = skip_alter.iter().filter(|&&x| x).count();

        Ok(ValidatorResult {
            num_skipped,
            skip_alter,
            table_info_values,
            physical_table_info,
            physical_table_route,
        })
    }
}

/// The result of the validator.
pub(crate) struct ValidatorResult {
    pub(crate) num_skipped: usize,
    pub(crate) skip_alter: Vec<bool>,
    pub(crate) table_info_values: Vec<DeserializedValueWithBytes<TableInfoValue>>,
    pub(crate) physical_table_info: DeserializedValueWithBytes<TableInfoValue>,
    pub(crate) physical_table_route: PhysicalTableRouteValue,
}

/// Retains the elements that are not skipped.
pub(crate) fn retain_unskipped<T>(target: &mut Vec<T>, skipped: &[bool]) {
    debug_assert_eq!(target.len(), skipped.len());
    let mut iter = skipped.iter();
    target.retain(|_| !iter.next().unwrap());
}

/// Returns true if the logical region does not require alteration.
fn skip_alter_logical_region(alter: &AlterTableExpr, table: &TableInfoValue) -> bool {
    let existing_columns = table
        .table_info
        .meta
        .schema
        .column_schemas
        .iter()
        .map(|c| &c.name)
        .collect::<HashSet<_>>();

    let Some(kind) = alter.kind.as_ref() else {
        return true; // Never get here since we have checked it in `validate_alter_kind`
    };
    let Kind::AddColumns(add_columns) = kind else {
        return true; // Never get here since we have checked it in `validate_alter_kind`
    };

    // We only check that all columns have been finished. That is to say,
    // if one part is finished but another part is not, it will be considered
    // unfinished.
    add_columns
        .add_columns
        .iter()
        .map(|add_column| add_column.column_def.as_ref().map(|c| &c.name))
        .all(|column| {
            column
                .map(|c| existing_columns.contains(c))
                .unwrap_or(false)
        })
}

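`skip_alter_logical_region` treats an alteration as a no-op only when every requested column already exists in the table schema; a partially applied request still runs in full. A sketch of that containment check with plain types standing in for the protobuf messages:

```rust
use std::collections::HashSet;

// An alter can be skipped only if every requested column is already present.
fn skip_alter(existing: &[&str], to_add: &[Option<&str>]) -> bool {
    let existing: HashSet<_> = existing.iter().copied().collect();
    to_add
        .iter()
        .all(|col| col.map(|c| existing.contains(c)).unwrap_or(false))
}

fn main() {
    // All requested columns exist: the region alter can be skipped.
    assert!(skip_alter(&["host", "cpu"], &[Some("cpu")]));
    // One column is missing: the alter must still run for the whole request.
    assert!(!skip_alter(&["host", "cpu"], &[Some("cpu"), Some("mem")]));
    // A request with no column def is conservatively treated as "do not skip".
    assert!(!skip_alter(&["host"], &[None]));
}
```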
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_retain_unskipped() {
        let mut target = vec![1, 2, 3, 4, 5];
        let skipped = vec![false, true, false, true, false];
        retain_unskipped(&mut target, &skipped);
        assert_eq!(target, vec![1, 3, 5]);
    }
}
@@ -12,9 +12,10 @@
// limitations under the License.

-mod executor;
+mod check;
mod metadata;
+mod region_request;
mod update_metadata;

use std::vec;

@@ -28,29 +29,30 @@ use common_procedure::{
    Context as ProcedureContext, ContextProvider, Error as ProcedureError, LockKey, PoisonKey,
    PoisonKeys, Procedure, ProcedureId, Status, StringKey,
};
-use common_telemetry::{error, info, warn};
+use common_telemetry::{debug, error, info};
+use futures::future::{self};
use serde::{Deserialize, Serialize};
use snafu::{ensure, ResultExt};
use store_api::metadata::ColumnMetadata;
use store_api::metric_engine_consts::TABLE_COLUMN_METADATA_EXTENSION_KEY;
use store_api::storage::RegionId;
use strum::AsRefStr;
use table::metadata::{RawTableInfo, TableId, TableInfo};
use table::table_reference::TableReference;

-use crate::ddl::alter_table::executor::AlterTableExecutor;
+use crate::cache_invalidator::Context;
use crate::ddl::utils::{
-    extract_column_metadatas, handle_multiple_results, map_to_procedure_error,
+    add_peer_context_if_needed, handle_multiple_results, map_to_procedure_error,
    sync_follower_regions, MultipleResults,
};
use crate::ddl::DdlContext;
use crate::error::{AbortProcedureSnafu, NoLeaderSnafu, PutPoisonSnafu, Result, RetryLaterSnafu};
use crate::instruction::CacheIdent;
use crate::key::table_info::TableInfoValue;
use crate::key::{DeserializedValueWithBytes, RegionDistribution};
use crate::lock_key::{CatalogLock, SchemaLock, TableLock, TableNameLock};
use crate::metrics;
use crate::poison_key::table_poison_key;
use crate::rpc::ddl::AlterTableTask;
-use crate::rpc::router::{find_leaders, region_distribution, RegionRoute};
+use crate::rpc::router::{find_leader_regions, find_leaders, region_distribution, RegionRoute};

/// The alter table procedure
pub struct AlterTableProcedure {
@@ -62,24 +64,6 @@ pub struct AlterTableProcedure {
    /// If we recover the procedure from json, then the table info value is not cached.
    /// But we already validated it in the prepare step.
    new_table_info: Option<TableInfo>,
-    /// The alter table executor.
-    executor: AlterTableExecutor,
}

-/// Builds the executor from the [`AlterTableData`].
-///
-/// # Panics
-/// - If the alter kind is not set.
-fn build_executor_from_alter_expr(alter_data: &AlterTableData) -> AlterTableExecutor {
-    let table_name = alter_data.table_ref().into();
-    let table_id = alter_data.table_id;
-    let alter_kind = alter_data.task.alter_table.kind.as_ref().unwrap();
-    let new_table_name = if let Kind::RenameTable(RenameTable { new_table_name }) = alter_kind {
-        Some(new_table_name.to_string())
-    } else {
-        None
-    };
-    AlterTableExecutor::new(table_name, table_id, new_table_name)
-}
-
impl AlterTableProcedure {
@@ -87,42 +71,33 @@ impl AlterTableProcedure {

    pub fn new(table_id: TableId, task: AlterTableTask, context: DdlContext) -> Result<Self> {
        task.validate()?;
-        let data = AlterTableData::new(task, table_id);
-        let executor = build_executor_from_alter_expr(&data);
        Ok(Self {
            context,
-            data,
+            data: AlterTableData::new(task, table_id),
            new_table_info: None,
-            executor,
        })
    }

    pub fn from_json(json: &str, context: DdlContext) -> ProcedureResult<Self> {
        let data: AlterTableData = serde_json::from_str(json).context(FromJsonSnafu)?;
-        let executor = build_executor_from_alter_expr(&data);

        Ok(AlterTableProcedure {
            context,
            data,
            new_table_info: None,
-            executor,
        })
    }

    // Checks whether the table exists.
    pub(crate) async fn on_prepare(&mut self) -> Result<Status> {
-        self.executor
-            .on_prepare(&self.context.table_metadata_manager)
-            .await?;
+        self.check_alter().await?;
        self.fill_table_info().await?;

-        // Safety: filled in `fill_table_info`.
+        // Validates the request and builds the new table info.
+        // We need to build the new table info here because we should ensure the alteration
+        // is valid in the `UpdateMetadata` state, as we will have already altered the regions by then.
+        // Safety: `fill_table_info()` already set it.
        let table_info_value = self.data.table_info_value.as_ref().unwrap();
-        let new_table_info = AlterTableExecutor::validate_alter_table_expr(
-            &table_info_value.table_info,
-            self.data.task.alter_table.clone(),
-        )?;
-        self.new_table_info = Some(new_table_info);
+        self.new_table_info = Some(self.build_new_table_info(&table_info_value.table_info)?);

        // Safety: Checked in `AlterTableProcedure::new`.
        let alter_kind = self.data.task.alter_table.kind.as_ref().unwrap();
@@ -165,7 +140,9 @@ impl AlterTableProcedure {

        self.data.region_distribution =
            Some(region_distribution(&physical_table_route.region_routes));
+
        let leaders = find_leaders(&physical_table_route.region_routes);
+        let mut alter_region_tasks = Vec::with_capacity(leaders.len());
        let alter_kind = self.make_region_alter_kind()?;

        info!(
@@ -178,14 +155,31 @@ impl AlterTableProcedure {
        ensure!(!leaders.is_empty(), NoLeaderSnafu { table_id });
        // Puts the poison before submitting alter region requests to datanodes.
        self.put_poison(ctx_provider, procedure_id).await?;
-        let results = self
-            .executor
-            .on_alter_regions(
-                &self.context.node_manager,
-                &physical_table_route.region_routes,
-                alter_kind,
-            )
-            .await;
+        for datanode in leaders {
+            let requester = self.context.node_manager.datanode(&datanode).await;
+            let regions = find_leader_regions(&physical_table_route.region_routes, &datanode);
+
+            for region in regions {
+                let region_id = RegionId::new(table_id, region);
+                let request = self.make_alter_region_request(region_id, alter_kind.clone())?;
+                debug!("Submitting {request:?} to {datanode}");
+
+                let datanode = datanode.clone();
+                let requester = requester.clone();
+
+                alter_region_tasks.push(async move {
+                    requester
+                        .handle(request)
+                        .await
+                        .map_err(add_peer_context_if_needed(datanode))
+                });
+            }
+        }
+
+        let results = future::join_all(alter_region_tasks)
+            .await
+            .into_iter()
+            .collect::<Vec<_>>();

        match handle_multiple_results(results) {
            MultipleResults::PartialRetryable(error) => {
@@ -208,9 +202,9 @@ impl AlterTableProcedure {
                })
            }
            MultipleResults::Ok(results) => {
-                self.submit_sync_region_requests(&results, &physical_table_route.region_routes)
+                self.submit_sync_region_requests(results, &physical_table_route.region_routes)
                    .await;
-                self.handle_alter_region_response(results)?;
+                self.data.state = AlterTableState::UpdateMetadata;
                Ok(Status::executing_with_clean_poisons(true))
            }
            MultipleResults::AllNonRetryable(error) => {
@@ -226,22 +220,9 @@ impl AlterTableProcedure {
        }
    }

-    fn handle_alter_region_response(&mut self, mut results: Vec<RegionResponse>) -> Result<()> {
-        self.data.state = AlterTableState::UpdateMetadata;
-        if let Some(column_metadatas) =
-            extract_column_metadatas(&mut results, TABLE_COLUMN_METADATA_EXTENSION_KEY)?
-        {
-            self.data.column_metadatas = column_metadatas;
-        } else {
-            warn!("altering table result doesn't contains extension key `{TABLE_COLUMN_METADATA_EXTENSION_KEY}`,leaving the table's column metadata unchanged");
-        }
-
-        Ok(())
-    }
-
    async fn submit_sync_region_requests(
        &mut self,
-        results: &[RegionResponse],
+        results: Vec<RegionResponse>,
        region_routes: &[RegionRoute],
    ) {
        // Safety: filled in `prepare` step.
@@ -263,34 +244,39 @@ impl AlterTableProcedure {
    pub(crate) async fn on_update_metadata(&mut self) -> Result<Status> {
        let table_id = self.data.table_id();
        let table_ref = self.data.table_ref();
-        // Safety: filled in `fill_table_info`.
+        // Safety: checked before.
        let table_info_value = self.data.table_info_value.as_ref().unwrap();
-        // Safety: Checked in `AlterTableProcedure::new`.
-        let alter_kind = self.data.task.alter_table.kind.as_ref().unwrap();

        // Gets the table info from the cache or builds it.
        let new_info = match &self.new_table_info {
            Some(cached) => cached.clone(),
-            None => AlterTableExecutor::validate_alter_table_expr(
-                &table_info_value.table_info,
-                self.data.task.alter_table.clone(),
-            )
+            None => self.build_new_table_info(&table_info_value.table_info)
                .inspect_err(|e| {
                    // We already check the table info in the prepare step so this should not happen.
                    error!(e; "Unable to build info for table {} in update metadata step, table_id: {}", table_ref, table_id);
                })?,
        };

-        // Safety: region distribution is set in `submit_alter_region_requests`.
-        self.executor
-            .on_alter_metadata(
-                &self.context.table_metadata_manager,
-                table_info_value,
-                self.data.region_distribution.as_ref(),
-                new_info.into(),
-                &self.data.column_metadatas,
-            )
-            .await?;
+        debug!(
+            "Starting update table: {} metadata, new table info {:?}",
+            table_ref.to_string(),
+            new_info
+        );
+
+        // Safety: Checked in `AlterTableProcedure::new`.
+        let alter_kind = self.data.task.alter_table.kind.as_ref().unwrap();
+        if let Kind::RenameTable(RenameTable { new_table_name }) = alter_kind {
+            self.on_update_metadata_for_rename(new_table_name.to_string(), table_info_value)
+                .await?;
+        } else {
+            // region distribution is set in submit_alter_region_requests
+            let region_distribution = self.data.region_distribution.as_ref().unwrap().clone();
+            self.on_update_metadata_for_alter(
+                new_info.into(),
+                region_distribution,
+                table_info_value,
+            )
+            .await?;
+        }

        info!("Updated table metadata for table {table_ref}, table_id: {table_id}, kind: {alter_kind:?}");
        self.data.state = AlterTableState::InvalidateTableCache;
@@ -299,9 +285,18 @@ impl AlterTableProcedure {

    /// Broadcasts the invalidating table cache instructions.
    async fn on_broadcast(&mut self) -> Result<Status> {
-        self.executor
-            .invalidate_table_cache(&self.context.cache_invalidator)
+        let cache_invalidator = &self.context.cache_invalidator;
+
+        cache_invalidator
+            .invalidate(
+                &Context::default(),
+                &[
+                    CacheIdent::TableId(self.data.table_id()),
+                    CacheIdent::TableName(self.data.table_ref().into()),
+                ],
+            )
            .await?;

        Ok(Status::done())
    }

@@ -323,16 +318,6 @@ impl AlterTableProcedure {

        lock_key
    }

-    #[cfg(test)]
-    pub(crate) fn data(&self) -> &AlterTableData {
-        &self.data
-    }
-
-    #[cfg(test)]
-    pub(crate) fn mut_data(&mut self) -> &mut AlterTableData {
-        &mut self.data
-    }
}

#[async_trait]
@@ -395,8 +380,6 @@ pub struct AlterTableData {
    state: AlterTableState,
    task: AlterTableTask,
    table_id: TableId,
-    #[serde(default)]
-    column_metadatas: Vec<ColumnMetadata>,
    /// Table info value before alteration.
    table_info_value: Option<DeserializedValueWithBytes<TableInfoValue>>,
    /// Region distribution for table in case we need to update region options.
@@ -409,7 +392,6 @@ impl AlterTableData {
            state: AlterTableState::Prepare,
            task,
            table_id,
-            column_metadatas: vec![],
            table_info_value: None,
            region_distribution: None,
        }
@@ -428,14 +410,4 @@ impl AlterTableData {
            .as_ref()
            .map(|value| &value.table_info)
    }

-    #[cfg(test)]
-    pub(crate) fn column_metadatas(&self) -> &[ColumnMetadata] {
-        &self.column_metadatas
-    }
-
-    #[cfg(test)]
-    pub(crate) fn set_column_metadatas(&mut self, column_metadatas: Vec<ColumnMetadata>) {
-        self.column_metadatas = column_metadatas;
-    }
}

62 src/common/meta/src/ddl/alter_table/check.rs (Normal file)
@@ -0,0 +1,62 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use api::v1::alter_table_expr::Kind;
use api::v1::RenameTable;
use common_catalog::format_full_table_name;
use snafu::ensure;

use crate::ddl::alter_table::AlterTableProcedure;
use crate::error::{self, Result};
use crate::key::table_name::TableNameKey;

impl AlterTableProcedure {
    /// Checks:
    /// - The new table name doesn't exist (rename).
    /// - Table exists.
    pub(crate) async fn check_alter(&self) -> Result<()> {
        let alter_expr = &self.data.task.alter_table;
        let catalog = &alter_expr.catalog_name;
        let schema = &alter_expr.schema_name;
        let table_name = &alter_expr.table_name;
        // Safety: Checked in `AlterTableProcedure::new`.
        let alter_kind = self.data.task.alter_table.kind.as_ref().unwrap();

        let manager = &self.context.table_metadata_manager;
        if let Kind::RenameTable(RenameTable { new_table_name }) = alter_kind {
            let new_table_name_key = TableNameKey::new(catalog, schema, new_table_name);
            let exists = manager
                .table_name_manager()
                .exists(new_table_name_key)
                .await?;
            ensure!(
                !exists,
                error::TableAlreadyExistsSnafu {
                    table_name: format_full_table_name(catalog, schema, new_table_name),
                }
            )
        }

        let table_name_key = TableNameKey::new(catalog, schema, table_name);
        let exists = manager.table_name_manager().exists(table_name_key).await?;
        ensure!(
            exists,
            error::TableNotFoundSnafu {
                table_name: format_full_table_name(catalog, schema, &alter_expr.table_name),
            }
        );

        Ok(())
    }
}
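`check_alter` enforces two preconditions before any region work starts: for a rename, the target name must still be free, and in all cases the source table must exist. A sketch of the same two checks with a `HashSet` standing in for the table-name manager:

```rust
use std::collections::HashSet;

// Two preconditions: the rename target must be free, the source must exist.
fn check_rename(tables: &HashSet<&str>, from: &str, to: &str) -> Result<(), String> {
    if tables.contains(to) {
        return Err(format!("table already exists: {to}"));
    }
    if !tables.contains(from) {
        return Err(format!("table not found: {from}"));
    }
    Ok(())
}

fn main() {
    let tables = HashSet::from(["metrics"]);
    assert!(check_rename(&tables, "metrics", "metrics_v2").is_ok());
    assert!(check_rename(&tables, "missing", "x").is_err()); // source missing
    assert!(check_rename(&tables, "metrics", "metrics").is_err()); // target taken
}
```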
@@ -1,308 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;

use api::region::RegionResponse;
use api::v1::region::region_request::Body;
use api::v1::region::{alter_request, AlterRequest, RegionRequest, RegionRequestHeader};
use api::v1::AlterTableExpr;
use common_catalog::format_full_table_name;
use common_grpc_expr::alter_expr_to_request;
use common_telemetry::debug;
use common_telemetry::tracing_context::TracingContext;
use futures::future;
use snafu::{ensure, ResultExt};
use store_api::metadata::ColumnMetadata;
use store_api::storage::{RegionId, TableId};
use table::metadata::{RawTableInfo, TableInfo};
use table::requests::AlterKind;
use table::table_name::TableName;

use crate::cache_invalidator::{CacheInvalidatorRef, Context};
use crate::ddl::utils::{add_peer_context_if_needed, raw_table_info};
use crate::error::{self, Result, UnexpectedSnafu};
use crate::instruction::CacheIdent;
use crate::key::table_info::TableInfoValue;
use crate::key::table_name::TableNameKey;
use crate::key::{DeserializedValueWithBytes, RegionDistribution, TableMetadataManagerRef};
use crate::node_manager::NodeManagerRef;
use crate::rpc::router::{find_leaders, region_distribution, RegionRoute};

/// [AlterTableExecutor] performs:
/// - Alters the metadata of the table.
/// - Alters regions on the datanode nodes.
pub struct AlterTableExecutor {
    table: TableName,
    table_id: TableId,
    /// The new table name if the alter kind is rename table.
    new_table_name: Option<String>,
}

impl AlterTableExecutor {
    /// Creates a new [`AlterTableExecutor`].
    pub fn new(table: TableName, table_id: TableId, new_table_name: Option<String>) -> Self {
        Self {
            table,
            table_id,
            new_table_name,
        }
    }

    /// Prepares to alter the table.
    ///
    /// ## Checks:
    /// - The new table name doesn't exist (rename).
    /// - Table exists.
    pub(crate) async fn on_prepare(
        &self,
        table_metadata_manager: &TableMetadataManagerRef,
    ) -> Result<()> {
        let catalog = &self.table.catalog_name;
        let schema = &self.table.schema_name;
        let table_name = &self.table.table_name;

        let manager = table_metadata_manager;
        if let Some(new_table_name) = &self.new_table_name {
            let new_table_name_key = TableNameKey::new(catalog, schema, new_table_name);
            let exists = manager
                .table_name_manager()
                .exists(new_table_name_key)
                .await?;
            ensure!(
                !exists,
                error::TableAlreadyExistsSnafu {
                    table_name: format_full_table_name(catalog, schema, new_table_name),
                }
            )
        }

        let table_name_key = TableNameKey::new(catalog, schema, table_name);
        let exists = manager.table_name_manager().exists(table_name_key).await?;
        ensure!(
            exists,
            error::TableNotFoundSnafu {
                table_name: format_full_table_name(catalog, schema, table_name),
            }
        );

        Ok(())
    }

    /// Validates the alter table expression and builds the new table info.
    ///
    /// This validation is performed early to ensure the alteration is valid before
    /// proceeding to the `on_alter_metadata` state, where regions have already been altered.
    /// Building the new table info here allows us to catch any issues with the
    /// alteration before committing metadata changes.
    pub(crate) fn validate_alter_table_expr(
        table_info: &RawTableInfo,
        alter_table_expr: AlterTableExpr,
    ) -> Result<TableInfo> {
        build_new_table_info(table_info, alter_table_expr)
    }

    /// Updates table metadata for alter table operation.
    pub(crate) async fn on_alter_metadata(
        &self,
        table_metadata_manager: &TableMetadataManagerRef,
        current_table_info_value: &DeserializedValueWithBytes<TableInfoValue>,
        region_distribution: Option<&RegionDistribution>,
        mut raw_table_info: RawTableInfo,
        column_metadatas: &[ColumnMetadata],
    ) -> Result<()> {
        let table_ref = self.table.table_ref();
        let table_id = self.table_id;

        if let Some(new_table_name) = &self.new_table_name {
            debug!(
                "Starting update table: {} metadata, table_id: {}, new table info: {:?}, new table name: {}",
                table_ref, table_id, raw_table_info, new_table_name
            );

            table_metadata_manager
                .rename_table(current_table_info_value, new_table_name.to_string())
                .await?;
        } else {
            debug!(
                "Starting update table: {} metadata, table_id: {}, new table info: {:?}",
                table_ref, table_id, raw_table_info
            );

            ensure!(
                region_distribution.is_some(),
                UnexpectedSnafu {
                    err_msg: "region distribution is not set when updating table metadata",
                }
            );

            if !column_metadatas.is_empty() {
                raw_table_info::update_table_info_column_ids(&mut raw_table_info, column_metadatas);
            }
            table_metadata_manager
                .update_table_info(
                    current_table_info_value,
                    region_distribution.cloned(),
                    raw_table_info,
                )
                .await?;
        }

        Ok(())
    }

    /// Alters regions on the datanode nodes.
    pub(crate) async fn on_alter_regions(
        &self,
        node_manager: &NodeManagerRef,
        region_routes: &[RegionRoute],
        kind: Option<alter_request::Kind>,
    ) -> Vec<Result<RegionResponse>> {
        let region_distribution = region_distribution(region_routes);
        let leaders = find_leaders(region_routes)
            .into_iter()
            .map(|p| (p.id, p))
            .collect::<HashMap<_, _>>();
        let total_num_region = region_distribution
            .values()
            .map(|r| r.leader_regions.len())
            .sum::<usize>();
        let mut alter_region_tasks = Vec::with_capacity(total_num_region);
        for (datanode_id, region_role_set) in region_distribution {
            if region_role_set.leader_regions.is_empty() {
                continue;
            }
            // Safety: must exist.
            let peer = leaders.get(&datanode_id).unwrap();
            let requester = node_manager.datanode(peer).await;

            for region_id in region_role_set.leader_regions {
                let region_id = RegionId::new(self.table_id, region_id);
                let request = make_alter_region_request(region_id, kind.clone());

                let requester = requester.clone();
                let peer = peer.clone();

                alter_region_tasks.push(async move {
                    requester
                        .handle(request)
                        .await
                        .map_err(add_peer_context_if_needed(peer))
                });
            }
        }

        future::join_all(alter_region_tasks)
            .await
            .into_iter()
            .collect::<Vec<_>>()
    }
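`on_alter_regions` builds one future per leader region and joins them all at the end, so a single failing region does not short-circuit the others; the caller aggregates the `Vec<Result<..>>` afterwards. A minimal sketch of that fan-out/join pattern using only the `futures` crate (the `alter_region` stub is ours):

```rust
use futures::future;

// Stub for sending one alter request to a region's leader datanode.
async fn alter_region(region_id: u64) -> Result<u64, String> {
    if region_id % 2 == 0 {
        Ok(region_id)
    } else {
        Err(format!("region {region_id} failed"))
    }
}

fn main() {
    let results = futures::executor::block_on(async {
        // One task per region; join_all collects every outcome instead of bailing early.
        let tasks: Vec<_> = (0u64..4).map(alter_region).collect();
        future::join_all(tasks).await
    });
    // Each region keeps its own Result; the caller decides how to aggregate.
    assert_eq!(results.iter().filter(|r| r.is_ok()).count(), 2);
}
```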

    /// Invalidates cache for the table.
    pub(crate) async fn invalidate_table_cache(
        &self,
        cache_invalidator: &CacheInvalidatorRef,
    ) -> Result<()> {
        let ctx = Context {
            subject: Some(format!(
                "Invalidate table cache by altering table {}, table_id: {}",
                self.table.table_ref(),
                self.table_id,
            )),
        };

        cache_invalidator
            .invalidate(
                &ctx,
                &[
                    CacheIdent::TableName(self.table.clone()),
                    CacheIdent::TableId(self.table_id),
                ],
            )
            .await?;

        Ok(())
    }
}

/// Makes alter region request.
pub(crate) fn make_alter_region_request(
    region_id: RegionId,
    kind: Option<alter_request::Kind>,
) -> RegionRequest {
    RegionRequest {
        header: Some(RegionRequestHeader {
            tracing_context: TracingContext::from_current_span().to_w3c(),
            ..Default::default()
        }),
        body: Some(Body::Alter(AlterRequest {
            region_id: region_id.as_u64(),
            kind,
            ..Default::default()
        })),
    }
}

/// Builds new table info after alteration.
///
/// This function creates a new table info by applying the alter table expression
/// to the existing table info. For add column operations, it increments the
/// `next_column_id` by the number of columns being added, which may result in gaps
/// in the column id sequence.
fn build_new_table_info(
    table_info: &RawTableInfo,
    alter_table_expr: AlterTableExpr,
) -> Result<TableInfo> {
    let table_info =
        TableInfo::try_from(table_info.clone()).context(error::ConvertRawTableInfoSnafu)?;
    let schema_name = &table_info.schema_name;
    let catalog_name = &table_info.catalog_name;
    let table_name = &table_info.name;
    let table_id = table_info.ident.table_id;
    let request = alter_expr_to_request(table_id, alter_table_expr)
        .context(error::ConvertAlterTableRequestSnafu)?;

    let new_meta = table_info
        .meta
        .builder_with_alter_kind(table_name, &request.alter_kind)
        .context(error::TableSnafu)?
        .build()
        .with_context(|_| error::BuildTableMetaSnafu {
            table_name: format_full_table_name(catalog_name, schema_name, table_name),
        })?;

    let mut new_info = table_info.clone();
    new_info.meta = new_meta;
    new_info.ident.version = table_info.ident.version + 1;
    match request.alter_kind {
        AlterKind::AddColumns { columns } => {
            // Bumps the column id for the new columns.
            // It may bump more than the actual number of columns added if there are
            // existing columns, but it's fine.
            new_info.meta.next_column_id += columns.len() as u32;
        }
        AlterKind::RenameTable { new_table_name } => {
            new_info.name = new_table_name.to_string();
        }
        AlterKind::DropColumns { .. }
        | AlterKind::ModifyColumnTypes { .. }
        | AlterKind::SetTableOptions { .. }
        | AlterKind::UnsetTableOptions { .. }
        | AlterKind::SetIndex { .. }
        | AlterKind::UnsetIndex { .. }
        | AlterKind::DropDefaults { .. } => {}
    }

    Ok(new_info)
}
@@ -15,16 +15,43 @@
use std::collections::HashSet;

use api::v1::alter_table_expr::Kind;
+use api::v1::region::region_request::Body;
use api::v1::region::{
-    alter_request, AddColumn, AddColumns, DropColumn, DropColumns, RegionColumnDef,
+    alter_request, AddColumn, AddColumns, AlterRequest, DropColumn, DropColumns, RegionColumnDef,
    RegionRequest, RegionRequestHeader,
};
+use common_telemetry::tracing_context::TracingContext;
use snafu::OptionExt;
+use store_api::storage::RegionId;
use table::metadata::RawTableInfo;

use crate::ddl::alter_table::AlterTableProcedure;
use crate::error::{InvalidProtoMsgSnafu, Result};

impl AlterTableProcedure {
+    /// Makes an alter region request from an existing alter kind.
+    /// A region alter request always adds columns if they do not already exist.
+    pub(crate) fn make_alter_region_request(
+        &self,
+        region_id: RegionId,
+        kind: Option<alter_request::Kind>,
+    ) -> Result<RegionRequest> {
+        // Safety: checked
+        let table_info = self.data.table_info().unwrap();
+
+        Ok(RegionRequest {
+            header: Some(RegionRequestHeader {
+                tracing_context: TracingContext::from_current_span().to_w3c(),
+                ..Default::default()
+            }),
+            body: Some(Body::Alter(AlterRequest {
+                region_id: region_id.as_u64(),
+                schema_version: table_info.ident.version,
+                kind,
+            })),
+        })
+    }

    /// Makes an alter kind proto that all regions can reuse.
    /// A region alter request always adds columns if they do not already exist.
    pub(crate) fn make_region_alter_kind(&self) -> Result<Option<alter_request::Kind>> {
@@ -128,7 +155,6 @@ mod tests {
    use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
    use store_api::storage::{RegionId, TableId};

-    use crate::ddl::alter_table::executor::make_alter_region_request;
    use crate::ddl::alter_table::AlterTableProcedure;
    use crate::ddl::test_util::columns::TestColumnDefBuilder;
    use crate::ddl::test_util::create_table::{
@@ -235,13 +261,15 @@ mod tests {
        let mut procedure = AlterTableProcedure::new(table_id, task, ddl_context).unwrap();
        procedure.on_prepare().await.unwrap();
        let alter_kind = procedure.make_region_alter_kind().unwrap();
-        let Some(Body::Alter(alter_region_request)) =
-            make_alter_region_request(region_id, alter_kind).body
+        let Some(Body::Alter(alter_region_request)) = procedure
+            .make_alter_region_request(region_id, alter_kind)
+            .unwrap()
+            .body
        else {
            unreachable!()
        };
        assert_eq!(alter_region_request.region_id, region_id.as_u64());
-        assert_eq!(alter_region_request.schema_version, 0);
+        assert_eq!(alter_region_request.schema_version, 1);
        assert_eq!(
            alter_region_request.kind,
            Some(region::alter_request::Kind::AddColumns(
@@ -291,13 +319,15 @@ mod tests {
        let mut procedure = AlterTableProcedure::new(table_id, task, ddl_context).unwrap();
        procedure.on_prepare().await.unwrap();
        let alter_kind = procedure.make_region_alter_kind().unwrap();
-        let Some(Body::Alter(alter_region_request)) =
-            make_alter_region_request(region_id, alter_kind).body
+        let Some(Body::Alter(alter_region_request)) = procedure
+            .make_alter_region_request(region_id, alter_kind)
+            .unwrap()
+            .body
        else {
            unreachable!()
        };
        assert_eq!(alter_region_request.region_id, region_id.as_u64());
-        assert_eq!(alter_region_request.schema_version, 0);
+        assert_eq!(alter_region_request.schema_version, 1);
        assert_eq!(
            alter_region_request.kind,
            Some(region::alter_request::Kind::ModifyColumnTypes(

103 src/common/meta/src/ddl/alter_table/update_metadata.rs (Normal file)
@@ -0,0 +1,103 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use common_grpc_expr::alter_expr_to_request;
use snafu::ResultExt;
use table::metadata::{RawTableInfo, TableInfo};
use table::requests::AlterKind;

use crate::ddl::alter_table::AlterTableProcedure;
use crate::error::{self, Result};
use crate::key::table_info::TableInfoValue;
use crate::key::{DeserializedValueWithBytes, RegionDistribution};

impl AlterTableProcedure {
    /// Builds new table info after alteration.
    /// It bumps the column id of the table by the number of the add column requests.
    /// So there may be holes in the column id sequence.
    pub(crate) fn build_new_table_info(&self, table_info: &RawTableInfo) -> Result<TableInfo> {
        let table_info =
            TableInfo::try_from(table_info.clone()).context(error::ConvertRawTableInfoSnafu)?;
        let table_ref = self.data.table_ref();
        let alter_expr = self.data.task.alter_table.clone();
        let request = alter_expr_to_request(self.data.table_id(), alter_expr)
            .context(error::ConvertAlterTableRequestSnafu)?;

        let new_meta = table_info
            .meta
            .builder_with_alter_kind(table_ref.table, &request.alter_kind)
            .context(error::TableSnafu)?
            .build()
            .with_context(|_| error::BuildTableMetaSnafu {
                table_name: table_ref.table,
            })?;

        let mut new_info = table_info.clone();
        new_info.meta = new_meta;
        new_info.ident.version = table_info.ident.version + 1;
        match request.alter_kind {
            AlterKind::AddColumns { columns } => {
                // Bumps the column id for the new columns.
                // It may bump more than the actual number of columns added if there are
                // existing columns, but it's fine.
                new_info.meta.next_column_id += columns.len() as u32;
            }
            AlterKind::RenameTable { new_table_name } => {
                new_info.name = new_table_name.to_string();
            }
            AlterKind::DropColumns { .. }
            | AlterKind::ModifyColumnTypes { .. }
            | AlterKind::SetTableOptions { .. }
            | AlterKind::UnsetTableOptions { .. }
            | AlterKind::SetIndex { .. }
            | AlterKind::UnsetIndex { .. }
            | AlterKind::DropDefaults { .. } => {}
        }

        Ok(new_info)
    }

    /// Updates table metadata for rename table operation.
    pub(crate) async fn on_update_metadata_for_rename(
        &self,
        new_table_name: String,
        current_table_info_value: &DeserializedValueWithBytes<TableInfoValue>,
    ) -> Result<()> {
        let table_metadata_manager = &self.context.table_metadata_manager;
        table_metadata_manager
            .rename_table(current_table_info_value, new_table_name)
            .await?;

        Ok(())
    }

    /// Updates table metadata for alter table operation.
    pub(crate) async fn on_update_metadata_for_alter(
        &self,
        new_table_info: RawTableInfo,
        region_distribution: RegionDistribution,
        current_table_info_value: &DeserializedValueWithBytes<TableInfoValue>,
    ) -> Result<()> {
        let table_metadata_manager = &self.context.table_metadata_manager;
        table_metadata_manager
            .update_table_info(
                current_table_info_value,
                Some(region_distribution),
                new_table_info,
            )
            .await?;

        Ok(())
    }
}
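`build_new_table_info` does two pieces of bookkeeping regardless of the alter kind: the table version always advances by one, and for `AddColumns` the `next_column_id` advances by the size of the request, so the column id sequence may contain holes. A small sketch of just that bookkeeping (stub type is ours):

```rust
// Illustrative stand-in for the relevant TableMeta fields.
struct MetaStub {
    version: u64,
    next_column_id: u32,
}

fn apply_add_columns(meta: &mut MetaStub, requested_columns: usize) {
    meta.version += 1;
    // Bumped by the request size, not by the columns actually added; gaps are fine.
    meta.next_column_id += requested_columns as u32;
}

fn main() {
    let mut meta = MetaStub { version: 0, next_column_id: 5 };
    apply_add_columns(&mut meta, 3);
    assert_eq!(meta.version, 1);
    assert_eq!(meta.next_column_id, 8);
}
```

This is also why the test assertions above moved from `schema_version, 0` to `schema_version, 1`: after `on_prepare` the new table info already carries the bumped version.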
@@ -167,25 +167,6 @@ impl CreateFlowProcedure {
        }

        self.collect_source_tables().await?;

-        // Validate that source and sink tables are not the same
-        let sink_table_name = &self.data.task.sink_table_name;
-        if self
-            .data
-            .task
-            .source_table_names
-            .iter()
-            .any(|source| source == sink_table_name)
-        {
-            return error::UnsupportedSnafu {
-                operation: format!(
-                    "Creating flow with source and sink table being the same: {}",
-                    sink_table_name
-                ),
-            }
-            .fail();
-        }
-
        if self.data.flow_id.is_none() {
            self.allocate_flow_id().await?;
        }

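The guard removed above rejected a flow whose sink table also appears among its sources, i.e. a flow that would feed itself. A stand-alone sketch of that check as it worked before removal:

```rust
// A flow must not read from the table it writes to.
fn validate_flow(sources: &[&str], sink: &str) -> Result<(), String> {
    if sources.iter().any(|source| *source == sink) {
        return Err(format!(
            "Creating flow with source and sink table being the same: {sink}"
        ));
    }
    Ok(())
}

fn main() {
    assert!(validate_flow(&["t1", "t2"], "t3").is_ok());
    assert!(validate_flow(&["t1", "t3"], "t3").is_err());
}
```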
@@ -27,7 +27,7 @@ use common_telemetry::{debug, error, warn};
use futures::future;
pub use region_request::create_region_request_builder;
use serde::{Deserialize, Serialize};
-use snafu::ResultExt;
+use snafu::{ensure, ResultExt};
use store_api::metadata::ColumnMetadata;
use store_api::metric_engine_consts::ALTER_PHYSICAL_EXTENSION_KEY;
use store_api::storage::{RegionId, RegionNumber};
@@ -35,11 +35,10 @@ use strum::AsRefStr;
use table::metadata::{RawTableInfo, TableId};

use crate::ddl::utils::{
-    add_peer_context_if_needed, extract_column_metadatas, map_to_procedure_error,
-    sync_follower_regions,
+    add_peer_context_if_needed, map_to_procedure_error, sync_follower_regions,
};
use crate::ddl::DdlContext;
-use crate::error::Result;
+use crate::error::{DecodeJsonSnafu, MetadataCorruptionSnafu, Result};
use crate::key::table_route::TableRouteValue;
use crate::lock_key::{CatalogLock, SchemaLock, TableLock, TableNameLock};
use crate::metrics;
@@ -167,23 +166,47 @@ impl CreateLogicalTablesProcedure {
            .into_iter()
            .collect::<Result<Vec<_>>>()?;

-        if let Some(column_metadatas) =
-            extract_column_metadatas(&mut results, ALTER_PHYSICAL_EXTENSION_KEY)?
-        {
-            self.data.physical_columns = column_metadatas;
+        // Collects response from datanodes.
+        let phy_raw_schemas = results
+            .iter_mut()
+            .map(|res| res.extensions.remove(ALTER_PHYSICAL_EXTENSION_KEY))
+            .collect::<Vec<_>>();
+
+        if phy_raw_schemas.is_empty() {
+            self.submit_sync_region_requests(results, region_routes)
+                .await;
+            self.data.state = CreateTablesState::CreateMetadata;
+            return Ok(Status::executing(false));
+        }
+
+        // Verify all the physical schemas are the same
+        // Safety: previous check ensures this vec is not empty
+        let first = phy_raw_schemas.first().unwrap();
+        ensure!(
+            phy_raw_schemas.iter().all(|x| x == first),
+            MetadataCorruptionSnafu {
+                err_msg: "The physical schemas from datanodes are not the same."
+            }
+        );
+
+        // Decodes the physical raw schemas
+        if let Some(phy_raw_schemas) = first {
+            self.data.physical_columns =
+                ColumnMetadata::decode_list(phy_raw_schemas).context(DecodeJsonSnafu)?;
        } else {
            warn!("creating logical table result doesn't contain extension key `{ALTER_PHYSICAL_EXTENSION_KEY}`, leaving the physical table's schema unchanged");
        }

-        self.submit_sync_region_requests(&results, region_routes)
+        self.submit_sync_region_requests(results, region_routes)
            .await;
        self.data.state = CreateTablesState::CreateMetadata;

        Ok(Status::executing(true))
    }

async fn submit_sync_region_requests(
|
||||
&self,
|
||||
results: &[RegionResponse],
|
||||
results: Vec<RegionResponse>,
|
||||
region_routes: &[RegionRoute],
|
||||
) {
|
||||
if let Err(err) = sync_follower_regions(
|
||||
|
||||
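The inlined code above replaces the shared extract_column_metadatas helper: it pulls the encoded schema out of every datanode response, requires all payloads to be byte-identical, and decodes the first one. A self-contained sketch of that consistency check, with RegionResponse reduced to its extensions map and decoding left to the caller:

use std::collections::HashMap;

// Every response may carry an encoded schema under `key`; all present
// payloads must be byte-identical, and the first (if any) is returned.
fn extract_schema(
    responses: &mut [HashMap<String, Vec<u8>>],
    key: &str,
) -> Result<Option<Vec<u8>>, String> {
    let payloads: Vec<Option<Vec<u8>>> =
        responses.iter_mut().map(|ext| ext.remove(key)).collect();
    if payloads.is_empty() {
        return Ok(None);
    }
    let first = payloads.first().unwrap(); // non-empty by the check above
    if !payloads.iter().all(|p| p == first) {
        return Err("physical schemas from datanodes are not the same".into());
    }
    Ok(first.clone())
}

fn main() {
    let mut responses = vec![
        HashMap::from([("schema".to_string(), b"s1".to_vec())]),
        HashMap::from([("schema".to_string(), b"s1".to_vec())]),
    ];
    assert_eq!(
        extract_schema(&mut responses, "schema").unwrap(),
        Some(b"s1".to_vec())
    );
}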
@@ -22,7 +22,7 @@ use table::table_name::TableName;

use crate::cache_invalidator::Context;
use crate::ddl::create_logical_tables::CreateLogicalTablesProcedure;
use crate::ddl::utils::raw_table_info;
use crate::ddl::physical_table_metadata;
use crate::error::{Result, TableInfoNotFoundSnafu};
use crate::instruction::CacheIdent;

@@ -47,7 +47,7 @@ impl CreateLogicalTablesProcedure {
        // Generates new table info
        let raw_table_info = physical_table_info.deref().table_info.clone();

        let new_table_info = raw_table_info::build_new_physical_table_info(
        let new_table_info = physical_table_metadata::build_new_physical_table_info(
            raw_table_info,
            &self.data.physical_columns,
        );

@@ -22,23 +23,20 @@ use common_procedure::error::{
    ExternalSnafu, FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu,
};
use common_procedure::{Context as ProcedureContext, LockKey, Procedure, Status};
use common_telemetry::info;
use common_telemetry::tracing_context::TracingContext;
use common_telemetry::{info, warn};
use futures::future::join_all;
use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt, ResultExt};
use store_api::metadata::ColumnMetadata;
use store_api::metric_engine_consts::TABLE_COLUMN_METADATA_EXTENSION_KEY;
use store_api::storage::{RegionId, RegionNumber};
use strum::AsRefStr;
use table::metadata::{RawTableInfo, TableId};
use table::table_reference::TableReference;

use crate::ddl::create_table_template::{build_template, CreateRequestBuilder};
use crate::ddl::utils::raw_table_info::update_table_info_column_ids;
use crate::ddl::utils::{
    add_peer_context_if_needed, convert_region_routes_to_detecting_regions,
    extract_column_metadatas, map_to_procedure_error, region_storage_path,
    add_peer_context_if_needed, convert_region_routes_to_detecting_regions, map_to_procedure_error,
    region_storage_path,
};
use crate::ddl::{DdlContext, TableMetadata};
use crate::error::{self, Result};
@@ -246,21 +243,14 @@ impl CreateTableProcedure {
            }
        }

        self.creator.data.state = CreateTableState::CreateMetadata;

        let mut results = join_all(create_region_tasks)
        join_all(create_region_tasks)
            .await
            .into_iter()
            .collect::<Result<Vec<_>>>()?;

        if let Some(column_metadatas) =
            extract_column_metadatas(&mut results, TABLE_COLUMN_METADATA_EXTENSION_KEY)?
        {
            self.creator.data.column_metadatas = column_metadatas;
        } else {
            warn!("creating table result doesn't contain extension key `{TABLE_COLUMN_METADATA_EXTENSION_KEY}`, leaving the table's column metadata unchanged");
        }
        self.creator.data.state = CreateTableState::CreateMetadata;

        // TODO(weny): Add more tests.
        Ok(Status::executing(true))
    }

@@ -272,10 +262,7 @@ impl CreateTableProcedure {
        let table_id = self.table_id();
        let manager = &self.context.table_metadata_manager;

        let mut raw_table_info = self.table_info().clone();
        if !self.creator.data.column_metadatas.is_empty() {
            update_table_info_column_ids(&mut raw_table_info, &self.creator.data.column_metadatas);
        }
        let raw_table_info = self.table_info().clone();
        // Safety: the region_wal_options must be allocated.
        let region_wal_options = self.region_wal_options()?.clone();
        // Safety: the table_route must be allocated.
@@ -359,7 +346,6 @@ impl TableCreator {
        Self {
            data: CreateTableData {
                state: CreateTableState::Prepare,
                column_metadatas: vec![],
                task,
                table_route: None,
                region_wal_options: None,
@@ -421,8 +407,6 @@ pub enum CreateTableState {
pub struct CreateTableData {
    pub state: CreateTableState,
    pub task: CreateTableTask,
    #[serde(default)]
    pub column_metadatas: Vec<ColumnMetadata>,
    /// None stands for not allocated yet.
    table_route: Option<PhysicalTableRouteValue>,
    /// None stands for not allocated yet.

@@ -185,15 +185,11 @@ impl DropTableExecutor {
            .await
    }

    /// Invalidates caches for the table.
    /// Invalidates frontend caches
    pub async fn invalidate_table_cache(&self, ctx: &DdlContext) -> Result<()> {
        let cache_invalidator = &ctx.cache_invalidator;
        let ctx = Context {
            subject: Some(format!(
                "Invalidate table cache by dropping table {}, table_id: {}",
                self.table.table_ref(),
                self.table_id,
            )),
            subject: Some("Invalidate table cache by dropping table".to_string()),
        };

        cache_invalidator

@@ -12,11 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::{HashMap, HashSet};
use std::collections::HashSet;

use api::v1::SemanticType;
use common_telemetry::debug;
use common_telemetry::tracing::warn;
use store_api::metadata::ColumnMetadata;
use table::metadata::RawTableInfo;

@@ -25,10 +23,6 @@ pub(crate) fn build_new_physical_table_info(
    mut raw_table_info: RawTableInfo,
    physical_columns: &[ColumnMetadata],
) -> RawTableInfo {
    debug!(
        "building new physical table info for table: {}, table_id: {}",
        raw_table_info.name, raw_table_info.ident.table_id
    );
    let existing_columns = raw_table_info
        .meta
        .schema
@@ -42,8 +36,6 @@ pub(crate) fn build_new_physical_table_info(
    let time_index = &mut raw_table_info.meta.schema.timestamp_index;
    let columns = &mut raw_table_info.meta.schema.column_schemas;
    columns.clear();
    let column_ids = &mut raw_table_info.meta.column_ids;
    column_ids.clear();

    for (idx, col) in physical_columns.iter().enumerate() {
        match col.semantic_type {
@@ -58,7 +50,6 @@ pub(crate) fn build_new_physical_table_info(
        }

        columns.push(col.column_schema.clone());
        column_ids.push(col.column_id);
    }

    if let Some(time_index) = *time_index {
@@ -67,54 +58,3 @@ pub(crate) fn build_new_physical_table_info(

    raw_table_info
}

/// Updates the column IDs in the table info based on the provided column metadata.
///
/// This function validates that the column metadata matches the existing table schema
/// before updating the column ids. If the column metadata doesn't match the table schema,
/// the table info remains unchanged.
pub(crate) fn update_table_info_column_ids(
    raw_table_info: &mut RawTableInfo,
    column_metadatas: &[ColumnMetadata],
) {
    let mut table_column_names = raw_table_info
        .meta
        .schema
        .column_schemas
        .iter()
        .map(|c| c.name.as_str())
        .collect::<Vec<_>>();
    table_column_names.sort_unstable();

    let mut column_names = column_metadatas
        .iter()
        .map(|c| c.column_schema.name.as_str())
        .collect::<Vec<_>>();
    column_names.sort_unstable();

    if table_column_names != column_names {
        warn!(
            "Column metadata doesn't match the table schema for table {}, table_id: {}, column in table: {:?}, column in metadata: {:?}",
            raw_table_info.name,
            raw_table_info.ident.table_id,
            table_column_names,
            column_names,
        );
        return;
    }

    let name_to_id = column_metadatas
        .iter()
        .map(|c| (c.column_schema.name.clone(), c.column_id))
        .collect::<HashMap<_, _>>();

    let schema = &raw_table_info.meta.schema.column_schemas;
    let mut column_ids = Vec::with_capacity(schema.len());
    for column_schema in schema {
        if let Some(id) = name_to_id.get(&column_schema.name) {
            column_ids.push(*id);
        }
    }

    raw_table_info.meta.column_ids = column_ids;
}
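The removed update_table_info_column_ids validates by sorted-name comparison before remapping ids. A compact sketch of the same logic over plain (name, id) pairs; remap_column_ids is a hypothetical name, and None models the "leave the table info unchanged" branch:

use std::collections::HashMap;

// Only rewrite column ids when the metadata covers exactly the same column
// names as the schema; otherwise signal "no change" to the caller.
fn remap_column_ids(
    schema_columns: &[String],
    metadata: &[(String, u32)], // (column name, column id)
) -> Option<Vec<u32>> {
    let mut schema_names: Vec<&str> = schema_columns.iter().map(|s| s.as_str()).collect();
    schema_names.sort_unstable();
    let mut meta_names: Vec<&str> = metadata.iter().map(|(n, _)| n.as_str()).collect();
    meta_names.sort_unstable();
    if schema_names != meta_names {
        return None; // mismatch: caller keeps the old ids and logs a warning
    }
    let name_to_id: HashMap<&str, u32> =
        metadata.iter().map(|(n, id)| (n.as_str(), *id)).collect();
    Some(
        schema_columns
            .iter()
            .filter_map(|n| name_to_id.get(n.as_str()).copied())
            .collect(),
    )
}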
@@ -122,7 +122,6 @@ impl TableMetadataAllocator {
        );

        let peers = self.peer_allocator.alloc(regions).await?;
        debug!("Allocated peers {:?} for table {}", peers, table_id);
        let region_routes = task
            .partitions
            .iter()

@@ -24,14 +24,7 @@ use std::collections::HashMap;
use api::v1::meta::Partition;
use api::v1::{ColumnDataType, SemanticType};
use common_procedure::Status;
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::ColumnSchema;
use store_api::metadata::ColumnMetadata;
use store_api::metric_engine_consts::{
    DATA_SCHEMA_TABLE_ID_COLUMN_NAME, DATA_SCHEMA_TSID_COLUMN_NAME, LOGICAL_TABLE_METADATA_KEY,
    METRIC_ENGINE_NAME,
};
use store_api::storage::consts::ReservedColumnId;
use store_api::metric_engine_consts::{LOGICAL_TABLE_METADATA_KEY, METRIC_ENGINE_NAME};
use table::metadata::{RawTableInfo, TableId};

use crate::ddl::create_logical_tables::CreateLogicalTablesProcedure;
@@ -153,7 +146,6 @@ pub fn test_create_logical_table_task(name: &str) -> CreateTableTask {
    }
}

/// Creates a physical table task with a single region.
pub fn test_create_physical_table_task(name: &str) -> CreateTableTask {
    let create_table = TestCreateTableExprBuilder::default()
        .column_defs([
@@ -190,95 +182,3 @@ pub fn test_create_physical_table_task(name: &str) -> CreateTableTask {
        table_info,
    }
}

/// Creates a column metadata list with tag fields.
pub fn test_column_metadatas(tag_fields: &[&str]) -> Vec<ColumnMetadata> {
    let mut output = Vec::with_capacity(tag_fields.len() + 4);
    output.extend([
        ColumnMetadata {
            column_schema: ColumnSchema::new(
                "ts",
                ConcreteDataType::timestamp_millisecond_datatype(),
                false,
            ),
            semantic_type: SemanticType::Timestamp,
            column_id: 0,
        },
        ColumnMetadata {
            column_schema: ColumnSchema::new("value", ConcreteDataType::float64_datatype(), false),
            semantic_type: SemanticType::Field,
            column_id: 1,
        },
        ColumnMetadata {
            column_schema: ColumnSchema::new(
                DATA_SCHEMA_TABLE_ID_COLUMN_NAME,
                ConcreteDataType::timestamp_millisecond_datatype(),
                false,
            ),
            semantic_type: SemanticType::Tag,
            column_id: ReservedColumnId::table_id(),
        },
        ColumnMetadata {
            column_schema: ColumnSchema::new(
                DATA_SCHEMA_TSID_COLUMN_NAME,
                ConcreteDataType::float64_datatype(),
                false,
            ),
            semantic_type: SemanticType::Tag,
            column_id: ReservedColumnId::tsid(),
        },
    ]);

    for (i, name) in tag_fields.iter().enumerate() {
        output.push(ColumnMetadata {
            column_schema: ColumnSchema::new(
                name.to_string(),
                ConcreteDataType::string_datatype(),
                true,
            ),
            semantic_type: SemanticType::Tag,
            column_id: (i + 2) as u32,
        });
    }

    output
}

/// Asserts the column names.
pub fn assert_column_name(table_info: &RawTableInfo, expected_column_names: &[&str]) {
    assert_eq!(
        table_info
            .meta
            .schema
            .column_schemas
            .iter()
            .map(|c| c.name.to_string())
            .collect::<Vec<_>>(),
        expected_column_names
    );
}

/// Asserts the column metadatas.
pub fn assert_column_name_and_id(column_metadatas: &[ColumnMetadata], expected: &[(&str, u32)]) {
    assert_eq!(expected.len(), column_metadatas.len());
    for (name, id) in expected {
        let column_metadata = column_metadatas
            .iter()
            .find(|c| c.column_id == *id)
            .unwrap();
        assert_eq!(column_metadata.column_schema.name, *name);
    }
}

/// Gets the raw table info.
pub async fn get_raw_table_info(ddl_context: &DdlContext, table_id: TableId) -> RawTableInfo {
    ddl_context
        .table_metadata_manager
        .table_info_manager()
        .get(table_id)
        .await
        .unwrap()
        .unwrap()
        .into_inner()
        .table_info
}

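Among the removed helpers, assert_column_name_and_id pairs each expected id with its name. A sketch of that assertion over simplified (name, id) tuples, with assert_name_and_id as a hypothetical name:

// For every expected (name, id) pair, the metadata list must contain a
// column with that id carrying that name.
fn assert_name_and_id(metadatas: &[(String, u32)], expected: &[(&str, u32)]) {
    assert_eq!(expected.len(), metadatas.len());
    for (name, id) in expected {
        let found = metadatas
            .iter()
            .find(|(_, meta_id)| meta_id == id)
            .unwrap_or_else(|| panic!("no column with id {id}"));
        assert_eq!(found.0, *name);
    }
}

fn main() {
    let metadatas = vec![("ts".to_string(), 0), ("host".to_string(), 1)];
    assert_name_and_id(&metadatas, &[("ts", 0), ("host", 1)]);
}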
@@ -132,7 +132,6 @@ pub fn build_raw_table_info_from_expr(expr: &CreateTableExpr) -> RawTableInfo {
        options: TableOptions::try_from_iter(&expr.table_options).unwrap(),
        created_on: DateTime::default(),
        partition_key_indices: vec![],
        column_ids: vec![],
    },
    table_type: TableType::Base,
}

@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use api::region::RegionResponse;
use api::v1::region::RegionRequest;
use common_error::ext::{BoxedError, ErrorExt, StackError};
@@ -34,7 +32,6 @@ impl MockDatanodeHandler for () {
        Ok(RegionResponse {
            affected_rows: 0,
            extensions: Default::default(),
            metadata: Vec::new(),
        })
    }

@@ -47,13 +44,10 @@ impl MockDatanodeHandler for () {
    }
}

type RegionRequestHandler =
    Arc<dyn Fn(Peer, RegionRequest) -> Result<RegionResponse> + Send + Sync>;

#[derive(Clone)]
pub struct DatanodeWatcher {
    sender: mpsc::Sender<(Peer, RegionRequest)>,
    handler: Option<RegionRequestHandler>,
    handler: Option<fn(Peer, RegionRequest) -> Result<RegionResponse>>,
}

impl DatanodeWatcher {
@@ -66,9 +60,9 @@ impl DatanodeWatcher {

    pub fn with_handler(
        mut self,
        user_handler: impl Fn(Peer, RegionRequest) -> Result<RegionResponse> + Send + Sync + 'static,
        user_handler: fn(Peer, RegionRequest) -> Result<RegionResponse>,
    ) -> Self {
        self.handler = Some(Arc::new(user_handler));
        self.handler = Some(user_handler);
        self
    }
}
@@ -81,7 +75,7 @@ impl MockDatanodeHandler for DatanodeWatcher {
            .send((peer.clone(), request.clone()))
            .await
            .unwrap();
        if let Some(handler) = self.handler.as_ref() {
        if let Some(handler) = self.handler {
            handler(peer.clone(), request)
        } else {
            Ok(RegionResponse::new(0))

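The first variant stores the hook as Arc<dyn Fn ...> rather than a plain fn pointer; that is what lets tests install capturing closures (such as a handler carrying canned column metadata) while keeping the watcher Clone. A minimal self-contained sketch of the trade-off, with Request/Response reduced to strings:

use std::sync::Arc;

type Request = String;
type Response = String;

// Arc<dyn Fn> keeps the struct Clone and accepts capturing closures;
// a plain `fn` pointer cannot capture any state.
#[derive(Clone)]
struct Watcher {
    handler: Option<Arc<dyn Fn(Request) -> Response + Send + Sync>>,
}

impl Watcher {
    fn with_handler(
        mut self,
        user_handler: impl Fn(Request) -> Response + Send + Sync + 'static,
    ) -> Self {
        self.handler = Some(Arc::new(user_handler));
        self
    }
}

fn main() {
    let suffix = "!".to_string(); // captured state
    let watcher = Watcher { handler: None }.with_handler(move |req| req + &suffix);
    let handler = watcher.handler.clone().expect("handler was set");
    assert_eq!(handler.as_ref()("ping".to_string()), "ping!");
}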
@@ -23,20 +23,17 @@ use api::v1::{ColumnDataType, SemanticType};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_procedure::{Procedure, ProcedureId, Status};
use common_procedure_test::MockContextProvider;
use store_api::metadata::ColumnMetadata;
use store_api::metric_engine_consts::{ALTER_PHYSICAL_EXTENSION_KEY, MANIFEST_INFO_EXTENSION_KEY};
use store_api::metric_engine_consts::MANIFEST_INFO_EXTENSION_KEY;
use store_api::region_engine::RegionManifestInfo;
use store_api::storage::consts::ReservedColumnId;
use store_api::storage::RegionId;
use tokio::sync::mpsc;

use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;
use crate::ddl::test_util::alter_table::TestAlterTableExprBuilder;
use crate::ddl::test_util::columns::TestColumnDefBuilder;
use crate::ddl::test_util::datanode_handler::DatanodeWatcher;
use crate::ddl::test_util::datanode_handler::{DatanodeWatcher, NaiveDatanodeHandler};
use crate::ddl::test_util::{
    assert_column_name, create_logical_table, create_physical_table,
    create_physical_table_metadata, get_raw_table_info, test_column_metadatas,
    create_logical_table, create_physical_table, create_physical_table_metadata,
    test_create_physical_table_task,
};
use crate::error::Error::{AlterLogicalTablesInvalidArguments, TableNotFound};
@@ -99,52 +96,6 @@ fn make_alter_logical_table_rename_task(
    }
}

fn make_alters_request_handler(
    column_metadatas: Vec<ColumnMetadata>,
) -> impl Fn(Peer, RegionRequest) -> Result<RegionResponse> {
    move |_peer: Peer, request: RegionRequest| {
        if let region_request::Body::Alters(_) = request.body.unwrap() {
            let mut response = RegionResponse::new(0);
            // Default region id for physical table.
            let region_id = RegionId::new(1000, 1);
            response.extensions.insert(
                MANIFEST_INFO_EXTENSION_KEY.to_string(),
                RegionManifestInfo::encode_list(&[(
                    region_id,
                    RegionManifestInfo::metric(1, 0, 2, 0),
                )])
                .unwrap(),
            );
            response.extensions.insert(
                ALTER_PHYSICAL_EXTENSION_KEY.to_string(),
                ColumnMetadata::encode_list(&column_metadatas).unwrap(),
            );
            return Ok(response);
        }
        Ok(RegionResponse::new(0))
    }
}

fn assert_alters_request(
    peer: Peer,
    request: RegionRequest,
    expected_peer_id: u64,
    expected_region_ids: &[RegionId],
) {
    assert_eq!(peer.id, expected_peer_id);
    let Some(region_request::Body::Alters(req)) = request.body else {
        unreachable!();
    };
    for (i, region_id) in expected_region_ids.iter().enumerate() {
        assert_eq!(
            req.requests[i].region_id,
            *region_id,
            "actual region id: {}",
            RegionId::from_u64(req.requests[i].region_id)
        );
    }
}

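The removed make_alters_request_handler is a factory returning a capturing closure that stamps encoded manifest info and column metadata into the response extensions. A sketch of the same shape with encoding and keys faked as plain bytes and strings ("alters" and "alter_physical" are illustrative stand-ins, not the crate's constants):

use std::collections::HashMap;

// Simplified stand-in for RegionResponse: affected rows plus extensions.
struct Response {
    affected_rows: usize,
    extensions: HashMap<String, Vec<u8>>,
}

// Factory returning a capturing closure: only "alters" requests get the
// canned column metadata attached, mirroring the real handler's shape.
fn make_alters_handler(encoded_columns: Vec<u8>) -> impl Fn(&str) -> Response {
    move |request_kind| {
        let mut response = Response {
            affected_rows: 0,
            extensions: HashMap::new(),
        };
        if request_kind == "alters" {
            response
                .extensions
                .insert("alter_physical".to_string(), encoded_columns.clone());
        }
        response
    }
}

fn main() {
    let handler = make_alters_handler(b"ts,value,new_col".to_vec());
    assert!(handler("alters").extensions.contains_key("alter_physical"));
    assert!(handler("creates").extensions.is_empty());
    assert_eq!(handler("creates").affected_rows, 0);
}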
#[tokio::test]
async fn test_on_prepare_check_schema() {
    let node_manager = Arc::new(MockDatanodeManager::new(()));
@@ -254,20 +205,15 @@ async fn test_on_prepare() {

#[tokio::test]
async fn test_on_update_metadata() {
    common_telemetry::init_default_ut_logging();
    let (tx, mut rx) = mpsc::channel(8);
    let test_column_metadatas = test_column_metadatas(&["new_col", "mew_col"]);
    let datanode_handler =
        DatanodeWatcher::new(tx).with_handler(make_alters_request_handler(test_column_metadatas));
    let node_manager = Arc::new(MockDatanodeManager::new(datanode_handler));
    let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
    let ddl_context = new_ddl_context(node_manager);

    // Creates physical table
    let phy_id = create_physical_table(&ddl_context, "phy").await;
    // Creates logical tables
    let logical_table1_id = create_logical_table(ddl_context.clone(), phy_id, "table1").await;
    let logical_table2_id = create_logical_table(ddl_context.clone(), phy_id, "table2").await;
    let logical_table3_id = create_logical_table(ddl_context.clone(), phy_id, "table3").await;
    create_logical_table(ddl_context.clone(), phy_id, "table1").await;
    create_logical_table(ddl_context.clone(), phy_id, "table2").await;
    create_logical_table(ddl_context.clone(), phy_id, "table3").await;
    create_logical_table(ddl_context.clone(), phy_id, "table4").await;
    create_logical_table(ddl_context.clone(), phy_id, "table5").await;

@@ -277,7 +223,7 @@ async fn test_on_update_metadata() {
        make_alter_logical_table_add_column_task(None, "table3", vec!["new_col".to_string()]),
    ];

    let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context.clone());
    let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context);
    let mut status = procedure.on_prepare().await.unwrap();
    assert_matches!(
        status,
@@ -309,52 +255,18 @@ async fn test_on_update_metadata() {
            clean_poisons: false
        }
    );
    let (peer, request) = rx.try_recv().unwrap();
    rx.try_recv().unwrap_err();
    assert_alters_request(
        peer,
        request,
        0,
        &[
            RegionId::new(logical_table1_id, 0),
            RegionId::new(logical_table2_id, 0),
            RegionId::new(logical_table3_id, 0),
        ],
    );

    let table_info = get_raw_table_info(&ddl_context, phy_id).await;
    assert_column_name(
        &table_info,
        &["ts", "value", "__table_id", "__tsid", "new_col", "mew_col"],
    );
    assert_eq!(
        table_info.meta.column_ids,
        vec![
            0,
            1,
            ReservedColumnId::table_id(),
            ReservedColumnId::tsid(),
            2,
            3
        ]
    );
}

#[tokio::test]
async fn test_on_part_duplicate_alter_request() {
    common_telemetry::init_default_ut_logging();
    let (tx, mut rx) = mpsc::channel(8);
    let column_metadatas = test_column_metadatas(&["col_0"]);
    let handler =
        DatanodeWatcher::new(tx).with_handler(make_alters_request_handler(column_metadatas));
    let node_manager = Arc::new(MockDatanodeManager::new(handler));
    let mut ddl_context = new_ddl_context(node_manager);
    let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
    let ddl_context = new_ddl_context(node_manager);

    // Creates physical table
    let phy_id = create_physical_table(&ddl_context, "phy").await;
    // Creates 2 logical tables
    let logical_table1_id = create_logical_table(ddl_context.clone(), phy_id, "table1").await;
    let logical_table2_id = create_logical_table(ddl_context.clone(), phy_id, "table2").await;
    create_logical_table(ddl_context.clone(), phy_id, "table1").await;
    create_logical_table(ddl_context.clone(), phy_id, "table2").await;

    let tasks = vec![
        make_alter_logical_table_add_column_task(None, "table1", vec!["col_0".to_string()]),
@@ -393,40 +305,6 @@ async fn test_on_part_duplicate_alter_request() {
            clean_poisons: false
        }
    );
    let (peer, request) = rx.try_recv().unwrap();
    rx.try_recv().unwrap_err();
    assert_alters_request(
        peer,
        request,
        0,
        &[
            RegionId::new(logical_table1_id, 0),
            RegionId::new(logical_table2_id, 0),
        ],
    );

    let table_info = get_raw_table_info(&ddl_context, phy_id).await;
    assert_column_name(
        &table_info,
        &["ts", "value", "__table_id", "__tsid", "col_0"],
    );
    assert_eq!(
        table_info.meta.column_ids,
        vec![
            0,
            1,
            ReservedColumnId::table_id(),
            ReservedColumnId::tsid(),
            2
        ]
    );

    let (tx, mut rx) = mpsc::channel(8);
    let column_metadatas = test_column_metadatas(&["col_0", "new_col_1", "new_col_2"]);
    let handler =
        DatanodeWatcher::new(tx).with_handler(make_alters_request_handler(column_metadatas));
    let node_manager = Arc::new(MockDatanodeManager::new(handler));
    ddl_context.node_manager = node_manager;

    // re-alter
    let tasks = vec![
@@ -479,44 +357,6 @@ async fn test_on_part_duplicate_alter_request() {
        }
    );

    let (peer, request) = rx.try_recv().unwrap();
    rx.try_recv().unwrap_err();
    assert_alters_request(
        peer,
        request,
        0,
        &[
            RegionId::new(logical_table1_id, 0),
            RegionId::new(logical_table2_id, 0),
        ],
    );

    let table_info = get_raw_table_info(&ddl_context, phy_id).await;
    assert_column_name(
        &table_info,
        &[
            "ts",
            "value",
            "__table_id",
            "__tsid",
            "col_0",
            "new_col_1",
            "new_col_2",
        ],
    );
    assert_eq!(
        table_info.meta.column_ids,
        vec![
            0,
            1,
            ReservedColumnId::table_id(),
            ReservedColumnId::tsid(),
            2,
            3,
            4,
        ]
    );

    let table_name_keys = vec![
        TableNameKey::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "table1"),
        TableNameKey::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "table2"),
@@ -582,13 +422,27 @@ async fn test_on_part_duplicate_alter_request() {
    );
}

fn alters_request_handler(_peer: Peer, request: RegionRequest) -> Result<RegionResponse> {
    if let region_request::Body::Alters(_) = request.body.unwrap() {
        let mut response = RegionResponse::new(0);
        // Default region id for physical table.
        let region_id = RegionId::new(1000, 1);
        response.extensions.insert(
            MANIFEST_INFO_EXTENSION_KEY.to_string(),
            RegionManifestInfo::encode_list(&[(region_id, RegionManifestInfo::metric(1, 0, 2, 0))])
                .unwrap(),
        );
        return Ok(response);
    }

    Ok(RegionResponse::new(0))
}

#[tokio::test]
async fn test_on_submit_alter_region_request() {
    common_telemetry::init_default_ut_logging();
    let (tx, mut rx) = mpsc::channel(8);
    let column_metadatas = test_column_metadatas(&["new_col", "mew_col"]);
    let handler =
        DatanodeWatcher::new(tx).with_handler(make_alters_request_handler(column_metadatas));
    let handler = DatanodeWatcher::new(tx).with_handler(alters_request_handler);
    let node_manager = Arc::new(MockDatanodeManager::new(handler));
    let ddl_context = new_ddl_context(node_manager);

@@ -30,12 +30,7 @@ use common_error::status_code::StatusCode;
use common_procedure::store::poison_store::PoisonStore;
use common_procedure::{ProcedureId, Status};
use common_procedure_test::MockContextProvider;
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::ColumnSchema;
use store_api::metadata::ColumnMetadata;
use store_api::metric_engine_consts::{
    MANIFEST_INFO_EXTENSION_KEY, TABLE_COLUMN_METADATA_EXTENSION_KEY,
};
use store_api::metric_engine_consts::MANIFEST_INFO_EXTENSION_KEY;
use store_api::region_engine::RegionManifestInfo;
use store_api::storage::RegionId;
use table::requests::TTL_KEY;
@@ -48,7 +43,6 @@ use crate::ddl::test_util::datanode_handler::{
    AllFailureDatanodeHandler, DatanodeWatcher, PartialSuccessDatanodeHandler,
    RequestOutdatedErrorDatanodeHandler,
};
use crate::ddl::test_util::{assert_column_name, assert_column_name_and_id};
use crate::error::{Error, Result};
use crate::key::datanode_table::DatanodeTableKey;
use crate::key::table_name::TableNameKey;
@@ -185,30 +179,6 @@ fn alter_request_handler(_peer: Peer, request: RegionRequest) -> Result<RegionRe
            RegionManifestInfo::encode_list(&[(region_id, RegionManifestInfo::mito(1, 1))])
                .unwrap(),
        );
        response.extensions.insert(
            TABLE_COLUMN_METADATA_EXTENSION_KEY.to_string(),
            ColumnMetadata::encode_list(&[
                ColumnMetadata {
                    column_schema: ColumnSchema::new(
                        "ts",
                        ConcreteDataType::timestamp_millisecond_datatype(),
                        false,
                    ),
                    semantic_type: SemanticType::Timestamp,
                    column_id: 0,
                },
                ColumnMetadata {
                    column_schema: ColumnSchema::new(
                        "host",
                        ConcreteDataType::float64_datatype(),
                        false,
                    ),
                    semantic_type: SemanticType::Tag,
                    column_id: 1,
                },
            ])
            .unwrap(),
        );
        return Ok(response);
    }

@@ -217,7 +187,6 @@ fn alter_request_handler(_peer: Peer, request: RegionRequest) -> Result<RegionRe

#[tokio::test]
async fn test_on_submit_alter_request() {
    common_telemetry::init_default_ut_logging();
    let (tx, mut rx) = mpsc::channel(8);
    let datanode_handler = DatanodeWatcher::new(tx).with_handler(alter_request_handler);
    let node_manager = Arc::new(MockDatanodeManager::new(datanode_handler));
@@ -265,8 +234,6 @@ async fn test_on_submit_alter_request() {
    assert_sync_request(peer, request, 4, RegionId::new(table_id, 2), 1);
    let (peer, request) = results.remove(0);
    assert_sync_request(peer, request, 5, RegionId::new(table_id, 1), 1);
    let column_metadatas = procedure.data().column_metadatas();
    assert_column_name_and_id(column_metadatas, &[("ts", 0), ("host", 1)]);
}

#[tokio::test]
@@ -411,7 +378,6 @@ async fn test_on_update_metadata_rename() {

#[tokio::test]
async fn test_on_update_metadata_add_columns() {
    common_telemetry::init_default_ut_logging();
    let node_manager = Arc::new(MockDatanodeManager::new(()));
    let ddl_context = new_ddl_context(node_manager);
    let table_name = "foo";
@@ -465,34 +431,6 @@ async fn test_on_update_metadata_add_columns() {
        .submit_alter_region_requests(procedure_id, provider.as_ref())
        .await
        .unwrap();
    // The returned column metadatas are empty.
    assert!(procedure.data().column_metadatas().is_empty());
    procedure.mut_data().set_column_metadatas(vec![
        ColumnMetadata {
            column_schema: ColumnSchema::new(
                "ts",
                ConcreteDataType::timestamp_millisecond_datatype(),
                false,
            ),
            semantic_type: SemanticType::Timestamp,
            column_id: 0,
        },
        ColumnMetadata {
            column_schema: ColumnSchema::new("host", ConcreteDataType::float64_datatype(), false),
            semantic_type: SemanticType::Tag,
            column_id: 1,
        },
        ColumnMetadata {
            column_schema: ColumnSchema::new("cpu", ConcreteDataType::float64_datatype(), false),
            semantic_type: SemanticType::Tag,
            column_id: 2,
        },
        ColumnMetadata {
            column_schema: ColumnSchema::new("my_tag3", ConcreteDataType::string_datatype(), true),
            semantic_type: SemanticType::Tag,
            column_id: 3,
        },
    ]);
    procedure.on_update_metadata().await.unwrap();

    let table_info = ddl_context
@@ -509,8 +447,6 @@ async fn test_on_update_metadata_add_columns() {
        table_info.meta.schema.column_schemas.len() as u32,
        table_info.meta.next_column_id
    );
    assert_column_name(&table_info, &["ts", "host", "cpu", "my_tag3"]);
    assert_eq!(table_info.meta.column_ids, vec![0, 1, 2, 3]);
}

#[tokio::test]

@@ -141,41 +141,3 @@ async fn test_create_flow() {
    let err = procedure.on_prepare().await.unwrap_err();
    assert_matches!(err, error::Error::FlowAlreadyExists { .. });
}

#[tokio::test]
async fn test_create_flow_same_source_and_sink_table() {
    let table_id = 1024;
    let table_name = TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "same_table");

    // Use the same table for both source and sink
    let source_table_names = vec![table_name.clone()];
    let sink_table_name = table_name.clone();

    let node_manager = Arc::new(MockFlownodeManager::new(NaiveFlownodeHandler));
    let ddl_context = new_ddl_context(node_manager);

    // Create the table first so it exists
    let task = test_create_table_task("same_table", table_id);
    ddl_context
        .table_metadata_manager
        .create_table_metadata(
            task.table_info.clone(),
            TableRouteValue::physical(vec![]),
            HashMap::new(),
        )
        .await
        .unwrap();

    // Try to create a flow with same source and sink table - should fail
    let task = test_create_flow_task("my_flow", source_table_names, sink_table_name, false);
    let query_ctx = QueryContext::arc().into();
    let mut procedure = CreateFlowProcedure::new(task, query_ctx, ddl_context);
    let err = procedure.on_prepare().await.unwrap_err();
    assert_matches!(err, error::Error::Unsupported { .. });

    // Verify the error message contains information about the same table
    if let error::Error::Unsupported { operation, .. } = &err {
        assert!(operation.contains("source and sink table being the same"));
        assert!(operation.contains("same_table"));
    }
}

@@ -23,18 +23,15 @@ use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_procedure::{Context as ProcedureContext, Procedure, ProcedureId, Status};
use common_procedure_test::MockContextProvider;
use store_api::metadata::ColumnMetadata;
use store_api::metric_engine_consts::{ALTER_PHYSICAL_EXTENSION_KEY, MANIFEST_INFO_EXTENSION_KEY};
use store_api::metric_engine_consts::MANIFEST_INFO_EXTENSION_KEY;
use store_api::region_engine::RegionManifestInfo;
use store_api::storage::consts::ReservedColumnId;
use store_api::storage::RegionId;
use tokio::sync::mpsc;

use crate::ddl::create_logical_tables::CreateLogicalTablesProcedure;
use crate::ddl::test_util::datanode_handler::{DatanodeWatcher, NaiveDatanodeHandler};
use crate::ddl::test_util::{
    assert_column_name, create_physical_table_metadata, get_raw_table_info, test_column_metadatas,
    test_create_logical_table_task, test_create_physical_table_task,
    create_physical_table_metadata, test_create_logical_table_task, test_create_physical_table_task,
};
use crate::ddl::TableMetadata;
use crate::error::{Error, Result};
@@ -42,54 +39,6 @@ use crate::key::table_route::{PhysicalTableRouteValue, TableRouteValue};
use crate::rpc::router::{Region, RegionRoute};
use crate::test_util::{new_ddl_context, MockDatanodeManager};

fn make_creates_request_handler(
    column_metadatas: Vec<ColumnMetadata>,
) -> impl Fn(Peer, RegionRequest) -> Result<RegionResponse> {
    move |_peer, request| {
        let _ = _peer;
        if let region_request::Body::Creates(_) = request.body.unwrap() {
            let mut response = RegionResponse::new(0);
            // Default region id for physical table.
            let region_id = RegionId::new(1024, 1);
            response.extensions.insert(
                MANIFEST_INFO_EXTENSION_KEY.to_string(),
                RegionManifestInfo::encode_list(&[(
                    region_id,
                    RegionManifestInfo::metric(1, 0, 2, 0),
                )])
                .unwrap(),
            );
            response.extensions.insert(
                ALTER_PHYSICAL_EXTENSION_KEY.to_string(),
                ColumnMetadata::encode_list(&column_metadatas).unwrap(),
            );
            return Ok(response);
        }

        Ok(RegionResponse::new(0))
    }
}

fn assert_creates_request(
    peer: Peer,
    request: RegionRequest,
    expected_peer_id: u64,
    expected_region_ids: &[RegionId],
) {
    assert_eq!(peer.id, expected_peer_id);
    let Some(region_request::Body::Creates(req)) = request.body else {
        unreachable!();
    };
    for (i, region_id) in expected_region_ids.iter().enumerate() {
        assert_eq!(
            req.requests[i].region_id,
            *region_id,
            "actual region id: {}",
            RegionId::from_u64(req.requests[i].region_id)
        );
    }
}

#[tokio::test]
async fn test_on_prepare_physical_table_not_found() {
    let node_manager = Arc::new(MockDatanodeManager::new(()));
@@ -278,12 +227,7 @@ async fn test_on_prepare_part_logical_tables_exist() {

#[tokio::test]
async fn test_on_create_metadata() {
    common_telemetry::init_default_ut_logging();
    let (tx, mut rx) = mpsc::channel(8);
    let column_metadatas = test_column_metadatas(&["host", "cpu"]);
    let datanode_handler =
        DatanodeWatcher::new(tx).with_handler(make_creates_request_handler(column_metadatas));
    let node_manager = Arc::new(MockDatanodeManager::new(datanode_handler));
    let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
    let ddl_context = new_ddl_context(node_manager);
    // Prepares physical table metadata.
    let mut create_physical_table_task = test_create_physical_table_task("phy_table");
@@ -311,7 +255,7 @@ async fn test_on_create_metadata() {
    let mut procedure = CreateLogicalTablesProcedure::new(
        vec![task, yet_another_task],
        physical_table_id,
        ddl_context.clone(),
        ddl_context,
    );
    let status = procedure.on_prepare().await.unwrap();
    assert_matches!(
@@ -330,42 +274,11 @@ async fn test_on_create_metadata() {
    let status = procedure.execute(&ctx).await.unwrap();
    let table_ids = status.downcast_output_ref::<Vec<u32>>().unwrap();
    assert_eq!(*table_ids, vec![1025, 1026]);

    let (peer, request) = rx.try_recv().unwrap();
    rx.try_recv().unwrap_err();
    assert_creates_request(
        peer,
        request,
        0,
        &[RegionId::new(1025, 0), RegionId::new(1026, 0)],
    );

    let table_info = get_raw_table_info(&ddl_context, table_id).await;
    assert_column_name(
        &table_info,
        &["ts", "value", "__table_id", "__tsid", "host", "cpu"],
    );
    assert_eq!(
        table_info.meta.column_ids,
        vec![
            0,
            1,
            ReservedColumnId::table_id(),
            ReservedColumnId::tsid(),
            2,
            3
        ]
    );
}

#[tokio::test]
async fn test_on_create_metadata_part_logical_tables_exist() {
    common_telemetry::init_default_ut_logging();
    let (tx, mut rx) = mpsc::channel(8);
    let column_metadatas = test_column_metadatas(&["host", "cpu"]);
    let datanode_handler =
        DatanodeWatcher::new(tx).with_handler(make_creates_request_handler(column_metadatas));
    let node_manager = Arc::new(MockDatanodeManager::new(datanode_handler));
    let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
    let ddl_context = new_ddl_context(node_manager);
    // Prepares physical table metadata.
    let mut create_physical_table_task = test_create_physical_table_task("phy_table");
@@ -404,7 +317,7 @@ async fn test_on_create_metadata_part_logical_tables_exist() {
    let mut procedure = CreateLogicalTablesProcedure::new(
        vec![task, non_exist_task],
        physical_table_id,
        ddl_context.clone(),
        ddl_context,
    );
    let status = procedure.on_prepare().await.unwrap();
    assert_matches!(
@@ -423,27 +336,6 @@ async fn test_on_create_metadata_part_logical_tables_exist() {
    let status = procedure.execute(&ctx).await.unwrap();
    let table_ids = status.downcast_output_ref::<Vec<u32>>().unwrap();
    assert_eq!(*table_ids, vec![8192, 1025]);

    let (peer, request) = rx.try_recv().unwrap();
    rx.try_recv().unwrap_err();
    assert_creates_request(peer, request, 0, &[RegionId::new(1025, 0)]);

    let table_info = get_raw_table_info(&ddl_context, table_id).await;
    assert_column_name(
        &table_info,
        &["ts", "value", "__table_id", "__tsid", "host", "cpu"],
    );
    assert_eq!(
        table_info.meta.column_ids,
        vec![
            0,
            1,
            ReservedColumnId::table_id(),
            ReservedColumnId::tsid(),
            2,
            3
        ]
    );
}

#[tokio::test]
@@ -507,13 +399,27 @@ async fn test_on_create_metadata_err() {
    assert!(!error.is_retry_later());
}

fn creates_request_handler(_peer: Peer, request: RegionRequest) -> Result<RegionResponse> {
    if let region_request::Body::Creates(_) = request.body.unwrap() {
        let mut response = RegionResponse::new(0);
        // Default region id for physical table.
        let region_id = RegionId::new(1024, 1);
        response.extensions.insert(
            MANIFEST_INFO_EXTENSION_KEY.to_string(),
            RegionManifestInfo::encode_list(&[(region_id, RegionManifestInfo::metric(1, 0, 2, 0))])
                .unwrap(),
        );
        return Ok(response);
    }

    Ok(RegionResponse::new(0))
}

#[tokio::test]
async fn test_on_submit_create_request() {
    common_telemetry::init_default_ut_logging();
    let (tx, mut rx) = mpsc::channel(8);
    let column_metadatas = test_column_metadatas(&["host", "cpu"]);
    let handler =
        DatanodeWatcher::new(tx).with_handler(make_creates_request_handler(column_metadatas));
    let handler = DatanodeWatcher::new(tx).with_handler(creates_request_handler);
    let node_manager = Arc::new(MockDatanodeManager::new(handler));
    let ddl_context = new_ddl_context(node_manager);
    let mut create_physical_table_task = test_create_physical_table_task("phy_table");

@@ -16,9 +16,7 @@ use std::assert_matches::assert_matches;
use std::collections::HashMap;
use std::sync::Arc;

use api::region::RegionResponse;
use api::v1::meta::{Partition, Peer};
use api::v1::region::{region_request, RegionRequest};
use api::v1::meta::Partition;
use api::v1::{ColumnDataType, SemanticType};
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
@@ -26,12 +24,7 @@ use common_procedure::{Context as ProcedureContext, Procedure, ProcedureId, Stat
use common_procedure_test::{
    execute_procedure_until, execute_procedure_until_done, MockContextProvider,
};
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::ColumnSchema;
use store_api::metadata::ColumnMetadata;
use store_api::metric_engine_consts::TABLE_COLUMN_METADATA_EXTENSION_KEY;
use store_api::storage::RegionId;
use tokio::sync::mpsc;

use crate::ddl::create_table::{CreateTableProcedure, CreateTableState};
use crate::ddl::test_util::columns::TestColumnDefBuilder;
@@ -39,73 +32,14 @@ use crate::ddl::test_util::create_table::{
    build_raw_table_info_from_expr, TestCreateTableExprBuilder,
};
use crate::ddl::test_util::datanode_handler::{
    DatanodeWatcher, NaiveDatanodeHandler, RetryErrorDatanodeHandler,
    UnexpectedErrorDatanodeHandler,
    NaiveDatanodeHandler, RetryErrorDatanodeHandler, UnexpectedErrorDatanodeHandler,
};
use crate::ddl::test_util::{assert_column_name, get_raw_table_info};
use crate::error::{Error, Result};
use crate::error::Error;
use crate::key::table_route::TableRouteValue;
use crate::kv_backend::memory::MemoryKvBackend;
use crate::rpc::ddl::CreateTableTask;
use crate::test_util::{new_ddl_context, new_ddl_context_with_kv_backend, MockDatanodeManager};

fn create_request_handler(_peer: Peer, request: RegionRequest) -> Result<RegionResponse> {
    let _ = _peer;
    if let region_request::Body::Create(_) = request.body.unwrap() {
        let mut response = RegionResponse::new(0);

        response.extensions.insert(
            TABLE_COLUMN_METADATA_EXTENSION_KEY.to_string(),
            ColumnMetadata::encode_list(&[
                ColumnMetadata {
                    column_schema: ColumnSchema::new(
                        "ts",
                        ConcreteDataType::timestamp_millisecond_datatype(),
                        false,
                    ),
                    semantic_type: SemanticType::Timestamp,
                    column_id: 0,
                },
                ColumnMetadata {
                    column_schema: ColumnSchema::new(
                        "host",
                        ConcreteDataType::float64_datatype(),
                        false,
                    ),
                    semantic_type: SemanticType::Tag,
                    column_id: 1,
                },
                ColumnMetadata {
                    column_schema: ColumnSchema::new(
                        "cpu",
                        ConcreteDataType::float64_datatype(),
                        false,
                    ),
                    semantic_type: SemanticType::Tag,
                    column_id: 2,
                },
            ])
            .unwrap(),
        );
        return Ok(response);
    }

    Ok(RegionResponse::new(0))
}

fn assert_create_request(
    peer: Peer,
    request: RegionRequest,
    expected_peer_id: u64,
    expected_region_id: RegionId,
) {
    assert_eq!(peer.id, expected_peer_id);
    let Some(region_request::Body::Create(req)) = request.body else {
        unreachable!();
    };
    assert_eq!(req.region_id, expected_region_id);
}

pub(crate) fn test_create_table_task(name: &str) -> CreateTableTask {
    let create_table = TestCreateTableExprBuilder::default()
        .column_defs([
@@ -296,13 +230,11 @@ async fn test_on_create_metadata_error() {
#[tokio::test]
async fn test_on_create_metadata() {
    common_telemetry::init_default_ut_logging();
    let (tx, mut rx) = mpsc::channel(8);
    let datanode_handler = DatanodeWatcher::new(tx).with_handler(create_request_handler);
    let node_manager = Arc::new(MockDatanodeManager::new(datanode_handler));
    let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
    let ddl_context = new_ddl_context(node_manager);
    let task = test_create_table_task("foo");
    assert!(!task.create_table.create_if_not_exists);
    let mut procedure = CreateTableProcedure::new(task, ddl_context.clone());
    let mut procedure = CreateTableProcedure::new(task, ddl_context);
    procedure.on_prepare().await.unwrap();
    let ctx = ProcedureContext {
        procedure_id: ProcedureId::random(),
@@ -311,16 +243,8 @@ async fn test_on_create_metadata() {
    procedure.execute(&ctx).await.unwrap();
    // Triggers procedure to create table metadata
    let status = procedure.execute(&ctx).await.unwrap();
    let table_id = *status.downcast_output_ref::<u32>().unwrap();
    assert_eq!(table_id, 1024);

    let (peer, request) = rx.try_recv().unwrap();
    rx.try_recv().unwrap_err();
    assert_create_request(peer, request, 0, RegionId::new(table_id, 0));

    let table_info = get_raw_table_info(&ddl_context, table_id).await;
    assert_column_name(&table_info, &["ts", "host", "cpu"]);
    assert_eq!(table_info.meta.column_ids, vec![0, 1, 2]);
    let table_id = status.downcast_output_ref::<u32>().unwrap();
    assert_eq!(*table_id, 1024);
}

#[tokio::test]

@@ -12,10 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.

pub(crate) mod raw_table_info;
pub(crate) mod table_id;
pub(crate) mod table_info;

use std::collections::HashMap;
use std::fmt::Debug;

@@ -33,7 +29,6 @@ use common_telemetry::{error, info, warn};
use common_wal::options::WalOptions;
use futures::future::join_all;
use snafu::{ensure, OptionExt, ResultExt};
use store_api::metadata::ColumnMetadata;
use store_api::metric_engine_consts::{LOGICAL_TABLE_METADATA_KEY, MANIFEST_INFO_EXTENSION_KEY};
use store_api::region_engine::RegionManifestInfo;
use store_api::storage::{RegionId, RegionNumber};
@@ -42,8 +37,8 @@ use table::table_reference::TableReference;

use crate::ddl::{DdlContext, DetectingRegion};
use crate::error::{
    self, DecodeJsonSnafu, Error, MetadataCorruptionSnafu, OperateDatanodeSnafu,
    ParseWalOptionsSnafu, Result, TableNotFoundSnafu, UnsupportedSnafu,
    self, Error, OperateDatanodeSnafu, ParseWalOptionsSnafu, Result, TableNotFoundSnafu,
    UnsupportedSnafu,
};
use crate::key::datanode_table::DatanodeTableValue;
use crate::key::table_name::TableNameKey;
@@ -319,23 +314,11 @@ pub fn parse_manifest_infos_from_extensions(
    Ok(data_manifest_version)
}

/// Parses column metadatas from extensions.
pub fn parse_column_metadatas(
    extensions: &HashMap<String, Vec<u8>>,
    key: &str,
) -> Result<Vec<ColumnMetadata>> {
    let value = extensions.get(key).context(error::UnexpectedSnafu {
        err_msg: format!("column metadata extension not found: {}", key),
    })?;
    let column_metadatas = ColumnMetadata::decode_list(value).context(error::SerdeJsonSnafu {})?;
    Ok(column_metadatas)
}

/// Sync follower regions on datanodes.
pub async fn sync_follower_regions(
    context: &DdlContext,
    table_id: TableId,
    results: &[RegionResponse],
    results: Vec<RegionResponse>,
    region_routes: &[RegionRoute],
    engine: &str,
) -> Result<()> {
@@ -348,7 +331,7 @@ pub async fn sync_follower_regions(
    }

    let results = results
        .iter()
        .into_iter()
        .map(|response| parse_manifest_infos_from_extensions(&response.extensions))
        .collect::<Result<Vec<_>>>()?
        .into_iter()
@@ -435,38 +418,6 @@ pub async fn sync_follower_regions(
    Ok(())
}

/// Extracts column metadatas from extensions.
pub fn extract_column_metadatas(
    results: &mut [RegionResponse],
    key: &str,
) -> Result<Option<Vec<ColumnMetadata>>> {
    let schemas = results
        .iter_mut()
        .map(|r| r.extensions.remove(key))
        .collect::<Vec<_>>();

    if schemas.is_empty() {
        return Ok(None);
    }

    // Verify all the physical schemas are the same
    // Safety: previous check ensures this vec is not empty
    let first = schemas.first().unwrap();
    ensure!(
        schemas.iter().all(|x| x == first),
        MetadataCorruptionSnafu {
            err_msg: "The table column metadata schemas from datanodes are not the same."
        }
    );

    if let Some(first) = first {
        let column_metadatas = ColumnMetadata::decode_list(first).context(DecodeJsonSnafu)?;
        Ok(Some(column_metadatas))
    } else {
        Ok(None)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

@@ -1,46 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use snafu::OptionExt;
use store_api::storage::TableId;
use table::table_reference::TableReference;

use crate::error::{Result, TableNotFoundSnafu};
use crate::key::table_name::{TableNameKey, TableNameManager};

/// Get all the table ids from the table names.
///
/// Returns an error if any table does not exist.
pub(crate) async fn get_all_table_ids_by_names<'a>(
    table_name_manager: &TableNameManager,
    table_names: &[TableReference<'a>],
) -> Result<Vec<TableId>> {
    let table_name_keys = table_names
        .iter()
        .map(TableNameKey::from)
        .collect::<Vec<_>>();
    let table_name_values = table_name_manager.batch_get(table_name_keys).await?;
    let mut table_ids = Vec::with_capacity(table_name_values.len());
    for (value, table_name) in table_name_values.into_iter().zip(table_names) {
        let value = value
            .with_context(|| TableNotFoundSnafu {
                table_name: table_name.to_string(),
            })?
            .table_id();

        table_ids.push(value);
    }

    Ok(table_ids)
}
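The deleted helper batch-resolves table names to ids and fails on the first missing entry, naming the offending table. A sketch of that pattern with the batch result modeled as one Option per requested name; resolve_table_ids is a hypothetical name:

// Zip the batch-get result with the requested names so the error message
// can say exactly which table was missing.
fn resolve_table_ids(values: Vec<Option<u32>>, names: &[&str]) -> Result<Vec<u32>, String> {
    let mut ids = Vec::with_capacity(values.len());
    for (value, name) in values.into_iter().zip(names) {
        let id = value.ok_or_else(|| format!("table not found: {name}"))?;
        ids.push(id);
    }
    Ok(ids)
}

fn main() {
    assert_eq!(resolve_table_ids(vec![Some(1), Some(2)], &["a", "b"]), Ok(vec![1, 2]));
    assert!(resolve_table_ids(vec![Some(1), None], &["a", "b"]).is_err());
}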
@@ -1,44 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use snafu::OptionExt;
use store_api::storage::TableId;
use table::table_reference::TableReference;

use crate::error::{Result, TableInfoNotFoundSnafu};
use crate::key::table_info::{TableInfoManager, TableInfoValue};
use crate::key::DeserializedValueWithBytes;

/// Get all table info values by table ids.
///
/// Returns an error if any table does not exist.
pub(crate) async fn get_all_table_info_values_by_table_ids<'a>(
    table_info_manager: &TableInfoManager,
    table_ids: &[TableId],
    table_names: &[TableReference<'a>],
) -> Result<Vec<DeserializedValueWithBytes<TableInfoValue>>> {
    let mut table_info_map = table_info_manager.batch_get_raw(table_ids).await?;
    let mut table_info_values = Vec::with_capacity(table_ids.len());
    for (table_id, table_name) in table_ids.iter().zip(table_names) {
        let table_info_value =
            table_info_map
                .remove(table_id)
                .with_context(|| TableInfoNotFoundSnafu {
                    table: table_name.to_string(),
                })?;
        table_info_values.push(table_info_value);
    }

    Ok(table_info_values)
}
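Its sibling does the same over a map keyed by table id, using remove() to take ownership of each value without cloning. A sketch with table info values simplified to strings; take_table_infos is a hypothetical name:

use std::collections::HashMap;

// remove() moves the value out of the map, so no clone is needed; a missing
// id errors with the corresponding table name.
fn take_table_infos(
    mut batch: HashMap<u32, String>,
    ids: &[u32],
    names: &[&str],
) -> Result<Vec<String>, String> {
    let mut values = Vec::with_capacity(ids.len());
    for (id, name) in ids.iter().zip(names) {
        let value = batch
            .remove(id)
            .ok_or_else(|| format!("table info not found: {name}"))?;
        values.push(value);
    }
    Ok(values)
}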
@@ -50,11 +50,7 @@ use crate::key::{DeserializedValueWithBytes, TableMetadataManagerRef};
#[cfg(feature = "enterprise")]
use crate::rpc::ddl::trigger::CreateTriggerTask;
#[cfg(feature = "enterprise")]
use crate::rpc::ddl::trigger::DropTriggerTask;
#[cfg(feature = "enterprise")]
use crate::rpc::ddl::DdlTask::CreateTrigger;
#[cfg(feature = "enterprise")]
use crate::rpc::ddl::DdlTask::DropTrigger;
use crate::rpc::ddl::DdlTask::{
    AlterDatabase, AlterLogicalTables, AlterTable, CreateDatabase, CreateFlow, CreateLogicalTables,
    CreateTable, CreateView, DropDatabase, DropFlow, DropLogicalTables, DropTable, DropView,
@@ -95,14 +91,6 @@ pub trait TriggerDdlManager: Send + Sync {
        query_context: QueryContext,
    ) -> Result<SubmitDdlTaskResponse>;

    async fn drop_trigger(
        &self,
        drop_trigger_task: DropTriggerTask,
        procedure_manager: ProcedureManagerRef,
        ddl_context: DdlContext,
        query_context: QueryContext,
    ) -> Result<SubmitDdlTaskResponse>;

    fn as_any(&self) -> &dyn std::any::Any;
}

@@ -660,28 +648,6 @@ async fn handle_drop_flow_task(
    })
}

#[cfg(feature = "enterprise")]
async fn handle_drop_trigger_task(
    ddl_manager: &DdlManager,
    drop_trigger_task: DropTriggerTask,
    query_context: QueryContext,
) -> Result<SubmitDdlTaskResponse> {
    let Some(m) = ddl_manager.trigger_ddl_manager.as_ref() else {
        return UnsupportedSnafu {
            operation: "drop trigger",
        }
        .fail();
    };

    m.drop_trigger(
        drop_trigger_task,
        ddl_manager.procedure_manager.clone(),
        ddl_manager.ddl_context.clone(),
        query_context,
    )
    .await
}

async fn handle_drop_view_task(
    ddl_manager: &DdlManager,
    drop_view_task: DropViewTask,
@@ -869,11 +835,6 @@ impl ProcedureExecutor for DdlManager {
                    handle_create_flow_task(self, create_flow_task, request.query_context.into())
                        .await
                }
                DropFlow(drop_flow_task) => handle_drop_flow_task(self, drop_flow_task).await,
                CreateView(create_view_task) => {
                    handle_create_view_task(self, create_view_task).await
                }
                DropView(drop_view_task) => handle_drop_view_task(self, drop_view_task).await,
                #[cfg(feature = "enterprise")]
                CreateTrigger(create_trigger_task) => {
                    handle_create_trigger_task(
@@ -883,11 +844,11 @@ impl ProcedureExecutor for DdlManager {
                    )
                    .await
                }
                #[cfg(feature = "enterprise")]
                DropTrigger(drop_trigger_task) => {
                    handle_drop_trigger_task(self, drop_trigger_task, request.query_context.into())
                        .await
                DropFlow(drop_flow_task) => handle_drop_flow_task(self, drop_flow_task).await,
                CreateView(create_view_task) => {
                    handle_create_view_task(self, create_view_task).await
                }
                DropView(drop_view_task) => handle_drop_view_task(self, drop_view_task).await,
            }
        }
        .trace(span)
@@ -995,7 +956,6 @@ mod tests {
|
||||
Default::default(),
|
||||
state_store,
|
||||
poison_manager,
|
||||
None,
|
||||
));
|
||||
|
||||
let _ = DdlManager::try_new(
|
||||
|
||||
@@ -100,8 +100,8 @@
|
||||
pub mod catalog_name;
|
||||
pub mod datanode_table;
|
||||
pub mod flow;
|
||||
pub mod maintenance;
|
||||
pub mod node_address;
|
||||
pub mod runtime_switch;
|
||||
mod schema_metadata_manager;
|
||||
pub mod schema_name;
|
||||
pub mod table_info;
|
||||
@@ -164,9 +164,7 @@ use crate::state_store::PoisonValue;
|
||||
use crate::DatanodeId;
|
||||
|
||||
pub const NAME_PATTERN: &str = r"[a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*";
|
||||
pub const LEGACY_MAINTENANCE_KEY: &str = "__maintenance";
|
||||
pub const MAINTENANCE_KEY: &str = "__switches/maintenance";
|
||||
pub const PAUSE_PROCEDURE_KEY: &str = "__switches/pause_procedure";
|
||||
pub const MAINTENANCE_KEY: &str = "__maintenance";
|
||||
|
||||
pub const DATANODE_TABLE_KEY_PREFIX: &str = "__dn_table";
|
||||
pub const TABLE_INFO_KEY_PREFIX: &str = "__table_info";
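A sketch of what NAME_PATTERN accepts, assuming the regex crate and anchoring the pattern so it must match the whole name:

    use regex::Regex;

    fn is_valid_name(name: &str) -> bool {
        // NAME_PATTERN as defined above, anchored at both ends.
        let re = Regex::new(r"^[a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*$").unwrap();
        re.is_match(name)
    }

    // is_valid_name("my_table") == true
    // is_valid_name("9table") == false: a leading digit is not allowed.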

86
src/common/meta/src/key/maintenance.rs
Normal file
@@ -0,0 +1,86 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use crate::error::Result;
use crate::key::MAINTENANCE_KEY;
use crate::kv_backend::KvBackendRef;
use crate::rpc::store::PutRequest;

pub type MaintenanceModeManagerRef = Arc<MaintenanceModeManager>;

/// The maintenance mode manager.
///
/// Used to enable or disable maintenance mode.
#[derive(Clone)]
pub struct MaintenanceModeManager {
    kv_backend: KvBackendRef,
}

impl MaintenanceModeManager {
    pub fn new(kv_backend: KvBackendRef) -> Self {
        Self { kv_backend }
    }

    /// Enables maintenance mode.
    pub async fn set_maintenance_mode(&self) -> Result<()> {
        let req = PutRequest {
            key: Vec::from(MAINTENANCE_KEY),
            value: vec![],
            prev_kv: false,
        };
        self.kv_backend.put(req).await?;
        Ok(())
    }

    /// Unsets maintenance mode.
    pub async fn unset_maintenance_mode(&self) -> Result<()> {
        self.kv_backend
            .delete(MAINTENANCE_KEY.as_bytes(), false)
            .await?;
        Ok(())
    }

    /// Returns true if maintenance mode is enabled.
    pub async fn maintenance_mode(&self) -> Result<bool> {
        self.kv_backend.exists(MAINTENANCE_KEY.as_bytes()).await
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use crate::key::maintenance::MaintenanceModeManager;
    use crate::kv_backend::memory::MemoryKvBackend;

    #[tokio::test]
    async fn test_maintenance_mode_manager() {
        let maintenance_mode_manager = Arc::new(MaintenanceModeManager::new(Arc::new(
            MemoryKvBackend::new(),
        )));
        assert!(!maintenance_mode_manager.maintenance_mode().await.unwrap());
        maintenance_mode_manager
            .set_maintenance_mode()
            .await
            .unwrap();
        assert!(maintenance_mode_manager.maintenance_mode().await.unwrap());
        maintenance_mode_manager
            .unset_maintenance_mode()
            .await
            .unwrap();
        assert!(!maintenance_mode_manager.maintenance_mode().await.unwrap());
    }
}
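src/common/meta/src/key/runtime_switch.rs (deleted file; path inferred from the `use crate::key::runtime_switch::RuntimeSwitchManager;` import in its tests)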
@@ -1,224 +0,0 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;
use std::time::Duration;

use common_error::ext::BoxedError;
use common_procedure::local::PauseAware;
use moka::future::Cache;
use snafu::ResultExt;

use crate::error::{GetCacheSnafu, Result};
use crate::key::{LEGACY_MAINTENANCE_KEY, MAINTENANCE_KEY, PAUSE_PROCEDURE_KEY};
use crate::kv_backend::KvBackendRef;
use crate::rpc::store::{BatchDeleteRequest, PutRequest};

pub type RuntimeSwitchManagerRef = Arc<RuntimeSwitchManager>;

/// The runtime switch manager.
///
/// Used to enable or disable runtime switches.
#[derive(Clone)]
pub struct RuntimeSwitchManager {
    kv_backend: KvBackendRef,
    cache: Cache<Vec<u8>, Option<Vec<u8>>>,
}

#[async_trait::async_trait]
impl PauseAware for RuntimeSwitchManager {
    async fn is_paused(&self) -> std::result::Result<bool, BoxedError> {
        self.is_procedure_paused().await.map_err(BoxedError::new)
    }
}

const CACHE_TTL: Duration = Duration::from_secs(10);
const MAX_CAPACITY: u64 = 32;

impl RuntimeSwitchManager {
    pub fn new(kv_backend: KvBackendRef) -> Self {
        let cache = Cache::builder()
            .time_to_live(CACHE_TTL)
            .max_capacity(MAX_CAPACITY)
            .build();
        Self { kv_backend, cache }
    }

    async fn put_key(&self, key: &str) -> Result<()> {
        let req = PutRequest {
            key: Vec::from(key),
            value: vec![],
            prev_kv: false,
        };
        self.kv_backend.put(req).await?;
        self.cache.invalidate(key.as_bytes()).await;
        Ok(())
    }

    async fn delete_keys(&self, keys: &[&str]) -> Result<()> {
        let req = BatchDeleteRequest::new()
            .with_keys(keys.iter().map(|x| x.as_bytes().to_vec()).collect());
        self.kv_backend.batch_delete(req).await?;
        for key in keys {
            self.cache.invalidate(key.as_bytes()).await;
        }
        Ok(())
    }

    /// Returns true if the key exists.
    async fn exists(&self, key: &str) -> Result<bool> {
        let key = key.as_bytes().to_vec();
        let kv_backend = self.kv_backend.clone();
        let value = self
            .cache
            .try_get_with(key.clone(), async move {
                kv_backend.get(&key).await.map(|v| v.map(|v| v.value))
            })
            .await
            .context(GetCacheSnafu)?;

        Ok(value.is_some())
    }

    /// Enables maintenance mode.
    pub async fn set_maintenance_mode(&self) -> Result<()> {
        self.put_key(MAINTENANCE_KEY).await
    }

    /// Unsets maintenance mode.
    pub async fn unset_maintenance_mode(&self) -> Result<()> {
        self.delete_keys(&[MAINTENANCE_KEY, LEGACY_MAINTENANCE_KEY])
            .await
    }

    /// Returns true if maintenance mode is enabled.
    pub async fn maintenance_mode(&self) -> Result<bool> {
        let exists = self.exists(MAINTENANCE_KEY).await?;
        if exists {
            return Ok(true);
        }

        let exists = self.exists(LEGACY_MAINTENANCE_KEY).await?;
        if exists {
            return Ok(true);
        }

        Ok(false)
    }

    /// Pauses handling of incoming procedure requests.
    pub async fn pause_procedure(&self) -> Result<()> {
        self.put_key(PAUSE_PROCEDURE_KEY).await
    }

    /// Resumes processing of incoming procedure requests.
    pub async fn resume_procedure(&self) -> Result<()> {
        self.delete_keys(&[PAUSE_PROCEDURE_KEY]).await
    }

    /// Returns true if the system is currently pausing incoming procedure requests.
    pub async fn is_procedure_paused(&self) -> Result<bool> {
        self.exists(PAUSE_PROCEDURE_KEY).await
    }
}
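The `exists` method above caches both present and absent keys through moka's `try_get_with`, which also deduplicates concurrent loads of the same key. A self-contained sketch of that idiom, assuming the moka and tokio crates and a stubbed backend lookup:

    use std::time::Duration;
    use moka::future::Cache;

    #[tokio::main]
    async fn main() {
        // Same shape as the manager's cache: key bytes -> optional value bytes.
        let cache: Cache<Vec<u8>, Option<Vec<u8>>> = Cache::builder()
            .time_to_live(Duration::from_secs(10))
            .max_capacity(32)
            .build();

        // The loader runs at most once per key across concurrent callers;
        // a successful Ok(None) is cached too, so misses are not re-fetched
        // until the TTL expires.
        let value = cache
            .try_get_with(b"__switches/maintenance".to_vec(), async {
                Ok::<Option<Vec<u8>>, std::io::Error>(None) // stubbed kv lookup
            })
            .await
            .unwrap();
        assert!(value.is_none());
    }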

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use crate::key::runtime_switch::RuntimeSwitchManager;
    use crate::key::{LEGACY_MAINTENANCE_KEY, MAINTENANCE_KEY};
    use crate::kv_backend::memory::MemoryKvBackend;
    use crate::kv_backend::KvBackend;
    use crate::rpc::store::PutRequest;

    #[tokio::test]
    async fn test_runtime_switch_manager_basic() {
        let runtime_switch_manager =
            Arc::new(RuntimeSwitchManager::new(Arc::new(MemoryKvBackend::new())));
        runtime_switch_manager
            .put_key(MAINTENANCE_KEY)
            .await
            .unwrap();
        let v = runtime_switch_manager
            .cache
            .get(MAINTENANCE_KEY.as_bytes())
            .await;
        assert!(v.is_none());
        runtime_switch_manager
            .exists(MAINTENANCE_KEY)
            .await
            .unwrap();
        let v = runtime_switch_manager
            .cache
            .get(MAINTENANCE_KEY.as_bytes())
            .await;
        assert!(v.is_some());
        runtime_switch_manager
            .delete_keys(&[MAINTENANCE_KEY])
            .await
            .unwrap();
        let v = runtime_switch_manager
            .cache
            .get(MAINTENANCE_KEY.as_bytes())
            .await;
        assert!(v.is_none());
    }

    #[tokio::test]
    async fn test_runtime_switch_manager() {
        let runtime_switch_manager =
            Arc::new(RuntimeSwitchManager::new(Arc::new(MemoryKvBackend::new())));
        assert!(!runtime_switch_manager.maintenance_mode().await.unwrap());
        runtime_switch_manager.set_maintenance_mode().await.unwrap();
        assert!(runtime_switch_manager.maintenance_mode().await.unwrap());
        runtime_switch_manager
            .unset_maintenance_mode()
            .await
            .unwrap();
        assert!(!runtime_switch_manager.maintenance_mode().await.unwrap());
    }

    #[tokio::test]
    async fn test_runtime_switch_manager_with_legacy_key() {
        let kv_backend = Arc::new(MemoryKvBackend::new());
        kv_backend
            .put(PutRequest {
                key: Vec::from(LEGACY_MAINTENANCE_KEY),
                value: vec![],
                prev_kv: false,
            })
            .await
            .unwrap();
        let runtime_switch_manager = Arc::new(RuntimeSwitchManager::new(kv_backend));
        assert!(runtime_switch_manager.maintenance_mode().await.unwrap());
        runtime_switch_manager
            .unset_maintenance_mode()
            .await
            .unwrap();
        assert!(!runtime_switch_manager.maintenance_mode().await.unwrap());
        runtime_switch_manager.set_maintenance_mode().await.unwrap();
        assert!(runtime_switch_manager.maintenance_mode().await.unwrap());
    }

    #[tokio::test]
    async fn test_pause_procedure() {
        let runtime_switch_manager =
            Arc::new(RuntimeSwitchManager::new(Arc::new(MemoryKvBackend::new())));
        runtime_switch_manager.pause_procedure().await.unwrap();
        assert!(runtime_switch_manager.is_procedure_paused().await.unwrap());
        runtime_switch_manager.resume_procedure().await.unwrap();
        assert!(!runtime_switch_manager.is_procedure_paused().await.unwrap());
    }
}
@@ -334,7 +334,6 @@ mod tests {
            options: Default::default(),
            region_numbers: vec![1],
            partition_key_indices: vec![],
            column_ids: vec![],
        };

        RawTableInfo {

@@ -103,26 +103,6 @@ pub fn table_decoder(kv: KeyValue) -> Result<(String, TableNameValue)> {
    Ok((table_name_key.table.to_string(), table_name_value))
}

impl<'a> From<&TableReference<'a>> for TableNameKey<'a> {
    fn from(value: &TableReference<'a>) -> Self {
        Self {
            catalog: value.catalog,
            schema: value.schema,
            table: value.table,
        }
    }
}

impl<'a> From<TableReference<'a>> for TableNameKey<'a> {
    fn from(value: TableReference<'a>) -> Self {
        Self {
            catalog: value.catalog,
            schema: value.schema,
            table: value.table,
        }
    }
}

impl<'a> From<&'a TableName> for TableNameKey<'a> {
    fn from(value: &'a TableName) -> Self {
        Self {

@@ -48,11 +48,6 @@ impl TableRouteKey {
    pub fn new(table_id: TableId) -> Self {
        Self { table_id }
    }

    /// Returns the range prefix of the table route key.
    pub fn range_prefix() -> Vec<u8> {
        format!("{}/", TABLE_ROUTE_PREFIX).into_bytes()
    }
}
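The trailing '/' in the prefix matters: a range scan over `range_prefix()` matches only "<TABLE_ROUTE_PREFIX>/<table_id>" keys and not other keys that merely share the textual prefix. A sketch:

    let prefix = TableRouteKey::range_prefix();
    assert!(prefix.ends_with(b"/"));
    // e.g. hand `prefix` to a kv range request as the prefix bound.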
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]

@@ -69,8 +69,6 @@ pub enum DdlTask {
    AlterDatabase(AlterDatabaseTask),
    CreateFlow(CreateFlowTask),
    DropFlow(DropFlowTask),
    #[cfg(feature = "enterprise")]
    DropTrigger(trigger::DropTriggerTask),
    CreateView(CreateViewTask),
    DropView(DropViewTask),
    #[cfg(feature = "enterprise")]
@@ -261,18 +259,6 @@ impl TryFrom<Task> for DdlTask {
                .fail()
                }
            }
            Task::DropTriggerTask(drop_trigger) => {
                #[cfg(feature = "enterprise")]
                return Ok(DdlTask::DropTrigger(drop_trigger.try_into()?));
                #[cfg(not(feature = "enterprise"))]
                {
                    let _ = drop_trigger;
                    crate::error::UnsupportedSnafu {
                        operation: "drop trigger",
                    }
                    .fail()
                }
            }
        }
    }
}
@@ -325,8 +311,6 @@ impl TryFrom<SubmitDdlTaskRequest> for PbDdlTaskRequest {
            DdlTask::DropView(task) => Task::DropViewTask(task.into()),
            #[cfg(feature = "enterprise")]
            DdlTask::CreateTrigger(task) => Task::CreateTriggerTask(task.into()),
            #[cfg(feature = "enterprise")]
            DdlTask::DropTrigger(task) => Task::DropTriggerTask(task.into()),
        };

        Ok(Self {
@@ -1374,7 +1358,6 @@ mod tests {
            options: Default::default(),
            created_on: Default::default(),
            partition_key_indices: Default::default(),
            column_ids: Default::default(),
        };

        // construct RawTableInfo
@@ -1,13 +1,10 @@
use std::collections::HashMap;
use std::time::Duration;

use api::v1::meta::{
    CreateTriggerTask as PbCreateTriggerTask, DropTriggerTask as PbDropTriggerTask,
};
use api::v1::meta::CreateTriggerTask as PbCreateTriggerTask;
use api::v1::notify_channel::ChannelType as PbChannelType;
use api::v1::{
    CreateTriggerExpr as PbCreateTriggerExpr, DropTriggerExpr as PbDropTriggerExpr,
    NotifyChannel as PbNotifyChannel, WebhookOptions as PbWebhookOptions,
    CreateTriggerExpr, NotifyChannel as PbNotifyChannel, WebhookOptions as PbWebhookOptions,
};
use serde::{Deserialize, Serialize};
use snafu::OptionExt;
@@ -59,7 +56,7 @@ impl From<CreateTriggerTask> for PbCreateTriggerTask {
            .map(PbNotifyChannel::from)
            .collect();

        let expr = PbCreateTriggerExpr {
        let expr = CreateTriggerExpr {
            catalog_name: task.catalog_name,
            trigger_name: task.trigger_name,
            create_if_not_exists: task.if_not_exists,
@@ -142,86 +139,17 @@ impl TryFrom<PbNotifyChannel> for NotifyChannel {
    }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DropTriggerTask {
    pub catalog_name: String,
    pub trigger_name: String,
    pub drop_if_exists: bool,
}

impl From<DropTriggerTask> for PbDropTriggerTask {
    fn from(task: DropTriggerTask) -> Self {
        let expr = PbDropTriggerExpr {
            catalog_name: task.catalog_name,
            trigger_name: task.trigger_name,
            drop_if_exists: task.drop_if_exists,
        };

        PbDropTriggerTask {
            drop_trigger: Some(expr),
        }
    }
}

impl TryFrom<PbDropTriggerTask> for DropTriggerTask {
    type Error = error::Error;

    fn try_from(task: PbDropTriggerTask) -> Result<Self> {
        let expr = task.drop_trigger.context(error::InvalidProtoMsgSnafu {
            err_msg: "expected drop_trigger",
        })?;

        Ok(DropTriggerTask {
            catalog_name: expr.catalog_name,
            trigger_name: expr.trigger_name,
            drop_if_exists: expr.drop_if_exists,
        })
    }
}

impl DdlTask {
    /// Creates a [`DdlTask`] to create a trigger.
    pub fn new_create_trigger(expr: CreateTriggerTask) -> Self {
        DdlTask::CreateTrigger(expr)
    }

    /// Creates a [`DdlTask`] to drop a trigger.
    pub fn new_drop_trigger(expr: DropTriggerTask) -> Self {
        DdlTask::DropTrigger(expr)
    }
}
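A sketch of constructing a drop-trigger DDL task through the helper above; the catalog and trigger names are placeholders:

    let task = DdlTask::new_drop_trigger(DropTriggerTask {
        catalog_name: "greptime".to_string(),
        trigger_name: "cpu_high_trigger".to_string(),
        drop_if_exists: true,
    });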

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_convert_drop_trigger_task() {
        let original = DropTriggerTask {
            catalog_name: "test_catalog".to_string(),
            trigger_name: "test_trigger".to_string(),
            drop_if_exists: true,
        };

        let pb_task: PbDropTriggerTask = original.clone().into();

        let expr = pb_task.drop_trigger.as_ref().unwrap();
        assert_eq!(expr.catalog_name, "test_catalog");
        assert_eq!(expr.trigger_name, "test_trigger");
        assert!(expr.drop_if_exists);

        let round_tripped = DropTriggerTask::try_from(pb_task).unwrap();

        assert_eq!(original.catalog_name, round_tripped.catalog_name);
        assert_eq!(original.trigger_name, round_tripped.trigger_name);
        assert_eq!(original.drop_if_exists, round_tripped.drop_if_exists);

        // Test invalid case where drop_trigger is None
        let invalid_task = PbDropTriggerTask { drop_trigger: None };
        let result = DropTriggerTask::try_from(invalid_task);
        assert!(result.is_err());
    }

    #[test]
    fn test_convert_create_trigger_task() {
        let original = CreateTriggerTask {
@@ -28,19 +28,6 @@ use crate::PoisonKey;
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
    #[snafu(display("Failed to check procedure manager status"))]
    CheckStatus {
        source: BoxedError,
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("Manager is paused"))]
    ManagerPaused {
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display(
        "Failed to execute procedure due to external error, clean poisons: {}",
        clean_poisons
@@ -259,8 +246,7 @@ impl ErrorExt for Error {
            | Error::ListState { source, .. }
            | Error::PutPoison { source, .. }
            | Error::DeletePoison { source, .. }
            | Error::GetPoison { source, .. }
            | Error::CheckStatus { source, .. } => source.status_code(),
            | Error::GetPoison { source, .. } => source.status_code(),

            Error::ToJson { .. }
            | Error::DeleteState { .. }
@@ -273,8 +259,7 @@ impl ErrorExt for Error {

            Error::RetryTimesExceeded { .. }
            | Error::RollbackTimesExceeded { .. }
            | Error::ManagerNotStart { .. }
            | Error::ManagerPaused { .. } => StatusCode::IllegalState,
            | Error::ManagerNotStart { .. } => StatusCode::IllegalState,

            Error::RollbackNotSupported { .. } => StatusCode::Unsupported,
            Error::LoaderConflict { .. } | Error::DuplicateProcedure { .. } => {

@@ -22,7 +22,6 @@ use std::time::{Duration, Instant};

use async_trait::async_trait;
use backon::ExponentialBuilder;
use common_error::ext::BoxedError;
use common_runtime::{RepeatedTask, TaskFunction};
use common_telemetry::tracing_context::{FutureExt, TracingContext};
use common_telemetry::{error, info, tracing};
@@ -31,10 +30,9 @@ use tokio::sync::watch::{self, Receiver, Sender};
use tokio::sync::{Mutex as TokioMutex, Notify};

use crate::error::{
    self, CheckStatusSnafu, DuplicateProcedureSnafu, Error, LoaderConflictSnafu,
    ManagerNotStartSnafu, ManagerPausedSnafu, PoisonKeyNotDefinedSnafu, ProcedureNotFoundSnafu,
    Result, StartRemoveOutdatedMetaTaskSnafu, StopRemoveOutdatedMetaTaskSnafu,
    TooManyRunningProceduresSnafu,
    self, DuplicateProcedureSnafu, Error, LoaderConflictSnafu, ManagerNotStartSnafu,
    PoisonKeyNotDefinedSnafu, ProcedureNotFoundSnafu, Result, StartRemoveOutdatedMetaTaskSnafu,
    StopRemoveOutdatedMetaTaskSnafu, TooManyRunningProceduresSnafu,
};
use crate::local::runner::Runner;
use crate::procedure::{BoxedProcedureLoader, InitProcedureState, PoisonKeys, ProcedureInfo};
@@ -524,14 +522,6 @@ impl Default for ManagerConfig {
    }
}

type PauseAwareRef = Arc<dyn PauseAware>;

#[async_trait]
pub trait PauseAware: Send + Sync {
    /// Returns true if the procedure manager is paused.
    async fn is_paused(&self) -> std::result::Result<bool, BoxedError>;
}
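A sketch of implementing `PauseAware` over a plain in-process flag (e.g. for tests), assuming the `async_trait` and `BoxedError` imports above:

    use std::sync::atomic::{AtomicBool, Ordering};

    struct StaticPause(AtomicBool);

    #[async_trait]
    impl PauseAware for StaticPause {
        async fn is_paused(&self) -> std::result::Result<bool, BoxedError> {
            // No backend round-trip: just report the flag.
            Ok(self.0.load(Ordering::Relaxed))
        }
    }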

/// A [ProcedureManager] that maintains procedure states locally.
pub struct LocalManager {
    manager_ctx: Arc<ManagerContext>,
@@ -541,7 +531,6 @@ pub struct LocalManager {
    /// GC task.
    remove_outdated_meta_task: TokioMutex<Option<RepeatedTask<Error>>>,
    config: ManagerConfig,
    pause_aware: Option<PauseAwareRef>,
}

impl LocalManager {
@@ -550,7 +539,6 @@ impl LocalManager {
        config: ManagerConfig,
        state_store: StateStoreRef,
        poison_store: PoisonStoreRef,
        pause_aware: Option<PauseAwareRef>,
    ) -> LocalManager {
        let manager_ctx = Arc::new(ManagerContext::new(poison_store));

@@ -561,7 +549,6 @@ impl LocalManager {
            retry_delay: config.retry_delay,
            remove_outdated_meta_task: TokioMutex::new(None),
            config,
            pause_aware,
        }
    }

@@ -732,17 +719,6 @@ impl LocalManager {
        let loaders = self.manager_ctx.loaders.lock().unwrap();
        loaders.contains_key(name)
    }

    async fn check_status(&self) -> Result<()> {
        if let Some(pause_aware) = self.pause_aware.as_ref() {
            ensure!(
                !pause_aware.is_paused().await.context(CheckStatusSnafu)?,
                ManagerPausedSnafu
            );
        }

        Ok(())
    }
}

#[async_trait]
@@ -798,7 +774,6 @@ impl ProcedureManager for LocalManager {
            !self.manager_ctx.contains_procedure(procedure_id),
            DuplicateProcedureSnafu { procedure_id }
        );
        self.check_status().await?;

        self.submit_root(
            procedure.id,
@@ -1004,7 +979,7 @@ mod tests {
        };
        let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
        let poison_manager = Arc::new(InMemoryPoisonStore::new());
        let manager = LocalManager::new(config, state_store, poison_manager, None);
        let manager = LocalManager::new(config, state_store, poison_manager);
        manager.manager_ctx.start();

        manager
@@ -1029,7 +1004,7 @@ mod tests {
        };
        let state_store = Arc::new(ObjectStateStore::new(object_store.clone()));
        let poison_manager = Arc::new(InMemoryPoisonStore::new());
        let manager = LocalManager::new(config, state_store, poison_manager, None);
        let manager = LocalManager::new(config, state_store, poison_manager);
        manager.manager_ctx.start();

        manager
@@ -1083,7 +1058,7 @@ mod tests {
        };
        let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
        let poison_manager = Arc::new(InMemoryPoisonStore::new());
        let manager = LocalManager::new(config, state_store, poison_manager, None);
        let manager = LocalManager::new(config, state_store, poison_manager);
        manager.manager_ctx.start();

        let procedure_id = ProcedureId::random();
@@ -1135,7 +1110,7 @@ mod tests {
        };
        let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
        let poison_manager = Arc::new(InMemoryPoisonStore::new());
        let manager = LocalManager::new(config, state_store, poison_manager, None);
        let manager = LocalManager::new(config, state_store, poison_manager);
        manager.manager_ctx.start();

        #[derive(Debug)]
@@ -1216,7 +1191,7 @@ mod tests {
        };
        let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
        let poison_manager = Arc::new(InMemoryPoisonStore::new());
        let manager = LocalManager::new(config, state_store, poison_manager, None);
        let manager = LocalManager::new(config, state_store, poison_manager);

        let mut procedure = ProcedureToLoad::new("submit");
        procedure.lock_key = LockKey::single_exclusive("test.submit");
@@ -1244,7 +1219,7 @@ mod tests {
        };
        let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
        let poison_manager = Arc::new(InMemoryPoisonStore::new());
        let manager = LocalManager::new(config, state_store, poison_manager, None);
        let manager = LocalManager::new(config, state_store, poison_manager);

        manager.start().await.unwrap();
        manager.stop().await.unwrap();
@@ -1281,7 +1256,7 @@ mod tests {
        };
        let state_store = Arc::new(ObjectStateStore::new(object_store.clone()));
        let poison_manager = Arc::new(InMemoryPoisonStore::new());
        let manager = LocalManager::new(config, state_store, poison_manager, None);
        let manager = LocalManager::new(config, state_store, poison_manager);
        manager.manager_ctx.set_running();

        let mut procedure = ProcedureToLoad::new("submit");
@@ -1363,7 +1338,7 @@ mod tests {
        };
        let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
        let poison_manager = Arc::new(InMemoryPoisonStore::new());
        let manager = LocalManager::new(config, state_store, poison_manager, None);
        let manager = LocalManager::new(config, state_store, poison_manager);
        manager.manager_ctx.set_running();

        manager
@@ -1488,7 +1463,7 @@ mod tests {
        };
        let state_store = Arc::new(ObjectStateStore::new(object_store.clone()));
        let poison_manager = Arc::new(InMemoryPoisonStore::new());
        let manager = LocalManager::new(config, state_store, poison_manager, None);
        let manager = LocalManager::new(config, state_store, poison_manager);
        manager.manager_ctx.start();

        let notify = Arc::new(Notify::new());

@@ -83,7 +83,7 @@ mod tests {
        };
        let state_store = Arc::new(ObjectStateStore::new(test_util::new_object_store(&dir)));
        let poison_manager = Arc::new(InMemoryPoisonStore::default());
        let manager = LocalManager::new(config, state_store, poison_manager, None);
        let manager = LocalManager::new(config, state_store, poison_manager);
        manager.start().await.unwrap();

        #[derive(Debug)]

@@ -14,7 +14,7 @@

pub mod columnar_value;
pub mod error;
pub mod function;
mod function;
pub mod logical_plan;
pub mod prelude;
pub mod request;

@@ -1,22 +0,0 @@
[package]
name = "common-sql"
version.workspace = true
edition.workspace = true
license.workspace = true

[dependencies]
common-base.workspace = true
common-datasource.workspace = true
common-decimal.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-time.workspace = true
datafusion-sql.workspace = true
datatypes.workspace = true
hex = "0.4"
jsonb.workspace = true
snafu.workspace = true
sqlparser.workspace = true

[lints]
workspace = true
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff