Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-25 23:49:58 +00:00

Compare commits: 50 commits (recording_ ... feat/prefi)
Commits in this compare (SHA1 only; the author and date columns were not captured):

ab1928d5fd, 09dacc8e9b, dec439db2b, dc76571166, 3e17f8c426, a5df3954f3, 32fd850c20, 2bfdae4f8f, fcb898e9a4, 8fa2fdfc42, 4dc1a1d60f, e375a18011, e0ff701e51, 25645a3303, b32ea7d84c, f164f6eaf3, af1920defc, 7c97fae522, b8070adc3a, 11bfb17328, 1d87bd2d43, ababeaf538, 2cbf51d0be, 3059b04b19, 352b197be4, d0254f9705, 8a86903c73, 0bd322a078, 3811e3f632, c14aa176b5, a922dcd9df, 530ff53422, 73ca39f37e, 0acc6b0354, face361fcb, 9860bca986, 3a83c33a48, 373bd59b07, c8db4b286d, 56c8c0651f, 448e588fa7, f4cbf1d776, b35eefcf45, 408dd55a2f, e463942a5b, 0124a0d156, e23628a4e0, 1d637cad51, a56030e6a5, a71b93dd84
```diff
@@ -41,7 +41,14 @@ runs:
           username: ${{ inputs.dockerhub-image-registry-username }}
           password: ${{ inputs.dockerhub-image-registry-token }}
 
-      - name: Build and push dev-builder-ubuntu image
+      - name: Set up qemu for multi-platform builds
+        uses: docker/setup-qemu-action@v3
+        with:
+          platforms: linux/amd64,linux/arm64
+          # The latest version will lead to segmentation fault.
+          image: tonistiigi/binfmt:qemu-v7.0.0-28
+
+      - name: Build and push dev-builder-ubuntu image # Build image for amd64 and arm64 platform.
         shell: bash
         if: ${{ inputs.build-dev-builder-ubuntu == 'true' }}
         run: |
```
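The qemu registration above can be reproduced outside of Actions; a rough sketch (assumed commands; the pinned binfmt image and platform list come from the step above, the builder name from the Makefile's `BUILDX_BUILDER_NAME` default):

```shell
# Register qemu binfmt handlers, pinned to v7.0.0-28 because the
# latest binfmt image reportedly segfaults (per the comment above).
docker run --privileged --rm tonistiigi/binfmt:qemu-v7.0.0-28 --install amd64,arm64

# Create a buildx builder and run a multi-platform build with it.
docker buildx create --name gtbuilder --use
docker buildx build --platform linux/amd64,linux/arm64 .
```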
```diff
@@ -52,7 +59,7 @@ runs:
             IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
             DEV_BUILDER_IMAGE_TAG=${{ inputs.version }}
 
-      - name: Build and push dev-builder-centos image
+      - name: Build and push dev-builder-centos image # Only build image for amd64 platform.
         shell: bash
         if: ${{ inputs.build-dev-builder-centos == 'true' }}
         run: |
```
```diff
@@ -69,8 +76,7 @@ runs:
         run: |
           make dev-builder \
             BASE_IMAGE=android \
+            BUILDX_MULTI_PLATFORM_BUILD=amd64 \
             IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
             IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
-            DEV_BUILDER_IMAGE_TAG=${{ inputs.version }} && \
-
-          docker push ${{ inputs.dockerhub-image-registry }}/${{ inputs.dockerhub-image-namespace }}/dev-builder-android:${{ inputs.version }}
+            DEV_BUILDER_IMAGE_TAG=${{ inputs.version }}
```
```diff
@@ -52,7 +52,7 @@ runs:
         uses: ./.github/actions/build-greptime-binary
         with:
           base-image: ubuntu
-          features: servers/dashboard,pg_kvbackend
+          features: servers/dashboard,pg_kvbackend,mysql_kvbackend
           cargo-profile: ${{ inputs.cargo-profile }}
           artifacts-dir: greptime-linux-${{ inputs.arch }}-${{ inputs.version }}
           version: ${{ inputs.version }}
@@ -70,7 +70,7 @@
         if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Builds greptime for centos if the host machine is amd64.
         with:
           base-image: centos
-          features: servers/dashboard,pg_kvbackend
+          features: servers/dashboard,pg_kvbackend,mysql_kvbackend
           cargo-profile: ${{ inputs.cargo-profile }}
           artifacts-dir: greptime-linux-${{ inputs.arch }}-centos-${{ inputs.version }}
           version: ${{ inputs.version }}
```
.github/workflows/dev-build.yml (vendored, 7 lines changed):
```diff
@@ -238,6 +238,13 @@ jobs:
       version: ${{ needs.allocate-runners.outputs.version }}
       push-latest-tag: false # Don't push the latest tag to registry.
       dev-mode: true # Only build the standard images.
 
+    - name: Echo Docker image tag to step summary
+      run: |
+        echo "## Docker Image Tag" >> $GITHUB_STEP_SUMMARY
+        echo "Image Tag: \`${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
+        echo "Full Image Name: \`docker.io/${{ vars.IMAGE_NAMESPACE }}/${{ vars.DEV_BUILD_IMAGE_NAME }}:${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
+        echo "Pull Command: \`docker pull docker.io/${{ vars.IMAGE_NAMESPACE }}/${{ vars.DEV_BUILD_IMAGE_NAME }}:${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
+
     - name: Set build result
       id: set-build-result
```
.github/workflows/develop.yml (vendored, 10 lines changed):
```diff
@@ -111,7 +111,7 @@ jobs:
       - name: Build greptime binaries
         shell: bash
         # `cargo gc` will invoke `cargo build` with specified args
-        run: cargo gc -- --bin greptime --bin sqlness-runner --features pg_kvbackend
+        run: cargo gc -- --bin greptime --bin sqlness-runner --features "pg_kvbackend,mysql_kvbackend"
       - name: Pack greptime binaries
         shell: bash
         run: |
@@ -270,7 +270,7 @@ jobs:
       - name: Build greptime bianry
         shell: bash
         # `cargo gc` will invoke `cargo build` with specified args
-        run: cargo gc --profile ci -- --bin greptime --features pg_kvbackend
+        run: cargo gc --profile ci -- --bin greptime --features "pg_kvbackend,mysql_kvbackend"
       - name: Pack greptime binary
         shell: bash
         run: |
@@ -687,7 +687,7 @@ jobs:
         working-directory: tests-integration/fixtures
         run: docker compose up -d --wait
       - name: Run nextest cases
-        run: cargo nextest run --workspace -F dashboard -F pg_kvbackend
+        run: cargo nextest run --workspace -F dashboard -F pg_kvbackend -F mysql_kvbackend
         env:
           CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
           RUST_BACKTRACE: 1
@@ -704,6 +704,7 @@ jobs:
           GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
           GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
           GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
+          GT_MYSQL_ENDPOINTS: mysql://greptimedb:admin@127.0.0.1:3306/mysql
           GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
           GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
           UNITTEST_LOG_DIR: "__unittest_logs"
```
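The job above can be approximated locally; a sketch assuming the fixtures compose file exposes MySQL on the endpoint used in `GT_MYSQL_ENDPOINTS`:

```shell
# Bring up the test fixtures (etcd, MinIO, Postgres, MySQL, Kafka).
(cd tests-integration/fixtures && docker compose up -d --wait)

# Run the suite with the new mysql_kvbackend feature enabled.
export GT_MYSQL_ENDPOINTS=mysql://greptimedb:admin@127.0.0.1:3306/mysql
cargo nextest run --workspace -F dashboard -F pg_kvbackend -F mysql_kvbackend
```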
```diff
@@ -739,7 +740,7 @@ jobs:
         working-directory: tests-integration/fixtures
         run: docker compose up -d --wait
       - name: Run nextest cases
-        run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F dashboard -F pg_kvbackend
+        run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F dashboard -F pg_kvbackend -F mysql_kvbackend
         env:
           CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
           RUST_BACKTRACE: 1
@@ -755,6 +756,7 @@ jobs:
           GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
           GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
           GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
+          GT_MYSQL_ENDPOINTS: mysql://greptimedb:admin@127.0.0.1:3306/mysql
           GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
           GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
           UNITTEST_LOG_DIR: "__unittest_logs"
```
.github/workflows/grafana.yml (new file, vendored, 52 lines):
```yaml
name: Check Grafana Panels

on:
  pull_request:
    branches:
      - main
    paths:
      - 'grafana/**' # Trigger only when files under the grafana/ directory change

jobs:
  check-panels:
    runs-on: ubuntu-latest

    steps:
      # Check out the repository
      - name: Checkout repository
        uses: actions/checkout@v4

      # Install jq (required for the script)
      - name: Install jq
        run: sudo apt-get install -y jq

      # Make the check.sh script executable
      - name: Make check.sh executable
        run: chmod +x grafana/check.sh

      # Run the check.sh script
      - name: Run check.sh
        run: ./grafana/check.sh

      # Only run summary.sh for pull_request events (not for merge queues or final pushes)
      - name: Check if this is a pull request
        id: check-pr
        run: |
          if [[ "${{ github.event_name }}" == "pull_request" ]]; then
            echo "is_pull_request=true" >> $GITHUB_OUTPUT
          else
            echo "is_pull_request=false" >> $GITHUB_OUTPUT
          fi

      # Make the summary.sh script executable
      - name: Make summary.sh executable
        if: steps.check-pr.outputs.is_pull_request == 'true'
        run: chmod +x grafana/summary.sh

      # Run the summary.sh script and add its output to the GitHub Job Summary
      - name: Run summary.sh and add to Job Summary
        if: steps.check-pr.outputs.is_pull_request == 'true'
        run: |
          SUMMARY=$(./grafana/summary.sh)
          echo "### Summary of Grafana Panels" >> $GITHUB_STEP_SUMMARY
          echo "$SUMMARY" >> $GITHUB_STEP_SUMMARY
```
.github/workflows/release.yml (vendored, 2 lines changed):
```diff
@@ -91,7 +91,7 @@ env:
   # The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
   NIGHTLY_RELEASE_PREFIX: nightly
   # Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
-  NEXT_RELEASE_VERSION: v0.13.0
+  NEXT_RELEASE_VERSION: v0.14.0
 
 jobs:
   allocate-runners:
```
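Per the comment in this hunk, nightly tags are derived from `NEXT_RELEASE_VERSION`; a sketch of the scheme:

```shell
NEXT_RELEASE_VERSION=v0.14.0
NIGHTLY_RELEASE_PREFIX=nightly
# Scheduled builds tag as "<version>-<prefix>-YYYYMMDD", e.g. v0.14.0-nightly-20250305.
echo "${NEXT_RELEASE_VERSION}-${NIGHTLY_RELEASE_PREFIX}-$(date +%Y%m%d)"
```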
.gitignore (vendored, 3 lines added):
```diff
@@ -54,3 +54,6 @@ tests-fuzz/corpus/
 # Nix
 .direnv
 .envrc
+
+## default data home
+greptimedb_data
```
Cargo.lock (generated, 216 lines changed). The lockfile churn, condensed:

- Every workspace crate bumps from version 0.13.0 to 0.14.0 (api, auth, cache, catalog, cli, client, cmd, the common-* crates, datanode, datatypes, file-engine, flow, frontend, index, log-query, log-store, meta-client, meta-srv, metric-engine, mito2, object-store, operator, partition, pipeline, plugins, promql, puffin, query, servers, session, sql, sqlness-runner, store-api, substrait, table, tests-fuzz, tests-integration), and internal references to `substrait 0.13.0` become `substrait 0.14.0`.
- greptime-proto: git rev `072ce580502e015df1a6b03a185b60309a7c2a7a` → `c5419bbd20cb42e568ec325a4d71a3c94cc327e1`.
- promql-parser: 0.4.3 from a GreptimeTeam git rev (`27abb8e16003a50c720f00d6c85f41f5fa2a2a8e`) → 0.5.0 from crates.io.
- flate2: 1.0.34 → 1.1.0, now depending on the newly added `libz-rs-sys` 0.4.2 / `zlib-rs` 0.4.2 packages alongside `libz-sys`.
- miniz_oxide: 0.8.0 → 0.8.5; jsonpath-rust: 0.7.3 → 0.7.5 (its thiserror dependency moves from 1.x to 2.x); thiserror/thiserror-impl: 2.0.6 → 2.0.12 across dependents (pgwire, sqlx-core, sqlx-mysql, sqlx-postgres).
- New dependency edges matching the source changes in this compare: `roaring` 0.10.9 for `index`, `sqlx` for `common-meta`, `opendal` for `cli`, `datafusion-common` for `promql`, and `chrono` for `common-function` and `flow`; `meta-srv`'s dependency list also grows by one entry.
Cargo.toml (13 lines changed):
```diff
@@ -67,7 +67,7 @@ members = [
 resolver = "2"
 
 [workspace.package]
-version = "0.13.0"
+version = "0.14.0"
 edition = "2021"
 license = "Apache-2.0"
 
@@ -126,10 +126,11 @@ deadpool-postgres = "0.12"
 derive_builder = "0.12"
 dotenv = "0.15"
 etcd-client = "0.14"
+flate2 = { version = "1.1.0", default-features = false, features = ["zlib-rs"] }
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "072ce580502e015df1a6b03a185b60309a7c2a7a" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "c5419bbd20cb42e568ec325a4d71a3c94cc327e1" }
 hex = "0.4"
 http = "1"
 humantime = "2.1"
@@ -160,9 +161,7 @@ parquet = { version = "53.0.0", default-features = false, features = ["arrow", "
 paste = "1.0"
 pin-project = "1.0"
 prometheus = { version = "0.13.3", features = ["process"] }
-promql-parser = { git = "https://github.com/GreptimeTeam/promql-parser.git", features = [
-    "ser",
-], rev = "27abb8e16003a50c720f00d6c85f41f5fa2a2a8e" }
+promql-parser = { version = "0.5", features = ["ser"] }
 prost = "0.13"
 raft-engine = { version = "0.4.1", default-features = false }
 rand = "0.8"
@@ -190,6 +189,10 @@ shadow-rs = "0.38"
 similar-asserts = "1.6.0"
 smallvec = { version = "1", features = ["serde"] }
 snafu = "0.8"
+sqlx = { version = "0.8", features = [
+    "runtime-tokio-rustls",
+    "mysql",
+] }
 sysinfo = "0.30"
 # on branch v0.52.x
 sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "71dd86058d2af97b9925093d40c4e03360403170", features = [
```
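Since `cargo gc` in the workflows above simply wraps `cargo build`, the new feature pair from this compare should be buildable directly (a sketch, not a documented release command):

```shell
cargo build --bin greptime --features "pg_kvbackend,mysql_kvbackend"
```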
Makefile (4 lines changed):
```diff
@@ -8,7 +8,7 @@ CARGO_BUILD_OPTS := --locked
 IMAGE_REGISTRY ?= docker.io
 IMAGE_NAMESPACE ?= greptime
 IMAGE_TAG ?= latest
-DEV_BUILDER_IMAGE_TAG ?= 2024-12-25-9d0fa5d5-20250124085746
+DEV_BUILDER_IMAGE_TAG ?= 2024-12-25-a71b93dd-20250305072908
 BUILDX_MULTI_PLATFORM_BUILD ?= false
 BUILDX_BUILDER_NAME ?= gtbuilder
 BASE_IMAGE ?= ubuntu
@@ -60,6 +60,8 @@ ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), all)
   BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64,linux/arm64 --push
 else ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), amd64)
   BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64 --push
+else ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), arm64)
+  BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/arm64 --push
 else
   BUILDX_MULTI_PLATFORM_BUILD_OPTS := -o type=docker
 endif
```
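A sketch of exercising the new `arm64` branch locally, using the Makefile defaults shown above (illustrative values; note this variant passes `--push`, so it publishes to the configured registry):

```shell
make dev-builder \
  BASE_IMAGE=ubuntu \
  BUILDX_MULTI_PLATFORM_BUILD=arm64 \
  IMAGE_REGISTRY=docker.io \
  IMAGE_NAMESPACE=greptime \
  DEV_BUILDER_IMAGE_TAG=2024-12-25-a71b93dd-20250305072908
```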
````diff
@@ -112,7 +112,7 @@ Start a GreptimeDB container with:
 
 ```shell
 docker run -p 127.0.0.1:4000-4003:4000-4003 \
--v "$(pwd)/greptimedb:/tmp/greptimedb" \
+-v "$(pwd)/greptimedb:./greptimedb_data" \
 --name greptime --rm \
 greptime/greptimedb:latest standalone start \
 --http-addr 0.0.0.0:4000 \
````
```diff
@@ -101,7 +101,7 @@
 | `flow` | -- | -- | flow engine options. |
 | `flow.num_workers` | Integer | `0` | The number of flow worker in flownode.<br/>Not setting(or set to 0) this value will use the number of CPU cores divided by 2. |
 | `storage` | -- | -- | The data storage options. |
-| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
+| `storage.data_home` | String | `./greptimedb_data/` | The working home directory. |
 | `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
 | `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
 | `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
@@ -181,7 +181,7 @@
 | `region_engine.metric` | -- | -- | Metric engine options. |
 | `region_engine.metric.experimental_sparse_primary_key_encoding` | Bool | `false` | Whether to enable the experimental sparse primary key encoding. |
 | `logging` | -- | -- | The logging options. |
-| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
+| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
 | `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
 | `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
 | `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
@@ -279,7 +279,7 @@
 | `datanode.client.connect_timeout` | String | `10s` | -- |
 | `datanode.client.tcp_nodelay` | Bool | `true` | -- |
 | `logging` | -- | -- | The logging options. |
-| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
+| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
 | `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
 | `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
 | `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
@@ -308,7 +308,7 @@
 
 | Key | Type | Default | Descriptions |
 | --- | -----| ------- | ----------- |
-| `data_home` | String | `/tmp/metasrv/` | The working home directory. |
+| `data_home` | String | `./greptimedb_data/metasrv/` | The working home directory. |
 | `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
 | `server_addr` | String | `127.0.0.1:3002` | The communication server address for the frontend and datanode to connect to metasrv.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `bind_addr`. |
 | `store_addrs` | Array | -- | Store server address default to etcd store.<br/>For postgres store, the format is:<br/>"password=password dbname=postgres user=postgres host=localhost port=5432"<br/>For etcd store, the format is:<br/>"127.0.0.1:2379" |
@@ -352,7 +352,7 @@
 | `wal.backoff_base` | Integer | `2` | Exponential backoff rate, i.e. next backoff = base * current backoff. |
 | `wal.backoff_deadline` | String | `5mins` | Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate. |
 | `logging` | -- | -- | The logging options. |
-| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
+| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
 | `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
 | `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
 | `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
@@ -442,7 +442,7 @@
 | `wal.dump_index_interval` | String | `60s` | The interval for dumping WAL indexes.<br/>**It's only used when the provider is `kafka`**. |
 | `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
 | `storage` | -- | -- | The data storage options. |
-| `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
+| `storage.data_home` | String | `./greptimedb_data/` | The working home directory. |
 | `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
 | `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
 | `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
@@ -522,7 +522,7 @@
 | `region_engine.metric` | -- | -- | Metric engine options. |
 | `region_engine.metric.experimental_sparse_primary_key_encoding` | Bool | `false` | Whether to enable the experimental sparse primary key encoding. |
 | `logging` | -- | -- | The logging options. |
-| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
+| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
 | `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
 | `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
 | `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
@@ -579,7 +579,7 @@
 | `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
 | `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
 | `logging` | -- | -- | The logging options. |
-| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
+| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
 | `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
 | `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
 | `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
```
```diff
@@ -119,7 +119,7 @@ provider = "raft_engine"
 ## The directory to store the WAL files.
 ## **It's only used when the provider is `raft_engine`**.
 ## @toml2docs:none-default
-dir = "/tmp/greptimedb/wal"
+dir = "./greptimedb_data/wal"
 
 ## The size of the WAL segment file.
 ## **It's only used when the provider is `raft_engine`**.
@@ -231,6 +231,7 @@ overwrite_entry_start_id = false
 # secret_access_key = "123456"
 # endpoint = "https://s3.amazonaws.com"
 # region = "us-west-2"
+# enable_virtual_host_style = false
 
 # Example of using Oss as the storage.
 # [storage]
@@ -264,7 +265,7 @@ overwrite_entry_start_id = false
 ## The data storage options.
 [storage]
 ## The working home directory.
-data_home = "/tmp/greptimedb/"
+data_home = "./greptimedb_data/"
 
 ## The storage type used to store the data.
 ## - `File`: the data is stored in the local file system.
@@ -617,7 +618,7 @@ experimental_sparse_primary_key_encoding = false
 ## The logging options.
 [logging]
 ## The directory to store the log files. If set to empty, logs will not be written to files.
-dir = "/tmp/greptimedb/logs"
+dir = "./greptimedb_data/logs"
 
 ## The log level. Can be `info`/`debug`/`warn`/`error`.
 ## @toml2docs:none-default
```
```diff
@@ -76,7 +76,7 @@ retry_interval = "3s"
 ## The logging options.
 [logging]
 ## The directory to store the log files. If set to empty, logs will not be written to files.
-dir = "/tmp/greptimedb/logs"
+dir = "./greptimedb_data/logs"
 
 ## The log level. Can be `info`/`debug`/`warn`/`error`.
 ## @toml2docs:none-default
@@ -121,4 +121,3 @@ sample_ratio = 1.0
 ## The tokio console address.
 ## @toml2docs:none-default
 #+ tokio_console_addr = "127.0.0.1"
-
```
```diff
@@ -189,7 +189,7 @@ tcp_nodelay = true
 ## The logging options.
 [logging]
 ## The directory to store the log files. If set to empty, logs will not be written to files.
-dir = "/tmp/greptimedb/logs"
+dir = "./greptimedb_data/logs"
 
 ## The log level. Can be `info`/`debug`/`warn`/`error`.
 ## @toml2docs:none-default
```
```diff
@@ -1,5 +1,5 @@
 ## The working home directory.
-data_home = "/tmp/metasrv/"
+data_home = "./greptimedb_data/metasrv/"
 
 ## The bind address of metasrv.
 bind_addr = "127.0.0.1:3002"
@@ -177,7 +177,7 @@ backoff_deadline = "5mins"
 ## The logging options.
 [logging]
 ## The directory to store the log files. If set to empty, logs will not be written to files.
-dir = "/tmp/greptimedb/logs"
+dir = "./greptimedb_data/logs"
 
 ## The log level. Can be `info`/`debug`/`warn`/`error`.
 ## @toml2docs:none-default
```
```diff
@@ -164,7 +164,7 @@ provider = "raft_engine"
 ## The directory to store the WAL files.
 ## **It's only used when the provider is `raft_engine`**.
 ## @toml2docs:none-default
-dir = "/tmp/greptimedb/wal"
+dir = "./greptimedb_data/wal"
 
 ## The size of the WAL segment file.
 ## **It's only used when the provider is `raft_engine`**.
@@ -318,6 +318,7 @@ retry_delay = "500ms"
 # secret_access_key = "123456"
 # endpoint = "https://s3.amazonaws.com"
 # region = "us-west-2"
+# enable_virtual_host_style = false
 
 # Example of using Oss as the storage.
 # [storage]
@@ -351,7 +352,7 @@ retry_delay = "500ms"
 ## The data storage options.
 [storage]
 ## The working home directory.
-data_home = "/tmp/greptimedb/"
+data_home = "./greptimedb_data/"
 
 ## The storage type used to store the data.
 ## - `File`: the data is stored in the local file system.
@@ -704,7 +705,7 @@ experimental_sparse_primary_key_encoding = false
 ## The logging options.
 [logging]
 ## The directory to store the log files. If set to empty, logs will not be written to files.
-dir = "/tmp/greptimedb/logs"
+dir = "./greptimedb_data/logs"
 
 ## The log level. Can be `info`/`debug`/`warn`/`error`.
 ## @toml2docs:none-default
```
```diff
@@ -25,7 +25,7 @@ services:
       - --initial-cluster-state=new
       - *etcd_initial_cluster_token
     volumes:
-      - /tmp/greptimedb-cluster-docker-compose/etcd0:/var/lib/etcd
+      - ./greptimedb-cluster-docker-compose/etcd0:/var/lib/etcd
     healthcheck:
       test: [ "CMD", "etcdctl", "--endpoints=http://etcd0:2379", "endpoint", "health" ]
       interval: 5s
@@ -68,12 +68,13 @@ services:
       - datanode
       - start
      - --node-id=0
+      - --data-home=/greptimedb_data
       - --rpc-bind-addr=0.0.0.0:3001
       - --rpc-server-addr=datanode0:3001
       - --metasrv-addrs=metasrv:3002
       - --http-addr=0.0.0.0:5000
     volumes:
-      - /tmp/greptimedb-cluster-docker-compose/datanode0:/tmp/greptimedb
+      - ./greptimedb-cluster-docker-compose/datanode0:/greptimedb_data
     healthcheck:
       test: [ "CMD", "curl", "-fv", "http://datanode0:5000/health" ]
       interval: 5s
```
grafana/check.sh (new executable file, 19 lines):
```bash
#!/usr/bin/env bash

BASEDIR=$(dirname "$0")

# Use jq to check for panels with empty or missing descriptions
invalid_panels=$(cat $BASEDIR/greptimedb-cluster.json | jq -r '
  .panels[]
  | select((.type == "stats" or .type == "timeseries") and (.description == "" or .description == null))
')

# Check if any invalid panels were found
if [[ -n "$invalid_panels" ]]; then
  echo "Error: The following panels have empty or missing descriptions:"
  echo "$invalid_panels"
  exit 1
else
  echo "All panels with type 'stats' or 'timeseries' have valid descriptions."
  exit 0
fi
```
(One file's diff is suppressed because it is too large.)
grafana/summary.sh (new executable file, 11 lines):
```bash
#!/usr/bin/env bash

BASEDIR=$(dirname "$0")
echo '| Title | Description | Expressions |
|---|---|---|'

cat $BASEDIR/greptimedb-cluster.json | jq -r '
  .panels |
  map(select(.type == "stat" or .type == "timeseries")) |
  .[] | "| \(.title) | \(.description | gsub("\n"; "<br>")) | \(.targets | map(.expr // .rawSql | "`\(.|gsub("\n"; "<br>"))`") | join("<br>")) |"
'
```
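Both scripts run standalone from the repository root (jq required); `panel-summary.md` below is an arbitrary output path. Note that check.sh filters on type `stats` while summary.sh filters on `stat`, as committed:

```shell
# Fail (exit 1) if any matching panel lacks a description.
./grafana/check.sh

# Render the markdown table that the workflow appends to the job summary.
./grafana/summary.sh > panel-summary.md
```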
```diff
@@ -19,9 +19,7 @@ use common_decimal::decimal128::{DECIMAL128_DEFAULT_SCALE, DECIMAL128_MAX_PRECIS
 use common_decimal::Decimal128;
 use common_time::time::Time;
 use common_time::timestamp::TimeUnit;
-use common_time::{
-    Date, DateTime, IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth, Timestamp,
-};
+use common_time::{Date, IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth, Timestamp};
 use datatypes::prelude::{ConcreteDataType, ValueRef};
 use datatypes::scalars::ScalarVector;
 use datatypes::types::{
@@ -29,8 +27,8 @@
 };
 use datatypes::value::{OrderedF32, OrderedF64, Value};
 use datatypes::vectors::{
-    BinaryVector, BooleanVector, DateTimeVector, DateVector, Decimal128Vector, Float32Vector,
-    Float64Vector, Int32Vector, Int64Vector, IntervalDayTimeVector, IntervalMonthDayNanoVector,
+    BinaryVector, BooleanVector, DateVector, Decimal128Vector, Float32Vector, Float64Vector,
+    Int32Vector, Int64Vector, IntervalDayTimeVector, IntervalMonthDayNanoVector,
     IntervalYearMonthVector, PrimitiveVector, StringVector, TimeMicrosecondVector,
     TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector,
     TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt32Vector,
@@ -118,7 +116,7 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
             ColumnDataType::Json => ConcreteDataType::json_datatype(),
             ColumnDataType::String => ConcreteDataType::string_datatype(),
             ColumnDataType::Date => ConcreteDataType::date_datatype(),
-            ColumnDataType::Datetime => ConcreteDataType::datetime_datatype(),
+            ColumnDataType::Datetime => ConcreteDataType::timestamp_microsecond_datatype(),
             ColumnDataType::TimestampSecond => ConcreteDataType::timestamp_second_datatype(),
             ColumnDataType::TimestampMillisecond => {
                 ConcreteDataType::timestamp_millisecond_datatype()
@@ -271,7 +269,6 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
             ConcreteDataType::Binary(_) => ColumnDataType::Binary,
             ConcreteDataType::String(_) => ColumnDataType::String,
             ConcreteDataType::Date(_) => ColumnDataType::Date,
-            ConcreteDataType::DateTime(_) => ColumnDataType::Datetime,
             ConcreteDataType::Timestamp(t) => match t {
                 TimestampType::Second(_) => ColumnDataType::TimestampSecond,
                 TimestampType::Millisecond(_) => ColumnDataType::TimestampMillisecond,
@@ -476,7 +473,6 @@ pub fn push_vals(column: &mut Column, origin_count: usize, vector: VectorRef) {
         Value::String(val) => values.string_values.push(val.as_utf8().to_string()),
         Value::Binary(val) => values.binary_values.push(val.to_vec()),
         Value::Date(val) => values.date_values.push(val.val()),
-        Value::DateTime(val) => values.datetime_values.push(val.val()),
         Value::Timestamp(val) => match val.unit() {
             TimeUnit::Second => values.timestamp_second_values.push(val.value()),
             TimeUnit::Millisecond => values.timestamp_millisecond_values.push(val.value()),
@@ -577,12 +573,11 @@ pub fn pb_value_to_value_ref<'a>(
         ValueData::BinaryValue(bytes) => ValueRef::Binary(bytes.as_slice()),
         ValueData::StringValue(string) => ValueRef::String(string.as_str()),
         ValueData::DateValue(d) => ValueRef::Date(Date::from(*d)),
-        ValueData::DatetimeValue(d) => ValueRef::DateTime(DateTime::new(*d)),
         ValueData::TimestampSecondValue(t) => ValueRef::Timestamp(Timestamp::new_second(*t)),
         ValueData::TimestampMillisecondValue(t) => {
             ValueRef::Timestamp(Timestamp::new_millisecond(*t))
         }
-        ValueData::TimestampMicrosecondValue(t) => {
+        ValueData::DatetimeValue(t) | ValueData::TimestampMicrosecondValue(t) => {
             ValueRef::Timestamp(Timestamp::new_microsecond(*t))
         }
         ValueData::TimestampNanosecondValue(t) => {
@@ -651,7 +646,6 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
         ConcreteDataType::Binary(_) => Arc::new(BinaryVector::from(values.binary_values)),
         ConcreteDataType::String(_) => Arc::new(StringVector::from_vec(values.string_values)),
         ConcreteDataType::Date(_) => Arc::new(DateVector::from_vec(values.date_values)),
-        ConcreteDataType::DateTime(_) => Arc::new(DateTimeVector::from_vec(values.datetime_values)),
         ConcreteDataType::Timestamp(unit) => match unit {
             TimestampType::Second(_) => Arc::new(TimestampSecondVector::from_vec(
                 values.timestamp_second_values,
@@ -787,11 +781,6 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
             .into_iter()
             .map(|val| val.into())
             .collect(),
-        ConcreteDataType::DateTime(_) => values
-            .datetime_values
-            .into_iter()
-            .map(|v| Value::DateTime(v.into()))
-            .collect(),
         ConcreteDataType::Date(_) => values
             .date_values
             .into_iter()
@@ -947,9 +936,6 @@ pub fn to_proto_value(value: Value) -> Option<v1::Value> {
         Value::Date(v) => v1::Value {
             value_data: Some(ValueData::DateValue(v.val())),
         },
-        Value::DateTime(v) => v1::Value {
-            value_data: Some(ValueData::DatetimeValue(v.val())),
-        },
         Value::Timestamp(v) => match v.unit() {
             TimeUnit::Second => v1::Value {
                 value_data: Some(ValueData::TimestampSecondValue(v.value())),
@@ -1066,7 +1052,6 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
         Value::String(v) => Some(ValueData::StringValue(v.as_utf8().to_string())),
         Value::Binary(v) => Some(ValueData::BinaryValue(v.to_vec())),
         Value::Date(v) => Some(ValueData::DateValue(v.val())),
-        Value::DateTime(v) => Some(ValueData::DatetimeValue(v.val())),
         Value::Timestamp(v) => Some(match v.unit() {
             TimeUnit::Second => ValueData::TimestampSecondValue(v.value()),
             TimeUnit::Millisecond => ValueData::TimestampMillisecondValue(v.value()),
@@ -1248,7 +1233,7 @@ mod tests {
             ColumnDataTypeWrapper::date_datatype().into()
         );
         assert_eq!(
-            ConcreteDataType::datetime_datatype(),
+            ConcreteDataType::timestamp_microsecond_datatype(),
             ColumnDataTypeWrapper::datetime_datatype().into()
         );
         assert_eq!(
@@ -1339,10 +1324,6 @@ mod tests {
             ColumnDataTypeWrapper::date_datatype(),
             ConcreteDataType::date_datatype().try_into().unwrap()
        );
-        assert_eq!(
-            ColumnDataTypeWrapper::datetime_datatype(),
-            ConcreteDataType::datetime_datatype().try_into().unwrap()
-        );
        assert_eq!(
            ColumnDataTypeWrapper::timestamp_millisecond_datatype(),
            ConcreteDataType::timestamp_millisecond_datatype()
@@ -1830,17 +1811,6 @@ mod tests {
         ]
     );
 
-    test_convert_values!(
-        datetime,
-        vec![1.into(), 2.into(), 3.into()],
-        datetime,
-        vec![
-            Value::DateTime(1.into()),
-            Value::DateTime(2.into()),
-            Value::DateTime(3.into())
-        ]
-    );
-
     #[test]
     fn test_vectors_to_rows_for_different_types() {
         let boolean_vec = BooleanVector::from_vec(vec![true, false, true]);
```
@@ -132,6 +132,15 @@ pub fn options_from_skipping(skipping: &SkippingIndexOptions) -> Result<Option<C
|
||||
Ok((!options.options.is_empty()).then_some(options))
|
||||
}
|
||||
|
||||
/// Tries to construct a `ColumnOptions` for inverted index.
|
||||
pub fn options_from_inverted() -> ColumnOptions {
|
||||
let mut options = ColumnOptions::default();
|
||||
options
|
||||
.options
|
||||
.insert(INVERTED_INDEX_GRPC_KEY.to_string(), "true".to_string());
|
||||
options
|
||||
}
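
For reference, a sketch of what the new helper yields; field access follows the `options.options` map used above, and the assertion values are illustrative:

// Illustrative only: the returned ColumnOptions carries a single marker entry.
let opts = options_from_inverted();
assert_eq!(
    opts.options.get(INVERTED_INDEX_GRPC_KEY).map(String::as_str),
    Some("true")
);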

/// Tries to construct a `FulltextAnalyzer` from the given analyzer.
pub fn as_fulltext_option(analyzer: Analyzer) -> FulltextAnalyzer {
    match analyzer {

@@ -77,7 +77,7 @@ trait SystemSchemaProviderInner {
    fn system_table(&self, name: &str) -> Option<SystemTableRef>;

    fn table_info(catalog_name: String, table: &SystemTableRef) -> TableInfoRef {
        let table_meta = TableMetaBuilder::default()
        let table_meta = TableMetaBuilder::empty()
            .schema(table.schema())
            .primary_key_indices(vec![])
            .next_column_id(0)

@@ -365,10 +365,6 @@ impl InformationSchemaColumnsBuilder {
        self.numeric_scales.push(None);

        match &column_schema.data_type {
            ConcreteDataType::DateTime(datetime_type) => {
                self.datetime_precisions
                    .push(Some(datetime_type.precision() as i64));
            }
            ConcreteDataType::Timestamp(ts_type) => {
                self.datetime_precisions
                    .push(Some(ts_type.precision() as i64));

@@ -28,16 +28,19 @@ use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datatypes::prelude::ConcreteDataType as CDT;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::timestamp::TimestampMillisecond;
use datatypes::value::Value;
use datatypes::vectors::{
    Int64VectorBuilder, StringVectorBuilder, UInt32VectorBuilder, UInt64VectorBuilder, VectorRef,
    Int64VectorBuilder, StringVectorBuilder, TimestampMillisecondVectorBuilder,
    UInt32VectorBuilder, UInt64VectorBuilder, VectorRef,
};
use futures::TryStreamExt;
use snafu::{OptionExt, ResultExt};
use store_api::storage::{ScanRequest, TableId};

use crate::error::{
    CreateRecordBatchSnafu, FlowInfoNotFoundSnafu, InternalSnafu, JsonSnafu, ListFlowsSnafu, Result,
    CreateRecordBatchSnafu, FlowInfoNotFoundSnafu, InternalSnafu, JsonSnafu, ListFlowsSnafu,
    Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::information_schema::{Predicates, FLOWS};
use crate::system_schema::information_schema::InformationTable;
@@ -59,6 +62,10 @@ pub const SOURCE_TABLE_IDS: &str = "source_table_ids";
pub const SINK_TABLE_NAME: &str = "sink_table_name";
pub const FLOWNODE_IDS: &str = "flownode_ids";
pub const OPTIONS: &str = "options";
pub const CREATED_TIME: &str = "created_time";
pub const UPDATED_TIME: &str = "updated_time";
pub const LAST_EXECUTION_TIME: &str = "last_execution_time";
pub const SOURCE_TABLE_NAMES: &str = "source_table_names";

/// The `information_schema.flows` table provides information about flows in databases.
#[derive(Debug)]
@@ -99,6 +106,14 @@ impl InformationSchemaFlows {
            (SINK_TABLE_NAME, CDT::string_datatype(), false),
            (FLOWNODE_IDS, CDT::string_datatype(), true),
            (OPTIONS, CDT::string_datatype(), true),
            (CREATED_TIME, CDT::timestamp_millisecond_datatype(), false),
            (UPDATED_TIME, CDT::timestamp_millisecond_datatype(), false),
            (
                LAST_EXECUTION_TIME,
                CDT::timestamp_millisecond_datatype(),
                true,
            ),
            (SOURCE_TABLE_NAMES, CDT::string_datatype(), true),
        ]
        .into_iter()
        .map(|(name, ty, nullable)| ColumnSchema::new(name, ty, nullable))
@@ -170,6 +185,10 @@ struct InformationSchemaFlowsBuilder {
    sink_table_names: StringVectorBuilder,
    flownode_id_groups: StringVectorBuilder,
    option_groups: StringVectorBuilder,
    created_time: TimestampMillisecondVectorBuilder,
    updated_time: TimestampMillisecondVectorBuilder,
    last_execution_time: TimestampMillisecondVectorBuilder,
    source_table_names: StringVectorBuilder,
}

impl InformationSchemaFlowsBuilder {
@@ -196,6 +215,10 @@ impl InformationSchemaFlowsBuilder {
            sink_table_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            flownode_id_groups: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            option_groups: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            created_time: TimestampMillisecondVectorBuilder::with_capacity(INIT_CAPACITY),
            updated_time: TimestampMillisecondVectorBuilder::with_capacity(INIT_CAPACITY),
            last_execution_time: TimestampMillisecondVectorBuilder::with_capacity(INIT_CAPACITY),
            source_table_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
        }
    }

@@ -235,13 +258,14 @@ impl InformationSchemaFlowsBuilder {
                catalog_name: catalog_name.to_string(),
                flow_name: flow_name.to_string(),
            })?;
            self.add_flow(&predicates, flow_id.flow_id(), flow_info, &flow_stat)?;
            self.add_flow(&predicates, flow_id.flow_id(), flow_info, &flow_stat)
                .await?;
        }

        self.finish()
    }

    fn add_flow(
    async fn add_flow(
        &mut self,
        predicates: &Predicates,
        flow_id: FlowId,
@@ -290,6 +314,36 @@ impl InformationSchemaFlowsBuilder {
                input: format!("{:?}", flow_info.options()),
            },
        )?));
        self.created_time
            .push(Some(flow_info.created_time().timestamp_millis().into()));
        self.updated_time
            .push(Some(flow_info.updated_time().timestamp_millis().into()));
        self.last_execution_time
            .push(flow_stat.as_ref().and_then(|state| {
                state
                    .last_exec_time_map
                    .get(&flow_id)
                    .map(|v| TimestampMillisecond::new(*v))
            }));

        let mut source_table_names = vec![];
        let catalog_name = self.catalog_name.clone();
        let catalog_manager = self
            .catalog_manager
            .upgrade()
            .context(UpgradeWeakCatalogManagerRefSnafu)?;
        for schema_name in catalog_manager.schema_names(&catalog_name, None).await? {
            source_table_names.extend(
                catalog_manager
                    .tables_by_ids(&catalog_name, &schema_name, flow_info.source_table_ids())
                    .await?
                    .into_iter()
                    .map(|table| table.table_info().full_table_name()),
            );
        }

        let source_table_names = source_table_names.join(",");
        self.source_table_names.push(Some(&source_table_names));

        Ok(())
    }
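
Once populated, the new columns surface through ordinary SQL like any other INFORMATION_SCHEMA table; a hypothetical smoke check (the client call shape is borrowed from the CLI elsewhere in this changeset and is an assumption here):

// Hypothetical: query the widened information_schema.flows schema.
let sql = "SELECT flow_name, created_time, updated_time, last_execution_time, \
           source_table_names FROM information_schema.flows";
let _rows = database_client.sql_in_public(sql).await?; // client assumed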
@@ -307,6 +361,10 @@ impl InformationSchemaFlowsBuilder {
            Arc::new(self.sink_table_names.finish()),
            Arc::new(self.flownode_id_groups.finish()),
            Arc::new(self.option_groups.finish()),
            Arc::new(self.created_time.finish()),
            Arc::new(self.updated_time.finish()),
            Arc::new(self.last_execution_time.finish()),
            Arc::new(self.source_table_names.finish()),
        ];
        RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
    }

@@ -20,7 +20,7 @@ use datatypes::vectors::{Int64Vector, StringVector, VectorRef};

use super::table_names::*;
use crate::system_schema::utils::tables::{
    bigint_column, datetime_column, string_column, string_columns,
    bigint_column, string_column, string_columns, timestamp_micro_column,
};

const NO_VALUE: &str = "NO";
@@ -163,17 +163,17 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>
            string_column("EVENT_BODY"),
            string_column("EVENT_DEFINITION"),
            string_column("EVENT_TYPE"),
            datetime_column("EXECUTE_AT"),
            timestamp_micro_column("EXECUTE_AT"),
            bigint_column("INTERVAL_VALUE"),
            string_column("INTERVAL_FIELD"),
            string_column("SQL_MODE"),
            datetime_column("STARTS"),
            datetime_column("ENDS"),
            timestamp_micro_column("STARTS"),
            timestamp_micro_column("ENDS"),
            string_column("STATUS"),
            string_column("ON_COMPLETION"),
            datetime_column("CREATED"),
            datetime_column("LAST_ALTERED"),
            datetime_column("LAST_EXECUTED"),
            timestamp_micro_column("CREATED"),
            timestamp_micro_column("LAST_ALTERED"),
            timestamp_micro_column("LAST_EXECUTED"),
            string_column("EVENT_COMMENT"),
            bigint_column("ORIGINATOR"),
            string_column("CHARACTER_SET_CLIENT"),
@@ -204,10 +204,10 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>
            bigint_column("INITIAL_SIZE"),
            bigint_column("MAXIMUM_SIZE"),
            bigint_column("AUTOEXTEND_SIZE"),
            datetime_column("CREATION_TIME"),
            datetime_column("LAST_UPDATE_TIME"),
            datetime_column("LAST_ACCESS_TIME"),
            datetime_column("RECOVER_TIME"),
            timestamp_micro_column("CREATION_TIME"),
            timestamp_micro_column("LAST_UPDATE_TIME"),
            timestamp_micro_column("LAST_ACCESS_TIME"),
            timestamp_micro_column("RECOVER_TIME"),
            bigint_column("TRANSACTION_COUNTER"),
            string_column("VERSION"),
            string_column("ROW_FORMAT"),
@@ -217,9 +217,9 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>
            bigint_column("MAX_DATA_LENGTH"),
            bigint_column("INDEX_LENGTH"),
            bigint_column("DATA_FREE"),
            datetime_column("CREATE_TIME"),
            datetime_column("UPDATE_TIME"),
            datetime_column("CHECK_TIME"),
            timestamp_micro_column("CREATE_TIME"),
            timestamp_micro_column("UPDATE_TIME"),
            timestamp_micro_column("CHECK_TIME"),
            string_column("CHECKSUM"),
            string_column("STATUS"),
            string_column("EXTRA"),
@@ -330,8 +330,8 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>
            string_column("SQL_DATA_ACCESS"),
            string_column("SQL_PATH"),
            string_column("SECURITY_TYPE"),
            datetime_column("CREATED"),
            datetime_column("LAST_ALTERED"),
            timestamp_micro_column("CREATED"),
            timestamp_micro_column("LAST_ALTERED"),
            string_column("SQL_MODE"),
            string_column("ROUTINE_COMMENT"),
            string_column("DEFINER"),
@@ -383,7 +383,7 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>
            string_column("ACTION_REFERENCE_NEW_TABLE"),
            string_column("ACTION_REFERENCE_OLD_ROW"),
            string_column("ACTION_REFERENCE_NEW_ROW"),
            datetime_column("CREATED"),
            timestamp_micro_column("CREATED"),
            string_column("SQL_MODE"),
            string_column("DEFINER"),
            string_column("CHARACTER_SET_CLIENT"),

@@ -20,17 +20,18 @@ use common_catalog::consts::INFORMATION_SCHEMA_PARTITIONS_TABLE_ID;
use common_error::ext::BoxedError;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use common_time::datetime::DateTime;
use datafusion::execution::TaskContext;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::timestamp::TimestampMicrosecond;
use datatypes::value::Value;
use datatypes::vectors::{
    ConstantVector, DateTimeVector, DateTimeVectorBuilder, Int64Vector, Int64VectorBuilder,
    MutableVector, StringVector, StringVectorBuilder, UInt64VectorBuilder,
    ConstantVector, Int64Vector, Int64VectorBuilder, MutableVector, StringVector,
    StringVectorBuilder, TimestampMicrosecondVector, TimestampMicrosecondVectorBuilder,
    UInt64VectorBuilder,
};
use futures::{StreamExt, TryStreamExt};
use partition::manager::PartitionInfo;
@@ -127,9 +128,21 @@ impl InformationSchemaPartitions {
            ColumnSchema::new("max_data_length", ConcreteDataType::int64_datatype(), true),
            ColumnSchema::new("index_length", ConcreteDataType::int64_datatype(), true),
            ColumnSchema::new("data_free", ConcreteDataType::int64_datatype(), true),
            ColumnSchema::new("create_time", ConcreteDataType::datetime_datatype(), true),
            ColumnSchema::new("update_time", ConcreteDataType::datetime_datatype(), true),
            ColumnSchema::new("check_time", ConcreteDataType::datetime_datatype(), true),
            ColumnSchema::new(
                "create_time",
                ConcreteDataType::timestamp_microsecond_datatype(),
                true,
            ),
            ColumnSchema::new(
                "update_time",
                ConcreteDataType::timestamp_microsecond_datatype(),
                true,
            ),
            ColumnSchema::new(
                "check_time",
                ConcreteDataType::timestamp_microsecond_datatype(),
                true,
            ),
            ColumnSchema::new("checksum", ConcreteDataType::int64_datatype(), true),
            ColumnSchema::new(
                "partition_comment",
@@ -200,7 +213,7 @@ struct InformationSchemaPartitionsBuilder {
    partition_names: StringVectorBuilder,
    partition_ordinal_positions: Int64VectorBuilder,
    partition_expressions: StringVectorBuilder,
    create_times: DateTimeVectorBuilder,
    create_times: TimestampMicrosecondVectorBuilder,
    partition_ids: UInt64VectorBuilder,
}

@@ -220,7 +233,7 @@ impl InformationSchemaPartitionsBuilder {
            partition_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            partition_ordinal_positions: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
            partition_expressions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            create_times: DateTimeVectorBuilder::with_capacity(INIT_CAPACITY),
            create_times: TimestampMicrosecondVectorBuilder::with_capacity(INIT_CAPACITY),
            partition_ids: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
        }
    }
@@ -324,7 +337,7 @@ impl InformationSchemaPartitionsBuilder {
        };

        self.partition_expressions.push(expressions.as_deref());
        self.create_times.push(Some(DateTime::from(
        self.create_times.push(Some(TimestampMicrosecond::from(
            table_info.meta.created_on.timestamp_millis(),
        )));
        self.partition_ids.push(Some(partition.id.as_u64()));
@@ -342,8 +355,8 @@ impl InformationSchemaPartitionsBuilder {
            Arc::new(Int64Vector::from(vec![None])),
            rows_num,
        ));
        let null_datetime_vector = Arc::new(ConstantVector::new(
            Arc::new(DateTimeVector::from(vec![None])),
        let null_timestampmicrosecond_vector = Arc::new(ConstantVector::new(
            Arc::new(TimestampMicrosecondVector::from(vec![None])),
            rows_num,
        ));
        let partition_methods = Arc::new(ConstantVector::new(
@@ -373,8 +386,8 @@ impl InformationSchemaPartitionsBuilder {
            null_i64_vector.clone(),
            Arc::new(self.create_times.finish()),
            // TODO(dennis): supports update_time
            null_datetime_vector.clone(),
            null_datetime_vector,
            null_timestampmicrosecond_vector.clone(),
            null_timestampmicrosecond_vector,
            null_i64_vector,
            null_string_vector.clone(),
            null_string_vector.clone(),

@@ -30,7 +30,8 @@ use datatypes::prelude::{ConcreteDataType, ScalarVectorBuilder, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::value::Value;
use datatypes::vectors::{
    DateTimeVectorBuilder, StringVectorBuilder, UInt32VectorBuilder, UInt64VectorBuilder,
    StringVectorBuilder, TimestampMicrosecondVectorBuilder, UInt32VectorBuilder,
    UInt64VectorBuilder,
};
use futures::TryStreamExt;
use snafu::{OptionExt, ResultExt};
@@ -105,9 +106,21 @@ impl InformationSchemaTables {
            ColumnSchema::new(TABLE_ROWS, ConcreteDataType::uint64_datatype(), true),
            ColumnSchema::new(DATA_FREE, ConcreteDataType::uint64_datatype(), true),
            ColumnSchema::new(AUTO_INCREMENT, ConcreteDataType::uint64_datatype(), true),
            ColumnSchema::new(CREATE_TIME, ConcreteDataType::datetime_datatype(), true),
            ColumnSchema::new(UPDATE_TIME, ConcreteDataType::datetime_datatype(), true),
            ColumnSchema::new(CHECK_TIME, ConcreteDataType::datetime_datatype(), true),
            ColumnSchema::new(
                CREATE_TIME,
                ConcreteDataType::timestamp_microsecond_datatype(),
                true,
            ),
            ColumnSchema::new(
                UPDATE_TIME,
                ConcreteDataType::timestamp_microsecond_datatype(),
                true,
            ),
            ColumnSchema::new(
                CHECK_TIME,
                ConcreteDataType::timestamp_microsecond_datatype(),
                true,
            ),
            ColumnSchema::new(TABLE_COLLATION, ConcreteDataType::string_datatype(), true),
            ColumnSchema::new(CHECKSUM, ConcreteDataType::uint64_datatype(), true),
            ColumnSchema::new(CREATE_OPTIONS, ConcreteDataType::string_datatype(), true),
@@ -182,9 +195,9 @@ struct InformationSchemaTablesBuilder {
    max_index_length: UInt64VectorBuilder,
    data_free: UInt64VectorBuilder,
    auto_increment: UInt64VectorBuilder,
    create_time: DateTimeVectorBuilder,
    update_time: DateTimeVectorBuilder,
    check_time: DateTimeVectorBuilder,
    create_time: TimestampMicrosecondVectorBuilder,
    update_time: TimestampMicrosecondVectorBuilder,
    check_time: TimestampMicrosecondVectorBuilder,
    table_collation: StringVectorBuilder,
    checksum: UInt64VectorBuilder,
    create_options: StringVectorBuilder,
@@ -219,9 +232,9 @@ impl InformationSchemaTablesBuilder {
            max_index_length: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
            data_free: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
            auto_increment: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
            create_time: DateTimeVectorBuilder::with_capacity(INIT_CAPACITY),
            update_time: DateTimeVectorBuilder::with_capacity(INIT_CAPACITY),
            check_time: DateTimeVectorBuilder::with_capacity(INIT_CAPACITY),
            create_time: TimestampMicrosecondVectorBuilder::with_capacity(INIT_CAPACITY),
            update_time: TimestampMicrosecondVectorBuilder::with_capacity(INIT_CAPACITY),
            check_time: TimestampMicrosecondVectorBuilder::with_capacity(INIT_CAPACITY),
            table_collation: StringVectorBuilder::with_capacity(INIT_CAPACITY),
            checksum: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
            create_options: StringVectorBuilder::with_capacity(INIT_CAPACITY),

@@ -51,10 +51,10 @@ pub fn bigint_column(name: &str) -> ColumnSchema {
    )
}

pub fn datetime_column(name: &str) -> ColumnSchema {
pub fn timestamp_micro_column(name: &str) -> ColumnSchema {
    ColumnSchema::new(
        str::to_lowercase(name),
        ConcreteDataType::datetime_datatype(),
        ConcreteDataType::timestamp_microsecond_datatype(),
        false,
    )
}
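
A quick sketch of what the renamed helper produces, assuming `ColumnSchema`'s `name` and `data_type` fields are public as used elsewhere in this changeset:

// Illustrative only: MySQL-style names are lower-cased and typed as timestamp(6).
let col = timestamp_micro_column("CREATE_TIME");
assert_eq!(col.name, "create_time");
assert_eq!(
    col.data_type,
    ConcreteDataType::timestamp_microsecond_datatype()
);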

@@ -6,6 +6,7 @@ license.workspace = true

[features]
pg_kvbackend = ["common-meta/pg_kvbackend"]
mysql_kvbackend = ["common-meta/mysql_kvbackend"]

[lints]
workspace = true
@@ -43,6 +44,10 @@ futures.workspace = true
humantime.workspace = true
meta-client.workspace = true
nu-ansi-term = "0.46"
opendal = { version = "0.51.1", features = [
    "services-fs",
    "services-s3",
] }
query.workspace = true
rand.workspace = true
reqwest.workspace = true

@@ -23,6 +23,8 @@ use common_error::ext::BoxedError;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::etcd::EtcdStore;
use common_meta::kv_backend::memory::MemoryKvBackend;
#[cfg(feature = "mysql_kvbackend")]
use common_meta::kv_backend::rds::MySqlStore;
#[cfg(feature = "pg_kvbackend")]
use common_meta::kv_backend::rds::PgStore;
use common_meta::peer::Peer;
@@ -63,6 +65,9 @@ pub struct BenchTableMetadataCommand {
    #[cfg(feature = "pg_kvbackend")]
    #[clap(long)]
    postgres_addr: Option<String>,
    #[cfg(feature = "mysql_kvbackend")]
    #[clap(long)]
    mysql_addr: Option<String>,
    #[clap(long)]
    count: u32,
}
@@ -86,6 +91,16 @@ impl BenchTableMetadataCommand {
            kv_backend
        };

        #[cfg(feature = "mysql_kvbackend")]
        let kv_backend = if let Some(mysql_addr) = &self.mysql_addr {
            info!("Using mysql as kv backend");
            MySqlStore::with_url(mysql_addr, "greptime_metakv", 128)
                .await
                .unwrap()
        } else {
            kv_backend
        };
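
The call shape mirrors the existing Postgres branch. A hypothetical invocation for local testing — the DSN is invented, and the third argument is assumed to be the connection-pool size:

// Illustrative only: DSN, table name and pool size are example values.
let kv_backend = MySqlStore::with_url(
    "mysql://user:pass@127.0.0.1:3306/greptime_meta", // hypothetical DSN
    "greptime_metakv", // metadata table, as in the hunk above
    128,               // assumed pool size
)
.await
.unwrap();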

        let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend));

        let tool = BenchTableMetadata {

@@ -276,6 +276,24 @@ pub enum Error {
        #[snafu(implicit)]
        location: Location,
    },

    #[snafu(display("OpenDAL operator failed"))]
    OpenDal {
        #[snafu(implicit)]
        location: Location,
        #[snafu(source)]
        error: opendal::Error,
    },
    #[snafu(display("S3 config needs to be set"))]
    S3ConfigNotSet {
        #[snafu(implicit)]
        location: Location,
    },
    #[snafu(display("Output directory not set"))]
    OutputDirNotSet {
        #[snafu(implicit)]
        location: Location,
    },
}
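
With `#[snafu(implicit)]` locations, the new variants are raised through snafu's generated context selectors; a sketch of the two call shapes used later in this changeset:

// Sketch only: `ensure!` builds OutputDirNotSet via its generated selector,
// and `context` wraps an opendal::Error into the OpenDal variant.
snafu::ensure!(self.s3 || self.output_dir.is_some(), OutputDirNotSetSnafu);
let op = Operator::new(builder).context(OpenDalSnafu)?;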

pub type Result<T> = std::result::Result<T, Error>;
@@ -319,6 +337,9 @@ impl ErrorExt for Error {
            | Error::BuildClient { .. } => StatusCode::Unexpected,

            Error::Other { source, .. } => source.status_code(),
            Error::OpenDal { .. } => StatusCode::Internal,
            Error::S3ConfigNotSet { .. } => StatusCode::InvalidArguments,
            Error::OutputDirNotSet { .. } => StatusCode::InvalidArguments,

            Error::BuildRuntime { source, .. } => source.status_code(),

@@ -21,15 +21,18 @@ use async_trait::async_trait;
use clap::{Parser, ValueEnum};
use common_error::ext::BoxedError;
use common_telemetry::{debug, error, info};
use opendal::layers::LoggingLayer;
use opendal::{services, Operator};
use serde_json::Value;
use snafu::{OptionExt, ResultExt};
use tokio::fs::File;
use tokio::io::{AsyncWriteExt, BufWriter};
use tokio::sync::Semaphore;
use tokio::time::Instant;

use crate::database::{parse_proxy_opts, DatabaseClient};
use crate::error::{EmptyResultSnafu, Error, FileIoSnafu, Result, SchemaNotFoundSnafu};
use crate::error::{
    EmptyResultSnafu, Error, OpenDalSnafu, OutputDirNotSetSnafu, Result, S3ConfigNotSetSnafu,
    SchemaNotFoundSnafu,
};
use crate::{database, Tool};

type TableReference = (String, String, String);
@@ -52,8 +55,9 @@ pub struct ExportCommand {
    addr: String,

    /// Directory to put the exported data, e.g. `/tmp/greptimedb-export`.
    /// Required for local export.
    #[clap(long)]
    output_dir: String,
    output_dir: Option<String>,

    /// The name of the catalog to export.
    #[clap(long, default_value = "greptime-*")]
@@ -101,10 +105,51 @@ pub struct ExportCommand {
    /// Disable proxy server, if set, will not use any proxy.
    #[clap(long)]
    no_proxy: bool,

    /// Whether to export data to S3.
    #[clap(long)]
    s3: bool,

    /// The S3 bucket name. Required when `--s3` is set.
    #[clap(long)]
    s3_bucket: Option<String>,

    /// The S3 endpoint. Required when `--s3` is set.
    #[clap(long)]
    s3_endpoint: Option<String>,

    /// The S3 access key. Required when `--s3` is set.
    #[clap(long)]
    s3_access_key: Option<String>,

    /// The S3 secret key. Required when `--s3` is set.
    #[clap(long)]
    s3_secret_key: Option<String>,

    /// The S3 region. Required when `--s3` is set.
    #[clap(long)]
    s3_region: Option<String>,
}

impl ExportCommand {
    pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> {
        if self.s3
            && (self.s3_bucket.is_none()
                || self.s3_endpoint.is_none()
                || self.s3_access_key.is_none()
                || self.s3_secret_key.is_none()
                || self.s3_region.is_none())
        {
            return Err(BoxedError::new(S3ConfigNotSetSnafu {}.build()));
        }
        if !self.s3 && self.output_dir.is_none() {
            return Err(BoxedError::new(OutputDirNotSetSnafu {}.build()));
        }
        let (catalog, schema) =
            database::split_database(&self.database).map_err(BoxedError::new)?;
        let proxy = parse_proxy_opts(self.proxy.clone(), self.no_proxy)?;
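
Under these rules an S3 export must carry the full credential set, while a local export needs `--output-dir`. A hypothetical parse of the new flags via clap's `parse_from` (all values are placeholders):

// Illustrative only.
let cmd = ExportCommand::parse_from([
    "export",
    "--addr", "127.0.0.1:4000",
    "--s3",
    "--s3-bucket", "my-bucket",
    "--s3-endpoint", "https://s3.amazonaws.com",
    "--s3-access-key", "AK",
    "--s3-secret-key", "SK",
    "--s3-region", "us-east-1",
]);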
@@ -126,24 +171,43 @@ impl ExportCommand {
            target: self.target.clone(),
            start_time: self.start_time.clone(),
            end_time: self.end_time.clone(),
            s3: self.s3,
            s3_bucket: self.s3_bucket.clone(),
            s3_endpoint: self.s3_endpoint.clone(),
            s3_access_key: self.s3_access_key.clone(),
            s3_secret_key: self.s3_secret_key.clone(),
            s3_region: self.s3_region.clone(),
        }))
    }
}

#[derive(Clone)]
pub struct Export {
    catalog: String,
    schema: Option<String>,
    database_client: DatabaseClient,
    output_dir: String,
    output_dir: Option<String>,
    parallelism: usize,
    target: ExportTarget,
    start_time: Option<String>,
    end_time: Option<String>,
    s3: bool,
    s3_bucket: Option<String>,
    s3_endpoint: Option<String>,
    s3_access_key: Option<String>,
    s3_secret_key: Option<String>,
    s3_region: Option<String>,
}

impl Export {
    fn catalog_path(&self) -> PathBuf {
        PathBuf::from(&self.output_dir).join(&self.catalog)
        if self.s3 {
            PathBuf::from(&self.catalog)
        } else if let Some(dir) = &self.output_dir {
            PathBuf::from(dir).join(&self.catalog)
        } else {
            unreachable!("catalog_path: output_dir must be set when not using s3")
        }
    }

    async fn get_db_names(&self) -> Result<Vec<String>> {
@@ -300,19 +364,23 @@ impl Export {
        let timer = Instant::now();
        let db_names = self.get_db_names().await?;
        let db_count = db_names.len();
        let operator = self.build_operator().await?;

        for schema in db_names {
            let db_dir = self.catalog_path().join(format!("{schema}/"));
            tokio::fs::create_dir_all(&db_dir)
                .await
                .context(FileIoSnafu)?;
            let file = db_dir.join("create_database.sql");
            let mut file = File::create(file).await.context(FileIoSnafu)?;
            let create_database = self
                .show_create("DATABASE", &self.catalog, &schema, None)
                .await?;
            file.write_all(create_database.as_bytes())
                .await
                .context(FileIoSnafu)?;

            let file_path = self.get_file_path(&schema, "create_database.sql");
            self.write_to_storage(&operator, &file_path, create_database.into_bytes())
                .await?;

            info!(
                "Exported {}.{} database creation SQL to {}",
                self.catalog,
                schema,
                self.format_output_path(&file_path)
            );
        }

        let elapsed = timer.elapsed();
@@ -326,149 +394,267 @@ impl Export {
        let semaphore = Arc::new(Semaphore::new(self.parallelism));
        let db_names = self.get_db_names().await?;
        let db_count = db_names.len();
        let operator = Arc::new(self.build_operator().await?);
        let mut tasks = Vec::with_capacity(db_names.len());

        for schema in db_names {
            let semaphore_moved = semaphore.clone();
            let export_self = self.clone();
            let operator = operator.clone();
            tasks.push(async move {
                let _permit = semaphore_moved.acquire().await.unwrap();
                let (metric_physical_tables, remaining_tables, views) =
                    self.get_table_list(&self.catalog, &schema).await?;
                let table_count =
                    metric_physical_tables.len() + remaining_tables.len() + views.len();
                let db_dir = self.catalog_path().join(format!("{schema}/"));
                tokio::fs::create_dir_all(&db_dir)
                    .await
                    .context(FileIoSnafu)?;
                let file = db_dir.join("create_tables.sql");
                let mut file = File::create(file).await.context(FileIoSnafu)?;
                for (c, s, t) in metric_physical_tables.into_iter().chain(remaining_tables) {
                    let create_table = self.show_create("TABLE", &c, &s, Some(&t)).await?;
                    file.write_all(create_table.as_bytes())
                        .await
                        .context(FileIoSnafu)?;
                }
                for (c, s, v) in views {
                    let create_view = self.show_create("VIEW", &c, &s, Some(&v)).await?;
                    file.write_all(create_view.as_bytes())
                        .await
                        .context(FileIoSnafu)?;
                let (metric_physical_tables, remaining_tables, views) = export_self
                    .get_table_list(&export_self.catalog, &schema)
                    .await?;

                // Create directory if needed for file system storage
                if !export_self.s3 {
                    let db_dir = format!("{}/{}/", export_self.catalog, schema);
                    operator.create_dir(&db_dir).await.context(OpenDalSnafu)?;
                }

                let file_path = export_self.get_file_path(&schema, "create_tables.sql");
                let mut content = Vec::new();

                // Add table creation SQL
                for (c, s, t) in metric_physical_tables.iter().chain(&remaining_tables) {
                    let create_table = export_self.show_create("TABLE", c, s, Some(t)).await?;
                    content.extend_from_slice(create_table.as_bytes());
                }

                // Add view creation SQL
                for (c, s, v) in &views {
                    let create_view = export_self.show_create("VIEW", c, s, Some(v)).await?;
                    content.extend_from_slice(create_view.as_bytes());
                }

                // Write to storage
                export_self
                    .write_to_storage(&operator, &file_path, content)
                    .await?;

                info!(
                    "Finished exporting {}.{schema} with {table_count} table schemas to path: {}",
                    self.catalog,
                    db_dir.to_string_lossy()
                    "Finished exporting {}.{schema} with {} table schemas to path: {}",
                    export_self.catalog,
                    metric_physical_tables.len() + remaining_tables.len() + views.len(),
                    export_self.format_output_path(&file_path)
                );

                Ok::<(), Error>(())
            });
        }

        let success = futures::future::join_all(tasks)
            .await
            .into_iter()
            .filter(|r| match r {
                Ok(_) => true,
                Err(e) => {
                    error!(e; "export schema job failed");
                    false
                }
            })
            .count();

        let success = self.execute_tasks(tasks).await;
        let elapsed = timer.elapsed();
        info!("Success {success}/{db_count} jobs, cost: {elapsed:?}");

        Ok(())
    }

    async fn build_operator(&self) -> Result<Operator> {
        if self.s3 {
            self.build_s3_operator().await
        } else {
            self.build_fs_operator().await
        }
    }

    async fn build_s3_operator(&self) -> Result<Operator> {
        let mut builder = services::S3::default().root("").bucket(
            self.s3_bucket
                .as_ref()
                .expect("s3_bucket must be provided when s3 is enabled"),
        );

        if let Some(endpoint) = self.s3_endpoint.as_ref() {
            builder = builder.endpoint(endpoint);
        }

        if let Some(region) = self.s3_region.as_ref() {
            builder = builder.region(region);
        }

        if let Some(key_id) = self.s3_access_key.as_ref() {
            builder = builder.access_key_id(key_id);
        }

        if let Some(secret_key) = self.s3_secret_key.as_ref() {
            builder = builder.secret_access_key(secret_key);
        }

        let op = Operator::new(builder)
            .context(OpenDalSnafu)?
            .layer(LoggingLayer::default())
            .finish();
        Ok(op)
    }

    async fn build_fs_operator(&self) -> Result<Operator> {
        let root = self
            .output_dir
            .as_ref()
            .context(OutputDirNotSetSnafu)?
            .clone();
        let op = Operator::new(services::Fs::default().root(&root))
            .context(OpenDalSnafu)?
            .layer(LoggingLayer::default())
            .finish();
        Ok(op)
    }
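
Both builders return a plain opendal `Operator`, so the rest of the export path is storage-agnostic; a minimal usage sketch (path and payload invented):

// Illustrative only: the same write call targets s3://bucket/... or
// <output_dir>/... depending on which builder produced `op`.
let op = self.build_operator().await?;
op.write("greptime/public/create_tables.sql", b"-- ddl".to_vec())
    .await
    .context(OpenDalSnafu)?;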

    async fn export_database_data(&self) -> Result<()> {
        let timer = Instant::now();
        let semaphore = Arc::new(Semaphore::new(self.parallelism));
        let db_names = self.get_db_names().await?;
        let db_count = db_names.len();
        let mut tasks = Vec::with_capacity(db_count);
        let operator = Arc::new(self.build_operator().await?);
        let with_options = build_with_options(&self.start_time, &self.end_time);

        for schema in db_names {
            let semaphore_moved = semaphore.clone();
            let export_self = self.clone();
            let with_options_clone = with_options.clone();
            let operator = operator.clone();

            tasks.push(async move {
                let _permit = semaphore_moved.acquire().await.unwrap();
                let db_dir = self.catalog_path().join(format!("{schema}/"));
                tokio::fs::create_dir_all(&db_dir)
                    .await
                    .context(FileIoSnafu)?;

                let with_options = match (&self.start_time, &self.end_time) {
                    (Some(start_time), Some(end_time)) => {
                        format!(
                            "WITH (FORMAT='parquet', start_time='{}', end_time='{}')",
                            start_time, end_time
                        )
                    }
                    (Some(start_time), None) => {
                        format!("WITH (FORMAT='parquet', start_time='{}')", start_time)
                    }
                    (None, Some(end_time)) => {
                        format!("WITH (FORMAT='parquet', end_time='{}')", end_time)
                    }
                    (None, None) => "WITH (FORMAT='parquet')".to_string(),
                };
                // Create directory if not using S3
                if !export_self.s3 {
                    let db_dir = format!("{}/{}/", export_self.catalog, schema);
                    operator.create_dir(&db_dir).await.context(OpenDalSnafu)?;
                }

                let (path, connection_part) = export_self.get_storage_params(&schema);

                // Execute COPY DATABASE TO command
                let sql = format!(
                    r#"COPY DATABASE "{}"."{}" TO '{}' {};"#,
                    self.catalog,
                    schema,
                    db_dir.to_str().unwrap(),
                    with_options
                    r#"COPY DATABASE "{}"."{}" TO '{}' WITH ({}){};"#,
                    export_self.catalog, schema, path, with_options_clone, connection_part
                );
                info!("Executing sql: {sql}");
                export_self.database_client.sql_in_public(&sql).await?;
                info!(
                    "Finished exporting {}.{} data to {}",
                    export_self.catalog, schema, path
                );

                info!("Executing sql: {sql}");
                // Create copy_from.sql file
                let copy_database_from_sql = format!(
                    r#"COPY DATABASE "{}"."{}" FROM '{}' WITH ({}){};"#,
                    export_self.catalog, schema, path, with_options_clone, connection_part
                );

                self.database_client.sql_in_public(&sql).await?;
                let copy_from_path = export_self.get_file_path(&schema, "copy_from.sql");
                export_self
                    .write_to_storage(
                        &operator,
                        &copy_from_path,
                        copy_database_from_sql.into_bytes(),
                    )
                    .await?;

                info!(
                    "Finished exporting {}.{schema} data into path: {}",
                    self.catalog,
                    db_dir.to_string_lossy()
                );

                // The export copy from sql
                let copy_from_file = db_dir.join("copy_from.sql");
                let mut writer =
                    BufWriter::new(File::create(copy_from_file).await.context(FileIoSnafu)?);
                let copy_database_from_sql = format!(
                    r#"COPY DATABASE "{}"."{}" FROM '{}' WITH (FORMAT='parquet');"#,
                    self.catalog,
                    "Finished exporting {}.{} copy_from.sql to {}",
                    export_self.catalog,
                    schema,
                    db_dir.to_str().unwrap()
                    export_self.format_output_path(&copy_from_path)
                );
                writer
                    .write(copy_database_from_sql.as_bytes())
                    .await
                    .context(FileIoSnafu)?;
                writer.flush().await.context(FileIoSnafu)?;

                info!("Finished exporting {}.{schema} copy_from.sql", self.catalog);

                Ok::<(), Error>(())
            })
            });
        }

        let success = futures::future::join_all(tasks)
        let success = self.execute_tasks(tasks).await;
        let elapsed = timer.elapsed();
        info!("Success {success}/{db_count} jobs, costs: {elapsed:?}");

        Ok(())
    }

    fn get_file_path(&self, schema: &str, file_name: &str) -> String {
        format!("{}/{}/{}", self.catalog, schema, file_name)
    }

    fn format_output_path(&self, file_path: &str) -> String {
        if self.s3 {
            format!(
                "s3://{}/{}",
                self.s3_bucket.as_ref().unwrap_or(&String::new()),
                file_path
            )
        } else {
            format!(
                "{}/{}",
                self.output_dir.as_ref().unwrap_or(&String::new()),
                file_path
            )
        }
    }

    async fn write_to_storage(
        &self,
        op: &Operator,
        file_path: &str,
        content: Vec<u8>,
    ) -> Result<()> {
        op.write(file_path, content).await.context(OpenDalSnafu)
    }

    fn get_storage_params(&self, schema: &str) -> (String, String) {
        if self.s3 {
            let s3_path = format!(
                "s3://{}/{}/{}/",
                // Safety: s3_bucket is required when s3 is enabled
                self.s3_bucket.as_ref().unwrap(),
                self.catalog,
                schema
            );

            // endpoint is optional
            let endpoint_option = if let Some(endpoint) = self.s3_endpoint.as_ref() {
                format!(", ENDPOINT='{}'", endpoint)
            } else {
                String::new()
            };

            // Safety: All s3 options are required
            let connection_options = format!(
                "ACCESS_KEY_ID='{}', SECRET_ACCESS_KEY='{}', REGION='{}'{}",
                self.s3_access_key.as_ref().unwrap(),
                self.s3_secret_key.as_ref().unwrap(),
                self.s3_region.as_ref().unwrap(),
                endpoint_option
            );

            (s3_path, format!(" CONNECTION ({})", connection_options))
        } else {
            (
                self.catalog_path()
                    .join(format!("{schema}/"))
                    .to_string_lossy()
                    .to_string(),
                String::new(),
            )
        }
    }
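
Combining `get_storage_params` with `build_with_options`, the statement sent to the server comes out roughly as below for a hypothetical bucket and catalog (credentials elided, values invented):

// Illustrative only: example shape of the generated statement.
//
//   COPY DATABASE "greptime"."public" TO 's3://my-bucket/greptime/public/'
//   WITH (format = 'parquet')
//   CONNECTION (ACCESS_KEY_ID='...', SECRET_ACCESS_KEY='...', REGION='us-east-1');
let (path, connection_part) = export.get_storage_params("public"); // sketch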

    async fn execute_tasks(
        &self,
        tasks: Vec<impl std::future::Future<Output = Result<()>>>,
    ) -> usize {
        futures::future::join_all(tasks)
            .await
            .into_iter()
            .filter(|r| match r {
                Ok(_) => true,
                Err(e) => {
                    error!(e; "export database job failed");
                    error!(e; "export job failed");
                    false
                }
            })
            .count();
        let elapsed = timer.elapsed();

        info!("Success {success}/{db_count} jobs, costs: {elapsed:?}");

        Ok(())
            .count()
    }
}

@@ -493,3 +679,15 @@ impl Tool for Export {
    }
}

/// Builds the WITH options string for SQL commands, assuming consistent syntax across S3 and local exports.
fn build_with_options(start_time: &Option<String>, end_time: &Option<String>) -> String {
    let mut options = vec!["format = 'parquet'".to_string()];
    if let Some(start) = start_time {
        options.push(format!("start_time = '{}'", start));
    }
    if let Some(end) = end_time {
        options.push(format!("end_time = '{}'", end));
    }
    options.join(", ")
}
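
For example (timestamps invented):

// Illustrative only; follows directly from the function body above.
assert_eq!(
    build_with_options(&Some("2025-01-01 00:00:00".to_string()), &None),
    "format = 'parquet', start_time = '2025-01-01 00:00:00'"
);
assert_eq!(build_with_options(&None, &None), "format = 'parquet'");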

@@ -287,7 +287,6 @@ impl StartCommand {
            .await
            .context(StartDatanodeSnafu)?;

        let cluster_id = 0; // TODO(hl): read from config
        let member_id = opts
            .node_id
            .context(MissingConfigSnafu { msg: "'node_id'" })?;
@@ -296,13 +295,10 @@ impl StartCommand {
            msg: "'meta_client_options'",
        })?;

        let meta_client = meta_client::create_meta_client(
            cluster_id,
            MetaClientType::Datanode { member_id },
            meta_config,
        )
        .await
        .context(MetaClientInitSnafu)?;
        let meta_client =
            meta_client::create_meta_client(MetaClientType::Datanode { member_id }, meta_config)
                .await
                .context(MetaClientInitSnafu)?;

        let meta_backend = Arc::new(MetaKvBackend {
            client: meta_client.clone(),
@@ -410,7 +406,7 @@ mod tests {
        sync_write = false

        [storage]
        data_home = "/tmp/greptimedb/"
        data_home = "./greptimedb_data/"
        type = "File"

        [[storage.providers]]
@@ -424,7 +420,7 @@ mod tests {

        [logging]
        level = "debug"
        dir = "/tmp/greptimedb/test/logs"
        dir = "./greptimedb_data/test/logs"
        "#;
        write!(file, "{}", toml_str).unwrap();

@@ -471,7 +467,7 @@ mod tests {
        assert_eq!(10000, ddl_timeout.as_millis());
        assert_eq!(3000, timeout.as_millis());
        assert!(tcp_nodelay);
        assert_eq!("/tmp/greptimedb/", options.storage.data_home);
        assert_eq!("./greptimedb_data/", options.storage.data_home);
        assert!(matches!(
            &options.storage.store,
            ObjectStoreConfig::File(FileConfig { .. })
@@ -487,7 +483,10 @@ mod tests {
        ));

        assert_eq!("debug", options.logging.level.unwrap());
        assert_eq!("/tmp/greptimedb/test/logs".to_string(), options.logging.dir);
        assert_eq!(
            "./greptimedb_data/test/logs".to_string(),
            options.logging.dir
        );
    }

    #[test]
@@ -530,7 +529,7 @@ mod tests {

        let options = cmd
            .load_options(&GlobalOptions {
                log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
                log_dir: Some("./greptimedb_data/test/logs".to_string()),
                log_level: Some("debug".to_string()),

                #[cfg(feature = "tokio-console")]
@@ -540,7 +539,7 @@ mod tests {
            .component;

        let logging_opt = options.logging;
        assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
        assert_eq!("./greptimedb_data/test/logs", logging_opt.dir);
        assert_eq!("debug", logging_opt.level.as_ref().unwrap());
    }

@@ -569,11 +568,11 @@ mod tests {

        [storage]
        type = "File"
        data_home = "/tmp/greptimedb/"
        data_home = "./greptimedb_data/"

        [logging]
        level = "debug"
        dir = "/tmp/greptimedb/test/logs"
        dir = "./greptimedb_data/test/logs"
        "#;
        write!(file, "{}", toml_str).unwrap();

@@ -241,9 +241,6 @@ impl StartCommand {
        let mut opts = opts.component;
        opts.grpc.detect_server_addr();

        // TODO(discord9): make it not optional after cluster id is required
|
||||
let cluster_id = opts.cluster_id.unwrap_or(0);
|
||||
|
||||
let member_id = opts
|
||||
.node_id
|
||||
.context(MissingConfigSnafu { msg: "'node_id'" })?;
|
||||
@@ -252,13 +249,10 @@ impl StartCommand {
|
||||
msg: "'meta_client_options'",
|
||||
})?;
|
||||
|
||||
let meta_client = meta_client::create_meta_client(
|
||||
cluster_id,
|
||||
MetaClientType::Flownode { member_id },
|
||||
meta_config,
|
||||
)
|
||||
.await
|
||||
.context(MetaClientInitSnafu)?;
|
||||
let meta_client =
|
||||
meta_client::create_meta_client(MetaClientType::Flownode { member_id }, meta_config)
|
||||
.await
|
||||
.context(MetaClientInitSnafu)?;
|
||||
|
||||
let cache_max_capacity = meta_config.metadata_cache_max_capacity;
|
||||
let cache_ttl = meta_config.metadata_cache_ttl;
|
||||
|
||||
@@ -295,14 +295,10 @@ impl StartCommand {
|
||||
let cache_ttl = meta_client_options.metadata_cache_ttl;
|
||||
let cache_tti = meta_client_options.metadata_cache_tti;
|
||||
|
||||
let cluster_id = 0; // (TODO: jeremy): It is currently a reserved field and has not been enabled.
|
||||
let meta_client = meta_client::create_meta_client(
|
||||
cluster_id,
|
||||
MetaClientType::Frontend,
|
||||
meta_client_options,
|
||||
)
|
||||
.await
|
||||
.context(MetaClientInitSnafu)?;
|
||||
let meta_client =
|
||||
meta_client::create_meta_client(MetaClientType::Frontend, meta_client_options)
|
||||
.await
|
||||
.context(MetaClientInitSnafu)?;
|
||||
|
||||
// TODO(discord9): add helper function to ease the creation of cache registry&such
|
||||
let cached_meta_backend =
|
||||
@@ -452,7 +448,7 @@ mod tests {
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
dir = "./greptimedb_data/test/logs"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
@@ -470,7 +466,10 @@ mod tests {
|
||||
assert_eq!(ReadableSize::gb(2), fe_opts.http.body_limit);
|
||||
|
||||
assert_eq!("debug", fe_opts.logging.level.as_ref().unwrap());
|
||||
assert_eq!("/tmp/greptimedb/test/logs".to_string(), fe_opts.logging.dir);
|
||||
assert_eq!(
|
||||
"./greptimedb_data/test/logs".to_string(),
|
||||
fe_opts.logging.dir
|
||||
);
|
||||
assert!(!fe_opts.opentsdb.enable);
|
||||
}
|
||||
|
||||
@@ -509,7 +508,7 @@ mod tests {
|
||||
|
||||
let options = cmd
|
||||
.load_options(&GlobalOptions {
|
||||
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
|
||||
log_dir: Some("./greptimedb_data/test/logs".to_string()),
|
||||
log_level: Some("debug".to_string()),
|
||||
|
||||
#[cfg(feature = "tokio-console")]
|
||||
@@ -519,7 +518,7 @@ mod tests {
|
||||
.component;
|
||||
|
||||
let logging_opt = options.logging;
|
||||
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
|
||||
assert_eq!("./greptimedb_data/test/logs", logging_opt.dir);
|
||||
assert_eq!("debug", logging_opt.level.as_ref().unwrap());
|
||||
}
|
||||
|
||||
|
||||
@@ -337,7 +337,7 @@ mod tests {
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
dir = "./greptimedb_data/test/logs"
|
||||
|
||||
[failure_detector]
|
||||
threshold = 8.0
|
||||
@@ -358,7 +358,10 @@ mod tests {
|
||||
assert_eq!(vec!["127.0.0.1:2379".to_string()], options.store_addrs);
|
||||
assert_eq!(SelectorType::LeaseBased, options.selector);
|
||||
assert_eq!("debug", options.logging.level.as_ref().unwrap());
|
||||
assert_eq!("/tmp/greptimedb/test/logs".to_string(), options.logging.dir);
|
||||
assert_eq!(
|
||||
"./greptimedb_data/test/logs".to_string(),
|
||||
options.logging.dir
|
||||
);
|
||||
assert_eq!(8.0, options.failure_detector.threshold);
|
||||
assert_eq!(
|
||||
100.0,
|
||||
@@ -396,7 +399,7 @@ mod tests {
|
||||
|
||||
let options = cmd
|
||||
.load_options(&GlobalOptions {
|
||||
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
|
||||
log_dir: Some("./greptimedb_data/test/logs".to_string()),
|
||||
log_level: Some("debug".to_string()),
|
||||
|
||||
#[cfg(feature = "tokio-console")]
|
||||
@@ -406,7 +409,7 @@ mod tests {
|
||||
.component;
|
||||
|
||||
let logging_opt = options.logging;
|
||||
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
|
||||
assert_eq!("./greptimedb_data/test/logs", logging_opt.dir);
|
||||
assert_eq!("debug", logging_opt.level.as_ref().unwrap());
|
||||
}
|
||||
|
||||
@@ -424,7 +427,7 @@ mod tests {
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
dir = "./greptimedb_data/test/logs"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
|
||||
@@ -852,7 +852,7 @@ mod tests {
|
||||
|
||||
[wal]
|
||||
provider = "raft_engine"
|
||||
dir = "/tmp/greptimedb/test/wal"
|
||||
dir = "./greptimedb_data/test/wal"
|
||||
file_size = "1GB"
|
||||
purge_threshold = "50GB"
|
||||
purge_interval = "10m"
|
||||
@@ -860,7 +860,7 @@ mod tests {
|
||||
sync_write = false
|
||||
|
||||
[storage]
|
||||
data_home = "/tmp/greptimedb/"
|
||||
data_home = "./greptimedb_data/"
|
||||
type = "File"
|
||||
|
||||
[[storage.providers]]
|
||||
@@ -892,7 +892,7 @@ mod tests {
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
dir = "./greptimedb_data/test/logs"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
let cmd = StartCommand {
|
||||
@@ -922,7 +922,10 @@ mod tests {
|
||||
let DatanodeWalConfig::RaftEngine(raft_engine_config) = dn_opts.wal else {
|
||||
unreachable!()
|
||||
};
|
||||
assert_eq!("/tmp/greptimedb/test/wal", raft_engine_config.dir.unwrap());
|
||||
assert_eq!(
|
||||
"./greptimedb_data/test/wal",
|
||||
raft_engine_config.dir.unwrap()
|
||||
);
|
||||
|
||||
assert!(matches!(
|
||||
&dn_opts.storage.store,
|
||||
@@ -946,7 +949,7 @@ mod tests {
|
||||
}
|
||||
|
||||
assert_eq!("debug", logging_opts.level.as_ref().unwrap());
|
||||
assert_eq!("/tmp/greptimedb/test/logs".to_string(), logging_opts.dir);
|
||||
assert_eq!("./greptimedb_data/test/logs".to_string(), logging_opts.dir);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -958,7 +961,7 @@ mod tests {
|
||||
|
||||
let opts = cmd
|
||||
.load_options(&GlobalOptions {
|
||||
log_dir: Some("/tmp/greptimedb/test/logs".to_string()),
|
||||
log_dir: Some("./greptimedb_data/test/logs".to_string()),
|
||||
log_level: Some("debug".to_string()),
|
||||
|
||||
#[cfg(feature = "tokio-console")]
|
||||
@@ -967,7 +970,7 @@ mod tests {
|
||||
.unwrap()
|
||||
.component;
|
||||
|
||||
assert_eq!("/tmp/greptimedb/test/logs", opts.logging.dir);
|
||||
assert_eq!("./greptimedb_data/test/logs", opts.logging.dir);
|
||||
assert_eq!("debug", opts.logging.level.unwrap());
|
||||
}
|
||||
|
||||
|
||||
@@ -56,13 +56,13 @@ fn test_load_datanode_example_config() {
|
||||
metadata_cache_tti: Duration::from_secs(300),
|
||||
}),
|
||||
wal: DatanodeWalConfig::RaftEngine(RaftEngineConfig {
|
||||
dir: Some("/tmp/greptimedb/wal".to_string()),
|
||||
dir: Some("./greptimedb_data/wal".to_string()),
|
||||
sync_period: Some(Duration::from_secs(10)),
|
||||
recovery_parallelism: 2,
|
||||
..Default::default()
|
||||
}),
|
||||
storage: StorageConfig {
|
||||
data_home: "/tmp/greptimedb/".to_string(),
|
||||
data_home: "./greptimedb_data/".to_string(),
|
||||
..Default::default()
|
||||
},
|
||||
region_engine: vec![
|
||||
@@ -159,10 +159,10 @@ fn test_load_metasrv_example_config() {
|
||||
let expected = GreptimeOptions::<MetasrvOptions> {
|
||||
component: MetasrvOptions {
|
||||
selector: SelectorType::default(),
|
||||
data_home: "/tmp/metasrv/".to_string(),
|
||||
data_home: "./greptimedb_data/metasrv/".to_string(),
|
||||
server_addr: "127.0.0.1:3002".to_string(),
|
||||
logging: LoggingOptions {
|
||||
dir: "/tmp/greptimedb/logs".to_string(),
|
||||
dir: "./greptimedb_data/logs".to_string(),
|
||||
level: Some("info".to_string()),
|
||||
otlp_endpoint: Some(DEFAULT_OTLP_ENDPOINT.to_string()),
|
||||
tracing_sample_ratio: Some(Default::default()),
|
||||
@@ -202,7 +202,7 @@ fn test_load_standalone_example_config() {
|
||||
component: StandaloneOptions {
|
||||
default_timezone: Some("UTC".to_string()),
|
||||
wal: DatanodeWalConfig::RaftEngine(RaftEngineConfig {
|
||||
dir: Some("/tmp/greptimedb/wal".to_string()),
|
||||
dir: Some("./greptimedb_data/wal".to_string()),
|
||||
sync_period: Some(Duration::from_secs(10)),
|
||||
recovery_parallelism: 2,
|
||||
..Default::default()
|
||||
@@ -219,7 +219,7 @@ fn test_load_standalone_example_config() {
|
||||
}),
|
||||
],
|
||||
storage: StorageConfig {
|
||||
data_home: "/tmp/greptimedb/".to_string(),
|
||||
data_home: "./greptimedb_data/".to_string(),
|
||||
..Default::default()
|
||||
},
|
||||
logging: LoggingOptions {
|
||||
|
||||
@@ -161,7 +161,7 @@ mod tests {
|
||||
|
||||
[wal]
|
||||
provider = "raft_engine"
|
||||
dir = "/tmp/greptimedb/wal"
|
||||
dir = "./greptimedb_data/wal"
|
||||
file_size = "1GB"
|
||||
purge_threshold = "50GB"
|
||||
purge_interval = "10m"
|
||||
@@ -170,7 +170,7 @@ mod tests {
|
||||
|
||||
[logging]
|
||||
level = "debug"
|
||||
dir = "/tmp/greptimedb/test/logs"
|
||||
dir = "./greptimedb_data/test/logs"
|
||||
"#;
|
||||
write!(file, "{}", toml_str).unwrap();
|
||||
|
||||
@@ -246,7 +246,7 @@ mod tests {
|
||||
let DatanodeWalConfig::RaftEngine(raft_engine_config) = opts.wal else {
|
||||
unreachable!()
|
||||
};
|
||||
assert_eq!(raft_engine_config.dir.unwrap(), "/tmp/greptimedb/wal");
|
||||
assert_eq!(raft_engine_config.dir.unwrap(), "./greptimedb_data/wal");
|
||||
|
||||
// Should be default values.
|
||||
assert_eq!(opts.node_id, None);
|
||||
|
||||
@@ -17,6 +17,7 @@ api.workspace = true
arc-swap = "1.0"
async-trait.workspace = true
bincode = "1.3"
chrono.workspace = true
common-base.workspace = true
common-catalog.workspace = true
common-error.workspace = true

@@ -43,7 +43,6 @@ impl Function for DateFormatFunction {
helper::one_of_sigs2(
vec![
ConcreteDataType::date_datatype(),
ConcreteDataType::datetime_datatype(),
ConcreteDataType::timestamp_second_datatype(),
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::timestamp_microsecond_datatype(),
@@ -105,22 +104,6 @@ impl Function for DateFormatFunction {
results.push(result.as_deref());
}
}
ConcreteDataType::DateTime(_) => {
for i in 0..size {
let datetime = left.get(i).as_datetime();
let format = formats.get(i).as_string();

let result = match (datetime, format) {
(Some(datetime), Some(fmt)) => datetime
.as_formatted_string(&fmt, Some(&func_ctx.query_ctx.timezone()))
.map_err(BoxedError::new)
.context(error::ExecuteSnafu)?,
_ => None,
};

results.push(result.as_deref());
}
}
_ => {
return UnsupportedInputDataTypeSnafu {
function: NAME,
@@ -147,7 +130,7 @@ mod tests {
use common_query::prelude::{TypeSignature, Volatility};
use datatypes::prelude::{ConcreteDataType, ScalarVector};
use datatypes::value::Value;
use datatypes::vectors::{DateTimeVector, DateVector, StringVector, TimestampSecondVector};
use datatypes::vectors::{DateVector, StringVector, TimestampSecondVector};

use super::{DateFormatFunction, *};

@@ -169,16 +152,11 @@ mod tests {
ConcreteDataType::string_datatype(),
f.return_type(&[ConcreteDataType::date_datatype()]).unwrap()
);
assert_eq!(
ConcreteDataType::string_datatype(),
f.return_type(&[ConcreteDataType::datetime_datatype()])
.unwrap()
);
assert!(matches!(f.signature(),
Signature {
type_signature: TypeSignature::OneOf(sigs),
volatility: Volatility::Immutable
} if sigs.len() == 6));
} if sigs.len() == 5));
}

#[test]
@@ -262,45 +240,4 @@ mod tests {
}
}
}

#[test]
fn test_datetime_date_format() {
let f = DateFormatFunction;

let dates = vec![Some(123), None, Some(42), None];
let formats = vec![
"%Y-%m-%d %T.%3f",
"%Y-%m-%d %T.%3f",
"%Y-%m-%d %T.%3f",
"%Y-%m-%d %T.%3f",
];
let results = [
Some("1970-01-01 00:00:00.123"),
None,
Some("1970-01-01 00:00:00.042"),
None,
];

let date_vector = DateTimeVector::from(dates.clone());
let interval_vector = StringVector::from_vec(formats);
let args: Vec<VectorRef> = vec![Arc::new(date_vector), Arc::new(interval_vector)];
let vector = f.eval(&FunctionContext::default(), &args).unwrap();

assert_eq!(4, vector.len());
for (i, _t) in dates.iter().enumerate() {
let v = vector.get(i);
let result = results.get(i).unwrap();

if result.is_none() {
assert_eq!(Value::Null, v);
continue;
}
match v {
Value::String(s) => {
assert_eq!(s.as_utf8(), result.unwrap());
}
_ => unreachable!(),
}
}
}
}

@@ -118,11 +118,6 @@ mod tests {
ConcreteDataType::date_datatype(),
f.return_type(&[ConcreteDataType::date_datatype()]).unwrap()
);
assert_eq!(
ConcreteDataType::datetime_datatype(),
f.return_type(&[ConcreteDataType::datetime_datatype()])
.unwrap()
);
assert!(
matches!(f.signature(),
Signature {

@@ -23,7 +23,7 @@ use datatypes::arrow::array::AsArray;
use datatypes::arrow::compute::cast;
use datatypes::arrow::compute::kernels::zip;
use datatypes::arrow::datatypes::{
DataType as ArrowDataType, Date32Type, Date64Type, TimestampMicrosecondType,
DataType as ArrowDataType, Date32Type, TimeUnit, TimestampMicrosecondType,
TimestampMillisecondType, TimestampNanosecondType, TimestampSecondType,
};
use datatypes::prelude::ConcreteDataType;
@@ -69,9 +69,8 @@ impl Function for GreatestFunction {
);

match &input_types[0] {
ConcreteDataType::String(_) => Ok(ConcreteDataType::datetime_datatype()),
ConcreteDataType::String(_) => Ok(ConcreteDataType::timestamp_millisecond_datatype()),
ConcreteDataType::Date(_) => Ok(ConcreteDataType::date_datatype()),
ConcreteDataType::DateTime(_) => Ok(ConcreteDataType::datetime_datatype()),
ConcreteDataType::Timestamp(ts_type) => Ok(ConcreteDataType::Timestamp(*ts_type)),
_ => UnsupportedInputDataTypeSnafu {
function: NAME,
@@ -87,7 +86,6 @@ impl Function for GreatestFunction {
vec![
ConcreteDataType::string_datatype(),
ConcreteDataType::date_datatype(),
ConcreteDataType::datetime_datatype(),
ConcreteDataType::timestamp_nanosecond_datatype(),
ConcreteDataType::timestamp_microsecond_datatype(),
ConcreteDataType::timestamp_millisecond_datatype(),
@@ -109,20 +107,24 @@ impl Function for GreatestFunction {
);
match columns[0].data_type() {
ConcreteDataType::String(_) => {
// Treats string as `DateTime` type.
let column1 = cast(&columns[0].to_arrow_array(), &ArrowDataType::Date64)
.context(ArrowComputeSnafu)?;
let column1 = column1.as_primitive::<Date64Type>();
let column2 = cast(&columns[1].to_arrow_array(), &ArrowDataType::Date64)
.context(ArrowComputeSnafu)?;
let column2 = column2.as_primitive::<Date64Type>();
let column1 = cast(
&columns[0].to_arrow_array(),
&ArrowDataType::Timestamp(TimeUnit::Millisecond, None),
)
.context(ArrowComputeSnafu)?;
let column1 = column1.as_primitive::<TimestampMillisecondType>();
let column2 = cast(
&columns[1].to_arrow_array(),
&ArrowDataType::Timestamp(TimeUnit::Millisecond, None),
)
.context(ArrowComputeSnafu)?;
let column2 = column2.as_primitive::<TimestampMillisecondType>();
let boolean_array = gt(&column1, &column2).context(ArrowComputeSnafu)?;
let result =
zip::zip(&boolean_array, &column1, &column2).context(ArrowComputeSnafu)?;
Ok(Helper::try_into_vector(&result).context(error::FromArrowArraySnafu)?)
}
ConcreteDataType::Date(_) => gt_time_types!(Date32Type, columns),
ConcreteDataType::DateTime(_) => gt_time_types!(Date64Type, columns),
ConcreteDataType::Timestamp(ts_type) => match ts_type {
TimestampType::Second(_) => gt_time_types!(TimestampSecondType, columns),
TimestampType::Millisecond(_) => {
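Editor's sketch, not part of the commit: the string branch above now casts both arguments to millisecond timestamps before comparing, instead of `Date64`. A minimal standalone illustration of that cast-then-compare pattern against the `arrow` crate follows; the array values and the direct `arrow` dependency are illustrative assumptions, and the row-wise max is written by hand rather than via the `gt`/`zip` kernels the real code uses.

use std::sync::Arc;

use arrow::array::{ArrayRef, StringArray, TimestampMillisecondArray};
use arrow::compute::cast;
use arrow::datatypes::{DataType, TimeUnit};

fn main() {
    let lhs: ArrayRef = Arc::new(StringArray::from(vec![
        "2001-02-01T00:00:00",
        "2012-12-23T00:00:00",
    ]));
    let rhs: ArrayRef = Arc::new(StringArray::from(vec![
        "1999-01-01T00:00:00",
        "2024-01-01T00:00:00",
    ]));
    // Strings are parsed as millisecond timestamps, matching the new return type.
    let ty = DataType::Timestamp(TimeUnit::Millisecond, None);
    let lhs = cast(&lhs, &ty).unwrap();
    let rhs = cast(&rhs, &ty).unwrap();
    let lhs = lhs.as_any().downcast_ref::<TimestampMillisecondArray>().unwrap();
    let rhs = rhs.as_any().downcast_ref::<TimestampMillisecondArray>().unwrap();
    // Row-wise greatest; nulls propagate, as in the kernel-based version.
    let greatest: TimestampMillisecondArray = lhs
        .iter()
        .zip(rhs.iter())
        .map(|(a, b)| match (a, b) {
            (Some(a), Some(b)) => Some(a.max(b)),
            _ => None,
        })
        .collect();
    assert_eq!(greatest.value(0), lhs.value(0)); // 2001 > 1999
    assert_eq!(greatest.value(1), rhs.value(1)); // 2024 > 2012
}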
@@ -155,15 +157,15 @@ mod tests {
use std::sync::Arc;

use common_time::timestamp::TimeUnit;
use common_time::{Date, DateTime, Timestamp};
use common_time::{Date, Timestamp};
use datatypes::types::{
DateTimeType, DateType, TimestampMicrosecondType, TimestampMillisecondType,
TimestampNanosecondType, TimestampSecondType,
DateType, TimestampMicrosecondType, TimestampMillisecondType, TimestampNanosecondType,
TimestampSecondType,
};
use datatypes::value::Value;
use datatypes::vectors::{
DateTimeVector, DateVector, StringVector, TimestampMicrosecondVector,
TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, Vector,
DateVector, StringVector, TimestampMicrosecondVector, TimestampMillisecondVector,
TimestampNanosecondVector, TimestampSecondVector, Vector,
};
use paste::paste;

@@ -178,7 +180,7 @@ mod tests {
ConcreteDataType::string_datatype()
])
.unwrap(),
ConcreteDataType::DateTime(DateTimeType)
ConcreteDataType::timestamp_millisecond_datatype()
);
let columns = vec![
Arc::new(StringVector::from(vec![
@@ -194,15 +196,18 @@ mod tests {
let result = function
.eval(&FunctionContext::default(), &columns)
.unwrap();
let result = result.as_any().downcast_ref::<DateTimeVector>().unwrap();
let result = result
.as_any()
.downcast_ref::<TimestampMillisecondVector>()
.unwrap();
assert_eq!(result.len(), 2);
assert_eq!(
result.get(0),
Value::DateTime(DateTime::from_str("2001-02-01 00:00:00", None).unwrap())
Value::Timestamp(Timestamp::from_str("2001-02-01 00:00:00", None).unwrap())
);
assert_eq!(
result.get(1),
Value::DateTime(DateTime::from_str("2012-12-23 00:00:00", None).unwrap())
Value::Timestamp(Timestamp::from_str("2012-12-23 00:00:00", None).unwrap())
);
}

@@ -245,30 +250,33 @@ mod tests {
assert_eq!(
function
.return_type(&[
ConcreteDataType::datetime_datatype(),
ConcreteDataType::datetime_datatype()
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::timestamp_millisecond_datatype()
])
.unwrap(),
ConcreteDataType::DateTime(DateTimeType)
ConcreteDataType::timestamp_millisecond_datatype()
);

let columns = vec![
Arc::new(DateTimeVector::from_slice(vec![-1, 2])) as _,
Arc::new(DateTimeVector::from_slice(vec![0, 1])) as _,
Arc::new(TimestampMillisecondVector::from_slice(vec![-1, 2])) as _,
Arc::new(TimestampMillisecondVector::from_slice(vec![0, 1])) as _,
];

let result = function
.eval(&FunctionContext::default(), &columns)
.unwrap();
let result = result.as_any().downcast_ref::<DateTimeVector>().unwrap();
let result = result
.as_any()
.downcast_ref::<TimestampMillisecondVector>()
.unwrap();
assert_eq!(result.len(), 2);
assert_eq!(
result.get(0),
Value::DateTime(DateTime::from_str("1970-01-01 00:00:00", None).unwrap())
Value::Timestamp(Timestamp::from_str("1970-01-01 00:00:00", None).unwrap())
);
assert_eq!(
result.get(1),
Value::DateTime(DateTime::from_str("1970-01-01 00:00:00.002", None).unwrap())
Value::Timestamp(Timestamp::from_str("1970-01-01 00:00:00.002", None).unwrap())
);
}

@@ -17,7 +17,7 @@ use std::sync::Arc;

use common_query::error::{InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu};
use common_query::prelude::{Signature, Volatility};
use common_time::{Date, DateTime, Timestamp};
use common_time::{Date, Timestamp};
use datatypes::prelude::ConcreteDataType;
use datatypes::vectors::{Int64Vector, VectorRef};
use snafu::ensure;
@@ -32,10 +32,6 @@ const NAME: &str = "to_unixtime";

fn convert_to_seconds(arg: &str, func_ctx: &FunctionContext) -> Option<i64> {
let timezone = &func_ctx.query_ctx.timezone();
if let Ok(dt) = DateTime::from_str(arg, Some(timezone)) {
return Some(dt.val() / 1000);
}

if let Ok(ts) = Timestamp::from_str(arg, Some(timezone)) {
return Some(ts.split().0);
}
@@ -59,12 +55,6 @@ fn convert_dates_to_seconds(vector: &VectorRef) -> Vec<Option<i64>> {
.collect::<Vec<Option<i64>>>()
}

fn convert_datetimes_to_seconds(vector: &VectorRef) -> Vec<Option<i64>> {
(0..vector.len())
.map(|i| vector.get(i).as_datetime().map(|dt| dt.val() / 1000))
.collect::<Vec<Option<i64>>>()
}

impl Function for ToUnixtimeFunction {
fn name(&self) -> &str {
NAME
@@ -82,7 +72,6 @@ impl Function for ToUnixtimeFunction {
ConcreteDataType::int32_datatype(),
ConcreteDataType::int64_datatype(),
ConcreteDataType::date_datatype(),
ConcreteDataType::datetime_datatype(),
ConcreteDataType::timestamp_second_datatype(),
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::timestamp_microsecond_datatype(),
@@ -119,10 +108,6 @@ impl Function for ToUnixtimeFunction {
let seconds = convert_dates_to_seconds(vector);
Ok(Arc::new(Int64Vector::from(seconds)))
}
ConcreteDataType::DateTime(_) => {
let seconds = convert_datetimes_to_seconds(vector);
Ok(Arc::new(Int64Vector::from(seconds)))
}
ConcreteDataType::Timestamp(_) => {
let seconds = convert_timestamps_to_seconds(vector);
Ok(Arc::new(Int64Vector::from(seconds)))
@@ -148,7 +133,7 @@ mod tests {
use datatypes::prelude::ConcreteDataType;
use datatypes::value::Value;
use datatypes::vectors::{
DateTimeVector, DateVector, StringVector, TimestampMillisecondVector, TimestampSecondVector,
DateVector, StringVector, TimestampMillisecondVector, TimestampSecondVector,
};

use super::{ToUnixtimeFunction, *};
@@ -171,7 +156,6 @@ mod tests {
ConcreteDataType::int32_datatype(),
ConcreteDataType::int64_datatype(),
ConcreteDataType::date_datatype(),
ConcreteDataType::datetime_datatype(),
ConcreteDataType::timestamp_second_datatype(),
ConcreteDataType::timestamp_millisecond_datatype(),
ConcreteDataType::timestamp_microsecond_datatype(),
@@ -253,31 +237,6 @@ mod tests {
}
}

#[test]
fn test_datetime_to_unixtime() {
let f = ToUnixtimeFunction;

let times = vec![Some(123000), None, Some(42000), None];
let results = [Some(123), None, Some(42), None];
let date_vector = DateTimeVector::from(times.clone());
let args: Vec<VectorRef> = vec![Arc::new(date_vector)];
let vector = f.eval(&FunctionContext::default(), &args).unwrap();
assert_eq!(4, vector.len());
for (i, _t) in times.iter().enumerate() {
let v = vector.get(i);
if i == 1 || i == 3 {
assert_eq!(Value::Null, v);
continue;
}
match v {
Value::Int64(ts) => {
assert_eq!(ts, (*results.get(i).unwrap()).unwrap());
}
_ => unreachable!(),
}
}
}

#[test]
fn test_timestamp_to_unixtime() {
let f = ToUnixtimeFunction;

@@ -27,6 +27,7 @@ mod vector_div;
mod vector_mul;
mod vector_norm;
mod vector_sub;
mod vector_subvector;

use std::sync::Arc;

@@ -56,6 +57,7 @@ impl VectorFunction {
registry.register(Arc::new(vector_div::VectorDivFunction));
registry.register(Arc::new(vector_norm::VectorNormFunction));
registry.register(Arc::new(vector_dim::VectorDimFunction));
registry.register(Arc::new(vector_subvector::VectorSubvectorFunction));
registry.register(Arc::new(elem_sum::ElemSumFunction));
registry.register(Arc::new(elem_product::ElemProductFunction));
}
240 src/common/function/src/scalars/vector/vector_subvector.rs Normal file
@@ -0,0 +1,240 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::borrow::Cow;
use std::fmt::Display;

use common_query::error::{InvalidFuncArgsSnafu, Result};
use common_query::prelude::{Signature, TypeSignature};
use datafusion_expr::Volatility;
use datatypes::prelude::ConcreteDataType;
use datatypes::scalars::ScalarVectorBuilder;
use datatypes::vectors::{BinaryVectorBuilder, MutableVector, VectorRef};
use snafu::ensure;

use crate::function::{Function, FunctionContext};
use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const, veclit_to_binlit};

const NAME: &str = "vec_subvector";

/// Returns a subvector from the start (inclusive) to the end (exclusive) index.
///
/// # Example
///
/// ```sql
/// SELECT vec_to_string(vec_subvector("[1, 2, 3, 4, 5]", 1, 3)) as result;
///
/// +---------+
/// | result |
/// +---------+
/// | [2, 3] |
/// +---------+
///
/// ```
///

#[derive(Debug, Clone, Default)]
pub struct VectorSubvectorFunction;

impl Function for VectorSubvectorFunction {
fn name(&self) -> &str {
NAME
}

fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
Ok(ConcreteDataType::binary_datatype())
}

fn signature(&self) -> Signature {
Signature::one_of(
vec![
TypeSignature::Exact(vec![
ConcreteDataType::string_datatype(),
ConcreteDataType::int64_datatype(),
ConcreteDataType::int64_datatype(),
]),
TypeSignature::Exact(vec![
ConcreteDataType::binary_datatype(),
ConcreteDataType::int64_datatype(),
ConcreteDataType::int64_datatype(),
]),
],
Volatility::Immutable,
)
}

fn eval(&self, _func_ctx: &FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure!(
columns.len() == 3,
InvalidFuncArgsSnafu {
err_msg: format!(
"The length of the args is not correct, expect exactly three, have: {}",
columns.len()
)
}
);

let arg0 = &columns[0];
let arg1 = &columns[1];
let arg2 = &columns[2];

ensure!(
arg0.len() == arg1.len() && arg1.len() == arg2.len(),
InvalidFuncArgsSnafu {
err_msg: format!(
"The lengths of the vector are not aligned, args 0: {}, args 1: {}, args 2: {}",
arg0.len(),
arg1.len(),
arg2.len()
)
}
);

let len = arg0.len();
let mut result = BinaryVectorBuilder::with_capacity(len);
if len == 0 {
return Ok(result.to_vector());
}

let arg0_const = as_veclit_if_const(arg0)?;

for i in 0..len {
let arg0 = match arg0_const.as_ref() {
Some(arg0) => Some(Cow::Borrowed(arg0.as_ref())),
None => as_veclit(arg0.get_ref(i))?,
};
let arg1 = arg1.get(i).as_i64();
let arg2 = arg2.get(i).as_i64();
let (Some(arg0), Some(arg1), Some(arg2)) = (arg0, arg1, arg2) else {
result.push_null();
continue;
};

ensure!(
0 <= arg1 && arg1 <= arg2 && arg2 as usize <= arg0.len(),
InvalidFuncArgsSnafu {
err_msg: format!(
"Invalid start and end indices: start={}, end={}, vec_len={}",
arg1,
arg2,
arg0.len()
)
}
);

let subvector = &arg0[arg1 as usize..arg2 as usize];
let binlit = veclit_to_binlit(subvector);
result.push(Some(&binlit));
}

Ok(result.to_vector())
}
}

impl Display for VectorSubvectorFunction {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", NAME.to_ascii_uppercase())
}
}

#[cfg(test)]
mod tests {
use std::sync::Arc;

use common_query::error::Error;
use datatypes::vectors::{Int64Vector, StringVector};

use super::*;
use crate::function::FunctionContext;
#[test]
fn test_subvector() {
let func = VectorSubvectorFunction;

let input0 = Arc::new(StringVector::from(vec![
Some("[1.0, 2.0, 3.0, 4.0, 5.0]".to_string()),
Some("[6.0, 7.0, 8.0, 9.0, 10.0]".to_string()),
None,
Some("[11.0, 12.0, 13.0]".to_string()),
]));
let input1 = Arc::new(Int64Vector::from(vec![Some(1), Some(0), Some(0), Some(1)]));
let input2 = Arc::new(Int64Vector::from(vec![Some(3), Some(5), Some(2), Some(3)]));

let result = func
.eval(&FunctionContext::default(), &[input0, input1, input2])
.unwrap();

let result = result.as_ref();
assert_eq!(result.len(), 4);
assert_eq!(
result.get_ref(0).as_binary().unwrap(),
Some(veclit_to_binlit(&[2.0, 3.0]).as_slice())
);
assert_eq!(
result.get_ref(1).as_binary().unwrap(),
Some(veclit_to_binlit(&[6.0, 7.0, 8.0, 9.0, 10.0]).as_slice())
);
assert!(result.get_ref(2).is_null());
assert_eq!(
result.get_ref(3).as_binary().unwrap(),
Some(veclit_to_binlit(&[12.0, 13.0]).as_slice())
);
}
#[test]
fn test_subvector_error() {
let func = VectorSubvectorFunction;

let input0 = Arc::new(StringVector::from(vec![
Some("[1.0, 2.0, 3.0]".to_string()),
Some("[4.0, 5.0, 6.0]".to_string()),
]));
let input1 = Arc::new(Int64Vector::from(vec![Some(1), Some(2)]));
let input2 = Arc::new(Int64Vector::from(vec![Some(3)]));

let result = func.eval(&FunctionContext::default(), &[input0, input1, input2]);

match result {
Err(Error::InvalidFuncArgs { err_msg, .. }) => {
assert_eq!(
err_msg,
"The lengths of the vector are not aligned, args 0: 2, args 1: 2, args 2: 1"
)
}
_ => unreachable!(),
}
}

#[test]
fn test_subvector_invalid_indices() {
let func = VectorSubvectorFunction;

let input0 = Arc::new(StringVector::from(vec![
Some("[1.0, 2.0, 3.0]".to_string()),
Some("[4.0, 5.0, 6.0]".to_string()),
]));
let input1 = Arc::new(Int64Vector::from(vec![Some(1), Some(3)]));
let input2 = Arc::new(Int64Vector::from(vec![Some(3), Some(4)]));

let result = func.eval(&FunctionContext::default(), &[input0, input1, input2]);

match result {
Err(Error::InvalidFuncArgs { err_msg, .. }) => {
assert_eq!(
err_msg,
"Invalid start and end indices: start=3, end=4, vec_len=3"
)
}
_ => unreachable!(),
}
}
}
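Editor's sketch, not part of the new file: the slicing rule `vec_subvector` enforces, with the start inclusive and the end exclusive, reduced to plain `f32` slices. The helper name is made up; the real function operates on vector literals and their binary encoding.

// Mirrors `0 <= arg1 && arg1 <= arg2 && arg2 as usize <= arg0.len()` above.
fn subvector(v: &[f32], start: i64, end: i64) -> Option<Vec<f32>> {
    if 0 <= start && start <= end && end as usize <= v.len() {
        Some(v[start as usize..end as usize].to_vec())
    } else {
        None // the real function raises InvalidFuncArgs here
    }
}

fn main() {
    let v = [1.0, 2.0, 3.0, 4.0, 5.0];
    // SELECT vec_subvector('[1, 2, 3, 4, 5]', 1, 3) keeps indices 1 and 2.
    assert_eq!(subvector(&v, 1, 3), Some(vec![2.0, 3.0]));
    assert_eq!(subvector(&v, 3, 4), Some(vec![4.0]));
    assert_eq!(subvector(&v, 3, 2), None); // start > end is rejected
    assert_eq!(subvector(&v, 3, 6), None); // end past the vector is rejected
}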
@@ -17,8 +17,8 @@ use api::v1::column::Values;
use common_base::BitVec;
use datatypes::types::{IntervalType, TimeType, TimestampType, WrapperType};
use datatypes::vectors::{
BinaryVector, BooleanVector, DateTimeVector, DateVector, Decimal128Vector, Float32Vector,
Float64Vector, Int16Vector, Int32Vector, Int64Vector, Int8Vector, IntervalDayTimeVector,
BinaryVector, BooleanVector, DateVector, Decimal128Vector, Float32Vector, Float64Vector,
Int16Vector, Int32Vector, Int64Vector, Int8Vector, IntervalDayTimeVector,
IntervalMonthDayNanoVector, IntervalYearMonthVector, StringVector, TimeMicrosecondVector,
TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector,
TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt16Vector,
@@ -141,12 +141,6 @@ pub fn values(arrays: &[VectorRef]) -> Result<Values> {
(ConcreteDataType::Date(_), DateVector, date_values, |x| {
x.val()
}),
(
ConcreteDataType::DateTime(_),
DateTimeVector,
datetime_values,
|x| { x.val() }
),
(
ConcreteDataType::Timestamp(TimestampType::Second(_)),
TimestampSecondVector,

@@ -18,11 +18,13 @@ mod print_caller;
mod range_fn;
mod stack_trace_debug;
mod utils;

use aggr_func::{impl_aggr_func_type_store, impl_as_aggr_func_creator};
use print_caller::process_print_caller;
use proc_macro::TokenStream;
use quote::quote;
use range_fn::process_range_fn;
use syn::{parse_macro_input, DeriveInput};
use syn::{parse_macro_input, Data, DeriveInput, Fields};

use crate::admin_fn::process_admin_fn;

@@ -136,3 +138,51 @@ pub fn print_caller(args: TokenStream, input: TokenStream) -> TokenStream {
pub fn stack_trace_debug(args: TokenStream, input: TokenStream) -> TokenStream {
stack_trace_debug::stack_trace_style_impl(args.into(), input.into()).into()
}

/// Generates an implementation of `From<&TableMeta> for TableMetaBuilder`.
#[proc_macro_derive(ToMetaBuilder)]
pub fn derive_meta_builder(input: TokenStream) -> TokenStream {
let input = parse_macro_input!(input as DeriveInput);

let Data::Struct(data_struct) = input.data else {
panic!("ToMetaBuilder can only be derived for structs");
};

let Fields::Named(fields) = data_struct.fields else {
panic!("ToMetaBuilder can only be derived for structs with named fields");
};

// Check that this is being applied to TableMeta struct
if input.ident != "TableMeta" {
panic!("ToMetaBuilder can only be derived for TableMeta struct");
}

let field_init = fields.named.iter().map(|field| {
let field_name = field.ident.as_ref().unwrap();
quote! {
#field_name: Default::default(),
}
});

let field_assignments = fields.named.iter().map(|field| {
let field_name = field.ident.as_ref().unwrap();
quote! {
builder.#field_name(meta.#field_name.clone());
}
});

let gen = quote! {
impl From<&TableMeta> for TableMetaBuilder {
fn from(meta: &TableMeta) -> Self {
let mut builder = Self {
#(#field_init)*
};

#(#field_assignments)*
builder
}
}
};

gen.into()
}
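Editor's sketch: roughly what the `ToMetaBuilder` derive above expands to, written by hand for a made-up two-field `TableMeta`. The real structs live in the `table` crate and have many more fields; the field names here are illustrative assumptions.

// Hypothetical stand-ins for the real `table` crate types.
struct TableMeta {
    engine: String,
    region_numbers: Vec<u32>,
}

#[derive(Default)]
struct TableMetaBuilder {
    engine: Option<String>,
    region_numbers: Option<Vec<u32>>,
}

impl TableMetaBuilder {
    fn engine(&mut self, v: String) -> &mut Self { self.engine = Some(v); self }
    fn region_numbers(&mut self, v: Vec<u32>) -> &mut Self { self.region_numbers = Some(v); self }
}

// Approximately the code the macro emits: default-initialize every named
// field of the builder, then assign each field from the source struct.
impl From<&TableMeta> for TableMetaBuilder {
    fn from(meta: &TableMeta) -> Self {
        let mut builder = Self::default();
        builder.engine(meta.engine.clone());
        builder.region_numbers(meta.region_numbers.clone());
        builder
    }
}

fn main() {
    let meta = TableMeta { engine: "mito".to_string(), region_numbers: vec![0, 1] };
    let builder = TableMetaBuilder::from(&meta);
    assert_eq!(builder.engine.as_deref(), Some("mito"));
    assert_eq!(builder.region_numbers, Some(vec![0, 1]));
}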
@@ -7,6 +7,7 @@ license.workspace = true
[features]
testing = []
pg_kvbackend = ["dep:tokio-postgres", "dep:backon", "dep:deadpool-postgres", "dep:deadpool"]
mysql_kvbackend = ["dep:sqlx", "dep:backon"]

[lints]
workspace = true
@@ -57,9 +58,10 @@ serde_json.workspace = true
serde_with.workspace = true
session.workspace = true
snafu.workspace = true
sqlx = { workspace = true, optional = true }
store-api.workspace = true
strum.workspace = true
table.workspace = true
table = { workspace = true, features = ["testing"] }
tokio.workspace = true
tokio-postgres = { workspace = true, optional = true }
tonic.workspace = true

@@ -192,6 +192,8 @@ mod tests {
expire_after: Some(300),
comment: "comment".to_string(),
options: Default::default(),
created_time: chrono::Utc::now(),
updated_time: chrono::Utc::now(),
},
(1..=3)
.map(|i| {

@@ -28,7 +28,6 @@ use crate::error::{
InvalidRoleSnafu, ParseNumSnafu, Result,
};
use crate::peer::Peer;
use crate::ClusterId;

const CLUSTER_NODE_INFO_PREFIX: &str = "__meta_cluster_node_info";

@@ -56,12 +55,9 @@ pub trait ClusterInfo {
// TODO(jeremy): Other info, like region status, etc.
}

/// The key of [NodeInfo] in the storage. The format is `__meta_cluster_node_info-{cluster_id}-{role}-{node_id}`.
/// The key of [NodeInfo] in the storage. The format is `__meta_cluster_node_info-0-{role}-{node_id}`.
#[derive(Debug, Clone, Copy, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct NodeInfoKey {
/// The cluster id.
// todo(hl): remove cluster_id as it is not assigned anywhere.
pub cluster_id: ClusterId,
/// The role of the node. It can be `[Role::Datanode]` or `[Role::Frontend]`.
pub role: Role,
/// The node id.
@@ -84,24 +80,15 @@ impl NodeInfoKey {
_ => peer.id,
};

Some(NodeInfoKey {
cluster_id: header.cluster_id,
role,
node_id,
})
Some(NodeInfoKey { role, node_id })
}

pub fn key_prefix_with_cluster_id(cluster_id: u64) -> String {
format!("{}-{}-", CLUSTER_NODE_INFO_PREFIX, cluster_id)
pub fn key_prefix() -> String {
format!("{}-0-", CLUSTER_NODE_INFO_PREFIX)
}

pub fn key_prefix_with_role(cluster_id: ClusterId, role: Role) -> String {
format!(
"{}-{}-{}-",
CLUSTER_NODE_INFO_PREFIX,
cluster_id,
i32::from(role)
)
pub fn key_prefix_with_role(role: Role) -> String {
format!("{}-0-{}-", CLUSTER_NODE_INFO_PREFIX, i32::from(role))
}
}

@@ -193,15 +180,10 @@ impl FromStr for NodeInfoKey {
let caps = CLUSTER_NODE_INFO_PREFIX_PATTERN
.captures(key)
.context(InvalidNodeInfoKeySnafu { key })?;

ensure!(caps.len() == 4, InvalidNodeInfoKeySnafu { key });

let cluster_id = caps[1].to_string();
let role = caps[2].to_string();
let node_id = caps[3].to_string();
let cluster_id: u64 = cluster_id.parse().context(ParseNumSnafu {
err_msg: format!("invalid cluster_id: {cluster_id}"),
})?;
let role: i32 = role.parse().context(ParseNumSnafu {
err_msg: format!("invalid role {role}"),
})?;
@@ -210,11 +192,7 @@ impl FromStr for NodeInfoKey {
err_msg: format!("invalid node_id: {node_id}"),
})?;

Ok(Self {
cluster_id,
role,
node_id,
})
Ok(Self { role, node_id })
}
}

@@ -233,9 +211,8 @@ impl TryFrom<Vec<u8>> for NodeInfoKey {
impl From<&NodeInfoKey> for Vec<u8> {
fn from(key: &NodeInfoKey) -> Self {
format!(
"{}-{}-{}-{}",
"{}-0-{}-{}",
CLUSTER_NODE_INFO_PREFIX,
key.cluster_id,
i32::from(key.role),
key.node_id
)
@@ -308,7 +285,6 @@ mod tests {
#[test]
fn test_node_info_key_round_trip() {
let key = NodeInfoKey {
cluster_id: 1,
role: Datanode,
node_id: 2,
};
@@ -316,7 +292,6 @@ mod tests {
let key_bytes: Vec<u8> = (&key).into();
let new_key: NodeInfoKey = key_bytes.try_into().unwrap();

assert_eq!(1, new_key.cluster_id);
assert_eq!(Datanode, new_key.role);
assert_eq!(2, new_key.node_id);
}
@@ -362,11 +337,11 @@ mod tests {

#[test]
fn test_node_info_key_prefix() {
let prefix = NodeInfoKey::key_prefix_with_cluster_id(1);
assert_eq!(prefix, "__meta_cluster_node_info-1-");
let prefix = NodeInfoKey::key_prefix();
assert_eq!(prefix, "__meta_cluster_node_info-0-");

let prefix = NodeInfoKey::key_prefix_with_role(2, Frontend);
assert_eq!(prefix, "__meta_cluster_node_info-2-1-");
let prefix = NodeInfoKey::key_prefix_with_role(Frontend);
assert_eq!(prefix, "__meta_cluster_node_info-0-1-");
}
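Editor's sketch of the concrete effect on stored keys: with the cluster id pinned to `0`, every deployment shares one key namespace. The prefix constant is copied from the code above; the role and node values below are made-up examples (the tests above suggest `Frontend` maps to `1`).

fn main() {
    const PREFIX: &str = "__meta_cluster_node_info";
    let (role, node_id) = (1, 2); // a frontend node with id 2
    // Old layout: `{prefix}-{cluster_id}-{role}-{node_id}`, e.g. cluster 2.
    assert_eq!(
        format!("{PREFIX}-{}-{}-{}", 2, role, node_id),
        "__meta_cluster_node_info-2-1-2"
    );
    // New layout: the cluster id segment is hardwired to `0`.
    assert_eq!(
        format!("{PREFIX}-0-{}-{}", role, node_id),
        "__meta_cluster_node_info-0-1-2"
    );
}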
#[test]

@@ -25,8 +25,8 @@ use store_api::region_engine::{RegionRole, RegionStatistic};
use store_api::storage::RegionId;
use table::metadata::TableId;

use crate::error;
use crate::error::Result;
use crate::{error, ClusterId};

pub(crate) const DATANODE_LEASE_PREFIX: &str = "__meta_datanode_lease";
const INACTIVE_REGION_PREFIX: &str = "__meta_inactive_region";
@@ -48,11 +48,10 @@ lazy_static! {

/// The key of the datanode stat in the storage.
///
/// The format is `__meta_datanode_stat-{cluster_id}-{node_id}`.
/// The format is `__meta_datanode_stat-0-{node_id}`.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct Stat {
pub timestamp_millis: i64,
pub cluster_id: ClusterId,
// The datanode Id.
pub id: u64,
// The datanode address.
@@ -102,10 +101,7 @@ impl Stat {
}

pub fn stat_key(&self) -> DatanodeStatKey {
DatanodeStatKey {
cluster_id: self.cluster_id,
node_id: self.id,
}
DatanodeStatKey { node_id: self.id }
}

/// Returns a tuple array containing [RegionId] and [RegionRole].
@@ -145,7 +141,7 @@ impl TryFrom<&HeartbeatRequest> for Stat {
} = value;

match (header, peer) {
(Some(header), Some(peer)) => {
(Some(_header), Some(peer)) => {
let region_stats = region_stats
.iter()
.map(RegionStat::from)
@@ -153,7 +149,6 @@ impl TryFrom<&HeartbeatRequest> for Stat {

Ok(Self {
timestamp_millis: time_util::current_time_millis(),
cluster_id: header.cluster_id,
// datanode id
id: peer.id,
// datanode address
@@ -196,32 +191,24 @@ impl From<&api::v1::meta::RegionStat> for RegionStat {

/// The key of the datanode stat in the memory store.
///
/// The format is `__meta_datanode_stat-{cluster_id}-{node_id}`.
/// The format is `__meta_datanode_stat-0-{node_id}`.
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
pub struct DatanodeStatKey {
pub cluster_id: ClusterId,
pub node_id: u64,
}

impl DatanodeStatKey {
/// The key prefix.
pub fn prefix_key() -> Vec<u8> {
format!("{DATANODE_STAT_PREFIX}-").into_bytes()
}

/// The key prefix with the cluster id.
pub fn key_prefix_with_cluster_id(cluster_id: ClusterId) -> String {
format!("{DATANODE_STAT_PREFIX}-{cluster_id}-")
// todo(hl): remove cluster id in prefix
format!("{DATANODE_STAT_PREFIX}-0-").into_bytes()
}
}

impl From<DatanodeStatKey> for Vec<u8> {
fn from(value: DatanodeStatKey) -> Self {
format!(
"{}-{}-{}",
DATANODE_STAT_PREFIX, value.cluster_id, value.node_id
)
.into_bytes()
// todo(hl): remove cluster id in prefix
format!("{}-0-{}", DATANODE_STAT_PREFIX, value.node_id).into_bytes()
}
}

@@ -234,20 +221,12 @@ impl FromStr for DatanodeStatKey {
.context(error::InvalidStatKeySnafu { key })?;

ensure!(caps.len() == 3, error::InvalidStatKeySnafu { key });

let cluster_id = caps[1].to_string();
let node_id = caps[2].to_string();
let cluster_id: u64 = cluster_id.parse().context(error::ParseNumSnafu {
err_msg: format!("invalid cluster_id: {cluster_id}"),
})?;
let node_id: u64 = node_id.parse().context(error::ParseNumSnafu {
err_msg: format!("invalid node_id: {node_id}"),
})?;

Ok(Self {
cluster_id,
node_id,
})
Ok(Self { node_id })
}
}

@@ -321,7 +300,6 @@ mod tests {
#[test]
fn test_stat_key() {
let stat = Stat {
cluster_id: 3,
id: 101,
region_num: 10,
..Default::default()
@@ -329,14 +307,12 @@ mod tests {

let stat_key = stat.stat_key();

assert_eq!(3, stat_key.cluster_id);
assert_eq!(101, stat_key.node_id);
}

#[test]
fn test_stat_val_round_trip() {
let stat = Stat {
cluster_id: 0,
id: 101,
region_num: 100,
..Default::default()
@@ -351,7 +327,6 @@ mod tests {
assert_eq!(1, stats.len());

let stat = stats.first().unwrap();
assert_eq!(0, stat.cluster_id);
assert_eq!(101, stat.id);
assert_eq!(100, stat.region_num);
}

@@ -30,7 +30,7 @@ use crate::node_manager::NodeManagerRef;
use crate::region_keeper::MemoryRegionKeeperRef;
use crate::rpc::ddl::{SubmitDdlTaskRequest, SubmitDdlTaskResponse};
use crate::rpc::procedure::{MigrateRegionRequest, MigrateRegionResponse, ProcedureStateResponse};
use crate::{ClusterId, DatanodeId};
use crate::DatanodeId;

pub mod alter_database;
pub mod alter_logical_tables;
@@ -57,7 +57,6 @@ pub mod utils;

#[derive(Debug, Default)]
pub struct ExecutorContext {
pub cluster_id: Option<u64>,
pub tracing_context: Option<W3cTrace>,
}

@@ -90,10 +89,6 @@ pub trait ProcedureExecutor: Send + Sync {

pub type ProcedureExecutorRef = Arc<dyn ProcedureExecutor>;

pub struct TableMetadataAllocatorContext {
pub cluster_id: ClusterId,
}

/// Metadata allocated to a table.
#[derive(Default)]
pub struct TableMetadata {
@@ -108,7 +103,7 @@ pub struct TableMetadata {

pub type RegionFailureDetectorControllerRef = Arc<dyn RegionFailureDetectorController>;

pub type DetectingRegion = (ClusterId, DatanodeId, RegionId);
pub type DetectingRegion = (DatanodeId, RegionId);

/// Used for actively registering Region failure detectors.
///

@@ -30,7 +30,6 @@ use crate::key::DeserializedValueWithBytes;
use crate::lock_key::{CatalogLock, SchemaLock};
use crate::rpc::ddl::UnsetDatabaseOption::{self};
use crate::rpc::ddl::{AlterDatabaseKind, AlterDatabaseTask, SetDatabaseOption};
use crate::ClusterId;

pub struct AlterDatabaseProcedure {
pub context: DdlContext,
@@ -65,14 +64,10 @@ fn build_new_schema_value(
impl AlterDatabaseProcedure {
pub const TYPE_NAME: &'static str = "metasrv-procedure::AlterDatabase";

pub fn new(
cluster_id: ClusterId,
task: AlterDatabaseTask,
context: DdlContext,
) -> Result<Self> {
pub fn new(task: AlterDatabaseTask, context: DdlContext) -> Result<Self> {
Ok(Self {
context,
data: AlterDatabaseData::new(task, cluster_id)?,
data: AlterDatabaseData::new(task)?,
})
}

@@ -183,7 +178,6 @@ enum AlterDatabaseState {
/// The data of alter database procedure.
#[derive(Debug, Serialize, Deserialize)]
pub struct AlterDatabaseData {
cluster_id: ClusterId,
state: AlterDatabaseState,
kind: AlterDatabaseKind,
catalog_name: String,
@@ -192,9 +186,8 @@ pub struct AlterDatabaseData {
}

impl AlterDatabaseData {
pub fn new(task: AlterDatabaseTask, cluster_id: ClusterId) -> Result<Self> {
pub fn new(task: AlterDatabaseTask) -> Result<Self> {
Ok(Self {
cluster_id,
state: AlterDatabaseState::Prepare,
kind: AlterDatabaseKind::try_from(task.alter_expr.kind.unwrap())?,
catalog_name: task.alter_expr.catalog_name,

@@ -37,9 +37,9 @@ use crate::key::table_info::TableInfoValue;
use crate::key::table_route::PhysicalTableRouteValue;
use crate::key::DeserializedValueWithBytes;
use crate::lock_key::{CatalogLock, SchemaLock, TableLock};
use crate::metrics;
use crate::rpc::ddl::AlterTableTask;
use crate::rpc::router::find_leaders;
use crate::{metrics, ClusterId};

pub struct AlterLogicalTablesProcedure {
pub context: DdlContext,
@@ -50,7 +50,6 @@ impl AlterLogicalTablesProcedure {
pub const TYPE_NAME: &'static str = "metasrv-procedure::AlterLogicalTables";

pub fn new(
cluster_id: ClusterId,
tasks: Vec<AlterTableTask>,
physical_table_id: TableId,
context: DdlContext,
@@ -58,7 +57,6 @@ impl AlterLogicalTablesProcedure {
Self {
context,
data: AlterTablesData {
cluster_id,
state: AlterTablesState::Prepare,
tasks,
table_info_values: vec![],
@@ -240,7 +238,6 @@ impl Procedure for AlterLogicalTablesProcedure {

#[derive(Debug, Serialize, Deserialize)]
pub struct AlterTablesData {
cluster_id: ClusterId,
state: AlterTablesState,
tasks: Vec<AlterTableTask>,
/// Table info values before the alter operation.

@@ -45,9 +45,9 @@ use crate::instruction::CacheIdent;
use crate::key::table_info::TableInfoValue;
use crate::key::{DeserializedValueWithBytes, RegionDistribution};
use crate::lock_key::{CatalogLock, SchemaLock, TableLock, TableNameLock};
use crate::metrics;
use crate::rpc::ddl::AlterTableTask;
use crate::rpc::router::{find_leader_regions, find_leaders, region_distribution};
use crate::{metrics, ClusterId};

/// The alter table procedure
pub struct AlterTableProcedure {
@@ -64,16 +64,11 @@ pub struct AlterTableProcedure {
impl AlterTableProcedure {
pub const TYPE_NAME: &'static str = "metasrv-procedure::AlterTable";

pub fn new(
cluster_id: ClusterId,
table_id: TableId,
task: AlterTableTask,
context: DdlContext,
) -> Result<Self> {
pub fn new(table_id: TableId, task: AlterTableTask, context: DdlContext) -> Result<Self> {
task.validate()?;
Ok(Self {
context,
data: AlterTableData::new(task, table_id, cluster_id),
data: AlterTableData::new(task, table_id),
new_table_info: None,
})
}
@@ -307,7 +302,6 @@ enum AlterTableState {
// The serialized data of alter table.
#[derive(Debug, Serialize, Deserialize)]
pub struct AlterTableData {
cluster_id: ClusterId,
state: AlterTableState,
task: AlterTableTask,
table_id: TableId,
@@ -318,12 +312,11 @@ pub struct AlterTableData {
}

impl AlterTableData {
pub fn new(task: AlterTableTask, table_id: TableId, cluster_id: u64) -> Self {
pub fn new(task: AlterTableTask, table_id: TableId) -> Self {
Self {
state: AlterTableState::Prepare,
task,
table_id,
cluster_id,
table_info_value: None,
region_distribution: None,
}

@@ -167,10 +167,9 @@ mod tests {
use crate::test_util::{new_ddl_context, MockDatanodeManager};

/// Prepares a region with schema `[ts: Timestamp, host: Tag, cpu: Field]`.
async fn prepare_ddl_context() -> (DdlContext, u64, TableId, RegionId, String) {
async fn prepare_ddl_context() -> (DdlContext, TableId, RegionId, String) {
let datanode_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(datanode_manager);
let cluster_id = 1;
let table_id = 1024;
let region_id = RegionId::new(table_id, 1);
let table_name = "foo";
@@ -225,19 +224,12 @@ mod tests {
)
.await
.unwrap();
(
ddl_context,
cluster_id,
table_id,
region_id,
table_name.to_string(),
)
(ddl_context, table_id, region_id, table_name.to_string())
}

#[tokio::test]
async fn test_make_alter_region_request() {
let (ddl_context, cluster_id, table_id, region_id, table_name) =
prepare_ddl_context().await;
let (ddl_context, table_id, region_id, table_name) = prepare_ddl_context().await;

let task = AlterTableTask {
alter_table: AlterTableExpr {
@@ -265,8 +257,7 @@ mod tests {
},
};

let mut procedure =
AlterTableProcedure::new(cluster_id, table_id, task, ddl_context).unwrap();
let mut procedure = AlterTableProcedure::new(table_id, task, ddl_context).unwrap();
procedure.on_prepare().await.unwrap();
let alter_kind = procedure.make_region_alter_kind().unwrap();
let Some(Body::Alter(alter_region_request)) = procedure
@@ -307,8 +298,7 @@ mod tests {

#[tokio::test]
async fn test_make_alter_column_type_region_request() {
let (ddl_context, cluster_id, table_id, region_id, table_name) =
prepare_ddl_context().await;
let (ddl_context, table_id, region_id, table_name) = prepare_ddl_context().await;

let task = AlterTableTask {
alter_table: AlterTableExpr {
@@ -325,8 +315,7 @@ mod tests {
},
};

let mut procedure =
AlterTableProcedure::new(cluster_id, table_id, task, ddl_context).unwrap();
let mut procedure = AlterTableProcedure::new(table_id, task, ddl_context).unwrap();
procedure.on_prepare().await.unwrap();
let alter_kind = procedure.make_region_alter_kind().unwrap();
let Some(Body::Alter(alter_region_request)) = procedure

@@ -46,9 +46,9 @@ use crate::key::flow::flow_route::FlowRouteValue;
use crate::key::table_name::TableNameKey;
use crate::key::{DeserializedValueWithBytes, FlowId, FlowPartitionId};
use crate::lock_key::{CatalogLock, FlowNameLock, TableNameLock};
use crate::metrics;
use crate::peer::Peer;
use crate::rpc::ddl::{CreateFlowTask, QueryContext};
use crate::{metrics, ClusterId};

/// The procedure of flow creation.
pub struct CreateFlowProcedure {
@@ -60,16 +60,10 @@ impl CreateFlowProcedure {
pub const TYPE_NAME: &'static str = "metasrv-procedure::CreateFlow";

/// Returns a new [CreateFlowProcedure].
pub fn new(
cluster_id: ClusterId,
task: CreateFlowTask,
query_context: QueryContext,
context: DdlContext,
) -> Self {
pub fn new(task: CreateFlowTask, query_context: QueryContext, context: DdlContext) -> Self {
Self {
context,
data: CreateFlowData {
cluster_id,
task,
flow_id: None,
peers: vec![],
@@ -363,7 +357,6 @@ impl fmt::Display for FlowType {
/// The serializable data.
#[derive(Debug, Serialize, Deserialize)]
pub struct CreateFlowData {
pub(crate) cluster_id: ClusterId,
pub(crate) state: CreateFlowState,
pub(crate) task: CreateFlowTask,
pub(crate) flow_id: Option<FlowId>,
@@ -432,7 +425,14 @@ impl From<&CreateFlowData> for (FlowInfoValue, Vec<(FlowPartitionId, FlowRouteVa
let flow_type = value.flow_type.unwrap_or_default().to_string();
options.insert("flow_type".to_string(), flow_type);

let flow_info = FlowInfoValue {
let mut create_time = chrono::Utc::now();
if let Some(prev_flow_value) = value.prev_flow_info_value.as_ref()
&& value.task.or_replace
{
create_time = prev_flow_value.get_inner_ref().created_time;
}

let flow_info: FlowInfoValue = FlowInfoValue {
source_table_ids: value.source_table_ids.clone(),
sink_table_name,
flownode_ids,
@@ -442,6 +442,8 @@ impl From<&CreateFlowData> for (FlowInfoValue, Vec<(FlowPartitionId, FlowRouteVa
expire_after,
comment,
options,
created_time: create_time,
updated_time: chrono::Utc::now(),
};

(flow_info, flow_routes)
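Editor's sketch of the timestamp rule the hunk above adds: on `CREATE OR REPLACE`, the previous flow's `created_time` is carried over and only `updated_time` advances. The structs below are simplified stand-ins for `FlowInfoValue`.

use chrono::{DateTime, Utc};

struct FlowTimestamps {
    created_time: DateTime<Utc>,
    updated_time: DateTime<Utc>,
}

fn next_timestamps(prev: Option<&FlowTimestamps>, or_replace: bool) -> FlowTimestamps {
    let now = Utc::now();
    let created_time = match prev {
        // Replacing an existing flow keeps its original creation time.
        Some(prev) if or_replace => prev.created_time,
        _ => now,
    };
    FlowTimestamps { created_time, updated_time: now }
}

fn main() {
    let first = next_timestamps(None, false);
    let replaced = next_timestamps(Some(&first), true);
    assert_eq!(replaced.created_time, first.created_time);
    assert!(replaced.updated_time >= first.updated_time);
}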
@@ -23,11 +23,10 @@ impl CreateFlowProcedure {
|
||||
pub(crate) async fn allocate_flow_id(&mut self) -> Result<()> {
|
||||
//TODO(weny, ruihang): We doesn't support the partitions. It's always be 1, now.
|
||||
let partitions = 1;
|
||||
let cluster_id = self.data.cluster_id;
|
||||
let (flow_id, peers) = self
|
||||
.context
|
||||
.flow_metadata_allocator
|
||||
.create(cluster_id, partitions)
|
||||
.create(partitions)
|
||||
.await?;
|
||||
self.data.flow_id = Some(flow_id);
|
||||
self.data.peers = peers;
|
||||
|
||||
@@ -36,9 +36,9 @@ use crate::ddl::DdlContext;
|
||||
use crate::error::{DecodeJsonSnafu, MetadataCorruptionSnafu, Result};
|
||||
use crate::key::table_route::TableRouteValue;
|
||||
use crate::lock_key::{CatalogLock, SchemaLock, TableLock, TableNameLock};
|
||||
use crate::metrics;
|
||||
use crate::rpc::ddl::CreateTableTask;
|
||||
use crate::rpc::router::{find_leaders, RegionRoute};
|
||||
use crate::{metrics, ClusterId};
|
||||
|
||||
pub struct CreateLogicalTablesProcedure {
|
||||
pub context: DdlContext,
|
||||
@@ -49,7 +49,6 @@ impl CreateLogicalTablesProcedure {
|
||||
pub const TYPE_NAME: &'static str = "metasrv-procedure::CreateLogicalTables";
|
||||
|
||||
pub fn new(
|
||||
cluster_id: ClusterId,
|
||||
tasks: Vec<CreateTableTask>,
|
||||
physical_table_id: TableId,
|
||||
context: DdlContext,
|
||||
@@ -57,7 +56,6 @@ impl CreateLogicalTablesProcedure {
|
||||
Self {
|
||||
context,
|
||||
data: CreateTablesData {
|
||||
cluster_id,
|
||||
state: CreateTablesState::Prepare,
|
||||
tasks,
|
||||
table_ids_already_exists: vec![],
|
||||
@@ -245,7 +243,6 @@ impl Procedure for CreateLogicalTablesProcedure {
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct CreateTablesData {
|
||||
cluster_id: ClusterId,
|
||||
state: CreateTablesState,
|
||||
tasks: Vec<CreateTableTask>,
|
||||
table_ids_already_exists: Vec<Option<TableId>>,
|
||||
|
||||
@@ -37,17 +37,17 @@ use crate::ddl::utils::{
|
||||
add_peer_context_if_needed, convert_region_routes_to_detecting_regions, handle_retry_error,
|
||||
region_storage_path,
|
||||
};
|
||||
use crate::ddl::{DdlContext, TableMetadata, TableMetadataAllocatorContext};
|
||||
use crate::ddl::{DdlContext, TableMetadata};
|
||||
use crate::error::{self, Result};
|
||||
use crate::key::table_name::TableNameKey;
|
||||
use crate::key::table_route::{PhysicalTableRouteValue, TableRouteValue};
|
||||
use crate::lock_key::{CatalogLock, SchemaLock, TableNameLock};
|
||||
use crate::metrics;
|
||||
use crate::region_keeper::OperatingRegionGuard;
|
||||
use crate::rpc::ddl::CreateTableTask;
|
||||
use crate::rpc::router::{
|
||||
find_leader_regions, find_leaders, operating_leader_regions, RegionRoute,
|
||||
};
|
||||
use crate::{metrics, ClusterId};
|
||||
pub struct CreateTableProcedure {
|
||||
pub context: DdlContext,
|
||||
pub creator: TableCreator,
|
||||
@@ -56,10 +56,10 @@ pub struct CreateTableProcedure {
|
||||
impl CreateTableProcedure {
|
||||
pub const TYPE_NAME: &'static str = "metasrv-procedure::CreateTable";
|
||||
|
||||
pub fn new(cluster_id: ClusterId, task: CreateTableTask, context: DdlContext) -> Self {
|
||||
pub fn new(task: CreateTableTask, context: DdlContext) -> Self {
|
||||
Self {
|
||||
context,
|
||||
creator: TableCreator::new(cluster_id, task),
|
||||
creator: TableCreator::new(task),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -154,12 +154,7 @@ impl CreateTableProcedure {
|
||||
} = self
|
||||
.context
|
||||
.table_metadata_allocator
|
||||
.create(
|
||||
&TableMetadataAllocatorContext {
|
||||
cluster_id: self.creator.data.cluster_id,
|
||||
},
|
||||
&self.creator.data.task,
|
||||
)
|
||||
.create(&self.creator.data.task)
|
||||
.await?;
|
||||
self.creator
|
||||
.set_allocated_metadata(table_id, table_route, region_wal_options);
|
||||
@@ -268,7 +263,6 @@ impl CreateTableProcedure {
|
||||
/// - Failed to create table metadata.
|
||||
async fn on_create_metadata(&mut self) -> Result<Status> {
|
||||
let table_id = self.table_id();
|
||||
let cluster_id = self.creator.data.cluster_id;
|
||||
let manager = &self.context.table_metadata_manager;
|
||||
|
||||
let raw_table_info = self.table_info().clone();
|
||||
@@ -276,10 +270,8 @@ impl CreateTableProcedure {
|
||||
let region_wal_options = self.region_wal_options()?.clone();
|
||||
// Safety: the table_route must be allocated.
|
||||
let physical_table_route = self.table_route()?.clone();
|
||||
let detecting_regions = convert_region_routes_to_detecting_regions(
|
||||
cluster_id,
|
||||
&physical_table_route.region_routes,
|
||||
);
|
||||
let detecting_regions =
|
||||
convert_region_routes_to_detecting_regions(&physical_table_route.region_routes);
|
||||
let table_route = TableRouteValue::Physical(physical_table_route);
|
||||
manager
|
||||
.create_table_metadata(raw_table_info, table_route, region_wal_options)
|
||||
@@ -351,11 +343,10 @@ pub struct TableCreator {
|
||||
}
|
||||
|
||||
impl TableCreator {
|
||||
pub fn new(cluster_id: ClusterId, task: CreateTableTask) -> Self {
|
||||
pub fn new(task: CreateTableTask) -> Self {
|
||||
Self {
|
||||
data: CreateTableData {
|
||||
state: CreateTableState::Prepare,
|
||||
cluster_id,
|
||||
task,
|
||||
table_route: None,
|
||||
region_wal_options: None,
|
||||
@@ -421,7 +412,6 @@ pub struct CreateTableData {
|
||||
table_route: Option<PhysicalTableRouteValue>,
|
||||
/// None stands for not allocated yet.
|
||||
pub region_wal_options: Option<HashMap<RegionNumber, String>>,
|
||||
pub cluster_id: ClusterId,
|
||||
}
|
||||
|
||||
impl CreateTableData {
|
||||
|
||||
@@ -24,13 +24,13 @@ use table::table_reference::TableReference;
|
||||
|
||||
use crate::cache_invalidator::Context;
|
||||
use crate::ddl::utils::handle_retry_error;
|
||||
use crate::ddl::{DdlContext, TableMetadata, TableMetadataAllocatorContext};
|
||||
use crate::ddl::{DdlContext, TableMetadata};
|
||||
use crate::error::{self, Result};
|
||||
use crate::instruction::CacheIdent;
|
||||
use crate::key::table_name::TableNameKey;
|
||||
use crate::lock_key::{CatalogLock, SchemaLock, TableNameLock};
|
||||
use crate::metrics;
|
||||
use crate::rpc::ddl::CreateViewTask;
|
||||
use crate::{metrics, ClusterId};
|
||||
|
||||
// The procedure to execute `[CreateViewTask]`.
|
||||
pub struct CreateViewProcedure {
|
||||
@@ -41,12 +41,11 @@ pub struct CreateViewProcedure {
|
||||
impl CreateViewProcedure {
|
||||
pub const TYPE_NAME: &'static str = "metasrv-procedure::CreateView";
|
||||
|
||||
pub fn new(cluster_id: ClusterId, task: CreateViewTask, context: DdlContext) -> Self {
|
||||
pub fn new(task: CreateViewTask, context: DdlContext) -> Self {
|
||||
Self {
|
||||
context,
|
||||
data: CreateViewData {
|
||||
state: CreateViewState::Prepare,
|
||||
cluster_id,
|
||||
task,
|
||||
need_update: false,
|
||||
},
|
||||
@@ -144,12 +143,7 @@ impl CreateViewProcedure {
|
||||
let TableMetadata { table_id, .. } = self
|
||||
.context
|
||||
.table_metadata_allocator
|
||||
.create_view(
|
||||
&TableMetadataAllocatorContext {
|
||||
cluster_id: self.data.cluster_id,
|
||||
},
|
||||
&None,
|
||||
)
|
||||
.create_view(&None)
|
||||
.await?;
|
||||
self.data.set_allocated_metadata(table_id, false);
|
||||
}
|
||||
@@ -285,7 +279,6 @@ pub enum CreateViewState {
|
||||
pub struct CreateViewData {
|
||||
pub state: CreateViewState,
|
||||
pub task: CreateViewTask,
|
||||
pub cluster_id: ClusterId,
|
||||
/// Whether to update the view info.
|
||||
pub need_update: bool,
|
||||
}
|
||||
|
||||
@@ -35,7 +35,6 @@ use crate::ddl::DdlContext;
|
||||
use crate::error::Result;
|
||||
use crate::key::table_name::TableNameValue;
|
||||
use crate::lock_key::{CatalogLock, SchemaLock};
|
||||
use crate::ClusterId;
|
||||
|
||||
pub struct DropDatabaseProcedure {
|
||||
/// The context of procedure runtime.
|
||||
@@ -54,7 +53,6 @@ pub(crate) enum DropTableTarget {
|
||||
|
||||
/// Context of [DropDatabaseProcedure] execution.
|
||||
pub(crate) struct DropDatabaseContext {
|
||||
cluster_id: ClusterId,
|
||||
catalog: String,
|
||||
schema: String,
|
||||
drop_if_exists: bool,
|
||||
@@ -87,7 +85,6 @@ impl DropDatabaseProcedure {
|
||||
Self {
|
||||
runtime_context: context,
|
||||
context: DropDatabaseContext {
|
||||
cluster_id: 0,
|
||||
catalog,
|
||||
schema,
|
||||
drop_if_exists,
|
||||
@@ -108,7 +105,6 @@ impl DropDatabaseProcedure {
|
||||
Ok(Self {
|
||||
runtime_context,
|
||||
context: DropDatabaseContext {
|
||||
cluster_id: 0,
|
||||
catalog,
|
||||
schema,
|
||||
drop_if_exists,
|
||||
|
||||
@@ -217,11 +217,10 @@ mod tests {
|
||||
async fn test_next_without_logical_tables() {
|
||||
let node_manager = Arc::new(MockDatanodeManager::new(()));
|
||||
let ddl_context = new_ddl_context(node_manager);
|
||||
create_physical_table(&ddl_context, 0, "phy").await;
|
||||
create_physical_table(&ddl_context, "phy").await;
|
||||
// It always starts from Logical
|
||||
let mut state = DropDatabaseCursor::new(DropTableTarget::Logical);
|
||||
let mut ctx = DropDatabaseContext {
|
||||
cluster_id: 0,
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
drop_if_exists: false,
|
||||
@@ -252,12 +251,11 @@ mod tests {
|
||||
async fn test_next_with_logical_tables() {
|
||||
let node_manager = Arc::new(MockDatanodeManager::new(()));
|
||||
let ddl_context = new_ddl_context(node_manager);
|
||||
let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await;
|
||||
create_logical_table(ddl_context.clone(), 0, physical_table_id, "metric_0").await;
|
||||
let physical_table_id = create_physical_table(&ddl_context, "phy").await;
|
||||
create_logical_table(ddl_context.clone(), physical_table_id, "metric_0").await;
|
||||
// It always starts from Logical
|
||||
let mut state = DropDatabaseCursor::new(DropTableTarget::Logical);
|
||||
let mut ctx = DropDatabaseContext {
|
||||
cluster_id: 0,
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
drop_if_exists: false,
|
||||
@@ -286,7 +284,6 @@ mod tests {
|
||||
let ddl_context = new_ddl_context(node_manager);
|
||||
let mut state = DropDatabaseCursor::new(DropTableTarget::Physical);
|
||||
let mut ctx = DropDatabaseContext {
|
||||
cluster_id: 0,
|
||||
catalog: DEFAULT_CATALOG_NAME.to_string(),
|
||||
schema: DEFAULT_SCHEMA_NAME.to_string(),
|
||||
drop_if_exists: false,
|
||||
|
||||
@@ -98,11 +98,10 @@ impl State for DropDatabaseExecutor {
async fn next(
&mut self,
ddl_ctx: &DdlContext,
ctx: &mut DropDatabaseContext,
_ctx: &mut DropDatabaseContext,
) -> Result<(Box<dyn State>, Status)> {
self.register_dropping_regions(ddl_ctx)?;
let executor =
DropTableExecutor::new(ctx.cluster_id, self.table_name.clone(), self.table_id, true);
let executor = DropTableExecutor::new(self.table_name.clone(), self.table_id, true);
// Deletes metadata for table permanently.
let table_route_value = TableRouteValue::new(
self.table_id,
@@ -187,7 +186,7 @@ mod tests {
async fn test_next_with_physical_table() {
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await;
let physical_table_id = create_physical_table(&ddl_context, "phy").await;
let (_, table_route) = ddl_context
.table_metadata_manager
.table_route_manager()
@@ -203,7 +202,6 @@ mod tests {
DropTableTarget::Physical,
);
let mut ctx = DropDatabaseContext {
cluster_id: 0,
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
drop_if_exists: false,
@@ -216,7 +214,6 @@ mod tests {
}
// Execute again
let mut ctx = DropDatabaseContext {
cluster_id: 0,
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
drop_if_exists: false,
@@ -239,8 +236,8 @@ mod tests {
async fn test_next_logical_table() {
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await;
create_logical_table(ddl_context.clone(), 0, physical_table_id, "metric").await;
let physical_table_id = create_physical_table(&ddl_context, "phy").await;
create_logical_table(ddl_context.clone(), physical_table_id, "metric").await;
let logical_table_id = physical_table_id + 1;
let (_, table_route) = ddl_context
.table_metadata_manager
@@ -257,7 +254,6 @@ mod tests {
DropTableTarget::Logical,
);
let mut ctx = DropDatabaseContext {
cluster_id: 0,
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
drop_if_exists: false,
@@ -270,7 +266,6 @@ mod tests {
}
// Execute again
let mut ctx = DropDatabaseContext {
cluster_id: 0,
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
drop_if_exists: false,
@@ -345,7 +340,7 @@ mod tests {
async fn test_next_retryable_err() {
let node_manager = Arc::new(MockDatanodeManager::new(RetryErrorDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await;
let physical_table_id = create_physical_table(&ddl_context, "phy").await;
let (_, table_route) = ddl_context
.table_metadata_manager
.table_route_manager()
@@ -360,7 +355,6 @@ mod tests {
DropTableTarget::Physical,
);
let mut ctx = DropDatabaseContext {
cluster_id: 0,
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
drop_if_exists: false,
@@ -374,7 +368,7 @@ mod tests {
async fn test_on_recovery() {
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await;
let physical_table_id = create_physical_table(&ddl_context, "phy").await;
let (_, table_route) = ddl_context
.table_metadata_manager
.table_route_manager()
@@ -390,7 +384,6 @@ mod tests {
DropTableTarget::Physical,
);
let mut ctx = DropDatabaseContext {
cluster_id: 0,
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
drop_if_exists: false,
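With the cluster id gone from `DropDatabaseContext`, `DropDatabaseExecutor::next` no longer reads its context argument, so the parameter is bound as `_ctx`. A minimal sketch of the new executor construction, assuming `self` is the `DropDatabaseExecutor` shown above:

```rust
// Sketch: the executor takes only the table name, table id, and
// drop_if_exists flag now that the cluster id argument is removed.
let executor = DropTableExecutor::new(self.table_name.clone(), self.table_id, true);
```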
@@ -118,7 +118,6 @@ mod tests {
.unwrap();
let mut state = DropDatabaseRemoveMetadata;
let mut ctx = DropDatabaseContext {
cluster_id: 0,
catalog: "foo".to_string(),
schema: "bar".to_string(),
drop_if_exists: true,
@@ -145,7 +144,6 @@ mod tests {
// Schema not exists
let mut state = DropDatabaseRemoveMetadata;
let mut ctx = DropDatabaseContext {
cluster_id: 0,
catalog: "foo".to_string(),
schema: "bar".to_string(),
drop_if_exists: true,

@@ -89,7 +89,6 @@ mod tests {
let ddl_context = new_ddl_context(node_manager);
let mut step = DropDatabaseStart;
let mut ctx = DropDatabaseContext {
cluster_id: 0,
catalog: "foo".to_string(),
schema: "bar".to_string(),
drop_if_exists: false,
@@ -105,7 +104,6 @@ mod tests {
let ddl_context = new_ddl_context(node_manager);
let mut state = DropDatabaseStart;
let mut ctx = DropDatabaseContext {
cluster_id: 0,
catalog: "foo".to_string(),
schema: "bar".to_string(),
drop_if_exists: true,
@@ -128,7 +126,6 @@ mod tests {
.unwrap();
let mut state = DropDatabaseStart;
let mut ctx = DropDatabaseContext {
cluster_id: 0,
catalog: "foo".to_string(),
schema: "bar".to_string(),
drop_if_exists: false,
@@ -37,8 +37,8 @@ use crate::instruction::{CacheIdent, DropFlow};
use crate::key::flow::flow_info::FlowInfoValue;
use crate::key::flow::flow_route::FlowRouteValue;
use crate::lock_key::{CatalogLock, FlowLock};
use crate::metrics;
use crate::rpc::ddl::DropFlowTask;
use crate::{metrics, ClusterId};

/// The procedure for dropping a flow.
pub struct DropFlowProcedure {
@@ -51,12 +51,11 @@ pub struct DropFlowProcedure {
impl DropFlowProcedure {
pub const TYPE_NAME: &'static str = "metasrv-procedure::DropFlow";

pub fn new(cluster_id: ClusterId, task: DropFlowTask, context: DdlContext) -> Self {
pub fn new(task: DropFlowTask, context: DdlContext) -> Self {
Self {
context,
data: DropFlowData {
state: DropFlowState::Prepare,
cluster_id,
task,
flow_info_value: None,
flow_route_values: vec![],
@@ -218,7 +217,6 @@ impl Procedure for DropFlowProcedure {
#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct DropFlowData {
state: DropFlowState,
cluster_id: ClusterId,
task: DropFlowTask,
pub(crate) flow_info_value: Option<FlowInfoValue>,
pub(crate) flow_route_values: Vec<FlowRouteValue>,
@@ -40,10 +40,10 @@ use crate::ddl::DdlContext;
use crate::error::{self, Result};
use crate::key::table_route::TableRouteValue;
use crate::lock_key::{CatalogLock, SchemaLock, TableLock};
use crate::metrics;
use crate::region_keeper::OperatingRegionGuard;
use crate::rpc::ddl::DropTableTask;
use crate::rpc::router::{operating_leader_regions, RegionRoute};
use crate::{metrics, ClusterId};

pub struct DropTableProcedure {
/// The context of procedure runtime.
@@ -59,8 +59,8 @@ pub struct DropTableProcedure {
impl DropTableProcedure {
pub const TYPE_NAME: &'static str = "metasrv-procedure::DropTable";

pub fn new(cluster_id: ClusterId, task: DropTableTask, context: DdlContext) -> Self {
let data = DropTableData::new(cluster_id, task);
pub fn new(task: DropTableTask, context: DdlContext) -> Self {
let data = DropTableData::new(task);
let executor = data.build_executor();
Self {
context,
@@ -268,7 +268,6 @@ impl Procedure for DropTableProcedure {
#[derive(Debug, Serialize, Deserialize)]
pub struct DropTableData {
pub state: DropTableState,
pub cluster_id: ClusterId,
pub task: DropTableTask,
pub physical_region_routes: Vec<RegionRoute>,
pub physical_table_id: Option<TableId>,
@@ -279,10 +278,9 @@ pub struct DropTableData {
}

impl DropTableData {
pub fn new(cluster_id: ClusterId, task: DropTableTask) -> Self {
pub fn new(task: DropTableTask) -> Self {
Self {
state: DropTableState::Prepare,
cluster_id,
task,
physical_region_routes: vec![],
physical_table_id: None,
@@ -301,7 +299,6 @@ impl DropTableData {

fn build_executor(&self) -> DropTableExecutor {
DropTableExecutor::new(
self.cluster_id,
self.task.table_name(),
self.task.table_id,
self.task.drop_if_exists,
@@ -36,7 +36,6 @@ use crate::instruction::CacheIdent;
use crate::key::table_name::TableNameKey;
use crate::key::table_route::TableRouteValue;
use crate::rpc::router::{find_leader_regions, find_leaders, RegionRoute};
use crate::ClusterId;

/// [Control] indicates to the caller whether to go to the next step.
#[derive(Debug)]
@@ -54,14 +53,8 @@ impl<T> Control<T> {

impl DropTableExecutor {
/// Returns the [DropTableExecutor].
pub fn new(
cluster_id: ClusterId,
table: TableName,
table_id: TableId,
drop_if_exists: bool,
) -> Self {
pub fn new(table: TableName, table_id: TableId, drop_if_exists: bool) -> Self {
Self {
cluster_id,
table,
table_id,
drop_if_exists,
@@ -74,7 +67,6 @@ impl DropTableExecutor {
/// - Invalidates the cache on the Frontend nodes.
/// - Drops the regions on the Datanode nodes.
pub struct DropTableExecutor {
cluster_id: ClusterId,
table: TableName,
table_id: TableId,
drop_if_exists: bool,
@@ -164,7 +156,7 @@ impl DropTableExecutor {
let detecting_regions = if table_route_value.is_physical() {
// Safety: checked.
let regions = table_route_value.region_routes().unwrap();
convert_region_routes_to_detecting_regions(self.cluster_id, regions)
convert_region_routes_to_detecting_regions(regions)
} else {
vec![]
};
@@ -321,7 +313,6 @@ mod tests {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ctx = new_ddl_context(node_manager);
let executor = DropTableExecutor::new(
0,
TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "my_table"),
1024,
true,
@@ -331,7 +322,6 @@ mod tests {

// Drops a non-existent table
let executor = DropTableExecutor::new(
0,
TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "my_table"),
1024,
false,
@@ -341,7 +331,6 @@ mod tests {

// Drops an existing table
let executor = DropTableExecutor::new(
0,
TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "my_table"),
1024,
false,
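After this change the executor tests build a `DropTableExecutor` directly from the table name, the table id, and the `drop_if_exists` flag. A minimal sketch mirroring the tests above:

```rust
// Sketch: the leading `0` (the old cluster id) is gone.
let executor = DropTableExecutor::new(
    TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "my_table"),
    1024, // table id
    true, // drop_if_exists
);
```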
@@ -31,8 +31,8 @@ use crate::error::{self, Result};
use crate::instruction::CacheIdent;
use crate::key::table_name::TableNameKey;
use crate::lock_key::{CatalogLock, SchemaLock, TableLock};
use crate::metrics;
use crate::rpc::ddl::DropViewTask;
use crate::{metrics, ClusterId};

/// The procedure for dropping a view.
pub struct DropViewProcedure {
@@ -45,12 +45,11 @@ pub struct DropViewProcedure {
impl DropViewProcedure {
pub const TYPE_NAME: &'static str = "metasrv-procedure::DropView";

pub fn new(cluster_id: ClusterId, task: DropViewTask, context: DdlContext) -> Self {
pub fn new(task: DropViewTask, context: DdlContext) -> Self {
Self {
context,
data: DropViewData {
state: DropViewState::Prepare,
cluster_id,
task,
},
}
@@ -216,7 +215,6 @@ impl Procedure for DropViewProcedure {
#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct DropViewData {
state: DropViewState,
cluster_id: ClusterId,
task: DropViewTask,
}
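All three drop procedures now share the same two-argument constructor shape: the task plus the `DdlContext`. A minimal sketch, where the `*_task` bindings are hypothetical placeholders for already-built tasks:

```rust
// Sketch: each procedure is constructed from its task and the DdlContext;
// drop_flow_task, drop_table_task, and drop_view_task are placeholders.
let drop_flow = DropFlowProcedure::new(drop_flow_task, ddl_context.clone());
let drop_table = DropTableProcedure::new(drop_table_task, ddl_context.clone());
let drop_view = DropViewProcedure::new(drop_view_task, ddl_context);
```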
@@ -20,7 +20,6 @@ use crate::error::Result;
use crate::key::FlowId;
use crate::peer::Peer;
use crate::sequence::SequenceRef;
use crate::ClusterId;

/// The reference of [FlowMetadataAllocator].
pub type FlowMetadataAllocatorRef = Arc<FlowMetadataAllocator>;
@@ -60,16 +59,9 @@ impl FlowMetadataAllocator {
}

/// Allocates the [FlowId] and [Peer]s.
pub async fn create(
&self,
cluster_id: ClusterId,
partitions: usize,
) -> Result<(FlowId, Vec<Peer>)> {
pub async fn create(&self, partitions: usize) -> Result<(FlowId, Vec<Peer>)> {
let flow_id = self.allocate_flow_id().await?;
let peers = self
.partition_peer_allocator
.alloc(cluster_id, partitions)
.await?;
let peers = self.partition_peer_allocator.alloc(partitions).await?;

Ok((flow_id, peers))
}
@@ -79,7 +71,7 @@ impl FlowMetadataAllocator {
#[async_trait]
pub trait PartitionPeerAllocator: Send + Sync {
/// Allocates [Peer] nodes for storing partitions.
async fn alloc(&self, cluster_id: ClusterId, partitions: usize) -> Result<Vec<Peer>>;
async fn alloc(&self, partitions: usize) -> Result<Vec<Peer>>;
}

/// [PartitionPeerAllocatorRef] allocates [Peer]s for partitions.
@@ -89,7 +81,7 @@ struct NoopPartitionPeerAllocator;

#[async_trait]
impl PartitionPeerAllocator for NoopPartitionPeerAllocator {
async fn alloc(&self, _cluster_id: ClusterId, partitions: usize) -> Result<Vec<Peer>> {
async fn alloc(&self, partitions: usize) -> Result<Vec<Peer>> {
Ok(vec![Peer::default(); partitions])
}
}
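For orientation, a custom allocator against the new trait shape might look like the following sketch. Only the `PartitionPeerAllocator` signature comes from the diff above; `RoundRobinAllocator` and its peer list are hypothetical:

```rust
// Hypothetical allocator: cycles through a fixed peer list.
struct RoundRobinAllocator {
    peers: Vec<Peer>,
}

#[async_trait]
impl PartitionPeerAllocator for RoundRobinAllocator {
    async fn alloc(&self, partitions: usize) -> Result<Vec<Peer>> {
        // One peer per partition, wrapping around the configured list.
        Ok((0..partitions)
            .map(|i| self.peers[i % self.peers.len()].clone())
            .collect())
    }
}
```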
@@ -20,7 +20,7 @@ use common_telemetry::{debug, info};
use snafu::ensure;
use store_api::storage::{RegionId, RegionNumber, TableId};

use crate::ddl::{TableMetadata, TableMetadataAllocatorContext};
use crate::ddl::TableMetadata;
use crate::error::{self, Result, UnsupportedSnafu};
use crate::key::table_route::PhysicalTableRouteValue;
use crate::peer::Peer;
@@ -109,7 +109,6 @@ impl TableMetadataAllocator {

async fn create_table_route(
&self,
ctx: &TableMetadataAllocatorContext,
table_id: TableId,
task: &CreateTableTask,
) -> Result<PhysicalTableRouteValue> {
@@ -121,7 +120,7 @@ impl TableMetadataAllocator {
}
);

let peers = self.peer_allocator.alloc(ctx, regions).await?;
let peers = self.peer_allocator.alloc(regions).await?;
let region_routes = task
.partitions
.iter()
@@ -147,11 +146,7 @@ impl TableMetadataAllocator {
}

/// Create VIEW metadata
pub async fn create_view(
&self,
_ctx: &TableMetadataAllocatorContext,
table_id: &Option<api::v1::TableId>,
) -> Result<TableMetadata> {
pub async fn create_view(&self, table_id: &Option<api::v1::TableId>) -> Result<TableMetadata> {
let table_id = self.allocate_table_id(table_id).await?;

Ok(TableMetadata {
@@ -160,13 +155,9 @@ impl TableMetadataAllocator {
})
}

pub async fn create(
&self,
ctx: &TableMetadataAllocatorContext,
task: &CreateTableTask,
) -> Result<TableMetadata> {
pub async fn create(&self, task: &CreateTableTask) -> Result<TableMetadata> {
let table_id = self.allocate_table_id(&task.create_table.table_id).await?;
let table_route = self.create_table_route(ctx, table_id, task).await?;
let table_route = self.create_table_route(table_id, task).await?;
let region_wal_options = self.create_wal_options(&table_route)?;

debug!(
@@ -188,19 +179,14 @@ pub type PeerAllocatorRef = Arc<dyn PeerAllocator>;
#[async_trait]
pub trait PeerAllocator: Send + Sync {
/// Allocates `regions` size [`Peer`]s.
async fn alloc(&self, ctx: &TableMetadataAllocatorContext, regions: usize)
-> Result<Vec<Peer>>;
async fn alloc(&self, regions: usize) -> Result<Vec<Peer>>;
}

struct NoopPeerAllocator;

#[async_trait]
impl PeerAllocator for NoopPeerAllocator {
async fn alloc(
&self,
_ctx: &TableMetadataAllocatorContext,
regions: usize,
) -> Result<Vec<Peer>> {
async fn alloc(&self, regions: usize) -> Result<Vec<Peer>> {
Ok(vec![Peer::default(); regions])
}
}
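On the caller side, table metadata is now allocated from the task alone. A minimal sketch, where `allocator` and `task` are hypothetical bindings for a `TableMetadataAllocator` and a `CreateTableTask`:

```rust
// Sketch: no TableMetadataAllocatorContext is threaded through anymore;
// internally this ends up calling `peer_allocator.alloc(regions)`.
let TableMetadata { table_id, .. } = allocator.create(&task).await?;
```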
@@ -31,10 +31,9 @@ use crate::ddl::test_util::columns::TestColumnDefBuilder;
use crate::ddl::test_util::create_table::{
build_raw_table_info_from_expr, TestCreateTableExprBuilder,
};
use crate::ddl::{DdlContext, TableMetadata, TableMetadataAllocatorContext};
use crate::ddl::{DdlContext, TableMetadata};
use crate::key::table_route::TableRouteValue;
use crate::rpc::ddl::CreateTableTask;
use crate::ClusterId;

pub async fn create_physical_table_metadata(
ddl_context: &DdlContext,
@@ -48,11 +47,7 @@ pub async fn create_physical_table_metadata(
.unwrap();
}

pub async fn create_physical_table(
ddl_context: &DdlContext,
cluster_id: ClusterId,
name: &str,
) -> TableId {
pub async fn create_physical_table(ddl_context: &DdlContext, name: &str) -> TableId {
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task(name);
let TableMetadata {
@@ -61,10 +56,7 @@ pub async fn create_physical_table(
..
} = ddl_context
.table_metadata_allocator
.create(
&TableMetadataAllocatorContext { cluster_id },
&create_physical_table_task,
)
.create(&create_physical_table_task)
.await
.unwrap();
create_physical_table_task.set_table_id(table_id);
@@ -80,15 +72,13 @@ pub async fn create_physical_table(

pub async fn create_logical_table(
ddl_context: DdlContext,
cluster_id: ClusterId,
physical_table_id: TableId,
table_name: &str,
) -> TableId {
use std::assert_matches::assert_matches;

let tasks = vec![test_create_logical_table_task(table_name)];
let mut procedure =
CreateLogicalTablesProcedure::new(cluster_id, tasks, physical_table_id, ddl_context);
let mut procedure = CreateLogicalTablesProcedure::new(tasks, physical_table_id, ddl_context);
let status = procedure.on_prepare().await.unwrap();
assert_matches!(status, Status::Executing { persist: true });
let status = procedure.on_create_metadata().await.unwrap();
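A minimal sketch of a test body using the slimmed-down helpers; everything except the helper functions themselves is illustrative:

```rust
// Sketch only: helper calls without a cluster id.
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let phy_id = create_physical_table(&ddl_context, "phy").await;
let metric_id = create_logical_table(ddl_context.clone(), phy_id, "metric").await;
// The tests above rely on logical ids following the physical id:
assert_eq!(metric_id, phy_id + 1);
```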
@@ -86,7 +86,6 @@ fn make_alter_logical_table_rename_task(
async fn test_on_prepare_check_schema() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let tasks = vec![
make_alter_logical_table_add_column_task(
Some("schema1"),
@@ -100,8 +99,7 @@ async fn test_on_prepare_check_schema() {
),
];
let physical_table_id = 1024u32;
let mut procedure =
AlterLogicalTablesProcedure::new(cluster_id, tasks, physical_table_id, ddl_context);
let mut procedure = AlterLogicalTablesProcedure::new(tasks, physical_table_id, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, AlterLogicalTablesInvalidArguments { .. });
}
@@ -110,50 +108,46 @@ async fn test_on_prepare_check_schema() {
async fn test_on_prepare_check_alter_kind() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let tasks = vec![make_alter_logical_table_rename_task(
"schema1",
"table1",
"new_table1",
)];
let physical_table_id = 1024u32;
let mut procedure =
AlterLogicalTablesProcedure::new(cluster_id, tasks, physical_table_id, ddl_context);
let mut procedure = AlterLogicalTablesProcedure::new(tasks, physical_table_id, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, AlterLogicalTablesInvalidArguments { .. });
}

#[tokio::test]
async fn test_on_prepare_different_physical_table() {
let cluster_id = 1;
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);

let phy1_id = create_physical_table(&ddl_context, cluster_id, "phy1").await;
create_logical_table(ddl_context.clone(), cluster_id, phy1_id, "table1").await;
let phy2_id = create_physical_table(&ddl_context, cluster_id, "phy2").await;
create_logical_table(ddl_context.clone(), cluster_id, phy2_id, "table2").await;
let phy1_id = create_physical_table(&ddl_context, "phy1").await;
create_logical_table(ddl_context.clone(), phy1_id, "table1").await;
let phy2_id = create_physical_table(&ddl_context, "phy2").await;
create_logical_table(ddl_context.clone(), phy2_id, "table2").await;

let tasks = vec![
make_alter_logical_table_add_column_task(None, "table1", vec!["column1".to_string()]),
make_alter_logical_table_add_column_task(None, "table2", vec!["column2".to_string()]),
];

let mut procedure = AlterLogicalTablesProcedure::new(cluster_id, tasks, phy1_id, ddl_context);
let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy1_id, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, AlterLogicalTablesInvalidArguments { .. });
}

#[tokio::test]
async fn test_on_prepare_logical_table_not_exists() {
let cluster_id = 1;
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);

// Creates physical table
let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
let phy_id = create_physical_table(&ddl_context, "phy").await;
// Creates 3 logical tables
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
create_logical_table(ddl_context.clone(), phy_id, "table1").await;

let tasks = vec![
make_alter_logical_table_add_column_task(None, "table1", vec!["column1".to_string()]),
@@ -161,23 +155,22 @@ async fn test_on_prepare_logical_table_not_exists() {
make_alter_logical_table_add_column_task(None, "table2", vec!["column2".to_string()]),
];

let mut procedure = AlterLogicalTablesProcedure::new(cluster_id, tasks, phy_id, ddl_context);
let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, TableNotFound { .. });
}

#[tokio::test]
async fn test_on_prepare() {
let cluster_id = 1;
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);

// Creates physical table
let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
let phy_id = create_physical_table(&ddl_context, "phy").await;
// Creates 3 logical tables
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table3").await;
create_logical_table(ddl_context.clone(), phy_id, "table1").await;
create_logical_table(ddl_context.clone(), phy_id, "table2").await;
create_logical_table(ddl_context.clone(), phy_id, "table3").await;

let tasks = vec![
make_alter_logical_table_add_column_task(None, "table1", vec!["column1".to_string()]),
@@ -185,25 +178,24 @@ async fn test_on_prepare() {
make_alter_logical_table_add_column_task(None, "table3", vec!["column3".to_string()]),
];

let mut procedure = AlterLogicalTablesProcedure::new(cluster_id, tasks, phy_id, ddl_context);
let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context);
let result = procedure.on_prepare().await;
assert_matches!(result, Ok(Status::Executing { persist: true }));
}

#[tokio::test]
async fn test_on_update_metadata() {
let cluster_id = 1;
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);

// Creates physical table
let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
let phy_id = create_physical_table(&ddl_context, "phy").await;
// Creates 3 logical tables
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table3").await;
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table4").await;
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table5").await;
create_logical_table(ddl_context.clone(), phy_id, "table1").await;
create_logical_table(ddl_context.clone(), phy_id, "table2").await;
create_logical_table(ddl_context.clone(), phy_id, "table3").await;
create_logical_table(ddl_context.clone(), phy_id, "table4").await;
create_logical_table(ddl_context.clone(), phy_id, "table5").await;

let tasks = vec![
make_alter_logical_table_add_column_task(None, "table1", vec!["new_col".to_string()]),
@@ -211,7 +203,7 @@ async fn test_on_update_metadata() {
make_alter_logical_table_add_column_task(None, "table3", vec!["new_col".to_string()]),
];

let mut procedure = AlterLogicalTablesProcedure::new(cluster_id, tasks, phy_id, ddl_context);
let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context);
let mut status = procedure.on_prepare().await.unwrap();
assert_matches!(status, Status::Executing { persist: true });

@@ -229,23 +221,21 @@ async fn test_on_update_metadata() {

#[tokio::test]
async fn test_on_part_duplicate_alter_request() {
let cluster_id = 1;
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);

// Creates physical table
let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
let phy_id = create_physical_table(&ddl_context, "phy").await;
// Creates 3 logical tables
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;
create_logical_table(ddl_context.clone(), phy_id, "table1").await;
create_logical_table(ddl_context.clone(), phy_id, "table2").await;

let tasks = vec![
make_alter_logical_table_add_column_task(None, "table1", vec!["col_0".to_string()]),
make_alter_logical_table_add_column_task(None, "table2", vec!["col_0".to_string()]),
];

let mut procedure =
AlterLogicalTablesProcedure::new(cluster_id, tasks, phy_id, ddl_context.clone());
let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context.clone());
let mut status = procedure.on_prepare().await.unwrap();
assert_matches!(status, Status::Executing { persist: true });

@@ -278,8 +268,7 @@ async fn test_on_part_duplicate_alter_request() {
),
];

let mut procedure =
AlterLogicalTablesProcedure::new(cluster_id, tasks, phy_id, ddl_context.clone());
let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context.clone());
let mut status = procedure.on_prepare().await.unwrap();
assert_matches!(status, Status::Executing { persist: true });
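A minimal sketch of the new alter-logical-tables call, mirroring the tests above (the `tasks` and `phy_id` bindings are assumed to be set up as in those tests):

```rust
// Sketch: tasks + physical table id + context, no cluster id.
let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context);
let status = procedure.on_prepare().await.unwrap();
assert_matches!(status, Status::Executing { persist: true });
```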
@@ -59,7 +59,6 @@ fn test_rename_alter_table_task(table_name: &str, new_table_name: &str) -> Alter
async fn test_on_prepare_table_exists_err() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let task = test_create_table_task("foo", 1024);
// Puts a value to table name key.
ddl_context
@@ -73,7 +72,7 @@ async fn test_on_prepare_table_exists_err() {
.unwrap();

let task = test_rename_alter_table_task("non-exists", "foo");
let mut procedure = AlterTableProcedure::new(cluster_id, 1024, task, ddl_context).unwrap();
let mut procedure = AlterTableProcedure::new(1024, task, ddl_context).unwrap();
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err.status_code(), StatusCode::TableAlreadyExists);
}
@@ -82,9 +81,8 @@ async fn test_on_prepare_table_exists_err() {
async fn test_on_prepare_table_not_exists_err() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let task = test_rename_alter_table_task("non-exists", "foo");
let mut procedure = AlterTableProcedure::new(cluster_id, 1024, task, ddl_context).unwrap();
let mut procedure = AlterTableProcedure::new(1024, task, ddl_context).unwrap();
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err.status_code(), StatusCode::TableNotFound);
}
@@ -95,7 +93,6 @@ async fn test_on_submit_alter_request() {
let datanode_handler = DatanodeWatcher(tx);
let node_manager = Arc::new(MockDatanodeManager::new(datanode_handler));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let table_id = 1024;
let table_name = "foo";
let task = test_create_table_task(table_name, table_id);
@@ -144,8 +141,7 @@ async fn test_on_submit_alter_request() {
})),
},
};
let mut procedure =
AlterTableProcedure::new(cluster_id, table_id, alter_table_task, ddl_context).unwrap();
let mut procedure = AlterTableProcedure::new(table_id, alter_table_task, ddl_context).unwrap();
procedure.on_prepare().await.unwrap();
procedure.submit_alter_region_requests().await.unwrap();

@@ -181,7 +177,6 @@ async fn test_on_submit_alter_request_with_outdated_request() {
RequestOutdatedErrorDatanodeHandler,
));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let table_id = 1024;
let table_name = "foo";
let task = test_create_table_task(table_name, table_id);
@@ -230,8 +225,7 @@ async fn test_on_submit_alter_request_with_outdated_request() {
})),
},
};
let mut procedure =
AlterTableProcedure::new(cluster_id, table_id, alter_table_task, ddl_context).unwrap();
let mut procedure = AlterTableProcedure::new(table_id, alter_table_task, ddl_context).unwrap();
procedure.on_prepare().await.unwrap();
procedure.submit_alter_region_requests().await.unwrap();
}
@@ -240,7 +234,6 @@ async fn test_on_submit_alter_request_with_outdated_request() {
async fn test_on_update_metadata_rename() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let table_name = "foo";
let new_table_name = "bar";
let table_id = 1024;
@@ -257,8 +250,7 @@ async fn test_on_update_metadata_rename() {
.unwrap();

let task = test_rename_alter_table_task(table_name, new_table_name);
let mut procedure =
AlterTableProcedure::new(cluster_id, table_id, task, ddl_context.clone()).unwrap();
let mut procedure = AlterTableProcedure::new(table_id, task, ddl_context.clone()).unwrap();
procedure.on_prepare().await.unwrap();
procedure.on_update_metadata().await.unwrap();

@@ -291,7 +283,6 @@ async fn test_on_update_metadata_rename() {
async fn test_on_update_metadata_add_columns() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let table_name = "foo";
let table_id = 1024;
let task = test_create_table_task(table_name, table_id);
@@ -335,8 +326,7 @@ async fn test_on_update_metadata_add_columns() {
})),
},
};
let mut procedure =
AlterTableProcedure::new(cluster_id, table_id, task, ddl_context.clone()).unwrap();
let mut procedure = AlterTableProcedure::new(table_id, task, ddl_context.clone()).unwrap();
procedure.on_prepare().await.unwrap();
procedure.submit_alter_region_requests().await.unwrap();
procedure.on_update_metadata().await.unwrap();
@@ -361,7 +351,6 @@ async fn test_on_update_metadata_add_columns() {
async fn test_on_update_table_options() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let table_name = "foo";
let table_id = 1024;
let task = test_create_table_task(table_name, table_id);
@@ -398,8 +387,7 @@ async fn test_on_update_table_options() {
})),
},
};
let mut procedure =
AlterTableProcedure::new(cluster_id, table_id, task, ddl_context.clone()).unwrap();
let mut procedure = AlterTableProcedure::new(table_id, task, ddl_context.clone()).unwrap();
procedure.on_prepare().await.unwrap();
procedure.submit_alter_region_requests().await.unwrap();
procedure.on_update_metadata().await.unwrap();
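The single-table alter path follows the same pattern; a minimal sketch of the full test flow with the new three-argument constructor (`table_id` and `task` as in the tests above):

```rust
// Sketch: table id, task, context; `new` still returns a Result.
let mut procedure = AlterTableProcedure::new(table_id, task, ddl_context).unwrap();
procedure.on_prepare().await.unwrap();
procedure.submit_alter_region_requests().await.unwrap();
procedure.on_update_metadata().await.unwrap();
```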
@@ -25,11 +25,11 @@ use crate::ddl::create_flow::CreateFlowProcedure;
use crate::ddl::test_util::create_table::test_create_table_task;
use crate::ddl::test_util::flownode_handler::NaiveFlownodeHandler;
use crate::ddl::DdlContext;
use crate::error;
use crate::key::table_route::TableRouteValue;
use crate::key::FlowId;
use crate::rpc::ddl::CreateFlowTask;
use crate::test_util::{new_ddl_context, MockFlownodeManager};
use crate::{error, ClusterId};

pub(crate) fn test_create_flow_task(
name: &str,
@@ -53,7 +53,6 @@ pub(crate) fn test_create_flow_task(

#[tokio::test]
async fn test_create_flow_source_table_not_found() {
let cluster_id = 1;
let source_table_names = vec![TableName::new(
DEFAULT_CATALOG_NAME,
DEFAULT_SCHEMA_NAME,
@@ -65,14 +64,13 @@ async fn test_create_flow_source_table_not_found() {
let node_manager = Arc::new(MockFlownodeManager::new(NaiveFlownodeHandler));
let ddl_context = new_ddl_context(node_manager);
let query_ctx = QueryContext::arc().into();
let mut procedure = CreateFlowProcedure::new(cluster_id, task, query_ctx, ddl_context);
let mut procedure = CreateFlowProcedure::new(task, query_ctx, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, error::Error::TableNotFound { .. });
}

pub(crate) async fn create_test_flow(
ddl_context: &DdlContext,
cluster_id: ClusterId,
flow_name: &str,
source_table_names: Vec<TableName>,
sink_table_name: TableName,
@@ -84,8 +82,7 @@ pub(crate) async fn create_test_flow(
false,
);
let query_ctx = QueryContext::arc().into();
let mut procedure =
CreateFlowProcedure::new(cluster_id, task.clone(), query_ctx, ddl_context.clone());
let mut procedure = CreateFlowProcedure::new(task.clone(), query_ctx, ddl_context.clone());
let output = execute_procedure_until_done(&mut procedure).await.unwrap();
let flow_id = output.downcast_ref::<FlowId>().unwrap();

@@ -94,7 +91,6 @@ pub(crate) async fn create_test_flow(

#[tokio::test]
async fn test_create_flow() {
let cluster_id = 1;
let table_id = 1024;
let source_table_names = vec![TableName::new(
DEFAULT_CATALOG_NAME,
@@ -118,7 +114,6 @@ async fn test_create_flow() {
.unwrap();
let flow_id = create_test_flow(
&ddl_context,
cluster_id,
"my_flow",
source_table_names.clone(),
sink_table_name.clone(),
@@ -134,8 +129,7 @@ async fn test_create_flow() {
true,
);
let query_ctx = QueryContext::arc().into();
let mut procedure =
CreateFlowProcedure::new(cluster_id, task.clone(), query_ctx, ddl_context.clone());
let mut procedure = CreateFlowProcedure::new(task.clone(), query_ctx, ddl_context.clone());
let output = execute_procedure_until_done(&mut procedure).await.unwrap();
let flow_id = output.downcast_ref::<FlowId>().unwrap();
assert_eq!(*flow_id, 1024);
@@ -143,7 +137,7 @@ async fn test_create_flow() {
// Creates again
let task = test_create_flow_task("my_flow", source_table_names, sink_table_name, false);
let query_ctx = QueryContext::arc().into();
let mut procedure = CreateFlowProcedure::new(cluster_id, task.clone(), query_ctx, ddl_context);
let mut procedure = CreateFlowProcedure::new(task.clone(), query_ctx, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, error::Error::FlowAlreadyExists { .. });
}
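A minimal sketch of creating a flow with the reduced constructor, assuming `task` is a `CreateFlowTask` built as in the tests above:

```rust
// Sketch: task, query context, and DdlContext; the flow id comes back
// as the procedure output.
let query_ctx = QueryContext::arc().into();
let mut procedure = CreateFlowProcedure::new(task.clone(), query_ctx, ddl_context.clone());
let output = execute_procedure_until_done(&mut procedure).await.unwrap();
let flow_id = output.downcast_ref::<FlowId>().unwrap();
```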
@@ -26,7 +26,7 @@ use crate::ddl::test_util::datanode_handler::NaiveDatanodeHandler;
use crate::ddl::test_util::{
create_physical_table_metadata, test_create_logical_table_task, test_create_physical_table_task,
};
use crate::ddl::{TableMetadata, TableMetadataAllocatorContext};
use crate::ddl::TableMetadata;
use crate::error::Error;
use crate::key::table_route::TableRouteValue;
use crate::test_util::{new_ddl_context, MockDatanodeManager};
@@ -35,11 +35,9 @@ use crate::test_util::{new_ddl_context, MockDatanodeManager};
async fn test_on_prepare_physical_table_not_found() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let tasks = vec![test_create_logical_table_task("foo")];
let physical_table_id = 1024u32;
let mut procedure =
CreateLogicalTablesProcedure::new(cluster_id, tasks, physical_table_id, ddl_context);
let mut procedure = CreateLogicalTablesProcedure::new(tasks, physical_table_id, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, Error::TableRouteNotFound { .. });
}
@@ -48,7 +46,6 @@ async fn test_on_prepare_physical_table_not_found() {
async fn test_on_prepare() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task("phy_table");
let TableMetadata {
@@ -57,10 +54,7 @@ async fn test_on_prepare() {
..
} = ddl_context
.table_metadata_allocator
.create(
&TableMetadataAllocatorContext { cluster_id },
&create_physical_table_task,
)
.create(&create_physical_table_task)
.await
.unwrap();
create_physical_table_task.set_table_id(table_id);
@@ -73,8 +67,7 @@ async fn test_on_prepare() {
// The create logical table procedure.
let tasks = vec![test_create_logical_table_task("foo")];
let physical_table_id = table_id;
let mut procedure =
CreateLogicalTablesProcedure::new(cluster_id, tasks, physical_table_id, ddl_context);
let mut procedure = CreateLogicalTablesProcedure::new(tasks, physical_table_id, ddl_context);
let status = procedure.on_prepare().await.unwrap();
assert_matches!(status, Status::Executing { persist: true });
}
@@ -83,7 +76,6 @@ async fn test_on_prepare() {
async fn test_on_prepare_logical_table_exists_err() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task("phy_table");
let TableMetadata {
@@ -92,10 +84,7 @@ async fn test_on_prepare_logical_table_exists_err() {
..
} = ddl_context
.table_metadata_allocator
.create(
&TableMetadataAllocatorContext { cluster_id },
&create_physical_table_task,
)
.create(&create_physical_table_task)
.await
.unwrap();
create_physical_table_task.set_table_id(table_id);
@@ -119,7 +108,7 @@ async fn test_on_prepare_logical_table_exists_err() {
// The create logical table procedure.
let physical_table_id = table_id;
let mut procedure =
CreateLogicalTablesProcedure::new(cluster_id, vec![task], physical_table_id, ddl_context);
CreateLogicalTablesProcedure::new(vec![task], physical_table_id, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, Error::TableAlreadyExists { .. });
assert_eq!(err.status_code(), StatusCode::TableAlreadyExists);
@@ -129,7 +118,6 @@ async fn test_on_prepare_logical_table_exists_err() {
async fn test_on_prepare_with_create_if_table_exists() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task("phy_table");
let TableMetadata {
@@ -138,10 +126,7 @@ async fn test_on_prepare_with_create_if_table_exists() {
..
} = ddl_context
.table_metadata_allocator
.create(
&TableMetadataAllocatorContext { cluster_id },
&create_physical_table_task,
)
.create(&create_physical_table_task)
.await
.unwrap();
create_physical_table_task.set_table_id(table_id);
@@ -167,7 +152,7 @@ async fn test_on_prepare_with_create_if_table_exists() {
// Sets `create_if_not_exists`
task.create_table.create_if_not_exists = true;
let mut procedure =
CreateLogicalTablesProcedure::new(cluster_id, vec![task], physical_table_id, ddl_context);
CreateLogicalTablesProcedure::new(vec![task], physical_table_id, ddl_context);
let status = procedure.on_prepare().await.unwrap();
let output = status.downcast_output_ref::<Vec<u32>>().unwrap();
assert_eq!(*output, vec![8192]);
@@ -177,7 +162,6 @@ async fn test_on_prepare_with_create_if_table_exists() {
async fn test_on_prepare_part_logical_tables_exist() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task("phy_table");
let TableMetadata {
@@ -186,10 +170,7 @@ async fn test_on_prepare_part_logical_tables_exist() {
..
} = ddl_context
.table_metadata_allocator
.create(
&TableMetadataAllocatorContext { cluster_id },
&create_physical_table_task,
)
.create(&create_physical_table_task)
.await
.unwrap();
create_physical_table_task.set_table_id(table_id);
@@ -216,7 +197,6 @@ async fn test_on_prepare_part_logical_tables_exist() {
task.create_table.create_if_not_exists = true;
let non_exist_task = test_create_logical_table_task("non_exists");
let mut procedure = CreateLogicalTablesProcedure::new(
cluster_id,
vec![task, non_exist_task],
physical_table_id,
ddl_context,
@@ -229,7 +209,6 @@ async fn test_on_prepare_part_logical_tables_exist() {
async fn test_on_create_metadata() {
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task("phy_table");
let TableMetadata {
@@ -238,10 +217,7 @@ async fn test_on_create_metadata() {
..
} = ddl_context
.table_metadata_allocator
.create(
&TableMetadataAllocatorContext { cluster_id },
&create_physical_table_task,
)
.create(&create_physical_table_task)
.await
.unwrap();
create_physical_table_task.set_table_id(table_id);
@@ -257,7 +233,6 @@ async fn test_on_create_metadata() {
let task = test_create_logical_table_task("foo");
let yet_another_task = test_create_logical_table_task("bar");
let mut procedure = CreateLogicalTablesProcedure::new(
cluster_id,
vec![task, yet_another_task],
physical_table_id,
ddl_context,
@@ -279,7 +254,6 @@ async fn test_on_create_metadata() {
async fn test_on_create_metadata_part_logical_tables_exist() {
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task("phy_table");
let TableMetadata {
@@ -288,10 +262,7 @@ async fn test_on_create_metadata_part_logical_tables_exist() {
..
} = ddl_context
.table_metadata_allocator
.create(
&TableMetadataAllocatorContext { cluster_id },
&create_physical_table_task,
)
.create(&create_physical_table_task)
.await
.unwrap();
create_physical_table_task.set_table_id(table_id);
@@ -318,7 +289,6 @@ async fn test_on_create_metadata_part_logical_tables_exist() {
task.create_table.create_if_not_exists = true;
let non_exist_task = test_create_logical_table_task("non_exists");
let mut procedure = CreateLogicalTablesProcedure::new(
cluster_id,
vec![task, non_exist_task],
physical_table_id,
ddl_context,
@@ -340,7 +310,6 @@ async fn test_on_create_metadata_part_logical_tables_exist() {
async fn test_on_create_metadata_err() {
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task("phy_table");
let TableMetadata {
@@ -349,10 +318,7 @@ async fn test_on_create_metadata_err() {
..
} = ddl_context
.table_metadata_allocator
.create(
&TableMetadataAllocatorContext { cluster_id },
&create_physical_table_task,
)
.create(&create_physical_table_task)
.await
.unwrap();
create_physical_table_task.set_table_id(table_id);
@@ -368,7 +334,6 @@ async fn test_on_create_metadata_err() {
let task = test_create_logical_table_task("foo");
let yet_another_task = test_create_logical_table_task("bar");
let mut procedure = CreateLogicalTablesProcedure::new(
cluster_id,
vec![task.clone(), yet_another_task],
physical_table_id,
ddl_context.clone(),
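A minimal sketch of batching logical-table tasks under one physical table with the new constructor, assuming the tasks are built with the test helpers above:

```rust
// Sketch: a batch of logical-table tasks attached to one physical table.
let mut procedure = CreateLogicalTablesProcedure::new(
    vec![task, yet_another_task],
    physical_table_id,
    ddl_context,
);
```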
@@ -87,7 +87,6 @@ pub(crate) fn test_create_table_task(name: &str) -> CreateTableTask {
async fn test_on_prepare_table_exists_err() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let task = test_create_table_task("foo");
assert!(!task.create_table.create_if_not_exists);
// Puts a value to table name key.
@@ -100,7 +99,7 @@ async fn test_on_prepare_table_exists_err() {
)
.await
.unwrap();
let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context);
let mut procedure = CreateTableProcedure::new(task, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, Error::TableAlreadyExists { .. });
assert_eq!(err.status_code(), StatusCode::TableAlreadyExists);
@@ -110,7 +109,6 @@ async fn test_on_prepare_table_exists_err() {
async fn test_on_prepare_with_create_if_table_exists() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let mut task = test_create_table_task("foo");
task.create_table.create_if_not_exists = true;
task.table_info.ident.table_id = 1024;
@@ -124,7 +122,7 @@ async fn test_on_prepare_with_create_if_table_exists() {
)
.await
.unwrap();
let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context);
let mut procedure = CreateTableProcedure::new(task, ddl_context);
let status = procedure.on_prepare().await.unwrap();
assert_matches!(status, Status::Done { output: Some(..) });
let table_id = *status.downcast_output_ref::<u32>().unwrap();
@@ -135,10 +133,9 @@ async fn test_on_prepare_with_create_if_table_exists() {
async fn test_on_prepare_without_create_if_table_exists() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let mut task = test_create_table_task("foo");
task.create_table.create_if_not_exists = true;
let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context);
let mut procedure = CreateTableProcedure::new(task, ddl_context);
let status = procedure.on_prepare().await.unwrap();
assert_matches!(status, Status::Executing { persist: true });
assert_eq!(procedure.table_id(), 1024);
@@ -148,11 +145,10 @@ async fn test_on_prepare_without_create_if_table_exists() {
async fn test_on_prepare_with_no_partition_err() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let mut task = test_create_table_task("foo");
task.partitions = vec![];
task.create_table.create_if_not_exists = true;
let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context);
let mut procedure = CreateTableProcedure::new(task, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, Error::Unexpected { .. });
assert!(err
@@ -165,10 +161,9 @@ async fn test_on_datanode_create_regions_should_retry() {
common_telemetry::init_default_ut_logging();
let node_manager = Arc::new(MockDatanodeManager::new(RetryErrorDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let task = test_create_table_task("foo");
assert!(!task.create_table.create_if_not_exists);
let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context);
let mut procedure = CreateTableProcedure::new(task, ddl_context);
procedure.on_prepare().await.unwrap();
let ctx = ProcedureContext {
procedure_id: ProcedureId::random(),
@@ -183,10 +178,9 @@ async fn test_on_datanode_create_regions_should_not_retry() {
common_telemetry::init_default_ut_logging();
let node_manager = Arc::new(MockDatanodeManager::new(UnexpectedErrorDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let task = test_create_table_task("foo");
assert!(!task.create_table.create_if_not_exists);
let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context);
let mut procedure = CreateTableProcedure::new(task, ddl_context);
procedure.on_prepare().await.unwrap();
let ctx = ProcedureContext {
procedure_id: ProcedureId::random(),
@@ -201,10 +195,9 @@ async fn test_on_create_metadata_error() {
common_telemetry::init_default_ut_logging();
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let task = test_create_table_task("foo");
assert!(!task.create_table.create_if_not_exists);
let mut procedure = CreateTableProcedure::new(cluster_id, task.clone(), ddl_context.clone());
let mut procedure = CreateTableProcedure::new(task.clone(), ddl_context.clone());
procedure.on_prepare().await.unwrap();
let ctx = ProcedureContext {
procedure_id: ProcedureId::random(),
@@ -233,10 +226,9 @@ async fn test_on_create_metadata() {
common_telemetry::init_default_ut_logging();
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let task = test_create_table_task("foo");
assert!(!task.create_table.create_if_not_exists);
let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context);
let mut procedure = CreateTableProcedure::new(task, ddl_context);
procedure.on_prepare().await.unwrap();
let ctx = ProcedureContext {
procedure_id: ProcedureId::random(),
@@ -251,14 +243,12 @@ async fn test_on_create_metadata() {

#[tokio::test]
async fn test_memory_region_keeper_guard_dropped_on_procedure_done() {
let cluster_id = 1;

let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let kv_backend = Arc::new(MemoryKvBackend::new());
let ddl_context = new_ddl_context_with_kv_backend(node_manager, kv_backend);

let task = test_create_table_task("foo");
let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context.clone());
let mut procedure = CreateTableProcedure::new(task, ddl_context.clone());

execute_procedure_until(&mut procedure, |p| {
p.creator.data.state == CreateTableState::CreateMetadata
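A minimal sketch of the create-table entry point after the change, with `task` built as in the tests above:

```rust
// Sketch: the procedure is built from the task and the DdlContext alone.
let mut procedure = CreateTableProcedure::new(task, ddl_context);
procedure.on_prepare().await.unwrap();
```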
@@ -97,7 +97,6 @@ pub(crate) fn test_create_view_task(name: &str) -> CreateViewTask {
async fn test_on_prepare_view_exists_err() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let task = test_create_view_task("foo");
assert!(!task.create_view.create_if_not_exists);
// Puts a value to table name key.
@@ -113,7 +112,7 @@ async fn test_on_prepare_view_exists_err() {
)
.await
.unwrap();
let mut procedure = CreateViewProcedure::new(cluster_id, task, ddl_context);
let mut procedure = CreateViewProcedure::new(task, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, Error::ViewAlreadyExists { .. });
assert_eq!(err.status_code(), StatusCode::TableAlreadyExists);
@@ -123,7 +122,6 @@ async fn test_on_prepare_view_exists_err() {
async fn test_on_prepare_with_create_if_view_exists() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let mut task = test_create_view_task("foo");
task.create_view.create_if_not_exists = true;
task.view_info.ident.table_id = 1024;
@@ -140,7 +138,7 @@ async fn test_on_prepare_with_create_if_view_exists() {
)
.await
.unwrap();
let mut procedure = CreateViewProcedure::new(cluster_id, task, ddl_context);
let mut procedure = CreateViewProcedure::new(task, ddl_context);
let status = procedure.on_prepare().await.unwrap();
assert_matches!(status, Status::Done { output: Some(..) });
let table_id = *status.downcast_output_ref::<u32>().unwrap();
@@ -151,10 +149,9 @@ async fn test_on_prepare_with_create_if_view_exists() {
async fn test_on_prepare_without_create_if_table_exists() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let mut task = test_create_view_task("foo");
task.create_view.create_if_not_exists = true;
let mut procedure = CreateViewProcedure::new(cluster_id, task, ddl_context);
let mut procedure = CreateViewProcedure::new(task, ddl_context);
let status = procedure.on_prepare().await.unwrap();
assert_matches!(status, Status::Executing { persist: true });
assert_eq!(procedure.view_id(), 1024);
@@ -165,10 +162,9 @@ async fn test_on_create_metadata() {
common_telemetry::init_default_ut_logging();
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let task = test_create_view_task("foo");
assert!(!task.create_view.create_if_not_exists);
let mut procedure = CreateViewProcedure::new(cluster_id, task, ddl_context);
let mut procedure = CreateViewProcedure::new(task, ddl_context);
procedure.on_prepare().await.unwrap();
let ctx = ProcedureContext {
procedure_id: ProcedureId::random(),
@@ -185,10 +181,9 @@ async fn test_replace_view_metadata() {
common_telemetry::init_default_ut_logging();
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager.clone());
let cluster_id = 1;
let task = test_create_view_task("foo");
assert!(!task.create_view.create_if_not_exists);
let mut procedure = CreateViewProcedure::new(cluster_id, task.clone(), ddl_context.clone());
let mut procedure = CreateViewProcedure::new(task.clone(), ddl_context.clone());
procedure.on_prepare().await.unwrap();
let ctx = ProcedureContext {
procedure_id: ProcedureId::random(),
@@ -213,7 +208,7 @@ async fn test_replace_view_metadata() {
let mut task = test_create_view_task("foo");
// The view already exists, prepare should fail
{
let mut procedure = CreateViewProcedure::new(cluster_id, task.clone(), ddl_context.clone());
let mut procedure = CreateViewProcedure::new(task.clone(), ddl_context.clone());
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, Error::ViewAlreadyExists { .. });
assert_eq!(err.status_code(), StatusCode::TableAlreadyExists);
@@ -224,7 +219,7 @@ async fn test_replace_view_metadata() {
task.create_view.logical_plan = vec![4, 5, 6];
task.create_view.definition = "new_definition".to_string();

let mut procedure = CreateViewProcedure::new(cluster_id, task, ddl_context.clone());
let mut procedure = CreateViewProcedure::new(task, ddl_context.clone());
|
||||
procedure.on_prepare().await.unwrap();
|
||||
let ctx = ProcedureContext {
|
||||
procedure_id: ProcedureId::random(),
|
||||
@@ -254,12 +249,11 @@ async fn test_replace_table() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
|
||||
let ddl_context = new_ddl_context(node_manager.clone());
|
||||
let cluster_id = 1;
|
||||
|
||||
{
|
||||
// Create a `foo` table.
|
||||
let task = test_create_table_task("foo");
|
||||
let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context.clone());
|
||||
let mut procedure = CreateTableProcedure::new(task, ddl_context.clone());
|
||||
procedure.on_prepare().await.unwrap();
|
||||
let ctx = ProcedureContext {
|
||||
procedure_id: ProcedureId::random(),
|
||||
@@ -272,7 +266,7 @@ async fn test_replace_table() {
|
||||
// Try to replace a view named `foo` too.
|
||||
let mut task = test_create_view_task("foo");
|
||||
task.create_view.or_replace = true;
|
||||
let mut procedure = CreateViewProcedure::new(cluster_id, task.clone(), ddl_context.clone());
|
||||
let mut procedure = CreateViewProcedure::new(task.clone(), ddl_context.clone());
|
||||
let err = procedure.on_prepare().await.unwrap_err();
|
||||
assert_matches!(err, Error::TableAlreadyExists { .. });
|
||||
assert_eq!(err.status_code(), StatusCode::TableAlreadyExists);
|
||||
|
||||
@@ -31,7 +31,6 @@ use crate::test_util::{new_ddl_context, MockDatanodeManager};
|
||||
#[tokio::test]
|
||||
async fn test_drop_database_with_logical_tables() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let cluster_id = 1;
|
||||
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
|
||||
let ddl_context = new_ddl_context(node_manager);
|
||||
ddl_context
|
||||
@@ -45,11 +44,11 @@ async fn test_drop_database_with_logical_tables() {
|
||||
.await
|
||||
.unwrap();
|
||||
// Creates physical table
|
||||
let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
|
||||
let phy_id = create_physical_table(&ddl_context, "phy").await;
|
||||
// Creates 3 logical tables
|
||||
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
|
||||
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;
|
||||
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table3").await;
|
||||
create_logical_table(ddl_context.clone(), phy_id, "table1").await;
|
||||
create_logical_table(ddl_context.clone(), phy_id, "table2").await;
|
||||
create_logical_table(ddl_context.clone(), phy_id, "table3").await;
|
||||
|
||||
let mut procedure = DropDatabaseProcedure::new(
|
||||
DEFAULT_CATALOG_NAME.to_string(),
|
||||
@@ -80,7 +79,6 @@ async fn test_drop_database_with_logical_tables() {
|
||||
#[tokio::test]
|
||||
async fn test_drop_database_retryable_error() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let cluster_id = 1;
|
||||
let node_manager = Arc::new(MockDatanodeManager::new(RetryErrorDatanodeHandler));
|
||||
let ddl_context = new_ddl_context(node_manager);
|
||||
ddl_context
|
||||
@@ -94,11 +92,11 @@ async fn test_drop_database_retryable_error() {
|
||||
.await
|
||||
.unwrap();
|
||||
// Creates physical table
|
||||
let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
|
||||
let phy_id = create_physical_table(&ddl_context, "phy").await;
|
||||
// Creates 3 logical tables
|
||||
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
|
||||
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;
|
||||
create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table3").await;
|
||||
create_logical_table(ddl_context.clone(), phy_id, "table1").await;
|
||||
create_logical_table(ddl_context.clone(), phy_id, "table2").await;
|
||||
create_logical_table(ddl_context.clone(), phy_id, "table3").await;
|
||||
|
||||
let mut procedure = DropDatabaseProcedure::new(
|
||||
DEFAULT_CATALOG_NAME.to_string(),
|
||||
@@ -128,7 +126,6 @@ async fn test_drop_database_retryable_error() {
|
||||
#[tokio::test]
|
||||
async fn test_drop_database_recover() {
|
||||
common_telemetry::init_default_ut_logging();
|
||||
let cluster_id = 1;
|
||||
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
|
||||
let ddl_context = new_ddl_context(node_manager);
|
||||
ddl_context
|
||||
@@ -142,9 +139,9 @@ async fn test_drop_database_recover() {
|
||||
.await
|
||||
.unwrap();
|
||||
// Creates a physical table
|
||||
let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
|
||||
let phy_id = create_physical_table(&ddl_context, "phy").await;
|
||||
    // Creates a logical table
    create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
    create_logical_table(ddl_context.clone(), phy_id, "table1").await;
    let mut procedure = DropDatabaseProcedure::new(
        DEFAULT_CATALOG_NAME.to_string(),
        DEFAULT_SCHEMA_NAME.to_string(),

@@ -40,12 +40,11 @@ fn test_drop_flow_task(flow_name: &str, flow_id: u32, drop_if_exists: bool) -> D

#[tokio::test]
async fn test_drop_flow_not_found() {
    let cluster_id = 1;
    let flow_id = 1024;
    let node_manager = Arc::new(MockFlownodeManager::new(NaiveFlownodeHandler));
    let ddl_context = new_ddl_context(node_manager);
    let task = test_drop_flow_task("my_flow", flow_id, false);
    let mut procedure = DropFlowProcedure::new(cluster_id, task, ddl_context);
    let mut procedure = DropFlowProcedure::new(task, ddl_context);
    let err = procedure.on_prepare().await.unwrap_err();
    assert_matches!(err, error::Error::FlowNotFound { .. });
}
@@ -53,7 +52,6 @@ async fn test_drop_flow_not_found() {
#[tokio::test]
async fn test_drop_flow() {
    // create a flow
    let cluster_id = 1;
    let table_id = 1024;
    let source_table_names = vec![TableName::new(
        DEFAULT_CATALOG_NAME,
@@ -75,27 +73,21 @@ async fn test_drop_flow() {
    )
    .await
    .unwrap();
    let flow_id = create_test_flow(
        &ddl_context,
        cluster_id,
        "my_flow",
        source_table_names,
        sink_table_name,
    )
    .await;
    let flow_id =
        create_test_flow(&ddl_context, "my_flow", source_table_names, sink_table_name).await;
    // Drops the flows
    let task = test_drop_flow_task("my_flow", flow_id, false);
    let mut procedure = DropFlowProcedure::new(cluster_id, task, ddl_context.clone());
    let mut procedure = DropFlowProcedure::new(task, ddl_context.clone());
    execute_procedure_until_done(&mut procedure).await;

    // Drops if not exists
    let task = test_drop_flow_task("my_flow", flow_id, true);
    let mut procedure = DropFlowProcedure::new(cluster_id, task, ddl_context.clone());
    let mut procedure = DropFlowProcedure::new(task, ddl_context.clone());
    execute_procedure_until_done(&mut procedure).await;

    // Drops again
    let task = test_drop_flow_task("my_flow", flow_id, false);
    let mut procedure = DropFlowProcedure::new(cluster_id, task, ddl_context);
    let mut procedure = DropFlowProcedure::new(task, ddl_context);
    let err = procedure.on_prepare().await.unwrap_err();
    assert_matches!(err, error::Error::FlowNotFound { .. });
}

@@ -35,7 +35,7 @@ use crate::ddl::test_util::{
    create_logical_table, create_physical_table, create_physical_table_metadata,
    test_create_logical_table_task, test_create_physical_table_task,
};
use crate::ddl::{TableMetadata, TableMetadataAllocatorContext};
use crate::ddl::TableMetadata;
use crate::key::table_route::TableRouteValue;
use crate::kv_backend::memory::MemoryKvBackend;
use crate::peer::Peer;
@@ -47,7 +47,6 @@ use crate::test_util::{new_ddl_context, new_ddl_context_with_kv_backend, MockDat
async fn test_on_prepare_table_not_exists_err() {
    let node_manager = Arc::new(MockDatanodeManager::new(()));
    let ddl_context = new_ddl_context(node_manager);
    let cluster_id = 1;
    let table_name = "foo";
    let table_id = 1024;
    let task = test_create_table_task(table_name, table_id);
@@ -63,7 +62,7 @@ async fn test_on_prepare_table_not_exists_err() {
    .unwrap();

    let task = new_drop_table_task("bar", table_id, false);
    let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context);
    let mut procedure = DropTableProcedure::new(task, ddl_context);
    let err = procedure.on_prepare().await.unwrap_err();
    assert_eq!(err.status_code(), StatusCode::TableNotFound);
}
@@ -72,7 +71,6 @@ async fn test_on_prepare_table_not_exists_err() {
async fn test_on_prepare_table() {
    let node_manager = Arc::new(MockDatanodeManager::new(()));
    let ddl_context = new_ddl_context(node_manager);
    let cluster_id = 1;
    let table_name = "foo";
    let table_id = 1024;
    let task = test_create_table_task(table_name, table_id);
@@ -89,13 +87,13 @@ async fn test_on_prepare_table() {

    let task = new_drop_table_task("bar", table_id, true);
    // Drop if exists
    let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
    let mut procedure = DropTableProcedure::new(task, ddl_context.clone());
    procedure.on_prepare().await.unwrap();
    assert!(!procedure.rollback_supported());

    let task = new_drop_table_task(table_name, table_id, false);
    // Drop table
    let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context);
    let mut procedure = DropTableProcedure::new(task, ddl_context);
    procedure.on_prepare().await.unwrap();
}

@@ -105,7 +103,6 @@ async fn test_on_datanode_drop_regions() {
    let datanode_handler = DatanodeWatcher(tx);
    let node_manager = Arc::new(MockDatanodeManager::new(datanode_handler));
    let ddl_context = new_ddl_context(node_manager);
    let cluster_id = 1;
    let table_id = 1024;
    let table_name = "foo";
    let task = test_create_table_task(table_name, table_id);
@@ -144,7 +141,7 @@ async fn test_on_datanode_drop_regions() {

    let task = new_drop_table_task(table_name, table_id, false);
    // Drop table
    let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context);
    let mut procedure = DropTableProcedure::new(task, ddl_context);
    procedure.on_prepare().await.unwrap();
    procedure.on_datanode_drop_regions().await.unwrap();

@@ -179,7 +176,6 @@ async fn test_on_rollback() {
    let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
    let kv_backend = Arc::new(MemoryKvBackend::new());
    let ddl_context = new_ddl_context_with_kv_backend(node_manager, kv_backend.clone());
    let cluster_id = 1;
    // Prepares physical table metadata.
    let mut create_physical_table_task = test_create_physical_table_task("phy_table");
    let TableMetadata {
@@ -188,10 +184,7 @@ async fn test_on_rollback() {
        ..
    } = ddl_context
        .table_metadata_allocator
        .create(
            &TableMetadataAllocatorContext { cluster_id },
            &create_physical_table_task,
        )
        .create(&create_physical_table_task)
        .await
        .unwrap();
    create_physical_table_task.set_table_id(table_id);
@@ -205,12 +198,8 @@ async fn test_on_rollback() {
    let physical_table_id = table_id;
    // Creates the logical table metadata.
    let task = test_create_logical_table_task("foo");
    let mut procedure = CreateLogicalTablesProcedure::new(
        cluster_id,
        vec![task],
        physical_table_id,
        ddl_context.clone(),
    );
    let mut procedure =
        CreateLogicalTablesProcedure::new(vec![task], physical_table_id, ddl_context.clone());
    procedure.on_prepare().await.unwrap();
    let ctx = new_test_procedure_context();
    procedure.execute(&ctx).await.unwrap();
@@ -223,7 +212,7 @@ async fn test_on_rollback() {
    // Drops the physical table
    {
        let task = new_drop_table_task("phy_table", physical_table_id, false);
        let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
        let mut procedure = DropTableProcedure::new(task, ddl_context.clone());
        procedure.on_prepare().await.unwrap();
        assert!(procedure.rollback_supported());
        procedure.on_delete_metadata().await.unwrap();
@@ -238,7 +227,7 @@ async fn test_on_rollback() {

    // Drops the logical table
    let task = new_drop_table_task("foo", table_ids[0], false);
    let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
    let mut procedure = DropTableProcedure::new(task, ddl_context.clone());
    procedure.on_prepare().await.unwrap();
    assert!(!procedure.rollback_supported());
}
@@ -255,18 +244,15 @@ fn new_drop_table_task(table_name: &str, table_id: TableId, drop_if_exists: bool

#[tokio::test]
async fn test_memory_region_keeper_guard_dropped_on_procedure_done() {
    let cluster_id = 1;

    let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
    let kv_backend = Arc::new(MemoryKvBackend::new());
    let ddl_context = new_ddl_context_with_kv_backend(node_manager, kv_backend);

    let physical_table_id = create_physical_table(&ddl_context, cluster_id, "t").await;
    let logical_table_id =
        create_logical_table(ddl_context.clone(), cluster_id, physical_table_id, "s").await;
    let physical_table_id = create_physical_table(&ddl_context, "t").await;
    let logical_table_id = create_logical_table(ddl_context.clone(), physical_table_id, "s").await;

    let inner_test = |task: DropTableTask| async {
        let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
        let mut procedure = DropTableProcedure::new(task, ddl_context.clone());
        execute_procedure_until(&mut procedure, |p| {
            p.data.state == DropTableState::InvalidateTableCache
        })
@@ -304,14 +290,13 @@ async fn test_from_json() {
        (DropTableState::DatanodeDropRegions, 1, 1),
        (DropTableState::DeleteTombstone, 1, 0),
    ] {
        let cluster_id = 1;
        let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
        let kv_backend = Arc::new(MemoryKvBackend::new());
        let ddl_context = new_ddl_context_with_kv_backend(node_manager, kv_backend);

        let physical_table_id = create_physical_table(&ddl_context, cluster_id, "t").await;
        let physical_table_id = create_physical_table(&ddl_context, "t").await;
        let task = new_drop_table_task("t", physical_table_id, false);
        let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
        let mut procedure = DropTableProcedure::new(task, ddl_context.clone());
        execute_procedure_until(&mut procedure, |p| p.data.state == state).await;
        let data = procedure.dump().unwrap();
        assert_eq!(
@@ -334,14 +319,13 @@ async fn test_from_json() {

    let num_operating_regions = 0;
    let num_operating_regions_after_recovery = 0;
    let cluster_id = 1;
    let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
    let kv_backend = Arc::new(MemoryKvBackend::new());
    let ddl_context = new_ddl_context_with_kv_backend(node_manager, kv_backend);

    let physical_table_id = create_physical_table(&ddl_context, cluster_id, "t").await;
    let physical_table_id = create_physical_table(&ddl_context, "t").await;
    let task = new_drop_table_task("t", physical_table_id, false);
    let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
    let mut procedure = DropTableProcedure::new(task, ddl_context.clone());
    execute_procedure_until_done(&mut procedure).await;
    let data = procedure.dump().unwrap();
    assert_eq!(

@@ -41,7 +41,6 @@ fn new_drop_view_task(view: &str, view_id: TableId, drop_if_exists: bool) -> Dro
async fn test_on_prepare_view_not_exists_err() {
    let node_manager = Arc::new(MockDatanodeManager::new(()));
    let ddl_context = new_ddl_context(node_manager);
    let cluster_id = 1;
    let view_id = 1024;
    let mut task = test_create_view_task("foo");
    task.view_info.ident.table_id = view_id;
@@ -60,7 +59,7 @@ async fn test_on_prepare_view_not_exists_err() {
    .unwrap();

    let task = new_drop_view_task("bar", view_id, false);
    let mut procedure = DropViewProcedure::new(cluster_id, task, ddl_context);
    let mut procedure = DropViewProcedure::new(task, ddl_context);
    let err = procedure.on_prepare().await.unwrap_err();
    assert_eq!(err.status_code(), StatusCode::TableNotFound);
}
@@ -69,7 +68,6 @@ async fn test_on_prepare_view_not_exists_err() {
async fn test_on_prepare_not_view_err() {
    let node_manager = Arc::new(MockDatanodeManager::new(()));
    let ddl_context = new_ddl_context(node_manager);
    let cluster_id = 1;
    let view_id = 1024;
    let view_name = "foo";
    let task = test_create_table_task(view_name, view_id);
@@ -85,7 +83,7 @@ async fn test_on_prepare_not_view_err() {
    .unwrap();

    let task = new_drop_view_task(view_name, view_id, false);
    let mut procedure = DropViewProcedure::new(cluster_id, task, ddl_context);
    let mut procedure = DropViewProcedure::new(task, ddl_context);
    // It's not a view, expect error
    let err = procedure.on_prepare().await.unwrap_err();
    assert_eq!(err.status_code(), StatusCode::InvalidArguments);
@@ -95,7 +93,6 @@ async fn test_on_prepare_not_view_err() {
async fn test_on_prepare_success() {
    let node_manager = Arc::new(MockDatanodeManager::new(()));
    let ddl_context = new_ddl_context(node_manager);
    let cluster_id = 1;
    let view_id = 1024;
    let view_name = "foo";
    let mut task = test_create_view_task("foo");
@@ -116,12 +113,12 @@ async fn test_on_prepare_success() {

    let task = new_drop_view_task("bar", view_id, true);
    // Drop if exists
    let mut procedure = DropViewProcedure::new(cluster_id, task, ddl_context.clone());
    let mut procedure = DropViewProcedure::new(task, ddl_context.clone());
    procedure.on_prepare().await.unwrap();

    let task = new_drop_view_task(view_name, view_id, false);
    // Prepare success
    let mut procedure = DropViewProcedure::new(cluster_id, task, ddl_context);
    let mut procedure = DropViewProcedure::new(task, ddl_context);
    procedure.on_prepare().await.unwrap();
    assert_eq!(DropViewState::DeleteMetadata, procedure.state());
}
@@ -130,7 +127,6 @@ async fn test_on_prepare_success() {
async fn test_drop_view_success() {
    let node_manager = Arc::new(MockDatanodeManager::new(()));
    let ddl_context = new_ddl_context(node_manager);
    let cluster_id = 1;
    let view_id = 1024;
    let view_name = "foo";
    let mut task = test_create_view_task("foo");
@@ -159,7 +155,7 @@ async fn test_drop_view_success() {

    let task = new_drop_view_task(view_name, view_id, false);
    // Prepare success
    let mut procedure = DropViewProcedure::new(cluster_id, task, ddl_context.clone());
    let mut procedure = DropViewProcedure::new(task, ddl_context.clone());
    execute_procedure_until_done(&mut procedure).await;
    assert_eq!(DropViewState::InvalidateViewCache, procedure.state());

@@ -174,7 +170,7 @@ async fn test_drop_view_success() {

    // Drop again
    let task = new_drop_view_task(view_name, view_id, false);
    let mut procedure = DropViewProcedure::new(cluster_id, task, ddl_context);
    let mut procedure = DropViewProcedure::new(task, ddl_context);
    let err = procedure.on_prepare().await.unwrap_err();
    assert_eq!(err.status_code(), StatusCode::TableNotFound);
}

@@ -39,9 +39,9 @@ use crate::key::table_info::TableInfoValue;
use crate::key::table_name::TableNameKey;
use crate::key::DeserializedValueWithBytes;
use crate::lock_key::{CatalogLock, SchemaLock, TableLock};
use crate::metrics;
use crate::rpc::ddl::TruncateTableTask;
use crate::rpc::router::{find_leader_regions, find_leaders, RegionRoute};
use crate::{metrics, ClusterId};

pub struct TruncateTableProcedure {
    context: DdlContext,
@@ -91,7 +91,6 @@ impl TruncateTableProcedure {
    pub(crate) const TYPE_NAME: &'static str = "metasrv-procedure::TruncateTable";

    pub(crate) fn new(
        cluster_id: ClusterId,
        task: TruncateTableTask,
        table_info_value: DeserializedValueWithBytes<TableInfoValue>,
        region_routes: Vec<RegionRoute>,
@@ -99,7 +98,7 @@ impl TruncateTableProcedure {
    ) -> Self {
        Self {
            context,
            data: TruncateTableData::new(cluster_id, task, table_info_value, region_routes),
            data: TruncateTableData::new(task, table_info_value, region_routes),
        }
    }

@@ -189,7 +188,6 @@ impl TruncateTableProcedure {
#[derive(Debug, Serialize, Deserialize)]
pub struct TruncateTableData {
    state: TruncateTableState,
    cluster_id: ClusterId,
    task: TruncateTableTask,
    table_info_value: DeserializedValueWithBytes<TableInfoValue>,
    region_routes: Vec<RegionRoute>,
@@ -197,14 +195,12 @@ pub struct TruncateTableData {

impl TruncateTableData {
    pub fn new(
        cluster_id: ClusterId,
        task: TruncateTableTask,
        table_info_value: DeserializedValueWithBytes<TableInfoValue>,
        region_routes: Vec<RegionRoute>,
    ) -> Self {
        Self {
            state: TruncateTableState::Prepare,
            cluster_id,
            task,
            table_info_value,
            region_routes,

@@ -34,7 +34,6 @@ use crate::key::TableMetadataManagerRef;
use crate::peer::Peer;
use crate::rpc::ddl::CreateTableTask;
use crate::rpc::router::RegionRoute;
use crate::ClusterId;

/// Adds [Peer] context if the error is unretryable.
pub fn add_peer_context_if_needed(datanode: Peer) -> impl FnOnce(Error) -> Error {
@@ -144,7 +143,6 @@ pub async fn get_physical_table_id(

/// Converts a list of [`RegionRoute`] to a list of [`DetectingRegion`].
pub fn convert_region_routes_to_detecting_regions(
    cluster_id: ClusterId,
    region_routes: &[RegionRoute],
) -> Vec<DetectingRegion> {
    region_routes
@@ -153,7 +151,7 @@ pub fn convert_region_routes_to_detecting_regions(
            route
                .leader_peer
                .as_ref()
                .map(|peer| (cluster_id, peer.id, route.region.id))
                .map(|peer| (peer.id, route.region.id))
        })
        .collect::<Vec<_>>()
}

@@ -60,7 +60,6 @@ use crate::rpc::ddl::{
use crate::rpc::procedure;
use crate::rpc::procedure::{MigrateRegionRequest, MigrateRegionResponse, ProcedureStateResponse};
use crate::rpc::router::RegionRoute;
use crate::ClusterId;

pub type DdlManagerRef = Arc<DdlManager>;

@@ -154,13 +153,12 @@ impl DdlManager {
    #[tracing::instrument(skip_all)]
    pub async fn submit_alter_table_task(
        &self,
        cluster_id: ClusterId,
        table_id: TableId,
        alter_table_task: AlterTableTask,
    ) -> Result<(ProcedureId, Option<Output>)> {
        let context = self.create_context();

        let procedure = AlterTableProcedure::new(cluster_id, table_id, alter_table_task, context)?;
        let procedure = AlterTableProcedure::new(table_id, alter_table_task, context)?;

        let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));

@@ -171,12 +169,11 @@ impl DdlManager {
    #[tracing::instrument(skip_all)]
    pub async fn submit_create_table_task(
        &self,
        cluster_id: ClusterId,
        create_table_task: CreateTableTask,
    ) -> Result<(ProcedureId, Option<Output>)> {
        let context = self.create_context();

        let procedure = CreateTableProcedure::new(cluster_id, create_table_task, context);
        let procedure = CreateTableProcedure::new(create_table_task, context);

        let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));

@@ -187,12 +184,11 @@ impl DdlManager {
    #[tracing::instrument(skip_all)]
    pub async fn submit_create_view_task(
        &self,
        cluster_id: ClusterId,
        create_view_task: CreateViewTask,
    ) -> Result<(ProcedureId, Option<Output>)> {
        let context = self.create_context();

        let procedure = CreateViewProcedure::new(cluster_id, create_view_task, context);
        let procedure = CreateViewProcedure::new(create_view_task, context);

        let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));

@@ -203,18 +199,13 @@ impl DdlManager {
    #[tracing::instrument(skip_all)]
    pub async fn submit_create_logical_table_tasks(
        &self,
        cluster_id: ClusterId,
        create_table_tasks: Vec<CreateTableTask>,
        physical_table_id: TableId,
    ) -> Result<(ProcedureId, Option<Output>)> {
        let context = self.create_context();

        let procedure = CreateLogicalTablesProcedure::new(
            cluster_id,
            create_table_tasks,
            physical_table_id,
            context,
        );
        let procedure =
            CreateLogicalTablesProcedure::new(create_table_tasks, physical_table_id, context);

        let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));

@@ -225,18 +216,13 @@ impl DdlManager {
    #[tracing::instrument(skip_all)]
    pub async fn submit_alter_logical_table_tasks(
        &self,
        cluster_id: ClusterId,
        alter_table_tasks: Vec<AlterTableTask>,
        physical_table_id: TableId,
    ) -> Result<(ProcedureId, Option<Output>)> {
        let context = self.create_context();

        let procedure = AlterLogicalTablesProcedure::new(
            cluster_id,
            alter_table_tasks,
            physical_table_id,
            context,
        );
        let procedure =
            AlterLogicalTablesProcedure::new(alter_table_tasks, physical_table_id, context);

        let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));

@@ -247,12 +233,11 @@ impl DdlManager {
    #[tracing::instrument(skip_all)]
    pub async fn submit_drop_table_task(
        &self,
        cluster_id: ClusterId,
        drop_table_task: DropTableTask,
    ) -> Result<(ProcedureId, Option<Output>)> {
        let context = self.create_context();

        let procedure = DropTableProcedure::new(cluster_id, drop_table_task, context);
        let procedure = DropTableProcedure::new(drop_table_task, context);

        let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));

@@ -263,7 +248,6 @@ impl DdlManager {
    #[tracing::instrument(skip_all)]
    pub async fn submit_create_database(
        &self,
        _cluster_id: ClusterId,
        CreateDatabaseTask {
            catalog,
            schema,
@@ -283,7 +267,6 @@ impl DdlManager {
    #[tracing::instrument(skip_all)]
    pub async fn submit_drop_database(
        &self,
        _cluster_id: ClusterId,
        DropDatabaseTask {
            catalog,
            schema,
@@ -299,11 +282,10 @@ impl DdlManager {

    pub async fn submit_alter_database(
        &self,
        cluster_id: ClusterId,
        alter_database_task: AlterDatabaseTask,
    ) -> Result<(ProcedureId, Option<Output>)> {
        let context = self.create_context();
        let procedure = AlterDatabaseProcedure::new(cluster_id, alter_database_task, context)?;
        let procedure = AlterDatabaseProcedure::new(alter_database_task, context)?;
        let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));

        self.submit_procedure(procedure_with_id).await
@@ -313,12 +295,11 @@ impl DdlManager {
    #[tracing::instrument(skip_all)]
    pub async fn submit_create_flow_task(
        &self,
        cluster_id: ClusterId,
        create_flow: CreateFlowTask,
        query_context: QueryContext,
    ) -> Result<(ProcedureId, Option<Output>)> {
        let context = self.create_context();
        let procedure = CreateFlowProcedure::new(cluster_id, create_flow, query_context, context);
        let procedure = CreateFlowProcedure::new(create_flow, query_context, context);
        let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));

        self.submit_procedure(procedure_with_id).await
@@ -328,11 +309,10 @@ impl DdlManager {
    #[tracing::instrument(skip_all)]
    pub async fn submit_drop_flow_task(
        &self,
        cluster_id: ClusterId,
        drop_flow: DropFlowTask,
    ) -> Result<(ProcedureId, Option<Output>)> {
        let context = self.create_context();
        let procedure = DropFlowProcedure::new(cluster_id, drop_flow, context);
        let procedure = DropFlowProcedure::new(drop_flow, context);
        let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));

        self.submit_procedure(procedure_with_id).await
@@ -342,11 +322,10 @@ impl DdlManager {
    #[tracing::instrument(skip_all)]
    pub async fn submit_drop_view_task(
        &self,
        cluster_id: ClusterId,
        drop_view: DropViewTask,
    ) -> Result<(ProcedureId, Option<Output>)> {
        let context = self.create_context();
        let procedure = DropViewProcedure::new(cluster_id, drop_view, context);
        let procedure = DropViewProcedure::new(drop_view, context);
        let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));

        self.submit_procedure(procedure_with_id).await
@@ -356,14 +335,12 @@ impl DdlManager {
    #[tracing::instrument(skip_all)]
    pub async fn submit_truncate_table_task(
        &self,
        cluster_id: ClusterId,
        truncate_table_task: TruncateTableTask,
        table_info_value: DeserializedValueWithBytes<TableInfoValue>,
        region_routes: Vec<RegionRoute>,
    ) -> Result<(ProcedureId, Option<Output>)> {
        let context = self.create_context();
        let procedure = TruncateTableProcedure::new(
            cluster_id,
            truncate_table_task,
            table_info_value,
            region_routes,
@@ -397,7 +374,6 @@ impl DdlManager {

async fn handle_truncate_table_task(
    ddl_manager: &DdlManager,
    cluster_id: ClusterId,
    truncate_table_task: TruncateTableTask,
) -> Result<SubmitDdlTaskResponse> {
    let table_id = truncate_table_task.table_id;
@@ -416,12 +392,7 @@ async fn handle_truncate_table_task(
    let table_route = table_route_value.into_inner().region_routes()?.clone();

    let (id, _) = ddl_manager
        .submit_truncate_table_task(
            cluster_id,
            truncate_table_task,
            table_info_value,
            table_route,
        )
        .submit_truncate_table_task(truncate_table_task, table_info_value, table_route)
        .await?;

    info!("Table: {table_id} is truncated via procedure_id {id:?}");
@@ -434,7 +405,6 @@ async fn handle_truncate_table_task(

async fn handle_alter_table_task(
    ddl_manager: &DdlManager,
    cluster_id: ClusterId,
    alter_table_task: AlterTableTask,
) -> Result<SubmitDdlTaskResponse> {
    let table_ref = alter_table_task.table_ref();
@@ -468,7 +438,7 @@ async fn handle_alter_table_task(
    );

    let (id, _) = ddl_manager
        .submit_alter_table_task(cluster_id, table_id, alter_table_task)
        .submit_alter_table_task(table_id, alter_table_task)
        .await?;

    info!("Table: {table_id} is altered via procedure_id {id:?}");
@@ -481,13 +451,10 @@ async fn handle_alter_table_task(

async fn handle_drop_table_task(
    ddl_manager: &DdlManager,
    cluster_id: ClusterId,
    drop_table_task: DropTableTask,
) -> Result<SubmitDdlTaskResponse> {
    let table_id = drop_table_task.table_id;
    let (id, _) = ddl_manager
        .submit_drop_table_task(cluster_id, drop_table_task)
        .await?;
    let (id, _) = ddl_manager.submit_drop_table_task(drop_table_task).await?;

    info!("Table: {table_id} is dropped via procedure_id {id:?}");

@@ -499,11 +466,10 @@ async fn handle_drop_table_task(

async fn handle_create_table_task(
    ddl_manager: &DdlManager,
    cluster_id: ClusterId,
    create_table_task: CreateTableTask,
) -> Result<SubmitDdlTaskResponse> {
    let (id, output) = ddl_manager
        .submit_create_table_task(cluster_id, create_table_task)
        .submit_create_table_task(create_table_task)
        .await?;

    let procedure_id = id.to_string();
@@ -525,7 +491,6 @@ async fn handle_create_table_task(

async fn handle_create_logical_table_tasks(
    ddl_manager: &DdlManager,
    cluster_id: ClusterId,
    create_table_tasks: Vec<CreateTableTask>,
) -> Result<SubmitDdlTaskResponse> {
    ensure!(
@@ -542,7 +507,7 @@ async fn handle_create_logical_table_tasks(
    let num_logical_tables = create_table_tasks.len();

    let (id, output) = ddl_manager
        .submit_create_logical_table_tasks(cluster_id, create_table_tasks, physical_table_id)
        .submit_create_logical_table_tasks(create_table_tasks, physical_table_id)
        .await?;

    info!("{num_logical_tables} logical tables on physical table: {physical_table_id:?} is created via procedure_id {id:?}");
@@ -568,11 +533,10 @@ async fn handle_create_logical_table_tasks(

async fn handle_create_database_task(
    ddl_manager: &DdlManager,
    cluster_id: ClusterId,
    create_database_task: CreateDatabaseTask,
) -> Result<SubmitDdlTaskResponse> {
    let (id, _) = ddl_manager
        .submit_create_database(cluster_id, create_database_task.clone())
        .submit_create_database(create_database_task.clone())
        .await?;

    let procedure_id = id.to_string();
@@ -589,11 +553,10 @@ async fn handle_create_database_task(

async fn handle_drop_database_task(
    ddl_manager: &DdlManager,
    cluster_id: ClusterId,
    drop_database_task: DropDatabaseTask,
) -> Result<SubmitDdlTaskResponse> {
    let (id, _) = ddl_manager
        .submit_drop_database(cluster_id, drop_database_task.clone())
        .submit_drop_database(drop_database_task.clone())
        .await?;

    let procedure_id = id.to_string();
@@ -610,11 +573,10 @@ async fn handle_drop_database_task(

async fn handle_alter_database_task(
    ddl_manager: &DdlManager,
    cluster_id: ClusterId,
    alter_database_task: AlterDatabaseTask,
) -> Result<SubmitDdlTaskResponse> {
    let (id, _) = ddl_manager
        .submit_alter_database(cluster_id, alter_database_task.clone())
        .submit_alter_database(alter_database_task.clone())
        .await?;

    let procedure_id = id.to_string();
@@ -632,11 +594,10 @@ async fn handle_alter_database_task(

async fn handle_drop_flow_task(
    ddl_manager: &DdlManager,
    cluster_id: ClusterId,
    drop_flow_task: DropFlowTask,
) -> Result<SubmitDdlTaskResponse> {
    let (id, _) = ddl_manager
        .submit_drop_flow_task(cluster_id, drop_flow_task.clone())
        .submit_drop_flow_task(drop_flow_task.clone())
        .await?;

    let procedure_id = id.to_string();
@@ -653,11 +614,10 @@ async fn handle_drop_flow_task(

async fn handle_drop_view_task(
    ddl_manager: &DdlManager,
    cluster_id: ClusterId,
    drop_view_task: DropViewTask,
) -> Result<SubmitDdlTaskResponse> {
    let (id, _) = ddl_manager
        .submit_drop_view_task(cluster_id, drop_view_task.clone())
        .submit_drop_view_task(drop_view_task.clone())
        .await?;

    let procedure_id = id.to_string();
@@ -675,12 +635,11 @@ async fn handle_drop_view_task(

async fn handle_create_flow_task(
    ddl_manager: &DdlManager,
    cluster_id: ClusterId,
    create_flow_task: CreateFlowTask,
    query_context: QueryContext,
) -> Result<SubmitDdlTaskResponse> {
    let (id, output) = ddl_manager
        .submit_create_flow_task(cluster_id, create_flow_task.clone(), query_context)
        .submit_create_flow_task(create_flow_task.clone(), query_context)
        .await?;

    let procedure_id = id.to_string();
@@ -712,7 +671,6 @@ async fn handle_create_flow_task(

async fn handle_alter_logical_table_tasks(
    ddl_manager: &DdlManager,
    cluster_id: ClusterId,
    alter_table_tasks: Vec<AlterTableTask>,
) -> Result<SubmitDdlTaskResponse> {
    ensure!(
@@ -733,7 +691,7 @@ async fn handle_alter_logical_table_tasks(
    let num_logical_tables = alter_table_tasks.len();

    let (id, _) = ddl_manager
        .submit_alter_logical_table_tasks(cluster_id, alter_table_tasks, physical_table_id)
        .submit_alter_logical_table_tasks(alter_table_tasks, physical_table_id)
        .await?;

    info!("{num_logical_tables} logical tables on physical table: {physical_table_id:?} is altered via procedure_id {id:?}");
@@ -749,11 +707,10 @@ async fn handle_alter_logical_table_tasks(

/// Handle the `[CreateViewTask]` and returns the DDL response when success.
async fn handle_create_view_task(
    ddl_manager: &DdlManager,
    cluster_id: ClusterId,
    create_view_task: CreateViewTask,
) -> Result<SubmitDdlTaskResponse> {
    let (id, output) = ddl_manager
        .submit_create_view_task(cluster_id, create_view_task)
        .submit_create_view_task(create_view_task)
        .await?;

    let procedure_id = id.to_string();
@@ -788,55 +745,43 @@ impl ProcedureExecutor for DdlManager {
            .unwrap_or(TracingContext::from_current_span())
            .attach(tracing::info_span!("DdlManager::submit_ddl_task"));
        async move {
            let cluster_id = ctx.cluster_id.unwrap_or_default();
            debug!("Submitting Ddl task: {:?}", request.task);
            match request.task {
                CreateTable(create_table_task) => {
                    handle_create_table_task(self, cluster_id, create_table_task).await
                }
                DropTable(drop_table_task) => {
                    handle_drop_table_task(self, cluster_id, drop_table_task).await
                    handle_create_table_task(self, create_table_task).await
                }
                DropTable(drop_table_task) => handle_drop_table_task(self, drop_table_task).await,
                AlterTable(alter_table_task) => {
                    handle_alter_table_task(self, cluster_id, alter_table_task).await
                    handle_alter_table_task(self, alter_table_task).await
                }
                TruncateTable(truncate_table_task) => {
                    handle_truncate_table_task(self, cluster_id, truncate_table_task).await
                    handle_truncate_table_task(self, truncate_table_task).await
                }
                CreateLogicalTables(create_table_tasks) => {
                    handle_create_logical_table_tasks(self, cluster_id, create_table_tasks).await
                    handle_create_logical_table_tasks(self, create_table_tasks).await
                }
                AlterLogicalTables(alter_table_tasks) => {
                    handle_alter_logical_table_tasks(self, cluster_id, alter_table_tasks).await
                    handle_alter_logical_table_tasks(self, alter_table_tasks).await
                }
                DropLogicalTables(_) => todo!(),
                CreateDatabase(create_database_task) => {
                    handle_create_database_task(self, cluster_id, create_database_task).await
                    handle_create_database_task(self, create_database_task).await
                }
                DropDatabase(drop_database_task) => {
                    handle_drop_database_task(self, cluster_id, drop_database_task).await
                    handle_drop_database_task(self, drop_database_task).await
                }
                AlterDatabase(alter_database_task) => {
                    handle_alter_database_task(self, cluster_id, alter_database_task).await
                    handle_alter_database_task(self, alter_database_task).await
                }
                CreateFlow(create_flow_task) => {
                    handle_create_flow_task(
                        self,
                        cluster_id,
                        create_flow_task,
                        request.query_context.into(),
                    )
                    .await
                }
                DropFlow(drop_flow_task) => {
                    handle_drop_flow_task(self, cluster_id, drop_flow_task).await
                    handle_create_flow_task(self, create_flow_task, request.query_context.into())
                        .await
                }
                DropFlow(drop_flow_task) => handle_drop_flow_task(self, drop_flow_task).await,
                CreateView(create_view_task) => {
                    handle_create_view_task(self, cluster_id, create_view_task).await
                }
                DropView(drop_view_task) => {
                    handle_drop_view_task(self, cluster_id, drop_view_task).await
                    handle_create_view_task(self, create_view_task).await
                }
                DropView(drop_view_task) => handle_drop_view_task(self, drop_view_task).await,
            }
        }
        .trace(span)

@@ -685,7 +685,36 @@ pub enum Error {
        operation: String,
    },

    #[cfg(feature = "pg_kvbackend")]
    #[cfg(feature = "mysql_kvbackend")]
    #[snafu(display("Failed to execute via MySql, sql: {}", sql))]
    MySqlExecution {
        sql: String,
        #[snafu(source)]
        error: sqlx::Error,
        #[snafu(implicit)]
        location: Location,
    },

    #[cfg(feature = "mysql_kvbackend")]
    #[snafu(display("Failed to create connection pool for MySql"))]
    CreateMySqlPool {
        #[snafu(source)]
        error: sqlx::Error,
        #[snafu(implicit)]
        location: Location,
    },

    #[cfg(feature = "mysql_kvbackend")]
    #[snafu(display("Failed to {} MySql transaction", operation))]
    MySqlTransaction {
        #[snafu(source)]
        error: sqlx::Error,
        #[snafu(implicit)]
        location: Location,
        operation: String,
    },

    #[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
    #[snafu(display("Rds transaction retry failed"))]
    RdsTransactionRetryFailed {
        #[snafu(implicit)]
@@ -823,8 +852,13 @@ impl ErrorExt for Error {
            PostgresExecution { .. }
            | CreatePostgresPool { .. }
            | GetPostgresConnection { .. }
            | PostgresTransaction { .. }
            | RdsTransactionRetryFailed { .. } => StatusCode::Internal,
            | PostgresTransaction { .. } => StatusCode::Internal,
            #[cfg(feature = "mysql_kvbackend")]
            MySqlExecution { .. } | CreateMySqlPool { .. } | MySqlTransaction { .. } => {
                StatusCode::Internal
            }
            #[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
            RdsTransactionRetryFailed { .. } => StatusCode::Internal,
            Error::DatanodeTableInfoNotFound { .. } => StatusCode::Internal,
        }
    }
@@ -835,16 +869,29 @@ impl ErrorExt for Error {
}

impl Error {
    #[cfg(feature = "pg_kvbackend")]
    #[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
    /// Check if the error is a serialization error.
    pub fn is_serialization_error(&self) -> bool {
        match self {
            #[cfg(feature = "pg_kvbackend")]
            Error::PostgresTransaction { error, .. } => {
                error.code() == Some(&tokio_postgres::error::SqlState::T_R_SERIALIZATION_FAILURE)
            }
            #[cfg(feature = "pg_kvbackend")]
            Error::PostgresExecution { error, .. } => {
                error.code() == Some(&tokio_postgres::error::SqlState::T_R_SERIALIZATION_FAILURE)
            }
            #[cfg(feature = "mysql_kvbackend")]
            Error::MySqlExecution {
                error: sqlx::Error::Database(database_error),
                ..
            } => {
                matches!(
                    database_error.message(),
                    "Deadlock found when trying to get lock; try restarting transaction"
                        | "can't serialize access for this transaction"
                )
            }
            _ => false,
        }
    }
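    // A minimal sketch of how this predicate is typically consumed (illustrative
    // only, not part of this diff; `run_txn_once` is a hypothetical helper that
    // attempts one transaction):
    //
    // for _ in 0..RDS_STORE_TXN_RETRY_COUNT {
    //     match run_txn_once().await {
    //         Err(err) if err.is_serialization_error() => continue, // conflict: retry
    //         other => return other,
    //     }
    // }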

@@ -26,11 +26,10 @@ use crate::flow_name::FlowName;
use crate::key::schema_name::SchemaName;
use crate::key::FlowId;
use crate::peer::Peer;
use crate::{ClusterId, DatanodeId, FlownodeId};
use crate::{DatanodeId, FlownodeId};

#[derive(Eq, Hash, PartialEq, Clone, Debug, Serialize, Deserialize)]
pub struct RegionIdent {
    pub cluster_id: ClusterId,
    pub datanode_id: DatanodeId,
    pub table_id: TableId,
    pub region_number: RegionNumber,
@@ -47,8 +46,8 @@ impl Display for RegionIdent {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "RegionIdent(datanode_id='{}.{}', table_id={}, region_number={}, engine = {})",
            self.cluster_id, self.datanode_id, self.table_id, self.region_number, self.engine
            "RegionIdent(datanode_id='{}', table_id={}, region_number={}, engine = {})",
            self.datanode_id, self.table_id, self.region_number, self.engine
        )
    }
}
@@ -262,7 +261,6 @@ mod tests {
    fn test_serialize_instruction() {
        let open_region = Instruction::OpenRegion(OpenRegion::new(
            RegionIdent {
                cluster_id: 1,
                datanode_id: 2,
                table_id: 1024,
                region_number: 1,
@@ -277,12 +275,11 @@ mod tests {
        let serialized = serde_json::to_string(&open_region).unwrap();

        assert_eq!(
            r#"{"OpenRegion":{"region_ident":{"cluster_id":1,"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"},"region_storage_path":"test/foo","region_options":{},"region_wal_options":{},"skip_wal_replay":false}}"#,
            r#"{"OpenRegion":{"region_ident":{"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"},"region_storage_path":"test/foo","region_options":{},"region_wal_options":{},"skip_wal_replay":false}}"#,
            serialized
        );

        let close_region = Instruction::CloseRegion(RegionIdent {
            cluster_id: 1,
            datanode_id: 2,
            table_id: 1024,
            region_number: 1,
@@ -292,7 +289,7 @@ mod tests {
        let serialized = serde_json::to_string(&close_region).unwrap();

        assert_eq!(
            r#"{"CloseRegion":{"cluster_id":1,"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"}}"#,
            r#"{"CloseRegion":{"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"}}"#,
            serialized
        );
    }
@@ -307,7 +304,6 @@ mod tests {
    #[test]
    fn test_compatible_serialize_open_region() {
        let region_ident = RegionIdent {
            cluster_id: 1,
            datanode_id: 2,
            table_id: 1024,
            region_number: 1,

@@ -461,6 +461,8 @@ mod tests {
            expire_after: Some(300),
            comment: "hi".to_string(),
            options: Default::default(),
            created_time: chrono::Utc::now(),
            updated_time: chrono::Utc::now(),
        }
    }

@@ -632,6 +634,8 @@ mod tests {
            expire_after: Some(300),
            comment: "hi".to_string(),
            options: Default::default(),
            created_time: chrono::Utc::now(),
            updated_time: chrono::Utc::now(),
        };
        let err = flow_metadata_manager
            .create_flow_metadata(flow_id, flow_value, flow_routes.clone())
@@ -869,6 +873,8 @@ mod tests {
            expire_after: Some(300),
            comment: "hi".to_string(),
            options: Default::default(),
            created_time: chrono::Utc::now(),
            updated_time: chrono::Utc::now(),
        };
        let err = flow_metadata_manager
            .update_flow_metadata(

@@ -15,6 +15,7 @@
use std::collections::{BTreeMap, HashMap};
use std::sync::Arc;

use chrono::{DateTime, Utc};
use lazy_static::lazy_static;
use regex::Regex;
use serde::{Deserialize, Serialize};
@@ -131,6 +132,12 @@ pub struct FlowInfoValue {
    pub(crate) comment: String,
    /// The options.
    pub(crate) options: HashMap<String, String>,
    /// The created time.
    #[serde(default)]
    pub(crate) created_time: DateTime<Utc>,
    /// The updated time.
    #[serde(default)]
    pub(crate) updated_time: DateTime<Utc>,
}

impl FlowInfoValue {
@@ -171,6 +178,14 @@ impl FlowInfoValue {
    pub fn options(&self) -> &HashMap<String, String> {
        &self.options
    }

    pub fn created_time(&self) -> &DateTime<Utc> {
        &self.created_time
    }

    pub fn updated_time(&self) -> &DateTime<Utc> {
        &self.updated_time
    }
}

pub type FlowInfoManagerRef = Arc<FlowInfoManager>;

@@ -97,11 +97,19 @@ impl<'a> MetadataKey<'a, FlowStateKey> for FlowStateKey {
pub struct FlowStateValue {
    /// For each key, the bytes of the state in memory
    pub state_size: BTreeMap<FlowId, usize>,
    /// For each key, the last execution time of flow in unix timestamp milliseconds.
    pub last_exec_time_map: BTreeMap<FlowId, i64>,
}

impl FlowStateValue {
    pub fn new(state_size: BTreeMap<FlowId, usize>) -> Self {
        Self { state_size }
    pub fn new(
        state_size: BTreeMap<FlowId, usize>,
        last_exec_time_map: BTreeMap<FlowId, i64>,
    ) -> Self {
        Self {
            state_size,
            last_exec_time_map,
        }
    }
}
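// A minimal usage sketch (illustrative only, not part of this diff): both maps
// are keyed by `FlowId`, so callers now pass the per-flow state sizes and the
// per-flow last execution timestamps together:
//
// let value = FlowStateValue::new(
//     BTreeMap::from([(1, 1024)]),              // flow 1 holds 1024 bytes of state
//     BTreeMap::from([(1, 1_700_000_000_000)]), // flow 1 last ran at this unix ms
// );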
|
||||
|
||||
@@ -143,12 +151,15 @@ impl FlowStateManager {
|
||||
pub struct FlowStat {
|
||||
/// For each key, the bytes of the state in memory
|
||||
pub state_size: BTreeMap<u32, usize>,
|
||||
/// For each key, the last execution time of flow in unix timestamp milliseconds.
|
||||
pub last_exec_time_map: BTreeMap<FlowId, i64>,
|
||||
}
|
||||
|
||||
impl From<FlowStateValue> for FlowStat {
|
||||
fn from(value: FlowStateValue) -> Self {
|
||||
Self {
|
||||
state_size: value.state_size,
|
||||
last_exec_time_map: value.last_exec_time_map,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -157,6 +168,7 @@ impl From<FlowStat> for FlowStateValue {
|
||||
fn from(value: FlowStat) -> Self {
|
||||
Self {
|
||||
state_size: value.state_size,
|
||||
last_exec_time_map: value.last_exec_time_map,
|
||||
}
|
||||
}
|
||||
}

@@ -40,7 +40,7 @@ pub fn new_test_table_info_with_name<I: IntoIterator<Item = u32>>(
.build()
.unwrap();

let meta = TableMetaBuilder::default()
let meta = TableMetaBuilder::empty()
.schema(Arc::new(schema))
.primary_key_indices(vec![0])
.engine("engine")

@@ -31,7 +31,7 @@ use crate::rpc::KeyValue;
pub mod chroot;
pub mod etcd;
pub mod memory;
#[cfg(feature = "pg_kvbackend")]
#[cfg(any(feature = "mysql_kvbackend", feature = "pg_kvbackend"))]
pub mod rds;
pub mod test;
pub mod txn;

@@ -14,13 +14,11 @@

use std::any::Any;
use std::collections::BTreeMap;
use std::fmt::{Display, Formatter};
use std::marker::PhantomData;
use std::sync::{Arc, RwLock};

use async_trait::async_trait;
use common_error::ext::ErrorExt;
use serde::Serializer;

use super::{KvBackendRef, ResettableKvBackend};
use crate::kv_backend::txn::{Txn, TxnOp, TxnOpResponse, TxnRequest, TxnResponse};
@@ -38,19 +36,6 @@ pub struct MemoryKvBackend<T> {
_phantom: PhantomData<T>,
}

impl<T> Display for MemoryKvBackend<T> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let kvs = self.kvs.read().unwrap();
for (k, v) in kvs.iter() {
f.serialize_str(&String::from_utf8_lossy(k))?;
f.serialize_str(" -> ")?;
f.serialize_str(&String::from_utf8_lossy(v))?;
f.serialize_str("\n")?;
}
Ok(())
}
}

impl<T> Default for MemoryKvBackend<T> {
fn default() -> Self {
Self {

@@ -33,10 +33,16 @@ use crate::rpc::store::{
};
use crate::rpc::KeyValue;

#[cfg(feature = "pg_kvbackend")]
mod postgres;

#[cfg(feature = "pg_kvbackend")]
pub use postgres::PgStore;

#[cfg(feature = "mysql_kvbackend")]
mod mysql;
#[cfg(feature = "mysql_kvbackend")]
pub use mysql::MySqlStore;

const RDS_STORE_TXN_RETRY_COUNT: usize = 3;

/// Query executor for rds. It can execute queries or generate a transaction executor.
@@ -106,6 +112,14 @@ impl<T: Executor> ExecutorImpl<'_, T> {
}
}

#[allow(dead_code)] // Only used under #[cfg(feature = "mysql_kvbackend")].
async fn execute(&mut self, query: &str, params: &Vec<&Vec<u8>>) -> Result<()> {
match self {
Self::Default(executor) => executor.execute(query, params).await,
Self::Txn(executor) => executor.execute(query, params).await,
}
}

async fn commit(self) -> Result<()> {
match self {
Self::Txn(executor) => executor.commit().await,

650 src/common/meta/src/kv_backend/rds/mysql.rs Normal file
@@ -0,0 +1,650 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::marker::PhantomData;
use std::sync::Arc;

use common_telemetry::debug;
use snafu::ResultExt;
use sqlx::mysql::MySqlRow;
use sqlx::pool::Pool;
use sqlx::{MySql, MySqlPool, Row, Transaction as MySqlTransaction};

use crate::error::{CreateMySqlPoolSnafu, MySqlExecutionSnafu, MySqlTransactionSnafu, Result};
use crate::kv_backend::rds::{
Executor, ExecutorFactory, ExecutorImpl, KvQueryExecutor, RdsStore, Transaction,
RDS_STORE_TXN_RETRY_COUNT,
};
use crate::kv_backend::KvBackendRef;
use crate::rpc::store::{
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
BatchPutResponse, DeleteRangeRequest, DeleteRangeResponse, RangeRequest, RangeResponse,
};
use crate::rpc::KeyValue;

type MySqlClient = Arc<Pool<MySql>>;
pub struct MySqlTxnClient(MySqlTransaction<'static, MySql>);

fn key_value_from_row(row: MySqlRow) -> KeyValue {
// Safety: key and value are the first two columns in the row
KeyValue {
key: row.get_unchecked(0),
value: row.get_unchecked(1),
}
}
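`Row::get_unchecked` decodes a column without sqlx's type-compatibility check, which is why the comment pins key and value to the first two columns. A checked variant is possible; a sketch using sqlx's `try_get`, trading a little speed for an explicit error path:

use sqlx::Row;

fn key_value_from_row_checked(row: MySqlRow) -> sqlx::Result<KeyValue> {
    Ok(KeyValue {
        // try_get verifies the column exists and decodes it as Vec<u8>,
        // returning an error instead of panicking on a mismatch.
        key: row.try_get::<Vec<u8>, _>(0)?,
        value: row.try_get::<Vec<u8>, _>(1)?,
    })
}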

const EMPTY: &[u8] = &[0];

/// Type of range template.
#[derive(Debug, Clone, Copy)]
enum RangeTemplateType {
Point,
Range,
Full,
LeftBounded,
Prefix,
}

/// Builds params for the given range template type.
impl RangeTemplateType {
fn build_params(&self, mut key: Vec<u8>, range_end: Vec<u8>) -> Vec<Vec<u8>> {
match self {
RangeTemplateType::Point => vec![key],
RangeTemplateType::Range => vec![key, range_end],
RangeTemplateType::Full => vec![],
RangeTemplateType::LeftBounded => vec![key],
RangeTemplateType::Prefix => {
key.push(b'%');
vec![key]
}
}
}
}

/// Templates for range request.
#[derive(Debug, Clone)]
struct RangeTemplate {
point: String,
range: String,
full: String,
left_bounded: String,
prefix: String,
}

impl RangeTemplate {
/// Gets the template for the given type.
fn get(&self, typ: RangeTemplateType) -> &str {
match typ {
RangeTemplateType::Point => &self.point,
RangeTemplateType::Range => &self.range,
RangeTemplateType::Full => &self.full,
RangeTemplateType::LeftBounded => &self.left_bounded,
RangeTemplateType::Prefix => &self.prefix,
}
}

/// Adds limit to the template.
fn with_limit(template: &str, limit: i64) -> String {
if limit == 0 {
return format!("{};", template);
}
format!("{} LIMIT {};", template, limit)
}
}
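For illustration, `with_limit` keeps a zero limit unbounded and otherwise appends a `LIMIT` clause; a sketch of the expected output (the table name is illustrative):

let point = "SELECT k, v FROM greptime_metakv WHERE k = ?";
assert_eq!(RangeTemplate::with_limit(point, 0), format!("{point};"));
assert_eq!(RangeTemplate::with_limit(point, 10), format!("{point} LIMIT 10;"));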

fn is_prefix_range(start: &[u8], end: &[u8]) -> bool {
if start.len() != end.len() {
return false;
}
let l = start.len();
let same_prefix = start[0..l - 1] == end[0..l - 1];
if let (Some(rhs), Some(lhs)) = (start.last(), end.last()) {
return same_prefix && (*rhs + 1) == *lhs;
}
false
}

/// Determine the template type for range request.
fn range_template(key: &[u8], range_end: &[u8]) -> RangeTemplateType {
match (key, range_end) {
(_, &[]) => RangeTemplateType::Point,
(EMPTY, EMPTY) => RangeTemplateType::Full,
(_, EMPTY) => RangeTemplateType::LeftBounded,
(start, end) => {
if is_prefix_range(start, end) {
RangeTemplateType::Prefix
} else {
RangeTemplateType::Range
}
}
}
}
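Taken together, `is_prefix_range` and `range_template` recognize a prefix scan when `range_end` equals the key with its last byte incremented, the usual etcd-style encoding of "all keys with this prefix". A worked sketch (assuming keys contain no `LIKE` wildcard bytes such as `%` or `_`, since `build_params` feeds the key straight into a `LIKE` pattern):

// b"app/" .. b"app0" covers every key starting with "app/" ('0' is '/' + 1).
assert!(is_prefix_range(b"app/", b"app0"));
assert!(matches!(range_template(b"app/", b"app0"), RangeTemplateType::Prefix));

// Same length but non-adjacent last bytes: an ordinary [start, end) range.
assert!(matches!(range_template(b"app/", b"app9"), RangeTemplateType::Range));

// An empty range_end means a point lookup.
assert!(matches!(range_template(b"app/", b""), RangeTemplateType::Point));

// The Prefix params become ["app/%"] for the LIKE template.
assert_eq!(
    RangeTemplateType::Prefix.build_params(b"app/".to_vec(), vec![]),
    vec![b"app/%".to_vec()]
);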

/// Generate in placeholders for MySQL.
fn mysql_generate_in_placeholders(from: usize, to: usize) -> Vec<String> {
(from..=to).map(|_| "?".to_string()).collect()
}

/// Factory for building sql templates.
struct MySqlTemplateFactory<'a> {
table_name: &'a str,
}

impl<'a> MySqlTemplateFactory<'a> {
/// Creates a new [`MySqlTemplateFactory`] with the given table name.
fn new(table_name: &'a str) -> Self {
Self { table_name }
}

/// Builds the template set for the given table name.
fn build(&self) -> MySqlTemplateSet {
let table_name = self.table_name;
// Some queries don't end with `;` because a `LIMIT` clause may be appended later.
MySqlTemplateSet {
table_name: table_name.to_string(),
create_table_statement: format!(
// MySQL limits index keys to 3072 bytes, hence VARBINARY(3072) for the primary key.
"CREATE TABLE IF NOT EXISTS {table_name}(k VARBINARY(3072) PRIMARY KEY, v BLOB);",
),
range_template: RangeTemplate {
point: format!("SELECT k, v FROM {table_name} WHERE k = ?"),
range: format!("SELECT k, v FROM {table_name} WHERE k >= ? AND k < ? ORDER BY k"),
full: format!("SELECT k, v FROM {table_name} ORDER BY k"),
left_bounded: format!("SELECT k, v FROM {table_name} WHERE k >= ? ORDER BY k"),
prefix: format!("SELECT k, v FROM {table_name} WHERE k LIKE ? ORDER BY k"),
},
delete_template: RangeTemplate {
point: format!("DELETE FROM {table_name} WHERE k = ?;"),
range: format!("DELETE FROM {table_name} WHERE k >= ? AND k < ?;"),
full: format!("DELETE FROM {table_name}"),
left_bounded: format!("DELETE FROM {table_name} WHERE k >= ?;"),
prefix: format!("DELETE FROM {table_name} WHERE k LIKE ?;"),
},
}
}
}

/// Templates for the given table name.
#[derive(Debug, Clone)]
pub struct MySqlTemplateSet {
table_name: String,
create_table_statement: String,
range_template: RangeTemplate,
delete_template: RangeTemplate,
}

impl MySqlTemplateSet {
/// Generates the sql for batch get.
fn generate_batch_get_query(&self, key_len: usize) -> String {
let table_name = &self.table_name;
let in_clause = mysql_generate_in_placeholders(1, key_len).join(", ");
format!("SELECT k, v FROM {table_name} WHERE k in ({});", in_clause)
}

/// Generates the sql for batch delete.
fn generate_batch_delete_query(&self, key_len: usize) -> String {
let table_name = &self.table_name;
let in_clause = mysql_generate_in_placeholders(1, key_len).join(", ");
format!("DELETE FROM {table_name} WHERE k in ({});", in_clause)
}

/// Generates the sql for batch upsert.
/// For MySQL, it also generates a select query to get the previous values.
fn generate_batch_upsert_query(&self, kv_len: usize) -> (String, String) {
let table_name = &self.table_name;
let in_placeholders: Vec<String> = (1..=kv_len).map(|_| "?".to_string()).collect();
let in_clause = in_placeholders.join(", ");
let mut values_placeholders = Vec::new();
for _ in 0..kv_len {
values_placeholders.push("(?, ?)".to_string());
}
let values_clause = values_placeholders.join(", ");

(
format!(r#"SELECT k, v FROM {table_name} WHERE k IN ({in_clause})"#,),
format!(
r#"INSERT INTO {table_name} (k, v) VALUES {values_clause} ON DUPLICATE KEY UPDATE v = VALUES(v);"#,
),
)
}
}
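For concreteness, a sketch of what `generate_batch_upsert_query(2)` would produce for a hypothetical table named `greptime_metakv`: a select that snapshots the previous values, plus one `(?, ?)` group per kv in the upsert:

let templates = MySqlTemplateFactory::new("greptime_metakv").build();
let (select, update) = templates.generate_batch_upsert_query(2);
assert_eq!(select, "SELECT k, v FROM greptime_metakv WHERE k IN (?, ?)");
assert_eq!(
    update,
    "INSERT INTO greptime_metakv (k, v) VALUES (?, ?), (?, ?) \
     ON DUPLICATE KEY UPDATE v = VALUES(v);"
);

Note that MySQL 8.0.20 deprecates `VALUES()` inside `ON DUPLICATE KEY UPDATE` in favor of row aliases, though it still works.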

#[async_trait::async_trait]
impl Executor for MySqlClient {
type Transaction<'a>
= MySqlTxnClient
where
Self: 'a;

fn name() -> &'static str {
"MySql"
}

async fn query(&mut self, raw_query: &str, params: &[&Vec<u8>]) -> Result<Vec<KeyValue>> {
let query = sqlx::query(raw_query);
let query = params.iter().fold(query, |query, param| query.bind(param));
let rows = query
.fetch_all(&**self)
.await
.context(MySqlExecutionSnafu { sql: raw_query })?;
Ok(rows.into_iter().map(key_value_from_row).collect())
}

async fn execute(&mut self, raw_query: &str, params: &[&Vec<u8>]) -> Result<()> {
let query = sqlx::query(raw_query);
let query = params.iter().fold(query, |query, param| query.bind(param));
query
.execute(&**self)
.await
.context(MySqlExecutionSnafu { sql: raw_query })?;
Ok(())
}

async fn txn_executor<'a>(&'a mut self) -> Result<Self::Transaction<'a>> {
// sqlx has no isolation level support for now, so we have to set it manually.
// TODO(CookiePie): Waiting for https://github.com/launchbadge/sqlx/pull/3614 and remove this.
sqlx::query("SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE")
.execute(&**self)
.await
.context(MySqlExecutionSnafu {
sql: "SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE",
})?;
let txn = self
.begin()
.await
.context(MySqlExecutionSnafu { sql: "begin" })?;
Ok(MySqlTxnClient(txn))
}
}
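The `fold` in `query` and `execute` is the idiomatic way to bind a runtime-sized parameter list in sqlx, since `bind` consumes and returns the query builder. The same pattern in isolation (a sketch; the pool, table name, and helper are assumptions, not part of this file):

async fn fetch_by_keys(
    pool: &sqlx::MySqlPool,
    keys: &[Vec<u8>],
) -> sqlx::Result<Vec<sqlx::mysql::MySqlRow>> {
    let placeholders = keys.iter().map(|_| "?").collect::<Vec<_>>().join(", ");
    let sql = format!("SELECT k, v FROM greptime_metakv WHERE k IN ({placeholders})");
    let query = sqlx::query(&sql);
    // Each bind consumes the builder and returns it with one more parameter attached.
    let query = keys.iter().fold(query, |q, k| q.bind(k));
    query.fetch_all(pool).await
}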

#[async_trait::async_trait]
impl Transaction<'_> for MySqlTxnClient {
async fn query(&mut self, raw_query: &str, params: &[&Vec<u8>]) -> Result<Vec<KeyValue>> {
let query = sqlx::query(raw_query);
let query = params.iter().fold(query, |query, param| query.bind(param));
// As said in https://docs.rs/sqlx/latest/sqlx/trait.Executor.html, we need a `&mut *transaction`. Weird.
let rows = query
.fetch_all(&mut *(self.0))
.await
.context(MySqlExecutionSnafu { sql: raw_query })?;
Ok(rows.into_iter().map(key_value_from_row).collect())
}

async fn execute(&mut self, raw_query: &str, params: &[&Vec<u8>]) -> Result<()> {
let query = sqlx::query(raw_query);
let query = params.iter().fold(query, |query, param| query.bind(param));
// As said in https://docs.rs/sqlx/latest/sqlx/trait.Executor.html, we need a `&mut *transaction`. Weird.
query
.execute(&mut *(self.0))
.await
.context(MySqlExecutionSnafu { sql: raw_query })?;
Ok(())
}

/// Caution: sqlx can get stuck on the query if two transactions conflict with each other.
/// It is unclear whether this is intended sqlx behavior or database-dependent, so be careful.
async fn commit(self) -> Result<()> {
self.0.commit().await.context(MySqlTransactionSnafu {
operation: "commit",
})?;
Ok(())
}
}

pub struct MySqlExecutorFactory {
pool: Arc<Pool<MySql>>,
}

#[async_trait::async_trait]
impl ExecutorFactory<MySqlClient> for MySqlExecutorFactory {
async fn default_executor(&self) -> Result<MySqlClient> {
Ok(self.pool.clone())
}

async fn txn_executor<'a>(
&self,
default_executor: &'a mut MySqlClient,
) -> Result<MySqlTxnClient> {
default_executor.txn_executor().await
}
}

/// A MySQL-backed key-value store.
/// It uses [sqlx::Pool<MySql>] as the connection pool for [RdsStore].
pub type MySqlStore = RdsStore<MySqlClient, MySqlExecutorFactory, MySqlTemplateSet>;

#[async_trait::async_trait]
impl KvQueryExecutor<MySqlClient> for MySqlStore {
async fn range_with_query_executor(
&self,
query_executor: &mut ExecutorImpl<'_, MySqlClient>,
req: RangeRequest,
) -> Result<RangeResponse> {
let template_type = range_template(&req.key, &req.range_end);
let template = self.sql_template_set.range_template.get(template_type);
let params = template_type.build_params(req.key, req.range_end);
let params_ref = params.iter().collect::<Vec<_>>();
// Always add 1 to limit to check if there is more data
let query =
RangeTemplate::with_limit(template, if req.limit == 0 { 0 } else { req.limit + 1 });
let limit = req.limit as usize;
debug!("query: {:?}, params: {:?}", query, params);
let mut kvs = query_executor.query(&query, &params_ref).await?;
if req.keys_only {
kvs.iter_mut().for_each(|kv| kv.value = vec![]);
}
// If limit is 0, we always return all data
if limit == 0 || kvs.len() <= limit {
return Ok(RangeResponse { kvs, more: false });
}
// We fetched limit + 1 rows, so more data exists: drop the extra row and set more to true.
let removed = kvs.pop();
debug_assert!(removed.is_some());
Ok(RangeResponse { kvs, more: true })
}
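The `limit + 1` trick above computes the `more` flag without a second `COUNT` query: fetch one extra row, and if it shows up, drop it and report that more data exists. The same logic over plain vectors (a sketch):

fn page_with_more<T>(mut rows: Vec<T>, limit: usize) -> (Vec<T>, bool) {
    // `rows` was fetched with `LIMIT limit + 1` (or no limit when limit == 0).
    if limit == 0 || rows.len() <= limit {
        return (rows, false); // everything fit; no extra row came back
    }
    let extra = rows.pop(); // discard the sentinel row
    debug_assert!(extra.is_some());
    (rows, true)
}

// e.g. limit = 2, three rows fetched -> two rows returned, more = true.
assert_eq!(page_with_more(vec![1, 2, 3], 2), (vec![1, 2], true));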

async fn batch_put_with_query_executor(
&self,
query_executor: &mut ExecutorImpl<'_, MySqlClient>,
req: BatchPutRequest,
) -> Result<BatchPutResponse> {
let mut in_params = Vec::with_capacity(req.kvs.len() * 3);
let mut values_params = Vec::with_capacity(req.kvs.len() * 2);

for kv in &req.kvs {
let processed_key = &kv.key;
in_params.push(processed_key);

let processed_value = &kv.value;
values_params.push(processed_key);
values_params.push(processed_value);
}
let in_params = in_params.iter().map(|x| x as _).collect::<Vec<_>>();
let values_params = values_params.iter().map(|x| x as _).collect::<Vec<_>>();
let (select, update) = self
.sql_template_set
.generate_batch_upsert_query(req.kvs.len());

// Fast path: if we don't need previous kvs, we can just upsert the keys.
if !req.prev_kv {
query_executor.execute(&update, &values_params).await?;
return Ok(BatchPutResponse::default());
}
// Should use transaction to ensure atomicity.
if let ExecutorImpl::Default(query_executor) = query_executor {
let txn = query_executor.txn_executor().await?;
let mut txn = ExecutorImpl::Txn(txn);
let res = self.batch_put_with_query_executor(&mut txn, req).await;
txn.commit().await?;
return res;
}
let prev_kvs = query_executor.query(&select, &in_params).await?;
query_executor.execute(&update, &values_params).await?;
Ok(BatchPutResponse { prev_kvs })
}
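One subtlety in this slow path, which repeats in `delete_range` and `batch_delete` below: the recursive call's result is captured before `txn.commit().await?` runs, so the transaction is committed (or the commit error surfaces first via `?`) even when the body itself returned an error, and only then is the body's result propagated. Combined with the `SERIALIZABLE` isolation set in `txn_executor`, the select-then-upsert pair behaves as a single atomic read-modify-write.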

async fn batch_get_with_query_executor(
&self,
query_executor: &mut ExecutorImpl<'_, MySqlClient>,
req: BatchGetRequest,
) -> Result<BatchGetResponse> {
if req.keys.is_empty() {
return Ok(BatchGetResponse { kvs: vec![] });
}
let query = self
.sql_template_set
.generate_batch_get_query(req.keys.len());
let params = req.keys.iter().map(|x| x as _).collect::<Vec<_>>();
let kvs = query_executor.query(&query, &params).await?;
Ok(BatchGetResponse { kvs })
}

async fn delete_range_with_query_executor(
&self,
query_executor: &mut ExecutorImpl<'_, MySqlClient>,
req: DeleteRangeRequest,
) -> Result<DeleteRangeResponse> {
// Since we need to know the number of deleted keys, we have no fast path here.
// Should use transaction to ensure atomicity.
if let ExecutorImpl::Default(query_executor) = query_executor {
let txn = query_executor.txn_executor().await?;
let mut txn = ExecutorImpl::Txn(txn);
let res = self.delete_range_with_query_executor(&mut txn, req).await;
txn.commit().await?;
return res;
}
let range_get_req = RangeRequest {
key: req.key.clone(),
range_end: req.range_end.clone(),
limit: 0,
keys_only: false,
};
let prev_kvs = self
.range_with_query_executor(query_executor, range_get_req)
.await?
.kvs;
let template_type = range_template(&req.key, &req.range_end);
let template = self.sql_template_set.delete_template.get(template_type);
let params = template_type.build_params(req.key, req.range_end);
let params_ref = params.iter().map(|x| x as _).collect::<Vec<_>>();
query_executor.execute(template, &params_ref).await?;
let mut resp = DeleteRangeResponse::new(prev_kvs.len() as i64);
if req.prev_kv {
resp.with_prev_kvs(prev_kvs);
}
Ok(resp)
}

async fn batch_delete_with_query_executor(
&self,
query_executor: &mut ExecutorImpl<'_, MySqlClient>,
req: BatchDeleteRequest,
) -> Result<BatchDeleteResponse> {
if req.keys.is_empty() {
return Ok(BatchDeleteResponse::default());
}
let query = self
.sql_template_set
.generate_batch_delete_query(req.keys.len());
let params = req.keys.iter().map(|x| x as _).collect::<Vec<_>>();
// Fast path: if we don't need previous kvs, we can just delete the keys.
if !req.prev_kv {
query_executor.execute(&query, &params).await?;
return Ok(BatchDeleteResponse::default());
}
// Should use transaction to ensure atomicity.
if let ExecutorImpl::Default(query_executor) = query_executor {
let txn = query_executor.txn_executor().await?;
let mut txn = ExecutorImpl::Txn(txn);
let res = self.batch_delete_with_query_executor(&mut txn, req).await;
txn.commit().await?;
return res;
}
// Should get previous kvs first
let batch_get_req = BatchGetRequest {
keys: req.keys.clone(),
};
let prev_kvs = self
.batch_get_with_query_executor(query_executor, batch_get_req)
.await?
.kvs;
// Pure `DELETE` has no return value, so we need to use `execute` instead of `query`.
query_executor.execute(&query, &params).await?;
if req.prev_kv {
Ok(BatchDeleteResponse { prev_kvs })
} else {
Ok(BatchDeleteResponse::default())
}
}
}

impl MySqlStore {
/// Creates a [MySqlStore], wrapped as a [KvBackendRef], from a connection URL.
pub async fn with_url(url: &str, table_name: &str, max_txn_ops: usize) -> Result<KvBackendRef> {
let pool = MySqlPool::connect(url)
.await
.context(CreateMySqlPoolSnafu)?;
Self::with_mysql_pool(pool, table_name, max_txn_ops).await
}

/// Creates a [MySqlStore], wrapped as a [KvBackendRef], from an existing [sqlx::Pool<MySql>].
pub async fn with_mysql_pool(
pool: Pool<MySql>,
table_name: &str,
max_txn_ops: usize,
) -> Result<KvBackendRef> {
// This step ensures the MySQL metadata backend is ready to use:
// we create the metadata table if it does not already exist.
let sql_template_set = MySqlTemplateFactory::new(table_name).build();
sqlx::query(&sql_template_set.create_table_statement)
.execute(&pool)
.await
.context(MySqlExecutionSnafu {
sql: sql_template_set.create_table_statement.to_string(),
})?;
Ok(Arc::new(MySqlStore {
max_txn_ops,
sql_template_set,
txn_retry_count: RDS_STORE_TXN_RETRY_COUNT,
executor_factory: MySqlExecutorFactory {
pool: Arc::new(pool),
},
_phantom: PhantomData,
}))
}
}
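A usage sketch (URL, table name, and `max_txn_ops` are illustrative values, not defaults from this file):

// Hypothetical wiring; requires a tokio runtime and a reachable MySQL server.
let kv_backend = MySqlStore::with_url(
    "mysql://user:pass@127.0.0.1:3306/metadata", // assumed connection string
    "greptime_metakv",                           // metadata table to create/use
    128,                                         // max_txn_ops
)
.await?;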

#[cfg(test)]
mod tests {
use common_telemetry::init_default_ut_logging;

use super::*;
use crate::kv_backend::test::{
prepare_kv_with_prefix, test_kv_batch_delete_with_prefix, test_kv_batch_get_with_prefix,
test_kv_compare_and_put_with_prefix, test_kv_delete_range_with_prefix,
test_kv_put_with_prefix, test_kv_range_2_with_prefix, test_kv_range_with_prefix,
test_txn_compare_equal, test_txn_compare_greater, test_txn_compare_less,
test_txn_compare_not_equal, test_txn_one_compare_op, text_txn_multi_compare_op,
unprepare_kv,
};

async fn build_mysql_kv_backend(table_name: &str) -> Option<MySqlStore> {
init_default_ut_logging();
let endpoints = std::env::var("GT_MYSQL_ENDPOINTS").unwrap_or_default();
if endpoints.is_empty() {
return None;
}
let pool = MySqlPool::connect(&endpoints).await.unwrap();
let sql_templates = MySqlTemplateFactory::new(table_name).build();
sqlx::query(&sql_templates.create_table_statement)
.execute(&pool)
.await
.unwrap();
Some(MySqlStore {
max_txn_ops: 128,
sql_template_set: sql_templates,
txn_retry_count: RDS_STORE_TXN_RETRY_COUNT,
executor_factory: MySqlExecutorFactory {
pool: Arc::new(pool),
},
_phantom: PhantomData,
})
}

#[tokio::test]
async fn test_mysql_put() {
let kv_backend = build_mysql_kv_backend("put_test").await.unwrap();
let prefix = b"put/";
prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
test_kv_put_with_prefix(&kv_backend, prefix.to_vec()).await;
unprepare_kv(&kv_backend, prefix).await;
}

#[tokio::test]
async fn test_mysql_range() {
let kv_backend = build_mysql_kv_backend("range_test").await.unwrap();
let prefix = b"range/";
prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
test_kv_range_with_prefix(&kv_backend, prefix.to_vec()).await;
unprepare_kv(&kv_backend, prefix).await;
}

#[tokio::test]
async fn test_mysql_range_2() {
let kv_backend = build_mysql_kv_backend("range2_test").await.unwrap();
let prefix = b"range2/";
test_kv_range_2_with_prefix(&kv_backend, prefix.to_vec()).await;
unprepare_kv(&kv_backend, prefix).await;
}

#[tokio::test]
async fn test_mysql_batch_get() {
let kv_backend = build_mysql_kv_backend("batch_get_test").await.unwrap();
let prefix = b"batch_get/";
prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
test_kv_batch_get_with_prefix(&kv_backend, prefix.to_vec()).await;
unprepare_kv(&kv_backend, prefix).await;
}

#[tokio::test]
async fn test_mysql_batch_delete() {
let kv_backend = build_mysql_kv_backend("batch_delete_test").await.unwrap();
let prefix = b"batch_delete/";
prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
test_kv_delete_range_with_prefix(&kv_backend, prefix.to_vec()).await;
unprepare_kv(&kv_backend, prefix).await;
}

#[tokio::test]
async fn test_mysql_batch_delete_with_prefix() {
let kv_backend = build_mysql_kv_backend("batch_delete_with_prefix_test")
.await
.unwrap();
let prefix = b"batch_delete/";
prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
test_kv_batch_delete_with_prefix(&kv_backend, prefix.to_vec()).await;
unprepare_kv(&kv_backend, prefix).await;
}

#[tokio::test]
async fn test_mysql_delete_range() {
let kv_backend = build_mysql_kv_backend("delete_range_test").await.unwrap();
let prefix = b"delete_range/";
prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
test_kv_delete_range_with_prefix(&kv_backend, prefix.to_vec()).await;
unprepare_kv(&kv_backend, prefix).await;
}

#[tokio::test]
async fn test_mysql_compare_and_put() {
let kv_backend = build_mysql_kv_backend("compare_and_put_test")
.await
.unwrap();
let prefix = b"compare_and_put/";
let kv_backend = Arc::new(kv_backend);
test_kv_compare_and_put_with_prefix(kv_backend.clone(), prefix.to_vec()).await;
}

#[tokio::test]
async fn test_mysql_txn() {
let kv_backend = build_mysql_kv_backend("txn_test").await.unwrap();
test_txn_one_compare_op(&kv_backend).await;
text_txn_multi_compare_op(&kv_backend).await;
test_txn_compare_equal(&kv_backend).await;
test_txn_compare_greater(&kv_backend).await;
test_txn_compare_less(&kv_backend).await;
test_txn_compare_not_equal(&kv_backend).await;
}
}
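Note that these integration tests are gated on the `GT_MYSQL_ENDPOINTS` environment variable: `build_mysql_kv_backend` returns `None` when it is unset, so the `.unwrap()` calls panic rather than skip unless a MySQL endpoint is configured (for example a URL of the form `mysql://user:pass@127.0.0.1:3306/db`; the exact value is deployment-specific).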
@@ -153,6 +153,7 @@ impl<'a> PgSqlTemplateFactory<'a> {
/// Builds the template set for the given table name.
fn build(&self) -> PgSqlTemplateSet {
let table_name = self.table_name;
// Some queries don't end with `;` because a `LIMIT` clause may be appended later.
PgSqlTemplateSet {
table_name: table_name.to_string(),
create_table_statement: format!(