Mirror of https://github.com/GreptimeTeam/greptimedb.git
Synced 2026-01-07 13:52:59 +00:00

Compare commits: 2 commits, release/v0 ... basic_with

| Author | SHA1 | Date |
|---|---|---|
|  | 22c61432f6 |  |
|  | 91f373e66e |  |
@@ -12,6 +12,3 @@ fetch = true
 checkout = true
 list_files = true
 internal_use_git2 = false
-
-[env]
-CARGO_WORKSPACE_DIR = { value = "", relative = true }
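A note on the hunk above: a Cargo `[env]` entry of this shape exports CARGO_WORKSPACE_DIR to every build in the workspace, and `relative = true` resolves the (empty) value against the config file's location, effectively handing crates the workspace root at compile time, readable in Rust via env!("CARGO_WORKSPACE_DIR"). This reading follows Cargo's documented [env] semantics rather than anything stated in the diff itself.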
@@ -12,7 +12,7 @@ runs:
   steps:
     - name: Install Etcd cluster
       shell: bash
       run: |
         helm upgrade \
           --install etcd oci://registry-1.docker.io/bitnamicharts/etcd \
           --set replicaCount=${{ inputs.etcd-replicas }} \
@@ -24,9 +24,4 @@ runs:
           --set auth.rbac.token.enabled=false \
           --set persistence.size=2Gi \
           --create-namespace \
-          --set global.security.allowInsecureImages=true \
-          --set image.registry=docker.io \
-          --set image.repository=greptime/etcd \
-          --set image.tag=3.6.1-debian-12-r3 \
-          --version 12.0.8 \
           -n ${{ inputs.namespace }}
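The removed flags pinned the Bitnami chart to version 12.0.8 and pointed it at images mirrored under greptime/; without them the install falls back to chart defaults. A hedged way to confirm what actually got deployed (release name `etcd` and the namespace come from the action inputs above; the statefulset name is an assumption about the chart's defaults):

# Show the chart version Helm resolved and the image the pods actually run.
helm list -n "$NAMESPACE" --filter etcd
kubectl -n "$NAMESPACE" get statefulset etcd \
  -o jsonpath='{.spec.template.spec.containers[0].image}'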
@@ -10,13 +10,13 @@ inputs:
   meta-replicas:
     default: 2
     description: "Number of Metasrv replicas"
   image-registry:
     default: "docker.io"
     description: "Image registry"
   image-repository:
     default: "greptime/greptimedb"
     description: "Image repository"
   image-tag:
     default: "latest"
     description: 'Image tag'
   etcd-endpoints:
@@ -32,12 +32,12 @@ runs:
   steps:
     - name: Install GreptimeDB operator
       uses: nick-fields/retry@v3
       with:
         timeout_minutes: 3
         max_attempts: 3
         shell: bash
         command: |
           helm repo add greptime https://greptimeteam.github.io/helm-charts/
           helm repo update
           helm upgrade \
             --install \
@@ -48,10 +48,10 @@ runs:
             --wait-for-jobs
     - name: Install GreptimeDB cluster
       shell: bash
       run: |
         helm upgrade \
           --install my-greptimedb \
-          --set meta.backendStorage.etcd.endpoints=${{ inputs.etcd-endpoints }} \
+          --set meta.etcdEndpoints=${{ inputs.etcd-endpoints }} \
           --set meta.enableRegionFailover=${{ inputs.enable-region-failover }} \
           --set image.registry=${{ inputs.image-registry }} \
           --set image.repository=${{ inputs.image-repository }} \
@@ -72,7 +72,7 @@ runs:
     - name: Wait for GreptimeDB
       shell: bash
       run: |
         while true; do
           PHASE=$(kubectl -n my-greptimedb get gtc my-greptimedb -o jsonpath='{.status.clusterPhase}')
           if [ "$PHASE" == "Running" ]; then
             echo "Cluster is ready"
@@ -86,10 +86,10 @@ runs:
     - name: Print GreptimeDB info
       if: always()
       shell: bash
       run: |
         kubectl get all --show-labels -n my-greptimedb
     - name: Describe Nodes
       if: always()
       shell: bash
       run: |
         kubectl describe nodes
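The "Wait for GreptimeDB" hunk only shows the top of the polling loop; the break/sleep logic falls outside the diff context. A minimal sketch of what a complete loop of this shape looks like (the timeout handling here is an assumption, not necessarily what the action does):

# Poll the GreptimeDBCluster resource until its clusterPhase reports Running.
TIMEOUT=300
ELAPSED=0
while true; do
  PHASE=$(kubectl -n my-greptimedb get gtc my-greptimedb -o jsonpath='{.status.clusterPhase}')
  if [ "$PHASE" == "Running" ]; then
    echo "Cluster is ready"
    break
  fi
  if [ "$ELAPSED" -ge "$TIMEOUT" ]; then
    echo "Timed out waiting for cluster; current phase: $PHASE"
    exit 1
  fi
  sleep 5
  ELAPSED=$((ELAPSED + 5))
done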
@@ -12,7 +12,7 @@ runs:
   steps:
     - name: Install Kafka cluster
       shell: bash
       run: |
         helm upgrade \
           --install kafka oci://registry-1.docker.io/bitnamicharts/kafka \
           --set controller.replicaCount=${{ inputs.controller-replicas }} \
@@ -23,8 +23,4 @@ runs:
           --set listeners.controller.protocol=PLAINTEXT \
           --set listeners.client.protocol=PLAINTEXT \
           --create-namespace \
-          --set image.registry=docker.io \
-          --set image.repository=greptime/kafka \
-          --set image.tag=3.9.0-debian-12-r1 \
-          --version 31.0.0 \
           -n ${{ inputs.namespace }}
@@ -6,7 +6,9 @@ inputs:
     description: "Number of PostgreSQL replicas"
   namespace:
     default: "postgres-namespace"
-    description: "The PostgreSQL namespace"
+  postgres-version:
+    default: "14.2"
+    description: "PostgreSQL version"
   storage-size:
     default: "1Gi"
     description: "Storage size for PostgreSQL"
@@ -20,11 +22,7 @@ runs:
         helm upgrade \
           --install postgresql oci://registry-1.docker.io/bitnamicharts/postgresql \
           --set replicaCount=${{ inputs.postgres-replicas }} \
-          --set global.security.allowInsecureImages=true \
-          --set image.registry=docker.io \
-          --set image.repository=greptime/postgresql \
-          --set image.tag=17.5.0-debian-12-r3 \
-          --version 16.7.4 \
+          --set image.tag=${{ inputs.postgres-version }} \
           --set persistence.size=${{ inputs.storage-size }} \
           --set postgresql.username=greptimedb \
           --set postgresql.password=admin \
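On the head side the new postgres-version input feeds straight into the chart's image.tag, so callers pick the PostgreSQL version; the base side instead pins a mirrored image and chart version. A quick, hedged follow-up check (release name `postgresql` matches the install command above; the label selector assumes Bitnami's standard labels):

# Confirm the user-supplied overrides Helm recorded, and that pods came up.
helm get values postgresql -n "$NAMESPACE"
kubectl -n "$NAMESPACE" get pods -l app.kubernetes.io/name=postgresql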
15  .github/labeler.yaml  (vendored)
@@ -1,15 +0,0 @@
-ci:
-  - changed-files:
-    - any-glob-to-any-file: .github/**
-
-docker:
-  - changed-files:
-    - any-glob-to-any-file: docker/**
-
-documentation:
-  - changed-files:
-    - any-glob-to-any-file: docs/**
-
-dashboard:
-  - changed-files:
-    - any-glob-to-any-file: grafana/**
42  .github/scripts/check-version.sh  (vendored)
@@ -1,42 +0,0 @@
-#!/bin/bash
-
-# Get current version
-CURRENT_VERSION=$1
-if [ -z "$CURRENT_VERSION" ]; then
-  echo "Error: Failed to get current version"
-  exit 1
-fi
-
-# Get the latest version from GitHub Releases
-API_RESPONSE=$(curl -s "https://api.github.com/repos/GreptimeTeam/greptimedb/releases/latest")
-
-if [ -z "$API_RESPONSE" ] || [ "$(echo "$API_RESPONSE" | jq -r '.message')" = "Not Found" ]; then
-  echo "Error: Failed to fetch latest version from GitHub"
-  exit 1
-fi
-
-# Get the latest version
-LATEST_VERSION=$(echo "$API_RESPONSE" | jq -r '.tag_name')
-
-if [ -z "$LATEST_VERSION" ] || [ "$LATEST_VERSION" = "null" ]; then
-  echo "Error: No valid version found in GitHub releases"
-  exit 1
-fi
-
-# Cleaned up version number format (removed possible 'v' prefix and -nightly suffix)
-CLEAN_CURRENT=$(echo "$CURRENT_VERSION" | sed 's/^v//' | sed 's/-nightly-.*//')
-CLEAN_LATEST=$(echo "$LATEST_VERSION" | sed 's/^v//' | sed 's/-nightly-.*//')
-
-echo "Current version: $CLEAN_CURRENT"
-echo "Latest release version: $CLEAN_LATEST"
-
-# Use sort -V to compare versions
-HIGHER_VERSION=$(printf "%s\n%s" "$CLEAN_CURRENT" "$CLEAN_LATEST" | sort -V | tail -n1)
-
-if [ "$HIGHER_VERSION" = "$CLEAN_CURRENT" ]; then
-  echo "Current version ($CLEAN_CURRENT) is NEWER than or EQUAL to latest ($CLEAN_LATEST)"
-  echo "should-push-latest-tag=true" >> $GITHUB_OUTPUT
-else
-  echo "Current version ($CLEAN_CURRENT) is OLDER than latest ($CLEAN_LATEST)"
-  echo "should-push-latest-tag=false" >> $GITHUB_OUTPUT
-fi
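The heart of the deleted script is the sort -V trick: print both cleaned versions, version-sort them, and take the last line; if that line equals the current version, the current version is newer or equal. A standalone demonstration with illustrative values:

# Version-aware comparison, exactly as the script does it.
printf "%s\n%s" "0.15.5" "0.15.0" | sort -V | tail -n1   # prints 0.15.5
# Since the higher version equals the current one here, the script would
# emit should-push-latest-tag=true.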
4  .github/scripts/deploy-greptimedb.sh  (vendored)
@@ -68,7 +68,7 @@ function deploy_greptimedb_cluster() {
 
   helm install "$cluster_name" greptime/greptimedb-cluster \
     --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
-    --set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
+    --set meta.etcdEndpoints="etcd.$install_namespace:2379" \
     -n "$install_namespace"
 
   # Wait for greptimedb cluster to be ready.
@@ -103,7 +103,7 @@ function deploy_greptimedb_cluster_with_s3_storage() {
 
   helm install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \
     --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
-    --set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
+    --set meta.etcdEndpoints="etcd.$install_namespace:2379" \
     --set storage.s3.bucket="$AWS_CI_TEST_BUCKET" \
     --set storage.s3.region="$AWS_REGION" \
     --set storage.s3.root="$DATA_ROOT" \
34  .github/scripts/pull-test-deps-images.sh  (vendored)
@@ -1,34 +0,0 @@
-#!/bin/bash
-
-# This script is used to pull the test dependency images that are stored in public ECR one by one to avoid rate limiting.
-
-set -e
-
-MAX_RETRIES=3
-
-IMAGES=(
-  "greptime/zookeeper:3.7"
-  "greptime/kafka:3.9.0-debian-12-r1"
-  "greptime/etcd:3.6.1-debian-12-r3"
-  "greptime/minio:2024"
-  "greptime/mysql:5.7"
-)
-
-for image in "${IMAGES[@]}"; do
-  for ((attempt=1; attempt<=MAX_RETRIES; attempt++)); do
-    if docker pull "$image"; then
-      # Successfully pulled the image.
-      break
-    else
-      # Use some simple exponential backoff to avoid rate limiting.
-      if [ $attempt -lt $MAX_RETRIES ]; then
-        sleep_seconds=$((attempt * 5))
-        echo "Attempt $attempt failed for $image, waiting $sleep_seconds seconds"
-        sleep $sleep_seconds # 5s, 10s delays
-      else
-        echo "Failed to pull $image after $MAX_RETRIES attempts"
-        exit 1
-      fi
-    fi
-  done
-done
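One detail worth flagging in the deleted script: the comment says "exponential backoff", but attempt * 5 grows linearly (5s, then 10s, as the inline comment itself notes). A genuinely exponential delay, had that been intended, could look like the following one-line change; this is an illustration, not something present in either commit:

# 5s, 10s, 20s, ... doubling per attempt.
sleep_seconds=$((5 * 2 ** (attempt - 1)))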
4  .github/workflows/develop.yml  (vendored)
@@ -719,10 +719,6 @@ jobs:
           save-if: ${{ github.ref == 'refs/heads/main' }}
       - name: Install latest nextest release
         uses: taiki-e/install-action@nextest
-
-      - name: Pull test dependencies images
-        run: ./.github/scripts/pull-test-deps-images.sh
-
       - name: Setup external services
         working-directory: tests-integration/fixtures
         run: docker compose up -d --wait
42  .github/workflows/pr-labeling.yaml  (vendored)
@@ -1,42 +0,0 @@
-name: 'PR Labeling'
-
-on:
-  pull_request_target:
-    types:
-      - opened
-      - synchronize
-      - reopened
-
-permissions:
-  contents: read
-  pull-requests: write
-  issues: write
-
-jobs:
-  labeler:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@v4
-
-      - uses: actions/labeler@v5
-        with:
-          configuration-path: ".github/labeler.yaml"
-          repo-token: "${{ secrets.GITHUB_TOKEN }}"
-
-  size-label:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: pascalgn/size-label-action@v0.5.5
-        env:
-          GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
-        with:
-          sizes: >
-            {
-              "0": "XS",
-              "100": "S",
-              "300": "M",
-              "1000": "L",
-              "1500": "XL",
-              "2000": "XXL"
-            }
11  .github/workflows/release.yml  (vendored)
@@ -110,8 +110,6 @@ jobs:
 
       # The 'version' use as the global tag name of the release workflow.
       version: ${{ steps.create-version.outputs.version }}
-
-      should-push-latest-tag: ${{ steps.check-version.outputs.should-push-latest-tag }}
     steps:
       - name: Checkout
         uses: actions/checkout@v4
@@ -137,11 +135,6 @@ jobs:
         GITHUB_REF_NAME: ${{ github.ref_name }}
         NIGHTLY_RELEASE_PREFIX: ${{ env.NIGHTLY_RELEASE_PREFIX }}
 
-      - name: Check version
-        id: check-version
-        run: |
-          ./.github/scripts/check-version.sh "${{ steps.create-version.outputs.version }}"
-
       - name: Allocate linux-amd64 runner
         if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
         uses: ./.github/actions/start-runner
@@ -321,7 +314,7 @@ jobs:
         image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
         image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
         version: ${{ needs.allocate-runners.outputs.version }}
-        push-latest-tag: ${{ needs.allocate-runners.outputs.should-push-latest-tag == 'true' && github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
+        push-latest-tag: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
 
       - name: Set build image result
         id: set-build-image-result
@@ -368,7 +361,7 @@ jobs:
         dev-mode: false
         upload-to-s3: true
         update-version-info: true
-        push-latest-tag: ${{ needs.allocate-runners.outputs.should-push-latest-tag == 'true' && github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
+        push-latest-tag: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
 
   publish-github-release:
     name: Create GitHub release and upload artifacts
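The net effect of this hunk group: the base side only pushes the `latest` Docker tag when check-version.sh confirms the tag being released is at least as new as the newest GitHub release, while the head side pushes `latest` for any non-nightly tag, so releasing an older patch tag could move `latest` backwards. A hedged way to exercise the gate locally (the script writes to $GITHUB_OUTPUT, so pointing it at a temp file is the workaround; the result depends on whatever the latest release is at the time):

export GITHUB_OUTPUT=/tmp/gh-output
./.github/scripts/check-version.sh "v0.14.9"
grep 'should-push-latest-tag' /tmp/gh-output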
247  Cargo.lock  (generated)
@@ -211,7 +211,7 @@ checksum = "d301b3b94cb4b2f23d7917810addbbaff90738e0ca2be692bd027e70d7e0330c"
 
 [[package]]
 name = "api"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "common-base",
  "common-decimal",
@@ -944,7 +944,7 @@ dependencies = [
 
 [[package]]
 name = "auth"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "api",
  "async-trait",
@@ -1586,7 +1586,7 @@ dependencies = [
 
 [[package]]
 name = "cache"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "catalog",
  "common-error",
@@ -1602,17 +1602,6 @@ version = "1.0.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "acbc26382d871df4b7442e3df10a9402bf3cf5e55cbd66f12be38861425f0564"
 
-[[package]]
-name = "cargo-manifest"
-version = "0.19.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a1d8af896b707212cd0e99c112a78c9497dd32994192a463ed2f7419d29bd8c6"
-dependencies = [
- "serde",
- "thiserror 2.0.12",
- "toml 0.8.19",
-]
-
 [[package]]
 name = "cast"
 version = "0.3.0"
@@ -1621,7 +1610,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
 
 [[package]]
 name = "catalog"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "api",
  "arrow 54.2.1",
@@ -1632,10 +1621,8 @@ dependencies = [
  "cache",
  "catalog",
  "chrono",
- "common-base",
  "common-catalog",
  "common-error",
- "common-frontend",
  "common-macro",
  "common-meta",
  "common-procedure",
@@ -1659,8 +1646,6 @@ dependencies = [
  "partition",
  "paste",
  "prometheus",
- "promql-parser",
- "rand 0.9.0",
  "rustc-hash 2.0.0",
  "serde_json",
  "session",
@@ -1683,9 +1668,9 @@ dependencies = [
 
 [[package]]
 name = "cc"
-version = "1.2.27"
+version = "1.1.24"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d487aa071b5f64da6f19a3e848e3578944b726ee5a4854b82172f02aa876bfdc"
+checksum = "812acba72f0a070b003d3697490d2b55b837230ae7c6c6497f05cc2ddbb8d938"
 dependencies = [
  "jobserver",
  "libc",
@@ -1961,9 +1946,8 @@ checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97"
 
 [[package]]
 name = "cli"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
- "async-stream",
  "async-trait",
  "auth",
  "base64 0.22.1",
@@ -1988,6 +1972,7 @@ dependencies = [
  "common-version",
  "common-wal",
  "datatypes",
+ "either",
  "etcd-client",
  "futures",
  "humantime",
@@ -1995,7 +1980,6 @@ dependencies = [
  "meta-srv",
  "nu-ansi-term",
  "object-store",
- "operator",
  "query",
  "rand 0.9.0",
  "reqwest",
@@ -2005,7 +1989,7 @@ dependencies = [
  "session",
  "snafu 0.8.5",
  "store-api",
- "substrait 0.15.5",
+ "substrait 0.15.0",
  "table",
  "tempfile",
  "tokio",
@@ -2014,7 +1998,7 @@ dependencies = [
 
 [[package]]
 name = "client"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "api",
  "arc-swap",
@@ -2044,7 +2028,7 @@ dependencies = [
  "rand 0.9.0",
  "serde_json",
  "snafu 0.8.5",
- "substrait 0.15.5",
+ "substrait 0.15.0",
  "substrait 0.37.3",
  "tokio",
  "tokio-stream",
@@ -2085,7 +2069,7 @@ dependencies = [
 
 [[package]]
 name = "cmd"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "async-trait",
  "auth",
@@ -2115,6 +2099,7 @@ dependencies = [
  "common-wal",
  "datanode",
  "datatypes",
+ "either",
  "etcd-client",
  "file-engine",
  "flow",
@@ -2145,7 +2130,7 @@ dependencies = [
  "snafu 0.8.5",
  "stat",
  "store-api",
- "substrait 0.15.5",
+ "substrait 0.15.0",
  "table",
  "temp-env",
  "tempfile",
@@ -2192,7 +2177,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"
 
 [[package]]
 name = "common-base"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "anymap2",
  "async-trait",
@@ -2214,11 +2199,11 @@ dependencies = [
 
 [[package]]
 name = "common-catalog"
-version = "0.15.5"
+version = "0.15.0"
 
 [[package]]
 name = "common-config"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "common-base",
  "common-error",
@@ -2243,7 +2228,7 @@ dependencies = [
 
 [[package]]
 name = "common-datasource"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "arrow 54.2.1",
  "arrow-schema 54.3.1",
@@ -2280,7 +2265,7 @@ dependencies = [
 
 [[package]]
 name = "common-decimal"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "bigdecimal 0.4.8",
  "common-error",
@@ -2293,7 +2278,7 @@ dependencies = [
 
 [[package]]
 name = "common-error"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "common-macro",
  "http 1.1.0",
@@ -2304,24 +2289,17 @@ dependencies = [
 
 [[package]]
 name = "common-frontend"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "async-trait",
  "common-error",
- "common-grpc",
  "common-macro",
- "common-meta",
- "greptime-proto",
- "meta-client",
- "session",
  "snafu 0.8.5",
- "tokio",
- "tonic 0.12.3",
 ]
 
 [[package]]
 name = "common-function"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "ahash 0.8.11",
  "api",
@@ -2374,7 +2352,7 @@ dependencies = [
 
 [[package]]
 name = "common-greptimedb-telemetry"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "async-trait",
  "common-runtime",
@@ -2391,7 +2369,7 @@ dependencies = [
 
 [[package]]
 name = "common-grpc"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "api",
  "arrow-flight",
@@ -2423,7 +2401,7 @@ dependencies = [
 
 [[package]]
 name = "common-grpc-expr"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "api",
  "common-base",
@@ -2442,7 +2420,7 @@ dependencies = [
 
 [[package]]
 name = "common-macro"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "arc-swap",
  "common-query",
@@ -2456,7 +2434,7 @@ dependencies = [
 
 [[package]]
 name = "common-mem-prof"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "anyhow",
  "common-error",
@@ -2472,7 +2450,7 @@ dependencies = [
 
 [[package]]
 name = "common-meta"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "anymap2",
  "api",
@@ -2492,7 +2470,6 @@ dependencies = [
  "common-procedure-test",
  "common-query",
  "common-recordbatch",
- "common-runtime",
  "common-telemetry",
  "common-test-util",
  "common-time",
@@ -2538,7 +2515,7 @@ dependencies = [
 
 [[package]]
 name = "common-options"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "common-grpc",
  "humantime-serde",
@@ -2547,11 +2524,11 @@ dependencies = [
 
 [[package]]
 name = "common-plugins"
-version = "0.15.5"
+version = "0.15.0"
 
 [[package]]
 name = "common-pprof"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "common-error",
  "common-macro",
@@ -2563,7 +2540,7 @@ dependencies = [
 
 [[package]]
 name = "common-procedure"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "async-stream",
  "async-trait",
@@ -2590,17 +2567,16 @@ dependencies = [
 
 [[package]]
 name = "common-procedure-test"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "async-trait",
  "common-procedure",
  "snafu 0.8.5",
- "tokio",
 ]
 
 [[package]]
 name = "common-query"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "api",
  "async-trait",
@@ -2626,7 +2602,7 @@ dependencies = [
 
 [[package]]
 name = "common-recordbatch"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "arc-swap",
  "common-error",
@@ -2646,7 +2622,7 @@ dependencies = [
 
 [[package]]
 name = "common-runtime"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "async-trait",
  "clap 4.5.19",
@@ -2676,18 +2652,18 @@ dependencies = [
 
 [[package]]
 name = "common-session"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "strum 0.27.1",
 ]
 
 [[package]]
 name = "common-telemetry"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
+ "atty",
  "backtrace",
  "common-error",
- "common-version",
  "console-subscriber",
  "greptime-proto",
  "humantime-serde",
@@ -2711,7 +2687,7 @@ dependencies = [
 
 [[package]]
 name = "common-test-util"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "client",
  "common-grpc",
@@ -2724,7 +2700,7 @@ dependencies = [
 
 [[package]]
 name = "common-time"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "arrow 54.2.1",
  "chrono",
@@ -2742,10 +2718,9 @@ dependencies = [
 
 [[package]]
 name = "common-version"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "build-data",
- "cargo-manifest",
  "const_format",
  "serde",
  "shadow-rs",
@@ -2753,7 +2728,7 @@ dependencies = [
 
 [[package]]
 name = "common-wal"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "common-base",
  "common-error",
@@ -2776,7 +2751,7 @@ dependencies = [
 
 [[package]]
 name = "common-workload"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "api",
  "common-telemetry",
@@ -3087,9 +3062,9 @@ dependencies = [
 
 [[package]]
 name = "crossbeam-channel"
-version = "0.5.15"
+version = "0.5.13"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2"
+checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2"
 dependencies = [
  "crossbeam-utils",
 ]
@@ -3732,7 +3707,7 @@ dependencies = [
 
 [[package]]
 name = "datanode"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "api",
  "arrow-flight",
@@ -3785,7 +3760,7 @@ dependencies = [
  "session",
  "snafu 0.8.5",
  "store-api",
- "substrait 0.15.5",
+ "substrait 0.15.0",
  "table",
  "tokio",
  "toml 0.8.19",
@@ -3794,7 +3769,7 @@ dependencies = [
 
 [[package]]
 name = "datatypes"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "arrow 54.2.1",
  "arrow-array 54.2.1",
@@ -4214,9 +4189,9 @@ dependencies = [
 
 [[package]]
 name = "either"
-version = "1.15.0"
+version = "1.13.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719"
+checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0"
 dependencies = [
  "serde",
 ]
@@ -4454,7 +4429,7 @@ checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6"
 
 [[package]]
 name = "file-engine"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "api",
  "async-trait",
@@ -4591,7 +4566,7 @@ checksum = "8bf7cc16383c4b8d58b9905a8509f02926ce3058053c056376248d958c9df1e8"
 
 [[package]]
 name = "flow"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "api",
  "arrow 54.2.1",
@@ -4656,7 +4631,7 @@ dependencies = [
  "sql",
  "store-api",
  "strum 0.27.1",
- "substrait 0.15.5",
+ "substrait 0.15.0",
  "table",
  "tokio",
  "tonic 0.12.3",
@@ -4711,11 +4686,10 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa"
 
 [[package]]
 name = "frontend"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "api",
  "arc-swap",
- "async-stream",
  "async-trait",
  "auth",
  "bytes",
@@ -4727,7 +4701,6 @@ dependencies = [
  "common-config",
  "common-datasource",
  "common-error",
- "common-frontend",
  "common-function",
  "common-grpc",
  "common-macro",
@@ -4771,10 +4744,9 @@ dependencies = [
  "sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
  "store-api",
  "strfmt",
- "substrait 0.15.5",
+ "substrait 0.15.0",
  "table",
  "tokio",
- "tokio-util",
  "toml 0.8.19",
  "tonic 0.12.3",
  "tower 0.5.2",
@@ -5161,7 +5133,7 @@ dependencies = [
 [[package]]
 name = "greptime-proto"
 version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=f3103a8c9b8ce162457d0a3e3ca00d53d1a8bd06#f3103a8c9b8ce162457d0a3e3ca00d53d1a8bd06"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=454c52634c3bac27de10bf0d85d5533eed1cf03f#454c52634c3bac27de10bf0d85d5533eed1cf03f"
 dependencies = [
  "prost 0.13.5",
  "serde",
@@ -5932,7 +5904,7 @@ dependencies = [
 
 [[package]]
 name = "index"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "async-trait",
  "asynchronous-codec",
@@ -5944,7 +5916,6 @@ dependencies = [
  "common-runtime",
  "common-telemetry",
  "common-test-util",
- "criterion 0.4.0",
  "fastbloom",
  "fst",
  "futures",
@@ -5957,7 +5928,6 @@ dependencies = [
  "prost 0.13.5",
  "puffin",
  "rand 0.9.0",
- "rand_chacha 0.9.0",
  "regex",
  "regex-automata 0.4.8",
  "roaring",
@@ -6819,7 +6789,7 @@ checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
 
 [[package]]
 name = "log-query"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "chrono",
  "common-error",
@@ -6831,7 +6801,7 @@ dependencies = [
 
 [[package]]
 name = "log-store"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "async-stream",
  "async-trait",
@@ -7129,7 +7099,7 @@ dependencies = [
 
 [[package]]
 name = "meta-client"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "api",
  "async-trait",
@@ -7157,13 +7127,10 @@ dependencies = [
 
 [[package]]
 name = "meta-srv"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "api",
  "async-trait",
- "axum 0.8.1",
- "axum-extra",
- "axum-macros",
  "bytes",
  "chrono",
  "clap 4.5.19",
@@ -7190,14 +7157,12 @@ dependencies = [
  "deadpool",
  "deadpool-postgres",
  "derive_builder 0.20.1",
- "either",
  "etcd-client",
  "futures",
  "h2 0.3.26",
  "http-body-util",
  "humantime",
  "humantime-serde",
- "hyper 0.14.30",
  "hyper-util",
  "itertools 0.14.0",
  "lazy_static",
@@ -7225,7 +7190,6 @@ dependencies = [
  "toml 0.8.19",
  "tonic 0.12.3",
  "tower 0.5.2",
- "tower-http 0.6.2",
  "tracing",
  "tracing-subscriber",
  "typetag",
@@ -7254,7 +7218,7 @@ dependencies = [
 
 [[package]]
 name = "metric-engine"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "api",
  "aquamarine",
@@ -7264,7 +7228,6 @@ dependencies = [
  "common-base",
  "common-error",
  "common-macro",
- "common-meta",
  "common-query",
  "common-recordbatch",
  "common-runtime",
@@ -7277,9 +7240,7 @@ dependencies = [
  "humantime-serde",
  "itertools 0.14.0",
  "lazy_static",
- "mito-codec",
  "mito2",
- "moka",
  "mur3",
  "object-store",
  "prometheus",
@@ -7344,32 +7305,9 @@ dependencies = [
  "windows-sys 0.52.0",
 ]
 
-[[package]]
-name = "mito-codec"
-version = "0.15.5"
-dependencies = [
- "api",
- "bytes",
- "common-base",
- "common-decimal",
- "common-error",
- "common-macro",
- "common-recordbatch",
- "common-telemetry",
- "common-time",
- "datafusion-common",
- "datafusion-expr",
- "datatypes",
- "memcomparable",
- "paste",
- "serde",
- "snafu 0.8.5",
- "store-api",
-]
-
 [[package]]
 name = "mito2"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "api",
  "aquamarine",
@@ -7409,7 +7347,6 @@ dependencies = [
  "lazy_static",
  "log-store",
  "memcomparable",
- "mito-codec",
  "moka",
  "object-store",
  "parquet",
@@ -8119,7 +8056,7 @@ dependencies = [
 
 [[package]]
 name = "object-store"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "anyhow",
  "bytes",
@@ -8433,7 +8370,7 @@ dependencies = [
 
 [[package]]
 name = "operator"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "ahash 0.8.11",
  "api",
@@ -8449,7 +8386,6 @@ dependencies = [
  "common-catalog",
  "common-datasource",
  "common-error",
- "common-frontend",
  "common-function",
  "common-grpc",
  "common-grpc-expr",
@@ -8488,7 +8424,7 @@ dependencies = [
  "sql",
  "sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
  "store-api",
- "substrait 0.15.5",
+ "substrait 0.15.0",
  "table",
  "tokio",
  "tokio-util",
@@ -8755,7 +8691,7 @@ dependencies = [
 
 [[package]]
 name = "partition"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "api",
  "async-trait",
@@ -8949,8 +8885,9 @@ dependencies = [
 
 [[package]]
 name = "pgwire"
-version = "0.30.2"
-source = "git+https://github.com/sunng87/pgwire?rev=127573d997228cfb70c7699881c568eae8131270#127573d997228cfb70c7699881c568eae8131270"
+version = "0.30.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ec79ee18e6cafde8698885646780b967ecc905120798b8359dd0da64f9688e89"
 dependencies = [
  "async-trait",
  "bytes",
@@ -9043,7 +8980,7 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
 
 [[package]]
 name = "pipeline"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "ahash 0.8.11",
  "api",
@@ -9186,7 +9123,7 @@ dependencies = [
 
 [[package]]
 name = "plugins"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "auth",
  "clap 4.5.19",
@@ -9499,7 +9436,7 @@ dependencies = [
 
 [[package]]
 name = "promql"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "ahash 0.8.11",
  "async-trait",
@@ -9781,7 +9718,7 @@ dependencies = [
 
 [[package]]
 name = "puffin"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "async-compression 0.4.13",
  "async-trait",
@@ -9823,7 +9760,7 @@ dependencies = [
 
 [[package]]
 name = "query"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "ahash 0.8.11",
  "api",
@@ -9889,7 +9826,7 @@ dependencies = [
  "sqlparser 0.54.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=0cf6c04490d59435ee965edd2078e8855bd8471e)",
  "statrs",
  "store-api",
- "substrait 0.15.5",
+ "substrait 0.15.0",
  "table",
  "tokio",
  "tokio-stream",
@@ -10416,14 +10353,15 @@ dependencies = [
 
 [[package]]
 name = "ring"
-version = "0.17.14"
+version = "0.17.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7"
+checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d"
 dependencies = [
  "cc",
  "cfg-if",
  "getrandom 0.2.15",
  "libc",
+ "spin",
  "untrusted",
  "windows-sys 0.52.0",
 ]
@@ -11175,7 +11113,7 @@ dependencies = [
 
 [[package]]
 name = "servers"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "ahash 0.8.11",
  "api",
@@ -11197,7 +11135,6 @@ dependencies = [
  "common-catalog",
  "common-config",
  "common-error",
- "common-frontend",
  "common-grpc",
  "common-macro",
  "common-mem-prof",
@@ -11296,7 +11233,7 @@ dependencies = [
 
 [[package]]
 name = "session"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "api",
  "arc-swap",
@@ -11635,7 +11572,7 @@ dependencies = [
 
 [[package]]
 name = "sql"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "api",
  "chrono",
@@ -11690,7 +11627,7 @@ dependencies = [
 
 [[package]]
 name = "sqlness-runner"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "async-trait",
  "clap 4.5.19",
@@ -11990,7 +11927,7 @@ dependencies = [
 
 [[package]]
 name = "stat"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "nix 0.30.1",
 ]
@@ -12016,7 +11953,7 @@ dependencies = [
 
 [[package]]
 name = "store-api"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "api",
  "aquamarine",
@@ -12177,7 +12114,7 @@ dependencies = [
 
 [[package]]
 name = "substrait"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "async-trait",
  "bytes",
@@ -12357,7 +12294,7 @@ dependencies = [
 
 [[package]]
 name = "table"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "api",
  "async-trait",
@@ -12618,7 +12555,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"
 
 [[package]]
 name = "tests-fuzz"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "arbitrary",
  "async-trait",
@@ -12662,7 +12599,7 @@ dependencies = [
 
 [[package]]
 name = "tests-integration"
-version = "0.15.5"
+version = "0.15.0"
 dependencies = [
  "api",
  "arrow-flight",
@@ -12729,7 +12666,7 @@ dependencies = [
  "sql",
  "sqlx",
  "store-api",
- "substrait 0.15.5",
+ "substrait 0.15.0",
  "table",
  "tempfile",
  "time",
@@ -12739,7 +12676,6 @@ dependencies = [
  "tonic 0.12.3",
  "tower 0.5.2",
  "url",
- "urlencoding",
 "uuid",
 "yaml-rust",
 "zstd 0.13.2",
@@ -13100,7 +13036,6 @@ version = "0.8.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e"
 dependencies = [
- "indexmap 2.9.0",
 "serde",
 "serde_spanned",
 "toml_datetime",
@@ -49,7 +49,6 @@ members = [
   "src/meta-client",
   "src/meta-srv",
   "src/metric-engine",
-  "src/mito-codec",
   "src/mito2",
   "src/object-store",
   "src/operator",
@@ -71,7 +70,7 @@ members = [
 resolver = "2"
 
 [workspace.package]
-version = "0.15.5"
+version = "0.15.0"
 edition = "2021"
 license = "Apache-2.0"
 
@@ -130,12 +129,11 @@ deadpool = "0.12"
 deadpool-postgres = "0.14"
 derive_builder = "0.20"
 dotenv = "0.15"
-either = "1.15"
 etcd-client = "0.14"
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "f3103a8c9b8ce162457d0a3e3ca00d53d1a8bd06" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "454c52634c3bac27de10bf0d85d5533eed1cf03f" }
 hex = "0.4"
 http = "1"
 humantime = "2.1"
@@ -221,8 +219,6 @@ tokio-util = { version = "0.7", features = ["io-util", "compat"] }
 toml = "0.8.8"
 tonic = { version = "0.12", features = ["tls", "gzip", "zstd"] }
 tower = "0.5"
-tower-http = "0.6"
-tracing = "0.1"
 tracing-appender = "0.2"
 tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] }
 typetag = "0.2"
@@ -278,7 +274,6 @@ log-store = { path = "src/log-store" }
 meta-client = { path = "src/meta-client" }
 meta-srv = { path = "src/meta-srv" }
 metric-engine = { path = "src/metric-engine" }
-mito-codec = { path = "src/mito-codec" }
 mito2 = { path = "src/mito2" }
 object-store = { path = "src/object-store" }
 operator = { path = "src/operator" }
|
|||||||
Makefile
@@ -8,7 +8,7 @@ CARGO_BUILD_OPTS := --locked
 IMAGE_REGISTRY ?= docker.io
 IMAGE_NAMESPACE ?= greptime
 IMAGE_TAG ?= latest
-DEV_BUILDER_IMAGE_TAG ?= 2025-05-19-f55023f3-20250829091211
+DEV_BUILDER_IMAGE_TAG ?= 2025-05-19-b2377d4b-20250520045554
 BUILDX_MULTI_PLATFORM_BUILD ?= false
 BUILDX_BUILDER_NAME ?= gtbuilder
 BASE_IMAGE ?= ubuntu
@@ -189,8 +189,7 @@ We invite you to engage and contribute!
 - [Official Website](https://greptime.com/)
 - [Blog](https://greptime.com/blogs/)
 - [LinkedIn](https://www.linkedin.com/company/greptime/)
-- [X (Twitter)](https://X.com/greptime)
-- [YouTube](https://www.youtube.com/@greptime)
+- [Twitter](https://twitter.com/greptime)
 
 ## License
 
@@ -123,7 +123,6 @@
 | `storage.http_client.connect_timeout` | String | `30s` | The timeout for only the connect phase of a http client. |
 | `storage.http_client.timeout` | String | `30s` | The total request timeout, applied from when the request starts connecting until the response body has finished.<br/>Also considered a total deadline. |
 | `storage.http_client.pool_idle_timeout` | String | `90s` | The timeout for idle sockets being kept-alive. |
-| `storage.http_client.skip_ssl_validation` | Bool | `false` | To skip the ssl verification<br/>**Security Notice**: Setting `skip_ssl_validation = true` disables certificate verification, making connections vulnerable to man-in-the-middle attacks. Only use this in development or trusted private networks. |
 | `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
 | `region_engine.mito` | -- | -- | The Mito engine options. |
 | `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
@@ -147,7 +146,6 @@
 | `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
 | `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
 | `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
-| `region_engine.mito.max_concurrent_scan_files` | Integer | `128` | Maximum number of SST files to scan concurrently. |
 | `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
 | `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
 | `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
@@ -234,7 +232,7 @@
 | `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
 | `grpc.server_addr` | String | `127.0.0.1:4001` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `grpc.bind_addr`. |
 | `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
-| `grpc.flight_compression` | String | `arrow_ipc` | Compression mode for frontend side Arrow IPC service. Available options:<br/>- `none`: disable all compression<br/>- `transport`: only enable gRPC transport compression (zstd)<br/>- `arrow_ipc`: only enable Arrow IPC compression (lz4)<br/>- `all`: enable all compression.<br/>Default to `none` |
+| `grpc.flight_compression` | String | `arrow_ipc` | Compression mode for frontend side Arrow IPC service. Available options:<br/>- `none`: disable all compression<br/>- `transport`: only enable gRPC transport compression (zstd)<br/>- `arrow_ipc`: only enable Arrow IPC compression (lz4)<br/>- `all`: enable all compression. |
 | `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
 | `grpc.tls.mode` | String | `disable` | TLS mode. |
 | `grpc.tls.cert_path` | String | Unset | Certificate file path. |
@@ -407,7 +405,7 @@
 | `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
 | `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
 | `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
-| `grpc.flight_compression` | String | `arrow_ipc` | Compression mode for datanode side Arrow IPC service. Available options:<br/>- `none`: disable all compression<br/>- `transport`: only enable gRPC transport compression (zstd)<br/>- `arrow_ipc`: only enable Arrow IPC compression (lz4)<br/>- `all`: enable all compression.<br/>Default to `none` |
+| `grpc.flight_compression` | String | `arrow_ipc` | Compression mode for datanode side Arrow IPC service. Available options:<br/>- `none`: disable all compression<br/>- `transport`: only enable gRPC transport compression (zstd)<br/>- `arrow_ipc`: only enable Arrow IPC compression (lz4)<br/>- `all`: enable all compression. |
 | `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
 | `grpc.tls.mode` | String | `disable` | TLS mode. |
 | `grpc.tls.cert_path` | String | Unset | Certificate file path. |
@@ -473,7 +471,6 @@
 | `storage.http_client.connect_timeout` | String | `30s` | The timeout for only the connect phase of a http client. |
 | `storage.http_client.timeout` | String | `30s` | The total request timeout, applied from when the request starts connecting until the response body has finished.<br/>Also considered a total deadline. |
 | `storage.http_client.pool_idle_timeout` | String | `90s` | The timeout for idle sockets being kept-alive. |
-| `storage.http_client.skip_ssl_validation` | Bool | `false` | To skip the ssl verification<br/>**Security Notice**: Setting `skip_ssl_validation = true` disables certificate verification, making connections vulnerable to man-in-the-middle attacks. Only use this in development or trusted private networks. |
 | `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
 | `region_engine.mito` | -- | -- | The Mito engine options. |
 | `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
@@ -497,7 +494,6 @@
 | `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
 | `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
 | `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
-| `region_engine.mito.max_concurrent_scan_files` | Integer | `128` | Maximum number of SST files to scan concurrently. |
 | `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
 | `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
 | `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
@@ -367,10 +367,6 @@ timeout = "30s"
 ## The timeout for idle sockets being kept-alive.
 pool_idle_timeout = "90s"
 
-## To skip the ssl verification
-## **Security Notice**: Setting `skip_ssl_validation = true` disables certificate verification, making connections vulnerable to man-in-the-middle attacks. Only use this in development or trusted private networks.
-skip_ssl_validation = false
-
 # Custom storage options
 # [[storage.providers]]
 # name = "S3"
@@ -474,9 +470,6 @@ sst_write_buffer_size = "8MB"
 ## Capacity of the channel to send data from parallel scan tasks to the main task.
 parallel_scan_channel_size = 32
 
-## Maximum number of SST files to scan concurrently.
-max_concurrent_scan_files = 128
-
 ## Whether to allow stale WAL entries read during replay.
 allow_stale_entries = false
 
@@ -458,10 +458,6 @@ timeout = "30s"
 ## The timeout for idle sockets being kept-alive.
 pool_idle_timeout = "90s"
 
-## To skip the ssl verification
-## **Security Notice**: Setting `skip_ssl_validation = true` disables certificate verification, making connections vulnerable to man-in-the-middle attacks. Only use this in development or trusted private networks.
-skip_ssl_validation = false
-
 # Custom storage options
 # [[storage.providers]]
 # name = "S3"
@@ -565,9 +561,6 @@ sst_write_buffer_size = "8MB"
 ## Capacity of the channel to send data from parallel scan tasks to the main task.
 parallel_scan_channel_size = 32
 
-## Maximum number of SST files to scan concurrently.
-max_concurrent_scan_files = 128
-
 ## Whether to allow stale WAL entries read during replay.
 allow_stale_entries = false
 
@@ -9,7 +9,7 @@ We highly recommend using the self-monitoring feature provided by [GreptimeDB Op
 - **Metrics Dashboards**
 
   - `dashboards/metrics/cluster/dashboard.json`: The Grafana dashboard for the GreptimeDB cluster. Read the [dashboard.md](./dashboards/metrics/cluster/dashboard.md) for more details.
 
   - `dashboards/metrics/standalone/dashboard.json`: The Grafana dashboard for the standalone GreptimeDB instance. **It's generated from the `cluster/dashboard.json` by removing the instance filter through the `make dashboards` command**. Read the [dashboard.md](./dashboards/metrics/standalone/dashboard.md) for more details.
 
 - **Logs Dashboard**
@@ -83,7 +83,7 @@ If you use the [Helm Chart](https://github.com/GreptimeTeam/helm-charts) to depl
 - `monitoring.enabled=true`: Deploys a standalone GreptimeDB instance dedicated to monitoring the cluster;
 - `grafana.enabled=true`: Deploys Grafana and automatically imports the monitoring dashboard;
 
-The standalone GreptimeDB instance will collect metrics from your cluster, and the dashboard will be available in the Grafana UI. For detailed deployment instructions, please refer to our [Kubernetes deployment guide](https://docs.greptime.com/user-guide/deployments-administration/deploy-on-kubernetes/getting-started).
+The standalone GreptimeDB instance will collect metrics from your cluster, and the dashboard will be available in the Grafana UI. For detailed deployment instructions, please refer to our [Kubernetes deployment guide](https://docs.greptime.com/nightly/user-guide/deployments/deploy-on-kubernetes/getting-started).
 
 ### Self-host Prometheus and import dashboards manually
 
(File diff suppressed because it is too large.)
@@ -70,7 +70,6 @@
 | Inflight Flush | `greptime_mito_inflight_flush_count` | `timeseries` | Ongoing flush task count | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]` |
 | Compaction Input/Output Bytes | `sum by(instance, pod) (greptime_mito_compaction_input_bytes)`<br/>`sum by(instance, pod) (greptime_mito_compaction_output_bytes)` | `timeseries` | Compaction oinput output bytes | `prometheus` | `bytes` | `[{{instance}}]-[{{pod}}]-input` |
 | Region Worker Handle Bulk Insert Requests | `histogram_quantile(0.95, sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_bucket[$__rate_interval])))`<br/>`sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to handle bulk insert region requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
-| Active Series and Field Builders Count | `sum by(instance, pod) (greptime_mito_memtable_active_series_count)`<br/>`sum by(instance, pod) (greptime_mito_memtable_field_builder_count)` | `timeseries` | Compaction oinput output bytes | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]-series` |
 | Region Worker Convert Requests | `histogram_quantile(0.95, sum by(le, instance, stage, pod) (rate(greptime_datanode_convert_region_request_bucket[$__rate_interval])))`<br/>`sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to decode requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
 # OpenDAL
 | Title | Query | Type | Description | Datasource | Unit | Legend Format |
@@ -612,21 +612,6 @@ groups:
           type: prometheus
           uid: ${metrics}
         legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
-  - title: Active Series and Field Builders Count
-    type: timeseries
-    description: Compaction oinput output bytes
-    unit: none
-    queries:
-      - expr: sum by(instance, pod) (greptime_mito_memtable_active_series_count)
-        datasource:
-          type: prometheus
-          uid: ${metrics}
-        legendFormat: '[{{instance}}]-[{{pod}}]-series'
-      - expr: sum by(instance, pod) (greptime_mito_memtable_field_builder_count)
-        datasource:
-          type: prometheus
-          uid: ${metrics}
-        legendFormat: '[{{instance}}]-[{{pod}}]-field_builders'
   - title: Region Worker Convert Requests
     type: timeseries
     description: Per-stage elapsed time for region worker to decode requests.
(File diff suppressed because it is too large.)
@@ -70,7 +70,6 @@
 | Inflight Flush | `greptime_mito_inflight_flush_count` | `timeseries` | Ongoing flush task count | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]` |
 | Compaction Input/Output Bytes | `sum by(instance, pod) (greptime_mito_compaction_input_bytes)`<br/>`sum by(instance, pod) (greptime_mito_compaction_output_bytes)` | `timeseries` | Compaction oinput output bytes | `prometheus` | `bytes` | `[{{instance}}]-[{{pod}}]-input` |
 | Region Worker Handle Bulk Insert Requests | `histogram_quantile(0.95, sum by(le,instance, stage, pod) (rate(greptime_region_worker_handle_write_bucket[$__rate_interval])))`<br/>`sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_sum[$__rate_interval]))/sum by(instance, stage, pod) (rate(greptime_region_worker_handle_write_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to handle bulk insert region requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
-| Active Series and Field Builders Count | `sum by(instance, pod) (greptime_mito_memtable_active_series_count)`<br/>`sum by(instance, pod) (greptime_mito_memtable_field_builder_count)` | `timeseries` | Compaction oinput output bytes | `prometheus` | `none` | `[{{instance}}]-[{{pod}}]-series` |
 | Region Worker Convert Requests | `histogram_quantile(0.95, sum by(le, instance, stage, pod) (rate(greptime_datanode_convert_region_request_bucket[$__rate_interval])))`<br/>`sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_sum[$__rate_interval]))/sum by(le,instance, stage, pod) (rate(greptime_datanode_convert_region_request_count[$__rate_interval]))` | `timeseries` | Per-stage elapsed time for region worker to decode requests. | `prometheus` | `s` | `[{{instance}}]-[{{pod}}]-[{{stage}}]-P95` |
 # OpenDAL
 | Title | Query | Type | Description | Datasource | Unit | Legend Format |
@@ -612,21 +612,6 @@ groups:
           type: prometheus
           uid: ${metrics}
         legendFormat: '[{{instance}}]-[{{pod}}]-[{{stage}}]-AVG'
-  - title: Active Series and Field Builders Count
-    type: timeseries
-    description: Compaction oinput output bytes
-    unit: none
-    queries:
-      - expr: sum by(instance, pod) (greptime_mito_memtable_active_series_count)
-        datasource:
-          type: prometheus
-          uid: ${metrics}
-        legendFormat: '[{{instance}}]-[{{pod}}]-series'
-      - expr: sum by(instance, pod) (greptime_mito_memtable_field_builder_count)
-        datasource:
-          type: prometheus
-          uid: ${metrics}
-        legendFormat: '[{{instance}}]-[{{pod}}]-field_builders'
   - title: Region Worker Convert Requests
     type: timeseries
     description: Per-stage elapsed time for region worker to decode requests.
@@ -22,7 +22,6 @@ use greptime_proto::v1::region::RegionResponse as RegionResponseV1;
 pub struct RegionResponse {
     pub affected_rows: AffectedRows,
     pub extensions: HashMap<String, Vec<u8>>,
-    pub metadata: Vec<u8>,
 }
 
 impl RegionResponse {
@@ -30,7 +29,6 @@ impl RegionResponse {
         Self {
             affected_rows: region_response.affected_rows as _,
             extensions: region_response.extensions,
-            metadata: region_response.metadata,
         }
     }
 
@@ -39,16 +37,6 @@ impl RegionResponse {
         Self {
             affected_rows,
             extensions: Default::default(),
-            metadata: Vec::new(),
-        }
-    }
-
-    /// Creates one response with metadata.
-    pub fn from_metadata(metadata: Vec<u8>) -> Self {
-        Self {
-            affected_rows: 0,
-            extensions: Default::default(),
-            metadata,
-        }
         }
     }
 }
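The hunks above remove the `metadata` field from `RegionResponse` along with its `from_metadata` constructor. For orientation, here is a minimal, self-contained sketch of the struct as it looks after the change; the `AffectedRows` alias is stubbed as `usize` here, which may not match the real crate's definition.

```rust
// Sketch only: `AffectedRows` is assumed to be an integer alias; the real
// type lives elsewhere in the crate.
use std::collections::HashMap;

type AffectedRows = usize;

pub struct RegionResponse {
    pub affected_rows: AffectedRows,
    pub extensions: HashMap<String, Vec<u8>>,
}

impl RegionResponse {
    /// The remaining constructor on the new side: no metadata payload.
    pub fn new(affected_rows: AffectedRows) -> Self {
        Self {
            affected_rows,
            extensions: Default::default(),
        }
    }
}

fn main() {
    let resp = RegionResponse::new(42);
    assert_eq!(resp.affected_rows, 42);
    assert!(resp.extensions.is_empty());
}
```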
@@ -24,7 +24,7 @@ use greptime_proto::v1::{
 };
 use snafu::ResultExt;
 
-use crate::error::{self, ConvertColumnDefaultConstraintSnafu, Result};
+use crate::error::{self, Result};
 use crate::helper::ColumnDataTypeWrapper;
 use crate::v1::{ColumnDef, ColumnOptions, SemanticType};
 
@@ -77,48 +77,6 @@ pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
     })
 }
 
-/// Tries to construct a `ColumnDef` from the given `ColumnSchema`.
-///
-/// TODO(weny): Add tests for this function.
-pub fn try_as_column_def(column_schema: &ColumnSchema, is_primary_key: bool) -> Result<ColumnDef> {
-    let column_datatype =
-        ColumnDataTypeWrapper::try_from(column_schema.data_type.clone()).map(|w| w.to_parts())?;
-
-    let semantic_type = if column_schema.is_time_index() {
-        SemanticType::Timestamp
-    } else if is_primary_key {
-        SemanticType::Tag
-    } else {
-        SemanticType::Field
-    } as i32;
-    let comment = column_schema
-        .metadata()
-        .get(COMMENT_KEY)
-        .cloned()
-        .unwrap_or_default();
-
-    let default_constraint = match column_schema.default_constraint() {
-        None => vec![],
-        Some(v) => v
-            .clone()
-            .try_into()
-            .context(ConvertColumnDefaultConstraintSnafu {
-                column: &column_schema.name,
-            })?,
-    };
-    let options = options_from_column_schema(column_schema);
-    Ok(ColumnDef {
-        name: column_schema.name.clone(),
-        data_type: column_datatype.0 as i32,
-        is_nullable: column_schema.is_nullable(),
-        default_constraint,
-        semantic_type,
-        comment,
-        datatype_extension: column_datatype.1,
-        options,
-    })
-}
-
 /// Constructs a `ColumnOptions` from the given `ColumnSchema`.
 pub fn options_from_column_schema(column_schema: &ColumnSchema) -> Option<ColumnOptions> {
     let mut options = ColumnOptions::default();
@@ -268,20 +226,18 @@ mod tests {
         assert!(options.is_none());
 
         let mut schema = ColumnSchema::new("test", ConcreteDataType::string_datatype(), true)
-            .with_fulltext_options(FulltextOptions::new_unchecked(
-                true,
-                FulltextAnalyzer::English,
-                false,
-                FulltextBackend::Bloom,
-                10240,
-                0.01,
-            ))
+            .with_fulltext_options(FulltextOptions {
+                enable: true,
+                analyzer: FulltextAnalyzer::English,
+                case_sensitive: false,
+                backend: FulltextBackend::Bloom,
+            })
             .unwrap();
         schema.set_inverted_index(true);
         let options = options_from_column_schema(&schema).unwrap();
         assert_eq!(
             options.options.get(FULLTEXT_GRPC_KEY).unwrap(),
-            "{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false,\"backend\":\"bloom\",\"granularity\":10240,\"false-positive-rate-in-10000\":100}"
+            "{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false,\"backend\":\"bloom\"}"
         );
         assert_eq!(
             options.options.get(INVERTED_INDEX_GRPC_KEY).unwrap(),
@@ -291,18 +247,16 @@ mod tests {
 
     #[test]
     fn test_options_with_fulltext() {
-        let fulltext = FulltextOptions::new_unchecked(
-            true,
-            FulltextAnalyzer::English,
-            false,
-            FulltextBackend::Bloom,
-            10240,
-            0.01,
-        );
+        let fulltext = FulltextOptions {
+            enable: true,
+            analyzer: FulltextAnalyzer::English,
+            case_sensitive: false,
+            backend: FulltextBackend::Bloom,
+        };
         let options = options_from_fulltext(&fulltext).unwrap().unwrap();
         assert_eq!(
             options.options.get(FULLTEXT_GRPC_KEY).unwrap(),
-            "{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false,\"backend\":\"bloom\",\"granularity\":10240,\"false-positive-rate-in-10000\":100}"
+            "{\"enable\":true,\"analyzer\":\"English\",\"case-sensitive\":false,\"backend\":\"bloom\"}"
        );
     }
 
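The test hunks above swap a `FulltextOptions::new_unchecked(...)` call (old side, carrying a granularity and a false-positive rate) for a plain struct literal (new side, without those two knobs). A hedged, self-contained sketch of the two shapes follows; every type here is a local stub, and the names of the two extra fields are inferred from the serialized keys in the hunk (`granularity`, `false-positive-rate-in-10000`), so they may not match the real crate.

```rust
// Local stubs standing in for the real datatypes-crate definitions.
#[derive(Debug)]
enum FulltextAnalyzer {
    English,
}

#[derive(Debug)]
enum FulltextBackend {
    Bloom,
}

#[derive(Debug)]
struct FulltextOptions {
    enable: bool,
    analyzer: FulltextAnalyzer,
    case_sensitive: bool,
    backend: FulltextBackend,
    granularity: u32,
    false_positive_rate_in_10000: u32,
}

impl FulltextOptions {
    // Mirrors the old side's positional constructor; "unchecked" presumably
    // because the real version skips validating the last two arguments.
    fn new_unchecked(
        enable: bool,
        analyzer: FulltextAnalyzer,
        case_sensitive: bool,
        backend: FulltextBackend,
        granularity: u32,
        false_positive_rate: f64,
    ) -> Self {
        Self {
            enable,
            analyzer,
            case_sensitive,
            backend,
            granularity,
            // The serialized key is "false-positive-rate-in-10000", so the
            // expected 0.01 in the hunk maps to 100 here.
            false_positive_rate_in_10000: (false_positive_rate * 10000.0) as u32,
        }
    }
}

fn main() {
    let opts = FulltextOptions::new_unchecked(
        true,
        FulltextAnalyzer::English,
        false,
        FulltextBackend::Bloom,
        10240,
        0.01,
    );
    assert_eq!(opts.false_positive_rate_in_10000, 100);
    println!("{opts:?}");
}
```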
@@ -17,10 +17,8 @@ arrow-schema.workspace = true
 async-stream.workspace = true
 async-trait.workspace = true
 bytes.workspace = true
-common-base.workspace = true
 common-catalog.workspace = true
 common-error.workspace = true
-common-frontend.workspace = true
 common-macro.workspace = true
 common-meta.workspace = true
 common-procedure.workspace = true
@@ -43,8 +41,6 @@ moka = { workspace = true, features = ["future", "sync"] }
 partition.workspace = true
 paste.workspace = true
 prometheus.workspace = true
-promql-parser.workspace = true
-rand.workspace = true
 rustc-hash.workspace = true
 serde_json.workspace = true
 session.workspace = true
@@ -277,26 +277,6 @@ pub enum Error {
         #[snafu(implicit)]
         location: Location,
     },
-
-    #[snafu(display("Failed to invoke frontend services"))]
-    InvokeFrontend {
-        source: common_frontend::error::Error,
-        #[snafu(implicit)]
-        location: Location,
-    },
-
-    #[snafu(display("Meta client is not provided"))]
-    MetaClientMissing {
-        #[snafu(implicit)]
-        location: Location,
-    },
-
-    #[snafu(display("Failed to find frontend node: {}", addr))]
-    FrontendNotFound {
-        addr: String,
-        #[snafu(implicit)]
-        location: Location,
-    },
 }
 
 impl Error {
@@ -365,10 +345,6 @@ impl ErrorExt for Error {
             Error::GetViewCache { source, .. } | Error::GetTableCache { source, .. } => {
                 source.status_code()
             }
-            Error::InvokeFrontend { source, .. } => source.status_code(),
-            Error::FrontendNotFound { .. } | Error::MetaClientMissing { .. } => {
-                StatusCode::Unexpected
-            }
         }
     }
 
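The removed variants above all follow the same snafu pattern: a `display` message, optional payload fields, and an implicitly captured `Location`. A minimal standalone sketch of that pattern, assuming `snafu` 0.8; only the variant shape is taken from the hunk.

```rust
use snafu::{Location, Snafu};

#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
    #[snafu(display("Failed to find frontend node: {}", addr))]
    FrontendNotFound {
        addr: String,
        // Captured automatically by the generated context selector.
        #[snafu(implicit)]
        location: Location,
    },
}

fn main() {
    // `FrontendNotFoundSnafu` is the context selector that the Snafu
    // derive generates from the variant name.
    let err: Error = FrontendNotFoundSnafu {
        addr: "10.0.0.1:4001",
    }
    .build();
    println!("{err}"); // Failed to find frontend node: 10.0.0.1:4001
}
```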
@@ -16,8 +16,8 @@ use api::v1::meta::ProcedureStatus;
 use common_error::ext::BoxedError;
 use common_meta::cluster::{ClusterInfo, NodeInfo};
 use common_meta::datanode::RegionStat;
+use common_meta::ddl::{ExecutorContext, ProcedureExecutor};
 use common_meta::key::flow::flow_state::FlowStat;
-use common_meta::procedure_executor::{ExecutorContext, ProcedureExecutor};
 use common_meta::rpc::procedure;
 use common_procedure::{ProcedureInfo, ProcedureState};
 use meta_client::MetaClientRef;
@@ -22,13 +22,11 @@ use common_catalog::consts::{
     PG_CATALOG_NAME,
 };
 use common_error::ext::BoxedError;
-use common_meta::cache::{
-    LayeredCacheRegistryRef, TableRoute, TableRouteCacheRef, ViewInfoCacheRef,
-};
+use common_meta::cache::{LayeredCacheRegistryRef, ViewInfoCacheRef};
 use common_meta::key::catalog_name::CatalogNameKey;
 use common_meta::key::flow::FlowMetadataManager;
 use common_meta::key::schema_name::SchemaNameKey;
-use common_meta::key::table_info::{TableInfoManager, TableInfoValue};
+use common_meta::key::table_info::TableInfoValue;
 use common_meta::key::table_name::TableNameKey;
 use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
 use common_meta::kv_backend::KvBackendRef;
@@ -39,7 +37,6 @@ use moka::sync::Cache;
 use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
 use session::context::{Channel, QueryContext};
 use snafu::prelude::*;
-use store_api::metric_engine_consts::METRIC_ENGINE_NAME;
 use table::dist_table::DistTable;
 use table::metadata::TableId;
 use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
@@ -54,7 +51,6 @@ use crate::error::{
 };
 use crate::information_schema::{InformationExtensionRef, InformationSchemaProvider};
 use crate::kvbackend::TableCacheRef;
-use crate::process_manager::ProcessManagerRef;
 use crate::system_schema::pg_catalog::PGCatalogProvider;
 use crate::system_schema::SystemSchemaProvider;
 use crate::CatalogManager;
@@ -88,7 +84,6 @@ impl KvBackendCatalogManager {
         backend: KvBackendRef,
         cache_registry: LayeredCacheRegistryRef,
         procedure_manager: Option<ProcedureManagerRef>,
-        process_manager: Option<ProcessManagerRef>,
     ) -> Arc<Self> {
         Arc::new_cyclic(|me| Self {
             information_extension,
@@ -107,14 +102,12 @@ impl KvBackendCatalogManager {
                 DEFAULT_CATALOG_NAME.to_string(),
                 me.clone(),
                 Arc::new(FlowMetadataManager::new(backend.clone())),
-                process_manager.clone(),
             )),
             pg_catalog_provider: Arc::new(PGCatalogProvider::new(
                 DEFAULT_CATALOG_NAME.to_string(),
                 me.clone(),
             )),
             backend,
-            process_manager,
         },
         cache_registry,
         procedure_manager,
@@ -143,61 +136,6 @@ impl KvBackendCatalogManager {
     pub fn procedure_manager(&self) -> Option<ProcedureManagerRef> {
         self.procedure_manager.clone()
     }
-
-    // Override logical table's partition key indices with physical table's.
-    async fn override_logical_table_partition_key_indices(
-        table_route_cache: &TableRouteCacheRef,
-        table_info_manager: &TableInfoManager,
-        table: TableRef,
-    ) -> Result<TableRef> {
-        // If the table is not a metric table, return the table directly.
-        if table.table_info().meta.engine != METRIC_ENGINE_NAME {
-            return Ok(table);
-        }
-
-        if let Some(table_route_value) = table_route_cache
-            .get(table.table_info().table_id())
-            .await
-            .context(TableMetadataManagerSnafu)?
-            && let TableRoute::Logical(logical_route) = &*table_route_value
-            && let Some(physical_table_info_value) = table_info_manager
-                .get(logical_route.physical_table_id())
-                .await
-                .context(TableMetadataManagerSnafu)?
-        {
-            let mut new_table_info = (*table.table_info()).clone();
-
-            // Remap partition key indices from physical table to logical table
-            new_table_info.meta.partition_key_indices = physical_table_info_value
-                .table_info
-                .meta
-                .partition_key_indices
-                .iter()
-                .filter_map(|&physical_index| {
-                    // Get the column name from the physical table using the physical index
-                    physical_table_info_value
-                        .table_info
-                        .meta
-                        .schema
-                        .column_schemas
-                        .get(physical_index)
-                        .and_then(|physical_column| {
-                            // Find the corresponding index in the logical table schema
-                            new_table_info
-                                .meta
-                                .schema
-                                .column_index_by_name(physical_column.name.as_str())
-                        })
-                })
-                .collect();
-
-            let new_table = DistTable::table(Arc::new(new_table_info));
-
-            return Ok(new_table);
-        }
-
-        Ok(table)
-    }
 }
 
 #[async_trait::async_trait]
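The removed `override_logical_table_partition_key_indices` above remaps each physical partition-key index to the matching column index in the logical schema, joining by column name. The same core logic, reduced to plain vectors so it runs standalone (all names here are illustrative, not from the crate):

```rust
// Hedged sketch of the index-remapping step: for each physical partition-key
// index, look up the column name in the physical schema, then find that
// name's position in the logical schema; columns missing from the logical
// table are silently dropped, exactly as filter_map does in the hunk.
fn remap_partition_key_indices(
    physical_indices: &[usize],
    physical_columns: &[&str],
    logical_columns: &[&str],
) -> Vec<usize> {
    physical_indices
        .iter()
        .filter_map(|&physical_index| {
            physical_columns.get(physical_index).and_then(|name| {
                logical_columns.iter().position(|c| c == name)
            })
        })
        .collect()
}

fn main() {
    let physical = ["ts", "host", "dc", "value"];
    let logical = ["host", "ts", "value"];
    // Physical partition keys: host (index 1) and dc (index 2); "dc" does
    // not exist in the logical table, so only "host" survives, at index 0.
    assert_eq!(
        remap_partition_key_indices(&[1, 2], &physical, &logical),
        vec![0]
    );
}
```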
@@ -324,28 +262,16 @@ impl CatalogManager for KvBackendCatalogManager {
         let table_cache: TableCacheRef = self.cache_registry.get().context(CacheNotFoundSnafu {
             name: "table_cache",
         })?;
-
-        let table = table_cache
+        if let Some(table) = table_cache
             .get_by_ref(&TableName {
                 catalog_name: catalog_name.to_string(),
                 schema_name: schema_name.to_string(),
                 table_name: table_name.to_string(),
             })
             .await
-            .context(GetTableCacheSnafu)?;
-
-        if let Some(table) = table {
-            let table_route_cache: TableRouteCacheRef =
-                self.cache_registry.get().context(CacheNotFoundSnafu {
-                    name: "table_route_cache",
-                })?;
-            return Self::override_logical_table_partition_key_indices(
-                &table_route_cache,
-                self.table_metadata_manager.table_info_manager(),
-                table,
-            )
-            .await
-            .map(Some);
+            .context(GetTableCacheSnafu)?
+        {
+            return Ok(Some(table));
         }
 
         if channel == Channel::Postgres {
@@ -358,7 +284,7 @@ impl CatalogManager for KvBackendCatalogManager {
             }
         }
 
-        Ok(None)
+        return Ok(None);
     }
 
     async fn tables_by_ids(
@@ -410,20 +336,8 @@ impl CatalogManager for KvBackendCatalogManager {
         let catalog = catalog.to_string();
         let schema = schema.to_string();
         let semaphore = Arc::new(Semaphore::new(CONCURRENCY));
-        let table_route_cache: Result<TableRouteCacheRef> =
-            self.cache_registry.get().context(CacheNotFoundSnafu {
-                name: "table_route_cache",
-            });
 
         common_runtime::spawn_global(async move {
-            let table_route_cache = match table_route_cache {
-                Ok(table_route_cache) => table_route_cache,
-                Err(e) => {
-                    let _ = tx.send(Err(e)).await;
-                    return;
-                }
-            };
-
             let table_id_stream = metadata_manager
                 .table_name_manager()
                 .tables(&catalog, &schema)
@@ -450,7 +364,6 @@ impl CatalogManager for KvBackendCatalogManager {
             let metadata_manager = metadata_manager.clone();
             let tx = tx.clone();
             let semaphore = semaphore.clone();
-            let table_route_cache = table_route_cache.clone();
             common_runtime::spawn_global(async move {
                 // we don't explicitly close the semaphore so just ignore the potential error.
                 let _ = semaphore.acquire().await;
@@ -468,16 +381,6 @@ impl CatalogManager for KvBackendCatalogManager {
                 };
 
                 for table in table_info_values.into_values().map(build_table) {
-                    let table = if let Ok(table) = table {
-                        Self::override_logical_table_partition_key_indices(
-                            &table_route_cache,
-                            metadata_manager.table_info_manager(),
-                            table,
-                        )
-                        .await
-                    } else {
-                        table
-                    };
                     if tx.send(table).await.is_err() {
                         return;
                     }
@@ -516,7 +419,6 @@ struct SystemCatalog {
     information_schema_provider: Arc<InformationSchemaProvider>,
     pg_catalog_provider: Arc<PGCatalogProvider>,
     backend: KvBackendRef,
-    process_manager: Option<ProcessManagerRef>,
 }
 
 impl SystemCatalog {
@@ -584,7 +486,6 @@ impl SystemCatalog {
                 catalog.to_string(),
                 self.catalog_manager.clone(),
                 Arc::new(FlowMetadataManager::new(self.backend.clone())),
-                self.process_manager.clone(),
             ))
         });
         information_schema_provider.table(table_name)
@@ -14,7 +14,6 @@
 
 #![feature(assert_matches)]
 #![feature(try_blocks)]
-#![feature(let_chains)]
 
 use std::any::Any;
 use std::fmt::{Debug, Formatter};
@@ -41,7 +40,6 @@ pub mod information_schema {
     pub use crate::system_schema::information_schema::*;
 }
 
-pub mod process_manager;
 pub mod table_source;
 
 #[async_trait::async_trait]
@@ -356,7 +356,6 @@ impl MemoryCatalogManager {
             catalog,
             Arc::downgrade(self) as Weak<dyn CatalogManager>,
             Arc::new(FlowMetadataManager::new(Arc::new(MemoryKvBackend::new()))),
-            None, // we don't need ProcessManager on regions server.
         );
         let information_schema = information_schema_provider.tables().clone();
 
@@ -34,20 +34,4 @@ lazy_static! {
         register_histogram!("greptime_catalog_kv_get", "catalog kv get").unwrap();
     pub static ref METRIC_CATALOG_KV_BATCH_GET: Histogram =
         register_histogram!("greptime_catalog_kv_batch_get", "catalog kv batch get").unwrap();
-
-    /// Count of running process in each catalog.
-    pub static ref PROCESS_LIST_COUNT: IntGaugeVec = register_int_gauge_vec!(
-        "greptime_process_list_count",
-        "Running process count per catalog",
-        &["catalog"]
-    )
-    .unwrap();
-
-    /// Count of killed process in each catalog.
-    pub static ref PROCESS_KILL_COUNT: IntCounterVec = register_int_counter_vec!(
-        "greptime_process_kill_count",
-        "Completed kill process requests count",
-        &["catalog"]
-    )
-    .unwrap();
 }
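The removed block registered two per-catalog process metrics. For reference, a standalone sketch of the same registration pattern with the `prometheus` crate (assumed crate versions: prometheus 0.13, lazy_static 1); the metric name, help text, and label are copied from the hunk above.

```rust
use lazy_static::lazy_static;
use prometheus::{register_int_gauge_vec, IntGaugeVec};

lazy_static! {
    /// Same shape as the removed PROCESS_LIST_COUNT: one gauge per
    /// catalog label value.
    static ref PROCESS_LIST_COUNT: IntGaugeVec = register_int_gauge_vec!(
        "greptime_process_list_count",
        "Running process count per catalog",
        &["catalog"]
    )
    .unwrap();
}

fn main() {
    PROCESS_LIST_COUNT.with_label_values(&["greptime"]).inc();
    assert_eq!(PROCESS_LIST_COUNT.with_label_values(&["greptime"]).get(), 1);
}
```

The removed `PROCESS_KILL_COUNT` counter follows the same pattern with `register_int_counter_vec!` and an `IntCounterVec` type.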
@@ -1,639 +0,0 @@
|
|||||||
// Copyright 2023 Greptime Team
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
use std::collections::hash_map::Entry;
|
|
||||||
use std::collections::HashMap;
|
|
||||||
use std::fmt::{Debug, Display, Formatter};
|
|
||||||
use std::sync::atomic::{AtomicU32, Ordering};
|
|
||||||
use std::sync::{Arc, RwLock};
|
|
||||||
use std::time::{Duration, Instant, UNIX_EPOCH};
|
|
||||||
|
|
||||||
use api::v1::frontend::{KillProcessRequest, ListProcessRequest, ProcessInfo};
|
|
||||||
use common_base::cancellation::CancellationHandle;
|
|
||||||
use common_frontend::selector::{FrontendSelector, MetaClientSelector};
|
|
||||||
use common_frontend::slow_query_event::SlowQueryEvent;
|
|
||||||
use common_telemetry::{debug, error, info, warn};
|
|
||||||
use common_time::util::current_time_millis;
|
|
||||||
use meta_client::MetaClientRef;
|
|
||||||
use promql_parser::parser::EvalStmt;
|
|
||||||
use rand::random;
|
|
||||||
use session::context::QueryContextRef;
|
|
||||||
use snafu::{ensure, OptionExt, ResultExt};
|
|
||||||
use sql::statements::statement::Statement;
|
|
||||||
use tokio::sync::mpsc::Sender;
|
|
||||||
|
|
||||||
use crate::error;
|
|
||||||
use crate::metrics::{PROCESS_KILL_COUNT, PROCESS_LIST_COUNT};
|
|
||||||
|
|
||||||
pub type ProcessId = u32;
|
|
||||||
pub type ProcessManagerRef = Arc<ProcessManager>;
|
|
||||||
|
|
||||||
/// Query process manager.
|
|
||||||
pub struct ProcessManager {
|
|
||||||
/// Local frontend server address,
|
|
||||||
server_addr: String,
|
|
||||||
/// Next process id for local queries.
|
|
||||||
next_id: AtomicU32,
|
|
||||||
/// Running process per catalog.
|
|
||||||
catalogs: RwLock<HashMap<String, HashMap<ProcessId, CancellableProcess>>>,
|
|
||||||
/// Frontend selector to locate frontend nodes.
|
|
||||||
frontend_selector: Option<MetaClientSelector>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Represents a parsed query statement, functionally equivalent to [query::parser::QueryStatement].
|
|
||||||
/// This enum is defined here to avoid cyclic dependencies with the query parser module.
|
|
||||||
#[derive(Debug, Clone)]
|
|
||||||
pub enum QueryStatement {
|
|
||||||
Sql(Statement),
|
|
||||||
Promql(EvalStmt),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Display for QueryStatement {
|
|
||||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
|
||||||
match self {
|
|
||||||
QueryStatement::Sql(stmt) => write!(f, "{}", stmt),
|
|
||||||
QueryStatement::Promql(eval_stmt) => write!(f, "{}", eval_stmt),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ProcessManager {
|
|
||||||
/// Create a [ProcessManager] instance with server address and kv client.
|
|
||||||
pub fn new(server_addr: String, meta_client: Option<MetaClientRef>) -> Self {
|
|
||||||
let frontend_selector = meta_client.map(MetaClientSelector::new);
|
|
||||||
Self {
|
|
||||||
server_addr,
|
|
||||||
next_id: Default::default(),
|
|
||||||
catalogs: Default::default(),
|
|
||||||
frontend_selector,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||

impl ProcessManager {
    /// Registers a submitted query, using the provided id if present.
    #[must_use]
    pub fn register_query(
        self: &Arc<Self>,
        catalog: String,
        schemas: Vec<String>,
        query: String,
        client: String,
        query_id: Option<ProcessId>,
        _slow_query_timer: Option<SlowQueryTimer>,
    ) -> Ticket {
        let id = query_id.unwrap_or_else(|| self.next_id.fetch_add(1, Ordering::Relaxed));
        let process = ProcessInfo {
            id,
            catalog: catalog.clone(),
            schemas,
            query,
            start_timestamp: current_time_millis(),
            client,
            frontend: self.server_addr.clone(),
        };
        let cancellation_handle = Arc::new(CancellationHandle::default());
        let cancellable_process = CancellableProcess::new(cancellation_handle.clone(), process);

        self.catalogs
            .write()
            .unwrap()
            .entry(catalog.clone())
            .or_default()
            .insert(id, cancellable_process);

        Ticket {
            catalog,
            manager: self.clone(),
            id,
            cancellation_handle,
            _slow_query_timer,
        }
    }

    /// Generates the next process id.
    pub fn next_id(&self) -> u32 {
        self.next_id.fetch_add(1, Ordering::Relaxed)
    }

    /// De-registers a query from the process list.
    pub fn deregister_query(&self, catalog: String, id: ProcessId) {
        if let Entry::Occupied(mut o) = self.catalogs.write().unwrap().entry(catalog) {
            let process = o.get_mut().remove(&id);
            debug!("Deregister process: {:?}", process);
            if o.get().is_empty() {
                o.remove();
            }
        }
    }

    /// Lists local running processes in the given catalog.
    pub fn local_processes(&self, catalog: Option<&str>) -> error::Result<Vec<ProcessInfo>> {
        let catalogs = self.catalogs.read().unwrap();
        let result = if let Some(catalog) = catalog {
            if let Some(catalogs) = catalogs.get(catalog) {
                catalogs.values().map(|p| p.process.clone()).collect()
            } else {
                vec![]
            }
        } else {
            catalogs
                .values()
                .flat_map(|v| v.values().map(|p| p.process.clone()))
                .collect()
        };
        Ok(result)
    }

    /// Lists all running processes in the cluster: local ones plus those reported
    /// by every other reachable frontend. Frontends that fail to answer are skipped.
    pub async fn list_all_processes(
        &self,
        catalog: Option<&str>,
    ) -> error::Result<Vec<ProcessInfo>> {
        let mut processes = vec![];
        if let Some(remote_frontend_selector) = self.frontend_selector.as_ref() {
            let frontends = remote_frontend_selector
                .select(|node| node.peer.addr != self.server_addr)
                .await
                .context(error::InvokeFrontendSnafu)?;
            for mut f in frontends {
                let result = f
                    .list_process(ListProcessRequest {
                        catalog: catalog.unwrap_or_default().to_string(),
                    })
                    .await
                    .context(error::InvokeFrontendSnafu);
                match result {
                    Ok(resp) => {
                        processes.extend(resp.processes);
                    }
                    Err(e) => {
                        warn!(e; "Skipping failing node: {:?}", f)
                    }
                }
            }
        }
        processes.extend(self.local_processes(catalog)?);
        Ok(processes)
    }

    /// Kills the query with the provided catalog and id, forwarding the request
    /// to the owning frontend when `server_addr` is not this node.
    pub async fn kill_process(
        &self,
        server_addr: String,
        catalog: String,
        id: ProcessId,
    ) -> error::Result<bool> {
        if server_addr == self.server_addr {
            self.kill_local_process(catalog, id).await
        } else {
            let mut nodes = self
                .frontend_selector
                .as_ref()
                .context(error::MetaClientMissingSnafu)?
                .select(|node| node.peer.addr == server_addr)
                .await
                .context(error::InvokeFrontendSnafu)?;
            ensure!(
                !nodes.is_empty(),
                error::FrontendNotFoundSnafu { addr: server_addr }
            );

            let request = KillProcessRequest {
                server_addr,
                catalog,
                process_id: id,
            };
            nodes[0]
                .kill_process(request)
                .await
                .context(error::InvokeFrontendSnafu)?;
            Ok(true)
        }
    }

    /// Kills a local query with the provided catalog and id.
    pub async fn kill_local_process(&self, catalog: String, id: ProcessId) -> error::Result<bool> {
        if let Some(catalogs) = self.catalogs.write().unwrap().get_mut(&catalog) {
            if let Some(process) = catalogs.remove(&id) {
                process.handle.cancel();
                info!(
                    "Killed process, catalog: {}, id: {:?}",
                    process.process.catalog, process.process.id
                );
                PROCESS_KILL_COUNT.with_label_values(&[&catalog]).inc();
                Ok(true)
            } else {
                debug!("Failed to kill process, id not found: {}", id);
                Ok(false)
            }
        } else {
            debug!("Failed to kill process, catalog not found: {}", catalog);
            Ok(false)
        }
    }
}
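
// A lifecycle sketch (not part of the original source; the address and query
// strings are invented for illustration). With `meta_client = None`, only the
// local process list is consulted:
//
//     let manager = Arc::new(ProcessManager::new("127.0.0.1:4001".to_string(), None));
//     let ticket = manager.register_query(
//         "greptime".to_string(),
//         vec!["public".to_string()],
//         "SELECT 1".to_string(),
//         "client-1".to_string(),
//         None,
//         None,
//     );
//     assert_eq!(manager.local_processes(None).unwrap().len(), 1);
//     drop(ticket); // deregisters the query
//
// Note that `catalogs` uses a std `RwLock`: every guard above is released
// before any `.await`, which is what keeps the async methods safe.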

pub struct Ticket {
    pub(crate) catalog: String,
    pub(crate) manager: ProcessManagerRef,
    pub(crate) id: ProcessId,
    pub cancellation_handle: Arc<CancellationHandle>,
    _slow_query_timer: Option<SlowQueryTimer>,
}

impl Drop for Ticket {
    fn drop(&mut self) {
        self.manager
            .deregister_query(std::mem::take(&mut self.catalog), self.id);
    }
}
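
// RAII note: because deregistration lives in `Drop`, a query leaves the
// process list on every exit path (success, error, cancellation, panic
// unwind) with no explicit cleanup required at call sites.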

struct CancellableProcess {
    handle: Arc<CancellationHandle>,
    process: ProcessInfo,
}

impl Drop for CancellableProcess {
    fn drop(&mut self) {
        PROCESS_LIST_COUNT
            .with_label_values(&[&self.process.catalog])
            .dec();
    }
}

impl CancellableProcess {
    fn new(handle: Arc<CancellationHandle>, process: ProcessInfo) -> Self {
        PROCESS_LIST_COUNT
            .with_label_values(&[&process.catalog])
            .inc();
        Self { handle, process }
    }
}

impl Debug for CancellableProcess {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("CancellableProcess")
            .field("cancelled", &self.handle.is_cancelled())
            .field("process", &self.process)
            .finish()
    }
}

/// SlowQueryTimer logs a slow query when it is dropped.
/// In drop(), it checks whether the query exceeded the slow-query threshold
/// and, if so, sends a [SlowQueryEvent] to the handler.
pub struct SlowQueryTimer {
    start: Instant,
    stmt: QueryStatement,
    query_ctx: QueryContextRef,
    threshold: Option<Duration>,
    sample_ratio: Option<f64>,
    tx: Sender<SlowQueryEvent>,
}

impl SlowQueryTimer {
    pub fn new(
        stmt: QueryStatement,
        query_ctx: QueryContextRef,
        threshold: Option<Duration>,
        sample_ratio: Option<f64>,
        tx: Sender<SlowQueryEvent>,
    ) -> Self {
        Self {
            start: Instant::now(),
            stmt,
            query_ctx,
            threshold,
            sample_ratio,
            tx,
        }
    }
}

impl SlowQueryTimer {
    fn send_slow_query_event(&self, elapsed: Duration, threshold: Duration) {
        let mut slow_query_event = SlowQueryEvent {
            cost: elapsed.as_millis() as u64,
            threshold: threshold.as_millis() as u64,
            query: "".to_string(),
            query_ctx: self.query_ctx.clone(),

            // The following fields are only used for PromQL queries.
            is_promql: false,
            promql_range: None,
            promql_step: None,
            promql_start: None,
            promql_end: None,
        };

        match &self.stmt {
            QueryStatement::Promql(stmt) => {
                slow_query_event.is_promql = true;
                slow_query_event.query = stmt.expr.to_string();
                slow_query_event.promql_step = Some(stmt.interval.as_millis() as u64);

                let start = stmt
                    .start
                    .duration_since(UNIX_EPOCH)
                    .unwrap_or_default()
                    .as_millis() as i64;

                let end = stmt
                    .end
                    .duration_since(UNIX_EPOCH)
                    .unwrap_or_default()
                    .as_millis() as i64;

                slow_query_event.promql_range = Some((end - start) as u64);
                slow_query_event.promql_start = Some(start);
                slow_query_event.promql_end = Some(end);
            }
            QueryStatement::Sql(stmt) => {
                slow_query_event.query = stmt.to_string();
            }
        }

        // Send the SlowQueryEvent to the handler.
        if let Err(e) = self.tx.try_send(slow_query_event) {
            error!(e; "Failed to send slow query event");
        }
    }
}

impl Drop for SlowQueryTimer {
    fn drop(&mut self) {
        if let Some(threshold) = self.threshold {
            // Calculate the elapsed duration since the timer was created.
            let elapsed = self.start.elapsed();
            if elapsed > threshold {
                if let Some(ratio) = self.sample_ratio {
                    // Only capture a portion of slow queries based on sample_ratio.
                    // Generate a random number in [0, 1) and compare it with sample_ratio.
                    if ratio >= 1.0 || random::<f64>() <= ratio {
                        self.send_slow_query_event(elapsed, threshold);
                    }
                } else {
                    // Capture all slow queries if sample_ratio is not set.
                    self.send_slow_query_event(elapsed, threshold);
                }
            }
        }
    }
}
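
// Wiring sketch (the channel capacity, threshold and ratio below are invented
// for illustration, not taken from the original source):
//
//     let (tx, mut rx) = tokio::sync::mpsc::channel::<SlowQueryEvent>(1024);
//     let timer = SlowQueryTimer::new(
//         stmt,                          // the QueryStatement being executed
//         query_ctx,                     // session context of the query
//         Some(Duration::from_secs(1)),  // slow-query threshold
//         Some(0.1),                     // record roughly 10% of slow queries
//         tx,
//     );
//     // Keep `timer` alive for the duration of the query; if it drops after
//     // more than one second, an event is sent with probability ~0.1.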

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use crate::process_manager::ProcessManager;

    #[tokio::test]
    async fn test_register_query() {
        let process_manager = Arc::new(ProcessManager::new("127.0.0.1:8000".to_string(), None));
        let ticket = process_manager.clone().register_query(
            "public".to_string(),
            vec!["test".to_string()],
            "SELECT * FROM table".to_string(),
            "".to_string(),
            None,
            None,
        );

        let running_processes = process_manager.local_processes(None).unwrap();
        assert_eq!(running_processes.len(), 1);
        assert_eq!(&running_processes[0].frontend, "127.0.0.1:8000");
        assert_eq!(running_processes[0].id, ticket.id);
        assert_eq!(&running_processes[0].query, "SELECT * FROM table");

        drop(ticket);
        assert_eq!(process_manager.local_processes(None).unwrap().len(), 0);
    }

    #[tokio::test]
    async fn test_register_query_with_custom_id() {
        let process_manager = Arc::new(ProcessManager::new("127.0.0.1:8000".to_string(), None));
        let custom_id = 12345;

        let ticket = process_manager.clone().register_query(
            "public".to_string(),
            vec!["test".to_string()],
            "SELECT * FROM table".to_string(),
            "client1".to_string(),
            Some(custom_id),
            None,
        );

        assert_eq!(ticket.id, custom_id);

        let running_processes = process_manager.local_processes(None).unwrap();
        assert_eq!(running_processes.len(), 1);
        assert_eq!(running_processes[0].id, custom_id);
        assert_eq!(&running_processes[0].client, "client1");
    }

    #[tokio::test]
    async fn test_multiple_queries_same_catalog() {
        let process_manager = Arc::new(ProcessManager::new("127.0.0.1:8000".to_string(), None));

        let ticket1 = process_manager.clone().register_query(
            "public".to_string(),
            vec!["schema1".to_string()],
            "SELECT * FROM table1".to_string(),
            "client1".to_string(),
            None,
            None,
        );

        let ticket2 = process_manager.clone().register_query(
            "public".to_string(),
            vec!["schema2".to_string()],
            "SELECT * FROM table2".to_string(),
            "client2".to_string(),
            None,
            None,
        );

        let running_processes = process_manager.local_processes(Some("public")).unwrap();
        assert_eq!(running_processes.len(), 2);

        // Verify both processes are present
        let ids: Vec<u32> = running_processes.iter().map(|p| p.id).collect();
        assert!(ids.contains(&ticket1.id));
        assert!(ids.contains(&ticket2.id));
    }

    #[tokio::test]
    async fn test_multiple_catalogs() {
        let process_manager = Arc::new(ProcessManager::new("127.0.0.1:8000".to_string(), None));

        let _ticket1 = process_manager.clone().register_query(
            "catalog1".to_string(),
            vec!["schema1".to_string()],
            "SELECT * FROM table1".to_string(),
            "client1".to_string(),
            None,
            None,
        );

        let _ticket2 = process_manager.clone().register_query(
            "catalog2".to_string(),
            vec!["schema2".to_string()],
            "SELECT * FROM table2".to_string(),
            "client2".to_string(),
            None,
            None,
        );

        // Test listing processes for specific catalog
        let catalog1_processes = process_manager.local_processes(Some("catalog1")).unwrap();
        assert_eq!(catalog1_processes.len(), 1);
        assert_eq!(&catalog1_processes[0].catalog, "catalog1");

        let catalog2_processes = process_manager.local_processes(Some("catalog2")).unwrap();
        assert_eq!(catalog2_processes.len(), 1);
        assert_eq!(&catalog2_processes[0].catalog, "catalog2");

        // Test listing all processes
        let all_processes = process_manager.local_processes(None).unwrap();
        assert_eq!(all_processes.len(), 2);
    }

    #[tokio::test]
    async fn test_deregister_query() {
        let process_manager = Arc::new(ProcessManager::new("127.0.0.1:8000".to_string(), None));

        let ticket = process_manager.clone().register_query(
            "public".to_string(),
            vec!["test".to_string()],
            "SELECT * FROM table".to_string(),
            "client1".to_string(),
            None,
            None,
        );
        assert_eq!(process_manager.local_processes(None).unwrap().len(), 1);
        process_manager.deregister_query("public".to_string(), ticket.id);
        assert_eq!(process_manager.local_processes(None).unwrap().len(), 0);
    }

    #[tokio::test]
    async fn test_cancellation_handle() {
        let process_manager = Arc::new(ProcessManager::new("127.0.0.1:8000".to_string(), None));

        let ticket = process_manager.clone().register_query(
            "public".to_string(),
            vec!["test".to_string()],
            "SELECT * FROM table".to_string(),
            "client1".to_string(),
            None,
            None,
        );

        assert!(!ticket.cancellation_handle.is_cancelled());
        ticket.cancellation_handle.cancel();
        assert!(ticket.cancellation_handle.is_cancelled());
    }

    #[tokio::test]
    async fn test_kill_local_process() {
        let process_manager = Arc::new(ProcessManager::new("127.0.0.1:8000".to_string(), None));

        let ticket = process_manager.clone().register_query(
            "public".to_string(),
            vec!["test".to_string()],
            "SELECT * FROM table".to_string(),
            "client1".to_string(),
            None,
            None,
        );
        assert!(!ticket.cancellation_handle.is_cancelled());
        let killed = process_manager
            .kill_process(
                "127.0.0.1:8000".to_string(),
                "public".to_string(),
                ticket.id,
            )
            .await
            .unwrap();

        assert!(killed);
        assert_eq!(process_manager.local_processes(None).unwrap().len(), 0);
    }

    #[tokio::test]
    async fn test_kill_nonexistent_process() {
        let process_manager = Arc::new(ProcessManager::new("127.0.0.1:8000".to_string(), None));
        let killed = process_manager
            .kill_process("127.0.0.1:8000".to_string(), "public".to_string(), 999)
            .await
            .unwrap();
        assert!(!killed);
    }

    #[tokio::test]
    async fn test_kill_process_nonexistent_catalog() {
        let process_manager = Arc::new(ProcessManager::new("127.0.0.1:8000".to_string(), None));
        let killed = process_manager
            .kill_process("127.0.0.1:8000".to_string(), "nonexistent".to_string(), 1)
            .await
            .unwrap();
        assert!(!killed);
    }

    #[tokio::test]
    async fn test_process_info_fields() {
        let process_manager = Arc::new(ProcessManager::new("127.0.0.1:8000".to_string(), None));

        let _ticket = process_manager.clone().register_query(
            "test_catalog".to_string(),
            vec!["schema1".to_string(), "schema2".to_string()],
            "SELECT COUNT(*) FROM users WHERE age > 18".to_string(),
            "test_client".to_string(),
            Some(42),
            None,
        );

        let processes = process_manager.local_processes(None).unwrap();
        assert_eq!(processes.len(), 1);

        let process = &processes[0];
        assert_eq!(process.id, 42);
        assert_eq!(&process.catalog, "test_catalog");
        assert_eq!(process.schemas, vec!["schema1", "schema2"]);
        assert_eq!(&process.query, "SELECT COUNT(*) FROM users WHERE age > 18");
        assert_eq!(&process.client, "test_client");
        assert_eq!(&process.frontend, "127.0.0.1:8000");
        assert!(process.start_timestamp > 0);
    }

    #[tokio::test]
    async fn test_ticket_drop_deregisters_process() {
        let process_manager = Arc::new(ProcessManager::new("127.0.0.1:8000".to_string(), None));

        {
            let _ticket = process_manager.clone().register_query(
                "public".to_string(),
                vec!["test".to_string()],
                "SELECT * FROM table".to_string(),
                "client1".to_string(),
                None,
                None,
            );

            // Process should be registered
            assert_eq!(process_manager.local_processes(None).unwrap().len(), 1);
        } // ticket goes out of scope here

        // Process should be automatically deregistered
        assert_eq!(process_manager.local_processes(None).unwrap().len(), 0);
    }
}
@@ -19,7 +19,6 @@ mod information_memory_table;
 pub mod key_column_usage;
 mod partitions;
 mod procedure_info;
-mod process_list;
 pub mod region_peers;
 mod region_statistics;
 mod runtime_metrics;
@@ -43,7 +42,6 @@ use common_recordbatch::SendableRecordBatchStream;
 use datatypes::schema::SchemaRef;
 use lazy_static::lazy_static;
 use paste::paste;
-use process_list::InformationSchemaProcessList;
 use store_api::storage::{ScanRequest, TableId};
 use table::metadata::TableType;
 use table::TableRef;
@@ -52,7 +50,6 @@ use views::InformationSchemaViews;
 
 use self::columns::InformationSchemaColumns;
 use crate::error::{Error, Result};
-use crate::process_manager::ProcessManagerRef;
 use crate::system_schema::information_schema::cluster_info::InformationSchemaClusterInfo;
 use crate::system_schema::information_schema::flows::InformationSchemaFlows;
 use crate::system_schema::information_schema::information_memory_table::get_schema_columns;
@@ -116,7 +113,6 @@ macro_rules! setup_memory_table {
 pub struct InformationSchemaProvider {
     catalog_name: String,
     catalog_manager: Weak<dyn CatalogManager>,
-    process_manager: Option<ProcessManagerRef>,
     flow_metadata_manager: Arc<FlowMetadataManager>,
     tables: HashMap<String, TableRef>,
 }
@@ -211,10 +207,6 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
                 self.catalog_manager.clone(),
             ),
         ) as _),
-            PROCESS_LIST => self
-                .process_manager
-                .as_ref()
-                .map(|p| Arc::new(InformationSchemaProcessList::new(p.clone())) as _),
             _ => None,
         }
     }
@@ -225,13 +217,11 @@ impl InformationSchemaProvider {
         catalog_name: String,
         catalog_manager: Weak<dyn CatalogManager>,
         flow_metadata_manager: Arc<FlowMetadataManager>,
-        process_manager: Option<ProcessManagerRef>,
     ) -> Self {
         let mut provider = Self {
             catalog_name,
             catalog_manager,
             flow_metadata_manager,
-            process_manager,
             tables: HashMap::new(),
         };
 
@@ -287,9 +277,6 @@ impl InformationSchemaProvider {
             self.build_table(TABLE_CONSTRAINTS).unwrap(),
         );
         tables.insert(FLOWS.to_string(), self.build_table(FLOWS).unwrap());
-        if let Some(process_list) = self.build_table(PROCESS_LIST) {
-            tables.insert(PROCESS_LIST.to_string(), process_list);
-        }
         // Add memory tables
         for name in MEMORY_TABLES.iter() {
             tables.insert((*name).to_string(), self.build_table(name).expect(name));
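
For reference, the wiring these hunks delete is an Option-gated registration: the
PROCESS_LIST table is only built on frontends that actually hold a ProcessManager.
A minimal sketch of the pattern (with `process_manager: Option<ProcessManagerRef>`
as in the struct above):

    let table = self
        .process_manager
        .as_ref()
        .map(|p| Arc::new(InformationSchemaProcessList::new(p.clone())) as _);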
@@ -1,189 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::sync::Arc;
-
-use common_catalog::consts::INFORMATION_SCHEMA_PROCESS_LIST_TABLE_ID;
-use common_error::ext::BoxedError;
-use common_frontend::DisplayProcessId;
-use common_recordbatch::adapter::RecordBatchStreamAdapter;
-use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
-use common_time::util::current_time_millis;
-use common_time::{Duration, Timestamp};
-use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
-use datatypes::prelude::ConcreteDataType as CDT;
-use datatypes::scalars::ScalarVectorBuilder;
-use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
-use datatypes::value::Value;
-use datatypes::vectors::{
-    DurationMillisecondVectorBuilder, StringVectorBuilder, TimestampMillisecondVectorBuilder,
-    VectorRef,
-};
-use snafu::ResultExt;
-use store_api::storage::{ScanRequest, TableId};
-
-use crate::error::{self, InternalSnafu};
-use crate::information_schema::Predicates;
-use crate::process_manager::ProcessManagerRef;
-use crate::system_schema::information_schema::InformationTable;
-
-/// Column names of `information_schema.process_list`
-const ID: &str = "id";
-const CATALOG: &str = "catalog";
-const SCHEMAS: &str = "schemas";
-const QUERY: &str = "query";
-const CLIENT: &str = "client";
-const FRONTEND: &str = "frontend";
-const START_TIMESTAMP: &str = "start_timestamp";
-const ELAPSED_TIME: &str = "elapsed_time";
-
-/// `information_schema.process_list` table implementation that tracks running
-/// queries in current cluster.
-pub struct InformationSchemaProcessList {
-    schema: SchemaRef,
-    process_manager: ProcessManagerRef,
-}
-
-impl InformationSchemaProcessList {
-    pub fn new(process_manager: ProcessManagerRef) -> Self {
-        Self {
-            schema: Self::schema(),
-            process_manager,
-        }
-    }
-
-    fn schema() -> SchemaRef {
-        Arc::new(Schema::new(vec![
-            ColumnSchema::new(ID, CDT::string_datatype(), false),
-            ColumnSchema::new(CATALOG, CDT::string_datatype(), false),
-            ColumnSchema::new(SCHEMAS, CDT::string_datatype(), false),
-            ColumnSchema::new(QUERY, CDT::string_datatype(), false),
-            ColumnSchema::new(CLIENT, CDT::string_datatype(), false),
-            ColumnSchema::new(FRONTEND, CDT::string_datatype(), false),
-            ColumnSchema::new(
-                START_TIMESTAMP,
-                CDT::timestamp_millisecond_datatype(),
-                false,
-            ),
-            ColumnSchema::new(ELAPSED_TIME, CDT::duration_millisecond_datatype(), false),
-        ]))
-    }
-}
-
-impl InformationTable for InformationSchemaProcessList {
-    fn table_id(&self) -> TableId {
-        INFORMATION_SCHEMA_PROCESS_LIST_TABLE_ID
-    }
-
-    fn table_name(&self) -> &'static str {
-        "process_list"
-    }
-
-    fn schema(&self) -> SchemaRef {
-        self.schema.clone()
-    }
-
-    fn to_stream(&self, request: ScanRequest) -> error::Result<SendableRecordBatchStream> {
-        let process_manager = self.process_manager.clone();
-        let stream = Box::pin(DfRecordBatchStreamAdapter::new(
-            self.schema.arrow_schema().clone(),
-            futures::stream::once(async move {
-                make_process_list(process_manager, request)
-                    .await
-                    .map(RecordBatch::into_df_record_batch)
-                    .map_err(|e| datafusion::error::DataFusionError::External(Box::new(e)))
-            }),
-        ));
-
-        Ok(Box::pin(
-            RecordBatchStreamAdapter::try_new(stream)
-                .map_err(BoxedError::new)
-                .context(InternalSnafu)?,
-        ))
-    }
-}
-
-/// Build running process list.
-async fn make_process_list(
-    process_manager: ProcessManagerRef,
-    request: ScanRequest,
-) -> error::Result<RecordBatch> {
-    let predicates = Predicates::from_scan_request(&Some(request));
-    let current_time = current_time_millis();
-    // todo(hl): find a way to extract user catalog to filter queries from other users.
-    let queries = process_manager.list_all_processes(None).await?;
-
-    let mut id_builder = StringVectorBuilder::with_capacity(queries.len());
-    let mut catalog_builder = StringVectorBuilder::with_capacity(queries.len());
-    let mut schemas_builder = StringVectorBuilder::with_capacity(queries.len());
-    let mut query_builder = StringVectorBuilder::with_capacity(queries.len());
-    let mut client_builder = StringVectorBuilder::with_capacity(queries.len());
-    let mut frontend_builder = StringVectorBuilder::with_capacity(queries.len());
-    let mut start_time_builder = TimestampMillisecondVectorBuilder::with_capacity(queries.len());
-    let mut elapsed_time_builder = DurationMillisecondVectorBuilder::with_capacity(queries.len());
-
-    for process in queries {
-        let display_id = DisplayProcessId {
-            server_addr: process.frontend.to_string(),
-            id: process.id,
-        }
-        .to_string();
-        let schemas = process.schemas.join(",");
-        let id = Value::from(display_id);
-        let catalog = Value::from(process.catalog);
-        let schemas = Value::from(schemas);
-        let query = Value::from(process.query);
-        let client = Value::from(process.client);
-        let frontend = Value::from(process.frontend);
-        let start_timestamp = Value::from(Timestamp::new_millisecond(process.start_timestamp));
-        let elapsed_time = Value::from(Duration::new_millisecond(
-            current_time - process.start_timestamp,
-        ));
-        let row = [
-            (ID, &id),
-            (CATALOG, &catalog),
-            (SCHEMAS, &schemas),
-            (QUERY, &query),
-            (CLIENT, &client),
-            (FRONTEND, &frontend),
-            (START_TIMESTAMP, &start_timestamp),
-            (ELAPSED_TIME, &elapsed_time),
-        ];
-        if predicates.eval(&row) {
-            id_builder.push(id.as_string().as_deref());
-            catalog_builder.push(catalog.as_string().as_deref());
-            schemas_builder.push(schemas.as_string().as_deref());
-            query_builder.push(query.as_string().as_deref());
-            client_builder.push(client.as_string().as_deref());
-            frontend_builder.push(frontend.as_string().as_deref());
-            start_time_builder.push(start_timestamp.as_timestamp().map(|t| t.value().into()));
-            elapsed_time_builder.push(elapsed_time.as_duration().map(|d| d.value().into()));
-        }
-    }
-
-    RecordBatch::new(
-        InformationSchemaProcessList::schema(),
-        vec![
-            Arc::new(id_builder.finish()) as VectorRef,
-            Arc::new(catalog_builder.finish()) as VectorRef,
-            Arc::new(schemas_builder.finish()) as VectorRef,
-            Arc::new(query_builder.finish()) as VectorRef,
-            Arc::new(client_builder.finish()) as VectorRef,
-            Arc::new(frontend_builder.finish()) as VectorRef,
-            Arc::new(start_time_builder.finish()) as VectorRef,
-            Arc::new(elapsed_time_builder.finish()) as VectorRef,
-        ],
-    )
-    .context(error::CreateRecordBatchSnafu)
-}
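
A note on the deleted table above: its `id` column is rendered through
`DisplayProcessId`, which combines the frontend address with the numeric process
id so that ids stay unambiguous across frontends. A minimal sketch of that
assumption (the exact Display impl lives in common_frontend; the address is
invented):

    let display_id = DisplayProcessId {
        server_addr: "127.0.0.1:4001".to_string(),
        id: 42,
    }
    .to_string();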
@@ -40,7 +40,6 @@ const REGION_ID: &str = "region_id";
 const TABLE_ID: &str = "table_id";
 const REGION_NUMBER: &str = "region_number";
 const REGION_ROWS: &str = "region_rows";
-const WRITTEN_BYTES: &str = "written_bytes_since_open";
 const DISK_SIZE: &str = "disk_size";
 const MEMTABLE_SIZE: &str = "memtable_size";
 const MANIFEST_SIZE: &str = "manifest_size";
@@ -57,7 +56,6 @@ const INIT_CAPACITY: usize = 42;
 /// - `table_id`: The table id.
 /// - `region_number`: The region number.
 /// - `region_rows`: The number of rows in region.
-/// - `written_bytes_since_open`: The total bytes written of the region since region opened.
 /// - `memtable_size`: The memtable size in bytes.
 /// - `disk_size`: The approximate disk size in bytes.
 /// - `manifest_size`: The manifest size in bytes.
@@ -85,7 +83,6 @@ impl InformationSchemaRegionStatistics {
             ColumnSchema::new(TABLE_ID, ConcreteDataType::uint32_datatype(), false),
             ColumnSchema::new(REGION_NUMBER, ConcreteDataType::uint32_datatype(), false),
             ColumnSchema::new(REGION_ROWS, ConcreteDataType::uint64_datatype(), true),
-            ColumnSchema::new(WRITTEN_BYTES, ConcreteDataType::uint64_datatype(), true),
             ColumnSchema::new(DISK_SIZE, ConcreteDataType::uint64_datatype(), true),
             ColumnSchema::new(MEMTABLE_SIZE, ConcreteDataType::uint64_datatype(), true),
             ColumnSchema::new(MANIFEST_SIZE, ConcreteDataType::uint64_datatype(), true),
@@ -148,7 +145,6 @@ struct InformationSchemaRegionStatisticsBuilder {
     table_ids: UInt32VectorBuilder,
     region_numbers: UInt32VectorBuilder,
     region_rows: UInt64VectorBuilder,
-    written_bytes: UInt64VectorBuilder,
     disk_sizes: UInt64VectorBuilder,
     memtable_sizes: UInt64VectorBuilder,
     manifest_sizes: UInt64VectorBuilder,
@@ -167,7 +163,6 @@ impl InformationSchemaRegionStatisticsBuilder {
             table_ids: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
             region_numbers: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
             region_rows: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
-            written_bytes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
             disk_sizes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
             memtable_sizes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
             manifest_sizes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
@@ -198,7 +193,6 @@ impl InformationSchemaRegionStatisticsBuilder {
             (TABLE_ID, &Value::from(region_stat.id.table_id())),
             (REGION_NUMBER, &Value::from(region_stat.id.region_number())),
             (REGION_ROWS, &Value::from(region_stat.num_rows)),
-            (WRITTEN_BYTES, &Value::from(region_stat.written_bytes)),
             (DISK_SIZE, &Value::from(region_stat.approximate_bytes)),
             (MEMTABLE_SIZE, &Value::from(region_stat.memtable_size)),
             (MANIFEST_SIZE, &Value::from(region_stat.manifest_size)),
@@ -217,7 +211,6 @@ impl InformationSchemaRegionStatisticsBuilder {
         self.region_numbers
             .push(Some(region_stat.id.region_number()));
         self.region_rows.push(Some(region_stat.num_rows));
-        self.written_bytes.push(Some(region_stat.written_bytes));
         self.disk_sizes.push(Some(region_stat.approximate_bytes));
         self.memtable_sizes.push(Some(region_stat.memtable_size));
         self.manifest_sizes.push(Some(region_stat.manifest_size));
@@ -233,7 +226,6 @@ impl InformationSchemaRegionStatisticsBuilder {
             Arc::new(self.table_ids.finish()),
             Arc::new(self.region_numbers.finish()),
             Arc::new(self.region_rows.finish()),
-            Arc::new(self.written_bytes.finish()),
             Arc::new(self.disk_sizes.finish()),
             Arc::new(self.memtable_sizes.finish()),
             Arc::new(self.manifest_sizes.finish()),
@@ -47,4 +47,3 @@ pub const VIEWS: &str = "views";
 pub const FLOWS: &str = "flows";
 pub const PROCEDURE_INFO: &str = "procedure_info";
 pub const REGION_STATISTICS: &str = "region_statistics";
-pub const PROCESS_LIST: &str = "process_list";
@@ -328,7 +328,6 @@ mod tests {
             backend.clone(),
             layered_cache_registry,
             None,
-            None,
         );
         let table_metadata_manager = TableMetadataManager::new(backend);
         let mut view_info = common_meta::key::test_utils::new_test_table_info(1024, vec![]);
@@ -16,7 +16,6 @@ mysql_kvbackend = ["common-meta/mysql_kvbackend", "meta-srv/mysql_kvbackend"]
 workspace = true
 
 [dependencies]
-async-stream.workspace = true
 async-trait.workspace = true
 auth.workspace = true
 base64.workspace = true
@@ -43,6 +42,7 @@ common-time.workspace = true
 common-version.workspace = true
 common-wal.workspace = true
 datatypes.workspace = true
+either = "1.8"
 etcd-client.workspace = true
 futures.workspace = true
 humantime.workspace = true
@@ -50,7 +50,6 @@ meta-client.workspace = true
 meta-srv.workspace = true
 nu-ansi-term = "0.46"
 object-store.workspace = true
-operator.workspace = true
 query.workspace = true
 rand.workspace = true
 reqwest.workspace = true
@@ -66,7 +65,6 @@ tokio.workspace = true
 tracing-appender.workspace = true
 
 [dev-dependencies]
-common-meta = { workspace = true, features = ["testing"] }
 common-version.workspace = true
 serde.workspace = true
 tempfile.workspace = true
@@ -160,7 +160,6 @@ fn create_table_info(table_id: TableId, table_name: TableName) -> RawTableInfo {
         options: Default::default(),
         region_numbers: (1..=100).collect(),
         partition_key_indices: vec![],
-        column_ids: vec![],
     };
 
     RawTableInfo {
@@ -1,39 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-mod export;
-mod import;
-
-use clap::Subcommand;
-use common_error::ext::BoxedError;
-
-use crate::data::export::ExportCommand;
-use crate::data::import::ImportCommand;
-use crate::Tool;
-
-/// Command for data operations including exporting data from and importing data into GreptimeDB.
-#[derive(Subcommand)]
-pub enum DataCommand {
-    Export(ExportCommand),
-    Import(ImportCommand),
-}
-
-impl DataCommand {
-    pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> {
-        match self {
-            DataCommand::Export(cmd) => cmd.build().await,
-            DataCommand::Import(cmd) => cmd.build().await,
-        }
-    }
-}
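
For context, a `Subcommand` enum like the deleted `DataCommand` is normally
mounted on a top-level parser; a minimal sketch (the `Cli` struct here is an
assumption for illustration, not part of this repository):

    use clap::Parser;

    #[derive(Parser)]
    struct Cli {
        #[clap(subcommand)]
        cmd: DataCommand,
    }

    // Cli::parse().cmd.build().await? then runs the selected tool.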
@@ -17,10 +17,8 @@ use std::any::Any;
 use common_error::ext::{BoxedError, ErrorExt};
 use common_error::status_code::StatusCode;
 use common_macro::stack_trace_debug;
-use common_meta::peer::Peer;
 use object_store::Error as ObjectStoreError;
 use snafu::{Location, Snafu};
-use store_api::storage::TableId;
 
 #[derive(Snafu)]
 #[snafu(visibility(pub))]
@@ -32,7 +30,6 @@ pub enum Error {
         location: Location,
         msg: String,
     },
-
     #[snafu(display("Failed to create default catalog and schema"))]
     InitMetadata {
         #[snafu(implicit)]
@@ -75,20 +72,6 @@ pub enum Error {
         source: common_meta::error::Error,
     },
 
-    #[snafu(display("Failed to get table metadata"))]
-    TableMetadata {
-        #[snafu(implicit)]
-        location: Location,
-        source: common_meta::error::Error,
-    },
-
-    #[snafu(display("Unexpected error: {}", msg))]
-    Unexpected {
-        msg: String,
-        #[snafu(implicit)]
-        location: Location,
-    },
-
     #[snafu(display("Missing config, msg: {}", msg))]
     MissingConfig {
         msg: String,
@@ -238,13 +221,6 @@ pub enum Error {
         location: Location,
     },
 
-    #[snafu(display("Table not found: {table_id}"))]
-    TableNotFound {
-        table_id: TableId,
-        #[snafu(implicit)]
-        location: Location,
-    },
-
     #[snafu(display("OpenDAL operator failed"))]
     OpenDal {
         #[snafu(implicit)]
@@ -252,25 +228,22 @@ pub enum Error {
         #[snafu(source)]
         error: ObjectStoreError,
     },
 
     #[snafu(display("S3 config need be set"))]
     S3ConfigNotSet {
         #[snafu(implicit)]
         location: Location,
     },
 
     #[snafu(display("Output directory not set"))]
     OutputDirNotSet {
         #[snafu(implicit)]
         location: Location,
     },
 
-    #[snafu(display("Empty store addresses"))]
-    EmptyStoreAddrs {
+    #[snafu(display("KV backend not set: {}", backend))]
+    KvBackendNotSet {
+        backend: String,
         #[snafu(implicit)]
         location: Location,
     },
 
     #[snafu(display("Unsupported memory backend"))]
     UnsupportedMemoryBackend {
         #[snafu(implicit)]
@@ -283,36 +256,6 @@ pub enum Error {
         #[snafu(implicit)]
         location: Location,
     },
-
-    #[snafu(display("Invalid arguments: {}", msg))]
-    InvalidArguments {
-        msg: String,
-        #[snafu(implicit)]
-        location: Location,
-    },
-
-    #[snafu(display("Failed to init backend"))]
-    InitBackend {
-        #[snafu(implicit)]
-        location: Location,
-        #[snafu(source)]
-        error: ObjectStoreError,
-    },
-
-    #[snafu(display("Covert column schemas to defs failed"))]
-    CovertColumnSchemasToDefs {
-        #[snafu(implicit)]
-        location: Location,
-        source: operator::error::Error,
-    },
-
-    #[snafu(display("Failed to send request to datanode: {}", peer))]
-    SendRequestToDatanode {
-        peer: Peer,
-        #[snafu(implicit)]
-        location: Location,
-        source: common_meta::error::Error,
-    },
 }
 
 pub type Result<T> = std::result::Result<T, Error>;
@@ -320,9 +263,9 @@ pub type Result<T> = std::result::Result<T, Error>;
 impl ErrorExt for Error {
     fn status_code(&self) -> StatusCode {
         match self {
-            Error::InitMetadata { source, .. }
-            | Error::InitDdlManager { source, .. }
-            | Error::TableMetadata { source, .. } => source.status_code(),
+            Error::InitMetadata { source, .. } | Error::InitDdlManager { source, .. } => {
+                source.status_code()
+            }
 
             Error::MissingConfig { .. }
             | Error::LoadLayeredConfig { .. }
@@ -333,12 +276,8 @@ impl ErrorExt for Error {
             | Error::EmptyResult { .. }
             | Error::InvalidFilePath { .. }
             | Error::UnsupportedMemoryBackend { .. }
-            | Error::InvalidArguments { .. }
             | Error::ParseProxyOpts { .. } => StatusCode::InvalidArguments,
 
-            Error::CovertColumnSchemasToDefs { source, .. } => source.status_code(),
-            Error::SendRequestToDatanode { source, .. } => source.status_code(),
-
             Error::StartProcedureManager { source, .. }
             | Error::StopProcedureManager { source, .. } => source.status_code(),
             Error::StartWalOptionsAllocator { source, .. } => source.status_code(),
@@ -346,7 +285,6 @@ impl ErrorExt for Error {
             Error::ParseSql { source, .. } | Error::PlanStatement { source, .. } => {
                 source.status_code()
             }
-            Error::Unexpected { .. } => StatusCode::Unexpected,
 
             Error::SerdeJson { .. }
             | Error::FileIo { .. }
|
|||||||
| Error::BuildClient { .. } => StatusCode::Unexpected,
|
| Error::BuildClient { .. } => StatusCode::Unexpected,
|
||||||
|
|
||||||
Error::Other { source, .. } => source.status_code(),
|
Error::Other { source, .. } => source.status_code(),
|
||||||
Error::OpenDal { .. } | Error::InitBackend { .. } => StatusCode::Internal,
|
Error::OpenDal { .. } => StatusCode::Internal,
|
||||||
Error::S3ConfigNotSet { .. }
|
Error::S3ConfigNotSet { .. }
|
||||||
| Error::OutputDirNotSet { .. }
|
| Error::OutputDirNotSet { .. }
|
||||||
| Error::EmptyStoreAddrs { .. } => StatusCode::InvalidArguments,
|
| Error::KvBackendNotSet { .. } => StatusCode::InvalidArguments,
|
||||||
|
|
||||||
Error::BuildRuntime { source, .. } => source.status_code(),
|
Error::BuildRuntime { source, .. } => source.status_code(),
|
||||||
|
|
||||||
Error::CacheRequired { .. } | Error::BuildCacheRegistry { .. } => StatusCode::Internal,
|
Error::CacheRequired { .. } | Error::BuildCacheRegistry { .. } => StatusCode::Internal,
|
||||||
Error::MetaClientInit { source, .. } => source.status_code(),
|
Error::MetaClientInit { source, .. } => source.status_code(),
|
||||||
Error::TableNotFound { .. } => StatusCode::TableNotFound,
|
|
||||||
Error::SchemaNotFound { .. } => StatusCode::DatabaseNotFound,
|
Error::SchemaNotFound { .. } => StatusCode::DatabaseNotFound,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -13,20 +13,22 @@
 // limitations under the License.
 
 mod bench;
-mod data;
 mod database;
 pub mod error;
-mod metadata;
+mod export;
+mod import;
+mod meta_snapshot;
 
 use async_trait::async_trait;
-use clap::Parser;
+use clap::{Parser, Subcommand};
 use common_error::ext::BoxedError;
 pub use database::DatabaseClient;
 use error::Result;
 
 pub use crate::bench::BenchTableMetadataCommand;
-pub use crate::data::DataCommand;
-pub use crate::metadata::MetadataCommand;
+pub use crate::export::ExportCommand;
+pub use crate::import::ImportCommand;
+pub use crate::meta_snapshot::{MetaCommand, MetaInfoCommand, MetaRestoreCommand, MetaSaveCommand};
 
 #[async_trait]
 pub trait Tool: Send + Sync {
@@ -49,3 +51,19 @@ impl AttachCommand {
         unimplemented!("Wait for https://github.com/GreptimeTeam/greptimedb/issues/2373")
     }
 }
+
+/// Subcommand for data operations like export and import.
+#[derive(Subcommand)]
+pub enum DataCommand {
+    Export(ExportCommand),
+    Import(ImportCommand),
+}
+
+impl DataCommand {
+    pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> {
+        match self {
+            DataCommand::Export(cmd) => cmd.build().await,
+            DataCommand::Import(cmd) => cmd.build().await,
+        }
+    }
+}
@@ -13,37 +13,139 @@
 // limitations under the License.
 
 use std::path::Path;
+use std::sync::Arc;
 
 use async_trait::async_trait;
 use clap::{Parser, Subcommand};
 use common_base::secrets::{ExposeSecret, SecretString};
 use common_error::ext::BoxedError;
+use common_meta::kv_backend::chroot::ChrootKvBackend;
+use common_meta::kv_backend::etcd::EtcdStore;
+use common_meta::kv_backend::KvBackendRef;
 use common_meta::snapshot::MetadataSnapshotManager;
+use meta_srv::bootstrap::create_etcd_client;
+use meta_srv::metasrv::BackendImpl;
 use object_store::services::{Fs, S3};
 use object_store::ObjectStore;
 use snafu::{OptionExt, ResultExt};
 
-use crate::error::{InvalidFilePathSnafu, OpenDalSnafu, S3ConfigNotSetSnafu};
-use crate::metadata::common::StoreConfig;
+use crate::error::{
+    InvalidFilePathSnafu, KvBackendNotSetSnafu, OpenDalSnafu, S3ConfigNotSetSnafu,
+    UnsupportedMemoryBackendSnafu,
+};
 use crate::Tool;
 
-/// Subcommand for metadata snapshot operations, including saving snapshots, restoring from snapshots, and viewing snapshot information.
+/// Subcommand for metadata snapshot management.
 #[derive(Subcommand)]
-pub enum SnapshotCommand {
-    /// Save a snapshot of the current metadata state to a specified location.
-    Save(SaveCommand),
-    /// Restore metadata from a snapshot.
-    Restore(RestoreCommand),
-    /// Explore metadata from a snapshot.
-    Info(InfoCommand),
+pub enum MetaCommand {
+    #[clap(subcommand)]
+    Snapshot(MetaSnapshotCommand),
 }
 
-impl SnapshotCommand {
+impl MetaCommand {
     pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
         match self {
-            SnapshotCommand::Save(cmd) => cmd.build().await,
-            SnapshotCommand::Restore(cmd) => cmd.build().await,
-            SnapshotCommand::Info(cmd) => cmd.build().await,
+            MetaCommand::Snapshot(cmd) => cmd.build().await,
         }
     }
 }
+
+/// Subcommand for metadata snapshot operations, such as save, restore and info.
+#[derive(Subcommand)]
+pub enum MetaSnapshotCommand {
+    /// Export metadata snapshot tool.
+    Save(MetaSaveCommand),
+    /// Restore metadata snapshot tool.
+    Restore(MetaRestoreCommand),
+    /// Explore metadata from metadata snapshot.
+    Info(MetaInfoCommand),
+}
+
+impl MetaSnapshotCommand {
+    pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
+        match self {
+            MetaSnapshotCommand::Save(cmd) => cmd.build().await,
+            MetaSnapshotCommand::Restore(cmd) => cmd.build().await,
+            MetaSnapshotCommand::Info(cmd) => cmd.build().await,
+        }
+    }
+}
+
+#[derive(Debug, Default, Parser)]
+struct MetaConnection {
+    /// The endpoints of the store: one of etcd, pg or mysql.
+    #[clap(long, alias = "store-addr", value_delimiter = ',', num_args = 1..)]
+    store_addrs: Vec<String>,
+    /// The database backend.
+    #[clap(long, value_enum)]
+    backend: Option<BackendImpl>,
+    #[clap(long, default_value = "")]
+    store_key_prefix: String,
+    #[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
+    #[clap(long, default_value = common_meta::kv_backend::DEFAULT_META_TABLE_NAME)]
+    meta_table_name: String,
+    #[clap(long, default_value = "128")]
+    max_txn_ops: usize,
+}
+
+impl MetaConnection {
+    pub async fn build(&self) -> Result<KvBackendRef, BoxedError> {
+        let max_txn_ops = self.max_txn_ops;
+        let store_addrs = &self.store_addrs;
+        if store_addrs.is_empty() {
+            KvBackendNotSetSnafu { backend: "all" }
+                .fail()
+                .map_err(BoxedError::new)
+        } else {
+            let kvbackend = match self.backend {
+                Some(BackendImpl::EtcdStore) => {
+                    let etcd_client = create_etcd_client(store_addrs)
+                        .await
+                        .map_err(BoxedError::new)?;
+                    Ok(EtcdStore::with_etcd_client(etcd_client, max_txn_ops))
+                }
+                #[cfg(feature = "pg_kvbackend")]
+                Some(BackendImpl::PostgresStore) => {
+                    let table_name = &self.meta_table_name;
+                    let pool = meta_srv::bootstrap::create_postgres_pool(store_addrs)
+                        .await
+                        .map_err(BoxedError::new)?;
+                    Ok(common_meta::kv_backend::rds::PgStore::with_pg_pool(
+                        pool,
+                        table_name,
+                        max_txn_ops,
+                    )
+                    .await
+                    .map_err(BoxedError::new)?)
+                }
+                #[cfg(feature = "mysql_kvbackend")]
+                Some(BackendImpl::MysqlStore) => {
+                    let table_name = &self.meta_table_name;
+                    let pool = meta_srv::bootstrap::create_mysql_pool(store_addrs)
+                        .await
+                        .map_err(BoxedError::new)?;
+                    Ok(common_meta::kv_backend::rds::MySqlStore::with_mysql_pool(
+                        pool,
+                        table_name,
+                        max_txn_ops,
+                    )
+                    .await
+                    .map_err(BoxedError::new)?)
+                }
+                Some(BackendImpl::MemoryStore) => UnsupportedMemoryBackendSnafu
+                    .fail()
+                    .map_err(BoxedError::new),
+                _ => KvBackendNotSetSnafu { backend: "all" }
+                    .fail()
|
.map_err(BoxedError::new),
|
||||||
|
};
|
||||||
|
if self.store_key_prefix.is_empty() {
|
||||||
|
kvbackend
|
||||||
|
} else {
|
||||||
|
let chroot_kvbackend =
|
||||||
|
ChrootKvBackend::new(self.store_key_prefix.as_bytes().to_vec(), kvbackend?);
|
||||||
|
Ok(Arc::new(chroot_kvbackend))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
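
A hedged sketch of how the new MetaConnection is exercised once clap has filled it in (the flag values below are illustrative, not from the diff):

    let conn = MetaConnection {
        store_addrs: vec!["127.0.0.1:2379".to_string()],
        backend: Some(BackendImpl::EtcdStore),
        store_key_prefix: "cluster-a/".to_string(),
        max_txn_ops: 128,
        ..Default::default() // covers the cfg-gated meta_table_name field
    };
    // A non-empty store_key_prefix wraps the backend in a ChrootKvBackend.
    let kv: KvBackendRef = conn.build().await?;
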
@@ -112,10 +214,10 @@ impl S3Config {
 /// It will dump the metadata snapshot to local file or s3 bucket.
 /// The snapshot file will be in binary format.
 #[derive(Debug, Default, Parser)]
-pub struct SaveCommand {
-    /// The store configuration.
+pub struct MetaSaveCommand {
+    /// The connection to the metadata store.
     #[clap(flatten)]
-    store: StoreConfig,
+    connection: MetaConnection,
     /// The s3 config.
     #[clap(flatten)]
     s3_config: S3Config,
@@ -138,9 +240,9 @@ fn create_local_file_object_store(root: &str) -> Result<ObjectStore, BoxedError>
     Ok(object_store)
 }

-impl SaveCommand {
+impl MetaSaveCommand {
     pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
-        let kvbackend = self.store.build().await?;
+        let kvbackend = self.connection.build().await?;
         let output_dir = &self.output_dir;
         let object_store = self.s3_config.build(output_dir).map_err(BoxedError::new)?;
         if let Some(store) = object_store {
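
The Option returned by `s3_config.build(..)` is what selects the storage backend here. A hedged restatement of that fallback, reusing the names in this hunk and assuming `S3Config::build` returns `Ok(None)` when no S3 flags were given:

    let object_store = match self.s3_config.build(output_dir).map_err(BoxedError::new)? {
        Some(store) => store,                                // S3 flags set: dump to the bucket
        None => create_local_file_object_store(output_dir)?, // otherwise: local file system
    };
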
@@ -160,7 +262,7 @@ impl SaveCommand {
     }
 }

-struct MetaSnapshotTool {
+pub struct MetaSnapshotTool {
     inner: MetadataSnapshotManager,
     target_file: String,
 }
@@ -176,16 +278,14 @@ impl Tool for MetaSnapshotTool {
     }
 }

-/// Restore metadata from a snapshot file.
-///
-/// This command restores the metadata state from a previously saved snapshot.
-/// The snapshot can be loaded from either a local file system or an S3 bucket,
-/// depending on the provided configuration.
+/// Restore metadata snapshot tool.
+/// This tool is used to restore metadata snapshot from etcd, pg or mysql.
+/// It will restore the metadata snapshot from local file or s3 bucket.
 #[derive(Debug, Default, Parser)]
-pub struct RestoreCommand {
-    /// The store configuration.
+pub struct MetaRestoreCommand {
+    /// The connection to the metadata store.
     #[clap(flatten)]
-    store: StoreConfig,
+    connection: MetaConnection,
     /// The s3 config.
     #[clap(flatten)]
     s3_config: S3Config,
@@ -199,9 +299,9 @@ pub struct RestoreCommand {
     force: bool,
 }

-impl RestoreCommand {
+impl MetaRestoreCommand {
     pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
-        let kvbackend = self.store.build().await?;
+        let kvbackend = self.connection.build().await?;
         let input_dir = &self.input_dir;
         let object_store = self.s3_config.build(input_dir).map_err(BoxedError::new)?;
         if let Some(store) = object_store {
@@ -223,7 +323,7 @@ impl RestoreCommand {
     }
 }

-struct MetaRestoreTool {
+pub struct MetaRestoreTool {
     inner: MetadataSnapshotManager,
     source_file: String,
     force: bool,
@@ -272,12 +372,9 @@ impl Tool for MetaRestoreTool {
     }
 }

-/// Explore metadata from a snapshot file.
-///
-/// This command allows filtering the metadata by a specific key and limiting the number of results.
-/// It prints the filtered metadata to the console.
+/// Explore metadata from metadata snapshot.
 #[derive(Debug, Default, Parser)]
-pub struct InfoCommand {
+pub struct MetaInfoCommand {
     /// The s3 config.
     #[clap(flatten)]
     s3_config: S3Config,
@@ -292,7 +389,7 @@ pub struct InfoCommand {
     limit: Option<usize>,
 }

-struct MetaInfoTool {
+pub struct MetaInfoTool {
     inner: ObjectStore,
     source_file: String,
     inspect_key: String,
@@ -301,7 +398,6 @@ struct MetaInfoTool {

 #[async_trait]
 impl Tool for MetaInfoTool {
-    #[allow(clippy::print_stdout)]
     async fn do_work(&self) -> std::result::Result<(), BoxedError> {
         let result = MetadataSnapshotManager::info(
             &self.inner,
@@ -318,7 +414,7 @@ impl Tool for MetaInfoTool {
     }
 }

-impl InfoCommand {
+impl MetaInfoCommand {
     fn decide_object_store_root_for_local_store(
         file_path: &str,
     ) -> Result<(&str, &str), BoxedError> {
@@ -1,52 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-mod common;
-mod control;
-mod repair;
-mod snapshot;
-mod utils;
-
-use clap::Subcommand;
-use common_error::ext::BoxedError;
-
-use crate::metadata::control::{DelCommand, GetCommand};
-use crate::metadata::repair::RepairLogicalTablesCommand;
-use crate::metadata::snapshot::SnapshotCommand;
-use crate::Tool;
-
-/// Command for managing metadata operations,
-/// including saving and restoring metadata snapshots,
-/// controlling metadata operations, and diagnosing and repairing metadata.
-#[derive(Subcommand)]
-pub enum MetadataCommand {
-    #[clap(subcommand)]
-    Snapshot(SnapshotCommand),
-    #[clap(subcommand)]
-    Get(GetCommand),
-    #[clap(subcommand)]
-    Del(DelCommand),
-    RepairLogicalTables(RepairLogicalTablesCommand),
-}
-
-impl MetadataCommand {
-    pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
-        match self {
-            MetadataCommand::Snapshot(cmd) => cmd.build().await,
-            MetadataCommand::RepairLogicalTables(cmd) => cmd.build().await,
-            MetadataCommand::Get(cmd) => cmd.build().await,
-            MetadataCommand::Del(cmd) => cmd.build().await,
-        }
-    }
-}
@@ -1,116 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::sync::Arc;
-
-use clap::Parser;
-use common_error::ext::BoxedError;
-use common_meta::kv_backend::chroot::ChrootKvBackend;
-use common_meta::kv_backend::etcd::EtcdStore;
-use common_meta::kv_backend::KvBackendRef;
-use meta_srv::bootstrap::create_etcd_client;
-use meta_srv::metasrv::BackendImpl;
-
-use crate::error::{EmptyStoreAddrsSnafu, UnsupportedMemoryBackendSnafu};
-
-#[derive(Debug, Default, Parser)]
-pub(crate) struct StoreConfig {
-    /// The endpoint of store. one of etcd, postgres or mysql.
-    ///
-    /// For postgres store, the format is:
-    /// "password=password dbname=postgres user=postgres host=localhost port=5432"
-    ///
-    /// For etcd store, the format is:
-    /// "127.0.0.1:2379"
-    ///
-    /// For mysql store, the format is:
-    /// "mysql://user:password@ip:port/dbname"
-    #[clap(long, alias = "store-addr", value_delimiter = ',', num_args = 1..)]
-    store_addrs: Vec<String>,
-
-    /// The maximum number of operations in a transaction. Only used when using [etcd-store].
-    #[clap(long, default_value = "128")]
-    max_txn_ops: usize,
-
-    /// The metadata store backend.
-    #[clap(long, value_enum, default_value = "etcd-store")]
-    backend: BackendImpl,
-
-    /// The key prefix of the metadata store.
-    #[clap(long, default_value = "")]
-    store_key_prefix: String,
-
-    /// The table name in RDS to store metadata. Only used when using [postgres-store] or [mysql-store].
-    #[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
-    #[clap(long, default_value = common_meta::kv_backend::DEFAULT_META_TABLE_NAME)]
-    meta_table_name: String,
-}
-
-impl StoreConfig {
-    /// Builds a [`KvBackendRef`] from the store configuration.
-    pub async fn build(&self) -> Result<KvBackendRef, BoxedError> {
-        let max_txn_ops = self.max_txn_ops;
-        let store_addrs = &self.store_addrs;
-        if store_addrs.is_empty() {
-            EmptyStoreAddrsSnafu.fail().map_err(BoxedError::new)
-        } else {
-            let kvbackend = match self.backend {
-                BackendImpl::EtcdStore => {
-                    let etcd_client = create_etcd_client(store_addrs)
-                        .await
-                        .map_err(BoxedError::new)?;
-                    Ok(EtcdStore::with_etcd_client(etcd_client, max_txn_ops))
-                }
-                #[cfg(feature = "pg_kvbackend")]
-                BackendImpl::PostgresStore => {
-                    let table_name = &self.meta_table_name;
-                    let pool = meta_srv::bootstrap::create_postgres_pool(store_addrs)
-                        .await
-                        .map_err(BoxedError::new)?;
-                    Ok(common_meta::kv_backend::rds::PgStore::with_pg_pool(
-                        pool,
-                        table_name,
-                        max_txn_ops,
-                    )
-                    .await
-                    .map_err(BoxedError::new)?)
-                }
-                #[cfg(feature = "mysql_kvbackend")]
-                BackendImpl::MysqlStore => {
-                    let table_name = &self.meta_table_name;
-                    let pool = meta_srv::bootstrap::create_mysql_pool(store_addrs)
-                        .await
-                        .map_err(BoxedError::new)?;
-                    Ok(common_meta::kv_backend::rds::MySqlStore::with_mysql_pool(
-                        pool,
-                        table_name,
-                        max_txn_ops,
-                    )
-                    .await
-                    .map_err(BoxedError::new)?)
-                }
-                BackendImpl::MemoryStore => UnsupportedMemoryBackendSnafu
-                    .fail()
-                    .map_err(BoxedError::new),
-            };
-            if self.store_key_prefix.is_empty() {
-                kvbackend
-            } else {
-                let chroot_kvbackend =
-                    ChrootKvBackend::new(self.store_key_prefix.as_bytes().to_vec(), kvbackend?);
-                Ok(Arc::new(chroot_kvbackend))
-            }
-        }
-    }
-}
@@ -1,22 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-mod del;
-mod get;
-#[cfg(test)]
-mod test_utils;
-mod utils;
-
-pub(crate) use del::DelCommand;
-pub(crate) use get::GetCommand;
@@ -1,42 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-mod key;
-mod table;
-
-use clap::Subcommand;
-use common_error::ext::BoxedError;
-
-use crate::metadata::control::del::key::DelKeyCommand;
-use crate::metadata::control::del::table::DelTableCommand;
-use crate::Tool;
-
-/// The prefix of the tombstone keys.
-pub(crate) const CLI_TOMBSTONE_PREFIX: &str = "__cli_tombstone/";
-
-/// Subcommand for deleting metadata from the metadata store.
-#[derive(Subcommand)]
-pub enum DelCommand {
-    Key(DelKeyCommand),
-    Table(DelTableCommand),
-}
-
-impl DelCommand {
-    pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
-        match self {
-            DelCommand::Key(cmd) => cmd.build().await,
-            DelCommand::Table(cmd) => cmd.build().await,
-        }
-    }
-}
@@ -1,132 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use async_trait::async_trait;
-use clap::Parser;
-use common_error::ext::BoxedError;
-use common_meta::key::tombstone::TombstoneManager;
-use common_meta::kv_backend::KvBackendRef;
-use common_meta::rpc::store::RangeRequest;
-
-use crate::metadata::common::StoreConfig;
-use crate::metadata::control::del::CLI_TOMBSTONE_PREFIX;
-use crate::Tool;
-
-/// Delete key-value pairs logically from the metadata store.
-#[derive(Debug, Default, Parser)]
-pub struct DelKeyCommand {
-    /// The key to delete from the metadata store.
-    key: String,
-
-    /// Delete key-value pairs with the given prefix.
-    #[clap(long)]
-    prefix: bool,
-
-    #[clap(flatten)]
-    store: StoreConfig,
-}
-
-impl DelKeyCommand {
-    pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
-        let kv_backend = self.store.build().await?;
-        Ok(Box::new(DelKeyTool {
-            key: self.key.to_string(),
-            prefix: self.prefix,
-            key_deleter: KeyDeleter::new(kv_backend),
-        }))
-    }
-}
-
-struct KeyDeleter {
-    kv_backend: KvBackendRef,
-    tombstone_manager: TombstoneManager,
-}
-
-impl KeyDeleter {
-    fn new(kv_backend: KvBackendRef) -> Self {
-        Self {
-            kv_backend: kv_backend.clone(),
-            tombstone_manager: TombstoneManager::new_with_prefix(kv_backend, CLI_TOMBSTONE_PREFIX),
-        }
-    }
-
-    async fn delete(&self, key: &str, prefix: bool) -> Result<usize, BoxedError> {
-        let mut req = RangeRequest::default().with_keys_only();
-        if prefix {
-            req = req.with_prefix(key.as_bytes());
-        } else {
-            req = req.with_key(key.as_bytes());
-        }
-        let resp = self.kv_backend.range(req).await.map_err(BoxedError::new)?;
-        let keys = resp.kvs.iter().map(|kv| kv.key.clone()).collect::<Vec<_>>();
-        self.tombstone_manager
-            .create(keys)
-            .await
-            .map_err(BoxedError::new)
-    }
-}
-
-struct DelKeyTool {
-    key: String,
-    prefix: bool,
-    key_deleter: KeyDeleter,
-}
-
-#[async_trait]
-impl Tool for DelKeyTool {
-    async fn do_work(&self) -> Result<(), BoxedError> {
-        let deleted = self.key_deleter.delete(&self.key, self.prefix).await?;
-        // Print the number of deleted keys.
-        println!("{}", deleted);
-        Ok(())
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use std::sync::Arc;
-
-    use common_meta::kv_backend::chroot::ChrootKvBackend;
-    use common_meta::kv_backend::memory::MemoryKvBackend;
-    use common_meta::kv_backend::{KvBackend, KvBackendRef};
-    use common_meta::rpc::store::RangeRequest;
-
-    use crate::metadata::control::del::key::KeyDeleter;
-    use crate::metadata::control::del::CLI_TOMBSTONE_PREFIX;
-    use crate::metadata::control::test_utils::put_key;
-
-    #[tokio::test]
-    async fn test_delete_keys() {
-        let kv_backend = Arc::new(MemoryKvBackend::new()) as KvBackendRef;
-        let key_deleter = KeyDeleter::new(kv_backend.clone());
-        put_key(&kv_backend, "foo", "bar").await;
-        put_key(&kv_backend, "foo/bar", "baz").await;
-        put_key(&kv_backend, "foo/baz", "qux").await;
-        let deleted = key_deleter.delete("foo", true).await.unwrap();
-        assert_eq!(deleted, 3);
-        let deleted = key_deleter.delete("foo/bar", false).await.unwrap();
-        assert_eq!(deleted, 0);
-
-        let chroot = ChrootKvBackend::new(CLI_TOMBSTONE_PREFIX.as_bytes().to_vec(), kv_backend);
-        let req = RangeRequest::default().with_prefix(b"foo");
-        let resp = chroot.range(req).await.unwrap();
-        assert_eq!(resp.kvs.len(), 3);
-        assert_eq!(resp.kvs[0].key, b"foo");
-        assert_eq!(resp.kvs[0].value, b"bar");
-        assert_eq!(resp.kvs[1].key, b"foo/bar");
-        assert_eq!(resp.kvs[1].value, b"baz");
-        assert_eq!(resp.kvs[2].key, b"foo/baz");
-        assert_eq!(resp.kvs[2].value, b"qux");
-    }
-}
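
The removed KeyDeleter performs a logical delete: matched keys are re-created under CLI_TOMBSTONE_PREFIX rather than erased outright, which is exactly what the test above asserts. A hedged sketch of the core step in isolation, using only calls that appear in the deleted file:

    // `keys: Vec<Vec<u8>>` gathered from a keys-only range scan (see KeyDeleter::delete).
    let tombstones = TombstoneManager::new_with_prefix(kv_backend, CLI_TOMBSTONE_PREFIX);
    let moved = tombstones.create(keys).await.map_err(BoxedError::new)?;
    println!("{}", moved); // the tool reports how many keys were tombstoned
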
@@ -1,235 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use async_trait::async_trait;
-use clap::Parser;
-use client::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
-use common_catalog::format_full_table_name;
-use common_error::ext::BoxedError;
-use common_meta::ddl::utils::get_region_wal_options;
-use common_meta::key::table_name::TableNameManager;
-use common_meta::key::TableMetadataManager;
-use common_meta::kv_backend::KvBackendRef;
-use store_api::storage::TableId;
-
-use crate::error::{InvalidArgumentsSnafu, TableNotFoundSnafu};
-use crate::metadata::common::StoreConfig;
-use crate::metadata::control::del::CLI_TOMBSTONE_PREFIX;
-use crate::metadata::control::utils::get_table_id_by_name;
-use crate::Tool;
-
-/// Delete table metadata logically from the metadata store.
-#[derive(Debug, Default, Parser)]
-pub struct DelTableCommand {
-    /// The table id to delete from the metadata store.
-    #[clap(long)]
-    table_id: Option<u32>,
-
-    /// The table name to delete from the metadata store.
-    #[clap(long)]
-    table_name: Option<String>,
-
-    /// The schema name of the table.
-    #[clap(long, default_value = DEFAULT_SCHEMA_NAME)]
-    schema_name: String,
-
-    /// The catalog name of the table.
-    #[clap(long, default_value = DEFAULT_CATALOG_NAME)]
-    catalog_name: String,
-
-    #[clap(flatten)]
-    store: StoreConfig,
-}
-
-impl DelTableCommand {
-    fn validate(&self) -> Result<(), BoxedError> {
-        if matches!(
-            (&self.table_id, &self.table_name),
-            (Some(_), Some(_)) | (None, None)
-        ) {
-            return Err(BoxedError::new(
-                InvalidArgumentsSnafu {
-                    msg: "You must specify either --table-id or --table-name.",
-                }
-                .build(),
-            ));
-        }
-        Ok(())
-    }
-}
-
-impl DelTableCommand {
-    pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
-        self.validate()?;
-        let kv_backend = self.store.build().await?;
-        Ok(Box::new(DelTableTool {
-            table_id: self.table_id,
-            table_name: self.table_name.clone(),
-            schema_name: self.schema_name.clone(),
-            catalog_name: self.catalog_name.clone(),
-            table_name_manager: TableNameManager::new(kv_backend.clone()),
-            table_metadata_deleter: TableMetadataDeleter::new(kv_backend),
-        }))
-    }
-}
-
-struct DelTableTool {
-    table_id: Option<u32>,
-    table_name: Option<String>,
-    schema_name: String,
-    catalog_name: String,
-    table_name_manager: TableNameManager,
-    table_metadata_deleter: TableMetadataDeleter,
-}
-
-#[async_trait]
-impl Tool for DelTableTool {
-    async fn do_work(&self) -> Result<(), BoxedError> {
-        let table_id = if let Some(table_name) = &self.table_name {
-            let catalog_name = &self.catalog_name;
-            let schema_name = &self.schema_name;
-
-            let Some(table_id) = get_table_id_by_name(
-                &self.table_name_manager,
-                catalog_name,
-                schema_name,
-                table_name,
-            )
-            .await?
-            else {
-                println!(
-                    "Table({}) not found",
-                    format_full_table_name(catalog_name, schema_name, table_name)
-                );
-                return Ok(());
-            };
-            table_id
-        } else {
-            // Safety: we have validated that table_id or table_name is not None
-            self.table_id.unwrap()
-        };
-        self.table_metadata_deleter.delete(table_id).await?;
-        println!("Table({}) deleted", table_id);
-
-        Ok(())
-    }
-}
-
-struct TableMetadataDeleter {
-    table_metadata_manager: TableMetadataManager,
-}
-
-impl TableMetadataDeleter {
-    fn new(kv_backend: KvBackendRef) -> Self {
-        Self {
-            table_metadata_manager: TableMetadataManager::new_with_custom_tombstone_prefix(
-                kv_backend,
-                CLI_TOMBSTONE_PREFIX,
-            ),
-        }
-    }
-
-    async fn delete(&self, table_id: TableId) -> Result<(), BoxedError> {
-        let (table_info, table_route) = self
-            .table_metadata_manager
-            .get_full_table_info(table_id)
-            .await
-            .map_err(BoxedError::new)?;
-        let Some(table_info) = table_info else {
-            return Err(BoxedError::new(TableNotFoundSnafu { table_id }.build()));
-        };
-        let Some(table_route) = table_route else {
-            return Err(BoxedError::new(TableNotFoundSnafu { table_id }.build()));
-        };
-        let physical_table_id = self
-            .table_metadata_manager
-            .table_route_manager()
-            .get_physical_table_id(table_id)
-            .await
-            .map_err(BoxedError::new)?;
-
-        let table_name = table_info.table_name();
-        let region_wal_options = get_region_wal_options(
-            &self.table_metadata_manager,
-            &table_route,
-            physical_table_id,
-        )
-        .await
-        .map_err(BoxedError::new)?;
-
-        self.table_metadata_manager
-            .delete_table_metadata(table_id, &table_name, &table_route, &region_wal_options)
-            .await
-            .map_err(BoxedError::new)?;
-        Ok(())
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use std::collections::HashMap;
-    use std::sync::Arc;
-
-    use common_error::ext::ErrorExt;
-    use common_error::status_code::StatusCode;
-    use common_meta::key::table_route::TableRouteValue;
-    use common_meta::key::TableMetadataManager;
-    use common_meta::kv_backend::chroot::ChrootKvBackend;
-    use common_meta::kv_backend::memory::MemoryKvBackend;
-    use common_meta::kv_backend::{KvBackend, KvBackendRef};
-    use common_meta::rpc::store::RangeRequest;
-
-    use crate::metadata::control::del::table::TableMetadataDeleter;
-    use crate::metadata::control::del::CLI_TOMBSTONE_PREFIX;
-    use crate::metadata::control::test_utils::prepare_physical_table_metadata;
-
-    #[tokio::test]
-    async fn test_delete_table_not_found() {
-        let kv_backend = Arc::new(MemoryKvBackend::new()) as KvBackendRef;
-
-        let table_metadata_deleter = TableMetadataDeleter::new(kv_backend);
-        let table_id = 1;
-        let err = table_metadata_deleter.delete(table_id).await.unwrap_err();
-        assert_eq!(err.status_code(), StatusCode::TableNotFound);
-    }
-
-    #[tokio::test]
-    async fn test_delete_table_metadata() {
-        let kv_backend = Arc::new(MemoryKvBackend::new());
-        let table_metadata_manager = TableMetadataManager::new(kv_backend.clone());
-        let table_id = 1024;
-        let (table_info, table_route) = prepare_physical_table_metadata("my_table", table_id).await;
-        table_metadata_manager
-            .create_table_metadata(
-                table_info,
-                TableRouteValue::Physical(table_route),
-                HashMap::new(),
-            )
-            .await
-            .unwrap();
-
-        let total_keys = kv_backend.len();
-        assert!(total_keys > 0);
-
-        let table_metadata_deleter = TableMetadataDeleter::new(kv_backend.clone());
-        table_metadata_deleter.delete(table_id).await.unwrap();
-
-        // Check the tombstone keys are deleted
-        let chroot =
-            ChrootKvBackend::new(CLI_TOMBSTONE_PREFIX.as_bytes().to_vec(), kv_backend.clone());
-        let req = RangeRequest::default().with_range(vec![0], vec![0]);
-        let resp = chroot.range(req).await.unwrap();
-        assert_eq!(resp.kvs.len(), total_keys);
-    }
-}
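
DelTableCommand above and GetTableCommand below share the same matches! pattern to require exactly one of --table-id and --table-name. A hedged, generic restatement of that check:

    // Rejects (Some, Some) and (None, None): exactly one selector must be supplied.
    fn exactly_one<A, B>(a: &Option<A>, b: &Option<B>) -> bool {
        matches!((a, b), (Some(_), None) | (None, Some(_)))
    }
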
@@ -1,247 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::cmp::min;
-
-use async_trait::async_trait;
-use clap::{Parser, Subcommand};
-use client::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
-use common_catalog::format_full_table_name;
-use common_error::ext::BoxedError;
-use common_meta::key::table_info::TableInfoKey;
-use common_meta::key::table_route::TableRouteKey;
-use common_meta::key::TableMetadataManager;
-use common_meta::kv_backend::KvBackendRef;
-use common_meta::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
-use common_meta::rpc::store::RangeRequest;
-use futures::TryStreamExt;
-
-use crate::error::InvalidArgumentsSnafu;
-use crate::metadata::common::StoreConfig;
-use crate::metadata::control::utils::{decode_key_value, get_table_id_by_name, json_formatter};
-use crate::Tool;
-
-/// Getting metadata from metadata store.
-#[derive(Subcommand)]
-pub enum GetCommand {
-    Key(GetKeyCommand),
-    Table(GetTableCommand),
-}
-
-impl GetCommand {
-    pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
-        match self {
-            GetCommand::Key(cmd) => cmd.build().await,
-            GetCommand::Table(cmd) => cmd.build().await,
-        }
-    }
-}
-
-/// Get key-value pairs from the metadata store.
-#[derive(Debug, Default, Parser)]
-pub struct GetKeyCommand {
-    /// The key to get from the metadata store.
-    #[clap(default_value = "")]
-    key: String,
-
-    /// Whether to perform a prefix query. If true, returns all key-value pairs where the key starts with the given prefix.
-    #[clap(long, default_value = "false")]
-    prefix: bool,
-
-    /// The maximum number of key-value pairs to return. If 0, returns all key-value pairs.
-    #[clap(long, default_value = "0")]
-    limit: u64,
-
-    #[clap(flatten)]
-    store: StoreConfig,
-}
-
-impl GetKeyCommand {
-    pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
-        let kvbackend = self.store.build().await?;
-        Ok(Box::new(GetKeyTool {
-            kvbackend,
-            key: self.key.clone(),
-            prefix: self.prefix,
-            limit: self.limit,
-        }))
-    }
-}
-
-struct GetKeyTool {
-    kvbackend: KvBackendRef,
-    key: String,
-    prefix: bool,
-    limit: u64,
-}
-
-#[async_trait]
-impl Tool for GetKeyTool {
-    async fn do_work(&self) -> Result<(), BoxedError> {
-        let mut req = RangeRequest::default();
-        if self.prefix {
-            req = req.with_prefix(self.key.as_bytes());
-        } else {
-            req = req.with_key(self.key.as_bytes());
-        }
-        let page_size = if self.limit > 0 {
-            min(self.limit as usize, DEFAULT_PAGE_SIZE)
-        } else {
-            DEFAULT_PAGE_SIZE
-        };
-        let pagination_stream =
-            PaginationStream::new(self.kvbackend.clone(), req, page_size, decode_key_value);
-        let mut stream = Box::pin(pagination_stream.into_stream());
-        let mut counter = 0;
-
-        while let Some((key, value)) = stream.try_next().await.map_err(BoxedError::new)? {
-            print!("{}\n{}\n", key, value);
-            counter += 1;
-            if self.limit > 0 && counter >= self.limit {
-                break;
-            }
-        }
-
-        Ok(())
-    }
-}
-
-/// Get table metadata from the metadata store via table id.
-#[derive(Debug, Default, Parser)]
-pub struct GetTableCommand {
-    /// Get table metadata by table id.
-    #[clap(long)]
-    table_id: Option<u32>,
-
-    /// Get table metadata by table name.
-    #[clap(long)]
-    table_name: Option<String>,
-
-    /// The schema name of the table.
-    #[clap(long, default_value = DEFAULT_SCHEMA_NAME)]
-    schema_name: String,
-
-    /// The catalog name of the table.
-    #[clap(long, default_value = DEFAULT_CATALOG_NAME)]
-    catalog_name: String,
-
-    /// Pretty print the output.
-    #[clap(long, default_value = "false")]
-    pretty: bool,
-
-    #[clap(flatten)]
-    store: StoreConfig,
-}
-
-impl GetTableCommand {
-    pub fn validate(&self) -> Result<(), BoxedError> {
-        if matches!(
-            (&self.table_id, &self.table_name),
-            (Some(_), Some(_)) | (None, None)
-        ) {
-            return Err(BoxedError::new(
-                InvalidArgumentsSnafu {
-                    msg: "You must specify either --table-id or --table-name.",
-                }
-                .build(),
-            ));
-        }
-        Ok(())
-    }
-}
-
-struct GetTableTool {
-    kvbackend: KvBackendRef,
-    table_id: Option<u32>,
-    table_name: Option<String>,
-    schema_name: String,
-    catalog_name: String,
-    pretty: bool,
-}
-
-#[async_trait]
-impl Tool for GetTableTool {
-    async fn do_work(&self) -> Result<(), BoxedError> {
-        let table_metadata_manager = TableMetadataManager::new(self.kvbackend.clone());
-        let table_name_manager = table_metadata_manager.table_name_manager();
-        let table_info_manager = table_metadata_manager.table_info_manager();
-        let table_route_manager = table_metadata_manager.table_route_manager();
-
-        let table_id = if let Some(table_name) = &self.table_name {
-            let catalog_name = &self.catalog_name;
-            let schema_name = &self.schema_name;
-
-            let Some(table_id) =
-                get_table_id_by_name(table_name_manager, catalog_name, schema_name, table_name)
-                    .await?
-            else {
-                println!(
-                    "Table({}) not found",
-                    format_full_table_name(catalog_name, schema_name, table_name)
-                );
-                return Ok(());
-            };
-            table_id
-        } else {
-            // Safety: we have validated that table_id or table_name is not None
-            self.table_id.unwrap()
-        };
-
-        let table_info = table_info_manager
-            .get(table_id)
-            .await
-            .map_err(BoxedError::new)?;
-        if let Some(table_info) = table_info {
-            println!(
-                "{}\n{}",
-                TableInfoKey::new(table_id),
-                json_formatter(self.pretty, &*table_info)
-            );
-        } else {
-            println!("Table info not found");
-        }
-
-        let table_route = table_route_manager
-            .table_route_storage()
-            .get(table_id)
-            .await
-            .map_err(BoxedError::new)?;
-        if let Some(table_route) = table_route {
-            println!(
-                "{}\n{}",
-                TableRouteKey::new(table_id),
-                json_formatter(self.pretty, &table_route)
-            );
-        } else {
-            println!("Table route not found");
-        }
-
-        Ok(())
-    }
-}
-
-impl GetTableCommand {
-    pub async fn build(&self) -> Result<Box<dyn Tool>, BoxedError> {
-        self.validate()?;
-        let kvbackend = self.store.build().await?;
-        Ok(Box::new(GetTableTool {
-            kvbackend,
-            table_id: self.table_id,
-            table_name: self.table_name.clone(),
-            schema_name: self.schema_name.clone(),
-            catalog_name: self.catalog_name.clone(),
-            pretty: self.pretty,
-        }))
-    }
-}
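
GetKeyTool pages through the store instead of issuing one unbounded range request. A hedged sketch of the same pattern outside the tool, built only from calls in the deleted file (the prefix value is illustrative):

    let req = RangeRequest::default().with_prefix(b"__table");
    let stream = PaginationStream::new(kvbackend.clone(), req, DEFAULT_PAGE_SIZE, decode_key_value);
    let mut stream = Box::pin(stream.into_stream());
    while let Some((key, value)) = stream.try_next().await.map_err(BoxedError::new)? {
        println!("{}\n{}", key, value);
    }
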
@@ -1,51 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use common_meta::ddl::test_util::test_create_physical_table_task;
-use common_meta::key::table_route::PhysicalTableRouteValue;
-use common_meta::kv_backend::KvBackendRef;
-use common_meta::peer::Peer;
-use common_meta::rpc::router::{Region, RegionRoute};
-use common_meta::rpc::store::PutRequest;
-use store_api::storage::{RegionId, TableId};
-use table::metadata::RawTableInfo;
-
-/// Puts a key-value pair into the kv backend.
-pub async fn put_key(kv_backend: &KvBackendRef, key: &str, value: &str) {
-    let put_req = PutRequest::new()
-        .with_key(key.as_bytes())
-        .with_value(value.as_bytes());
-    kv_backend.put(put_req).await.unwrap();
-}
-
-/// Prepares the physical table metadata for testing.
-///
-/// Returns the table info and the table route.
-pub async fn prepare_physical_table_metadata(
-    table_name: &str,
-    table_id: TableId,
-) -> (RawTableInfo, PhysicalTableRouteValue) {
-    let mut create_physical_table_task = test_create_physical_table_task(table_name);
-    let table_route = PhysicalTableRouteValue::new(vec![RegionRoute {
-        region: Region {
-            id: RegionId::new(table_id, 1),
-            ..Default::default()
-        },
-        leader_peer: Some(Peer::empty(1)),
-        ..Default::default()
-    }]);
-    create_physical_table_task.set_table_id(table_id);
-
-    (create_physical_table_task.table_info, table_route)
-}
@@ -1,57 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use common_error::ext::BoxedError;
-use common_meta::error::Result as CommonMetaResult;
-use common_meta::key::table_name::{TableNameKey, TableNameManager};
-use common_meta::rpc::KeyValue;
-use serde::Serialize;
-use store_api::storage::TableId;
-
-/// Decodes a key-value pair into a string.
-pub fn decode_key_value(kv: KeyValue) -> CommonMetaResult<(String, String)> {
-    let key = String::from_utf8_lossy(&kv.key).to_string();
-    let value = String::from_utf8_lossy(&kv.value).to_string();
-    Ok((key, value))
-}
-
-/// Formats a value as a JSON string.
-pub fn json_formatter<T>(pretty: bool, value: &T) -> String
-where
-    T: Serialize,
-{
-    if pretty {
-        serde_json::to_string_pretty(value).unwrap()
-    } else {
-        serde_json::to_string(value).unwrap()
-    }
-}
-
-/// Gets the table id by table name.
-pub async fn get_table_id_by_name(
-    table_name_manager: &TableNameManager,
-    catalog_name: &str,
-    schema_name: &str,
-    table_name: &str,
-) -> Result<Option<TableId>, BoxedError> {
-    let table_name_key = TableNameKey::new(catalog_name, schema_name, table_name);
-    let Some(table_name_value) = table_name_manager
-        .get(table_name_key)
-        .await
-        .map_err(BoxedError::new)?
-    else {
-        return Ok(None);
-    };
-    Ok(Some(table_name_value.table_id()))
-}
@@ -1,368 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-mod alter_table;
-mod create_table;
-
-use std::sync::Arc;
-use std::time::Duration;
-
-use async_trait::async_trait;
-use clap::Parser;
-use client::api::v1::CreateTableExpr;
-use client::client_manager::NodeClients;
-use client::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
-use common_error::ext::{BoxedError, ErrorExt};
-use common_error::status_code::StatusCode;
-use common_grpc::channel_manager::ChannelConfig;
-use common_meta::error::Error as CommonMetaError;
-use common_meta::key::TableMetadataManager;
-use common_meta::kv_backend::KvBackendRef;
-use common_meta::node_manager::NodeManagerRef;
-use common_meta::peer::Peer;
-use common_meta::rpc::router::{find_leaders, RegionRoute};
-use common_telemetry::{error, info, warn};
-use futures::TryStreamExt;
-use snafu::{ensure, ResultExt};
-use store_api::storage::TableId;
-
-use crate::error::{
-    InvalidArgumentsSnafu, Result, SendRequestToDatanodeSnafu, TableMetadataSnafu, UnexpectedSnafu,
-};
-use crate::metadata::common::StoreConfig;
-use crate::metadata::utils::{FullTableMetadata, IteratorInput, TableMetadataIterator};
-use crate::Tool;
-
-/// Repair metadata of logical tables.
-#[derive(Debug, Default, Parser)]
-pub struct RepairLogicalTablesCommand {
-    /// The names of the tables to repair.
-    #[clap(long, value_delimiter = ',', alias = "table-name")]
-    table_names: Vec<String>,
-
-    /// The id of the table to repair.
-    #[clap(long, value_delimiter = ',', alias = "table-id")]
-    table_ids: Vec<TableId>,
-
-    /// The schema of the tables to repair.
-    #[clap(long, default_value = DEFAULT_SCHEMA_NAME)]
-    schema_name: String,
-
-    /// The catalog of the tables to repair.
-    #[clap(long, default_value = DEFAULT_CATALOG_NAME)]
-    catalog_name: String,
-
-    /// Whether to fail fast if any repair operation fails.
-    #[clap(long)]
-    fail_fast: bool,
-
-    #[clap(flatten)]
-    store: StoreConfig,
-
-    /// The timeout for the client to operate the datanode.
-    #[clap(long, default_value_t = 30)]
-    client_timeout_secs: u64,
-
-    /// The timeout for the client to connect to the datanode.
-    #[clap(long, default_value_t = 3)]
-    client_connect_timeout_secs: u64,
-}
-
-impl RepairLogicalTablesCommand {
-    fn validate(&self) -> Result<()> {
-        ensure!(
-            !self.table_names.is_empty() || !self.table_ids.is_empty(),
-            InvalidArgumentsSnafu {
-                msg: "You must specify --table-names or --table-ids.",
-            }
-        );
-        Ok(())
-    }
-}
-
-impl RepairLogicalTablesCommand {
-    pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> {
-        self.validate().map_err(BoxedError::new)?;
-        let kv_backend = self.store.build().await?;
-        let node_client_channel_config = ChannelConfig::new()
-            .timeout(Duration::from_secs(self.client_timeout_secs))
-            .connect_timeout(Duration::from_secs(self.client_connect_timeout_secs));
-        let node_manager = Arc::new(NodeClients::new(node_client_channel_config));
-
-        Ok(Box::new(RepairTool {
-            table_names: self.table_names.clone(),
-            table_ids: self.table_ids.clone(),
-            schema_name: self.schema_name.clone(),
-            catalog_name: self.catalog_name.clone(),
-            fail_fast: self.fail_fast,
-            kv_backend,
-            node_manager,
-        }))
-    }
-}
-
-struct RepairTool {
-    table_names: Vec<String>,
-    table_ids: Vec<TableId>,
-    schema_name: String,
-    catalog_name: String,
-    fail_fast: bool,
-    kv_backend: KvBackendRef,
-    node_manager: NodeManagerRef,
-}
-
-#[async_trait]
-impl Tool for RepairTool {
-    async fn do_work(&self) -> std::result::Result<(), BoxedError> {
-        self.repair_tables().await.map_err(BoxedError::new)
-    }
-}
-
-impl RepairTool {
-    fn generate_iterator_input(&self) -> Result<IteratorInput> {
-        if !self.table_names.is_empty() {
-            let table_names = &self.table_names;
-            let catalog = &self.catalog_name;
-            let schema_name = &self.schema_name;
-
-            let table_names = table_names
-                .iter()
-                .map(|table_name| {
-                    (
-                        catalog.to_string(),
-                        schema_name.to_string(),
-                        table_name.to_string(),
-                    )
-                })
-                .collect::<Vec<_>>();
-            return Ok(IteratorInput::new_table_names(table_names));
-        } else if !self.table_ids.is_empty() {
-            return Ok(IteratorInput::new_table_ids(self.table_ids.clone()));
-        };
-
-        InvalidArgumentsSnafu {
-            msg: "You must specify --table-names or --table-id.",
-        }
-        .fail()
-    }
-
-    async fn repair_tables(&self) -> Result<()> {
-        let input = self.generate_iterator_input()?;
-        let mut table_metadata_iterator =
-            Box::pin(TableMetadataIterator::new(self.kv_backend.clone(), input).into_stream());
-        let table_metadata_manager = TableMetadataManager::new(self.kv_backend.clone());
-
-        let mut skipped_table = 0;
-        let mut success_table = 0;
-        while let Some(full_table_metadata) = table_metadata_iterator.try_next().await? {
-            let full_table_name = full_table_metadata.full_table_name();
-            if !full_table_metadata.is_metric_engine() {
-                warn!(
-                    "Skipping repair for non-metric engine table: {}",
-                    full_table_name
-                );
-                skipped_table += 1;
-                continue;
-            }
-
-            if full_table_metadata.is_physical_table() {
-                warn!("Skipping repair for physical table: {}", full_table_name);
-                skipped_table += 1;
-                continue;
-            }
-
-            let (physical_table_id, physical_table_route) = table_metadata_manager
-                .table_route_manager()
-                .get_physical_table_route(full_table_metadata.table_id)
-                .await
-                .context(TableMetadataSnafu)?;
-
-            if let Err(err) = self
-                .repair_table(
-                    &full_table_metadata,
-                    physical_table_id,
-                    &physical_table_route.region_routes,
-                )
-                .await
-            {
-                error!(
-                    err;
-                    "Failed to repair table: {}, skipped table: {}",
-                    full_table_name,
-                    skipped_table,
-                );
-
-                if self.fail_fast {
-                    return Err(err);
-                }
-            } else {
-                success_table += 1;
-            }
-        }
-
-        info!(
-            "Repair logical tables result: {} tables repaired, {} tables skipped",
-            success_table, skipped_table
-        );
-
-        Ok(())
-    }
-
-    async fn alter_table_on_datanodes(
-        &self,
-        full_table_metadata: &FullTableMetadata,
-        physical_region_routes: &[RegionRoute],
-    ) -> Result<Vec<(Peer, CommonMetaError)>> {
-        let logical_table_id = full_table_metadata.table_id;
-        let alter_table_expr = alter_table::generate_alter_table_expr_for_all_columns(
-            &full_table_metadata.table_info,
-        )?;
-        let node_manager = self.node_manager.clone();
-
-        let mut failed_peers = Vec::new();
-        info!(
-            "Sending alter table requests to all datanodes for table: {}, number of regions:{}.",
-            full_table_metadata.full_table_name(),
-            physical_region_routes.len()
-        );
-        let leaders = find_leaders(physical_region_routes);
-        for peer in &leaders {
-            let alter_table_request = alter_table::make_alter_region_request_for_peer(
-                logical_table_id,
-                &alter_table_expr,
-                peer,
-                physical_region_routes,
-            )?;
-            let datanode = node_manager.datanode(peer).await;
-            if let Err(err) = datanode.handle(alter_table_request).await {
-                failed_peers.push((peer.clone(), err));
-            }
-        }
-
-        Ok(failed_peers)
-    }
-
-    async fn create_table_on_datanode(
-        &self,
-        create_table_expr: &CreateTableExpr,
-        logical_table_id: TableId,
-        physical_table_id: TableId,
-        peer: &Peer,
-        physical_region_routes: &[RegionRoute],
-    ) -> Result<()> {
-        let node_manager = self.node_manager.clone();
-        let datanode = node_manager.datanode(peer).await;
-        let create_table_request = create_table::make_create_region_request_for_peer(
-            logical_table_id,
-            physical_table_id,
-            create_table_expr,
-            peer,
-            physical_region_routes,
-        )?;
-
-        datanode
-            .handle(create_table_request)
-            .await
-            .with_context(|_| SendRequestToDatanodeSnafu { peer: peer.clone() })?;
-
-        Ok(())
-    }
-
-    async fn repair_table(
-        &self,
-        full_table_metadata: &FullTableMetadata,
-        physical_table_id: TableId,
-        physical_region_routes: &[RegionRoute],
-    ) -> Result<()> {
-        let full_table_name = full_table_metadata.full_table_name();
-        // First we sends alter table requests to all datanodes with all columns.
-        let failed_peers = self
-            .alter_table_on_datanodes(full_table_metadata, physical_region_routes)
-            .await?;
-
-        if failed_peers.is_empty() {
-            info!(
-                "All alter table requests sent successfully for table: {}",
-                full_table_name
-            );
-            return Ok(());
-        }
-        warn!(
-            "Sending alter table requests to datanodes for table: {} failed for the datanodes: {:?}",
-            full_table_name,
-            failed_peers.iter().map(|(peer, _)| peer.id).collect::<Vec<_>>()
-        );
-
-        let create_table_expr =
-            create_table::generate_create_table_expr(&full_table_metadata.table_info)?;
-
-        let mut errors = Vec::new();
-        for (peer, err) in failed_peers {
-            if err.status_code() != StatusCode::RegionNotFound {
-                error!(
-                    err;
-                    "Sending alter table requests to datanode: {} for table: {} failed",
-                    peer.id,
-                    full_table_name,
-                );
-                continue;
-            }
-            info!(
-                "Region not found for table: {}, datanode: {}, trying to create the logical table on that datanode",
-                full_table_name,
-                peer.id
-            );
-
-            // If the alter table request fails for any datanode, we attempt to create the table on that datanode
-            // as a fallback mechanism to ensure table consistency across the cluster.
-            if let Err(err) = self
-                .create_table_on_datanode(
-                    &create_table_expr,
-                    full_table_metadata.table_id,
-                    physical_table_id,
|
|
||||||
&peer,
|
|
||||||
physical_region_routes,
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
error!(
|
|
||||||
err;
|
|
||||||
"Failed to create table on datanode: {} for table: {}",
|
|
||||||
peer.id, full_table_name
|
|
||||||
);
|
|
||||||
errors.push(err);
|
|
||||||
if self.fail_fast {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
info!(
|
|
||||||
"Created table on datanode: {} for table: {}",
|
|
||||||
peer.id, full_table_name
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !errors.is_empty() {
|
|
||||||
return UnexpectedSnafu {
|
|
||||||
msg: format!(
|
|
||||||
"Failed to create table on datanodes for table: {}",
|
|
||||||
full_table_name,
|
|
||||||
),
|
|
||||||
}
|
|
||||||
.fail();
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
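The repair loop above follows a general two-step shape: try the cheap alter first, and only on one specific error (region not found) fall back to creating the table. A minimal, self-contained sketch of that control flow, with simplified stand-in types rather than the GreptimeDB APIs:

```rust
#[derive(Debug, PartialEq)]
enum StatusCode {
    RegionNotFound,
    Internal,
}

#[derive(Debug)]
struct RepairError {
    code: StatusCode,
}

// Stand-ins for the per-datanode RPCs; here they just simulate outcomes.
fn alter_on_datanode(peer: u64) -> Result<(), RepairError> {
    if peer == 2 {
        Err(RepairError { code: StatusCode::RegionNotFound })
    } else {
        Ok(())
    }
}

fn create_on_datanode(_peer: u64) -> Result<(), RepairError> {
    Ok(())
}

fn repair(peers: &[u64], fail_fast: bool) -> Result<(), Vec<RepairError>> {
    let mut errors = Vec::new();
    for &peer in peers {
        let Err(err) = alter_on_datanode(peer) else { continue };
        // Only a missing region justifies the create fallback; anything
        // else is recorded and skipped.
        if err.code != StatusCode::RegionNotFound {
            errors.push(err);
            continue;
        }
        if let Err(err) = create_on_datanode(peer) {
            errors.push(err);
            if fail_fast {
                break;
            }
        }
    }
    if errors.is_empty() { Ok(()) } else { Err(errors) }
}

fn main() {
    assert!(repair(&[1, 2, 3], false).is_ok());
}
```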
@@ -1,84 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use client::api::v1::alter_table_expr::Kind;
-use client::api::v1::region::{region_request, AlterRequests, RegionRequest, RegionRequestHeader};
-use client::api::v1::{AddColumn, AddColumns, AlterTableExpr};
-use common_meta::ddl::alter_logical_tables::make_alter_region_request;
-use common_meta::peer::Peer;
-use common_meta::rpc::router::{find_leader_regions, RegionRoute};
-use operator::expr_helper::column_schemas_to_defs;
-use snafu::ResultExt;
-use store_api::storage::{RegionId, TableId};
-use table::metadata::RawTableInfo;
-
-use crate::error::{CovertColumnSchemasToDefsSnafu, Result};
-
-/// Generates an alter table expression for all columns.
-pub fn generate_alter_table_expr_for_all_columns(
-    table_info: &RawTableInfo,
-) -> Result<AlterTableExpr> {
-    let schema = &table_info.meta.schema;
-
-    let mut alter_table_expr = AlterTableExpr {
-        catalog_name: table_info.catalog_name.to_string(),
-        schema_name: table_info.schema_name.to_string(),
-        table_name: table_info.name.to_string(),
-        ..Default::default()
-    };
-
-    let primary_keys = table_info
-        .meta
-        .primary_key_indices
-        .iter()
-        .map(|i| schema.column_schemas[*i].name.clone())
-        .collect::<Vec<_>>();
-
-    let add_columns = column_schemas_to_defs(schema.column_schemas.clone(), &primary_keys)
-        .context(CovertColumnSchemasToDefsSnafu)?;
-
-    alter_table_expr.kind = Some(Kind::AddColumns(AddColumns {
-        add_columns: add_columns
-            .into_iter()
-            .map(|col| AddColumn {
-                column_def: Some(col),
-                location: None,
-                add_if_not_exists: true,
-            })
-            .collect(),
-    }));
-
-    Ok(alter_table_expr)
-}
-
-/// Makes an alter region request for a peer.
-pub fn make_alter_region_request_for_peer(
-    logical_table_id: TableId,
-    alter_table_expr: &AlterTableExpr,
-    peer: &Peer,
-    region_routes: &[RegionRoute],
-) -> Result<RegionRequest> {
-    let regions_on_this_peer = find_leader_regions(region_routes, peer);
-    let mut requests = Vec::with_capacity(regions_on_this_peer.len());
-    for region_number in &regions_on_this_peer {
-        let region_id = RegionId::new(logical_table_id, *region_number);
-        let request = make_alter_region_request(region_id, alter_table_expr);
-        requests.push(request);
-    }
-
-    Ok(RegionRequest {
-        header: Some(RegionRequestHeader::default()),
-        body: Some(region_request::Body::Alters(AlterRequests { requests })),
-    })
-}
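`make_alter_region_request_for_peer` batches all of a peer's leader regions into one request. The grouping step can be sketched with plain types; the names below are illustrative, not the real `RegionRoute` API:

```rust
use std::collections::HashMap;

// (region_number, leader_peer_id) pairs stand in for RegionRoute entries.
fn find_leader_regions(routes: &[(u32, u64)], peer: u64) -> Vec<u32> {
    routes
        .iter()
        .filter(|(_, leader)| *leader == peer)
        .map(|(region, _)| *region)
        .collect()
}

fn main() {
    let routes = [(0, 1), (1, 2), (2, 1), (3, 2)];
    // Group leader regions by peer; one batched request per peer follows.
    let mut per_peer: HashMap<u64, Vec<u32>> = HashMap::new();
    for peer in [1, 2] {
        per_peer.insert(peer, find_leader_regions(&routes, peer));
    }
    assert_eq!(per_peer[&1], vec![0, 2]);
    assert_eq!(per_peer[&2], vec![1, 3]);
}
```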
@@ -1,89 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::collections::HashMap;
-
-use client::api::v1::region::{region_request, CreateRequests, RegionRequest, RegionRequestHeader};
-use client::api::v1::CreateTableExpr;
-use common_meta::ddl::create_logical_tables::create_region_request_builder;
-use common_meta::ddl::utils::region_storage_path;
-use common_meta::peer::Peer;
-use common_meta::rpc::router::{find_leader_regions, RegionRoute};
-use operator::expr_helper::column_schemas_to_defs;
-use snafu::ResultExt;
-use store_api::storage::{RegionId, TableId};
-use table::metadata::RawTableInfo;
-
-use crate::error::{CovertColumnSchemasToDefsSnafu, Result};
-
-/// Generates a `CreateTableExpr` from a `RawTableInfo`.
-pub fn generate_create_table_expr(table_info: &RawTableInfo) -> Result<CreateTableExpr> {
-    let schema = &table_info.meta.schema;
-    let primary_keys = table_info
-        .meta
-        .primary_key_indices
-        .iter()
-        .map(|i| schema.column_schemas[*i].name.clone())
-        .collect::<Vec<_>>();
-
-    let timestamp_index = schema.timestamp_index.as_ref().unwrap();
-    let time_index = schema.column_schemas[*timestamp_index].name.clone();
-    let column_defs = column_schemas_to_defs(schema.column_schemas.clone(), &primary_keys)
-        .context(CovertColumnSchemasToDefsSnafu)?;
-    let table_options = HashMap::from(&table_info.meta.options);
-
-    Ok(CreateTableExpr {
-        catalog_name: table_info.catalog_name.to_string(),
-        schema_name: table_info.schema_name.to_string(),
-        table_name: table_info.name.to_string(),
-        desc: String::default(),
-        column_defs,
-        time_index,
-        primary_keys,
-        create_if_not_exists: true,
-        table_options,
-        table_id: None,
-        engine: table_info.meta.engine.to_string(),
-    })
-}
-
-/// Makes a create region request for a peer.
-pub fn make_create_region_request_for_peer(
-    logical_table_id: TableId,
-    physical_table_id: TableId,
-    create_table_expr: &CreateTableExpr,
-    peer: &Peer,
-    region_routes: &[RegionRoute],
-) -> Result<RegionRequest> {
-    let regions_on_this_peer = find_leader_regions(region_routes, peer);
-    let mut requests = Vec::with_capacity(regions_on_this_peer.len());
-    let request_builder =
-        create_region_request_builder(create_table_expr, physical_table_id).unwrap();
-
-    let catalog = &create_table_expr.catalog_name;
-    let schema = &create_table_expr.schema_name;
-    let storage_path = region_storage_path(catalog, schema);
-
-    for region_number in &regions_on_this_peer {
-        let region_id = RegionId::new(logical_table_id, *region_number);
-        let region_request =
-            request_builder.build_one(region_id, storage_path.clone(), &HashMap::new());
-        requests.push(region_request);
-    }
-
-    Ok(RegionRequest {
-        header: Some(RegionRequestHeader::default()),
-        body: Some(region_request::Body::Creates(CreateRequests { requests })),
-    })
-}
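`generate_create_table_expr` recovers the primary-key names and the time index column from positional indices in the schema. A self-contained sketch of that extraction with simplified types:

```rust
// Simplified stand-ins for the schema types used above.
struct ColumnSchema {
    name: String,
}

struct Schema {
    column_schemas: Vec<ColumnSchema>,
    timestamp_index: Option<usize>,
}

fn main() {
    let schema = Schema {
        column_schemas: vec![
            ColumnSchema { name: "host".into() },
            ColumnSchema { name: "ts".into() },
            ColumnSchema { name: "cpu".into() },
        ],
        timestamp_index: Some(1),
    };
    let primary_key_indices = vec![0];

    // Indices into column_schemas are turned back into column names.
    let primary_keys: Vec<String> = primary_key_indices
        .iter()
        .map(|i| schema.column_schemas[*i].name.clone())
        .collect();
    let time_index = schema.column_schemas[schema.timestamp_index.unwrap()]
        .name
        .clone();

    assert_eq!(primary_keys, ["host"]);
    assert_eq!(time_index, "ts");
}
```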
@@ -1,178 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::collections::VecDeque;
-
-use async_stream::try_stream;
-use common_catalog::consts::METRIC_ENGINE;
-use common_catalog::format_full_table_name;
-use common_meta::key::table_name::TableNameKey;
-use common_meta::key::table_route::TableRouteValue;
-use common_meta::key::TableMetadataManager;
-use common_meta::kv_backend::KvBackendRef;
-use futures::Stream;
-use snafu::{OptionExt, ResultExt};
-use store_api::storage::TableId;
-use table::metadata::RawTableInfo;
-
-use crate::error::{Result, TableMetadataSnafu, UnexpectedSnafu};
-
-/// The input for the iterator.
-pub enum IteratorInput {
-    TableIds(VecDeque<TableId>),
-    TableNames(VecDeque<(String, String, String)>),
-}
-
-impl IteratorInput {
-    /// Creates a new iterator input from a list of table ids.
-    pub fn new_table_ids(table_ids: Vec<TableId>) -> Self {
-        Self::TableIds(table_ids.into())
-    }
-
-    /// Creates a new iterator input from a list of table names.
-    pub fn new_table_names(table_names: Vec<(String, String, String)>) -> Self {
-        Self::TableNames(table_names.into())
-    }
-}
-
-/// An iterator for retrieving table metadata from the metadata store.
-///
-/// This struct provides functionality to iterate over table metadata based on
-/// either [`TableId`] and their associated regions or fully qualified table names.
-pub struct TableMetadataIterator {
-    input: IteratorInput,
-    table_metadata_manager: TableMetadataManager,
-}
-
-/// The full table metadata.
-pub struct FullTableMetadata {
-    pub table_id: TableId,
-    pub table_info: RawTableInfo,
-    pub table_route: TableRouteValue,
-}
-
-impl FullTableMetadata {
-    /// Returns true if it's [TableRouteValue::Physical].
-    pub fn is_physical_table(&self) -> bool {
-        self.table_route.is_physical()
-    }
-
-    /// Returns true if it's a metric engine table.
-    pub fn is_metric_engine(&self) -> bool {
-        self.table_info.meta.engine == METRIC_ENGINE
-    }
-
-    /// Returns the full table name.
-    pub fn full_table_name(&self) -> String {
-        format_full_table_name(
-            &self.table_info.catalog_name,
-            &self.table_info.schema_name,
-            &self.table_info.name,
-        )
-    }
-}
-
-impl TableMetadataIterator {
-    pub fn new(kvbackend: KvBackendRef, input: IteratorInput) -> Self {
-        let table_metadata_manager = TableMetadataManager::new(kvbackend);
-        Self {
-            input,
-            table_metadata_manager,
-        }
-    }
-
-    /// Returns the next table metadata.
-    ///
-    /// This method handles two types of inputs:
-    /// - TableIds: Returns metadata for a specific [`TableId`].
-    /// - TableNames: Returns metadata for a table identified by its full name (catalog.schema.table).
-    ///
-    /// Returns `None` when there are no more tables to process.
-    pub async fn next(&mut self) -> Result<Option<FullTableMetadata>> {
-        match &mut self.input {
-            IteratorInput::TableIds(table_ids) => {
-                if let Some(table_id) = table_ids.pop_front() {
-                    let full_table_metadata = self.get_table_metadata(table_id).await?;
-                    return Ok(Some(full_table_metadata));
-                }
-            }
-
-            IteratorInput::TableNames(table_names) => {
-                if let Some(full_table_name) = table_names.pop_front() {
-                    let table_id = self.get_table_id_by_name(full_table_name).await?;
-                    let full_table_metadata = self.get_table_metadata(table_id).await?;
-                    return Ok(Some(full_table_metadata));
-                }
-            }
-        }
-
-        Ok(None)
-    }
-
-    /// Converts the iterator into a stream of table metadata.
-    pub fn into_stream(mut self) -> impl Stream<Item = Result<FullTableMetadata>> {
-        try_stream!({
-            while let Some(full_table_metadata) = self.next().await? {
-                yield full_table_metadata;
-            }
-        })
-    }
-
-    async fn get_table_id_by_name(
-        &mut self,
-        (catalog_name, schema_name, table_name): (String, String, String),
-    ) -> Result<TableId> {
-        let key = TableNameKey::new(&catalog_name, &schema_name, &table_name);
-        let table_id = self
-            .table_metadata_manager
-            .table_name_manager()
-            .get(key)
-            .await
-            .context(TableMetadataSnafu)?
-            .with_context(|| UnexpectedSnafu {
-                msg: format!(
-                    "Table not found: {}",
-                    format_full_table_name(&catalog_name, &schema_name, &table_name)
-                ),
-            })?
-            .table_id();
-        Ok(table_id)
-    }
-
-    async fn get_table_metadata(&mut self, table_id: TableId) -> Result<FullTableMetadata> {
-        let (table_info, table_route) = self
-            .table_metadata_manager
-            .get_full_table_info(table_id)
-            .await
-            .context(TableMetadataSnafu)?;
-
-        let table_info = table_info
-            .with_context(|| UnexpectedSnafu {
-                msg: format!("Table info not found for table id: {table_id}"),
-            })?
-            .into_inner()
-            .table_info;
-        let table_route = table_route
-            .with_context(|| UnexpectedSnafu {
-                msg: format!("Table route not found for table id: {table_id}"),
-            })?
-            .into_inner();
-
-        Ok(FullTableMetadata {
-            table_id,
-            table_info,
-            table_route,
-        })
-    }
-}
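`into_stream` above wraps a pull-based async `next()` into a `futures::Stream` via `async_stream::try_stream!`. A runnable miniature of the same pattern, where `u32` ids and a `String` error stand in for the real types (assumes the `async-stream`, `futures`, and `tokio` crates):

```rust
use std::collections::VecDeque;

use async_stream::try_stream;
use futures::{Stream, TryStreamExt};

struct Ids(VecDeque<u32>);

impl Ids {
    // The pull-based source: Ok(None) signals exhaustion.
    async fn next(&mut self) -> Result<Option<u32>, String> {
        Ok(self.0.pop_front())
    }

    // try_stream! turns the loop into a Stream of Result items; `?`
    // terminates the stream with the error, `yield` emits Ok values.
    fn into_stream(mut self) -> impl Stream<Item = Result<u32, String>> {
        try_stream! {
            while let Some(id) = self.next().await? {
                yield id;
            }
        }
    }
}

#[tokio::main]
async fn main() -> Result<(), String> {
    let mut stream = Box::pin(Ids(VecDeque::from([1, 2, 3])).into_stream());
    while let Some(id) = stream.try_next().await? {
        println!("table id: {id}");
    }
    Ok(())
}
```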
@@ -211,18 +211,12 @@ impl Database {
                         retries += 1;
                         warn!("Retrying {} times with error = {:?}", retries, err);
                         continue;
-                    } else {
-                        error!(
-                            err; "Failed to send request to grpc handle, retries = {}, not retryable error, aborting",
-                            retries
-                        );
-                        return Err(err.into());
                     }
                 }
                 (Err(err), false) => {
                     error!(
-                        err; "Failed to send request to grpc handle after {} retries",
-                        retries,
+                        "Failed to send request to grpc handle after {} retries, error = {:?}",
+                        retries, err
                     );
                     return Err(err.into());
                 }
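The retry logic in this hunk boils down to: retry while the budget lasts and the error is retryable, otherwise abort with the error. A stripped-down sketch with a plain error type in place of the gRPC error:

```rust
#[derive(Debug)]
struct RpcError {
    retryable: bool,
}

fn send() -> Result<(), RpcError> {
    Err(RpcError { retryable: false })
}

fn send_with_retries(max_retries: u32) -> Result<(), RpcError> {
    let mut retries = 0;
    loop {
        match (send(), retries < max_retries) {
            (Ok(()), _) => return Ok(()),
            // Retry only while the budget lasts and the error is retryable.
            (Err(err), true) if err.retryable => {
                retries += 1;
                eprintln!("retrying {retries} times with error = {err:?}");
            }
            (Err(err), _) => {
                eprintln!("giving up after {retries} retries, error = {err:?}");
                return Err(err);
            }
        }
    }
}

fn main() {
    assert!(send_with_retries(3).is_err());
}
```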
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use api::v1::flow::{DirtyWindowRequest, DirtyWindowRequests, FlowRequest, FlowResponse};
+use api::v1::flow::{FlowRequest, FlowResponse};
 use api::v1::region::InsertRequests;
 use common_error::ext::BoxedError;
 use common_meta::node_manager::Flownode;
@@ -44,16 +44,6 @@ impl Flownode for FlowRequester {
             .map_err(BoxedError::new)
             .context(common_meta::error::ExternalSnafu)
     }
-
-    async fn handle_mark_window_dirty(
-        &self,
-        req: DirtyWindowRequest,
-    ) -> common_meta::error::Result<FlowResponse> {
-        self.handle_mark_window_dirty(req)
-            .await
-            .map_err(BoxedError::new)
-            .context(common_meta::error::ExternalSnafu)
-    }
 }
 
 impl FlowRequester {
@@ -101,20 +91,4 @@ impl FlowRequester {
             .into_inner();
         Ok(response)
     }
-
-    async fn handle_mark_window_dirty(&self, req: DirtyWindowRequest) -> Result<FlowResponse> {
-        let (addr, mut client) = self.client.raw_flow_client()?;
-        let response = client
-            .handle_mark_dirty_time_window(DirtyWindowRequests {
-                requests: vec![req],
-            })
-            .await
-            .or_else(|e| {
-                let code = e.code();
-                let err: crate::error::Error = e.into();
-                Err(BoxedError::new(err)).context(FlowServerSnafu { addr, code })
-            })?
-            .into_inner();
-        Ok(response)
-    }
 }
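Both removed methods wrap a transport error into a domain error while attaching call-site context such as the address. A minimal sketch of that pattern with `snafu`, using `std::io::Error` as the underlying error (the error type and message are illustrative, not the real ones):

```rust
use snafu::{ResultExt, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("failed to call flow server at {addr}"))]
    FlowServer {
        addr: String,
        source: std::io::Error,
    },
}

// The context selector captures `addr` alongside the source error.
fn call(addr: &str) -> Result<(), Error> {
    Err(std::io::Error::other("connection refused")).context(FlowServerSnafu { addr })
}

fn main() {
    println!("{}", call("127.0.0.1:4004").unwrap_err());
}
```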
@@ -163,70 +163,19 @@ impl RegionRequester {
             let _span = tracing_context.attach(common_telemetry::tracing::info_span!(
                 "poll_flight_data_stream"
             ));
-            let mut buffered_message: Option<FlightMessage> = None;
-            let mut stream_ended = false;
-
-            while !stream_ended {
-                // get the next message from the buffered message or read from the flight message stream
-                let flight_message_item = if let Some(msg) = buffered_message.take() {
-                    Some(Ok(msg))
-                } else {
-                    flight_message_stream.next().await
-                };
-
-                let flight_message = match flight_message_item {
-                    Some(Ok(message)) => message,
-                    Some(Err(e)) => {
-                        yield Err(BoxedError::new(e)).context(ExternalSnafu);
-                        break;
-                    }
-                    None => break,
-                };
-
+            while let Some(flight_message) = flight_message_stream.next().await {
+                let flight_message = flight_message
+                    .map_err(BoxedError::new)
+                    .context(ExternalSnafu)?;
                 match flight_message {
                     FlightMessage::RecordBatch(record_batch) => {
-                        let result_to_yield = RecordBatch::try_from_df_record_batch(
+                        yield RecordBatch::try_from_df_record_batch(
                             schema_cloned.clone(),
                             record_batch,
-                        );
-
-                        // get the next message from the stream. normally it should be a metrics message.
-                        if let Some(next_flight_message_result) = flight_message_stream.next().await
-                        {
-                            match next_flight_message_result {
-                                Ok(FlightMessage::Metrics(s)) => {
-                                    let m = serde_json::from_str(&s).ok().map(Arc::new);
-                                    metrics_ref.swap(m);
-                                }
-                                Ok(FlightMessage::RecordBatch(rb)) => {
-                                    // for some reason it's not a metrics message, so we need to buffer this record batch
-                                    // and yield it in the next iteration.
-                                    buffered_message = Some(FlightMessage::RecordBatch(rb));
-                                }
-                                Ok(_) => {
-                                    yield IllegalFlightMessagesSnafu {
-                                        reason: "A RecordBatch message can only be succeeded by a Metrics message or another RecordBatch message"
-                                    }
-                                    .fail()
-                                    .map_err(BoxedError::new)
-                                    .context(ExternalSnafu);
-                                    break;
-                                }
-                                Err(e) => {
-                                    yield Err(BoxedError::new(e)).context(ExternalSnafu);
-                                    break;
-                                }
-                            }
-                        } else {
-                            // the stream has ended
-                            stream_ended = true;
-                        }
-
-                        yield result_to_yield;
+                        )
                     }
                     FlightMessage::Metrics(s) => {
-                        // just a branch in case of some metrics message comes after other things.
                         let m = serde_json::from_str(&s).ok().map(Arc::new);
                         metrics_ref.swap(m);
                         break;
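The deleted branch implements a one-item lookahead: read the next message eagerly, and push it back if it is not the expected kind. That buffering trick in isolation, sketched over a plain iterator standing in for the async stream:

```rust
// One-item lookahead: take the next item either from the push-back buffer
// or from the underlying iterator.
struct Lookahead<I: Iterator> {
    buffered: Option<I::Item>,
    inner: I,
}

impl<I: Iterator> Lookahead<I> {
    fn next(&mut self) -> Option<I::Item> {
        self.buffered.take().or_else(|| self.inner.next())
    }

    // Push one item back; it will be returned by the next call to `next`.
    fn push_back(&mut self, item: I::Item) {
        assert!(self.buffered.is_none());
        self.buffered = Some(item);
    }
}

fn main() {
    let mut s = Lookahead { buffered: None, inner: [1, 2, 3].into_iter() };
    let first = s.next().unwrap();
    s.push_back(first); // not what we expected, so put it back
    assert_eq!(s.next(), Some(1));
    assert_eq!(s.next(), Some(2));
}
```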
@@ -52,6 +52,7 @@ common-version.workspace = true
 common-wal.workspace = true
 datanode.workspace = true
 datatypes.workspace = true
+either = "1.8"
 etcd-client.workspace = true
 file-engine.workspace = true
 flow.workspace = true
@@ -20,11 +20,11 @@ use cmd::error::{InitTlsProviderSnafu, Result};
 use cmd::options::GlobalOptions;
 use cmd::{cli, datanode, flownode, frontend, metasrv, standalone, App};
 use common_base::Plugins;
-use common_version::{verbose_version, version};
+use common_version::version;
 use servers::install_ring_crypto_provider;
 
 #[derive(Parser)]
-#[command(name = "greptime", author, version, long_version = verbose_version(), about)]
+#[command(name = "greptime", author, version, long_version = version(), about)]
 #[command(propagate_version = true)]
 pub(crate) struct Command {
     #[clap(subcommand)]
@@ -143,8 +143,10 @@ async fn start(cli: Command) -> Result<()> {
 }
 
 fn setup_human_panic() {
-    human_panic::setup_panic!(human_panic::Metadata::new("GreptimeDB", version())
-        .homepage("https://github.com/GreptimeTeam/greptimedb/discussions"));
+    human_panic::setup_panic!(
+        human_panic::Metadata::new("GreptimeDB", env!("CARGO_PKG_VERSION"))
+            .homepage("https://github.com/GreptimeTeam/greptimedb/discussions")
+    );
 
     common_telemetry::set_panic_hook();
 }
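The switch from `version()` to `env!("CARGO_PKG_VERSION")` trades a runtime helper for a constant baked in at compile time from `Cargo.toml`:

```rust
// env!("CARGO_PKG_VERSION") is expanded by the compiler from the crate's
// Cargo.toml, so no version plumbing is needed at runtime.
fn main() {
    println!("version: {}", env!("CARGO_PKG_VERSION"));
}
```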
@@ -19,7 +19,7 @@ use catalog::kvbackend::MetaKvBackend;
 use common_base::Plugins;
 use common_meta::cache::LayeredCacheRegistryBuilder;
 use common_telemetry::info;
-use common_version::{short_version, verbose_version};
+use common_version::{short_version, version};
 use datanode::datanode::DatanodeBuilder;
 use datanode::service::DatanodeServiceBuilder;
 use meta_client::MetaClientType;
@@ -67,7 +67,7 @@ impl InstanceBuilder {
             None,
         );
 
-        log_versions(verbose_version(), short_version(), APP_NAME);
+        log_versions(version(), short_version(), APP_NAME);
         create_resource_limit_metrics(APP_NAME);
 
         plugins::setup_datanode_plugins(plugins, &opts.plugins, dn_opts)
@@ -93,7 +93,6 @@ impl InstanceBuilder {
             MetaClientType::Datanode { member_id },
             meta_client_options,
             Some(&plugins),
-            None,
         )
         .await
         .context(MetaClientInitSnafu)?;
@@ -32,7 +32,7 @@ use common_meta::key::flow::FlowMetadataManager;
 use common_meta::key::TableMetadataManager;
 use common_telemetry::info;
 use common_telemetry::logging::{TracingOptions, DEFAULT_LOGGING_DIR};
-use common_version::{short_version, verbose_version};
+use common_version::{short_version, version};
 use flow::{
     get_flow_auth_options, FlownodeBuilder, FlownodeInstance, FlownodeServiceBuilder,
     FrontendClient, FrontendInvoker,
@@ -55,32 +55,14 @@ type FlownodeOptions = GreptimeOptions<flow::FlownodeOptions>;
 pub struct Instance {
     flownode: FlownodeInstance,
 
-    // The components of flownode, which make it easier to expand based
-    // on the components.
-    #[cfg(feature = "enterprise")]
-    components: Components,
-
     // Keep the logging guard to prevent the worker from being dropped.
     _guard: Vec<WorkerGuard>,
 }
 
-#[cfg(feature = "enterprise")]
-pub struct Components {
-    pub catalog_manager: catalog::CatalogManagerRef,
-    pub fe_client: Arc<FrontendClient>,
-    pub kv_backend: common_meta::kv_backend::KvBackendRef,
-}
-
 impl Instance {
-    pub fn new(
-        flownode: FlownodeInstance,
-        #[cfg(feature = "enterprise")] components: Components,
-        guard: Vec<WorkerGuard>,
-    ) -> Self {
+    pub fn new(flownode: FlownodeInstance, guard: Vec<WorkerGuard>) -> Self {
         Self {
             flownode,
-            #[cfg(feature = "enterprise")]
-            components,
             _guard: guard,
         }
     }
@@ -93,11 +75,6 @@ impl Instance {
     pub fn flownode_mut(&mut self) -> &mut FlownodeInstance {
         &mut self.flownode
     }
-
-    #[cfg(feature = "enterprise")]
-    pub fn components(&self) -> &Components {
-        &self.components
-    }
 }
 
 #[async_trait::async_trait]
@@ -279,7 +256,7 @@ impl StartCommand {
             None,
         );
 
-        log_versions(verbose_version(), short_version(), APP_NAME);
+        log_versions(version(), short_version(), APP_NAME);
         create_resource_limit_metrics(APP_NAME);
 
         info!("Flownode start command: {:#?}", self);
@@ -306,7 +283,6 @@ impl StartCommand {
             MetaClientType::Flownode { member_id },
             meta_config,
             None,
-            None,
         )
         .await
         .context(MetaClientInitSnafu)?;
@@ -347,7 +323,6 @@ impl StartCommand {
             cached_meta_backend.clone(),
             layered_cache_registry.clone(),
             None,
-            None,
         );
 
         let table_metadata_manager =
@@ -373,20 +348,19 @@ impl StartCommand {
         let flow_auth_header = get_flow_auth_options(&opts).context(StartFlownodeSnafu)?;
         let frontend_client =
             FrontendClient::from_meta_client(meta_client.clone(), flow_auth_header);
-        let frontend_client = Arc::new(frontend_client);
         let flownode_builder = FlownodeBuilder::new(
             opts.clone(),
             plugins,
             table_metadata_manager,
             catalog_manager.clone(),
             flow_metadata_manager,
-            frontend_client.clone(),
+            Arc::new(frontend_client),
         )
         .with_heartbeat_task(heartbeat_task);
 
         let mut flownode = flownode_builder.build().await.context(StartFlownodeSnafu)?;
         let services = FlownodeServiceBuilder::new(&opts)
-            .with_default_grpc_server(flownode.flownode_server())
+            .with_grpc_server(flownode.flownode_server().clone())
            .enable_http_service()
            .build()
            .context(StartFlownodeSnafu)?;
@@ -418,16 +392,6 @@ impl StartCommand {
             .set_frontend_invoker(invoker)
             .await;
 
-        #[cfg(feature = "enterprise")]
-        let components = Components {
-            catalog_manager: catalog_manager.clone(),
-            fe_client: frontend_client,
-            kv_backend: cached_meta_backend,
-        };
-
-        #[cfg(not(feature = "enterprise"))]
-        return Ok(Instance::new(flownode, guard));
-        #[cfg(feature = "enterprise")]
-        Ok(Instance::new(flownode, components, guard))
+        Ok(Instance::new(flownode, guard))
     }
 }
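The removed `Components` plumbing illustrates gating struct fields and constructor parameters on a Cargo feature. A compilable miniature of that pattern, with `String` fields standing in for the real component types:

```rust
// Build with or without `--features enterprise`; the field, the parameter,
// and the struct-literal entry all disappear together.
pub struct Instance {
    flownode: String,
    #[cfg(feature = "enterprise")]
    components: Components,
}

#[cfg(feature = "enterprise")]
pub struct Components {
    pub catalog_manager: String,
}

impl Instance {
    pub fn new(
        flownode: String,
        #[cfg(feature = "enterprise")] components: Components,
    ) -> Self {
        Self {
            flownode,
            #[cfg(feature = "enterprise")]
            components,
        }
    }
}

fn main() {
    let _instance = Instance::new(
        "flownode-0".to_string(),
        #[cfg(feature = "enterprise")]
        Components { catalog_manager: "catalog".to_string() },
    );
}
```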
@@ -20,7 +20,6 @@ use async_trait::async_trait;
 use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
 use catalog::information_extension::DistributedInformationExtension;
 use catalog::kvbackend::{CachedKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
-use catalog::process_manager::ProcessManager;
 use clap::Parser;
 use client::client_manager::NodeClients;
 use common_base::Plugins;
@@ -33,13 +32,12 @@ use common_meta::heartbeat::handler::HandlerGroupExecutor;
 use common_telemetry::info;
 use common_telemetry::logging::{TracingOptions, DEFAULT_LOGGING_DIR};
 use common_time::timezone::set_default_timezone;
-use common_version::{short_version, verbose_version};
+use common_version::{short_version, version};
 use frontend::frontend::Frontend;
 use frontend::heartbeat::HeartbeatTask;
 use frontend::instance::builder::FrontendBuilder;
 use frontend::server::Services;
 use meta_client::{MetaClientOptions, MetaClientType};
-use servers::addrs;
 use servers::export_metrics::ExportMetricsTask;
 use servers::tls::{TlsMode, TlsOption};
 use snafu::{OptionExt, ResultExt};
@@ -282,7 +280,7 @@ impl StartCommand {
             opts.component.slow_query.as_ref(),
         );
 
-        log_versions(verbose_version(), short_version(), APP_NAME);
+        log_versions(version(), short_version(), APP_NAME);
         create_resource_limit_metrics(APP_NAME);
 
         info!("Frontend start command: {:#?}", self);
@@ -313,7 +311,6 @@ impl StartCommand {
             MetaClientType::Frontend,
             meta_client_options,
             Some(&plugins),
-            None,
         )
         .await
         .context(error::MetaClientInitSnafu)?;
@@ -345,17 +342,11 @@ impl StartCommand {
 
         let information_extension =
             Arc::new(DistributedInformationExtension::new(meta_client.clone()));
-
-        let process_manager = Arc::new(ProcessManager::new(
-            addrs::resolve_addr(&opts.grpc.bind_addr, Some(&opts.grpc.server_addr)),
-            Some(meta_client.clone()),
-        ));
         let catalog_manager = KvBackendCatalogManager::new(
             information_extension,
             cached_meta_backend.clone(),
             layered_cache_registry.clone(),
             None,
-            Some(process_manager.clone()),
         );
 
         let executor = HandlerGroupExecutor::new(vec![
@@ -392,7 +383,6 @@ impl StartCommand {
             catalog_manager,
             Arc::new(client),
             meta_client,
-            process_manager,
         )
         .with_plugin(plugins.clone())
         .with_local_cache_invalidator(layered_cache_registry)
@@ -112,7 +112,7 @@ pub trait App: Send {
 pub fn log_versions(version: &str, short_version: &str, app: &str) {
     // Report app version as gauge.
     APP_VERSION
-        .with_label_values(&[common_version::version(), short_version, app])
+        .with_label_values(&[env!("CARGO_PKG_VERSION"), short_version, app])
         .inc();
 
     // Log version and argument flags.
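`log_versions` reports the running build as a labeled gauge. A sketch of that metric with the `prometheus` crate (the metric name and label values are illustrative; assumes the `prometheus` and `once_cell` crates):

```rust
use once_cell::sync::Lazy;
use prometheus::{register_int_gauge_vec, IntGaugeVec};

// A gauge whose labels carry the build metadata; the value itself is just 1.
static APP_VERSION: Lazy<IntGaugeVec> = Lazy::new(|| {
    register_int_gauge_vec!(
        "app_version",
        "The app version, labeled with build metadata.",
        &["version", "short_version", "app"]
    )
    .unwrap()
});

fn main() {
    APP_VERSION
        .with_label_values(&[env!("CARGO_PKG_VERSION"), "deadbeef", "demo"])
        .inc();
    println!("{:?}", prometheus::gather());
}
```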
@@ -22,7 +22,7 @@ use common_base::Plugins;
 use common_config::Configurable;
 use common_telemetry::info;
 use common_telemetry::logging::{TracingOptions, DEFAULT_LOGGING_DIR};
-use common_version::{short_version, verbose_version};
+use common_version::{short_version, version};
 use meta_srv::bootstrap::MetasrvInstance;
 use meta_srv::metasrv::BackendImpl;
 use snafu::ResultExt;
@@ -54,10 +54,6 @@ impl Instance {
     pub fn get_inner(&self) -> &MetasrvInstance {
         &self.instance
     }
-
-    pub fn mut_inner(&mut self) -> &mut MetasrvInstance {
-        &mut self.instance
-    }
 }
 
 #[async_trait]
@@ -324,7 +320,7 @@ impl StartCommand {
             None,
         );
 
-        log_versions(verbose_version(), short_version(), APP_NAME);
+        log_versions(version(), short_version(), APP_NAME);
         create_resource_limit_metrics(APP_NAME);
 
         info!("Metasrv start command: {:#?}", self);
@@ -345,7 +341,7 @@ impl StartCommand {
             .context(error::BuildMetaServerSnafu)?;
         let metasrv = builder.build().await.context(error::BuildMetaServerSnafu)?;
 
-        let instance = MetasrvInstance::new(metasrv)
+        let instance = MetasrvInstance::new(opts, plugins, metasrv)
             .await
             .context(error::BuildMetaServerSnafu)?;
 
@@ -21,7 +21,6 @@ use async_trait::async_trait;
 use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
 use catalog::information_schema::InformationExtension;
 use catalog::kvbackend::KvBackendCatalogManager;
-use catalog::process_manager::ProcessManager;
 use clap::Parser;
 use client::api::v1::meta::RegionRole;
 use common_base::readable_size::ReadableSize;
@@ -30,18 +29,21 @@ use common_catalog::consts::{MIN_USER_FLOW_ID, MIN_USER_TABLE_ID};
 use common_config::{metadata_store_dir, Configurable, KvBackendConfig};
 use common_error::ext::BoxedError;
 use common_meta::cache::LayeredCacheRegistryBuilder;
+use common_meta::cache_invalidator::CacheInvalidatorRef;
 use common_meta::cluster::{NodeInfo, NodeStatus};
 use common_meta::datanode::RegionStat;
-use common_meta::ddl::flow_meta::FlowMetadataAllocator;
-use common_meta::ddl::table_meta::TableMetadataAllocator;
-use common_meta::ddl::{DdlContext, NoopRegionFailureDetectorControl};
+use common_meta::ddl::flow_meta::{FlowMetadataAllocator, FlowMetadataAllocatorRef};
+use common_meta::ddl::table_meta::{TableMetadataAllocator, TableMetadataAllocatorRef};
+use common_meta::ddl::{DdlContext, NoopRegionFailureDetectorControl, ProcedureExecutorRef};
 use common_meta::ddl_manager::DdlManager;
+#[cfg(feature = "enterprise")]
+use common_meta::ddl_manager::TriggerDdlManagerRef;
 use common_meta::key::flow::flow_state::FlowStat;
-use common_meta::key::flow::FlowMetadataManager;
+use common_meta::key::flow::{FlowMetadataManager, FlowMetadataManagerRef};
 use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
 use common_meta::kv_backend::KvBackendRef;
+use common_meta::node_manager::NodeManagerRef;
 use common_meta::peer::Peer;
-use common_meta::procedure_executor::LocalProcedureExecutor;
 use common_meta::region_keeper::MemoryRegionKeeper;
 use common_meta::region_registry::LeaderRegionRegistry;
 use common_meta::sequence::SequenceBuilder;
@@ -52,7 +54,7 @@ use common_telemetry::logging::{
     LoggingOptions, SlowQueryOptions, TracingOptions, DEFAULT_LOGGING_DIR,
 };
 use common_time::timezone::set_default_timezone;
-use common_version::{short_version, verbose_version};
+use common_version::{short_version, version};
 use common_wal::config::DatanodeWalConfig;
 use datanode::config::{DatanodeOptions, ProcedureConfig, RegionEngineConfig, StorageConfig};
 use datanode::datanode::{Datanode, DatanodeBuilder};
@@ -467,7 +469,7 @@ impl StartCommand {
             opts.component.slow_query.as_ref(),
         );
 
-        log_versions(verbose_version(), short_version(), APP_NAME);
+        log_versions(version(), short_version(), APP_NAME);
         create_resource_limit_metrics(APP_NAME);
 
         info!("Standalone start command: {:#?}", self);
@@ -524,14 +526,11 @@ impl StartCommand {
             datanode.region_server(),
             procedure_manager.clone(),
         ));
 
-        let process_manager = Arc::new(ProcessManager::new(opts.grpc.server_addr.clone(), None));
         let catalog_manager = KvBackendCatalogManager::new(
             information_extension.clone(),
             kv_backend.clone(),
             layered_cache_registry.clone(),
             Some(procedure_manager.clone()),
-            Some(process_manager.clone()),
         );
 
         let table_metadata_manager =
@@ -591,39 +590,28 @@ impl StartCommand {
             .await
             .context(error::BuildWalOptionsAllocatorSnafu)?;
         let wal_options_allocator = Arc::new(wal_options_allocator);
-        let table_metadata_allocator = Arc::new(TableMetadataAllocator::new(
+        let table_meta_allocator = Arc::new(TableMetadataAllocator::new(
             table_id_sequence,
             wal_options_allocator.clone(),
         ));
-        let flow_metadata_allocator = Arc::new(FlowMetadataAllocator::with_noop_peer_allocator(
+        let flow_meta_allocator = Arc::new(FlowMetadataAllocator::with_noop_peer_allocator(
             flow_id_sequence,
         ));
 
-        let ddl_context = DdlContext {
-            node_manager: node_manager.clone(),
-            cache_invalidator: layered_cache_registry.clone(),
-            memory_region_keeper: Arc::new(MemoryRegionKeeper::default()),
-            leader_region_registry: Arc::new(LeaderRegionRegistry::default()),
-            table_metadata_manager: table_metadata_manager.clone(),
-            table_metadata_allocator: table_metadata_allocator.clone(),
-            flow_metadata_manager: flow_metadata_manager.clone(),
-            flow_metadata_allocator: flow_metadata_allocator.clone(),
-            region_failure_detector_controller: Arc::new(NoopRegionFailureDetectorControl),
-        };
-
-        let ddl_manager = DdlManager::try_new(ddl_context, procedure_manager.clone(), true)
-            .context(error::InitDdlManagerSnafu)?;
         #[cfg(feature = "enterprise")]
-        let ddl_manager = {
-            let trigger_ddl_manager: Option<common_meta::ddl_manager::TriggerDdlManagerRef> =
-                plugins.get();
-            ddl_manager.with_trigger_ddl_manager(trigger_ddl_manager)
-        };
-
-        let procedure_executor = Arc::new(LocalProcedureExecutor::new(
-            Arc::new(ddl_manager),
+        let trigger_ddl_manager: Option<TriggerDdlManagerRef> = plugins.get();
+        let ddl_task_executor = Self::create_ddl_task_executor(
             procedure_manager.clone(),
-        ));
+            node_manager.clone(),
+            layered_cache_registry.clone(),
+            table_metadata_manager,
+            table_meta_allocator,
+            flow_metadata_manager,
+            flow_meta_allocator,
+            #[cfg(feature = "enterprise")]
+            trigger_ddl_manager,
+        )
+        .await?;
 
         let fe_instance = FrontendBuilder::new(
             fe_opts.clone(),
@@ -631,8 +619,7 @@ impl StartCommand {
             layered_cache_registry.clone(),
             catalog_manager.clone(),
             node_manager.clone(),
-            procedure_executor.clone(),
-            process_manager,
+            ddl_task_executor.clone(),
         )
         .with_plugin(plugins.clone())
         .try_build()
@@ -656,11 +643,11 @@ impl StartCommand {
             catalog_manager.clone(),
             kv_backend.clone(),
             layered_cache_registry.clone(),
-            procedure_executor,
+            ddl_task_executor.clone(),
             node_manager,
         )
         .await
-        .context(StartFlownodeSnafu)?;
+        .context(error::StartFlownodeSnafu)?;
         flow_streaming_engine.set_frontend_invoker(invoker).await;
 
         let export_metrics_task = ExportMetricsTask::try_new(&opts.export_metrics, Some(&plugins))
@@ -687,6 +674,41 @@ impl StartCommand {
         })
     }
 
+    #[allow(clippy::too_many_arguments)]
+    pub async fn create_ddl_task_executor(
+        procedure_manager: ProcedureManagerRef,
+        node_manager: NodeManagerRef,
+        cache_invalidator: CacheInvalidatorRef,
+        table_metadata_manager: TableMetadataManagerRef,
+        table_metadata_allocator: TableMetadataAllocatorRef,
+        flow_metadata_manager: FlowMetadataManagerRef,
+        flow_metadata_allocator: FlowMetadataAllocatorRef,
+        #[cfg(feature = "enterprise")] trigger_ddl_manager: Option<TriggerDdlManagerRef>,
+    ) -> Result<ProcedureExecutorRef> {
+        let procedure_executor: ProcedureExecutorRef = Arc::new(
+            DdlManager::try_new(
+                DdlContext {
+                    node_manager,
+                    cache_invalidator,
+                    memory_region_keeper: Arc::new(MemoryRegionKeeper::default()),
+                    leader_region_registry: Arc::new(LeaderRegionRegistry::default()),
+                    table_metadata_manager,
+                    table_metadata_allocator,
+                    flow_metadata_manager,
+                    flow_metadata_allocator,
+                    region_failure_detector_controller: Arc::new(NoopRegionFailureDetectorControl),
+                },
+                procedure_manager,
+                true,
+                #[cfg(feature = "enterprise")]
+                trigger_ddl_manager,
+            )
+            .context(error::InitDdlManagerSnafu)?,
+        );
+
+        Ok(procedure_executor)
+    }
+
     pub async fn create_table_metadata_manager(
         kv_backend: KvBackendRef,
     ) -> Result<TableMetadataManagerRef> {
@@ -792,7 +814,6 @@ impl InformationExtension for StandaloneInformationExtension {
                     region_manifest: region_stat.manifest.into(),
                     data_topic_latest_entry_id: region_stat.data_topic_latest_entry_id,
                     metadata_topic_latest_entry_id: region_stat.metadata_topic_latest_entry_id,
-                    written_bytes: region_stat.written_bytes,
                 }
             })
             .collect::<Vec<_>>();
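The new `create_ddl_task_executor` extracts a long inline construction into a helper returning a shared trait object that several builders can clone. The shape of that refactor in miniature (trait and type names are illustrative):

```rust
use std::sync::Arc;

trait ProcedureExecutor {
    fn submit(&self, name: &str);
}

struct DdlManager;

impl ProcedureExecutor for DdlManager {
    fn submit(&self, name: &str) {
        println!("executing ddl procedure: {name}");
    }
}

// Both the frontend and the flownode builder can now clone this one Arc
// instead of each assembling the manager themselves.
fn create_ddl_task_executor() -> Arc<dyn ProcedureExecutor> {
    Arc::new(DdlManager)
}

fn main() {
    let executor = create_ddl_task_executor();
    let for_frontend = executor.clone();
    for_frontend.submit("create table");
}
```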
@@ -12,6 +12,7 @@
|
|||||||
// See the License for the specific language governing permissions and
|
// See the License for the specific language governing permissions and
|
||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
|
use std::path::Path;
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
|
|
||||||
use cmd::options::GreptimeOptions;
|
use cmd::options::GreptimeOptions;
|
||||||
@@ -57,7 +58,12 @@ fn test_load_datanode_example_config() {
|
|||||||
metadata_cache_tti: Duration::from_secs(300),
|
metadata_cache_tti: Duration::from_secs(300),
|
||||||
}),
|
}),
|
||||||
wal: DatanodeWalConfig::RaftEngine(RaftEngineConfig {
|
wal: DatanodeWalConfig::RaftEngine(RaftEngineConfig {
|
||||||
dir: Some(format!("{}/{}", DEFAULT_DATA_HOME, WAL_DIR)),
|
dir: Some(
|
||||||
|
Path::new(DEFAULT_DATA_HOME)
|
||||||
|
.join(WAL_DIR)
|
||||||
|
.to_string_lossy()
|
||||||
|
.to_string(),
|
||||||
|
),
|
||||||
sync_period: Some(Duration::from_secs(10)),
|
sync_period: Some(Duration::from_secs(10)),
|
||||||
recovery_parallelism: 2,
|
recovery_parallelism: 2,
|
||||||
..Default::default()
|
..Default::default()
|
||||||
@@ -80,7 +86,10 @@ fn test_load_datanode_example_config() {
|
|||||||
],
|
],
|
||||||
logging: LoggingOptions {
|
logging: LoggingOptions {
|
||||||
level: Some("info".to_string()),
|
level: Some("info".to_string()),
|
||||||
dir: format!("{}/{}", DEFAULT_DATA_HOME, DEFAULT_LOGGING_DIR),
|
dir: Path::new(DEFAULT_DATA_HOME)
|
||||||
|
.join(DEFAULT_LOGGING_DIR)
|
||||||
|
.to_string_lossy()
|
||||||
|
.to_string(),
|
||||||
otlp_endpoint: Some(DEFAULT_OTLP_ENDPOINT.to_string()),
|
otlp_endpoint: Some(DEFAULT_OTLP_ENDPOINT.to_string()),
|
||||||
tracing_sample_ratio: Some(Default::default()),
|
tracing_sample_ratio: Some(Default::default()),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
@@ -123,7 +132,10 @@ fn test_load_frontend_example_config() {
|
|||||||
}),
|
}),
|
||||||
logging: LoggingOptions {
|
logging: LoggingOptions {
|
||||||
level: Some("info".to_string()),
|
level: Some("info".to_string()),
|
||||||
dir: format!("{}/{}", DEFAULT_DATA_HOME, DEFAULT_LOGGING_DIR),
|
dir: Path::new(DEFAULT_DATA_HOME)
|
||||||
|
.join(DEFAULT_LOGGING_DIR)
|
||||||
|
.to_string_lossy()
|
||||||
|
.to_string(),
|
||||||
otlp_endpoint: Some(DEFAULT_OTLP_ENDPOINT.to_string()),
|
otlp_endpoint: Some(DEFAULT_OTLP_ENDPOINT.to_string()),
|
||||||
tracing_sample_ratio: Some(Default::default()),
|
tracing_sample_ratio: Some(Default::default()),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
@@ -170,7 +182,10 @@ fn test_load_metasrv_example_config() {
                 ..Default::default()
             },
             logging: LoggingOptions {
-                dir: format!("{}/{}", DEFAULT_DATA_HOME, DEFAULT_LOGGING_DIR),
+                dir: Path::new(DEFAULT_DATA_HOME)
+                    .join(DEFAULT_LOGGING_DIR)
+                    .to_string_lossy()
+                    .to_string(),
                 level: Some("info".to_string()),
                 otlp_endpoint: Some(DEFAULT_OTLP_ENDPOINT.to_string()),
                 tracing_sample_ratio: Some(Default::default()),
@@ -205,7 +220,12 @@ fn test_load_standalone_example_config() {
         component: StandaloneOptions {
             default_timezone: Some("UTC".to_string()),
             wal: DatanodeWalConfig::RaftEngine(RaftEngineConfig {
-                dir: Some(format!("{}/{}", DEFAULT_DATA_HOME, WAL_DIR)),
+                dir: Some(
+                    Path::new(DEFAULT_DATA_HOME)
+                        .join(WAL_DIR)
+                        .to_string_lossy()
+                        .to_string(),
+                ),
                 sync_period: Some(Duration::from_secs(10)),
                 recovery_parallelism: 2,
                 ..Default::default()
@@ -228,7 +248,10 @@ fn test_load_standalone_example_config() {
             },
             logging: LoggingOptions {
                 level: Some("info".to_string()),
-                dir: format!("{}/{}", DEFAULT_DATA_HOME, DEFAULT_LOGGING_DIR),
+                dir: Path::new(DEFAULT_DATA_HOME)
+                    .join(DEFAULT_LOGGING_DIR)
+                    .to_string_lossy()
+                    .to_string(),
                 otlp_endpoint: Some(DEFAULT_OTLP_ENDPOINT.to_string()),
                 tracing_sample_ratio: Some(Default::default()),
                 ..Default::default()
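Note: the hunks above all make the same substitution, replacing `format!("{}/{}", ...)` string concatenation with `std::path::Path::join`. A minimal sketch of why the two agree on Unix-style paths while the `Path` version stays separator-aware; the constant values here are placeholders, not the crate's actual defaults:

    use std::path::Path;

    fn main() {
        // Placeholder values; the real DEFAULT_DATA_HOME / WAL_DIR constants
        // live in the crate under test.
        let data_home = "/tmp/greptimedb";
        let wal_dir = "wal";

        let concatenated = format!("{}/{}", data_home, wal_dir);
        let joined = Path::new(data_home)
            .join(wal_dir)
            .to_string_lossy()
            .to_string();

        // On Unix both print "/tmp/greptimedb/wal"; Path::join additionally
        // uses the platform's separator on other targets.
        println!("{concatenated}\n{joined}");
    }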
@@ -1,240 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! [CancellationHandle] is used to compose with manual implementation of [futures::future::Future]
-//! or [futures::stream::Stream] to facilitate cancellation.
-//! See example in [frontend::stream_wrapper::CancellableStreamWrapper] and [CancellableFuture].
-
-use std::fmt::{Debug, Display, Formatter};
-use std::future::Future;
-use std::pin::Pin;
-use std::sync::atomic::{AtomicBool, Ordering};
-use std::sync::Arc;
-use std::task::{Context, Poll};
-
-use futures::task::AtomicWaker;
-use pin_project::pin_project;
-
-#[derive(Default)]
-pub struct CancellationHandle {
-    waker: AtomicWaker,
-    cancelled: AtomicBool,
-}
-
-impl Debug for CancellationHandle {
-    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
-        f.debug_struct("CancellationHandle")
-            .field("cancelled", &self.is_cancelled())
-            .finish()
-    }
-}
-
-impl CancellationHandle {
-    pub fn waker(&self) -> &AtomicWaker {
-        &self.waker
-    }
-
-    /// Cancels a future or stream.
-    pub fn cancel(&self) {
-        if self
-            .cancelled
-            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
-            .is_ok()
-        {
-            self.waker.wake();
-        }
-    }
-
-    /// Is this handle cancelled.
-    pub fn is_cancelled(&self) -> bool {
-        self.cancelled.load(Ordering::Relaxed)
-    }
-}
-
-#[pin_project]
-#[derive(Debug, Clone)]
-pub struct CancellableFuture<T> {
-    #[pin]
-    fut: T,
-    handle: Arc<CancellationHandle>,
-}
-
-impl<T> CancellableFuture<T> {
-    pub fn new(fut: T, handle: Arc<CancellationHandle>) -> Self {
-        Self { fut, handle }
-    }
-}
-
-impl<T> Future for CancellableFuture<T>
-where
-    T: Future,
-{
-    type Output = Result<T::Output, Cancelled>;
-
-    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
-        let this = self.as_mut().project();
-        // Check if the task has been aborted
-        if this.handle.is_cancelled() {
-            return Poll::Ready(Err(Cancelled));
-        }
-
-        if let Poll::Ready(x) = this.fut.poll(cx) {
-            return Poll::Ready(Ok(x));
-        }
-
-        this.handle.waker().register(cx.waker());
-        if this.handle.is_cancelled() {
-            return Poll::Ready(Err(Cancelled));
-        }
-        Poll::Pending
-    }
-}
-
-#[derive(Copy, Clone, Debug)]
-pub struct Cancelled;
-
-impl Display for Cancelled {
-    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
-        write!(f, "Future has been cancelled")
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use std::sync::Arc;
-    use std::time::Duration;
-
-    use tokio::time::{sleep, timeout};
-
-    use crate::cancellation::{CancellableFuture, CancellationHandle, Cancelled};
-
-    #[tokio::test]
-    async fn test_cancellable_future_completes_normally() {
-        let handle = Arc::new(CancellationHandle::default());
-        let future = async { 42 };
-        let cancellable = CancellableFuture::new(future, handle);
-
-        let result = cancellable.await;
-        assert!(result.is_ok());
-        assert_eq!(result.unwrap(), 42);
-    }
-
-    #[tokio::test]
-    async fn test_cancellable_future_cancelled_before_start() {
-        let handle = Arc::new(CancellationHandle::default());
-        handle.cancel();
-
-        let future = async { 42 };
-        let cancellable = CancellableFuture::new(future, handle);
-
-        let result = cancellable.await;
-        assert!(result.is_err());
-        assert!(matches!(result.unwrap_err(), Cancelled));
-    }
-
-    #[tokio::test]
-    async fn test_cancellable_future_cancelled_during_execution() {
-        let handle = Arc::new(CancellationHandle::default());
-        let handle_clone = handle.clone();
-
-        // Create a future that sleeps for a long time
-        let future = async {
-            sleep(Duration::from_secs(10)).await;
-            42
-        };
-        let cancellable = CancellableFuture::new(future, handle);
-
-        // Cancel the future after a short delay
-        tokio::spawn(async move {
-            sleep(Duration::from_millis(50)).await;
-            handle_clone.cancel();
-        });
-
-        let result = cancellable.await;
-        assert!(result.is_err());
-        assert!(matches!(result.unwrap_err(), Cancelled));
-    }
-
-    #[tokio::test]
-    async fn test_cancellable_future_completes_before_cancellation() {
-        let handle = Arc::new(CancellationHandle::default());
-        let handle_clone = handle.clone();
-
-        // Create a future that completes quickly
-        let future = async {
-            sleep(Duration::from_millis(10)).await;
-            42
-        };
-        let cancellable = CancellableFuture::new(future, handle);
-
-        // Try to cancel after the future should have completed
-        tokio::spawn(async move {
-            sleep(Duration::from_millis(100)).await;
-            handle_clone.cancel();
-        });
-
-        let result = cancellable.await;
-        assert!(result.is_ok());
-        assert_eq!(result.unwrap(), 42);
-    }
-
-    #[tokio::test]
-    async fn test_cancellation_handle_is_cancelled() {
-        let handle = CancellationHandle::default();
-        assert!(!handle.is_cancelled());
-
-        handle.cancel();
-        assert!(handle.is_cancelled());
-    }
-
-    #[tokio::test]
-    async fn test_multiple_cancellable_futures_with_same_handle() {
-        let handle = Arc::new(CancellationHandle::default());
-
-        let future1 = CancellableFuture::new(async { 1 }, handle.clone());
-        let future2 = CancellableFuture::new(async { 2 }, handle.clone());
-
-        // Cancel before starting
-        handle.cancel();
-
-        let (result1, result2) = tokio::join!(future1, future2);
-
-        assert!(result1.is_err());
-        assert!(result2.is_err());
-        assert!(matches!(result1.unwrap_err(), Cancelled));
-        assert!(matches!(result2.unwrap_err(), Cancelled));
-    }
-
-    #[tokio::test]
-    async fn test_cancellable_future_with_timeout() {
-        let handle = Arc::new(CancellationHandle::default());
-        let future = async {
-            sleep(Duration::from_secs(1)).await;
-            42
-        };
-        let cancellable = CancellableFuture::new(future, handle.clone());
-
-        // Use timeout to ensure the test doesn't hang
-        let result = timeout(Duration::from_millis(100), cancellable).await;
-
-        // Should timeout because the future takes 1 second but we timeout after 100ms
-        assert!(result.is_err());
-    }
-
-    #[tokio::test]
-    async fn test_cancelled_display() {
-        let cancelled = Cancelled;
-        assert_eq!(format!("{}", cancelled), "Future has been cancelled");
-    }
-}
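Note on the deleted `poll` above: the cancel flag is checked once before polling and again after `AtomicWaker::register`. The second check closes the window where `cancel()` runs between the inner poll and the waker registration; without it that wake-up would be lost and the future could hang. A condensed sketch of the same ordering, assuming only the `futures` crate (as the deleted file did); `Handle` here is a stand-in for the deleted `CancellationHandle`:

    use std::future::Future;
    use std::pin::Pin;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::task::{Context, Poll};

    use futures::task::AtomicWaker;

    pub struct Handle {
        pub waker: AtomicWaker,
        pub cancelled: AtomicBool,
    }

    // Poll order that avoids a lost wakeup: fast-path the cancel flag, poll
    // the inner future, register the current task's waker, then re-check the
    // flag. If cancel() fired between the poll and register(), the re-check
    // sees it; if it fires after, wake() finds the freshly registered waker.
    pub fn poll_cancellable<F: Future + Unpin>(
        fut: &mut F,
        handle: &Handle,
        cx: &mut Context<'_>,
    ) -> Poll<Result<F::Output, ()>> {
        if handle.cancelled.load(Ordering::Relaxed) {
            return Poll::Ready(Err(()));
        }
        if let Poll::Ready(v) = Pin::new(fut).poll(cx) {
            return Poll::Ready(Ok(v));
        }
        handle.waker.register(cx.waker());
        if handle.cancelled.load(Ordering::Relaxed) {
            return Poll::Ready(Err(()));
        }
        Poll::Pending
    }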
@@ -14,7 +14,6 @@
 
 pub mod bit_vec;
 pub mod bytes;
-pub mod cancellation;
 pub mod plugins;
 pub mod range_read;
 #[allow(clippy::all)]
@@ -102,8 +102,6 @@ pub const INFORMATION_SCHEMA_FLOW_TABLE_ID: u32 = 33;
 pub const INFORMATION_SCHEMA_PROCEDURE_INFO_TABLE_ID: u32 = 34;
 /// id for information_schema.region_statistics
 pub const INFORMATION_SCHEMA_REGION_STATISTICS_TABLE_ID: u32 = 35;
-/// id for information_schema.process_list
-pub const INFORMATION_SCHEMA_PROCESS_LIST_TABLE_ID: u32 = 36;
 
 // ----- End of information_schema tables -----
 
@@ -7,14 +7,5 @@ license.workspace = true
 [dependencies]
 async-trait.workspace = true
 common-error.workspace = true
-common-grpc.workspace = true
 common-macro.workspace = true
-common-meta.workspace = true
-greptime-proto.workspace = true
-meta-client.workspace = true
-session.workspace = true
 snafu.workspace = true
-tonic.workspace = true
-
-[dev-dependencies]
-tokio.workspace = true
@@ -27,35 +27,6 @@ pub enum Error {
         location: Location,
         source: BoxedError,
     },
-
-    #[snafu(display("Failed to list nodes from metasrv"))]
-    Meta {
-        source: Box<meta_client::error::Error>,
-        #[snafu(implicit)]
-        location: Location,
-    },
-
-    #[snafu(display("Failed to parse process id: {}", s))]
-    ParseProcessId {
-        s: String,
-        #[snafu(implicit)]
-        location: Location,
-    },
-
-    #[snafu(display("Failed to invoke frontend service"))]
-    InvokeFrontend {
-        #[snafu(source)]
-        error: tonic::Status,
-        #[snafu(implicit)]
-        location: Location,
-    },
-
-    #[snafu(display("Failed to invoke list process service"))]
-    CreateChannel {
-        source: common_grpc::error::Error,
-        #[snafu(implicit)]
-        location: Location,
-    },
 }
 
 pub type Result<T> = std::result::Result<T, Error>;
@@ -65,10 +36,6 @@ impl ErrorExt for Error {
         use Error::*;
         match self {
             External { source, .. } => source.status_code(),
-            Meta { source, .. } => source.status_code(),
-            ParseProcessId { .. } => StatusCode::InvalidArguments,
-            InvokeFrontend { .. } => StatusCode::Unexpected,
-            CreateChannel { source, .. } => source.status_code(),
         }
     }
 
@@ -12,42 +12,4 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use std::fmt::{Display, Formatter};
-use std::str::FromStr;
-
-use snafu::OptionExt;
-
 pub mod error;
-pub mod selector;
-pub mod slow_query_event;
-
-#[derive(Debug, Clone, Eq, PartialEq)]
-pub struct DisplayProcessId {
-    pub server_addr: String,
-    pub id: u32,
-}
-
-impl Display for DisplayProcessId {
-    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
-        write!(f, "{}/{}", self.server_addr, self.id)
-    }
-}
-
-impl TryFrom<&str> for DisplayProcessId {
-    type Error = error::Error;
-
-    fn try_from(value: &str) -> Result<Self, Self::Error> {
-        let mut split = value.split('/');
-        let server_addr = split
-            .next()
-            .context(error::ParseProcessIdSnafu { s: value })?
-            .to_string();
-        let id = split
-            .next()
-            .context(error::ParseProcessIdSnafu { s: value })?;
-        let id = u32::from_str(id)
-            .ok()
-            .context(error::ParseProcessIdSnafu { s: value })?;
-        Ok(DisplayProcessId { server_addr, id })
-    }
-}
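The removed `TryFrom<&str>` above parses a `server_addr/id` pair by splitting on `'/'`. A std-only sketch of the same parse with the snafu error plumbing stripped out (returning None where the original returned an error):

    /// Parses "server_addr/id" into its two parts, mirroring the deleted
    /// TryFrom<&str> impl for DisplayProcessId.
    fn parse_process_id(value: &str) -> Option<(String, u32)> {
        let mut split = value.split('/');
        let server_addr = split.next()?.to_string();
        let id: u32 = split.next()?.parse().ok()?;
        Some((server_addr, id))
    }

    fn main() {
        assert_eq!(
            parse_process_id("127.0.0.1:4001/7"),
            Some(("127.0.0.1:4001".to_string(), 7))
        );
        assert_eq!(parse_process_id("no-slash"), None);
    }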
@@ -1,113 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::fmt::Debug;
-use std::time::Duration;
-
-use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
-use common_meta::cluster::{ClusterInfo, NodeInfo, Role};
-use greptime_proto::v1::frontend::{
-    frontend_client, KillProcessRequest, KillProcessResponse, ListProcessRequest,
-    ListProcessResponse,
-};
-use meta_client::MetaClientRef;
-use snafu::ResultExt;
-use tonic::Response;
-
-use crate::error;
-use crate::error::{MetaSnafu, Result};
-
-pub type FrontendClientPtr = Box<dyn FrontendClient>;
-
-#[async_trait::async_trait]
-pub trait FrontendClient: Send + Debug {
-    async fn list_process(&mut self, req: ListProcessRequest) -> Result<ListProcessResponse>;
-
-    async fn kill_process(&mut self, req: KillProcessRequest) -> Result<KillProcessResponse>;
-}
-
-#[async_trait::async_trait]
-impl FrontendClient for frontend_client::FrontendClient<tonic::transport::channel::Channel> {
-    async fn list_process(&mut self, req: ListProcessRequest) -> Result<ListProcessResponse> {
-        frontend_client::FrontendClient::<tonic::transport::channel::Channel>::list_process(
-            self, req,
-        )
-        .await
-        .context(error::InvokeFrontendSnafu)
-        .map(Response::into_inner)
-    }
-
-    async fn kill_process(&mut self, req: KillProcessRequest) -> Result<KillProcessResponse> {
-        frontend_client::FrontendClient::<tonic::transport::channel::Channel>::kill_process(
-            self, req,
-        )
-        .await
-        .context(error::InvokeFrontendSnafu)
-        .map(Response::into_inner)
-    }
-}
-
-#[async_trait::async_trait]
-pub trait FrontendSelector {
-    async fn select<F>(&self, predicate: F) -> Result<Vec<FrontendClientPtr>>
-    where
-        F: Fn(&NodeInfo) -> bool + Send;
-}
-
-#[derive(Debug, Clone)]
-pub struct MetaClientSelector {
-    meta_client: MetaClientRef,
-    channel_manager: ChannelManager,
-}
-
-#[async_trait::async_trait]
-impl FrontendSelector for MetaClientSelector {
-    async fn select<F>(&self, predicate: F) -> Result<Vec<FrontendClientPtr>>
-    where
-        F: Fn(&NodeInfo) -> bool + Send,
-    {
-        let nodes = self
-            .meta_client
-            .list_nodes(Some(Role::Frontend))
-            .await
-            .map_err(Box::new)
-            .context(MetaSnafu)?;
-
-        nodes
-            .into_iter()
-            .filter(predicate)
-            .map(|node| {
-                let channel = self
-                    .channel_manager
-                    .get(node.peer.addr)
-                    .context(error::CreateChannelSnafu)?;
-                let client = frontend_client::FrontendClient::new(channel);
-                Ok(Box::new(client) as FrontendClientPtr)
-            })
-            .collect::<Result<Vec<_>>>()
-    }
-}
-
-impl MetaClientSelector {
-    pub fn new(meta_client: MetaClientRef) -> Self {
-        let cfg = ChannelConfig::new()
-            .connect_timeout(Duration::from_secs(30))
-            .timeout(Duration::from_secs(30));
-        let channel_manager = ChannelManager::with_config(cfg);
-        Self {
-            meta_client,
-            channel_manager,
-        }
-    }
-}
@@ -1,28 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use session::context::QueryContextRef;
-
-#[derive(Debug)]
-pub struct SlowQueryEvent {
-    pub cost: u64,
-    pub threshold: u64,
-    pub query: String,
-    pub is_promql: bool,
-    pub query_ctx: QueryContextRef,
-    pub promql_range: Option<u64>,
-    pub promql_step: Option<u64>,
-    pub promql_start: Option<i64>,
-    pub promql_end: Option<i64>,
-}
@@ -16,9 +16,6 @@ mod add_region_follower;
 mod flush_compact_region;
 mod flush_compact_table;
 mod migrate_region;
-mod reconcile_catalog;
-mod reconcile_database;
-mod reconcile_table;
 mod remove_region_follower;
 
 use std::sync::Arc;
@@ -27,9 +24,6 @@ use add_region_follower::AddRegionFollowerFunction;
 use flush_compact_region::{CompactRegionFunction, FlushRegionFunction};
 use flush_compact_table::{CompactTableFunction, FlushTableFunction};
 use migrate_region::MigrateRegionFunction;
-use reconcile_catalog::ReconcileCatalogFunction;
-use reconcile_database::ReconcileDatabaseFunction;
-use reconcile_table::ReconcileTableFunction;
 use remove_region_follower::RemoveRegionFollowerFunction;
 
 use crate::flush_flow::FlushFlowFunction;
@@ -49,8 +43,5 @@ impl AdminFunction {
         registry.register_async(Arc::new(FlushTableFunction));
         registry.register_async(Arc::new(CompactTableFunction));
         registry.register_async(Arc::new(FlushFlowFunction));
-        registry.register_async(Arc::new(ReconcileCatalogFunction));
-        registry.register_async(Arc::new(ReconcileDatabaseFunction));
-        registry.register_async(Arc::new(ReconcileTableFunction));
     }
 }
@@ -1,179 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use api::v1::meta::reconcile_request::Target;
-use api::v1::meta::{ReconcileCatalog, ReconcileRequest};
-use common_macro::admin_fn;
-use common_query::error::{
-    InvalidFuncArgsSnafu, MissingProcedureServiceHandlerSnafu, Result,
-    UnsupportedInputDataTypeSnafu,
-};
-use common_query::prelude::{Signature, TypeSignature, Volatility};
-use common_telemetry::info;
-use datatypes::prelude::*;
-use session::context::QueryContextRef;
-
-use crate::handlers::ProcedureServiceHandlerRef;
-use crate::helper::{
-    cast_u32, default_parallelism, default_resolve_strategy, get_string_from_params,
-    parse_resolve_strategy,
-};
-
-const FN_NAME: &str = "reconcile_catalog";
-
-/// A function to reconcile a catalog.
-/// Returns the procedure id if success.
-///
-/// - `reconcile_catalog(resolve_strategy)`.
-/// - `reconcile_catalog(resolve_strategy, parallelism)`.
-///
-/// - `reconcile_catalog()`.
-#[admin_fn(
-    name = ReconcileCatalogFunction,
-    display_name = reconcile_catalog,
-    sig_fn = signature,
-    ret = string
-)]
-pub(crate) async fn reconcile_catalog(
-    procedure_service_handler: &ProcedureServiceHandlerRef,
-    query_ctx: &QueryContextRef,
-    params: &[ValueRef<'_>],
-) -> Result<Value> {
-    let (resolve_strategy, parallelism) = match params.len() {
-        0 => (default_resolve_strategy(), default_parallelism()),
-        1 => (
-            parse_resolve_strategy(get_string_from_params(params, 0, FN_NAME)?)?,
-            default_parallelism(),
-        ),
-        2 => {
-            let Some(parallelism) = cast_u32(&params[1])? else {
-                return UnsupportedInputDataTypeSnafu {
-                    function: FN_NAME,
-                    datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
-                }
-                .fail();
-            };
-            (
-                parse_resolve_strategy(get_string_from_params(params, 0, FN_NAME)?)?,
-                parallelism,
-            )
-        }
-        size => {
-            return InvalidFuncArgsSnafu {
-                err_msg: format!(
-                    "The length of the args is not correct, expect 0, 1 or 2, have: {}",
-                    size
-                ),
-            }
-            .fail();
-        }
-    };
-    info!(
-        "Reconciling catalog with resolve_strategy: {:?}, parallelism: {}",
-        resolve_strategy, parallelism
-    );
-    let pid = procedure_service_handler
-        .reconcile(ReconcileRequest {
-            target: Some(Target::ReconcileCatalog(ReconcileCatalog {
-                catalog_name: query_ctx.current_catalog().to_string(),
-                parallelism,
-                resolve_strategy: resolve_strategy as i32,
-            })),
-            ..Default::default()
-        })
-        .await?;
-    match pid {
-        Some(pid) => Ok(Value::from(pid)),
-        None => Ok(Value::Null),
-    }
-}
-
-fn signature() -> Signature {
-    let nums = ConcreteDataType::numerics();
-    let mut signs = Vec::with_capacity(2 + nums.len());
-    signs.extend([
-        // reconcile_catalog()
-        TypeSignature::NullAry,
-        // reconcile_catalog(resolve_strategy)
-        TypeSignature::Exact(vec![ConcreteDataType::string_datatype()]),
-    ]);
-    for sign in nums {
-        // reconcile_catalog(resolve_strategy, parallelism)
-        signs.push(TypeSignature::Exact(vec![
-            ConcreteDataType::string_datatype(),
-            sign,
-        ]));
-    }
-    Signature::one_of(signs, Volatility::Immutable)
-}
-
-#[cfg(test)]
-mod tests {
-    use std::assert_matches::assert_matches;
-    use std::sync::Arc;
-
-    use common_query::error::Error;
-    use datatypes::vectors::{StringVector, UInt64Vector, VectorRef};
-
-    use crate::admin::reconcile_catalog::ReconcileCatalogFunction;
-    use crate::function::{AsyncFunction, FunctionContext};
-
-    #[tokio::test]
-    async fn test_reconcile_catalog() {
-        common_telemetry::init_default_ut_logging();
-
-        // reconcile_catalog()
-        let f = ReconcileCatalogFunction;
-        let args = vec![];
-        let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
-        let expect: VectorRef = Arc::new(StringVector::from(vec!["test_pid"]));
-        assert_eq!(expect, result);
-
-        // reconcile_catalog(resolve_strategy)
-        let f = ReconcileCatalogFunction;
-        let args = vec![Arc::new(StringVector::from(vec!["UseMetasrv"])) as _];
-        let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
-        let expect: VectorRef = Arc::new(StringVector::from(vec!["test_pid"]));
-        assert_eq!(expect, result);
-
-        // reconcile_catalog(resolve_strategy, parallelism)
-        let f = ReconcileCatalogFunction;
-        let args = vec![
-            Arc::new(StringVector::from(vec!["UseLatest"])) as _,
-            Arc::new(UInt64Vector::from_slice([10])) as _,
-        ];
-        let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
-        let expect: VectorRef = Arc::new(StringVector::from(vec!["test_pid"]));
-        assert_eq!(expect, result);
-
-        // unsupported input data type
-        let f = ReconcileCatalogFunction;
-        let args = vec![
-            Arc::new(StringVector::from(vec!["UseLatest"])) as _,
-            Arc::new(StringVector::from(vec!["test"])) as _,
-        ];
-        let err = f.eval(FunctionContext::mock(), &args).await.unwrap_err();
-        assert_matches!(err, Error::UnsupportedInputDataType { .. });
-
-        // invalid function args
-        let f = ReconcileCatalogFunction;
-        let args = vec![
-            Arc::new(StringVector::from(vec!["UseLatest"])) as _,
-            Arc::new(UInt64Vector::from_slice([10])) as _,
-            Arc::new(StringVector::from(vec!["10"])) as _,
-        ];
-        let err = f.eval(FunctionContext::mock(), &args).await.unwrap_err();
-        assert_matches!(err, Error::InvalidFuncArgs { .. });
-    }
-}
@@ -1,198 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use api::v1::meta::reconcile_request::Target;
-use api::v1::meta::{ReconcileDatabase, ReconcileRequest};
-use common_macro::admin_fn;
-use common_query::error::{
-    InvalidFuncArgsSnafu, MissingProcedureServiceHandlerSnafu, Result,
-    UnsupportedInputDataTypeSnafu,
-};
-use common_query::prelude::{Signature, TypeSignature, Volatility};
-use common_telemetry::info;
-use datatypes::prelude::*;
-use session::context::QueryContextRef;
-
-use crate::handlers::ProcedureServiceHandlerRef;
-use crate::helper::{
-    cast_u32, default_parallelism, default_resolve_strategy, get_string_from_params,
-    parse_resolve_strategy,
-};
-
-const FN_NAME: &str = "reconcile_database";
-
-/// A function to reconcile a database.
-/// Returns the procedure id if success.
-///
-/// - `reconcile_database(database_name)`.
-/// - `reconcile_database(database_name, resolve_strategy)`.
-/// - `reconcile_database(database_name, resolve_strategy, parallelism)`.
-///
-/// The parameters:
-/// - `database_name`: the database name
-#[admin_fn(
-    name = ReconcileDatabaseFunction,
-    display_name = reconcile_database,
-    sig_fn = signature,
-    ret = string
-)]
-pub(crate) async fn reconcile_database(
-    procedure_service_handler: &ProcedureServiceHandlerRef,
-    query_ctx: &QueryContextRef,
-    params: &[ValueRef<'_>],
-) -> Result<Value> {
-    let (database_name, resolve_strategy, parallelism) = match params.len() {
-        1 => (
-            get_string_from_params(params, 0, FN_NAME)?,
-            default_resolve_strategy(),
-            default_parallelism(),
-        ),
-        2 => (
-            get_string_from_params(params, 0, FN_NAME)?,
-            parse_resolve_strategy(get_string_from_params(params, 1, FN_NAME)?)?,
-            default_parallelism(),
-        ),
-        3 => {
-            let Some(parallelism) = cast_u32(&params[2])? else {
-                return UnsupportedInputDataTypeSnafu {
-                    function: FN_NAME,
-                    datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
-                }
-                .fail();
-            };
-            (
-                get_string_from_params(params, 0, FN_NAME)?,
-                parse_resolve_strategy(get_string_from_params(params, 1, FN_NAME)?)?,
-                parallelism,
-            )
-        }
-        size => {
-            return InvalidFuncArgsSnafu {
-                err_msg: format!(
-                    "The length of the args is not correct, expect 1, 2 or 3, have: {}",
-                    size
-                ),
-            }
-            .fail();
-        }
-    };
-    info!(
-        "Reconciling database: {}, resolve_strategy: {:?}, parallelism: {}",
-        database_name, resolve_strategy, parallelism
-    );
-    let pid = procedure_service_handler
-        .reconcile(ReconcileRequest {
-            target: Some(Target::ReconcileDatabase(ReconcileDatabase {
-                catalog_name: query_ctx.current_catalog().to_string(),
-                database_name: database_name.to_string(),
-                parallelism,
-                resolve_strategy: resolve_strategy as i32,
-            })),
-            ..Default::default()
-        })
-        .await?;
-    match pid {
-        Some(pid) => Ok(Value::from(pid)),
-        None => Ok(Value::Null),
-    }
-}
-
-fn signature() -> Signature {
-    let nums = ConcreteDataType::numerics();
-    let mut signs = Vec::with_capacity(2 + nums.len());
-    signs.extend([
-        // reconcile_database(datanode_name)
-        TypeSignature::Exact(vec![ConcreteDataType::string_datatype()]),
-        // reconcile_database(database_name, resolve_strategy)
-        TypeSignature::Exact(vec![
-            ConcreteDataType::string_datatype(),
-            ConcreteDataType::string_datatype(),
-        ]),
-    ]);
-    for sign in nums {
-        // reconcile_database(database_name, resolve_strategy, parallelism)
-        signs.push(TypeSignature::Exact(vec![
-            ConcreteDataType::string_datatype(),
-            ConcreteDataType::string_datatype(),
-            sign,
-        ]));
-    }
-    Signature::one_of(signs, Volatility::Immutable)
-}
-
-#[cfg(test)]
-mod tests {
-    use std::assert_matches::assert_matches;
-    use std::sync::Arc;
-
-    use common_query::error::Error;
-    use datatypes::vectors::{StringVector, UInt32Vector, VectorRef};
-
-    use crate::admin::reconcile_database::ReconcileDatabaseFunction;
-    use crate::function::{AsyncFunction, FunctionContext};
-
-    #[tokio::test]
-    async fn test_reconcile_catalog() {
-        common_telemetry::init_default_ut_logging();
-
-        // reconcile_database(database_name)
-        let f = ReconcileDatabaseFunction;
-        let args = vec![Arc::new(StringVector::from(vec!["test"])) as _];
-        let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
-        let expect: VectorRef = Arc::new(StringVector::from(vec!["test_pid"]));
-        assert_eq!(expect, result);
-
-        // reconcile_database(database_name, resolve_strategy)
-        let f = ReconcileDatabaseFunction;
-        let args = vec![
-            Arc::new(StringVector::from(vec!["test"])) as _,
-            Arc::new(StringVector::from(vec!["UseLatest"])) as _,
-        ];
-        let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
-        let expect: VectorRef = Arc::new(StringVector::from(vec!["test_pid"]));
-        assert_eq!(expect, result);
-
-        // reconcile_database(database_name, resolve_strategy, parallelism)
-        let f = ReconcileDatabaseFunction;
-        let args = vec![
-            Arc::new(StringVector::from(vec!["test"])) as _,
-            Arc::new(StringVector::from(vec!["UseLatest"])) as _,
-            Arc::new(UInt32Vector::from_slice([10])) as _,
-        ];
-        let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
-        let expect: VectorRef = Arc::new(StringVector::from(vec!["test_pid"]));
-        assert_eq!(expect, result);
-
-        // invalid function args
-        let f = ReconcileDatabaseFunction;
-        let args = vec![
-            Arc::new(StringVector::from(vec!["UseLatest"])) as _,
-            Arc::new(UInt32Vector::from_slice([10])) as _,
-            Arc::new(StringVector::from(vec!["v1"])) as _,
-            Arc::new(StringVector::from(vec!["v2"])) as _,
-        ];
-        let err = f.eval(FunctionContext::mock(), &args).await.unwrap_err();
-        assert_matches!(err, Error::InvalidFuncArgs { .. });
-
-        // unsupported input data type
-        let f = ReconcileDatabaseFunction;
-        let args = vec![
-            Arc::new(StringVector::from(vec!["UseLatest"])) as _,
-            Arc::new(UInt32Vector::from_slice([10])) as _,
-            Arc::new(StringVector::from(vec!["v1"])) as _,
-        ];
-        let err = f.eval(FunctionContext::mock(), &args).await.unwrap_err();
-        assert_matches!(err, Error::UnsupportedInputDataType { .. });
-    }
-}
@@ -1,149 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use api::v1::meta::reconcile_request::Target;
-use api::v1::meta::{ReconcileRequest, ReconcileTable, ResolveStrategy};
-use common_catalog::format_full_table_name;
-use common_error::ext::BoxedError;
-use common_macro::admin_fn;
-use common_query::error::{
-    MissingProcedureServiceHandlerSnafu, Result, TableMutationSnafu, UnsupportedInputDataTypeSnafu,
-};
-use common_query::prelude::{Signature, TypeSignature, Volatility};
-use common_telemetry::info;
-use datatypes::prelude::*;
-use session::context::QueryContextRef;
-use session::table_name::table_name_to_full_name;
-use snafu::ResultExt;
-
-use crate::handlers::ProcedureServiceHandlerRef;
-use crate::helper::parse_resolve_strategy;
-
-const FN_NAME: &str = "reconcile_table";
-
-/// A function to reconcile a table.
-/// Returns the procedure id if success.
-///
-/// - `reconcile_table(table_name)`.
-/// - `reconcile_table(table_name, resolve_strategy)`.
-///
-/// The parameters:
-/// - `table_name`: the table name
-#[admin_fn(
-    name = ReconcileTableFunction,
-    display_name = reconcile_table,
-    sig_fn = signature,
-    ret = string
-)]
-pub(crate) async fn reconcile_table(
-    procedure_service_handler: &ProcedureServiceHandlerRef,
-    query_ctx: &QueryContextRef,
-    params: &[ValueRef<'_>],
-) -> Result<Value> {
-    let (table_name, resolve_strategy) = match params {
-        [ValueRef::String(table_name)] => (table_name, ResolveStrategy::UseLatest),
-        [ValueRef::String(table_name), ValueRef::String(resolve_strategy)] => {
-            (table_name, parse_resolve_strategy(resolve_strategy)?)
-        }
-        _ => {
-            return UnsupportedInputDataTypeSnafu {
-                function: FN_NAME,
-                datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
-            }
-            .fail()
-        }
-    };
-    let (catalog_name, schema_name, table_name) = table_name_to_full_name(table_name, query_ctx)
-        .map_err(BoxedError::new)
-        .context(TableMutationSnafu)?;
-    info!(
-        "Reconciling table: {} with resolve_strategy: {:?}",
-        format_full_table_name(&catalog_name, &schema_name, &table_name),
-        resolve_strategy
-    );
-    let pid = procedure_service_handler
-        .reconcile(ReconcileRequest {
-            target: Some(Target::ReconcileTable(ReconcileTable {
-                catalog_name,
-                schema_name,
-                table_name,
-                resolve_strategy: resolve_strategy as i32,
-            })),
-            ..Default::default()
-        })
-        .await?;
-    match pid {
-        Some(pid) => Ok(Value::from(pid)),
-        None => Ok(Value::Null),
-    }
-}
-
-fn signature() -> Signature {
-    Signature::one_of(
-        vec![
-            // reconcile_table(table_name)
-            TypeSignature::Exact(vec![ConcreteDataType::string_datatype()]),
-            // reconcile_table(table_name, resolve_strategy)
-            TypeSignature::Exact(vec![
-                ConcreteDataType::string_datatype(),
-                ConcreteDataType::string_datatype(),
-            ]),
-        ],
-        Volatility::Immutable,
-    )
-}
-
-#[cfg(test)]
-mod tests {
-    use std::assert_matches::assert_matches;
-    use std::sync::Arc;
-
-    use common_query::error::Error;
-    use datatypes::vectors::{StringVector, VectorRef};
-
-    use crate::admin::reconcile_table::ReconcileTableFunction;
-    use crate::function::{AsyncFunction, FunctionContext};
-
-    #[tokio::test]
-    async fn test_reconcile_table() {
-        common_telemetry::init_default_ut_logging();
-
-        // reconcile_table(table_name)
-        let f = ReconcileTableFunction;
-        let args = vec![Arc::new(StringVector::from(vec!["test"])) as _];
-        let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
-        let expect: VectorRef = Arc::new(StringVector::from(vec!["test_pid"]));
-        assert_eq!(expect, result);
-
-        // reconcile_table(table_name, resolve_strategy)
-        let f = ReconcileTableFunction;
-        let args = vec![
-            Arc::new(StringVector::from(vec!["test"])) as _,
-            Arc::new(StringVector::from(vec!["UseMetasrv"])) as _,
-        ];
-        let result = f.eval(FunctionContext::mock(), &args).await.unwrap();
-        let expect: VectorRef = Arc::new(StringVector::from(vec!["test_pid"]));
-        assert_eq!(expect, result);
-
-        // unsupported input data type
-        let f = ReconcileTableFunction;
-        let args = vec![
-            Arc::new(StringVector::from(vec!["test"])) as _,
-            Arc::new(StringVector::from(vec!["UseMetasrv"])) as _,
-            Arc::new(StringVector::from(vec!["10"])) as _,
-        ];
-        let err = f.eval(FunctionContext::mock(), &args).await.unwrap_err();
-        assert_matches!(err, Error::UnsupportedInputDataType { .. });
-    }
-}
@@ -14,8 +14,8 @@
 
 use crate::function_registry::FunctionRegistry;
 
-pub mod hll;
-pub mod uddsketch;
+pub(crate) mod hll;
+mod uddsketch;
 
 pub(crate) struct ApproximateFunction;
 
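The hunk above only narrows visibility: `pub mod` exposes a module to downstream crates, `pub(crate) mod` keeps it usable anywhere inside the crate, and a bare `mod` restricts it to the parent module. A self-contained sketch of the three levels:

    mod outer {
        pub mod exported {
            pub fn ping() {}
        }
        pub(crate) mod internal {
            pub fn ping() {}
        }
        mod private {
            #[allow(dead_code)]
            pub fn ping() {}
        }
    }

    fn main() {
        // Both paths resolve from elsewhere in the same crate:
        outer::exported::ping();
        outer::internal::ping();
        // outer::private::ping(); // error: module `private` is private
    }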
@@ -14,7 +14,6 @@
 
 use std::sync::Arc;
 
-use api::v1::meta::ReconcileRequest;
 use async_trait::async_trait;
 use catalog::CatalogManagerRef;
 use common_base::AffectedRows;
@@ -66,9 +65,6 @@ pub trait ProcedureServiceHandler: Send + Sync {
     /// Migrate a region from source peer to target peer, returns the procedure id if success.
     async fn migrate_region(&self, request: MigrateRegionRequest) -> Result<Option<String>>;
 
-    /// Reconcile a table, database or catalog, returns the procedure id if success.
-    async fn reconcile(&self, request: ReconcileRequest) -> Result<Option<String>>;
-
     /// Query the procedure' state by its id
     async fn query_procedure_state(&self, pid: &str) -> Result<ProcedureStateResponse>;
 
@@ -12,15 +12,12 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use api::v1::meta::ResolveStrategy;
-use common_query::error::{
-    InvalidFuncArgsSnafu, InvalidInputTypeSnafu, Result, UnsupportedInputDataTypeSnafu,
-};
+use common_query::error::{InvalidInputTypeSnafu, Result};
 use common_query::prelude::{Signature, TypeSignature, Volatility};
 use datatypes::prelude::ConcreteDataType;
 use datatypes::types::cast::cast;
 use datatypes::value::ValueRef;
-use snafu::{OptionExt, ResultExt};
+use snafu::ResultExt;
 
 /// Create a function signature with oneof signatures of interleaving two arguments.
 pub fn one_of_sigs2(args1: Vec<ConcreteDataType>, args2: Vec<ConcreteDataType>) -> Signature {
@@ -46,64 +43,3 @@ pub fn cast_u64(value: &ValueRef) -> Result<Option<u64>> {
         })
         .map(|v| v.as_u64())
 }
-
-/// Cast a [`ValueRef`] to u32, returns `None` if fails
-pub fn cast_u32(value: &ValueRef) -> Result<Option<u32>> {
-    cast((*value).into(), &ConcreteDataType::uint32_datatype())
-        .context(InvalidInputTypeSnafu {
-            err_msg: format!(
-                "Failed to cast input into uint32, actual type: {:#?}",
-                value.data_type(),
-            ),
-        })
-        .map(|v| v.as_u64().map(|v| v as u32))
-}
-
-/// Parse a resolve strategy from a string.
-pub fn parse_resolve_strategy(strategy: &str) -> Result<ResolveStrategy> {
-    ResolveStrategy::from_str_name(strategy).context(InvalidFuncArgsSnafu {
-        err_msg: format!("Invalid resolve strategy: {}", strategy),
-    })
-}
-
-/// Default parallelism for reconcile operations.
-pub fn default_parallelism() -> u32 {
-    64
-}
-
-/// Default resolve strategy for reconcile operations.
-pub fn default_resolve_strategy() -> ResolveStrategy {
-    ResolveStrategy::UseLatest
-}
-
-/// Get the string value from the params.
-///
-/// # Errors
-/// Returns an error if the input type is not a string.
-pub fn get_string_from_params<'a>(
-    params: &'a [ValueRef<'a>],
-    index: usize,
-    fn_name: &'a str,
-) -> Result<&'a str> {
-    let ValueRef::String(s) = &params[index] else {
-        return UnsupportedInputDataTypeSnafu {
-            function: fn_name,
-            datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
-        }
-        .fail();
-    };
-    Ok(s)
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn test_parse_resolve_strategy() {
-        assert_eq!(
-            parse_resolve_strategy("UseLatest").unwrap(),
-            ResolveStrategy::UseLatest
-        );
-    }
-}
@@ -14,7 +14,6 @@
 
 #![feature(let_chains)]
 #![feature(try_blocks)]
-#![feature(assert_matches)]
 
 mod admin;
 mod flush_flow;
@@ -32,7 +32,7 @@ impl FunctionState {
     pub fn mock() -> Self {
         use std::sync::Arc;
 
-        use api::v1::meta::{ProcedureStatus, ReconcileRequest};
+        use api::v1::meta::ProcedureStatus;
        use async_trait::async_trait;
         use catalog::CatalogManagerRef;
         use common_base::AffectedRows;
@@ -63,10 +63,6 @@ impl FunctionState {
                 Ok(Some("test_pid".to_string()))
             }
 
-            async fn reconcile(&self, _request: ReconcileRequest) -> Result<Option<String>> {
-                Ok(Some("test_pid".to_string()))
-            }
-
             async fn query_procedure_state(&self, _pid: &str) -> Result<ProcedureStateResponse> {
                 Ok(ProcedureStateResponse {
                     status: ProcedureStatus::Done.into(),
@@ -23,8 +23,7 @@ use std::sync::Arc;
 
 use build::BuildFunction;
 use database::{
-    ConnectionIdFunction, CurrentSchemaFunction, DatabaseFunction, PgBackendPidFunction,
-    ReadPreferenceFunction, SessionUserFunction,
+    CurrentSchemaFunction, DatabaseFunction, ReadPreferenceFunction, SessionUserFunction,
 };
 use pg_catalog::PGCatalogFunction;
 use procedure_state::ProcedureStateFunction;
@@ -43,8 +42,6 @@ impl SystemFunction {
         registry.register_scalar(DatabaseFunction);
         registry.register_scalar(SessionUserFunction);
         registry.register_scalar(ReadPreferenceFunction);
-        registry.register_scalar(PgBackendPidFunction);
-        registry.register_scalar(ConnectionIdFunction);
         registry.register_scalar(TimezoneFunction);
         registry.register_async(Arc::new(ProcedureStateFunction));
         PGCatalogFunction::register(registry);
@@ -18,8 +18,7 @@ use std::sync::Arc;
 use common_query::error::Result;
 use common_query::prelude::{Signature, Volatility};
 use datatypes::prelude::{ConcreteDataType, ScalarVector};
-use datatypes::vectors::{StringVector, UInt32Vector, VectorRef};
-use derive_more::Display;
+use datatypes::vectors::{StringVector, VectorRef};
 
 use crate::function::{Function, FunctionContext};
 
@@ -33,20 +32,10 @@ pub struct SessionUserFunction;
 
 pub struct ReadPreferenceFunction;
 
-#[derive(Display)]
-#[display("{}", self.name())]
-pub struct PgBackendPidFunction;
-
-#[derive(Display)]
-#[display("{}", self.name())]
-pub struct ConnectionIdFunction;
-
 const DATABASE_FUNCTION_NAME: &str = "database";
 const CURRENT_SCHEMA_FUNCTION_NAME: &str = "current_schema";
 const SESSION_USER_FUNCTION_NAME: &str = "session_user";
 const READ_PREFERENCE_FUNCTION_NAME: &str = "read_preference";
-const PG_BACKEND_PID: &str = "pg_backend_pid";
-const CONNECTION_ID: &str = "connection_id";
 
 impl Function for DatabaseFunction {
     fn name(&self) -> &str {
@@ -128,46 +117,6 @@ impl Function for ReadPreferenceFunction {
     }
 }
 
-impl Function for PgBackendPidFunction {
-    fn name(&self) -> &str {
-        PG_BACKEND_PID
-    }
-
-    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
-        Ok(ConcreteDataType::uint64_datatype())
-    }
-
-    fn signature(&self) -> Signature {
-        Signature::nullary(Volatility::Immutable)
-    }
-
-    fn eval(&self, func_ctx: &FunctionContext, _columns: &[VectorRef]) -> Result<VectorRef> {
-        let pid = func_ctx.query_ctx.process_id();
-
-        Ok(Arc::new(UInt32Vector::from_slice([pid])) as _)
-    }
-}
-
-impl Function for ConnectionIdFunction {
-    fn name(&self) -> &str {
-        CONNECTION_ID
-    }
-
-    fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
-        Ok(ConcreteDataType::uint64_datatype())
-    }
-
-    fn signature(&self) -> Signature {
-        Signature::nullary(Volatility::Immutable)
-    }
-
-    fn eval(&self, func_ctx: &FunctionContext, _columns: &[VectorRef]) -> Result<VectorRef> {
-        let pid = func_ctx.query_ctx.process_id();
-
-        Ok(Arc::new(UInt32Vector::from_slice([pid])) as _)
-    }
-}
-
 impl fmt::Display for DatabaseFunction {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         write!(f, "DATABASE")
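Both removed impls share one nullary scalar-function shape: no input columns, a single value pulled from the session's query context. For illustration, a minimal, self-contained sketch of that shape, with toy QueryContext and Function types standing in for the real ones in crate::function (which take a FunctionContext and return VectorRef):

use std::sync::Arc;

struct QueryContext {
    process_id: u32,
}

trait Function {
    fn name(&self) -> &str;
    fn eval(&self, ctx: &QueryContext) -> Arc<Vec<u32>>;
}

struct ConnectionIdFunction;

impl Function for ConnectionIdFunction {
    fn name(&self) -> &str {
        "connection_id"
    }

    fn eval(&self, ctx: &QueryContext) -> Arc<Vec<u32>> {
        // One row holding the session's process id, mirroring
        // `UInt32Vector::from_slice([pid])` in the removed impls.
        Arc::new(vec![ctx.process_id])
    }
}

fn main() {
    let f = ConnectionIdFunction;
    let ctx = QueryContext { process_id: 42 };
    println!("{} -> {:?}", f.name(), f.eval(&ctx));
}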
@@ -12,8 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use std::fmt;
 use std::sync::Arc;
+use std::{env, fmt};
 
 use common_query::error::Result;
 use common_query::prelude::{Signature, Volatility};
@@ -47,7 +47,7 @@ impl Function for PGVersionFunction {
     fn eval(&self, _func_ctx: &FunctionContext, _columns: &[VectorRef]) -> Result<VectorRef> {
         let result = StringVector::from(vec![format!(
             "PostgreSQL 16.3 GreptimeDB {}",
-            common_version::version()
+            env!("CARGO_PKG_VERSION")
         )]);
         Ok(Arc::new(result))
     }
@@ -12,8 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use std::fmt;
 use std::sync::Arc;
+use std::{env, fmt};
 
 use common_query::error::Result;
 use common_query::prelude::{Signature, Volatility};
@@ -52,13 +52,13 @@ impl Function for VersionFunction {
                    "{}-greptimedb-{}",
                    std::env::var("GREPTIMEDB_MYSQL_SERVER_VERSION")
                        .unwrap_or_else(|_| "8.4.2".to_string()),
-                    common_version::version()
+                    env!("CARGO_PKG_VERSION")
                )
            }
            Channel::Postgres => {
-                format!("16.3-greptimedb-{}", common_version::version())
+                format!("16.3-greptimedb-{}", env!("CARGO_PKG_VERSION"))
            }
-            _ => common_version::version().to_string(),
+            _ => env!("CARGO_PKG_VERSION").to_string(),
        };
        let result = StringVector::from(vec![version]);
        Ok(Arc::new(result))
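This hunk makes the same swap in three match arms: common_version::version() (GreptimeDB's workspace version helper, which can carry extra build metadata) is replaced by env!("CARGO_PKG_VERSION"), which is expanded at compile time from the crate's own Cargo.toml. A minimal, runnable sketch of what the MySQL-channel arm computes after the change:

fn mysql_version_string() -> String {
    // The advertised MySQL server version can be overridden via an
    // environment variable; "8.4.2" is the fallback from the diff.
    let server_version = std::env::var("GREPTIMEDB_MYSQL_SERVER_VERSION")
        .unwrap_or_else(|_| "8.4.2".to_string());
    // `env!` is resolved at compile time from this crate's Cargo.toml.
    format!("{}-greptimedb-{}", server_version, env!("CARGO_PKG_VERSION"))
}

fn main() {
    // Prints e.g. "8.4.2-greptimedb-0.1.0", depending on the crate version.
    println!("{}", mysql_version_string());
}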
@@ -29,12 +29,12 @@ use snafu::{ensure, OptionExt, ResultExt};
 use store_api::region_request::{SetRegionOption, UnsetRegionOption};
 use table::metadata::TableId;
 use table::requests::{
-    AddColumnRequest, AlterKind, AlterTableRequest, ModifyColumnTypeRequest, SetIndexOption,
-    UnsetIndexOption,
+    AddColumnRequest, AlterKind, AlterTableRequest, ModifyColumnTypeRequest, SetIndexOptions,
+    UnsetIndexOptions,
 };
 
 use crate::error::{
-    InvalidColumnDefSnafu, InvalidIndexOptionSnafu, InvalidSetFulltextOptionRequestSnafu,
+    InvalidColumnDefSnafu, InvalidSetFulltextOptionRequestSnafu,
     InvalidSetSkippingIndexOptionRequestSnafu, InvalidSetTableOptionRequestSnafu,
     InvalidUnsetTableOptionRequestSnafu, MissingAlterIndexOptionSnafu, MissingFieldSnafu,
     MissingTimestampColumnSnafu, Result, UnknownLocationTypeSnafu,
@@ -43,59 +43,6 @@ use crate::error::{
 const LOCATION_TYPE_FIRST: i32 = LocationType::First as i32;
 const LOCATION_TYPE_AFTER: i32 = LocationType::After as i32;
 
-fn set_index_option_from_proto(set_index: api::v1::SetIndex) -> Result<SetIndexOption> {
-    let options = set_index.options.context(MissingAlterIndexOptionSnafu)?;
-    Ok(match options {
-        api::v1::set_index::Options::Fulltext(f) => SetIndexOption::Fulltext {
-            column_name: f.column_name.clone(),
-            options: FulltextOptions::new(
-                f.enable,
-                as_fulltext_option_analyzer(
-                    Analyzer::try_from(f.analyzer).context(InvalidSetFulltextOptionRequestSnafu)?,
-                ),
-                f.case_sensitive,
-                as_fulltext_option_backend(
-                    PbFulltextBackend::try_from(f.backend)
-                        .context(InvalidSetFulltextOptionRequestSnafu)?,
-                ),
-                f.granularity as u32,
-                f.false_positive_rate,
-            )
-            .context(InvalidIndexOptionSnafu)?,
-        },
-        api::v1::set_index::Options::Inverted(i) => SetIndexOption::Inverted {
-            column_name: i.column_name,
-        },
-        api::v1::set_index::Options::Skipping(s) => SetIndexOption::Skipping {
-            column_name: s.column_name,
-            options: SkippingIndexOptions::new(
-                s.granularity as u32,
-                s.false_positive_rate,
-                as_skipping_index_type(
-                    PbSkippingIndexType::try_from(s.skipping_index_type)
-                        .context(InvalidSetSkippingIndexOptionRequestSnafu)?,
-                ),
-            )
-            .context(InvalidIndexOptionSnafu)?,
-        },
-    })
-}
-
-fn unset_index_option_from_proto(unset_index: api::v1::UnsetIndex) -> Result<UnsetIndexOption> {
-    let options = unset_index.options.context(MissingAlterIndexOptionSnafu)?;
-    Ok(match options {
-        api::v1::unset_index::Options::Fulltext(f) => UnsetIndexOption::Fulltext {
-            column_name: f.column_name,
-        },
-        api::v1::unset_index::Options::Inverted(i) => UnsetIndexOption::Inverted {
-            column_name: i.column_name,
-        },
-        api::v1::unset_index::Options::Skipping(s) => UnsetIndexOption::Skipping {
-            column_name: s.column_name,
-        },
-    })
-}
-
 /// Convert an [`AlterTableExpr`] to an [`AlterTableRequest`]
 pub fn alter_expr_to_request(table_id: TableId, expr: AlterTableExpr) -> Result<AlterTableRequest> {
     let catalog_name = expr.catalog_name;
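The two helpers deleted above were the shared conversion path from the protobuf index options to the table::requests types, letting both the single-option and the batched match arms reuse one fallible conversion via .map(set_index_option_from_proto).collect::<Result<Vec<_>>>(). A self-contained sketch of that helper pattern, with toy types in place of the real protobuf and domain types (the real conversion also validates fulltext and skipping options and can fail):

#[derive(Debug)]
enum ProtoIndex {
    Inverted { column_name: String },
    Skipping { column_name: String, granularity: u32 },
}

#[derive(Debug)]
enum IndexOption {
    Inverted { column_name: String },
    Skipping { column_name: String, granularity: u32 },
}

// One fallible proto-to-domain conversion, shared by all call sites.
fn index_option_from_proto(proto: ProtoIndex) -> Result<IndexOption, String> {
    Ok(match proto {
        ProtoIndex::Inverted { column_name } => IndexOption::Inverted { column_name },
        ProtoIndex::Skipping { column_name, granularity } => {
            // A real conversion validates here and returns Err on bad input.
            IndexOption::Skipping { column_name, granularity }
        }
    })
}

fn main() -> Result<(), String> {
    let protos = vec![
        ProtoIndex::Inverted { column_name: "host".to_string() },
        ProtoIndex::Skipping { column_name: "ts".to_string(), granularity: 1024 },
    ];
    // The batched call site: convert many options, failing fast on the first error.
    let options = protos
        .into_iter()
        .map(index_option_from_proto)
        .collect::<Result<Vec<_>, _>>()?;
    println!("{options:?}");
    Ok(())
}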
@@ -174,50 +121,65 @@ pub fn alter_expr_to_request(table_id: TableId, expr: AlterTableExpr) -> Result<
                 .context(InvalidUnsetTableOptionRequestSnafu)?,
             }
         }
-        Kind::SetIndex(o) => {
-            let option = set_index_option_from_proto(o)?;
-            AlterKind::SetIndexes {
-                options: vec![option],
-            }
-        }
-        Kind::UnsetIndex(o) => {
-            let option = unset_index_option_from_proto(o)?;
-            AlterKind::UnsetIndexes {
-                options: vec![option],
-            }
-        }
-        Kind::SetIndexes(o) => {
-            let options = o
-                .set_indexes
-                .into_iter()
-                .map(set_index_option_from_proto)
-                .collect::<Result<Vec<_>>>()?;
-            AlterKind::SetIndexes { options }
-        }
-        Kind::UnsetIndexes(o) => {
-            let options = o
-                .unset_indexes
-                .into_iter()
-                .map(unset_index_option_from_proto)
-                .collect::<Result<Vec<_>>>()?;
-            AlterKind::UnsetIndexes { options }
-        }
-        Kind::DropDefaults(o) => {
-            let names = o
-                .drop_defaults
-                .into_iter()
-                .map(|col| {
-                    ensure!(
-                        !col.column_name.is_empty(),
-                        MissingFieldSnafu {
-                            field: "column_name"
-                        }
-                    );
-                    Ok(col.column_name)
-                })
-                .collect::<Result<Vec<_>>>()?;
-            AlterKind::DropDefaults { names }
-        }
+        Kind::SetIndex(o) => match o.options {
+            Some(opt) => match opt {
+                api::v1::set_index::Options::Fulltext(f) => AlterKind::SetIndex {
+                    options: SetIndexOptions::Fulltext {
+                        column_name: f.column_name.clone(),
+                        options: FulltextOptions {
+                            enable: f.enable,
+                            analyzer: as_fulltext_option_analyzer(
+                                Analyzer::try_from(f.analyzer)
+                                    .context(InvalidSetFulltextOptionRequestSnafu)?,
+                            ),
+                            case_sensitive: f.case_sensitive,
+                            backend: as_fulltext_option_backend(
+                                PbFulltextBackend::try_from(f.backend)
+                                    .context(InvalidSetFulltextOptionRequestSnafu)?,
+                            ),
+                        },
+                    },
+                },
+                api::v1::set_index::Options::Inverted(i) => AlterKind::SetIndex {
+                    options: SetIndexOptions::Inverted {
+                        column_name: i.column_name,
+                    },
+                },
+                api::v1::set_index::Options::Skipping(s) => AlterKind::SetIndex {
+                    options: SetIndexOptions::Skipping {
+                        column_name: s.column_name,
+                        options: SkippingIndexOptions {
+                            granularity: s.granularity as u32,
+                            index_type: as_skipping_index_type(
+                                PbSkippingIndexType::try_from(s.skipping_index_type)
+                                    .context(InvalidSetSkippingIndexOptionRequestSnafu)?,
+                            ),
+                        },
+                    },
+                },
+            },
+            None => return MissingAlterIndexOptionSnafu.fail(),
+        },
+        Kind::UnsetIndex(o) => match o.options {
+            Some(opt) => match opt {
+                api::v1::unset_index::Options::Fulltext(f) => AlterKind::UnsetIndex {
+                    options: UnsetIndexOptions::Fulltext {
+                        column_name: f.column_name,
+                    },
+                },
+                api::v1::unset_index::Options::Inverted(i) => AlterKind::UnsetIndex {
+                    options: UnsetIndexOptions::Inverted {
+                        column_name: i.column_name,
+                    },
+                },
+                api::v1::unset_index::Options::Skipping(s) => AlterKind::UnsetIndex {
+                    options: UnsetIndexOptions::Skipping {
+                        column_name: s.column_name,
+                    },
+                },
+            },
+            None => return MissingAlterIndexOptionSnafu.fail(),
+        },
     };
 
     let request = AlterTableRequest {
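The hunk above toggles between two request shapes: the removed side batches index changes as AlterKind::SetIndexes { options: Vec<SetIndexOption> }, while the added side carries exactly one change as AlterKind::SetIndex { options: SetIndexOptions } and inlines the protobuf conversion. A simplified, self-contained sketch of the two shapes (stand-in enums, not the real table::requests definitions):

#[derive(Debug)]
enum SetIndexOption {
    Inverted { column_name: String },
}

#[derive(Debug)]
enum AlterKindBatched {
    // One request can set several indexes at once.
    SetIndexes { options: Vec<SetIndexOption> },
}

#[derive(Debug)]
enum AlterKindSingle {
    // One request sets exactly one index.
    SetIndex { options: SetIndexOption },
}

fn main() {
    let batched = AlterKindBatched::SetIndexes {
        options: vec![SetIndexOption::Inverted { column_name: "host".to_string() }],
    };
    let single = AlterKindSingle::SetIndex {
        options: SetIndexOption::Inverted { column_name: "host".to_string() },
    };
    println!("{batched:?}\n{single:?}");
}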
@@ -153,14 +153,6 @@ pub enum Error {
         #[snafu(implicit)]
         location: Location,
     },
-
-    #[snafu(display("Invalid index option"))]
-    InvalidIndexOption {
-        #[snafu(implicit)]
-        location: Location,
-        #[snafu(source)]
-        error: datatypes::error::Error,
-    },
 }
 
 pub type Result<T> = std::result::Result<T, Error>;
@@ -188,8 +180,7 @@ impl ErrorExt for Error {
             | Error::InvalidUnsetTableOptionRequest { .. }
             | Error::InvalidSetFulltextOptionRequest { .. }
             | Error::InvalidSetSkippingIndexOptionRequest { .. }
-            | Error::MissingAlterIndexOption { .. }
-            | Error::InvalidIndexOption { .. } => StatusCode::InvalidArguments,
+            | Error::MissingAlterIndexOption { .. } => StatusCode::InvalidArguments,
         }
     }
 
@@ -201,8 +201,8 @@ impl ChannelManager {
             "http"
         };
 
-        let mut endpoint = Endpoint::new(format!("{http_prefix}://{addr}"))
-            .context(CreateChannelSnafu { addr })?;
+        let mut endpoint =
+            Endpoint::new(format!("{http_prefix}://{addr}")).context(CreateChannelSnafu)?;
 
         if let Some(dur) = self.config().timeout {
             endpoint = endpoint.timeout(dur);
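Both ChannelManager hunks drop the addr field from the error context at the two Endpoint call sites. For orientation, a reduced sketch of the endpoint-building flow using tonic's public Endpoint API; the ChannelManager, TLS wiring, and snafu mapping are GreptimeDB-specific and elided here:

use std::time::Duration;
use tonic::transport::{Endpoint, Error};

fn build_endpoint(addr: &str, timeout: Option<Duration>) -> Result<Endpoint, Error> {
    // `Endpoint::new` parses the URI eagerly and can fail, which is why both
    // sides of the diff attach error context right at this call.
    let mut endpoint = Endpoint::new(format!("http://{addr}"))?;
    if let Some(dur) = timeout {
        endpoint = endpoint.timeout(dur);
    }
    Ok(endpoint)
}

fn main() -> Result<(), Error> {
    let ep = build_endpoint("127.0.0.1:4001", Some(Duration::from_secs(3)))?;
    println!("endpoint uri: {}", ep.uri());
    Ok(())
}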
@@ -237,7 +237,7 @@ impl ChannelManager {
         if let Some(tls_config) = &self.inner.client_tls_config {
             endpoint = endpoint
                 .tls_config(tls_config.clone())
-                .context(CreateChannelSnafu { addr })?;
+                .context(CreateChannelSnafu)?;
         }
 
         endpoint = endpoint
@@ -52,9 +52,8 @@ pub enum Error {
         location: Location,
     },
 
-    #[snafu(display("Failed to create gRPC channel from '{addr}'"))]
+    #[snafu(display("Failed to create gRPC channel"))]
     CreateChannel {
-        addr: String,
         #[snafu(source)]
         error: tonic::transport::Error,
         #[snafu(implicit)]
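This variant change explains the call-site edits above: snafu generates one context selector per error variant, and the selector's fields mirror the variant's non-source, non-implicit fields, so dropping addr: String turns CreateChannelSnafu { addr } into the unit selector CreateChannelSnafu. A toy, self-contained illustration with snafu's derive (not the actual common-grpc error type):

use snafu::{ResultExt, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Failed to create gRPC channel"))]
    // Only a `source` field remains, so the generated `CreateChannelSnafu`
    // selector is a unit struct with nothing to fill in.
    CreateChannel { source: std::num::ParseIntError },
}

fn parse_port(raw: &str) -> Result<u16, Error> {
    // Unit selector: no extra fields to supply besides the source.
    raw.parse::<u16>().context(CreateChannelSnafu)
}

fn main() {
    println!("{:?}", parse_port("4001"));
    println!("{:?}", parse_port("not-a-port"));
}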
@@ -17,7 +17,7 @@ workspace = true
 anymap2 = "0.13.0"
 api.workspace = true
 async-recursion = "1.0"
-async-stream.workspace = true
+async-stream = "0.3"
 async-trait.workspace = true
 backon = { workspace = true, optional = true }
 base64.workspace = true
@@ -32,7 +32,6 @@ common-procedure.workspace = true
 common-procedure-test.workspace = true
 common-query.workspace = true
 common-recordbatch.workspace = true
-common-runtime.workspace = true
 common-telemetry.workspace = true
 common-time.workspace = true
 common-wal.workspace = true
Some files were not shown because too many files have changed in this diff.