Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-22 22:20:02 +00:00.
Merge branch 'main' into feature/df-binary-operator-nested-data
.github/CODEOWNERS (vendored) | 22 lines changed

@@ -5,23 +5,23 @@
 * @GreptimeTeam/db-approver
 
 ## [Module] Database Engine
-/src/index @zhongzc
+/src/index @evenyag @discord9 @WenyXu
 /src/mito2 @evenyag @v0y4g3r @waynexia
-/src/query @evenyag
+/src/query @evenyag @waynexia @discord9
 
 ## [Module] Distributed
-/src/common/meta @MichaelScofield
-/src/common/procedure @MichaelScofield
-/src/meta-client @MichaelScofield
-/src/meta-srv @MichaelScofield
+/src/common/meta @MichaelScofield @WenyXu
+/src/common/procedure @MichaelScofield @WenyXu
+/src/meta-client @MichaelScofield @WenyXu
+/src/meta-srv @MichaelScofield @WenyXu
 
 ## [Module] Write Ahead Log
-/src/log-store @v0y4g3r
-/src/store-api @v0y4g3r
+/src/log-store @v0y4g3r @WenyXu
+/src/store-api @v0y4g3r @evenyag
 
 ## [Module] Metrics Engine
-/src/metric-engine @waynexia
-/src/promql @waynexia
+/src/metric-engine @waynexia @WenyXu
+/src/promql @waynexia @evenyag @discord9
 
 ## [Module] Flow
-/src/flow @zhongzc @waynexia
+/src/flow @discord9 @waynexia
.github/actions/build-greptime-binary/action.yml (vendored) | 17 lines changed

@@ -32,9 +32,23 @@ inputs:
     description: Image Registry
     required: false
     default: 'docker.io'
+  large-page-size:
+    description: Build GreptimeDB with large page size (65536).
+    required: false
+    default: 'false'
 
 runs:
   using: composite
   steps:
+    - name: Set extra build environment variables
+      shell: bash
+      run: |
+        if [[ '${{ inputs.large-page-size }}' == 'true' ]]; then
+          echo 'EXTRA_BUILD_ENVS="JEMALLOC_SYS_WITH_LG_PAGE=16"' >> $GITHUB_ENV
+        else
+          echo 'EXTRA_BUILD_ENVS=' >> $GITHUB_ENV
+        fi
+
     - name: Build greptime binary
       shell: bash
       if: ${{ inputs.build-android-artifacts == 'false' }}
@@ -45,7 +59,8 @@ runs:
           FEATURES=${{ inputs.features }} \
           BASE_IMAGE=${{ inputs.base-image }} \
           IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
-          IMAGE_REGISTRY=${{ inputs.image-registry }}
+          IMAGE_REGISTRY=${{ inputs.image-registry }} \
+          EXTRA_BUILD_ENVS=$EXTRA_BUILD_ENVS
 
     - name: Upload artifacts
       uses: ./.github/actions/upload-artifacts
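The new `large-page-size` input only toggles one build environment variable. As a rough local equivalent (a sketch, not the CI path, and assuming a plain `cargo` build rather than the dev-builder container), the jemalloc page-size override looks like this:

```shell
# Tell tikv-jemalloc-sys to assume 2^16 = 65536-byte pages, so the resulting
# binary also runs on ARM kernels configured with 64K pages.
JEMALLOC_SYS_WITH_LG_PAGE=16 cargo build --bin greptime --release
```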
Another vendored action (file path not captured in this view):

@@ -27,6 +27,10 @@ inputs:
     description: Working directory to build the artifacts
     required: false
     default: .
+  large-page-size:
+    description: Build GreptimeDB with large page size (65536).
+    required: false
+    default: 'false'
 runs:
   using: composite
   steps:
@@ -59,6 +63,7 @@ runs:
         working-dir: ${{ inputs.working-dir }}
         image-registry: ${{ inputs.image-registry }}
         image-namespace: ${{ inputs.image-namespace }}
+        large-page-size: ${{ inputs.large-page-size }}
 
     - name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of last build.
       shell: bash
@@ -77,6 +82,7 @@ runs:
         working-dir: ${{ inputs.working-dir }}
         image-registry: ${{ inputs.image-registry }}
         image-namespace: ${{ inputs.image-namespace }}
+        large-page-size: ${{ inputs.large-page-size }}
 
     - name: Build greptime on android base image
       uses: ./.github/actions/build-greptime-binary
@@ -89,3 +95,4 @@ runs:
         build-android-artifacts: true
         image-registry: ${{ inputs.image-registry }}
         image-namespace: ${{ inputs.image-namespace }}
+        large-page-size: ${{ inputs.large-page-size }}
.github/scripts/deploy-greptimedb.sh (vendored) | 6 lines changed

@@ -7,6 +7,8 @@ KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.32.0}"
 ENABLE_STANDALONE_MODE="${ENABLE_STANDALONE_MODE:-true}"
 DEFAULT_INSTALL_NAMESPACE=${DEFAULT_INSTALL_NAMESPACE:-default}
 GREPTIMEDB_IMAGE_TAG=${GREPTIMEDB_IMAGE_TAG:-latest}
+GREPTIMEDB_OPERATOR_IMAGE_TAG=${GREPTIMEDB_OPERATOR_IMAGE_TAG:-v0.5.1}
+GREPTIMEDB_INITIALIZER_IMAGE_TAG="${GREPTIMEDB_OPERATOR_IMAGE_TAG}"
 GREPTIME_CHART="https://greptimeteam.github.io/helm-charts/"
 ETCD_CHART="oci://registry-1.docker.io/bitnamicharts/etcd"
 ETCD_CHART_VERSION="${ETCD_CHART_VERSION:-12.0.8}"
@@ -58,7 +60,7 @@ function deploy_greptimedb_operator() {
   # Use the latest chart and image.
   helm upgrade --install greptimedb-operator greptime/greptimedb-operator \
     --create-namespace \
-    --set image.tag=latest \
+    --set image.tag="$GREPTIMEDB_OPERATOR_IMAGE_TAG" \
     -n "$DEFAULT_INSTALL_NAMESPACE"
 
   # Wait for greptimedb-operator to be ready.
@@ -78,6 +80,7 @@ function deploy_greptimedb_cluster() {
   helm upgrade --install "$cluster_name" greptime/greptimedb-cluster \
     --create-namespace \
     --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
+    --set initializer.tag="$GREPTIMEDB_INITIALIZER_IMAGE_TAG" \
     --set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
     --set meta.backendStorage.etcd.storeKeyPrefix="$cluster_name" \
     -n "$install_namespace"
@@ -115,6 +118,7 @@ function deploy_greptimedb_cluster_with_s3_storage() {
   helm upgrade --install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \
     --create-namespace \
     --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
+    --set initializer.tag="$GREPTIMEDB_INITIALIZER_IMAGE_TAG" \
     --set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
     --set meta.backendStorage.etcd.storeKeyPrefix="$cluster_name" \
     --set objectStorage.s3.bucket="$AWS_CI_TEST_BUCKET" \
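Because the new tags are plain environment variables with defaults, a manual run of the script can pin the operator and initializer images without editing it. The variable names come from the diff above; the invocation itself is a hypothetical sketch:

```shell
# Pin the operator (and, via GREPTIMEDB_OPERATOR_IMAGE_TAG, the initializer) instead of 'latest'.
GREPTIMEDB_OPERATOR_IMAGE_TAG=v0.5.1 \
GREPTIMEDB_IMAGE_TAG=latest \
./.github/scripts/deploy-greptimedb.sh
```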
Two more vendored release scripts (file paths not captured in this view):

@@ -39,8 +39,11 @@ update_helm_charts_version() {
     --body "This PR updates the GreptimeDB version." \
     --base main \
     --head $BRANCH_NAME \
-    --reviewer zyy17 \
-    --reviewer daviderli614
+    --reviewer sunng87 \
+    --reviewer daviderli614 \
+    --reviewer killme2008 \
+    --reviewer evenyag \
+    --reviewer fengjiachun
 }
 
 update_helm_charts_version
@@ -35,8 +35,11 @@ update_homebrew_greptime_version() {
     --body "This PR updates the GreptimeDB version." \
     --base main \
     --head $BRANCH_NAME \
-    --reviewer zyy17 \
-    --reviewer daviderli614
+    --reviewer sunng87 \
+    --reviewer daviderli614 \
+    --reviewer killme2008 \
+    --reviewer evenyag \
+    --reviewer fengjiachun
 }
 
 update_homebrew_greptime_version
.github/workflows/dev-build.yml (vendored) | 9 lines changed

@@ -4,10 +4,11 @@ name: GreptimeDB Development Build
 on:
   workflow_dispatch: # Allows you to run this workflow manually.
     inputs:
-      repository:
-        description: The public repository to build
+      large-page-size:
+        description: Build GreptimeDB with large page size (65536).
+        type: boolean
         required: false
-        default: GreptimeTeam/greptimedb
+        default: false
       commit: # Note: We only pull the source code and use the current workflow to build the artifacts.
         description: The commit to build
         required: true
@@ -181,6 +182,7 @@ jobs:
           working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
           image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
           image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
+          large-page-size: ${{ inputs.large-page-size }}
 
   build-linux-arm64-artifacts:
     name: Build linux-arm64 artifacts
@@ -214,6 +216,7 @@ jobs:
           working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
           image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
           image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
+          large-page-size: ${{ inputs.large-page-size }}
 
   release-images-to-dockerhub:
     name: Build and push images to DockerHub
.github/workflows/develop.yml (vendored) | 3 lines changed

@@ -613,6 +613,9 @@ jobs:
           - name: "MySQL Kvbackend"
             opts: "--setup-mysql"
             kafka: false
+          - name: "Flat format"
+            opts: "--enable-flat-format"
+            kafka: false
     timeout-minutes: 60
     steps:
       - uses: actions/checkout@v4
.github/workflows/docs.yml (vendored) | 1 line changed

@@ -92,5 +92,6 @@ jobs:
         mode:
           - name: "Basic"
           - name: "Remote WAL"
+          - name: "Flat format"
     steps:
       - run: 'echo "No action required"'
.github/workflows/multi-lang-tests.yml (vendored, new file) | 57 lines

@@ -0,0 +1,57 @@
+name: Multi-language Integration Tests
+
+on:
+  push:
+    branches:
+      - main
+  workflow_dispatch:
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+  cancel-in-progress: true
+
+jobs:
+  build-greptimedb:
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
+    name: Build GreptimeDB binary
+    runs-on: ubuntu-latest
+    timeout-minutes: 60
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          persist-credentials: false
+      - uses: arduino/setup-protoc@v3
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+      - uses: actions-rust-lang/setup-rust-toolchain@v1
+      - uses: Swatinem/rust-cache@v2
+        with:
+          shared-key: "multi-lang-build"
+          cache-all-crates: "true"
+          save-if: ${{ github.ref == 'refs/heads/main' }}
+      - name: Install cargo-gc-bin
+        shell: bash
+        run: cargo install cargo-gc-bin --force
+      - name: Build greptime binary
+        shell: bash
+        run: cargo gc -- --bin greptime --features "pg_kvbackend,mysql_kvbackend"
+      - name: Pack greptime binary
+        shell: bash
+        run: |
+          mkdir bin && \
+          mv ./target/debug/greptime bin
+      - name: Print greptime binary info
+        run: ls -lh bin
+      - name: Upload greptime binary
+        uses: actions/upload-artifact@v4
+        with:
+          name: greptime-bin
+          path: bin/
+          retention-days: 1
+
+  run-multi-lang-tests:
+    name: Run Multi-language SDK Tests
+    needs: build-greptimedb
+    uses: ./.github/workflows/run-multi-lang-tests.yml
+    with:
+      artifact-name: greptime-bin
.github/workflows/nightly-build.yml (vendored) | 21 lines changed

@@ -174,6 +174,18 @@ jobs:
       image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
       image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
 
+  run-multi-lang-tests:
+    name: Run Multi-language SDK Tests
+    if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'schedule' }}
+    needs: [
+      allocate-runners,
+      build-linux-amd64-artifacts,
+    ]
+    uses: ./.github/workflows/run-multi-lang-tests.yml
+    with:
+      artifact-name: greptime-linux-amd64-${{ needs.allocate-runners.outputs.version }}
+      artifact-is-tarball: true
+
   release-images-to-dockerhub:
     name: Build and push images to DockerHub
     if: ${{ inputs.release_images || github.event_name == 'schedule' }}
@@ -301,7 +313,8 @@ jobs:
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
     name: Send notification to Greptime team
     needs: [
-      release-images-to-dockerhub
+      release-images-to-dockerhub,
+      run-multi-lang-tests,
     ]
     runs-on: ubuntu-latest
     permissions:
@@ -319,17 +332,17 @@ jobs:
         run: pnpm tsx bin/report-ci-failure.ts
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
+          CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' && (needs.run-multi-lang-tests.result == 'success' || needs.run-multi-lang-tests.result == 'skipped') }}
       - name: Notify nightly build successful result
         uses: slackapi/slack-github-action@v1.23.0
-        if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
+        if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' && (needs.run-multi-lang-tests.result == 'success' || needs.run-multi-lang-tests.result == 'skipped') }}
         with:
           payload: |
             {"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
 
       - name: Notify nightly build failed result
         uses: slackapi/slack-github-action@v1.23.0
-        if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' }}
+        if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' || needs.run-multi-lang-tests.result == 'failure' }}
         with:
           payload: |
             {"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}
.github/workflows/release.yml (vendored) | 14 lines changed

@@ -215,6 +215,18 @@ jobs:
       image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
       image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
 
+  run-multi-lang-tests:
+    name: Run Multi-language SDK Tests
+    if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
+    needs: [
+      allocate-runners,
+      build-linux-amd64-artifacts,
+    ]
+    uses: ./.github/workflows/run-multi-lang-tests.yml
+    with:
+      artifact-name: greptime-linux-amd64-${{ needs.allocate-runners.outputs.version }}
+      artifact-is-tarball: true
+
   build-macos-artifacts:
     name: Build macOS artifacts
     strategy:
@@ -303,6 +315,7 @@ jobs:
       allocate-runners,
       build-linux-amd64-artifacts,
       build-linux-arm64-artifacts,
+      run-multi-lang-tests,
     ]
     runs-on: ubuntu-latest
     outputs:
@@ -381,6 +394,7 @@ jobs:
       build-macos-artifacts,
       build-windows-artifacts,
       release-images-to-dockerhub,
+      run-multi-lang-tests,
     ]
     runs-on: ubuntu-latest
     steps:
.github/workflows/run-multi-lang-tests.yml (vendored, new file) | 194 lines

@@ -0,0 +1,194 @@
+# Reusable workflow for running multi-language SDK tests against GreptimeDB
+# Used by: multi-lang-tests.yml, release.yml, nightly-build.yml
+# Supports both direct binary artifacts and tarball artifacts
+
+name: Run Multi-language SDK Tests
+
+on:
+  workflow_call:
+    inputs:
+      artifact-name:
+        required: true
+        type: string
+        description: 'Name of the artifact containing greptime binary'
+      http-port:
+        required: false
+        type: string
+        default: '4000'
+        description: 'HTTP server port'
+      mysql-port:
+        required: false
+        type: string
+        default: '4002'
+        description: 'MySQL server port'
+      postgres-port:
+        required: false
+        type: string
+        default: '4003'
+        description: 'PostgreSQL server port'
+      db-name:
+        required: false
+        type: string
+        default: 'test_db'
+        description: 'Test database name'
+      username:
+        required: false
+        type: string
+        default: 'greptime_user'
+        description: 'Authentication username'
+      password:
+        required: false
+        type: string
+        default: 'greptime_pwd'
+        description: 'Authentication password'
+      timeout-minutes:
+        required: false
+        type: number
+        default: 30
+        description: 'Job timeout in minutes'
+      artifact-is-tarball:
+        required: false
+        type: boolean
+        default: false
+        description: 'Whether the artifact is a tarball (tar.gz) that needs to be extracted'
+
+jobs:
+  run-tests:
+    name: Run Multi-language SDK Tests
+    runs-on: ubuntu-latest
+    timeout-minutes: ${{ inputs.timeout-minutes }}
+    steps:
+      - name: Checkout greptimedb-tests repository
+        uses: actions/checkout@v4
+        with:
+          repository: GreptimeTeam/greptimedb-tests
+          persist-credentials: false
+
+      - name: Download pre-built greptime binary
+        uses: actions/download-artifact@v4
+        with:
+          name: ${{ inputs.artifact-name }}
+          path: artifact
+
+      - name: Setup greptime binary
+        run: |
+          mkdir -p bin
+          if [ "${{ inputs.artifact-is-tarball }}" = "true" ]; then
+            # Extract tarball and find greptime binary
+            tar -xzf artifact/*.tar.gz -C artifact
+            find artifact -name "greptime" -type f -exec cp {} bin/greptime \;
+          else
+            # Direct binary format
+            if [ -f artifact/greptime ]; then
+              cp artifact/greptime bin/greptime
+            else
+              cp artifact/* bin/greptime
+            fi
+          fi
+          chmod +x ./bin/greptime
+          ls -lh ./bin/greptime
+          ./bin/greptime --version
+
+      - name: Setup Java 17
+        uses: actions/setup-java@v4
+        with:
+          distribution: 'temurin'
+          java-version: '17'
+          cache: 'maven'
+
+      - name: Setup Python 3.8
+        uses: actions/setup-python@v5
+        with:
+          python-version: '3.8'
+
+      - name: Setup Go 1.24
+        uses: actions/setup-go@v5
+        with:
+          go-version: '1.24'
+          cache: true
+          cache-dependency-path: go-tests/go.sum
+
+      - name: Set up Node.js
+        uses: actions/setup-node@v4
+        with:
+          node-version: '18'
+
+      - name: Install Python dependencies
+        run: |
+          pip install mysql-connector-python psycopg2-binary
+          python3 -c "import mysql.connector; print(f'mysql-connector-python {mysql.connector.__version__}')"
+          python3 -c "import psycopg2; print(f'psycopg2 {psycopg2.__version__}')"
+
+      - name: Install Go dependencies
+        working-directory: go-tests
+        run: |
+          go mod download
+          go mod verify
+          go version
+
+      - name: Kill existing GreptimeDB processes
+        run: |
+          pkill -f greptime || true
+          sleep 2
+
+      - name: Start GreptimeDB standalone
+        run: |
+          ./bin/greptime standalone start \
+            --http-addr 0.0.0.0:${{ inputs.http-port }} \
+            --rpc-addr 0.0.0.0:4001 \
+            --mysql-addr 0.0.0.0:${{ inputs.mysql-port }} \
+            --postgres-addr 0.0.0.0:${{ inputs.postgres-port }} \
+            --user-provider=static_user_provider:cmd:${{ inputs.username }}=${{ inputs.password }} > /tmp/greptimedb.log 2>&1 &
+
+      - name: Wait for GreptimeDB to be ready
+        run: |
+          echo "Waiting for GreptimeDB..."
+          for i in {1..60}; do
+            if curl -sf http://localhost:${{ inputs.http-port }}/health > /dev/null; then
+              echo "✅ GreptimeDB is ready"
+              exit 0
+            fi
+            sleep 2
+          done
+          echo "❌ GreptimeDB failed to start"
+          cat /tmp/greptimedb.log
+          exit 1
+
+      - name: Run multi-language tests
+        env:
+          DB_NAME: ${{ inputs.db-name }}
+          MYSQL_HOST: 127.0.0.1
+          MYSQL_PORT: ${{ inputs.mysql-port }}
+          POSTGRES_HOST: 127.0.0.1
+          POSTGRES_PORT: ${{ inputs.postgres-port }}
+          HTTP_HOST: 127.0.0.1
+          HTTP_PORT: ${{ inputs.http-port }}
+          GREPTIME_USERNAME: ${{ inputs.username }}
+          GREPTIME_PASSWORD: ${{ inputs.password }}
+        run: |
+          chmod +x ./run_tests.sh
+          ./run_tests.sh
+
+      - name: Collect logs on failure
+        if: failure()
+        run: |
+          echo "=== GreptimeDB Logs ==="
+          cat /tmp/greptimedb.log || true
+
+      - name: Upload test logs on failure
+        if: failure()
+        uses: actions/upload-artifact@v4
+        with:
+          name: test-logs
+          path: |
+            /tmp/greptimedb.log
+            java-tests/target/surefire-reports/
+            python-tests/.pytest_cache/
+            go-tests/*.log
+            **/test-output/
+          retention-days: 7
+
+      - name: Cleanup
+        if: always()
+        run: |
+          pkill -f greptime || true
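For debugging a failing run locally, the server-side portion of this workflow boils down to the steps below (a sketch using the workflow's default ports and credentials; `./bin/greptime` and `./run_tests.sh` are assumed to be the unpacked binary and the greptimedb-tests entry point):

```shell
# Start a standalone server with the same ports and static auth the workflow uses.
./bin/greptime standalone start \
  --http-addr 0.0.0.0:4000 \
  --rpc-addr 0.0.0.0:4001 \
  --mysql-addr 0.0.0.0:4002 \
  --postgres-addr 0.0.0.0:4003 \
  --user-provider=static_user_provider:cmd:greptime_user=greptime_pwd > /tmp/greptimedb.log 2>&1 &

# Poll the health endpoint before kicking off the SDK tests, as the workflow does.
until curl -sf http://localhost:4000/health > /dev/null; do sleep 2; done
./run_tests.sh
```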
Cargo.lock (generated) | 331 lines changed. Diff suppressed because it is too large.
Cargo.toml | 19 lines changed

@@ -74,7 +74,7 @@ members = [
 resolver = "2"
 
 [workspace.package]
-version = "0.18.0"
+version = "1.0.0-beta.2"
 edition = "2024"
 license = "Apache-2.0"
 
@@ -118,9 +118,10 @@ bitflags = "2.4.1"
 bytemuck = "1.12"
 bytes = { version = "1.7", features = ["serde"] }
 chrono = { version = "0.4", features = ["serde"] }
-chrono-tz = "0.10.1"
+chrono-tz = { version = "0.10.1", features = ["case-insensitive"] }
 clap = { version = "4.4", features = ["derive"] }
 config = "0.13.0"
+const_format = "0.2"
 crossbeam-utils = "0.8"
 dashmap = "6.1"
 datafusion = "50"
@@ -130,7 +131,7 @@ datafusion-functions = "50"
 datafusion-functions-aggregate-common = "50"
 datafusion-optimizer = "50"
 datafusion-orc = "0.5"
-datafusion-pg-catalog = "0.11"
+datafusion-pg-catalog = "0.12.2"
 datafusion-physical-expr = "50"
 datafusion-physical-plan = "50"
 datafusion-sql = "50"
@@ -147,7 +148,7 @@ etcd-client = { git = "https://github.com/GreptimeTeam/etcd-client", rev = "f62d
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "72a0d22e0f5f716b2ee21bca091f87a88c36e5ca" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "0df99f09f1d6785055b2d9da96fc4ecc2bdf6803" }
 hex = "0.4"
 http = "1"
 humantime = "2.1"
@@ -191,7 +192,7 @@ prost-types = "0.13"
 raft-engine = { version = "0.4.1", default-features = false }
 rand = "0.9"
 ratelimit = "0.10"
-regex = "1.8"
+regex = "1.12"
 regex-automata = "0.4"
 reqwest = { version = "0.12", default-features = false, features = [
     "json",
@@ -218,12 +219,7 @@ similar-asserts = "1.6.0"
 smallvec = { version = "1", features = ["serde"] }
 snafu = "0.8"
 sqlparser = { version = "0.58.0", default-features = false, features = ["std", "visitor", "serde"] }
-sqlx = { version = "0.8", features = [
-    "runtime-tokio-rustls",
-    "mysql",
-    "postgres",
-    "chrono",
-] }
+sqlx = { version = "0.8", default-features = false, features = ["any", "macros", "json", "runtime-tokio-rustls"] }
 strum = { version = "0.27", features = ["derive"] }
 sysinfo = "0.33"
 tempfile = "3"
@@ -238,6 +234,7 @@ tower = "0.5"
 tower-http = "0.6"
 tracing = "0.1"
 tracing-appender = "0.2"
+tracing-opentelemetry = "0.31.0"
 tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] }
 typetag = "0.2"
 uuid = { version = "1.17", features = ["serde", "v4", "fast-rng"] }
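The `sqlx` entry now disables default features, so only `any`, `macros`, `json`, and the rustls runtime remain enabled at the workspace level. A quick way to confirm what actually ends up compiled in (a sketch, assuming it is run from the workspace root) is:

```shell
# Inspect which sqlx features are enabled by the crates that depend on it.
cargo tree -i sqlx --edges features
```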
Makefile | 3 lines changed

@@ -17,6 +17,8 @@ CARGO_REGISTRY_CACHE ?= ${HOME}/.cargo/registry
 ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/')
 OUTPUT_DIR := $(shell if [ "$(RELEASE)" = "true" ]; then echo "release"; elif [ ! -z "$(CARGO_PROFILE)" ]; then echo "$(CARGO_PROFILE)" ; else echo "debug"; fi)
 SQLNESS_OPTS ?=
+EXTRA_BUILD_ENVS ?=
+ASSEMBLED_EXTRA_BUILD_ENV := $(foreach var,$(EXTRA_BUILD_ENVS),-e $(var))
 
 # The arguments for running integration tests.
 ETCD_VERSION ?= v3.5.9
@@ -83,6 +85,7 @@ build: ## Build debug version greptime.
 .PHONY: build-by-dev-builder
 build-by-dev-builder: ## Build greptime by dev-builder.
 	docker run --network=host \
+	${ASSEMBLED_EXTRA_BUILD_ENV} \
 	-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
 	-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} \
 	make build \
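The two new Makefile variables are what connect the `large-page-size` action input to the containerized build: each whitespace-separated entry in `EXTRA_BUILD_ENVS` is expanded into a `-e` flag on the dev-builder `docker run`. A hedged usage sketch:

```shell
# Forward a jemalloc page-size override into the dev-builder container;
# it expands (roughly) to: docker run --network=host -e JEMALLOC_SYS_WITH_LG_PAGE=16 ... make build
make build-by-dev-builder EXTRA_BUILD_ENVS="JEMALLOC_SYS_WITH_LG_PAGE=16"
```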
README.md | 72 lines changed

@@ -12,8 +12,7 @@
 
 <div align="center">
 <h3 align="center">
-  <a href="https://greptime.com/product/cloud">GreptimeCloud</a> |
-  <a href="https://docs.greptime.com/">User Guide</a> |
+  <a href="https://docs.greptime.com/user-guide/overview/">User Guide</a> |
   <a href="https://greptimedb.rs/">API Docs</a> |
   <a href="https://github.com/GreptimeTeam/greptimedb/issues/5446">Roadmap 2025</a>
 </h4>
@@ -67,17 +66,24 @@
 
 ## Introduction
 
-**GreptimeDB** is an open-source, cloud-native database purpose-built for the unified collection and analysis of observability data (metrics, logs, and traces). Whether you’re operating on the edge, in the cloud, or across hybrid environments, GreptimeDB empowers real-time insights at massive scale — all in one system.
+**GreptimeDB** is an open-source, cloud-native database that unifies metrics, logs, and traces, enabling real-time observability at any scale — across edge, cloud, and hybrid environments.
 
 ## Features
 
 | Feature | Description |
 | --------- | ----------- |
-| [Unified Observability Data](https://docs.greptime.com/user-guide/concepts/why-greptimedb) | Store metrics, logs, and traces as timestamped, contextual wide events. Query via [SQL](https://docs.greptime.com/user-guide/query-data/sql), [PromQL](https://docs.greptime.com/user-guide/query-data/promql), and [streaming](https://docs.greptime.com/user-guide/flow-computation/overview). |
-| [High Performance & Cost Effective](https://docs.greptime.com/user-guide/manage-data/data-index) | Written in Rust, with a distributed query engine, [rich indexing](https://docs.greptime.com/user-guide/manage-data/data-index), and optimized columnar storage, delivering sub-second responses at PB scale. |
-| [Cloud-Native Architecture](https://docs.greptime.com/user-guide/concepts/architecture) | Designed for [Kubernetes](https://docs.greptime.com/user-guide/deployments-administration/deploy-on-kubernetes/greptimedb-operator-management), with compute/storage separation, native object storage (AWS S3, Azure Blob, etc.) and seamless cross-cloud access. |
-| [Developer-Friendly](https://docs.greptime.com/user-guide/protocols/overview) | Access via SQL/PromQL interfaces, REST API, MySQL/PostgreSQL protocols, and popular ingestion [protocols](https://docs.greptime.com/user-guide/protocols/overview). |
-| [Flexible Deployment](https://docs.greptime.com/user-guide/deployments-administration/overview) | Deploy anywhere: edge (including ARM/[Android](https://docs.greptime.com/user-guide/deployments-administration/run-on-android)) or cloud, with unified APIs and efficient data sync. |
+| [All-in-One Observability](https://docs.greptime.com/user-guide/concepts/why-greptimedb) | OpenTelemetry-native platform unifying metrics, logs, and traces. Query via [SQL](https://docs.greptime.com/user-guide/query-data/sql), [PromQL](https://docs.greptime.com/user-guide/query-data/promql), and [Flow](https://docs.greptime.com/user-guide/flow-computation/overview). |
+| [High Performance](https://docs.greptime.com/user-guide/manage-data/data-index) | Written in Rust with [rich indexing](https://docs.greptime.com/user-guide/manage-data/data-index) (inverted, fulltext, skipping, vector), delivering sub-second responses at PB scale. |
+| [Cost Efficiency](https://docs.greptime.com/user-guide/concepts/architecture) | 50x lower operational and storage costs with compute-storage separation and native object storage (S3, Azure Blob, etc.). |
+| [Cloud-Native & Scalable](https://docs.greptime.com/user-guide/deployments-administration/deploy-on-kubernetes/greptimedb-operator-management) | Purpose-built for [Kubernetes](https://docs.greptime.com/user-guide/deployments-administration/deploy-on-kubernetes/greptimedb-operator-management) with unlimited cross-cloud scaling, handling hundreds of thousands of concurrent requests. |
+| [Developer-Friendly](https://docs.greptime.com/user-guide/protocols/overview) | SQL/PromQL interfaces, built-in web dashboard, REST API, MySQL/PostgreSQL protocol compatibility, and native [OpenTelemetry](https://docs.greptime.com/user-guide/ingest-data/for-observability/opentelemetry/) support. |
+| [Flexible Deployment](https://docs.greptime.com/user-guide/deployments-administration/overview) | Deploy anywhere from ARM-based edge devices (including [Android](https://docs.greptime.com/user-guide/deployments-administration/run-on-android)) to cloud, with unified APIs and efficient data sync. |
 
+✅ **Perfect for:**
+- Unified observability stack replacing Prometheus + Loki + Tempo
+- Large-scale metrics with high cardinality (millions to billions of time series)
+- Large-scale observability platform requiring cost efficiency and scalability
+- IoT and edge computing with resource and bandwidth constraints
+
 Learn more in [Why GreptimeDB](https://docs.greptime.com/user-guide/concepts/why-greptimedb) and [Observability 2.0 and the Database for It](https://greptime.com/blogs/2025-04-25-greptimedb-observability2-new-database).
 
@@ -86,10 +92,10 @@ Learn more in [Why GreptimeDB](https://docs.greptime.com/user-guide/concepts/why
 | Feature | GreptimeDB | Traditional TSDB | Log Stores |
 |----------------------------------|-----------------------|--------------------|-----------------|
 | Data Types | Metrics, Logs, Traces | Metrics only | Logs only |
-| Query Language | SQL, PromQL, Streaming| Custom/PromQL | Custom/DSL |
+| Query Language | SQL, PromQL | Custom/PromQL | Custom/DSL |
 | Deployment | Edge + Cloud | Cloud/On-prem | Mostly central |
 | Indexing & Performance | PB-Scale, Sub-second | Varies | Varies |
-| Integration | REST, SQL, Common protocols | Varies | Varies |
+| Integration | REST API, SQL, Common protocols | Varies | Varies |
 
 **Performance:**
 * [GreptimeDB tops JSONBench's billion-record cold run test!](https://greptime.com/blogs/2025-03-18-jsonbench-greptimedb-performance)
@@ -99,22 +105,18 @@ Read [more benchmark reports](https://docs.greptime.com/user-guide/concepts/feat
 
 ## Architecture
 
-* Read the [architecture](https://docs.greptime.com/contributor-guide/overview/#architecture) document.
-* [DeepWiki](https://deepwiki.com/GreptimeTeam/greptimedb/1-overview) provides an in-depth look at GreptimeDB:
+GreptimeDB can run in two modes:
+* **Standalone Mode** - Single binary for development and small deployments
+* **Distributed Mode** - Separate components for production scale:
+  - Frontend: Query processing and protocol handling
+  - Datanode: Data storage and retrieval
+  - Metasrv: Metadata management and coordination
+
+Read the [architecture](https://docs.greptime.com/contributor-guide/overview/#architecture) document. [DeepWiki](https://deepwiki.com/GreptimeTeam/greptimedb/1-overview) provides an in-depth look at GreptimeDB:
 <img alt="GreptimeDB System Overview" src="docs/architecture.png">
 
 ## Try GreptimeDB
 
-### 1. [Live Demo](https://greptime.com/playground)
-
-Experience GreptimeDB directly in your browser.
-
-### 2. [GreptimeCloud](https://console.greptime.cloud/)
-
-Start instantly with a free cluster.
-
-### 3. Docker (Local Quickstart)
-
 ```shell
 docker pull greptime/greptimedb
 ```
@@ -130,7 +132,8 @@ docker run -p 127.0.0.1:4000-4003:4000-4003 \
   --postgres-addr 0.0.0.0:4003
 ```
 Dashboard: [http://localhost:4000/dashboard](http://localhost:4000/dashboard)
-[Full Install Guide](https://docs.greptime.com/getting-started/installation/overview)
+
+Read more in the [full Install Guide](https://docs.greptime.com/getting-started/installation/overview).
 
 **Troubleshooting:**
 * Cannot connect to the database? Ensure that ports `4000`, `4001`, `4002`, and `4003` are not blocked by a firewall or used by other services.
@@ -159,21 +162,26 @@ cargo run -- standalone start
 
 ## Tools & Extensions
 
-- **Kubernetes:** [GreptimeDB Operator](https://github.com/GrepTimeTeam/greptimedb-operator)
-- **Helm Charts:** [Greptime Helm Charts](https://github.com/GreptimeTeam/helm-charts)
-- **Dashboard:** [Web UI](https://github.com/GreptimeTeam/dashboard)
-- **SDKs/Ingester:** [Go](https://github.com/GreptimeTeam/greptimedb-ingester-go), [Java](https://github.com/GreptimeTeam/greptimedb-ingester-java), [C++](https://github.com/GreptimeTeam/greptimedb-ingester-cpp), [Erlang](https://github.com/GreptimeTeam/greptimedb-ingester-erl), [Rust](https://github.com/GreptimeTeam/greptimedb-ingester-rust), [JS](https://github.com/GreptimeTeam/greptimedb-ingester-js)
-- **Grafana**: [Official Dashboard](https://github.com/GreptimeTeam/greptimedb/blob/main/grafana/README.md)
+- **Kubernetes**: [GreptimeDB Operator](https://github.com/GrepTimeTeam/greptimedb-operator)
+- **Helm Charts**: [Greptime Helm Charts](https://github.com/GreptimeTeam/helm-charts)
+- **Dashboard**: [Web UI](https://github.com/GreptimeTeam/dashboard)
+- **gRPC Ingester**: [Go](https://github.com/GreptimeTeam/greptimedb-ingester-go), [Java](https://github.com/GreptimeTeam/greptimedb-ingester-java), [C++](https://github.com/GreptimeTeam/greptimedb-ingester-cpp), [Erlang](https://github.com/GreptimeTeam/greptimedb-ingester-erl), [Rust](https://github.com/GreptimeTeam/greptimedb-ingester-rust)
+- **Grafana Data Source**: [GreptimeDB Grafana data source plugin](https://github.com/GreptimeTeam/greptimedb-grafana-datasource)
+- **Grafana Dashboard**: [Official Dashboard for monitoring](https://github.com/GreptimeTeam/greptimedb/blob/main/grafana/README.md)
 
 ## Project Status
 
-> **Status:** Beta.
-> **GA (v1.0):** Targeted for mid 2025.
+> **Status:** Beta — marching toward v1.0 GA!
+> **GA (v1.0):** January 10, 2026
 
-- Being used in production by early adopters
+- Deployed in production by open-source projects and commercial users
 - Stable, actively maintained, with regular releases ([version info](https://docs.greptime.com/nightly/reference/about-greptimedb-version))
 - Suitable for evaluation and pilot deployments
 
+GreptimeDB v1.0 represents a major milestone toward maturity — marking stable APIs, production readiness, and proven performance.
+
+**Roadmap:** Beta1 (Nov 10) → Beta2 (Nov 24) → RC1 (Dec 8) → GA (Jan 10, 2026), please read [v1.0 highlights and release plan](https://greptime.com/blogs/2025-11-05-greptimedb-v1-highlights) for details.
+
 For production use, we recommend using the latest stable release.
 [](https://www.star-history.com/#GreptimeTeam/GreptimeDB&Date)
 
@@ -214,5 +222,5 @@ Special thanks to all contributors! See [AUTHORS.md](https://github.com/Greptime
 
 - Uses [Apache Arrow™](https://arrow.apache.org/) (memory model)
 - [Apache Parquet™](https://parquet.apache.org/) (file storage)
-- [Apache Arrow DataFusion™](https://arrow.apache.org/datafusion/) (query engine)
+- [Apache DataFusion™](https://arrow.apache.org/datafusion/) (query engine)
 - [Apache OpenDAL™](https://opendal.apache.org/) (data access abstraction)
@@ -13,9 +13,10 @@
|
|||||||
| Key | Type | Default | Descriptions |
|
| Key | Type | Default | Descriptions |
|
||||||
| --- | -----| ------- | ----------- |
|
| --- | -----| ------- | ----------- |
|
||||||
| `default_timezone` | String | Unset | The default timezone of the server. |
|
| `default_timezone` | String | Unset | The default timezone of the server. |
|
||||||
|
| `default_column_prefix` | String | Unset | The default column prefix for auto-created time index and value columns. |
|
||||||
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
|
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
|
||||||
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
|
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
|
||||||
| `max_concurrent_queries` | Integer | `0` | The maximum current queries allowed to be executed. Zero means unlimited. |
|
| `max_concurrent_queries` | Integer | `0` | The maximum current queries allowed to be executed. Zero means unlimited.<br/>NOTE: This setting affects scan_memory_limit's privileged tier allocation.<br/>When set, 70% of queries get privileged memory access (full scan_memory_limit).<br/>The remaining 30% get standard tier access (70% of scan_memory_limit). |
|
||||||
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
|
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
|
||||||
| `max_in_flight_write_bytes` | String | Unset | The maximum in-flight write bytes. |
|
| `max_in_flight_write_bytes` | String | Unset | The maximum in-flight write bytes. |
|
||||||
| `runtime` | -- | -- | The runtime options. |
|
| `runtime` | -- | -- | The runtime options. |
|
||||||
@@ -103,6 +104,7 @@
|
|||||||
| `flow.num_workers` | Integer | `0` | The number of flow worker in flownode.<br/>Not setting(or set to 0) this value will use the number of CPU cores divided by 2. |
|
| `flow.num_workers` | Integer | `0` | The number of flow worker in flownode.<br/>Not setting(or set to 0) this value will use the number of CPU cores divided by 2. |
|
||||||
| `query` | -- | -- | The query engine options. |
|
| `query` | -- | -- | The query engine options. |
|
||||||
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Default to 0, which means the number of CPU cores. |
|
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Default to 0, which means the number of CPU cores. |
|
||||||
|
| `query.memory_pool_size` | String | `50%` | Memory pool size for query execution operators (aggregation, sorting, join).<br/>Supports absolute size (e.g., "2GB", "4GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit (unbounded, default behavior).<br/>When this limit is reached, queries will fail with ResourceExhausted error.<br/>NOTE: This does NOT limit memory used by table scans. |
|
||||||
| `storage` | -- | -- | The data storage options. |
|
| `storage` | -- | -- | The data storage options. |
|
||||||
| `storage.data_home` | String | `./greptimedb_data` | The working home directory. |
|
| `storage.data_home` | String | `./greptimedb_data` | The working home directory. |
|
||||||
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
|
||||||
@@ -150,10 +152,13 @@
|
|||||||
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
|
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
|
||||||
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
|
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
|
||||||
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
|
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
|
||||||
|
| `region_engine.mito.preload_index_cache` | Bool | `true` | Preload index (puffin) files into cache on region open (default: true).<br/>When enabled, index files are loaded into the write cache during region initialization,<br/>which can improve query performance at the cost of longer startup times. |
|
||||||
|
| `region_engine.mito.index_cache_percent` | Integer | `20` | Percentage of write cache capacity allocated for index (puffin) files (default: 20).<br/>The remaining capacity is used for data (parquet) files.<br/>Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,<br/>1GiB is reserved for index files and 4GiB for data files. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.max_concurrent_scan_files` | Integer | `384` | Maximum number of SST files to scan concurrently. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.scan_memory_limit` | String | `50%` | Memory limit for table scans across all queries.<br/>Supports absolute size (e.g., "2GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit.<br/>NOTE: Works with max_concurrent_queries for tiered memory allocation.<br/>- If max_concurrent_queries is set: 70% of queries get full access, 30% get 70% access.<br/>- If max_concurrent_queries is 0 (unlimited): first 20 queries get full access, rest get 70% access. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
| `region_engine.mito.default_experimental_flat_format` | Bool | `false` | Whether to enable experimental flat format as the default format. |
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
@@ -187,7 +192,7 @@
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
| `region_engine.file` | -- | -- | Enable the file engine. |
| `region_engine.metric` | -- | -- | Metric engine options. |
| `region_engine.metric.experimental_sparse_primary_key_encoding` | Bool | `false` | Whether to enable the experimental sparse primary key encoding. |
| `region_engine.metric.sparse_primary_key_encoding` | Bool | `true` | Whether to use sparse primary key encoding. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
@@ -205,14 +210,6 @@
| `slow_query.record_type` | String | Unset | The record type of slow queries. It can be `system_table` or `log`. |
| `slow_query.threshold` | String | Unset | The threshold of slow query. |
| `slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `export_metrics` | -- | -- | The standalone can export its metrics and send to Prometheus compatible service (e.g. `greptimedb`) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself<br/>You must create the database before enabling it. |
| `export_metrics.self_import.db` | String | Unset | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
@@ -226,6 +223,7 @@
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `default_timezone` | String | Unset | The default timezone of the server. |
| `default_column_prefix` | String | Unset | The default column prefix for auto-created time index and value columns. |
| `max_in_flight_write_bytes` | String | Unset | The maximum in-flight write bytes. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
@@ -306,6 +304,7 @@
| `query` | -- | -- | The query engine options. |
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Default to 0, which means the number of CPU cores. |
| `query.allow_query_fallback` | Bool | `false` | Whether to allow query fallback when push down optimize fails.<br/>Default to false, meaning when push down optimize failed, return error msg |
| `query.memory_pool_size` | String | `50%` | Memory pool size for query execution operators (aggregation, sorting, join).<br/>Supports absolute size (e.g., "4GB", "8GB") or percentage of system memory (e.g., "30%").<br/>Setting it to 0 disables the limit (unbounded, default behavior).<br/>When this limit is reached, queries will fail with ResourceExhausted error.<br/>NOTE: This does NOT limit memory used by table scans (only applies to datanodes). |
| `datanode` | -- | -- | Datanode options. |
| `datanode.client` | -- | -- | Datanode client options. |
| `datanode.client.connect_timeout` | String | `10s` | -- |
@@ -328,12 +327,6 @@
| `slow_query.threshold` | String | `30s` | The threshold of slow query. It can be human readable time string, for example: `10s`, `100ms`, `1s`. |
| `slow_query.sample_ratio` | Float | `1.0` | The sampling ratio of slow query log. The value should be in the range of (0, 1]. For example, `0.1` means 10% of the slow queries will be logged and `1.0` means all slow queries will be logged. |
| `slow_query.ttl` | String | `90d` | The TTL of the `slow_queries` system table. Default is `90d` when `record_type` is `system_table`. |
| `export_metrics` | -- | -- | The frontend can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
@@ -347,7 +340,7 @@
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `data_home` | String | `./greptimedb_data` | The working home directory. |
| `store_addrs` | Array | -- | Store server address default to etcd store.<br/>For postgres store, the format is:<br/>"password=password dbname=postgres user=postgres host=localhost port=5432"<br/>For etcd store, the format is:<br/>"127.0.0.1:2379" |
| `store_addrs` | Array | -- | Store server address(es). The format depends on the selected backend.<br/><br/>For etcd: a list of "host:port" endpoints.<br/>e.g. ["192.168.1.1:2379", "192.168.1.2:2379"]<br/><br/>For PostgreSQL: a connection string in libpq format or URI.<br/>e.g.<br/>- "host=localhost port=5432 user=postgres password=<PASSWORD> dbname=postgres"<br/>- "postgresql://user:password@localhost:5432/mydb?connect_timeout=10"<br/>For details, see: https://docs.rs/tokio-postgres/latest/tokio_postgres/config/struct.Config.html<br/><br/>For MySQL: a MySQL connection URL.<br/>e.g. "mysql://user:password@localhost:3306/greptime_meta?ssl-mode=VERIFY_CA&ssl-ca=/path/to/ca.pem" |
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
| `backend` | String | `etcd_store` | The datastore for meta server.<br/>Available values:<br/>- `etcd_store` (default value)<br/>- `memory_store`<br/>- `postgres_store`<br/>- `mysql_store` |
| `meta_table_name` | String | `greptime_metakv` | Table name in RDS to store metadata. Takes effect when using an RDS kvbackend.<br/>**Only used when backend is `postgres_store`.** |
@@ -363,12 +356,11 @@
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `backend_tls` | -- | -- | TLS configuration for kv store backend (applicable for etcd, PostgreSQL, and MySQL backends)<br/>When using etcd, PostgreSQL, or MySQL as metadata store, you can configure TLS here |
| `backend_tls` | -- | -- | TLS configuration for kv store backend (applicable for etcd, PostgreSQL, and MySQL backends)<br/>When using etcd, PostgreSQL, or MySQL as metadata store, you can configure TLS here<br/><br/>Note: if TLS is configured in both this section and the `store_addrs` connection string, the<br/>settings here will override the TLS settings in `store_addrs`. |
| `backend_tls.mode` | String | `prefer` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- "disable" - No TLS<br/>- "prefer" (default) - Try TLS, fallback to plain<br/>- "require" - Require TLS<br/>- "verify_ca" - Require TLS and verify CA<br/>- "verify_full" - Require TLS and verify hostname |
| `backend_tls.cert_path` | String | `""` | Path to client certificate file (for client authentication)<br/>Like "/path/to/client.crt" |
| `backend_tls.key_path` | String | `""` | Path to client private key file (for client authentication)<br/>Like "/path/to/client.key" |
| `backend_tls.ca_cert_path` | String | `""` | Path to CA certificate file (for server certificate verification)<br/>Required when using custom CAs or self-signed certificates<br/>Leave empty to use system root certificates only<br/>Like "/path/to/ca.crt" |
| `backend_tls.watch` | Bool | `false` | Watch for certificate file changes and auto reload |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.bind_addr` | String | `127.0.0.1:3002` | The address to bind the gRPC server. |
| `grpc.server_addr` | String | `127.0.0.1:3002` | The communication server address for the frontend and datanode to connect to metasrv.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `bind_addr`. |
@@ -423,12 +415,6 @@
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | The metasrv can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
@@ -440,10 +426,11 @@
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `node_id` | Integer | Unset | The datanode identifier and should be unique in the cluster. |
| `default_column_prefix` | String | Unset | The default column prefix for auto-created time index and value columns. |
| `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode start if it can't receive leases in the heartbeat from metasrv. |
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
| `max_concurrent_queries` | Integer | `0` | The maximum current queries allowed to be executed. Zero means unlimited. |
| `max_concurrent_queries` | Integer | `0` | The maximum current queries allowed to be executed. Zero means unlimited.<br/>NOTE: This setting affects scan_memory_limit's privileged tier allocation.<br/>When set, 70% of queries get privileged memory access (full scan_memory_limit).<br/>The remaining 30% get standard tier access (70% of scan_memory_limit). |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
@@ -497,6 +484,7 @@
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
| `query` | -- | -- | The query engine options. |
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Default to 0, which means the number of CPU cores. |
| `query.memory_pool_size` | String | `50%` | Memory pool size for query execution operators (aggregation, sorting, join).<br/>Supports absolute size (e.g., "2GB", "4GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit (unbounded, default behavior).<br/>When this limit is reached, queries will fail with ResourceExhausted error.<br/>NOTE: This does NOT limit memory used by table scans. |
| `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `./greptimedb_data` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
@@ -546,10 +534,13 @@
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.preload_index_cache` | Bool | `true` | Preload index (puffin) files into cache on region open (default: true).<br/>When enabled, index files are loaded into the write cache during region initialization,<br/>which can improve query performance at the cost of longer startup times. |
| `region_engine.mito.index_cache_percent` | Integer | `20` | Percentage of write cache capacity allocated for index (puffin) files (default: 20).<br/>The remaining capacity is used for data (parquet) files.<br/>Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,<br/>1GiB is reserved for index files and 4GiB for data files. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.max_concurrent_scan_files` | Integer | `384` | Maximum number of SST files to scan concurrently. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.scan_memory_limit` | String | `50%` | Memory limit for table scans across all queries.<br/>Supports absolute size (e.g., "2GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit.<br/>NOTE: Works with max_concurrent_queries for tiered memory allocation.<br/>- If max_concurrent_queries is set: 70% of queries get full access, 30% get 70% access.<br/>- If max_concurrent_queries is 0 (unlimited): first 20 queries get full access, rest get 70% access. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
| `region_engine.mito.default_experimental_flat_format` | Bool | `false` | Whether to enable experimental flat format as the default format. |
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
@@ -583,7 +574,7 @@
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
| `region_engine.file` | -- | -- | Enable the file engine. |
| `region_engine.metric` | -- | -- | Metric engine options. |
| `region_engine.metric.experimental_sparse_primary_key_encoding` | Bool | `false` | Whether to enable the experimental sparse primary key encoding. |
| `region_engine.metric.sparse_primary_key_encoding` | Bool | `true` | Whether to use sparse primary key encoding. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
@@ -596,12 +587,6 @@
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
@@ -670,5 +655,6 @@
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `query` | -- | -- | -- |
| `query.parallelism` | Integer | `1` | Parallelism of the query engine for query sent by flownode.<br/>Default to 1, so it won't use too much cpu or memory |
| `query.memory_pool_size` | String | `50%` | Memory pool size for query execution operators (aggregation, sorting, join).<br/>Supports absolute size (e.g., "1GB", "2GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit (unbounded, default behavior).<br/>When this limit is reached, queries will fail with ResourceExhausted error.<br/>NOTE: This does NOT limit memory used by table scans. |
| `memory` | -- | -- | The memory options. |
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |

@@ -2,6 +2,10 @@
## @toml2docs:none-default
node_id = 42

## The default column prefix for auto-created time index and value columns.
## @toml2docs:none-default
default_column_prefix = "greptime"

## Start services after regions have obtained leases.
## It will block the datanode start if it can't receive leases in the heartbeat from metasrv.
require_lease_before_startup = false
@@ -14,6 +18,9 @@ init_regions_in_background = false
init_regions_parallelism = 16

## The maximum current queries allowed to be executed. Zero means unlimited.
## NOTE: This setting affects scan_memory_limit's privileged tier allocation.
## When set, 70% of queries get privileged memory access (full scan_memory_limit).
## The remaining 30% get standard tier access (70% of scan_memory_limit).
max_concurrent_queries = 0

## Enable telemetry to collect anonymous usage data. Enabled by default.
@@ -257,6 +264,13 @@ overwrite_entry_start_id = false
## Default to 0, which means the number of CPU cores.
parallelism = 0

## Memory pool size for query execution operators (aggregation, sorting, join).
## Supports absolute size (e.g., "2GB", "4GB") or percentage of system memory (e.g., "20%").
## Setting it to 0 disables the limit (unbounded, default behavior).
## When this limit is reached, queries will fail with ResourceExhausted error.
## NOTE: This does NOT limit memory used by table scans.
memory_pool_size = "50%"

## The data storage options.
[storage]
## The working home directory.
@@ -485,6 +499,17 @@ write_cache_size = "5GiB"
## @toml2docs:none-default
write_cache_ttl = "8h"

## Preload index (puffin) files into cache on region open (default: true).
## When enabled, index files are loaded into the write cache during region initialization,
## which can improve query performance at the cost of longer startup times.
preload_index_cache = true

## Percentage of write cache capacity allocated for index (puffin) files (default: 20).
## The remaining capacity is used for data (parquet) files.
## Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,
## 1GiB is reserved for index files and 4GiB for data files.
index_cache_percent = 20

## Buffer size for SST writing.
sst_write_buffer_size = "8MB"

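As a quick illustration of how the write-cache options above interact, here is a worked split (a sketch based purely on the defaults shown in this hunk, not an additional setting in the diff):

```toml
# Illustrative split of the write cache with the defaults above.
# write_cache_size = 5GiB and index_cache_percent = 20 give:
#   index (puffin) capacity : 5GiB * 20% = 1GiB
#   data (parquet) capacity : 5GiB - 1GiB = 4GiB
[region_engine.mito]
write_cache_size = "5GiB"
preload_index_cache = true
index_cache_percent = 20
```

Raising `index_cache_percent` trades data-file cache capacity for index cache capacity; `preload_index_cache = true` fills the index share during region open at the cost of slower startup.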
@@ -497,6 +522,14 @@ max_concurrent_scan_files = 384
## Whether to allow stale WAL entries read during replay.
allow_stale_entries = false

## Memory limit for table scans across all queries.
## Supports absolute size (e.g., "2GB") or percentage of system memory (e.g., "20%").
## Setting it to 0 disables the limit.
## NOTE: Works with max_concurrent_queries for tiered memory allocation.
## - If max_concurrent_queries is set: 70% of queries get full access, 30% get 70% access.
## - If max_concurrent_queries is 0 (unlimited): first 20 queries get full access, rest get 70% access.
scan_memory_limit = "50%"

## Minimum time interval between two compactions.
## To align with the old behavior, the default value is 0 (no restrictions).
min_compaction_interval = "0m"
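To make the tiered allocation behind `scan_memory_limit` concrete, here is a worked example; the host size and query counts are illustrative assumptions, not values taken from the diff:

```toml
# Assume a datanode host with 32GiB of memory and the defaults shown above.
# scan_memory_limit = "50%" -> total scan budget = 16GiB.
#
# With max_concurrent_queries = 100 (set at the top level of the datanode config):
#   privileged tier: 70 of the 100 query slots may draw on the full 16GiB budget
#   standard tier:   the remaining 30 slots are capped at 70% of it (~11.2GiB)
#
# With max_concurrent_queries = 0 (unlimited):
#   the first 20 concurrent queries get the full budget; later ones get the 70% cap.
[region_engine.mito]
scan_memory_limit = "50%"
```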
@@ -636,8 +669,8 @@ fork_dictionary_bytes = "1GiB"
[[region_engine]]
## Metric engine options.
[region_engine.metric]
## Whether to enable the experimental sparse primary key encoding.
## Whether to use sparse primary key encoding.
experimental_sparse_primary_key_encoding = false
sparse_primary_key_encoding = true

## The logging options.
[logging]
@@ -679,21 +712,6 @@ otlp_export_protocol = "http"
[logging.tracing_sample_ratio]
default_ratio = 1.0

## The datanode can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]
## whether enable export metrics.
enable = false
## The interval of export metrics.
write_interval = "30s"

[export_metrics.remote_write]
## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""

## HTTP headers of Prometheus remote-write carry.
headers = { }

## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.
|
|||||||
@@ -158,6 +158,13 @@ default_ratio = 1.0
## Default to 1, so it won't use too much cpu or memory
parallelism = 1

## Memory pool size for query execution operators (aggregation, sorting, join).
## Supports absolute size (e.g., "1GB", "2GB") or percentage of system memory (e.g., "20%").
## Setting it to 0 disables the limit (unbounded, default behavior).
## When this limit is reached, queries will fail with ResourceExhausted error.
## NOTE: This does NOT limit memory used by table scans.
memory_pool_size = "50%"

## The memory options.
[memory]
## Whether to enable heap profiling activation during startup.
|
|||||||
@@ -2,6 +2,10 @@
## @toml2docs:none-default
default_timezone = "UTC"

## The default column prefix for auto-created time index and value columns.
## @toml2docs:none-default
default_column_prefix = "greptime"

## The maximum in-flight write bytes.
## @toml2docs:none-default
#+ max_in_flight_write_bytes = "500MB"
@@ -252,6 +256,13 @@ parallelism = 0
## Default to false, meaning when push down optimize failed, return error msg
allow_query_fallback = false

## Memory pool size for query execution operators (aggregation, sorting, join).
## Supports absolute size (e.g., "4GB", "8GB") or percentage of system memory (e.g., "30%").
## Setting it to 0 disables the limit (unbounded, default behavior).
## When this limit is reached, queries will fail with ResourceExhausted error.
## NOTE: This does NOT limit memory used by table scans (only applies to datanodes).
memory_pool_size = "50%"

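For orientation, here is how the percentage form of `memory_pool_size` might resolve in practice; the host size is an assumption for illustration only:

```toml
# Sketch: on a 16GiB frontend host, "50%" yields roughly an 8GiB pool shared by
# aggregation, sorting and join operators; requests beyond the pool fail with
# ResourceExhausted, as described above.
memory_pool_size = "50%"
# Hypothetical absolute form for the same host:
# memory_pool_size = "8GB"
```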
## Datanode options.
[datanode]
## Datanode client options.
@@ -318,21 +329,6 @@ sample_ratio = 1.0
## The TTL of the `slow_queries` system table. Default is `90d` when `record_type` is `system_table`.
ttl = "90d"

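As an illustration, a frontend that samples a tenth of queries slower than ten seconds and keeps them for a month could combine the options above like this (hypothetical values, not part of the diff):

```toml
[slow_query]
threshold = "10s"      # log queries slower than 10 seconds
sample_ratio = 0.1     # sample 10% of the queries that exceed the threshold
ttl = "30d"            # keep `slow_queries` system table entries for 30 days
```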
## The frontend can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]
## whether enable export metrics.
enable = false
## The interval of export metrics.
write_interval = "30s"

[export_metrics.remote_write]
## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""

## HTTP headers of Prometheus remote-write carry.
headers = { }

## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.
|
|||||||
@@ -1,11 +1,19 @@
## The working home directory.
data_home = "./greptimedb_data"

## Store server address default to etcd store.
## For postgres store, the format is:
## "password=password dbname=postgres user=postgres host=localhost port=5432"
## For etcd store, the format is:
## "127.0.0.1:2379"
## Store server address(es). The format depends on the selected backend.
##
## For etcd: a list of "host:port" endpoints.
## e.g. ["192.168.1.1:2379", "192.168.1.2:2379"]
##
## For PostgreSQL: a connection string in libpq format or URI.
## e.g.
## - "host=localhost port=5432 user=postgres password=<PASSWORD> dbname=postgres"
## - "postgresql://user:password@localhost:5432/mydb?connect_timeout=10"
## For details, see: https://docs.rs/tokio-postgres/latest/tokio_postgres/config/struct.Config.html
##
## For MySQL: a MySQL connection URL.
## e.g. "mysql://user:password@localhost:3306/greptime_meta?ssl-mode=VERIFY_CA&ssl-ca=/path/to/ca.pem"
store_addrs = ["127.0.0.1:2379"]

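For reference, the same key written for each supported backend might look roughly like this (hosts and credentials are placeholders; keep only the entry that matches your `backend`):

```toml
# etcd (backend = "etcd_store"): one or more "host:port" endpoints.
store_addrs = ["192.168.1.1:2379", "192.168.1.2:2379"]

# PostgreSQL (backend = "postgres_store"): a libpq-style string or URI.
# store_addrs = ["host=localhost port=5432 user=postgres password=<PASSWORD> dbname=postgres"]

# MySQL (backend = "mysql_store"): a MySQL connection URL.
# store_addrs = ["mysql://user:password@localhost:3306/greptime_meta"]
```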
## If it's not empty, the metasrv will store all data with this key prefix.
@@ -75,6 +83,9 @@ node_max_idle_time = "24hours"

## TLS configuration for kv store backend (applicable for etcd, PostgreSQL, and MySQL backends)
## When using etcd, PostgreSQL, or MySQL as metadata store, you can configure TLS here
##
## Note: if TLS is configured in both this section and the `store_addrs` connection string, the
## settings here will override the TLS settings in `store_addrs`.
[backend_tls]
## TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
## - "disable" - No TLS
@@ -98,9 +109,6 @@ key_path = ""
## Like "/path/to/ca.crt"
ca_cert_path = ""

## Watch for certificate file changes and auto reload
watch = false

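A hedged example of a strict TLS setup for a PostgreSQL or MySQL metadata store, using only the `backend_tls` keys documented above (the paths are placeholders, not values from the diff):

```toml
[backend_tls]
# Require TLS and verify both the CA and the server hostname.
mode = "verify_full"
# Client certificate/key, only needed if the backend requires client authentication.
cert_path = "/etc/greptimedb/tls/client.crt"
key_path = "/etc/greptimedb/tls/client.key"
# CA bundle for self-signed or private CAs; leave empty to use system roots.
ca_cert_path = "/etc/greptimedb/tls/ca.crt"
```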
## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
@@ -323,21 +331,6 @@ otlp_export_protocol = "http"
[logging.tracing_sample_ratio]
default_ratio = 1.0

## The metasrv can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]
## whether enable export metrics.
enable = false
## The interval of export metrics.
write_interval = "30s"

[export_metrics.remote_write]
## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""

## HTTP headers of Prometheus remote-write carry.
headers = { }

## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.
|
|||||||
@@ -2,6 +2,10 @@
## @toml2docs:none-default
default_timezone = "UTC"

## The default column prefix for auto-created time index and value columns.
## @toml2docs:none-default
default_column_prefix = "greptime"

## Initialize all regions in the background during the startup.
## By default, it provides services after all regions have been initialized.
init_regions_in_background = false
@@ -10,6 +14,9 @@ init_regions_in_background = false
init_regions_parallelism = 16

## The maximum current queries allowed to be executed. Zero means unlimited.
## NOTE: This setting affects scan_memory_limit's privileged tier allocation.
## When set, 70% of queries get privileged memory access (full scan_memory_limit).
## The remaining 30% get standard tier access (70% of scan_memory_limit).
max_concurrent_queries = 0

## Enable telemetry to collect anonymous usage data. Enabled by default.
@@ -361,6 +368,13 @@ max_running_procedures = 128
## Default to 0, which means the number of CPU cores.
parallelism = 0

## Memory pool size for query execution operators (aggregation, sorting, join).
## Supports absolute size (e.g., "2GB", "4GB") or percentage of system memory (e.g., "20%").
## Setting it to 0 disables the limit (unbounded, default behavior).
## When this limit is reached, queries will fail with ResourceExhausted error.
## NOTE: This does NOT limit memory used by table scans.
memory_pool_size = "50%"

## The data storage options.
[storage]
## The working home directory.
@@ -576,6 +590,17 @@ write_cache_size = "5GiB"
## @toml2docs:none-default
write_cache_ttl = "8h"

## Preload index (puffin) files into cache on region open (default: true).
## When enabled, index files are loaded into the write cache during region initialization,
## which can improve query performance at the cost of longer startup times.
preload_index_cache = true

## Percentage of write cache capacity allocated for index (puffin) files (default: 20).
## The remaining capacity is used for data (parquet) files.
## Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,
## 1GiB is reserved for index files and 4GiB for data files.
index_cache_percent = 20

## Buffer size for SST writing.
sst_write_buffer_size = "8MB"

@@ -588,6 +613,14 @@ max_concurrent_scan_files = 384
## Whether to allow stale WAL entries read during replay.
allow_stale_entries = false

## Memory limit for table scans across all queries.
## Supports absolute size (e.g., "2GB") or percentage of system memory (e.g., "20%").
## Setting it to 0 disables the limit.
## NOTE: Works with max_concurrent_queries for tiered memory allocation.
## - If max_concurrent_queries is set: 70% of queries get full access, 30% get 70% access.
## - If max_concurrent_queries is 0 (unlimited): first 20 queries get full access, rest get 70% access.
scan_memory_limit = "50%"

## Minimum time interval between two compactions.
## To align with the old behavior, the default value is 0 (no restrictions).
min_compaction_interval = "0m"
@@ -727,8 +760,8 @@ fork_dictionary_bytes = "1GiB"
[[region_engine]]
## Metric engine options.
[region_engine.metric]
## Whether to enable the experimental sparse primary key encoding.
## Whether to use sparse primary key encoding.
experimental_sparse_primary_key_encoding = false
sparse_primary_key_encoding = true

## The logging options.
[logging]
@@ -787,27 +820,6 @@ default_ratio = 1.0
## @toml2docs:none-default
#+ sample_ratio = 1.0

## The standalone can export its metrics and send to Prometheus compatible service (e.g. `greptimedb`) from remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
[export_metrics]
## whether enable export metrics.
enable = false
## The interval of export metrics.
write_interval = "30s"

## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
## You must create the database before enabling it.
[export_metrics.self_import]
## @toml2docs:none-default
db = "greptime_metrics"

[export_metrics.remote_write]
## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""

## HTTP headers of Prometheus remote-write carry.
headers = { }

## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.
|
|||||||
@@ -1,10 +1,10 @@
FROM centos:7 as builder
FROM centos:7 AS builder

ARG CARGO_PROFILE
ARG FEATURES
ARG OUTPUT_DIR

ENV LANG en_US.utf8
ENV LANG=en_US.utf8
WORKDIR /greptimedb

# Install dependencies
@@ -22,7 +22,7 @@ RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
|
|||||||
# Install Rust
|
# Install Rust
|
||||||
SHELL ["/bin/bash", "-c"]
|
SHELL ["/bin/bash", "-c"]
|
||||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||||
ENV PATH /usr/local/bin:/root/.cargo/bin/:$PATH
|
ENV PATH=/usr/local/bin:/root/.cargo/bin/:$PATH
|
||||||
|
|
||||||
# Build the project in release mode.
|
# Build the project in release mode.
|
||||||
RUN --mount=target=.,rw \
|
RUN --mount=target=.,rw \
|
||||||
@@ -33,7 +33,7 @@ RUN --mount=target=.,rw \
|
|||||||
TARGET_DIR=/out/target
|
TARGET_DIR=/out/target
|
||||||
|
|
||||||
# Export the binary to the clean image.
|
# Export the binary to the clean image.
|
||||||
FROM centos:7 as base
|
FROM centos:7 AS base
|
||||||
|
|
||||||
ARG OUTPUT_DIR
|
ARG OUTPUT_DIR
|
||||||
|
|
||||||
@@ -45,7 +45,7 @@ RUN yum install -y epel-release \
|
|||||||
|
|
||||||
WORKDIR /greptime
|
WORKDIR /greptime
|
||||||
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
|
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
|
||||||
ENV PATH /greptime/bin/:$PATH
|
ENV PATH=/greptime/bin/:$PATH
|
||||||
|
|
||||||
ENV MALLOC_CONF="prof:true,prof_active:false"
|
ENV MALLOC_CONF="prof:true,prof_active:false"
|
||||||
|
|
||||||
|
|||||||
65
docker/buildx/distroless/Dockerfile
Normal file
@@ -0,0 +1,65 @@
|
|||||||
|
FROM ubuntu:22.04 AS builder
|
||||||
|
|
||||||
|
ARG CARGO_PROFILE
|
||||||
|
ARG FEATURES
|
||||||
|
ARG OUTPUT_DIR
|
||||||
|
|
||||||
|
ENV LANG=en_US.utf8
|
||||||
|
WORKDIR /greptimedb
|
||||||
|
|
||||||
|
RUN apt-get update && \
|
||||||
|
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common
|
||||||
|
|
||||||
|
# Install dependencies.
|
||||||
|
RUN --mount=type=cache,target=/var/cache/apt \
|
||||||
|
apt-get update && apt-get install -y \
|
||||||
|
libssl-dev \
|
||||||
|
protobuf-compiler \
|
||||||
|
curl \
|
||||||
|
git \
|
||||||
|
build-essential \
|
||||||
|
pkg-config
|
||||||
|
|
||||||
|
# Install Rust.
|
||||||
|
SHELL ["/bin/bash", "-c"]
|
||||||
|
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||||
|
ENV PATH=/root/.cargo/bin/:$PATH
|
||||||
|
|
||||||
|
# Build the project in release mode.
|
||||||
|
RUN --mount=target=. \
|
||||||
|
--mount=type=cache,target=/root/.cargo/registry \
|
||||||
|
make build \
|
||||||
|
CARGO_PROFILE=${CARGO_PROFILE} \
|
||||||
|
FEATURES=${FEATURES} \
|
||||||
|
TARGET_DIR=/out/target
|
||||||
|
|
||||||
|
FROM ubuntu:22.04 AS libs
|
||||||
|
|
||||||
|
ARG TARGETARCH
|
||||||
|
|
||||||
|
# Copy required library dependencies based on architecture
|
||||||
|
RUN if [ "$TARGETARCH" = "amd64" ]; then \
|
||||||
|
cp /lib/x86_64-linux-gnu/libz.so.1.2.11 /lib/x86_64-linux-gnu/libz.so.1; \
|
||||||
|
elif [ "$TARGETARCH" = "arm64" ]; then \
|
||||||
|
cp /lib/aarch64-linux-gnu/libz.so.1.2.11 /lib/aarch64-linux-gnu/libz.so.1; \
|
||||||
|
else \
|
||||||
|
echo "Unsupported architecture: $TARGETARCH" && exit 1; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Export the binary to the clean distroless image.
|
||||||
|
FROM gcr.io/distroless/cc-debian12:latest AS base
|
||||||
|
|
||||||
|
ARG OUTPUT_DIR
|
||||||
|
ARG TARGETARCH
|
||||||
|
|
||||||
|
# Copy required library dependencies
|
||||||
|
COPY --from=libs /lib /lib
|
||||||
|
COPY --from=busybox:stable /bin/busybox /bin/busybox
|
||||||
|
|
||||||
|
WORKDIR /greptime
|
||||||
|
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/greptime
|
||||||
|
ENV PATH=/greptime/bin/:$PATH
|
||||||
|
|
||||||
|
ENV MALLOC_CONF="prof:true,prof_active:false"
|
||||||
|
|
||||||
|
ENTRYPOINT ["greptime"]
|
||||||
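
The new distroless Dockerfile above is driven entirely by build args. A build invocation might look like the following sketch; the profile and output-dir values are illustrative and depend on how the repository's `make build` maps them:

```bash
# Illustrative only: adjust the build args to match your Makefile configuration.
docker buildx build \
  -f docker/buildx/distroless/Dockerfile \
  --platform linux/amd64 \
  --build-arg CARGO_PROFILE=release \
  --build-arg OUTPUT_DIR=release \
  -t greptimedb:distroless \
  --load .
```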
@@ -1,10 +1,10 @@
|
|||||||
FROM ubuntu:22.04 as builder
|
FROM ubuntu:22.04 AS builder
|
||||||
|
|
||||||
ARG CARGO_PROFILE
|
ARG CARGO_PROFILE
|
||||||
ARG FEATURES
|
ARG FEATURES
|
||||||
ARG OUTPUT_DIR
|
ARG OUTPUT_DIR
|
||||||
|
|
||||||
ENV LANG en_US.utf8
|
ENV LANG=en_US.utf8
|
||||||
WORKDIR /greptimedb
|
WORKDIR /greptimedb
|
||||||
|
|
||||||
RUN apt-get update && \
|
RUN apt-get update && \
|
||||||
@@ -23,7 +23,7 @@ RUN --mount=type=cache,target=/var/cache/apt \
|
|||||||
# Install Rust.
|
# Install Rust.
|
||||||
SHELL ["/bin/bash", "-c"]
|
SHELL ["/bin/bash", "-c"]
|
||||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
|
||||||
ENV PATH /root/.cargo/bin/:$PATH
|
ENV PATH=/root/.cargo/bin/:$PATH
|
||||||
|
|
||||||
# Build the project in release mode.
|
# Build the project in release mode.
|
||||||
RUN --mount=target=. \
|
RUN --mount=target=. \
|
||||||
@@ -35,7 +35,7 @@ RUN --mount=target=. \
|
|||||||
|
|
||||||
# Export the binary to the clean image.
|
# Export the binary to the clean image.
|
||||||
# TODO(zyy17): Maybe should use the more secure container image.
|
# TODO(zyy17): Maybe should use the more secure container image.
|
||||||
FROM ubuntu:22.04 as base
|
FROM ubuntu:22.04 AS base
|
||||||
|
|
||||||
ARG OUTPUT_DIR
|
ARG OUTPUT_DIR
|
||||||
|
|
||||||
@@ -45,7 +45,7 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get \
|
|||||||
|
|
||||||
WORKDIR /greptime
|
WORKDIR /greptime
|
||||||
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
|
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
|
||||||
ENV PATH /greptime/bin/:$PATH
|
ENV PATH=/greptime/bin/:$PATH
|
||||||
|
|
||||||
ENV MALLOC_CONF="prof:true,prof_active:false"
|
ENV MALLOC_CONF="prof:true,prof_active:false"
|
||||||
|
|
||||||
|
|||||||
@@ -13,7 +13,7 @@ ARG TARGETARCH
|
|||||||
|
|
||||||
ADD $TARGETARCH/greptime /greptime/bin/
|
ADD $TARGETARCH/greptime /greptime/bin/
|
||||||
|
|
||||||
ENV PATH /greptime/bin/:$PATH
|
ENV PATH=/greptime/bin/:$PATH
|
||||||
|
|
||||||
ENV MALLOC_CONF="prof:true,prof_active:false"
|
ENV MALLOC_CONF="prof:true,prof_active:false"
|
||||||
|
|
||||||
|
|||||||
40
docker/ci/distroless/Dockerfile
Normal file
40
docker/ci/distroless/Dockerfile
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
FROM ubuntu:22.04 AS libs
|
||||||
|
|
||||||
|
ARG TARGETARCH
|
||||||
|
|
||||||
|
# Copy required library dependencies based on architecture
|
||||||
|
# TARGETARCH values: amd64, arm64
|
||||||
|
# Ubuntu library paths: x86_64-linux-gnu, aarch64-linux-gnu
|
||||||
|
RUN if [ "$TARGETARCH" = "amd64" ]; then \
|
||||||
|
mkdir -p /output/x86_64-linux-gnu && \
|
||||||
|
cp /lib/x86_64-linux-gnu/libz.so.1.2.11 /output/x86_64-linux-gnu/libz.so.1; \
|
||||||
|
elif [ "$TARGETARCH" = "arm64" ]; then \
|
||||||
|
mkdir -p /output/aarch64-linux-gnu && \
|
||||||
|
cp /lib/aarch64-linux-gnu/libz.so.1.2.11 /output/aarch64-linux-gnu/libz.so.1; \
|
||||||
|
else \
|
||||||
|
echo "Unsupported architecture: $TARGETARCH" && exit 1; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
FROM gcr.io/distroless/cc-debian12:latest
|
||||||
|
|
||||||
|
# The root path that contains all the dependencies needed to build this Dockerfile.
|
||||||
|
ARG DOCKER_BUILD_ROOT=.
|
||||||
|
# The binary name of GreptimeDB executable.
|
||||||
|
# Defaults to "greptime", but sometimes in other projects it might be different.
|
||||||
|
ARG TARGET_BIN=greptime
|
||||||
|
|
||||||
|
ARG TARGETARCH
|
||||||
|
|
||||||
|
# Copy required library dependencies
|
||||||
|
COPY --from=libs /output /lib
|
||||||
|
COPY --from=busybox:stable /bin/busybox /bin/busybox
|
||||||
|
|
||||||
|
ADD $TARGETARCH/$TARGET_BIN /greptime/bin/
|
||||||
|
|
||||||
|
ENV PATH=/greptime/bin/:$PATH
|
||||||
|
|
||||||
|
ENV TARGET_BIN=$TARGET_BIN
|
||||||
|
|
||||||
|
ENV MALLOC_CONF="prof:true,prof_active:false"
|
||||||
|
|
||||||
|
ENTRYPOINT ["greptime"]
|
||||||
@@ -14,7 +14,7 @@ ARG TARGETARCH
|
|||||||
|
|
||||||
ADD $TARGETARCH/$TARGET_BIN /greptime/bin/
|
ADD $TARGETARCH/$TARGET_BIN /greptime/bin/
|
||||||
|
|
||||||
ENV PATH /greptime/bin/:$PATH
|
ENV PATH=/greptime/bin/:$PATH
|
||||||
|
|
||||||
ENV TARGET_BIN=$TARGET_BIN
|
ENV TARGET_BIN=$TARGET_BIN
|
||||||
|
|
||||||
|
|||||||
@@ -14,3 +14,18 @@ Log Level changed from Some("info") to "trace,flow=debug"%
|
|||||||
The data is a string in the format of `global_level,module1=level1,module2=level2,...` that follows the same rules as `RUST_LOG`.

The module is the module name of the log, and the level is the log level. The log level can be one of the following: `trace`, `debug`, `info`, `warn`, `error`, `off` (case-insensitive).
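
For instance, the reply shown above corresponds to the filter `trace,flow=debug`, which sets the global level to `trace` and the `flow` module to `debug`. A filter that instead keeps the global level at `info` and raises only selected modules could look like this (the module names are illustrative):

```
info,flow=debug,mito2=trace
```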

# Enable/Disable Trace on the Fly

## HTTP API

Example:

```bash
curl --data "true" 127.0.0.1:4000/debug/enable_trace
```

The database will reply with something like:

```
trace enabled%
```

Possible values are "true" or "false".
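
Symmetrically, sending "false" turns tracing back off; the reply is assumed to mirror the enable case:

```bash
curl --data "false" 127.0.0.1:4000/debug/enable_trace
```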
|
|||||||
@@ -71,6 +71,15 @@ curl -X POST localhost:4000/debug/prof/mem/activate
|
|||||||

# Deactivate heap profiling
curl -X POST localhost:4000/debug/prof/mem/deactivate

# Activate the gdump feature, which dumps memory profiling data every time virtual memory usage exceeds the previous maximum value.
curl -X POST localhost:4000/debug/prof/mem/gdump -d 'activate=true'

# Deactivate gdump.
curl -X POST localhost:4000/debug/prof/mem/gdump -d 'activate=false'

# Retrieve the current gdump status.
curl -X GET localhost:4000/debug/prof/mem/gdump
```
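
A typical gdump session might therefore look like the following; it simply combines the endpoints shown above, and the exact reply bodies are not specified here:

```bash
# Turn gdump on, let the workload run for a while, check status, then turn it off.
curl -X POST localhost:4000/debug/prof/mem/gdump -d 'activate=true'
curl -X GET localhost:4000/debug/prof/mem/gdump
curl -X POST localhost:4000/debug/prof/mem/gdump -d 'activate=false'
```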
|
|
||||||
### Dump memory profiling data
|
|||||||
@@ -106,6 +106,37 @@ This mechanism may be too complex to implement at once. We can consider a two-ph
|
|||||||
Also, the read replica shouldn't lag behind in manifest version by more than the lingering time of obsolete files; otherwise it might reference files that have already been deleted by the GC worker.

- Need to upload the tmp manifest to object storage, which may introduce additional complexity and potential performance overhead. But since long-running queries are typically infrequent, the performance impact is expected to be minimal.

One potential race condition with region migration is illustrated below:

```mermaid
sequenceDiagram
    participant gc_worker as GC Worker (same datanode as region 1)
    participant region1 as Region 1 (Leader → Follower)
    participant region2 as Region 2 (Follower → Leader)
    participant region_dir as Region Directory

    gc_worker->>region1: Start GC, get region manifest
    activate region1
    region1-->>gc_worker: Region 1 manifest
    deactivate region1
    gc_worker->>region_dir: Scan region directory

    Note over region1,region2: Region Migration Occurs
    region1-->>region2: Downgrade to Follower
    region2-->>region1: Becomes Leader

    region2->>region_dir: Add new file

    gc_worker->>region_dir: Continue scanning
    gc_worker-->>region_dir: Discovers new file
    Note over gc_worker: New file not in Region 1's manifest
    gc_worker->>gc_worker: Mark file as orphan (incorrectly)
```

This could cause the GC worker to incorrectly mark the new file as an orphan and delete it, if the configured lingering time for orphan files (files not referenced anywhere, whether in use or not) is not long enough.

A good enough solution could be to take a lock that prevents the GC worker from running on a region while region migration is happening on that region, and vice versa, as sketched below.

The race condition between the GC worker and repartition also needs to be considered carefully. For now, acquiring locks for both region migration and repartition during the GC worker process could be a simple solution.
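
A minimal sketch of that mutual exclusion is shown below; it only illustrates the intended ordering, and the lock and step names are illustrative rather than actual APIs:

```mermaid
sequenceDiagram
    participant gc_worker as GC Worker
    participant lock as Region Lock (region 1)
    participant migration as Region Migration

    gc_worker->>lock: acquire(region 1)
    lock-->>gc_worker: granted
    migration->>lock: acquire(region 1)
    Note over migration: blocked until the GC worker releases the lock
    gc_worker->>gc_worker: scan directory & mark orphan files
    gc_worker->>lock: release(region 1)
    lock-->>migration: granted
    migration->>migration: downgrade old leader / promote new leader
```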
|
|
||||||
## Conclusion and Rationale
|
|
||||||
|
|||||||
@@ -8,6 +8,7 @@ license.workspace = true
|
|||||||
workspace = true
|
workspace = true
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
|
arrow-schema.workspace = true
|
||||||
common-base.workspace = true
|
common-base.workspace = true
|
||||||
common-decimal.workspace = true
|
common-decimal.workspace = true
|
||||||
common-error.workspace = true
|
common-error.workspace = true
|
||||||
|
|||||||
@@ -12,7 +12,7 @@
|
|||||||
// See the License for the specific language governing permissions and
|
// See the License for the specific language governing permissions and
|
||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
use std::collections::HashSet;
|
use std::collections::{BTreeMap, HashSet};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use common_decimal::Decimal128;
|
use common_decimal::Decimal128;
|
||||||
@@ -20,13 +20,12 @@ use common_decimal::decimal128::{DECIMAL128_DEFAULT_SCALE, DECIMAL128_MAX_PRECIS
|
|||||||
use common_time::time::Time;
|
use common_time::time::Time;
|
||||||
use common_time::timestamp::TimeUnit;
|
use common_time::timestamp::TimeUnit;
|
||||||
use common_time::{Date, IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth, Timestamp};
|
use common_time::{Date, IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth, Timestamp};
|
||||||
|
use datatypes::json::value::{JsonNumber, JsonValue, JsonValueRef, JsonVariant};
|
||||||
use datatypes::prelude::{ConcreteDataType, ValueRef};
|
use datatypes::prelude::{ConcreteDataType, ValueRef};
|
||||||
use datatypes::types::{
|
use datatypes::types::{
|
||||||
IntervalType, JsonFormat, StructField, StructType, TimeType, TimestampType,
|
IntervalType, JsonFormat, JsonType, StructField, StructType, TimeType, TimestampType,
|
||||||
};
|
|
||||||
use datatypes::value::{
|
|
||||||
ListValue, ListValueRef, OrderedF32, OrderedF64, StructValue, StructValueRef, Value,
|
|
||||||
};
|
};
|
||||||
|
use datatypes::value::{ListValueRef, OrderedF32, OrderedF64, StructValueRef, Value};
|
||||||
use datatypes::vectors::VectorRef;
|
use datatypes::vectors::VectorRef;
|
||||||
use greptime_proto::v1::column_data_type_extension::TypeExt;
|
use greptime_proto::v1::column_data_type_extension::TypeExt;
|
||||||
use greptime_proto::v1::ddl_request::Expr;
|
use greptime_proto::v1::ddl_request::Expr;
|
||||||
@@ -34,9 +33,9 @@ use greptime_proto::v1::greptime_request::Request;
|
|||||||
use greptime_proto::v1::query_request::Query;
|
use greptime_proto::v1::query_request::Query;
|
||||||
use greptime_proto::v1::value::ValueData;
|
use greptime_proto::v1::value::ValueData;
|
||||||
use greptime_proto::v1::{
|
use greptime_proto::v1::{
|
||||||
self, ColumnDataTypeExtension, DdlRequest, DecimalTypeExtension, JsonNativeTypeExtension,
|
self, ColumnDataTypeExtension, DdlRequest, DecimalTypeExtension, DictionaryTypeExtension,
|
||||||
JsonTypeExtension, ListTypeExtension, QueryRequest, Row, SemanticType, StructTypeExtension,
|
JsonList, JsonNativeTypeExtension, JsonObject, JsonTypeExtension, ListTypeExtension,
|
||||||
VectorTypeExtension,
|
QueryRequest, Row, SemanticType, StructTypeExtension, VectorTypeExtension, json_value,
|
||||||
};
|
};
|
||||||
use paste::paste;
|
use paste::paste;
|
||||||
use snafu::prelude::*;
|
use snafu::prelude::*;
|
||||||
@@ -81,6 +80,10 @@ impl ColumnDataTypeWrapper {
|
|||||||
pub fn to_parts(&self) -> (ColumnDataType, Option<ColumnDataTypeExtension>) {
|
pub fn to_parts(&self) -> (ColumnDataType, Option<ColumnDataTypeExtension>) {
|
||||||
(self.datatype, self.datatype_ext.clone())
|
(self.datatype, self.datatype_ext.clone())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn into_parts(self) -> (ColumnDataType, Option<ColumnDataTypeExtension>) {
|
||||||
|
(self.datatype, self.datatype_ext)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<ColumnDataTypeWrapper> for ConcreteDataType {
|
impl From<ColumnDataTypeWrapper> for ConcreteDataType {
|
||||||
@@ -126,6 +129,7 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
|
|||||||
};
|
};
|
||||||
ConcreteDataType::json_native_datatype(inner_type.into())
|
ConcreteDataType::json_native_datatype(inner_type.into())
|
||||||
}
|
}
|
||||||
|
None => ConcreteDataType::Json(JsonType::null()),
|
||||||
_ => {
|
_ => {
|
||||||
// invalid state, type extension is missing or invalid
|
// invalid state, type extension is missing or invalid
|
||||||
ConcreteDataType::null_datatype()
|
ConcreteDataType::null_datatype()
|
||||||
@@ -215,6 +219,26 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
|
|||||||
ConcreteDataType::null_datatype()
|
ConcreteDataType::null_datatype()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
ColumnDataType::Dictionary => {
|
||||||
|
if let Some(TypeExt::DictionaryType(d)) = datatype_wrapper
|
||||||
|
.datatype_ext
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|datatype_ext| datatype_ext.type_ext.as_ref())
|
||||||
|
{
|
||||||
|
let key_type = ColumnDataTypeWrapper {
|
||||||
|
datatype: d.key_datatype(),
|
||||||
|
datatype_ext: d.key_datatype_extension.clone().map(|ext| *ext),
|
||||||
|
};
|
||||||
|
let value_type = ColumnDataTypeWrapper {
|
||||||
|
datatype: d.value_datatype(),
|
||||||
|
datatype_ext: d.value_datatype_extension.clone().map(|ext| *ext),
|
||||||
|
};
|
||||||
|
ConcreteDataType::dictionary_datatype(key_type.into(), value_type.into())
|
||||||
|
} else {
|
||||||
|
// invalid state: type extension not found
|
||||||
|
ConcreteDataType::null_datatype()
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -338,13 +362,30 @@ impl ColumnDataTypeWrapper {
|
|||||||
}),
|
}),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn dictionary_datatype(
|
||||||
|
key_type: ColumnDataTypeWrapper,
|
||||||
|
value_type: ColumnDataTypeWrapper,
|
||||||
|
) -> Self {
|
||||||
|
ColumnDataTypeWrapper {
|
||||||
|
datatype: ColumnDataType::Dictionary,
|
||||||
|
datatype_ext: Some(ColumnDataTypeExtension {
|
||||||
|
type_ext: Some(TypeExt::DictionaryType(Box::new(DictionaryTypeExtension {
|
||||||
|
key_datatype: key_type.datatype().into(),
|
||||||
|
key_datatype_extension: key_type.datatype_ext.map(Box::new),
|
||||||
|
value_datatype: value_type.datatype().into(),
|
||||||
|
value_datatype_extension: value_type.datatype_ext.map(Box::new),
|
||||||
|
}))),
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
|
impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
|
||||||
type Error = error::Error;
|
type Error = error::Error;
|
||||||
|
|
||||||
fn try_from(datatype: ConcreteDataType) -> Result<Self> {
|
fn try_from(datatype: ConcreteDataType) -> Result<Self> {
|
||||||
let column_datatype = match datatype {
|
let column_datatype = match &datatype {
|
||||||
ConcreteDataType::Boolean(_) => ColumnDataType::Boolean,
|
ConcreteDataType::Boolean(_) => ColumnDataType::Boolean,
|
||||||
ConcreteDataType::Int8(_) => ColumnDataType::Int8,
|
ConcreteDataType::Int8(_) => ColumnDataType::Int8,
|
||||||
ConcreteDataType::Int16(_) => ColumnDataType::Int16,
|
ConcreteDataType::Int16(_) => ColumnDataType::Int16,
|
||||||
@@ -381,9 +422,8 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
|
|||||||
ConcreteDataType::Vector(_) => ColumnDataType::Vector,
|
ConcreteDataType::Vector(_) => ColumnDataType::Vector,
|
||||||
ConcreteDataType::List(_) => ColumnDataType::List,
|
ConcreteDataType::List(_) => ColumnDataType::List,
|
||||||
ConcreteDataType::Struct(_) => ColumnDataType::Struct,
|
ConcreteDataType::Struct(_) => ColumnDataType::Struct,
|
||||||
ConcreteDataType::Null(_)
|
ConcreteDataType::Dictionary(_) => ColumnDataType::Dictionary,
|
||||||
| ConcreteDataType::Dictionary(_)
|
ConcreteDataType::Null(_) | ConcreteDataType::Duration(_) => {
|
||||||
| ConcreteDataType::Duration(_) => {
|
|
||||||
return error::IntoColumnDataTypeSnafu { from: datatype }.fail();
|
return error::IntoColumnDataTypeSnafu { from: datatype }.fail();
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
@@ -404,18 +444,24 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
|
|||||||
JsonFormat::Jsonb => Some(ColumnDataTypeExtension {
|
JsonFormat::Jsonb => Some(ColumnDataTypeExtension {
|
||||||
type_ext: Some(TypeExt::JsonType(JsonTypeExtension::JsonBinary.into())),
|
type_ext: Some(TypeExt::JsonType(JsonTypeExtension::JsonBinary.into())),
|
||||||
}),
|
}),
|
||||||
JsonFormat::Native(inner) => {
|
JsonFormat::Native(native_type) => {
|
||||||
let inner_type = ColumnDataTypeWrapper::try_from(*inner.clone())?;
|
if native_type.is_null() {
|
||||||
|
None
|
||||||
|
} else {
|
||||||
|
let native_type = ConcreteDataType::from(native_type.as_ref());
|
||||||
|
let (datatype, datatype_extension) =
|
||||||
|
ColumnDataTypeWrapper::try_from(native_type)?.into_parts();
|
||||||
Some(ColumnDataTypeExtension {
|
Some(ColumnDataTypeExtension {
|
||||||
type_ext: Some(TypeExt::JsonNativeType(Box::new(
|
type_ext: Some(TypeExt::JsonNativeType(Box::new(
|
||||||
JsonNativeTypeExtension {
|
JsonNativeTypeExtension {
|
||||||
datatype: inner_type.datatype.into(),
|
datatype: datatype as i32,
|
||||||
datatype_extension: inner_type.datatype_ext.map(Box::new),
|
datatype_extension: datatype_extension.map(Box::new),
|
||||||
},
|
},
|
||||||
))),
|
))),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
None
|
None
|
||||||
}
|
}
|
||||||
@@ -463,6 +509,25 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
|
|||||||
None
|
None
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
ColumnDataType::Dictionary => {
|
||||||
|
if let ConcreteDataType::Dictionary(dict_type) = &datatype {
|
||||||
|
let key_type = ColumnDataTypeWrapper::try_from(dict_type.key_type().clone())?;
|
||||||
|
let value_type =
|
||||||
|
ColumnDataTypeWrapper::try_from(dict_type.value_type().clone())?;
|
||||||
|
Some(ColumnDataTypeExtension {
|
||||||
|
type_ext: Some(TypeExt::DictionaryType(Box::new(
|
||||||
|
DictionaryTypeExtension {
|
||||||
|
key_datatype: key_type.datatype.into(),
|
||||||
|
key_datatype_extension: key_type.datatype_ext.map(Box::new),
|
||||||
|
value_datatype: value_type.datatype.into(),
|
||||||
|
value_datatype_extension: value_type.datatype_ext.map(Box::new),
|
||||||
|
},
|
||||||
|
))),
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
}
|
||||||
_ => None,
|
_ => None,
|
||||||
};
|
};
|
||||||
Ok(Self {
|
Ok(Self {
|
||||||
@@ -601,6 +666,9 @@ pub fn values_with_capacity(datatype: ColumnDataType, capacity: usize) -> Values
|
|||||||
struct_values: Vec::with_capacity(capacity),
|
struct_values: Vec::with_capacity(capacity),
|
||||||
..Default::default()
|
..Default::default()
|
||||||
},
|
},
|
||||||
|
ColumnDataType::Dictionary => Values {
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -801,21 +869,8 @@ pub fn pb_value_to_value_ref<'a>(
|
|||||||
}
|
}
|
||||||
|
|
||||||
ValueData::JsonValue(inner_value) => {
|
ValueData::JsonValue(inner_value) => {
|
||||||
let json_datatype_ext = datatype_ext
|
let value = decode_json_value(inner_value);
|
||||||
.as_ref()
|
ValueRef::Json(Box::new(value))
|
||||||
.and_then(|ext| {
|
|
||||||
if let Some(TypeExt::JsonNativeType(l)) = &ext.type_ext {
|
|
||||||
Some(l)
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.expect("json value must contain datatype ext");
|
|
||||||
|
|
||||||
ValueRef::Json(Box::new(pb_value_to_value_ref(
|
|
||||||
inner_value,
|
|
||||||
json_datatype_ext.datatype_extension.as_deref(),
|
|
||||||
)))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -839,125 +894,64 @@ pub fn is_column_type_value_eq(
|
|||||||
.unwrap_or(false)
|
.unwrap_or(false)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Convert value into proto's value.
|
fn encode_json_value(value: JsonValue) -> v1::JsonValue {
|
||||||
pub fn to_proto_value(value: Value) -> v1::Value {
|
fn helper(json: JsonVariant) -> v1::JsonValue {
|
||||||
match value {
|
let value = match json {
|
||||||
Value::Null => v1::Value { value_data: None },
|
JsonVariant::Null => None,
|
||||||
Value::Boolean(v) => v1::Value {
|
JsonVariant::Bool(x) => Some(json_value::Value::Boolean(x)),
|
||||||
value_data: Some(ValueData::BoolValue(v)),
|
JsonVariant::Number(x) => Some(match x {
|
||||||
},
|
JsonNumber::PosInt(i) => json_value::Value::Uint(i),
|
||||||
Value::UInt8(v) => v1::Value {
|
JsonNumber::NegInt(i) => json_value::Value::Int(i),
|
||||||
value_data: Some(ValueData::U8Value(v.into())),
|
JsonNumber::Float(f) => json_value::Value::Float(f.0),
|
||||||
},
|
}),
|
||||||
Value::UInt16(v) => v1::Value {
|
JsonVariant::String(x) => Some(json_value::Value::Str(x)),
|
||||||
value_data: Some(ValueData::U16Value(v.into())),
|
JsonVariant::Array(x) => Some(json_value::Value::Array(JsonList {
|
||||||
},
|
items: x.into_iter().map(helper).collect::<Vec<_>>(),
|
||||||
Value::UInt32(v) => v1::Value {
|
|
||||||
value_data: Some(ValueData::U32Value(v)),
|
|
||||||
},
|
|
||||||
Value::UInt64(v) => v1::Value {
|
|
||||||
value_data: Some(ValueData::U64Value(v)),
|
|
||||||
},
|
|
||||||
Value::Int8(v) => v1::Value {
|
|
||||||
value_data: Some(ValueData::I8Value(v.into())),
|
|
||||||
},
|
|
||||||
Value::Int16(v) => v1::Value {
|
|
||||||
value_data: Some(ValueData::I16Value(v.into())),
|
|
||||||
},
|
|
||||||
Value::Int32(v) => v1::Value {
|
|
||||||
value_data: Some(ValueData::I32Value(v)),
|
|
||||||
},
|
|
||||||
Value::Int64(v) => v1::Value {
|
|
||||||
value_data: Some(ValueData::I64Value(v)),
|
|
||||||
},
|
|
||||||
Value::Float32(v) => v1::Value {
|
|
||||||
value_data: Some(ValueData::F32Value(*v)),
|
|
||||||
},
|
|
||||||
Value::Float64(v) => v1::Value {
|
|
||||||
value_data: Some(ValueData::F64Value(*v)),
|
|
||||||
},
|
|
||||||
Value::String(v) => v1::Value {
|
|
||||||
value_data: Some(ValueData::StringValue(v.as_utf8().to_string())),
|
|
||||||
},
|
|
||||||
Value::Binary(v) => v1::Value {
|
|
||||||
value_data: Some(ValueData::BinaryValue(v.to_vec())),
|
|
||||||
},
|
|
||||||
Value::Date(v) => v1::Value {
|
|
||||||
value_data: Some(ValueData::DateValue(v.val())),
|
|
||||||
},
|
|
||||||
Value::Timestamp(v) => match v.unit() {
|
|
||||||
TimeUnit::Second => v1::Value {
|
|
||||||
value_data: Some(ValueData::TimestampSecondValue(v.value())),
|
|
||||||
},
|
|
||||||
TimeUnit::Millisecond => v1::Value {
|
|
||||||
value_data: Some(ValueData::TimestampMillisecondValue(v.value())),
|
|
||||||
},
|
|
||||||
TimeUnit::Microsecond => v1::Value {
|
|
||||||
value_data: Some(ValueData::TimestampMicrosecondValue(v.value())),
|
|
||||||
},
|
|
||||||
TimeUnit::Nanosecond => v1::Value {
|
|
||||||
value_data: Some(ValueData::TimestampNanosecondValue(v.value())),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Value::Time(v) => match v.unit() {
|
|
||||||
TimeUnit::Second => v1::Value {
|
|
||||||
value_data: Some(ValueData::TimeSecondValue(v.value())),
|
|
||||||
},
|
|
||||||
TimeUnit::Millisecond => v1::Value {
|
|
||||||
value_data: Some(ValueData::TimeMillisecondValue(v.value())),
|
|
||||||
},
|
|
||||||
TimeUnit::Microsecond => v1::Value {
|
|
||||||
value_data: Some(ValueData::TimeMicrosecondValue(v.value())),
|
|
||||||
},
|
|
||||||
TimeUnit::Nanosecond => v1::Value {
|
|
||||||
value_data: Some(ValueData::TimeNanosecondValue(v.value())),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Value::IntervalYearMonth(v) => v1::Value {
|
|
||||||
value_data: Some(ValueData::IntervalYearMonthValue(v.to_i32())),
|
|
||||||
},
|
|
||||||
Value::IntervalDayTime(v) => v1::Value {
|
|
||||||
value_data: Some(ValueData::IntervalDayTimeValue(v.to_i64())),
|
|
||||||
},
|
|
||||||
Value::IntervalMonthDayNano(v) => v1::Value {
|
|
||||||
value_data: Some(ValueData::IntervalMonthDayNanoValue(
|
|
||||||
convert_month_day_nano_to_pb(v),
|
|
||||||
)),
|
|
||||||
},
|
|
||||||
Value::Decimal128(v) => v1::Value {
|
|
||||||
value_data: Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
|
|
||||||
},
|
|
||||||
Value::List(list_value) => v1::Value {
|
|
||||||
value_data: Some(ValueData::ListValue(v1::ListValue {
|
|
||||||
items: convert_list_to_pb_values(list_value),
|
|
||||||
})),
|
})),
|
||||||
},
|
JsonVariant::Object(x) => {
|
||||||
Value::Struct(struct_value) => v1::Value {
|
let entries = x
|
||||||
value_data: Some(ValueData::StructValue(v1::StructValue {
|
.into_iter()
|
||||||
items: convert_struct_to_pb_values(struct_value),
|
.map(|(key, v)| v1::json_object::Entry {
|
||||||
})),
|
key,
|
||||||
},
|
value: Some(helper(v)),
|
||||||
Value::Json(v) => v1::Value {
|
})
|
||||||
value_data: Some(ValueData::JsonValue(Box::new(to_proto_value(*v)))),
|
.collect::<Vec<_>>();
|
||||||
},
|
Some(json_value::Value::Object(JsonObject { entries }))
|
||||||
Value::Duration(_) => v1::Value { value_data: None },
|
|
||||||
}
|
}
|
||||||
|
};
|
||||||
|
v1::JsonValue { value }
|
||||||
|
}
|
||||||
|
helper(value.into_variant())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn convert_list_to_pb_values(list_value: ListValue) -> Vec<v1::Value> {
|
fn decode_json_value(value: &v1::JsonValue) -> JsonValueRef<'_> {
|
||||||
list_value
|
let Some(value) = &value.value else {
|
||||||
.take_items()
|
return JsonValueRef::null();
|
||||||
.into_iter()
|
};
|
||||||
.map(to_proto_value)
|
match value {
|
||||||
.collect()
|
json_value::Value::Boolean(x) => (*x).into(),
|
||||||
}
|
json_value::Value::Int(x) => (*x).into(),
|
||||||
|
json_value::Value::Uint(x) => (*x).into(),
|
||||||
fn convert_struct_to_pb_values(struct_value: StructValue) -> Vec<v1::Value> {
|
json_value::Value::Float(x) => (*x).into(),
|
||||||
struct_value
|
json_value::Value::Str(x) => (x.as_str()).into(),
|
||||||
.take_items()
|
json_value::Value::Array(array) => array
|
||||||
.into_iter()
|
.items
|
||||||
.map(to_proto_value)
|
.iter()
|
||||||
.collect()
|
.map(|x| decode_json_value(x).into_variant())
|
||||||
|
.collect::<Vec<_>>()
|
||||||
|
.into(),
|
||||||
|
json_value::Value::Object(x) => x
|
||||||
|
.entries
|
||||||
|
.iter()
|
||||||
|
.filter_map(|entry| {
|
||||||
|
entry
|
||||||
|
.value
|
||||||
|
.as_ref()
|
||||||
|
.map(|v| (entry.key.as_str(), decode_json_value(v).into_variant()))
|
||||||
|
})
|
||||||
|
.collect::<BTreeMap<_, _>>()
|
||||||
|
.into(),
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the [ColumnDataTypeWrapper] of the value.
|
/// Returns the [ColumnDataTypeWrapper] of the value.
|
||||||
@@ -1006,14 +1000,14 @@ pub fn vectors_to_rows<'a>(
|
|||||||
let mut rows = vec![Row { values: vec![] }; row_count];
|
let mut rows = vec![Row { values: vec![] }; row_count];
|
||||||
for column in columns {
|
for column in columns {
|
||||||
for (row_index, row) in rows.iter_mut().enumerate() {
|
for (row_index, row) in rows.iter_mut().enumerate() {
|
||||||
row.values.push(value_to_grpc_value(column.get(row_index)))
|
row.values.push(to_grpc_value(column.get(row_index)))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
rows
|
rows
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn value_to_grpc_value(value: Value) -> GrpcValue {
|
pub fn to_grpc_value(value: Value) -> GrpcValue {
|
||||||
GrpcValue {
|
GrpcValue {
|
||||||
value_data: match value {
|
value_data: match value {
|
||||||
Value::Null => None,
|
Value::Null => None,
|
||||||
@@ -1053,7 +1047,7 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
|
|||||||
let items = list_value
|
let items = list_value
|
||||||
.take_items()
|
.take_items()
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(value_to_grpc_value)
|
.map(to_grpc_value)
|
||||||
.collect();
|
.collect();
|
||||||
Some(ValueData::ListValue(v1::ListValue { items }))
|
Some(ValueData::ListValue(v1::ListValue { items }))
|
||||||
}
|
}
|
||||||
@@ -1061,13 +1055,11 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
|
|||||||
let items = struct_value
|
let items = struct_value
|
||||||
.take_items()
|
.take_items()
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(value_to_grpc_value)
|
.map(to_grpc_value)
|
||||||
.collect();
|
.collect();
|
||||||
Some(ValueData::StructValue(v1::StructValue { items }))
|
Some(ValueData::StructValue(v1::StructValue { items }))
|
||||||
}
|
}
|
||||||
Value::Json(inner_value) => Some(ValueData::JsonValue(Box::new(value_to_grpc_value(
|
Value::Json(v) => Some(ValueData::JsonValue(encode_json_value(*v))),
|
||||||
*inner_value,
|
|
||||||
)))),
|
|
||||||
Value::Duration(_) => unreachable!(),
|
Value::Duration(_) => unreachable!(),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@@ -1163,6 +1155,7 @@ mod tests {
|
|||||||
use common_time::interval::IntervalUnit;
|
use common_time::interval::IntervalUnit;
|
||||||
use datatypes::scalars::ScalarVector;
|
use datatypes::scalars::ScalarVector;
|
||||||
use datatypes::types::{Int8Type, Int32Type, UInt8Type, UInt32Type};
|
use datatypes::types::{Int8Type, Int32Type, UInt8Type, UInt32Type};
|
||||||
|
use datatypes::value::{ListValue, StructValue};
|
||||||
use datatypes::vectors::{
|
use datatypes::vectors::{
|
||||||
BooleanVector, DateVector, Float32Vector, PrimitiveVector, StringVector,
|
BooleanVector, DateVector, Float32Vector, PrimitiveVector, StringVector,
|
||||||
};
|
};
|
||||||
@@ -1259,6 +1252,9 @@ mod tests {
|
|||||||
let values = values_with_capacity(ColumnDataType::Json, 2);
|
let values = values_with_capacity(ColumnDataType::Json, 2);
|
||||||
assert_eq!(2, values.json_values.capacity());
|
assert_eq!(2, values.json_values.capacity());
|
||||||
assert_eq!(2, values.string_values.capacity());
|
assert_eq!(2, values.string_values.capacity());
|
||||||
|
|
||||||
|
let values = values_with_capacity(ColumnDataType::Dictionary, 2);
|
||||||
|
assert!(values.bool_values.is_empty());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -1355,6 +1351,17 @@ mod tests {
|
|||||||
ConcreteDataType::list_datatype(Arc::new(ConcreteDataType::string_datatype())),
|
ConcreteDataType::list_datatype(Arc::new(ConcreteDataType::string_datatype())),
|
||||||
ColumnDataTypeWrapper::list_datatype(ColumnDataTypeWrapper::string_datatype()).into()
|
ColumnDataTypeWrapper::list_datatype(ColumnDataTypeWrapper::string_datatype()).into()
|
||||||
);
|
);
|
||||||
|
assert_eq!(
|
||||||
|
ConcreteDataType::dictionary_datatype(
|
||||||
|
ConcreteDataType::int32_datatype(),
|
||||||
|
ConcreteDataType::string_datatype()
|
||||||
|
),
|
||||||
|
ColumnDataTypeWrapper::dictionary_datatype(
|
||||||
|
ColumnDataTypeWrapper::int32_datatype(),
|
||||||
|
ColumnDataTypeWrapper::string_datatype()
|
||||||
|
)
|
||||||
|
.into()
|
||||||
|
);
|
||||||
let struct_type = StructType::new(Arc::new(vec![
|
let struct_type = StructType::new(Arc::new(vec![
|
||||||
StructField::new("id".to_string(), ConcreteDataType::int64_datatype(), true),
|
StructField::new("id".to_string(), ConcreteDataType::int64_datatype(), true),
|
||||||
StructField::new(
|
StructField::new(
|
||||||
@@ -1525,6 +1532,18 @@ mod tests {
|
|||||||
ColumnDataTypeWrapper::vector_datatype(3),
|
ColumnDataTypeWrapper::vector_datatype(3),
|
||||||
ConcreteDataType::vector_datatype(3).try_into().unwrap()
|
ConcreteDataType::vector_datatype(3).try_into().unwrap()
|
||||||
);
|
);
|
||||||
|
assert_eq!(
|
||||||
|
ColumnDataTypeWrapper::dictionary_datatype(
|
||||||
|
ColumnDataTypeWrapper::int32_datatype(),
|
||||||
|
ColumnDataTypeWrapper::string_datatype()
|
||||||
|
),
|
||||||
|
ConcreteDataType::dictionary_datatype(
|
||||||
|
ConcreteDataType::int32_datatype(),
|
||||||
|
ConcreteDataType::string_datatype()
|
||||||
|
)
|
||||||
|
.try_into()
|
||||||
|
.unwrap()
|
||||||
|
);
|
||||||
|
|
||||||
let result: Result<ColumnDataTypeWrapper> = ConcreteDataType::null_datatype().try_into();
|
let result: Result<ColumnDataTypeWrapper> = ConcreteDataType::null_datatype().try_into();
|
||||||
assert!(result.is_err());
|
assert!(result.is_err());
|
||||||
@@ -1580,6 +1599,20 @@ mod tests {
|
|||||||
datatype_extension: Some(Box::new(ColumnDataTypeExtension {
|
datatype_extension: Some(Box::new(ColumnDataTypeExtension {
|
||||||
type_ext: Some(TypeExt::StructType(StructTypeExtension {
|
type_ext: Some(TypeExt::StructType(StructTypeExtension {
|
||||||
fields: vec![
|
fields: vec![
|
||||||
|
v1::StructField {
|
||||||
|
name: "address".to_string(),
|
||||||
|
datatype: ColumnDataTypeWrapper::string_datatype()
|
||||||
|
.datatype()
|
||||||
|
.into(),
|
||||||
|
datatype_extension: None
|
||||||
|
},
|
||||||
|
v1::StructField {
|
||||||
|
name: "age".to_string(),
|
||||||
|
datatype: ColumnDataTypeWrapper::int64_datatype()
|
||||||
|
.datatype()
|
||||||
|
.into(),
|
||||||
|
datatype_extension: None
|
||||||
|
},
|
||||||
v1::StructField {
|
v1::StructField {
|
||||||
name: "id".to_string(),
|
name: "id".to_string(),
|
||||||
datatype: ColumnDataTypeWrapper::int64_datatype()
|
datatype: ColumnDataTypeWrapper::int64_datatype()
|
||||||
@@ -1594,20 +1627,6 @@ mod tests {
|
|||||||
.into(),
|
.into(),
|
||||||
datatype_extension: None
|
datatype_extension: None
|
||||||
},
|
},
|
||||||
v1::StructField {
|
|
||||||
name: "age".to_string(),
|
|
||||||
datatype: ColumnDataTypeWrapper::int32_datatype()
|
|
||||||
.datatype()
|
|
||||||
.into(),
|
|
||||||
datatype_extension: None
|
|
||||||
},
|
|
||||||
v1::StructField {
|
|
||||||
name: "address".to_string(),
|
|
||||||
datatype: ColumnDataTypeWrapper::string_datatype()
|
|
||||||
.datatype()
|
|
||||||
.into(),
|
|
||||||
datatype_extension: None
|
|
||||||
}
|
|
||||||
]
|
]
|
||||||
}))
|
}))
|
||||||
}))
|
}))
|
||||||
@@ -1740,7 +1759,7 @@ mod tests {
|
|||||||
Arc::new(ConcreteDataType::boolean_datatype()),
|
Arc::new(ConcreteDataType::boolean_datatype()),
|
||||||
));
|
));
|
||||||
|
|
||||||
let pb_value = to_proto_value(value);
|
let pb_value = to_grpc_value(value);
|
||||||
|
|
||||||
match pb_value.value_data.unwrap() {
|
match pb_value.value_data.unwrap() {
|
||||||
ValueData::ListValue(pb_list_value) => {
|
ValueData::ListValue(pb_list_value) => {
|
||||||
@@ -1769,7 +1788,7 @@ mod tests {
|
|||||||
.unwrap(),
|
.unwrap(),
|
||||||
);
|
);
|
||||||
|
|
||||||
let pb_value = to_proto_value(value);
|
let pb_value = to_grpc_value(value);
|
||||||
|
|
||||||
match pb_value.value_data.unwrap() {
|
match pb_value.value_data.unwrap() {
|
||||||
ValueData::StructValue(pb_struct_value) => {
|
ValueData::StructValue(pb_struct_value) => {
|
||||||
@@ -1778,4 +1797,199 @@ mod tests {
|
|||||||
_ => panic!("Unexpected value type"),
|
_ => panic!("Unexpected value type"),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_encode_decode_json_value() {
|
||||||
|
let json = JsonValue::null();
|
||||||
|
let proto = encode_json_value(json.clone());
|
||||||
|
assert!(proto.value.is_none());
|
||||||
|
let value = decode_json_value(&proto);
|
||||||
|
assert_eq!(json.as_ref(), value);
|
||||||
|
|
||||||
|
let json: JsonValue = true.into();
|
||||||
|
let proto = encode_json_value(json.clone());
|
||||||
|
assert_eq!(proto.value, Some(json_value::Value::Boolean(true)));
|
||||||
|
let value = decode_json_value(&proto);
|
||||||
|
assert_eq!(json.as_ref(), value);
|
||||||
|
|
||||||
|
let json: JsonValue = (-1i64).into();
|
||||||
|
let proto = encode_json_value(json.clone());
|
||||||
|
assert_eq!(proto.value, Some(json_value::Value::Int(-1)));
|
||||||
|
let value = decode_json_value(&proto);
|
||||||
|
assert_eq!(json.as_ref(), value);
|
||||||
|
|
||||||
|
let json: JsonValue = 1u64.into();
|
||||||
|
let proto = encode_json_value(json.clone());
|
||||||
|
assert_eq!(proto.value, Some(json_value::Value::Uint(1)));
|
||||||
|
let value = decode_json_value(&proto);
|
||||||
|
assert_eq!(json.as_ref(), value);
|
||||||
|
|
||||||
|
let json: JsonValue = 1.0f64.into();
|
||||||
|
let proto = encode_json_value(json.clone());
|
||||||
|
assert_eq!(proto.value, Some(json_value::Value::Float(1.0)));
|
||||||
|
let value = decode_json_value(&proto);
|
||||||
|
assert_eq!(json.as_ref(), value);
|
||||||
|
|
||||||
|
let json: JsonValue = "s".into();
|
||||||
|
let proto = encode_json_value(json.clone());
|
||||||
|
assert_eq!(proto.value, Some(json_value::Value::Str("s".to_string())));
|
||||||
|
let value = decode_json_value(&proto);
|
||||||
|
assert_eq!(json.as_ref(), value);
|
||||||
|
|
||||||
|
let json: JsonValue = [1i64, 2, 3].into();
|
||||||
|
let proto = encode_json_value(json.clone());
|
||||||
|
assert_eq!(
|
||||||
|
proto.value,
|
||||||
|
Some(json_value::Value::Array(JsonList {
|
||||||
|
items: vec![
|
||||||
|
v1::JsonValue {
|
||||||
|
value: Some(json_value::Value::Int(1))
|
||||||
|
},
|
||||||
|
v1::JsonValue {
|
||||||
|
value: Some(json_value::Value::Int(2))
|
||||||
|
},
|
||||||
|
v1::JsonValue {
|
||||||
|
value: Some(json_value::Value::Int(3))
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}))
|
||||||
|
);
|
||||||
|
let value = decode_json_value(&proto);
|
||||||
|
assert_eq!(json.as_ref(), value);
|
||||||
|
|
||||||
|
let json: JsonValue = [(); 0].into();
|
||||||
|
let proto = encode_json_value(json.clone());
|
||||||
|
assert_eq!(
|
||||||
|
proto.value,
|
||||||
|
Some(json_value::Value::Array(JsonList { items: vec![] }))
|
||||||
|
);
|
||||||
|
let value = decode_json_value(&proto);
|
||||||
|
assert_eq!(json.as_ref(), value);
|
||||||
|
|
||||||
|
let json: JsonValue = [("k3", 3i64), ("k2", 2i64), ("k1", 1i64)].into();
|
||||||
|
let proto = encode_json_value(json.clone());
|
||||||
|
assert_eq!(
|
||||||
|
proto.value,
|
||||||
|
Some(json_value::Value::Object(JsonObject {
|
||||||
|
entries: vec![
|
||||||
|
v1::json_object::Entry {
|
||||||
|
key: "k1".to_string(),
|
||||||
|
value: Some(v1::JsonValue {
|
||||||
|
value: Some(json_value::Value::Int(1))
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
v1::json_object::Entry {
|
||||||
|
key: "k2".to_string(),
|
||||||
|
value: Some(v1::JsonValue {
|
||||||
|
value: Some(json_value::Value::Int(2))
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
v1::json_object::Entry {
|
||||||
|
key: "k3".to_string(),
|
||||||
|
value: Some(v1::JsonValue {
|
||||||
|
value: Some(json_value::Value::Int(3))
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
]
|
||||||
|
}))
|
||||||
|
);
|
||||||
|
let value = decode_json_value(&proto);
|
||||||
|
assert_eq!(json.as_ref(), value);
|
||||||
|
|
||||||
|
let json: JsonValue = [("null", ()); 0].into();
|
||||||
|
let proto = encode_json_value(json.clone());
|
||||||
|
assert_eq!(
|
||||||
|
proto.value,
|
||||||
|
Some(json_value::Value::Object(JsonObject { entries: vec![] }))
|
||||||
|
);
|
||||||
|
let value = decode_json_value(&proto);
|
||||||
|
assert_eq!(json.as_ref(), value);
|
||||||
|
|
||||||
|
let json: JsonValue = [
|
||||||
|
("null", JsonVariant::from(())),
|
||||||
|
("bool", false.into()),
|
||||||
|
("list", ["hello", "world"].into()),
|
||||||
|
(
|
||||||
|
"object",
|
||||||
|
[
|
||||||
|
("positive_i", JsonVariant::from(42u64)),
|
||||||
|
("negative_i", (-42i64).into()),
|
||||||
|
("nested", [("what", "blah")].into()),
|
||||||
|
]
|
||||||
|
.into(),
|
||||||
|
),
|
||||||
|
]
|
||||||
|
.into();
|
||||||
|
let proto = encode_json_value(json.clone());
|
||||||
|
assert_eq!(
|
||||||
|
proto.value,
|
||||||
|
Some(json_value::Value::Object(JsonObject {
|
||||||
|
entries: vec![
|
||||||
|
v1::json_object::Entry {
|
||||||
|
key: "bool".to_string(),
|
||||||
|
value: Some(v1::JsonValue {
|
||||||
|
value: Some(json_value::Value::Boolean(false))
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
v1::json_object::Entry {
|
||||||
|
key: "list".to_string(),
|
||||||
|
value: Some(v1::JsonValue {
|
||||||
|
value: Some(json_value::Value::Array(JsonList {
|
||||||
|
items: vec![
|
||||||
|
v1::JsonValue {
|
||||||
|
value: Some(json_value::Value::Str("hello".to_string()))
|
||||||
|
},
|
||||||
|
v1::JsonValue {
|
||||||
|
value: Some(json_value::Value::Str("world".to_string()))
|
||||||
|
},
|
||||||
|
]
|
||||||
|
}))
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
v1::json_object::Entry {
|
||||||
|
key: "null".to_string(),
|
||||||
|
value: Some(v1::JsonValue { value: None }),
|
||||||
|
},
|
||||||
|
v1::json_object::Entry {
|
||||||
|
key: "object".to_string(),
|
||||||
|
value: Some(v1::JsonValue {
|
||||||
|
value: Some(json_value::Value::Object(JsonObject {
|
||||||
|
entries: vec![
|
||||||
|
v1::json_object::Entry {
|
||||||
|
key: "negative_i".to_string(),
|
||||||
|
value: Some(v1::JsonValue {
|
||||||
|
value: Some(json_value::Value::Int(-42))
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
v1::json_object::Entry {
|
||||||
|
key: "nested".to_string(),
|
||||||
|
value: Some(v1::JsonValue {
|
||||||
|
value: Some(json_value::Value::Object(JsonObject {
|
||||||
|
entries: vec![v1::json_object::Entry {
|
||||||
|
key: "what".to_string(),
|
||||||
|
value: Some(v1::JsonValue {
|
||||||
|
value: Some(json_value::Value::Str(
|
||||||
|
"blah".to_string()
|
||||||
|
))
|
||||||
|
}),
|
||||||
|
},]
|
||||||
|
}))
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
v1::json_object::Entry {
|
||||||
|
key: "positive_i".to_string(),
|
||||||
|
value: Some(v1::JsonValue {
|
||||||
|
value: Some(json_value::Value::Uint(42))
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
]
|
||||||
|
}))
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
]
|
||||||
|
}))
|
||||||
|
);
|
||||||
|
let value = decode_json_value(&proto);
|
||||||
|
assert_eq!(json.as_ref(), value);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -14,6 +14,7 @@
|
|||||||
|
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
|
|
||||||
|
use arrow_schema::extension::{EXTENSION_TYPE_METADATA_KEY, EXTENSION_TYPE_NAME_KEY};
|
||||||
use datatypes::schema::{
|
use datatypes::schema::{
|
||||||
COMMENT_KEY, ColumnDefaultConstraint, ColumnSchema, FULLTEXT_KEY, FulltextAnalyzer,
|
COMMENT_KEY, ColumnDefaultConstraint, ColumnSchema, FULLTEXT_KEY, FulltextAnalyzer,
|
||||||
FulltextBackend, FulltextOptions, INVERTED_INDEX_KEY, SKIPPING_INDEX_KEY, SkippingIndexOptions,
|
FulltextBackend, FulltextOptions, INVERTED_INDEX_KEY, SKIPPING_INDEX_KEY, SkippingIndexOptions,
|
||||||
@@ -68,6 +69,15 @@ pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
|
|||||||
if let Some(skipping_index) = options.options.get(SKIPPING_INDEX_GRPC_KEY) {
|
if let Some(skipping_index) = options.options.get(SKIPPING_INDEX_GRPC_KEY) {
|
||||||
metadata.insert(SKIPPING_INDEX_KEY.to_string(), skipping_index.to_owned());
|
metadata.insert(SKIPPING_INDEX_KEY.to_string(), skipping_index.to_owned());
|
||||||
}
|
}
|
||||||
|
if let Some(extension_name) = options.options.get(EXTENSION_TYPE_NAME_KEY) {
|
||||||
|
metadata.insert(EXTENSION_TYPE_NAME_KEY.to_string(), extension_name.clone());
|
||||||
|
}
|
||||||
|
if let Some(extension_metadata) = options.options.get(EXTENSION_TYPE_METADATA_KEY) {
|
||||||
|
metadata.insert(
|
||||||
|
EXTENSION_TYPE_METADATA_KEY.to_string(),
|
||||||
|
extension_metadata.clone(),
|
||||||
|
);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable)
|
ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable)
|
||||||
@@ -139,6 +149,17 @@ pub fn options_from_column_schema(column_schema: &ColumnSchema) -> Option<Column
|
|||||||
.options
|
.options
|
||||||
.insert(SKIPPING_INDEX_GRPC_KEY.to_string(), skipping_index.clone());
|
.insert(SKIPPING_INDEX_GRPC_KEY.to_string(), skipping_index.clone());
|
||||||
}
|
}
|
||||||
|
if let Some(extension_name) = column_schema.metadata().get(EXTENSION_TYPE_NAME_KEY) {
|
||||||
|
options
|
||||||
|
.options
|
||||||
|
.insert(EXTENSION_TYPE_NAME_KEY.to_string(), extension_name.clone());
|
||||||
|
}
|
||||||
|
if let Some(extension_metadata) = column_schema.metadata().get(EXTENSION_TYPE_METADATA_KEY) {
|
||||||
|
options.options.insert(
|
||||||
|
EXTENSION_TYPE_METADATA_KEY.to_string(),
|
||||||
|
extension_metadata.clone(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
(!options.options.is_empty()).then_some(options)
|
(!options.options.is_empty()).then_some(options)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -5,7 +5,6 @@ edition.workspace = true
|
|||||||
license.workspace = true
|
license.workspace = true
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
enterprise = []
|
|
||||||
testing = []
|
testing = []
|
||||||
|
|
||||||
[lints]
|
[lints]
|
||||||
|
|||||||
@@ -12,13 +12,14 @@
|
|||||||
// See the License for the specific language governing permissions and
|
// See the License for the specific language governing permissions and
|
||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
pub use client::{CachedKvBackend, CachedKvBackendBuilder, MetaKvBackend};
|
|
||||||
|
|
||||||
mod builder;
|
mod builder;
|
||||||
mod client;
|
mod client;
|
||||||
mod manager;
|
mod manager;
|
||||||
mod table_cache;
|
mod table_cache;
|
||||||
|
|
||||||
pub use builder::KvBackendCatalogManagerBuilder;
|
pub use builder::{
|
||||||
|
CatalogManagerConfigurator, CatalogManagerConfiguratorRef, KvBackendCatalogManagerBuilder,
|
||||||
|
};
|
||||||
|
pub use client::{CachedKvBackend, CachedKvBackendBuilder, MetaKvBackend};
|
||||||
pub use manager::KvBackendCatalogManager;
|
pub use manager::KvBackendCatalogManager;
|
||||||
pub use table_cache::{TableCache, TableCacheRef, new_table_cache};
|
pub use table_cache::{TableCache, TableCacheRef, new_table_cache};
|
||||||
|
|||||||
@@ -12,9 +12,11 @@
|
|||||||
// See the License for the specific language governing permissions and
|
// See the License for the specific language governing permissions and
|
||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
|
use std::collections::HashMap;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use common_catalog::consts::DEFAULT_CATALOG_NAME;
|
use common_catalog::consts::DEFAULT_CATALOG_NAME;
|
||||||
|
use common_error::ext::BoxedError;
|
||||||
use common_meta::cache::LayeredCacheRegistryRef;
|
use common_meta::cache::LayeredCacheRegistryRef;
|
||||||
use common_meta::key::TableMetadataManager;
|
use common_meta::key::TableMetadataManager;
|
||||||
use common_meta::key::flow::FlowMetadataManager;
|
use common_meta::key::flow::FlowMetadataManager;
|
||||||
@@ -23,23 +25,34 @@ use common_procedure::ProcedureManagerRef;
|
|||||||
use moka::sync::Cache;
|
use moka::sync::Cache;
|
||||||
use partition::manager::PartitionRuleManager;
|
use partition::manager::PartitionRuleManager;
|
||||||
|
|
||||||
#[cfg(feature = "enterprise")]
|
use crate::information_schema::{
|
||||||
use crate::information_schema::InformationSchemaTableFactoryRef;
|
InformationExtensionRef, InformationSchemaProvider, InformationSchemaTableFactoryRef,
|
||||||
use crate::information_schema::{InformationExtensionRef, InformationSchemaProvider};
|
};
|
||||||
use crate::kvbackend::KvBackendCatalogManager;
|
use crate::kvbackend::KvBackendCatalogManager;
|
||||||
use crate::kvbackend::manager::{CATALOG_CACHE_MAX_CAPACITY, SystemCatalog};
|
use crate::kvbackend::manager::{CATALOG_CACHE_MAX_CAPACITY, SystemCatalog};
|
||||||
use crate::process_manager::ProcessManagerRef;
|
use crate::process_manager::ProcessManagerRef;
|
||||||
|
use crate::system_schema::numbers_table_provider::NumbersTableProvider;
|
||||||
use crate::system_schema::pg_catalog::PGCatalogProvider;
|
use crate::system_schema::pg_catalog::PGCatalogProvider;
|
||||||
|
|
||||||
|
/// The configurator that customizes or enhances the [`KvBackendCatalogManagerBuilder`].
|
||||||
|
#[async_trait::async_trait]
|
||||||
|
pub trait CatalogManagerConfigurator<C>: Send + Sync {
|
||||||
|
async fn configure(
|
||||||
|
&self,
|
||||||
|
builder: KvBackendCatalogManagerBuilder,
|
||||||
|
ctx: C,
|
||||||
|
) -> std::result::Result<KvBackendCatalogManagerBuilder, BoxedError>;
|
||||||
|
}
|
||||||
|
|
||||||
|
pub type CatalogManagerConfiguratorRef<C> = Arc<dyn CatalogManagerConfigurator<C>>;
|
||||||
|
|
||||||
pub struct KvBackendCatalogManagerBuilder {
|
pub struct KvBackendCatalogManagerBuilder {
|
||||||
information_extension: InformationExtensionRef,
|
information_extension: InformationExtensionRef,
|
||||||
backend: KvBackendRef,
|
backend: KvBackendRef,
|
||||||
cache_registry: LayeredCacheRegistryRef,
|
cache_registry: LayeredCacheRegistryRef,
|
||||||
procedure_manager: Option<ProcedureManagerRef>,
|
procedure_manager: Option<ProcedureManagerRef>,
|
||||||
process_manager: Option<ProcessManagerRef>,
|
process_manager: Option<ProcessManagerRef>,
|
||||||
#[cfg(feature = "enterprise")]
|
extra_information_table_factories: HashMap<String, InformationSchemaTableFactoryRef>,
|
||||||
extra_information_table_factories:
|
|
||||||
std::collections::HashMap<String, InformationSchemaTableFactoryRef>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl KvBackendCatalogManagerBuilder {
|
impl KvBackendCatalogManagerBuilder {
|
||||||
@@ -54,8 +67,7 @@ impl KvBackendCatalogManagerBuilder {
|
|||||||
cache_registry,
|
cache_registry,
|
||||||
procedure_manager: None,
|
procedure_manager: None,
|
||||||
process_manager: None,
|
process_manager: None,
|
||||||
#[cfg(feature = "enterprise")]
|
extra_information_table_factories: HashMap::new(),
|
||||||
extra_information_table_factories: std::collections::HashMap::new(),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -70,10 +82,9 @@ impl KvBackendCatalogManagerBuilder {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Sets the extra information tables.
|
/// Sets the extra information tables.
|
||||||
#[cfg(feature = "enterprise")]
|
|
||||||
pub fn with_extra_information_table_factories(
|
pub fn with_extra_information_table_factories(
|
||||||
mut self,
|
mut self,
|
||||||
factories: std::collections::HashMap<String, InformationSchemaTableFactoryRef>,
|
factories: HashMap<String, InformationSchemaTableFactoryRef>,
|
||||||
) -> Self {
|
) -> Self {
|
||||||
self.extra_information_table_factories = factories;
|
self.extra_information_table_factories = factories;
|
||||||
self
|
self
|
||||||
@@ -86,7 +97,6 @@ impl KvBackendCatalogManagerBuilder {
|
|||||||
cache_registry,
|
cache_registry,
|
||||||
procedure_manager,
|
procedure_manager,
|
||||||
process_manager,
|
process_manager,
|
||||||
#[cfg(feature = "enterprise")]
|
|
||||||
extra_information_table_factories,
|
extra_information_table_factories,
|
||||||
} = self;
|
} = self;
|
||||||
Arc::new_cyclic(|me| KvBackendCatalogManager {
|
Arc::new_cyclic(|me| KvBackendCatalogManager {
|
||||||
@@ -110,7 +120,6 @@ impl KvBackendCatalogManagerBuilder {
|
|||||||
process_manager.clone(),
|
process_manager.clone(),
|
||||||
backend.clone(),
|
backend.clone(),
|
||||||
);
|
);
|
||||||
#[cfg(feature = "enterprise")]
|
|
||||||
let provider = provider
|
let provider = provider
|
||||||
.with_extra_table_factories(extra_information_table_factories.clone());
|
.with_extra_table_factories(extra_information_table_factories.clone());
|
||||||
Arc::new(provider)
|
Arc::new(provider)
|
||||||
@@ -119,9 +128,9 @@ impl KvBackendCatalogManagerBuilder {
|
|||||||
DEFAULT_CATALOG_NAME.to_string(),
|
DEFAULT_CATALOG_NAME.to_string(),
|
||||||
me.clone(),
|
me.clone(),
|
||||||
)),
|
)),
|
||||||
|
numbers_table_provider: NumbersTableProvider,
|
||||||
backend,
|
backend,
|
||||||
process_manager,
|
process_manager,
|
||||||
#[cfg(feature = "enterprise")]
|
|
||||||
extra_information_table_factories,
|
extra_information_table_factories,
|
||||||
},
|
},
|
||||||
cache_registry,
|
cache_registry,
|
||||||
|
|||||||
@@ -18,8 +18,7 @@ use std::sync::{Arc, Weak};
|
|||||||
|
|
||||||
use async_stream::try_stream;
|
use async_stream::try_stream;
|
||||||
 use common_catalog::consts::{
-    DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID,
-    PG_CATALOG_NAME,
+    DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, PG_CATALOG_NAME,
 };
 use common_error::ext::BoxedError;
 use common_meta::cache::{
@@ -45,7 +44,6 @@ use table::TableRef;
 use table::dist_table::DistTable;
 use table::metadata::{TableId, TableInfoRef};
 use table::table::PartitionRules;
-use table::table::numbers::{NUMBERS_TABLE_NAME, NumbersTable};
 use table::table_name::TableName;
 use tokio::sync::Semaphore;
 use tokio_stream::wrappers::ReceiverStream;
@@ -55,12 +53,13 @@ use crate::error::{
     CacheNotFoundSnafu, GetTableCacheSnafu, InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu,
     ListSchemasSnafu, ListTablesSnafu, Result, TableMetadataManagerSnafu,
 };
-#[cfg(feature = "enterprise")]
-use crate::information_schema::InformationSchemaTableFactoryRef;
-use crate::information_schema::{InformationExtensionRef, InformationSchemaProvider};
+use crate::information_schema::{
+    InformationExtensionRef, InformationSchemaProvider, InformationSchemaTableFactoryRef,
+};
 use crate::kvbackend::TableCacheRef;
 use crate::process_manager::ProcessManagerRef;
 use crate::system_schema::SystemSchemaProvider;
+use crate::system_schema::numbers_table_provider::NumbersTableProvider;
 use crate::system_schema::pg_catalog::PGCatalogProvider;
 
 /// Access all existing catalog, schema and tables.
@@ -555,9 +554,9 @@ pub(super) struct SystemCatalog {
     // system_schema_provider for default catalog
     pub(super) information_schema_provider: Arc<InformationSchemaProvider>,
     pub(super) pg_catalog_provider: Arc<PGCatalogProvider>,
+    pub(super) numbers_table_provider: NumbersTableProvider,
     pub(super) backend: KvBackendRef,
     pub(super) process_manager: Option<ProcessManagerRef>,
-    #[cfg(feature = "enterprise")]
     pub(super) extra_information_table_factories:
         std::collections::HashMap<String, InformationSchemaTableFactoryRef>,
 }
@@ -584,9 +583,7 @@ impl SystemCatalog {
             PG_CATALOG_NAME if channel == Channel::Postgres => {
                 self.pg_catalog_provider.table_names()
             }
-            DEFAULT_SCHEMA_NAME => {
-                vec![NUMBERS_TABLE_NAME.to_string()]
-            }
+            DEFAULT_SCHEMA_NAME => self.numbers_table_provider.table_names(),
             _ => vec![],
         }
     }
@@ -604,7 +601,7 @@ impl SystemCatalog {
         if schema == INFORMATION_SCHEMA_NAME {
             self.information_schema_provider.table(table).is_some()
         } else if schema == DEFAULT_SCHEMA_NAME {
-            table == NUMBERS_TABLE_NAME
+            self.numbers_table_provider.table_exists(table)
         } else if schema == PG_CATALOG_NAME && channel == Channel::Postgres {
             self.pg_catalog_provider.table(table).is_some()
         } else {
@@ -630,7 +627,6 @@ impl SystemCatalog {
             self.process_manager.clone(),
             self.backend.clone(),
         );
-        #[cfg(feature = "enterprise")]
        let provider = provider
             .with_extra_table_factories(self.extra_information_table_factories.clone());
         Arc::new(provider)
@@ -649,8 +645,8 @@ impl SystemCatalog {
                 });
                 pg_catalog_provider.table(table_name)
             }
-        } else if schema == DEFAULT_SCHEMA_NAME && table_name == NUMBERS_TABLE_NAME {
-            Some(NumbersTable::table(NUMBERS_TABLE_ID))
+        } else if schema == DEFAULT_SCHEMA_NAME {
+            self.numbers_table_provider.table(table_name)
         } else {
             None
         }
@@ -14,6 +14,7 @@
 
 pub mod information_schema;
 mod memory_table;
+pub mod numbers_table_provider;
 pub mod pg_catalog;
 pub mod predicate;
 mod utils;
@@ -22,7 +22,6 @@ mod procedure_info;
 pub mod process_list;
 pub mod region_peers;
 mod region_statistics;
-mod runtime_metrics;
 pub mod schemata;
 mod ssts;
 mod table_constraints;
@@ -65,7 +64,6 @@ use crate::system_schema::information_schema::information_memory_table::get_sche
 use crate::system_schema::information_schema::key_column_usage::InformationSchemaKeyColumnUsage;
 use crate::system_schema::information_schema::partitions::InformationSchemaPartitions;
 use crate::system_schema::information_schema::region_peers::InformationSchemaRegionPeers;
-use crate::system_schema::information_schema::runtime_metrics::InformationSchemaMetrics;
 use crate::system_schema::information_schema::schemata::InformationSchemaSchemata;
 use crate::system_schema::information_schema::ssts::{
     InformationSchemaSstsIndexMeta, InformationSchemaSstsManifest, InformationSchemaSstsStorage,
@@ -97,7 +95,6 @@ lazy_static! {
         ROUTINES,
         SCHEMA_PRIVILEGES,
         TABLE_PRIVILEGES,
-        TRIGGERS,
         GLOBAL_STATUS,
         SESSION_STATUS,
         PARTITIONS,
@@ -120,7 +117,6 @@ macro_rules! setup_memory_table {
     };
 }
 
-#[cfg(feature = "enterprise")]
 pub struct MakeInformationTableRequest {
     pub catalog_name: String,
     pub catalog_manager: Weak<dyn CatalogManager>,
@@ -131,12 +127,10 @@ pub struct MakeInformationTableRequest {
 ///
 /// This trait allows for extensibility of the information schema by providing
 /// a way to dynamically create custom information schema tables.
-#[cfg(feature = "enterprise")]
 pub trait InformationSchemaTableFactory {
     fn make_information_table(&self, req: MakeInformationTableRequest) -> SystemTableRef;
 }
 
-#[cfg(feature = "enterprise")]
 pub type InformationSchemaTableFactoryRef = Arc<dyn InformationSchemaTableFactory + Send + Sync>;
 
 /// The `information_schema` tables info provider.
@@ -146,9 +140,7 @@ pub struct InformationSchemaProvider {
     process_manager: Option<ProcessManagerRef>,
     flow_metadata_manager: Arc<FlowMetadataManager>,
     tables: HashMap<String, TableRef>,
-    #[allow(dead_code)]
     kv_backend: KvBackendRef,
-    #[cfg(feature = "enterprise")]
     extra_table_factories: HashMap<String, InformationSchemaTableFactoryRef>,
 }
 
@@ -169,7 +161,6 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
     }
 
     fn system_table(&self, name: &str) -> Option<SystemTableRef> {
-        #[cfg(feature = "enterprise")]
         if let Some(factory) = self.extra_table_factories.get(name) {
             let req = MakeInformationTableRequest {
                 catalog_name: self.catalog_name.clone(),
@@ -207,7 +198,6 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
             ROUTINES => setup_memory_table!(ROUTINES),
             SCHEMA_PRIVILEGES => setup_memory_table!(SCHEMA_PRIVILEGES),
             TABLE_PRIVILEGES => setup_memory_table!(TABLE_PRIVILEGES),
-            TRIGGERS => setup_memory_table!(TRIGGERS),
             GLOBAL_STATUS => setup_memory_table!(GLOBAL_STATUS),
             SESSION_STATUS => setup_memory_table!(SESSION_STATUS),
             KEY_COLUMN_USAGE => Some(Arc::new(InformationSchemaKeyColumnUsage::new(
@@ -218,7 +208,6 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
                 self.catalog_name.clone(),
                 self.catalog_manager.clone(),
             )) as _),
-            RUNTIME_METRICS => Some(Arc::new(InformationSchemaMetrics::new())),
             PARTITIONS => Some(Arc::new(InformationSchemaPartitions::new(
                 self.catalog_name.clone(),
                 self.catalog_manager.clone(),
@@ -286,7 +275,6 @@ impl InformationSchemaProvider {
             process_manager,
             tables: HashMap::new(),
             kv_backend,
-            #[cfg(feature = "enterprise")]
             extra_table_factories: HashMap::new(),
         };
 
@@ -295,7 +283,6 @@ impl InformationSchemaProvider {
         provider
     }
 
-    #[cfg(feature = "enterprise")]
     pub(crate) fn with_extra_table_factories(
         mut self,
         factories: HashMap<String, InformationSchemaTableFactoryRef>,
@@ -313,10 +300,6 @@ impl InformationSchemaProvider {
         // authentication details, and other critical information.
         // Only put these tables under `greptime` catalog to prevent info leak.
         if self.catalog_name == DEFAULT_CATALOG_NAME {
-            tables.insert(
-                RUNTIME_METRICS.to_string(),
-                self.build_table(RUNTIME_METRICS).unwrap(),
-            );
             tables.insert(
                 BUILD_INFO.to_string(),
                 self.build_table(BUILD_INFO).unwrap(),
@@ -367,7 +350,6 @@ impl InformationSchemaProvider {
         if let Some(process_list) = self.build_table(PROCESS_LIST) {
             tables.insert(PROCESS_LIST.to_string(), process_list);
         }
-        #[cfg(feature = "enterprise")]
         for name in self.extra_table_factories.keys() {
             tables.insert(name.clone(), self.build_table(name).expect(name));
         }
@@ -33,7 +33,6 @@ use datatypes::timestamp::TimestampMillisecond;
 use datatypes::value::Value;
 use datatypes::vectors::{
     Int64VectorBuilder, StringVectorBuilder, TimestampMillisecondVectorBuilder,
-    UInt32VectorBuilder, UInt64VectorBuilder,
 };
 use serde::Serialize;
 use snafu::ResultExt;
@@ -53,6 +52,8 @@ const PEER_ADDR: &str = "peer_addr";
 const PEER_HOSTNAME: &str = "peer_hostname";
 const TOTAL_CPU_MILLICORES: &str = "total_cpu_millicores";
 const TOTAL_MEMORY_BYTES: &str = "total_memory_bytes";
+const CPU_USAGE_MILLICORES: &str = "cpu_usage_millicores";
+const MEMORY_USAGE_BYTES: &str = "memory_usage_bytes";
 const VERSION: &str = "version";
 const GIT_COMMIT: &str = "git_commit";
 const START_TIME: &str = "start_time";
@@ -67,15 +68,17 @@ const INIT_CAPACITY: usize = 42;
 /// - `peer_id`: the peer server id.
 /// - `peer_type`: the peer type, such as `datanode`, `frontend`, `metasrv` etc.
 /// - `peer_addr`: the peer gRPC address.
+/// - `peer_hostname`: the hostname of the peer.
 /// - `total_cpu_millicores`: the total CPU millicores of the peer.
 /// - `total_memory_bytes`: the total memory bytes of the peer.
+/// - `cpu_usage_millicores`: the CPU usage millicores of the peer.
+/// - `memory_usage_bytes`: the memory usage bytes of the peer.
 /// - `version`: the build package version of the peer.
 /// - `git_commit`: the build git commit hash of the peer.
 /// - `start_time`: the starting time of the peer.
 /// - `uptime`: the uptime of the peer.
 /// - `active_time`: the time since the last activity of the peer.
 /// - `node_status`: the status info of the peer.
-/// - `peer_hostname`: the hostname of the peer.
 ///
 #[derive(Debug)]
 pub(super) struct InformationSchemaClusterInfo {
@@ -99,12 +102,22 @@ impl InformationSchemaClusterInfo {
             ColumnSchema::new(PEER_HOSTNAME, ConcreteDataType::string_datatype(), true),
             ColumnSchema::new(
                 TOTAL_CPU_MILLICORES,
-                ConcreteDataType::uint32_datatype(),
+                ConcreteDataType::int64_datatype(),
                 false,
             ),
             ColumnSchema::new(
                 TOTAL_MEMORY_BYTES,
-                ConcreteDataType::uint64_datatype(),
+                ConcreteDataType::int64_datatype(),
+                false,
+            ),
+            ColumnSchema::new(
+                CPU_USAGE_MILLICORES,
+                ConcreteDataType::int64_datatype(),
+                false,
+            ),
+            ColumnSchema::new(
+                MEMORY_USAGE_BYTES,
+                ConcreteDataType::int64_datatype(),
                 false,
             ),
             ColumnSchema::new(VERSION, ConcreteDataType::string_datatype(), false),
@@ -167,8 +180,10 @@ struct InformationSchemaClusterInfoBuilder {
     peer_types: StringVectorBuilder,
     peer_addrs: StringVectorBuilder,
     peer_hostnames: StringVectorBuilder,
-    cpus: UInt32VectorBuilder,
-    memory_bytes: UInt64VectorBuilder,
+    total_cpu_millicores: Int64VectorBuilder,
+    total_memory_bytes: Int64VectorBuilder,
+    cpu_usage_millicores: Int64VectorBuilder,
+    memory_usage_bytes: Int64VectorBuilder,
     versions: StringVectorBuilder,
     git_commits: StringVectorBuilder,
     start_times: TimestampMillisecondVectorBuilder,
@@ -186,8 +201,10 @@ impl InformationSchemaClusterInfoBuilder {
             peer_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
             peer_addrs: StringVectorBuilder::with_capacity(INIT_CAPACITY),
             peer_hostnames: StringVectorBuilder::with_capacity(INIT_CAPACITY),
-            cpus: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
-            memory_bytes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
+            total_cpu_millicores: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
+            total_memory_bytes: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
+            cpu_usage_millicores: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
+            memory_usage_bytes: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
             versions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
             git_commits: StringVectorBuilder::with_capacity(INIT_CAPACITY),
             start_times: TimestampMillisecondVectorBuilder::with_capacity(INIT_CAPACITY),
@@ -243,8 +260,14 @@ impl InformationSchemaClusterInfoBuilder {
             self.start_times.push(None);
             self.uptimes.push(None);
         }
-        self.cpus.push(Some(node_info.cpus));
-        self.memory_bytes.push(Some(node_info.memory_bytes));
+        self.total_cpu_millicores
+            .push(Some(node_info.total_cpu_millicores));
+        self.total_memory_bytes
+            .push(Some(node_info.total_memory_bytes));
+        self.cpu_usage_millicores
+            .push(Some(node_info.cpu_usage_millicores));
+        self.memory_usage_bytes
+            .push(Some(node_info.memory_usage_bytes));
 
         if node_info.last_activity_ts > 0 {
             self.active_times.push(Some(
@@ -269,8 +292,10 @@ impl InformationSchemaClusterInfoBuilder {
             Arc::new(self.peer_types.finish()),
             Arc::new(self.peer_addrs.finish()),
             Arc::new(self.peer_hostnames.finish()),
-            Arc::new(self.cpus.finish()),
-            Arc::new(self.memory_bytes.finish()),
+            Arc::new(self.total_cpu_millicores.finish()),
+            Arc::new(self.total_memory_bytes.finish()),
+            Arc::new(self.cpu_usage_millicores.finish()),
+            Arc::new(self.memory_usage_bytes.finish()),
             Arc::new(self.versions.finish()),
             Arc::new(self.git_commits.finish()),
             Arc::new(self.start_times.finish()),
@@ -15,8 +15,7 @@
 use std::sync::Arc;
 
 use common_catalog::consts::{METRIC_ENGINE, MITO_ENGINE};
-use datatypes::data_type::ConcreteDataType;
-use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
+use datatypes::schema::{Schema, SchemaRef};
 use datatypes::vectors::{Int64Vector, StringVector, VectorRef};
 
 use crate::system_schema::information_schema::table_names::*;
@@ -366,16 +365,6 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>
             vec![],
         ),
 
-        TRIGGERS => (
-            vec![
-                string_column("TRIGGER_NAME"),
-                ColumnSchema::new("trigger_id", ConcreteDataType::uint64_datatype(), false),
-                string_column("TRIGGER_DEFINITION"),
-                ColumnSchema::new("flownode_id", ConcreteDataType::uint64_datatype(), true),
-            ],
-            vec![],
-        ),
-
         // TODO: Considering store internal metrics in `global_status` and
         // `session_status` tables.
         GLOBAL_STATUS => (
@@ -211,6 +211,7 @@ struct InformationSchemaPartitionsBuilder {
     partition_names: StringVectorBuilder,
     partition_ordinal_positions: Int64VectorBuilder,
     partition_expressions: StringVectorBuilder,
+    partition_descriptions: StringVectorBuilder,
     create_times: TimestampSecondVectorBuilder,
     partition_ids: UInt64VectorBuilder,
 }
@@ -231,6 +232,7 @@ impl InformationSchemaPartitionsBuilder {
             partition_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
             partition_ordinal_positions: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
             partition_expressions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+            partition_descriptions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
             create_times: TimestampSecondVectorBuilder::with_capacity(INIT_CAPACITY),
             partition_ids: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
         }
@@ -319,6 +321,21 @@ impl InformationSchemaPartitionsBuilder {
             return;
         }
 
+        // Get partition column names (shared by all partitions)
+        // In MySQL, PARTITION_EXPRESSION is the partitioning function expression (e.g., column name)
+        let partition_columns: String = table_info
+            .meta
+            .partition_column_names()
+            .cloned()
+            .collect::<Vec<_>>()
+            .join(", ");
+
+        let partition_expr_str = if partition_columns.is_empty() {
+            None
+        } else {
+            Some(partition_columns)
+        };
+
         for (index, partition) in partitions.iter().enumerate() {
             let partition_name = format!("p{index}");
 
@@ -328,8 +345,12 @@ impl InformationSchemaPartitionsBuilder {
             self.partition_names.push(Some(&partition_name));
             self.partition_ordinal_positions
                 .push(Some((index + 1) as i64));
-            let expression = partition.partition_expr.as_ref().map(|e| e.to_string());
-            self.partition_expressions.push(expression.as_deref());
+            // PARTITION_EXPRESSION: partition column names (same for all partitions)
+            self.partition_expressions
+                .push(partition_expr_str.as_deref());
+            // PARTITION_DESCRIPTION: partition boundary expression (different for each partition)
+            let description = partition.partition_expr.as_ref().map(|e| e.to_string());
+            self.partition_descriptions.push(description.as_deref());
             self.create_times.push(Some(TimestampSecond::from(
                 table_info.meta.created_on.timestamp(),
             )));
@@ -369,7 +390,7 @@ impl InformationSchemaPartitionsBuilder {
             null_string_vector.clone(),
             Arc::new(self.partition_expressions.finish()),
             null_string_vector.clone(),
-            null_string_vector.clone(),
+            Arc::new(self.partition_descriptions.finish()),
             // TODO(dennis): rows and index statistics info
             null_i64_vector.clone(),
             null_i64_vector.clone(),
@@ -1,265 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::sync::Arc;
-
-use arrow_schema::SchemaRef as ArrowSchemaRef;
-use common_catalog::consts::INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID;
-use common_error::ext::BoxedError;
-use common_recordbatch::adapter::RecordBatchStreamAdapter;
-use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
-use common_time::util::current_time_millis;
-use datafusion::execution::TaskContext;
-use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
-use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
-use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
-use datatypes::prelude::{ConcreteDataType, MutableVector};
-use datatypes::scalars::ScalarVectorBuilder;
-use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
-use datatypes::vectors::{
-    ConstantVector, Float64VectorBuilder, StringVectorBuilder, TimestampMillisecondVector,
-    VectorRef,
-};
-use itertools::Itertools;
-use snafu::ResultExt;
-use store_api::storage::{ScanRequest, TableId};
-
-use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
-use crate::system_schema::information_schema::{InformationTable, RUNTIME_METRICS};
-
-#[derive(Debug)]
-pub(super) struct InformationSchemaMetrics {
-    schema: SchemaRef,
-}
-
-const METRIC_NAME: &str = "metric_name";
-const METRIC_VALUE: &str = "value";
-const METRIC_LABELS: &str = "labels";
-const PEER_ADDR: &str = "peer_addr";
-const PEER_TYPE: &str = "peer_type";
-const TIMESTAMP: &str = "timestamp";
-
-/// The `information_schema.runtime_metrics` virtual table.
-/// It provides the GreptimeDB runtime metrics for the users by SQL.
-impl InformationSchemaMetrics {
-    pub(super) fn new() -> Self {
-        Self {
-            schema: Self::schema(),
-        }
-    }
-
-    fn schema() -> SchemaRef {
-        Arc::new(Schema::new(vec![
-            ColumnSchema::new(METRIC_NAME, ConcreteDataType::string_datatype(), false),
-            ColumnSchema::new(METRIC_VALUE, ConcreteDataType::float64_datatype(), false),
-            ColumnSchema::new(METRIC_LABELS, ConcreteDataType::string_datatype(), true),
-            ColumnSchema::new(PEER_ADDR, ConcreteDataType::string_datatype(), true),
-            ColumnSchema::new(PEER_TYPE, ConcreteDataType::string_datatype(), false),
-            ColumnSchema::new(
-                TIMESTAMP,
-                ConcreteDataType::timestamp_millisecond_datatype(),
-                false,
-            ),
-        ]))
-    }
-
-    fn builder(&self) -> InformationSchemaMetricsBuilder {
-        InformationSchemaMetricsBuilder::new(self.schema.clone())
-    }
-}
-
-impl InformationTable for InformationSchemaMetrics {
-    fn table_id(&self) -> TableId {
-        INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID
-    }
-
-    fn table_name(&self) -> &'static str {
-        RUNTIME_METRICS
-    }
-
-    fn schema(&self) -> SchemaRef {
-        self.schema.clone()
-    }
-
-    fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
-        let schema = self.schema.arrow_schema().clone();
-        let mut builder = self.builder();
-        let stream = Box::pin(DfRecordBatchStreamAdapter::new(
-            schema,
-            futures::stream::once(async move {
-                builder
-                    .make_metrics(Some(request))
-                    .await
-                    .map(|x| x.into_df_record_batch())
-                    .map_err(Into::into)
-            }),
-        ));
-
-        Ok(Box::pin(
-            RecordBatchStreamAdapter::try_new(stream)
-                .map_err(BoxedError::new)
-                .context(InternalSnafu)?,
-        ))
-    }
-}
-
-struct InformationSchemaMetricsBuilder {
-    schema: SchemaRef,
-
-    metric_names: StringVectorBuilder,
-    metric_values: Float64VectorBuilder,
-    metric_labels: StringVectorBuilder,
-    peer_addrs: StringVectorBuilder,
-    peer_types: StringVectorBuilder,
-}
-
-impl InformationSchemaMetricsBuilder {
-    fn new(schema: SchemaRef) -> Self {
-        Self {
-            schema,
-            metric_names: StringVectorBuilder::with_capacity(42),
-            metric_values: Float64VectorBuilder::with_capacity(42),
-            metric_labels: StringVectorBuilder::with_capacity(42),
-            peer_addrs: StringVectorBuilder::with_capacity(42),
-            peer_types: StringVectorBuilder::with_capacity(42),
-        }
-    }
-
-    fn add_metric(
-        &mut self,
-        metric_name: &str,
-        labels: String,
-        metric_value: f64,
-        peer: Option<&str>,
-        peer_type: &str,
-    ) {
-        self.metric_names.push(Some(metric_name));
-        self.metric_values.push(Some(metric_value));
-        self.metric_labels.push(Some(&labels));
-        self.peer_addrs.push(peer);
-        self.peer_types.push(Some(peer_type));
-    }
-
-    async fn make_metrics(&mut self, _request: Option<ScanRequest>) -> Result<RecordBatch> {
-        let metric_families = prometheus::gather();
-
-        let write_request =
-            common_telemetry::metric::convert_metric_to_write_request(metric_families, None, 0);
-
-        for ts in write_request.timeseries {
-            //Safety: always has `__name__` label
-            let metric_name = ts
-                .labels
-                .iter()
-                .find_map(|label| {
-                    if label.name == "__name__" {
-                        Some(label.value.clone())
-                    } else {
-                        None
-                    }
-                })
-                .unwrap();
-
-            self.add_metric(
-                &metric_name,
-                ts.labels
-                    .into_iter()
-                    .filter_map(|label| {
-                        if label.name == "__name__" {
-                            None
-                        } else {
-                            Some(format!("{}={}", label.name, label.value))
-                        }
-                    })
-                    .join(", "),
-                // Safety: always has a sample
-                ts.samples[0].value,
-                // The peer column is always `None` for standalone
-                None,
-                "STANDALONE",
-            );
-        }
-
-        // FIXME(dennis): fetching other peers metrics
-        self.finish()
-    }
-
-    fn finish(&mut self) -> Result<RecordBatch> {
-        let rows_num = self.metric_names.len();
-
-        let timestamps = Arc::new(ConstantVector::new(
-            Arc::new(TimestampMillisecondVector::from_slice([
-                current_time_millis(),
-            ])),
-            rows_num,
-        ));
-
-        let columns: Vec<VectorRef> = vec![
-            Arc::new(self.metric_names.finish()),
-            Arc::new(self.metric_values.finish()),
-            Arc::new(self.metric_labels.finish()),
-            Arc::new(self.peer_addrs.finish()),
-            Arc::new(self.peer_types.finish()),
-            timestamps,
-        ];
-
-        RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
-    }
-}
-
-impl DfPartitionStream for InformationSchemaMetrics {
-    fn schema(&self) -> &ArrowSchemaRef {
-        self.schema.arrow_schema()
-    }
-
-    fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
-        let schema = self.schema.arrow_schema().clone();
-        let mut builder = self.builder();
-        Box::pin(DfRecordBatchStreamAdapter::new(
-            schema,
-            futures::stream::once(async move {
-                builder
-                    .make_metrics(None)
-                    .await
-                    .map(|x| x.into_df_record_batch())
-                    .map_err(Into::into)
-            }),
-        ))
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use common_recordbatch::RecordBatches;
-
-    use super::*;
-
-    #[tokio::test]
-    async fn test_make_metrics() {
-        let metrics = InformationSchemaMetrics::new();
-
-        let stream = metrics.to_stream(ScanRequest::default()).unwrap();
-
-        let batches = RecordBatches::try_collect(stream).await.unwrap();
-
-        let result_literal = batches.pretty_print().unwrap();
-
-        assert!(result_literal.contains(METRIC_NAME));
-        assert!(result_literal.contains(METRIC_VALUE));
-        assert!(result_literal.contains(METRIC_LABELS));
-        assert!(result_literal.contains(PEER_ADDR));
-        assert!(result_literal.contains(PEER_TYPE));
-        assert!(result_literal.contains(TIMESTAMP));
-    }
-}
@@ -38,7 +38,6 @@ pub const TABLE_PRIVILEGES: &str = "table_privileges";
 pub const TRIGGERS: &str = "triggers";
 pub const GLOBAL_STATUS: &str = "global_status";
 pub const SESSION_STATUS: &str = "session_status";
-pub const RUNTIME_METRICS: &str = "runtime_metrics";
 pub const PARTITIONS: &str = "partitions";
 pub const REGION_PEERS: &str = "region_peers";
 pub const TABLE_CONSTRAINTS: &str = "table_constraints";
@@ -12,7 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use std::collections::HashSet;
 use std::sync::{Arc, Weak};
 
 use arrow_schema::SchemaRef as ArrowSchemaRef;
@@ -255,14 +254,17 @@ impl InformationSchemaTablesBuilder {
         // TODO(dennis): `region_stats` API is not stable in distributed cluster because of network issue etc.
         // But we don't want the statements such as `show tables` fail,
         // so using `unwrap_or_else` here instead of `?` operator.
-        let region_stats = information_extension
-            .region_stats()
-            .await
-            .map_err(|e| {
-                error!(e; "Failed to call region_stats");
-                e
-            })
-            .unwrap_or_else(|_| vec![]);
+        let region_stats = {
+            let mut x = information_extension
+                .region_stats()
+                .await
+                .unwrap_or_else(|e| {
+                    error!(e; "Failed to find region stats in information_schema, fallback to all empty");
+                    vec![]
+                });
+            x.sort_unstable_by_key(|x| x.id);
+            x
+        };
 
         for schema_name in catalog_manager.schema_names(&catalog_name, None).await? {
             let mut stream = catalog_manager.tables(&catalog_name, &schema_name, None);
@@ -273,16 +275,16 @@ impl InformationSchemaTablesBuilder {
             // TODO(dennis): make it working for metric engine
             let table_region_stats =
                 if table_info.meta.engine == MITO_ENGINE || table_info.is_physical_table() {
-                    let region_ids = table_info
+                    table_info
                         .meta
                         .region_numbers
                         .iter()
                         .map(|n| RegionId::new(table_info.ident.table_id, *n))
-                        .collect::<HashSet<_>>();
-
-                    region_stats
-                        .iter()
-                        .filter(|stat| region_ids.contains(&stat.id))
+                        .flat_map(|region_id| {
+                            region_stats
+                                .binary_search_by_key(&region_id, |x| x.id)
+                                .map(|i| &region_stats[i])
+                        })
                         .collect::<Vec<_>>()
                 } else {
                     vec![]
59 src/catalog/src/system_schema/numbers_table_provider.rs Normal file
@@ -0,0 +1,59 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#[cfg(any(test, feature = "testing", debug_assertions))]
+use common_catalog::consts::NUMBERS_TABLE_ID;
+use table::TableRef;
+#[cfg(any(test, feature = "testing", debug_assertions))]
+use table::table::numbers::NUMBERS_TABLE_NAME;
+#[cfg(any(test, feature = "testing", debug_assertions))]
+use table::table::numbers::NumbersTable;
+
+// NumbersTableProvider is a dedicated provider for feature-gating the numbers table.
+#[derive(Clone)]
+pub struct NumbersTableProvider;
+
+#[cfg(any(test, feature = "testing", debug_assertions))]
+impl NumbersTableProvider {
+    pub(crate) fn table_exists(&self, name: &str) -> bool {
+        name == NUMBERS_TABLE_NAME
+    }
+
+    pub(crate) fn table_names(&self) -> Vec<String> {
+        vec![NUMBERS_TABLE_NAME.to_string()]
+    }
+
+    pub(crate) fn table(&self, name: &str) -> Option<TableRef> {
+        if name == NUMBERS_TABLE_NAME {
+            Some(NumbersTable::table(NUMBERS_TABLE_ID))
+        } else {
+            None
+        }
+    }
+}
+
+#[cfg(not(any(test, feature = "testing", debug_assertions)))]
+impl NumbersTableProvider {
+    pub(crate) fn table_exists(&self, _name: &str) -> bool {
+        false
+    }
+
+    pub(crate) fn table_names(&self) -> Vec<String> {
+        vec![]
+    }
+
+    pub(crate) fn table(&self, _name: &str) -> Option<TableRef> {
+        None
+    }
+}
@@ -16,12 +16,15 @@ mod export;
 mod import;
 
 use clap::Subcommand;
+use client::DEFAULT_CATALOG_NAME;
 use common_error::ext::BoxedError;
 
 use crate::Tool;
 use crate::data::export::ExportCommand;
 use crate::data::import::ImportCommand;
 
+pub(crate) const COPY_PATH_PLACEHOLDER: &str = "<PATH/TO/FILES>";
+
 /// Command for data operations including exporting data from and importing data into GreptimeDB.
 #[derive(Subcommand)]
 pub enum DataCommand {
@@ -37,3 +40,7 @@ impl DataCommand {
         }
     }
 }
+
+pub(crate) fn default_database() -> String {
+    format!("{DEFAULT_CATALOG_NAME}-*")
+}
@@ -30,6 +30,7 @@ use snafu::{OptionExt, ResultExt};
 use tokio::sync::Semaphore;
 use tokio::time::Instant;
 
+use crate::data::{COPY_PATH_PLACEHOLDER, default_database};
 use crate::database::{DatabaseClient, parse_proxy_opts};
 use crate::error::{
     EmptyResultSnafu, Error, OpenDalSnafu, OutputDirNotSetSnafu, Result, S3ConfigNotSetSnafu,
@@ -63,12 +64,20 @@ pub struct ExportCommand {
     output_dir: Option<String>,
 
     /// The name of the catalog to export.
-    #[clap(long, default_value = "greptime-*")]
+    #[clap(long, default_value_t = default_database())]
     database: String,
 
-    /// Parallelism of the export.
-    #[clap(long, short = 'j', default_value = "1")]
-    export_jobs: usize,
+    /// The number of databases exported in parallel.
+    /// For example, if there are 20 databases and `db_parallelism` is 4,
+    /// 4 databases will be exported concurrently.
+    #[clap(long, short = 'j', default_value = "1", alias = "export-jobs")]
+    db_parallelism: usize,
+
+    /// The number of tables exported in parallel within a single database.
+    /// For example, if a database has 30 tables and `parallelism` is 8,
+    /// 8 tables will be exported concurrently.
+    #[clap(long, default_value = "4")]
+    table_parallelism: usize,
 
     /// Max retry times for each job.
     #[clap(long, default_value = "3")]
@@ -209,10 +218,11 @@ impl ExportCommand {
             schema,
             database_client,
             output_dir: self.output_dir.clone(),
-            parallelism: self.export_jobs,
+            export_jobs: self.db_parallelism,
             target: self.target.clone(),
             start_time: self.start_time.clone(),
             end_time: self.end_time.clone(),
+            parallelism: self.table_parallelism,
             s3: self.s3,
             ddl_local_dir: self.ddl_local_dir.clone(),
             s3_bucket: self.s3_bucket.clone(),
@@ -250,10 +260,11 @@ pub struct Export {
     schema: Option<String>,
     database_client: DatabaseClient,
     output_dir: Option<String>,
-    parallelism: usize,
+    export_jobs: usize,
     target: ExportTarget,
     start_time: Option<String>,
    end_time: Option<String>,
+    parallelism: usize,
     s3: bool,
     ddl_local_dir: Option<String>,
     s3_bucket: Option<String>,
@@ -463,7 +474,7 @@ impl Export {
 
     async fn export_create_table(&self) -> Result<()> {
         let timer = Instant::now();
-        let semaphore = Arc::new(Semaphore::new(self.parallelism));
+        let semaphore = Arc::new(Semaphore::new(self.export_jobs));
         let db_names = self.get_db_names().await?;
         let db_count = db_names.len();
         let operator = Arc::new(self.build_prefer_fs_operator().await?);
@@ -624,13 +635,13 @@ impl Export {
 
     async fn export_database_data(&self) -> Result<()> {
         let timer = Instant::now();
-        let semaphore = Arc::new(Semaphore::new(self.parallelism));
+        let semaphore = Arc::new(Semaphore::new(self.export_jobs));
         let db_names = self.get_db_names().await?;
         let db_count = db_names.len();
         let mut tasks = Vec::with_capacity(db_count);
         let operator = Arc::new(self.build_operator().await?);
         let fs_first_operator = Arc::new(self.build_prefer_fs_operator().await?);
-        let with_options = build_with_options(&self.start_time, &self.end_time);
+        let with_options = build_with_options(&self.start_time, &self.end_time, self.parallelism);
 
         for schema in db_names {
             let semaphore_moved = semaphore.clone();
@@ -667,11 +678,27 @@ impl Export {
                 );
 
                 // Create copy_from.sql file
-                let copy_database_from_sql = format!(
-                    r#"COPY DATABASE "{}"."{}" FROM '{}' WITH ({}){};"#,
-                    export_self.catalog, schema, path, with_options_clone, connection_part
-                );
+                let copy_database_from_sql = {
+                    let command_without_connection = format!(
+                        r#"COPY DATABASE "{}"."{}" FROM '{}' WITH ({});"#,
+                        export_self.catalog, schema, COPY_PATH_PLACEHOLDER, with_options_clone
+                    );
+
+                    if connection_part.is_empty() {
+                        command_without_connection
+                    } else {
+                        let command_with_connection = format!(
+                            r#"COPY DATABASE "{}"."{}" FROM '{}' WITH ({}){};"#,
+                            export_self.catalog, schema, path, with_options_clone, connection_part
+                        );
+
+                        format!(
+                            "-- {}\n{}",
+                            command_with_connection, command_without_connection
+                        )
+                    }
+                };
 
                 let copy_from_path = export_self.get_file_path(&schema, "copy_from.sql");
                 export_self
                     .write_to_storage(
@@ -871,7 +898,11 @@ impl Tool for Export {
 }
 
 /// Builds the WITH options string for SQL commands, assuming consistent syntax across S3 and local exports.
-fn build_with_options(start_time: &Option<String>, end_time: &Option<String>) -> String {
+fn build_with_options(
+    start_time: &Option<String>,
+    end_time: &Option<String>,
+    parallelism: usize,
+) -> String {
     let mut options = vec!["format = 'parquet'".to_string()];
     if let Some(start) = start_time {
         options.push(format!("start_time = '{}'", start));
@@ -879,5 +910,6 @@ fn build_with_options(start_time: &Option<String>, end_time: &Option<String>) ->
     if let Some(end) = end_time {
         options.push(format!("end_time = '{}'", end));
     }
+    options.push(format!("parallelism = {}", parallelism));
     options.join(", ")
 }
@@ -21,12 +21,13 @@ use clap::{Parser, ValueEnum};
 use common_catalog::consts::DEFAULT_SCHEMA_NAME;
 use common_error::ext::BoxedError;
 use common_telemetry::{error, info, warn};
-use snafu::{OptionExt, ResultExt};
+use snafu::{OptionExt, ResultExt, ensure};
 use tokio::sync::Semaphore;
 use tokio::time::Instant;
 
+use crate::data::{COPY_PATH_PLACEHOLDER, default_database};
 use crate::database::{DatabaseClient, parse_proxy_opts};
-use crate::error::{Error, FileIoSnafu, Result, SchemaNotFoundSnafu};
+use crate::error::{Error, FileIoSnafu, InvalidArgumentsSnafu, Result, SchemaNotFoundSnafu};
 use crate::{Tool, database};
 
 #[derive(Debug, Default, Clone, ValueEnum)]
@@ -52,12 +53,14 @@ pub struct ImportCommand {
     input_dir: String,
 
     /// The name of the catalog to import.
-    #[clap(long, default_value = "greptime-*")]
+    #[clap(long, default_value_t = default_database())]
     database: String,
 
-    /// Parallelism of the import.
-    #[clap(long, short = 'j', default_value = "1")]
-    import_jobs: usize,
+    /// The number of databases imported in parallel.
+    /// For example, if there are 20 databases and `db_parallelism` is 4,
+    /// 4 databases will be imported concurrently.
+    #[clap(long, short = 'j', default_value = "1", alias = "import-jobs")]
+    db_parallelism: usize,
 
     /// Max retry times for each job.
     #[clap(long, default_value = "3")]
@@ -108,7 +111,7 @@ impl ImportCommand {
             schema,
             database_client,
             input_dir: self.input_dir.clone(),
-            parallelism: self.import_jobs,
+            parallelism: self.db_parallelism,
             target: self.target.clone(),
         }))
     }
@@ -147,12 +150,15 @@ impl Import {
             let _permit = semaphore_moved.acquire().await.unwrap();
             let database_input_dir = self.catalog_path().join(&schema);
             let sql_file = database_input_dir.join(filename);
-            let sql = tokio::fs::read_to_string(sql_file)
+            let mut sql = tokio::fs::read_to_string(sql_file)
                 .await
                 .context(FileIoSnafu)?;
-            if sql.is_empty() {
+            if sql.trim().is_empty() {
                 info!("Empty `{filename}` {database_input_dir:?}");
             } else {
+                if filename == "copy_from.sql" {
+                    sql = self.rewrite_copy_database_sql(&schema, &sql)?;
+                }
                 let db = exec_db.unwrap_or(&schema);
                 self.database_client.sql(&sql, db).await?;
                 info!("Imported `{filename}` for database {schema}");
@@ -225,6 +231,57 @@ impl Import {
         }
         Ok(db_names)
     }
+
+    fn rewrite_copy_database_sql(&self, schema: &str, sql: &str) -> Result<String> {
+        let target_location = self.build_copy_database_location(schema);
+        let escaped_location = target_location.replace('\'', "''");
+
+        let mut first_stmt_checked = false;
+        for line in sql.lines() {
+            let trimmed = line.trim_start();
+            if trimmed.is_empty() || trimmed.starts_with("--") {
+                continue;
+            }
+
+            ensure!(
+                trimmed.starts_with("COPY DATABASE"),
+                InvalidArgumentsSnafu {
+                    msg: "Expected COPY DATABASE statement at start of copy_from.sql"
+                }
+            );
+            first_stmt_checked = true;
+            break;
+        }
+
+        ensure!(
+            first_stmt_checked,
+            InvalidArgumentsSnafu {
+                msg: "COPY DATABASE statement not found in copy_from.sql"
+            }
+        );
+
+        ensure!(
+            sql.contains(COPY_PATH_PLACEHOLDER),
+            InvalidArgumentsSnafu {
+                msg: format!(
+                    "Placeholder `{}` not found in COPY DATABASE statement",
+                    COPY_PATH_PLACEHOLDER
+                )
+            }
+        );
+
+        Ok(sql.replacen(COPY_PATH_PLACEHOLDER, &escaped_location, 1))
+    }
+
+    fn build_copy_database_location(&self, schema: &str) -> String {
+        let mut path = self.catalog_path();
+        path.push(schema);
+        let mut path_str = path.to_string_lossy().into_owned();
+        if !path_str.ends_with('/') {
+            path_str.push('/');
+        }
+        path_str
+    }
 }
 
 #[async_trait]
@@ -240,3 +297,52 @@ impl Tool for Import {
         }
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use std::time::Duration;
+
+    use super::*;
+
+    fn build_import(input_dir: &str) -> Import {
+        Import {
+            catalog: "catalog".to_string(),
+            schema: None,
+            database_client: DatabaseClient::new(
+                "127.0.0.1:4000".to_string(),
+                "catalog".to_string(),
+                None,
+                Duration::from_secs(0),
+                None,
+            ),
+            input_dir: input_dir.to_string(),
+            parallelism: 1,
+            target: ImportTarget::Data,
+        }
+    }
+
+    #[test]
+    fn rewrite_copy_database_sql_replaces_placeholder() {
+        let import = build_import("/tmp/export-path");
+        let comment = "-- COPY DATABASE \"catalog\".\"schema\" FROM 's3://bucket/demo/' WITH (format = 'parquet') CONNECTION (region = 'us-west-2')";
+        let sql = format!(
+            "{comment}\nCOPY DATABASE \"catalog\".\"schema\" FROM '{}' WITH (format = 'parquet');",
+            COPY_PATH_PLACEHOLDER
+        );
+
+        let rewritten = import.rewrite_copy_database_sql("schema", &sql).unwrap();
+        let expected_location = import.build_copy_database_location("schema");
+        let escaped = expected_location.replace('\'', "''");
+
+        assert!(rewritten.starts_with(comment));
+        assert!(rewritten.contains(&format!("FROM '{escaped}'")));
+        assert!(!rewritten.contains(COPY_PATH_PLACEHOLDER));
+    }
+
+    #[test]
+    fn rewrite_copy_database_sql_requires_placeholder() {
+        let import = build_import("/tmp/export-path");
+        let sql = "COPY DATABASE \"catalog\".\"schema\" FROM '/tmp/export-path/catalog/schema/' WITH (format = 'parquet');";
+        assert!(import.rewrite_copy_database_sql("schema", sql).is_err());
+    }
+}
@@ -20,7 +20,9 @@ use api::v1::health_check_client::HealthCheckClient;
|
|||||||
use api::v1::prometheus_gateway_client::PrometheusGatewayClient;
|
use api::v1::prometheus_gateway_client::PrometheusGatewayClient;
|
||||||
use api::v1::region::region_client::RegionClient as PbRegionClient;
|
use api::v1::region::region_client::RegionClient as PbRegionClient;
|
||||||
use arrow_flight::flight_service_client::FlightServiceClient;
|
use arrow_flight::flight_service_client::FlightServiceClient;
|
||||||
use common_grpc::channel_manager::{ChannelConfig, ChannelManager, ClientTlsOption};
|
use common_grpc::channel_manager::{
|
||||||
|
ChannelConfig, ChannelManager, ClientTlsOption, load_client_tls_config,
|
||||||
|
};
|
||||||
use parking_lot::RwLock;
|
use parking_lot::RwLock;
|
||||||
use snafu::{OptionExt, ResultExt};
|
use snafu::{OptionExt, ResultExt};
|
||||||
use tonic::codec::CompressionEncoding;
|
use tonic::codec::CompressionEncoding;
|
||||||
@@ -93,9 +95,10 @@ impl Client {
|
|||||||
U: AsRef<str>,
|
U: AsRef<str>,
|
||||||
A: AsRef<[U]>,
|
A: AsRef<[U]>,
|
||||||
{
|
{
|
||||||
let channel_config = ChannelConfig::default().client_tls_config(client_tls);
|
let channel_config = ChannelConfig::default().client_tls_config(client_tls.clone());
|
||||||
let channel_manager = ChannelManager::with_tls_config(channel_config)
|
let tls_config =
|
||||||
.context(error::CreateTlsChannelSnafu)?;
|
load_client_tls_config(Some(client_tls)).context(error::CreateTlsChannelSnafu)?;
|
||||||
|
let channel_manager = ChannelManager::with_config(channel_config, tls_config);
|
||||||
Ok(Self::with_manager_and_urls(channel_manager, urls))
|
Ok(Self::with_manager_and_urls(channel_manager, urls))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -74,7 +74,7 @@ impl FlownodeManager for NodeClients {
|
|||||||
impl NodeClients {
|
impl NodeClients {
|
||||||
pub fn new(config: ChannelConfig) -> Self {
|
pub fn new(config: ChannelConfig) -> Self {
|
||||||
Self {
|
Self {
|
||||||
channel_manager: ChannelManager::with_config(config),
|
channel_manager: ChannelManager::with_config(config, None),
|
||||||
clients: CacheBuilder::new(1024)
|
clients: CacheBuilder::new(1024)
|
||||||
.time_to_live(Duration::from_secs(30 * 60))
|
.time_to_live(Duration::from_secs(30 * 60))
|
||||||
.time_to_idle(Duration::from_secs(5 * 60))
|
.time_to_idle(Duration::from_secs(5 * 60))
|
||||||
|
|||||||
@@ -435,10 +435,10 @@ impl Database {
|
|||||||
.context(ExternalSnafu)?;
|
.context(ExternalSnafu)?;
|
||||||
match flight_message {
|
match flight_message {
|
||||||
FlightMessage::RecordBatch(arrow_batch) => {
|
FlightMessage::RecordBatch(arrow_batch) => {
|
||||||
yield RecordBatch::try_from_df_record_batch(
|
yield Ok(RecordBatch::from_df_record_batch(
|
||||||
schema_cloned.clone(),
|
schema_cloned.clone(),
|
||||||
arrow_batch,
|
arrow_batch,
|
||||||
)
|
))
|
||||||
}
|
}
|
||||||
FlightMessage::Metrics(_) => {}
|
FlightMessage::Metrics(_) => {}
|
||||||
FlightMessage::AffectedRows(_) | FlightMessage::Schema(_) => {
|
FlightMessage::AffectedRows(_) | FlightMessage::Schema(_) => {
|
||||||
|
|||||||
@@ -182,10 +182,8 @@ impl RegionRequester {
|
|||||||
|
|
||||||
match flight_message {
|
match flight_message {
|
||||||
FlightMessage::RecordBatch(record_batch) => {
|
FlightMessage::RecordBatch(record_batch) => {
|
||||||
let result_to_yield = RecordBatch::try_from_df_record_batch(
|
let result_to_yield =
|
||||||
schema_cloned.clone(),
|
RecordBatch::from_df_record_batch(schema_cloned.clone(), record_batch);
|
||||||
record_batch,
|
|
||||||
);
|
|
||||||
|
|
||||||
// get the next message from the stream. normally it should be a metrics message.
|
// get the next message from the stream. normally it should be a metrics message.
|
||||||
if let Some(next_flight_message_result) = flight_message_stream.next().await
|
if let Some(next_flight_message_result) = flight_message_stream.next().await
|
||||||
@@ -219,7 +217,7 @@ impl RegionRequester {
|
|||||||
stream_ended = true;
|
stream_ended = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
yield result_to_yield;
|
yield Ok(result_to_yield);
|
||||||
}
|
}
|
||||||
FlightMessage::Metrics(s) => {
|
FlightMessage::Metrics(s) => {
|
||||||
// just a branch in case of some metrics message comes after other things.
|
// just a branch in case of some metrics message comes after other things.
|
||||||
|
|||||||
@@ -16,7 +16,7 @@ default = [
|
|||||||
"meta-srv/pg_kvbackend",
|
"meta-srv/pg_kvbackend",
|
||||||
"meta-srv/mysql_kvbackend",
|
"meta-srv/mysql_kvbackend",
|
||||||
]
|
]
|
||||||
enterprise = ["common-meta/enterprise", "frontend/enterprise", "meta-srv/enterprise", "catalog/enterprise"]
|
enterprise = ["common-meta/enterprise", "frontend/enterprise", "meta-srv/enterprise"]
|
||||||
tokio-console = ["common-telemetry/tokio-console"]
|
tokio-console = ["common-telemetry/tokio-console"]
|
||||||
|
|
||||||
[lints]
|
[lints]
|
||||||
@@ -29,9 +29,11 @@ base64.workspace = true
|
|||||||
cache.workspace = true
|
cache.workspace = true
|
||||||
catalog.workspace = true
|
catalog.workspace = true
|
||||||
chrono.workspace = true
|
chrono.workspace = true
|
||||||
|
either = "1.15"
|
||||||
clap.workspace = true
|
clap.workspace = true
|
||||||
cli.workspace = true
|
cli.workspace = true
|
||||||
client.workspace = true
|
client.workspace = true
|
||||||
|
colored = "2.1.0"
|
||||||
common-base.workspace = true
|
common-base.workspace = true
|
||||||
common-catalog.workspace = true
|
common-catalog.workspace = true
|
||||||
common-config.workspace = true
|
common-config.workspace = true
|
||||||
@@ -63,9 +65,11 @@ lazy_static.workspace = true
|
|||||||
meta-client.workspace = true
|
meta-client.workspace = true
|
||||||
meta-srv.workspace = true
|
meta-srv.workspace = true
|
||||||
metric-engine.workspace = true
|
metric-engine.workspace = true
|
||||||
|
mito2.workspace = true
|
||||||
moka.workspace = true
|
moka.workspace = true
|
||||||
nu-ansi-term = "0.46"
|
nu-ansi-term = "0.46"
|
||||||
object-store.workspace = true
|
object-store.workspace = true
|
||||||
|
parquet = { workspace = true, features = ["object_store"] }
|
||||||
plugins.workspace = true
|
plugins.workspace = true
|
||||||
prometheus.workspace = true
|
prometheus.workspace = true
|
||||||
prost.workspace = true
|
prost.workspace = true
|
||||||
@@ -88,6 +92,11 @@ toml.workspace = true
|
|||||||
tonic.workspace = true
|
tonic.workspace = true
|
||||||
tracing-appender.workspace = true
|
tracing-appender.workspace = true
|
||||||
|
|
||||||
|
[target.'cfg(unix)'.dependencies]
|
||||||
|
pprof = { version = "0.14", features = [
|
||||||
|
"flamegraph",
|
||||||
|
] }
|
||||||
|
|
||||||
[target.'cfg(not(windows))'.dependencies]
|
[target.'cfg(not(windows))'.dependencies]
|
||||||
tikv-jemallocator = "0.6"
|
tikv-jemallocator = "0.6"
|
||||||
|
|
||||||
|
|||||||
@@ -103,12 +103,15 @@ async fn main_body() -> Result<()> {
|
|||||||
|
|
||||||
async fn start(cli: Command) -> Result<()> {
|
async fn start(cli: Command) -> Result<()> {
|
||||||
match cli.subcmd {
|
match cli.subcmd {
|
||||||
SubCommand::Datanode(cmd) => {
|
SubCommand::Datanode(cmd) => match cmd.subcmd {
|
||||||
let opts = cmd.load_options(&cli.global_options)?;
|
datanode::SubCommand::Start(ref start) => {
|
||||||
|
let opts = start.load_options(&cli.global_options)?;
|
||||||
let plugins = Plugins::new();
|
let plugins = Plugins::new();
|
||||||
let builder = InstanceBuilder::try_new_with_init(opts, plugins).await?;
|
let builder = InstanceBuilder::try_new_with_init(opts, plugins).await?;
|
||||||
cmd.build_with(builder).await?.run().await
|
cmd.build_with(builder).await?.run().await
|
||||||
}
|
}
|
||||||
|
datanode::SubCommand::Objbench(ref bench) => bench.run().await,
|
||||||
|
},
|
||||||
SubCommand::Flownode(cmd) => {
|
SubCommand::Flownode(cmd) => {
|
||||||
cmd.build(cmd.load_options(&cli.global_options)?)
|
cmd.build(cmd.load_options(&cli.global_options)?)
|
||||||
.await?
|
.await?
|
||||||
|
|||||||
@@ -13,6 +13,8 @@
|
|||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
pub mod builder;
|
pub mod builder;
|
||||||
|
#[allow(clippy::print_stdout)]
|
||||||
|
mod objbench;
|
||||||
|
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
@@ -23,13 +25,16 @@ use common_config::Configurable;
|
|||||||
use common_telemetry::logging::{DEFAULT_LOGGING_DIR, TracingOptions};
|
use common_telemetry::logging::{DEFAULT_LOGGING_DIR, TracingOptions};
|
||||||
use common_telemetry::{info, warn};
|
use common_telemetry::{info, warn};
|
||||||
use common_wal::config::DatanodeWalConfig;
|
use common_wal::config::DatanodeWalConfig;
|
||||||
|
use datanode::config::RegionEngineConfig;
|
||||||
use datanode::datanode::Datanode;
|
use datanode::datanode::Datanode;
|
||||||
use meta_client::MetaClientOptions;
|
use meta_client::MetaClientOptions;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
use snafu::{ResultExt, ensure};
|
use snafu::{ResultExt, ensure};
|
||||||
use tracing_appender::non_blocking::WorkerGuard;
|
use tracing_appender::non_blocking::WorkerGuard;
|
||||||
|
|
||||||
use crate::App;
|
use crate::App;
|
||||||
use crate::datanode::builder::InstanceBuilder;
|
use crate::datanode::builder::InstanceBuilder;
|
||||||
|
use crate::datanode::objbench::ObjbenchCommand;
|
||||||
use crate::error::{
|
use crate::error::{
|
||||||
LoadLayeredConfigSnafu, MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu,
|
LoadLayeredConfigSnafu, MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu,
|
||||||
};
|
};
|
||||||
@@ -89,7 +94,7 @@ impl App for Instance {
|
|||||||
#[derive(Parser)]
|
#[derive(Parser)]
|
||||||
pub struct Command {
|
pub struct Command {
|
||||||
#[clap(subcommand)]
|
#[clap(subcommand)]
|
||||||
subcmd: SubCommand,
|
pub subcmd: SubCommand,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Command {
|
impl Command {
|
||||||
@@ -100,13 +105,26 @@ impl Command {
|
|||||||
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<DatanodeOptions> {
|
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<DatanodeOptions> {
|
||||||
match &self.subcmd {
|
match &self.subcmd {
|
||||||
SubCommand::Start(cmd) => cmd.load_options(global_options),
|
SubCommand::Start(cmd) => cmd.load_options(global_options),
|
||||||
|
SubCommand::Objbench(_) => {
|
||||||
|
// For objbench command, we don't need to load DatanodeOptions
|
||||||
|
// It's a standalone utility command
|
||||||
|
let mut opts = datanode::config::DatanodeOptions::default();
|
||||||
|
opts.sanitize();
|
||||||
|
Ok(DatanodeOptions {
|
||||||
|
runtime: Default::default(),
|
||||||
|
plugins: Default::default(),
|
||||||
|
component: opts,
|
||||||
|
})
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Parser)]
|
#[derive(Parser)]
|
||||||
enum SubCommand {
|
pub enum SubCommand {
|
||||||
Start(StartCommand),
|
Start(StartCommand),
|
||||||
|
/// Object storage benchmark tool
|
||||||
|
Objbench(ObjbenchCommand),
|
||||||
}
|
}
|
||||||
|
|
||||||
impl SubCommand {
|
impl SubCommand {
|
||||||
@@ -116,12 +134,33 @@ impl SubCommand {
|
|||||||
info!("Building datanode with {:#?}", cmd);
|
info!("Building datanode with {:#?}", cmd);
|
||||||
builder.build().await
|
builder.build().await
|
||||||
}
|
}
|
||||||
|
SubCommand::Objbench(cmd) => {
|
||||||
|
cmd.run().await?;
|
||||||
|
std::process::exit(0);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Storage engine config
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
|
||||||
|
#[serde(default)]
|
||||||
|
pub struct StorageConfig {
|
||||||
|
/// The working directory of database
|
||||||
|
pub data_home: String,
|
||||||
|
#[serde(flatten)]
|
||||||
|
pub store: object_store::config::ObjectStoreConfig,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
|
||||||
|
#[serde(default)]
|
||||||
|
struct StorageConfigWrapper {
|
||||||
|
storage: StorageConfig,
|
||||||
|
region_engine: Vec<RegionEngineConfig>,
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(Debug, Parser, Default)]
|
#[derive(Debug, Parser, Default)]
|
||||||
struct StartCommand {
|
pub struct StartCommand {
|
||||||
#[clap(long)]
|
#[clap(long)]
|
||||||
node_id: Option<u64>,
|
node_id: Option<u64>,
|
||||||
/// The address to bind the gRPC server.
|
/// The address to bind the gRPC server.
|
||||||
@@ -149,7 +188,7 @@ struct StartCommand {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl StartCommand {
|
impl StartCommand {
|
||||||
fn load_options(&self, global_options: &GlobalOptions) -> Result<DatanodeOptions> {
|
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<DatanodeOptions> {
|
||||||
let mut opts = DatanodeOptions::load_layered_options(
|
let mut opts = DatanodeOptions::load_layered_options(
|
||||||
self.config_file.as_deref(),
|
self.config_file.as_deref(),
|
||||||
self.env_prefix.as_ref(),
|
self.env_prefix.as_ref(),
|
||||||
|
|||||||
678
src/cmd/src/datanode/objbench.rs
Normal file
678
src/cmd/src/datanode/objbench.rs
Normal file
@@ -0,0 +1,678 @@
|
|||||||
|
// Copyright 2023 Greptime Team
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
use std::path::PathBuf;
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::time::Instant;
|
||||||
|
|
||||||
|
use clap::Parser;
|
||||||
|
use colored::Colorize;
|
||||||
|
use datanode::config::RegionEngineConfig;
|
||||||
|
use datanode::store;
|
||||||
|
use either::Either;
|
||||||
|
use mito2::access_layer::{
|
||||||
|
AccessLayer, AccessLayerRef, Metrics, OperationType, SstWriteRequest, WriteType,
|
||||||
|
};
|
||||||
|
use mito2::cache::{CacheManager, CacheManagerRef};
|
||||||
|
use mito2::config::{FulltextIndexConfig, MitoConfig, Mode};
|
||||||
|
use mito2::read::Source;
|
||||||
|
use mito2::sst::file::{FileHandle, FileMeta};
|
||||||
|
use mito2::sst::file_purger::{FilePurger, FilePurgerRef};
|
||||||
|
use mito2::sst::index::intermediate::IntermediateManager;
|
||||||
|
use mito2::sst::index::puffin_manager::PuffinManagerFactory;
|
||||||
|
use mito2::sst::parquet::reader::ParquetReaderBuilder;
|
||||||
|
use mito2::sst::parquet::{PARQUET_METADATA_KEY, WriteOptions};
|
||||||
|
use mito2::worker::write_cache_from_config;
|
||||||
|
use object_store::ObjectStore;
|
||||||
|
use regex::Regex;
|
||||||
|
use snafu::OptionExt;
|
||||||
|
use store_api::metadata::{RegionMetadata, RegionMetadataRef};
|
||||||
|
use store_api::path_utils::region_name;
|
||||||
|
use store_api::region_request::PathType;
|
||||||
|
use store_api::storage::FileId;
|
||||||
|
|
||||||
|
use crate::datanode::{StorageConfig, StorageConfigWrapper};
|
||||||
|
use crate::error;
|
||||||
|
|
||||||
|
/// Object storage benchmark command
|
||||||
|
#[derive(Debug, Parser)]
|
||||||
|
pub struct ObjbenchCommand {
|
||||||
|
/// Path to the object-store config file (TOML). Must deserialize into object_store::config::ObjectStoreConfig.
|
||||||
|
#[clap(long, value_name = "FILE")]
|
||||||
|
pub config: PathBuf,
|
||||||
|
|
||||||
|
/// Source SST file path in object-store (e.g. "region_dir/<uuid>.parquet").
|
||||||
|
#[clap(long, value_name = "PATH")]
|
||||||
|
pub source: String,
|
||||||
|
|
||||||
|
/// Verbose output
|
||||||
|
#[clap(short, long, default_value_t = false)]
|
||||||
|
pub verbose: bool,
|
||||||
|
|
||||||
|
/// Output file path for pprof flamegraph (enables profiling)
|
||||||
|
#[clap(long, value_name = "FILE")]
|
||||||
|
pub pprof_file: Option<PathBuf>,
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_config(config_path: &PathBuf) -> error::Result<(StorageConfig, MitoConfig)> {
|
||||||
|
let cfg_str = std::fs::read_to_string(config_path).map_err(|e| {
|
||||||
|
error::IllegalConfigSnafu {
|
||||||
|
msg: format!("failed to read config {}: {e}", config_path.display()),
|
||||||
|
}
|
||||||
|
.build()
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let store_cfg: StorageConfigWrapper = toml::from_str(&cfg_str).map_err(|e| {
|
||||||
|
error::IllegalConfigSnafu {
|
||||||
|
msg: format!("failed to parse config {}: {e}", config_path.display()),
|
||||||
|
}
|
||||||
|
.build()
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let storage_config = store_cfg.storage;
|
||||||
|
let mito_engine_config = store_cfg
|
||||||
|
.region_engine
|
||||||
|
.into_iter()
|
||||||
|
.filter_map(|c| {
|
||||||
|
if let RegionEngineConfig::Mito(mito) = c {
|
||||||
|
Some(mito)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.next()
|
||||||
|
.with_context(|| error::IllegalConfigSnafu {
|
||||||
|
msg: format!("Engine config not found in {:?}", config_path),
|
||||||
|
})?;
|
||||||
|
Ok((storage_config, mito_engine_config))
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ObjbenchCommand {
|
||||||
|
pub async fn run(&self) -> error::Result<()> {
|
||||||
|
if self.verbose {
|
||||||
|
common_telemetry::init_default_ut_logging();
|
||||||
|
}
|
||||||
|
|
||||||
|
println!("{}", "Starting objbench with config:".cyan().bold());
|
||||||
|
|
||||||
|
// Build object store from config
|
||||||
|
let (store_cfg, mut mito_engine_config) = parse_config(&self.config)?;
|
||||||
|
|
||||||
|
let object_store = build_object_store(&store_cfg).await?;
|
||||||
|
println!("{} Object store initialized", "✓".green());
|
||||||
|
|
||||||
|
// Prepare source identifiers
|
||||||
|
let components = parse_file_dir_components(&self.source)?;
|
||||||
|
println!(
|
||||||
|
"{} Source path parsed: {}, components: {:?}",
|
||||||
|
"✓".green(),
|
||||||
|
self.source,
|
||||||
|
components
|
||||||
|
);
|
||||||
|
|
||||||
|
// Load parquet metadata to extract RegionMetadata and file stats
|
||||||
|
println!("{}", "Loading parquet metadata...".yellow());
|
||||||
|
let file_size = object_store
|
||||||
|
.stat(&self.source)
|
||||||
|
.await
|
||||||
|
.map_err(|e| {
|
||||||
|
error::IllegalConfigSnafu {
|
||||||
|
msg: format!("stat failed: {e}"),
|
||||||
|
}
|
||||||
|
.build()
|
||||||
|
})?
|
||||||
|
.content_length();
|
||||||
|
let parquet_meta = load_parquet_metadata(object_store.clone(), &self.source, file_size)
|
||||||
|
.await
|
||||||
|
.map_err(|e| {
|
||||||
|
error::IllegalConfigSnafu {
|
||||||
|
msg: format!("read parquet metadata failed: {e}"),
|
||||||
|
}
|
||||||
|
.build()
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let region_meta = extract_region_metadata(&self.source, &parquet_meta)?;
|
||||||
|
let num_rows = parquet_meta.file_metadata().num_rows() as u64;
|
||||||
|
let num_row_groups = parquet_meta.num_row_groups() as u64;
|
||||||
|
|
||||||
|
println!(
|
||||||
|
"{} Metadata loaded - rows: {}, size: {} bytes",
|
||||||
|
"✓".green(),
|
||||||
|
num_rows,
|
||||||
|
file_size
|
||||||
|
);
|
||||||
|
|
||||||
|
// Build a FileHandle for the source file
|
||||||
|
let file_meta = FileMeta {
|
||||||
|
region_id: region_meta.region_id,
|
||||||
|
file_id: components.file_id,
|
||||||
|
time_range: Default::default(),
|
||||||
|
level: 0,
|
||||||
|
file_size,
|
||||||
|
available_indexes: Default::default(),
|
||||||
|
indexes: Default::default(),
|
||||||
|
index_file_size: 0,
|
||||||
|
index_file_id: None,
|
||||||
|
num_rows,
|
||||||
|
num_row_groups,
|
||||||
|
sequence: None,
|
||||||
|
partition_expr: None,
|
||||||
|
num_series: 0,
|
||||||
|
};
|
||||||
|
let src_handle = FileHandle::new(file_meta, new_noop_file_purger());
|
||||||
|
|
||||||
|
// Build the reader for a single file via ParquetReaderBuilder
|
||||||
|
let table_dir = components.table_dir();
|
||||||
|
let (src_access_layer, cache_manager) = build_access_layer_simple(
|
||||||
|
&components,
|
||||||
|
object_store.clone(),
|
||||||
|
&mut mito_engine_config,
|
||||||
|
&store_cfg.data_home,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
let reader_build_start = Instant::now();
|
||||||
|
|
||||||
|
let reader = ParquetReaderBuilder::new(
|
||||||
|
table_dir,
|
||||||
|
components.path_type,
|
||||||
|
src_handle.clone(),
|
||||||
|
object_store.clone(),
|
||||||
|
)
|
||||||
|
.expected_metadata(Some(region_meta.clone()))
|
||||||
|
.build()
|
||||||
|
.await
|
||||||
|
.map_err(|e| {
|
||||||
|
error::IllegalConfigSnafu {
|
||||||
|
msg: format!("build reader failed: {e:?}"),
|
||||||
|
}
|
||||||
|
.build()
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let reader_build_elapsed = reader_build_start.elapsed();
|
||||||
|
let total_rows = reader.parquet_metadata().file_metadata().num_rows();
|
||||||
|
println!("{} Reader built in {:?}", "✓".green(), reader_build_elapsed);
|
||||||
|
|
||||||
|
// Build write request
|
||||||
|
let fulltext_index_config = FulltextIndexConfig {
|
||||||
|
create_on_compaction: Mode::Disable,
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
|
||||||
|
let write_req = SstWriteRequest {
|
||||||
|
op_type: OperationType::Flush,
|
||||||
|
metadata: region_meta,
|
||||||
|
source: Either::Left(Source::Reader(Box::new(reader))),
|
||||||
|
cache_manager,
|
||||||
|
storage: None,
|
||||||
|
max_sequence: None,
|
||||||
|
index_options: Default::default(),
|
||||||
|
index_config: mito_engine_config.index.clone(),
|
||||||
|
inverted_index_config: MitoConfig::default().inverted_index,
|
||||||
|
fulltext_index_config,
|
||||||
|
bloom_filter_index_config: MitoConfig::default().bloom_filter_index,
|
||||||
|
};
|
||||||
|
|
||||||
|
// Write SST
|
||||||
|
println!("{}", "Writing SST...".yellow());
|
||||||
|
|
||||||
|
// Start profiling if pprof_file is specified
|
||||||
|
#[cfg(unix)]
|
||||||
|
let profiler_guard = if self.pprof_file.is_some() {
|
||||||
|
println!("{} Starting profiling...", "⚡".yellow());
|
||||||
|
Some(
|
||||||
|
pprof::ProfilerGuardBuilder::default()
|
||||||
|
.frequency(99)
|
||||||
|
.blocklist(&["libc", "libgcc", "pthread", "vdso"])
|
||||||
|
.build()
|
||||||
|
.map_err(|e| {
|
||||||
|
error::IllegalConfigSnafu {
|
||||||
|
msg: format!("Failed to start profiler: {e}"),
|
||||||
|
}
|
||||||
|
.build()
|
||||||
|
})?,
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
|
||||||
|
#[cfg(not(unix))]
|
||||||
|
if self.pprof_file.is_some() {
|
||||||
|
eprintln!(
|
||||||
|
"{}: Profiling is not supported on this platform",
|
||||||
|
"Warning".yellow()
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
let write_start = Instant::now();
|
||||||
|
let mut metrics = Metrics::new(WriteType::Flush);
|
||||||
|
let infos = src_access_layer
|
||||||
|
.write_sst(write_req, &WriteOptions::default(), &mut metrics)
|
||||||
|
.await
|
||||||
|
.map_err(|e| {
|
||||||
|
error::IllegalConfigSnafu {
|
||||||
|
msg: format!("write_sst failed: {e:?}"),
|
||||||
|
}
|
||||||
|
.build()
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let write_elapsed = write_start.elapsed();
|
||||||
|
|
||||||
|
// Stop profiling and generate flamegraph if enabled
|
||||||
|
#[cfg(unix)]
|
||||||
|
if let (Some(guard), Some(pprof_file)) = (profiler_guard, &self.pprof_file) {
|
||||||
|
println!("{} Generating flamegraph...", "🔥".yellow());
|
||||||
|
match guard.report().build() {
|
||||||
|
Ok(report) => {
|
||||||
|
let mut flamegraph_data = Vec::new();
|
||||||
|
if let Err(e) = report.flamegraph(&mut flamegraph_data) {
|
||||||
|
println!("{}: Failed to generate flamegraph: {}", "Error".red(), e);
|
||||||
|
} else if let Err(e) = std::fs::write(pprof_file, flamegraph_data) {
|
||||||
|
println!(
|
||||||
|
"{}: Failed to write flamegraph to {}: {}",
|
||||||
|
"Error".red(),
|
||||||
|
pprof_file.display(),
|
||||||
|
e
|
||||||
|
);
|
||||||
|
} else {
|
||||||
|
println!(
|
||||||
|
"{} Flamegraph saved to {}",
|
||||||
|
"✓".green(),
|
||||||
|
pprof_file.display().to_string().cyan()
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
println!("{}: Failed to generate pprof report: {}", "Error".red(), e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
assert_eq!(infos.len(), 1);
|
||||||
|
let dst_file_id = infos[0].file_id;
|
||||||
|
let dst_file_path = format!("{}/{}.parquet", components.region_dir(), dst_file_id);
|
||||||
|
let mut dst_index_path = None;
|
||||||
|
if infos[0].index_metadata.file_size > 0 {
|
||||||
|
dst_index_path = Some(format!(
|
||||||
|
"{}/index/{}.puffin",
|
||||||
|
components.region_dir(),
|
||||||
|
dst_file_id
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Report results with ANSI colors
|
||||||
|
println!("\n{} {}", "Write complete!".green().bold(), "✓".green());
|
||||||
|
println!(" {}: {}", "Destination file".bold(), dst_file_path.cyan());
|
||||||
|
println!(" {}: {}", "Rows".bold(), total_rows.to_string().cyan());
|
||||||
|
println!(
|
||||||
|
" {}: {}",
|
||||||
|
"File size".bold(),
|
||||||
|
format!("{} bytes", file_size).cyan()
|
||||||
|
);
|
||||||
|
println!(
|
||||||
|
" {}: {:?}",
|
||||||
|
"Reader build time".bold(),
|
||||||
|
reader_build_elapsed
|
||||||
|
);
|
||||||
|
println!(" {}: {:?}", "Total time".bold(), write_elapsed);
|
||||||
|
|
||||||
|
// Print metrics in a formatted way
|
||||||
|
println!(" {}: {:?}", "Metrics".bold(), metrics,);
|
||||||
|
|
||||||
|
// Print infos
|
||||||
|
println!(" {}: {:?}", "Index".bold(), infos[0].index_metadata);
|
||||||
|
|
||||||
|
// Cleanup
|
||||||
|
println!("\n{}", "Cleaning up...".yellow());
|
||||||
|
object_store.delete(&dst_file_path).await.map_err(|e| {
|
||||||
|
error::IllegalConfigSnafu {
|
||||||
|
msg: format!("Failed to delete dest file {}: {}", dst_file_path, e),
|
||||||
|
}
|
||||||
|
.build()
|
||||||
|
})?;
|
||||||
|
println!("{} Temporary file {} deleted", "✓".green(), dst_file_path);
|
||||||
|
|
||||||
|
if let Some(index_path) = dst_index_path {
|
||||||
|
object_store.delete(&index_path).await.map_err(|e| {
|
||||||
|
error::IllegalConfigSnafu {
|
||||||
|
msg: format!("Failed to delete dest index file {}: {}", index_path, e),
|
||||||
|
}
|
||||||
|
.build()
|
||||||
|
})?;
|
||||||
|
println!(
|
||||||
|
"{} Temporary index file {} deleted",
|
||||||
|
"✓".green(),
|
||||||
|
index_path
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
println!("\n{}", "Benchmark completed successfully!".green().bold());
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
struct FileDirComponents {
|
||||||
|
catalog: String,
|
||||||
|
schema: String,
|
||||||
|
table_id: u32,
|
||||||
|
region_sequence: u32,
|
||||||
|
path_type: PathType,
|
||||||
|
file_id: FileId,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FileDirComponents {
|
||||||
|
fn table_dir(&self) -> String {
|
||||||
|
format!("data/{}/{}/{}", self.catalog, self.schema, self.table_id)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn region_dir(&self) -> String {
|
||||||
|
let region_name = region_name(self.table_id, self.region_sequence);
|
||||||
|
match self.path_type {
|
||||||
|
PathType::Bare => {
|
||||||
|
format!(
|
||||||
|
"data/{}/{}/{}/{}",
|
||||||
|
self.catalog, self.schema, self.table_id, region_name
|
||||||
|
)
|
||||||
|
}
|
||||||
|
PathType::Data => {
|
||||||
|
format!(
|
||||||
|
"data/{}/{}/{}/{}/data",
|
||||||
|
self.catalog, self.schema, self.table_id, region_name
|
||||||
|
)
|
||||||
|
}
|
||||||
|
PathType::Metadata => {
|
||||||
|
format!(
|
||||||
|
"data/{}/{}/{}/{}/metadata",
|
||||||
|
self.catalog, self.schema, self.table_id, region_name
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_file_dir_components(path: &str) -> error::Result<FileDirComponents> {
|
||||||
|
// Define the regex pattern to match all three path styles
|
||||||
|
let pattern =
|
||||||
|
r"^data/([^/]+)/([^/]+)/([^/]+)/([^/]+)_([^/]+)(?:/data|/metadata)?/(.+).parquet$";
|
||||||
|
|
||||||
|
// Compile the regex
|
||||||
|
let re = Regex::new(pattern).expect("Invalid regex pattern");
|
||||||
|
|
||||||
|
// Determine the path type
|
||||||
|
let path_type = if path.contains("/data/") {
|
||||||
|
PathType::Data
|
||||||
|
} else if path.contains("/metadata/") {
|
||||||
|
PathType::Metadata
|
||||||
|
} else {
|
||||||
|
PathType::Bare
|
||||||
|
};
|
||||||
|
|
||||||
|
// Try to match the path
|
||||||
|
let components = (|| {
|
||||||
|
let captures = re.captures(path)?;
|
||||||
|
if captures.len() != 7 {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
let mut components = FileDirComponents {
|
||||||
|
catalog: "".to_string(),
|
||||||
|
schema: "".to_string(),
|
||||||
|
table_id: 0,
|
||||||
|
region_sequence: 0,
|
||||||
|
path_type,
|
||||||
|
file_id: FileId::default(),
|
||||||
|
};
|
||||||
|
// Extract the components
|
||||||
|
components.catalog = captures.get(1)?.as_str().to_string();
|
||||||
|
components.schema = captures.get(2)?.as_str().to_string();
|
||||||
|
components.table_id = captures[3].parse().ok()?;
|
||||||
|
components.region_sequence = captures[5].parse().ok()?;
|
||||||
|
let file_id_str = &captures[6];
|
||||||
|
components.file_id = FileId::parse_str(file_id_str).ok()?;
|
||||||
|
Some(components)
|
||||||
|
})();
|
||||||
|
components.context(error::IllegalConfigSnafu {
|
||||||
|
msg: format!("Expect valid source file path, got: {}", path),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn extract_region_metadata(
|
||||||
|
file_path: &str,
|
||||||
|
meta: &parquet::file::metadata::ParquetMetaData,
|
||||||
|
) -> error::Result<RegionMetadataRef> {
|
||||||
|
use parquet::format::KeyValue;
|
||||||
|
let kvs: Option<&Vec<KeyValue>> = meta.file_metadata().key_value_metadata();
|
||||||
|
let Some(kvs) = kvs else {
|
||||||
|
return Err(error::IllegalConfigSnafu {
|
||||||
|
msg: format!("{file_path}: missing parquet key_value metadata"),
|
||||||
|
}
|
||||||
|
.build());
|
||||||
|
};
|
||||||
|
let json = kvs
|
||||||
|
.iter()
|
||||||
|
.find(|kv| kv.key == PARQUET_METADATA_KEY)
|
||||||
|
.and_then(|kv| kv.value.as_ref())
|
||||||
|
.ok_or_else(|| {
|
||||||
|
error::IllegalConfigSnafu {
|
||||||
|
msg: format!("{file_path}: key {PARQUET_METADATA_KEY} not found or empty"),
|
||||||
|
}
|
||||||
|
.build()
|
||||||
|
})?;
|
||||||
|
let region: RegionMetadata = RegionMetadata::from_json(json).map_err(|e| {
|
||||||
|
error::IllegalConfigSnafu {
|
||||||
|
msg: format!("invalid region metadata json: {e}"),
|
||||||
|
}
|
||||||
|
.build()
|
||||||
|
})?;
|
||||||
|
Ok(Arc::new(region))
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn build_object_store(sc: &StorageConfig) -> error::Result<ObjectStore> {
|
||||||
|
store::new_object_store(sc.store.clone(), &sc.data_home)
|
||||||
|
.await
|
||||||
|
.map_err(|e| {
|
||||||
|
error::IllegalConfigSnafu {
|
||||||
|
msg: format!("Failed to build object store: {e:?}"),
|
||||||
|
}
|
||||||
|
.build()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn build_access_layer_simple(
|
||||||
|
components: &FileDirComponents,
|
||||||
|
object_store: ObjectStore,
|
||||||
|
config: &mut MitoConfig,
|
||||||
|
data_home: &str,
|
||||||
|
) -> error::Result<(AccessLayerRef, CacheManagerRef)> {
|
||||||
|
let _ = config.index.sanitize(data_home, &config.inverted_index);
|
||||||
|
let puffin_manager = PuffinManagerFactory::new(
|
||||||
|
&config.index.aux_path,
|
||||||
|
config.index.staging_size.as_bytes(),
|
||||||
|
Some(config.index.write_buffer_size.as_bytes() as _),
|
||||||
|
config.index.staging_ttl,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.map_err(|e| {
|
||||||
|
error::IllegalConfigSnafu {
|
||||||
|
msg: format!("Failed to build access layer: {e:?}"),
|
||||||
|
}
|
||||||
|
.build()
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let intermediate_manager = IntermediateManager::init_fs(&config.index.aux_path)
|
||||||
|
.await
|
||||||
|
.map_err(|e| {
|
||||||
|
error::IllegalConfigSnafu {
|
||||||
|
msg: format!("Failed to build IntermediateManager: {e:?}"),
|
||||||
|
}
|
||||||
|
.build()
|
||||||
|
})?
|
||||||
|
.with_buffer_size(Some(config.index.write_buffer_size.as_bytes() as _));
|
||||||
|
|
||||||
|
let cache_manager =
|
||||||
|
build_cache_manager(config, puffin_manager.clone(), intermediate_manager.clone()).await?;
|
||||||
|
let layer = AccessLayer::new(
|
||||||
|
components.table_dir(),
|
||||||
|
components.path_type,
|
||||||
|
object_store,
|
||||||
|
puffin_manager,
|
||||||
|
intermediate_manager,
|
||||||
|
);
|
||||||
|
Ok((Arc::new(layer), cache_manager))
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn build_cache_manager(
|
||||||
|
config: &MitoConfig,
|
||||||
|
puffin_manager: PuffinManagerFactory,
|
||||||
|
intermediate_manager: IntermediateManager,
|
||||||
|
) -> error::Result<CacheManagerRef> {
|
||||||
|
let write_cache = write_cache_from_config(config, puffin_manager, intermediate_manager)
|
||||||
|
.await
|
||||||
|
.map_err(|e| {
|
||||||
|
error::IllegalConfigSnafu {
|
||||||
|
msg: format!("Failed to build write cache: {e:?}"),
|
||||||
|
}
|
||||||
|
.build()
|
||||||
|
})?;
|
||||||
|
let cache_manager = Arc::new(
|
||||||
|
CacheManager::builder()
|
||||||
|
.sst_meta_cache_size(config.sst_meta_cache_size.as_bytes())
|
||||||
|
.vector_cache_size(config.vector_cache_size.as_bytes())
|
||||||
|
.page_cache_size(config.page_cache_size.as_bytes())
|
||||||
|
.selector_result_cache_size(config.selector_result_cache_size.as_bytes())
|
||||||
|
.index_metadata_size(config.index.metadata_cache_size.as_bytes())
|
||||||
|
.index_content_size(config.index.content_cache_size.as_bytes())
|
||||||
|
.index_content_page_size(config.index.content_cache_page_size.as_bytes())
|
||||||
|
.index_result_cache_size(config.index.result_cache_size.as_bytes())
|
||||||
|
.puffin_metadata_size(config.index.metadata_cache_size.as_bytes())
|
||||||
|
.write_cache(write_cache)
|
||||||
|
.build(),
|
||||||
|
);
|
||||||
|
Ok(cache_manager)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_noop_file_purger() -> FilePurgerRef {
|
||||||
|
#[derive(Debug)]
|
||||||
|
struct Noop;
|
||||||
|
impl FilePurger for Noop {
|
||||||
|
fn remove_file(&self, _file_meta: FileMeta, _is_delete: bool) {}
|
||||||
|
}
|
||||||
|
Arc::new(Noop)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn load_parquet_metadata(
|
||||||
|
object_store: ObjectStore,
|
||||||
|
path: &str,
|
||||||
|
file_size: u64,
|
||||||
|
) -> Result<parquet::file::metadata::ParquetMetaData, Box<dyn std::error::Error + Send + Sync>> {
|
||||||
|
use parquet::file::FOOTER_SIZE;
|
||||||
|
use parquet::file::metadata::ParquetMetaDataReader;
|
||||||
|
let actual_size = if file_size == 0 {
|
||||||
|
object_store.stat(path).await?.content_length()
|
||||||
|
} else {
|
||||||
|
file_size
|
||||||
|
};
|
||||||
|
if actual_size < FOOTER_SIZE as u64 {
|
||||||
|
return Err("file too small".into());
|
||||||
|
}
|
||||||
|
let prefetch: u64 = 64 * 1024;
|
||||||
|
let start = actual_size.saturating_sub(prefetch);
|
||||||
|
let buffer = object_store
|
||||||
|
.read_with(path)
|
||||||
|
.range(start..actual_size)
|
||||||
|
.await?
|
||||||
|
.to_vec();
|
||||||
|
let buffer_len = buffer.len();
|
||||||
|
let mut footer = [0; 8];
|
||||||
|
footer.copy_from_slice(&buffer[buffer_len - FOOTER_SIZE..]);
|
||||||
|
let footer = ParquetMetaDataReader::decode_footer_tail(&footer)?;
|
||||||
|
let metadata_len = footer.metadata_length() as u64;
|
||||||
|
if actual_size - (FOOTER_SIZE as u64) < metadata_len {
|
||||||
|
return Err("invalid footer/metadata length".into());
|
||||||
|
}
|
||||||
|
if (metadata_len as usize) <= buffer_len - FOOTER_SIZE {
|
||||||
|
let metadata_start = buffer_len - metadata_len as usize - FOOTER_SIZE;
|
||||||
|
let meta = ParquetMetaDataReader::decode_metadata(
|
||||||
|
&buffer[metadata_start..buffer_len - FOOTER_SIZE],
|
||||||
|
)?;
|
||||||
|
Ok(meta)
|
||||||
|
} else {
|
||||||
|
let metadata_start = actual_size - metadata_len - FOOTER_SIZE as u64;
|
||||||
|
let data = object_store
|
||||||
|
.read_with(path)
|
||||||
|
.range(metadata_start..(actual_size - FOOTER_SIZE as u64))
|
||||||
|
.await?
|
||||||
|
.to_vec();
|
||||||
|
let meta = ParquetMetaDataReader::decode_metadata(&data)?;
|
||||||
|
Ok(meta)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use std::path::PathBuf;
|
||||||
|
use std::str::FromStr;
|
||||||
|
|
||||||
|
use common_base::readable_size::ReadableSize;
|
||||||
|
use store_api::region_request::PathType;
|
||||||
|
|
||||||
|
use crate::datanode::objbench::{parse_config, parse_file_dir_components};
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_parse_dir() {
|
||||||
|
let meta_path = "data/greptime/public/1024/1024_0000000000/metadata/00020380-009c-426d-953e-b4e34c15af34.parquet";
|
||||||
|
let c = parse_file_dir_components(meta_path).unwrap();
|
||||||
|
assert_eq!(
|
||||||
|
c.file_id.to_string(),
|
||||||
|
"00020380-009c-426d-953e-b4e34c15af34"
|
||||||
|
);
|
||||||
|
assert_eq!(c.catalog, "greptime");
|
||||||
|
assert_eq!(c.schema, "public");
|
||||||
|
assert_eq!(c.table_id, 1024);
|
||||||
|
assert_eq!(c.region_sequence, 0);
|
||||||
|
assert_eq!(c.path_type, PathType::Metadata);
|
||||||
|
|
||||||
|
let c = parse_file_dir_components(
|
||||||
|
"data/greptime/public/1024/1024_0000000000/data/00020380-009c-426d-953e-b4e34c15af34.parquet",
|
||||||
|
).unwrap();
|
||||||
|
assert_eq!(
|
||||||
|
c.file_id.to_string(),
|
||||||
|
"00020380-009c-426d-953e-b4e34c15af34"
|
||||||
|
);
|
||||||
|
assert_eq!(c.catalog, "greptime");
|
||||||
|
assert_eq!(c.schema, "public");
|
||||||
|
assert_eq!(c.table_id, 1024);
|
||||||
|
assert_eq!(c.region_sequence, 0);
|
||||||
|
assert_eq!(c.path_type, PathType::Data);
|
||||||
|
|
||||||
|
let c = parse_file_dir_components(
|
||||||
|
"data/greptime/public/1024/1024_0000000000/00020380-009c-426d-953e-b4e34c15af34.parquet",
|
||||||
|
).unwrap();
|
||||||
|
assert_eq!(
|
||||||
|
c.file_id.to_string(),
|
||||||
|
"00020380-009c-426d-953e-b4e34c15af34"
|
||||||
|
);
|
||||||
|
assert_eq!(c.catalog, "greptime");
|
||||||
|
assert_eq!(c.schema, "public");
|
||||||
|
assert_eq!(c.table_id, 1024);
|
||||||
|
assert_eq!(c.region_sequence, 0);
|
||||||
|
assert_eq!(c.path_type, PathType::Bare);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_parse_config() {
|
||||||
|
let path = "../../config/datanode.example.toml";
|
||||||
|
let (storage, engine) = parse_config(&PathBuf::from_str(path).unwrap()).unwrap();
|
||||||
|
assert_eq!(storage.data_home, "./greptimedb_data");
|
||||||
|
assert_eq!(engine.index.staging_size, ReadableSize::gb(2));
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -99,13 +99,6 @@ pub enum Error {
|
|||||||
source: flow::Error,
|
source: flow::Error,
|
||||||
},
|
},
|
||||||
|
|
||||||
#[snafu(display("Servers error"))]
|
|
||||||
Servers {
|
|
||||||
#[snafu(implicit)]
|
|
||||||
location: Location,
|
|
||||||
source: servers::error::Error,
|
|
||||||
},
|
|
||||||
|
|
||||||
#[snafu(display("Failed to start frontend"))]
|
#[snafu(display("Failed to start frontend"))]
|
||||||
StartFrontend {
|
StartFrontend {
|
||||||
#[snafu(implicit)]
|
#[snafu(implicit)]
|
||||||
@@ -336,7 +329,6 @@ impl ErrorExt for Error {
|
|||||||
Error::ShutdownFrontend { source, .. } => source.status_code(),
|
Error::ShutdownFrontend { source, .. } => source.status_code(),
|
||||||
Error::StartMetaServer { source, .. } => source.status_code(),
|
Error::StartMetaServer { source, .. } => source.status_code(),
|
||||||
Error::ShutdownMetaServer { source, .. } => source.status_code(),
|
Error::ShutdownMetaServer { source, .. } => source.status_code(),
|
||||||
Error::Servers { source, .. } => source.status_code(),
|
|
||||||
Error::BuildMetaServer { source, .. } => source.status_code(),
|
Error::BuildMetaServer { source, .. } => source.status_code(),
|
||||||
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
|
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
|
||||||
Error::BuildCli { source, .. } => source.status_code(),
|
Error::BuildCli { source, .. } => source.status_code(),
|
||||||
|
|||||||
@@ -12,6 +12,7 @@
|
|||||||
// See the License for the specific language governing permissions and
|
// See the License for the specific language governing permissions and
|
||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
|
use std::fmt::Debug;
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
@@ -30,6 +31,7 @@ use common_meta::heartbeat::handler::invalidate_table_cache::InvalidateCacheHand
|
|||||||
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
|
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
|
||||||
use common_meta::key::TableMetadataManager;
|
use common_meta::key::TableMetadataManager;
|
||||||
use common_meta::key::flow::FlowMetadataManager;
|
use common_meta::key::flow::FlowMetadataManager;
|
||||||
|
use common_stat::ResourceStatImpl;
|
||||||
use common_telemetry::info;
|
use common_telemetry::info;
|
||||||
use common_telemetry::logging::{DEFAULT_LOGGING_DIR, TracingOptions};
|
use common_telemetry::logging::{DEFAULT_LOGGING_DIR, TracingOptions};
|
||||||
use common_version::{short_version, verbose_version};
|
use common_version::{short_version, verbose_version};
|
||||||
@@ -38,12 +40,14 @@ use flow::{
|
|||||||
get_flow_auth_options,
|
get_flow_auth_options,
|
||||||
};
|
};
|
||||||
use meta_client::{MetaClientOptions, MetaClientType};
|
use meta_client::{MetaClientOptions, MetaClientType};
|
||||||
|
use plugins::flownode::context::GrpcConfigureContext;
|
||||||
|
use servers::configurator::GrpcBuilderConfiguratorRef;
|
||||||
use snafu::{OptionExt, ResultExt, ensure};
|
use snafu::{OptionExt, ResultExt, ensure};
|
||||||
use tracing_appender::non_blocking::WorkerGuard;
|
use tracing_appender::non_blocking::WorkerGuard;
|
||||||
|
|
||||||
use crate::error::{
|
use crate::error::{
|
||||||
BuildCacheRegistrySnafu, InitMetadataSnafu, LoadLayeredConfigSnafu, MetaClientInitSnafu,
|
BuildCacheRegistrySnafu, InitMetadataSnafu, LoadLayeredConfigSnafu, MetaClientInitSnafu,
|
||||||
MissingConfigSnafu, Result, ShutdownFlownodeSnafu, StartFlownodeSnafu,
|
MissingConfigSnafu, OtherSnafu, Result, ShutdownFlownodeSnafu, StartFlownodeSnafu,
|
||||||
};
|
};
|
||||||
use crate::options::{GlobalOptions, GreptimeOptions};
|
use crate::options::{GlobalOptions, GreptimeOptions};
|
||||||
use crate::{App, create_resource_limit_metrics, log_versions, maybe_activate_heap_profile};
|
use crate::{App, create_resource_limit_metrics, log_versions, maybe_activate_heap_profile};
|
||||||
@@ -54,33 +58,14 @@ type FlownodeOptions = GreptimeOptions<flow::FlownodeOptions>;
|
|||||||
|
|
||||||
pub struct Instance {
|
pub struct Instance {
|
||||||
flownode: FlownodeInstance,
|
flownode: FlownodeInstance,
|
||||||
|
|
||||||
// The components of flownode, which make it easier to expand based
|
|
||||||
// on the components.
|
|
||||||
#[cfg(feature = "enterprise")]
|
|
||||||
components: Components,
|
|
||||||
|
|
||||||
// Keep the logging guard to prevent the worker from being dropped.
|
// Keep the logging guard to prevent the worker from being dropped.
|
||||||
_guard: Vec<WorkerGuard>,
|
_guard: Vec<WorkerGuard>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(feature = "enterprise")]
|
|
||||||
pub struct Components {
|
|
||||||
pub catalog_manager: catalog::CatalogManagerRef,
|
|
||||||
pub fe_client: Arc<FrontendClient>,
|
|
||||||
pub kv_backend: common_meta::kv_backend::KvBackendRef,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Instance {
|
impl Instance {
|
||||||
pub fn new(
|
pub fn new(flownode: FlownodeInstance, guard: Vec<WorkerGuard>) -> Self {
|
||||||
flownode: FlownodeInstance,
|
|
||||||
#[cfg(feature = "enterprise")] components: Components,
|
|
||||||
guard: Vec<WorkerGuard>,
|
|
||||||
) -> Self {
|
|
||||||
Self {
|
Self {
|
||||||
flownode,
|
flownode,
|
||||||
#[cfg(feature = "enterprise")]
|
|
||||||
components,
|
|
||||||
_guard: guard,
|
_guard: guard,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -93,11 +78,6 @@ impl Instance {
|
|||||||
pub fn flownode_mut(&mut self) -> &mut FlownodeInstance {
|
pub fn flownode_mut(&mut self) -> &mut FlownodeInstance {
|
||||||
&mut self.flownode
|
&mut self.flownode
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(feature = "enterprise")]
|
|
||||||
pub fn components(&self) -> &Components {
|
|
||||||
&self.components
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait::async_trait]
|
#[async_trait::async_trait]
|
||||||
@@ -372,11 +352,15 @@ impl StartCommand {
|
|||||||
Arc::new(InvalidateCacheHandler::new(layered_cache_registry.clone())),
|
Arc::new(InvalidateCacheHandler::new(layered_cache_registry.clone())),
|
||||||
]);
|
]);
|
||||||
|
|
||||||
|
let mut resource_stat = ResourceStatImpl::default();
|
||||||
|
resource_stat.start_collect_cpu_usage();
|
||||||
|
|
||||||
let heartbeat_task = flow::heartbeat::HeartbeatTask::new(
|
let heartbeat_task = flow::heartbeat::HeartbeatTask::new(
|
||||||
&opts,
|
&opts,
|
||||||
meta_client.clone(),
|
meta_client.clone(),
|
||||||
opts.heartbeat.clone(),
|
opts.heartbeat.clone(),
|
||||||
Arc::new(executor),
|
Arc::new(executor),
|
||||||
|
Arc::new(resource_stat),
|
||||||
);
|
);
|
||||||
|
|
||||||
let flow_metadata_manager = Arc::new(FlowMetadataManager::new(cached_meta_backend.clone()));
|
let flow_metadata_manager = Arc::new(FlowMetadataManager::new(cached_meta_backend.clone()));
|
||||||
@@ -391,7 +375,7 @@ impl StartCommand {
|
|||||||
let frontend_client = Arc::new(frontend_client);
|
let frontend_client = Arc::new(frontend_client);
|
||||||
let flownode_builder = FlownodeBuilder::new(
|
let flownode_builder = FlownodeBuilder::new(
|
||||||
opts.clone(),
|
opts.clone(),
|
||||||
plugins,
|
plugins.clone(),
|
||||||
table_metadata_manager,
|
table_metadata_manager,
|
||||||
catalog_manager.clone(),
|
catalog_manager.clone(),
|
||||||
flow_metadata_manager,
|
flow_metadata_manager,
|
||||||
@@ -400,8 +384,29 @@ impl StartCommand {
|
|||||||
.with_heartbeat_task(heartbeat_task);
|
.with_heartbeat_task(heartbeat_task);
|
||||||
|
|
||||||
let mut flownode = flownode_builder.build().await.context(StartFlownodeSnafu)?;
|
let mut flownode = flownode_builder.build().await.context(StartFlownodeSnafu)?;
|
||||||
|
|
||||||
|
let builder =
|
||||||
|
FlownodeServiceBuilder::grpc_server_builder(&opts, flownode.flownode_server());
|
||||||
|
let builder = if let Some(configurator) =
|
||||||
|
plugins.get::<GrpcBuilderConfiguratorRef<GrpcConfigureContext>>()
|
||||||
|
{
|
||||||
|
let context = GrpcConfigureContext {
|
||||||
|
kv_backend: cached_meta_backend.clone(),
|
||||||
|
fe_client: frontend_client.clone(),
|
||||||
|
flownode_id: member_id,
|
||||||
|
catalog_manager: catalog_manager.clone(),
|
||||||
|
};
|
||||||
|
configurator
|
||||||
|
.configure(builder, context)
|
||||||
|
.await
|
||||||
|
.context(OtherSnafu)?
|
||||||
|
} else {
|
||||||
|
builder
|
||||||
|
};
|
||||||
|
let grpc_server = builder.build();
|
||||||
|
|
||||||
let services = FlownodeServiceBuilder::new(&opts)
|
let services = FlownodeServiceBuilder::new(&opts)
|
||||||
.with_default_grpc_server(flownode.flownode_server())
|
.with_grpc_server(grpc_server)
|
||||||
.enable_http_service()
|
.enable_http_service()
|
||||||
.build()
|
.build()
|
||||||
.context(StartFlownodeSnafu)?;
|
.context(StartFlownodeSnafu)?;
|
||||||
@@ -425,16 +430,6 @@ impl StartCommand {
|
|||||||
.set_frontend_invoker(invoker)
|
.set_frontend_invoker(invoker)
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
#[cfg(feature = "enterprise")]
|
Ok(Instance::new(flownode, guard))
|
||||||
let components = Components {
|
|
||||||
catalog_manager: catalog_manager.clone(),
|
|
||||||
fe_client: frontend_client,
|
|
||||||
kv_backend: cached_meta_backend,
|
|
||||||
};
|
|
||||||
|
|
||||||
#[cfg(not(feature = "enterprise"))]
|
|
||||||
return Ok(Instance::new(flownode, guard));
|
|
||||||
#[cfg(feature = "enterprise")]
|
|
||||||
Ok(Instance::new(flownode, components, guard))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -12,6 +12,7 @@
|
|||||||
// See the License for the specific language governing permissions and
|
// See the License for the specific language governing permissions and
|
||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
|
use std::fmt::Debug;
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
@@ -19,17 +20,23 @@ use std::time::Duration;
|
|||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
|
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
|
||||||
use catalog::information_extension::DistributedInformationExtension;
|
use catalog::information_extension::DistributedInformationExtension;
|
||||||
use catalog::kvbackend::{CachedKvBackendBuilder, KvBackendCatalogManagerBuilder, MetaKvBackend};
|
use catalog::kvbackend::{
|
||||||
|
CachedKvBackendBuilder, CatalogManagerConfiguratorRef, KvBackendCatalogManagerBuilder,
|
||||||
|
MetaKvBackend,
|
||||||
|
};
|
||||||
use catalog::process_manager::ProcessManager;
|
use catalog::process_manager::ProcessManager;
|
||||||
use clap::Parser;
|
use clap::Parser;
|
||||||
use client::client_manager::NodeClients;
|
use client::client_manager::NodeClients;
|
||||||
use common_base::Plugins;
|
use common_base::Plugins;
|
||||||
use common_config::{Configurable, DEFAULT_DATA_HOME};
|
use common_config::{Configurable, DEFAULT_DATA_HOME};
|
||||||
|
use common_error::ext::BoxedError;
|
||||||
use common_grpc::channel_manager::ChannelConfig;
|
use common_grpc::channel_manager::ChannelConfig;
|
||||||
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
|
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
|
||||||
use common_meta::heartbeat::handler::HandlerGroupExecutor;
|
use common_meta::heartbeat::handler::HandlerGroupExecutor;
|
||||||
use common_meta::heartbeat::handler::invalidate_table_cache::InvalidateCacheHandler;
|
use common_meta::heartbeat::handler::invalidate_table_cache::InvalidateCacheHandler;
|
||||||
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
|
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
|
||||||
|
use common_query::prelude::set_default_prefix;
|
||||||
|
use common_stat::ResourceStatImpl;
|
||||||
use common_telemetry::info;
|
use common_telemetry::info;
|
||||||
use common_telemetry::logging::{DEFAULT_LOGGING_DIR, TracingOptions};
|
use common_telemetry::logging::{DEFAULT_LOGGING_DIR, TracingOptions};
|
||||||
use common_time::timezone::set_default_timezone;
|
use common_time::timezone::set_default_timezone;
|
||||||
@@ -39,14 +46,16 @@ use frontend::heartbeat::HeartbeatTask;
|
|||||||
use frontend::instance::builder::FrontendBuilder;
|
use frontend::instance::builder::FrontendBuilder;
|
||||||
use frontend::server::Services;
|
use frontend::server::Services;
|
||||||
use meta_client::{MetaClientOptions, MetaClientType};
|
use meta_client::{MetaClientOptions, MetaClientType};
|
||||||
|
use plugins::frontend::context::{
|
||||||
|
CatalogManagerConfigureContext, DistributedCatalogManagerConfigureContext,
|
||||||
|
};
|
||||||
use servers::addrs;
|
use servers::addrs;
|
||||||
use servers::export_metrics::ExportMetricsTask;
|
|
||||||
use servers::grpc::GrpcOptions;
|
use servers::grpc::GrpcOptions;
|
||||||
 use servers::tls::{TlsMode, TlsOption};
 use snafu::{OptionExt, ResultExt};
 use tracing_appender::non_blocking::WorkerGuard;

-use crate::error::{self, Result};
+use crate::error::{self, OtherSnafu, Result};
 use crate::options::{GlobalOptions, GreptimeOptions};
 use crate::{App, create_resource_limit_metrics, log_versions, maybe_activate_heap_profile};

@@ -174,6 +183,8 @@ pub struct StartCommand {
     #[clap(long)]
     tls_key_path: Option<String>,
     #[clap(long)]
+    tls_watch: bool,
+    #[clap(long)]
     user_provider: Option<String>,
     #[clap(long)]
     disable_dashboard: Option<bool>,
@@ -227,6 +238,7 @@ impl StartCommand {
             self.tls_mode.clone(),
             self.tls_cert_path.clone(),
             self.tls_key_path.clone(),
+            self.tls_watch,
         );

         if let Some(addr) = &self.http_addr {
@@ -332,6 +344,9 @@ impl StartCommand {
             .context(error::StartFrontendSnafu)?;

         set_default_timezone(opts.default_timezone.as_deref()).context(error::InitTimezoneSnafu)?;
+        set_default_prefix(opts.default_column_prefix.as_deref())
+            .map_err(BoxedError::new)
+            .context(error::BuildCliSnafu)?;

         let meta_client_options = opts
             .meta_client
@@ -408,9 +423,18 @@ impl StartCommand {
             layered_cache_registry.clone(),
         )
         .with_process_manager(process_manager.clone());
-        #[cfg(feature = "enterprise")]
-        let builder = if let Some(factories) = plugins.get() {
-            builder.with_extra_information_table_factories(factories)
+        let builder = if let Some(configurator) =
+            plugins.get::<CatalogManagerConfiguratorRef<CatalogManagerConfigureContext>>()
+        {
+            let ctx = DistributedCatalogManagerConfigureContext {
+                meta_client: meta_client.clone(),
+            };
+            let ctx = CatalogManagerConfigureContext::Distributed(ctx);
+
+            configurator
+                .configure(builder, ctx)
+                .await
+                .context(OtherSnafu)?
         } else {
             builder
         };
@@ -421,11 +445,15 @@ impl StartCommand {
             Arc::new(InvalidateCacheHandler::new(layered_cache_registry.clone())),
         ]);

+        let mut resource_stat = ResourceStatImpl::default();
+        resource_stat.start_collect_cpu_usage();
+
         let heartbeat_task = HeartbeatTask::new(
             &opts,
             meta_client.clone(),
             opts.heartbeat.clone(),
             Arc::new(executor),
+            Arc::new(resource_stat),
         );
         let heartbeat_task = Some(heartbeat_task);

@@ -445,9 +473,6 @@ impl StartCommand {
             .context(error::StartFrontendSnafu)?;
         let instance = Arc::new(instance);

-        let export_metrics_task = ExportMetricsTask::try_new(&opts.export_metrics, Some(&plugins))
-            .context(error::ServersSnafu)?;
-
         let servers = Services::new(opts, instance.clone(), plugins)
             .build()
             .context(error::StartFrontendSnafu)?;
@@ -456,7 +481,6 @@ impl StartCommand {
             instance,
             servers,
             heartbeat_task,
-            export_metrics_task,
         };

         Ok(Instance::new(frontend, guard))

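Note: the builder hunk above replaces the `#[cfg(feature = "enterprise")]` compile-time branch with a runtime lookup of an optional configurator from `Plugins`. The actual GreptimeDB trait behind `CatalogManagerConfiguratorRef` is not shown in this diff, so the sketch below only illustrates the control-flow pattern with purely hypothetical `ExampleBuilder`/`ExampleContext`/`ExampleConfigurator` types.

use std::sync::Arc;

use async_trait::async_trait;

// Hypothetical stand-ins for the real builder, context and configurator types.
struct ExampleBuilder {
    extra_tables: Vec<String>,
}

struct ExampleContext {
    tenant: String,
}

#[async_trait]
trait ExampleConfigurator: Send + Sync {
    async fn configure(&self, builder: ExampleBuilder, ctx: ExampleContext) -> ExampleBuilder;
}

async fn apply_optional_configurator(
    builder: ExampleBuilder,
    configurator: Option<Arc<dyn ExampleConfigurator>>,
) -> ExampleBuilder {
    // Same shape as the diff: run the configurator when a plugin registered one,
    // otherwise fall through with the untouched builder.
    if let Some(configurator) = configurator {
        let ctx = ExampleContext {
            tenant: "demo".to_string(),
        };
        configurator.configure(builder, ctx).await
    } else {
        builder
    }
}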
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use std::fmt;
+use std::fmt::{self, Debug};
 use std::path::Path;
 use std::time::Duration;

@@ -23,7 +23,7 @@ use common_config::Configurable;
 use common_telemetry::info;
 use common_telemetry::logging::{DEFAULT_LOGGING_DIR, TracingOptions};
 use common_version::{short_version, verbose_version};
-use meta_srv::bootstrap::MetasrvInstance;
+use meta_srv::bootstrap::{MetasrvInstance, metasrv_builder};
 use meta_srv::metasrv::BackendImpl;
 use snafu::ResultExt;
 use tracing_appender::non_blocking::WorkerGuard;
@@ -177,7 +177,7 @@ pub struct StartCommand {
     backend: Option<BackendImpl>,
 }

-impl fmt::Debug for StartCommand {
+impl Debug for StartCommand {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         f.debug_struct("StartCommand")
             .field("rpc_bind_addr", &self.rpc_bind_addr)
@@ -341,7 +341,7 @@ impl StartCommand {
             .await
             .context(StartMetaServerSnafu)?;

-        let builder = meta_srv::bootstrap::metasrv_builder(&opts, plugins, None)
+        let builder = metasrv_builder(&opts, plugins, None)
             .await
             .context(error::BuildMetaServerSnafu)?;
         let metasrv = builder.build().await.context(error::BuildMetaServerSnafu)?;

@@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+use std::fmt::Debug;
 use std::net::SocketAddr;
 use std::path::Path;
 use std::sync::Arc;
@@ -20,7 +21,7 @@ use std::{fs, path};
 use async_trait::async_trait;
 use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
 use catalog::information_schema::InformationExtensionRef;
-use catalog::kvbackend::KvBackendCatalogManagerBuilder;
+use catalog::kvbackend::{CatalogManagerConfiguratorRef, KvBackendCatalogManagerBuilder};
 use catalog::process_manager::ProcessManager;
 use clap::Parser;
 use common_base::Plugins;
@@ -31,7 +32,7 @@ use common_meta::cache::LayeredCacheRegistryBuilder;
 use common_meta::ddl::flow_meta::FlowMetadataAllocator;
 use common_meta::ddl::table_meta::TableMetadataAllocator;
 use common_meta::ddl::{DdlContext, NoopRegionFailureDetectorControl};
-use common_meta::ddl_manager::DdlManager;
+use common_meta::ddl_manager::{DdlManager, DdlManagerConfiguratorRef};
 use common_meta::key::flow::FlowMetadataManager;
 use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
 use common_meta::kv_backend::KvBackendRef;
@@ -41,6 +42,7 @@ use common_meta::region_registry::LeaderRegionRegistry;
 use common_meta::sequence::SequenceBuilder;
 use common_meta::wal_options_allocator::{WalOptionsAllocatorRef, build_wal_options_allocator};
 use common_procedure::ProcedureManagerRef;
+use common_query::prelude::set_default_prefix;
 use common_telemetry::info;
 use common_telemetry::logging::{DEFAULT_LOGGING_DIR, TracingOptions};
 use common_time::timezone::set_default_timezone;
@@ -56,14 +58,17 @@ use frontend::instance::StandaloneDatanodeManager;
 use frontend::instance::builder::FrontendBuilder;
 use frontend::server::Services;
 use meta_srv::metasrv::{FLOW_ID_SEQ, TABLE_ID_SEQ};
-use servers::export_metrics::ExportMetricsTask;
+use plugins::frontend::context::{
+    CatalogManagerConfigureContext, StandaloneCatalogManagerConfigureContext,
+};
+use plugins::standalone::context::DdlManagerConfigureContext;
 use servers::tls::{TlsMode, TlsOption};
 use snafu::ResultExt;
 use standalone::StandaloneInformationExtension;
 use standalone::options::StandaloneOptions;
 use tracing_appender::non_blocking::WorkerGuard;

-use crate::error::{Result, StartFlownodeSnafu};
+use crate::error::{OtherSnafu, Result, StartFlownodeSnafu};
 use crate::options::{GlobalOptions, GreptimeOptions};
 use crate::{App, create_resource_limit_metrics, error, log_versions, maybe_activate_heap_profile};

@@ -116,34 +121,15 @@ pub struct Instance {
     flownode: FlownodeInstance,
     procedure_manager: ProcedureManagerRef,
     wal_options_allocator: WalOptionsAllocatorRef,

-    // The components of standalone, which make it easier to expand based
-    // on the components.
-    #[cfg(feature = "enterprise")]
-    components: Components,
-
     // Keep the logging guard to prevent the worker from being dropped.
     _guard: Vec<WorkerGuard>,
 }

-#[cfg(feature = "enterprise")]
-pub struct Components {
-    pub plugins: Plugins,
-    pub kv_backend: KvBackendRef,
-    pub frontend_client: Arc<FrontendClient>,
-    pub catalog_manager: catalog::CatalogManagerRef,
-}
-
 impl Instance {
     /// Find the socket addr of a server by its `name`.
     pub fn server_addr(&self, name: &str) -> Option<SocketAddr> {
         self.frontend.server_handlers().addr(name)
     }
-
-    #[cfg(feature = "enterprise")]
-    pub fn components(&self) -> &Components {
-        &self.components
-    }
 }

 #[async_trait]
@@ -227,6 +213,8 @@ pub struct StartCommand {
     #[clap(long)]
     tls_key_path: Option<String>,
     #[clap(long)]
+    tls_watch: bool,
+    #[clap(long)]
     user_provider: Option<String>,
     #[clap(long, default_value = "GREPTIMEDB_STANDALONE")]
     pub env_prefix: String,
@@ -276,6 +264,7 @@ impl StartCommand {
             self.tls_mode.clone(),
             self.tls_cert_path.clone(),
             self.tls_key_path.clone(),
+            self.tls_watch,
         );

         if let Some(addr) = &self.http_addr {
@@ -355,6 +344,10 @@ impl StartCommand {
         let mut plugins = Plugins::new();
         let plugin_opts = opts.plugins;
         let mut opts = opts.component;
+        set_default_prefix(opts.default_column_prefix.as_deref())
+            .map_err(BoxedError::new)
+            .context(error::BuildCliSnafu)?;
+
         opts.grpc.detect_server_addr();
         let fe_opts = opts.frontend_options();
         let dn_opts = opts.datanode_options();
@@ -408,6 +401,13 @@ impl StartCommand {
         plugins.insert::<InformationExtensionRef>(information_extension.clone());

         let process_manager = Arc::new(ProcessManager::new(opts.grpc.server_addr.clone(), None));
+
+        // for standalone not use grpc, but get a handler to frontend grpc client without
+        // actually make a connection
+        let (frontend_client, frontend_instance_handler) =
+            FrontendClient::from_empty_grpc_handler(opts.query.clone());
+        let frontend_client = Arc::new(frontend_client);
+
         let builder = KvBackendCatalogManagerBuilder::new(
             information_extension.clone(),
             kv_backend.clone(),
@@ -415,9 +415,17 @@ impl StartCommand {
         )
         .with_procedure_manager(procedure_manager.clone())
         .with_process_manager(process_manager.clone());
-        #[cfg(feature = "enterprise")]
-        let builder = if let Some(factories) = plugins.get() {
-            builder.with_extra_information_table_factories(factories)
+        let builder = if let Some(configurator) =
+            plugins.get::<CatalogManagerConfiguratorRef<CatalogManagerConfigureContext>>()
+        {
+            let ctx = StandaloneCatalogManagerConfigureContext {
+                fe_client: frontend_client.clone(),
+            };
+            let ctx = CatalogManagerConfigureContext::Standalone(ctx);
+            configurator
+                .configure(builder, ctx)
+                .await
+                .context(OtherSnafu)?
         } else {
             builder
         };
@@ -432,11 +440,6 @@ impl StartCommand {
             ..Default::default()
         };

-        // for standalone not use grpc, but get a handler to frontend grpc client without
-        // actually make a connection
-        let (frontend_client, frontend_instance_handler) =
-            FrontendClient::from_empty_grpc_handler(opts.query.clone());
-        let frontend_client = Arc::new(frontend_client);
         let flow_builder = FlownodeBuilder::new(
             flownode_options,
             plugins.clone(),
@@ -507,11 +510,21 @@ impl StartCommand {

         let ddl_manager = DdlManager::try_new(ddl_context, procedure_manager.clone(), true)
             .context(error::InitDdlManagerSnafu)?;
-        #[cfg(feature = "enterprise")]
-        let ddl_manager = {
-            let trigger_ddl_manager: Option<common_meta::ddl_manager::TriggerDdlManagerRef> =
-                plugins.get();
-            ddl_manager.with_trigger_ddl_manager(trigger_ddl_manager)
+        let ddl_manager = if let Some(configurator) =
+            plugins.get::<DdlManagerConfiguratorRef<DdlManagerConfigureContext>>()
+        {
+            let ctx = DdlManagerConfigureContext {
+                kv_backend: kv_backend.clone(),
+                fe_client: frontend_client.clone(),
+                catalog_manager: catalog_manager.clone(),
+            };
+            configurator
+                .configure(ddl_manager, ctx)
+                .await
+                .context(OtherSnafu)?
+        } else {
+            ddl_manager
         };

         let procedure_executor = Arc::new(LocalProcedureExecutor::new(
@@ -557,9 +570,6 @@ impl StartCommand {
             .context(StartFlownodeSnafu)?;
         flow_streaming_engine.set_frontend_invoker(invoker).await;

-        let export_metrics_task = ExportMetricsTask::try_new(&opts.export_metrics, Some(&plugins))
-            .context(error::ServersSnafu)?;
-
         let servers = Services::new(opts, fe_instance.clone(), plugins.clone())
             .build()
             .context(error::StartFrontendSnafu)?;
@@ -568,15 +578,6 @@ impl StartCommand {
             instance: fe_instance,
             servers,
             heartbeat_task: None,
-            export_metrics_task,
-        };
-
-        #[cfg(feature = "enterprise")]
-        let components = Components {
-            plugins,
-            kv_backend,
-            frontend_client,
-            catalog_manager,
         };

         Ok(Instance {
@@ -585,8 +586,6 @@ impl StartCommand {
             flownode,
             procedure_manager,
             wal_options_allocator,
-            #[cfg(feature = "enterprise")]
-            components,
             _guard: guard,
         })
     }
@@ -764,6 +763,9 @@ mod tests {
     fn test_load_log_options_from_cli() {
         let cmd = StartCommand {
             user_provider: Some("static_user_provider:cmd:test=test".to_string()),
+            mysql_addr: Some("127.0.0.1:4002".to_string()),
+            postgres_addr: Some("127.0.0.1:4003".to_string()),
+            tls_watch: true,
             ..Default::default()
         };

@@ -780,6 +782,8 @@ mod tests {

         assert_eq!("./greptimedb_data/test/logs", opts.logging.dir);
         assert_eq!("debug", opts.logging.level.unwrap());
+        assert!(opts.mysql.tls.watch);
+        assert!(opts.postgres.tls.watch);
     }

     #[test]

@@ -15,6 +15,7 @@
 use std::time::Duration;

 use cmd::options::GreptimeOptions;
+use common_base::memory_limit::MemoryLimit;
 use common_config::{Configurable, DEFAULT_DATA_HOME};
 use common_options::datanode::{ClientOptions, DatanodeClientOptions};
 use common_telemetry::logging::{DEFAULT_LOGGING_DIR, DEFAULT_OTLP_HTTP_ENDPOINT, LoggingOptions};
@@ -30,7 +31,6 @@ use meta_srv::selector::SelectorType;
 use metric_engine::config::EngineConfig as MetricEngineConfig;
 use mito2::config::MitoConfig;
 use query::options::QueryOptions;
-use servers::export_metrics::ExportMetricsOption;
 use servers::grpc::GrpcOptions;
 use servers::http::HttpOptions;
 use servers::tls::{TlsMode, TlsOption};
@@ -48,6 +48,7 @@ fn test_load_datanode_example_config() {
     let expected = GreptimeOptions::<DatanodeOptions> {
         component: DatanodeOptions {
             node_id: Some(42),
+            default_column_prefix: Some("greptime".to_string()),
             meta_client: Some(MetaClientOptions {
                 metasrv_addrs: vec!["127.0.0.1:3002".to_string()],
                 timeout: Duration::from_secs(3),
@@ -73,14 +74,19 @@ fn test_load_datanode_example_config() {
                 RegionEngineConfig::Mito(MitoConfig {
                     auto_flush_interval: Duration::from_secs(3600),
                     write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
+                    scan_memory_limit: MemoryLimit::Percentage(50),
                     ..Default::default()
                 }),
                 RegionEngineConfig::File(FileEngineConfig {}),
                 RegionEngineConfig::Metric(MetricEngineConfig {
-                    experimental_sparse_primary_key_encoding: false,
+                    sparse_primary_key_encoding: true,
                     flush_metadata_region_interval: Duration::from_secs(30),
                 }),
             ],
+            query: QueryOptions {
+                memory_pool_size: MemoryLimit::Percentage(50),
+                ..Default::default()
+            },
             logging: LoggingOptions {
                 level: Some("info".to_string()),
                 dir: format!("{}/{}", DEFAULT_DATA_HOME, DEFAULT_LOGGING_DIR),
@@ -88,11 +94,6 @@ fn test_load_datanode_example_config() {
                 tracing_sample_ratio: Some(Default::default()),
                 ..Default::default()
             },
-            export_metrics: ExportMetricsOption {
-                self_import: None,
-                remote_write: Some(Default::default()),
-                ..Default::default()
-            },
             grpc: GrpcOptions::default()
                 .with_bind_addr("127.0.0.1:3001")
                 .with_server_addr("127.0.0.1:3001"),
@@ -113,6 +114,7 @@ fn test_load_frontend_example_config() {
     let expected = GreptimeOptions::<FrontendOptions> {
         component: FrontendOptions {
             default_timezone: Some("UTC".to_string()),
+            default_column_prefix: Some("greptime".to_string()),
             meta_client: Some(MetaClientOptions {
                 metasrv_addrs: vec!["127.0.0.1:3002".to_string()],
                 timeout: Duration::from_secs(3),
@@ -138,11 +140,6 @@ fn test_load_frontend_example_config() {
                     ..Default::default()
                 },
             },
-            export_metrics: ExportMetricsOption {
-                self_import: None,
-                remote_write: Some(Default::default()),
-                ..Default::default()
-            },
             grpc: GrpcOptions {
                 bind_addr: "127.0.0.1:4001".to_string(),
                 server_addr: "127.0.0.1:4001".to_string(),
@@ -153,6 +150,10 @@ fn test_load_frontend_example_config() {
                 cors_allowed_origins: vec!["https://example.com".to_string()],
                 ..Default::default()
             },
+            query: QueryOptions {
+                memory_pool_size: MemoryLimit::Percentage(50),
+                ..Default::default()
+            },
             ..Default::default()
         },
         ..Default::default()
@@ -189,11 +190,6 @@ fn test_load_metasrv_example_config() {
                 tcp_nodelay: true,
             },
         },
-        export_metrics: ExportMetricsOption {
-            self_import: None,
-            remote_write: Some(Default::default()),
-            ..Default::default()
-        },
         backend_tls: Some(TlsOption {
             mode: TlsMode::Prefer,
             cert_path: String::new(),
@@ -240,6 +236,7 @@ fn test_load_flownode_example_config() {
             query: QueryOptions {
                 parallelism: 1,
                 allow_query_fallback: false,
+                memory_pool_size: MemoryLimit::Percentage(50),
             },
             meta_client: Some(MetaClientOptions {
                 metasrv_addrs: vec!["127.0.0.1:3002".to_string()],
@@ -273,6 +270,7 @@ fn test_load_standalone_example_config() {
     let expected = GreptimeOptions::<StandaloneOptions> {
         component: StandaloneOptions {
             default_timezone: Some("UTC".to_string()),
+            default_column_prefix: Some("greptime".to_string()),
             wal: DatanodeWalConfig::RaftEngine(RaftEngineConfig {
                 dir: Some(format!("{}/{}", DEFAULT_DATA_HOME, WAL_DIR)),
                 sync_period: Some(Duration::from_secs(10)),
@@ -283,11 +281,12 @@ fn test_load_standalone_example_config() {
                 RegionEngineConfig::Mito(MitoConfig {
                     auto_flush_interval: Duration::from_secs(3600),
                     write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
+                    scan_memory_limit: MemoryLimit::Percentage(50),
                     ..Default::default()
                 }),
                 RegionEngineConfig::File(FileEngineConfig {}),
                 RegionEngineConfig::Metric(MetricEngineConfig {
-                    experimental_sparse_primary_key_encoding: false,
+                    sparse_primary_key_encoding: true,
                     flush_metadata_region_interval: Duration::from_secs(30),
                 }),
             ],
@@ -302,16 +301,14 @@ fn test_load_standalone_example_config() {
                 tracing_sample_ratio: Some(Default::default()),
                 ..Default::default()
             },
-            export_metrics: ExportMetricsOption {
-                self_import: Some(Default::default()),
-                remote_write: Some(Default::default()),
-                ..Default::default()
-            },
             http: HttpOptions {
                 cors_allowed_origins: vec!["https://example.com".to_string()],
                 ..Default::default()
             },
+            query: QueryOptions {
+                memory_pool_size: MemoryLimit::Percentage(50),
+                ..Default::default()
+            },
             ..Default::default()
         },
         ..Default::default()

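Note: the new `scan_memory_limit` and `memory_pool_size` expectations above rely on the string-based serde support of `MemoryLimit`, added in `src/common/base/src/memory_limit.rs` later in this diff. A minimal sketch of that behaviour, using a hypothetical stand-in options struct and the `toml` crate rather than the real `MitoConfig`/`QueryOptions`:

use common_base::memory_limit::MemoryLimit;
use serde::Deserialize;

#[derive(Deserialize)]
struct ExampleOptions {
    // Stand-in field name; the real configs use keys such as `scan_memory_limit`.
    memory_limit: MemoryLimit,
}

fn main() {
    // "50%" deserializes into MemoryLimit::Percentage(50); absolute sizes like "2GB" also work.
    let opts: ExampleOptions = toml::from_str(r#"memory_limit = "50%""#).unwrap();
    assert_eq!(opts.memory_limit, MemoryLimit::Percentage(50));
}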
@@ -18,9 +18,11 @@ bytes.workspace = true
 common-error.workspace = true
 common-macro.workspace = true
 futures.workspace = true
+lazy_static.workspace = true
 paste.workspace = true
 pin-project.workspace = true
 rand.workspace = true
+regex.workspace = true
 serde = { version = "1.0", features = ["derive"] }
 snafu.workspace = true
 tokio.workspace = true

@@ -15,10 +15,12 @@
 pub mod bit_vec;
 pub mod bytes;
 pub mod cancellation;
+pub mod memory_limit;
 pub mod plugins;
 pub mod range_read;
 #[allow(clippy::all)]
 pub mod readable_size;
+pub mod regex_pattern;
 pub mod secrets;
 pub mod serde;

265 src/common/base/src/memory_limit.rs Normal file
@@ -0,0 +1,265 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::fmt::{self, Display};
+use std::str::FromStr;
+
+use serde::{Deserialize, Deserializer, Serialize, Serializer};
+
+use crate::readable_size::ReadableSize;
+
+/// Memory limit configuration that supports both absolute size and percentage.
+///
+/// Examples:
+/// - Absolute size: "2GB", "4GiB", "512MB"
+/// - Percentage: "50%", "75%"
+/// - Unlimited: "unlimited", "0"
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
+pub enum MemoryLimit {
+    /// Absolute memory size.
+    Size(ReadableSize),
+    /// Percentage of total system memory (0-100).
+    Percentage(u8),
+    /// No memory limit.
+    #[default]
+    Unlimited,
+}
+
+impl MemoryLimit {
+    /// Resolve the memory limit to bytes based on total system memory.
+    /// Returns 0 if the limit is unlimited.
+    pub fn resolve(&self, total_memory_bytes: u64) -> u64 {
+        match self {
+            MemoryLimit::Size(size) => size.as_bytes(),
+            MemoryLimit::Percentage(pct) => total_memory_bytes * (*pct as u64) / 100,
+            MemoryLimit::Unlimited => 0,
+        }
+    }
+
+    /// Returns true if this limit is unlimited.
+    pub fn is_unlimited(&self) -> bool {
+        match self {
+            MemoryLimit::Size(size) => size.as_bytes() == 0,
+            MemoryLimit::Percentage(pct) => *pct == 0,
+            MemoryLimit::Unlimited => true,
+        }
+    }
+}
+
+impl FromStr for MemoryLimit {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        let s = s.trim();
+
+        if s.eq_ignore_ascii_case("unlimited") {
+            return Ok(MemoryLimit::Unlimited);
+        }
+
+        if let Some(pct_str) = s.strip_suffix('%') {
+            let pct = pct_str
+                .trim()
+                .parse::<u8>()
+                .map_err(|e| format!("invalid percentage value '{}': {}", pct_str, e))?;
+
+            if pct > 100 {
+                return Err(format!("percentage must be between 0 and 100, got {}", pct));
+            }
+
+            if pct == 0 {
+                Ok(MemoryLimit::Unlimited)
+            } else {
+                Ok(MemoryLimit::Percentage(pct))
+            }
+        } else {
+            let size = ReadableSize::from_str(s)?;
+            if size.as_bytes() == 0 {
+                Ok(MemoryLimit::Unlimited)
+            } else {
+                Ok(MemoryLimit::Size(size))
+            }
+        }
+    }
+}
+
+impl Display for MemoryLimit {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            MemoryLimit::Size(size) => write!(f, "{}", size),
+            MemoryLimit::Percentage(pct) => write!(f, "{}%", pct),
+            MemoryLimit::Unlimited => write!(f, "unlimited"),
+        }
+    }
+}
+
+impl Serialize for MemoryLimit {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        serializer.serialize_str(&self.to_string())
+    }
+}
+
+impl<'de> Deserialize<'de> for MemoryLimit {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        let s = String::deserialize(deserializer)?;
+        MemoryLimit::from_str(&s).map_err(serde::de::Error::custom)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_parse_absolute_size() {
+        assert_eq!(
+            "2GB".parse::<MemoryLimit>().unwrap(),
+            MemoryLimit::Size(ReadableSize(2 * 1024 * 1024 * 1024))
+        );
+        assert_eq!(
+            "512MB".parse::<MemoryLimit>().unwrap(),
+            MemoryLimit::Size(ReadableSize(512 * 1024 * 1024))
+        );
+        assert_eq!("0".parse::<MemoryLimit>().unwrap(), MemoryLimit::Unlimited);
+    }
+
+    #[test]
+    fn test_parse_percentage() {
+        assert_eq!(
+            "50%".parse::<MemoryLimit>().unwrap(),
+            MemoryLimit::Percentage(50)
+        );
+        assert_eq!(
+            "75%".parse::<MemoryLimit>().unwrap(),
+            MemoryLimit::Percentage(75)
+        );
+        assert_eq!("0%".parse::<MemoryLimit>().unwrap(), MemoryLimit::Unlimited);
+    }
+
+    #[test]
+    fn test_parse_invalid() {
+        assert!("150%".parse::<MemoryLimit>().is_err());
+        assert!("-10%".parse::<MemoryLimit>().is_err());
+        assert!("invalid".parse::<MemoryLimit>().is_err());
+    }
+
+    #[test]
+    fn test_resolve() {
+        let total = 8 * 1024 * 1024 * 1024; // 8GB
+
+        assert_eq!(
+            MemoryLimit::Size(ReadableSize(2 * 1024 * 1024 * 1024)).resolve(total),
+            2 * 1024 * 1024 * 1024
+        );
+        assert_eq!(
+            MemoryLimit::Percentage(50).resolve(total),
+            4 * 1024 * 1024 * 1024
+        );
+        assert_eq!(MemoryLimit::Unlimited.resolve(total), 0);
+    }
+
+    #[test]
+    fn test_is_unlimited() {
+        assert!(MemoryLimit::Unlimited.is_unlimited());
+        assert!(!MemoryLimit::Size(ReadableSize(1024)).is_unlimited());
+        assert!(!MemoryLimit::Percentage(50).is_unlimited());
+        assert!(!MemoryLimit::Percentage(1).is_unlimited());
+
+        // Defensive: these states shouldn't exist via public API, but check anyway
+        assert!(MemoryLimit::Size(ReadableSize(0)).is_unlimited());
+        assert!(MemoryLimit::Percentage(0).is_unlimited());
+    }
+
+    #[test]
+    fn test_parse_100_percent() {
+        assert_eq!(
+            "100%".parse::<MemoryLimit>().unwrap(),
+            MemoryLimit::Percentage(100)
+        );
+    }
+
+    #[test]
+    fn test_display_percentage() {
+        assert_eq!(MemoryLimit::Percentage(20).to_string(), "20%");
+        assert_eq!(MemoryLimit::Percentage(50).to_string(), "50%");
+        assert_eq!(MemoryLimit::Percentage(100).to_string(), "100%");
+    }
+
+    #[test]
+    fn test_parse_unlimited() {
+        assert_eq!(
+            "unlimited".parse::<MemoryLimit>().unwrap(),
+            MemoryLimit::Unlimited
+        );
+        assert_eq!(
+            "UNLIMITED".parse::<MemoryLimit>().unwrap(),
+            MemoryLimit::Unlimited
+        );
+        assert_eq!(
+            "Unlimited".parse::<MemoryLimit>().unwrap(),
+            MemoryLimit::Unlimited
+        );
+    }
+
+    #[test]
+    fn test_display_unlimited() {
+        assert_eq!(MemoryLimit::Unlimited.to_string(), "unlimited");
+    }
+
+    #[test]
+    fn test_parse_display_roundtrip() {
+        let cases = vec![
+            "50%",
+            "100%",
+            "1%",
+            "2GB",
+            "512MB",
+            "unlimited",
+            "UNLIMITED",
+            "0", // normalized to unlimited
+            "0%", // normalized to unlimited
+        ];
+
+        for input in cases {
+            let parsed = input.parse::<MemoryLimit>().unwrap();
+            let displayed = parsed.to_string();
+            let reparsed = displayed.parse::<MemoryLimit>().unwrap();
+            assert_eq!(
+                parsed, reparsed,
+                "round-trip failed: '{}' -> '{}' -> '{:?}'",
+                input, displayed, reparsed
+            );
+        }
+    }
+
+    #[test]
+    fn test_zero_normalization() {
+        // All forms of zero should normalize to Unlimited
+        assert_eq!("0".parse::<MemoryLimit>().unwrap(), MemoryLimit::Unlimited);
+        assert_eq!("0%".parse::<MemoryLimit>().unwrap(), MemoryLimit::Unlimited);
+        assert_eq!("0B".parse::<MemoryLimit>().unwrap(), MemoryLimit::Unlimited);
+        assert_eq!(
+            "0KB".parse::<MemoryLimit>().unwrap(),
+            MemoryLimit::Unlimited
+        );
+
+        // Unlimited always displays as "unlimited"
+        assert_eq!(MemoryLimit::Unlimited.to_string(), "unlimited");
+    }
+}

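Note: a short usage sketch for the `MemoryLimit` type added above, based only on the `FromStr`, `resolve`, `is_unlimited`, and `Display` implementations shown in this file; the 16 GiB total-memory figure is purely illustrative.

use std::str::FromStr;

use common_base::memory_limit::MemoryLimit;

fn main() {
    // Parse the string forms accepted by `FromStr`.
    let pct: MemoryLimit = "50%".parse().unwrap();
    let size = MemoryLimit::from_str("2GB").unwrap();
    let unlimited: MemoryLimit = "0".parse().unwrap(); // zero normalizes to Unlimited

    // Resolve against a hypothetical 16 GiB of total system memory.
    let total: u64 = 16 * 1024 * 1024 * 1024;
    assert_eq!(pct.resolve(total), 8 * 1024 * 1024 * 1024);
    assert_eq!(size.resolve(total), 2 * 1024 * 1024 * 1024);
    assert_eq!(unlimited.resolve(total), 0);

    // Display round-trips through the same parser.
    assert_eq!(pct.to_string(), "50%");
    assert!(unlimited.is_unlimited());
}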
@@ -32,7 +32,12 @@ impl Plugins {

     pub fn insert<T: 'static + Send + Sync>(&self, value: T) {
         let last = self.write().insert(value);
-        assert!(last.is_none(), "each type of plugins must be one and only");
+        if last.is_some() {
+            panic!(
+                "Plugin of type {} already exists",
+                std::any::type_name::<T>()
+            );
+        }
     }

     pub fn get<T: 'static + Send + Sync + Clone>(&self) -> Option<T> {
@@ -140,7 +145,7 @@ mod tests {
     }

     #[test]
-    #[should_panic(expected = "each type of plugins must be one and only")]
+    #[should_panic(expected = "Plugin of type i32 already exists")]
     fn test_plugin_uniqueness() {
         let plugins = Plugins::new();
         plugins.insert(1i32);

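Note: a minimal illustration of the `Plugins` insert/get contract that the new panic message documents; `MyPlugin` is a hypothetical type used only for this example.

use common_base::Plugins;

#[derive(Clone)]
struct MyPlugin {
    name: String,
}

fn main() {
    let plugins = Plugins::new();
    plugins.insert(MyPlugin {
        name: "example".to_string(),
    });

    // Retrieval is keyed purely by the concrete type.
    let found: Option<MyPlugin> = plugins.get::<MyPlugin>();
    assert_eq!(found.unwrap().name, "example");

    // Inserting a second MyPlugin would now panic with
    // "Plugin of type ... already exists" instead of the old generic assert message.
}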
22 src/common/base/src/regex_pattern.rs Normal file
@@ -0,0 +1,22 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use lazy_static::lazy_static;
+use regex::Regex;
+
+pub const NAME_PATTERN: &str = r"[a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*";
+
+lazy_static! {
+    pub static ref NAME_PATTERN_REG: Regex = Regex::new(&format!("^{NAME_PATTERN}$")).unwrap();
+}

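Note: a brief sketch of how the new anchored `NAME_PATTERN_REG` can be used for identifier validation; the candidate names below are illustrative only.

use common_base::regex_pattern::NAME_PATTERN_REG;

fn main() {
    // Accepts metric/table-style identifiers (letters, digits, '_', ':', '-', '.', '@', '#').
    assert!(NAME_PATTERN_REG.is_match("http_requests_total"));
    assert!(NAME_PATTERN_REG.is_match("greptime_metric:rate5m"));

    // Rejects names that start with a digit or contain whitespace,
    // because the pattern is anchored with ^...$ and the first class excludes digits.
    assert!(!NAME_PATTERN_REG.is_match("1st_metric"));
    assert!(!NAME_PATTERN_REG.is_match("bad name"));
}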
@@ -8,5 +8,6 @@ license.workspace = true
 workspace = true

 [dependencies]
+const_format.workspace = true

 [dev-dependencies]

27 src/common/catalog/build.rs Normal file
@@ -0,0 +1,27 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+fn main() {
+    // Set DEFAULT_CATALOG_NAME from environment variable or use default value
+    let default_catalog_name =
+        std::env::var("DEFAULT_CATALOG_NAME").unwrap_or_else(|_| "greptime".to_string());
+
+    println!(
+        "cargo:rustc-env=DEFAULT_CATALOG_NAME={}",
+        default_catalog_name
+    );
+
+    // Rerun build script if the environment variable changes
+    println!("cargo:rerun-if-env-changed=DEFAULT_CATALOG_NAME");
+}

@@ -12,13 +12,15 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+use const_format::concatcp;
+
 pub const SYSTEM_CATALOG_NAME: &str = "system";
 pub const INFORMATION_SCHEMA_NAME: &str = "information_schema";
 pub const PG_CATALOG_NAME: &str = "pg_catalog";
 pub const SYSTEM_CATALOG_TABLE_NAME: &str = "system_catalog";
-pub const DEFAULT_CATALOG_NAME: &str = "greptime";
+pub const DEFAULT_CATALOG_NAME: &str = env!("DEFAULT_CATALOG_NAME");
 pub const DEFAULT_SCHEMA_NAME: &str = "public";
-pub const DEFAULT_PRIVATE_SCHEMA_NAME: &str = "greptime_private";
+pub const DEFAULT_PRIVATE_SCHEMA_NAME: &str = concatcp!(DEFAULT_CATALOG_NAME, "_private");

 /// Reserves [0,MIN_USER_FLOW_ID) for internal usage.
 /// User defined table id starts from this value.
@@ -84,8 +86,6 @@ pub const INFORMATION_SCHEMA_TRIGGERS_TABLE_ID: u32 = 24;
 pub const INFORMATION_SCHEMA_GLOBAL_STATUS_TABLE_ID: u32 = 25;
 /// id for information_schema.SESSION_STATUS
 pub const INFORMATION_SCHEMA_SESSION_STATUS_TABLE_ID: u32 = 26;
-/// id for information_schema.RUNTIME_METRICS
-pub const INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID: u32 = 27;
 /// id for information_schema.PARTITIONS
 pub const INFORMATION_SCHEMA_PARTITIONS_TABLE_ID: u32 = 28;
 /// id for information_schema.REGION_PEERS
@@ -110,6 +110,8 @@ pub const INFORMATION_SCHEMA_SSTS_MANIFEST_TABLE_ID: u32 = 37;
 pub const INFORMATION_SCHEMA_SSTS_STORAGE_TABLE_ID: u32 = 38;
 /// id for information_schema.ssts_index_meta
 pub const INFORMATION_SCHEMA_SSTS_INDEX_META_TABLE_ID: u32 = 39;
+/// id for information_schema.alerts
+pub const INFORMATION_SCHEMA_ALERTS_TABLE_ID: u32 = 40;

 // ----- End of information_schema tables -----

@@ -150,4 +152,9 @@ pub const TRACE_TABLE_NAME_SESSION_KEY: &str = "trace_table_name";
 pub fn trace_services_table_name(trace_table_name: &str) -> String {
     format!("{}_services", trace_table_name)
 }
+
+/// Generate the trace operations table name from the trace table name by adding `_operations` suffix.
+pub fn trace_operations_table_name(trace_table_name: &str) -> String {
+    format!("{}_operations", trace_table_name)
+}
 // ---- End of special table and fields ----

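Note: taken together, the new `build.rs` and the `concatcp!`-based constant make the default catalog name a compile-time choice. A hedged sketch of the effect; the `acme` value and the exact crate path (`common_catalog::consts`) are assumptions for illustration.

// Built with, e.g.:  DEFAULT_CATALOG_NAME=acme cargo build
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_PRIVATE_SCHEMA_NAME};

fn main() {
    // With the env var unset, these stay "greptime" and "greptime_private";
    // with DEFAULT_CATALOG_NAME=acme they become "acme" and "acme_private".
    println!("catalog = {DEFAULT_CATALOG_NAME}");
    println!("private schema = {DEFAULT_PRIVATE_SCHEMA_NAME}");
}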
@@ -11,7 +11,6 @@ workspace = true
 common-base.workspace = true
 common-error.workspace = true
 common-macro.workspace = true
-common-stat.workspace = true
 config.workspace = true
 humantime-serde.workspace = true
 object-store.workspace = true

@@ -14,7 +14,6 @@

 pub mod config;
 pub mod error;
-pub mod utils;

 use std::time::Duration;

@@ -1,34 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use common_base::readable_size::ReadableSize;
-use common_stat::{get_total_cpu_millicores, get_total_memory_readable};
-
-/// `ResourceSpec` holds the static resource specifications of a node,
-/// such as CPU cores and memory capacity. These values are fixed
-/// at startup and do not change dynamically during runtime.
-#[derive(Debug, Clone, Copy)]
-pub struct ResourceSpec {
-    pub cpus: i64,
-    pub memory: Option<ReadableSize>,
-}
-
-impl Default for ResourceSpec {
-    fn default() -> Self {
-        Self {
-            cpus: get_total_cpu_millicores(),
-            memory: get_total_memory_readable(),
-        }
-    }
-}

@@ -36,7 +36,7 @@ object_store_opendal.workspace = true
 orc-rust = { version = "0.6.3", default-features = false, features = ["async"] }
 parquet.workspace = true
 paste.workspace = true
-regex = "1.7"
+regex.workspace = true
 serde.workspace = true
 snafu.workspace = true
 strum.workspace = true

@@ -12,28 +12,11 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use std::future::Future;
-
 use arrow::record_batch::RecordBatch;
 use async_trait::async_trait;
 use datafusion::parquet::format::FileMetaData;
-use snafu::{OptionExt, ResultExt};
-use tokio::io::{AsyncWrite, AsyncWriteExt};

-use crate::error::{self, Result};
+use crate::error::Result;
-use crate::share_buffer::SharedBuffer;
-
-pub struct LazyBufferedWriter<T, U, F> {
-    path: String,
-    writer_factory: F,
-    writer: Option<T>,
-    /// None stands for [`LazyBufferedWriter`] closed.
-    encoder: Option<U>,
-    buffer: SharedBuffer,
-    rows_written: usize,
-    bytes_written: u64,
-    threshold: usize,
-}

 pub trait DfRecordBatchEncoder {
     fn write(&mut self, batch: &RecordBatch) -> Result<()>;
@@ -43,126 +26,3 @@ pub trait DfRecordBatchEncoder {
 pub trait ArrowWriterCloser {
     async fn close(mut self) -> Result<FileMetaData>;
 }
-
-impl<
-    T: AsyncWrite + Send + Unpin,
-    U: DfRecordBatchEncoder + ArrowWriterCloser,
-    F: Fn(String) -> Fut,
-    Fut: Future<Output = Result<T>>,
-> LazyBufferedWriter<T, U, F>
-{
-    /// Closes `LazyBufferedWriter` and optionally flushes all data to underlying storage
-    /// if any row's been written.
-    pub async fn close_with_arrow_writer(mut self) -> Result<(FileMetaData, u64)> {
-        let encoder = self
-            .encoder
-            .take()
-            .context(error::BufferedWriterClosedSnafu)?;
-        let metadata = encoder.close().await?;
-
-        // It's important to shut down! flushes all pending writes
-        self.close_inner_writer().await?;
-        Ok((metadata, self.bytes_written))
-    }
-}
-
-impl<
-    T: AsyncWrite + Send + Unpin,
-    U: DfRecordBatchEncoder,
-    F: Fn(String) -> Fut,
-    Fut: Future<Output = Result<T>>,
-> LazyBufferedWriter<T, U, F>
-{
-    /// Closes the writer and flushes the buffer data.
-    pub async fn close_inner_writer(&mut self) -> Result<()> {
-        // Use `rows_written` to keep a track of if any rows have been written.
-        // If no row's been written, then we can simply close the underlying
-        // writer without flush so that no file will be actually created.
-        if self.rows_written != 0 {
-            self.bytes_written += self.try_flush(true).await?;
-        }
-
-        if let Some(writer) = &mut self.writer {
-            writer.shutdown().await.context(error::AsyncWriteSnafu)?;
-        }
-        Ok(())
-    }
-
-    pub fn new(
-        threshold: usize,
-        buffer: SharedBuffer,
-        encoder: U,
-        path: impl AsRef<str>,
-        writer_factory: F,
-    ) -> Self {
-        Self {
-            path: path.as_ref().to_string(),
-            threshold,
-            encoder: Some(encoder),
-            buffer,
-            rows_written: 0,
-            bytes_written: 0,
-            writer_factory,
-            writer: None,
-        }
-    }
-
-    pub async fn write(&mut self, batch: &RecordBatch) -> Result<()> {
-        let encoder = self
-            .encoder
-            .as_mut()
-            .context(error::BufferedWriterClosedSnafu)?;
-        encoder.write(batch)?;
-        self.rows_written += batch.num_rows();
-        self.bytes_written += self.try_flush(false).await?;
-        Ok(())
-    }
-
-    async fn try_flush(&mut self, all: bool) -> Result<u64> {
-        let mut bytes_written: u64 = 0;
-
-        // Once buffered data size reaches threshold, split the data in chunks (typically 4MB)
-        // and write to underlying storage.
-        while self.buffer.buffer.lock().unwrap().len() >= self.threshold {
-            let chunk = {
-                let mut buffer = self.buffer.buffer.lock().unwrap();
-                buffer.split_to(self.threshold)
-            };
-            let size = chunk.len();
-
-            self.maybe_init_writer()
-                .await?
-                .write_all(&chunk)
-                .await
-                .context(error::AsyncWriteSnafu)?;
-
-            bytes_written += size as u64;
-        }
-
-        if all {
-            bytes_written += self.try_flush_all().await?;
-        }
-        Ok(bytes_written)
-    }
-
-    /// Only initiates underlying file writer when rows have been written.
-    async fn maybe_init_writer(&mut self) -> Result<&mut T> {
-        if let Some(ref mut writer) = self.writer {
-            Ok(writer)
-        } else {
-            let writer = (self.writer_factory)(self.path.clone()).await?;
-            Ok(self.writer.insert(writer))
-        }
-    }
-
-    async fn try_flush_all(&mut self) -> Result<u64> {
-        let remain = self.buffer.buffer.lock().unwrap().split();
-        let size = remain.len();
-        self.maybe_init_writer()
-            .await?
-            .write_all(&remain)
-            .await
-            .context(error::AsyncWriteSnafu)?;
-        Ok(size as u64)
-    }
-}

src/common/datasource/src/compressed_writer.rs (new file, 202 lines)
@@ -0,0 +1,202 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};

use async_compression::tokio::write::{BzEncoder, GzipEncoder, XzEncoder, ZstdEncoder};
use snafu::ResultExt;
use tokio::io::{AsyncWrite, AsyncWriteExt};

use crate::compression::CompressionType;
use crate::error::{self, Result};

/// A compressed writer that wraps an underlying async writer with compression.
///
/// This writer supports multiple compression formats including GZIP, BZIP2, XZ, and ZSTD.
/// It provides transparent compression for any async writer implementation.
pub struct CompressedWriter {
    inner: Box<dyn AsyncWrite + Unpin + Send>,
    compression_type: CompressionType,
}

impl CompressedWriter {
    /// Creates a new compressed writer with the specified compression type.
    ///
    /// # Arguments
    ///
    /// * `writer` - The underlying writer to wrap with compression
    /// * `compression_type` - The type of compression to apply
    pub fn new(
        writer: impl AsyncWrite + Unpin + Send + 'static,
        compression_type: CompressionType,
    ) -> Self {
        let inner: Box<dyn AsyncWrite + Unpin + Send> = match compression_type {
            CompressionType::Gzip => Box::new(GzipEncoder::new(writer)),
            CompressionType::Bzip2 => Box::new(BzEncoder::new(writer)),
            CompressionType::Xz => Box::new(XzEncoder::new(writer)),
            CompressionType::Zstd => Box::new(ZstdEncoder::new(writer)),
            CompressionType::Uncompressed => Box::new(writer),
        };

        Self {
            inner,
            compression_type,
        }
    }

    /// Returns the compression type used by this writer.
    pub fn compression_type(&self) -> CompressionType {
        self.compression_type
    }

    /// Flush the writer and shutdown compression
    pub async fn shutdown(mut self) -> Result<()> {
        self.inner
            .shutdown()
            .await
            .context(error::AsyncWriteSnafu)?;
        Ok(())
    }
}

impl AsyncWrite for CompressedWriter {
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        Pin::new(&mut self.inner).poll_write(cx, buf)
    }

    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        Pin::new(&mut self.inner).poll_flush(cx)
    }

    fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        Pin::new(&mut self.inner).poll_shutdown(cx)
    }
}

/// A trait for converting async writers into compressed writers.
///
/// This trait is automatically implemented for all types that implement [`AsyncWrite`].
pub trait IntoCompressedWriter {
    /// Converts this writer into a [`CompressedWriter`] with the specified compression type.
    ///
    /// # Arguments
    ///
    /// * `self` - The underlying writer to wrap with compression
    /// * `compression_type` - The type of compression to apply
    fn into_compressed_writer(self, compression_type: CompressionType) -> CompressedWriter
    where
        Self: AsyncWrite + Unpin + Send + 'static + Sized,
    {
        CompressedWriter::new(self, compression_type)
    }
}

impl<W: AsyncWrite + Unpin + Send + 'static> IntoCompressedWriter for W {}

#[cfg(test)]
mod tests {
    use tokio::io::{AsyncReadExt, AsyncWriteExt, duplex};

    use super::*;

    #[tokio::test]
    async fn test_compressed_writer_gzip() {
        let (duplex_writer, mut duplex_reader) = duplex(1024);
        let mut writer = duplex_writer.into_compressed_writer(CompressionType::Gzip);
        let original = b"test data for gzip compression";

        writer.write_all(original).await.unwrap();
        writer.shutdown().await.unwrap();

        let mut buffer = Vec::new();
        duplex_reader.read_to_end(&mut buffer).await.unwrap();

        // The compressed data should be different from the original
        assert_ne!(buffer, original);
        assert!(!buffer.is_empty());
    }

    #[tokio::test]
    async fn test_compressed_writer_bzip2() {
        let (duplex_writer, mut duplex_reader) = duplex(1024);
        let mut writer = duplex_writer.into_compressed_writer(CompressionType::Bzip2);
        let original = b"test data for bzip2 compression";

        writer.write_all(original).await.unwrap();
        writer.shutdown().await.unwrap();

        let mut buffer = Vec::new();
        duplex_reader.read_to_end(&mut buffer).await.unwrap();

        // The compressed data should be different from the original
        assert_ne!(buffer, original);
        assert!(!buffer.is_empty());
    }

    #[tokio::test]
    async fn test_compressed_writer_xz() {
        let (duplex_writer, mut duplex_reader) = duplex(1024);
        let mut writer = duplex_writer.into_compressed_writer(CompressionType::Xz);
        let original = b"test data for xz compression";

        writer.write_all(original).await.unwrap();
        writer.shutdown().await.unwrap();

        let mut buffer = Vec::new();
        duplex_reader.read_to_end(&mut buffer).await.unwrap();

        // The compressed data should be different from the original
        assert_ne!(buffer, original);
        assert!(!buffer.is_empty());
    }

    #[tokio::test]
    async fn test_compressed_writer_zstd() {
        let (duplex_writer, mut duplex_reader) = duplex(1024);
        let mut writer = duplex_writer.into_compressed_writer(CompressionType::Zstd);
        let original = b"test data for zstd compression";

        writer.write_all(original).await.unwrap();
        writer.shutdown().await.unwrap();

        let mut buffer = Vec::new();
        duplex_reader.read_to_end(&mut buffer).await.unwrap();

        // The compressed data should be different from the original
        assert_ne!(buffer, original);
        assert!(!buffer.is_empty());
    }

    #[tokio::test]
    async fn test_compressed_writer_uncompressed() {
        let (duplex_writer, mut duplex_reader) = duplex(1024);
        let mut writer = duplex_writer.into_compressed_writer(CompressionType::Uncompressed);
        let original = b"test data for uncompressed";

        writer.write_all(original).await.unwrap();
        writer.shutdown().await.unwrap();

        let mut buffer = Vec::new();
        duplex_reader.read_to_end(&mut buffer).await.unwrap();

        // Uncompressed data should be the same as the original
        assert_eq!(buffer, original);
    }
}
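A minimal usage sketch of the new writer (illustrative only, not part of the commit): any `AsyncWrite` can be wrapped via `into_compressed_writer`; the in-memory `duplex` pipe below stands in for a real object-store writer, and the helper name is hypothetical.

// Hypothetical helper: round-trip a few bytes through the gzip-compressing writer.
use tokio::io::{AsyncReadExt, AsyncWriteExt, duplex};

use crate::compressed_writer::IntoCompressedWriter;
use crate::compression::CompressionType;
use crate::error::Result;

async fn gzip_roundtrip() -> Result<Vec<u8>> {
    let (tx, mut rx) = duplex(1024);
    let mut writer = tx.into_compressed_writer(CompressionType::Gzip);
    writer.write_all(b"hello").await.unwrap();
    // `shutdown` finishes the compressed stream and drops the write half,
    // so the reader observes EOF afterwards.
    writer.shutdown().await?;

    let mut compressed = Vec::new();
    rx.read_to_end(&mut compressed).await.unwrap();
    Ok(compressed)
}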
@@ -194,12 +194,6 @@ pub enum Error {
         location: Location,
     },

-    #[snafu(display("Buffered writer closed"))]
-    BufferedWriterClosed {
-        #[snafu(implicit)]
-        location: Location,
-    },
-
     #[snafu(display("Failed to write parquet file, path: {}", path))]
     WriteParquet {
         path: String,
@@ -208,6 +202,14 @@ pub enum Error {
         #[snafu(source)]
         error: parquet::errors::ParquetError,
     },
+
+    #[snafu(display("Failed to build file stream"))]
+    BuildFileStream {
+        #[snafu(implicit)]
+        location: Location,
+        #[snafu(source)]
+        error: datafusion::error::DataFusionError,
+    },
 }

 pub type Result<T> = std::result::Result<T, Error>;
@@ -239,7 +241,7 @@ impl ErrorExt for Error {
             | ReadRecordBatch { .. }
             | WriteRecordBatch { .. }
             | EncodeRecordBatch { .. }
-            | BufferedWriterClosed { .. }
+            | BuildFileStream { .. }
             | OrcReader { .. } => StatusCode::Unexpected,
         }
     }
@@ -30,12 +30,22 @@ use arrow::record_batch::RecordBatch;
 use arrow_schema::{ArrowError, Schema as ArrowSchema};
 use async_trait::async_trait;
 use bytes::{Buf, Bytes};
-use datafusion::datasource::physical_plan::FileOpenFuture;
+use common_recordbatch::DfSendableRecordBatchStream;
+use datafusion::datasource::file_format::file_compression_type::FileCompressionType as DfCompressionType;
+use datafusion::datasource::listing::PartitionedFile;
+use datafusion::datasource::object_store::ObjectStoreUrl;
+use datafusion::datasource::physical_plan::{
+    FileGroup, FileOpenFuture, FileScanConfigBuilder, FileSource, FileStream,
+};
 use datafusion::error::{DataFusionError, Result as DataFusionResult};
 use datafusion::physical_plan::SendableRecordBatchStream;
+use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
+use datatypes::arrow::datatypes::SchemaRef;
 use futures::{StreamExt, TryStreamExt};
 use object_store::ObjectStore;
+use object_store_opendal::OpendalStore;
 use snafu::ResultExt;
+use tokio::io::AsyncWriteExt;
 use tokio_util::compat::FuturesAsyncWriteCompatExt;

 use self::csv::CsvFormat;
@@ -43,7 +53,8 @@ use self::json::JsonFormat;
 use self::orc::OrcFormat;
 use self::parquet::ParquetFormat;
 use crate::DEFAULT_WRITE_BUFFER_SIZE;
-use crate::buffered_writer::{DfRecordBatchEncoder, LazyBufferedWriter};
+use crate::buffered_writer::DfRecordBatchEncoder;
+use crate::compressed_writer::{CompressedWriter, IntoCompressedWriter};
 use crate::compression::CompressionType;
 use crate::error::{self, Result};
 use crate::share_buffer::SharedBuffer;
@@ -195,33 +206,128 @@ pub async fn infer_schemas(
     ArrowSchema::try_merge(schemas).context(error::MergeSchemaSnafu)
 }

-pub async fn stream_to_file<T: DfRecordBatchEncoder, U: Fn(SharedBuffer) -> T>(
+/// Writes data to a compressed writer if the data is not empty.
+///
+/// Does nothing if `data` is empty; otherwise writes all data and returns any error.
+async fn write_to_compressed_writer(
+    compressed_writer: &mut CompressedWriter,
+    data: &[u8],
+) -> Result<()> {
+    if !data.is_empty() {
+        compressed_writer
+            .write_all(data)
+            .await
+            .context(error::AsyncWriteSnafu)?;
+    }
+    Ok(())
+}
+
+/// Streams [SendableRecordBatchStream] to a file with optional compression support.
+/// Data is buffered and flushed according to the given `threshold`.
+/// Ensures that writer resources are cleanly released and that an empty file is not
+/// created if no rows are written.
+///
+/// Returns the total number of rows successfully written.
+pub async fn stream_to_file<E>(
     mut stream: SendableRecordBatchStream,
     store: ObjectStore,
     path: &str,
     threshold: usize,
     concurrency: usize,
-    encoder_factory: U,
-) -> Result<usize> {
-    let buffer = SharedBuffer::with_capacity(threshold);
-    let encoder = encoder_factory(buffer.clone());
-    let mut writer = LazyBufferedWriter::new(threshold, buffer, encoder, path, |path| async {
-        store
-            .writer_with(&path)
+    compression_type: CompressionType,
+    encoder_factory: impl Fn(SharedBuffer) -> E,
+) -> Result<usize>
+where
+    E: DfRecordBatchEncoder,
+{
+    // Create the file writer with OpenDAL's built-in buffering
+    let writer = store
+        .writer_with(path)
         .concurrent(concurrency)
         .chunk(DEFAULT_WRITE_BUFFER_SIZE.as_bytes() as usize)
         .await
-            .map(|v| v.into_futures_async_write().compat_write())
-            .context(error::WriteObjectSnafu { path })
-    });
+        .with_context(|_| error::WriteObjectSnafu { path })?
+        .into_futures_async_write()
+        .compat_write();
+
+    // Apply compression if needed
+    let mut compressed_writer = writer.into_compressed_writer(compression_type);
+
+    // Create a buffer for the encoder
+    let buffer = SharedBuffer::with_capacity(threshold);
+    let mut encoder = encoder_factory(buffer.clone());
+
     let mut rows = 0;
+
+    // Process each record batch
     while let Some(batch) = stream.next().await {
         let batch = batch.context(error::ReadRecordBatchSnafu)?;
-        writer.write(&batch).await?;
+        // Write batch using the encoder
+        encoder.write(&batch)?;
         rows += batch.num_rows();
+
+        loop {
+            let chunk = {
+                let mut buffer_guard = buffer.buffer.lock().unwrap();
+                if buffer_guard.len() < threshold {
+                    break;
+                }
                 }
-        writer.close_inner_writer().await?;
+                buffer_guard.split_to(threshold)
+            };
+            write_to_compressed_writer(&mut compressed_writer, &chunk).await?;
+        }
+    }
+
+    // If no row's been written, just simply close the underlying writer
+    // without flush so that no file will be actually created.
+    if rows != 0 {
+        // Final flush of any remaining data
+        let final_data = {
+            let mut buffer_guard = buffer.buffer.lock().unwrap();
+            buffer_guard.split()
+        };
+        write_to_compressed_writer(&mut compressed_writer, &final_data).await?;
+    }
+
+    // Shutdown compression and close writer
+    compressed_writer.shutdown().await?;
+
     Ok(rows)
 }
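The flush strategy above is easiest to see in isolation. A rough sketch of the same threshold-based chunking, assuming the shared buffer is essentially a `bytes::BytesMut` behind a lock (the function and parameter names below are illustrative, not from the commit):

// Illustrative only: mirrors the draining loop inside `stream_to_file`.
use bytes::BytesMut;

fn drain_in_chunks(buffer: &mut BytesMut, threshold: usize, out: &mut Vec<Vec<u8>>) {
    // Emit full-sized chunks while at least `threshold` bytes are buffered...
    while buffer.len() >= threshold {
        out.push(buffer.split_to(threshold).to_vec());
    }
    // ...and leave the remainder in place; the real code flushes it once the
    // input stream is exhausted, and only if any rows were actually written.
}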
+
+/// Creates a [FileStream] for reading data from a file with optional column projection
+/// and compression support.
+///
+/// Returns [SendableRecordBatchStream].
+pub async fn file_to_stream(
+    store: &ObjectStore,
+    filename: &str,
+    file_schema: SchemaRef,
+    file_source: Arc<dyn FileSource>,
+    projection: Option<Vec<usize>>,
+    compression_type: CompressionType,
+) -> Result<DfSendableRecordBatchStream> {
+    let df_compression: DfCompressionType = compression_type.into();
+    let config = FileScanConfigBuilder::new(
+        ObjectStoreUrl::local_filesystem(),
+        file_schema,
+        file_source.clone(),
+    )
+    .with_file_group(FileGroup::new(vec![PartitionedFile::new(
+        filename.to_string(),
+        0,
+    )]))
+    .with_projection(projection)
+    .with_file_compression_type(df_compression)
+    .build();
+
+    let store = Arc::new(OpendalStore::new(store.clone()));
+    let file_opener = file_source
+        .with_projection(&config)
+        .create_file_opener(store, &config, 0);
+    let stream = FileStream::new(&config, 0, file_opener, &ExecutionPlanMetricsSet::new())
+        .context(error::BuildFileStreamSnafu)?;
+
+    Ok(Box::pin(stream))
+}
@@ -157,7 +157,14 @@ pub async fn stream_to_csv(
     concurrency: usize,
     format: &CsvFormat,
 ) -> Result<usize> {
-    stream_to_file(stream, store, path, threshold, concurrency, |buffer| {
+    stream_to_file(
+        stream,
+        store,
+        path,
+        threshold,
+        concurrency,
+        format.compression_type,
+        |buffer| {
             let mut builder = WriterBuilder::new();
             if let Some(timestamp_format) = &format.timestamp_format {
                 builder = builder.with_timestamp_format(timestamp_format.to_owned())
@@ -169,7 +176,8 @@ pub async fn stream_to_csv(
                 builder = builder.with_time_format(time_format.to_owned())
             }
             builder.build(buffer)
-    })
+        },
+    )
     .await
 }

@@ -181,13 +189,21 @@ impl DfRecordBatchEncoder for csv::Writer<SharedBuffer> {

 #[cfg(test)]
 mod tests {
+    use std::sync::Arc;
+
+    use common_recordbatch::adapter::DfRecordBatchStreamAdapter;
+    use common_recordbatch::{RecordBatch, RecordBatches};
     use common_test_util::find_workspace_path;
+    use datafusion::datasource::physical_plan::{CsvSource, FileSource};
+    use datatypes::prelude::ConcreteDataType;
+    use datatypes::schema::{ColumnSchema, Schema};
+    use datatypes::vectors::{Float64Vector, StringVector, UInt32Vector, VectorRef};
+    use futures::TryStreamExt;

     use super::*;
     use crate::file_format::{
         FORMAT_COMPRESSION_TYPE, FORMAT_DELIMITER, FORMAT_HAS_HEADER,
-        FORMAT_SCHEMA_INFER_MAX_RECORD, FileFormat,
+        FORMAT_SCHEMA_INFER_MAX_RECORD, FileFormat, file_to_stream,
     };
     use crate::test_util::{format_schema, test_store};

@@ -297,4 +313,166 @@ mod tests {
         }
         );
     }
+
+    #[tokio::test]
+    async fn test_compressed_csv() {
+        // Create test data
+        let column_schemas = vec![
+            ColumnSchema::new("id", ConcreteDataType::uint32_datatype(), false),
+            ColumnSchema::new("name", ConcreteDataType::string_datatype(), false),
+            ColumnSchema::new("value", ConcreteDataType::float64_datatype(), false),
+        ];
+        let schema = Arc::new(Schema::new(column_schemas));
+
+        // Create multiple record batches with different data
+        let batch1_columns: Vec<VectorRef> = vec![
+            Arc::new(UInt32Vector::from_slice(vec![1, 2, 3])),
+            Arc::new(StringVector::from(vec!["Alice", "Bob", "Charlie"])),
+            Arc::new(Float64Vector::from_slice(vec![10.5, 20.3, 30.7])),
+        ];
+        let batch1 = RecordBatch::new(schema.clone(), batch1_columns).unwrap();
+
+        let batch2_columns: Vec<VectorRef> = vec![
+            Arc::new(UInt32Vector::from_slice(vec![4, 5, 6])),
+            Arc::new(StringVector::from(vec!["David", "Eva", "Frank"])),
+            Arc::new(Float64Vector::from_slice(vec![40.1, 50.2, 60.3])),
+        ];
+        let batch2 = RecordBatch::new(schema.clone(), batch2_columns).unwrap();
+
+        let batch3_columns: Vec<VectorRef> = vec![
+            Arc::new(UInt32Vector::from_slice(vec![7, 8, 9])),
+            Arc::new(StringVector::from(vec!["Grace", "Henry", "Ivy"])),
+            Arc::new(Float64Vector::from_slice(vec![70.4, 80.5, 90.6])),
+        ];
+        let batch3 = RecordBatch::new(schema.clone(), batch3_columns).unwrap();
+
+        // Combine all batches into a RecordBatches collection
+        let recordbatches = RecordBatches::try_new(schema, vec![batch1, batch2, batch3]).unwrap();
+
+        // Test with different compression types
+        let compression_types = vec![
+            CompressionType::Gzip,
+            CompressionType::Bzip2,
+            CompressionType::Xz,
+            CompressionType::Zstd,
+        ];
+
+        // Create a temporary file path
+        let temp_dir = common_test_util::temp_dir::create_temp_dir("test_compressed_csv");
+        for compression_type in compression_types {
+            let format = CsvFormat {
+                compression_type,
+                ..CsvFormat::default()
+            };
+
+            // Use correct format without Debug formatter
+            let compressed_file_name =
+                format!("test_compressed_csv.{}", compression_type.file_extension());
+            let compressed_file_path = temp_dir.path().join(&compressed_file_name);
+            let compressed_file_path_str = compressed_file_path.to_str().unwrap();
+
+            // Create a simple file store for testing
+            let store = test_store("/");
+
+            // Export CSV with compression
+            let rows = stream_to_csv(
+                Box::pin(DfRecordBatchStreamAdapter::new(recordbatches.as_stream())),
+                store,
+                compressed_file_path_str,
+                1024,
+                1,
+                &format,
+            )
+            .await
+            .unwrap();
+
+            assert_eq!(rows, 9);
+
+            // Verify compressed file was created and has content
+            assert!(compressed_file_path.exists());
+            let file_size = std::fs::metadata(&compressed_file_path).unwrap().len();
+            assert!(file_size > 0);
+
+            // Verify the file is actually compressed
+            let file_content = std::fs::read(&compressed_file_path).unwrap();
+            // Compressed files should not start with CSV header
+            // They should have compression magic bytes
+            match compression_type {
+                CompressionType::Gzip => {
+                    // Gzip magic bytes: 0x1f 0x8b
+                    assert_eq!(file_content[0], 0x1f, "Gzip file should start with 0x1f");
+                    assert_eq!(
+                        file_content[1], 0x8b,
+                        "Gzip file should have 0x8b as second byte"
+                    );
+                }
+                CompressionType::Bzip2 => {
+                    // Bzip2 magic bytes: 'BZ'
+                    assert_eq!(file_content[0], b'B', "Bzip2 file should start with 'B'");
+                    assert_eq!(
+                        file_content[1], b'Z',
+                        "Bzip2 file should have 'Z' as second byte"
+                    );
+                }
+                CompressionType::Xz => {
+                    // XZ magic bytes: 0xFD '7zXZ'
+                    assert_eq!(file_content[0], 0xFD, "XZ file should start with 0xFD");
+                }
+                CompressionType::Zstd => {
+                    // Zstd magic bytes: 0x28 0xB5 0x2F 0xFD
+                    assert_eq!(file_content[0], 0x28, "Zstd file should start with 0x28");
+                    assert_eq!(
+                        file_content[1], 0xB5,
+                        "Zstd file should have 0xB5 as second byte"
+                    );
+                }
+                _ => {}
+            }
+
+            // Verify the compressed file can be decompressed and content matches original data
+            let store = test_store("/");
+            let schema = Arc::new(
+                CsvFormat {
+                    compression_type,
+                    ..Default::default()
+                }
+                .infer_schema(&store, compressed_file_path_str)
+                .await
+                .unwrap(),
+            );
+            let csv_source = CsvSource::new(true, b',', b'"')
+                .with_schema(schema.clone())
+                .with_batch_size(8192);
+
+            let stream = file_to_stream(
+                &store,
+                compressed_file_path_str,
+                schema.clone(),
+                csv_source.clone(),
+                None,
+                compression_type,
+            )
+            .await
+            .unwrap();
+
+            let batches = stream.try_collect::<Vec<_>>().await.unwrap();
+            let pretty_print = arrow::util::pretty::pretty_format_batches(&batches)
+                .unwrap()
+                .to_string();
+            let expected = r#"+----+---------+-------+
+| id | name    | value |
++----+---------+-------+
+| 1  | Alice   | 10.5  |
+| 2  | Bob     | 20.3  |
+| 3  | Charlie | 30.7  |
+| 4  | David   | 40.1  |
+| 5  | Eva     | 50.2  |
+| 6  | Frank   | 60.3  |
+| 7  | Grace   | 70.4  |
+| 8  | Henry   | 80.5  |
+| 9  | Ivy     | 90.6  |
++----+---------+-------+"#;
+            assert_eq!(expected, pretty_print);
+        }
+    }
 }
@@ -115,10 +115,17 @@ pub async fn stream_to_json(
     path: &str,
     threshold: usize,
     concurrency: usize,
+    format: &JsonFormat,
 ) -> Result<usize> {
-    stream_to_file(stream, store, path, threshold, concurrency, |buffer| {
-        json::LineDelimitedWriter::new(buffer)
-    })
+    stream_to_file(
+        stream,
+        store,
+        path,
+        threshold,
+        concurrency,
+        format.compression_type,
+        json::LineDelimitedWriter::new,
+    )
     .await
 }

@@ -130,10 +137,21 @@ impl DfRecordBatchEncoder for json::Writer<SharedBuffer, LineDelimited> {

 #[cfg(test)]
 mod tests {
+    use std::sync::Arc;
+
+    use common_recordbatch::adapter::DfRecordBatchStreamAdapter;
+    use common_recordbatch::{RecordBatch, RecordBatches};
     use common_test_util::find_workspace_path;
+    use datafusion::datasource::physical_plan::{FileSource, JsonSource};
+    use datatypes::prelude::ConcreteDataType;
+    use datatypes::schema::{ColumnSchema, Schema};
+    use datatypes::vectors::{Float64Vector, StringVector, UInt32Vector, VectorRef};
+    use futures::TryStreamExt;

     use super::*;
-    use crate::file_format::{FORMAT_COMPRESSION_TYPE, FORMAT_SCHEMA_INFER_MAX_RECORD, FileFormat};
+    use crate::file_format::{
+        FORMAT_COMPRESSION_TYPE, FORMAT_SCHEMA_INFER_MAX_RECORD, FileFormat, file_to_stream,
+    };
     use crate::test_util::{format_schema, test_store};

     fn test_data_root() -> String {
@@ -203,4 +221,165 @@ mod tests {
         }
         );
     }
+
+    #[tokio::test]
+    async fn test_compressed_json() {
+        // Create test data
+        let column_schemas = vec![
+            ColumnSchema::new("id", ConcreteDataType::uint32_datatype(), false),
+            ColumnSchema::new("name", ConcreteDataType::string_datatype(), false),
+            ColumnSchema::new("value", ConcreteDataType::float64_datatype(), false),
+        ];
+        let schema = Arc::new(Schema::new(column_schemas));
+
+        // Create multiple record batches with different data
+        let batch1_columns: Vec<VectorRef> = vec![
+            Arc::new(UInt32Vector::from_slice(vec![1, 2, 3])),
+            Arc::new(StringVector::from(vec!["Alice", "Bob", "Charlie"])),
+            Arc::new(Float64Vector::from_slice(vec![10.5, 20.3, 30.7])),
+        ];
+        let batch1 = RecordBatch::new(schema.clone(), batch1_columns).unwrap();
+
+        let batch2_columns: Vec<VectorRef> = vec![
+            Arc::new(UInt32Vector::from_slice(vec![4, 5, 6])),
+            Arc::new(StringVector::from(vec!["David", "Eva", "Frank"])),
+            Arc::new(Float64Vector::from_slice(vec![40.1, 50.2, 60.3])),
+        ];
+        let batch2 = RecordBatch::new(schema.clone(), batch2_columns).unwrap();
+
+        let batch3_columns: Vec<VectorRef> = vec![
+            Arc::new(UInt32Vector::from_slice(vec![7, 8, 9])),
+            Arc::new(StringVector::from(vec!["Grace", "Henry", "Ivy"])),
+            Arc::new(Float64Vector::from_slice(vec![70.4, 80.5, 90.6])),
+        ];
+        let batch3 = RecordBatch::new(schema.clone(), batch3_columns).unwrap();
+
+        // Combine all batches into a RecordBatches collection
+        let recordbatches = RecordBatches::try_new(schema, vec![batch1, batch2, batch3]).unwrap();
+
+        // Test with different compression types
+        let compression_types = vec![
+            CompressionType::Gzip,
+            CompressionType::Bzip2,
+            CompressionType::Xz,
+            CompressionType::Zstd,
+        ];
+
+        // Create a temporary file path
+        let temp_dir = common_test_util::temp_dir::create_temp_dir("test_compressed_json");
+        for compression_type in compression_types {
+            let format = JsonFormat {
+                compression_type,
+                ..JsonFormat::default()
+            };
+
+            let compressed_file_name =
+                format!("test_compressed_json.{}", compression_type.file_extension());
+            let compressed_file_path = temp_dir.path().join(&compressed_file_name);
+            let compressed_file_path_str = compressed_file_path.to_str().unwrap();
+
+            // Create a simple file store for testing
+            let store = test_store("/");
+
+            // Export JSON with compression
+            let rows = stream_to_json(
+                Box::pin(DfRecordBatchStreamAdapter::new(recordbatches.as_stream())),
+                store,
+                compressed_file_path_str,
+                1024,
+                1,
+                &format,
+            )
+            .await
+            .unwrap();
+
+            assert_eq!(rows, 9);
+
+            // Verify compressed file was created and has content
+            assert!(compressed_file_path.exists());
+            let file_size = std::fs::metadata(&compressed_file_path).unwrap().len();
+            assert!(file_size > 0);
+
+            // Verify the file is actually compressed
+            let file_content = std::fs::read(&compressed_file_path).unwrap();
+            // Compressed files should not start with '{' (JSON character)
+            // They should have compression magic bytes
+            match compression_type {
+                CompressionType::Gzip => {
+                    // Gzip magic bytes: 0x1f 0x8b
+                    assert_eq!(file_content[0], 0x1f, "Gzip file should start with 0x1f");
+                    assert_eq!(
+                        file_content[1], 0x8b,
+                        "Gzip file should have 0x8b as second byte"
+                    );
+                }
+                CompressionType::Bzip2 => {
+                    // Bzip2 magic bytes: 'BZ'
+                    assert_eq!(file_content[0], b'B', "Bzip2 file should start with 'B'");
+                    assert_eq!(
+                        file_content[1], b'Z',
+                        "Bzip2 file should have 'Z' as second byte"
+                    );
+                }
+                CompressionType::Xz => {
+                    // XZ magic bytes: 0xFD '7zXZ'
+                    assert_eq!(file_content[0], 0xFD, "XZ file should start with 0xFD");
+                }
+                CompressionType::Zstd => {
+                    // Zstd magic bytes: 0x28 0xB5 0x2F 0xFD
+                    assert_eq!(file_content[0], 0x28, "Zstd file should start with 0x28");
+                    assert_eq!(
+                        file_content[1], 0xB5,
+                        "Zstd file should have 0xB5 as second byte"
+                    );
+                }
+                _ => {}
+            }
+
+            // Verify the compressed file can be decompressed and content matches original data
+            let store = test_store("/");
+            let schema = Arc::new(
+                JsonFormat {
+                    compression_type,
+                    ..Default::default()
+                }
+                .infer_schema(&store, compressed_file_path_str)
+                .await
+                .unwrap(),
+            );
+            let json_source = JsonSource::new()
+                .with_schema(schema.clone())
+                .with_batch_size(8192);
+
+            let stream = file_to_stream(
+                &store,
+                compressed_file_path_str,
+                schema.clone(),
+                json_source.clone(),
+                None,
+                compression_type,
+            )
+            .await
+            .unwrap();
+
+            let batches = stream.try_collect::<Vec<_>>().await.unwrap();
+            let pretty_print = arrow::util::pretty::pretty_format_batches(&batches)
+                .unwrap()
+                .to_string();
+            let expected = r#"+----+---------+-------+
+| id | name    | value |
++----+---------+-------+
+| 1  | Alice   | 10.5  |
+| 2  | Bob     | 20.3  |
+| 3  | Charlie | 30.7  |
+| 4  | David   | 40.1  |
+| 5  | Eva     | 50.2  |
+| 6  | Frank   | 60.3  |
+| 7  | Grace   | 70.4  |
+| 8  | Henry   | 80.5  |
+| 9  | Ivy     | 90.6  |
++----+---------+-------+"#;
+            assert_eq!(expected, pretty_print);
+        }
+    }
 }
@@ -16,6 +16,7 @@
 #![feature(type_alias_impl_trait)]

 pub mod buffered_writer;
+pub mod compressed_writer;
 pub mod compression;
 pub mod error;
 pub mod file_format;

@@ -28,7 +28,7 @@ use object_store::ObjectStore;
 use object_store::services::Fs;

 use crate::file_format::csv::{CsvFormat, stream_to_csv};
-use crate::file_format::json::stream_to_json;
+use crate::file_format::json::{JsonFormat, stream_to_json};
 use crate::test_util;

 pub const TEST_BATCH_SIZE: usize = 100;
@@ -122,13 +122,16 @@ pub async fn setup_stream_to_json_test(origin_path: &str, threshold: impl Fn(usi

     let output_path = format!("{}/{}", dir.path().display(), "output");

+    let json_format = JsonFormat::default();
+
     assert!(
         stream_to_json(
             Box::pin(stream),
             tmp_store.clone(),
             &output_path,
             threshold(size),
-            8
+            8,
+            &json_format,
         )
         .await
         .is_ok()
@@ -45,3 +45,19 @@ pub fn from_err_code_msg_to_header(code: u32, msg: &str) -> HeaderMap {
     header.insert(GREPTIME_DB_HEADER_ERROR_MSG, msg);
     header
 }
+
+/// Returns the external root cause of the source error (exclude the current error).
+pub fn root_source(err: &dyn std::error::Error) -> Option<&dyn std::error::Error> {
+    // There are some divergence about the behavior of the `sources()` API
+    // in https://github.com/rust-lang/rust/issues/58520
+    // So this function iterates the sources manually.
+    let mut root = err.source();
+    while let Some(r) = root {
+        if let Some(s) = r.source() {
+            root = Some(s);
+        } else {
+            break;
+        }
+    }
+    root
+}
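A small standalone illustration of the helper (the error types below are hypothetical, not from the codebase): `root_source` walks `source()` links and returns the deepest error, or `None` when the error has no source at all.

use std::fmt;

#[derive(Debug)]
struct Inner;
#[derive(Debug)]
struct Outer(Inner);

impl fmt::Display for Inner {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "inner")
    }
}
impl fmt::Display for Outer {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "outer")
    }
}
impl std::error::Error for Inner {}
impl std::error::Error for Outer {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        Some(&self.0)
    }
}

fn demo() {
    let err = Outer(Inner);
    // The deepest source is returned, not the error itself.
    assert_eq!(root_source(&err).unwrap().to_string(), "inner");
    // An error without a source yields None.
    assert!(root_source(&Inner).is_none());
}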
@@ -97,9 +97,9 @@ pub trait Event: Send + Sync + Debug {
         vec![]
     }

-    /// Add the extra row to the event with the default row.
-    fn extra_row(&self) -> Result<Row> {
-        Ok(Row { values: vec![] })
+    /// Add the extra rows to the event with the default row.
+    fn extra_rows(&self) -> Result<Vec<Row>> {
+        Ok(vec![Row { values: vec![] }])
     }

     /// Returns the event as any type.
@@ -159,7 +159,8 @@ pub fn build_row_inserts_request(events: &[&Box<dyn Event>]) -> Result<RowInsert

     let mut rows: Vec<Row> = Vec::with_capacity(events.len());
     for event in events {
-        let extra_row = event.extra_row()?;
+        let extra_rows = event.extra_rows()?;
+        for extra_row in extra_rows {
             let mut values = Vec::with_capacity(3 + extra_row.values.len());
             values.extend([
                 ValueData::StringValue(event.event_type().to_string()).into(),
@@ -169,6 +170,7 @@ pub fn build_row_inserts_request(events: &[&Box<dyn Event>]) -> Result<RowInsert
             values.extend(extra_row.values);
             rows.push(Row { values });
         }
+    }

     Ok(RowInsertRequests {
         inserts: vec![RowInsertRequest {
@@ -104,7 +104,7 @@ impl MetaClientSelector {
|
|||||||
let cfg = ChannelConfig::new()
|
let cfg = ChannelConfig::new()
|
||||||
.connect_timeout(Duration::from_secs(30))
|
.connect_timeout(Duration::from_secs(30))
|
||||||
.timeout(Duration::from_secs(30));
|
.timeout(Duration::from_secs(30));
|
||||||
let channel_manager = ChannelManager::with_config(cfg);
|
let channel_manager = ChannelManager::with_config(cfg, None);
|
||||||
Self {
|
Self {
|
||||||
meta_client,
|
meta_client,
|
||||||
channel_manager,
|
channel_manager,
|
||||||
|
@@ -107,8 +107,8 @@ impl Event for SlowQueryEvent {
         ]
     }

-    fn extra_row(&self) -> Result<Row> {
-        Ok(Row {
+    fn extra_rows(&self) -> Result<Vec<Row>> {
+        Ok(vec![Row {
             values: vec![
                 ValueData::U64Value(self.cost).into(),
                 ValueData::U64Value(self.threshold).into(),
@@ -119,7 +119,7 @@ impl Event for SlowQueryEvent {
                 ValueData::TimestampMillisecondValue(self.promql_start.unwrap_or(0)).into(),
                 ValueData::TimestampMillisecondValue(self.promql_end.unwrap_or(0)).into(),
             ],
-        })
+        }])
     }

     fn json_payload(&self) -> Result<String> {
@@ -47,10 +47,12 @@ h3o = { version = "0.6", optional = true }
 hyperloglogplus = "0.4"
 jsonb.workspace = true
 memchr = "2.7"
+mito-codec.workspace = true
 nalgebra.workspace = true
 num = "0.4"
 num-traits = "0.2"
 paste.workspace = true
+regex.workspace = true
 s2 = { version = "0.0.12", optional = true }
 serde.workspace = true
 serde_json.workspace = true
@@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+mod build_index_table;
 mod flush_compact_region;
 mod flush_compact_table;
 mod migrate_region;
@@ -26,6 +27,7 @@ use reconcile_catalog::ReconcileCatalogFunction;
 use reconcile_database::ReconcileDatabaseFunction;
 use reconcile_table::ReconcileTableFunction;

+use crate::admin::build_index_table::BuildIndexFunction;
 use crate::flush_flow::FlushFlowFunction;
 use crate::function_registry::FunctionRegistry;

@@ -40,6 +42,7 @@ impl AdminFunction {
         registry.register(CompactRegionFunction::factory());
         registry.register(FlushTableFunction::factory());
         registry.register(CompactTableFunction::factory());
+        registry.register(BuildIndexFunction::factory());
         registry.register(FlushFlowFunction::factory());
         registry.register(ReconcileCatalogFunction::factory());
         registry.register(ReconcileDatabaseFunction::factory());
src/common/function/src/admin/build_index_table.rs (new file, 80 lines)
@@ -0,0 +1,80 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use arrow::datatypes::DataType as ArrowDataType;
use common_error::ext::BoxedError;
use common_macro::admin_fn;
use common_query::error::{
    InvalidFuncArgsSnafu, MissingTableMutationHandlerSnafu, Result, TableMutationSnafu,
    UnsupportedInputDataTypeSnafu,
};
use datafusion_expr::{Signature, Volatility};
use datatypes::prelude::*;
use session::context::QueryContextRef;
use session::table_name::table_name_to_full_name;
use snafu::{ResultExt, ensure};
use table::requests::BuildIndexTableRequest;

use crate::handlers::TableMutationHandlerRef;

#[admin_fn(
    name = BuildIndexFunction,
    display_name = build_index,
    sig_fn = build_index_signature,
    ret = uint64
)]
pub(crate) async fn build_index(
    table_mutation_handler: &TableMutationHandlerRef,
    query_ctx: &QueryContextRef,
    params: &[ValueRef<'_>],
) -> Result<Value> {
    ensure!(
        params.len() == 1,
        InvalidFuncArgsSnafu {
            err_msg: format!(
                "The length of the args is not correct, expect 1, have: {}",
                params.len()
            ),
        }
    );

    let ValueRef::String(table_name) = params[0] else {
        return UnsupportedInputDataTypeSnafu {
            function: "build_index",
            datatypes: params.iter().map(|v| v.data_type()).collect::<Vec<_>>(),
        }
        .fail();
    };

    let (catalog_name, schema_name, table_name) = table_name_to_full_name(table_name, query_ctx)
        .map_err(BoxedError::new)
        .context(TableMutationSnafu)?;

    let affected_rows = table_mutation_handler
        .build_index(
            BuildIndexTableRequest {
                catalog_name,
                schema_name,
                table_name,
            },
            query_ctx.clone(),
        )
        .await?;

    Ok(Value::from(affected_rows as u64))
}

fn build_index_signature() -> Signature {
    Signature::uniform(1, vec![ArrowDataType::Utf8], Volatility::Immutable)
}
@@ -29,6 +29,8 @@ use arrow::array::StructArray;
 use arrow_schema::{FieldRef, Fields};
 use common_telemetry::debug;
 use datafusion::functions_aggregate::all_default_aggregate_functions;
+use datafusion::functions_aggregate::count::Count;
+use datafusion::functions_aggregate::min_max::{Max, Min};
 use datafusion::optimizer::AnalyzerRule;
 use datafusion::optimizer::analyzer::type_coercion::TypeCoercion;
 use datafusion::physical_planner::create_aggregate_expr_and_maybe_filter;
@@ -413,6 +415,51 @@ impl AggregateUDFImpl for StateWrapper {
     fn coerce_types(&self, arg_types: &[DataType]) -> datafusion_common::Result<Vec<DataType>> {
         self.inner.coerce_types(arg_types)
     }
+
+    fn value_from_stats(
+        &self,
+        statistics_args: &datafusion_expr::StatisticsArgs,
+    ) -> Option<ScalarValue> {
+        let inner = self.inner().inner().as_any();
+        // only count/min/max need special handling here, for getting result from statistics
+        // the result of count/min/max is also the result of count_state so can return directly
+        let can_use_stat = inner.is::<Count>() || inner.is::<Max>() || inner.is::<Min>();
+        if !can_use_stat {
+            return None;
+        }
+
+        // fix return type by extract the first field's data type from the struct type
+        let state_type = if let DataType::Struct(fields) = &statistics_args.return_type {
+            if fields.is_empty() {
+                return None;
+            }
+            fields[0].data_type().clone()
+        } else {
+            return None;
+        };
+
+        let fixed_args = datafusion_expr::StatisticsArgs {
+            statistics: statistics_args.statistics,
+            return_type: &state_type,
+            is_distinct: statistics_args.is_distinct,
+            exprs: statistics_args.exprs,
+        };
+
+        let ret = self.inner().value_from_stats(&fixed_args)?;
+
+        // wrap the result into struct scalar value
+        let fields = if let DataType::Struct(fields) = &statistics_args.return_type {
+            fields
+        } else {
+            return None;
+        };
+
+        let array = ret.to_array().ok()?;
+
+        let struct_array = StructArray::new(fields.clone(), vec![array], None);
+        let ret = ScalarValue::Struct(Arc::new(struct_array));
+        Some(ret)
+    }
 }

 /// The wrapper's input is the same as the original aggregate function's input,
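The struct wrapping at the end is the subtle part: the state UDAF's return type is a single-field struct, so the plain scalar produced by the inner function has to be re-wrapped before it can be returned. A rough standalone sketch of just that step (the function and field names are illustrative, not from the commit):

// Wrap any ScalarValue into a one-field struct scalar, mirroring the tail of value_from_stats.
use std::sync::Arc;

use arrow::array::StructArray;
use arrow_schema::{Field, Fields};
use datafusion_common::ScalarValue;

fn wrap_in_struct(value: ScalarValue) -> Option<ScalarValue> {
    // The struct has one column whose type matches the scalar being wrapped.
    let fields = Fields::from(vec![Field::new("state", value.data_type(), true)]);
    // A one-row array carrying the scalar becomes the struct's only child column.
    let array = value.to_array().ok()?;
    let struct_array = StructArray::new(fields, vec![array], None);
    Some(ScalarValue::Struct(Arc::new(struct_array)))
}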
@@ -12,10 +12,12 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+use crate::aggrs::vector::avg::VectorAvg;
 use crate::aggrs::vector::product::VectorProduct;
 use crate::aggrs::vector::sum::VectorSum;
 use crate::function_registry::FunctionRegistry;

+mod avg;
 mod product;
 mod sum;

@@ -25,5 +27,6 @@ impl VectorFunction {
     pub fn register(registry: &FunctionRegistry) {
         registry.register_aggr(VectorSum::uadf_impl());
         registry.register_aggr(VectorProduct::uadf_impl());
+        registry.register_aggr(VectorAvg::uadf_impl());
     }
 }
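The newly registered `vec_avg` averages vectors element-wise: the accumulator below keeps a running element-wise sum plus a row count and divides at evaluation time. A tiny sketch of that arithmetic with nalgebra (standalone and illustrative, not the accumulator itself):

// Element-wise average of equal-length vectors, the core idea behind `vec_avg`.
use nalgebra::DVector;

fn average(vectors: &[Vec<f32>]) -> Option<DVector<f32>> {
    let dims = vectors.first()?.len();
    let mut sum = DVector::<f32>::zeros(dims);
    for v in vectors {
        if v.len() != dims {
            // The real accumulator reports an execution error on a length mismatch.
            return None;
        }
        sum += DVector::from_column_slice(v);
    }
    Some(sum / vectors.len() as f32)
}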
src/common/function/src/aggrs/vector/avg.rs (new file, 270 lines; listing truncated below)
@@ -0,0 +1,270 @@
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::borrow::Cow;
use std::sync::Arc;

use arrow::array::{Array, ArrayRef, AsArray, BinaryArray, LargeStringArray, StringArray};
use arrow::compute::sum;
use arrow::datatypes::UInt64Type;
use arrow_schema::{DataType, Field};
use datafusion_common::{Result, ScalarValue};
use datafusion_expr::{
    Accumulator, AggregateUDF, Signature, SimpleAggregateUDF, TypeSignature, Volatility,
};
use datafusion_functions_aggregate_common::accumulator::AccumulatorArgs;
use nalgebra::{Const, DVector, DVectorView, Dyn, OVector};

use crate::scalars::vector::impl_conv::{
    binlit_as_veclit, parse_veclit_from_strlit, veclit_to_binlit,
};

/// The accumulator for the `vec_avg` aggregate function.
#[derive(Debug, Default)]
pub struct VectorAvg {
    sum: Option<OVector<f32, Dyn>>,
    count: u64,
}

impl VectorAvg {
    /// Create a new `AggregateUDF` for the `vec_avg` aggregate function.
    pub fn uadf_impl() -> AggregateUDF {
        let signature = Signature::one_of(
            vec![
                TypeSignature::Exact(vec![DataType::Utf8]),
                TypeSignature::Exact(vec![DataType::LargeUtf8]),
                TypeSignature::Exact(vec![DataType::Binary]),
            ],
            Volatility::Immutable,
        );
        let udaf = SimpleAggregateUDF::new_with_signature(
            "vec_avg",
            signature,
            DataType::Binary,
            Arc::new(Self::accumulator),
            vec![
                Arc::new(Field::new("sum", DataType::Binary, true)),
                Arc::new(Field::new("count", DataType::UInt64, true)),
            ],
        );
        AggregateUDF::from(udaf)
    }

    fn accumulator(args: AccumulatorArgs) -> Result<Box<dyn Accumulator>> {
        if args.schema.fields().len() != 1 {
            return Err(datafusion_common::DataFusionError::Internal(format!(
                "expect creating `VEC_AVG` with only one input field, actual {}",
                args.schema.fields().len()
            )));
        }

        let t = args.schema.field(0).data_type();
        if !matches!(t, DataType::Utf8 | DataType::LargeUtf8 | DataType::Binary) {
            return Err(datafusion_common::DataFusionError::Internal(format!(
                "unexpected input datatype {t} when creating `VEC_AVG`"
            )));
        }

        Ok(Box::new(VectorAvg::default()))
    }

    fn inner(&mut self, len: usize) -> &mut OVector<f32, Dyn> {
        self.sum
            .get_or_insert_with(|| OVector::zeros_generic(Dyn(len), Const::<1>))
    }

    fn update(&mut self, values: &[ArrayRef], is_update: bool) -> Result<()> {
        if values.is_empty() {
            return Ok(());
        };

        let vectors = match values[0].data_type() {
            DataType::Utf8 => {
                let arr: &StringArray = values[0].as_string();
                arr.iter()
                    .filter_map(|x| x.map(|s| parse_veclit_from_strlit(s).map_err(Into::into)))
                    .map(|x| x.map(Cow::Owned))
                    .collect::<Result<Vec<_>>>()?
            }
            DataType::LargeUtf8 => {
                let arr: &LargeStringArray = values[0].as_string();
                arr.iter()
                    .filter_map(|x| x.map(|s| parse_veclit_from_strlit(s).map_err(Into::into)))
                    .map(|x: Result<Vec<f32>>| x.map(Cow::Owned))
                    .collect::<Result<Vec<_>>>()?
            }
            DataType::Binary => {
                let arr: &BinaryArray = values[0].as_binary();
                arr.iter()
                    .filter_map(|x| x.map(|b| binlit_as_veclit(b).map_err(Into::into)))
                    .collect::<Result<Vec<_>>>()?
            }
            _ => {
                return Err(datafusion_common::DataFusionError::NotImplemented(format!(
                    "unsupported data type {} for `VEC_AVG`",
                    values[0].data_type()
                )));
            }
        };

        if vectors.is_empty() {
            return Ok(());
        }

        let len = if is_update {
            vectors.len() as u64
        } else {
            sum(values[1].as_primitive::<UInt64Type>()).unwrap_or_default()
        };

        let dims = vectors[0].len();
        let mut sum = DVector::zeros(dims);
        for v in vectors {
            if v.len() != dims {
                return Err(datafusion_common::DataFusionError::Execution(
                    "vectors length not match: VEC_AVG".to_string(),
                ));
            }
            let v_view = DVectorView::from_slice(&v, dims);
            sum += &v_view;
        }

        *self.inner(dims) += sum;
        self.count += len;

        Ok(())
    }
}

impl Accumulator for VectorAvg {
    fn state(&mut self) -> Result<Vec<ScalarValue>> {
        let vector = match &self.sum {
            None => ScalarValue::Binary(None),
            Some(sum) => ScalarValue::Binary(Some(veclit_to_binlit(sum.as_slice()))),
        };
        Ok(vec![vector, ScalarValue::from(self.count)])
    }

    fn update_batch(&mut self, values: &[ArrayRef]) -> Result<()> {
        self.update(values, true)
    }

    fn merge_batch(&mut self, states: &[ArrayRef]) -> Result<()> {
        self.update(states, false)
    }

    fn evaluate(&mut self) -> Result<ScalarValue> {
        match &self.sum {
            None => Ok(ScalarValue::Binary(None)),
            Some(sum) => Ok(ScalarValue::Binary(Some(veclit_to_binlit(
                (sum / self.count as f32).as_slice(),
            )))),
        }
    }

    fn size(&self) -> usize {
        size_of_val(self)
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use arrow::array::StringArray;
    use datatypes::scalars::ScalarVector;
    use datatypes::vectors::{ConstantVector, StringVector, Vector};

    use super::*;

    #[test]
    fn test_update_batch() {
        // test update empty batch, expect not updating anything
        let mut vec_avg = VectorAvg::default();
        vec_avg.update_batch(&[]).unwrap();
        assert!(vec_avg.sum.is_none());
        assert_eq!(ScalarValue::Binary(None), vec_avg.evaluate().unwrap());

        // test update one not-null value
        let mut vec_avg = VectorAvg::default();
        let v: Vec<ArrayRef> = vec![Arc::new(StringArray::from(vec![
            Some("[1.0,2.0,3.0]".to_string()),
            Some("[4.0,5.0,6.0]".to_string()),
        ]))];
        vec_avg.update_batch(&v).unwrap();
        assert_eq!(
            ScalarValue::Binary(Some(veclit_to_binlit(&[2.5, 3.5, 4.5]))),
            vec_avg.evaluate().unwrap()
        );

        // test update one null value
        let mut vec_avg = VectorAvg::default();
        let v: Vec<ArrayRef> = vec![Arc::new(StringArray::from(vec![Option::<String>::None]))];
        vec_avg.update_batch(&v).unwrap();
        assert_eq!(ScalarValue::Binary(None), vec_avg.evaluate().unwrap());

        // test update no null-value batch
        let mut vec_avg = VectorAvg::default();
        let v: Vec<ArrayRef> = vec![Arc::new(StringArray::from(vec![
            Some("[1.0,2.0,3.0]".to_string()),
            Some("[4.0,5.0,6.0]".to_string()),
            Some("[7.0,8.0,9.0]".to_string()),
        ]))];
        vec_avg.update_batch(&v).unwrap();
        assert_eq!(
            ScalarValue::Binary(Some(veclit_to_binlit(&[4.0, 5.0, 6.0]))),
            vec_avg.evaluate().unwrap()
        );

        // test update null-value batch
        let mut vec_avg = VectorAvg::default();
        let v: Vec<ArrayRef> = vec![Arc::new(StringArray::from(vec![
            Some("[1.0,2.0,3.0]".to_string()),
            None,
            Some("[7.0,8.0,9.0]".to_string()),
        ]))];
        vec_avg.update_batch(&v).unwrap();
        assert_eq!(
            ScalarValue::Binary(Some(veclit_to_binlit(&[4.0, 5.0, 6.0]))),
            vec_avg.evaluate().unwrap()
        );

        let mut vec_avg = VectorAvg::default();
        let v: Vec<ArrayRef> = vec![Arc::new(StringArray::from(vec![
            None,
            Some("[4.0,5.0,6.0]".to_string()),
            Some("[7.0,8.0,9.0]".to_string()),
        ]))];
        vec_avg.update_batch(&v).unwrap();
        assert_eq!(
            ScalarValue::Binary(Some(veclit_to_binlit(&[5.5, 6.5, 7.5]))),
            vec_avg.evaluate().unwrap()
        );

        // test update with constant vector
        let mut vec_avg = VectorAvg::default();
        let v: Vec<ArrayRef> = vec![
            Arc::new(ConstantVector::new(
                Arc::new(StringVector::from_vec(vec!["[1.0,2.0,3.0]".to_string()])),
                4,
            ))
            .to_arrow_array(),
        ];
        vec_avg.update_batch(&v).unwrap();
        assert_eq!(
            ScalarValue::Binary(Some(veclit_to_binlit(&[1.0, 2.0, 3.0]))),
|
||||||
|
vec_avg.evaluate().unwrap()
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
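
The accumulator above keeps only a running element-wise sum plus a row count and divides at evaluate time, so partial states produced on different partitions can be merged losslessly. Below is a minimal, dependency-free sketch of that scheme; it uses plain Vec<f32> instead of the nalgebra OVector/DVector types and skips the string/binary vector encodings, so every name in it is illustrative rather than the actual GreptimeDB API.

/// Stripped-down model of the VEC_AVG state machine: `update` folds raw vectors into the
/// partial state, `merge` combines partial states from other partitions, and `evaluate`
/// divides the summed vector by the total count.
#[derive(Default)]
struct AvgState {
    sum: Option<Vec<f32>>,
    count: u64,
}

impl AvgState {
    fn update(&mut self, vectors: &[Vec<f32>]) {
        for v in vectors {
            let sum = self.sum.get_or_insert_with(|| vec![0.0; v.len()]);
            // Dimension mismatches are an error in the real accumulator; skipped here.
            for (s, x) in sum.iter_mut().zip(v) {
                *s += x;
            }
        }
        self.count += vectors.len() as u64;
    }

    fn merge(&mut self, other: &AvgState) {
        if let Some(other_sum) = &other.sum {
            let sum = self.sum.get_or_insert_with(|| vec![0.0; other_sum.len()]);
            for (s, x) in sum.iter_mut().zip(other_sum) {
                *s += x;
            }
        }
        self.count += other.count;
    }

    fn evaluate(&self) -> Option<Vec<f32>> {
        self.sum
            .as_ref()
            .map(|sum| sum.iter().map(|s| s / self.count as f32).collect())
    }
}

fn main() {
    let mut a = AvgState::default();
    a.update(&[vec![1.0, 2.0, 3.0], vec![4.0, 5.0, 6.0]]);

    let mut b = AvgState::default();
    b.update(&[vec![7.0, 8.0, 9.0]]);

    a.merge(&b);
    // (1 + 4 + 7) / 3, (2 + 5 + 8) / 3, (3 + 6 + 9) / 3
    assert_eq!(a.evaluate(), Some(vec![4.0, 5.0, 6.0]));
}
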
@@ -34,6 +34,8 @@ use crate::scalars::json::JsonFunction;
use crate::scalars::matches::MatchesFunction;
use crate::scalars::matches_term::MatchesTermFunction;
use crate::scalars::math::MathFunction;
use crate::scalars::primary_key::DecodePrimaryKeyFunction;
use crate::scalars::string::register_string_functions;
use crate::scalars::timestamp::TimestampFunction;
use crate::scalars::uddsketch_calc::UddSketchCalcFunction;
use crate::scalars::vector::VectorFunction as VectorScalarFunction;
@@ -142,6 +144,7 @@ pub static FUNCTION_REGISTRY: LazyLock<Arc<FunctionRegistry>> = LazyLock::new(||
    ExpressionFunction::register(&function_registry);
    UddSketchCalcFunction::register(&function_registry);
    HllCalcFunction::register(&function_registry);
    DecodePrimaryKeyFunction::register(&function_registry);

    // Full text search function
    MatchesFunction::register(&function_registry);
@@ -154,6 +157,9 @@ pub static FUNCTION_REGISTRY: LazyLock<Arc<FunctionRegistry>> = LazyLock::new(||
    // Json related functions
    JsonFunction::register(&function_registry);

    // String related functions
    register_string_functions(&function_registry);

    // Vector related functions
    VectorScalarFunction::register(&function_registry);
    VectorAggrFunction::register(&function_registry);
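
The hunks above follow the crate's registration pattern: each function module exposes a register hook, and a process-wide lazily initialized registry runs every hook exactly once when it is first touched. A self-contained sketch of that pattern follows; the Registry type, the f64 function signature, and the register_math_functions helper are stand-ins (the real hooks take &FunctionRegistry and register UDF objects, not bare function pointers).

use std::collections::HashMap;
use std::sync::{Arc, LazyLock};

// Stand-in registry; the real FunctionRegistry stores scalar/aggregate UDFs, but the wiring
// is the same: modules contribute entries while the LazyLock closure runs once.
#[derive(Default)]
struct Registry {
    scalars: HashMap<&'static str, fn(f64) -> f64>,
}

impl Registry {
    fn register_scalar(&mut self, name: &'static str, f: fn(f64) -> f64) {
        self.scalars.insert(name, f);
    }
}

// Mirrors calls like DecodePrimaryKeyFunction::register(...) and register_string_functions(...)
// in the diff above, just with toy math functions.
fn register_math_functions(registry: &mut Registry) {
    registry.register_scalar("abs", f64::abs);
    registry.register_scalar("sqrt", f64::sqrt);
}

static REGISTRY: LazyLock<Arc<Registry>> = LazyLock::new(|| {
    let mut registry = Registry::default();
    register_math_functions(&mut registry);
    Arc::new(registry)
});

fn main() {
    let sqrt = REGISTRY.scalars["sqrt"];
    assert_eq!(sqrt(9.0), 3.0);
}
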
@@ -25,7 +25,9 @@ use common_query::Output;
use common_query::error::Result;
use session::context::QueryContextRef;
use store_api::storage::RegionId;
use table::requests::{CompactTableRequest, DeleteRequest, FlushTableRequest, InsertRequest};
use table::requests::{
    BuildIndexTableRequest, CompactTableRequest, DeleteRequest, FlushTableRequest, InsertRequest,
};

/// A trait for handling table mutations in `QueryEngine`.
#[async_trait]
@@ -47,6 +49,13 @@ pub trait TableMutationHandler: Send + Sync {
        ctx: QueryContextRef,
    ) -> Result<AffectedRows>;

    /// Trigger an index build task for the table.
    async fn build_index(
        &self,
        request: BuildIndexTableRequest,
        ctx: QueryContextRef,
    ) -> Result<AffectedRows>;

    /// Trigger a flush task for a table region.
    async fn flush_region(&self, region_id: RegionId, ctx: QueryContextRef)
    -> Result<AffectedRows>;
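
The new build_index entry point sits alongside the existing flush/compact hooks: the query layer hands a per-table request to whatever component implements TableMutationHandler. The sketch below is a deliberately simplified, synchronous stand-in; the real method is async and uses GreptimeDB's request/result types, so every type name and field here is hypothetical.

// Hypothetical stand-ins for table::requests::BuildIndexTableRequest and AffectedRows.
type AffectedRows = usize;

#[derive(Debug)]
struct BuildIndexTableRequest {
    catalog_name: String,
    schema_name: String,
    table_name: String,
}

// Synchronous stand-in for the async TableMutationHandler::build_index hook.
trait BuildIndexHandler {
    fn build_index(&self, request: BuildIndexTableRequest) -> Result<AffectedRows, String>;
}

struct LoggingHandler;

impl BuildIndexHandler for LoggingHandler {
    fn build_index(&self, request: BuildIndexTableRequest) -> Result<AffectedRows, String> {
        // A real handler would dispatch an index-build task to the table's regions;
        // this stand-in only records the call and reports zero affected rows.
        println!(
            "build index requested for {}.{}.{}",
            request.catalog_name, request.schema_name, request.table_name
        );
        Ok(0)
    }
}

fn main() {
    let handler = LoggingHandler;
    let affected = handler
        .build_index(BuildIndexTableRequest {
            catalog_name: "greptime".to_string(),
            schema_name: "public".to_string(),
            table_name: "monitor".to_string(),
        })
        .expect("build_index should succeed");
    assert_eq!(affected, 0);
}
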
@@ -20,6 +20,8 @@ pub mod json;
pub mod matches;
pub mod matches_term;
pub mod math;
pub mod primary_key;
pub(crate) mod string;
pub mod vector;

pub(crate) mod hll_count;
@@ -20,7 +20,9 @@ use common_query::error;
use common_time::{Date, Timestamp};
use datafusion_common::DataFusionError;
use datafusion_common::arrow::array::{Array, AsArray, StringViewBuilder};
use datafusion_common::arrow::datatypes::{ArrowTimestampType, DataType, Date32Type, TimeUnit};
use datafusion_common::arrow::datatypes::{
    ArrowTimestampType, DataType, Date32Type, Date64Type, TimeUnit,
};
use datafusion_expr::{ColumnarValue, ScalarFunctionArgs, Signature};
use snafu::ResultExt;

@@ -40,6 +42,7 @@ impl Default for DateFormatFunction {
            signature: helper::one_of_sigs2(
                vec![
                    DataType::Date32,
                    DataType::Date64,
                    DataType::Timestamp(TimeUnit::Second, None),
                    DataType::Timestamp(TimeUnit::Millisecond, None),
                    DataType::Timestamp(TimeUnit::Microsecond, None),
@@ -115,6 +118,29 @@ impl Function for DateFormatFunction {
                    builder.append_option(result.as_deref());
                }
            }
            DataType::Date64 => {
                let left = left.as_primitive::<Date64Type>();
                for i in 0..size {
                    let date = left.is_valid(i).then(|| {
                        let ms = left.value(i);
                        Timestamp::new_millisecond(ms)
                    });
                    let format = formats.is_valid(i).then(|| formats.value(i));

                    let result = match (date, format) {
                        (Some(ts), Some(fmt)) => {
                            Some(ts.as_formatted_string(fmt, Some(timezone)).map_err(|e| {
                                DataFusionError::Execution(format!(
                                    "cannot format {ts:?} as '{fmt}': {e}"
                                ))
                            })?)
                        }
                        _ => None,
                    };

                    builder.append_option(result.as_deref());
                }
            }
            x => {
                return Err(DataFusionError::Execution(format!(
                    "unsupported input data type {x}"
@@ -137,7 +163,9 @@ mod tests {
    use std::sync::Arc;

    use arrow_schema::Field;
    use datafusion_common::arrow::array::{Date32Array, StringArray, TimestampSecondArray};
    use datafusion_common::arrow::array::{
        Date32Array, Date64Array, StringArray, TimestampSecondArray,
    };
    use datafusion_common::config::ConfigOptions;
    use datafusion_expr::{TypeSignature, Volatility};

@@ -166,7 +194,7 @@ mod tests {
            Signature {
                type_signature: TypeSignature::OneOf(sigs),
                volatility: Volatility::Immutable
            } if sigs.len() == 5));
            } if sigs.len() == 6));
    }

    #[test]
@@ -213,6 +241,50 @@ mod tests {
        }
    }

    #[test]
    fn test_date64_date_format() {
        let f = DateFormatFunction::default();

        let dates = vec![Some(123000), None, Some(42000), None];
        let formats = vec![
            "%Y-%m-%d %T.%3f",
            "%Y-%m-%d %T.%3f",
            "%Y-%m-%d %T.%3f",
            "%Y-%m-%d %T.%3f",
        ];
        let results = [
            Some("1970-01-01 00:02:03.000"),
            None,
            Some("1970-01-01 00:00:42.000"),
            None,
        ];

        let mut config_options = ConfigOptions::default();
        config_options.extensions.insert(FunctionContext::default());
        let config_options = Arc::new(config_options);

        let args = ScalarFunctionArgs {
            args: vec![
                ColumnarValue::Array(Arc::new(Date64Array::from(dates))),
                ColumnarValue::Array(Arc::new(StringArray::from_iter_values(formats))),
            ],
            arg_fields: vec![],
            number_rows: 4,
            return_field: Arc::new(Field::new("x", DataType::Utf8View, false)),
            config_options,
        };
        let result = f
            .invoke_with_args(args)
            .and_then(|x| x.to_array(4))
            .unwrap();
        let vector = result.as_string_view();

        assert_eq!(4, vector.len());
        for (actual, expect) in vector.iter().zip(results) {
            assert_eq!(actual, expect);
        }
    }

    #[test]
    fn test_date_date_format() {
        let f = DateFormatFunction::default();
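
The DataType::Date64 arm added above reuses the millisecond timestamp path: a Date64 cell is milliseconds since the Unix epoch, wrapped with Timestamp::new_millisecond and formatted with the requested pattern. As a quick, dependency-free check of that arithmetic (independent of arrow and GreptimeDB, and only covering the epoch day exercised by test_date64_date_format), the hypothetical helper below reproduces the expected strings by hand.

// 123000 ms => 123 s => 00:02:03.000 on 1970-01-01; the real code delegates the formatting
// to Timestamp::as_formatted_string, this helper only checks the millisecond arithmetic.
fn format_epoch_millis_first_day(ms: i64) -> String {
    assert!((0..86_400_000).contains(&ms), "sketch only covers 1970-01-01");
    let millis = ms % 1_000;
    let total_secs = ms / 1_000;
    let (h, m, s) = (total_secs / 3_600, (total_secs / 60) % 60, total_secs % 60);
    format!("1970-01-01 {h:02}:{m:02}:{s:02}.{millis:03}")
}

fn main() {
    assert_eq!(format_epoch_millis_first_day(123_000), "1970-01-01 00:02:03.000");
    assert_eq!(format_epoch_millis_first_day(42_000), "1970-01-01 00:00:42.000");
}
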
@@ -14,6 +14,7 @@

mod binary;
mod ctx;
mod if_func;
mod is_null;
mod unary;

@@ -22,6 +23,7 @@ pub use ctx::EvalContext;
pub use unary::scalar_unary_op;

use crate::function_registry::FunctionRegistry;
use crate::scalars::expression::if_func::IfFunction;
use crate::scalars::expression::is_null::IsNullFunction;

pub(crate) struct ExpressionFunction;
@@ -29,5 +31,6 @@ pub(crate) struct ExpressionFunction;
impl ExpressionFunction {
    pub fn register(registry: &FunctionRegistry) {
        registry.register_scalar(IsNullFunction::default());
        registry.register_scalar(IfFunction::default());
    }
}
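
The newly registered if_func module adds a conditional scalar function to the expression family. Without reproducing GreptimeDB's UDF machinery, one plausible row-wise semantics for an if(condition, then_value, else_value) over nullable columns is sketched below; the null handling shown (a null condition falls through to the else branch) is an assumption, not necessarily what the real IfFunction does.

// Columns are modelled as slices of Option<T>; the function is applied row by row.
fn if_rows<T: Copy>(
    cond: &[Option<bool>],
    then_col: &[Option<T>],
    else_col: &[Option<T>],
) -> Vec<Option<T>> {
    cond.iter()
        .zip(then_col.iter().zip(else_col))
        .map(|(c, (t, e))| if matches!(c, Some(true)) { *t } else { *e })
        .collect()
}

fn main() {
    let cond = [Some(true), Some(false), None];
    let then_col = [Some(1), Some(2), Some(3)];
    let else_col = [Some(10), None, Some(30)];
    assert_eq!(
        if_rows(&cond, &then_col, &else_col),
        vec![Some(1), None, Some(30)]
    );
}
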
Some files were not shown because too many files have changed in this diff.