diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index dd9209ffcd..549d6300ea 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -5,23 +5,23 @@
* @GreptimeTeam/db-approver
## [Module] Database Engine
-/src/index @zhongzc
+/src/index @evenyag @discord9 @WenyXu
/src/mito2 @evenyag @v0y4g3r @waynexia
-/src/query @evenyag
+/src/query @evenyag @waynexia @discord9
## [Module] Distributed
-/src/common/meta @MichaelScofield
-/src/common/procedure @MichaelScofield
-/src/meta-client @MichaelScofield
-/src/meta-srv @MichaelScofield
+/src/common/meta @MichaelScofield @WenyXu
+/src/common/procedure @MichaelScofield @WenyXu
+/src/meta-client @MichaelScofield @WenyXu
+/src/meta-srv @MichaelScofield @WenyXu
## [Module] Write Ahead Log
-/src/log-store @v0y4g3r
-/src/store-api @v0y4g3r
+/src/log-store @v0y4g3r @WenyXu
+/src/store-api @v0y4g3r @evenyag
## [Module] Metrics Engine
-/src/metric-engine @waynexia
-/src/promql @waynexia
+/src/metric-engine @waynexia @WenyXu
+/src/promql @waynexia @evenyag @discord9
## [Module] Flow
-/src/flow @zhongzc @waynexia
+/src/flow @discord9 @waynexia
diff --git a/.github/actions/build-greptime-binary/action.yml b/.github/actions/build-greptime-binary/action.yml
index ecbc05ed38..62ee9eb599 100644
--- a/.github/actions/build-greptime-binary/action.yml
+++ b/.github/actions/build-greptime-binary/action.yml
@@ -32,9 +32,23 @@ inputs:
description: Image Registry
required: false
default: 'docker.io'
+ large-page-size:
+ description: Build GreptimeDB with large page size (65536).
+ required: false
+ default: 'false'
+
runs:
using: composite
steps:
+ - name: Set extra build environment variables
+ shell: bash
+ run: |
+ if [[ '${{ inputs.large-page-size }}' == 'true' ]]; then
+ echo 'EXTRA_BUILD_ENVS="JEMALLOC_SYS_WITH_LG_PAGE=16"' >> $GITHUB_ENV
+ else
+ echo 'EXTRA_BUILD_ENVS=' >> $GITHUB_ENV
+ fi
+
- name: Build greptime binary
shell: bash
if: ${{ inputs.build-android-artifacts == 'false' }}
@@ -45,7 +59,8 @@ runs:
FEATURES=${{ inputs.features }} \
BASE_IMAGE=${{ inputs.base-image }} \
IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
- IMAGE_REGISTRY=${{ inputs.image-registry }}
+ IMAGE_REGISTRY=${{ inputs.image-registry }} \
+ EXTRA_BUILD_ENVS=$EXTRA_BUILD_ENVS
- name: Upload artifacts
uses: ./.github/actions/upload-artifacts
diff --git a/.github/actions/build-linux-artifacts/action.yml b/.github/actions/build-linux-artifacts/action.yml
index 9c88b25075..3cb9c43955 100644
--- a/.github/actions/build-linux-artifacts/action.yml
+++ b/.github/actions/build-linux-artifacts/action.yml
@@ -27,6 +27,10 @@ inputs:
description: Working directory to build the artifacts
required: false
default: .
+ large-page-size:
+ description: Build GreptimeDB with large page size (65536).
+ required: false
+ default: 'false'
runs:
using: composite
steps:
@@ -59,6 +63,7 @@ runs:
working-dir: ${{ inputs.working-dir }}
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}
+ large-page-size: ${{ inputs.large-page-size }}
- name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of last build.
shell: bash
@@ -77,6 +82,7 @@ runs:
working-dir: ${{ inputs.working-dir }}
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}
+ large-page-size: ${{ inputs.large-page-size }}
- name: Build greptime on android base image
uses: ./.github/actions/build-greptime-binary
@@ -89,3 +95,4 @@ runs:
build-android-artifacts: true
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}
+ large-page-size: ${{ inputs.large-page-size }}
diff --git a/.github/scripts/deploy-greptimedb.sh b/.github/scripts/deploy-greptimedb.sh
index fca21993b4..10831f8625 100755
--- a/.github/scripts/deploy-greptimedb.sh
+++ b/.github/scripts/deploy-greptimedb.sh
@@ -7,6 +7,8 @@ KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.32.0}"
ENABLE_STANDALONE_MODE="${ENABLE_STANDALONE_MODE:-true}"
DEFAULT_INSTALL_NAMESPACE=${DEFAULT_INSTALL_NAMESPACE:-default}
GREPTIMEDB_IMAGE_TAG=${GREPTIMEDB_IMAGE_TAG:-latest}
+GREPTIMEDB_OPERATOR_IMAGE_TAG=${GREPTIMEDB_OPERATOR_IMAGE_TAG:-v0.5.1}
+GREPTIMEDB_INITIALIZER_IMAGE_TAG="${GREPTIMEDB_OPERATOR_IMAGE_TAG}"
GREPTIME_CHART="https://greptimeteam.github.io/helm-charts/"
ETCD_CHART="oci://registry-1.docker.io/bitnamicharts/etcd"
ETCD_CHART_VERSION="${ETCD_CHART_VERSION:-12.0.8}"
@@ -58,7 +60,7 @@ function deploy_greptimedb_operator() {
# Use the latest chart and image.
helm upgrade --install greptimedb-operator greptime/greptimedb-operator \
--create-namespace \
- --set image.tag=latest \
+ --set image.tag="$GREPTIMEDB_OPERATOR_IMAGE_TAG" \
-n "$DEFAULT_INSTALL_NAMESPACE"
# Wait for greptimedb-operator to be ready.
@@ -78,6 +80,7 @@ function deploy_greptimedb_cluster() {
helm upgrade --install "$cluster_name" greptime/greptimedb-cluster \
--create-namespace \
--set image.tag="$GREPTIMEDB_IMAGE_TAG" \
+ --set initializer.tag="$GREPTIMEDB_INITIALIZER_IMAGE_TAG" \
--set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
--set meta.backendStorage.etcd.storeKeyPrefix="$cluster_name" \
-n "$install_namespace"
@@ -115,6 +118,7 @@ function deploy_greptimedb_cluster_with_s3_storage() {
helm upgrade --install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \
--create-namespace \
--set image.tag="$GREPTIMEDB_IMAGE_TAG" \
+ --set initializer.tag="$GREPTIMEDB_INITIALIZER_IMAGE_TAG" \
--set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
--set meta.backendStorage.etcd.storeKeyPrefix="$cluster_name" \
--set objectStorage.s3.bucket="$AWS_CI_TEST_BUCKET" \
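(Note on the two new variables above: they pin the operator chart's `image.tag` and the cluster chart's `initializer.tag` to the same value, defaulting to `v0.5.1` while remaining overridable from the environment. A minimal sketch of a local invocation under that assumption; the flag-free call and the chosen override values are illustrative and not part of this patch:)

```shell
# Hypothetical local run: override the defaults the script would otherwise use.
# GREPTIMEDB_INITIALIZER_IMAGE_TAG is derived from GREPTIMEDB_OPERATOR_IMAGE_TAG
# inside the script, so overriding the operator tag also moves the initializer tag.
GREPTIMEDB_IMAGE_TAG=latest \
GREPTIMEDB_OPERATOR_IMAGE_TAG=v0.5.1 \
ENABLE_STANDALONE_MODE=false \
  ./.github/scripts/deploy-greptimedb.sh
```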
diff --git a/.github/scripts/update-helm-charts-version.sh b/.github/scripts/update-helm-charts-version.sh
index d501ed8d02..e60e991846 100755
--- a/.github/scripts/update-helm-charts-version.sh
+++ b/.github/scripts/update-helm-charts-version.sh
@@ -39,8 +39,11 @@ update_helm_charts_version() {
--body "This PR updates the GreptimeDB version." \
--base main \
--head $BRANCH_NAME \
- --reviewer zyy17 \
- --reviewer daviderli614
+ --reviewer sunng87 \
+ --reviewer daviderli614 \
+ --reviewer killme2008 \
+ --reviewer evenyag \
+ --reviewer fengjiachun
}
update_helm_charts_version
diff --git a/.github/scripts/update-homebrew-greptme-version.sh b/.github/scripts/update-homebrew-greptme-version.sh
index 4abf4f2218..f474f19778 100755
--- a/.github/scripts/update-homebrew-greptme-version.sh
+++ b/.github/scripts/update-homebrew-greptme-version.sh
@@ -35,8 +35,11 @@ update_homebrew_greptime_version() {
--body "This PR updates the GreptimeDB version." \
--base main \
--head $BRANCH_NAME \
- --reviewer zyy17 \
- --reviewer daviderli614
+ --reviewer sunng87 \
+ --reviewer daviderli614 \
+ --reviewer killme2008 \
+ --reviewer evenyag \
+ --reviewer fengjiachun
}
update_homebrew_greptime_version
diff --git a/.github/workflows/dev-build.yml b/.github/workflows/dev-build.yml
index fad3e316e8..021867e4ed 100644
--- a/.github/workflows/dev-build.yml
+++ b/.github/workflows/dev-build.yml
@@ -4,10 +4,11 @@ name: GreptimeDB Development Build
on:
workflow_dispatch: # Allows you to run this workflow manually.
inputs:
- repository:
- description: The public repository to build
+ large-page-size:
+ description: Build GreptimeDB with large page size (65536).
+ type: boolean
required: false
- default: GreptimeTeam/greptimedb
+ default: false
commit: # Note: We only pull the source code and use the current workflow to build the artifacts.
description: The commit to build
required: true
@@ -181,6 +182,7 @@ jobs:
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
+ large-page-size: ${{ inputs.large-page-size }}
build-linux-arm64-artifacts:
name: Build linux-arm64 artifacts
@@ -214,6 +216,7 @@ jobs:
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
+ large-page-size: ${{ inputs.large-page-size }}
release-images-to-dockerhub:
name: Build and push images to DockerHub
diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml
index 8dde424c8e..af5ddc5368 100644
--- a/.github/workflows/develop.yml
+++ b/.github/workflows/develop.yml
@@ -613,6 +613,9 @@ jobs:
- name: "MySQL Kvbackend"
opts: "--setup-mysql"
kafka: false
+ - name: "Flat format"
+ opts: "--enable-flat-format"
+ kafka: false
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
@@ -808,7 +811,7 @@ jobs:
- name: Setup external services
working-directory: tests-integration/fixtures
run: ../../.github/scripts/pull-test-deps-images.sh && docker compose up -d --wait
-
+
- name: Run nextest cases
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F dashboard -F pg_kvbackend -F mysql_kvbackend
env:
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index fc472e2d8b..71812a35bf 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -92,5 +92,6 @@ jobs:
mode:
- name: "Basic"
- name: "Remote WAL"
+ - name: "Flat format"
steps:
- run: 'echo "No action required"'
diff --git a/.github/workflows/multi-lang-tests.yml b/.github/workflows/multi-lang-tests.yml
new file mode 100644
index 0000000000..6da0a658dd
--- /dev/null
+++ b/.github/workflows/multi-lang-tests.yml
@@ -0,0 +1,57 @@
+name: Multi-language Integration Tests
+
+on:
+ push:
+ branches:
+ - main
+ workflow_dispatch:
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+ cancel-in-progress: true
+
+jobs:
+ build-greptimedb:
+ if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
+ name: Build GreptimeDB binary
+ runs-on: ubuntu-latest
+ timeout-minutes: 60
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ persist-credentials: false
+ - uses: arduino/setup-protoc@v3
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+ - uses: actions-rust-lang/setup-rust-toolchain@v1
+ - uses: Swatinem/rust-cache@v2
+ with:
+ shared-key: "multi-lang-build"
+ cache-all-crates: "true"
+ save-if: ${{ github.ref == 'refs/heads/main' }}
+ - name: Install cargo-gc-bin
+ shell: bash
+ run: cargo install cargo-gc-bin --force
+ - name: Build greptime binary
+ shell: bash
+ run: cargo gc -- --bin greptime --features "pg_kvbackend,mysql_kvbackend"
+ - name: Pack greptime binary
+ shell: bash
+ run: |
+ mkdir bin && \
+ mv ./target/debug/greptime bin
+ - name: Print greptime binary info
+ run: ls -lh bin
+ - name: Upload greptime binary
+ uses: actions/upload-artifact@v4
+ with:
+ name: greptime-bin
+ path: bin/
+ retention-days: 1
+
+ run-multi-lang-tests:
+ name: Run Multi-language SDK Tests
+ needs: build-greptimedb
+ uses: ./.github/workflows/run-multi-lang-tests.yml
+ with:
+ artifact-name: greptime-bin
diff --git a/.github/workflows/nightly-build.yml b/.github/workflows/nightly-build.yml
index 6640d1d3df..710a767334 100644
--- a/.github/workflows/nightly-build.yml
+++ b/.github/workflows/nightly-build.yml
@@ -174,6 +174,18 @@ jobs:
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
+ run-multi-lang-tests:
+ name: Run Multi-language SDK Tests
+ if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'schedule' }}
+ needs: [
+ allocate-runners,
+ build-linux-amd64-artifacts,
+ ]
+ uses: ./.github/workflows/run-multi-lang-tests.yml
+ with:
+ artifact-name: greptime-linux-amd64-${{ needs.allocate-runners.outputs.version }}
+ artifact-is-tarball: true
+
release-images-to-dockerhub:
name: Build and push images to DockerHub
if: ${{ inputs.release_images || github.event_name == 'schedule' }}
@@ -301,7 +313,8 @@ jobs:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
name: Send notification to Greptime team
needs: [
- release-images-to-dockerhub
+ release-images-to-dockerhub,
+ run-multi-lang-tests,
]
runs-on: ubuntu-latest
permissions:
@@ -319,17 +332,17 @@ jobs:
run: pnpm tsx bin/report-ci-failure.ts
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
+ CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' && (needs.run-multi-lang-tests.result == 'success' || needs.run-multi-lang-tests.result == 'skipped') }}
- name: Notify nightly build successful result
uses: slackapi/slack-github-action@v1.23.0
- if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
+ if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' && (needs.run-multi-lang-tests.result == 'success' || needs.run-multi-lang-tests.result == 'skipped') }}
with:
payload: |
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
- name: Notify nightly build failed result
uses: slackapi/slack-github-action@v1.23.0
- if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' }}
+ if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' || needs.run-multi-lang-tests.result == 'failure' }}
with:
payload: |
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index bc9da93b9c..614500fab1 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -215,6 +215,18 @@ jobs:
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
+ run-multi-lang-tests:
+ name: Run Multi-language SDK Tests
+ if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
+ needs: [
+ allocate-runners,
+ build-linux-amd64-artifacts,
+ ]
+ uses: ./.github/workflows/run-multi-lang-tests.yml
+ with:
+ artifact-name: greptime-linux-amd64-${{ needs.allocate-runners.outputs.version }}
+ artifact-is-tarball: true
+
build-macos-artifacts:
name: Build macOS artifacts
strategy:
@@ -303,6 +315,7 @@ jobs:
allocate-runners,
build-linux-amd64-artifacts,
build-linux-arm64-artifacts,
+ run-multi-lang-tests,
]
runs-on: ubuntu-latest
outputs:
@@ -381,6 +394,7 @@ jobs:
build-macos-artifacts,
build-windows-artifacts,
release-images-to-dockerhub,
+ run-multi-lang-tests,
]
runs-on: ubuntu-latest
steps:
diff --git a/.github/workflows/run-multi-lang-tests.yml b/.github/workflows/run-multi-lang-tests.yml
new file mode 100644
index 0000000000..f744d7a644
--- /dev/null
+++ b/.github/workflows/run-multi-lang-tests.yml
@@ -0,0 +1,194 @@
+# Reusable workflow for running multi-language SDK tests against GreptimeDB
+# Used by: multi-lang-tests.yml, release.yml, nightly-build.yml
+# Supports both direct binary artifacts and tarball artifacts
+
+name: Run Multi-language SDK Tests
+
+on:
+ workflow_call:
+ inputs:
+ artifact-name:
+ required: true
+ type: string
+ description: 'Name of the artifact containing greptime binary'
+ http-port:
+ required: false
+ type: string
+ default: '4000'
+ description: 'HTTP server port'
+ mysql-port:
+ required: false
+ type: string
+ default: '4002'
+ description: 'MySQL server port'
+ postgres-port:
+ required: false
+ type: string
+ default: '4003'
+ description: 'PostgreSQL server port'
+ db-name:
+ required: false
+ type: string
+ default: 'test_db'
+ description: 'Test database name'
+ username:
+ required: false
+ type: string
+ default: 'greptime_user'
+ description: 'Authentication username'
+ password:
+ required: false
+ type: string
+ default: 'greptime_pwd'
+ description: 'Authentication password'
+ timeout-minutes:
+ required: false
+ type: number
+ default: 30
+ description: 'Job timeout in minutes'
+ artifact-is-tarball:
+ required: false
+ type: boolean
+ default: false
+ description: 'Whether the artifact is a tarball (tar.gz) that needs to be extracted'
+
+jobs:
+ run-tests:
+ name: Run Multi-language SDK Tests
+ runs-on: ubuntu-latest
+ timeout-minutes: ${{ inputs.timeout-minutes }}
+ steps:
+ - name: Checkout greptimedb-tests repository
+ uses: actions/checkout@v4
+ with:
+ repository: GreptimeTeam/greptimedb-tests
+ persist-credentials: false
+
+ - name: Download pre-built greptime binary
+ uses: actions/download-artifact@v4
+ with:
+ name: ${{ inputs.artifact-name }}
+ path: artifact
+
+ - name: Setup greptime binary
+ run: |
+ mkdir -p bin
+ if [ "${{ inputs.artifact-is-tarball }}" = "true" ]; then
+ # Extract tarball and find greptime binary
+ tar -xzf artifact/*.tar.gz -C artifact
+ find artifact -name "greptime" -type f -exec cp {} bin/greptime \;
+ else
+ # Direct binary format
+ if [ -f artifact/greptime ]; then
+ cp artifact/greptime bin/greptime
+ else
+ cp artifact/* bin/greptime
+ fi
+ fi
+ chmod +x ./bin/greptime
+ ls -lh ./bin/greptime
+ ./bin/greptime --version
+
+ - name: Setup Java 17
+ uses: actions/setup-java@v4
+ with:
+ distribution: 'temurin'
+ java-version: '17'
+ cache: 'maven'
+
+ - name: Setup Python 3.8
+ uses: actions/setup-python@v5
+ with:
+ python-version: '3.8'
+
+ - name: Setup Go 1.24
+ uses: actions/setup-go@v5
+ with:
+ go-version: '1.24'
+ cache: true
+ cache-dependency-path: go-tests/go.sum
+
+ - name: Set up Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version: '18'
+
+ - name: Install Python dependencies
+ run: |
+ pip install mysql-connector-python psycopg2-binary
+ python3 -c "import mysql.connector; print(f'mysql-connector-python {mysql.connector.__version__}')"
+ python3 -c "import psycopg2; print(f'psycopg2 {psycopg2.__version__}')"
+
+ - name: Install Go dependencies
+ working-directory: go-tests
+ run: |
+ go mod download
+ go mod verify
+ go version
+
+ - name: Kill existing GreptimeDB processes
+ run: |
+ pkill -f greptime || true
+ sleep 2
+
+ - name: Start GreptimeDB standalone
+ run: |
+ ./bin/greptime standalone start \
+ --http-addr 0.0.0.0:${{ inputs.http-port }} \
+ --rpc-addr 0.0.0.0:4001 \
+ --mysql-addr 0.0.0.0:${{ inputs.mysql-port }} \
+ --postgres-addr 0.0.0.0:${{ inputs.postgres-port }} \
+ --user-provider=static_user_provider:cmd:${{ inputs.username }}=${{ inputs.password }} > /tmp/greptimedb.log 2>&1 &
+
+ - name: Wait for GreptimeDB to be ready
+ run: |
+ echo "Waiting for GreptimeDB..."
+ for i in {1..60}; do
+ if curl -sf http://localhost:${{ inputs.http-port }}/health > /dev/null; then
+ echo "✅ GreptimeDB is ready"
+ exit 0
+ fi
+ sleep 2
+ done
+ echo "❌ GreptimeDB failed to start"
+ cat /tmp/greptimedb.log
+ exit 1
+
+ - name: Run multi-language tests
+ env:
+ DB_NAME: ${{ inputs.db-name }}
+ MYSQL_HOST: 127.0.0.1
+ MYSQL_PORT: ${{ inputs.mysql-port }}
+ POSTGRES_HOST: 127.0.0.1
+ POSTGRES_PORT: ${{ inputs.postgres-port }}
+ HTTP_HOST: 127.0.0.1
+ HTTP_PORT: ${{ inputs.http-port }}
+ GREPTIME_USERNAME: ${{ inputs.username }}
+ GREPTIME_PASSWORD: ${{ inputs.password }}
+ run: |
+ chmod +x ./run_tests.sh
+ ./run_tests.sh
+
+ - name: Collect logs on failure
+ if: failure()
+ run: |
+ echo "=== GreptimeDB Logs ==="
+ cat /tmp/greptimedb.log || true
+
+ - name: Upload test logs on failure
+ if: failure()
+ uses: actions/upload-artifact@v4
+ with:
+ name: test-logs
+ path: |
+ /tmp/greptimedb.log
+ java-tests/target/surefire-reports/
+ python-tests/.pytest_cache/
+ go-tests/*.log
+ **/test-output/
+ retention-days: 7
+
+ - name: Cleanup
+ if: always()
+ run: |
+ pkill -f greptime || true
diff --git a/Cargo.lock b/Cargo.lock
index 68872d0cd3..d166138407 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -212,8 +212,9 @@ checksum = "d301b3b94cb4b2f23d7917810addbbaff90738e0ca2be692bd027e70d7e0330c"
[[package]]
name = "api"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
+ "arrow-schema",
"common-base",
"common-decimal",
"common-error",
@@ -732,7 +733,7 @@ dependencies = [
[[package]]
name = "auth"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"api",
"async-trait",
@@ -1264,7 +1265,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "234113d19d0d7d613b40e86fb654acf958910802bcceab913a4f9e7cda03b1a4"
dependencies = [
"memchr",
- "regex-automata 0.4.9",
+ "regex-automata 0.4.13",
"serde",
]
@@ -1382,7 +1383,7 @@ dependencies = [
[[package]]
name = "cache"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"catalog",
"common-error",
@@ -1417,7 +1418,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "catalog"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"api",
"arrow",
@@ -1629,6 +1630,7 @@ dependencies = [
"chrono",
"chrono-tz-build",
"phf 0.11.3",
+ "uncased",
]
[[package]]
@@ -1639,6 +1641,8 @@ checksum = "8f10f8c9340e31fc120ff885fcdb54a0b48e474bbd77cab557f0c30a3e569402"
dependencies = [
"parse-zoneinfo",
"phf_codegen 0.11.3",
+ "phf_shared 0.11.3",
+ "uncased",
]
[[package]]
@@ -1759,7 +1763,7 @@ checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675"
[[package]]
name = "cli"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"async-stream",
"async-trait",
@@ -1812,7 +1816,7 @@ dependencies = [
[[package]]
name = "client"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"api",
"arc-swap",
@@ -1844,8 +1848,8 @@ dependencies = [
"serde_json",
"snafu 0.8.6",
"store-api",
- "substrait 0.18.0",
"substrait 0.37.3",
+ "substrait 1.0.0-beta.2",
"tokio",
"tokio-stream",
"tonic 0.13.1",
@@ -1885,7 +1889,7 @@ dependencies = [
[[package]]
name = "cmd"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"async-trait",
"auth",
@@ -1896,6 +1900,7 @@ dependencies = [
"clap 4.5.40",
"cli",
"client",
+ "colored",
"common-base",
"common-catalog",
"common-config",
@@ -1917,6 +1922,7 @@ dependencies = [
"common-wal",
"datanode",
"datatypes",
+ "either",
"etcd-client",
"file-engine",
"flow",
@@ -1932,7 +1938,9 @@ dependencies = [
"moka",
"nu-ansi-term",
"object-store",
+ "parquet",
"plugins",
+ "pprof",
"prometheus",
"prost 0.13.5",
"query",
@@ -1975,6 +1983,16 @@ version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75"
+[[package]]
+name = "colored"
+version = "2.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "117725a109d387c937a1533ce01b450cbde6b88abceea8473c4d7a85853cda3c"
+dependencies = [
+ "lazy_static",
+ "windows-sys 0.59.0",
+]
+
[[package]]
name = "comfy-table"
version = "7.1.2"
@@ -1994,7 +2012,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"
[[package]]
name = "common-base"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"anymap2",
"async-trait",
@@ -2004,9 +2022,11 @@ dependencies = [
"common-macro",
"common-test-util",
"futures",
+ "lazy_static",
"paste",
"pin-project",
"rand 0.9.1",
+ "regex",
"serde",
"snafu 0.8.6",
"tokio",
@@ -2016,16 +2036,18 @@ dependencies = [
[[package]]
name = "common-catalog"
-version = "0.18.0"
+version = "1.0.0-beta.2"
+dependencies = [
+ "const_format",
+]
[[package]]
name = "common-config"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"common-base",
"common-error",
"common-macro",
- "common-stat",
"common-telemetry",
"common-test-util",
"common-wal",
@@ -2045,7 +2067,7 @@ dependencies = [
[[package]]
name = "common-datasource"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"arrow",
"arrow-schema",
@@ -2080,7 +2102,7 @@ dependencies = [
[[package]]
name = "common-decimal"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"bigdecimal 0.4.8",
"common-error",
@@ -2093,7 +2115,7 @@ dependencies = [
[[package]]
name = "common-error"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"common-macro",
"http 1.3.1",
@@ -2104,7 +2126,7 @@ dependencies = [
[[package]]
name = "common-event-recorder"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"api",
"async-trait",
@@ -2126,7 +2148,7 @@ dependencies = [
[[package]]
name = "common-frontend"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"api",
"async-trait",
@@ -2148,7 +2170,7 @@ dependencies = [
[[package]]
name = "common-function"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"ahash 0.8.12",
"api",
@@ -2186,11 +2208,13 @@ dependencies = [
"hyperloglogplus",
"jsonb",
"memchr",
+ "mito-codec",
"nalgebra",
"num",
"num-traits",
"paste",
"pretty_assertions",
+ "regex",
"s2",
"serde",
"serde_json",
@@ -2206,7 +2230,7 @@ dependencies = [
[[package]]
name = "common-greptimedb-telemetry"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"async-trait",
"common-runtime",
@@ -2223,7 +2247,7 @@ dependencies = [
[[package]]
name = "common-grpc"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"api",
"arrow-flight",
@@ -2242,11 +2266,13 @@ dependencies = [
"hyper 1.6.0",
"hyper-util",
"lazy_static",
+ "notify",
"prost 0.13.5",
"rand 0.9.1",
"serde",
"serde_json",
"snafu 0.8.6",
+ "tempfile",
"tokio",
"tokio-util",
"tonic 0.13.1",
@@ -2256,7 +2282,7 @@ dependencies = [
[[package]]
name = "common-grpc-expr"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"api",
"common-base",
@@ -2276,7 +2302,7 @@ dependencies = [
[[package]]
name = "common-macro"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"greptime-proto",
"once_cell",
@@ -2287,7 +2313,7 @@ dependencies = [
[[package]]
name = "common-mem-prof"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"anyhow",
"common-error",
@@ -2303,7 +2329,7 @@ dependencies = [
[[package]]
name = "common-meta"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"anymap2",
"api",
@@ -2375,7 +2401,7 @@ dependencies = [
[[package]]
name = "common-options"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"common-grpc",
"humantime-serde",
@@ -2384,11 +2410,11 @@ dependencies = [
[[package]]
name = "common-plugins"
-version = "0.18.0"
+version = "1.0.0-beta.2"
[[package]]
name = "common-pprof"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"common-error",
"common-macro",
@@ -2400,7 +2426,7 @@ dependencies = [
[[package]]
name = "common-procedure"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"api",
"async-stream",
@@ -2429,7 +2455,7 @@ dependencies = [
[[package]]
name = "common-procedure-test"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"async-trait",
"common-procedure",
@@ -2439,7 +2465,7 @@ dependencies = [
[[package]]
name = "common-query"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"api",
"async-trait",
@@ -2454,6 +2480,7 @@ dependencies = [
"datafusion-expr",
"datatypes",
"futures-util",
+ "once_cell",
"serde",
"snafu 0.8.6",
"sqlparser",
@@ -2464,7 +2491,7 @@ dependencies = [
[[package]]
name = "common-recordbatch"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"arc-swap",
"common-base",
@@ -2488,7 +2515,7 @@ dependencies = [
[[package]]
name = "common-runtime"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"async-trait",
"clap 4.5.40",
@@ -2517,7 +2544,7 @@ dependencies = [
[[package]]
name = "common-session"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"serde",
"strum 0.27.1",
@@ -2525,7 +2552,7 @@ dependencies = [
[[package]]
name = "common-sql"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"common-base",
"common-decimal",
@@ -2543,19 +2570,22 @@ dependencies = [
[[package]]
name = "common-stat"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"common-base",
+ "common-runtime",
+ "common-telemetry",
"lazy_static",
"nix 0.30.1",
"num_cpus",
"prometheus",
"sysinfo",
+ "tokio",
]
[[package]]
name = "common-telemetry"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"backtrace",
"common-base",
@@ -2584,7 +2614,7 @@ dependencies = [
[[package]]
name = "common-test-util"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"client",
"common-grpc",
@@ -2597,7 +2627,7 @@ dependencies = [
[[package]]
name = "common-time"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"arrow",
"chrono",
@@ -2615,7 +2645,7 @@ dependencies = [
[[package]]
name = "common-version"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"build-data",
"cargo-manifest",
@@ -2626,7 +2656,7 @@ dependencies = [
[[package]]
name = "common-wal"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"common-base",
"common-error",
@@ -2649,7 +2679,7 @@ dependencies = [
[[package]]
name = "common-workload"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"common-telemetry",
"serde",
@@ -3711,9 +3741,9 @@ dependencies = [
[[package]]
name = "datafusion-pg-catalog"
-version = "0.11.0"
+version = "0.12.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f258caedd1593e7dca3bf53912249de6685fa224bcce897ede1fbb7b040ac6f6"
+checksum = "755393864c0c2dd95575ceed4b25e348686028e1b83d06f8f39914209999f821"
dependencies = [
"async-trait",
"datafusion",
@@ -3886,7 +3916,7 @@ dependencies = [
[[package]]
name = "datanode"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"api",
"arrow-flight",
@@ -3907,6 +3937,7 @@ dependencies = [
"common-query",
"common-recordbatch",
"common-runtime",
+ "common-stat",
"common-telemetry",
"common-test-util",
"common-time",
@@ -3949,7 +3980,7 @@ dependencies = [
[[package]]
name = "datatypes"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"arrow",
"arrow-array",
@@ -4585,7 +4616,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e24cb5a94bcae1e5408b0effca5cd7172ea3c5755049c5f3af4cd283a165298"
dependencies = [
"bit-set",
- "regex-automata 0.4.9",
+ "regex-automata 0.4.13",
"regex-syntax 0.8.7",
]
@@ -4621,7 +4652,7 @@ checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
[[package]]
name = "file-engine"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"api",
"async-trait",
@@ -4753,7 +4784,7 @@ checksum = "8bf7cc16383c4b8d58b9905a8509f02926ce3058053c056376248d958c9df1e8"
[[package]]
name = "flow"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"api",
"arrow",
@@ -4822,7 +4853,7 @@ dependencies = [
"sql",
"store-api",
"strum 0.27.1",
- "substrait 0.18.0",
+ "substrait 1.0.0-beta.2",
"table",
"tokio",
"tonic 0.13.1",
@@ -4877,7 +4908,7 @@ checksum = "28dd6caf6059519a65843af8fe2a3ae298b14b80179855aeb4adc2c1934ee619"
[[package]]
name = "frontend"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"api",
"arc-swap",
@@ -4904,6 +4935,7 @@ dependencies = [
"common-query",
"common-recordbatch",
"common-runtime",
+ "common-stat",
"common-telemetry",
"common-test-util",
"common-time",
@@ -5319,7 +5351,7 @@ dependencies = [
[[package]]
name = "greptime-proto"
version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=72a0d22e0f5f716b2ee21bca091f87a88c36e5ca#72a0d22e0f5f716b2ee21bca091f87a88c36e5ca"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=0df99f09f1d6785055b2d9da96fc4ecc2bdf6803#0df99f09f1d6785055b2d9da96fc4ecc2bdf6803"
dependencies = [
"prost 0.13.5",
"prost-types 0.13.5",
@@ -6087,7 +6119,7 @@ dependencies = [
[[package]]
name = "index"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"async-trait",
"asynchronous-codec",
@@ -6114,7 +6146,7 @@ dependencies = [
"rand 0.9.1",
"rand_chacha 0.9.0",
"regex",
- "regex-automata 0.4.9",
+ "regex-automata 0.4.13",
"roaring",
"serde",
"serde_json",
@@ -6299,17 +6331,6 @@ dependencies = [
"derive_utils",
]
-[[package]]
-name = "io-uring"
-version = "0.7.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4"
-dependencies = [
- "bitflags 2.9.1",
- "cfg-if",
- "libc",
-]
-
[[package]]
name = "ipnet"
version = "2.11.0"
@@ -6731,7 +6752,7 @@ version = "0.22.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5baa5e9ff84f1aefd264e6869907646538a52147a755d494517a8007fb48733"
dependencies = [
- "regex-automata 0.4.9",
+ "regex-automata 0.4.13",
"rustversion",
]
@@ -7027,7 +7048,7 @@ checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
[[package]]
name = "log-query"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"chrono",
"common-error",
@@ -7039,7 +7060,7 @@ dependencies = [
[[package]]
name = "log-store"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"async-stream",
"async-trait",
@@ -7346,7 +7367,7 @@ dependencies = [
[[package]]
name = "meta-client"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"api",
"async-trait",
@@ -7374,7 +7395,7 @@ dependencies = [
[[package]]
name = "meta-srv"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"api",
"async-trait",
@@ -7398,6 +7419,7 @@ dependencies = [
"common-procedure",
"common-procedure-test",
"common-runtime",
+ "common-stat",
"common-telemetry",
"common-time",
"common-version",
@@ -7421,7 +7443,9 @@ dependencies = [
"lazy_static",
"local-ip-address",
"once_cell",
+ "ordered-float 4.6.0",
"parking_lot 0.12.4",
+ "partition",
"prometheus",
"prost 0.13.5",
"rand 0.9.1",
@@ -7471,7 +7495,7 @@ dependencies = [
[[package]]
name = "metric-engine"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"api",
"aquamarine",
@@ -7489,6 +7513,7 @@ dependencies = [
"common-telemetry",
"common-test-util",
"common-time",
+ "common-wal",
"datafusion",
"datatypes",
"futures-util",
@@ -7565,7 +7590,7 @@ dependencies = [
[[package]]
name = "mito-codec"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"api",
"bytes",
@@ -7573,6 +7598,7 @@ dependencies = [
"common-decimal",
"common-error",
"common-macro",
+ "common-query",
"common-recordbatch",
"common-telemetry",
"common-time",
@@ -7589,7 +7615,7 @@ dependencies = [
[[package]]
name = "mito2"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"api",
"aquamarine",
@@ -8327,7 +8353,7 @@ dependencies = [
[[package]]
name = "object-store"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"anyhow",
"bytes",
@@ -8336,6 +8362,7 @@ dependencies = [
"common-macro",
"common-telemetry",
"common-test-util",
+ "derive_builder 0.20.2",
"futures",
"humantime-serde",
"lazy_static",
@@ -8506,7 +8533,7 @@ dependencies = [
[[package]]
name = "opensrv-mysql"
version = "0.8.0"
-source = "git+https://github.com/datafuselabs/opensrv?rev=a1fb4da215c8693c7e4f62be249a01b7fec52997#a1fb4da215c8693c7e4f62be249a01b7fec52997"
+source = "git+https://github.com/datafuselabs/opensrv?tag=v0.10.0#074bd8fb81da3c9e6d6a098a482f3380478b9c0b"
dependencies = [
"async-trait",
"byteorder",
@@ -8612,7 +8639,7 @@ dependencies = [
[[package]]
name = "operator"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"ahash 0.8.12",
"api",
@@ -8638,6 +8665,7 @@ dependencies = [
"common-recordbatch",
"common-runtime",
"common-sql",
+ "common-stat",
"common-telemetry",
"common-test-util",
"common-time",
@@ -8649,6 +8677,7 @@ dependencies = [
"futures",
"futures-util",
"humantime",
+ "itertools 0.14.0",
"jsonb",
"lazy_static",
"meta-client",
@@ -8670,7 +8699,7 @@ dependencies = [
"sql",
"sqlparser",
"store-api",
- "substrait 0.18.0",
+ "substrait 1.0.0-beta.2",
"table",
"tokio",
"tokio-util",
@@ -8956,7 +8985,7 @@ dependencies = [
[[package]]
name = "partition"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"api",
"async-trait",
@@ -9160,10 +9189,21 @@ dependencies = [
]
[[package]]
-name = "pgwire"
-version = "0.34.2"
+name = "pg_interval"
+version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4f56a81b4fcc69016028f657a68f9b8e8a2a4b7d07684ca3298f2d3e7ff199ce"
+checksum = "fe46640b465e284b048ef065cbed8ef17a622878d310c724578396b4cfd00df2"
+dependencies = [
+ "bytes",
+ "chrono",
+ "postgres-types",
+]
+
+[[package]]
+name = "pgwire"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d331bb0eef5bc83a221c0a85b1f205bccf094d4f72a26ae1d68a1b1c535123b7"
dependencies = [
"async-trait",
"base64 0.22.1",
@@ -9179,6 +9219,7 @@ dependencies = [
"ring",
"rust_decimal",
"rustls-pki-types",
+ "ryu",
"serde",
"serde_json",
"stringprep",
@@ -9255,6 +9296,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5"
dependencies = [
"siphasher",
+ "uncased",
]
[[package]]
@@ -9300,7 +9342,7 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
[[package]]
name = "pipeline"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"ahash 0.8.12",
"api",
@@ -9456,9 +9498,10 @@ dependencies = [
[[package]]
name = "plugins"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"auth",
+ "catalog",
"clap 4.5.40",
"cli",
"common-base",
@@ -9467,6 +9510,7 @@ dependencies = [
"datanode",
"flow",
"frontend",
+ "meta-client",
"meta-srv",
"serde",
"snafu 0.8.6",
@@ -9756,7 +9800,7 @@ dependencies = [
[[package]]
name = "promql"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"ahash 0.8.12",
"async-trait",
@@ -10039,7 +10083,7 @@ dependencies = [
[[package]]
name = "puffin"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"async-compression 0.4.19",
"async-trait",
@@ -10081,7 +10125,7 @@ dependencies = [
[[package]]
name = "query"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"ahash 0.8.12",
"api",
@@ -10105,6 +10149,7 @@ dependencies = [
"common-query",
"common-recordbatch",
"common-runtime",
+ "common-stat",
"common-telemetry",
"common-time",
"datafusion",
@@ -10147,7 +10192,7 @@ dependencies = [
"sql",
"sqlparser",
"store-api",
- "substrait 0.18.0",
+ "substrait 1.0.0-beta.2",
"table",
"tokio",
"tokio-stream",
@@ -10464,13 +10509,13 @@ dependencies = [
[[package]]
name = "regex"
-version = "1.11.1"
+version = "1.12.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
+checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4"
dependencies = [
"aho-corasick",
"memchr",
- "regex-automata 0.4.9",
+ "regex-automata 0.4.13",
"regex-syntax 0.8.7",
]
@@ -10485,9 +10530,9 @@ dependencies = [
[[package]]
name = "regex-automata"
-version = "0.4.9"
+version = "0.4.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908"
+checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c"
dependencies = [
"aho-corasick",
"memchr",
@@ -11483,7 +11528,7 @@ dependencies = [
[[package]]
name = "servers"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"ahash 0.8.12",
"api",
@@ -11560,6 +11605,7 @@ dependencies = [
"otel-arrow-rust",
"parking_lot 0.12.4",
"permutation",
+ "pg_interval",
"pgwire",
"pin-project",
"pipeline",
@@ -11601,6 +11647,7 @@ dependencies = [
"tower 0.5.2",
"tower-http 0.6.6",
"tracing",
+ "tracing-opentelemetry",
"urlencoding",
"uuid",
"vrl",
@@ -11609,7 +11656,7 @@ dependencies = [
[[package]]
name = "session"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"ahash 0.8.12",
"api",
@@ -11943,7 +11990,7 @@ dependencies = [
[[package]]
name = "sql"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"api",
"arrow-buffer",
@@ -11964,6 +12011,7 @@ dependencies = [
"datafusion-physical-expr",
"datafusion-sql",
"datatypes",
+ "either",
"hex",
"humantime",
"iso8601",
@@ -12002,7 +12050,7 @@ dependencies = [
[[package]]
name = "sqlness-runner"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"async-trait",
"clap 4.5.40",
@@ -12279,7 +12327,7 @@ dependencies = [
[[package]]
name = "standalone"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"async-trait",
"catalog",
@@ -12320,7 +12368,7 @@ checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
[[package]]
name = "store-api"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"api",
"aquamarine",
@@ -12485,28 +12533,6 @@ dependencies = [
"winapi",
]
-[[package]]
-name = "substrait"
-version = "0.18.0"
-dependencies = [
- "async-trait",
- "bytes",
- "common-error",
- "common-function",
- "common-macro",
- "common-telemetry",
- "datafusion",
- "datafusion-common",
- "datafusion-expr",
- "datafusion-substrait",
- "datatypes",
- "promql",
- "prost 0.13.5",
- "snafu 0.8.6",
- "substrait 0.37.3",
- "tokio",
-]
-
[[package]]
name = "substrait"
version = "0.37.3"
@@ -12553,6 +12579,28 @@ dependencies = [
"walkdir",
]
+[[package]]
+name = "substrait"
+version = "1.0.0-beta.2"
+dependencies = [
+ "async-trait",
+ "bytes",
+ "common-error",
+ "common-function",
+ "common-macro",
+ "common-telemetry",
+ "datafusion",
+ "datafusion-common",
+ "datafusion-expr",
+ "datafusion-substrait",
+ "datatypes",
+ "promql",
+ "prost 0.13.5",
+ "snafu 0.8.6",
+ "substrait 0.37.3",
+ "tokio",
+]
+
[[package]]
name = "subtle"
version = "2.6.1"
@@ -12656,7 +12704,7 @@ dependencies = [
[[package]]
name = "table"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"api",
"async-trait",
@@ -12895,7 +12943,7 @@ dependencies = [
"getrandom 0.3.3",
"once_cell",
"rustix 1.0.7",
- "windows-sys 0.59.0",
+ "windows-sys 0.61.2",
]
[[package]]
@@ -12925,7 +12973,7 @@ checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683"
[[package]]
name = "tests-fuzz"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"arbitrary",
"async-trait",
@@ -12969,7 +13017,7 @@ dependencies = [
[[package]]
name = "tests-integration"
-version = "0.18.0"
+version = "1.0.0-beta.2"
dependencies = [
"api",
"arrow-flight",
@@ -12995,6 +13043,7 @@ dependencies = [
"common-query",
"common-recordbatch",
"common-runtime",
+ "common-stat",
"common-telemetry",
"common-test-util",
"common-time",
@@ -13018,6 +13067,7 @@ dependencies = [
"loki-proto",
"meta-client",
"meta-srv",
+ "mito2",
"moka",
"mysql_async",
"object-store",
@@ -13042,7 +13092,7 @@ dependencies = [
"sqlx",
"standalone",
"store-api",
- "substrait 0.18.0",
+ "substrait 1.0.0-beta.2",
"table",
"tempfile",
"time",
@@ -13244,23 +13294,20 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "tokio"
-version = "1.47.1"
+version = "1.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038"
+checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408"
dependencies = [
- "backtrace",
"bytes",
- "io-uring",
"libc",
"mio",
"parking_lot 0.12.4",
"pin-project-lite",
"signal-hook-registry",
- "slab",
"socket2 0.6.0",
"tokio-macros",
"tracing",
- "windows-sys 0.59.0",
+ "windows-sys 0.61.2",
]
[[package]]
@@ -13275,9 +13322,9 @@ dependencies = [
[[package]]
name = "tokio-macros"
-version = "2.5.0"
+version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8"
+checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5"
dependencies = [
"proc-macro2",
"quote",
@@ -13955,6 +14002,15 @@ dependencies = [
"serde",
]
+[[package]]
+name = "uncased"
+version = "0.9.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e1b88fcfe09e89d3866a5c11019378088af2d24c3fbd4f0543f96b479ec90697"
+dependencies = [
+ "version_check",
+]
+
[[package]]
name = "unescaper"
version = "0.1.6"
@@ -14699,6 +14755,15 @@ dependencies = [
"windows-targets 0.52.6",
]
+[[package]]
+name = "windows-sys"
+version = "0.61.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc"
+dependencies = [
+ "windows-link 0.2.1",
+]
+
[[package]]
name = "windows-targets"
version = "0.48.5"
diff --git a/Cargo.toml b/Cargo.toml
index b76c5ae1cf..d0a2f66f58 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -74,7 +74,7 @@ members = [
resolver = "2"
[workspace.package]
-version = "0.18.0"
+version = "1.0.0-beta.2"
edition = "2024"
license = "Apache-2.0"
@@ -118,9 +118,10 @@ bitflags = "2.4.1"
bytemuck = "1.12"
bytes = { version = "1.7", features = ["serde"] }
chrono = { version = "0.4", features = ["serde"] }
-chrono-tz = "0.10.1"
+chrono-tz = { version = "0.10.1", features = ["case-insensitive"] }
clap = { version = "4.4", features = ["derive"] }
config = "0.13.0"
+const_format = "0.2"
crossbeam-utils = "0.8"
dashmap = "6.1"
datafusion = "50"
@@ -130,7 +131,7 @@ datafusion-functions = "50"
datafusion-functions-aggregate-common = "50"
datafusion-optimizer = "50"
datafusion-orc = "0.5"
-datafusion-pg-catalog = "0.11"
+datafusion-pg-catalog = "0.12.2"
datafusion-physical-expr = "50"
datafusion-physical-plan = "50"
datafusion-sql = "50"
@@ -147,7 +148,7 @@ etcd-client = { git = "https://github.com/GreptimeTeam/etcd-client", rev = "f62d
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "72a0d22e0f5f716b2ee21bca091f87a88c36e5ca" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "0df99f09f1d6785055b2d9da96fc4ecc2bdf6803" }
hex = "0.4"
http = "1"
humantime = "2.1"
@@ -191,7 +192,7 @@ prost-types = "0.13"
raft-engine = { version = "0.4.1", default-features = false }
rand = "0.9"
ratelimit = "0.10"
-regex = "1.8"
+regex = "1.12"
regex-automata = "0.4"
reqwest = { version = "0.12", default-features = false, features = [
"json",
@@ -218,12 +219,7 @@ similar-asserts = "1.6.0"
smallvec = { version = "1", features = ["serde"] }
snafu = "0.8"
sqlparser = { version = "0.58.0", default-features = false, features = ["std", "visitor", "serde"] }
-sqlx = { version = "0.8", features = [
- "runtime-tokio-rustls",
- "mysql",
- "postgres",
- "chrono",
-] }
+sqlx = { version = "0.8", default-features = false, features = ["any", "macros", "json", "runtime-tokio-rustls"] }
strum = { version = "0.27", features = ["derive"] }
sysinfo = "0.33"
tempfile = "3"
@@ -238,6 +234,7 @@ tower = "0.5"
tower-http = "0.6"
tracing = "0.1"
tracing-appender = "0.2"
+tracing-opentelemetry = "0.31.0"
tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] }
typetag = "0.2"
uuid = { version = "1.17", features = ["serde", "v4", "fast-rng"] }
diff --git a/Makefile b/Makefile
index a200244029..91fb600d14 100644
--- a/Makefile
+++ b/Makefile
@@ -17,6 +17,8 @@ CARGO_REGISTRY_CACHE ?= ${HOME}/.cargo/registry
ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/')
OUTPUT_DIR := $(shell if [ "$(RELEASE)" = "true" ]; then echo "release"; elif [ ! -z "$(CARGO_PROFILE)" ]; then echo "$(CARGO_PROFILE)" ; else echo "debug"; fi)
SQLNESS_OPTS ?=
+EXTRA_BUILD_ENVS ?=
+ASSEMBLED_EXTRA_BUILD_ENV := $(foreach var,$(EXTRA_BUILD_ENVS),-e $(var))
# The arguments for running integration tests.
ETCD_VERSION ?= v3.5.9
@@ -83,6 +85,7 @@ build: ## Build debug version greptime.
.PHONY: build-by-dev-builder
build-by-dev-builder: ## Build greptime by dev-builder.
docker run --network=host \
+ ${ASSEMBLED_EXTRA_BUILD_ENV} \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} \
make build \
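(Note on `EXTRA_BUILD_ENVS`: it is expected to be a space-separated list of `KEY=VALUE` pairs, and the `foreach` above turns each pair into a `-e KEY=VALUE` flag on the dev-builder `docker run`. A minimal sketch of the large-page-size path, assuming a direct `make` call; only the variable name and the jemalloc setting come from this patch, and `JEMALLOC_SYS_WITH_LG_PAGE=16` requests 2^16 = 65536-byte pages, matching the new `large-page-size` input:)

```shell
# Hypothetical invocation of the dev-builder target with an extra build env;
# inside the recipe this expands to `docker run ... -e JEMALLOC_SYS_WITH_LG_PAGE=16 ...`.
make build-by-dev-builder \
  EXTRA_BUILD_ENVS="JEMALLOC_SYS_WITH_LG_PAGE=16"
```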
diff --git a/README.md b/README.md
index 94944c36ba..6c83582a24 100644
--- a/README.md
+++ b/README.md
@@ -12,8 +12,7 @@
- GreptimeCloud |
- User Guide |
+ User Guide |
API Docs |
Roadmap 2025
@@ -67,17 +66,24 @@
## Introduction
-**GreptimeDB** is an open-source, cloud-native database purpose-built for the unified collection and analysis of observability data (metrics, logs, and traces). Whether you’re operating on the edge, in the cloud, or across hybrid environments, GreptimeDB empowers real-time insights at massive scale — all in one system.
+**GreptimeDB** is an open-source, cloud-native database that unifies metrics, logs, and traces, enabling real-time observability at any scale — across edge, cloud, and hybrid environments.
## Features
| Feature | Description |
| --------- | ----------- |
-| [Unified Observability Data](https://docs.greptime.com/user-guide/concepts/why-greptimedb) | Store metrics, logs, and traces as timestamped, contextual wide events. Query via [SQL](https://docs.greptime.com/user-guide/query-data/sql), [PromQL](https://docs.greptime.com/user-guide/query-data/promql), and [streaming](https://docs.greptime.com/user-guide/flow-computation/overview). |
-| [High Performance & Cost Effective](https://docs.greptime.com/user-guide/manage-data/data-index) | Written in Rust, with a distributed query engine, [rich indexing](https://docs.greptime.com/user-guide/manage-data/data-index), and optimized columnar storage, delivering sub-second responses at PB scale. |
-| [Cloud-Native Architecture](https://docs.greptime.com/user-guide/concepts/architecture) | Designed for [Kubernetes](https://docs.greptime.com/user-guide/deployments-administration/deploy-on-kubernetes/greptimedb-operator-management), with compute/storage separation, native object storage (AWS S3, Azure Blob, etc.) and seamless cross-cloud access. |
-| [Developer-Friendly](https://docs.greptime.com/user-guide/protocols/overview) | Access via SQL/PromQL interfaces, REST API, MySQL/PostgreSQL protocols, and popular ingestion [protocols](https://docs.greptime.com/user-guide/protocols/overview). |
-| [Flexible Deployment](https://docs.greptime.com/user-guide/deployments-administration/overview) | Deploy anywhere: edge (including ARM/[Android](https://docs.greptime.com/user-guide/deployments-administration/run-on-android)) or cloud, with unified APIs and efficient data sync. |
+| [All-in-One Observability](https://docs.greptime.com/user-guide/concepts/why-greptimedb) | OpenTelemetry-native platform unifying metrics, logs, and traces. Query via [SQL](https://docs.greptime.com/user-guide/query-data/sql), [PromQL](https://docs.greptime.com/user-guide/query-data/promql), and [Flow](https://docs.greptime.com/user-guide/flow-computation/overview). |
+| [High Performance](https://docs.greptime.com/user-guide/manage-data/data-index) | Written in Rust with [rich indexing](https://docs.greptime.com/user-guide/manage-data/data-index) (inverted, fulltext, skipping, vector), delivering sub-second responses at PB scale. |
+| [Cost Efficiency](https://docs.greptime.com/user-guide/concepts/architecture) | 50x lower operational and storage costs with compute-storage separation and native object storage (S3, Azure Blob, etc.). |
+| [Cloud-Native & Scalable](https://docs.greptime.com/user-guide/deployments-administration/deploy-on-kubernetes/greptimedb-operator-management) | Purpose-built for [Kubernetes](https://docs.greptime.com/user-guide/deployments-administration/deploy-on-kubernetes/greptimedb-operator-management) with unlimited cross-cloud scaling, handling hundreds of thousands of concurrent requests. |
+| [Developer-Friendly](https://docs.greptime.com/user-guide/protocols/overview) | SQL/PromQL interfaces, built-in web dashboard, REST API, MySQL/PostgreSQL protocol compatibility, and native [OpenTelemetry](https://docs.greptime.com/user-guide/ingest-data/for-observability/opentelemetry/) support. |
+| [Flexible Deployment](https://docs.greptime.com/user-guide/deployments-administration/overview) | Deploy anywhere from ARM-based edge devices (including [Android](https://docs.greptime.com/user-guide/deployments-administration/run-on-android)) to cloud, with unified APIs and efficient data sync. |
+
+ ✅ **Perfect for:**
+ - Unified observability stack replacing Prometheus + Loki + Tempo
+ - Large-scale metrics with high cardinality (millions to billions of time series)
+ - Large-scale observability platform requiring cost efficiency and scalability
+ - IoT and edge computing with resource and bandwidth constraints
Learn more in [Why GreptimeDB](https://docs.greptime.com/user-guide/concepts/why-greptimedb) and [Observability 2.0 and the Database for It](https://greptime.com/blogs/2025-04-25-greptimedb-observability2-new-database).
@@ -86,10 +92,10 @@ Learn more in [Why GreptimeDB](https://docs.greptime.com/user-guide/concepts/why
| Feature | GreptimeDB | Traditional TSDB | Log Stores |
|----------------------------------|-----------------------|--------------------|-----------------|
| Data Types | Metrics, Logs, Traces | Metrics only | Logs only |
-| Query Language | SQL, PromQL, Streaming| Custom/PromQL | Custom/DSL |
+| Query Language | SQL, PromQL | Custom/PromQL | Custom/DSL |
| Deployment | Edge + Cloud | Cloud/On-prem | Mostly central |
| Indexing & Performance | PB-Scale, Sub-second | Varies | Varies |
-| Integration | REST, SQL, Common protocols | Varies | Varies |
+| Integration | REST API, SQL, Common protocols | Varies | Varies |
**Performance:**
* [GreptimeDB tops JSONBench's billion-record cold run test!](https://greptime.com/blogs/2025-03-18-jsonbench-greptimedb-performance)
@@ -99,22 +105,18 @@ Read [more benchmark reports](https://docs.greptime.com/user-guide/concepts/feat
## Architecture
-* Read the [architecture](https://docs.greptime.com/contributor-guide/overview/#architecture) document.
-* [DeepWiki](https://deepwiki.com/GreptimeTeam/greptimedb/1-overview) provides an in-depth look at GreptimeDB:
+GreptimeDB can run in two modes:
+* **Standalone Mode** - Single binary for development and small deployments
+* **Distributed Mode** - Separate components for production scale:
+ - Frontend: Query processing and protocol handling
+ - Datanode: Data storage and retrieval
+ - Metasrv: Metadata management and coordination
+
+Read the [architecture](https://docs.greptime.com/contributor-guide/overview/#architecture) document. [DeepWiki](https://deepwiki.com/GreptimeTeam/greptimedb/1-overview) provides an in-depth look at GreptimeDB:
## Try GreptimeDB
-### 1. [Live Demo](https://greptime.com/playground)
-
-Experience GreptimeDB directly in your browser.
-
-### 2. [GreptimeCloud](https://console.greptime.cloud/)
-
-Start instantly with a free cluster.
-
-### 3. Docker (Local Quickstart)
-
```shell
docker pull greptime/greptimedb
```
@@ -130,7 +132,8 @@ docker run -p 127.0.0.1:4000-4003:4000-4003 \
--postgres-addr 0.0.0.0:4003
```
Dashboard: [http://localhost:4000/dashboard](http://localhost:4000/dashboard)
-[Full Install Guide](https://docs.greptime.com/getting-started/installation/overview)
+
+Read more in the [full Install Guide](https://docs.greptime.com/getting-started/installation/overview).
**Troubleshooting:**
* Cannot connect to the database? Ensure that ports `4000`, `4001`, `4002`, and `4003` are not blocked by a firewall or used by other services.
@@ -159,21 +162,26 @@ cargo run -- standalone start
## Tools & Extensions
-- **Kubernetes:** [GreptimeDB Operator](https://github.com/GrepTimeTeam/greptimedb-operator)
-- **Helm Charts:** [Greptime Helm Charts](https://github.com/GreptimeTeam/helm-charts)
-- **Dashboard:** [Web UI](https://github.com/GreptimeTeam/dashboard)
-- **SDKs/Ingester:** [Go](https://github.com/GreptimeTeam/greptimedb-ingester-go), [Java](https://github.com/GreptimeTeam/greptimedb-ingester-java), [C++](https://github.com/GreptimeTeam/greptimedb-ingester-cpp), [Erlang](https://github.com/GreptimeTeam/greptimedb-ingester-erl), [Rust](https://github.com/GreptimeTeam/greptimedb-ingester-rust), [JS](https://github.com/GreptimeTeam/greptimedb-ingester-js)
-- **Grafana**: [Official Dashboard](https://github.com/GreptimeTeam/greptimedb/blob/main/grafana/README.md)
+- **Kubernetes**: [GreptimeDB Operator](https://github.com/GrepTimeTeam/greptimedb-operator)
+- **Helm Charts**: [Greptime Helm Charts](https://github.com/GreptimeTeam/helm-charts)
+- **Dashboard**: [Web UI](https://github.com/GreptimeTeam/dashboard)
+- **gRPC Ingester**: [Go](https://github.com/GreptimeTeam/greptimedb-ingester-go), [Java](https://github.com/GreptimeTeam/greptimedb-ingester-java), [C++](https://github.com/GreptimeTeam/greptimedb-ingester-cpp), [Erlang](https://github.com/GreptimeTeam/greptimedb-ingester-erl), [Rust](https://github.com/GreptimeTeam/greptimedb-ingester-rust)
+- **Grafana Data Source**: [GreptimeDB Grafana data source plugin](https://github.com/GreptimeTeam/greptimedb-grafana-datasource)
+- **Grafana Dashboard**: [Official Dashboard for monitoring](https://github.com/GreptimeTeam/greptimedb/blob/main/grafana/README.md)
## Project Status
-> **Status:** Beta.
-> **GA (v1.0):** Targeted for mid 2025.
+> **Status:** Beta — marching toward v1.0 GA!
+> **GA (v1.0):** January 10, 2026
-- Being used in production by early adopters
+- Deployed in production by open-source projects and commercial users
- Stable, actively maintained, with regular releases ([version info](https://docs.greptime.com/nightly/reference/about-greptimedb-version))
- Suitable for evaluation and pilot deployments
+GreptimeDB v1.0 represents a major milestone toward maturity — marking stable APIs, production readiness, and proven performance.
+
+**Roadmap:** Beta1 (Nov 10) → Beta2 (Nov 24) → RC1 (Dec 8) → GA (Jan 10, 2026). Please read the [v1.0 highlights and release plan](https://greptime.com/blogs/2025-11-05-greptimedb-v1-highlights) for details.
+
For production use, we recommend using the latest stable release.
[![Star History Chart](https://api.star-history.com/svg?repos=GreptimeTeam/GreptimeDB&type=Date)](https://www.star-history.com/#GreptimeTeam/GreptimeDB&Date)
@@ -214,5 +222,5 @@ Special thanks to all contributors! See [AUTHORS.md](https://github.com/Greptime
- Uses [Apache Arrow™](https://arrow.apache.org/) (memory model)
- [Apache Parquet™](https://parquet.apache.org/) (file storage)
-- [Apache Arrow DataFusion™](https://arrow.apache.org/datafusion/) (query engine)
+- [Apache DataFusion™](https://arrow.apache.org/datafusion/) (query engine)
- [Apache OpenDAL™](https://opendal.apache.org/) (data access abstraction)
diff --git a/config/config.md b/config/config.md
index 72d48b5bcb..29185c6b58 100644
--- a/config/config.md
+++ b/config/config.md
@@ -13,9 +13,10 @@
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `default_timezone` | String | Unset | The default timezone of the server. |
+| `default_column_prefix` | String | Unset | The default column prefix for auto-created time index and value columns. |
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.
By default, it provides services after all regions have been initialized. |
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
-| `max_concurrent_queries` | Integer | `0` | The maximum current queries allowed to be executed. Zero means unlimited. |
+| `max_concurrent_queries` | Integer | `0` | The maximum number of concurrent queries allowed to be executed. Zero means unlimited.
NOTE: This setting affects scan_memory_limit's privileged tier allocation.
When set, 70% of queries get privileged memory access (full scan_memory_limit).
The remaining 30% get standard tier access (70% of scan_memory_limit). |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
| `max_in_flight_write_bytes` | String | Unset | The maximum in-flight write bytes. |
| `runtime` | -- | -- | The runtime options. |
@@ -103,6 +104,7 @@
| `flow.num_workers` | Integer | `0` | The number of flow worker in flownode.
Not setting(or set to 0) this value will use the number of CPU cores divided by 2. |
| `query` | -- | -- | The query engine options. |
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.
Default to 0, which means the number of CPU cores. |
+| `query.memory_pool_size` | String | `50%` | Memory pool size for query execution operators (aggregation, sorting, join).
Supports absolute size (e.g., "2GB", "4GB") or percentage of system memory (e.g., "20%").
Setting it to 0 disables the limit (unbounded, default behavior).
When this limit is reached, queries will fail with ResourceExhausted error.
NOTE: This does NOT limit memory used by table scans. |
| `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `./greptimedb_data` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.
- `File`: the data is stored in the local file system.
- `S3`: the data is stored in the S3 object storage.
- `Gcs`: the data is stored in the Google Cloud Storage.
- `Azblob`: the data is stored in the Azure Blob Storage.
- `Oss`: the data is stored in the Aliyun OSS. |
@@ -150,10 +152,13 @@
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
+| `region_engine.mito.preload_index_cache` | Bool | `true` | Preload index (puffin) files into cache on region open (default: true).
When enabled, index files are loaded into the write cache during region initialization,
which can improve query performance at the cost of longer startup times. |
+| `region_engine.mito.index_cache_percent` | Integer | `20` | Percentage of write cache capacity allocated for index (puffin) files (default: 20).
The remaining capacity is used for data (parquet) files.
Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,
1GiB is reserved for index files and 4GiB for data files. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.max_concurrent_scan_files` | Integer | `384` | Maximum number of SST files to scan concurrently. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
+| `region_engine.mito.scan_memory_limit` | String | `50%` | Memory limit for table scans across all queries.
Supports absolute size (e.g., "2GB") or percentage of system memory (e.g., "20%").
Setting it to 0 disables the limit.
NOTE: Works with max_concurrent_queries for tiered memory allocation.
- If max_concurrent_queries is set: 70% of queries get full access, 30% get 70% access.
- If max_concurrent_queries is 0 (unlimited): first 20 queries get full access, rest get 70% access. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.
To align with the old behavior, the default value is 0 (no restrictions). |
| `region_engine.mito.default_experimental_flat_format` | Bool | `false` | Whether to enable experimental flat format as the default format. |
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
@@ -187,7 +192,7 @@
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.
Only available for `partition_tree` memtable. |
| `region_engine.file` | -- | -- | Enable the file engine. |
| `region_engine.metric` | -- | -- | Metric engine options. |
-| `region_engine.metric.experimental_sparse_primary_key_encoding` | Bool | `false` | Whether to enable the experimental sparse primary key encoding. |
+| `region_engine.metric.sparse_primary_key_encoding` | Bool | `true` | Whether to use sparse primary key encoding. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
@@ -205,14 +210,6 @@
| `slow_query.record_type` | String | Unset | The record type of slow queries. It can be `system_table` or `log`. |
| `slow_query.threshold` | String | Unset | The threshold of slow query. |
| `slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
-| `export_metrics` | -- | -- | The standalone can export its metrics and send to Prometheus compatible service (e.g. `greptimedb`) from remote-write API.
This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
-| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
-| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
-| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
You must create the database before enabling it. |
-| `export_metrics.self_import.db` | String | Unset | -- |
-| `export_metrics.remote_write` | -- | -- | -- |
-| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
-| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
@@ -226,6 +223,7 @@
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `default_timezone` | String | Unset | The default timezone of the server. |
+| `default_column_prefix` | String | Unset | The default column prefix for auto-created time index and value columns. |
| `max_in_flight_write_bytes` | String | Unset | The maximum in-flight write bytes. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
@@ -306,6 +304,7 @@
| `query` | -- | -- | The query engine options. |
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.
Default to 0, which means the number of CPU cores. |
| `query.allow_query_fallback` | Bool | `false` | Whether to allow query fallback when push down optimize fails.
Default to false, meaning when push down optimize failed, return error msg |
+| `query.memory_pool_size` | String | `50%` | Memory pool size for query execution operators (aggregation, sorting, join).
Supports absolute size (e.g., "4GB", "8GB") or percentage of system memory (e.g., "30%").
Setting it to 0 disables the limit (unbounded, default behavior).
When this limit is reached, queries will fail with ResourceExhausted error.
NOTE: This does NOT limit memory used by table scans (only applies to datanodes). |
| `datanode` | -- | -- | Datanode options. |
| `datanode.client` | -- | -- | Datanode client options. |
| `datanode.client.connect_timeout` | String | `10s` | -- |
@@ -328,12 +327,6 @@
| `slow_query.threshold` | String | `30s` | The threshold of slow query. It can be human readable time string, for example: `10s`, `100ms`, `1s`. |
| `slow_query.sample_ratio` | Float | `1.0` | The sampling ratio of slow query log. The value should be in the range of (0, 1]. For example, `0.1` means 10% of the slow queries will be logged and `1.0` means all slow queries will be logged. |
| `slow_query.ttl` | String | `90d` | The TTL of the `slow_queries` system table. Default is `90d` when `record_type` is `system_table`. |
-| `export_metrics` | -- | -- | The frontend can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
-| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
-| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
-| `export_metrics.remote_write` | -- | -- | -- |
-| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
-| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
@@ -347,7 +340,7 @@
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `data_home` | String | `./greptimedb_data` | The working home directory. |
-| `store_addrs` | Array | -- | Store server address default to etcd store.
For postgres store, the format is:
"password=password dbname=postgres user=postgres host=localhost port=5432"
For etcd store, the format is:
"127.0.0.1:2379" |
+| `store_addrs` | Array | -- | Store server address(es). The format depends on the selected backend.
For etcd: a list of "host:port" endpoints.
e.g. ["192.168.1.1:2379", "192.168.1.2:2379"]
For PostgreSQL: a connection string in libpq format or URI.
e.g.
- "host=localhost port=5432 user=postgres password= dbname=postgres"
- "postgresql://user:password@localhost:5432/mydb?connect_timeout=10"
For details, see: https://docs.rs/tokio-postgres/latest/tokio_postgres/config/struct.Config.html
For MySQL: a connection URL.
e.g. "mysql://user:password@localhost:3306/greptime_meta?ssl-mode=VERIFY_CA&ssl-ca=/path/to/ca.pem" |
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
| `backend` | String | `etcd_store` | The datastore for meta server.
Available values:
- `etcd_store` (default value)
- `memory_store`
- `postgres_store`
- `mysql_store` |
| `meta_table_name` | String | `greptime_metakv` | Table name in RDS to store metadata. Effect when using a RDS kvbackend.
**Only used when backend is `postgres_store`.** |
@@ -363,12 +356,11 @@
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
-| `backend_tls` | -- | -- | TLS configuration for kv store backend (applicable for etcd, PostgreSQL, and MySQL backends)
When using etcd, PostgreSQL, or MySQL as metadata store, you can configure TLS here |
+| `backend_tls` | -- | -- | TLS configuration for kv store backend (applicable for etcd, PostgreSQL, and MySQL backends)
When using etcd, PostgreSQL, or MySQL as metadata store, you can configure TLS here
Note: if TLS is configured in both this section and the `store_addrs` connection string, the
settings here will override the TLS settings in `store_addrs`. |
| `backend_tls.mode` | String | `prefer` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
- "disable" - No TLS
- "prefer" (default) - Try TLS, fallback to plain
- "require" - Require TLS
- "verify_ca" - Require TLS and verify CA
- "verify_full" - Require TLS and verify hostname |
| `backend_tls.cert_path` | String | `""` | Path to client certificate file (for client authentication)
Like "/path/to/client.crt" |
| `backend_tls.key_path` | String | `""` | Path to client private key file (for client authentication)
Like "/path/to/client.key" |
| `backend_tls.ca_cert_path` | String | `""` | Path to CA certificate file (for server certificate verification)
Required when using custom CAs or self-signed certificates
Leave empty to use system root certificates only
Like "/path/to/ca.crt" |
-| `backend_tls.watch` | Bool | `false` | Watch for certificate file changes and auto reload |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.bind_addr` | String | `127.0.0.1:3002` | The address to bind the gRPC server. |
| `grpc.server_addr` | String | `127.0.0.1:3002` | The communication server address for the frontend and datanode to connect to metasrv.
If left empty or unset, the server will automatically use the IP address of the first network interface
on the host, with the same port number as the one specified in `bind_addr`. |
@@ -423,12 +415,6 @@
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of tracing will be sampled and exported.
Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
-| `export_metrics` | -- | -- | The metasrv can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
-| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
-| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
-| `export_metrics.remote_write` | -- | -- | -- |
-| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
-| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
@@ -440,10 +426,11 @@
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `node_id` | Integer | Unset | The datanode identifier and should be unique in the cluster. |
+| `default_column_prefix` | String | Unset | The default column prefix for auto-created time index and value columns. |
| `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.
It will block the datanode start if it can't receive leases in the heartbeat from metasrv. |
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.
By default, it provides services after all regions have been initialized. |
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
-| `max_concurrent_queries` | Integer | `0` | The maximum current queries allowed to be executed. Zero means unlimited. |
+| `max_concurrent_queries` | Integer | `0` | The maximum number of concurrent queries allowed to be executed. Zero means unlimited.
NOTE: This setting affects scan_memory_limit's privileged tier allocation.
When set, 70% of queries get privileged memory access (full scan_memory_limit).
The remaining 30% get standard tier access (70% of scan_memory_limit). |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
@@ -497,6 +484,7 @@
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.
**It's only used when the provider is `kafka`**.
This option ensures that when Kafka messages are deleted, the system
can still successfully replay memtable data without throwing an
out-of-range error.
However, enabling this option might lead to unexpected data loss,
as the system will skip over missing entries instead of treating
them as critical errors. |
| `query` | -- | -- | The query engine options. |
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.
Default to 0, which means the number of CPU cores. |
+| `query.memory_pool_size` | String | `50%` | Memory pool size for query execution operators (aggregation, sorting, join).
Supports absolute size (e.g., "2GB", "4GB") or percentage of system memory (e.g., "20%").
Setting it to 0 disables the limit (unbounded, default behavior).
When this limit is reached, queries will fail with ResourceExhausted error.
NOTE: This does NOT limit memory used by table scans. |
| `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `./greptimedb_data` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.
- `File`: the data is stored in the local file system.
- `S3`: the data is stored in the S3 object storage.
- `Gcs`: the data is stored in the Google Cloud Storage.
- `Azblob`: the data is stored in the Azure Blob Storage.
- `Oss`: the data is stored in the Aliyun OSS. |
@@ -546,10 +534,13 @@
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
+| `region_engine.mito.preload_index_cache` | Bool | `true` | Preload index (puffin) files into cache on region open (default: true).
When enabled, index files are loaded into the write cache during region initialization,
which can improve query performance at the cost of longer startup times. |
+| `region_engine.mito.index_cache_percent` | Integer | `20` | Percentage of write cache capacity allocated for index (puffin) files (default: 20).
The remaining capacity is used for data (parquet) files.
Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,
1GiB is reserved for index files and 4GiB for data files. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.max_concurrent_scan_files` | Integer | `384` | Maximum number of SST files to scan concurrently. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
+| `region_engine.mito.scan_memory_limit` | String | `50%` | Memory limit for table scans across all queries.
Supports absolute size (e.g., "2GB") or percentage of system memory (e.g., "20%").
Setting it to 0 disables the limit.
NOTE: Works with max_concurrent_queries for tiered memory allocation.
- If max_concurrent_queries is set: 70% of queries get full access, 30% get 70% access.
- If max_concurrent_queries is 0 (unlimited): first 20 queries get full access, rest get 70% access. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.
To align with the old behavior, the default value is 0 (no restrictions). |
| `region_engine.mito.default_experimental_flat_format` | Bool | `false` | Whether to enable experimental flat format as the default format. |
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
@@ -583,7 +574,7 @@
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.
Only available for `partition_tree` memtable. |
| `region_engine.file` | -- | -- | Enable the file engine. |
| `region_engine.metric` | -- | -- | Metric engine options. |
-| `region_engine.metric.experimental_sparse_primary_key_encoding` | Bool | `false` | Whether to enable the experimental sparse primary key encoding. |
+| `region_engine.metric.sparse_primary_key_encoding` | Bool | `true` | Whether to use sparse primary key encoding. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
@@ -596,12 +587,6 @@
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of tracing will be sampled and exported.
Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
-| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
-| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
-| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
-| `export_metrics.remote_write` | -- | -- | -- |
-| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
-| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
@@ -670,5 +655,6 @@
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `query` | -- | -- | -- |
| `query.parallelism` | Integer | `1` | Parallelism of the query engine for query sent by flownode.
Default to 1, so it won't use too much cpu or memory |
+| `query.memory_pool_size` | String | `50%` | Memory pool size for query execution operators (aggregation, sorting, join).
Supports absolute size (e.g., "1GB", "2GB") or percentage of system memory (e.g., "20%").
Setting it to 0 disables the limit (unbounded, default behavior).
When this limit is reached, queries will fail with ResourceExhausted error.
NOTE: This does NOT limit memory used by table scans. |
| `memory` | -- | -- | The memory options. |
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.
When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
is set to "prof:true,prof_active:false". The official image adds this env variable.
Default is true. |
diff --git a/config/datanode.example.toml b/config/datanode.example.toml
index 82ee07bd84..8db6bf3d1c 100644
--- a/config/datanode.example.toml
+++ b/config/datanode.example.toml
@@ -2,6 +2,10 @@
## @toml2docs:none-default
node_id = 42
+## The default column prefix for auto-created time index and value columns.
+## @toml2docs:none-default
+default_column_prefix = "greptime"
+
## Start services after regions have obtained leases.
## It will block the datanode start if it can't receive leases in the heartbeat from metasrv.
require_lease_before_startup = false
@@ -14,6 +18,9 @@ init_regions_in_background = false
init_regions_parallelism = 16
## The maximum current queries allowed to be executed. Zero means unlimited.
+## NOTE: This setting affects scan_memory_limit's privileged tier allocation.
+## When set, 70% of queries get privileged memory access (full scan_memory_limit).
+## The remaining 30% get standard tier access (70% of scan_memory_limit).
max_concurrent_queries = 0
## Enable telemetry to collect anonymous usage data. Enabled by default.
@@ -257,6 +264,13 @@ overwrite_entry_start_id = false
## Default to 0, which means the number of CPU cores.
parallelism = 0
+## Memory pool size for query execution operators (aggregation, sorting, join).
+## Supports absolute size (e.g., "2GB", "4GB") or percentage of system memory (e.g., "20%").
+## Setting it to 0 disables the limit (unbounded, default behavior).
+## When this limit is reached, queries will fail with ResourceExhausted error.
+## NOTE: This does NOT limit memory used by table scans.
+memory_pool_size = "50%"
+
## The data storage options.
[storage]
## The working home directory.
@@ -485,6 +499,17 @@ write_cache_size = "5GiB"
## @toml2docs:none-default
write_cache_ttl = "8h"
+## Preload index (puffin) files into cache on region open (default: true).
+## When enabled, index files are loaded into the write cache during region initialization,
+## which can improve query performance at the cost of longer startup times.
+preload_index_cache = true
+
+## Percentage of write cache capacity allocated for index (puffin) files (default: 20).
+## The remaining capacity is used for data (parquet) files.
+## Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,
+## 1GiB is reserved for index files and 4GiB for data files.
+index_cache_percent = 20
+
## Buffer size for SST writing.
sst_write_buffer_size = "8MB"
@@ -497,6 +522,14 @@ max_concurrent_scan_files = 384
## Whether to allow stale WAL entries read during replay.
allow_stale_entries = false
+## Memory limit for table scans across all queries.
+## Supports absolute size (e.g., "2GB") or percentage of system memory (e.g., "20%").
+## Setting it to 0 disables the limit.
+## NOTE: Works with max_concurrent_queries for tiered memory allocation.
+## - If max_concurrent_queries is set: 70% of queries get full access, 30% get 70% access.
+## - If max_concurrent_queries is 0 (unlimited): first 20 queries get full access, rest get 70% access.
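+## Example (illustrative numbers, not shipped defaults): on a host with 32GiB of memory, "50%" caps
+## total scan memory at 16GiB; with max_concurrent_queries = 10, about 7 queries may draw on the full
+## 16GiB budget while the remaining 3 are each limited to roughly 11.2GiB (70% of the limit).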
+scan_memory_limit = "50%"
+
## Minimum time interval between two compactions.
## To align with the old behavior, the default value is 0 (no restrictions).
min_compaction_interval = "0m"
@@ -636,8 +669,8 @@ fork_dictionary_bytes = "1GiB"
[[region_engine]]
## Metric engine options.
[region_engine.metric]
-## Whether to enable the experimental sparse primary key encoding.
-experimental_sparse_primary_key_encoding = false
+## Whether to use sparse primary key encoding.
+sparse_primary_key_encoding = true
## The logging options.
[logging]
@@ -679,21 +712,6 @@ otlp_export_protocol = "http"
[logging.tracing_sample_ratio]
default_ratio = 1.0
-## The datanode can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
-## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
-[export_metrics]
-## whether enable export metrics.
-enable = false
-## The interval of export metrics.
-write_interval = "30s"
-
-[export_metrics.remote_write]
-## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
-url = ""
-
-## HTTP headers of Prometheus remote-write carry.
-headers = { }
-
## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.
diff --git a/config/flownode.example.toml b/config/flownode.example.toml
index 81ff25f283..4e44c1ecbb 100644
--- a/config/flownode.example.toml
+++ b/config/flownode.example.toml
@@ -158,6 +158,13 @@ default_ratio = 1.0
## Default to 1, so it won't use too much cpu or memory
parallelism = 1
+## Memory pool size for query execution operators (aggregation, sorting, join).
+## Supports absolute size (e.g., "1GB", "2GB") or percentage of system memory (e.g., "20%").
+## Setting it to 0 disables the limit (unbounded, default behavior).
+## When this limit is reached, queries will fail with ResourceExhausted error.
+## NOTE: This does NOT limit memory used by table scans.
+memory_pool_size = "50%"
+
## The memory options.
[memory]
## Whether to enable heap profiling activation during startup.
diff --git a/config/frontend.example.toml b/config/frontend.example.toml
index 9ffcdad540..ecac6cff01 100644
--- a/config/frontend.example.toml
+++ b/config/frontend.example.toml
@@ -2,6 +2,10 @@
## @toml2docs:none-default
default_timezone = "UTC"
+## The default column prefix for auto-created time index and value columns.
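+## For example, with the prefix "greptime", auto-created columns typically get names such as
+## `greptime_timestamp` and `greptime_value` (illustrative; exact names depend on the ingest protocol).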
+## @toml2docs:none-default
+default_column_prefix = "greptime"
+
## The maximum in-flight write bytes.
## @toml2docs:none-default
#+ max_in_flight_write_bytes = "500MB"
@@ -252,6 +256,13 @@ parallelism = 0
## Default to false, meaning when push down optimize failed, return error msg
allow_query_fallback = false
+## Memory pool size for query execution operators (aggregation, sorting, join).
+## Supports absolute size (e.g., "4GB", "8GB") or percentage of system memory (e.g., "30%").
+## Setting it to 0 disables the limit (unbounded, default behavior).
+## When this limit is reached, queries will fail with ResourceExhausted error.
+## NOTE: This does NOT limit memory used by table scans (only applies to datanodes).
+memory_pool_size = "50%"
+
## Datanode options.
[datanode]
## Datanode client options.
@@ -318,21 +329,6 @@ sample_ratio = 1.0
## The TTL of the `slow_queries` system table. Default is `90d` when `record_type` is `system_table`.
ttl = "90d"
-## The frontend can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
-## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
-[export_metrics]
-## whether enable export metrics.
-enable = false
-## The interval of export metrics.
-write_interval = "30s"
-
-[export_metrics.remote_write]
-## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
-url = ""
-
-## HTTP headers of Prometheus remote-write carry.
-headers = { }
-
## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.
diff --git a/config/metasrv.example.toml b/config/metasrv.example.toml
index d7d5ace99c..7997383a52 100644
--- a/config/metasrv.example.toml
+++ b/config/metasrv.example.toml
@@ -1,11 +1,19 @@
## The working home directory.
data_home = "./greptimedb_data"
-## Store server address default to etcd store.
-## For postgres store, the format is:
-## "password=password dbname=postgres user=postgres host=localhost port=5432"
-## For etcd store, the format is:
-## "127.0.0.1:2379"
+## Store server address(es). The format depends on the selected backend.
+##
+## For etcd: a list of "host:port" endpoints.
+## e.g. ["192.168.1.1:2379", "192.168.1.2:2379"]
+##
+## For PostgreSQL: a connection string in libpq format or URI.
+## e.g.
+## - "host=localhost port=5432 user=postgres password= dbname=postgres"
+## - "postgresql://user:password@localhost:5432/mydb?connect_timeout=10"
+## For details, see: https://docs.rs/tokio-postgres/latest/tokio_postgres/config/struct.Config.html
+##
+## For MySQL: a connection URL.
+## e.g. "mysql://user:password@localhost:3306/greptime_meta?ssl-mode=VERIFY_CA&ssl-ca=/path/to/ca.pem"
store_addrs = ["127.0.0.1:2379"]
## If it's not empty, the metasrv will store all data with this key prefix.
@@ -75,6 +83,9 @@ node_max_idle_time = "24hours"
## TLS configuration for kv store backend (applicable for etcd, PostgreSQL, and MySQL backends)
## When using etcd, PostgreSQL, or MySQL as metadata store, you can configure TLS here
+##
+## Note: if TLS is configured in both this section and the `store_addrs` connection string, the
+## settings here will override the TLS settings in `store_addrs`.
[backend_tls]
## TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
## - "disable" - No TLS
@@ -98,9 +109,6 @@ key_path = ""
## Like "/path/to/ca.crt"
ca_cert_path = ""
-## Watch for certificate file changes and auto reload
-watch = false
-
## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
@@ -323,21 +331,6 @@ otlp_export_protocol = "http"
[logging.tracing_sample_ratio]
default_ratio = 1.0
-## The metasrv can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
-## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
-[export_metrics]
-## whether enable export metrics.
-enable = false
-## The interval of export metrics.
-write_interval = "30s"
-
-[export_metrics.remote_write]
-## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
-url = ""
-
-## HTTP headers of Prometheus remote-write carry.
-headers = { }
-
## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.
diff --git a/config/standalone.example.toml b/config/standalone.example.toml
index 744dbbe751..661067d2a1 100644
--- a/config/standalone.example.toml
+++ b/config/standalone.example.toml
@@ -2,6 +2,10 @@
## @toml2docs:none-default
default_timezone = "UTC"
+## The default column prefix for auto-created time index and value columns.
+## @toml2docs:none-default
+default_column_prefix = "greptime"
+
## Initialize all regions in the background during the startup.
## By default, it provides services after all regions have been initialized.
init_regions_in_background = false
@@ -10,6 +14,9 @@ init_regions_in_background = false
init_regions_parallelism = 16
## The maximum current queries allowed to be executed. Zero means unlimited.
+## NOTE: This setting affects scan_memory_limit's privileged tier allocation.
+## When set, 70% of queries get privileged memory access (full scan_memory_limit).
+## The remaining 30% get standard tier access (70% of scan_memory_limit).
max_concurrent_queries = 0
## Enable telemetry to collect anonymous usage data. Enabled by default.
@@ -361,6 +368,13 @@ max_running_procedures = 128
## Default to 0, which means the number of CPU cores.
parallelism = 0
+## Memory pool size for query execution operators (aggregation, sorting, join).
+## Supports absolute size (e.g., "2GB", "4GB") or percentage of system memory (e.g., "20%").
+## Setting it to 0 disables the limit (unbounded, default behavior).
+## When this limit is reached, queries will fail with ResourceExhausted error.
+## NOTE: This does NOT limit memory used by table scans.
+memory_pool_size = "50%"
+
## The data storage options.
[storage]
## The working home directory.
@@ -576,6 +590,17 @@ write_cache_size = "5GiB"
## @toml2docs:none-default
write_cache_ttl = "8h"
+## Preload index (puffin) files into cache on region open (default: true).
+## When enabled, index files are loaded into the write cache during region initialization,
+## which can improve query performance at the cost of longer startup times.
+preload_index_cache = true
+
+## Percentage of write cache capacity allocated for index (puffin) files (default: 20).
+## The remaining capacity is used for data (parquet) files.
+## Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,
+## 1GiB is reserved for index files and 4GiB for data files.
+index_cache_percent = 20
+
## Buffer size for SST writing.
sst_write_buffer_size = "8MB"
@@ -588,6 +613,14 @@ max_concurrent_scan_files = 384
## Whether to allow stale WAL entries read during replay.
allow_stale_entries = false
+## Memory limit for table scans across all queries.
+## Supports absolute size (e.g., "2GB") or percentage of system memory (e.g., "20%").
+## Setting it to 0 disables the limit.
+## NOTE: Works with max_concurrent_queries for tiered memory allocation.
+## - If max_concurrent_queries is set: 70% of queries get full access, 30% get 70% access.
+## - If max_concurrent_queries is 0 (unlimited): first 20 queries get full access, rest get 70% access.
+scan_memory_limit = "50%"
+
## Minimum time interval between two compactions.
## To align with the old behavior, the default value is 0 (no restrictions).
min_compaction_interval = "0m"
@@ -727,8 +760,8 @@ fork_dictionary_bytes = "1GiB"
[[region_engine]]
## Metric engine options.
[region_engine.metric]
-## Whether to enable the experimental sparse primary key encoding.
-experimental_sparse_primary_key_encoding = false
+## Whether to use sparse primary key encoding.
+sparse_primary_key_encoding = true
## The logging options.
[logging]
@@ -787,27 +820,6 @@ default_ratio = 1.0
## @toml2docs:none-default
#+ sample_ratio = 1.0
-## The standalone can export its metrics and send to Prometheus compatible service (e.g. `greptimedb`) from remote-write API.
-## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
-[export_metrics]
-## whether enable export metrics.
-enable = false
-## The interval of export metrics.
-write_interval = "30s"
-
-## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
-## You must create the database before enabling it.
-[export_metrics.self_import]
-## @toml2docs:none-default
-db = "greptime_metrics"
-
-[export_metrics.remote_write]
-## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
-url = ""
-
-## HTTP headers of Prometheus remote-write carry.
-headers = { }
-
## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing]
## The tokio console address.
diff --git a/docker/buildx/centos/Dockerfile b/docker/buildx/centos/Dockerfile
index b7e822fac6..f5bbd15ad6 100644
--- a/docker/buildx/centos/Dockerfile
+++ b/docker/buildx/centos/Dockerfile
@@ -1,10 +1,10 @@
-FROM centos:7 as builder
+FROM centos:7 AS builder
ARG CARGO_PROFILE
ARG FEATURES
ARG OUTPUT_DIR
-ENV LANG en_US.utf8
+ENV LANG=en_US.utf8
WORKDIR /greptimedb
# Install dependencies
@@ -22,7 +22,7 @@ RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
# Install Rust
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
-ENV PATH /usr/local/bin:/root/.cargo/bin/:$PATH
+ENV PATH=/usr/local/bin:/root/.cargo/bin/:$PATH
# Build the project in release mode.
RUN --mount=target=.,rw \
@@ -33,7 +33,7 @@ RUN --mount=target=.,rw \
TARGET_DIR=/out/target
# Export the binary to the clean image.
-FROM centos:7 as base
+FROM centos:7 AS base
ARG OUTPUT_DIR
@@ -45,7 +45,7 @@ RUN yum install -y epel-release \
WORKDIR /greptime
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
-ENV PATH /greptime/bin/:$PATH
+ENV PATH=/greptime/bin/:$PATH
ENV MALLOC_CONF="prof:true,prof_active:false"
diff --git a/docker/buildx/distroless/Dockerfile b/docker/buildx/distroless/Dockerfile
new file mode 100644
index 0000000000..b0f3af33e8
--- /dev/null
+++ b/docker/buildx/distroless/Dockerfile
@@ -0,0 +1,65 @@
+FROM ubuntu:22.04 AS builder
+
+ARG CARGO_PROFILE
+ARG FEATURES
+ARG OUTPUT_DIR
+
+ENV LANG=en_US.utf8
+WORKDIR /greptimedb
+
+RUN apt-get update && \
+ DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common
+
+# Install dependencies.
+RUN --mount=type=cache,target=/var/cache/apt \
+ apt-get update && apt-get install -y \
+ libssl-dev \
+ protobuf-compiler \
+ curl \
+ git \
+ build-essential \
+ pkg-config
+
+# Install Rust.
+SHELL ["/bin/bash", "-c"]
+RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
+ENV PATH=/root/.cargo/bin/:$PATH
+
+# Build the project in release mode.
+RUN --mount=target=. \
+ --mount=type=cache,target=/root/.cargo/registry \
+ make build \
+ CARGO_PROFILE=${CARGO_PROFILE} \
+ FEATURES=${FEATURES} \
+ TARGET_DIR=/out/target
+
+FROM ubuntu:22.04 AS libs
+
+ARG TARGETARCH
+
+# Copy required library dependencies based on architecture
+RUN if [ "$TARGETARCH" = "amd64" ]; then \
+ cp /lib/x86_64-linux-gnu/libz.so.1.2.11 /lib/x86_64-linux-gnu/libz.so.1; \
+ elif [ "$TARGETARCH" = "arm64" ]; then \
+ cp /lib/aarch64-linux-gnu/libz.so.1.2.11 /lib/aarch64-linux-gnu/libz.so.1; \
+ else \
+ echo "Unsupported architecture: $TARGETARCH" && exit 1; \
+ fi
+
+# Export the binary to the clean distroless image.
+FROM gcr.io/distroless/cc-debian12:latest AS base
+
+ARG OUTPUT_DIR
+ARG TARGETARCH
+
+# Copy required library dependencies
+COPY --from=libs /lib /lib
+COPY --from=busybox:stable /bin/busybox /bin/busybox
+
+WORKDIR /greptime
+COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/greptime
+ENV PATH=/greptime/bin/:$PATH
+
+ENV MALLOC_CONF="prof:true,prof_active:false"
+
+ENTRYPOINT ["greptime"]
diff --git a/docker/buildx/ubuntu/Dockerfile b/docker/buildx/ubuntu/Dockerfile
index 6306e04688..b6dc386da4 100644
--- a/docker/buildx/ubuntu/Dockerfile
+++ b/docker/buildx/ubuntu/Dockerfile
@@ -1,10 +1,10 @@
-FROM ubuntu:22.04 as builder
+FROM ubuntu:22.04 AS builder
ARG CARGO_PROFILE
ARG FEATURES
ARG OUTPUT_DIR
-ENV LANG en_US.utf8
+ENV LANG=en_US.utf8
WORKDIR /greptimedb
RUN apt-get update && \
@@ -23,7 +23,7 @@ RUN --mount=type=cache,target=/var/cache/apt \
# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
-ENV PATH /root/.cargo/bin/:$PATH
+ENV PATH=/root/.cargo/bin/:$PATH
# Build the project in release mode.
RUN --mount=target=. \
@@ -35,7 +35,7 @@ RUN --mount=target=. \
# Export the binary to the clean image.
# TODO(zyy17): Maybe should use the more secure container image.
-FROM ubuntu:22.04 as base
+FROM ubuntu:22.04 AS base
ARG OUTPUT_DIR
@@ -45,7 +45,7 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get \
WORKDIR /greptime
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
-ENV PATH /greptime/bin/:$PATH
+ENV PATH=/greptime/bin/:$PATH
ENV MALLOC_CONF="prof:true,prof_active:false"
diff --git a/docker/ci/centos/Dockerfile b/docker/ci/centos/Dockerfile
index 480f2196b2..67efadd7dc 100644
--- a/docker/ci/centos/Dockerfile
+++ b/docker/ci/centos/Dockerfile
@@ -13,7 +13,7 @@ ARG TARGETARCH
ADD $TARGETARCH/greptime /greptime/bin/
-ENV PATH /greptime/bin/:$PATH
+ENV PATH=/greptime/bin/:$PATH
ENV MALLOC_CONF="prof:true,prof_active:false"
diff --git a/docker/ci/distroless/Dockerfile b/docker/ci/distroless/Dockerfile
new file mode 100644
index 0000000000..f5e7ebd88e
--- /dev/null
+++ b/docker/ci/distroless/Dockerfile
@@ -0,0 +1,40 @@
+FROM ubuntu:22.04 AS libs
+
+ARG TARGETARCH
+
+# Copy required library dependencies based on architecture
+# TARGETARCH values: amd64, arm64
+# Ubuntu library paths: x86_64-linux-gnu, aarch64-linux-gnu
+RUN if [ "$TARGETARCH" = "amd64" ]; then \
+ mkdir -p /output/x86_64-linux-gnu && \
+ cp /lib/x86_64-linux-gnu/libz.so.1.2.11 /output/x86_64-linux-gnu/libz.so.1; \
+ elif [ "$TARGETARCH" = "arm64" ]; then \
+ mkdir -p /output/aarch64-linux-gnu && \
+ cp /lib/aarch64-linux-gnu/libz.so.1.2.11 /output/aarch64-linux-gnu/libz.so.1; \
+ else \
+ echo "Unsupported architecture: $TARGETARCH" && exit 1; \
+ fi
+
+FROM gcr.io/distroless/cc-debian12:latest
+
+# The root path under which contains all the dependencies to build this Dockerfile.
+ARG DOCKER_BUILD_ROOT=.
+# The binary name of GreptimeDB executable.
+# Defaults to "greptime", but sometimes in other projects it might be different.
+ARG TARGET_BIN=greptime
+
+ARG TARGETARCH
+
+# Copy required library dependencies
+COPY --from=libs /output /lib
+COPY --from=busybox:stable /bin/busybox /bin/busybox
+
+ADD $TARGETARCH/$TARGET_BIN /greptime/bin/
+
+ENV PATH=/greptime/bin/:$PATH
+
+ENV TARGET_BIN=$TARGET_BIN
+
+ENV MALLOC_CONF="prof:true,prof_active:false"
+
+ENTRYPOINT ["greptime"]
diff --git a/docker/ci/ubuntu/Dockerfile b/docker/ci/ubuntu/Dockerfile
index 046fd62972..c1a88e02c8 100644
--- a/docker/ci/ubuntu/Dockerfile
+++ b/docker/ci/ubuntu/Dockerfile
@@ -14,7 +14,7 @@ ARG TARGETARCH
ADD $TARGETARCH/$TARGET_BIN /greptime/bin/
-ENV PATH /greptime/bin/:$PATH
+ENV PATH=/greptime/bin/:$PATH
ENV TARGET_BIN=$TARGET_BIN
diff --git a/docs/how-to/how-to-change-log-level-on-the-fly.md b/docs/how-to/how-to-change-log-level-on-the-fly.md
index 16a72bf6ae..c3bf2602a2 100644
--- a/docs/how-to/how-to-change-log-level-on-the-fly.md
+++ b/docs/how-to/how-to-change-log-level-on-the-fly.md
@@ -13,4 +13,19 @@ Log Level changed from Some("info") to "trace,flow=debug"%
The data is a string in the format of `global_level,module1=level1,module2=level2,...` that follows the same rule of `RUST_LOG`.
-The module is the module name of the log, and the level is the log level. The log level can be one of the following: `trace`, `debug`, `info`, `warn`, `error`, `off`(case insensitive).
\ No newline at end of file
+The module is the module name of the log, and the level is the log level. The log level can be one of the following: `trace`, `debug`, `info`, `warn`, `error`, `off` (case insensitive).
+
+# Enable/Disable Trace on the Fly
+
+## HTTP API
+
+Example:
+```bash
+curl --data "true" 127.0.0.1:4000/debug/enable_trace
+```
+The database will reply with something like:
+```
+trace enabled%
+```
+
+Possible values are "true" or "false".
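+
+For instance, tracing can be turned off again through the same endpoint (a usage sketch based on the values above):
+
+```bash
+curl --data "false" 127.0.0.1:4000/debug/enable_trace
+```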
diff --git a/docs/how-to/how-to-profile-memory.md b/docs/how-to/how-to-profile-memory.md
index a860c95246..b4bc00093a 100644
--- a/docs/how-to/how-to-profile-memory.md
+++ b/docs/how-to/how-to-profile-memory.md
@@ -71,6 +71,15 @@ curl -X POST localhost:4000/debug/prof/mem/activate
# Deactivate heap profiling
curl -X POST localhost:4000/debug/prof/mem/deactivate
+
+# Activate the gdump feature, which dumps memory profiling data every time virtual memory usage exceeds its previous maximum.
+curl -X POST localhost:4000/debug/prof/mem/gdump -d 'activate=true'
+
+# Deactivate gdump.
+curl -X POST localhost:4000/debug/prof/mem/gdump -d 'activate=false'
+
+# Retrieve current gdump status.
+curl -X GET localhost:4000/debug/prof/mem/gdump
```
### Dump memory profiling data
diff --git a/docs/rfcs/2025-07-23-global-gc-worker.md b/docs/rfcs/2025-07-23-global-gc-worker.md
index 69d1e3ac34..331ed01f38 100644
--- a/docs/rfcs/2025-07-23-global-gc-worker.md
+++ b/docs/rfcs/2025-07-23-global-gc-worker.md
@@ -106,6 +106,37 @@ This mechanism may be too complex to implement at once. We can consider a two-ph
Also the read replica shouldn't be later in manifest version for more than the lingering time of obsolete files, otherwise it might ref to files that are already deleted by the GC worker.
- need to upload tmp manifest to object storage, which may introduce additional complexity and potential performance overhead. But since long-running queries are typically not frequent, the performance impact is expected to be minimal.
+One potential race condition with region migration is illustrated below:
+
+```mermaid
+sequenceDiagram
+ participant gc_worker as GC Worker(same dn as region 1)
+ participant region1 as Region 1 (Leader → Follower)
+ participant region2 as Region 2 (Follower → Leader)
+ participant region_dir as Region Directory
+
+ gc_worker->>region1: Start GC, get region manifest
+ activate region1
+ region1-->>gc_worker: Region 1 manifest
+ deactivate region1
+ gc_worker->>region_dir: Scan region directory
+
+ Note over region1,region2: Region Migration Occurs
+ region1-->>region2: Downgrade to Follower
+ region2-->>region1: Becomes Leader
+
+ region2->>region_dir: Add new file
+
+ gc_worker->>region_dir: Continue scanning
+ gc_worker-->>region_dir: Discovers new file
+ Note over gc_worker: New file not in Region 1's manifest
+ gc_worker->>gc_worker: Mark file as orphan (incorrectly)
+```
+which could cause the GC worker to incorrectly mark the new file as an orphan and delete it, if the configured lingering time for orphan files (files not referenced anywhere, whether in use or not) is not long enough.
+
+A good enough solution could be to use a lock to prevent the GC worker from running on a region while region migration is happening on that region, and vice versa.
+
+The race condition between the GC worker and repartition also needs to be considered carefully. For now, acquiring locks for both region migration and repartition during the GC worker process could be a simple solution.
## Conclusion and Rationale
diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml
index f5826d01a7..3515b788b5 100644
--- a/src/api/Cargo.toml
+++ b/src/api/Cargo.toml
@@ -8,6 +8,7 @@ license.workspace = true
workspace = true
[dependencies]
+arrow-schema.workspace = true
common-base.workspace = true
common-decimal.workspace = true
common-error.workspace = true
diff --git a/src/api/src/helper.rs b/src/api/src/helper.rs
index da5fdcfeda..f53f3f162b 100644
--- a/src/api/src/helper.rs
+++ b/src/api/src/helper.rs
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::collections::HashSet;
+use std::collections::{BTreeMap, HashSet};
use std::sync::Arc;
use common_decimal::Decimal128;
@@ -20,13 +20,12 @@ use common_decimal::decimal128::{DECIMAL128_DEFAULT_SCALE, DECIMAL128_MAX_PRECIS
use common_time::time::Time;
use common_time::timestamp::TimeUnit;
use common_time::{Date, IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth, Timestamp};
+use datatypes::json::value::{JsonNumber, JsonValue, JsonValueRef, JsonVariant};
use datatypes::prelude::{ConcreteDataType, ValueRef};
use datatypes::types::{
- IntervalType, JsonFormat, StructField, StructType, TimeType, TimestampType,
-};
-use datatypes::value::{
- ListValue, ListValueRef, OrderedF32, OrderedF64, StructValue, StructValueRef, Value,
+ IntervalType, JsonFormat, JsonType, StructField, StructType, TimeType, TimestampType,
};
+use datatypes::value::{ListValueRef, OrderedF32, OrderedF64, StructValueRef, Value};
use datatypes::vectors::VectorRef;
use greptime_proto::v1::column_data_type_extension::TypeExt;
use greptime_proto::v1::ddl_request::Expr;
@@ -34,9 +33,9 @@ use greptime_proto::v1::greptime_request::Request;
use greptime_proto::v1::query_request::Query;
use greptime_proto::v1::value::ValueData;
use greptime_proto::v1::{
- self, ColumnDataTypeExtension, DdlRequest, DecimalTypeExtension, JsonNativeTypeExtension,
- JsonTypeExtension, ListTypeExtension, QueryRequest, Row, SemanticType, StructTypeExtension,
- VectorTypeExtension,
+ self, ColumnDataTypeExtension, DdlRequest, DecimalTypeExtension, DictionaryTypeExtension,
+ JsonList, JsonNativeTypeExtension, JsonObject, JsonTypeExtension, ListTypeExtension,
+ QueryRequest, Row, SemanticType, StructTypeExtension, VectorTypeExtension, json_value,
};
use paste::paste;
use snafu::prelude::*;
@@ -81,6 +80,10 @@ impl ColumnDataTypeWrapper {
pub fn to_parts(&self) -> (ColumnDataType, Option<ColumnDataTypeExtension>) {
(self.datatype, self.datatype_ext.clone())
}
+
+ pub fn into_parts(self) -> (ColumnDataType, Option<ColumnDataTypeExtension>) {
+ (self.datatype, self.datatype_ext)
+ }
}
impl From<ColumnDataTypeWrapper> for ConcreteDataType {
@@ -126,6 +129,7 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
};
ConcreteDataType::json_native_datatype(inner_type.into())
}
+ None => ConcreteDataType::Json(JsonType::null()),
_ => {
// invalid state, type extension is missing or invalid
ConcreteDataType::null_datatype()
@@ -215,6 +219,26 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
ConcreteDataType::null_datatype()
}
}
+ ColumnDataType::Dictionary => {
+ if let Some(TypeExt::DictionaryType(d)) = datatype_wrapper
+ .datatype_ext
+ .as_ref()
+ .and_then(|datatype_ext| datatype_ext.type_ext.as_ref())
+ {
+ let key_type = ColumnDataTypeWrapper {
+ datatype: d.key_datatype(),
+ datatype_ext: d.key_datatype_extension.clone().map(|ext| *ext),
+ };
+ let value_type = ColumnDataTypeWrapper {
+ datatype: d.value_datatype(),
+ datatype_ext: d.value_datatype_extension.clone().map(|ext| *ext),
+ };
+ ConcreteDataType::dictionary_datatype(key_type.into(), value_type.into())
+ } else {
+ // invalid state: type extension not found
+ ConcreteDataType::null_datatype()
+ }
+ }
}
}
}
@@ -338,13 +362,30 @@ impl ColumnDataTypeWrapper {
}),
}
}
+
+ pub fn dictionary_datatype(
+ key_type: ColumnDataTypeWrapper,
+ value_type: ColumnDataTypeWrapper,
+ ) -> Self {
+ ColumnDataTypeWrapper {
+ datatype: ColumnDataType::Dictionary,
+ datatype_ext: Some(ColumnDataTypeExtension {
+ type_ext: Some(TypeExt::DictionaryType(Box::new(DictionaryTypeExtension {
+ key_datatype: key_type.datatype().into(),
+ key_datatype_extension: key_type.datatype_ext.map(Box::new),
+ value_datatype: value_type.datatype().into(),
+ value_datatype_extension: value_type.datatype_ext.map(Box::new),
+ }))),
+ }),
+ }
+ }
}
impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
type Error = error::Error;
fn try_from(datatype: ConcreteDataType) -> Result<Self> {
- let column_datatype = match datatype {
+ let column_datatype = match &datatype {
ConcreteDataType::Boolean(_) => ColumnDataType::Boolean,
ConcreteDataType::Int8(_) => ColumnDataType::Int8,
ConcreteDataType::Int16(_) => ColumnDataType::Int16,
@@ -381,9 +422,8 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
ConcreteDataType::Vector(_) => ColumnDataType::Vector,
ConcreteDataType::List(_) => ColumnDataType::List,
ConcreteDataType::Struct(_) => ColumnDataType::Struct,
- ConcreteDataType::Null(_)
- | ConcreteDataType::Dictionary(_)
- | ConcreteDataType::Duration(_) => {
+ ConcreteDataType::Dictionary(_) => ColumnDataType::Dictionary,
+ ConcreteDataType::Null(_) | ConcreteDataType::Duration(_) => {
return error::IntoColumnDataTypeSnafu { from: datatype }.fail();
}
};
@@ -404,16 +444,22 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
JsonFormat::Jsonb => Some(ColumnDataTypeExtension {
type_ext: Some(TypeExt::JsonType(JsonTypeExtension::JsonBinary.into())),
}),
- JsonFormat::Native(inner) => {
- let inner_type = ColumnDataTypeWrapper::try_from(*inner.clone())?;
- Some(ColumnDataTypeExtension {
- type_ext: Some(TypeExt::JsonNativeType(Box::new(
- JsonNativeTypeExtension {
- datatype: inner_type.datatype.into(),
- datatype_extension: inner_type.datatype_ext.map(Box::new),
- },
- ))),
- })
+ JsonFormat::Native(native_type) => {
+ if native_type.is_null() {
+ None
+ } else {
+ let native_type = ConcreteDataType::from(native_type.as_ref());
+ let (datatype, datatype_extension) =
+ ColumnDataTypeWrapper::try_from(native_type)?.into_parts();
+ Some(ColumnDataTypeExtension {
+ type_ext: Some(TypeExt::JsonNativeType(Box::new(
+ JsonNativeTypeExtension {
+ datatype: datatype as i32,
+ datatype_extension: datatype_extension.map(Box::new),
+ },
+ ))),
+ })
+ }
}
}
} else {
@@ -463,6 +509,25 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
None
}
}
+ ColumnDataType::Dictionary => {
+ if let ConcreteDataType::Dictionary(dict_type) = &datatype {
+ let key_type = ColumnDataTypeWrapper::try_from(dict_type.key_type().clone())?;
+ let value_type =
+ ColumnDataTypeWrapper::try_from(dict_type.value_type().clone())?;
+ Some(ColumnDataTypeExtension {
+ type_ext: Some(TypeExt::DictionaryType(Box::new(
+ DictionaryTypeExtension {
+ key_datatype: key_type.datatype.into(),
+ key_datatype_extension: key_type.datatype_ext.map(Box::new),
+ value_datatype: value_type.datatype.into(),
+ value_datatype_extension: value_type.datatype_ext.map(Box::new),
+ },
+ ))),
+ })
+ } else {
+ None
+ }
+ }
_ => None,
};
Ok(Self {
@@ -601,6 +666,9 @@ pub fn values_with_capacity(datatype: ColumnDataType, capacity: usize) -> Values
struct_values: Vec::with_capacity(capacity),
..Default::default()
},
+ ColumnDataType::Dictionary => Values {
+ ..Default::default()
+ },
}
}
@@ -801,21 +869,8 @@ pub fn pb_value_to_value_ref<'a>(
}
ValueData::JsonValue(inner_value) => {
- let json_datatype_ext = datatype_ext
- .as_ref()
- .and_then(|ext| {
- if let Some(TypeExt::JsonNativeType(l)) = &ext.type_ext {
- Some(l)
- } else {
- None
- }
- })
- .expect("json value must contain datatype ext");
-
- ValueRef::Json(Box::new(pb_value_to_value_ref(
- inner_value,
- json_datatype_ext.datatype_extension.as_deref(),
- )))
+ let value = decode_json_value(inner_value);
+ ValueRef::Json(Box::new(value))
}
}
}
@@ -839,125 +894,64 @@ pub fn is_column_type_value_eq(
.unwrap_or(false)
}
-/// Convert value into proto's value.
-pub fn to_proto_value(value: Value) -> v1::Value {
- match value {
- Value::Null => v1::Value { value_data: None },
- Value::Boolean(v) => v1::Value {
- value_data: Some(ValueData::BoolValue(v)),
- },
- Value::UInt8(v) => v1::Value {
- value_data: Some(ValueData::U8Value(v.into())),
- },
- Value::UInt16(v) => v1::Value {
- value_data: Some(ValueData::U16Value(v.into())),
- },
- Value::UInt32(v) => v1::Value {
- value_data: Some(ValueData::U32Value(v)),
- },
- Value::UInt64(v) => v1::Value {
- value_data: Some(ValueData::U64Value(v)),
- },
- Value::Int8(v) => v1::Value {
- value_data: Some(ValueData::I8Value(v.into())),
- },
- Value::Int16(v) => v1::Value {
- value_data: Some(ValueData::I16Value(v.into())),
- },
- Value::Int32(v) => v1::Value {
- value_data: Some(ValueData::I32Value(v)),
- },
- Value::Int64(v) => v1::Value {
- value_data: Some(ValueData::I64Value(v)),
- },
- Value::Float32(v) => v1::Value {
- value_data: Some(ValueData::F32Value(*v)),
- },
- Value::Float64(v) => v1::Value {
- value_data: Some(ValueData::F64Value(*v)),
- },
- Value::String(v) => v1::Value {
- value_data: Some(ValueData::StringValue(v.as_utf8().to_string())),
- },
- Value::Binary(v) => v1::Value {
- value_data: Some(ValueData::BinaryValue(v.to_vec())),
- },
- Value::Date(v) => v1::Value {
- value_data: Some(ValueData::DateValue(v.val())),
- },
- Value::Timestamp(v) => match v.unit() {
- TimeUnit::Second => v1::Value {
- value_data: Some(ValueData::TimestampSecondValue(v.value())),
- },
- TimeUnit::Millisecond => v1::Value {
- value_data: Some(ValueData::TimestampMillisecondValue(v.value())),
- },
- TimeUnit::Microsecond => v1::Value {
- value_data: Some(ValueData::TimestampMicrosecondValue(v.value())),
- },
- TimeUnit::Nanosecond => v1::Value {
- value_data: Some(ValueData::TimestampNanosecondValue(v.value())),
- },
- },
- Value::Time(v) => match v.unit() {
- TimeUnit::Second => v1::Value {
- value_data: Some(ValueData::TimeSecondValue(v.value())),
- },
- TimeUnit::Millisecond => v1::Value {
- value_data: Some(ValueData::TimeMillisecondValue(v.value())),
- },
- TimeUnit::Microsecond => v1::Value {
- value_data: Some(ValueData::TimeMicrosecondValue(v.value())),
- },
- TimeUnit::Nanosecond => v1::Value {
- value_data: Some(ValueData::TimeNanosecondValue(v.value())),
- },
- },
- Value::IntervalYearMonth(v) => v1::Value {
- value_data: Some(ValueData::IntervalYearMonthValue(v.to_i32())),
- },
- Value::IntervalDayTime(v) => v1::Value {
- value_data: Some(ValueData::IntervalDayTimeValue(v.to_i64())),
- },
- Value::IntervalMonthDayNano(v) => v1::Value {
- value_data: Some(ValueData::IntervalMonthDayNanoValue(
- convert_month_day_nano_to_pb(v),
- )),
- },
- Value::Decimal128(v) => v1::Value {
- value_data: Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
- },
- Value::List(list_value) => v1::Value {
- value_data: Some(ValueData::ListValue(v1::ListValue {
- items: convert_list_to_pb_values(list_value),
+fn encode_json_value(value: JsonValue) -> v1::JsonValue {
+ fn helper(json: JsonVariant) -> v1::JsonValue {
+ let value = match json {
+ JsonVariant::Null => None,
+ JsonVariant::Bool(x) => Some(json_value::Value::Boolean(x)),
+ JsonVariant::Number(x) => Some(match x {
+ JsonNumber::PosInt(i) => json_value::Value::Uint(i),
+ JsonNumber::NegInt(i) => json_value::Value::Int(i),
+ JsonNumber::Float(f) => json_value::Value::Float(f.0),
+ }),
+ JsonVariant::String(x) => Some(json_value::Value::Str(x)),
+ JsonVariant::Array(x) => Some(json_value::Value::Array(JsonList {
+ items: x.into_iter().map(helper).collect::<Vec<_>>(),
})),
- },
- Value::Struct(struct_value) => v1::Value {
- value_data: Some(ValueData::StructValue(v1::StructValue {
- items: convert_struct_to_pb_values(struct_value),
- })),
- },
- Value::Json(v) => v1::Value {
- value_data: Some(ValueData::JsonValue(Box::new(to_proto_value(*v)))),
- },
- Value::Duration(_) => v1::Value { value_data: None },
+ JsonVariant::Object(x) => {
+ let entries = x
+ .into_iter()
+ .map(|(key, v)| v1::json_object::Entry {
+ key,
+ value: Some(helper(v)),
+ })
+ .collect::<Vec<_>>();
+ Some(json_value::Value::Object(JsonObject { entries }))
+ }
+ };
+ v1::JsonValue { value }
}
+ helper(value.into_variant())
}
-fn convert_list_to_pb_values(list_value: ListValue) -> Vec<v1::Value> {
- list_value
- .take_items()
- .into_iter()
- .map(to_proto_value)
- .collect()
-}
-
-fn convert_struct_to_pb_values(struct_value: StructValue) -> Vec<v1::Value> {
- struct_value
- .take_items()
- .into_iter()
- .map(to_proto_value)
- .collect()
+fn decode_json_value(value: &v1::JsonValue) -> JsonValueRef<'_> {
+ let Some(value) = &value.value else {
+ return JsonValueRef::null();
+ };
+ match value {
+ json_value::Value::Boolean(x) => (*x).into(),
+ json_value::Value::Int(x) => (*x).into(),
+ json_value::Value::Uint(x) => (*x).into(),
+ json_value::Value::Float(x) => (*x).into(),
+ json_value::Value::Str(x) => (x.as_str()).into(),
+ json_value::Value::Array(array) => array
+ .items
+ .iter()
+ .map(|x| decode_json_value(x).into_variant())
+ .collect::<Vec<_>>()
+ .into(),
+ json_value::Value::Object(x) => x
+ .entries
+ .iter()
+ .filter_map(|entry| {
+ entry
+ .value
+ .as_ref()
+ .map(|v| (entry.key.as_str(), decode_json_value(v).into_variant()))
+ })
+ .collect::<Vec<_>>()
+ .into(),
+ }
}
/// Returns the [ColumnDataTypeWrapper] of the value.
@@ -1006,14 +1000,14 @@ pub fn vectors_to_rows<'a>(
let mut rows = vec![Row { values: vec![] }; row_count];
for column in columns {
for (row_index, row) in rows.iter_mut().enumerate() {
- row.values.push(value_to_grpc_value(column.get(row_index)))
+ row.values.push(to_grpc_value(column.get(row_index)))
}
}
rows
}
-pub fn value_to_grpc_value(value: Value) -> GrpcValue {
+pub fn to_grpc_value(value: Value) -> GrpcValue {
GrpcValue {
value_data: match value {
Value::Null => None,
@@ -1053,7 +1047,7 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
let items = list_value
.take_items()
.into_iter()
- .map(value_to_grpc_value)
+ .map(to_grpc_value)
.collect();
Some(ValueData::ListValue(v1::ListValue { items }))
}
@@ -1061,13 +1055,11 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
let items = struct_value
.take_items()
.into_iter()
- .map(value_to_grpc_value)
+ .map(to_grpc_value)
.collect();
Some(ValueData::StructValue(v1::StructValue { items }))
}
- Value::Json(inner_value) => Some(ValueData::JsonValue(Box::new(value_to_grpc_value(
- *inner_value,
- )))),
+ Value::Json(v) => Some(ValueData::JsonValue(encode_json_value(*v))),
Value::Duration(_) => unreachable!(),
},
}
@@ -1163,6 +1155,7 @@ mod tests {
use common_time::interval::IntervalUnit;
use datatypes::scalars::ScalarVector;
use datatypes::types::{Int8Type, Int32Type, UInt8Type, UInt32Type};
+ use datatypes::value::{ListValue, StructValue};
use datatypes::vectors::{
BooleanVector, DateVector, Float32Vector, PrimitiveVector, StringVector,
};
@@ -1259,6 +1252,9 @@ mod tests {
let values = values_with_capacity(ColumnDataType::Json, 2);
assert_eq!(2, values.json_values.capacity());
assert_eq!(2, values.string_values.capacity());
+
+ let values = values_with_capacity(ColumnDataType::Dictionary, 2);
+ assert!(values.bool_values.is_empty());
}
#[test]
@@ -1355,6 +1351,17 @@ mod tests {
ConcreteDataType::list_datatype(Arc::new(ConcreteDataType::string_datatype())),
ColumnDataTypeWrapper::list_datatype(ColumnDataTypeWrapper::string_datatype()).into()
);
+ assert_eq!(
+ ConcreteDataType::dictionary_datatype(
+ ConcreteDataType::int32_datatype(),
+ ConcreteDataType::string_datatype()
+ ),
+ ColumnDataTypeWrapper::dictionary_datatype(
+ ColumnDataTypeWrapper::int32_datatype(),
+ ColumnDataTypeWrapper::string_datatype()
+ )
+ .into()
+ );
let struct_type = StructType::new(Arc::new(vec![
StructField::new("id".to_string(), ConcreteDataType::int64_datatype(), true),
StructField::new(
@@ -1525,6 +1532,18 @@ mod tests {
ColumnDataTypeWrapper::vector_datatype(3),
ConcreteDataType::vector_datatype(3).try_into().unwrap()
);
+ assert_eq!(
+ ColumnDataTypeWrapper::dictionary_datatype(
+ ColumnDataTypeWrapper::int32_datatype(),
+ ColumnDataTypeWrapper::string_datatype()
+ ),
+ ConcreteDataType::dictionary_datatype(
+ ConcreteDataType::int32_datatype(),
+ ConcreteDataType::string_datatype()
+ )
+ .try_into()
+ .unwrap()
+ );
let result: Result<ColumnDataTypeWrapper> = ConcreteDataType::null_datatype().try_into();
assert!(result.is_err());
@@ -1580,6 +1599,20 @@ mod tests {
datatype_extension: Some(Box::new(ColumnDataTypeExtension {
type_ext: Some(TypeExt::StructType(StructTypeExtension {
fields: vec![
+ v1::StructField {
+ name: "address".to_string(),
+ datatype: ColumnDataTypeWrapper::string_datatype()
+ .datatype()
+ .into(),
+ datatype_extension: None
+ },
+ v1::StructField {
+ name: "age".to_string(),
+ datatype: ColumnDataTypeWrapper::int64_datatype()
+ .datatype()
+ .into(),
+ datatype_extension: None
+ },
v1::StructField {
name: "id".to_string(),
datatype: ColumnDataTypeWrapper::int64_datatype()
@@ -1594,20 +1627,6 @@ mod tests {
.into(),
datatype_extension: None
},
- v1::StructField {
- name: "age".to_string(),
- datatype: ColumnDataTypeWrapper::int32_datatype()
- .datatype()
- .into(),
- datatype_extension: None
- },
- v1::StructField {
- name: "address".to_string(),
- datatype: ColumnDataTypeWrapper::string_datatype()
- .datatype()
- .into(),
- datatype_extension: None
- }
]
}))
}))
@@ -1740,7 +1759,7 @@ mod tests {
Arc::new(ConcreteDataType::boolean_datatype()),
));
- let pb_value = to_proto_value(value);
+ let pb_value = to_grpc_value(value);
match pb_value.value_data.unwrap() {
ValueData::ListValue(pb_list_value) => {
@@ -1769,7 +1788,7 @@ mod tests {
.unwrap(),
);
- let pb_value = to_proto_value(value);
+ let pb_value = to_grpc_value(value);
match pb_value.value_data.unwrap() {
ValueData::StructValue(pb_struct_value) => {
@@ -1778,4 +1797,199 @@ mod tests {
_ => panic!("Unexpected value type"),
}
}
+
+ #[test]
+ fn test_encode_decode_json_value() {
+ let json = JsonValue::null();
+ let proto = encode_json_value(json.clone());
+ assert!(proto.value.is_none());
+ let value = decode_json_value(&proto);
+ assert_eq!(json.as_ref(), value);
+
+ let json: JsonValue = true.into();
+ let proto = encode_json_value(json.clone());
+ assert_eq!(proto.value, Some(json_value::Value::Boolean(true)));
+ let value = decode_json_value(&proto);
+ assert_eq!(json.as_ref(), value);
+
+ let json: JsonValue = (-1i64).into();
+ let proto = encode_json_value(json.clone());
+ assert_eq!(proto.value, Some(json_value::Value::Int(-1)));
+ let value = decode_json_value(&proto);
+ assert_eq!(json.as_ref(), value);
+
+ let json: JsonValue = 1u64.into();
+ let proto = encode_json_value(json.clone());
+ assert_eq!(proto.value, Some(json_value::Value::Uint(1)));
+ let value = decode_json_value(&proto);
+ assert_eq!(json.as_ref(), value);
+
+ let json: JsonValue = 1.0f64.into();
+ let proto = encode_json_value(json.clone());
+ assert_eq!(proto.value, Some(json_value::Value::Float(1.0)));
+ let value = decode_json_value(&proto);
+ assert_eq!(json.as_ref(), value);
+
+ let json: JsonValue = "s".into();
+ let proto = encode_json_value(json.clone());
+ assert_eq!(proto.value, Some(json_value::Value::Str("s".to_string())));
+ let value = decode_json_value(&proto);
+ assert_eq!(json.as_ref(), value);
+
+ let json: JsonValue = [1i64, 2, 3].into();
+ let proto = encode_json_value(json.clone());
+ assert_eq!(
+ proto.value,
+ Some(json_value::Value::Array(JsonList {
+ items: vec![
+ v1::JsonValue {
+ value: Some(json_value::Value::Int(1))
+ },
+ v1::JsonValue {
+ value: Some(json_value::Value::Int(2))
+ },
+ v1::JsonValue {
+ value: Some(json_value::Value::Int(3))
+ }
+ ]
+ }))
+ );
+ let value = decode_json_value(&proto);
+ assert_eq!(json.as_ref(), value);
+
+ let json: JsonValue = [(); 0].into();
+ let proto = encode_json_value(json.clone());
+ assert_eq!(
+ proto.value,
+ Some(json_value::Value::Array(JsonList { items: vec![] }))
+ );
+ let value = decode_json_value(&proto);
+ assert_eq!(json.as_ref(), value);
+
+ let json: JsonValue = [("k3", 3i64), ("k2", 2i64), ("k1", 1i64)].into();
+ let proto = encode_json_value(json.clone());
+ assert_eq!(
+ proto.value,
+ Some(json_value::Value::Object(JsonObject {
+ entries: vec![
+ v1::json_object::Entry {
+ key: "k1".to_string(),
+ value: Some(v1::JsonValue {
+ value: Some(json_value::Value::Int(1))
+ }),
+ },
+ v1::json_object::Entry {
+ key: "k2".to_string(),
+ value: Some(v1::JsonValue {
+ value: Some(json_value::Value::Int(2))
+ }),
+ },
+ v1::json_object::Entry {
+ key: "k3".to_string(),
+ value: Some(v1::JsonValue {
+ value: Some(json_value::Value::Int(3))
+ }),
+ },
+ ]
+ }))
+ );
+ let value = decode_json_value(&proto);
+ assert_eq!(json.as_ref(), value);
+
+ let json: JsonValue = [("null", ()); 0].into();
+ let proto = encode_json_value(json.clone());
+ assert_eq!(
+ proto.value,
+ Some(json_value::Value::Object(JsonObject { entries: vec![] }))
+ );
+ let value = decode_json_value(&proto);
+ assert_eq!(json.as_ref(), value);
+
+ let json: JsonValue = [
+ ("null", JsonVariant::from(())),
+ ("bool", false.into()),
+ ("list", ["hello", "world"].into()),
+ (
+ "object",
+ [
+ ("positive_i", JsonVariant::from(42u64)),
+ ("negative_i", (-42i64).into()),
+ ("nested", [("what", "blah")].into()),
+ ]
+ .into(),
+ ),
+ ]
+ .into();
+ let proto = encode_json_value(json.clone());
+ assert_eq!(
+ proto.value,
+ Some(json_value::Value::Object(JsonObject {
+ entries: vec![
+ v1::json_object::Entry {
+ key: "bool".to_string(),
+ value: Some(v1::JsonValue {
+ value: Some(json_value::Value::Boolean(false))
+ }),
+ },
+ v1::json_object::Entry {
+ key: "list".to_string(),
+ value: Some(v1::JsonValue {
+ value: Some(json_value::Value::Array(JsonList {
+ items: vec![
+ v1::JsonValue {
+ value: Some(json_value::Value::Str("hello".to_string()))
+ },
+ v1::JsonValue {
+ value: Some(json_value::Value::Str("world".to_string()))
+ },
+ ]
+ }))
+ }),
+ },
+ v1::json_object::Entry {
+ key: "null".to_string(),
+ value: Some(v1::JsonValue { value: None }),
+ },
+ v1::json_object::Entry {
+ key: "object".to_string(),
+ value: Some(v1::JsonValue {
+ value: Some(json_value::Value::Object(JsonObject {
+ entries: vec![
+ v1::json_object::Entry {
+ key: "negative_i".to_string(),
+ value: Some(v1::JsonValue {
+ value: Some(json_value::Value::Int(-42))
+ }),
+ },
+ v1::json_object::Entry {
+ key: "nested".to_string(),
+ value: Some(v1::JsonValue {
+ value: Some(json_value::Value::Object(JsonObject {
+ entries: vec![v1::json_object::Entry {
+ key: "what".to_string(),
+ value: Some(v1::JsonValue {
+ value: Some(json_value::Value::Str(
+ "blah".to_string()
+ ))
+ }),
+ },]
+ }))
+ }),
+ },
+ v1::json_object::Entry {
+ key: "positive_i".to_string(),
+ value: Some(v1::JsonValue {
+ value: Some(json_value::Value::Uint(42))
+ }),
+ },
+ ]
+ }))
+ }),
+ },
+ ]
+ }))
+ );
+ let value = decode_json_value(&proto);
+ assert_eq!(json.as_ref(), value);
+ }
}
diff --git a/src/api/src/v1/column_def.rs b/src/api/src/v1/column_def.rs
index 5be3d5c196..88ee0c5749 100644
--- a/src/api/src/v1/column_def.rs
+++ b/src/api/src/v1/column_def.rs
@@ -14,6 +14,7 @@
use std::collections::HashMap;
+use arrow_schema::extension::{EXTENSION_TYPE_METADATA_KEY, EXTENSION_TYPE_NAME_KEY};
use datatypes::schema::{
COMMENT_KEY, ColumnDefaultConstraint, ColumnSchema, FULLTEXT_KEY, FulltextAnalyzer,
FulltextBackend, FulltextOptions, INVERTED_INDEX_KEY, SKIPPING_INDEX_KEY, SkippingIndexOptions,
@@ -68,6 +69,15 @@ pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
if let Some(skipping_index) = options.options.get(SKIPPING_INDEX_GRPC_KEY) {
metadata.insert(SKIPPING_INDEX_KEY.to_string(), skipping_index.to_owned());
}
+ if let Some(extension_name) = options.options.get(EXTENSION_TYPE_NAME_KEY) {
+ metadata.insert(EXTENSION_TYPE_NAME_KEY.to_string(), extension_name.clone());
+ }
+ if let Some(extension_metadata) = options.options.get(EXTENSION_TYPE_METADATA_KEY) {
+ metadata.insert(
+ EXTENSION_TYPE_METADATA_KEY.to_string(),
+ extension_metadata.clone(),
+ );
+ }
}
ColumnSchema::new(&column_def.name, data_type.into(), column_def.is_nullable)
@@ -139,6 +149,17 @@ pub fn options_from_column_schema(column_schema: &ColumnSchema) -> Option<ColumnOptions> {
+pub trait CatalogManagerConfigurator<C>: Send + Sync {
+ async fn configure(
+ &self,
+ builder: KvBackendCatalogManagerBuilder,
+ ctx: C,
+ ) -> std::result::Result;
+}
+
+pub type CatalogManagerConfiguratorRef<C> = Arc<dyn CatalogManagerConfigurator<C>>;
+
pub struct KvBackendCatalogManagerBuilder {
information_extension: InformationExtensionRef,
backend: KvBackendRef,
cache_registry: LayeredCacheRegistryRef,
procedure_manager: Option<ProcedureManagerRef>,
process_manager: Option<ProcessManagerRef>,
- #[cfg(feature = "enterprise")]
- extra_information_table_factories:
- std::collections::HashMap<String, InformationSchemaTableFactoryRef>,
+ extra_information_table_factories: HashMap<String, InformationSchemaTableFactoryRef>,
}
impl KvBackendCatalogManagerBuilder {
@@ -54,8 +67,7 @@ impl KvBackendCatalogManagerBuilder {
cache_registry,
procedure_manager: None,
process_manager: None,
- #[cfg(feature = "enterprise")]
- extra_information_table_factories: std::collections::HashMap::new(),
+ extra_information_table_factories: HashMap::new(),
}
}
@@ -70,10 +82,9 @@ impl KvBackendCatalogManagerBuilder {
}
/// Sets the extra information tables.
- #[cfg(feature = "enterprise")]
pub fn with_extra_information_table_factories(
mut self,
- factories: std::collections::HashMap<String, InformationSchemaTableFactoryRef>,
+ factories: HashMap<String, InformationSchemaTableFactoryRef>,
) -> Self {
self.extra_information_table_factories = factories;
self
@@ -86,7 +97,6 @@ impl KvBackendCatalogManagerBuilder {
cache_registry,
procedure_manager,
process_manager,
- #[cfg(feature = "enterprise")]
extra_information_table_factories,
} = self;
Arc::new_cyclic(|me| KvBackendCatalogManager {
@@ -110,7 +120,6 @@ impl KvBackendCatalogManagerBuilder {
process_manager.clone(),
backend.clone(),
);
- #[cfg(feature = "enterprise")]
let provider = provider
.with_extra_table_factories(extra_information_table_factories.clone());
Arc::new(provider)
@@ -119,9 +128,9 @@ impl KvBackendCatalogManagerBuilder {
DEFAULT_CATALOG_NAME.to_string(),
me.clone(),
)),
+ numbers_table_provider: NumbersTableProvider,
backend,
process_manager,
- #[cfg(feature = "enterprise")]
extra_information_table_factories,
},
cache_registry,
diff --git a/src/catalog/src/kvbackend/manager.rs b/src/catalog/src/kvbackend/manager.rs
index 902f15c09e..7852142c6a 100644
--- a/src/catalog/src/kvbackend/manager.rs
+++ b/src/catalog/src/kvbackend/manager.rs
@@ -18,8 +18,7 @@ use std::sync::{Arc, Weak};
use async_stream::try_stream;
use common_catalog::consts::{
- DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID,
- PG_CATALOG_NAME,
+ DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, PG_CATALOG_NAME,
};
use common_error::ext::BoxedError;
use common_meta::cache::{
@@ -45,7 +44,6 @@ use table::TableRef;
use table::dist_table::DistTable;
use table::metadata::{TableId, TableInfoRef};
use table::table::PartitionRules;
-use table::table::numbers::{NUMBERS_TABLE_NAME, NumbersTable};
use table::table_name::TableName;
use tokio::sync::Semaphore;
use tokio_stream::wrappers::ReceiverStream;
@@ -55,12 +53,13 @@ use crate::error::{
CacheNotFoundSnafu, GetTableCacheSnafu, InvalidTableInfoInCatalogSnafu, ListCatalogsSnafu,
ListSchemasSnafu, ListTablesSnafu, Result, TableMetadataManagerSnafu,
};
-#[cfg(feature = "enterprise")]
-use crate::information_schema::InformationSchemaTableFactoryRef;
-use crate::information_schema::{InformationExtensionRef, InformationSchemaProvider};
+use crate::information_schema::{
+ InformationExtensionRef, InformationSchemaProvider, InformationSchemaTableFactoryRef,
+};
use crate::kvbackend::TableCacheRef;
use crate::process_manager::ProcessManagerRef;
use crate::system_schema::SystemSchemaProvider;
+use crate::system_schema::numbers_table_provider::NumbersTableProvider;
use crate::system_schema::pg_catalog::PGCatalogProvider;
/// Access all existing catalog, schema and tables.
@@ -555,9 +554,9 @@ pub(super) struct SystemCatalog {
// system_schema_provider for default catalog
pub(super) information_schema_provider: Arc<InformationSchemaProvider>,
pub(super) pg_catalog_provider: Arc<PGCatalogProvider>,
+ pub(super) numbers_table_provider: NumbersTableProvider,
pub(super) backend: KvBackendRef,
pub(super) process_manager: Option,
- #[cfg(feature = "enterprise")]
pub(super) extra_information_table_factories:
std::collections::HashMap<String, InformationSchemaTableFactoryRef>,
}
@@ -584,9 +583,7 @@ impl SystemCatalog {
PG_CATALOG_NAME if channel == Channel::Postgres => {
self.pg_catalog_provider.table_names()
}
- DEFAULT_SCHEMA_NAME => {
- vec![NUMBERS_TABLE_NAME.to_string()]
- }
+ DEFAULT_SCHEMA_NAME => self.numbers_table_provider.table_names(),
_ => vec![],
}
}
@@ -604,7 +601,7 @@ impl SystemCatalog {
if schema == INFORMATION_SCHEMA_NAME {
self.information_schema_provider.table(table).is_some()
} else if schema == DEFAULT_SCHEMA_NAME {
- table == NUMBERS_TABLE_NAME
+ self.numbers_table_provider.table_exists(table)
} else if schema == PG_CATALOG_NAME && channel == Channel::Postgres {
self.pg_catalog_provider.table(table).is_some()
} else {
@@ -630,7 +627,6 @@ impl SystemCatalog {
self.process_manager.clone(),
self.backend.clone(),
);
- #[cfg(feature = "enterprise")]
let provider = provider
.with_extra_table_factories(self.extra_information_table_factories.clone());
Arc::new(provider)
@@ -649,8 +645,8 @@ impl SystemCatalog {
});
pg_catalog_provider.table(table_name)
}
- } else if schema == DEFAULT_SCHEMA_NAME && table_name == NUMBERS_TABLE_NAME {
- Some(NumbersTable::table(NUMBERS_TABLE_ID))
+ } else if schema == DEFAULT_SCHEMA_NAME {
+ self.numbers_table_provider.table(table_name)
} else {
None
}
diff --git a/src/catalog/src/system_schema.rs b/src/catalog/src/system_schema.rs
index c813ab6ab7..2e1c890427 100644
--- a/src/catalog/src/system_schema.rs
+++ b/src/catalog/src/system_schema.rs
@@ -14,6 +14,7 @@
pub mod information_schema;
mod memory_table;
+pub mod numbers_table_provider;
pub mod pg_catalog;
pub mod predicate;
mod utils;
diff --git a/src/catalog/src/system_schema/information_schema.rs b/src/catalog/src/system_schema/information_schema.rs
index 3ffcf73631..18384b8163 100644
--- a/src/catalog/src/system_schema/information_schema.rs
+++ b/src/catalog/src/system_schema/information_schema.rs
@@ -22,7 +22,6 @@ mod procedure_info;
pub mod process_list;
pub mod region_peers;
mod region_statistics;
-mod runtime_metrics;
pub mod schemata;
mod ssts;
mod table_constraints;
@@ -65,7 +64,6 @@ use crate::system_schema::information_schema::information_memory_table::get_sche
use crate::system_schema::information_schema::key_column_usage::InformationSchemaKeyColumnUsage;
use crate::system_schema::information_schema::partitions::InformationSchemaPartitions;
use crate::system_schema::information_schema::region_peers::InformationSchemaRegionPeers;
-use crate::system_schema::information_schema::runtime_metrics::InformationSchemaMetrics;
use crate::system_schema::information_schema::schemata::InformationSchemaSchemata;
use crate::system_schema::information_schema::ssts::{
InformationSchemaSstsIndexMeta, InformationSchemaSstsManifest, InformationSchemaSstsStorage,
@@ -97,7 +95,6 @@ lazy_static! {
ROUTINES,
SCHEMA_PRIVILEGES,
TABLE_PRIVILEGES,
- TRIGGERS,
GLOBAL_STATUS,
SESSION_STATUS,
PARTITIONS,
@@ -120,7 +117,6 @@ macro_rules! setup_memory_table {
};
}
-#[cfg(feature = "enterprise")]
pub struct MakeInformationTableRequest {
pub catalog_name: String,
pub catalog_manager: Weak<dyn CatalogManager>,
@@ -131,12 +127,10 @@ pub struct MakeInformationTableRequest {
///
/// This trait allows for extensibility of the information schema by providing
/// a way to dynamically create custom information schema tables.
-#[cfg(feature = "enterprise")]
pub trait InformationSchemaTableFactory {
fn make_information_table(&self, req: MakeInformationTableRequest) -> SystemTableRef;
}
-#[cfg(feature = "enterprise")]
pub type InformationSchemaTableFactoryRef = Arc<dyn InformationSchemaTableFactory>;
/// The `information_schema` tables info provider.
@@ -146,9 +140,7 @@ pub struct InformationSchemaProvider {
process_manager: Option,
flow_metadata_manager: Arc,
tables: HashMap<String, TableRef>,
- #[allow(dead_code)]
kv_backend: KvBackendRef,
- #[cfg(feature = "enterprise")]
extra_table_factories: HashMap<String, InformationSchemaTableFactoryRef>,
}
@@ -169,7 +161,6 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
}
fn system_table(&self, name: &str) -> Option<SystemTableRef> {
- #[cfg(feature = "enterprise")]
if let Some(factory) = self.extra_table_factories.get(name) {
let req = MakeInformationTableRequest {
catalog_name: self.catalog_name.clone(),
@@ -207,7 +198,6 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
ROUTINES => setup_memory_table!(ROUTINES),
SCHEMA_PRIVILEGES => setup_memory_table!(SCHEMA_PRIVILEGES),
TABLE_PRIVILEGES => setup_memory_table!(TABLE_PRIVILEGES),
- TRIGGERS => setup_memory_table!(TRIGGERS),
GLOBAL_STATUS => setup_memory_table!(GLOBAL_STATUS),
SESSION_STATUS => setup_memory_table!(SESSION_STATUS),
KEY_COLUMN_USAGE => Some(Arc::new(InformationSchemaKeyColumnUsage::new(
@@ -218,7 +208,6 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
self.catalog_name.clone(),
self.catalog_manager.clone(),
)) as _),
- RUNTIME_METRICS => Some(Arc::new(InformationSchemaMetrics::new())),
PARTITIONS => Some(Arc::new(InformationSchemaPartitions::new(
self.catalog_name.clone(),
self.catalog_manager.clone(),
@@ -286,7 +275,6 @@ impl InformationSchemaProvider {
process_manager,
tables: HashMap::new(),
kv_backend,
- #[cfg(feature = "enterprise")]
extra_table_factories: HashMap::new(),
};
@@ -295,7 +283,6 @@ impl InformationSchemaProvider {
provider
}
- #[cfg(feature = "enterprise")]
pub(crate) fn with_extra_table_factories(
mut self,
factories: HashMap<String, InformationSchemaTableFactoryRef>,
@@ -313,10 +300,6 @@ impl InformationSchemaProvider {
// authentication details, and other critical information.
// Only put these tables under `greptime` catalog to prevent info leak.
if self.catalog_name == DEFAULT_CATALOG_NAME {
- tables.insert(
- RUNTIME_METRICS.to_string(),
- self.build_table(RUNTIME_METRICS).unwrap(),
- );
tables.insert(
BUILD_INFO.to_string(),
self.build_table(BUILD_INFO).unwrap(),
@@ -367,7 +350,6 @@ impl InformationSchemaProvider {
if let Some(process_list) = self.build_table(PROCESS_LIST) {
tables.insert(PROCESS_LIST.to_string(), process_list);
}
- #[cfg(feature = "enterprise")]
for name in self.extra_table_factories.keys() {
tables.insert(name.clone(), self.build_table(name).expect(name));
}
diff --git a/src/catalog/src/system_schema/information_schema/cluster_info.rs b/src/catalog/src/system_schema/information_schema/cluster_info.rs
index f45dc5be06..1ba1a55fb6 100644
--- a/src/catalog/src/system_schema/information_schema/cluster_info.rs
+++ b/src/catalog/src/system_schema/information_schema/cluster_info.rs
@@ -33,7 +33,6 @@ use datatypes::timestamp::TimestampMillisecond;
use datatypes::value::Value;
use datatypes::vectors::{
Int64VectorBuilder, StringVectorBuilder, TimestampMillisecondVectorBuilder,
- UInt32VectorBuilder, UInt64VectorBuilder,
};
use serde::Serialize;
use snafu::ResultExt;
@@ -53,6 +52,8 @@ const PEER_ADDR: &str = "peer_addr";
const PEER_HOSTNAME: &str = "peer_hostname";
const TOTAL_CPU_MILLICORES: &str = "total_cpu_millicores";
const TOTAL_MEMORY_BYTES: &str = "total_memory_bytes";
+const CPU_USAGE_MILLICORES: &str = "cpu_usage_millicores";
+const MEMORY_USAGE_BYTES: &str = "memory_usage_bytes";
const VERSION: &str = "version";
const GIT_COMMIT: &str = "git_commit";
const START_TIME: &str = "start_time";
@@ -67,15 +68,17 @@ const INIT_CAPACITY: usize = 42;
/// - `peer_id`: the peer server id.
/// - `peer_type`: the peer type, such as `datanode`, `frontend`, `metasrv` etc.
/// - `peer_addr`: the peer gRPC address.
+/// - `peer_hostname`: the hostname of the peer.
/// - `total_cpu_millicores`: the total CPU millicores of the peer.
/// - `total_memory_bytes`: the total memory bytes of the peer.
+/// - `cpu_usage_millicores`: the CPU usage millicores of the peer.
+/// - `memory_usage_bytes`: the memory usage bytes of the peer.
/// - `version`: the build package version of the peer.
/// - `git_commit`: the build git commit hash of the peer.
/// - `start_time`: the starting time of the peer.
/// - `uptime`: the uptime of the peer.
/// - `active_time`: the time since the last activity of the peer.
/// - `node_status`: the status info of the peer.
-/// - `peer_hostname`: the hostname of the peer.
///
#[derive(Debug)]
pub(super) struct InformationSchemaClusterInfo {
@@ -99,12 +102,22 @@ impl InformationSchemaClusterInfo {
ColumnSchema::new(PEER_HOSTNAME, ConcreteDataType::string_datatype(), true),
ColumnSchema::new(
TOTAL_CPU_MILLICORES,
- ConcreteDataType::uint32_datatype(),
+ ConcreteDataType::int64_datatype(),
false,
),
ColumnSchema::new(
TOTAL_MEMORY_BYTES,
- ConcreteDataType::uint64_datatype(),
+ ConcreteDataType::int64_datatype(),
+ false,
+ ),
+ ColumnSchema::new(
+ CPU_USAGE_MILLICORES,
+ ConcreteDataType::int64_datatype(),
+ false,
+ ),
+ ColumnSchema::new(
+ MEMORY_USAGE_BYTES,
+ ConcreteDataType::int64_datatype(),
false,
),
ColumnSchema::new(VERSION, ConcreteDataType::string_datatype(), false),
@@ -167,8 +180,10 @@ struct InformationSchemaClusterInfoBuilder {
peer_types: StringVectorBuilder,
peer_addrs: StringVectorBuilder,
peer_hostnames: StringVectorBuilder,
- cpus: UInt32VectorBuilder,
- memory_bytes: UInt64VectorBuilder,
+ total_cpu_millicores: Int64VectorBuilder,
+ total_memory_bytes: Int64VectorBuilder,
+ cpu_usage_millicores: Int64VectorBuilder,
+ memory_usage_bytes: Int64VectorBuilder,
versions: StringVectorBuilder,
git_commits: StringVectorBuilder,
start_times: TimestampMillisecondVectorBuilder,
@@ -186,8 +201,10 @@ impl InformationSchemaClusterInfoBuilder {
peer_types: StringVectorBuilder::with_capacity(INIT_CAPACITY),
peer_addrs: StringVectorBuilder::with_capacity(INIT_CAPACITY),
peer_hostnames: StringVectorBuilder::with_capacity(INIT_CAPACITY),
- cpus: UInt32VectorBuilder::with_capacity(INIT_CAPACITY),
- memory_bytes: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
+ total_cpu_millicores: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
+ total_memory_bytes: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
+ cpu_usage_millicores: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
+ memory_usage_bytes: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
versions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
git_commits: StringVectorBuilder::with_capacity(INIT_CAPACITY),
start_times: TimestampMillisecondVectorBuilder::with_capacity(INIT_CAPACITY),
@@ -243,8 +260,14 @@ impl InformationSchemaClusterInfoBuilder {
self.start_times.push(None);
self.uptimes.push(None);
}
- self.cpus.push(Some(node_info.cpus));
- self.memory_bytes.push(Some(node_info.memory_bytes));
+ self.total_cpu_millicores
+ .push(Some(node_info.total_cpu_millicores));
+ self.total_memory_bytes
+ .push(Some(node_info.total_memory_bytes));
+ self.cpu_usage_millicores
+ .push(Some(node_info.cpu_usage_millicores));
+ self.memory_usage_bytes
+ .push(Some(node_info.memory_usage_bytes));
if node_info.last_activity_ts > 0 {
self.active_times.push(Some(
@@ -269,8 +292,10 @@ impl InformationSchemaClusterInfoBuilder {
Arc::new(self.peer_types.finish()),
Arc::new(self.peer_addrs.finish()),
Arc::new(self.peer_hostnames.finish()),
- Arc::new(self.cpus.finish()),
- Arc::new(self.memory_bytes.finish()),
+ Arc::new(self.total_cpu_millicores.finish()),
+ Arc::new(self.total_memory_bytes.finish()),
+ Arc::new(self.cpu_usage_millicores.finish()),
+ Arc::new(self.memory_usage_bytes.finish()),
Arc::new(self.versions.finish()),
Arc::new(self.git_commits.finish()),
Arc::new(self.start_times.finish()),
diff --git a/src/catalog/src/system_schema/information_schema/information_memory_table.rs b/src/catalog/src/system_schema/information_schema/information_memory_table.rs
index 03fbd16e13..56a84a0da1 100644
--- a/src/catalog/src/system_schema/information_schema/information_memory_table.rs
+++ b/src/catalog/src/system_schema/information_schema/information_memory_table.rs
@@ -15,8 +15,7 @@
use std::sync::Arc;
use common_catalog::consts::{METRIC_ENGINE, MITO_ENGINE};
-use datatypes::data_type::ConcreteDataType;
-use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
+use datatypes::schema::{Schema, SchemaRef};
use datatypes::vectors::{Int64Vector, StringVector, VectorRef};
use crate::system_schema::information_schema::table_names::*;
@@ -366,16 +365,6 @@ pub(super) fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
vec![],
),
- TRIGGERS => (
- vec![
- string_column("TRIGGER_NAME"),
- ColumnSchema::new("trigger_id", ConcreteDataType::uint64_datatype(), false),
- string_column("TRIGGER_DEFINITION"),
- ColumnSchema::new("flownode_id", ConcreteDataType::uint64_datatype(), true),
- ],
- vec![],
- ),
-
// TODO: Considering store internal metrics in `global_status` and
// `session_status` tables.
GLOBAL_STATUS => (
diff --git a/src/catalog/src/system_schema/information_schema/partitions.rs b/src/catalog/src/system_schema/information_schema/partitions.rs
index 68f4f83051..b9396fe554 100644
--- a/src/catalog/src/system_schema/information_schema/partitions.rs
+++ b/src/catalog/src/system_schema/information_schema/partitions.rs
@@ -211,6 +211,7 @@ struct InformationSchemaPartitionsBuilder {
partition_names: StringVectorBuilder,
partition_ordinal_positions: Int64VectorBuilder,
partition_expressions: StringVectorBuilder,
+ partition_descriptions: StringVectorBuilder,
create_times: TimestampSecondVectorBuilder,
partition_ids: UInt64VectorBuilder,
}
@@ -231,6 +232,7 @@ impl InformationSchemaPartitionsBuilder {
partition_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
partition_ordinal_positions: Int64VectorBuilder::with_capacity(INIT_CAPACITY),
partition_expressions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ partition_descriptions: StringVectorBuilder::with_capacity(INIT_CAPACITY),
create_times: TimestampSecondVectorBuilder::with_capacity(INIT_CAPACITY),
partition_ids: UInt64VectorBuilder::with_capacity(INIT_CAPACITY),
}
@@ -319,6 +321,21 @@ impl InformationSchemaPartitionsBuilder {
return;
}
+ // Get partition column names (shared by all partitions)
+ // In MySQL, PARTITION_EXPRESSION is the partitioning function expression (e.g., column name)
+ let partition_columns: String = table_info
+ .meta
+ .partition_column_names()
+ .cloned()
+ .collect::<Vec<_>>()
+ .join(", ");
+
+ let partition_expr_str = if partition_columns.is_empty() {
+ None
+ } else {
+ Some(partition_columns)
+ };
+
for (index, partition) in partitions.iter().enumerate() {
let partition_name = format!("p{index}");
@@ -328,8 +345,12 @@ impl InformationSchemaPartitionsBuilder {
self.partition_names.push(Some(&partition_name));
self.partition_ordinal_positions
.push(Some((index + 1) as i64));
- let expression = partition.partition_expr.as_ref().map(|e| e.to_string());
- self.partition_expressions.push(expression.as_deref());
+ // PARTITION_EXPRESSION: partition column names (same for all partitions)
+ self.partition_expressions
+ .push(partition_expr_str.as_deref());
+ // PARTITION_DESCRIPTION: partition boundary expression (different for each partition)
+ let description = partition.partition_expr.as_ref().map(|e| e.to_string());
+ self.partition_descriptions.push(description.as_deref());
self.create_times.push(Some(TimestampSecond::from(
table_info.meta.created_on.timestamp(),
)));
@@ -369,7 +390,7 @@ impl InformationSchemaPartitionsBuilder {
null_string_vector.clone(),
Arc::new(self.partition_expressions.finish()),
null_string_vector.clone(),
- null_string_vector.clone(),
+ Arc::new(self.partition_descriptions.finish()),
// TODO(dennis): rows and index statistics info
null_i64_vector.clone(),
null_i64_vector.clone(),
diff --git a/src/catalog/src/system_schema/information_schema/runtime_metrics.rs b/src/catalog/src/system_schema/information_schema/runtime_metrics.rs
deleted file mode 100644
index 5ccb871321..0000000000
--- a/src/catalog/src/system_schema/information_schema/runtime_metrics.rs
+++ /dev/null
@@ -1,265 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::sync::Arc;
-
-use arrow_schema::SchemaRef as ArrowSchemaRef;
-use common_catalog::consts::INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID;
-use common_error::ext::BoxedError;
-use common_recordbatch::adapter::RecordBatchStreamAdapter;
-use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
-use common_time::util::current_time_millis;
-use datafusion::execution::TaskContext;
-use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
-use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
-use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
-use datatypes::prelude::{ConcreteDataType, MutableVector};
-use datatypes::scalars::ScalarVectorBuilder;
-use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
-use datatypes::vectors::{
- ConstantVector, Float64VectorBuilder, StringVectorBuilder, TimestampMillisecondVector,
- VectorRef,
-};
-use itertools::Itertools;
-use snafu::ResultExt;
-use store_api::storage::{ScanRequest, TableId};
-
-use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
-use crate::system_schema::information_schema::{InformationTable, RUNTIME_METRICS};
-
-#[derive(Debug)]
-pub(super) struct InformationSchemaMetrics {
- schema: SchemaRef,
-}
-
-const METRIC_NAME: &str = "metric_name";
-const METRIC_VALUE: &str = "value";
-const METRIC_LABELS: &str = "labels";
-const PEER_ADDR: &str = "peer_addr";
-const PEER_TYPE: &str = "peer_type";
-const TIMESTAMP: &str = "timestamp";
-
-/// The `information_schema.runtime_metrics` virtual table.
-/// It provides the GreptimeDB runtime metrics for the users by SQL.
-impl InformationSchemaMetrics {
- pub(super) fn new() -> Self {
- Self {
- schema: Self::schema(),
- }
- }
-
- fn schema() -> SchemaRef {
- Arc::new(Schema::new(vec![
- ColumnSchema::new(METRIC_NAME, ConcreteDataType::string_datatype(), false),
- ColumnSchema::new(METRIC_VALUE, ConcreteDataType::float64_datatype(), false),
- ColumnSchema::new(METRIC_LABELS, ConcreteDataType::string_datatype(), true),
- ColumnSchema::new(PEER_ADDR, ConcreteDataType::string_datatype(), true),
- ColumnSchema::new(PEER_TYPE, ConcreteDataType::string_datatype(), false),
- ColumnSchema::new(
- TIMESTAMP,
- ConcreteDataType::timestamp_millisecond_datatype(),
- false,
- ),
- ]))
- }
-
- fn builder(&self) -> InformationSchemaMetricsBuilder {
- InformationSchemaMetricsBuilder::new(self.schema.clone())
- }
-}
-
-impl InformationTable for InformationSchemaMetrics {
- fn table_id(&self) -> TableId {
- INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID
- }
-
- fn table_name(&self) -> &'static str {
- RUNTIME_METRICS
- }
-
- fn schema(&self) -> SchemaRef {
- self.schema.clone()
- }
-
- fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
- let schema = self.schema.arrow_schema().clone();
- let mut builder = self.builder();
- let stream = Box::pin(DfRecordBatchStreamAdapter::new(
- schema,
- futures::stream::once(async move {
- builder
- .make_metrics(Some(request))
- .await
- .map(|x| x.into_df_record_batch())
- .map_err(Into::into)
- }),
- ));
-
- Ok(Box::pin(
- RecordBatchStreamAdapter::try_new(stream)
- .map_err(BoxedError::new)
- .context(InternalSnafu)?,
- ))
- }
-}
-
-struct InformationSchemaMetricsBuilder {
- schema: SchemaRef,
-
- metric_names: StringVectorBuilder,
- metric_values: Float64VectorBuilder,
- metric_labels: StringVectorBuilder,
- peer_addrs: StringVectorBuilder,
- peer_types: StringVectorBuilder,
-}
-
-impl InformationSchemaMetricsBuilder {
- fn new(schema: SchemaRef) -> Self {
- Self {
- schema,
- metric_names: StringVectorBuilder::with_capacity(42),
- metric_values: Float64VectorBuilder::with_capacity(42),
- metric_labels: StringVectorBuilder::with_capacity(42),
- peer_addrs: StringVectorBuilder::with_capacity(42),
- peer_types: StringVectorBuilder::with_capacity(42),
- }
- }
-
- fn add_metric(
- &mut self,
- metric_name: &str,
- labels: String,
- metric_value: f64,
- peer: Option<&str>,
- peer_type: &str,
- ) {
- self.metric_names.push(Some(metric_name));
- self.metric_values.push(Some(metric_value));
- self.metric_labels.push(Some(&labels));
- self.peer_addrs.push(peer);
- self.peer_types.push(Some(peer_type));
- }
-
- async fn make_metrics(&mut self, _request: Option<ScanRequest>) -> Result<RecordBatch> {
- let metric_families = prometheus::gather();
-
- let write_request =
- common_telemetry::metric::convert_metric_to_write_request(metric_families, None, 0);
-
- for ts in write_request.timeseries {
- //Safety: always has `__name__` label
- let metric_name = ts
- .labels
- .iter()
- .find_map(|label| {
- if label.name == "__name__" {
- Some(label.value.clone())
- } else {
- None
- }
- })
- .unwrap();
-
- self.add_metric(
- &metric_name,
- ts.labels
- .into_iter()
- .filter_map(|label| {
- if label.name == "__name__" {
- None
- } else {
- Some(format!("{}={}", label.name, label.value))
- }
- })
- .join(", "),
- // Safety: always has a sample
- ts.samples[0].value,
- // The peer column is always `None` for standalone
- None,
- "STANDALONE",
- );
- }
-
- // FIXME(dennis): fetching other peers metrics
- self.finish()
- }
-
- fn finish(&mut self) -> Result<RecordBatch> {
- let rows_num = self.metric_names.len();
-
- let timestamps = Arc::new(ConstantVector::new(
- Arc::new(TimestampMillisecondVector::from_slice([
- current_time_millis(),
- ])),
- rows_num,
- ));
-
- let columns: Vec<VectorRef> = vec![
- Arc::new(self.metric_names.finish()),
- Arc::new(self.metric_values.finish()),
- Arc::new(self.metric_labels.finish()),
- Arc::new(self.peer_addrs.finish()),
- Arc::new(self.peer_types.finish()),
- timestamps,
- ];
-
- RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
- }
-}
-
-impl DfPartitionStream for InformationSchemaMetrics {
- fn schema(&self) -> &ArrowSchemaRef {
- self.schema.arrow_schema()
- }
-
- fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
- let schema = self.schema.arrow_schema().clone();
- let mut builder = self.builder();
- Box::pin(DfRecordBatchStreamAdapter::new(
- schema,
- futures::stream::once(async move {
- builder
- .make_metrics(None)
- .await
- .map(|x| x.into_df_record_batch())
- .map_err(Into::into)
- }),
- ))
- }
-}
-
-#[cfg(test)]
-mod tests {
- use common_recordbatch::RecordBatches;
-
- use super::*;
-
- #[tokio::test]
- async fn test_make_metrics() {
- let metrics = InformationSchemaMetrics::new();
-
- let stream = metrics.to_stream(ScanRequest::default()).unwrap();
-
- let batches = RecordBatches::try_collect(stream).await.unwrap();
-
- let result_literal = batches.pretty_print().unwrap();
-
- assert!(result_literal.contains(METRIC_NAME));
- assert!(result_literal.contains(METRIC_VALUE));
- assert!(result_literal.contains(METRIC_LABELS));
- assert!(result_literal.contains(PEER_ADDR));
- assert!(result_literal.contains(PEER_TYPE));
- assert!(result_literal.contains(TIMESTAMP));
- }
-}
diff --git a/src/catalog/src/system_schema/information_schema/table_names.rs b/src/catalog/src/system_schema/information_schema/table_names.rs
index 23791425dc..2a3329fece 100644
--- a/src/catalog/src/system_schema/information_schema/table_names.rs
+++ b/src/catalog/src/system_schema/information_schema/table_names.rs
@@ -38,7 +38,6 @@ pub const TABLE_PRIVILEGES: &str = "table_privileges";
pub const TRIGGERS: &str = "triggers";
pub const GLOBAL_STATUS: &str = "global_status";
pub const SESSION_STATUS: &str = "session_status";
-pub const RUNTIME_METRICS: &str = "runtime_metrics";
pub const PARTITIONS: &str = "partitions";
pub const REGION_PEERS: &str = "region_peers";
pub const TABLE_CONSTRAINTS: &str = "table_constraints";
diff --git a/src/catalog/src/system_schema/information_schema/tables.rs b/src/catalog/src/system_schema/information_schema/tables.rs
index 507dedc547..38a0cb1d61 100644
--- a/src/catalog/src/system_schema/information_schema/tables.rs
+++ b/src/catalog/src/system_schema/information_schema/tables.rs
@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::collections::HashSet;
use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
@@ -255,14 +254,17 @@ impl InformationSchemaTablesBuilder {
// TODO(dennis): `region_stats` API is not stable in distributed cluster because of network issue etc.
// But we don't want the statements such as `show tables` fail,
// so using `unwrap_or_else` here instead of `?` operator.
- let region_stats = information_extension
- .region_stats()
- .await
- .map_err(|e| {
- error!(e; "Failed to call region_stats");
- e
- })
- .unwrap_or_else(|_| vec![]);
+ let region_stats = {
+ let mut x = information_extension
+ .region_stats()
+ .await
+ .unwrap_or_else(|e| {
+ error!(e; "Failed to find region stats in information_schema, fallback to all empty");
+ vec![]
+ });
+ x.sort_unstable_by_key(|x| x.id);
+ x
+ };
for schema_name in catalog_manager.schema_names(&catalog_name, None).await? {
let mut stream = catalog_manager.tables(&catalog_name, &schema_name, None);
@@ -273,16 +275,16 @@ impl InformationSchemaTablesBuilder {
// TODO(dennis): make it working for metric engine
let table_region_stats =
if table_info.meta.engine == MITO_ENGINE || table_info.is_physical_table() {
- let region_ids = table_info
+ table_info
.meta
.region_numbers
.iter()
.map(|n| RegionId::new(table_info.ident.table_id, *n))
- .collect::<HashSet<_>>();
-
- region_stats
- .iter()
- .filter(|stat| region_ids.contains(&stat.id))
+ .flat_map(|region_id| {
+ region_stats
+ .binary_search_by_key(&region_id, |x| x.id)
+ .map(|i| ®ion_stats[i])
+ })
.collect::<Vec<_>>()
} else {
vec![]
diff --git a/src/catalog/src/system_schema/numbers_table_provider.rs b/src/catalog/src/system_schema/numbers_table_provider.rs
new file mode 100644
index 0000000000..6ea6d554b7
--- /dev/null
+++ b/src/catalog/src/system_schema/numbers_table_provider.rs
@@ -0,0 +1,59 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#[cfg(any(test, feature = "testing", debug_assertions))]
+use common_catalog::consts::NUMBERS_TABLE_ID;
+use table::TableRef;
+#[cfg(any(test, feature = "testing", debug_assertions))]
+use table::table::numbers::NUMBERS_TABLE_NAME;
+#[cfg(any(test, feature = "testing", debug_assertions))]
+use table::table::numbers::NumbersTable;
+
+// NumbersTableProvider is a dedicated provider for feature-gating the numbers table.
+#[derive(Clone)]
+pub struct NumbersTableProvider;
+
+#[cfg(any(test, feature = "testing", debug_assertions))]
+impl NumbersTableProvider {
+ pub(crate) fn table_exists(&self, name: &str) -> bool {
+ name == NUMBERS_TABLE_NAME
+ }
+
+ pub(crate) fn table_names(&self) -> Vec<String> {
+ vec![NUMBERS_TABLE_NAME.to_string()]
+ }
+
+ pub(crate) fn table(&self, name: &str) -> Option<TableRef> {
+ if name == NUMBERS_TABLE_NAME {
+ Some(NumbersTable::table(NUMBERS_TABLE_ID))
+ } else {
+ None
+ }
+ }
+}
+
+#[cfg(not(any(test, feature = "testing", debug_assertions)))]
+impl NumbersTableProvider {
+ pub(crate) fn table_exists(&self, _name: &str) -> bool {
+ false
+ }
+
+ pub(crate) fn table_names(&self) -> Vec<String> {
+ vec![]
+ }
+
+ pub(crate) fn table(&self, _name: &str) -> Option<TableRef> {
+ None
+ }
+}
diff --git a/src/cli/src/data.rs b/src/cli/src/data.rs
index bac7f3e308..be623f63a2 100644
--- a/src/cli/src/data.rs
+++ b/src/cli/src/data.rs
@@ -16,12 +16,15 @@ mod export;
mod import;
use clap::Subcommand;
+use client::DEFAULT_CATALOG_NAME;
use common_error::ext::BoxedError;
use crate::Tool;
use crate::data::export::ExportCommand;
use crate::data::import::ImportCommand;
+pub(crate) const COPY_PATH_PLACEHOLDER: &str = "";
+
/// Command for data operations including exporting data from and importing data into GreptimeDB.
#[derive(Subcommand)]
pub enum DataCommand {
@@ -37,3 +40,7 @@ impl DataCommand {
}
}
}
+
+pub(crate) fn default_database() -> String {
+ format!("{DEFAULT_CATALOG_NAME}-*")
+}
diff --git a/src/cli/src/data/export.rs b/src/cli/src/data/export.rs
index a9f68bf9c9..007f8aa67c 100644
--- a/src/cli/src/data/export.rs
+++ b/src/cli/src/data/export.rs
@@ -30,6 +30,7 @@ use snafu::{OptionExt, ResultExt};
use tokio::sync::Semaphore;
use tokio::time::Instant;
+use crate::data::{COPY_PATH_PLACEHOLDER, default_database};
use crate::database::{DatabaseClient, parse_proxy_opts};
use crate::error::{
EmptyResultSnafu, Error, OpenDalSnafu, OutputDirNotSetSnafu, Result, S3ConfigNotSetSnafu,
@@ -63,12 +64,20 @@ pub struct ExportCommand {
output_dir: Option<String>,
/// The name of the catalog to export.
- #[clap(long, default_value = "greptime-*")]
+ #[clap(long, default_value_t = default_database())]
database: String,
- /// Parallelism of the export.
- #[clap(long, short = 'j', default_value = "1")]
- export_jobs: usize,
+ /// The number of databases exported in parallel.
+ /// For example, if there are 20 databases and `db_parallelism` is 4,
+ /// 4 databases will be exported concurrently.
+ #[clap(long, short = 'j', default_value = "1", alias = "export-jobs")]
+ db_parallelism: usize,
+
+ /// The number of tables exported in parallel within a single database.
+ /// For example, if a database has 30 tables and `parallelism` is 8,
+ /// 8 tables will be exported concurrently.
+ #[clap(long, default_value = "4")]
+ table_parallelism: usize,
/// Max retry times for each job.
#[clap(long, default_value = "3")]
@@ -209,10 +218,11 @@ impl ExportCommand {
schema,
database_client,
output_dir: self.output_dir.clone(),
- parallelism: self.export_jobs,
+ export_jobs: self.db_parallelism,
target: self.target.clone(),
start_time: self.start_time.clone(),
end_time: self.end_time.clone(),
+ parallelism: self.table_parallelism,
s3: self.s3,
ddl_local_dir: self.ddl_local_dir.clone(),
s3_bucket: self.s3_bucket.clone(),
@@ -250,10 +260,11 @@ pub struct Export {
schema: Option<String>,
database_client: DatabaseClient,
output_dir: Option<String>,
- parallelism: usize,
+ export_jobs: usize,
target: ExportTarget,
start_time: Option<String>,
end_time: Option<String>,
+ parallelism: usize,
s3: bool,
ddl_local_dir: Option<String>,
s3_bucket: Option<String>,
@@ -463,7 +474,7 @@ impl Export {
async fn export_create_table(&self) -> Result<()> {
let timer = Instant::now();
- let semaphore = Arc::new(Semaphore::new(self.parallelism));
+ let semaphore = Arc::new(Semaphore::new(self.export_jobs));
let db_names = self.get_db_names().await?;
let db_count = db_names.len();
let operator = Arc::new(self.build_prefer_fs_operator().await?);
@@ -624,13 +635,13 @@ impl Export {
async fn export_database_data(&self) -> Result<()> {
let timer = Instant::now();
- let semaphore = Arc::new(Semaphore::new(self.parallelism));
+ let semaphore = Arc::new(Semaphore::new(self.export_jobs));
let db_names = self.get_db_names().await?;
let db_count = db_names.len();
let mut tasks = Vec::with_capacity(db_count);
let operator = Arc::new(self.build_operator().await?);
let fs_first_operator = Arc::new(self.build_prefer_fs_operator().await?);
- let with_options = build_with_options(&self.start_time, &self.end_time);
+ let with_options = build_with_options(&self.start_time, &self.end_time, self.parallelism);
for schema in db_names {
let semaphore_moved = semaphore.clone();
@@ -667,10 +678,26 @@ impl Export {
);
// Create copy_from.sql file
- let copy_database_from_sql = format!(
- r#"COPY DATABASE "{}"."{}" FROM '{}' WITH ({}){};"#,
- export_self.catalog, schema, path, with_options_clone, connection_part
- );
+ let copy_database_from_sql = {
+ let command_without_connection = format!(
+ r#"COPY DATABASE "{}"."{}" FROM '{}' WITH ({});"#,
+ export_self.catalog, schema, COPY_PATH_PLACEHOLDER, with_options_clone
+ );
+
+ if connection_part.is_empty() {
+ command_without_connection
+ } else {
+ let command_with_connection = format!(
+ r#"COPY DATABASE "{}"."{}" FROM '{}' WITH ({}){};"#,
+ export_self.catalog, schema, path, with_options_clone, connection_part
+ );
+
+ format!(
+ "-- {}\n{}",
+ command_with_connection, command_without_connection
+ )
+ }
+ };
let copy_from_path = export_self.get_file_path(&schema, "copy_from.sql");
export_self
@@ -871,7 +898,11 @@ impl Tool for Export {
}
/// Builds the WITH options string for SQL commands, assuming consistent syntax across S3 and local exports.
-fn build_with_options(start_time: &Option<String>, end_time: &Option<String>) -> String {
+fn build_with_options(
+ start_time: &Option<String>,
+ end_time: &Option<String>,
+ parallelism: usize,
+) -> String {
let mut options = vec!["format = 'parquet'".to_string()];
if let Some(start) = start_time {
options.push(format!("start_time = '{}'", start));
@@ -879,5 +910,6 @@ fn build_with_options(start_time: &Option<String>, end_time: &Option<String>) -> String {
if let Some(end) = end_time {
options.push(format!("end_time = '{}'", end));
}
+ options.push(format!("parallelism = {}", parallelism));
options.join(", ")
}
diff --git a/src/cli/src/data/import.rs b/src/cli/src/data/import.rs
index 102de8ac91..ffe8b62c7e 100644
--- a/src/cli/src/data/import.rs
+++ b/src/cli/src/data/import.rs
@@ -21,12 +21,13 @@ use clap::{Parser, ValueEnum};
use common_catalog::consts::DEFAULT_SCHEMA_NAME;
use common_error::ext::BoxedError;
use common_telemetry::{error, info, warn};
-use snafu::{OptionExt, ResultExt};
+use snafu::{OptionExt, ResultExt, ensure};
use tokio::sync::Semaphore;
use tokio::time::Instant;
+use crate::data::{COPY_PATH_PLACEHOLDER, default_database};
use crate::database::{DatabaseClient, parse_proxy_opts};
-use crate::error::{Error, FileIoSnafu, Result, SchemaNotFoundSnafu};
+use crate::error::{Error, FileIoSnafu, InvalidArgumentsSnafu, Result, SchemaNotFoundSnafu};
use crate::{Tool, database};
#[derive(Debug, Default, Clone, ValueEnum)]
@@ -52,12 +53,14 @@ pub struct ImportCommand {
input_dir: String,
/// The name of the catalog to import.
- #[clap(long, default_value = "greptime-*")]
+ #[clap(long, default_value_t = default_database())]
database: String,
- /// Parallelism of the import.
- #[clap(long, short = 'j', default_value = "1")]
- import_jobs: usize,
+ /// The number of databases imported in parallel.
+ /// For example, if there are 20 databases and `db_parallelism` is 4,
+ /// 4 databases will be imported concurrently.
+ #[clap(long, short = 'j', default_value = "1", alias = "import-jobs")]
+ db_parallelism: usize,
/// Max retry times for each job.
#[clap(long, default_value = "3")]
@@ -108,7 +111,7 @@ impl ImportCommand {
schema,
database_client,
input_dir: self.input_dir.clone(),
- parallelism: self.import_jobs,
+ parallelism: self.db_parallelism,
target: self.target.clone(),
}))
}
@@ -147,12 +150,15 @@ impl Import {
let _permit = semaphore_moved.acquire().await.unwrap();
let database_input_dir = self.catalog_path().join(&schema);
let sql_file = database_input_dir.join(filename);
- let sql = tokio::fs::read_to_string(sql_file)
+ let mut sql = tokio::fs::read_to_string(sql_file)
.await
.context(FileIoSnafu)?;
- if sql.is_empty() {
+ if sql.trim().is_empty() {
info!("Empty `{filename}` {database_input_dir:?}");
} else {
+ if filename == "copy_from.sql" {
+ sql = self.rewrite_copy_database_sql(&schema, &sql)?;
+ }
let db = exec_db.unwrap_or(&schema);
self.database_client.sql(&sql, db).await?;
info!("Imported `{filename}` for database {schema}");
@@ -225,6 +231,57 @@ impl Import {
}
Ok(db_names)
}
+
+ fn rewrite_copy_database_sql(&self, schema: &str, sql: &str) -> Result<String> {
+ let target_location = self.build_copy_database_location(schema);
+ let escaped_location = target_location.replace('\'', "''");
+
+ let mut first_stmt_checked = false;
+ for line in sql.lines() {
+ let trimmed = line.trim_start();
+ if trimmed.is_empty() || trimmed.starts_with("--") {
+ continue;
+ }
+
+ ensure!(
+ trimmed.starts_with("COPY DATABASE"),
+ InvalidArgumentsSnafu {
+ msg: "Expected COPY DATABASE statement at start of copy_from.sql"
+ }
+ );
+ first_stmt_checked = true;
+ break;
+ }
+
+ ensure!(
+ first_stmt_checked,
+ InvalidArgumentsSnafu {
+ msg: "COPY DATABASE statement not found in copy_from.sql"
+ }
+ );
+
+ ensure!(
+ sql.contains(COPY_PATH_PLACEHOLDER),
+ InvalidArgumentsSnafu {
+ msg: format!(
+ "Placeholder `{}` not found in COPY DATABASE statement",
+ COPY_PATH_PLACEHOLDER
+ )
+ }
+ );
+
+ Ok(sql.replacen(COPY_PATH_PLACEHOLDER, &escaped_location, 1))
+ }
+
+ fn build_copy_database_location(&self, schema: &str) -> String {
+ let mut path = self.catalog_path();
+ path.push(schema);
+ let mut path_str = path.to_string_lossy().into_owned();
+ if !path_str.ends_with('/') {
+ path_str.push('/');
+ }
+ path_str
+ }
}
#[async_trait]
@@ -240,3 +297,52 @@ impl Tool for Import {
}
}
}
+
+#[cfg(test)]
+mod tests {
+ use std::time::Duration;
+
+ use super::*;
+
+ fn build_import(input_dir: &str) -> Import {
+ Import {
+ catalog: "catalog".to_string(),
+ schema: None,
+ database_client: DatabaseClient::new(
+ "127.0.0.1:4000".to_string(),
+ "catalog".to_string(),
+ None,
+ Duration::from_secs(0),
+ None,
+ ),
+ input_dir: input_dir.to_string(),
+ parallelism: 1,
+ target: ImportTarget::Data,
+ }
+ }
+
+ #[test]
+ fn rewrite_copy_database_sql_replaces_placeholder() {
+ let import = build_import("/tmp/export-path");
+ let comment = "-- COPY DATABASE \"catalog\".\"schema\" FROM 's3://bucket/demo/' WITH (format = 'parquet') CONNECTION (region = 'us-west-2')";
+ let sql = format!(
+ "{comment}\nCOPY DATABASE \"catalog\".\"schema\" FROM '{}' WITH (format = 'parquet');",
+ COPY_PATH_PLACEHOLDER
+ );
+
+ let rewritten = import.rewrite_copy_database_sql("schema", &sql).unwrap();
+ let expected_location = import.build_copy_database_location("schema");
+ let escaped = expected_location.replace('\'', "''");
+
+ assert!(rewritten.starts_with(comment));
+ assert!(rewritten.contains(&format!("FROM '{escaped}'")));
+ assert!(!rewritten.contains(COPY_PATH_PLACEHOLDER));
+ }
+
+ #[test]
+ fn rewrite_copy_database_sql_requires_placeholder() {
+ let import = build_import("/tmp/export-path");
+ let sql = "COPY DATABASE \"catalog\".\"schema\" FROM '/tmp/export-path/catalog/schema/' WITH (format = 'parquet');";
+ assert!(import.rewrite_copy_database_sql("schema", sql).is_err());
+ }
+}
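For reference, a sketch of the copy_from.sql produced for an object-store export under this change; the values are taken from the unit test above, and the literal placeholder string is defined in crate::data and not shown in this diff:

    -- COPY DATABASE "catalog"."schema" FROM 's3://bucket/demo/' WITH (format = 'parquet') CONNECTION (region = 'us-west-2')
    COPY DATABASE "catalog"."schema" FROM '<COPY_PATH_PLACEHOLDER>' WITH (format = 'parquet');

On import, rewrite_copy_database_sql substitutes the placeholder with the local <input_dir>/<catalog>/<schema>/ path (single quotes doubled for escaping), so the statement reads the locally extracted parquet files instead of the original remote location.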
diff --git a/src/client/src/client.rs b/src/client/src/client.rs
index 1506ac5208..39cb5c30aa 100644
--- a/src/client/src/client.rs
+++ b/src/client/src/client.rs
@@ -20,7 +20,9 @@ use api::v1::health_check_client::HealthCheckClient;
use api::v1::prometheus_gateway_client::PrometheusGatewayClient;
use api::v1::region::region_client::RegionClient as PbRegionClient;
use arrow_flight::flight_service_client::FlightServiceClient;
-use common_grpc::channel_manager::{ChannelConfig, ChannelManager, ClientTlsOption};
+use common_grpc::channel_manager::{
+ ChannelConfig, ChannelManager, ClientTlsOption, load_client_tls_config,
+};
use parking_lot::RwLock;
use snafu::{OptionExt, ResultExt};
use tonic::codec::CompressionEncoding;
@@ -93,9 +95,10 @@ impl Client {
U: AsRef<str>,
A: AsRef<[U]>,
{
- let channel_config = ChannelConfig::default().client_tls_config(client_tls);
- let channel_manager = ChannelManager::with_tls_config(channel_config)
- .context(error::CreateTlsChannelSnafu)?;
+ let channel_config = ChannelConfig::default().client_tls_config(client_tls.clone());
+ let tls_config =
+ load_client_tls_config(Some(client_tls)).context(error::CreateTlsChannelSnafu)?;
+ let channel_manager = ChannelManager::with_config(channel_config, tls_config);
Ok(Self::with_manager_and_urls(channel_manager, urls))
}
diff --git a/src/client/src/client_manager.rs b/src/client/src/client_manager.rs
index 80afd2fb32..edac45a9fe 100644
--- a/src/client/src/client_manager.rs
+++ b/src/client/src/client_manager.rs
@@ -74,7 +74,7 @@ impl FlownodeManager for NodeClients {
impl NodeClients {
pub fn new(config: ChannelConfig) -> Self {
Self {
- channel_manager: ChannelManager::with_config(config),
+ channel_manager: ChannelManager::with_config(config, None),
clients: CacheBuilder::new(1024)
.time_to_live(Duration::from_secs(30 * 60))
.time_to_idle(Duration::from_secs(5 * 60))
diff --git a/src/client/src/database.rs b/src/client/src/database.rs
index 0646c3e2a3..239f3fe3f9 100644
--- a/src/client/src/database.rs
+++ b/src/client/src/database.rs
@@ -435,10 +435,10 @@ impl Database {
.context(ExternalSnafu)?;
match flight_message {
FlightMessage::RecordBatch(arrow_batch) => {
- yield RecordBatch::try_from_df_record_batch(
+ yield Ok(RecordBatch::from_df_record_batch(
schema_cloned.clone(),
arrow_batch,
- )
+ ))
}
FlightMessage::Metrics(_) => {}
FlightMessage::AffectedRows(_) | FlightMessage::Schema(_) => {
diff --git a/src/client/src/region.rs b/src/client/src/region.rs
index 6e5a286083..3e80b83cec 100644
--- a/src/client/src/region.rs
+++ b/src/client/src/region.rs
@@ -182,10 +182,8 @@ impl RegionRequester {
match flight_message {
FlightMessage::RecordBatch(record_batch) => {
- let result_to_yield = RecordBatch::try_from_df_record_batch(
- schema_cloned.clone(),
- record_batch,
- );
+ let result_to_yield =
+ RecordBatch::from_df_record_batch(schema_cloned.clone(), record_batch);
// get the next message from the stream. normally it should be a metrics message.
if let Some(next_flight_message_result) = flight_message_stream.next().await
@@ -219,7 +217,7 @@ impl RegionRequester {
stream_ended = true;
}
- yield result_to_yield;
+ yield Ok(result_to_yield);
}
FlightMessage::Metrics(s) => {
// just a branch in case of some metrics message comes after other things.
diff --git a/src/cmd/Cargo.toml b/src/cmd/Cargo.toml
index 744d13faeb..d279ddb7f0 100644
--- a/src/cmd/Cargo.toml
+++ b/src/cmd/Cargo.toml
@@ -16,7 +16,7 @@ default = [
"meta-srv/pg_kvbackend",
"meta-srv/mysql_kvbackend",
]
-enterprise = ["common-meta/enterprise", "frontend/enterprise", "meta-srv/enterprise", "catalog/enterprise"]
+enterprise = ["common-meta/enterprise", "frontend/enterprise", "meta-srv/enterprise"]
tokio-console = ["common-telemetry/tokio-console"]
[lints]
@@ -29,9 +29,11 @@ base64.workspace = true
cache.workspace = true
catalog.workspace = true
chrono.workspace = true
+either = "1.15"
clap.workspace = true
cli.workspace = true
client.workspace = true
+colored = "2.1.0"
common-base.workspace = true
common-catalog.workspace = true
common-config.workspace = true
@@ -63,9 +65,11 @@ lazy_static.workspace = true
meta-client.workspace = true
meta-srv.workspace = true
metric-engine.workspace = true
+mito2.workspace = true
moka.workspace = true
nu-ansi-term = "0.46"
object-store.workspace = true
+parquet = { workspace = true, features = ["object_store"] }
plugins.workspace = true
prometheus.workspace = true
prost.workspace = true
@@ -88,6 +92,11 @@ toml.workspace = true
tonic.workspace = true
tracing-appender.workspace = true
+[target.'cfg(unix)'.dependencies]
+pprof = { version = "0.14", features = [
+ "flamegraph",
+] }
+
[target.'cfg(not(windows))'.dependencies]
tikv-jemallocator = "0.6"
diff --git a/src/cmd/src/bin/greptime.rs b/src/cmd/src/bin/greptime.rs
index cf72b3d32f..f6bbebf7fb 100644
--- a/src/cmd/src/bin/greptime.rs
+++ b/src/cmd/src/bin/greptime.rs
@@ -103,12 +103,15 @@ async fn main_body() -> Result<()> {
async fn start(cli: Command) -> Result<()> {
match cli.subcmd {
- SubCommand::Datanode(cmd) => {
- let opts = cmd.load_options(&cli.global_options)?;
- let plugins = Plugins::new();
- let builder = InstanceBuilder::try_new_with_init(opts, plugins).await?;
- cmd.build_with(builder).await?.run().await
- }
+ SubCommand::Datanode(cmd) => match cmd.subcmd {
+ datanode::SubCommand::Start(ref start) => {
+ let opts = start.load_options(&cli.global_options)?;
+ let plugins = Plugins::new();
+ let builder = InstanceBuilder::try_new_with_init(opts, plugins).await?;
+ cmd.build_with(builder).await?.run().await
+ }
+ datanode::SubCommand::Objbench(ref bench) => bench.run().await,
+ },
SubCommand::Flownode(cmd) => {
cmd.build(cmd.load_options(&cli.global_options)?)
.await?
diff --git a/src/cmd/src/datanode.rs b/src/cmd/src/datanode.rs
index 641d3fc5fd..23ca644ffc 100644
--- a/src/cmd/src/datanode.rs
+++ b/src/cmd/src/datanode.rs
@@ -13,6 +13,8 @@
// limitations under the License.
pub mod builder;
+#[allow(clippy::print_stdout)]
+mod objbench;
use std::path::Path;
use std::time::Duration;
@@ -23,13 +25,16 @@ use common_config::Configurable;
use common_telemetry::logging::{DEFAULT_LOGGING_DIR, TracingOptions};
use common_telemetry::{info, warn};
use common_wal::config::DatanodeWalConfig;
+use datanode::config::RegionEngineConfig;
use datanode::datanode::Datanode;
use meta_client::MetaClientOptions;
+use serde::{Deserialize, Serialize};
use snafu::{ResultExt, ensure};
use tracing_appender::non_blocking::WorkerGuard;
use crate::App;
use crate::datanode::builder::InstanceBuilder;
+use crate::datanode::objbench::ObjbenchCommand;
use crate::error::{
LoadLayeredConfigSnafu, MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu,
};
@@ -89,7 +94,7 @@ impl App for Instance {
#[derive(Parser)]
pub struct Command {
#[clap(subcommand)]
- subcmd: SubCommand,
+ pub subcmd: SubCommand,
}
impl Command {
@@ -100,13 +105,26 @@ impl Command {
pub fn load_options(&self, global_options: &GlobalOptions) -> Result<DatanodeOptions> {
match &self.subcmd {
SubCommand::Start(cmd) => cmd.load_options(global_options),
+ SubCommand::Objbench(_) => {
+ // For objbench command, we don't need to load DatanodeOptions
+ // It's a standalone utility command
+ let mut opts = datanode::config::DatanodeOptions::default();
+ opts.sanitize();
+ Ok(DatanodeOptions {
+ runtime: Default::default(),
+ plugins: Default::default(),
+ component: opts,
+ })
+ }
}
}
}
#[derive(Parser)]
-enum SubCommand {
+pub enum SubCommand {
Start(StartCommand),
+ /// Object storage benchmark tool
+ Objbench(ObjbenchCommand),
}
impl SubCommand {
@@ -116,12 +134,33 @@ impl SubCommand {
info!("Building datanode with {:#?}", cmd);
builder.build().await
}
+ SubCommand::Objbench(cmd) => {
+ cmd.run().await?;
+ std::process::exit(0);
+ }
}
}
}
+/// Storage engine config
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
+#[serde(default)]
+pub struct StorageConfig {
+ /// The working directory of database
+ pub data_home: String,
+ #[serde(flatten)]
+ pub store: object_store::config::ObjectStoreConfig,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
+#[serde(default)]
+struct StorageConfigWrapper {
+ storage: StorageConfig,
+ region_engine: Vec<RegionEngineConfig>,
+}
+
#[derive(Debug, Parser, Default)]
-struct StartCommand {
+pub struct StartCommand {
#[clap(long)]
node_id: Option<u64>,
/// The address to bind the gRPC server.
@@ -149,7 +188,7 @@ struct StartCommand {
}
impl StartCommand {
- fn load_options(&self, global_options: &GlobalOptions) -> Result<DatanodeOptions> {
+ pub fn load_options(&self, global_options: &GlobalOptions) -> Result<DatanodeOptions> {
let mut opts = DatanodeOptions::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
diff --git a/src/cmd/src/datanode/objbench.rs b/src/cmd/src/datanode/objbench.rs
new file mode 100644
index 0000000000..a8ff8b4daf
--- /dev/null
+++ b/src/cmd/src/datanode/objbench.rs
@@ -0,0 +1,678 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::path::PathBuf;
+use std::sync::Arc;
+use std::time::Instant;
+
+use clap::Parser;
+use colored::Colorize;
+use datanode::config::RegionEngineConfig;
+use datanode::store;
+use either::Either;
+use mito2::access_layer::{
+ AccessLayer, AccessLayerRef, Metrics, OperationType, SstWriteRequest, WriteType,
+};
+use mito2::cache::{CacheManager, CacheManagerRef};
+use mito2::config::{FulltextIndexConfig, MitoConfig, Mode};
+use mito2::read::Source;
+use mito2::sst::file::{FileHandle, FileMeta};
+use mito2::sst::file_purger::{FilePurger, FilePurgerRef};
+use mito2::sst::index::intermediate::IntermediateManager;
+use mito2::sst::index::puffin_manager::PuffinManagerFactory;
+use mito2::sst::parquet::reader::ParquetReaderBuilder;
+use mito2::sst::parquet::{PARQUET_METADATA_KEY, WriteOptions};
+use mito2::worker::write_cache_from_config;
+use object_store::ObjectStore;
+use regex::Regex;
+use snafu::OptionExt;
+use store_api::metadata::{RegionMetadata, RegionMetadataRef};
+use store_api::path_utils::region_name;
+use store_api::region_request::PathType;
+use store_api::storage::FileId;
+
+use crate::datanode::{StorageConfig, StorageConfigWrapper};
+use crate::error;
+
+/// Object storage benchmark command
+#[derive(Debug, Parser)]
+pub struct ObjbenchCommand {
+ /// Path to the object-store config file (TOML). Must deserialize into object_store::config::ObjectStoreConfig.
+ #[clap(long, value_name = "FILE")]
+ pub config: PathBuf,
+
+ /// Source SST file path in object-store (e.g. "region_dir/<file_id>.parquet").
+ #[clap(long, value_name = "PATH")]
+ pub source: String,
+
+ /// Verbose output
+ #[clap(short, long, default_value_t = false)]
+ pub verbose: bool,
+
+ /// Output file path for pprof flamegraph (enables profiling)
+ #[clap(long, value_name = "FILE")]
+ pub pprof_file: Option<PathBuf>,
+}
+
+fn parse_config(config_path: &PathBuf) -> error::Result<(StorageConfig, MitoConfig)> {
+ let cfg_str = std::fs::read_to_string(config_path).map_err(|e| {
+ error::IllegalConfigSnafu {
+ msg: format!("failed to read config {}: {e}", config_path.display()),
+ }
+ .build()
+ })?;
+
+ let store_cfg: StorageConfigWrapper = toml::from_str(&cfg_str).map_err(|e| {
+ error::IllegalConfigSnafu {
+ msg: format!("failed to parse config {}: {e}", config_path.display()),
+ }
+ .build()
+ })?;
+
+ let storage_config = store_cfg.storage;
+ let mito_engine_config = store_cfg
+ .region_engine
+ .into_iter()
+ .filter_map(|c| {
+ if let RegionEngineConfig::Mito(mito) = c {
+ Some(mito)
+ } else {
+ None
+ }
+ })
+ .next()
+ .with_context(|| error::IllegalConfigSnafu {
+ msg: format!("Engine config not found in {:?}", config_path),
+ })?;
+ Ok((storage_config, mito_engine_config))
+}
+
+impl ObjbenchCommand {
+ pub async fn run(&self) -> error::Result<()> {
+ if self.verbose {
+ common_telemetry::init_default_ut_logging();
+ }
+
+ println!("{}", "Starting objbench with config:".cyan().bold());
+
+ // Build object store from config
+ let (store_cfg, mut mito_engine_config) = parse_config(&self.config)?;
+
+ let object_store = build_object_store(&store_cfg).await?;
+ println!("{} Object store initialized", "✓".green());
+
+ // Prepare source identifiers
+ let components = parse_file_dir_components(&self.source)?;
+ println!(
+ "{} Source path parsed: {}, components: {:?}",
+ "✓".green(),
+ self.source,
+ components
+ );
+
+ // Load parquet metadata to extract RegionMetadata and file stats
+ println!("{}", "Loading parquet metadata...".yellow());
+ let file_size = object_store
+ .stat(&self.source)
+ .await
+ .map_err(|e| {
+ error::IllegalConfigSnafu {
+ msg: format!("stat failed: {e}"),
+ }
+ .build()
+ })?
+ .content_length();
+ let parquet_meta = load_parquet_metadata(object_store.clone(), &self.source, file_size)
+ .await
+ .map_err(|e| {
+ error::IllegalConfigSnafu {
+ msg: format!("read parquet metadata failed: {e}"),
+ }
+ .build()
+ })?;
+
+ let region_meta = extract_region_metadata(&self.source, &parquet_meta)?;
+ let num_rows = parquet_meta.file_metadata().num_rows() as u64;
+ let num_row_groups = parquet_meta.num_row_groups() as u64;
+
+ println!(
+ "{} Metadata loaded - rows: {}, size: {} bytes",
+ "✓".green(),
+ num_rows,
+ file_size
+ );
+
+ // Build a FileHandle for the source file
+ let file_meta = FileMeta {
+ region_id: region_meta.region_id,
+ file_id: components.file_id,
+ time_range: Default::default(),
+ level: 0,
+ file_size,
+ available_indexes: Default::default(),
+ indexes: Default::default(),
+ index_file_size: 0,
+ index_file_id: None,
+ num_rows,
+ num_row_groups,
+ sequence: None,
+ partition_expr: None,
+ num_series: 0,
+ };
+ let src_handle = FileHandle::new(file_meta, new_noop_file_purger());
+
+ // Build the reader for a single file via ParquetReaderBuilder
+ let table_dir = components.table_dir();
+ let (src_access_layer, cache_manager) = build_access_layer_simple(
+ &components,
+ object_store.clone(),
+ &mut mito_engine_config,
+ &store_cfg.data_home,
+ )
+ .await?;
+ let reader_build_start = Instant::now();
+
+ let reader = ParquetReaderBuilder::new(
+ table_dir,
+ components.path_type,
+ src_handle.clone(),
+ object_store.clone(),
+ )
+ .expected_metadata(Some(region_meta.clone()))
+ .build()
+ .await
+ .map_err(|e| {
+ error::IllegalConfigSnafu {
+ msg: format!("build reader failed: {e:?}"),
+ }
+ .build()
+ })?;
+
+ let reader_build_elapsed = reader_build_start.elapsed();
+ let total_rows = reader.parquet_metadata().file_metadata().num_rows();
+ println!("{} Reader built in {:?}", "✓".green(), reader_build_elapsed);
+
+ // Build write request
+ let fulltext_index_config = FulltextIndexConfig {
+ create_on_compaction: Mode::Disable,
+ ..Default::default()
+ };
+
+ let write_req = SstWriteRequest {
+ op_type: OperationType::Flush,
+ metadata: region_meta,
+ source: Either::Left(Source::Reader(Box::new(reader))),
+ cache_manager,
+ storage: None,
+ max_sequence: None,
+ index_options: Default::default(),
+ index_config: mito_engine_config.index.clone(),
+ inverted_index_config: MitoConfig::default().inverted_index,
+ fulltext_index_config,
+ bloom_filter_index_config: MitoConfig::default().bloom_filter_index,
+ };
+
+ // Write SST
+ println!("{}", "Writing SST...".yellow());
+
+ // Start profiling if pprof_file is specified
+ #[cfg(unix)]
+ let profiler_guard = if self.pprof_file.is_some() {
+ println!("{} Starting profiling...", "⚡".yellow());
+ Some(
+ pprof::ProfilerGuardBuilder::default()
+ .frequency(99)
+ .blocklist(&["libc", "libgcc", "pthread", "vdso"])
+ .build()
+ .map_err(|e| {
+ error::IllegalConfigSnafu {
+ msg: format!("Failed to start profiler: {e}"),
+ }
+ .build()
+ })?,
+ )
+ } else {
+ None
+ };
+
+ #[cfg(not(unix))]
+ if self.pprof_file.is_some() {
+ eprintln!(
+ "{}: Profiling is not supported on this platform",
+ "Warning".yellow()
+ );
+ }
+
+ let write_start = Instant::now();
+ let mut metrics = Metrics::new(WriteType::Flush);
+ let infos = src_access_layer
+ .write_sst(write_req, &WriteOptions::default(), &mut metrics)
+ .await
+ .map_err(|e| {
+ error::IllegalConfigSnafu {
+ msg: format!("write_sst failed: {e:?}"),
+ }
+ .build()
+ })?;
+
+ let write_elapsed = write_start.elapsed();
+
+ // Stop profiling and generate flamegraph if enabled
+ #[cfg(unix)]
+ if let (Some(guard), Some(pprof_file)) = (profiler_guard, &self.pprof_file) {
+ println!("{} Generating flamegraph...", "🔥".yellow());
+ match guard.report().build() {
+ Ok(report) => {
+ let mut flamegraph_data = Vec::new();
+ if let Err(e) = report.flamegraph(&mut flamegraph_data) {
+ println!("{}: Failed to generate flamegraph: {}", "Error".red(), e);
+ } else if let Err(e) = std::fs::write(pprof_file, flamegraph_data) {
+ println!(
+ "{}: Failed to write flamegraph to {}: {}",
+ "Error".red(),
+ pprof_file.display(),
+ e
+ );
+ } else {
+ println!(
+ "{} Flamegraph saved to {}",
+ "✓".green(),
+ pprof_file.display().to_string().cyan()
+ );
+ }
+ }
+ Err(e) => {
+ println!("{}: Failed to generate pprof report: {}", "Error".red(), e);
+ }
+ }
+ }
+ assert_eq!(infos.len(), 1);
+ let dst_file_id = infos[0].file_id;
+ let dst_file_path = format!("{}/{}.parquet", components.region_dir(), dst_file_id);
+ let mut dst_index_path = None;
+ if infos[0].index_metadata.file_size > 0 {
+ dst_index_path = Some(format!(
+ "{}/index/{}.puffin",
+ components.region_dir(),
+ dst_file_id
+ ));
+ }
+
+ // Report results with ANSI colors
+ println!("\n{} {}", "Write complete!".green().bold(), "✓".green());
+ println!(" {}: {}", "Destination file".bold(), dst_file_path.cyan());
+ println!(" {}: {}", "Rows".bold(), total_rows.to_string().cyan());
+ println!(
+ " {}: {}",
+ "File size".bold(),
+ format!("{} bytes", file_size).cyan()
+ );
+ println!(
+ " {}: {:?}",
+ "Reader build time".bold(),
+ reader_build_elapsed
+ );
+ println!(" {}: {:?}", "Total time".bold(), write_elapsed);
+
+ // Print metrics in a formatted way
+ println!(" {}: {:?}", "Metrics".bold(), metrics,);
+
+ // Print infos
+ println!(" {}: {:?}", "Index".bold(), infos[0].index_metadata);
+
+ // Cleanup
+ println!("\n{}", "Cleaning up...".yellow());
+ object_store.delete(&dst_file_path).await.map_err(|e| {
+ error::IllegalConfigSnafu {
+ msg: format!("Failed to delete dest file {}: {}", dst_file_path, e),
+ }
+ .build()
+ })?;
+ println!("{} Temporary file {} deleted", "✓".green(), dst_file_path);
+
+ if let Some(index_path) = dst_index_path {
+ object_store.delete(&index_path).await.map_err(|e| {
+ error::IllegalConfigSnafu {
+ msg: format!("Failed to delete dest index file {}: {}", index_path, e),
+ }
+ .build()
+ })?;
+ println!(
+ "{} Temporary index file {} deleted",
+ "✓".green(),
+ index_path
+ );
+ }
+
+ println!("\n{}", "Benchmark completed successfully!".green().bold());
+ Ok(())
+ }
+}
+
+#[derive(Debug)]
+struct FileDirComponents {
+ catalog: String,
+ schema: String,
+ table_id: u32,
+ region_sequence: u32,
+ path_type: PathType,
+ file_id: FileId,
+}
+
+impl FileDirComponents {
+ fn table_dir(&self) -> String {
+ format!("data/{}/{}/{}", self.catalog, self.schema, self.table_id)
+ }
+
+ fn region_dir(&self) -> String {
+ let region_name = region_name(self.table_id, self.region_sequence);
+ match self.path_type {
+ PathType::Bare => {
+ format!(
+ "data/{}/{}/{}/{}",
+ self.catalog, self.schema, self.table_id, region_name
+ )
+ }
+ PathType::Data => {
+ format!(
+ "data/{}/{}/{}/{}/data",
+ self.catalog, self.schema, self.table_id, region_name
+ )
+ }
+ PathType::Metadata => {
+ format!(
+ "data/{}/{}/{}/{}/metadata",
+ self.catalog, self.schema, self.table_id, region_name
+ )
+ }
+ }
+ }
+}
+
+fn parse_file_dir_components(path: &str) -> error::Result<FileDirComponents> {
+ // Define the regex pattern to match all three path styles
+ let pattern =
+ r"^data/([^/]+)/([^/]+)/([^/]+)/([^/]+)_([^/]+)(?:/data|/metadata)?/(.+).parquet$";
+
+ // Compile the regex
+ let re = Regex::new(pattern).expect("Invalid regex pattern");
+
+ // Determine the path type
+ let path_type = if path.contains("/data/") {
+ PathType::Data
+ } else if path.contains("/metadata/") {
+ PathType::Metadata
+ } else {
+ PathType::Bare
+ };
+
+ // Try to match the path
+ let components = (|| {
+ let captures = re.captures(path)?;
+ if captures.len() != 7 {
+ return None;
+ }
+ let mut components = FileDirComponents {
+ catalog: "".to_string(),
+ schema: "".to_string(),
+ table_id: 0,
+ region_sequence: 0,
+ path_type,
+ file_id: FileId::default(),
+ };
+ // Extract the components
+ components.catalog = captures.get(1)?.as_str().to_string();
+ components.schema = captures.get(2)?.as_str().to_string();
+ components.table_id = captures[3].parse().ok()?;
+ components.region_sequence = captures[5].parse().ok()?;
+ let file_id_str = &captures[6];
+ components.file_id = FileId::parse_str(file_id_str).ok()?;
+ Some(components)
+ })();
+ components.context(error::IllegalConfigSnafu {
+ msg: format!("Expect valid source file path, got: {}", path),
+ })
+}
+
+fn extract_region_metadata(
+ file_path: &str,
+ meta: &parquet::file::metadata::ParquetMetaData,
+) -> error::Result<RegionMetadataRef> {
+ use parquet::format::KeyValue;
+ let kvs: Option<&Vec<KeyValue>> = meta.file_metadata().key_value_metadata();
+ let Some(kvs) = kvs else {
+ return Err(error::IllegalConfigSnafu {
+ msg: format!("{file_path}: missing parquet key_value metadata"),
+ }
+ .build());
+ };
+ let json = kvs
+ .iter()
+ .find(|kv| kv.key == PARQUET_METADATA_KEY)
+ .and_then(|kv| kv.value.as_ref())
+ .ok_or_else(|| {
+ error::IllegalConfigSnafu {
+ msg: format!("{file_path}: key {PARQUET_METADATA_KEY} not found or empty"),
+ }
+ .build()
+ })?;
+ let region: RegionMetadata = RegionMetadata::from_json(json).map_err(|e| {
+ error::IllegalConfigSnafu {
+ msg: format!("invalid region metadata json: {e}"),
+ }
+ .build()
+ })?;
+ Ok(Arc::new(region))
+}
+
+async fn build_object_store(sc: &StorageConfig) -> error::Result<ObjectStore> {
+ store::new_object_store(sc.store.clone(), &sc.data_home)
+ .await
+ .map_err(|e| {
+ error::IllegalConfigSnafu {
+ msg: format!("Failed to build object store: {e:?}"),
+ }
+ .build()
+ })
+}
+
+async fn build_access_layer_simple(
+ components: &FileDirComponents,
+ object_store: ObjectStore,
+ config: &mut MitoConfig,
+ data_home: &str,
+) -> error::Result<(AccessLayerRef, CacheManagerRef)> {
+ let _ = config.index.sanitize(data_home, &config.inverted_index);
+ let puffin_manager = PuffinManagerFactory::new(
+ &config.index.aux_path,
+ config.index.staging_size.as_bytes(),
+ Some(config.index.write_buffer_size.as_bytes() as _),
+ config.index.staging_ttl,
+ )
+ .await
+ .map_err(|e| {
+ error::IllegalConfigSnafu {
+ msg: format!("Failed to build access layer: {e:?}"),
+ }
+ .build()
+ })?;
+
+ let intermediate_manager = IntermediateManager::init_fs(&config.index.aux_path)
+ .await
+ .map_err(|e| {
+ error::IllegalConfigSnafu {
+ msg: format!("Failed to build IntermediateManager: {e:?}"),
+ }
+ .build()
+ })?
+ .with_buffer_size(Some(config.index.write_buffer_size.as_bytes() as _));
+
+ let cache_manager =
+ build_cache_manager(config, puffin_manager.clone(), intermediate_manager.clone()).await?;
+ let layer = AccessLayer::new(
+ components.table_dir(),
+ components.path_type,
+ object_store,
+ puffin_manager,
+ intermediate_manager,
+ );
+ Ok((Arc::new(layer), cache_manager))
+}
+
+async fn build_cache_manager(
+ config: &MitoConfig,
+ puffin_manager: PuffinManagerFactory,
+ intermediate_manager: IntermediateManager,
+) -> error::Result<CacheManagerRef> {
+ let write_cache = write_cache_from_config(config, puffin_manager, intermediate_manager)
+ .await
+ .map_err(|e| {
+ error::IllegalConfigSnafu {
+ msg: format!("Failed to build write cache: {e:?}"),
+ }
+ .build()
+ })?;
+ let cache_manager = Arc::new(
+ CacheManager::builder()
+ .sst_meta_cache_size(config.sst_meta_cache_size.as_bytes())
+ .vector_cache_size(config.vector_cache_size.as_bytes())
+ .page_cache_size(config.page_cache_size.as_bytes())
+ .selector_result_cache_size(config.selector_result_cache_size.as_bytes())
+ .index_metadata_size(config.index.metadata_cache_size.as_bytes())
+ .index_content_size(config.index.content_cache_size.as_bytes())
+ .index_content_page_size(config.index.content_cache_page_size.as_bytes())
+ .index_result_cache_size(config.index.result_cache_size.as_bytes())
+ .puffin_metadata_size(config.index.metadata_cache_size.as_bytes())
+ .write_cache(write_cache)
+ .build(),
+ );
+ Ok(cache_manager)
+}
+
+fn new_noop_file_purger() -> FilePurgerRef {
+ #[derive(Debug)]
+ struct Noop;
+ impl FilePurger for Noop {
+ fn remove_file(&self, _file_meta: FileMeta, _is_delete: bool) {}
+ }
+ Arc::new(Noop)
+}
+
+async fn load_parquet_metadata(
+ object_store: ObjectStore,
+ path: &str,
+ file_size: u64,
+) -> Result<parquet::file::metadata::ParquetMetaData, Box<dyn std::error::Error>> {
+ use parquet::file::FOOTER_SIZE;
+ use parquet::file::metadata::ParquetMetaDataReader;
+ let actual_size = if file_size == 0 {
+ object_store.stat(path).await?.content_length()
+ } else {
+ file_size
+ };
+ if actual_size < FOOTER_SIZE as u64 {
+ return Err("file too small".into());
+ }
+ let prefetch: u64 = 64 * 1024;
+ let start = actual_size.saturating_sub(prefetch);
+ let buffer = object_store
+ .read_with(path)
+ .range(start..actual_size)
+ .await?
+ .to_vec();
+ let buffer_len = buffer.len();
+ let mut footer = [0; 8];
+ footer.copy_from_slice(&buffer[buffer_len - FOOTER_SIZE..]);
+ let footer = ParquetMetaDataReader::decode_footer_tail(&footer)?;
+ let metadata_len = footer.metadata_length() as u64;
+ if actual_size - (FOOTER_SIZE as u64) < metadata_len {
+ return Err("invalid footer/metadata length".into());
+ }
+ if (metadata_len as usize) <= buffer_len - FOOTER_SIZE {
+ let metadata_start = buffer_len - metadata_len as usize - FOOTER_SIZE;
+ let meta = ParquetMetaDataReader::decode_metadata(
+ &buffer[metadata_start..buffer_len - FOOTER_SIZE],
+ )?;
+ Ok(meta)
+ } else {
+ let metadata_start = actual_size - metadata_len - FOOTER_SIZE as u64;
+ let data = object_store
+ .read_with(path)
+ .range(metadata_start..(actual_size - FOOTER_SIZE as u64))
+ .await?
+ .to_vec();
+ let meta = ParquetMetaDataReader::decode_metadata(&data)?;
+ Ok(meta)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::path::PathBuf;
+ use std::str::FromStr;
+
+ use common_base::readable_size::ReadableSize;
+ use store_api::region_request::PathType;
+
+ use crate::datanode::objbench::{parse_config, parse_file_dir_components};
+
+ #[test]
+ fn test_parse_dir() {
+ let meta_path = "data/greptime/public/1024/1024_0000000000/metadata/00020380-009c-426d-953e-b4e34c15af34.parquet";
+ let c = parse_file_dir_components(meta_path).unwrap();
+ assert_eq!(
+ c.file_id.to_string(),
+ "00020380-009c-426d-953e-b4e34c15af34"
+ );
+ assert_eq!(c.catalog, "greptime");
+ assert_eq!(c.schema, "public");
+ assert_eq!(c.table_id, 1024);
+ assert_eq!(c.region_sequence, 0);
+ assert_eq!(c.path_type, PathType::Metadata);
+
+ let c = parse_file_dir_components(
+ "data/greptime/public/1024/1024_0000000000/data/00020380-009c-426d-953e-b4e34c15af34.parquet",
+ ).unwrap();
+ assert_eq!(
+ c.file_id.to_string(),
+ "00020380-009c-426d-953e-b4e34c15af34"
+ );
+ assert_eq!(c.catalog, "greptime");
+ assert_eq!(c.schema, "public");
+ assert_eq!(c.table_id, 1024);
+ assert_eq!(c.region_sequence, 0);
+ assert_eq!(c.path_type, PathType::Data);
+
+ let c = parse_file_dir_components(
+ "data/greptime/public/1024/1024_0000000000/00020380-009c-426d-953e-b4e34c15af34.parquet",
+ ).unwrap();
+ assert_eq!(
+ c.file_id.to_string(),
+ "00020380-009c-426d-953e-b4e34c15af34"
+ );
+ assert_eq!(c.catalog, "greptime");
+ assert_eq!(c.schema, "public");
+ assert_eq!(c.table_id, 1024);
+ assert_eq!(c.region_sequence, 0);
+ assert_eq!(c.path_type, PathType::Bare);
+ }
+
+ #[test]
+ fn test_parse_config() {
+ let path = "../../config/datanode.example.toml";
+ let (storage, engine) = parse_config(&PathBuf::from_str(path).unwrap()).unwrap();
+ assert_eq!(storage.data_home, "./greptimedb_data");
+ assert_eq!(engine.index.staging_size, ReadableSize::gb(2));
+ }
+}
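For reference, a hedged sketch of how the new subcommand would be invoked; the binary name and file paths are illustrative assumptions, while the flags and the source-path layout follow the clap definitions and tests above:

    greptime datanode objbench \
        --config ./config/datanode.example.toml \
        --source data/greptime/public/1024/1024_0000000000/data/00020380-009c-426d-953e-b4e34c15af34.parquet \
        --pprof-file /tmp/objbench.svg \
        --verbose

The command reads the given SST through ParquetReaderBuilder, rewrites it via AccessLayer::write_sst, prints timing, metrics and index information, and finally deletes the temporary output parquet (and puffin index, if any) that it created.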
diff --git a/src/cmd/src/error.rs b/src/cmd/src/error.rs
index 0b77dec341..fbff2d42e0 100644
--- a/src/cmd/src/error.rs
+++ b/src/cmd/src/error.rs
@@ -99,13 +99,6 @@ pub enum Error {
source: flow::Error,
},
- #[snafu(display("Servers error"))]
- Servers {
- #[snafu(implicit)]
- location: Location,
- source: servers::error::Error,
- },
-
#[snafu(display("Failed to start frontend"))]
StartFrontend {
#[snafu(implicit)]
@@ -336,7 +329,6 @@ impl ErrorExt for Error {
Error::ShutdownFrontend { source, .. } => source.status_code(),
Error::StartMetaServer { source, .. } => source.status_code(),
Error::ShutdownMetaServer { source, .. } => source.status_code(),
- Error::Servers { source, .. } => source.status_code(),
Error::BuildMetaServer { source, .. } => source.status_code(),
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
Error::BuildCli { source, .. } => source.status_code(),
diff --git a/src/cmd/src/flownode.rs b/src/cmd/src/flownode.rs
index 500e9bfa89..6cefdb0f79 100644
--- a/src/cmd/src/flownode.rs
+++ b/src/cmd/src/flownode.rs
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::fmt::Debug;
use std::path::Path;
use std::sync::Arc;
use std::time::Duration;
@@ -30,6 +31,7 @@ use common_meta::heartbeat::handler::invalidate_table_cache::InvalidateCacheHand
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
use common_meta::key::TableMetadataManager;
use common_meta::key::flow::FlowMetadataManager;
+use common_stat::ResourceStatImpl;
use common_telemetry::info;
use common_telemetry::logging::{DEFAULT_LOGGING_DIR, TracingOptions};
use common_version::{short_version, verbose_version};
@@ -38,12 +40,14 @@ use flow::{
get_flow_auth_options,
};
use meta_client::{MetaClientOptions, MetaClientType};
+use plugins::flownode::context::GrpcConfigureContext;
+use servers::configurator::GrpcBuilderConfiguratorRef;
use snafu::{OptionExt, ResultExt, ensure};
use tracing_appender::non_blocking::WorkerGuard;
use crate::error::{
BuildCacheRegistrySnafu, InitMetadataSnafu, LoadLayeredConfigSnafu, MetaClientInitSnafu,
- MissingConfigSnafu, Result, ShutdownFlownodeSnafu, StartFlownodeSnafu,
+ MissingConfigSnafu, OtherSnafu, Result, ShutdownFlownodeSnafu, StartFlownodeSnafu,
};
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{App, create_resource_limit_metrics, log_versions, maybe_activate_heap_profile};
@@ -54,33 +58,14 @@ type FlownodeOptions = GreptimeOptions;
pub struct Instance {
flownode: FlownodeInstance,
-
- // The components of flownode, which make it easier to expand based
- // on the components.
- #[cfg(feature = "enterprise")]
- components: Components,
-
// Keep the logging guard to prevent the worker from being dropped.
_guard: Vec<WorkerGuard>,
}
-#[cfg(feature = "enterprise")]
-pub struct Components {
- pub catalog_manager: catalog::CatalogManagerRef,
- pub fe_client: Arc,
- pub kv_backend: common_meta::kv_backend::KvBackendRef,
-}
-
impl Instance {
- pub fn new(
- flownode: FlownodeInstance,
- #[cfg(feature = "enterprise")] components: Components,
- guard: Vec<WorkerGuard>,
- ) -> Self {
+ pub fn new(flownode: FlownodeInstance, guard: Vec<WorkerGuard>) -> Self {
Self {
flownode,
- #[cfg(feature = "enterprise")]
- components,
_guard: guard,
}
}
@@ -93,11 +78,6 @@ impl Instance {
pub fn flownode_mut(&mut self) -> &mut FlownodeInstance {
&mut self.flownode
}
-
- #[cfg(feature = "enterprise")]
- pub fn components(&self) -> &Components {
- &self.components
- }
}
#[async_trait::async_trait]
@@ -372,11 +352,15 @@ impl StartCommand {
Arc::new(InvalidateCacheHandler::new(layered_cache_registry.clone())),
]);
+ let mut resource_stat = ResourceStatImpl::default();
+ resource_stat.start_collect_cpu_usage();
+
let heartbeat_task = flow::heartbeat::HeartbeatTask::new(
&opts,
meta_client.clone(),
opts.heartbeat.clone(),
Arc::new(executor),
+ Arc::new(resource_stat),
);
let flow_metadata_manager = Arc::new(FlowMetadataManager::new(cached_meta_backend.clone()));
@@ -391,7 +375,7 @@ impl StartCommand {
let frontend_client = Arc::new(frontend_client);
let flownode_builder = FlownodeBuilder::new(
opts.clone(),
- plugins,
+ plugins.clone(),
table_metadata_manager,
catalog_manager.clone(),
flow_metadata_manager,
@@ -400,8 +384,29 @@ impl StartCommand {
.with_heartbeat_task(heartbeat_task);
let mut flownode = flownode_builder.build().await.context(StartFlownodeSnafu)?;
+
+ let builder =
+ FlownodeServiceBuilder::grpc_server_builder(&opts, flownode.flownode_server());
+ let builder = if let Some(configurator) =
+ plugins.get::<GrpcBuilderConfiguratorRef<GrpcConfigureContext>>()
+ {
+ let context = GrpcConfigureContext {
+ kv_backend: cached_meta_backend.clone(),
+ fe_client: frontend_client.clone(),
+ flownode_id: member_id,
+ catalog_manager: catalog_manager.clone(),
+ };
+ configurator
+ .configure(builder, context)
+ .await
+ .context(OtherSnafu)?
+ } else {
+ builder
+ };
+ let grpc_server = builder.build();
+
let services = FlownodeServiceBuilder::new(&opts)
- .with_default_grpc_server(flownode.flownode_server())
+ .with_grpc_server(grpc_server)
.enable_http_service()
.build()
.context(StartFlownodeSnafu)?;
@@ -425,16 +430,6 @@ impl StartCommand {
.set_frontend_invoker(invoker)
.await;
- #[cfg(feature = "enterprise")]
- let components = Components {
- catalog_manager: catalog_manager.clone(),
- fe_client: frontend_client,
- kv_backend: cached_meta_backend,
- };
-
- #[cfg(not(feature = "enterprise"))]
- return Ok(Instance::new(flownode, guard));
- #[cfg(feature = "enterprise")]
- Ok(Instance::new(flownode, components, guard))
+ Ok(Instance::new(flownode, guard))
}
}
diff --git a/src/cmd/src/frontend.rs b/src/cmd/src/frontend.rs
index 4c72021a47..d74b3cee5c 100644
--- a/src/cmd/src/frontend.rs
+++ b/src/cmd/src/frontend.rs
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::fmt::Debug;
use std::path::Path;
use std::sync::Arc;
use std::time::Duration;
@@ -19,17 +20,23 @@ use std::time::Duration;
use async_trait::async_trait;
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
use catalog::information_extension::DistributedInformationExtension;
-use catalog::kvbackend::{CachedKvBackendBuilder, KvBackendCatalogManagerBuilder, MetaKvBackend};
+use catalog::kvbackend::{
+ CachedKvBackendBuilder, CatalogManagerConfiguratorRef, KvBackendCatalogManagerBuilder,
+ MetaKvBackend,
+};
use catalog::process_manager::ProcessManager;
use clap::Parser;
use client::client_manager::NodeClients;
use common_base::Plugins;
use common_config::{Configurable, DEFAULT_DATA_HOME};
+use common_error::ext::BoxedError;
use common_grpc::channel_manager::ChannelConfig;
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
use common_meta::heartbeat::handler::HandlerGroupExecutor;
use common_meta::heartbeat::handler::invalidate_table_cache::InvalidateCacheHandler;
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
+use common_query::prelude::set_default_prefix;
+use common_stat::ResourceStatImpl;
use common_telemetry::info;
use common_telemetry::logging::{DEFAULT_LOGGING_DIR, TracingOptions};
use common_time::timezone::set_default_timezone;
@@ -39,14 +46,16 @@ use frontend::heartbeat::HeartbeatTask;
use frontend::instance::builder::FrontendBuilder;
use frontend::server::Services;
use meta_client::{MetaClientOptions, MetaClientType};
+use plugins::frontend::context::{
+ CatalogManagerConfigureContext, DistributedCatalogManagerConfigureContext,
+};
use servers::addrs;
-use servers::export_metrics::ExportMetricsTask;
use servers::grpc::GrpcOptions;
use servers::tls::{TlsMode, TlsOption};
use snafu::{OptionExt, ResultExt};
use tracing_appender::non_blocking::WorkerGuard;
-use crate::error::{self, Result};
+use crate::error::{self, OtherSnafu, Result};
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{App, create_resource_limit_metrics, log_versions, maybe_activate_heap_profile};
@@ -174,6 +183,8 @@ pub struct StartCommand {
#[clap(long)]
tls_key_path: Option<String>,
#[clap(long)]
+ tls_watch: bool,
+ #[clap(long)]
user_provider: Option<String>,
#[clap(long)]
disable_dashboard: Option<bool>,
@@ -227,6 +238,7 @@ impl StartCommand {
self.tls_mode.clone(),
self.tls_cert_path.clone(),
self.tls_key_path.clone(),
+ self.tls_watch,
);
if let Some(addr) = &self.http_addr {
@@ -332,6 +344,9 @@ impl StartCommand {
.context(error::StartFrontendSnafu)?;
set_default_timezone(opts.default_timezone.as_deref()).context(error::InitTimezoneSnafu)?;
+ set_default_prefix(opts.default_column_prefix.as_deref())
+ .map_err(BoxedError::new)
+ .context(error::BuildCliSnafu)?;
let meta_client_options = opts
.meta_client
@@ -408,9 +423,18 @@ impl StartCommand {
layered_cache_registry.clone(),
)
.with_process_manager(process_manager.clone());
- #[cfg(feature = "enterprise")]
- let builder = if let Some(factories) = plugins.get() {
- builder.with_extra_information_table_factories(factories)
+ let builder = if let Some(configurator) =
+ plugins.get::<CatalogManagerConfiguratorRef<CatalogManagerConfigureContext>>()
+ {
+ let ctx = DistributedCatalogManagerConfigureContext {
+ meta_client: meta_client.clone(),
+ };
+ let ctx = CatalogManagerConfigureContext::Distributed(ctx);
+
+ configurator
+ .configure(builder, ctx)
+ .await
+ .context(OtherSnafu)?
} else {
builder
};
@@ -421,11 +445,15 @@ impl StartCommand {
Arc::new(InvalidateCacheHandler::new(layered_cache_registry.clone())),
]);
+ let mut resource_stat = ResourceStatImpl::default();
+ resource_stat.start_collect_cpu_usage();
+
let heartbeat_task = HeartbeatTask::new(
&opts,
meta_client.clone(),
opts.heartbeat.clone(),
Arc::new(executor),
+ Arc::new(resource_stat),
);
let heartbeat_task = Some(heartbeat_task);
@@ -445,9 +473,6 @@ impl StartCommand {
.context(error::StartFrontendSnafu)?;
let instance = Arc::new(instance);
- let export_metrics_task = ExportMetricsTask::try_new(&opts.export_metrics, Some(&plugins))
- .context(error::ServersSnafu)?;
-
let servers = Services::new(opts, instance.clone(), plugins)
.build()
.context(error::StartFrontendSnafu)?;
@@ -456,7 +481,6 @@ impl StartCommand {
instance,
servers,
heartbeat_task,
- export_metrics_task,
};
Ok(Instance::new(frontend, guard))
diff --git a/src/cmd/src/metasrv.rs b/src/cmd/src/metasrv.rs
index 4f71775e74..ee67267de3 100644
--- a/src/cmd/src/metasrv.rs
+++ b/src/cmd/src/metasrv.rs
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::fmt;
+use std::fmt::{self, Debug};
use std::path::Path;
use std::time::Duration;
@@ -23,7 +23,7 @@ use common_config::Configurable;
use common_telemetry::info;
use common_telemetry::logging::{DEFAULT_LOGGING_DIR, TracingOptions};
use common_version::{short_version, verbose_version};
-use meta_srv::bootstrap::MetasrvInstance;
+use meta_srv::bootstrap::{MetasrvInstance, metasrv_builder};
use meta_srv::metasrv::BackendImpl;
use snafu::ResultExt;
use tracing_appender::non_blocking::WorkerGuard;
@@ -177,7 +177,7 @@ pub struct StartCommand {
backend: Option,
}
-impl fmt::Debug for StartCommand {
+impl Debug for StartCommand {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("StartCommand")
.field("rpc_bind_addr", &self.rpc_bind_addr)
@@ -341,7 +341,7 @@ impl StartCommand {
.await
.context(StartMetaServerSnafu)?;
- let builder = meta_srv::bootstrap::metasrv_builder(&opts, plugins, None)
+ let builder = metasrv_builder(&opts, plugins, None)
.await
.context(error::BuildMetaServerSnafu)?;
let metasrv = builder.build().await.context(error::BuildMetaServerSnafu)?;
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index 58602d0a39..1ef16a830f 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::fmt::Debug;
use std::net::SocketAddr;
use std::path::Path;
use std::sync::Arc;
@@ -20,7 +21,7 @@ use std::{fs, path};
use async_trait::async_trait;
use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
use catalog::information_schema::InformationExtensionRef;
-use catalog::kvbackend::KvBackendCatalogManagerBuilder;
+use catalog::kvbackend::{CatalogManagerConfiguratorRef, KvBackendCatalogManagerBuilder};
use catalog::process_manager::ProcessManager;
use clap::Parser;
use common_base::Plugins;
@@ -31,7 +32,7 @@ use common_meta::cache::LayeredCacheRegistryBuilder;
use common_meta::ddl::flow_meta::FlowMetadataAllocator;
use common_meta::ddl::table_meta::TableMetadataAllocator;
use common_meta::ddl::{DdlContext, NoopRegionFailureDetectorControl};
-use common_meta::ddl_manager::DdlManager;
+use common_meta::ddl_manager::{DdlManager, DdlManagerConfiguratorRef};
use common_meta::key::flow::FlowMetadataManager;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef;
@@ -41,6 +42,7 @@ use common_meta::region_registry::LeaderRegionRegistry;
use common_meta::sequence::SequenceBuilder;
use common_meta::wal_options_allocator::{WalOptionsAllocatorRef, build_wal_options_allocator};
use common_procedure::ProcedureManagerRef;
+use common_query::prelude::set_default_prefix;
use common_telemetry::info;
use common_telemetry::logging::{DEFAULT_LOGGING_DIR, TracingOptions};
use common_time::timezone::set_default_timezone;
@@ -56,14 +58,17 @@ use frontend::instance::StandaloneDatanodeManager;
use frontend::instance::builder::FrontendBuilder;
use frontend::server::Services;
use meta_srv::metasrv::{FLOW_ID_SEQ, TABLE_ID_SEQ};
-use servers::export_metrics::ExportMetricsTask;
+use plugins::frontend::context::{
+ CatalogManagerConfigureContext, StandaloneCatalogManagerConfigureContext,
+};
+use plugins::standalone::context::DdlManagerConfigureContext;
use servers::tls::{TlsMode, TlsOption};
use snafu::ResultExt;
use standalone::StandaloneInformationExtension;
use standalone::options::StandaloneOptions;
use tracing_appender::non_blocking::WorkerGuard;
-use crate::error::{Result, StartFlownodeSnafu};
+use crate::error::{OtherSnafu, Result, StartFlownodeSnafu};
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{App, create_resource_limit_metrics, error, log_versions, maybe_activate_heap_profile};
@@ -116,34 +121,15 @@ pub struct Instance {
flownode: FlownodeInstance,
procedure_manager: ProcedureManagerRef,
wal_options_allocator: WalOptionsAllocatorRef,
-
- // The components of standalone, which make it easier to expand based
- // on the components.
- #[cfg(feature = "enterprise")]
- components: Components,
-
// Keep the logging guard to prevent the worker from being dropped.
_guard: Vec<WorkerGuard>,
}
-#[cfg(feature = "enterprise")]
-pub struct Components {
- pub plugins: Plugins,
- pub kv_backend: KvBackendRef,
- pub frontend_client: Arc,
- pub catalog_manager: catalog::CatalogManagerRef,
-}
-
impl Instance {
/// Find the socket addr of a server by its `name`.
pub fn server_addr(&self, name: &str) -> Option<SocketAddr> {
self.frontend.server_handlers().addr(name)
}
-
- #[cfg(feature = "enterprise")]
- pub fn components(&self) -> &Components {
- &self.components
- }
}
#[async_trait]
@@ -227,6 +213,8 @@ pub struct StartCommand {
#[clap(long)]
tls_key_path: Option<String>,
#[clap(long)]
+ tls_watch: bool,
+ #[clap(long)]
user_provider: Option<String>,
#[clap(long, default_value = "GREPTIMEDB_STANDALONE")]
pub env_prefix: String,
@@ -276,6 +264,7 @@ impl StartCommand {
self.tls_mode.clone(),
self.tls_cert_path.clone(),
self.tls_key_path.clone(),
+ self.tls_watch,
);
if let Some(addr) = &self.http_addr {
@@ -355,6 +344,10 @@ impl StartCommand {
let mut plugins = Plugins::new();
let plugin_opts = opts.plugins;
let mut opts = opts.component;
+ set_default_prefix(opts.default_column_prefix.as_deref())
+ .map_err(BoxedError::new)
+ .context(error::BuildCliSnafu)?;
+
opts.grpc.detect_server_addr();
let fe_opts = opts.frontend_options();
let dn_opts = opts.datanode_options();
@@ -408,6 +401,13 @@ impl StartCommand {
plugins.insert::(information_extension.clone());
let process_manager = Arc::new(ProcessManager::new(opts.grpc.server_addr.clone(), None));
+
+ // Standalone mode does not use gRPC here, but we still need a handle to the frontend
+ // gRPC client without actually making a connection.
+ let (frontend_client, frontend_instance_handler) =
+ FrontendClient::from_empty_grpc_handler(opts.query.clone());
+ let frontend_client = Arc::new(frontend_client);
+
let builder = KvBackendCatalogManagerBuilder::new(
information_extension.clone(),
kv_backend.clone(),
@@ -415,9 +415,17 @@ impl StartCommand {
)
.with_procedure_manager(procedure_manager.clone())
.with_process_manager(process_manager.clone());
- #[cfg(feature = "enterprise")]
- let builder = if let Some(factories) = plugins.get() {
- builder.with_extra_information_table_factories(factories)
+ let builder = if let Some(configurator) =
+ plugins.get::<CatalogManagerConfiguratorRef<CatalogManagerConfigureContext>>()
+ {
+ let ctx = StandaloneCatalogManagerConfigureContext {
+ fe_client: frontend_client.clone(),
+ };
+ let ctx = CatalogManagerConfigureContext::Standalone(ctx);
+ configurator
+ .configure(builder, ctx)
+ .await
+ .context(OtherSnafu)?
} else {
builder
};
@@ -432,11 +440,6 @@ impl StartCommand {
..Default::default()
};
- // for standalone not use grpc, but get a handler to frontend grpc client without
- // actually make a connection
- let (frontend_client, frontend_instance_handler) =
- FrontendClient::from_empty_grpc_handler(opts.query.clone());
- let frontend_client = Arc::new(frontend_client);
let flow_builder = FlownodeBuilder::new(
flownode_options,
plugins.clone(),
@@ -507,11 +510,21 @@ impl StartCommand {
let ddl_manager = DdlManager::try_new(ddl_context, procedure_manager.clone(), true)
.context(error::InitDdlManagerSnafu)?;
- #[cfg(feature = "enterprise")]
- let ddl_manager = {
- let trigger_ddl_manager: Option =
- plugins.get();
- ddl_manager.with_trigger_ddl_manager(trigger_ddl_manager)
+
+ let ddl_manager = if let Some(configurator) =
+ plugins.get::<DdlManagerConfiguratorRef<DdlManagerConfigureContext>>()
+ {
+ let ctx = DdlManagerConfigureContext {
+ kv_backend: kv_backend.clone(),
+ fe_client: frontend_client.clone(),
+ catalog_manager: catalog_manager.clone(),
+ };
+ configurator
+ .configure(ddl_manager, ctx)
+ .await
+ .context(OtherSnafu)?
+ } else {
+ ddl_manager
};
let procedure_executor = Arc::new(LocalProcedureExecutor::new(
@@ -557,9 +570,6 @@ impl StartCommand {
.context(StartFlownodeSnafu)?;
flow_streaming_engine.set_frontend_invoker(invoker).await;
- let export_metrics_task = ExportMetricsTask::try_new(&opts.export_metrics, Some(&plugins))
- .context(error::ServersSnafu)?;
-
let servers = Services::new(opts, fe_instance.clone(), plugins.clone())
.build()
.context(error::StartFrontendSnafu)?;
@@ -568,15 +578,6 @@ impl StartCommand {
instance: fe_instance,
servers,
heartbeat_task: None,
- export_metrics_task,
- };
-
- #[cfg(feature = "enterprise")]
- let components = Components {
- plugins,
- kv_backend,
- frontend_client,
- catalog_manager,
};
Ok(Instance {
@@ -585,8 +586,6 @@ impl StartCommand {
flownode,
procedure_manager,
wal_options_allocator,
- #[cfg(feature = "enterprise")]
- components,
_guard: guard,
})
}
@@ -764,6 +763,9 @@ mod tests {
fn test_load_log_options_from_cli() {
let cmd = StartCommand {
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
+ mysql_addr: Some("127.0.0.1:4002".to_string()),
+ postgres_addr: Some("127.0.0.1:4003".to_string()),
+ tls_watch: true,
..Default::default()
};
@@ -780,6 +782,8 @@ mod tests {
assert_eq!("./greptimedb_data/test/logs", opts.logging.dir);
assert_eq!("debug", opts.logging.level.unwrap());
+ assert!(opts.mysql.tls.watch);
+ assert!(opts.postgres.tls.watch);
}
#[test]
diff --git a/src/cmd/tests/load_config_test.rs b/src/cmd/tests/load_config_test.rs
index b92cf9631d..56a6caa71b 100644
--- a/src/cmd/tests/load_config_test.rs
+++ b/src/cmd/tests/load_config_test.rs
@@ -15,6 +15,7 @@
use std::time::Duration;
use cmd::options::GreptimeOptions;
+use common_base::memory_limit::MemoryLimit;
use common_config::{Configurable, DEFAULT_DATA_HOME};
use common_options::datanode::{ClientOptions, DatanodeClientOptions};
use common_telemetry::logging::{DEFAULT_LOGGING_DIR, DEFAULT_OTLP_HTTP_ENDPOINT, LoggingOptions};
@@ -30,7 +31,6 @@ use meta_srv::selector::SelectorType;
use metric_engine::config::EngineConfig as MetricEngineConfig;
use mito2::config::MitoConfig;
use query::options::QueryOptions;
-use servers::export_metrics::ExportMetricsOption;
use servers::grpc::GrpcOptions;
use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
@@ -48,6 +48,7 @@ fn test_load_datanode_example_config() {
let expected = GreptimeOptions:: {
component: DatanodeOptions {
node_id: Some(42),
+ default_column_prefix: Some("greptime".to_string()),
meta_client: Some(MetaClientOptions {
metasrv_addrs: vec!["127.0.0.1:3002".to_string()],
timeout: Duration::from_secs(3),
@@ -73,14 +74,19 @@ fn test_load_datanode_example_config() {
RegionEngineConfig::Mito(MitoConfig {
auto_flush_interval: Duration::from_secs(3600),
write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
+ scan_memory_limit: MemoryLimit::Percentage(50),
..Default::default()
}),
RegionEngineConfig::File(FileEngineConfig {}),
RegionEngineConfig::Metric(MetricEngineConfig {
- experimental_sparse_primary_key_encoding: false,
+ sparse_primary_key_encoding: true,
flush_metadata_region_interval: Duration::from_secs(30),
}),
],
+ query: QueryOptions {
+ memory_pool_size: MemoryLimit::Percentage(50),
+ ..Default::default()
+ },
logging: LoggingOptions {
level: Some("info".to_string()),
dir: format!("{}/{}", DEFAULT_DATA_HOME, DEFAULT_LOGGING_DIR),
@@ -88,11 +94,6 @@ fn test_load_datanode_example_config() {
tracing_sample_ratio: Some(Default::default()),
..Default::default()
},
- export_metrics: ExportMetricsOption {
- self_import: None,
- remote_write: Some(Default::default()),
- ..Default::default()
- },
grpc: GrpcOptions::default()
.with_bind_addr("127.0.0.1:3001")
.with_server_addr("127.0.0.1:3001"),
@@ -113,6 +114,7 @@ fn test_load_frontend_example_config() {
let expected = GreptimeOptions:: {
component: FrontendOptions {
default_timezone: Some("UTC".to_string()),
+ default_column_prefix: Some("greptime".to_string()),
meta_client: Some(MetaClientOptions {
metasrv_addrs: vec!["127.0.0.1:3002".to_string()],
timeout: Duration::from_secs(3),
@@ -138,11 +140,6 @@ fn test_load_frontend_example_config() {
..Default::default()
},
},
- export_metrics: ExportMetricsOption {
- self_import: None,
- remote_write: Some(Default::default()),
- ..Default::default()
- },
grpc: GrpcOptions {
bind_addr: "127.0.0.1:4001".to_string(),
server_addr: "127.0.0.1:4001".to_string(),
@@ -153,6 +150,10 @@ fn test_load_frontend_example_config() {
cors_allowed_origins: vec!["https://example.com".to_string()],
..Default::default()
},
+ query: QueryOptions {
+ memory_pool_size: MemoryLimit::Percentage(50),
+ ..Default::default()
+ },
..Default::default()
},
..Default::default()
@@ -189,11 +190,6 @@ fn test_load_metasrv_example_config() {
tcp_nodelay: true,
},
},
- export_metrics: ExportMetricsOption {
- self_import: None,
- remote_write: Some(Default::default()),
- ..Default::default()
- },
backend_tls: Some(TlsOption {
mode: TlsMode::Prefer,
cert_path: String::new(),
@@ -240,6 +236,7 @@ fn test_load_flownode_example_config() {
query: QueryOptions {
parallelism: 1,
allow_query_fallback: false,
+ memory_pool_size: MemoryLimit::Percentage(50),
},
meta_client: Some(MetaClientOptions {
metasrv_addrs: vec!["127.0.0.1:3002".to_string()],
@@ -273,6 +270,7 @@ fn test_load_standalone_example_config() {
let expected = GreptimeOptions:: {
component: StandaloneOptions {
default_timezone: Some("UTC".to_string()),
+ default_column_prefix: Some("greptime".to_string()),
wal: DatanodeWalConfig::RaftEngine(RaftEngineConfig {
dir: Some(format!("{}/{}", DEFAULT_DATA_HOME, WAL_DIR)),
sync_period: Some(Duration::from_secs(10)),
@@ -283,11 +281,12 @@ fn test_load_standalone_example_config() {
RegionEngineConfig::Mito(MitoConfig {
auto_flush_interval: Duration::from_secs(3600),
write_cache_ttl: Some(Duration::from_secs(60 * 60 * 8)),
+ scan_memory_limit: MemoryLimit::Percentage(50),
..Default::default()
}),
RegionEngineConfig::File(FileEngineConfig {}),
RegionEngineConfig::Metric(MetricEngineConfig {
- experimental_sparse_primary_key_encoding: false,
+ sparse_primary_key_encoding: true,
flush_metadata_region_interval: Duration::from_secs(30),
}),
],
@@ -302,16 +301,14 @@ fn test_load_standalone_example_config() {
tracing_sample_ratio: Some(Default::default()),
..Default::default()
},
- export_metrics: ExportMetricsOption {
- self_import: Some(Default::default()),
- remote_write: Some(Default::default()),
- ..Default::default()
- },
http: HttpOptions {
cors_allowed_origins: vec!["https://example.com".to_string()],
..Default::default()
},
-
+ query: QueryOptions {
+ memory_pool_size: MemoryLimit::Percentage(50),
+ ..Default::default()
+ },
..Default::default()
},
..Default::default()
diff --git a/src/common/base/Cargo.toml b/src/common/base/Cargo.toml
index ae2945b1f5..4a881990b4 100644
--- a/src/common/base/Cargo.toml
+++ b/src/common/base/Cargo.toml
@@ -18,9 +18,11 @@ bytes.workspace = true
common-error.workspace = true
common-macro.workspace = true
futures.workspace = true
+lazy_static.workspace = true
paste.workspace = true
pin-project.workspace = true
rand.workspace = true
+regex.workspace = true
serde = { version = "1.0", features = ["derive"] }
snafu.workspace = true
tokio.workspace = true
diff --git a/src/common/base/src/lib.rs b/src/common/base/src/lib.rs
index cc5acdbf47..91ee9d3343 100644
--- a/src/common/base/src/lib.rs
+++ b/src/common/base/src/lib.rs
@@ -15,10 +15,12 @@
pub mod bit_vec;
pub mod bytes;
pub mod cancellation;
+pub mod memory_limit;
pub mod plugins;
pub mod range_read;
#[allow(clippy::all)]
pub mod readable_size;
+pub mod regex_pattern;
pub mod secrets;
pub mod serde;
diff --git a/src/common/base/src/memory_limit.rs b/src/common/base/src/memory_limit.rs
new file mode 100644
index 0000000000..7129a4a027
--- /dev/null
+++ b/src/common/base/src/memory_limit.rs
@@ -0,0 +1,265 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::fmt::{self, Display};
+use std::str::FromStr;
+
+use serde::{Deserialize, Deserializer, Serialize, Serializer};
+
+use crate::readable_size::ReadableSize;
+
+/// Memory limit configuration that supports both absolute size and percentage.
+///
+/// Examples:
+/// - Absolute size: "2GB", "4GiB", "512MB"
+/// - Percentage: "50%", "75%"
+/// - Unlimited: "unlimited", "0"
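+///
+/// An illustrative usage sketch (exercising only the APIs defined in this module):
+///
+/// ```
+/// use common_base::memory_limit::MemoryLimit;
+///
+/// let limit: MemoryLimit = "50%".parse().unwrap();
+/// // With 8 GiB of total memory, a 50% limit resolves to 4 GiB.
+/// assert_eq!(limit.resolve(8 * 1024 * 1024 * 1024), 4 * 1024 * 1024 * 1024);
+/// assert!(!limit.is_unlimited());
+/// ```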
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
+pub enum MemoryLimit {
+ /// Absolute memory size.
+ Size(ReadableSize),
+ /// Percentage of total system memory (0-100).
+ Percentage(u8),
+ /// No memory limit.
+ #[default]
+ Unlimited,
+}
+
+impl MemoryLimit {
+ /// Resolve the memory limit to bytes based on total system memory.
+ /// Returns 0 if the limit is unlimited.
+ pub fn resolve(&self, total_memory_bytes: u64) -> u64 {
+ match self {
+ MemoryLimit::Size(size) => size.as_bytes(),
+ MemoryLimit::Percentage(pct) => total_memory_bytes * (*pct as u64) / 100,
+ MemoryLimit::Unlimited => 0,
+ }
+ }
+
+ /// Returns true if this limit is unlimited.
+ pub fn is_unlimited(&self) -> bool {
+ match self {
+ MemoryLimit::Size(size) => size.as_bytes() == 0,
+ MemoryLimit::Percentage(pct) => *pct == 0,
+ MemoryLimit::Unlimited => true,
+ }
+ }
+}
+
+impl FromStr for MemoryLimit {
+ type Err = String;
+
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ let s = s.trim();
+
+ if s.eq_ignore_ascii_case("unlimited") {
+ return Ok(MemoryLimit::Unlimited);
+ }
+
+ if let Some(pct_str) = s.strip_suffix('%') {
+ let pct = pct_str
+ .trim()
+ .parse::<u8>()
+ .map_err(|e| format!("invalid percentage value '{}': {}", pct_str, e))?;
+
+ if pct > 100 {
+ return Err(format!("percentage must be between 0 and 100, got {}", pct));
+ }
+
+ if pct == 0 {
+ Ok(MemoryLimit::Unlimited)
+ } else {
+ Ok(MemoryLimit::Percentage(pct))
+ }
+ } else {
+ let size = ReadableSize::from_str(s)?;
+ if size.as_bytes() == 0 {
+ Ok(MemoryLimit::Unlimited)
+ } else {
+ Ok(MemoryLimit::Size(size))
+ }
+ }
+ }
+}
+
+impl Display for MemoryLimit {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ MemoryLimit::Size(size) => write!(f, "{}", size),
+ MemoryLimit::Percentage(pct) => write!(f, "{}%", pct),
+ MemoryLimit::Unlimited => write!(f, "unlimited"),
+ }
+ }
+}
+
+impl Serialize for MemoryLimit {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ serializer.serialize_str(&self.to_string())
+ }
+}
+
+impl<'de> Deserialize<'de> for MemoryLimit {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ let s = String::deserialize(deserializer)?;
+ MemoryLimit::from_str(&s).map_err(serde::de::Error::custom)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_parse_absolute_size() {
+ assert_eq!(
+ "2GB".parse::().unwrap(),
+ MemoryLimit::Size(ReadableSize(2 * 1024 * 1024 * 1024))
+ );
+ assert_eq!(
+ "512MB".parse::().unwrap(),
+ MemoryLimit::Size(ReadableSize(512 * 1024 * 1024))
+ );
+ assert_eq!("0".parse::().unwrap(), MemoryLimit::Unlimited);
+ }
+
+ #[test]
+ fn test_parse_percentage() {
+ assert_eq!(
+ "50%".parse::().unwrap(),
+ MemoryLimit::Percentage(50)
+ );
+ assert_eq!(
+ "75%".parse::().unwrap(),
+ MemoryLimit::Percentage(75)
+ );
+ assert_eq!("0%".parse::().unwrap(), MemoryLimit::Unlimited);
+ }
+
+ #[test]
+ fn test_parse_invalid() {
+ assert!("150%".parse::().is_err());
+ assert!("-10%".parse::().is_err());
+ assert!("invalid".parse::().is_err());
+ }
+
+ #[test]
+ fn test_resolve() {
+ let total = 8 * 1024 * 1024 * 1024; // 8GB
+
+ assert_eq!(
+ MemoryLimit::Size(ReadableSize(2 * 1024 * 1024 * 1024)).resolve(total),
+ 2 * 1024 * 1024 * 1024
+ );
+ assert_eq!(
+ MemoryLimit::Percentage(50).resolve(total),
+ 4 * 1024 * 1024 * 1024
+ );
+ assert_eq!(MemoryLimit::Unlimited.resolve(total), 0);
+ }
+
+ #[test]
+ fn test_is_unlimited() {
+ assert!(MemoryLimit::Unlimited.is_unlimited());
+ assert!(!MemoryLimit::Size(ReadableSize(1024)).is_unlimited());
+ assert!(!MemoryLimit::Percentage(50).is_unlimited());
+ assert!(!MemoryLimit::Percentage(1).is_unlimited());
+
+ // Defensive: these states shouldn't exist via public API, but check anyway
+ assert!(MemoryLimit::Size(ReadableSize(0)).is_unlimited());
+ assert!(MemoryLimit::Percentage(0).is_unlimited());
+ }
+
+ #[test]
+ fn test_parse_100_percent() {
+ assert_eq!(
+ "100%".parse::().unwrap(),
+ MemoryLimit::Percentage(100)
+ );
+ }
+
+ #[test]
+ fn test_display_percentage() {
+ assert_eq!(MemoryLimit::Percentage(20).to_string(), "20%");
+ assert_eq!(MemoryLimit::Percentage(50).to_string(), "50%");
+ assert_eq!(MemoryLimit::Percentage(100).to_string(), "100%");
+ }
+
+ #[test]
+ fn test_parse_unlimited() {
+ assert_eq!(
+ "unlimited".parse::().unwrap(),
+ MemoryLimit::Unlimited
+ );
+ assert_eq!(
+ "UNLIMITED".parse::().unwrap(),
+ MemoryLimit::Unlimited
+ );
+ assert_eq!(
+ "Unlimited".parse::().unwrap(),
+ MemoryLimit::Unlimited
+ );
+ }
+
+ #[test]
+ fn test_display_unlimited() {
+ assert_eq!(MemoryLimit::Unlimited.to_string(), "unlimited");
+ }
+
+ #[test]
+ fn test_parse_display_roundtrip() {
+ let cases = vec![
+ "50%",
+ "100%",
+ "1%",
+ "2GB",
+ "512MB",
+ "unlimited",
+ "UNLIMITED",
+ "0", // normalized to unlimited
+ "0%", // normalized to unlimited
+ ];
+
+ for input in cases {
+ let parsed = input.parse::<MemoryLimit>().unwrap();
+ let displayed = parsed.to_string();
+ let reparsed = displayed.parse::<MemoryLimit>().unwrap();
+ assert_eq!(
+ parsed, reparsed,
+ "round-trip failed: '{}' -> '{}' -> '{:?}'",
+ input, displayed, reparsed
+ );
+ }
+ }
+
+ #[test]
+ fn test_zero_normalization() {
+ // All forms of zero should normalize to Unlimited
+ assert_eq!("0".parse::().unwrap(), MemoryLimit::Unlimited);
+ assert_eq!("0%".parse::().unwrap(), MemoryLimit::Unlimited);
+ assert_eq!("0B".parse::().unwrap(), MemoryLimit::Unlimited);
+ assert_eq!(
+ "0KB".parse::().unwrap(),
+ MemoryLimit::Unlimited
+ );
+
+ // Unlimited always displays as "unlimited"
+ assert_eq!(MemoryLimit::Unlimited.to_string(), "unlimited");
+ }
+}
diff --git a/src/common/base/src/plugins.rs b/src/common/base/src/plugins.rs
index bbab003c69..aa1a9d1287 100644
--- a/src/common/base/src/plugins.rs
+++ b/src/common/base/src/plugins.rs
@@ -32,7 +32,12 @@ impl Plugins {
pub fn insert<T: 'static + Send + Sync>(&self, value: T) {
let last = self.write().insert(value);
- assert!(last.is_none(), "each type of plugins must be one and only");
+ if last.is_some() {
+ panic!(
+ "Plugin of type {} already exists",
+ std::any::type_name::<T>()
+ );
+ }
}
pub fn get<T: 'static + Send + Sync + Clone>(&self) -> Option<T> {
@@ -140,7 +145,7 @@ mod tests {
}
#[test]
- #[should_panic(expected = "each type of plugins must be one and only")]
+ #[should_panic(expected = "Plugin of type i32 already exists")]
fn test_plugin_uniqueness() {
let plugins = Plugins::new();
plugins.insert(1i32);
diff --git a/src/common/base/src/regex_pattern.rs b/src/common/base/src/regex_pattern.rs
new file mode 100644
index 0000000000..7ff46693ba
--- /dev/null
+++ b/src/common/base/src/regex_pattern.rs
@@ -0,0 +1,22 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use lazy_static::lazy_static;
+use regex::Regex;
+
+pub const NAME_PATTERN: &str = r"[a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*";
+
+lazy_static! {
+ pub static ref NAME_PATTERN_REG: Regex = Regex::new(&format!("^{NAME_PATTERN}$")).unwrap();
+}
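+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ // An illustrative check of the anchored pattern (a sketch; the actual call sites
+ // live elsewhere): identifier-like names match, while names starting with a digit
+ // or containing spaces do not.
+ #[test]
+ fn test_name_pattern_reg() {
+ assert!(NAME_PATTERN_REG.is_match("greptime_table"));
+ assert!(NAME_PATTERN_REG.is_match("metric:count-1"));
+ assert!(!NAME_PATTERN_REG.is_match("9starts_with_digit"));
+ assert!(!NAME_PATTERN_REG.is_match("has space"));
+ }
+}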
diff --git a/src/common/catalog/Cargo.toml b/src/common/catalog/Cargo.toml
index 051675fe93..357f180a33 100644
--- a/src/common/catalog/Cargo.toml
+++ b/src/common/catalog/Cargo.toml
@@ -8,5 +8,6 @@ license.workspace = true
workspace = true
[dependencies]
+const_format.workspace = true
[dev-dependencies]
diff --git a/src/common/catalog/build.rs b/src/common/catalog/build.rs
new file mode 100644
index 0000000000..311d6eef3f
--- /dev/null
+++ b/src/common/catalog/build.rs
@@ -0,0 +1,27 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+fn main() {
+ // Set DEFAULT_CATALOG_NAME from the environment variable, or fall back to the default value.
+ let default_catalog_name =
+ std::env::var("DEFAULT_CATALOG_NAME").unwrap_or_else(|_| "greptime".to_string());
+
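+ // For example (illustrative): `DEFAULT_CATALOG_NAME=acme cargo build` bakes "acme"
+ // into the binary instead of the default "greptime".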
+ println!(
+ "cargo:rustc-env=DEFAULT_CATALOG_NAME={}",
+ default_catalog_name
+ );
+
+ // Rerun the build script if the environment variable changes.
+ println!("cargo:rerun-if-env-changed=DEFAULT_CATALOG_NAME");
+}
diff --git a/src/common/catalog/src/consts.rs b/src/common/catalog/src/consts.rs
index 2bc5db9824..1cd5db8a0c 100644
--- a/src/common/catalog/src/consts.rs
+++ b/src/common/catalog/src/consts.rs
@@ -12,13 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use const_format::concatcp;
+
pub const SYSTEM_CATALOG_NAME: &str = "system";
pub const INFORMATION_SCHEMA_NAME: &str = "information_schema";
pub const PG_CATALOG_NAME: &str = "pg_catalog";
pub const SYSTEM_CATALOG_TABLE_NAME: &str = "system_catalog";
-pub const DEFAULT_CATALOG_NAME: &str = "greptime";
+pub const DEFAULT_CATALOG_NAME: &str = env!("DEFAULT_CATALOG_NAME");
pub const DEFAULT_SCHEMA_NAME: &str = "public";
-pub const DEFAULT_PRIVATE_SCHEMA_NAME: &str = "greptime_private";
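+// With the default catalog name this evaluates to "greptime_private"; overriding
+// `DEFAULT_CATALOG_NAME` at build time (see build.rs) changes the prefix accordingly.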
+pub const DEFAULT_PRIVATE_SCHEMA_NAME: &str = concatcp!(DEFAULT_CATALOG_NAME, "_private");
/// Reserves [0,MIN_USER_FLOW_ID) for internal usage.
/// User defined table id starts from this value.
@@ -84,8 +86,6 @@ pub const INFORMATION_SCHEMA_TRIGGERS_TABLE_ID: u32 = 24;
pub const INFORMATION_SCHEMA_GLOBAL_STATUS_TABLE_ID: u32 = 25;
/// id for information_schema.SESSION_STATUS
pub const INFORMATION_SCHEMA_SESSION_STATUS_TABLE_ID: u32 = 26;
-/// id for information_schema.RUNTIME_METRICS
-pub const INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID: u32 = 27;
/// id for information_schema.PARTITIONS
pub const INFORMATION_SCHEMA_PARTITIONS_TABLE_ID: u32 = 28;
/// id for information_schema.REGION_PEERS
@@ -110,6 +110,8 @@ pub const INFORMATION_SCHEMA_SSTS_MANIFEST_TABLE_ID: u32 = 37;
pub const INFORMATION_SCHEMA_SSTS_STORAGE_TABLE_ID: u32 = 38;
/// id for information_schema.ssts_index_meta
pub const INFORMATION_SCHEMA_SSTS_INDEX_META_TABLE_ID: u32 = 39;
+/// id for information_schema.alerts
+pub const INFORMATION_SCHEMA_ALERTS_TABLE_ID: u32 = 40;
// ----- End of information_schema tables -----
@@ -150,4 +152,9 @@ pub const TRACE_TABLE_NAME_SESSION_KEY: &str = "trace_table_name";
pub fn trace_services_table_name(trace_table_name: &str) -> String {
format!("{}_services", trace_table_name)
}
+
+/// Generate the trace operations table name from the trace table name by adding `_operations` suffix.
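+///
+/// For example, a trace table named `my_traces` yields `my_traces_operations`.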
+pub fn trace_operations_table_name(trace_table_name: &str) -> String {
+ format!("{}_operations", trace_table_name)
+}
// ---- End of special table and fields ----
diff --git a/src/common/config/Cargo.toml b/src/common/config/Cargo.toml
index 1d2b21602f..b45c03a6c3 100644
--- a/src/common/config/Cargo.toml
+++ b/src/common/config/Cargo.toml
@@ -11,7 +11,6 @@ workspace = true
common-base.workspace = true
common-error.workspace = true
common-macro.workspace = true
-common-stat.workspace = true
config.workspace = true
humantime-serde.workspace = true
object-store.workspace = true
diff --git a/src/common/config/src/lib.rs b/src/common/config/src/lib.rs
index b806924217..cc25ebce16 100644
--- a/src/common/config/src/lib.rs
+++ b/src/common/config/src/lib.rs
@@ -14,7 +14,6 @@
pub mod config;
pub mod error;
-pub mod utils;
use std::time::Duration;
diff --git a/src/common/config/src/utils.rs b/src/common/config/src/utils.rs
deleted file mode 100644
index 1bc986b77e..0000000000
--- a/src/common/config/src/utils.rs
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use common_base::readable_size::ReadableSize;
-use common_stat::{get_total_cpu_millicores, get_total_memory_readable};
-
-/// `ResourceSpec` holds the static resource specifications of a node,
-/// such as CPU cores and memory capacity. These values are fixed
-/// at startup and do not change dynamically during runtime.
-#[derive(Debug, Clone, Copy)]
-pub struct ResourceSpec {
- pub cpus: i64,
- pub memory: Option<ReadableSize>,
-}
-
-impl Default for ResourceSpec {
- fn default() -> Self {
- Self {
- cpus: get_total_cpu_millicores(),
- memory: get_total_memory_readable(),
- }
- }
-}
diff --git a/src/common/datasource/Cargo.toml b/src/common/datasource/Cargo.toml
index 303d05ceb1..964f41736c 100644
--- a/src/common/datasource/Cargo.toml
+++ b/src/common/datasource/Cargo.toml
@@ -36,7 +36,7 @@ object_store_opendal.workspace = true
orc-rust = { version = "0.6.3", default-features = false, features = ["async"] }
parquet.workspace = true
paste.workspace = true
-regex = "1.7"
+regex.workspace = true
serde.workspace = true
snafu.workspace = true
strum.workspace = true
diff --git a/src/common/datasource/src/buffered_writer.rs b/src/common/datasource/src/buffered_writer.rs
index e1571b0187..953715b223 100644
--- a/src/common/datasource/src/buffered_writer.rs
+++ b/src/common/datasource/src/buffered_writer.rs
@@ -12,28 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::future::Future;
-
use arrow::record_batch::RecordBatch;
use async_trait::async_trait;
use datafusion::parquet::format::FileMetaData;
-use snafu::{OptionExt, ResultExt};
-use tokio::io::{AsyncWrite, AsyncWriteExt};
-use crate::error::{self, Result};
-use crate::share_buffer::SharedBuffer;
-
-pub struct LazyBufferedWriter<T, U, F> {
- path: String,
- writer_factory: F,
- writer: Option<T>,
- /// None stands for [`LazyBufferedWriter`] closed.
- encoder: Option<U>,
- buffer: SharedBuffer,
- rows_written: usize,
- bytes_written: u64,
- threshold: usize,
-}
+use crate::error::Result;
pub trait DfRecordBatchEncoder {
fn write(&mut self, batch: &RecordBatch) -> Result<()>;
@@ -43,126 +26,3 @@ pub trait DfRecordBatchEncoder {
pub trait ArrowWriterCloser {
async fn close(mut self) -> Result<FileMetaData>;
}
-
-impl<
- T: AsyncWrite + Send + Unpin,
- U: DfRecordBatchEncoder + ArrowWriterCloser,
- F: Fn(String) -> Fut,
- Fut: Future