Compare commits


13 Commits

Author SHA1 Message Date
Ruihang Xia
d4aa4159d4 feat: support windowed sort with where condition
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-11-04 19:34:03 +08:00
evenyag
960f6d821b feat: spawn block write wal 2024-11-04 17:35:12 +08:00
Ruihang Xia
9c5d044238 Merge branch 'main' into transform-count-min-max
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-11-01 17:45:28 +08:00
Ruihang Xia
70c354eed6 fix: the way to retrieve time index column
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-11-01 12:10:12 +08:00
Ruihang Xia
23bf663d58 feat: handle sort that wont preserving partition
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-10-31 22:13:36 +08:00
Ruihang Xia
817648eac5 Merge branch 'main' into transform-count-min-max
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-10-31 15:38:12 +08:00
Ruihang Xia
03b29439e2 Merge branch 'main' into transform-count-min-max
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-09-11 11:09:07 +08:00
Ruihang Xia
712f4ca0ef try sort partial commutative
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-09-09 21:08:59 +08:00
Ruihang Xia
60bacff57e ignore unmatched left and right greater
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-09-08 11:12:21 +08:00
Ruihang Xia
6208772ba4 Merge branch 'main' into transform-count-min-max
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-09-08 11:02:04 +08:00
Ruihang Xia
67184c0498 Merge branch 'main' into transform-count-min-max
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-09-05 14:30:47 +08:00
Ruihang Xia
1dd908fdf7 handle group by
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-09-05 12:50:13 +08:00
Ruihang Xia
8179b4798e feat: support transforming min/max/count aggr fn
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
2024-09-04 22:17:31 +08:00
2672 changed files with 98706 additions and 383544 deletions


@@ -2,16 +2,4 @@
linker = "aarch64-linux-gnu-gcc" linker = "aarch64-linux-gnu-gcc"
[alias] [alias]
sqlness = "run --bin sqlness-runner --target-dir target/sqlness --" sqlness = "run --bin sqlness-runner --"
[unstable.git]
shallow_index = true
shallow_deps = true
[unstable.gitoxide]
fetch = true
checkout = true
list_files = true
internal_use_git2 = false
[env]
CARGO_WORKSPACE_DIR = { value = "", relative = true }

.coderabbit.yaml (new file, 15 changed lines)

@@ -0,0 +1,15 @@
# yaml-language-server: $schema=https://coderabbit.ai/integrations/schema.v2.json
language: "en-US"
early_access: false
reviews:
profile: "chill"
request_changes_workflow: false
high_level_summary: true
poem: true
review_status: true
collapse_walkthrough: false
auto_review:
enabled: false
drafts: false
chat:
auto_reply: true

.github/CODEOWNERS (24 changed lines)

@@ -4,24 +4,24 @@
* @GreptimeTeam/db-approver * @GreptimeTeam/db-approver
## [Module] Database Engine ## [Module] Databse Engine
/src/index @evenyag @discord9 @WenyXu /src/index @zhongzc
/src/mito2 @evenyag @v0y4g3r @waynexia /src/mito2 @evenyag @v0y4g3r @waynexia
/src/query @evenyag @waynexia @discord9 /src/query @evenyag
## [Module] Distributed ## [Module] Distributed
/src/common/meta @MichaelScofield @WenyXu /src/common/meta @MichaelScofield
/src/common/procedure @MichaelScofield @WenyXu /src/common/procedure @MichaelScofield
/src/meta-client @MichaelScofield @WenyXu /src/meta-client @MichaelScofield
/src/meta-srv @MichaelScofield @WenyXu /src/meta-srv @MichaelScofield
## [Module] Write Ahead Log ## [Module] Write Ahead Log
/src/log-store @v0y4g3r @WenyXu /src/log-store @v0y4g3r
/src/store-api @v0y4g3r @evenyag /src/store-api @v0y4g3r
## [Module] Metrics Engine ## [Module] Metrics Engine
/src/metric-engine @waynexia @WenyXu /src/metric-engine @waynexia
/src/promql @waynexia @evenyag @discord9 /src/promql @waynexia
## [Module] Flow ## [Module] Flow
/src/flow @discord9 @waynexia /src/flow @zhongzc @waynexia


@@ -41,14 +41,7 @@ runs:
username: ${{ inputs.dockerhub-image-registry-username }} username: ${{ inputs.dockerhub-image-registry-username }}
password: ${{ inputs.dockerhub-image-registry-token }} password: ${{ inputs.dockerhub-image-registry-token }}
- name: Set up qemu for multi-platform builds - name: Build and push dev-builder-ubuntu image
uses: docker/setup-qemu-action@v3
with:
platforms: linux/amd64,linux/arm64
# The latest version will lead to segmentation fault.
image: tonistiigi/binfmt:qemu-v7.0.0-28
- name: Build and push dev-builder-ubuntu image # Build image for amd64 and arm64 platform.
shell: bash shell: bash
if: ${{ inputs.build-dev-builder-ubuntu == 'true' }} if: ${{ inputs.build-dev-builder-ubuntu == 'true' }}
run: | run: |
@@ -59,7 +52,7 @@ runs:
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \ IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
DEV_BUILDER_IMAGE_TAG=${{ inputs.version }} DEV_BUILDER_IMAGE_TAG=${{ inputs.version }}
- name: Build and push dev-builder-centos image # Only build image for amd64 platform. - name: Build and push dev-builder-centos image
shell: bash shell: bash
if: ${{ inputs.build-dev-builder-centos == 'true' }} if: ${{ inputs.build-dev-builder-centos == 'true' }}
run: | run: |
@@ -76,7 +69,8 @@ runs:
run: | run: |
make dev-builder \ make dev-builder \
BASE_IMAGE=android \ BASE_IMAGE=android \
BUILDX_MULTI_PLATFORM_BUILD=amd64 \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \ IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \ IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
DEV_BUILDER_IMAGE_TAG=${{ inputs.version }} DEV_BUILDER_IMAGE_TAG=${{ inputs.version }} && \
docker push ${{ inputs.dockerhub-image-registry }}/${{ inputs.dockerhub-image-namespace }}/dev-builder-android:${{ inputs.version }}


@@ -32,23 +32,9 @@ inputs:
description: Image Registry description: Image Registry
required: false required: false
default: 'docker.io' default: 'docker.io'
large-page-size:
description: Build GreptimeDB with large page size (65536).
required: false
default: 'false'
runs: runs:
using: composite using: composite
steps: steps:
- name: Set extra build environment variables
shell: bash
run: |
if [[ '${{ inputs.large-page-size }}' == 'true' ]]; then
echo 'EXTRA_BUILD_ENVS="JEMALLOC_SYS_WITH_LG_PAGE=16"' >> $GITHUB_ENV
else
echo 'EXTRA_BUILD_ENVS=' >> $GITHUB_ENV
fi
- name: Build greptime binary - name: Build greptime binary
shell: bash shell: bash
if: ${{ inputs.build-android-artifacts == 'false' }} if: ${{ inputs.build-android-artifacts == 'false' }}
@@ -59,8 +45,7 @@ runs:
FEATURES=${{ inputs.features }} \ FEATURES=${{ inputs.features }} \
BASE_IMAGE=${{ inputs.base-image }} \ BASE_IMAGE=${{ inputs.base-image }} \
IMAGE_NAMESPACE=${{ inputs.image-namespace }} \ IMAGE_NAMESPACE=${{ inputs.image-namespace }} \
IMAGE_REGISTRY=${{ inputs.image-registry }} \ IMAGE_REGISTRY=${{ inputs.image-registry }}
EXTRA_BUILD_ENVS=$EXTRA_BUILD_ENVS
- name: Upload artifacts - name: Upload artifacts
uses: ./.github/actions/upload-artifacts uses: ./.github/actions/upload-artifacts
@@ -69,7 +54,7 @@ runs:
PROFILE_TARGET: ${{ inputs.cargo-profile == 'dev' && 'debug' || inputs.cargo-profile }} PROFILE_TARGET: ${{ inputs.cargo-profile == 'dev' && 'debug' || inputs.cargo-profile }}
with: with:
artifacts-dir: ${{ inputs.artifacts-dir }} artifacts-dir: ${{ inputs.artifacts-dir }}
target-files: ./target/$PROFILE_TARGET/greptime target-file: ./target/$PROFILE_TARGET/greptime
version: ${{ inputs.version }} version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }} working-dir: ${{ inputs.working-dir }}
@@ -87,6 +72,6 @@ runs:
if: ${{ inputs.build-android-artifacts == 'true' }} if: ${{ inputs.build-android-artifacts == 'true' }}
with: with:
artifacts-dir: ${{ inputs.artifacts-dir }} artifacts-dir: ${{ inputs.artifacts-dir }}
target-files: ./target/aarch64-linux-android/release/greptime target-file: ./target/aarch64-linux-android/release/greptime
version: ${{ inputs.version }} version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }} working-dir: ${{ inputs.working-dir }}


@@ -34,8 +34,8 @@ inputs:
required: true required: true
push-latest-tag: push-latest-tag:
description: Whether to push the latest tag description: Whether to push the latest tag
required: true required: false
default: 'false' default: 'true'
runs: runs:
using: composite using: composite
steps: steps:
@@ -47,11 +47,7 @@ runs:
password: ${{ inputs.image-registry-password }} password: ${{ inputs.image-registry-password }}
- name: Set up qemu for multi-platform builds - name: Set up qemu for multi-platform builds
uses: docker/setup-qemu-action@v3 uses: docker/setup-qemu-action@v2
with:
platforms: linux/amd64,linux/arm64
# The latest version will lead to segmentation fault.
image: tonistiigi/binfmt:qemu-v7.0.0-28
- name: Set up buildx - name: Set up buildx
uses: docker/setup-buildx-action@v2 uses: docker/setup-buildx-action@v2


@@ -22,8 +22,8 @@ inputs:
required: true required: true
push-latest-tag: push-latest-tag:
description: Whether to push the latest tag description: Whether to push the latest tag
required: true required: false
default: 'false' default: 'true'
dev-mode: dev-mode:
description: Enable dev mode, only build standard greptime description: Enable dev mode, only build standard greptime
required: false required: false
@@ -41,8 +41,8 @@ runs:
image-name: ${{ inputs.image-name }} image-name: ${{ inputs.image-name }}
image-tag: ${{ inputs.version }} image-tag: ${{ inputs.version }}
docker-file: docker/ci/ubuntu/Dockerfile docker-file: docker/ci/ubuntu/Dockerfile
amd64-artifact-name: greptime-linux-amd64-${{ inputs.version }} amd64-artifact-name: greptime-linux-amd64-pyo3-${{ inputs.version }}
arm64-artifact-name: greptime-linux-arm64-${{ inputs.version }} arm64-artifact-name: greptime-linux-arm64-pyo3-${{ inputs.version }}
platforms: linux/amd64,linux/arm64 platforms: linux/amd64,linux/arm64
push-latest-tag: ${{ inputs.push-latest-tag }} push-latest-tag: ${{ inputs.push-latest-tag }}


@@ -27,10 +27,6 @@ inputs:
description: Working directory to build the artifacts description: Working directory to build the artifacts
required: false required: false
default: . default: .
large-page-size:
description: Build GreptimeDB with large page size (65536).
required: false
default: 'false'
runs: runs:
using: composite using: composite
steps: steps:
@@ -52,7 +48,20 @@ runs:
path: /tmp/greptime-*.log path: /tmp/greptime-*.log
retention-days: 3 retention-days: 3
- name: Build greptime # Builds standard greptime binary - name: Build standard greptime
uses: ./.github/actions/build-greptime-binary
with:
base-image: ubuntu
features: pyo3_backend,servers/dashboard
cargo-profile: ${{ inputs.cargo-profile }}
artifacts-dir: greptime-linux-${{ inputs.arch }}-pyo3-${{ inputs.version }}
version: ${{ inputs.version }}
working-dir: ${{ inputs.working-dir }}
image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }}
- name: Build greptime without pyo3
if: ${{ inputs.dev-mode == 'false' }}
uses: ./.github/actions/build-greptime-binary uses: ./.github/actions/build-greptime-binary
with: with:
base-image: ubuntu base-image: ubuntu
@@ -63,7 +72,6 @@ runs:
working-dir: ${{ inputs.working-dir }} working-dir: ${{ inputs.working-dir }}
image-registry: ${{ inputs.image-registry }} image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }} image-namespace: ${{ inputs.image-namespace }}
large-page-size: ${{ inputs.large-page-size }}
- name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of last build. - name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of last build.
shell: bash shell: bash
@@ -82,7 +90,6 @@ runs:
working-dir: ${{ inputs.working-dir }} working-dir: ${{ inputs.working-dir }}
image-registry: ${{ inputs.image-registry }} image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }} image-namespace: ${{ inputs.image-namespace }}
large-page-size: ${{ inputs.large-page-size }}
- name: Build greptime on android base image - name: Build greptime on android base image
uses: ./.github/actions/build-greptime-binary uses: ./.github/actions/build-greptime-binary
@@ -95,4 +102,3 @@ runs:
build-android-artifacts: true build-android-artifacts: true
image-registry: ${{ inputs.image-registry }} image-registry: ${{ inputs.image-registry }}
image-namespace: ${{ inputs.image-namespace }} image-namespace: ${{ inputs.image-namespace }}
large-page-size: ${{ inputs.large-page-size }}


@@ -90,5 +90,5 @@ runs:
uses: ./.github/actions/upload-artifacts uses: ./.github/actions/upload-artifacts
with: with:
artifacts-dir: ${{ inputs.artifacts-dir }} artifacts-dir: ${{ inputs.artifacts-dir }}
target-files: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
version: ${{ inputs.version }} version: ${{ inputs.version }}


@@ -33,6 +33,15 @@ runs:
- name: Rust Cache - name: Rust Cache
uses: Swatinem/rust-cache@v2 uses: Swatinem/rust-cache@v2
- name: Install Python
uses: actions/setup-python@v5
with:
python-version: "3.10"
- name: Install PyArrow Package
shell: pwsh
run: pip install pyarrow numpy
- name: Install WSL distribution - name: Install WSL distribution
uses: Vampire/setup-wsl@v2 uses: Vampire/setup-wsl@v2
with: with:
@@ -47,6 +56,7 @@ runs:
shell: pwsh shell: pwsh
run: make test sqlness-test run: make test sqlness-test
env: env:
RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
RUST_BACKTRACE: 1 RUST_BACKTRACE: 1
SQLNESS_OPTS: "--preserve-state" SQLNESS_OPTS: "--preserve-state"
@@ -66,5 +76,5 @@ runs:
uses: ./.github/actions/upload-artifacts uses: ./.github/actions/upload-artifacts
with: with:
artifacts-dir: ${{ inputs.artifacts-dir }} artifacts-dir: ${{ inputs.artifacts-dir }}
target-files: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime,target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime.pdb target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
version: ${{ inputs.version }} version: ${{ inputs.version }}


@@ -9,8 +9,8 @@ runs:
steps: steps:
# Download artifacts from previous jobs, the artifacts will be downloaded to: # Download artifacts from previous jobs, the artifacts will be downloaded to:
# ${WORKING_DIR} # ${WORKING_DIR}
# |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz # |- greptime-darwin-amd64-pyo3-v0.5.0/greptime-darwin-amd64-pyo3-v0.5.0.tar.gz
# |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum # |- greptime-darwin-amd64-pyo3-v0.5.0.sha256sum/greptime-darwin-amd64-pyo3-v0.5.0.sha256sum
# |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz # |- greptime-darwin-amd64-v0.5.0/greptime-darwin-amd64-v0.5.0.tar.gz
# |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum # |- greptime-darwin-amd64-v0.5.0.sha256sum/greptime-darwin-amd64-v0.5.0.sha256sum
# ... # ...


@@ -51,8 +51,8 @@ inputs:
required: true required: true
upload-to-s3: upload-to-s3:
description: Upload to S3 description: Upload to S3
required: true required: false
default: 'false' default: 'true'
artifacts-dir: artifacts-dir:
description: Directory to store artifacts description: Directory to store artifacts
required: false required: false
@@ -64,11 +64,11 @@ inputs:
upload-max-retry-times: upload-max-retry-times:
description: Max retry times for uploading artifacts to S3 description: Max retry times for uploading artifacts to S3
required: false required: false
default: "30" default: "20"
upload-retry-timeout: upload-retry-timeout:
description: Timeout for uploading artifacts to S3 description: Timeout for uploading artifacts to S3
required: false required: false
default: "120" # minutes default: "30" # minutes
runs: runs:
using: composite using: composite
steps: steps:
@@ -77,21 +77,13 @@ runs:
with: with:
path: ${{ inputs.artifacts-dir }} path: ${{ inputs.artifacts-dir }}
- name: Install s5cmd
shell: bash
run: |
wget https://github.com/peak/s5cmd/releases/download/v2.3.0/s5cmd_2.3.0_Linux-64bit.tar.gz
tar -xzf s5cmd_2.3.0_Linux-64bit.tar.gz
sudo mv s5cmd /usr/local/bin/
sudo chmod +x /usr/local/bin/s5cmd
- name: Release artifacts to cn region - name: Release artifacts to cn region
uses: nick-invision/retry@v2 uses: nick-invision/retry@v2
if: ${{ inputs.upload-to-s3 == 'true' }} if: ${{ inputs.upload-to-s3 == 'true' }}
env: env:
AWS_ACCESS_KEY_ID: ${{ inputs.aws-cn-access-key-id }} AWS_ACCESS_KEY_ID: ${{ inputs.aws-cn-access-key-id }}
AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-cn-secret-access-key }} AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-cn-secret-access-key }}
AWS_REGION: ${{ inputs.aws-cn-region }} AWS_DEFAULT_REGION: ${{ inputs.aws-cn-region }}
UPDATE_VERSION_INFO: ${{ inputs.update-version-info }} UPDATE_VERSION_INFO: ${{ inputs.update-version-info }}
with: with:
max_attempts: ${{ inputs.upload-max-retry-times }} max_attempts: ${{ inputs.upload-max-retry-times }}


@@ -24,9 +24,4 @@ runs:
--set auth.rbac.token.enabled=false \ --set auth.rbac.token.enabled=false \
--set persistence.size=2Gi \ --set persistence.size=2Gi \
--create-namespace \ --create-namespace \
--set global.security.allowInsecureImages=true \
--set image.registry=docker.io \
--set image.repository=greptime/etcd \
--set image.tag=3.6.1-debian-12-r3 \
--version 12.0.8 \
-n ${{ inputs.namespace }} -n ${{ inputs.namespace }}


@@ -8,7 +8,7 @@ inputs:
default: 2 default: 2
description: "Number of Datanode replicas" description: "Number of Datanode replicas"
meta-replicas: meta-replicas:
default: 2 default: 3
description: "Number of Metasrv replicas" description: "Number of Metasrv replicas"
image-registry: image-registry:
default: "docker.io" default: "docker.io"
@@ -51,15 +51,15 @@ runs:
run: | run: |
helm upgrade \ helm upgrade \
--install my-greptimedb \ --install my-greptimedb \
--set meta.backendStorage.etcd.endpoints=${{ inputs.etcd-endpoints }} \ --set meta.etcdEndpoints=${{ inputs.etcd-endpoints }} \
--set meta.enableRegionFailover=${{ inputs.enable-region-failover }} \ --set meta.enableRegionFailover=${{ inputs.enable-region-failover }} \
--set image.registry=${{ inputs.image-registry }} \ --set image.registry=${{ inputs.image-registry }} \
--set image.repository=${{ inputs.image-repository }} \ --set image.repository=${{ inputs.image-repository }} \
--set image.tag=${{ inputs.image-tag }} \ --set image.tag=${{ inputs.image-tag }} \
--set base.podTemplate.main.resources.requests.cpu=50m \ --set base.podTemplate.main.resources.requests.cpu=50m \
--set base.podTemplate.main.resources.requests.memory=256Mi \ --set base.podTemplate.main.resources.requests.memory=256Mi \
--set base.podTemplate.main.resources.limits.cpu=2000m \ --set base.podTemplate.main.resources.limits.cpu=1000m \
--set base.podTemplate.main.resources.limits.memory=3Gi \ --set base.podTemplate.main.resources.limits.memory=2Gi \
--set frontend.replicas=${{ inputs.frontend-replicas }} \ --set frontend.replicas=${{ inputs.frontend-replicas }} \
--set datanode.replicas=${{ inputs.datanode-replicas }} \ --set datanode.replicas=${{ inputs.datanode-replicas }} \
--set meta.replicas=${{ inputs.meta-replicas }} \ --set meta.replicas=${{ inputs.meta-replicas }} \


@@ -5,7 +5,7 @@ meta:
[datanode] [datanode]
[datanode.client] [datanode.client]
timeout = "120s" timeout = "60s"
datanode: datanode:
configData: |- configData: |-
[runtime] [runtime]
@@ -21,7 +21,7 @@ frontend:
global_rt_size = 4 global_rt_size = 4
[meta_client] [meta_client]
ddl_timeout = "120s" ddl_timeout = "60s"
objectStorage: objectStorage:
s3: s3:
bucket: default bucket: default


@@ -5,7 +5,7 @@ meta:
[datanode] [datanode]
[datanode.client] [datanode.client]
timeout = "120s" timeout = "60s"
datanode: datanode:
configData: |- configData: |-
[runtime] [runtime]
@@ -17,7 +17,7 @@ frontend:
global_rt_size = 4 global_rt_size = 4
[meta_client] [meta_client]
ddl_timeout = "120s" ddl_timeout = "60s"
objectStorage: objectStorage:
s3: s3:
bucket: default bucket: default


@@ -1,8 +1,3 @@
logging:
level: "info"
format: "json"
filters:
- log_store=debug
meta: meta:
configData: |- configData: |-
[runtime] [runtime]
@@ -12,12 +7,11 @@ meta:
provider = "kafka" provider = "kafka"
broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"] broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
num_topics = 3 num_topics = 3
auto_prune_interval = "30s"
trigger_flush_threshold = 100
[datanode] [datanode]
[datanode.client] [datanode.client]
timeout = "120s" timeout = "60s"
datanode: datanode:
configData: |- configData: |-
[runtime] [runtime]
@@ -27,14 +21,14 @@ datanode:
[wal] [wal]
provider = "kafka" provider = "kafka"
broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"] broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
overwrite_entry_start_id = true linger = "2ms"
frontend: frontend:
configData: |- configData: |-
[runtime] [runtime]
global_rt_size = 4 global_rt_size = 4
[meta_client] [meta_client]
ddl_timeout = "120s" ddl_timeout = "60s"
objectStorage: objectStorage:
s3: s3:
bucket: default bucket: default


@@ -18,13 +18,7 @@ runs:
--set controller.replicaCount=${{ inputs.controller-replicas }} \ --set controller.replicaCount=${{ inputs.controller-replicas }} \
--set controller.resources.requests.cpu=50m \ --set controller.resources.requests.cpu=50m \
--set controller.resources.requests.memory=128Mi \ --set controller.resources.requests.memory=128Mi \
--set controller.resources.limits.cpu=2000m \
--set controller.resources.limits.memory=2Gi \
--set listeners.controller.protocol=PLAINTEXT \ --set listeners.controller.protocol=PLAINTEXT \
--set listeners.client.protocol=PLAINTEXT \ --set listeners.client.protocol=PLAINTEXT \
--create-namespace \ --create-namespace \
--set image.registry=docker.io \
--set image.repository=greptime/kafka \
--set image.tag=3.9.0-debian-12-r1 \
--version 31.0.0 \
-n ${{ inputs.namespace }} -n ${{ inputs.namespace }}


@@ -6,7 +6,9 @@ inputs:
description: "Number of PostgreSQL replicas" description: "Number of PostgreSQL replicas"
namespace: namespace:
default: "postgres-namespace" default: "postgres-namespace"
description: "The PostgreSQL namespace" postgres-version:
default: "14.2"
description: "PostgreSQL version"
storage-size: storage-size:
default: "1Gi" default: "1Gi"
description: "Storage size for PostgreSQL" description: "Storage size for PostgreSQL"
@@ -20,11 +22,7 @@ runs:
helm upgrade \ helm upgrade \
--install postgresql oci://registry-1.docker.io/bitnamicharts/postgresql \ --install postgresql oci://registry-1.docker.io/bitnamicharts/postgresql \
--set replicaCount=${{ inputs.postgres-replicas }} \ --set replicaCount=${{ inputs.postgres-replicas }} \
--set global.security.allowInsecureImages=true \ --set image.tag=${{ inputs.postgres-version }} \
--set image.registry=docker.io \
--set image.repository=greptime/postgresql \
--set image.tag=17.5.0-debian-12-r3 \
--version 16.7.4 \
--set persistence.size=${{ inputs.storage-size }} \ --set persistence.size=${{ inputs.storage-size }} \
--set postgresql.username=greptimedb \ --set postgresql.username=greptimedb \
--set postgresql.password=admin \ --set postgresql.password=admin \


@@ -56,7 +56,7 @@ runs:
- name: Start EC2 runner - name: Start EC2 runner
if: startsWith(inputs.runner, 'ec2') if: startsWith(inputs.runner, 'ec2')
uses: machulav/ec2-github-runner@v2.3.8 uses: machulav/ec2-github-runner@v2
id: start-linux-arm64-ec2-runner id: start-linux-arm64-ec2-runner
with: with:
mode: start mode: start


@@ -33,7 +33,7 @@ runs:
- name: Stop EC2 runner - name: Stop EC2 runner
if: ${{ inputs.label && inputs.ec2-instance-id }} if: ${{ inputs.label && inputs.ec2-instance-id }}
uses: machulav/ec2-github-runner@v2.3.8 uses: machulav/ec2-github-runner@v2
with: with:
mode: stop mode: stop
label: ${{ inputs.label }} label: ${{ inputs.label }}


@@ -4,8 +4,8 @@ inputs:
artifacts-dir: artifacts-dir:
description: Directory to store artifacts description: Directory to store artifacts
required: true required: true
target-files: target-file:
description: The multiple target files to upload, separated by comma description: The path of the target artifact
required: false required: false
version: version:
description: Version of the artifact description: Version of the artifact
@@ -18,21 +18,17 @@ runs:
using: composite using: composite
steps: steps:
- name: Create artifacts directory - name: Create artifacts directory
if: ${{ inputs.target-files != '' }} if: ${{ inputs.target-file != '' }}
working-directory: ${{ inputs.working-dir }} working-directory: ${{ inputs.working-dir }}
shell: bash shell: bash
run: | run: |
set -e mkdir -p ${{ inputs.artifacts-dir }} && \
mkdir -p ${{ inputs.artifacts-dir }} cp ${{ inputs.target-file }} ${{ inputs.artifacts-dir }}
IFS=',' read -ra FILES <<< "${{ inputs.target-files }}"
for file in "${FILES[@]}"; do
cp "$file" ${{ inputs.artifacts-dir }}/
done
# The compressed artifacts will use the following layout: # The compressed artifacts will use the following layout:
# greptime-linux-amd64-v0.3.0sha256sum # greptime-linux-amd64-pyo3-v0.3.0sha256sum
# greptime-linux-amd64-v0.3.0.tar.gz # greptime-linux-amd64-pyo3-v0.3.0.tar.gz
# greptime-linux-amd64-v0.3.0 # greptime-linux-amd64-pyo3-v0.3.0
# └── greptime # └── greptime
- name: Compress artifacts and calculate checksum - name: Compress artifacts and calculate checksum
working-directory: ${{ inputs.working-dir }} working-directory: ${{ inputs.working-dir }}


@@ -1,3 +0,0 @@
native-tls
openssl
aws-lc-sys

.github/labeler.yaml (15 changed lines)

@@ -1,15 +0,0 @@
ci:
- changed-files:
- any-glob-to-any-file: .github/**
docker:
- changed-files:
- any-glob-to-any-file: docker/**
documentation:
- changed-files:
- any-glob-to-any-file: docs/**
dashboard:
- changed-files:
- any-glob-to-any-file: grafana/**


@@ -4,8 +4,7 @@ I hereby agree to the terms of the [GreptimeDB CLA](https://github.com/GreptimeT
## What's changed and what's your intention? ## What's changed and what's your intention?
<!-- __!!! DO NOT LEAVE THIS BLOCK EMPTY !!!__
__!!! DO NOT LEAVE THIS BLOCK EMPTY !!!__
Please explain IN DETAIL what the changes are in this PR and why they are needed: Please explain IN DETAIL what the changes are in this PR and why they are needed:
@@ -13,14 +12,9 @@ Please explain IN DETAIL what the changes are in this PR and why they are needed
- How does this PR work? Need a brief introduction for the changed logic (optional) - How does this PR work? Need a brief introduction for the changed logic (optional)
- Describe clearly one logical change and avoid lazy messages (optional) - Describe clearly one logical change and avoid lazy messages (optional)
- Describe any limitations of the current code (optional) - Describe any limitations of the current code (optional)
- Describe if this PR will break **API or data compatibility** (optional)
-->
## PR Checklist ## Checklist
Please convert it to a draft if some of the following conditions are not met.
- [ ] I have written the necessary rustdoc comments. - [ ] I have written the necessary rustdoc comments.
- [ ] I have added the necessary unit tests and integration tests. - [ ] I have added the necessary unit tests and integration tests.
- [ ] This PR requires documentation updates. - [ ] This PR requires documentation updates.
- [ ] API changes are backward compatible.
- [ ] Schema or data changes are backward compatible.


@@ -1,14 +0,0 @@
#!/bin/sh
set -e
# Get the latest version of github.com/GreptimeTeam/greptimedb
VERSION=$(curl -s https://api.github.com/repos/GreptimeTeam/greptimedb/releases/latest | jq -r '.tag_name')
echo "Downloading the latest version: $VERSION"
# Download the install script
curl -fsSL https://raw.githubusercontent.com/greptimeteam/greptimedb/main/scripts/install.sh | sh -s $VERSION
# Execute the `greptime` command
./greptime --version


@@ -1,42 +0,0 @@
#!/bin/bash
# Get current version
CURRENT_VERSION=$1
if [ -z "$CURRENT_VERSION" ]; then
echo "Error: Failed to get current version"
exit 1
fi
# Get the latest version from GitHub Releases
API_RESPONSE=$(curl -s "https://api.github.com/repos/GreptimeTeam/greptimedb/releases/latest")
if [ -z "$API_RESPONSE" ] || [ "$(echo "$API_RESPONSE" | jq -r '.message')" = "Not Found" ]; then
echo "Error: Failed to fetch latest version from GitHub"
exit 1
fi
# Get the latest version
LATEST_VERSION=$(echo "$API_RESPONSE" | jq -r '.tag_name')
if [ -z "$LATEST_VERSION" ] || [ "$LATEST_VERSION" = "null" ]; then
echo "Error: No valid version found in GitHub releases"
exit 1
fi
# Cleaned up version number format (removed possible 'v' prefix and -nightly suffix)
CLEAN_CURRENT=$(echo "$CURRENT_VERSION" | sed 's/^v//' | sed 's/-nightly-.*//')
CLEAN_LATEST=$(echo "$LATEST_VERSION" | sed 's/^v//' | sed 's/-nightly-.*//')
echo "Current version: $CLEAN_CURRENT"
echo "Latest release version: $CLEAN_LATEST"
# Use sort -V to compare versions
HIGHER_VERSION=$(printf "%s\n%s" "$CLEAN_CURRENT" "$CLEAN_LATEST" | sort -V | tail -n1)
if [ "$HIGHER_VERSION" = "$CLEAN_CURRENT" ]; then
echo "Current version ($CLEAN_CURRENT) is NEWER than or EQUAL to latest ($CLEAN_LATEST)"
echo "is-current-version-latest=true" >> $GITHUB_OUTPUT
else
echo "Current version ($CLEAN_CURRENT) is OLDER than latest ($CLEAN_LATEST)"
echo "is-current-version-latest=false" >> $GITHUB_OUTPUT
fi


@@ -8,25 +8,24 @@ set -e
# - If it's a nightly build, the version is 'nightly-YYYYMMDD-$(git rev-parse --short HEAD)', like 'nightly-20230712-e5b243c'. # - If it's a nightly build, the version is 'nightly-YYYYMMDD-$(git rev-parse --short HEAD)', like 'nightly-20230712-e5b243c'.
# create_version ${GIHUB_EVENT_NAME} ${NEXT_RELEASE_VERSION} ${NIGHTLY_RELEASE_PREFIX} # create_version ${GIHUB_EVENT_NAME} ${NEXT_RELEASE_VERSION} ${NIGHTLY_RELEASE_PREFIX}
function create_version() { function create_version() {
# Read from environment variables. # Read from envrionment variables.
if [ -z "$GITHUB_EVENT_NAME" ]; then if [ -z "$GITHUB_EVENT_NAME" ]; then
echo "GITHUB_EVENT_NAME is empty" >&2 echo "GITHUB_EVENT_NAME is empty"
exit 1 exit 1
fi fi
if [ -z "$NEXT_RELEASE_VERSION" ]; then if [ -z "$NEXT_RELEASE_VERSION" ]; then
echo "NEXT_RELEASE_VERSION is empty, use version from Cargo.toml" >&2 echo "NEXT_RELEASE_VERSION is empty"
# NOTE: Need a `v` prefix for the version string. exit 1
export NEXT_RELEASE_VERSION=v$(grep '^version = ' Cargo.toml | cut -d '"' -f 2 | head -n 1)
fi fi
if [ -z "$NIGHTLY_RELEASE_PREFIX" ]; then if [ -z "$NIGHTLY_RELEASE_PREFIX" ]; then
echo "NIGHTLY_RELEASE_PREFIX is empty" >&2 echo "NIGHTLY_RELEASE_PREFIX is empty"
exit 1 exit 1
fi fi
# Reuse $NEXT_RELEASE_VERSION to identify whether it's a nightly build. # Reuse $NEXT_RELEASE_VERSION to identify whether it's a nightly build.
# It will be like 'nightly-20230808-7d0d8dc6'. # It will be like 'nigtly-20230808-7d0d8dc6'.
if [ "$NEXT_RELEASE_VERSION" = nightly ]; then if [ "$NEXT_RELEASE_VERSION" = nightly ]; then
echo "$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")-$(git rev-parse --short HEAD)" echo "$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")-$(git rev-parse --short HEAD)"
exit 0 exit 0
@@ -36,7 +35,7 @@ function create_version() {
# It will be like 'dev-2023080819-f0e7216c'. # It will be like 'dev-2023080819-f0e7216c'.
if [ "$NEXT_RELEASE_VERSION" = dev ]; then if [ "$NEXT_RELEASE_VERSION" = dev ]; then
if [ -z "$COMMIT_SHA" ]; then if [ -z "$COMMIT_SHA" ]; then
echo "COMMIT_SHA is empty in dev build" >&2 echo "COMMIT_SHA is empty in dev build"
exit 1 exit 1
fi fi
echo "dev-$(date "+%Y%m%d-%s")-$(echo "$COMMIT_SHA" | cut -c1-8)" echo "dev-$(date "+%Y%m%d-%s")-$(echo "$COMMIT_SHA" | cut -c1-8)"
@@ -46,7 +45,7 @@ function create_version() {
# Note: Only output 'version=xxx' to stdout when everything is ok, so that it can be used in GitHub Actions Outputs. # Note: Only output 'version=xxx' to stdout when everything is ok, so that it can be used in GitHub Actions Outputs.
if [ "$GITHUB_EVENT_NAME" = push ]; then if [ "$GITHUB_EVENT_NAME" = push ]; then
if [ -z "$GITHUB_REF_NAME" ]; then if [ -z "$GITHUB_REF_NAME" ]; then
echo "GITHUB_REF_NAME is empty in push event" >&2 echo "GITHUB_REF_NAME is empty in push event"
exit 1 exit 1
fi fi
echo "$GITHUB_REF_NAME" echo "$GITHUB_REF_NAME"
@@ -55,15 +54,15 @@ function create_version() {
elif [ "$GITHUB_EVENT_NAME" = schedule ]; then elif [ "$GITHUB_EVENT_NAME" = schedule ]; then
echo "$NEXT_RELEASE_VERSION-$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")" echo "$NEXT_RELEASE_VERSION-$NIGHTLY_RELEASE_PREFIX-$(date "+%Y%m%d")"
else else
echo "Unsupported GITHUB_EVENT_NAME: $GITHUB_EVENT_NAME" >&2 echo "Unsupported GITHUB_EVENT_NAME: $GITHUB_EVENT_NAME"
exit 1 exit 1
fi fi
} }
# You can run as following examples: # You can run as following examples:
# GITHUB_EVENT_NAME=push NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly GITHUB_REF_NAME=v0.3.0 ./create-version.sh # GITHUB_EVENT_NAME=push NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigtly GITHUB_REF_NAME=v0.3.0 ./create-version.sh
# GITHUB_EVENT_NAME=workflow_dispatch NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh # GITHUB_EVENT_NAME=workflow_dispatch NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh # GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=v0.4.0 NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
# GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=nightly NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh # GITHUB_EVENT_NAME=schedule NEXT_RELEASE_VERSION=nightly NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
# GITHUB_EVENT_NAME=workflow_dispatch COMMIT_SHA=f0e7216c4bb6acce9b29a21ec2d683be2e3f984a NEXT_RELEASE_VERSION=dev NIGHTLY_RELEASE_PREFIX=nightly ./create-version.sh # GITHUB_EVENT_NAME=workflow_dispatch COMMIT_SHA=f0e7216c4bb6acce9b29a21ec2d683be2e3f984a NEXT_RELEASE_VERSION=dev NIGHTLY_RELEASE_PREFIX=nigtly ./create-version.sh
create_version create_version


@@ -3,18 +3,14 @@
set -e set -e
set -o pipefail set -o pipefail
KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.32.0}" KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.24.0}"
ENABLE_STANDALONE_MODE="${ENABLE_STANDALONE_MODE:-true}" ENABLE_STANDALONE_MODE="${ENABLE_STANDALONE_MODE:-true}"
DEFAULT_INSTALL_NAMESPACE=${DEFAULT_INSTALL_NAMESPACE:-default} DEFAULT_INSTALL_NAMESPACE=${DEFAULT_INSTALL_NAMESPACE:-default}
GREPTIMEDB_IMAGE_TAG=${GREPTIMEDB_IMAGE_TAG:-latest} GREPTIMEDB_IMAGE_TAG=${GREPTIMEDB_IMAGE_TAG:-latest}
GREPTIMEDB_OPERATOR_IMAGE_TAG=${GREPTIMEDB_OPERATOR_IMAGE_TAG:-v0.5.1}
GREPTIMEDB_INITIALIZER_IMAGE_TAG="${GREPTIMEDB_OPERATOR_IMAGE_TAG}"
GREPTIME_CHART="https://greptimeteam.github.io/helm-charts/"
ETCD_CHART="oci://registry-1.docker.io/bitnamicharts/etcd" ETCD_CHART="oci://registry-1.docker.io/bitnamicharts/etcd"
ETCD_CHART_VERSION="${ETCD_CHART_VERSION:-12.0.8}" GREPTIME_CHART="https://greptimeteam.github.io/helm-charts/"
ETCD_IMAGE_TAG="${ETCD_IMAGE_TAG:-3.6.1-debian-12-r3}"
# Create a cluster with 1 control-plane node and 5 workers. # Ceate a cluster with 1 control-plane node and 5 workers.
function create_kind_cluster() { function create_kind_cluster() {
cat <<EOF | kind create cluster --name "${CLUSTER}" --image kindest/node:"$KUBERNETES_VERSION" --config=- cat <<EOF | kind create cluster --name "${CLUSTER}" --image kindest/node:"$KUBERNETES_VERSION" --config=-
kind: Cluster kind: Cluster
@@ -39,16 +35,10 @@ function add_greptime_chart() {
function deploy_etcd_cluster() { function deploy_etcd_cluster() {
local namespace="$1" local namespace="$1"
helm upgrade --install etcd "$ETCD_CHART" \ helm install etcd "$ETCD_CHART" \
--version "$ETCD_CHART_VERSION" \
--create-namespace \
--set replicaCount=3 \ --set replicaCount=3 \
--set auth.rbac.create=false \ --set auth.rbac.create=false \
--set auth.rbac.token.enabled=false \ --set auth.rbac.token.enabled=false \
--set global.security.allowInsecureImages=true \
--set image.registry=docker.io \
--set image.repository=greptime/etcd \
--set image.tag="$ETCD_IMAGE_TAG" \
-n "$namespace" -n "$namespace"
# Wait for etcd cluster to be ready. # Wait for etcd cluster to be ready.
@@ -58,9 +48,8 @@ function deploy_etcd_cluster() {
# Deploy greptimedb-operator. # Deploy greptimedb-operator.
function deploy_greptimedb_operator() { function deploy_greptimedb_operator() {
# Use the latest chart and image. # Use the latest chart and image.
helm upgrade --install greptimedb-operator greptime/greptimedb-operator \ helm install greptimedb-operator greptime/greptimedb-operator \
--create-namespace \ --set image.tag=latest \
--set image.tag="$GREPTIMEDB_OPERATOR_IMAGE_TAG" \
-n "$DEFAULT_INSTALL_NAMESPACE" -n "$DEFAULT_INSTALL_NAMESPACE"
# Wait for greptimedb-operator to be ready. # Wait for greptimedb-operator to be ready.
@@ -77,12 +66,9 @@ function deploy_greptimedb_cluster() {
deploy_etcd_cluster "$install_namespace" deploy_etcd_cluster "$install_namespace"
helm upgrade --install "$cluster_name" greptime/greptimedb-cluster \ helm install "$cluster_name" greptime/greptimedb-cluster \
--create-namespace \
--set image.tag="$GREPTIMEDB_IMAGE_TAG" \ --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
--set initializer.tag="$GREPTIMEDB_INITIALIZER_IMAGE_TAG" \ --set meta.etcdEndpoints="etcd.$install_namespace:2379" \
--set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \
--set meta.backendStorage.etcd.storeKeyPrefix="$cluster_name" \
-n "$install_namespace" -n "$install_namespace"
# Wait for greptimedb cluster to be ready. # Wait for greptimedb cluster to be ready.
@@ -115,18 +101,15 @@ function deploy_greptimedb_cluster_with_s3_storage() {
deploy_etcd_cluster "$install_namespace" deploy_etcd_cluster "$install_namespace"
helm upgrade --install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \ helm install "$cluster_name" greptime/greptimedb-cluster -n "$install_namespace" \
--create-namespace \
--set image.tag="$GREPTIMEDB_IMAGE_TAG" \ --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
--set initializer.tag="$GREPTIMEDB_INITIALIZER_IMAGE_TAG" \ --set meta.etcdEndpoints="etcd.$install_namespace:2379" \
--set meta.backendStorage.etcd.endpoints="etcd.$install_namespace:2379" \ --set storage.s3.bucket="$AWS_CI_TEST_BUCKET" \
--set meta.backendStorage.etcd.storeKeyPrefix="$cluster_name" \ --set storage.s3.region="$AWS_REGION" \
--set objectStorage.s3.bucket="$AWS_CI_TEST_BUCKET" \ --set storage.s3.root="$DATA_ROOT" \
--set objectStorage.s3.region="$AWS_REGION" \ --set storage.credentials.secretName=s3-credentials \
--set objectStorage.s3.root="$DATA_ROOT" \ --set storage.credentials.accessKeyId="$AWS_ACCESS_KEY_ID" \
--set objectStorage.credentials.secretName=s3-credentials \ --set storage.credentials.secretAccessKey="$AWS_SECRET_ACCESS_KEY"
--set objectStorage.credentials.accessKeyId="$AWS_ACCESS_KEY_ID" \
--set objectStorage.credentials.secretAccessKey="$AWS_SECRET_ACCESS_KEY"
# Wait for greptimedb cluster to be ready. # Wait for greptimedb cluster to be ready.
while true; do while true; do
@@ -151,8 +134,7 @@ function deploy_greptimedb_cluster_with_s3_storage() {
# Deploy standalone greptimedb. # Deploy standalone greptimedb.
# It will expose cluster service ports as '34000', '34001', '34002', '34003' to local access. # It will expose cluster service ports as '34000', '34001', '34002', '34003' to local access.
function deploy_standalone_greptimedb() { function deploy_standalone_greptimedb() {
helm upgrade --install greptimedb-standalone greptime/greptimedb-standalone \ helm install greptimedb-standalone greptime/greptimedb-standalone \
--create-namespace \
--set image.tag="$GREPTIMEDB_IMAGE_TAG" \ --set image.tag="$GREPTIMEDB_IMAGE_TAG" \
-n "$DEFAULT_INSTALL_NAMESPACE" -n "$DEFAULT_INSTALL_NAMESPACE"

.github/scripts/package-lock.json (generated, 507 changed lines)

@@ -1,507 +0,0 @@
{
"name": "greptimedb-github-scripts",
"version": "1.0.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "greptimedb-github-scripts",
"version": "1.0.0",
"dependencies": {
"@octokit/rest": "^21.0.0",
"axios": "^1.7.0"
}
},
"node_modules/@octokit/auth-token": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/@octokit/auth-token/-/auth-token-5.1.2.tgz",
"integrity": "sha512-JcQDsBdg49Yky2w2ld20IHAlwr8d/d8N6NiOXbtuoPCqzbsiJgF633mVUw3x4mo0H5ypataQIX7SFu3yy44Mpw==",
"license": "MIT",
"engines": {
"node": ">= 18"
}
},
"node_modules/@octokit/core": {
"version": "6.1.6",
"resolved": "https://registry.npmjs.org/@octokit/core/-/core-6.1.6.tgz",
"integrity": "sha512-kIU8SLQkYWGp3pVKiYzA5OSaNF5EE03P/R8zEmmrG6XwOg5oBjXyQVVIauQ0dgau4zYhpZEhJrvIYt6oM+zZZA==",
"license": "MIT",
"dependencies": {
"@octokit/auth-token": "^5.0.0",
"@octokit/graphql": "^8.2.2",
"@octokit/request": "^9.2.3",
"@octokit/request-error": "^6.1.8",
"@octokit/types": "^14.0.0",
"before-after-hook": "^3.0.2",
"universal-user-agent": "^7.0.0"
},
"engines": {
"node": ">= 18"
}
},
"node_modules/@octokit/endpoint": {
"version": "10.1.4",
"resolved": "https://registry.npmjs.org/@octokit/endpoint/-/endpoint-10.1.4.tgz",
"integrity": "sha512-OlYOlZIsfEVZm5HCSR8aSg02T2lbUWOsCQoPKfTXJwDzcHQBrVBGdGXb89dv2Kw2ToZaRtudp8O3ZIYoaOjKlA==",
"license": "MIT",
"dependencies": {
"@octokit/types": "^14.0.0",
"universal-user-agent": "^7.0.2"
},
"engines": {
"node": ">= 18"
}
},
"node_modules/@octokit/graphql": {
"version": "8.2.2",
"resolved": "https://registry.npmjs.org/@octokit/graphql/-/graphql-8.2.2.tgz",
"integrity": "sha512-Yi8hcoqsrXGdt0yObxbebHXFOiUA+2v3n53epuOg1QUgOB6c4XzvisBNVXJSl8RYA5KrDuSL2yq9Qmqe5N0ryA==",
"license": "MIT",
"dependencies": {
"@octokit/request": "^9.2.3",
"@octokit/types": "^14.0.0",
"universal-user-agent": "^7.0.0"
},
"engines": {
"node": ">= 18"
}
},
"node_modules/@octokit/openapi-types": {
"version": "25.1.0",
"resolved": "https://registry.npmjs.org/@octokit/openapi-types/-/openapi-types-25.1.0.tgz",
"integrity": "sha512-idsIggNXUKkk0+BExUn1dQ92sfysJrje03Q0bv0e+KPLrvyqZF8MnBpFz8UNfYDwB3Ie7Z0TByjWfzxt7vseaA==",
"license": "MIT"
},
"node_modules/@octokit/plugin-paginate-rest": {
"version": "11.6.0",
"resolved": "https://registry.npmjs.org/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-11.6.0.tgz",
"integrity": "sha512-n5KPteiF7pWKgBIBJSk8qzoZWcUkza2O6A0za97pMGVrGfPdltxrfmfF5GucHYvHGZD8BdaZmmHGz5cX/3gdpw==",
"license": "MIT",
"dependencies": {
"@octokit/types": "^13.10.0"
},
"engines": {
"node": ">= 18"
},
"peerDependencies": {
"@octokit/core": ">=6"
}
},
"node_modules/@octokit/plugin-paginate-rest/node_modules/@octokit/openapi-types": {
"version": "24.2.0",
"resolved": "https://registry.npmjs.org/@octokit/openapi-types/-/openapi-types-24.2.0.tgz",
"integrity": "sha512-9sIH3nSUttelJSXUrmGzl7QUBFul0/mB8HRYl3fOlgHbIWG+WnYDXU3v/2zMtAvuzZ/ed00Ei6on975FhBfzrg==",
"license": "MIT"
},
"node_modules/@octokit/plugin-paginate-rest/node_modules/@octokit/types": {
"version": "13.10.0",
"resolved": "https://registry.npmjs.org/@octokit/types/-/types-13.10.0.tgz",
"integrity": "sha512-ifLaO34EbbPj0Xgro4G5lP5asESjwHracYJvVaPIyXMuiuXLlhic3S47cBdTb+jfODkTE5YtGCLt3Ay3+J97sA==",
"license": "MIT",
"dependencies": {
"@octokit/openapi-types": "^24.2.0"
}
},
"node_modules/@octokit/plugin-request-log": {
"version": "5.3.1",
"resolved": "https://registry.npmjs.org/@octokit/plugin-request-log/-/plugin-request-log-5.3.1.tgz",
"integrity": "sha512-n/lNeCtq+9ofhC15xzmJCNKP2BWTv8Ih2TTy+jatNCCq/gQP/V7rK3fjIfuz0pDWDALO/o/4QY4hyOF6TQQFUw==",
"license": "MIT",
"engines": {
"node": ">= 18"
},
"peerDependencies": {
"@octokit/core": ">=6"
}
},
"node_modules/@octokit/plugin-rest-endpoint-methods": {
"version": "13.5.0",
"resolved": "https://registry.npmjs.org/@octokit/plugin-rest-endpoint-methods/-/plugin-rest-endpoint-methods-13.5.0.tgz",
"integrity": "sha512-9Pas60Iv9ejO3WlAX3maE1+38c5nqbJXV5GrncEfkndIpZrJ/WPMRd2xYDcPPEt5yzpxcjw9fWNoPhsSGzqKqw==",
"license": "MIT",
"dependencies": {
"@octokit/types": "^13.10.0"
},
"engines": {
"node": ">= 18"
},
"peerDependencies": {
"@octokit/core": ">=6"
}
},
"node_modules/@octokit/plugin-rest-endpoint-methods/node_modules/@octokit/openapi-types": {
"version": "24.2.0",
"resolved": "https://registry.npmjs.org/@octokit/openapi-types/-/openapi-types-24.2.0.tgz",
"integrity": "sha512-9sIH3nSUttelJSXUrmGzl7QUBFul0/mB8HRYl3fOlgHbIWG+WnYDXU3v/2zMtAvuzZ/ed00Ei6on975FhBfzrg==",
"license": "MIT"
},
"node_modules/@octokit/plugin-rest-endpoint-methods/node_modules/@octokit/types": {
"version": "13.10.0",
"resolved": "https://registry.npmjs.org/@octokit/types/-/types-13.10.0.tgz",
"integrity": "sha512-ifLaO34EbbPj0Xgro4G5lP5asESjwHracYJvVaPIyXMuiuXLlhic3S47cBdTb+jfODkTE5YtGCLt3Ay3+J97sA==",
"license": "MIT",
"dependencies": {
"@octokit/openapi-types": "^24.2.0"
}
},
"node_modules/@octokit/request": {
"version": "9.2.4",
"resolved": "https://registry.npmjs.org/@octokit/request/-/request-9.2.4.tgz",
"integrity": "sha512-q8ybdytBmxa6KogWlNa818r0k1wlqzNC+yNkcQDECHvQo8Vmstrg18JwqJHdJdUiHD2sjlwBgSm9kHkOKe2iyA==",
"license": "MIT",
"dependencies": {
"@octokit/endpoint": "^10.1.4",
"@octokit/request-error": "^6.1.8",
"@octokit/types": "^14.0.0",
"fast-content-type-parse": "^2.0.0",
"universal-user-agent": "^7.0.2"
},
"engines": {
"node": ">= 18"
}
},
"node_modules/@octokit/request-error": {
"version": "6.1.8",
"resolved": "https://registry.npmjs.org/@octokit/request-error/-/request-error-6.1.8.tgz",
"integrity": "sha512-WEi/R0Jmq+IJKydWlKDmryPcmdYSVjL3ekaiEL1L9eo1sUnqMJ+grqmC9cjk7CA7+b2/T397tO5d8YLOH3qYpQ==",
"license": "MIT",
"dependencies": {
"@octokit/types": "^14.0.0"
},
"engines": {
"node": ">= 18"
}
},
"node_modules/@octokit/rest": {
"version": "21.1.1",
"resolved": "https://registry.npmjs.org/@octokit/rest/-/rest-21.1.1.tgz",
"integrity": "sha512-sTQV7va0IUVZcntzy1q3QqPm/r8rWtDCqpRAmb8eXXnKkjoQEtFe3Nt5GTVsHft+R6jJoHeSiVLcgcvhtue/rg==",
"license": "MIT",
"dependencies": {
"@octokit/core": "^6.1.4",
"@octokit/plugin-paginate-rest": "^11.4.2",
"@octokit/plugin-request-log": "^5.3.1",
"@octokit/plugin-rest-endpoint-methods": "^13.3.0"
},
"engines": {
"node": ">= 18"
}
},
"node_modules/@octokit/types": {
"version": "14.1.0",
"resolved": "https://registry.npmjs.org/@octokit/types/-/types-14.1.0.tgz",
"integrity": "sha512-1y6DgTy8Jomcpu33N+p5w58l6xyt55Ar2I91RPiIA0xCJBXyUAhXCcmZaDWSANiha7R9a6qJJ2CRomGPZ6f46g==",
"license": "MIT",
"dependencies": {
"@octokit/openapi-types": "^25.1.0"
}
},
"node_modules/asynckit": {
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
"integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==",
"license": "MIT"
},
"node_modules/axios": {
"version": "1.12.2",
"resolved": "https://registry.npmjs.org/axios/-/axios-1.12.2.tgz",
"integrity": "sha512-vMJzPewAlRyOgxV2dU0Cuz2O8zzzx9VYtbJOaBgXFeLc4IV/Eg50n4LowmehOOR61S8ZMpc2K5Sa7g6A4jfkUw==",
"license": "MIT",
"dependencies": {
"follow-redirects": "^1.15.6",
"form-data": "^4.0.4",
"proxy-from-env": "^1.1.0"
}
},
"node_modules/before-after-hook": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/before-after-hook/-/before-after-hook-3.0.2.tgz",
"integrity": "sha512-Nik3Sc0ncrMK4UUdXQmAnRtzmNQTAAXmXIopizwZ1W1t8QmfJj+zL4OA2I7XPTPW5z5TDqv4hRo/JzouDJnX3A==",
"license": "Apache-2.0"
},
"node_modules/call-bind-apply-helpers": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz",
"integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==",
"license": "MIT",
"dependencies": {
"es-errors": "^1.3.0",
"function-bind": "^1.1.2"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/combined-stream": {
"version": "1.0.8",
"resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
"integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
"license": "MIT",
"dependencies": {
"delayed-stream": "~1.0.0"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/delayed-stream": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
"integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==",
"license": "MIT",
"engines": {
"node": ">=0.4.0"
}
},
"node_modules/dunder-proto": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz",
"integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==",
"license": "MIT",
"dependencies": {
"call-bind-apply-helpers": "^1.0.1",
"es-errors": "^1.3.0",
"gopd": "^1.2.0"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/es-define-property": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz",
"integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==",
"license": "MIT",
"engines": {
"node": ">= 0.4"
}
},
"node_modules/es-errors": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
"integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
"license": "MIT",
"engines": {
"node": ">= 0.4"
}
},
"node_modules/es-object-atoms": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz",
"integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==",
"license": "MIT",
"dependencies": {
"es-errors": "^1.3.0"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/es-set-tostringtag": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz",
"integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==",
"license": "MIT",
"dependencies": {
"es-errors": "^1.3.0",
"get-intrinsic": "^1.2.6",
"has-tostringtag": "^1.0.2",
"hasown": "^2.0.2"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/fast-content-type-parse": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/fast-content-type-parse/-/fast-content-type-parse-2.0.1.tgz",
"integrity": "sha512-nGqtvLrj5w0naR6tDPfB4cUmYCqouzyQiz6C5y/LtcDllJdrcc6WaWW6iXyIIOErTa/XRybj28aasdn4LkVk6Q==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/fastify"
},
{
"type": "opencollective",
"url": "https://opencollective.com/fastify"
}
],
"license": "MIT"
},
"node_modules/follow-redirects": {
"version": "1.15.11",
"resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.11.tgz",
"integrity": "sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==",
"funding": [
{
"type": "individual",
"url": "https://github.com/sponsors/RubenVerborgh"
}
],
"license": "MIT",
"engines": {
"node": ">=4.0"
},
"peerDependenciesMeta": {
"debug": {
"optional": true
}
}
},
"node_modules/form-data": {
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.4.tgz",
"integrity": "sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==",
"license": "MIT",
"dependencies": {
"asynckit": "^0.4.0",
"combined-stream": "^1.0.8",
"es-set-tostringtag": "^2.1.0",
"hasown": "^2.0.2",
"mime-types": "^2.1.12"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/function-bind": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
"integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
"license": "MIT",
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/get-intrinsic": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz",
"integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==",
"license": "MIT",
"dependencies": {
"call-bind-apply-helpers": "^1.0.2",
"es-define-property": "^1.0.1",
"es-errors": "^1.3.0",
"es-object-atoms": "^1.1.1",
"function-bind": "^1.1.2",
"get-proto": "^1.0.1",
"gopd": "^1.2.0",
"has-symbols": "^1.1.0",
"hasown": "^2.0.2",
"math-intrinsics": "^1.1.0"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/get-proto": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz",
"integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==",
"license": "MIT",
"dependencies": {
"dunder-proto": "^1.0.1",
"es-object-atoms": "^1.0.0"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/gopd": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz",
"integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==",
"license": "MIT",
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/has-symbols": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz",
"integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==",
"license": "MIT",
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/has-tostringtag": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz",
"integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==",
"license": "MIT",
"dependencies": {
"has-symbols": "^1.0.3"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/hasown": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
"integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
"license": "MIT",
"dependencies": {
"function-bind": "^1.1.2"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/math-intrinsics": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
"integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==",
"license": "MIT",
"engines": {
"node": ">= 0.4"
}
},
"node_modules/mime-db": {
"version": "1.52.0",
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
"integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/mime-types": {
"version": "2.1.35",
"resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
"integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
"license": "MIT",
"dependencies": {
"mime-db": "1.52.0"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/proxy-from-env": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz",
"integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==",
"license": "MIT"
},
"node_modules/universal-user-agent": {
"version": "7.0.3",
"resolved": "https://registry.npmjs.org/universal-user-agent/-/universal-user-agent-7.0.3.tgz",
"integrity": "sha512-TmnEAEAsBJVZM/AADELsK76llnwcf9vMKuPz8JflO1frO8Lchitr0fNaN9d+Ap0BjKtqWqd/J17qeDnXh8CL2A==",
"license": "ISC"
}
}
}


@@ -1,10 +0,0 @@
{
"name": "greptimedb-github-scripts",
"version": "1.0.0",
"type": "module",
"description": "GitHub automation scripts for GreptimeDB",
"dependencies": {
"@octokit/rest": "^21.0.0",
"axios": "^1.7.0"
}
}


@@ -1,152 +0,0 @@
// Daily PR Review Reminder Script
// Fetches open PRs from GreptimeDB repository and sends Slack notifications
// to PR owners and assigned reviewers to keep review process moving.
(async () => {
const { Octokit } = await import("@octokit/rest");
const { default: axios } = await import('axios');
// Configuration
const GITHUB_TOKEN = process.env.GITHUB_TOKEN;
const SLACK_WEBHOOK_URL = process.env.SLACK_PR_REVIEW_WEBHOOK_URL;
const REPO_OWNER = "GreptimeTeam";
const REPO_NAME = "greptimedb";
const GITHUB_TO_SLACK = JSON.parse(process.env.GITHUBID_SLACKID_MAPPING || '{}');
// Debug: Print environment variable status
console.log("=== Environment Variables Debug ===");
console.log(`GITHUB_TOKEN: ${GITHUB_TOKEN ? 'Set ✓' : 'NOT SET ✗'}`);
console.log(`SLACK_PR_REVIEW_WEBHOOK_URL: ${SLACK_WEBHOOK_URL ? 'Set ✓' : 'NOT SET ✗'}`);
console.log(`GITHUBID_SLACKID_MAPPING: ${process.env.GITHUBID_SLACKID_MAPPING ? `Set ✓ (${Object.keys(GITHUB_TO_SLACK).length} mappings)` : 'NOT SET ✗'}`);
console.log("===================================\n");
const octokit = new Octokit({
auth: GITHUB_TOKEN
});
// Fetch all open PRs from the repository
async function fetchOpenPRs() {
try {
const prs = await octokit.pulls.list({
owner: REPO_OWNER,
repo: REPO_NAME,
state: "open",
per_page: 100,
sort: "created",
direction: "asc"
});
return prs.data.filter((pr) => !pr.draft);
} catch (error) {
console.error("Error fetching PRs:", error);
return [];
}
}
// Convert GitHub username to Slack mention or fallback to GitHub username
function toSlackMention(githubUser) {
const slackUserId = GITHUB_TO_SLACK[githubUser];
return slackUserId ? `<@${slackUserId}>` : `@${githubUser}`;
}
// Calculate days since PR was opened
function getDaysOpen(createdAt) {
const created = new Date(createdAt);
const now = new Date();
const diffMs = now - created;
const days = Math.floor(diffMs / (1000 * 60 * 60 * 24));
return days;
}
// Build Slack notification message from PR list
function buildSlackMessage(prs) {
if (prs.length === 0) {
return "*🎉 Great job! No pending PRs for review.*";
}
// Separate PRs by age threshold (14 days)
const criticalPRs = [];
const recentPRs = [];
prs.forEach(pr => {
const daysOpen = getDaysOpen(pr.created_at);
if (daysOpen >= 14) {
criticalPRs.push(pr);
} else {
recentPRs.push(pr);
}
});
const lines = [
`*🔍 Daily PR Review Reminder 🔍*`,
`Found *${criticalPRs.length}* critical PR(s) (14+ days old)\n`
];
// Show critical PRs (14+ days) in detail
if (criticalPRs.length > 0) {
criticalPRs.forEach((pr, index) => {
const owner = toSlackMention(pr.user.login);
const reviewers = pr.requested_reviewers || [];
const reviewerMentions = reviewers.map(r => toSlackMention(r.login)).join(", ");
const daysOpen = getDaysOpen(pr.created_at);
const prInfo = `${index + 1}. <${pr.html_url}|#${pr.number}: ${pr.title}>`;
const ageInfo = ` 🔴 Opened *${daysOpen}* day(s) ago`;
const ownerInfo = ` 👤 Owner: ${owner}`;
const reviewerInfo = reviewers.length > 0
? ` 👁️ Reviewers: ${reviewerMentions}`
: ` 👁️ Reviewers: _Not assigned yet_`;
lines.push(prInfo);
lines.push(ageInfo);
lines.push(ownerInfo);
lines.push(reviewerInfo);
lines.push(""); // Empty line between PRs
});
}
lines.push("_Let's keep the code review process moving! 🚀_");
return lines.join("\n");
}
// Send notification to Slack webhook
async function sendSlackNotification(message) {
if (!SLACK_WEBHOOK_URL) {
console.log("⚠️ SLACK_PR_REVIEW_WEBHOOK_URL not configured. Message preview:");
console.log("=".repeat(60));
console.log(message);
console.log("=".repeat(60));
return;
}
try {
const response = await axios.post(SLACK_WEBHOOK_URL, {
text: message
});
if (response.status !== 200) {
throw new Error(`Slack API returned status ${response.status}`);
}
console.log("Slack notification sent successfully.");
} catch (error) {
console.error("Error sending Slack notification:", error);
throw error;
}
}
// Main execution flow
async function run() {
console.log(`Fetching open PRs from ${REPO_OWNER}/${REPO_NAME}...`);
const prs = await fetchOpenPRs();
console.log(`Found ${prs.length} open PR(s).`);
const message = buildSlackMessage(prs);
console.log("Sending Slack notification...");
await sendSlackNotification(message);
}
run().catch(error => {
console.error("Script execution failed:", error);
process.exit(1);
});
})();
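For local testing, a minimal sketch of running the reminder script by hand; only the environment variable names and the preview behaviour come from the code above, while the file name and the mapping value are illustrative assumptions:

# Install the two dependencies from the manifest above, then run the script.
# Without SLACK_PR_REVIEW_WEBHOOK_URL set, the script only prints the message preview instead of posting to Slack.
npm install @octokit/rest axios
export GITHUB_TOKEN="<token with read access to GreptimeTeam/greptimedb>"
export GITHUBID_SLACKID_MAPPING='{"some-github-id":"U00000000"}'   # assumption: example mapping only
node pr-review-reminder.js                                         # assumption: actual file name may differ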

View File

@@ -1,34 +0,0 @@
#!/bin/bash
# This script is used to pull the test dependency images that are stored in public ECR one by one to avoid rate limiting.
set -e
MAX_RETRIES=3
IMAGES=(
"greptime/zookeeper:3.7"
"greptime/kafka:3.9.0-debian-12-r1"
"greptime/etcd:3.6.1-debian-12-r3"
"greptime/minio:2024"
"greptime/mysql:5.7"
)
for image in "${IMAGES[@]}"; do
for ((attempt=1; attempt<=MAX_RETRIES; attempt++)); do
if docker pull "$image"; then
# Successfully pulled the image.
break
else
# Use a simple increasing delay between attempts to avoid rate limiting.
if [ $attempt -lt $MAX_RETRIES ]; then
sleep_seconds=$((attempt * 5))
echo "Attempt $attempt failed for $image, waiting $sleep_seconds seconds"
sleep $sleep_seconds # 5s, 10s delays
else
echo "Failed to pull $image after $MAX_RETRIES attempts"
exit 1
fi
fi
done
done
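The CI workflows further down in this diff chain this script in front of docker compose when starting the test fixtures; a minimal sketch of that invocation, taken from those workflow steps:

# Pre-pull the fixture images with retries, then bring the services up.
cd tests-integration/fixtures
../../.github/scripts/pull-test-deps-images.sh && docker compose up -d --wait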

View File

@@ -1,37 +0,0 @@
#!/bin/bash
DEV_BUILDER_IMAGE_TAG=$1
update_dev_builder_version() {
if [ -z "$DEV_BUILDER_IMAGE_TAG" ]; then
echo "Error: Should specify the dev-builder image tag"
exit 1
fi
# Configure Git configs.
git config --global user.email greptimedb-ci@greptime.com
git config --global user.name greptimedb-ci
# Checkout a new branch.
BRANCH_NAME="ci/update-dev-builder-$(date +%Y%m%d%H%M%S)"
git checkout -b $BRANCH_NAME
# Update the dev-builder image tag in the Makefile.
sed -i "s/DEV_BUILDER_IMAGE_TAG ?=.*/DEV_BUILDER_IMAGE_TAG ?= ${DEV_BUILDER_IMAGE_TAG}/g" Makefile
# Commit the changes.
git add Makefile
git commit -s -m "ci: update dev-builder image tag"
git push origin $BRANCH_NAME
# Create a Pull Request.
gh pr create \
--title "ci: update dev-builder image tag" \
--body "This PR updates the dev-builder image tag" \
--base main \
--head $BRANCH_NAME \
--reviewer zyy17 \
--reviewer daviderli614
}
update_dev_builder_version
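A minimal sketch of invoking this helper; the script path is an assumption, while the single positional argument and the need for push rights plus an authenticated gh CLI come from the script itself:

# assumption: the script file name and location may differ in the repository
bash .github/scripts/update-dev-builder-image-tag.sh "<DEV_BUILDER_IMAGE_TAG>"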

View File

@@ -1,49 +0,0 @@
#!/bin/bash
set -e
VERSION=${VERSION}
GITHUB_TOKEN=${GITHUB_TOKEN}
update_helm_charts_version() {
# Configure Git configs.
git config --global user.email update-helm-charts-version@greptime.com
git config --global user.name update-helm-charts-version
# Clone helm-charts repository.
git clone "https://x-access-token:${GITHUB_TOKEN}@github.com/GreptimeTeam/helm-charts.git"
cd helm-charts
# Set default remote for gh CLI
gh repo set-default GreptimeTeam/helm-charts
# Checkout a new branch.
BRANCH_NAME="chore/greptimedb-${VERSION}"
git checkout -b $BRANCH_NAME
# Update version.
make update-version CHART=greptimedb-cluster VERSION=${VERSION}
make update-version CHART=greptimedb-standalone VERSION=${VERSION}
# Update docs.
make docs
# Commit the changes.
git add .
git commit -s -m "chore: Update GreptimeDB version to ${VERSION}"
git push origin $BRANCH_NAME
# Create a Pull Request.
gh pr create \
--title "chore: Update GreptimeDB version to ${VERSION}" \
--body "This PR updates the GreptimeDB version." \
--base main \
--head $BRANCH_NAME \
--reviewer sunng87 \
--reviewer daviderli614 \
--reviewer killme2008 \
--reviewer evenyag \
--reviewer fengjiachun
}
update_helm_charts_version
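This script takes its inputs purely from the environment, so a manual run only needs two exports; the script name below is an assumption and the version value is a placeholder. The homebrew variant that follows takes the same two inputs:

# GITHUB_TOKEN must be allowed to push branches and open PRs against GreptimeTeam/helm-charts.
export VERSION=vX.Y.Z
export GITHUB_TOKEN="<token>"
bash .github/scripts/update-helm-charts-version.sh   # assumption: actual file name may differ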

View File

@@ -1,45 +0,0 @@
#!/bin/bash
set -e
VERSION=${VERSION}
GITHUB_TOKEN=${GITHUB_TOKEN}
update_homebrew_greptime_version() {
# Configure Git configs.
git config --global user.email update-greptime-version@greptime.com
git config --global user.name update-greptime-version
# Clone homebrew-greptime repository.
git clone "https://x-access-token:${GITHUB_TOKEN}@github.com/GreptimeTeam/homebrew-greptime.git"
cd homebrew-greptime
# Set default remote for gh CLI
gh repo set-default GreptimeTeam/homebrew-greptime
# Checkout a new branch.
BRANCH_NAME="chore/greptimedb-${VERSION}"
git checkout -b $BRANCH_NAME
# Update version.
make update-greptime-version VERSION=${VERSION}
# Commit the changes.
git add .
git commit -s -m "chore: Update GreptimeDB version to ${VERSION}"
git push origin $BRANCH_NAME
# Create a Pull Request.
gh pr create \
--title "chore: Update GreptimeDB version to ${VERSION}" \
--body "This PR updates the GreptimeDB version." \
--base main \
--head $BRANCH_NAME \
--reviewer sunng87 \
--reviewer daviderli614 \
--reviewer killme2008 \
--reviewer evenyag \
--reviewer fengjiachun
}
update_homebrew_greptime_version

View File

@@ -27,13 +27,13 @@ function upload_artifacts() {
# ├── latest-version.txt # ├── latest-version.txt
# ├── latest-nightly-version.txt # ├── latest-nightly-version.txt
# ├── v0.1.0 # ├── v0.1.0
# │ ├── greptime-darwin-amd64-v0.1.0.sha256sum # │ ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
# │ └── greptime-darwin-amd64-v0.1.0.tar.gz # │ └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
# └── v0.2.0 # └── v0.2.0
# ├── greptime-darwin-amd64-v0.2.0.sha256sum # ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
# └── greptime-darwin-amd64-v0.2.0.tar.gz # └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
find "$ARTIFACTS_DIR" -type f \( -name "*.tar.gz" -o -name "*.sha256sum" \) | while IFS= read -r file; do find "$ARTIFACTS_DIR" -type f \( -name "*.tar.gz" -o -name "*.sha256sum" \) | while IFS= read -r file; do
s5cmd cp \ aws s3 cp \
"$file" "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/$VERSION/$(basename "$file")" "$file" "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/$VERSION/$(basename "$file")"
done done
} }
@@ -41,11 +41,11 @@ function upload_artifacts() {
# Updates the latest version information in AWS S3 if UPDATE_VERSION_INFO is true. # Updates the latest version information in AWS S3 if UPDATE_VERSION_INFO is true.
function update_version_info() { function update_version_info() {
if [ "$UPDATE_VERSION_INFO" == "true" ]; then if [ "$UPDATE_VERSION_INFO" == "true" ]; then
# If it's the official release(like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt. # If it's the officail release(like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt.
if [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then if [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
echo "Updating latest-version.txt" echo "Updating latest-version.txt"
echo "$VERSION" > latest-version.txt echo "$VERSION" > latest-version.txt
s5cmd cp \ aws s3 cp \
latest-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-version.txt" latest-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-version.txt"
fi fi
@@ -53,7 +53,7 @@ function update_version_info() {
if [[ "$VERSION" == *"nightly"* ]]; then if [[ "$VERSION" == *"nightly"* ]]; then
echo "Updating latest-nightly-version.txt" echo "Updating latest-nightly-version.txt"
echo "$VERSION" > latest-nightly-version.txt echo "$VERSION" > latest-nightly-version.txt
s5cmd cp \ aws s3 cp \
latest-nightly-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-nightly-version.txt" latest-nightly-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-nightly-version.txt"
fi fi
fi fi

View File

@@ -14,11 +14,9 @@ name: Build API docs
jobs: jobs:
apidoc: apidoc:
runs-on: ubuntu-latest runs-on: ubuntu-20.04
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -1,35 +0,0 @@
name: Check Dependencies
on:
pull_request:
branches:
- main
jobs:
check-dependencies:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Set up Rust
uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Run cargo tree
run: cargo tree --prefix none > dependencies.txt
- name: Extract dependency names
run: awk '{print $1}' dependencies.txt > dependency_names.txt
- name: Check for blacklisted crates
run: |
while read -r dep; do
if grep -qFx "$dep" dependency_names.txt; then
echo "Blacklisted crate '$dep' found in dependencies."
exit 1
fi
done < .github/cargo-blacklist.txt
echo "No blacklisted crates found."

View File

@@ -4,11 +4,10 @@ name: GreptimeDB Development Build
on: on:
workflow_dispatch: # Allows you to run this workflow manually. workflow_dispatch: # Allows you to run this workflow manually.
inputs: inputs:
large-page-size: repository:
description: Build GreptimeDB with large page size (65536). description: The public repository to build
type: boolean
required: false required: false
default: false default: GreptimeTeam/greptimedb
commit: # Note: We only pull the source code and use the current workflow to build the artifacts. commit: # Note: We only pull the source code and use the current workflow to build the artifacts.
description: The commit to build description: The commit to build
required: true required: true
@@ -17,11 +16,11 @@ on:
description: The runner uses to build linux-amd64 artifacts description: The runner uses to build linux-amd64 artifacts
default: ec2-c6i.4xlarge-amd64 default: ec2-c6i.4xlarge-amd64
options: options:
- ubuntu-22.04 - ubuntu-20.04
- ubuntu-22.04-8-cores - ubuntu-20.04-8-cores
- ubuntu-22.04-16-cores - ubuntu-20.04-16-cores
- ubuntu-22.04-32-cores - ubuntu-20.04-32-cores
- ubuntu-22.04-64-cores - ubuntu-20.04-64-cores
- ec2-c6i.xlarge-amd64 # 4C8G - ec2-c6i.xlarge-amd64 # 4C8G
- ec2-c6i.2xlarge-amd64 # 8C16G - ec2-c6i.2xlarge-amd64 # 8C16G
- ec2-c6i.4xlarge-amd64 # 16C32G - ec2-c6i.4xlarge-amd64 # 16C32G
@@ -56,11 +55,6 @@ on:
description: Build and push images to DockerHub and ACR description: Build and push images to DockerHub and ACR
required: false required: false
default: true default: true
upload_artifacts_to_s3:
type: boolean
description: Whether upload artifacts to s3
required: false
default: false
cargo_profile: cargo_profile:
type: choice type: choice
description: The cargo profile to use in building GreptimeDB. description: The cargo profile to use in building GreptimeDB.
@@ -82,14 +76,20 @@ env:
NIGHTLY_RELEASE_PREFIX: nightly NIGHTLY_RELEASE_PREFIX: nightly
# Use the different image name to avoid conflict with the release images.
IMAGE_NAME: greptimedb-dev
# The source code will check out in the following path: '${WORKING_DIR}/dev/greptime'. # The source code will check out in the following path: '${WORKING_DIR}/dev/greptime'.
CHECKOUT_GREPTIMEDB_PATH: dev/greptimedb CHECKOUT_GREPTIMEDB_PATH: dev/greptimedb
permissions:
issues: write
jobs: jobs:
allocate-runners: allocate-runners:
name: Allocate runners name: Allocate runners
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }} if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-latest runs-on: ubuntu-20.04
outputs: outputs:
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }} linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }} linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
@@ -107,7 +107,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Create version - name: Create version
id: create-version id: create-version
@@ -162,7 +161,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Checkout greptimedb - name: Checkout greptimedb
uses: actions/checkout@v4 uses: actions/checkout@v4
@@ -170,7 +168,6 @@ jobs:
repository: ${{ inputs.repository }} repository: ${{ inputs.repository }}
ref: ${{ inputs.commit }} ref: ${{ inputs.commit }}
path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }} path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
persist-credentials: true
- uses: ./.github/actions/build-linux-artifacts - uses: ./.github/actions/build-linux-artifacts
with: with:
@@ -182,7 +179,6 @@ jobs:
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }} working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }} image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }} image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
large-page-size: ${{ inputs.large-page-size }}
build-linux-arm64-artifacts: build-linux-arm64-artifacts:
name: Build linux-arm64 artifacts name: Build linux-arm64 artifacts
@@ -196,7 +192,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Checkout greptimedb - name: Checkout greptimedb
uses: actions/checkout@v4 uses: actions/checkout@v4
@@ -204,7 +199,6 @@ jobs:
repository: ${{ inputs.repository }} repository: ${{ inputs.repository }}
ref: ${{ inputs.commit }} ref: ${{ inputs.commit }}
path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }} path: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
persist-credentials: true
- uses: ./.github/actions/build-linux-artifacts - uses: ./.github/actions/build-linux-artifacts
with: with:
@@ -216,7 +210,6 @@ jobs:
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }} working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }} image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }} image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
large-page-size: ${{ inputs.large-page-size }}
release-images-to-dockerhub: release-images-to-dockerhub:
name: Build and push images to DockerHub name: Build and push images to DockerHub
@@ -226,34 +219,26 @@ jobs:
build-linux-amd64-artifacts, build-linux-amd64-artifacts,
build-linux-arm64-artifacts, build-linux-arm64-artifacts,
] ]
runs-on: ubuntu-latest runs-on: ubuntu-20.04
outputs: outputs:
build-result: ${{ steps.set-build-result.outputs.build-result }} build-result: ${{ steps.set-build-result.outputs.build-result }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Build and push images to dockerhub - name: Build and push images to dockerhub
uses: ./.github/actions/build-images uses: ./.github/actions/build-images
with: with:
image-registry: docker.io image-registry: docker.io
image-namespace: ${{ vars.IMAGE_NAMESPACE }} image-namespace: ${{ vars.IMAGE_NAMESPACE }}
image-name: ${{ vars.DEV_BUILD_IMAGE_NAME }} image-name: ${{ env.IMAGE_NAME }}
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }} image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }} image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
version: ${{ needs.allocate-runners.outputs.version }} version: ${{ needs.allocate-runners.outputs.version }}
push-latest-tag: false # Don't push the latest tag to registry. push-latest-tag: false # Don't push the latest tag to registry.
dev-mode: true # Only build the standard images. dev-mode: true # Only build the standard images.
- name: Echo Docker image tag to step summary
run: |
echo "## Docker Image Tag" >> $GITHUB_STEP_SUMMARY
echo "Image Tag: \`${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
echo "Full Image Name: \`docker.io/${{ vars.IMAGE_NAMESPACE }}/${{ vars.DEV_BUILD_IMAGE_NAME }}:${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
echo "Pull Command: \`docker pull docker.io/${{ vars.IMAGE_NAMESPACE }}/${{ vars.DEV_BUILD_IMAGE_NAME }}:${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
- name: Set build result - name: Set build result
id: set-build-result id: set-build-result
run: | run: |
@@ -266,20 +251,19 @@ jobs:
allocate-runners, allocate-runners,
release-images-to-dockerhub, release-images-to-dockerhub,
] ]
runs-on: ubuntu-latest runs-on: ubuntu-20.04
continue-on-error: true continue-on-error: true
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Release artifacts to CN region - name: Release artifacts to CN region
uses: ./.github/actions/release-cn-artifacts uses: ./.github/actions/release-cn-artifacts
with: with:
src-image-registry: docker.io src-image-registry: docker.io
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }} src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
src-image-name: ${{ vars.DEV_BUILD_IMAGE_NAME }} src-image-name: ${{ env.IMAGE_NAME }}
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }} dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }} dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }} dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
@@ -289,7 +273,6 @@ jobs:
aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }} aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }} aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }} aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
upload-to-s3: ${{ inputs.upload_artifacts_to_s3 }}
dev-mode: true # Only build the standard images(exclude centos images). dev-mode: true # Only build the standard images(exclude centos images).
push-latest-tag: false # Don't push the latest tag to registry. push-latest-tag: false # Don't push the latest tag to registry.
update-version-info: false # Don't update the version info in S3. update-version-info: false # Don't update the version info in S3.
@@ -298,7 +281,7 @@ jobs:
name: Stop linux-amd64 runner name: Stop linux-amd64 runner
# Only run this job when the runner is allocated. # Only run this job when the runner is allocated.
if: ${{ always() }} if: ${{ always() }}
runs-on: ubuntu-latest runs-on: ubuntu-20.04
needs: [ needs: [
allocate-runners, allocate-runners,
build-linux-amd64-artifacts, build-linux-amd64-artifacts,
@@ -308,7 +291,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Stop EC2 runner - name: Stop EC2 runner
uses: ./.github/actions/stop-runner uses: ./.github/actions/stop-runner
@@ -324,7 +306,7 @@ jobs:
name: Stop linux-arm64 runner name: Stop linux-arm64 runner
# Only run this job when the runner is allocated. # Only run this job when the runner is allocated.
if: ${{ always() }} if: ${{ always() }}
runs-on: ubuntu-latest runs-on: ubuntu-20.04
needs: [ needs: [
allocate-runners, allocate-runners,
build-linux-arm64-artifacts, build-linux-arm64-artifacts,
@@ -334,7 +316,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Stop EC2 runner - name: Stop EC2 runner
uses: ./.github/actions/stop-runner uses: ./.github/actions/stop-runner
@@ -352,17 +333,11 @@ jobs:
needs: [ needs: [
release-images-to-dockerhub release-images-to-dockerhub
] ]
runs-on: ubuntu-latest runs-on: ubuntu-20.04
permissions:
issues: write
env: env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }} SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/setup-cyborg - uses: ./.github/actions/setup-cyborg
- name: Report CI status - name: Report CI status
id: report-ci-status id: report-ci-status

View File

@@ -1,6 +1,4 @@
on: on:
schedule:
- cron: "0 15 * * 1-5"
merge_group: merge_group:
pull_request: pull_request:
types: [ opened, synchronize, reopened, ready_for_review ] types: [ opened, synchronize, reopened, ready_for_review ]
@@ -12,7 +10,17 @@ on:
- 'docker/**' - 'docker/**'
- '.gitignore' - '.gitignore'
- 'grafana/**' - 'grafana/**'
- 'Makefile' push:
branches:
- main
paths-ignore:
- 'docs/**'
- 'config/**'
- '**.md'
- '.dockerignore'
- 'docker/**'
- '.gitignore'
- 'grafana/**'
workflow_dispatch: workflow_dispatch:
name: CI name: CI
@@ -23,13 +31,10 @@ concurrency:
jobs: jobs:
check-typos-and-docs: check-typos-and-docs:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Check typos and docs name: Check typos and docs
runs-on: ubuntu-latest runs-on: ubuntu-20.04
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: crate-ci/typos@master - uses: crate-ci/typos@master
- name: Check the config docs - name: Check the config docs
run: | run: |
@@ -38,27 +43,21 @@ jobs:
|| (echo "'config/config.md' is not up-to-date, please run 'make config-docs'." && exit 1) || (echo "'config/config.md' is not up-to-date, please run 'make config-docs'." && exit 1)
license-header-check: license-header-check:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }} runs-on: ubuntu-20.04
runs-on: ubuntu-latest
name: Check License Header name: Check License Header
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: korandoru/hawkeye@v5 - uses: korandoru/hawkeye@v5
check: check:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Check name: Check
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
strategy: strategy:
matrix: matrix:
os: [ ubuntu-latest ] os: [ windows-2022, ubuntu-20.04 ]
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
@@ -69,38 +68,35 @@ jobs:
# Shares across multiple jobs # Shares across multiple jobs
# Shares with `Clippy` job # Shares with `Clippy` job
shared-key: "check-lint" shared-key: "check-lint"
cache-all-crates: "true"
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Run cargo check - name: Run cargo check
run: cargo check --locked --workspace --all-targets run: cargo check --locked --workspace --all-targets
toml: toml:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Toml Check name: Toml Check
runs-on: ubuntu-latest runs-on: ubuntu-20.04
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "check-toml"
- name: Install taplo - name: Install taplo
run: cargo +stable install taplo-cli --version ^0.9 --locked --force run: cargo +stable install taplo-cli --version ^0.9 --locked
- name: Run taplo - name: Run taplo
run: taplo format --check run: taplo format --check
build: build:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Build GreptimeDB binaries name: Build GreptimeDB binaries
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
strategy: strategy:
matrix: matrix:
os: [ ubuntu-latest ] os: [ ubuntu-20.04 ]
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
@@ -109,15 +105,13 @@ jobs:
with: with:
# Shares across multiple jobs # Shares across multiple jobs
shared-key: "build-binaries" shared-key: "build-binaries"
cache-all-crates: "true"
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Install cargo-gc-bin - name: Install cargo-gc-bin
shell: bash shell: bash
run: cargo install cargo-gc-bin --force run: cargo install cargo-gc-bin
- name: Build greptime binaries - name: Build greptime binaries
shell: bash shell: bash
# `cargo gc` will invoke `cargo build` with specified args # `cargo gc` will invoke `cargo build` with specified args
run: cargo gc -- --bin greptime --bin sqlness-runner --features "pg_kvbackend,mysql_kvbackend" run: cargo gc -- --bin greptime --bin sqlness-runner
- name: Pack greptime binaries - name: Pack greptime binaries
shell: bash shell: bash
run: | run: |
@@ -133,7 +127,6 @@ jobs:
version: current version: current
fuzztest: fuzztest:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Fuzz Test name: Fuzz Test
needs: build needs: build
runs-on: ubuntu-latest runs-on: ubuntu-latest
@@ -156,18 +149,21 @@ jobs:
echo "Disk space after:" echo "Disk space after:"
df -h df -h
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz - name: Set Rust Fuzz
shell: bash shell: bash
run: | run: |
sudo apt-get install -y libfuzzer-14-dev sudo apt-get install -y libfuzzer-14-dev
rustup install nightly rustup install nightly
cargo +nightly install cargo-fuzz cargo-gc-bin --force cargo +nightly install cargo-fuzz cargo-gc-bin
- name: Download pre-built binaries - name: Download pre-built binaries
uses: actions/download-artifact@v4 uses: actions/download-artifact@v4
with: with:
@@ -190,13 +186,11 @@ jobs:
max-total-time: 120 max-total-time: 120
unstable-fuzztest: unstable-fuzztest:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Unstable Fuzz Test name: Unstable Fuzz Test
needs: build-greptime-ci needs: build-greptime-ci
runs-on: ubuntu-latest runs-on: ubuntu-latest
timeout-minutes: 60 timeout-minutes: 60
strategy: strategy:
fail-fast: false
matrix: matrix:
target: [ "unstable_fuzz_create_table_standalone" ] target: [ "unstable_fuzz_create_table_standalone" ]
steps: steps:
@@ -213,23 +207,26 @@ jobs:
echo "Disk space after:" echo "Disk space after:"
df -h df -h
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz - name: Set Rust Fuzz
shell: bash shell: bash
run: | run: |
sudo apt update && sudo apt install -y libfuzzer-14-dev sudo apt update && sudo apt install -y libfuzzer-14-dev
cargo install cargo-fuzz cargo-gc-bin --force cargo install cargo-fuzz cargo-gc-bin
- name: Download pre-built binary - name: Download pre-built binariy
uses: actions/download-artifact@v4 uses: actions/download-artifact@v4
with: with:
name: bin name: bin
path: . path: .
- name: Unzip binary - name: Unzip bianry
run: | run: |
tar -xvf ./bin.tar.gz tar -xvf ./bin.tar.gz
rm ./bin.tar.gz rm ./bin.tar.gz
@@ -251,24 +248,16 @@ jobs:
name: unstable-fuzz-logs name: unstable-fuzz-logs
path: /tmp/unstable-greptime/ path: /tmp/unstable-greptime/
retention-days: 3 retention-days: 3
- name: Describe pods
if: failure()
shell: bash
run: |
kubectl describe pod -n my-greptimedb
build-greptime-ci: build-greptime-ci:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Build GreptimeDB binary (profile-CI) name: Build GreptimeDB binary (profile-CI)
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
strategy: strategy:
matrix: matrix:
os: [ ubuntu-latest ] os: [ ubuntu-20.04 ]
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
@@ -277,15 +266,20 @@ jobs:
with: with:
# Shares across multiple jobs # Shares across multiple jobs
shared-key: "build-greptime-ci" shared-key: "build-greptime-ci"
cache-all-crates: "true"
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Install cargo-gc-bin - name: Install cargo-gc-bin
shell: bash shell: bash
run: cargo install cargo-gc-bin --force run: cargo install cargo-gc-bin
- name: Build greptime binary - name: Check aws-lc-sys will not build
shell: bash
run: |
if cargo tree -i aws-lc-sys -e features | grep -q aws-lc-sys; then
echo "Found aws-lc-sys, which has compilation problems on older gcc versions. Please replace it with ring until its building experience improves."
exit 1
fi
- name: Build greptime bianry
shell: bash shell: bash
# `cargo gc` will invoke `cargo build` with specified args # `cargo gc` will invoke `cargo build` with specified args
run: cargo gc --profile ci -- --bin greptime --features "pg_kvbackend,mysql_kvbackend" run: cargo gc --profile ci -- --bin greptime
- name: Pack greptime binary - name: Pack greptime binary
shell: bash shell: bash
run: | run: |
@@ -300,13 +294,11 @@ jobs:
version: current version: current
distributed-fuzztest: distributed-fuzztest:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Fuzz Test (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }}) name: Fuzz Test (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
runs-on: ubuntu-latest runs-on: ubuntu-latest
needs: build-greptime-ci needs: build-greptime-ci
timeout-minutes: 60 timeout-minutes: 60
strategy: strategy:
fail-fast: false
matrix: matrix:
target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ] target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
mode: mode:
@@ -328,29 +320,34 @@ jobs:
echo "Disk space after:" echo "Disk space after:"
df -h df -h
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- name: Setup Kind - name: Setup Kind
uses: ./.github/actions/setup-kind uses: ./.github/actions/setup-kind
- if: matrix.mode.minio - if: matrix.mode.minio
name: Setup Minio name: Setup Minio
uses: ./.github/actions/setup-minio uses: ./.github/actions/setup-minio
- if: matrix.mode.kafka - if: matrix.mode.kafka
name: Setup Kafka cluster name: Setup Kafka cluser
uses: ./.github/actions/setup-kafka-cluster uses: ./.github/actions/setup-kafka-cluster
- name: Setup Etcd cluster - name: Setup Etcd cluser
uses: ./.github/actions/setup-etcd-cluster uses: ./.github/actions/setup-etcd-cluster
- name: Setup Postgres cluser
uses: ./.github/actions/setup-postgres-cluster
# Prepares for fuzz tests # Prepares for fuzz tests
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz - name: Set Rust Fuzz
shell: bash shell: bash
run: | run: |
sudo apt-get install -y libfuzzer-14-dev sudo apt-get install -y libfuzzer-14-dev
rustup install nightly rustup install nightly
cargo +nightly install cargo-fuzz cargo-gc-bin --force cargo +nightly install cargo-fuzz cargo-gc-bin
# Downloads ci image # Downloads ci image
- name: Download pre-built binariy - name: Download pre-built binariy
uses: actions/download-artifact@v4 uses: actions/download-artifact@v4
@@ -411,11 +408,6 @@ jobs:
shell: bash shell: bash
run: | run: |
kubectl describe nodes kubectl describe nodes
- name: Describe pod
if: failure()
shell: bash
run: |
kubectl describe pod -n my-greptimedb
- name: Export kind logs - name: Export kind logs
if: failure() if: failure()
shell: bash shell: bash
@@ -438,13 +430,11 @@ jobs:
docker system prune -f docker system prune -f
distributed-fuzztest-with-chaos: distributed-fuzztest-with-chaos:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Fuzz Test with Chaos (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }}) name: Fuzz Test with Chaos (Distributed, ${{ matrix.mode.name }}, ${{ matrix.target }})
runs-on: ubuntu-latest runs-on: ubuntu-latest
needs: build-greptime-ci needs: build-greptime-ci
timeout-minutes: 60 timeout-minutes: 60
strategy: strategy:
fail-fast: false
matrix: matrix:
target: ["fuzz_migrate_mito_regions", "fuzz_migrate_metric_regions", "fuzz_failover_mito_regions", "fuzz_failover_metric_regions"] target: ["fuzz_migrate_mito_regions", "fuzz_migrate_metric_regions", "fuzz_failover_mito_regions", "fuzz_failover_metric_regions"]
mode: mode:
@@ -479,8 +469,6 @@ jobs:
echo "Disk space after:" echo "Disk space after:"
df -h df -h
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- name: Setup Kind - name: Setup Kind
uses: ./.github/actions/setup-kind uses: ./.github/actions/setup-kind
- name: Setup Chaos Mesh - name: Setup Chaos Mesh
@@ -489,21 +477,28 @@ jobs:
name: Setup Minio name: Setup Minio
uses: ./.github/actions/setup-minio uses: ./.github/actions/setup-minio
- if: matrix.mode.kafka - if: matrix.mode.kafka
name: Setup Kafka cluster name: Setup Kafka cluser
uses: ./.github/actions/setup-kafka-cluster uses: ./.github/actions/setup-kafka-cluster
- name: Setup Etcd cluster - name: Setup Etcd cluser
uses: ./.github/actions/setup-etcd-cluster uses: ./.github/actions/setup-etcd-cluster
- name: Setup Postgres cluser
uses: ./.github/actions/setup-postgres-cluster
# Prepares for fuzz tests # Prepares for fuzz tests
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz - name: Set Rust Fuzz
shell: bash shell: bash
run: | run: |
sudo apt-get install -y libfuzzer-14-dev sudo apt-get install -y libfuzzer-14-dev
rustup install nightly rustup install nightly
cargo +nightly install cargo-fuzz cargo-gc-bin --force cargo +nightly install cargo-fuzz cargo-gc-bin
# Downloads ci image # Downloads ci image
- name: Download pre-built binariy - name: Download pre-built binariy
uses: actions/download-artifact@v4 uses: actions/download-artifact@v4
@@ -565,11 +560,6 @@ jobs:
shell: bash shell: bash
run: | run: |
kubectl describe nodes kubectl describe nodes
- name: Describe pods
if: failure()
shell: bash
run: |
kubectl describe pod -n my-greptimedb
- name: Export kind logs - name: Export kind logs
if: failure() if: failure()
shell: bash shell: bash
@@ -592,14 +582,12 @@ jobs:
docker system prune -f docker system prune -f
sqlness: sqlness:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Sqlness Test (${{ matrix.mode.name }}) name: Sqlness Test (${{ matrix.mode.name }})
needs: build needs: build
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
strategy: strategy:
fail-fast: false
matrix: matrix:
os: [ ubuntu-latest ] os: [ ubuntu-20.04 ]
mode: mode:
- name: "Basic" - name: "Basic"
opts: "" opts: ""
@@ -607,26 +595,13 @@ jobs:
- name: "Remote WAL" - name: "Remote WAL"
opts: "-w kafka -k 127.0.0.1:9092" opts: "-w kafka -k 127.0.0.1:9092"
kafka: true kafka: true
- name: "PostgreSQL KvBackend"
opts: "--setup-pg"
kafka: false
- name: "MySQL Kvbackend"
opts: "--setup-mysql"
kafka: false
- name: "Flat format"
opts: "--enable-flat-format"
kafka: false
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- if: matrix.mode.kafka - if: matrix.mode.kafka
name: Setup kafka server name: Setup kafka server
working-directory: tests-integration/fixtures working-directory: tests-integration/fixtures/kafka
run: ../../.github/scripts/pull-test-deps-images.sh && docker compose up -d --wait kafka run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Download pre-built binaries - name: Download pre-built binaries
uses: actions/download-artifact@v4 uses: actions/download-artifact@v4
with: with:
@@ -635,7 +610,7 @@ jobs:
- name: Unzip binaries - name: Unzip binaries
run: tar -xvf ./bins.tar.gz run: tar -xvf ./bins.tar.gz
- name: Run sqlness - name: Run sqlness
run: RUST_BACKTRACE=1 ./bins/sqlness-runner bare ${{ matrix.mode.opts }} -c ./tests/cases --bins-dir ./bins --preserve-state run: RUST_BACKTRACE=1 ./bins/sqlness-runner ${{ matrix.mode.opts }} -c ./tests/cases --bins-dir ./bins --preserve-state
- name: Upload sqlness logs - name: Upload sqlness logs
if: failure() if: failure()
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v4
@@ -645,32 +620,31 @@ jobs:
retention-days: 3 retention-days: 3
fmt: fmt:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Rustfmt name: Rustfmt
runs-on: ubuntu-latest runs-on: ubuntu-20.04
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1 - uses: actions-rust-lang/setup-rust-toolchain@v1
with: with:
components: rustfmt components: rustfmt
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares across multiple jobs
shared-key: "check-rust-fmt"
- name: Check format - name: Check format
run: make fmt-check run: make fmt-check
clippy: clippy:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Clippy name: Clippy
runs-on: ubuntu-latest runs-on: ubuntu-20.04
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
@@ -683,139 +657,60 @@ jobs:
# Shares across multiple jobs # Shares across multiple jobs
# Shares with `Check` job # Shares with `Check` job
shared-key: "check-lint" shared-key: "check-lint"
cache-all-crates: "true"
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Run cargo clippy - name: Run cargo clippy
run: make clippy run: make clippy
check-udeps:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Check Unused Dependencies
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
shared-key: "check-udeps"
cache-all-crates: "true"
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Install cargo-udeps
run: cargo install cargo-udeps --locked
- name: Check unused dependencies
run: make check-udeps
conflict-check:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Check for conflict
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- name: Merge Conflict Finder
uses: olivernybroe/action-conflict-finder@v4.0
test:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && github.event_name != 'merge_group' }}
runs-on: ubuntu-22.04-arm
timeout-minutes: 60
needs: [conflict-check, clippy, fmt, check-udeps]
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: rui314/setup-mold@v1
- name: Install toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
cache: false
- name: Rust Cache
uses: Swatinem/rust-cache@v2
with:
# Shares cross multiple jobs
shared-key: "coverage-test"
cache-all-crates: "true"
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Install latest nextest release
uses: taiki-e/install-action@nextest
- name: Setup external services
working-directory: tests-integration/fixtures
run: ../../.github/scripts/pull-test-deps-images.sh && docker compose up -d --wait
- name: Run nextest cases
run: cargo nextest run --workspace -F dashboard -F pg_kvbackend -F mysql_kvbackend
env:
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
RUST_BACKTRACE: 1
RUST_MIN_STACK: 8388608 # 8MB
CARGO_INCREMENTAL: 0
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
GT_MINIO_BUCKET: greptime
GT_MINIO_ACCESS_KEY_ID: superpower_ci_user
GT_MINIO_ACCESS_KEY: superpower_password
GT_MINIO_REGION: us-west-2
GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
GT_ETCD_TLS_ENDPOINTS: https://127.0.0.1:2378
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
GT_POSTGRES15_ENDPOINTS: postgres://test_user:test_password@127.0.0.1:5433/postgres
GT_POSTGRES15_SCHEMA: test_schema
GT_MYSQL_ENDPOINTS: mysql://greptimedb:admin@127.0.0.1:3306/mysql
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
UNITTEST_LOG_DIR: "__unittest_logs"
coverage: coverage:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && github.event_name == 'merge_group' }} if: github.event.pull_request.draft == false
runs-on: ubuntu-22.04-8-cores runs-on: ubuntu-20.04-8-cores
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: rui314/setup-mold@v1 - uses: KyleMayes/install-llvm-action@v1
with:
version: "14.0"
- name: Install toolchain - name: Install toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1 uses: actions-rust-lang/setup-rust-toolchain@v1
with: with:
components: llvm-tools components: llvm-tools-preview
cache: false
- name: Rust Cache - name: Rust Cache
uses: Swatinem/rust-cache@v2 uses: Swatinem/rust-cache@v2
with: with:
# Shares cross multiple jobs # Shares cross multiple jobs
shared-key: "coverage-test" shared-key: "coverage-test"
save-if: ${{ github.ref == 'refs/heads/main' }} - name: Docker Cache
uses: ScribeMD/docker-cache@0.3.7
with:
key: docker-${{ runner.os }}-coverage
- name: Install latest nextest release - name: Install latest nextest release
uses: taiki-e/install-action@nextest uses: taiki-e/install-action@nextest
- name: Install cargo-llvm-cov - name: Install cargo-llvm-cov
uses: taiki-e/install-action@cargo-llvm-cov uses: taiki-e/install-action@cargo-llvm-cov
- name: Install Python
- name: Setup external services uses: actions/setup-python@v5
working-directory: tests-integration/fixtures with:
run: ../../.github/scripts/pull-test-deps-images.sh && docker compose up -d --wait python-version: '3.10'
- name: Install PyArrow Package
run: pip install pyarrow numpy
- name: Setup etcd server
working-directory: tests-integration/fixtures/etcd
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup kafka server
working-directory: tests-integration/fixtures/kafka
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup minio
working-directory: tests-integration/fixtures/minio
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Setup postgres server
working-directory: tests-integration/fixtures/postgres
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Run nextest cases - name: Run nextest cases
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F dashboard -F pg_kvbackend -F mysql_kvbackend run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F pyo3_backend -F dashboard
env: env:
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold" CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
RUST_BACKTRACE: 1 RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0 CARGO_INCREMENTAL: 0
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }} GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
@@ -827,12 +722,8 @@ jobs:
GT_MINIO_ACCESS_KEY: superpower_password GT_MINIO_ACCESS_KEY: superpower_password
GT_MINIO_REGION: us-west-2 GT_MINIO_REGION: us-west-2
GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000 GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
GT_ETCD_TLS_ENDPOINTS: https://127.0.0.1:2378
GT_ETCD_ENDPOINTS: http://127.0.0.1:2379 GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres GT_POSTGRES_ENDPOINTS: postgres://greptimedb:admin@127.0.0.1:5432/postgres
GT_POSTGRES15_ENDPOINTS: postgres://test_user:test_password@127.0.0.1:5433/postgres
GT_POSTGRES15_SCHEMA: test_schema
GT_MYSQL_ENDPOINTS: mysql://greptimedb:admin@127.0.0.1:3306/mysql
GT_KAFKA_ENDPOINTS: 127.0.0.1:9092 GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093 GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
UNITTEST_LOG_DIR: "__unittest_logs" UNITTEST_LOG_DIR: "__unittest_logs"
@@ -846,10 +737,9 @@ jobs:
verbose: true verbose: true
# compat: # compat:
# if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
# name: Compatibility Test # name: Compatibility Test
# needs: build # needs: build
# runs-on: ubuntu-22.04 # runs-on: ubuntu-20.04
# timeout-minutes: 60 # timeout-minutes: 60
# steps: # steps:
# - uses: actions/checkout@v4 # - uses: actions/checkout@v4

View File

@@ -3,21 +3,16 @@ on:
pull_request_target: pull_request_target:
types: [opened, edited] types: [opened, edited]
concurrency: permissions:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} pull-requests: write
cancel-in-progress: true contents: read
jobs: jobs:
docbot: docbot:
runs-on: ubuntu-latest runs-on: ubuntu-20.04
permissions:
pull-requests: write
contents: read
timeout-minutes: 10 timeout-minutes: 10
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: ./.github/actions/setup-cyborg - uses: ./.github/actions/setup-cyborg
- name: Maybe Follow Up Docs Issue - name: Maybe Follow Up Docs Issue
working-directory: cyborg working-directory: cyborg

View File

@@ -10,7 +10,6 @@ on:
- 'docker/**' - 'docker/**'
- '.gitignore' - '.gitignore'
- 'grafana/**' - 'grafana/**'
- 'Makefile'
push: push:
branches: branches:
- main - main
@@ -22,7 +21,6 @@ on:
- 'docker/**' - 'docker/**'
- '.gitignore' - '.gitignore'
- 'grafana/**' - 'grafana/**'
- 'Makefile'
workflow_dispatch: workflow_dispatch:
name: CI name: CI
@@ -33,53 +31,38 @@ name: CI
jobs: jobs:
typos: typos:
name: Spell Check with Typos name: Spell Check with Typos
runs-on: ubuntu-latest runs-on: ubuntu-20.04
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: crate-ci/typos@master - uses: crate-ci/typos@master
license-header-check: license-header-check:
runs-on: ubuntu-latest runs-on: ubuntu-20.04
name: Check License Header name: Check License Header
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: korandoru/hawkeye@v5 - uses: korandoru/hawkeye@v5
check: check:
name: Check name: Check
runs-on: ubuntu-latest runs-on: ubuntu-20.04
steps: steps:
- run: 'echo "No action required"' - run: 'echo "No action required"'
fmt: fmt:
name: Rustfmt name: Rustfmt
runs-on: ubuntu-latest runs-on: ubuntu-20.04
steps: steps:
- run: 'echo "No action required"' - run: 'echo "No action required"'
clippy: clippy:
name: Clippy name: Clippy
runs-on: ubuntu-latest runs-on: ubuntu-20.04
steps:
- run: 'echo "No action required"'
check-udeps:
name: Unused Dependencies
runs-on: ubuntu-latest
steps: steps:
- run: 'echo "No action required"' - run: 'echo "No action required"'
coverage: coverage:
runs-on: ubuntu-latest runs-on: ubuntu-20.04
steps:
- run: 'echo "No action required"'
test:
runs-on: ubuntu-latest
steps: steps:
- run: 'echo "No action required"' - run: 'echo "No action required"'
@@ -88,10 +71,9 @@ jobs:
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
strategy: strategy:
matrix: matrix:
os: [ ubuntu-latest ] os: [ ubuntu-20.04 ]
mode: mode:
- name: "Basic" - name: "Basic"
- name: "Remote WAL" - name: "Remote WAL"
- name: "Flat format"
steps: steps:
- run: 'echo "No action required"' - run: 'echo "No action required"'

View File

@@ -1,26 +0,0 @@
name: Check Grafana Panels
on:
pull_request:
branches:
- main
paths:
- 'grafana/**' # Trigger only when files under the grafana/ directory change
jobs:
check-panels:
runs-on: ubuntu-latest
steps:
# Check out the repository
- name: Checkout repository
uses: actions/checkout@v4
# Install jq (required for the script)
- name: Install jq
run: sudo apt-get install -y jq
# Run the Grafana dashboard checks
- name: Check grafana dashboards
run: |
make check-dashboards

View File

@@ -1,57 +0,0 @@
name: Multi-language Integration Tests
on:
push:
branches:
- main
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
build-greptimedb:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
name: Build GreptimeDB binary
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- uses: Swatinem/rust-cache@v2
with:
shared-key: "multi-lang-build"
cache-all-crates: "true"
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Install cargo-gc-bin
shell: bash
run: cargo install cargo-gc-bin --force
- name: Build greptime binary
shell: bash
run: cargo gc -- --bin greptime --features "pg_kvbackend,mysql_kvbackend"
- name: Pack greptime binary
shell: bash
run: |
mkdir bin && \
mv ./target/debug/greptime bin
- name: Print greptime binary info
run: ls -lh bin
- name: Upload greptime binary
uses: actions/upload-artifact@v4
with:
name: greptime-bin
path: bin/
retention-days: 1
run-multi-lang-tests:
name: Run Multi-language SDK Tests
needs: build-greptimedb
uses: ./.github/workflows/run-multi-lang-tests.yml
with:
artifact-name: greptime-bin

View File

@@ -12,13 +12,13 @@ on:
linux_amd64_runner: linux_amd64_runner:
type: choice type: choice
description: The runner uses to build linux-amd64 artifacts description: The runner uses to build linux-amd64 artifacts
default: ec2-c6i.4xlarge-amd64 default: ec2-c6i.2xlarge-amd64
options: options:
- ubuntu-22.04 - ubuntu-20.04
- ubuntu-22.04-8-cores - ubuntu-20.04-8-cores
- ubuntu-22.04-16-cores - ubuntu-20.04-16-cores
- ubuntu-22.04-32-cores - ubuntu-20.04-32-cores
- ubuntu-22.04-64-cores - ubuntu-20.04-64-cores
- ec2-c6i.xlarge-amd64 # 4C8G - ec2-c6i.xlarge-amd64 # 4C8G
- ec2-c6i.2xlarge-amd64 # 8C16G - ec2-c6i.2xlarge-amd64 # 8C16G
- ec2-c6i.4xlarge-amd64 # 16C32G - ec2-c6i.4xlarge-amd64 # 16C32G
@@ -27,7 +27,7 @@ on:
linux_arm64_runner: linux_arm64_runner:
type: choice type: choice
description: The runner uses to build linux-arm64 artifacts description: The runner uses to build linux-arm64 artifacts
default: ec2-c6g.4xlarge-arm64 default: ec2-c6g.2xlarge-arm64
options: options:
- ec2-c6g.xlarge-arm64 # 4C8G - ec2-c6g.xlarge-arm64 # 4C8G
- ec2-c6g.2xlarge-arm64 # 8C16G - ec2-c6g.2xlarge-arm64 # 8C16G
@@ -66,11 +66,18 @@ env:
NIGHTLY_RELEASE_PREFIX: nightly NIGHTLY_RELEASE_PREFIX: nightly
# Use the different image name to avoid conflict with the release images.
# The DockerHub image will be greptime/greptimedb-nightly.
IMAGE_NAME: greptimedb-nightly
permissions:
issues: write
jobs: jobs:
allocate-runners: allocate-runners:
name: Allocate runners name: Allocate runners
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }} if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-latest runs-on: ubuntu-20.04
outputs: outputs:
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }} linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }} linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
@@ -88,7 +95,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Create version - name: Create version
id: create-version id: create-version
@@ -141,7 +147,6 @@ jobs:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/build-linux-artifacts - uses: ./.github/actions/build-linux-artifacts
with: with:
@@ -163,7 +168,6 @@ jobs:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/build-linux-artifacts - uses: ./.github/actions/build-linux-artifacts
with: with:
@@ -174,18 +178,6 @@ jobs:
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }} image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }} image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
run-multi-lang-tests:
name: Run Multi-language SDK Tests
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'schedule' }}
needs: [
allocate-runners,
build-linux-amd64-artifacts,
]
uses: ./.github/workflows/run-multi-lang-tests.yml
with:
artifact-name: greptime-linux-amd64-${{ needs.allocate-runners.outputs.version }}
artifact-is-tarball: true
release-images-to-dockerhub: release-images-to-dockerhub:
name: Build and push images to DockerHub name: Build and push images to DockerHub
if: ${{ inputs.release_images || github.event_name == 'schedule' }} if: ${{ inputs.release_images || github.event_name == 'schedule' }}
@@ -194,25 +186,24 @@ jobs:
build-linux-amd64-artifacts, build-linux-amd64-artifacts,
build-linux-arm64-artifacts, build-linux-arm64-artifacts,
] ]
runs-on: ubuntu-latest runs-on: ubuntu-20.04
outputs: outputs:
nightly-build-result: ${{ steps.set-nightly-build-result.outputs.nightly-build-result }} nightly-build-result: ${{ steps.set-nightly-build-result.outputs.nightly-build-result }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Build and push images to dockerhub - name: Build and push images to dockerhub
uses: ./.github/actions/build-images uses: ./.github/actions/build-images
with: with:
image-registry: docker.io image-registry: docker.io
image-namespace: ${{ vars.IMAGE_NAMESPACE }} image-namespace: ${{ vars.IMAGE_NAMESPACE }}
image-name: ${{ vars.NIGHTLY_BUILD_IMAGE_NAME }} image-name: ${{ env.IMAGE_NAME }}
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }} image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }} image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
version: ${{ needs.allocate-runners.outputs.version }} version: ${{ needs.allocate-runners.outputs.version }}
push-latest-tag: false push-latest-tag: true
- name: Set nightly build result - name: Set nightly build result
id: set-nightly-build-result id: set-nightly-build-result
@@ -226,7 +217,7 @@ jobs:
allocate-runners, allocate-runners,
release-images-to-dockerhub, release-images-to-dockerhub,
] ]
runs-on: ubuntu-latest runs-on: ubuntu-20.04
# When we push to ACR, it's easy to fail due to some unknown network issues. # When we push to ACR, it's easy to fail due to some unknown network issues.
# However, we don't want to fail the whole workflow because of this. # However, we don't want to fail the whole workflow because of this.
# The ACR have daily sync with DockerHub, so don't worry about the image not being updated. # The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
@@ -235,14 +226,13 @@ jobs:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Release artifacts to CN region - name: Release artifacts to CN region
uses: ./.github/actions/release-cn-artifacts uses: ./.github/actions/release-cn-artifacts
with: with:
src-image-registry: docker.io src-image-registry: docker.io
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }} src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
src-image-name: ${{ vars.NIGHTLY_BUILD_IMAGE_NAME }} src-image-name: ${{ env.IMAGE_NAME }}
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }} dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }} dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }} dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
@@ -252,16 +242,15 @@ jobs:
aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }} aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }} aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }} aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
upload-to-s3: false
dev-mode: false dev-mode: false
update-version-info: false # Don't update version info in S3. update-version-info: false # Don't update version info in S3.
push-latest-tag: false push-latest-tag: true
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released. stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
name: Stop linux-amd64 runner name: Stop linux-amd64 runner
# Only run this job when the runner is allocated. # Only run this job when the runner is allocated.
if: ${{ always() }} if: ${{ always() }}
runs-on: ubuntu-latest runs-on: ubuntu-20.04
needs: [ needs: [
allocate-runners, allocate-runners,
build-linux-amd64-artifacts, build-linux-amd64-artifacts,
@@ -271,7 +260,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Stop EC2 runner - name: Stop EC2 runner
uses: ./.github/actions/stop-runner uses: ./.github/actions/stop-runner
@@ -287,7 +275,7 @@ jobs:
name: Stop linux-arm64 runner name: Stop linux-arm64 runner
# Only run this job when the runner is allocated. # Only run this job when the runner is allocated.
if: ${{ always() }} if: ${{ always() }}
runs-on: ubuntu-latest runs-on: ubuntu-20.04
needs: [ needs: [
allocate-runners, allocate-runners,
build-linux-arm64-artifacts, build-linux-arm64-artifacts,
@@ -297,7 +285,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Stop EC2 runner - name: Stop EC2 runner
uses: ./.github/actions/stop-runner uses: ./.github/actions/stop-runner
@@ -313,18 +300,13 @@ jobs:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run. if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
name: Send notification to Greptime team name: Send notification to Greptime team
needs: [ needs: [
release-images-to-dockerhub, release-images-to-dockerhub
run-multi-lang-tests,
] ]
runs-on: ubuntu-latest runs-on: ubuntu-20.04
permissions:
issues: write
env: env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }} SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: ./.github/actions/setup-cyborg - uses: ./.github/actions/setup-cyborg
- name: Report CI status - name: Report CI status
id: report-ci-status id: report-ci-status
@@ -332,17 +314,17 @@ jobs:
run: pnpm tsx bin/report-ci-failure.ts run: pnpm tsx bin/report-ci-failure.ts
env: env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' && (needs.run-multi-lang-tests.result == 'success' || needs.run-multi-lang-tests.result == 'skipped') }} CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
- name: Notify nightly build successful result - name: Notify nightly build successful result
uses: slackapi/slack-github-action@v1.23.0 uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' && (needs.run-multi-lang-tests.result == 'success' || needs.run-multi-lang-tests.result == 'skipped') }} if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
with: with:
payload: | payload: |
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."} {"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
- name: Notify nightly build failed result - name: Notify nightly build failed result
uses: slackapi/slack-github-action@v1.23.0 uses: slackapi/slack-github-action@v1.23.0
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' || needs.run-multi-lang-tests.result == 'failure' }} if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' }}
with: with:
payload: | payload: |
{"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."} {"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check ${{ steps.report-ci-status.outputs.html_url }}."}

View File

@@ -1,6 +1,6 @@
on: on:
schedule: schedule:
- cron: "0 23 * * 1-4" - cron: "0 23 * * 1-5"
workflow_dispatch: workflow_dispatch:
name: Nightly CI name: Nightly CI
@@ -9,21 +9,19 @@ concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true cancel-in-progress: true
permissions:
issues: write
jobs: jobs:
sqlness-test: sqlness-test:
name: Run sqlness test name: Run sqlness test
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }} if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-latest runs-on: ubuntu-22.04
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Check install.sh
run: ./.github/scripts/check-install-script.sh
- name: Run sqlness test - name: Run sqlness test
uses: ./.github/actions/sqlness-test uses: ./.github/actions/sqlness-test
with: with:
@@ -44,14 +42,9 @@ jobs:
name: Sqlness tests on Windows name: Sqlness tests on Windows
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }} if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: windows-2022-8-cores runs-on: windows-2022-8-cores
permissions:
issues: write
timeout-minutes: 60 timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/setup-cyborg - uses: ./.github/actions/setup-cyborg
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
@@ -79,9 +72,6 @@ jobs:
steps: steps:
- run: git config --global core.autocrlf false - run: git config --global core.autocrlf false
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/setup-cyborg - uses: ./.github/actions/setup-cyborg
- uses: arduino/setup-protoc@v3 - uses: arduino/setup-protoc@v3
with: with:
@@ -97,42 +87,34 @@ jobs:
uses: Swatinem/rust-cache@v2 uses: Swatinem/rust-cache@v2
- name: Install Cargo Nextest - name: Install Cargo Nextest
uses: taiki-e/install-action@nextest uses: taiki-e/install-action@nextest
- name: Install Python
uses: actions/setup-python@v5
with:
python-version: "3.10"
- name: Install PyArrow Package
run: pip install pyarrow numpy
- name: Install WSL distribution - name: Install WSL distribution
uses: Vampire/setup-wsl@v2 uses: Vampire/setup-wsl@v2
with: with:
distribution: Ubuntu-22.04 distribution: Ubuntu-22.04
- name: Running tests - name: Running tests
run: cargo nextest run -F dashboard run: cargo nextest run -F pyo3_backend,dashboard
env: env:
CARGO_BUILD_RUSTFLAGS: "-C linker=lld-link" CARGO_BUILD_RUSTFLAGS: "-C linker=lld-link"
RUST_BACKTRACE: 1 RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0 CARGO_INCREMENTAL: 0
RUSTUP_WINDOWS_PATH_ADD_BIN: 1 # Workaround for https://github.com/nextest-rs/nextest/issues/1493
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }} GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }} GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }} GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }} GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
UNITTEST_LOG_DIR: "__unittest_logs" UNITTEST_LOG_DIR: "__unittest_logs"
cleanbuild-linux-nix:
name: Run clean build on Linux
runs-on: ubuntu-latest
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
timeout-minutes: 45
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- uses: cachix/install-nix-action@v31
- run: nix develop --command cargo check --bin greptime
env:
CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
check-status: check-status:
name: Check status name: Check status
needs: [sqlness-test, sqlness-windows, test-on-windows] needs: [sqlness-test, sqlness-windows, test-on-windows]
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }} if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-latest runs-on: ubuntu-20.04
outputs: outputs:
check-result: ${{ steps.set-check-result.outputs.check-result }} check-result: ${{ steps.set-check-result.outputs.check-result }}
steps: steps:
@@ -145,14 +127,11 @@ jobs:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run. if: ${{ github.repository == 'GreptimeTeam/greptimedb' && always() }} # Not requiring successful dependent jobs, always run.
name: Send notification to Greptime team name: Send notification to Greptime team
needs: [check-status] needs: [check-status]
runs-on: ubuntu-latest runs-on: ubuntu-20.04
env: env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }} SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/setup-cyborg - uses: ./.github/actions/setup-cyborg
- name: Report CI status - name: Report CI status
id: report-ci-status id: report-ci-status

View File

@@ -1,42 +0,0 @@
name: 'PR Labeling'
on:
pull_request_target:
types:
- opened
- synchronize
- reopened
permissions:
contents: read
pull-requests: write
issues: write
jobs:
labeler:
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v4
- uses: actions/labeler@v5
with:
configuration-path: ".github/labeler.yaml"
repo-token: "${{ secrets.GITHUB_TOKEN }}"
size-label:
runs-on: ubuntu-latest
steps:
- uses: pascalgn/size-label-action@v0.5.5
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
with:
sizes: >
{
"0": "XS",
"100": "S",
"300": "M",
"1000": "L",
"1500": "XL",
"2000": "XXL"
}

View File

@@ -1,36 +0,0 @@
name: PR Review Reminder
on:
schedule:
# Run at 9:00 AM UTC+8 (01:00 AM UTC) on Monday, Wednesday, Friday
- cron: '0 1 * * 1,3,5'
workflow_dispatch:
jobs:
pr-review-reminder:
name: Send PR Review Reminders
runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: read
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: '20'
- name: Install dependencies
working-directory: .github/scripts
run: npm ci
- name: Run PR review reminder
working-directory: .github/scripts
run: node pr-review-reminder.js
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SLACK_PR_REVIEW_WEBHOOK_URL: ${{ vars.SLACK_PR_REVIEW_WEBHOOK_URL }}
GITHUBID_SLACKID_MAPPING: ${{ vars.GITHUBID_SLACKID_MAPPING }}

View File

@@ -24,20 +24,12 @@ on:
description: Release dev-builder-android image description: Release dev-builder-android image
required: false required: false
default: false default: false
update_dev_builder_image_tag:
type: boolean
description: Update the DEV_BUILDER_IMAGE_TAG in Makefile and create a PR
required: false
default: false
jobs: jobs:
release-dev-builder-images: release-dev-builder-images:
name: Release dev builder images name: Release dev builder images
# The jobs are triggered by the following events: if: ${{ inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }} # Only manually trigger this job.
# 1. Manually triggered workflow_dispatch event runs-on: ubuntu-20.04-16-cores
# 2. Push event when the PR that modifies the `rust-toolchain.toml` or `docker/dev-builder/**` is merged to main
if: ${{ github.event_name == 'push' || inputs.release_dev_builder_ubuntu_image || inputs.release_dev_builder_centos_image || inputs.release_dev_builder_android_image }}
runs-on: ubuntu-latest
outputs: outputs:
version: ${{ steps.set-version.outputs.version }} version: ${{ steps.set-version.outputs.version }}
steps: steps:
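The comment above describes a push trigger scoped to toolchain and dev-builder changes; the corresponding on: block is not part of this hunk, so the following is only a sketch of how such a trigger is typically expressed (the paths filter is an assumption, not the repository's actual configuration):

    on:
      workflow_dispatch:
      push:
        branches:
          - main
        paths:
          - 'rust-toolchain.toml'
          - 'docker/dev-builder/**'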
@@ -45,7 +37,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Configure build image version - name: Configure build image version
id: set-version id: set-version
@@ -65,13 +56,13 @@ jobs:
version: ${{ env.VERSION }} version: ${{ env.VERSION }}
dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }} dockerhub-image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }} dockerhub-image-registry-token: ${{ secrets.DOCKERHUB_TOKEN }}
build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }} build-dev-builder-ubuntu: ${{ inputs.release_dev_builder_ubuntu_image }}
build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }} build-dev-builder-centos: ${{ inputs.release_dev_builder_centos_image }}
build-dev-builder-android: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }} build-dev-builder-android: ${{ inputs.release_dev_builder_android_image }}
release-dev-builder-images-ecr: release-dev-builder-images-ecr:
name: Release dev builder images to AWS ECR name: Release dev builder images to AWS ECR
runs-on: ubuntu-latest runs-on: ubuntu-20.04
needs: [ needs: [
release-dev-builder-images release-dev-builder-images
] ]
@@ -93,70 +84,52 @@ jobs:
- name: Push dev-builder-ubuntu image - name: Push dev-builder-ubuntu image
shell: bash shell: bash
if: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }} if: ${{ inputs.release_dev_builder_ubuntu_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
ECR_IMAGE_REGISTRY: ${{ vars.ECR_IMAGE_REGISTRY }}
ECR_IMAGE_NAMESPACE: ${{ vars.ECR_IMAGE_NAMESPACE }}
run: | run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \ docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \ -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \ quay.io/skopeo/stable:latest \
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION \ copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }} \
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }}
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \ docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \ -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \ quay.io/skopeo/stable:latest \
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-ubuntu:latest \ copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:latest \
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-ubuntu:latest docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-ubuntu:latest
- name: Push dev-builder-centos image - name: Push dev-builder-centos image
shell: bash shell: bash
if: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }} if: ${{ inputs.release_dev_builder_centos_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
ECR_IMAGE_REGISTRY: ${{ vars.ECR_IMAGE_REGISTRY }}
ECR_IMAGE_NAMESPACE: ${{ vars.ECR_IMAGE_NAMESPACE }}
run: | run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \ docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \ -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \ quay.io/skopeo/stable:latest \
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION \ copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }} \
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }}
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \ docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \ -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \ quay.io/skopeo/stable:latest \
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-centos:latest \ copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:latest \
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-centos:latest docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-centos:latest
- name: Push dev-builder-android image - name: Push dev-builder-android image
shell: bash shell: bash
if: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }} if: ${{ inputs.release_dev_builder_android_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
ECR_IMAGE_REGISTRY: ${{ vars.ECR_IMAGE_REGISTRY }}
ECR_IMAGE_NAMESPACE: ${{ vars.ECR_IMAGE_NAMESPACE }}
run: | run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \ docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \ -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \ quay.io/skopeo/stable:latest \
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION \ copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }} \
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }}
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \ docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \ -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \ quay.io/skopeo/stable:latest \
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:latest \ copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:latest \
docker://$ECR_IMAGE_REGISTRY/$ECR_IMAGE_NAMESPACE/dev-builder-android:latest docker://${{ vars.ECR_IMAGE_REGISTRY }}/${{ vars.ECR_IMAGE_NAMESPACE }}/dev-builder-android:latest
release-dev-builder-images-cn: # Note: Be careful issue: https://github.com/containers/skopeo/issues/1874 and we decide to use the latest stable skopeo container. release-dev-builder-images-cn: # Note: Be careful issue: https://github.com/containers/skopeo/issues/1874 and we decide to use the latest stable skopeo container.
name: Release dev builder images to CN region name: Release dev builder images to CN region
runs-on: ubuntu-latest runs-on: ubuntu-20.04
needs: [ needs: [
release-dev-builder-images release-dev-builder-images
] ]
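A pattern worth noting in the variant that routes ${{ ... }} values through env: is that the skopeo commands only reference plain shell variables; this is a common hardening against expression injection in run: scripts and it keeps the long copy commands readable. A minimal sketch of the pattern, with hypothetical job and variable names:

    - name: Copy image with skopeo
      shell: bash
      env:
        # Values come from the workflow context, not from inline interpolation in the script body.
        IMAGE_VERSION: ${{ needs.build.outputs.version }}
        DST_REGISTRY: ${{ vars.ECR_IMAGE_REGISTRY }}
      run: |
        echo "copying dev-builder:${IMAGE_VERSION} to ${DST_REGISTRY}"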
@@ -170,63 +143,30 @@ jobs:
- name: Push dev-builder-ubuntu image - name: Push dev-builder-ubuntu image
shell: bash shell: bash
if: ${{ inputs.release_dev_builder_ubuntu_image || github.event_name == 'push' }} if: ${{ inputs.release_dev_builder_ubuntu_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
ACR_IMAGE_REGISTRY: ${{ vars.ACR_IMAGE_REGISTRY }}
run: | run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \ docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \ -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \ quay.io/skopeo/stable:latest \
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION \ copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }} \
docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-ubuntu:$IMAGE_VERSION docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-ubuntu:${{ needs.release-dev-builder-images.outputs.version }}
- name: Push dev-builder-centos image - name: Push dev-builder-centos image
shell: bash shell: bash
if: ${{ inputs.release_dev_builder_centos_image || github.event_name == 'push' }} if: ${{ inputs.release_dev_builder_centos_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
ACR_IMAGE_REGISTRY: ${{ vars.ACR_IMAGE_REGISTRY }}
run: | run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \ docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \ -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \ quay.io/skopeo/stable:latest \
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION \ copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }} \
docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-centos:$IMAGE_VERSION docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-centos:${{ needs.release-dev-builder-images.outputs.version }}
- name: Push dev-builder-android image - name: Push dev-builder-android image
shell: bash shell: bash
if: ${{ inputs.release_dev_builder_android_image || github.event_name == 'push' }} if: ${{ inputs.release_dev_builder_android_image }}
env:
IMAGE_VERSION: ${{ needs.release-dev-builder-images.outputs.version }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE }}
ACR_IMAGE_REGISTRY: ${{ vars.ACR_IMAGE_REGISTRY }}
run: | run: |
docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \ docker run -v "${DOCKER_CONFIG:-$HOME/.docker}:/root/.docker:ro" \
-e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \ -e "REGISTRY_AUTH_FILE=/root/.docker/config.json" \
quay.io/skopeo/stable:latest \ quay.io/skopeo/stable:latest \
copy -a docker://docker.io/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION \ copy -a docker://docker.io/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }} \
docker://$ACR_IMAGE_REGISTRY/$IMAGE_NAMESPACE/dev-builder-android:$IMAGE_VERSION docker://${{ vars.ACR_IMAGE_REGISTRY }}/${{ vars.IMAGE_NAMESPACE }}/dev-builder-android:${{ needs.release-dev-builder-images.outputs.version }}
update-dev-builder-image-tag:
name: Update dev-builder image tag
runs-on: ubuntu-latest
permissions:
contents: write
pull-requests: write
if: ${{ github.event_name == 'push' || inputs.update_dev_builder_image_tag }}
needs: [
release-dev-builder-images
]
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Update dev-builder image tag
shell: bash
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
./.github/scripts/update-dev-builder-version.sh ${{ needs.release-dev-builder-images.outputs.version }}
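The contents of .github/scripts/update-dev-builder-version.sh are not shown in this diff. Based on the input description above (update DEV_BUILDER_IMAGE_TAG in the Makefile and create a PR), a hypothetical sketch of such a script could look like the following; every command here is an assumption, not the actual implementation:

    #!/usr/bin/env bash
    set -euo pipefail
    VERSION="$1"
    BRANCH="chore/update-dev-builder-image-tag-${VERSION}"
    # Rewrite the tag in the Makefile (variable name taken from the workflow input description).
    sed -i "s/^DEV_BUILDER_IMAGE_TAG ?= .*/DEV_BUILDER_IMAGE_TAG ?= ${VERSION}/" Makefile
    git checkout -b "${BRANCH}"
    git commit -am "chore: update dev-builder image tag to ${VERSION}"
    git push origin "${BRANCH}"
    # GITHUB_TOKEN from the step's env is picked up by the GitHub CLI.
    gh pr create --title "chore: update dev-builder image tag to ${VERSION}" --body "Automated update"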

View File

@@ -18,11 +18,11 @@ on:
description: The runner uses to build linux-amd64 artifacts description: The runner uses to build linux-amd64 artifacts
default: ec2-c6i.4xlarge-amd64 default: ec2-c6i.4xlarge-amd64
options: options:
- ubuntu-22.04 - ubuntu-20.04
- ubuntu-22.04-8-cores - ubuntu-20.04-8-cores
- ubuntu-22.04-16-cores - ubuntu-20.04-16-cores
- ubuntu-22.04-32-cores - ubuntu-20.04-32-cores
- ubuntu-22.04-64-cores - ubuntu-20.04-64-cores
- ec2-c6i.xlarge-amd64 # 4C8G - ec2-c6i.xlarge-amd64 # 4C8G
- ec2-c6i.2xlarge-amd64 # 8C16G - ec2-c6i.2xlarge-amd64 # 8C16G
- ec2-c6i.4xlarge-amd64 # 16C32G - ec2-c6i.4xlarge-amd64 # 16C32G
@@ -31,7 +31,7 @@ on:
linux_arm64_runner: linux_arm64_runner:
type: choice type: choice
description: The runner uses to build linux-arm64 artifacts description: The runner uses to build linux-arm64 artifacts
default: ec2-c6g.8xlarge-arm64 default: ec2-c6g.4xlarge-arm64
options: options:
- ubuntu-2204-32-cores-arm - ubuntu-2204-32-cores-arm
- ec2-c6g.xlarge-arm64 # 4C8G - ec2-c6g.xlarge-arm64 # 4C8G
@@ -88,14 +88,21 @@ env:
# Controls whether to run tests, include unit-test, integration-test and sqlness. # Controls whether to run tests, include unit-test, integration-test and sqlness.
DISABLE_RUN_TESTS: ${{ inputs.skip_test || vars.DEFAULT_SKIP_TEST }} DISABLE_RUN_TESTS: ${{ inputs.skip_test || vars.DEFAULT_SKIP_TEST }}
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nightly-20230313; # The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
NIGHTLY_RELEASE_PREFIX: nightly NIGHTLY_RELEASE_PREFIX: nightly
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
NEXT_RELEASE_VERSION: v0.10.0
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
permissions:
issues: write # Allows the action to create issues for cyborg.
contents: write # Allows the action to create a release.
jobs: jobs:
allocate-runners: allocate-runners:
name: Allocate runners name: Allocate runners
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }} if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
runs-on: ubuntu-latest runs-on: ubuntu-20.04
outputs: outputs:
linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }} linux-amd64-runner: ${{ steps.start-linux-amd64-runner.outputs.label }}
linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }} linux-arm64-runner: ${{ steps.start-linux-arm64-runner.outputs.label }}
@@ -110,15 +117,11 @@ jobs:
# The 'version' use as the global tag name of the release workflow. # The 'version' use as the global tag name of the release workflow.
version: ${{ steps.create-version.outputs.version }} version: ${{ steps.create-version.outputs.version }}
# The 'is-current-version-latest' determines whether to update 'latest' Docker tags and downstream repositories.
is-current-version-latest: ${{ steps.check-version.outputs.is-current-version-latest }}
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Check Rust toolchain version - name: Check Rust toolchain version
shell: bash shell: bash
@@ -127,7 +130,7 @@ jobs:
# The create-version will create a global variable named 'version' in the global workflows. # The create-version will create a global variable named 'version' in the global workflows.
# - If it's a tag push release, the version is the tag name(${{ github.ref_name }}); # - If it's a tag push release, the version is the tag name(${{ github.ref_name }});
# - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nightly-20230313; # - If it's a scheduled release, the version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-$buildTime', like v0.2.0-nigthly-20230313;
# - If it's a manual release, the version is '${{ env.NEXT_RELEASE_VERSION }}-<short-git-sha>-YYYYMMDDSS', like v0.2.0-e5b243c-2023071245; # - If it's a manual release, the version is '${{ env.NEXT_RELEASE_VERSION }}-<short-git-sha>-YYYYMMDDSS', like v0.2.0-e5b243c-2023071245;
- name: Create version - name: Create version
id: create-version id: create-version
@@ -136,13 +139,9 @@ jobs:
env: env:
GITHUB_EVENT_NAME: ${{ github.event_name }} GITHUB_EVENT_NAME: ${{ github.event_name }}
GITHUB_REF_NAME: ${{ github.ref_name }} GITHUB_REF_NAME: ${{ github.ref_name }}
NEXT_RELEASE_VERSION: ${{ env.NEXT_RELEASE_VERSION }}
NIGHTLY_RELEASE_PREFIX: ${{ env.NIGHTLY_RELEASE_PREFIX }} NIGHTLY_RELEASE_PREFIX: ${{ env.NIGHTLY_RELEASE_PREFIX }}
- name: Check version
id: check-version
run: |
./.github/scripts/check-version.sh "${{ steps.create-version.outputs.version }}"
- name: Allocate linux-amd64 runner - name: Allocate linux-amd64 runner
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }} if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
uses: ./.github/actions/start-runner uses: ./.github/actions/start-runner
@@ -182,7 +181,6 @@ jobs:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/build-linux-artifacts - uses: ./.github/actions/build-linux-artifacts
with: with:
@@ -204,7 +202,6 @@ jobs:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/build-linux-artifacts - uses: ./.github/actions/build-linux-artifacts
with: with:
@@ -215,18 +212,6 @@ jobs:
image-registry: ${{ vars.ECR_IMAGE_REGISTRY }} image-registry: ${{ vars.ECR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }} image-namespace: ${{ vars.ECR_IMAGE_NAMESPACE }}
run-multi-lang-tests:
name: Run Multi-language SDK Tests
if: ${{ inputs.build_linux_amd64_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [
allocate-runners,
build-linux-amd64-artifacts,
]
uses: ./.github/workflows/run-multi-lang-tests.yml
with:
artifact-name: greptime-linux-amd64-${{ needs.allocate-runners.outputs.version }}
artifact-is-tarball: true
build-macos-artifacts: build-macos-artifacts:
name: Build macOS artifacts name: Build macOS artifacts
strategy: strategy:
@@ -237,10 +222,18 @@ jobs:
arch: aarch64-apple-darwin arch: aarch64-apple-darwin
features: servers/dashboard features: servers/dashboard
artifacts-dir-prefix: greptime-darwin-arm64 artifacts-dir-prefix: greptime-darwin-arm64
- os: ${{ needs.allocate-runners.outputs.macos-runner }}
arch: aarch64-apple-darwin
features: pyo3_backend,servers/dashboard
artifacts-dir-prefix: greptime-darwin-arm64-pyo3
- os: ${{ needs.allocate-runners.outputs.macos-runner }} - os: ${{ needs.allocate-runners.outputs.macos-runner }}
features: servers/dashboard features: servers/dashboard
arch: x86_64-apple-darwin arch: x86_64-apple-darwin
artifacts-dir-prefix: greptime-darwin-amd64 artifacts-dir-prefix: greptime-darwin-amd64
- os: ${{ needs.allocate-runners.outputs.macos-runner }}
features: pyo3_backend,servers/dashboard
arch: x86_64-apple-darwin
artifacts-dir-prefix: greptime-darwin-amd64-pyo3
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
outputs: outputs:
build-macos-result: ${{ steps.set-build-macos-result.outputs.build-macos-result }} build-macos-result: ${{ steps.set-build-macos-result.outputs.build-macos-result }}
@@ -252,7 +245,6 @@ jobs:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/build-macos-artifacts - uses: ./.github/actions/build-macos-artifacts
with: with:
@@ -279,6 +271,10 @@ jobs:
arch: x86_64-pc-windows-msvc arch: x86_64-pc-windows-msvc
features: servers/dashboard features: servers/dashboard
artifacts-dir-prefix: greptime-windows-amd64 artifacts-dir-prefix: greptime-windows-amd64
- os: ${{ needs.allocate-runners.outputs.windows-runner }}
arch: x86_64-pc-windows-msvc
features: pyo3_backend,servers/dashboard
artifacts-dir-prefix: greptime-windows-amd64-pyo3
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
outputs: outputs:
build-windows-result: ${{ steps.set-build-windows-result.outputs.build-windows-result }} build-windows-result: ${{ steps.set-build-windows-result.outputs.build-windows-result }}
@@ -292,7 +288,6 @@ jobs:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/build-windows-artifacts - uses: ./.github/actions/build-windows-artifacts
with: with:
@@ -315,27 +310,23 @@ jobs:
allocate-runners, allocate-runners,
build-linux-amd64-artifacts, build-linux-amd64-artifacts,
build-linux-arm64-artifacts, build-linux-arm64-artifacts,
run-multi-lang-tests,
] ]
runs-on: ubuntu-latest runs-on: ubuntu-2004-16-cores
outputs: outputs:
build-image-result: ${{ steps.set-build-image-result.outputs.build-image-result }} build-image-result: ${{ steps.set-build-image-result.outputs.build-image-result }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Build and push images to dockerhub - name: Build and push images to dockerhub
uses: ./.github/actions/build-images uses: ./.github/actions/build-images
with: with:
image-registry: docker.io image-registry: docker.io
image-namespace: ${{ vars.IMAGE_NAMESPACE }} image-namespace: ${{ vars.IMAGE_NAMESPACE }}
image-name: ${{ vars.GREPTIMEDB_IMAGE_NAME }}
image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }} image-registry-username: ${{ secrets.DOCKERHUB_USERNAME }}
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }} image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
version: ${{ needs.allocate-runners.outputs.version }} version: ${{ needs.allocate-runners.outputs.version }}
push-latest-tag: ${{ needs.allocate-runners.outputs.is-current-version-latest == 'true' && github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }}
- name: Set build image result - name: Set build image result
id: set-build-image-result id: set-build-image-result
@@ -353,7 +344,7 @@ jobs:
build-windows-artifacts, build-windows-artifacts,
release-images-to-dockerhub, release-images-to-dockerhub,
] ]
runs-on: ubuntu-latest-16-cores runs-on: ubuntu-20.04
# When we push to ACR, it's easy to fail due to some unknown network issues. # When we push to ACR, it's easy to fail due to some unknown network issues.
# However, we don't want to fail the whole workflow because of this. # However, we don't want to fail the whole workflow because of this.
# The ACR have daily sync with DockerHub, so don't worry about the image not being updated. # The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
@@ -362,14 +353,13 @@ jobs:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Release artifacts to CN region - name: Release artifacts to CN region
uses: ./.github/actions/release-cn-artifacts uses: ./.github/actions/release-cn-artifacts
with: with:
src-image-registry: docker.io src-image-registry: docker.io
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }} src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
src-image-name: ${{ vars.GREPTIMEDB_IMAGE_NAME }} src-image-name: greptimedb
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }} dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }} dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }} dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
@@ -380,9 +370,8 @@ jobs:
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }} aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }} aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
dev-mode: false dev-mode: false
upload-to-s3: true
update-version-info: true update-version-info: true
push-latest-tag: ${{ needs.allocate-runners.outputs.is-current-version-latest == 'true' && github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' }} push-latest-tag: true
publish-github-release: publish-github-release:
name: Create GitHub release and upload artifacts name: Create GitHub release and upload artifacts
@@ -394,14 +383,12 @@ jobs:
build-macos-artifacts, build-macos-artifacts,
build-windows-artifacts, build-windows-artifacts,
release-images-to-dockerhub, release-images-to-dockerhub,
run-multi-lang-tests,
] ]
runs-on: ubuntu-latest runs-on: ubuntu-20.04
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Publish GitHub release - name: Publish GitHub release
uses: ./.github/actions/publish-github-release uses: ./.github/actions/publish-github-release
@@ -410,12 +397,12 @@ jobs:
### Stop runners ### ### Stop runners ###
# It's very necessary to split the job of releasing runners into 'stop-linux-amd64-runner' and 'stop-linux-arm64-runner'. # It's very necessary to split the job of releasing runners into 'stop-linux-amd64-runner' and 'stop-linux-arm64-runner'.
# Because we can terminate the specified EC2 instance immediately after the job is finished without unnecessary waiting. # Because we can terminate the specified EC2 instance immediately after the job is finished without uncessary waiting.
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released. stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
name: Stop linux-amd64 runner name: Stop linux-amd64 runner
# Only run this job when the runner is allocated. # Only run this job when the runner is allocated.
if: ${{ always() }} if: ${{ always() }}
runs-on: ubuntu-latest runs-on: ubuntu-20.04
needs: [ needs: [
allocate-runners, allocate-runners,
build-linux-amd64-artifacts, build-linux-amd64-artifacts,
@@ -425,7 +412,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Stop EC2 runner - name: Stop EC2 runner
uses: ./.github/actions/stop-runner uses: ./.github/actions/stop-runner
@@ -441,7 +427,7 @@ jobs:
name: Stop linux-arm64 runner name: Stop linux-arm64 runner
# Only run this job when the runner is allocated. # Only run this job when the runner is allocated.
if: ${{ always() }} if: ${{ always() }}
runs-on: ubuntu-latest runs-on: ubuntu-20.04
needs: [ needs: [
allocate-runners, allocate-runners,
build-linux-arm64-artifacts, build-linux-arm64-artifacts,
@@ -451,7 +437,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
persist-credentials: false
- name: Stop EC2 runner - name: Stop EC2 runner
uses: ./.github/actions/stop-runner uses: ./.github/actions/stop-runner
@@ -463,74 +448,6 @@ jobs:
aws-region: ${{ vars.EC2_RUNNER_REGION }} aws-region: ${{ vars.EC2_RUNNER_REGION }}
github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
bump-downstream-repo-versions:
name: Bump downstream repo versions
if: ${{ github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [allocate-runners, publish-github-release]
runs-on: ubuntu-latest
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
permissions:
issues: write # Allows the action to create issues for cyborg.
contents: write # Allows the action to create a release.
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/setup-cyborg
- name: Bump downstream repo versions
working-directory: cyborg
run: pnpm tsx bin/bump-versions.ts
env:
TARGET_REPOS: website,docs,demo
VERSION: ${{ needs.allocate-runners.outputs.version }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
WEBSITE_REPO_TOKEN: ${{ secrets.WEBSITE_REPO_TOKEN }}
DOCS_REPO_TOKEN: ${{ secrets.DOCS_REPO_TOKEN }}
DEMO_REPO_TOKEN: ${{ secrets.DEMO_REPO_TOKEN }}
bump-helm-charts-version:
name: Bump helm charts version
if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' && needs.allocate-runners.outputs.is-current-version-latest == 'true' }}
needs: [allocate-runners, publish-github-release]
runs-on: ubuntu-latest
permissions:
contents: write
pull-requests: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Bump helm charts version
env:
GITHUB_TOKEN: ${{ secrets.HELM_CHARTS_REPO_TOKEN }}
VERSION: ${{ needs.allocate-runners.outputs.version }}
run: |
./.github/scripts/update-helm-charts-version.sh
bump-homebrew-greptime-version:
name: Bump homebrew greptime version
if: ${{ github.ref_type == 'tag' && !contains(github.ref_name, 'nightly') && github.event_name != 'schedule' && needs.allocate-runners.outputs.is-current-version-latest == 'true' }}
needs: [allocate-runners, publish-github-release]
runs-on: ubuntu-latest
permissions:
contents: write
pull-requests: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Bump homebrew greptime version
env:
GITHUB_TOKEN: ${{ secrets.HOMEBREW_GREPTIME_REPO_TOKEN }}
VERSION: ${{ needs.allocate-runners.outputs.version }}
run: |
./.github/scripts/update-homebrew-greptme-version.sh
notification: notification:
if: ${{ github.repository == 'GreptimeTeam/greptimedb' && (github.event_name == 'push' || github.event_name == 'schedule') && always() }} if: ${{ github.repository == 'GreptimeTeam/greptimedb' && (github.event_name == 'push' || github.event_name == 'schedule') && always() }}
name: Send notification to Greptime team name: Send notification to Greptime team
@@ -539,18 +456,11 @@ jobs:
build-macos-artifacts, build-macos-artifacts,
build-windows-artifacts, build-windows-artifacts,
] ]
runs-on: ubuntu-latest runs-on: ubuntu-20.04
# Permission reference: https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
permissions:
issues: write # Allows the action to create issues for cyborg.
contents: write # Allows the action to create a release.
env: env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }} SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DEVELOP_CHANNEL }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- uses: ./.github/actions/setup-cyborg - uses: ./.github/actions/setup-cyborg
- name: Report CI status - name: Report CI status
id: report-ci-status id: report-ci-status

View File

@@ -1,194 +0,0 @@
# Reusable workflow for running multi-language SDK tests against GreptimeDB
# Used by: multi-lang-tests.yml, release.yml, nightly-build.yml
# Supports both direct binary artifacts and tarball artifacts
name: Run Multi-language SDK Tests
on:
workflow_call:
inputs:
artifact-name:
required: true
type: string
description: 'Name of the artifact containing greptime binary'
http-port:
required: false
type: string
default: '4000'
description: 'HTTP server port'
mysql-port:
required: false
type: string
default: '4002'
description: 'MySQL server port'
postgres-port:
required: false
type: string
default: '4003'
description: 'PostgreSQL server port'
db-name:
required: false
type: string
default: 'test_db'
description: 'Test database name'
username:
required: false
type: string
default: 'greptime_user'
description: 'Authentication username'
password:
required: false
type: string
default: 'greptime_pwd'
description: 'Authentication password'
timeout-minutes:
required: false
type: number
default: 30
description: 'Job timeout in minutes'
artifact-is-tarball:
required: false
type: boolean
default: false
description: 'Whether the artifact is a tarball (tar.gz) that needs to be extracted'
jobs:
run-tests:
name: Run Multi-language SDK Tests
runs-on: ubuntu-latest
timeout-minutes: ${{ inputs.timeout-minutes }}
steps:
- name: Checkout greptimedb-tests repository
uses: actions/checkout@v4
with:
repository: GreptimeTeam/greptimedb-tests
persist-credentials: false
- name: Download pre-built greptime binary
uses: actions/download-artifact@v4
with:
name: ${{ inputs.artifact-name }}
path: artifact
- name: Setup greptime binary
run: |
mkdir -p bin
if [ "${{ inputs.artifact-is-tarball }}" = "true" ]; then
# Extract tarball and find greptime binary
tar -xzf artifact/*.tar.gz -C artifact
find artifact -name "greptime" -type f -exec cp {} bin/greptime \;
else
# Direct binary format
if [ -f artifact/greptime ]; then
cp artifact/greptime bin/greptime
else
cp artifact/* bin/greptime
fi
fi
chmod +x ./bin/greptime
ls -lh ./bin/greptime
./bin/greptime --version
- name: Setup Java 17
uses: actions/setup-java@v4
with:
distribution: 'temurin'
java-version: '17'
cache: 'maven'
- name: Setup Python 3.8
uses: actions/setup-python@v5
with:
python-version: '3.8'
- name: Setup Go 1.24
uses: actions/setup-go@v5
with:
go-version: '1.24'
cache: true
cache-dependency-path: go-tests/go.sum
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: '18'
- name: Install Python dependencies
run: |
pip install mysql-connector-python psycopg2-binary
python3 -c "import mysql.connector; print(f'mysql-connector-python {mysql.connector.__version__}')"
python3 -c "import psycopg2; print(f'psycopg2 {psycopg2.__version__}')"
- name: Install Go dependencies
working-directory: go-tests
run: |
go mod download
go mod verify
go version
- name: Kill existing GreptimeDB processes
run: |
pkill -f greptime || true
sleep 2
- name: Start GreptimeDB standalone
run: |
./bin/greptime standalone start \
--http-addr 0.0.0.0:${{ inputs.http-port }} \
--rpc-addr 0.0.0.0:4001 \
--mysql-addr 0.0.0.0:${{ inputs.mysql-port }} \
--postgres-addr 0.0.0.0:${{ inputs.postgres-port }} \
--user-provider=static_user_provider:cmd:${{ inputs.username }}=${{ inputs.password }} > /tmp/greptimedb.log 2>&1 &
- name: Wait for GreptimeDB to be ready
run: |
echo "Waiting for GreptimeDB..."
for i in {1..60}; do
if curl -sf http://localhost:${{ inputs.http-port }}/health > /dev/null; then
echo "✅ GreptimeDB is ready"
exit 0
fi
sleep 2
done
echo "❌ GreptimeDB failed to start"
cat /tmp/greptimedb.log
exit 1
- name: Run multi-language tests
env:
DB_NAME: ${{ inputs.db-name }}
MYSQL_HOST: 127.0.0.1
MYSQL_PORT: ${{ inputs.mysql-port }}
POSTGRES_HOST: 127.0.0.1
POSTGRES_PORT: ${{ inputs.postgres-port }}
HTTP_HOST: 127.0.0.1
HTTP_PORT: ${{ inputs.http-port }}
GREPTIME_USERNAME: ${{ inputs.username }}
GREPTIME_PASSWORD: ${{ inputs.password }}
run: |
chmod +x ./run_tests.sh
./run_tests.sh
- name: Collect logs on failure
if: failure()
run: |
echo "=== GreptimeDB Logs ==="
cat /tmp/greptimedb.log || true
- name: Upload test logs on failure
if: failure()
uses: actions/upload-artifact@v4
with:
name: test-logs
path: |
/tmp/greptimedb.log
java-tests/target/surefire-reports/
python-tests/.pytest_cache/
go-tests/*.log
**/test-output/
retention-days: 7
- name: Cleanup
if: always()
run: |
pkill -f greptime || true
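For a quick manual check against a standalone instance started the way this workflow does, connecting over the MySQL protocol port with the same static credentials would look roughly like this (the client flags are assumptions; any MySQL-compatible client should do):

    mysql -h 127.0.0.1 -P 4002 -u greptime_user -pgreptime_pwd -e "SHOW DATABASES;"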

View File

@@ -4,20 +4,18 @@ on:
- cron: '4 2 * * *' - cron: '4 2 * * *'
workflow_dispatch: workflow_dispatch:
permissions:
contents: read
issues: write
pull-requests: write
jobs: jobs:
maintenance: maintenance:
name: Periodic Maintenance name: Periodic Maintenance
runs-on: ubuntu-latest runs-on: ubuntu-latest
permissions:
contents: read
issues: write
pull-requests: write
if: ${{ github.repository == 'GreptimeTeam/greptimedb' }} if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
persist-credentials: false
- uses: ./.github/actions/setup-cyborg - uses: ./.github/actions/setup-cyborg
- name: Do Maintenance - name: Do Maintenance
working-directory: cyborg working-directory: cyborg

View File

@@ -7,18 +7,9 @@ on:
- reopened - reopened
- edited - edited
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
permissions:
contents: read
pull-requests: write
issues: write
jobs: jobs:
check: check:
runs-on: ubuntu-latest runs-on: ubuntu-20.04
timeout-minutes: 10 timeout-minutes: 10
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4

.gitignore (vendored): 17 lines changed
View File

@@ -28,7 +28,6 @@ debug/
# Logs # Logs
**/__unittest_logs **/__unittest_logs
logs/ logs/
!grafana/dashboards/logs/
# cpython's generated python byte code # cpython's generated python byte code
**/__pycache__/ **/__pycache__/
@@ -51,19 +50,3 @@ venv/
# Fuzz tests # Fuzz tests
tests-fuzz/artifacts/ tests-fuzz/artifacts/
tests-fuzz/corpus/ tests-fuzz/corpus/
# cargo-udeps reports
udeps-report.json
# Nix
.direnv
.envrc
## default data home
greptimedb_data
# github
!/.github
# Claude code
CLAUDE.md

View File

@@ -17,6 +17,6 @@ repos:
- id: fmt - id: fmt
- id: clippy - id: clippy
args: ["--workspace", "--all-targets", "--all-features", "--", "-D", "warnings"] args: ["--workspace", "--all-targets", "--all-features", "--", "-D", "warnings"]
stages: [pre-push] stages: [push]
- id: cargo-check - id: cargo-check
args: ["--workspace", "--all-targets", "--all-features"] args: ["--workspace", "--all-targets", "--all-features"]
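Because these hooks are bound to the pre-push stage, they only run if the push hook has been installed; assuming a standard pre-commit setup, that is typically done once per clone with:

    pre-commit install --hook-type pre-push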

View File

@@ -2,42 +2,42 @@
## Individual Committers (in alphabetical order) ## Individual Committers (in alphabetical order)
- [apdong2022](https://github.com/apdong2022) * [CookiePieWw](https://github.com/CookiePieWw)
- [beryl678](https://github.com/beryl678) * [KKould](https://github.com/KKould)
- [CookiePieWw](https://github.com/CookiePieWw) * [NiwakaDev](https://github.com/NiwakaDev)
- [etolbakov](https://github.com/etolbakov) * [etolbakov](https://github.com/etolbakov)
- [irenjj](https://github.com/irenjj) * [irenjj](https://github.com/irenjj)
- [KKould](https://github.com/KKould)
- [Lanqing Yang](https://github.com/lyang24)
- [nicecui](https://github.com/nicecui)
- [NiwakaDev](https://github.com/NiwakaDev)
- [paomian](https://github.com/paomian)
- [tisonkun](https://github.com/tisonkun)
- [Wenjie0329](https://github.com/Wenjie0329)
- [zhaoyingnan01](https://github.com/zhaoyingnan01)
- [zhongzc](https://github.com/zhongzc)
- [ZonaHex](https://github.com/ZonaHex)
- [zyy17](https://github.com/zyy17)
## Team Members (in alphabetical order) ## Team Members (in alphabetical order)
- [daviderli614](https://github.com/daviderli614) * [Breeze-P](https://github.com/Breeze-P)
- [discord9](https://github.com/discord9) * [GrepTime](https://github.com/GrepTime)
- [evenyag](https://github.com/evenyag) * [MichaelScofield](https://github.com/MichaelScofield)
- [fengjiachun](https://github.com/fengjiachun) * [Wenjie0329](https://github.com/Wenjie0329)
- [fengys1996](https://github.com/fengys1996) * [WenyXu](https://github.com/WenyXu)
- [GrepTime](https://github.com/GrepTime) * [ZonaHex](https://github.com/ZonaHex)
- [holalengyu](https://github.com/holalengyu) * [apdong2022](https://github.com/apdong2022)
- [killme2008](https://github.com/killme2008) * [beryl678](https://github.com/beryl678)
- [MichaelScofield](https://github.com/MichaelScofield) * [daviderli614](https://github.com/daviderli614)
- [shuiyisong](https://github.com/shuiyisong) * [discord9](https://github.com/discord9)
- [sunchanglong](https://github.com/sunchanglong) * [evenyag](https://github.com/evenyag)
- [sunng87](https://github.com/sunng87) * [fengjiachun](https://github.com/fengjiachun)
- [v0y4g3r](https://github.com/v0y4g3r) * [fengys1996](https://github.com/fengys1996)
- [waynexia](https://github.com/waynexia) * [holalengyu](https://github.com/holalengyu)
- [WenyXu](https://github.com/WenyXu) * [killme2008](https://github.com/killme2008)
- [xtang](https://github.com/xtang) * [nicecui](https://github.com/nicecui)
* [paomian](https://github.com/paomian)
* [shuiyisong](https://github.com/shuiyisong)
* [sunchanglong](https://github.com/sunchanglong)
* [sunng87](https://github.com/sunng87)
* [tisonkun](https://github.com/tisonkun)
* [v0y4g3r](https://github.com/v0y4g3r)
* [waynexia](https://github.com/waynexia)
* [xtang](https://github.com/xtang)
* [zhaoyingnan01](https://github.com/zhaoyingnan01)
* [zhongzc](https://github.com/zhongzc)
* [zyy17](https://github.com/zyy17)
## All Contributors ## All Contributors
To see the full list of contributors, please visit our [Contributors page](https://github.com/GreptimeTeam/greptimedb/graphs/contributors) [![All Contributors](https://contrib.rocks/image?repo=GreptimeTeam/greptimedb)](https://github.com/GreptimeTeam/greptimedb/graphs/contributors)

View File

@@ -55,12 +55,8 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim
- To ensure that the community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process. - To ensure that the community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
- Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root). - Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root).
- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](docs/style-guide.md). - Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](docs/style-guide.md).
- Make sure all unit tests are passed using [nextest](https://nexte.st/index.html) `cargo nextest run --workspace --features pg_kvbackend,mysql_kvbackend` or `make test`. - Make sure all unit tests are passed using [nextest](https://nexte.st/index.html) `cargo nextest run`.
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings` or `make clippy`). - Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`).
- Ensure there are no unused dependencies by running `make check-udeps` (clean them up with `make fix-udeps` if reported).
- If you must keep a target-specific dependency (e.g. under `[target.'cfg(...)'.dev-dependencies]`), add a cargo-udeps ignore entry in the same `Cargo.toml`, for example:
`[package.metadata.cargo-udeps.ignore]` with `development = ["rexpect"]` (or `dependencies`/`build` as appropriate).
- When modifying sample configuration files in `config/`, run `make config-docs` (which requires Docker to be installed) to update the configuration documentation and include it in your commit.
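Taken together, the checks above can be run locally like this (a sketch that assumes nextest, clippy, cargo-udeps, and Docker are available; the commands are the ones referenced in the list):

```shell
# Unit tests via nextest (or simply `make test`)
cargo nextest run --workspace --features pg_kvbackend,mysql_kvbackend

# Clippy lints for the whole workspace (or `make clippy`)
cargo clippy --workspace --all-targets -- -D warnings

# Report unused dependencies; clean them up with `make fix-udeps` if any are found
make check-udeps

# Regenerate configuration docs after editing files under `config/` (requires Docker)
make config-docs
```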
#### `pre-commit` Hooks #### `pre-commit` Hooks
@@ -112,7 +108,7 @@ of what you were trying to do and what went wrong. You can also reach for help i
The core team will be thrilled if you would like to participate in any way you like. When you are stuck, try to ask for help by filing an issue, with a detailed description of what you were trying to do and what went wrong. If you have any questions or if you would like to get involved in our community, please check out: The core team will be thrilled if you would like to participate in any way you like. When you are stuck, try to ask for help by filing an issue, with a detailed description of what you were trying to do and what went wrong. If you have any questions or if you would like to get involved in our community, please check out:
- [GreptimeDB Community Slack](https://greptime.com/slack) - [GreptimeDB Community Slack](https://greptime.com/slack)
- [GreptimeDB GitHub Discussions](https://github.com/GreptimeTeam/greptimedb/discussions) - [GreptimeDB Github Discussions](https://github.com/GreptimeTeam/greptimedb/discussions)
Also, see some extra GreptimeDB content: Also, see some extra GreptimeDB content:

Cargo.lock (generated, 10006 changed lines): file diff suppressed because it is too large.


@@ -2,27 +2,23 @@
members = [ members = [
"src/api", "src/api",
"src/auth", "src/auth",
"src/cache",
"src/catalog", "src/catalog",
"src/cli", "src/cache",
"src/client", "src/client",
"src/cmd", "src/cmd",
"src/common/base", "src/common/base",
"src/common/catalog", "src/common/catalog",
"src/common/config", "src/common/config",
"src/common/datasource", "src/common/datasource",
"src/common/decimal",
"src/common/error", "src/common/error",
"src/common/event-recorder",
"src/common/frontend", "src/common/frontend",
"src/common/function", "src/common/function",
"src/common/macro",
"src/common/greptimedb-telemetry", "src/common/greptimedb-telemetry",
"src/common/grpc", "src/common/grpc",
"src/common/grpc-expr", "src/common/grpc-expr",
"src/common/macro",
"src/common/mem-prof", "src/common/mem-prof",
"src/common/meta", "src/common/meta",
"src/common/options",
"src/common/plugins", "src/common/plugins",
"src/common/pprof", "src/common/pprof",
"src/common/procedure", "src/common/procedure",
@@ -30,28 +26,22 @@ members = [
"src/common/query", "src/common/query",
"src/common/recordbatch", "src/common/recordbatch",
"src/common/runtime", "src/common/runtime",
"src/common/session",
"src/common/sql",
"src/common/stat",
"src/common/substrait", "src/common/substrait",
"src/common/telemetry", "src/common/telemetry",
"src/common/test-util", "src/common/test-util",
"src/common/time", "src/common/time",
"src/common/decimal",
"src/common/version", "src/common/version",
"src/common/wal", "src/common/wal",
"src/common/workload",
"src/datanode", "src/datanode",
"src/datatypes", "src/datatypes",
"src/file-engine", "src/file-engine",
"src/flow", "src/flow",
"src/frontend", "src/frontend",
"src/index",
"src/log-query",
"src/log-store", "src/log-store",
"src/meta-client", "src/meta-client",
"src/meta-srv", "src/meta-srv",
"src/metric-engine", "src/metric-engine",
"src/mito-codec",
"src/mito2", "src/mito2",
"src/object-store", "src/object-store",
"src/operator", "src/operator",
@@ -61,12 +51,13 @@ members = [
"src/promql", "src/promql",
"src/puffin", "src/puffin",
"src/query", "src/query",
"src/standalone", "src/script",
"src/servers", "src/servers",
"src/session", "src/session",
"src/sql", "src/sql",
"src/store-api", "src/store-api",
"src/table", "src/table",
"src/index",
"tests-fuzz", "tests-fuzz",
"tests-integration", "tests-integration",
"tests/runner", "tests/runner",
@@ -74,8 +65,8 @@ members = [
resolver = "2" resolver = "2"
[workspace.package] [workspace.package]
version = "1.0.0-beta.2" version = "0.9.5"
edition = "2024" edition = "2021"
license = "Apache-2.0" license = "Apache-2.0"
[workspace.lints] [workspace.lints]
@@ -83,171 +74,130 @@ clippy.print_stdout = "warn"
clippy.print_stderr = "warn" clippy.print_stderr = "warn"
clippy.dbg_macro = "warn" clippy.dbg_macro = "warn"
clippy.implicit_clone = "warn" clippy.implicit_clone = "warn"
clippy.result_large_err = "allow" clippy.readonly_write_lock = "allow"
clippy.large_enum_variant = "allow"
clippy.doc_overindented_list_items = "allow"
clippy.uninlined_format_args = "allow"
rust.unknown_lints = "deny" rust.unknown_lints = "deny"
# Remove this after https://github.com/PyO3/pyo3/issues/4094
rust.non_local_definitions = "allow"
rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] } rust.unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
[workspace.dependencies] [workspace.dependencies]
# DO_NOT_REMOVE_THIS: BEGIN_OF_EXTERNAL_DEPENDENCIES
# We turn off default-features for some dependencies here so the workspaces which inherit them can # We turn off default-features for some dependencies here so the workspaces which inherit them can
# selectively turn them on if needed, since we can override default-features = true (from false) # selectively turn them on if needed, since we can override default-features = true (from false)
# for the inherited dependency but cannot do the reverse (override from true to false). # for the inherited dependency but cannot do the reverse (override from true to false).
# #
# See for more details: https://github.com/rust-lang/cargo/issues/11329 # See for more details: https://github.com/rust-lang/cargo/issues/11329
ahash = { version = "0.8", features = ["compile-time-rng"] } ahash = { version = "0.8", features = ["compile-time-rng"] }
aquamarine = "0.6" aquamarine = "0.3"
arrow = { version = "56.2", features = ["prettyprint"] } arrow = { version = "51.0.0", features = ["prettyprint"] }
arrow-array = { version = "56.2", default-features = false, features = ["chrono-tz"] } arrow-array = { version = "51.0.0", default-features = false, features = ["chrono-tz"] }
arrow-buffer = "56.2" arrow-flight = "51.0"
arrow-flight = "56.2" arrow-ipc = { version = "51.0.0", default-features = false, features = ["lz4", "zstd"] }
arrow-ipc = { version = "56.2", default-features = false, features = ["lz4", "zstd"] } arrow-schema = { version = "51.0", features = ["serde"] }
arrow-schema = { version = "56.2", features = ["serde"] }
async-stream = "0.3" async-stream = "0.3"
async-trait = "0.1" async-trait = "0.1"
# Remember to update axum-extra, axum-macros when updating axum axum = { version = "0.6", features = ["headers"] }
axum = "0.8" base64 = "0.21"
axum-extra = "0.10"
axum-macros = "0.5"
backon = "1"
base64 = "0.22"
bigdecimal = "0.4.2" bigdecimal = "0.4.2"
bitflags = "2.4.1" bitflags = "2.4.1"
bytemuck = "1.12" bytemuck = "1.12"
bytes = { version = "1.7", features = ["serde"] } bytes = { version = "1.7", features = ["serde"] }
chrono = { version = "0.4", features = ["serde"] } chrono = { version = "0.4", features = ["serde"] }
chrono-tz = { version = "0.10.1", features = ["case-insensitive"] }
clap = { version = "4.4", features = ["derive"] } clap = { version = "4.4", features = ["derive"] }
config = "0.13.0" config = "0.13.0"
const_format = "0.2"
crossbeam-utils = "0.8" crossbeam-utils = "0.8"
dashmap = "6.1" dashmap = "5.4"
datafusion = "50" datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-common = "50" datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-expr = "50" datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-functions = "50" datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-functions-aggregate-common = "50" datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-optimizer = "50" datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-orc = "0.5" datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-pg-catalog = "0.12.2" datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-physical-expr = "50" datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
datafusion-physical-plan = "50" derive_builder = "0.12"
datafusion-sql = "50"
datafusion-substrait = "50"
deadpool = "0.12"
deadpool-postgres = "0.14"
derive_builder = "0.20"
dotenv = "0.15" dotenv = "0.15"
either = "1.15" etcd-client = { version = "0.13" }
etcd-client = { git = "https://github.com/GreptimeTeam/etcd-client", rev = "f62df834f0cffda355eba96691fe1a9a332b75a7", features = [
"tls",
"tls-roots",
] }
fst = "0.4.7" fst = "0.4.7"
futures = "0.3" futures = "0.3"
futures-util = "0.3" futures-util = "0.3"
greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "0df99f09f1d6785055b2d9da96fc4ecc2bdf6803" } greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "255f87a3318ace3f88a67f76995a0e14910983f4" }
hex = "0.4"
http = "1"
humantime = "2.1" humantime = "2.1"
humantime-serde = "1.1" humantime-serde = "1.1"
hyper = "1.1" itertools = "0.10"
hyper-util = "0.1" jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "46ad50fc71cf75afbf98eec455f7892a6387c1fc", default-features = false }
itertools = "0.14"
jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "8c8d2fc294a39f3ff08909d60f718639cfba3875", default-features = false }
lazy_static = "1.4" lazy_static = "1.4"
local-ip-address = "0.6" meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "a10facb353b41460eeb98578868ebf19c2084fac" }
loki-proto = { git = "https://github.com/GreptimeTeam/loki-proto.git", rev = "3b7cd33234358b18ece977bf689dc6fb760f29ab" } mockall = "0.11.4"
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "5618e779cf2bb4755b499c630fba4c35e91898cb" }
mockall = "0.13"
moka = "0.12" moka = "0.12"
nalgebra = "0.33" notify = "6.1"
nix = { version = "0.30.1", default-features = false, features = ["event", "fs", "process"] }
notify = "8.0"
num_cpus = "1.16" num_cpus = "1.16"
object_store_opendal = "0.54"
once_cell = "1.18" once_cell = "1.18"
opentelemetry-proto = { version = "0.30", features = [ opentelemetry-proto = { version = "0.5", features = [
"gen-tonic", "gen-tonic",
"metrics", "metrics",
"trace", "trace",
"with-serde", "with-serde",
"logs", "logs",
] } ] }
ordered-float = { version = "4.3", features = ["serde"] }
otel-arrow-rust = { git = "https://github.com/GreptimeTeam/otel-arrow", rev = "2d64b7c0fa95642028a8205b36fe9ea0b023ec59", features = [
"server",
] }
parking_lot = "0.12" parking_lot = "0.12"
parquet = { version = "56.2", default-features = false, features = ["arrow", "async", "object_store"] } parquet = { version = "51.0.0", default-features = false, features = ["arrow", "async", "object_store"] }
paste = "1.0" paste = "1.0"
pin-project = "1.0" pin-project = "1.0"
pretty_assertions = "1.4.0"
prometheus = { version = "0.13.3", features = ["process"] } prometheus = { version = "0.13.3", features = ["process"] }
promql-parser = { version = "0.6", features = ["ser"] } promql-parser = { version = "0.4.3", features = ["ser"] }
prost = { version = "0.13", features = ["no-recursion-limit"] } prost = "0.12"
prost-types = "0.13"
raft-engine = { version = "0.4.1", default-features = false } raft-engine = { version = "0.4.1", default-features = false }
rand = "0.9" rand = "0.8"
ratelimit = "0.10" ratelimit = "0.9"
regex = "1.12" regex = "1.8"
regex-automata = "0.4" regex-automata = { version = "0.4" }
reqwest = { version = "0.12", default-features = false, features = [ reqwest = { version = "0.12", default-features = false, features = [
"json", "json",
"rustls-tls-native-roots", "rustls-tls-native-roots",
"stream", "stream",
"multipart", "multipart",
] } ] }
rskafka = { git = "https://github.com/WenyXu/rskafka.git", rev = "7b0f31ed39db049b4ee2e5f1e95b5a30be9baf76", features = [ rskafka = { git = "https://github.com/influxdata/rskafka.git", rev = "75535b5ad9bae4a5dbb582c82e44dfd81ec10105", features = [
"transport-tls", "transport-tls",
] } ] }
rstest = "0.25" rstest = "0.21"
rstest_reuse = "0.7" rstest_reuse = "0.7"
rust_decimal = "1.33" rust_decimal = "1.33"
rustc-hash = "2.0" rustc-hash = "2.0"
# It is worth noting that we should try to avoid using aws-lc-rs until it can be compiled on various platforms. schemars = "0.8"
hostname = "0.4.0"
rustls = { version = "0.23.25", default-features = false }
sea-query = "0.32"
serde = { version = "1.0", features = ["derive"] } serde = { version = "1.0", features = ["derive"] }
serde_json = { version = "1.0", features = ["float_roundtrip"] } serde_json = { version = "1.0", features = ["float_roundtrip"] }
serde_with = "3" serde_with = "3"
simd-json = "0.15" shadow-rs = "0.35"
similar-asserts = "1.6.0" similar-asserts = "1.6.0"
smallvec = { version = "1", features = ["serde"] } smallvec = { version = "1", features = ["serde"] }
snafu = "0.8" snafu = "0.8"
sqlparser = { version = "0.58.0", default-features = false, features = ["std", "visitor", "serde"] } sysinfo = "0.30"
sqlx = { version = "0.8", default-features = false, features = ["any", "macros", "json", "runtime-tokio-rustls"] } # on branch v0.44.x
strum = { version = "0.27", features = ["derive"] } sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "54a267ac89c09b11c0c88934690530807185d3e7", features = [
sysinfo = "0.33" "visitor",
] }
strum = { version = "0.25", features = ["derive"] }
tempfile = "3" tempfile = "3"
tokio = { version = "1.47", features = ["full"] } tokio = { version = "1.40", features = ["full"] }
tokio-postgres = "0.7" tokio-postgres = "0.7"
tokio-rustls = { version = "0.26.2", default-features = false } tokio-stream = { version = "0.1" }
tokio-stream = "0.1"
tokio-util = { version = "0.7", features = ["io-util", "compat"] } tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.8.8" toml = "0.8.8"
tonic = { version = "0.13", features = ["tls-ring", "gzip", "zstd"] } tonic = { version = "0.11", features = ["tls", "gzip", "zstd"] }
tower = "0.5" tower = { version = "0.4" }
tower-http = "0.6"
tracing = "0.1"
tracing-appender = "0.2" tracing-appender = "0.2"
tracing-opentelemetry = "0.31.0"
tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] } tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "fmt"] }
typetag = "0.2" typetag = "0.2"
uuid = { version = "1.17", features = ["serde", "v4", "fast-rng"] } uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] }
vrl = "0.25"
zstd = "0.13" zstd = "0.13"
# DO_NOT_REMOVE_THIS: END_OF_EXTERNAL_DEPENDENCIES
## workspaces members ## workspaces members
api = { path = "src/api" } api = { path = "src/api" }
auth = { path = "src/auth" } auth = { path = "src/auth" }
cache = { path = "src/cache" } cache = { path = "src/cache" }
catalog = { path = "src/catalog" } catalog = { path = "src/catalog" }
cli = { path = "src/cli" }
client = { path = "src/client" } client = { path = "src/client" }
cmd = { path = "src/cmd", default-features = false } cmd = { path = "src/cmd", default-features = false }
common-base = { path = "src/common/base" } common-base = { path = "src/common/base" }
@@ -256,7 +206,6 @@ common-config = { path = "src/common/config" }
common-datasource = { path = "src/common/datasource" } common-datasource = { path = "src/common/datasource" }
common-decimal = { path = "src/common/decimal" } common-decimal = { path = "src/common/decimal" }
common-error = { path = "src/common/error" } common-error = { path = "src/common/error" }
common-event-recorder = { path = "src/common/event-recorder" }
common-frontend = { path = "src/common/frontend" } common-frontend = { path = "src/common/frontend" }
common-function = { path = "src/common/function" } common-function = { path = "src/common/function" }
common-greptimedb-telemetry = { path = "src/common/greptimedb-telemetry" } common-greptimedb-telemetry = { path = "src/common/greptimedb-telemetry" }
@@ -265,7 +214,6 @@ common-grpc-expr = { path = "src/common/grpc-expr" }
common-macro = { path = "src/common/macro" } common-macro = { path = "src/common/macro" }
common-mem-prof = { path = "src/common/mem-prof" } common-mem-prof = { path = "src/common/mem-prof" }
common-meta = { path = "src/common/meta" } common-meta = { path = "src/common/meta" }
common-options = { path = "src/common/options" }
common-plugins = { path = "src/common/plugins" } common-plugins = { path = "src/common/plugins" }
common-pprof = { path = "src/common/pprof" } common-pprof = { path = "src/common/pprof" }
common-procedure = { path = "src/common/procedure" } common-procedure = { path = "src/common/procedure" }
@@ -273,27 +221,21 @@ common-procedure-test = { path = "src/common/procedure-test" }
common-query = { path = "src/common/query" } common-query = { path = "src/common/query" }
common-recordbatch = { path = "src/common/recordbatch" } common-recordbatch = { path = "src/common/recordbatch" }
common-runtime = { path = "src/common/runtime" } common-runtime = { path = "src/common/runtime" }
common-session = { path = "src/common/session" }
common-sql = { path = "src/common/sql" }
common-stat = { path = "src/common/stat" }
common-telemetry = { path = "src/common/telemetry" } common-telemetry = { path = "src/common/telemetry" }
common-test-util = { path = "src/common/test-util" } common-test-util = { path = "src/common/test-util" }
common-time = { path = "src/common/time" } common-time = { path = "src/common/time" }
common-version = { path = "src/common/version" } common-version = { path = "src/common/version" }
common-wal = { path = "src/common/wal" } common-wal = { path = "src/common/wal" }
common-workload = { path = "src/common/workload" }
datanode = { path = "src/datanode" } datanode = { path = "src/datanode" }
datatypes = { path = "src/datatypes" } datatypes = { path = "src/datatypes" }
file-engine = { path = "src/file-engine" } file-engine = { path = "src/file-engine" }
flow = { path = "src/flow" } flow = { path = "src/flow" }
frontend = { path = "src/frontend", default-features = false } frontend = { path = "src/frontend", default-features = false }
index = { path = "src/index" } index = { path = "src/index" }
log-query = { path = "src/log-query" }
log-store = { path = "src/log-store" } log-store = { path = "src/log-store" }
meta-client = { path = "src/meta-client" } meta-client = { path = "src/meta-client" }
meta-srv = { path = "src/meta-srv" } meta-srv = { path = "src/meta-srv" }
metric-engine = { path = "src/metric-engine" } metric-engine = { path = "src/metric-engine" }
mito-codec = { path = "src/mito-codec" }
mito2 = { path = "src/mito2" } mito2 = { path = "src/mito2" }
object-store = { path = "src/object-store" } object-store = { path = "src/object-store" }
operator = { path = "src/operator" } operator = { path = "src/operator" }
@@ -303,32 +245,26 @@ plugins = { path = "src/plugins" }
promql = { path = "src/promql" } promql = { path = "src/promql" }
puffin = { path = "src/puffin" } puffin = { path = "src/puffin" }
query = { path = "src/query" } query = { path = "src/query" }
script = { path = "src/script" }
servers = { path = "src/servers" } servers = { path = "src/servers" }
session = { path = "src/session" } session = { path = "src/session" }
sql = { path = "src/sql" } sql = { path = "src/sql" }
standalone = { path = "src/standalone" }
store-api = { path = "src/store-api" } store-api = { path = "src/store-api" }
substrait = { path = "src/common/substrait" } substrait = { path = "src/common/substrait" }
table = { path = "src/table" } table = { path = "src/table" }
[patch.crates-io]
# change all rustls dependencies to use our fork to default to `ring` to make it "just work"
hyper-rustls = { git = "https://github.com/GreptimeTeam/hyper-rustls" }
rustls = { git = "https://github.com/GreptimeTeam/rustls" }
tokio-rustls = { git = "https://github.com/GreptimeTeam/tokio-rustls" }
# This is commented out since we are not using aws-lc-sys. If we need it, uncomment this line or use a release after this commit; otherwise it won't compile with gcc < 8.1
# see https://github.com/aws/aws-lc-rs/pull/526
# aws-lc-sys = { git ="https://github.com/aws/aws-lc-rs", rev = "556558441e3494af4b156ae95ebc07ebc2fd38aa" }
[workspace.dependencies.meter-macros] [workspace.dependencies.meter-macros]
git = "https://github.com/GreptimeTeam/greptime-meter.git" git = "https://github.com/GreptimeTeam/greptime-meter.git"
rev = "5618e779cf2bb4755b499c630fba4c35e91898cb" rev = "a10facb353b41460eeb98578868ebf19c2084fac"
[patch.crates-io]
datafusion = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
datafusion-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
datafusion-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
datafusion-functions = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
datafusion-functions-aggregate-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
datafusion-optimizer = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
datafusion-physical-expr = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
datafusion-physical-expr-common = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
datafusion-physical-plan = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
datafusion-datasource = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
datafusion-sql = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
datafusion-substrait = { git = "https://github.com/GreptimeTeam/datafusion.git", rev = "fd4b2abcf3c3e43e94951bda452c9fd35243aab0" }
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "4b519a5caa95472cc3988f5556813a583dd35af1" } # branch = "v0.58.x"
[profile.release] [profile.release]
debug = 1 debug = 1


@@ -1,6 +1,3 @@
[target.aarch64-unknown-linux-gnu]
image = "ghcr.io/cross-rs/aarch64-unknown-linux-gnu:0.2.5"
[build] [build]
pre-build = [ pre-build = [
"dpkg --add-architecture $CROSS_DEB_ARCH", "dpkg --add-architecture $CROSS_DEB_ARCH",
@@ -8,8 +5,3 @@ pre-build = [
"curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip && unzip protoc-3.15.8-linux-x86_64.zip -d /usr/", "curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip && unzip protoc-3.15.8-linux-x86_64.zip -d /usr/",
"chmod a+x /usr/bin/protoc && chmod -R a+rx /usr/include/google", "chmod a+x /usr/bin/protoc && chmod -R a+rx /usr/include/google",
] ]
[build.env]
passthrough = [
"JEMALLOC_SYS_WITH_LG_PAGE",
]
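A usage sketch for the cross configuration above, assuming `cross` is installed; the page-size value is only an example and is forwarded into the build container via the passthrough list:

```shell
# Build for aarch64; JEMALLOC_SYS_WITH_LG_PAGE is passed through to the container
JEMALLOC_SYS_WITH_LG_PAGE=16 cross build --target aarch64-unknown-linux-gnu --release
```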


@@ -8,7 +8,7 @@ CARGO_BUILD_OPTS := --locked
IMAGE_REGISTRY ?= docker.io IMAGE_REGISTRY ?= docker.io
IMAGE_NAMESPACE ?= greptime IMAGE_NAMESPACE ?= greptime
IMAGE_TAG ?= latest IMAGE_TAG ?= latest
DEV_BUILDER_IMAGE_TAG ?= 2025-10-01-8fe17d43-20251011080129 DEV_BUILDER_IMAGE_TAG ?= 2024-10-19-a5c00e85-20241024184445
BUILDX_MULTI_PLATFORM_BUILD ?= false BUILDX_MULTI_PLATFORM_BUILD ?= false
BUILDX_BUILDER_NAME ?= gtbuilder BUILDX_BUILDER_NAME ?= gtbuilder
BASE_IMAGE ?= ubuntu BASE_IMAGE ?= ubuntu
@@ -17,14 +17,12 @@ CARGO_REGISTRY_CACHE ?= ${HOME}/.cargo/registry
ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/') ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/')
OUTPUT_DIR := $(shell if [ "$(RELEASE)" = "true" ]; then echo "release"; elif [ ! -z "$(CARGO_PROFILE)" ]; then echo "$(CARGO_PROFILE)" ; else echo "debug"; fi) OUTPUT_DIR := $(shell if [ "$(RELEASE)" = "true" ]; then echo "release"; elif [ ! -z "$(CARGO_PROFILE)" ]; then echo "$(CARGO_PROFILE)" ; else echo "debug"; fi)
SQLNESS_OPTS ?= SQLNESS_OPTS ?=
EXTRA_BUILD_ENVS ?=
ASSEMBLED_EXTRA_BUILD_ENV := $(foreach var,$(EXTRA_BUILD_ENVS),-e $(var))
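The two variables above feed the `build-by-dev-builder` target further down; a sketch of how they might be used (the specific variable and value are only an example, and multiple variables are space-separated):

```shell
# Forward extra environment variables into the dockerized dev-builder build
make build-by-dev-builder EXTRA_BUILD_ENVS="JEMALLOC_SYS_WITH_LG_PAGE=16"
```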
# The arguments for running integration tests. # The arguments for running integration tests.
ETCD_VERSION ?= v3.5.9 ETCD_VERSION ?= v3.5.9
ETCD_IMAGE ?= quay.io/coreos/etcd:${ETCD_VERSION} ETCD_IMAGE ?= quay.io/coreos/etcd:${ETCD_VERSION}
RETRY_COUNT ?= 3 RETRY_COUNT ?= 3
NEXTEST_OPTS := --retries ${RETRY_COUNT} --features pg_kvbackend,mysql_kvbackend NEXTEST_OPTS := --retries ${RETRY_COUNT}
BUILD_JOBS ?= $(shell which nproc 1>/dev/null && expr $$(nproc) / 2) # If nproc is not available, we don't set the build jobs. BUILD_JOBS ?= $(shell which nproc 1>/dev/null && expr $$(nproc) / 2) # If nproc is not available, we don't set the build jobs.
ifeq ($(BUILD_JOBS), 0) # If the number of cores is less than 2, set the build jobs to 1. ifeq ($(BUILD_JOBS), 0) # If the number of cores is less than 2, set the build jobs to 1.
BUILD_JOBS := 1 BUILD_JOBS := 1
@@ -34,10 +32,6 @@ ifneq ($(strip $(BUILD_JOBS)),)
NEXTEST_OPTS += --build-jobs=${BUILD_JOBS} NEXTEST_OPTS += --build-jobs=${BUILD_JOBS}
endif endif
ifneq ($(strip $(BUILD_JOBS)),)
SQLNESS_OPTS += --jobs ${BUILD_JOBS}
endif
ifneq ($(strip $(CARGO_PROFILE)),) ifneq ($(strip $(CARGO_PROFILE)),)
CARGO_BUILD_OPTS += --profile ${CARGO_PROFILE} CARGO_BUILD_OPTS += --profile ${CARGO_PROFILE}
endif endif
@@ -66,8 +60,6 @@ ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), all)
BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64,linux/arm64 --push BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64,linux/arm64 --push
else ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), amd64) else ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), amd64)
BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64 --push BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64 --push
else ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), arm64)
BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/arm64 --push
else else
BUILDX_MULTI_PLATFORM_BUILD_OPTS := -o type=docker BUILDX_MULTI_PLATFORM_BUILD_OPTS := -o type=docker
endif endif
@@ -85,7 +77,6 @@ build: ## Build debug version greptime.
.PHONY: build-by-dev-builder .PHONY: build-by-dev-builder
build-by-dev-builder: ## Build greptime by dev-builder. build-by-dev-builder: ## Build greptime by dev-builder.
docker run --network=host \ docker run --network=host \
${ASSEMBLED_EXTRA_BUILD_ENV} \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \ -v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
-w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} \ -w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${DEV_BUILDER_IMAGE_TAG} \
make build \ make build \
@@ -172,16 +163,17 @@ nextest: ## Install nextest tools.
.PHONY: sqlness-test .PHONY: sqlness-test
sqlness-test: ## Run sqlness test. sqlness-test: ## Run sqlness test.
cargo sqlness bare ${SQLNESS_OPTS} cargo sqlness ${SQLNESS_OPTS}
# Run fuzz test ${FUZZ_TARGET}.
RUNS ?= 1 RUNS ?= 1
FUZZ_TARGET ?= fuzz_alter_table FUZZ_TARGET ?= fuzz_alter_table
.PHONY: fuzz .PHONY: fuzz
fuzz: ## Run fuzz test ${FUZZ_TARGET}. fuzz:
cargo fuzz run ${FUZZ_TARGET} --fuzz-dir tests-fuzz -D -s none -- -runs=${RUNS} cargo fuzz run ${FUZZ_TARGET} --fuzz-dir tests-fuzz -D -s none -- -runs=${RUNS}
.PHONY: fuzz-ls .PHONY: fuzz-ls
fuzz-ls: ## List all fuzz targets. fuzz-ls:
cargo fuzz list --fuzz-dir tests-fuzz cargo fuzz list --fuzz-dir tests-fuzz
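As a usage sketch (assuming `cargo-fuzz` is installed), the fuzz targets above can be driven through make; the defaults for `FUZZ_TARGET` and `RUNS` come from the variables defined above:

```shell
# List the available fuzz targets
make fuzz-ls

# Run one target for a fixed number of runs
make fuzz FUZZ_TARGET=fuzz_alter_table RUNS=100
```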
.PHONY: check .PHONY: check
@@ -196,22 +188,10 @@ clippy: ## Check clippy rules.
fix-clippy: ## Fix clippy violations. fix-clippy: ## Fix clippy violations.
cargo clippy --workspace --all-targets --all-features --fix cargo clippy --workspace --all-targets --all-features --fix
.PHONY: check-udeps
check-udeps: ## Check unused dependencies.
cargo udeps --workspace --all-targets
.PHONY: fix-udeps
fix-udeps: ## Remove unused dependencies automatically.
@echo "Running cargo-udeps to find unused dependencies..."
@cargo udeps --workspace --all-targets --output json > udeps-report.json || true
@echo "Removing unused dependencies..."
@python3 scripts/fix-udeps.py udeps-report.json
.PHONY: fmt-check .PHONY: fmt-check
fmt-check: ## Check code format. fmt-check: ## Check code format.
cargo fmt --all -- --check cargo fmt --all -- --check
python3 scripts/check-snafu.py python3 scripts/check-snafu.py
python3 scripts/check-super-imports.py
.PHONY: start-etcd .PHONY: start-etcd
start-etcd: ## Start single node etcd for testing purpose. start-etcd: ## Start single node etcd for testing purpose.
@@ -236,16 +216,6 @@ start-cluster: ## Start the greptimedb cluster with etcd by using docker compose
stop-cluster: ## Stop the greptimedb cluster that created by docker compose. stop-cluster: ## Stop the greptimedb cluster that created by docker compose.
docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml stop docker compose -f ./docker/docker-compose/cluster-with-etcd.yaml stop
##@ Grafana
.PHONY: check-dashboards
check-dashboards: ## Check the Grafana dashboards.
@./grafana/scripts/check.sh
.PHONY: dashboards
dashboards: ## Generate the Grafana dashboards for standalone mode and intermediate dashboards.
@./grafana/scripts/gen-dashboards.sh
##@ Docs ##@ Docs
config-docs: ## Generate configuration documentation from toml files. config-docs: ## Generate configuration documentation from toml files.
docker run --rm \ docker run --rm \

README.md (225 changed lines)

@@ -6,15 +6,14 @@
</picture> </picture>
</p> </p>
<h2 align="center">Real-Time & Cloud-Native Observability Database<br/>for metrics, logs, and traces</h2> <h2 align="center">Unified Time Series Database for Metrics, Logs, and Events</h2>
> Delivers sub-second querying at PB scale and exceptional cost efficiency from edge to cloud.
<div align="center"> <div align="center">
<h3 align="center"> <h3 align="center">
<a href="https://docs.greptime.com/user-guide/overview/">User Guide</a> | <a href="https://greptime.com/product/cloud">GreptimeCloud</a> |
<a href="https://docs.greptime.com/">User Guide</a> |
<a href="https://greptimedb.rs/">API Docs</a> | <a href="https://greptimedb.rs/">API Docs</a> |
<a href="https://github.com/GreptimeTeam/greptimedb/issues/5446">Roadmap 2025</a> <a href="https://github.com/GreptimeTeam/greptimedb/issues/3412">Roadmap 2024</a>
</h4> </h4>
<a href="https://github.com/GreptimeTeam/greptimedb/releases/latest"> <a href="https://github.com/GreptimeTeam/greptimedb/releases/latest">
@@ -49,178 +48,152 @@
</a> </a>
</div> </div>
- [Introduction](#introduction)
- [⭐ Key Features](#features)
- [Quick Comparison](#quick-comparison)
- [Architecture](#architecture)
- [Try GreptimeDB](#try-greptimedb)
- [Getting Started](#getting-started)
- [Build From Source](#build-from-source)
- [Tools & Extensions](#tools--extensions)
- [Project Status](#project-status)
- [Community](#community)
- [License](#license)
- [Commercial Support](#commercial-support)
- [Contributing](#contributing)
- [Acknowledgement](#acknowledgement)
## Introduction ## Introduction
**GreptimeDB** is an open-source, cloud-native database that unifies metrics, logs, and traces, enabling real-time observability at any scale — across edge, cloud, and hybrid environments. **GreptimeDB** is an open-source unified time-series database for **Metrics**, **Logs**, and **Events** (also **Traces** in plan). You can gain real-time insights from Edge to Cloud at any scale.
## Features ## Why GreptimeDB
| Feature | Description | Our core developers have been building time-series data platforms for years. Based on our best-practices, GreptimeDB is born to give you:
| --------- | ----------- |
| [All-in-One Observability](https://docs.greptime.com/user-guide/concepts/why-greptimedb) | OpenTelemetry-native platform unifying metrics, logs, and traces. Query via [SQL](https://docs.greptime.com/user-guide/query-data/sql), [PromQL](https://docs.greptime.com/user-guide/query-data/promql), and [Flow](https://docs.greptime.com/user-guide/flow-computation/overview). |
| [High Performance](https://docs.greptime.com/user-guide/manage-data/data-index) | Written in Rust with [rich indexing](https://docs.greptime.com/user-guide/manage-data/data-index) (inverted, fulltext, skipping, vector), delivering sub-second responses at PB scale. |
| [Cost Efficiency](https://docs.greptime.com/user-guide/concepts/architecture) | 50x lower operational and storage costs with compute-storage separation and native object storage (S3, Azure Blob, etc.). |
| [Cloud-Native & Scalable](https://docs.greptime.com/user-guide/deployments-administration/deploy-on-kubernetes/greptimedb-operator-management) | Purpose-built for [Kubernetes](https://docs.greptime.com/user-guide/deployments-administration/deploy-on-kubernetes/greptimedb-operator-management) with unlimited cross-cloud scaling, handling hundreds of thousands of concurrent requests. |
| [Developer-Friendly](https://docs.greptime.com/user-guide/protocols/overview) | SQL/PromQL interfaces, built-in web dashboard, REST API, MySQL/PostgreSQL protocol compatibility, and native [OpenTelemetry](https://docs.greptime.com/user-guide/ingest-data/for-observability/opentelemetry/) support. |
| [Flexible Deployment](https://docs.greptime.com/user-guide/deployments-administration/overview) | Deploy anywhere from ARM-based edge devices (including [Android](https://docs.greptime.com/user-guide/deployments-administration/run-on-android)) to cloud, with unified APIs and efficient data sync. |
**Perfect for:** * **Unified all kinds of time series**
- Unified observability stack replacing Prometheus + Loki + Tempo
- Large-scale metrics with high cardinality (millions to billions of time series)
- Large-scale observability platform requiring cost efficiency and scalability
- IoT and edge computing with resource and bandwidth constraints
Learn more in [Why GreptimeDB](https://docs.greptime.com/user-guide/concepts/why-greptimedb) and [Observability 2.0 and the Database for It](https://greptime.com/blogs/2025-04-25-greptimedb-observability2-new-database). GreptimeDB treats all time series as contextual events with timestamp, and thus unifies the processing of metrics, logs, and events. It supports analyzing metrics, logs, and events with SQL and PromQL, and doing streaming with continuous aggregation.
## Quick Comparison * **Cloud-Edge collaboration**
| Feature | GreptimeDB | Traditional TSDB | Log Stores | GreptimeDB can be deployed on ARM architecture-compatible Android/Linux systems as well as cloud environments from various vendors. Both sides run the same software, providing identical APIs and control planes, so your application can run at the edge or on the cloud without modification, and data synchronization also becomes extremely easy and efficient.
|----------------------------------|-----------------------|--------------------|-----------------|
| Data Types | Metrics, Logs, Traces | Metrics only | Logs only |
| Query Language | SQL, PromQL | Custom/PromQL | Custom/DSL |
| Deployment | Edge + Cloud | Cloud/On-prem | Mostly central |
| Indexing & Performance | PB-Scale, Sub-second | Varies | Varies |
| Integration | REST API, SQL, Common protocols | Varies | Varies |
**Performance:** * **Cloud-native distributed database**
* [GreptimeDB tops JSONBench's billion-record cold run test!](https://greptime.com/blogs/2025-03-18-jsonbench-greptimedb-performance)
* [TSBS Benchmark](https://github.com/GreptimeTeam/greptimedb/tree/main/docs/benchmarks/tsbs)
Read [more benchmark reports](https://docs.greptime.com/user-guide/concepts/features-that-you-concern#how-is-greptimedbs-performance-compared-to-other-solutions). By leveraging object storage (S3 and others), separating compute and storage, scaling stateless compute nodes arbitrarily, GreptimeDB implements seamless scalability. It also supports cross-cloud deployment with a built-in unified data access layer over different object storages.
## Architecture * **Performance and Cost-effective**
GreptimeDB can run in two modes: Flexible indexing capabilities and distributed, parallel-processing query engine, tackling high cardinality issues down. Optimized columnar layout for handling time-series data; compacted, compressed, and stored on various storage backends, particularly cloud object storage with 50x cost efficiency.
* **Standalone Mode** - Single binary for development and small deployments
* **Distributed Mode** - Separate components for production scale:
- Frontend: Query processing and protocol handling
- Datanode: Data storage and retrieval
- Metasrv: Metadata management and coordination
Read the [architecture](https://docs.greptime.com/contributor-guide/overview/#architecture) document. [DeepWiki](https://deepwiki.com/GreptimeTeam/greptimedb/1-overview) provides an in-depth look at GreptimeDB: * **Compatible with InfluxDB, Prometheus and more protocols**
<img alt="GreptimeDB System Overview" src="docs/architecture.png">
Widely adopted database protocols and APIs, including MySQL, PostgreSQL, and Prometheus Remote Storage, etc. [Read more](https://docs.greptime.com/user-guide/protocols/overview).
## Try GreptimeDB ## Try GreptimeDB
### 1. [GreptimePlay](https://greptime.com/playground)
Try out the features of GreptimeDB right from your browser.
### 2. [GreptimeCloud](https://console.greptime.cloud/)
Start instantly with a free cluster.
### 3. Docker Image
To install GreptimeDB locally, the recommended way is via Docker:
```shell ```shell
docker pull greptime/greptimedb docker pull greptime/greptimedb
``` ```
Start a GreptimeDB container with:
```shell ```shell
docker run -p 127.0.0.1:4000-4003:4000-4003 \ docker run --rm --name greptime --net=host greptime/greptimedb standalone start
-v "$(pwd)/greptimedb_data:/greptimedb_data" \
--name greptime --rm \
greptime/greptimedb:latest standalone start \
--http-addr 0.0.0.0:4000 \
--rpc-bind-addr 0.0.0.0:4001 \
--mysql-addr 0.0.0.0:4002 \
--postgres-addr 0.0.0.0:4003
``` ```
Dashboard: [http://localhost:4000/dashboard](http://localhost:4000/dashboard)
Read more in the [full Install Guide](https://docs.greptime.com/getting-started/installation/overview). Read more about [Installation](https://docs.greptime.com/getting-started/installation/overview) on docs.
**Troubleshooting:**
* Cannot connect to the database? Ensure that ports `4000`, `4001`, `4002`, and `4003` are not blocked by a firewall or used by other services.
* Failed to start? Check the container logs with `docker logs greptime` for further details.
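A quick way to verify the container is serving requests (a sketch; it assumes the standard HTTP health endpoint and a local MySQL client):

```shell
# Check the HTTP server
curl http://localhost:4000/health

# Connect over the MySQL protocol
mysql -h 127.0.0.1 -P 4002
```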
## Getting Started ## Getting Started
- [Quickstart](https://docs.greptime.com/getting-started/quick-start) * [Quickstart](https://docs.greptime.com/getting-started/quick-start)
- [User Guide](https://docs.greptime.com/user-guide/overview) * [User Guide](https://docs.greptime.com/user-guide/overview)
- [Demo Scenes](https://github.com/GreptimeTeam/demo-scene) * [Demos](https://github.com/GreptimeTeam/demo-scene)
- [FAQ](https://docs.greptime.com/faq-and-others/faq) * [FAQ](https://docs.greptime.com/faq-and-others/faq)
## Build From Source ## Build
Check the prerequisite:
**Prerequisites:**
* [Rust toolchain](https://www.rust-lang.org/tools/install) (nightly) * [Rust toolchain](https://www.rust-lang.org/tools/install) (nightly)
* [Protobuf compiler](https://grpc.io/docs/protoc-installation/) (>= 3.15) * [Protobuf compiler](https://grpc.io/docs/protoc-installation/) (>= 3.15)
* C/C++ building essentials, including `gcc`/`g++`/`autoconf` and glibc library (eg. `libc6-dev` on Ubuntu and `glibc-devel` on Fedora) * Python toolchain (optional): Required only if built with PyO3 backend. More detail for compiling with PyO3 can be found in its [documentation](https://pyo3.rs/v0.18.1/building_and_distribution#configuring-the-python-version).
* Python toolchain (optional): Required only if using some test scripts.
**Build and Run:** Build GreptimeDB binary:
```bash
```shell
make make
```
Run a standalone server:
```shell
cargo run -- standalone start cargo run -- standalone start
``` ```
## Tools & Extensions ## Extension
- **Kubernetes**: [GreptimeDB Operator](https://github.com/GrepTimeTeam/greptimedb-operator) ### Dashboard
- **Helm Charts**: [Greptime Helm Charts](https://github.com/GreptimeTeam/helm-charts)
- **Dashboard**: [Web UI](https://github.com/GreptimeTeam/dashboard) - [The dashboard UI for GreptimeDB](https://github.com/GreptimeTeam/dashboard)
- **gRPC Ingester**: [Go](https://github.com/GreptimeTeam/greptimedb-ingester-go), [Java](https://github.com/GreptimeTeam/greptimedb-ingester-java), [C++](https://github.com/GreptimeTeam/greptimedb-ingester-cpp), [Erlang](https://github.com/GreptimeTeam/greptimedb-ingester-erl), [Rust](https://github.com/GreptimeTeam/greptimedb-ingester-rust)
- **Grafana Data Source**: [GreptimeDB Grafana data source plugin](https://github.com/GreptimeTeam/greptimedb-grafana-datasource) ### SDK
- **Grafana Dashboard**: [Official Dashboard for monitoring](https://github.com/GreptimeTeam/greptimedb/blob/main/grafana/README.md)
- [GreptimeDB Go Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-go)
- [GreptimeDB Java Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-java)
- [GreptimeDB C++ Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-cpp)
- [GreptimeDB Erlang Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-erl)
- [GreptimeDB Rust Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-rust)
- [GreptimeDB JavaScript Ingester](https://github.com/GreptimeTeam/greptimedb-ingester-js)
### Grafana Dashboard
Our official Grafana dashboard is available at [grafana](grafana/README.md) directory.
## Project Status ## Project Status
> **Status:** Beta — marching toward v1.0 GA! The current version has not yet reached the standards for General Availability.
> **GA (v1.0):** January 10, 2026 According to our Greptime 2024 Roadmap, we aim to achieve a production-level version with the release of v1.0 by the end of 2024. [Join Us](https://github.com/GreptimeTeam/greptimedb/issues/3412)
- Deployed in production by open-source projects and commercial users We welcome you to test and use GreptimeDB. Some users have already adopted it in their production environments. If you're interested in trying it out, please use the latest stable release available.
- Stable, actively maintained, with regular releases ([version info](https://docs.greptime.com/nightly/reference/about-greptimedb-version))
- Suitable for evaluation and pilot deployments
GreptimeDB v1.0 represents a major milestone toward maturity — marking stable APIs, production readiness, and proven performance.
**Roadmap:** Beta1 (Nov 10) → Beta2 (Nov 24) → RC1 (Dec 8) → GA (Jan 10, 2026), please read [v1.0 highlights and release plan](https://greptime.com/blogs/2025-11-05-greptimedb-v1-highlights) for details.
For production use, we recommend using the latest stable release.
[![Star History Chart](https://api.star-history.com/svg?repos=GreptimeTeam/GreptimeDB&type=Date)](https://www.star-history.com/#GreptimeTeam/GreptimeDB&Date)
If you find this project useful, a ⭐ would mean a lot to us!
<img alt="Known Users" src="https://greptime.com/logo/img/users.png"/>
## Community ## Community
We invite you to engage and contribute! Our core team is thrilled to see you participate in any ways you like. When you are stuck, try to
ask for help by filling an issue with a detailed description of what you were trying to do
and what went wrong. If you have any questions or if you would like to get involved in our
community, please check out:
- [Slack](https://greptime.com/slack) - GreptimeDB Community on [Slack](https://greptime.com/slack)
- [Discussions](https://github.com/GreptimeTeam/greptimedb/discussions) - GreptimeDB [GitHub Discussions forum](https://github.com/GreptimeTeam/greptimedb/discussions)
- [Official Website](https://greptime.com/) - Greptime official [website](https://greptime.com)
- [Blog](https://greptime.com/blogs/)
- [LinkedIn](https://www.linkedin.com/company/greptime/) In addition, you may:
- [X (Twitter)](https://X.com/greptime)
- [YouTube](https://www.youtube.com/@greptime) - View our official [Blog](https://greptime.com/blogs/)
- Connect us with [Linkedin](https://www.linkedin.com/company/greptime/)
- Follow us on [Twitter](https://twitter.com/greptime)
## Commerial Support
If you are running GreptimeDB OSS in your organization, we offer additional
enterprise addons, installation service, training and consulting. [Contact
us](https://greptime.com/contactus) and we will reach out to you with more
detail of our commerial license.
## License ## License
GreptimeDB is licensed under the [Apache License 2.0](https://apache.org/licenses/LICENSE-2.0.txt). GreptimeDB uses the [Apache License 2.0](https://apache.org/licenses/LICENSE-2.0.txt) to strike a balance between
open contributions and allowing you to use the software however you want.
## Commercial Support
Running GreptimeDB in your organization?
We offer enterprise add-ons, services, training, and consulting.
[Contact us](https://greptime.com/contactus) for details.
## Contributing ## Contributing
- Read our [Contribution Guidelines](https://github.com/GreptimeTeam/greptimedb/blob/main/CONTRIBUTING.md). Please refer to [contribution guidelines](CONTRIBUTING.md) and [internal concepts docs](https://docs.greptime.com/contributor-guide/overview.html) for more information.
- Explore [Internal Concepts](https://docs.greptime.com/contributor-guide/overview.html) and [DeepWiki](https://deepwiki.com/GreptimeTeam/greptimedb).
- Pick up a [good first issue](https://github.com/GreptimeTeam/greptimedb/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) and join the #contributors [Slack](https://greptime.com/slack) channel.
## Acknowledgement ## Acknowledgement
Special thanks to all contributors! See [AUTHOR.md](https://github.com/GreptimeTeam/greptimedb/blob/main/AUTHOR.md). Special thanks to all the contributors who have propelled GreptimeDB forward. For a complete list of contributors, please refer to [AUTHOR.md](AUTHOR.md).
- Uses [Apache Arrow™](https://arrow.apache.org/) (memory model) - GreptimeDB uses [Apache Arrow™](https://arrow.apache.org/) as the memory model and [Apache Parquet™](https://parquet.apache.org/) as the persistent file format.
- [Apache Parquet](https://parquet.apache.org/) (file storage) - GreptimeDB's query engine is powered by [Apache Arrow DataFusion](https://arrow.apache.org/datafusion/).
- [Apache DataFusion](https://arrow.apache.org/datafusion/) (query engine) - [Apache OpenDAL](https://opendal.apache.org) gives GreptimeDB a very general and elegant data access abstraction layer.
- [Apache OpenDAL™](https://opendal.apache.org/) (data access abstraction) - GreptimeDB's meta service is based on [etcd](https://etcd.io/).
- GreptimeDB uses [RustPython](https://github.com/RustPython/RustPython) for experimental embedded python scripting.


@@ -12,29 +12,22 @@
| Key | Type | Default | Descriptions | | Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- | | --- | -----| ------- | ----------- |
| `mode` | String | `standalone` | The running mode of the datanode. It can be `standalone` or `distributed`. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. |
| `default_timezone` | String | Unset | The default timezone of the server. | | `default_timezone` | String | Unset | The default timezone of the server. |
| `default_column_prefix` | String | Unset | The default column prefix for auto-created time index and value columns. |
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. | | `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. | | `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
| `max_concurrent_queries` | Integer | `0` | The maximum number of concurrent queries allowed to be executed. Zero means unlimited.<br/>NOTE: This setting affects scan_memory_limit's privileged tier allocation.<br/>When set, 70% of queries get privileged memory access (full scan_memory_limit).<br/>The remaining 30% get standard tier access (70% of scan_memory_limit). | | `max_concurrent_queries` | Integer | `0` | The maximum number of concurrent queries allowed to be executed. Zero means unlimited. |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
| `max_in_flight_write_bytes` | String | Unset | The maximum in-flight write bytes. |
| `runtime` | -- | -- | The runtime options. | | `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. | | `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. | | `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `http` | -- | -- | The HTTP server options. | | `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. | | `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. | | `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. | | `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `http.max_total_body_memory` | String | Unset | Maximum total memory for all concurrent HTTP request bodies.<br/>Set to 0 to disable the limit. Default: "0" (unlimited) |
| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
| `http.prom_validation_mode` | String | `strict` | Whether to enable validation for Prometheus remote write requests.<br/>Available options:<br/>- strict: deny invalid UTF-8 strings (default).<br/>- lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).<br/>- unchecked: do not validate strings. |
| `grpc` | -- | -- | The gRPC server options. | | `grpc` | -- | -- | The gRPC server options. |
| `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. | | `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. | | `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.max_total_message_memory` | String | Unset | Maximum total memory for all concurrent gRPC request messages.<br/>Set to 0 to disable the limit. Default: "0" (unlimited) |
| `grpc.max_connection_age` | String | Unset | The maximum connection age for gRPC connection.<br/>The value can be a human-readable time string. For example: `10m` for ten minutes or `1h` for one hour.<br/>Refer to https://grpc.io/docs/guides/keepalive/ for more details. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. | | `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. | | `grpc.tls.mode` | String | `disable` | TLS mode. |
| `grpc.tls.cert_path` | String | Unset | Certificate file path. | | `grpc.tls.cert_path` | String | Unset | Certificate file path. |
@@ -44,8 +37,6 @@
| `mysql.enable` | Bool | `true` | Whether to enable. | | `mysql.enable` | Bool | `true` | Whether to enable. |
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. | | `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. | | `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
| `mysql.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
| `mysql.prepared_stmt_cache_size` | Integer | `10000` | Maximum entries in the MySQL prepared statement cache; default is 10,000. |
| `mysql.tls` | -- | -- | -- | | `mysql.tls` | -- | -- | -- |
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` | | `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
| `mysql.tls.cert_path` | String | Unset | Certificate file path. | | `mysql.tls.cert_path` | String | Unset | Certificate file path. |
@@ -55,7 +46,6 @@
| `postgres.enable` | Bool | `true` | Whether to enable | | `postgres.enable` | Bool | `true` | Whether to enable |
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgreSQL server. | | `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgreSQL server. |
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. | | `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
| `postgres.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
| `postgres.tls` | -- | -- | PostgreSQL server TLS options, see `mysql.tls` section. | | `postgres.tls` | -- | -- | PostgreSQL server TLS options, see `mysql.tls` section. |
| `postgres.tls.mode` | String | `disable` | TLS mode. | | `postgres.tls.mode` | String | `disable` | TLS mode. |
| `postgres.tls.cert_path` | String | Unset | Certificate file path. | | `postgres.tls.cert_path` | String | Unset | Certificate file path. |
@@ -65,17 +55,15 @@
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. | | `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
| `influxdb` | -- | -- | InfluxDB protocol options. | | `influxdb` | -- | -- | InfluxDB protocol options. |
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. | | `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
| `jaeger` | -- | -- | Jaeger protocol options. |
| `jaeger.enable` | Bool | `true` | Whether to enable Jaeger protocol in HTTP API. |
| `prom_store` | -- | -- | Prometheus remote storage options | | `prom_store` | -- | -- | Prometheus remote storage options |
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. | | `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. | | `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
| `wal` | -- | -- | The WAL options. | | `wal` | -- | -- | The WAL options. |
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. | | `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the wal is stored in the local file system by raft-engine.<br/>- `kafka`: it's remote wal that data is stored in Kafka. |
| `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.file_size` | String | `128MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.file_size` | String | `256MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_threshold` | String | `1GB` | The threshold of the WAL size to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.purge_threshold` | String | `4GB` | The threshold of the WAL size to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_interval` | String | `1m` | The interval to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.purge_interval` | String | `10m` | The interval to trigger a flush.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. | | `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
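A minimal sketch of a local `raft_engine` WAL section using the defaults listed above; the commented `dir` value is a placeholder since it is unset by default:

```toml
[wal]
provider = "raft_engine"
# dir = "/path/to/wal"    # placeholder; unset by default
file_size = "128MB"
purge_threshold = "1GB"
purge_interval = "1m"
read_batch_size = 128
sync_write = false
enable_log_recycle = true
```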
@@ -91,26 +79,22 @@
| `wal.create_topic_timeout` | String | `30s` | The timeout above which a topic creation operation will be cancelled.<br/>**It's only used when the provider is `kafka`**. | | `wal.create_topic_timeout` | String | `30s` | The timeout above which a topic creation operation will be cancelled.<br/>**It's only used when the provider is `kafka`**. |
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. | | `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. | | `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_init` | String | `500ms` | The initial backoff delay.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_max` | String | `10s` | The maximum backoff delay.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_base` | Integer | `2` | The exponential backoff rate, i.e. next backoff = base * current backoff.<br/>**It's only used when the provider is `kafka`**. |
| `wal.backoff_deadline` | String | `5mins` | The deadline of retries.<br/>**It's only used when the provider is `kafka`**. |
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. | | `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
| `metadata_store` | -- | -- | Metadata storage options. | | `metadata_store` | -- | -- | Metadata storage options. |
| `metadata_store.file_size` | String | `64MB` | The size of the metadata store log file. | | `metadata_store.file_size` | String | `256MB` | Kv file size in bytes. |
| `metadata_store.purge_threshold` | String | `256MB` | The threshold of the metadata store size to trigger a purge. | | `metadata_store.purge_threshold` | String | `4GB` | Kv purge threshold. |
| `metadata_store.purge_interval` | String | `1m` | The interval of the metadata store to trigger a purge. |
| `procedure` | -- | -- | Procedure storage options. | | `procedure` | -- | -- | Procedure storage options. |
| `procedure.max_retry_times` | Integer | `3` | Procedure max retry time. | | `procedure.max_retry_times` | Integer | `3` | Procedure max retry time. |
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially | | `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially |
| `procedure.max_running_procedures` | Integer | `128` | Max running procedures.<br/>The maximum number of procedures that can be running at the same time.<br/>If the number of running procedures exceeds this limit, the procedure will be rejected. |
| `flow` | -- | -- | flow engine options. |
| `flow.num_workers` | Integer | `0` | The number of flow worker in flownode.<br/>Not setting(or set to 0) this value will use the number of CPU cores divided by 2. |
| `query` | -- | -- | The query engine options. |
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Default to 0, which means the number of CPU cores. |
| `query.memory_pool_size` | String | `50%` | Memory pool size for query execution operators (aggregation, sorting, join).<br/>Supports absolute size (e.g., "2GB", "4GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit (unbounded, default behavior).<br/>When this limit is reached, queries will fail with ResourceExhausted error.<br/>NOTE: This does NOT limit memory used by table scans. |
| `storage` | -- | -- | The data storage options. | | `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `./greptimedb_data` | The working home directory. | | `storage.data_home` | String | `/tmp/greptimedb/` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. | | `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
| `storage.enable_read_cache` | Bool | `true` | Whether to enable read cache. If not set, the read cache will be enabled by default when using object storage. | | `storage.cache_path` | String | Unset | Cache configuration for object storage such as 'S3' etc.<br/>The local file cache directory. |
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3' etc, it's configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. | | `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. |
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. | | `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. | | `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
| `storage.access_key_id` | String | Unset | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. | | `storage.access_key_id` | String | Unset | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
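The metadata store, procedure, and query options above combine into a small TOML block like the following sketch; all values are the documented defaults:

```toml
[metadata_store]
file_size = "64MB"
purge_threshold = "256MB"
purge_interval = "1m"

[procedure]
max_retry_times = 3
retry_delay = "500ms"        # grows exponentially on retry
max_running_procedures = 128

[query]
parallelism = 0              # 0 means the number of CPU cores
memory_pool_size = "50%"     # does not cover table scans
```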
@@ -125,12 +109,6 @@
| `storage.sas_token` | String | Unset | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. | | `storage.sas_token` | String | Unset | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.endpoint` | String | Unset | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. | | `storage.endpoint` | String | Unset | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.region` | String | Unset | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. | | `storage.region` | String | Unset | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.http_client` | -- | -- | The http client options to the storage.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.http_client.pool_max_idle_per_host` | Integer | `1024` | The maximum idle connection per host allowed in the pool. |
| `storage.http_client.connect_timeout` | String | `30s` | The timeout for only the connect phase of a http client. |
| `storage.http_client.timeout` | String | `30s` | The total request timeout, applied from when the request starts connecting until the response body has finished.<br/>Also considered a total deadline. |
| `storage.http_client.pool_idle_timeout` | String | `90s` | The timeout for idle sockets being kept-alive. |
| `storage.http_client.skip_ssl_validation` | Bool | `false` | To skip the ssl verification<br/>**Security Notice**: Setting `skip_ssl_validation = true` disables certificate verification, making connections vulnerable to man-in-the-middle attacks. Only use this in development or trusted private networks. |
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. | | `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
| `region_engine.mito` | -- | -- | The Mito engine options. | | `region_engine.mito` | -- | -- | The Mito engine options. |
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. | | `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
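A hedged example of an S3 storage section with the read cache and HTTP client options above; bucket, root, endpoint, and region are placeholders, and credentials are omitted in favor of IAM roles as recommended:

```toml
[storage]
type = "S3"
bucket = "my-bucket"                              # placeholder
root = "greptimedb"                               # objects land under s3://my-bucket/greptimedb
endpoint = "https://s3.us-east-1.amazonaws.com"   # placeholder
region = "us-east-1"                              # placeholder
enable_read_cache = true
cache_capacity = "10GiB"                          # illustrative; size it to your disk

[storage.http_client]
pool_max_idle_per_host = 1024
connect_timeout = "30s"
timeout = "30s"
pool_idle_timeout = "90s"
skip_ssl_validation = false   # keep certificate verification on
```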
@@ -148,72 +126,61 @@
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. | | `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. | | `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. | | `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.enable_write_cache` | Bool | `false` | Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. | | `region_engine.mito.enable_experimental_write_cache` | Bool | `false` | Whether to enable the experimental write cache. |
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. | | `region_engine.mito.experimental_write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}/write_cache`. |
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. | | `region_engine.mito.experimental_write_cache_size` | String | `512MB` | Capacity for write cache. |
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. | | `region_engine.mito.experimental_write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.preload_index_cache` | Bool | `true` | Preload index (puffin) files into cache on region open (default: true).<br/>When enabled, index files are loaded into the write cache during region initialization,<br/>which can improve query performance at the cost of longer startup times. |
| `region_engine.mito.index_cache_percent` | Integer | `20` | Percentage of write cache capacity allocated for index (puffin) files (default: 20).<br/>The remaining capacity is used for data (parquet) files.<br/>Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,<br/>1GiB is reserved for index files and 4GiB for data files. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. | | `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.scan_parallelism` | Integer | `0` | Parallelism to scan a region (default: 1/4 of cpu cores).<br/>- `0`: using the default value (1/4 of cpu cores).<br/>- `1`: scan in current thread.<br/>- `n`: scan in parallelism n. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. | | `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.max_concurrent_scan_files` | Integer | `384` | Maximum number of SST files to scan concurrently. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. | | `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.scan_memory_limit` | String | `50%` | Memory limit for table scans across all queries.<br/>Supports absolute size (e.g., "2GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit.<br/>NOTE: Works with max_concurrent_queries for tiered memory allocation.<br/>- If max_concurrent_queries is set: 70% of queries get full access, 30% get 70% access.<br/>- If max_concurrent_queries is 0 (unlimited): first 20 queries get full access, rest get 70% access. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). | | `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
| `region_engine.mito.default_experimental_flat_format` | Bool | `false` | Whether to enable experimental flat format as the default format. |
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. | | `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. | | `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. | | `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
| `region_engine.mito.index.staging_ttl` | String | `7d` | The TTL of the staging directory.<br/>Defaults to 7 days.<br/>Setting it to "0s" to disable TTL. |
| `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
| `region_engine.mito.index.result_cache_size` | String | `128MiB` | Cache size for index result. |
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. | | `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.inverted_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold | | `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `auto` | Memory threshold for performing an external sort during index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. | | `region_engine.mito.inverted_index.intermediate_path` | String | `""` | Deprecated, use `region_engine.mito.index.aux_path` instead. |
| `region_engine.mito.inverted_index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.inverted_index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. | | `region_engine.mito.fulltext_index` | -- | -- | The options for full-text index in Mito engine. |
| `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.fulltext_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never | | `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold | | `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.bloom_filter_index` | -- | -- | The options for bloom filter in Mito engine. |
| `region_engine.mito.bloom_filter_index.create_on_flush` | String | `auto` | Whether to create the bloom filter on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.bloom_filter_index.create_on_compaction` | String | `auto` | Whether to create the bloom filter on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.bloom_filter_index.apply_on_query` | String | `auto` | Whether to apply the bloom filter on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.bloom_filter_index.mem_threshold_on_create` | String | `auto` | Memory threshold for bloom filter creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.memtable` | -- | -- | -- | | `region_engine.mito.memtable` | -- | -- | -- |
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) | | `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. | | `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. | | `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. | | `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
| `region_engine.file` | -- | -- | Enable the file engine. | | `region_engine.file` | -- | -- | Enable the file engine. |
| `region_engine.metric` | -- | -- | Metric engine options. |
| `region_engine.metric.sparse_primary_key_encoding` | Bool | `true` | Whether to use sparse primary key encoding. |
| `logging` | -- | -- | The logging options. | | `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. | | `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. | | `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. | | `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4318/v1/traces` | The OTLP tracing endpoint. | | `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. | | `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. | | `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. | | `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. | | `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- | | `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `slow_query` | -- | -- | The slow query log options. | | `logging.slow_query` | -- | -- | The slow query log options. |
| `slow_query.enable` | Bool | `false` | Whether to enable slow query log. | | `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `slow_query.record_type` | String | Unset | The record type of slow queries. It can be `system_table` or `log`. | | `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
| `slow_query.threshold` | String | Unset | The threshold of slow query. | | `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. | | `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | Whether to enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself<br/>You must create the database before enabling it. |
| `export_metrics.self_import.db` | String | Unset | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. | | `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. | | `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
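To tie the Mito engine rows together, here is a sketch of a write-cache plus index configuration using the defaults above; the `[[region_engine]]` array-of-tables form mirrors how the documented keys are nested:

```toml
[[region_engine]]
[region_engine.mito]
num_workers = 8
enable_write_cache = true       # recommended on object storage
write_cache_size = "5GiB"
sst_write_buffer_size = "8MB"
max_concurrent_scan_files = 384
scan_memory_limit = "50%"

[region_engine.mito.index]
staging_size = "2GB"
staging_ttl = "7d"
content_cache_size = "128MiB"
result_cache_size = "128MiB"

[region_engine.mito.memtable]
type = "time_series"
```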
## Distributed Mode ## Distributed Mode
@@ -223,8 +190,6 @@
| Key | Type | Default | Descriptions | | Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- | | --- | -----| ------- | ----------- |
| `default_timezone` | String | Unset | The default timezone of the server. | | `default_timezone` | String | Unset | The default timezone of the server. |
| `default_column_prefix` | String | Unset | The default column prefix for auto-created time index and value columns. |
| `max_in_flight_write_bytes` | String | Unset | The maximum in-flight write bytes. |
| `runtime` | -- | -- | The runtime options. | | `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. | | `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. | | `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
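A brief sketch of the top-level and runtime options above; the timezone and write limit values are illustrative since both are unset by default:

```toml
default_timezone = "UTC"             # illustrative
max_in_flight_write_bytes = "64MB"   # illustrative cap

[runtime]
global_rt_size = 8
compact_rt_size = 4
```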
@@ -233,40 +198,21 @@
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. | | `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
| `http` | -- | -- | The HTTP server options. | | `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. | | `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. | | `http.timeout` | String | `30s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. | | `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `http.max_total_body_memory` | String | Unset | Maximum total memory for all concurrent HTTP request bodies.<br/>Set to 0 to disable the limit. Default: "0" (unlimited) |
| `http.enable_cors` | Bool | `true` | HTTP CORS support, it's turned on by default<br/>This allows browser to access http APIs without CORS restrictions |
| `http.cors_allowed_origins` | Array | Unset | Customize allowed origins for HTTP CORS. |
| `http.prom_validation_mode` | String | `strict` | Whether to enable validation for Prometheus remote write requests.<br/>Available options:<br/>- strict: deny invalid UTF-8 strings (default).<br/>- lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).<br/>- unchecked: do not validate strings. |
| `grpc` | -- | -- | The gRPC server options. | | `grpc` | -- | -- | The gRPC server options. |
| `grpc.bind_addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. | | `grpc.addr` | String | `127.0.0.1:4001` | The address to bind the gRPC server. |
| `grpc.server_addr` | String | `127.0.0.1:4001` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `grpc.bind_addr`. | | `grpc.hostname` | String | `127.0.0.1` | The hostname advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. | | `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.max_total_message_memory` | String | Unset | Maximum total memory for all concurrent gRPC request messages.<br/>Set to 0 to disable the limit. Default: "0" (unlimited) |
| `grpc.flight_compression` | String | `arrow_ipc` | Compression mode for frontend side Arrow IPC service. Available options:<br/>- `none`: disable all compression<br/>- `transport`: only enable gRPC transport compression (zstd)<br/>- `arrow_ipc`: only enable Arrow IPC compression (lz4)<br/>- `all`: enable all compression.<br/>Default to `none` |
| `grpc.max_connection_age` | String | Unset | The maximum connection age for gRPC connection.<br/>The value can be a human-readable time string. For example: `10m` for ten minutes or `1h` for one hour.<br/>Refer to https://grpc.io/docs/guides/keepalive/ for more details. |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. | | `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. | | `grpc.tls.mode` | String | `disable` | TLS mode. |
| `grpc.tls.cert_path` | String | Unset | Certificate file path. | | `grpc.tls.cert_path` | String | Unset | Certificate file path. |
| `grpc.tls.key_path` | String | Unset | Private key file path. | | `grpc.tls.key_path` | String | Unset | Private key file path. |
| `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. | | `grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
| `internal_grpc` | -- | -- | The internal gRPC server options. Internal gRPC port for nodes inside cluster to access frontend. |
| `internal_grpc.bind_addr` | String | `127.0.0.1:4010` | The address to bind the gRPC server. |
| `internal_grpc.server_addr` | String | `127.0.0.1:4010` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `grpc.bind_addr`. |
| `internal_grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `internal_grpc.flight_compression` | String | `arrow_ipc` | Compression mode for frontend side Arrow IPC service. Available options:<br/>- `none`: disable all compression<br/>- `transport`: only enable gRPC transport compression (zstd)<br/>- `arrow_ipc`: only enable Arrow IPC compression (lz4)<br/>- `all`: enable all compression.<br/>Default to `none` |
| `internal_grpc.tls` | -- | -- | internal gRPC server TLS options, see `mysql.tls` section. |
| `internal_grpc.tls.mode` | String | `disable` | TLS mode. |
| `internal_grpc.tls.cert_path` | String | Unset | Certificate file path. |
| `internal_grpc.tls.key_path` | String | Unset | Private key file path. |
| `internal_grpc.tls.watch` | Bool | `false` | Watch for Certificate and key file change and auto reload.<br/>For now, gRPC tls config does not support auto reload. |
| `mysql` | -- | -- | MySQL server options. | | `mysql` | -- | -- | MySQL server options. |
| `mysql.enable` | Bool | `true` | Whether to enable. | | `mysql.enable` | Bool | `true` | Whether to enable. |
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. | | `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. | | `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
| `mysql.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
| `mysql.prepared_stmt_cache_size` | Integer | `10000` | Maximum entries in the MySQL prepared statement cache; default is 10,000. |
| `mysql.tls` | -- | -- | -- | | `mysql.tls` | -- | -- | -- |
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` | | `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
| `mysql.tls.cert_path` | String | Unset | Certificate file path. | | `mysql.tls.cert_path` | String | Unset | Certificate file path. |
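A sketch of the frontend gRPC and internal gRPC options above, using the documented defaults; the advertised `server_addr` values assume a single-host setup:

```toml
[grpc]
bind_addr = "127.0.0.1:4001"
server_addr = "127.0.0.1:4001"     # address advertised to the metasrv
runtime_size = 8
flight_compression = "arrow_ipc"   # lz4 Arrow IPC compression only

[internal_grpc]
bind_addr = "127.0.0.1:4010"
server_addr = "127.0.0.1:4010"
runtime_size = 8
flight_compression = "arrow_ipc"
```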
@@ -276,7 +222,6 @@
| `postgres.enable` | Bool | `true` | Whether to enable | | `postgres.enable` | Bool | `true` | Whether to enable |
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgreSQL server. | | `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgreSQL server. |
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. | | `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
| `postgres.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
| `postgres.tls` | -- | -- | PostgreSQL server TLS options, see `mysql.tls` section. | | `postgres.tls` | -- | -- | PostgreSQL server TLS options, see `mysql.tls` section. |
| `postgres.tls.mode` | String | `disable` | TLS mode. | | `postgres.tls.mode` | String | `disable` | TLS mode. |
| `postgres.tls.cert_path` | String | Unset | Certificate file path. | | `postgres.tls.cert_path` | String | Unset | Certificate file path. |
@@ -286,99 +231,75 @@
| `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. | | `opentsdb.enable` | Bool | `true` | Whether to enable OpenTSDB put in HTTP API. |
| `influxdb` | -- | -- | InfluxDB protocol options. | | `influxdb` | -- | -- | InfluxDB protocol options. |
| `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. | | `influxdb.enable` | Bool | `true` | Whether to enable InfluxDB protocol in HTTP API. |
| `jaeger` | -- | -- | Jaeger protocol options. |
| `jaeger.enable` | Bool | `true` | Whether to enable Jaeger protocol in HTTP API. |
| `prom_store` | -- | -- | Prometheus remote storage options | | `prom_store` | -- | -- | Prometheus remote storage options |
| `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. | | `prom_store.enable` | Bool | `true` | Whether to enable Prometheus remote write and read in HTTP API. |
| `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. | | `prom_store.with_metric_engine` | Bool | `true` | Whether to store the data from Prometheus remote write in metric engine. |
| `meta_client` | -- | -- | The metasrv client options. | | `meta_client` | -- | -- | The metasrv client options. |
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. | | `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
| `meta_client.timeout` | String | `3s` | Operation timeout. | | `meta_client.timeout` | String | `3s` | Operation timeout. |
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. | | `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. | | `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. | | `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
| `meta_client.metadata_cache_max_capacity` | Integer | `100000` | The configuration about the cache of the metadata. | | `meta_client.metadata_cache_max_capacity` | Integer | `100000` | The configuration about the cache of the metadata. |
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. | | `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
| `meta_client.metadata_cache_tti` | String | `5m` | -- | | `meta_client.metadata_cache_tti` | String | `5m` | -- |
| `query` | -- | -- | The query engine options. |
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Default to 0, which means the number of CPU cores. |
| `query.allow_query_fallback` | Bool | `false` | Whether to allow query fallback when push-down optimization fails.<br/>Defaults to false, meaning an error is returned when push-down optimization fails. |
| `query.memory_pool_size` | String | `50%` | Memory pool size for query execution operators (aggregation, sorting, join).<br/>Supports absolute size (e.g., "4GB", "8GB") or percentage of system memory (e.g., "30%").<br/>Setting it to 0 disables the limit (unbounded, default behavior).<br/>When this limit is reached, queries will fail with ResourceExhausted error.<br/>NOTE: This does NOT limit memory used by table scans (only applies to datanodes). |
| `datanode` | -- | -- | Datanode options. | | `datanode` | -- | -- | Datanode options. |
| `datanode.client` | -- | -- | Datanode client options. | | `datanode.client` | -- | -- | Datanode client options. |
| `datanode.client.connect_timeout` | String | `10s` | -- | | `datanode.client.connect_timeout` | String | `10s` | -- |
| `datanode.client.tcp_nodelay` | Bool | `true` | -- | | `datanode.client.tcp_nodelay` | Bool | `true` | -- |
| `logging` | -- | -- | The logging options. | | `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. | | `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. | | `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. | | `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4318/v1/traces` | The OTLP tracing endpoint. | | `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. | | `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. | | `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. | | `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. | | `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- | | `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `slow_query` | -- | -- | The slow query log options. | | `logging.slow_query` | -- | -- | The slow query log options. |
| `slow_query.enable` | Bool | `true` | Whether to enable slow query log. | | `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `slow_query.record_type` | String | `system_table` | The record type of slow queries. It can be `system_table` or `log`.<br/>If `system_table` is selected, the slow queries will be recorded in a system table `greptime_private.slow_queries`.<br/>If `log` is selected, the slow queries will be logged in a log file `greptimedb-slow-queries.*`. | | `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
| `slow_query.threshold` | String | `30s` | The threshold of slow query. It can be a human-readable time string, for example: `10s`, `100ms`, `1s`. | | `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `slow_query.sample_ratio` | Float | `1.0` | The sampling ratio of slow query log. The value should be in the range of (0, 1]. For example, `0.1` means 10% of the slow queries will be logged and `1.0` means all slow queries will be logged. | | `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `slow_query.ttl` | String | `90d` | The TTL of the `slow_queries` system table. Default is `90d` when `record_type` is `system_table`. | | `export_metrics.enable` | Bool | `false` | Whether to enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself<br/>You must create the database before enabling it. |
| `export_metrics.self_import.db` | String | Unset | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. | | `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. | | `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
| `event_recorder` | -- | -- | Configuration options for the event recorder. |
| `event_recorder.ttl` | String | `90d` | TTL for the events table that will be used to store the events. Default is `90d`. |
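A hedged sketch of the metasrv client and slow query log options above; the metasrv address is a placeholder and the slow query values mirror the listed defaults:

```toml
[meta_client]
metasrv_addrs = ["127.0.0.1:3002"]   # placeholder metasrv endpoint
timeout = "3s"
heartbeat_timeout = "500ms"
ddl_timeout = "10s"
connect_timeout = "1s"
tcp_nodelay = true
metadata_cache_max_capacity = 100000
metadata_cache_ttl = "10m"
metadata_cache_tti = "5m"

[slow_query]
enable = true
record_type = "system_table"   # recorded in greptime_private.slow_queries
threshold = "30s"
sample_ratio = 1.0
ttl = "90d"
```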
### Metasrv ### Metasrv
| Key | Type | Default | Descriptions | | Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- | | --- | -----| ------- | ----------- |
| `data_home` | String | `./greptimedb_data` | The working home directory. | | `data_home` | String | `/tmp/metasrv/` | The working home directory. |
| `store_addrs` | Array | -- | Store server address(es). The format depends on the selected backend.<br/><br/>For etcd: a list of "host:port" endpoints.<br/>e.g. ["192.168.1.1:2379", "192.168.1.2:2379"]<br/><br/>For PostgreSQL: a connection string in libpq format or URI.<br/>e.g.<br/>- "host=localhost port=5432 user=postgres password=<PASSWORD> dbname=postgres"<br/>- "postgresql://user:password@localhost:5432/mydb?connect_timeout=10"<br/>The detail see: https://docs.rs/tokio-postgres/latest/tokio_postgres/config/struct.Config.html<br/><br/>For mysql store, the format is a MySQL connection URL.<br/>e.g. "mysql://user:password@localhost:3306/greptime_meta?ssl-mode=VERIFY_CA&ssl-ca=/path/to/ca.pem" | | `bind_addr` | String | `127.0.0.1:3002` | The bind address of metasrv. |
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. | | `server_addr` | String | `127.0.0.1:3002` | The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost. |
| `backend` | String | `etcd_store` | The datastore for meta server.<br/>Available values:<br/>- `etcd_store` (default value)<br/>- `memory_store`<br/>- `postgres_store`<br/>- `mysql_store` | | `store_addr` | String | `127.0.0.1:2379` | Store server address default to etcd store. |
| `meta_table_name` | String | `greptime_metakv` | Table name in RDS to store metadata. Effect when using a RDS kvbackend.<br/>**Only used when backend is `postgres_store`.** |
| `meta_schema_name` | String | `greptime_schema` | Optional PostgreSQL schema for metadata table and election table name qualification.<br/>When PostgreSQL public schema is not writable (e.g., PostgreSQL 15+ with restricted public),<br/>set this to a writable schema. GreptimeDB will use `meta_schema_name`.`meta_table_name`.<br/>GreptimeDB will NOT create the schema automatically; please ensure it exists or the user has permission.<br/>**Only used when backend is `postgres_store`.** |
| `meta_election_lock_id` | Integer | `1` | Advisory lock id in PostgreSQL for election. Effect when using PostgreSQL as kvbackend<br/>Only used when backend is `postgres_store`. |
| `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". | | `selector` | String | `round_robin` | Datanode selector type.<br/>- `round_robin` (default value)<br/>- `lease_based`<br/>- `load_based`<br/>For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector". |
| `use_memory_store` | Bool | `false` | Store data in memory. | | `use_memory_store` | Bool | `false` | Store data in memory. |
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. |
| `store_key_prefix` | String | `""` | If it's not empty, the metasrv will store all data with this key prefix. |
| `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). | | `enable_region_failover` | Bool | `false` | Whether to enable region failover.<br/>This feature is only available on GreptimeDB running on cluster mode and<br/>- Using Remote WAL<br/>- Using shared storage (e.g., s3). |
| `region_failure_detector_initialization_delay` | String | `10m` | The delay before starting region failure detection.<br/>This delay helps prevent Metasrv from triggering unnecessary region failovers before all Datanodes are fully started.<br/>Especially useful when the cluster is not deployed with GreptimeDB Operator and maintenance mode is not enabled. | | `backend` | String | `EtcdStore` | The datastore for meta server. |
| `allow_region_failover_on_local_wal` | Bool | `false` | Whether to allow region failover on local WAL.<br/>**This option is not recommended to be set to true, because it may lead to data loss during failover.** |
| `node_max_idle_time` | String | `24hours` | Max allowed idle time before removing node info from metasrv memory. |
| `enable_telemetry` | Bool | `true` | Whether to enable greptimedb telemetry. Enabled by default. |
| `runtime` | -- | -- | The runtime options. |
| `runtime.global_rt_size` | Integer | `8` | The number of threads to execute the runtime for global read operations. |
| `runtime.compact_rt_size` | Integer | `4` | The number of threads to execute the runtime for global write operations. |
| `backend_tls` | -- | -- | TLS configuration for kv store backend (applicable for etcd, PostgreSQL, and MySQL backends)<br/>When using etcd, PostgreSQL, or MySQL as metadata store, you can configure TLS here<br/><br/>Note: if TLS is configured in both this section and the `store_addrs` connection string, the<br/>settings here will override the TLS settings in `store_addrs`. |
| `backend_tls.mode` | String | `prefer` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- "disable" - No TLS<br/>- "prefer" (default) - Try TLS, fallback to plain<br/>- "require" - Require TLS<br/>- "verify_ca" - Require TLS and verify CA<br/>- "verify_full" - Require TLS and verify hostname |
| `backend_tls.cert_path` | String | `""` | Path to client certificate file (for client authentication)<br/>Like "/path/to/client.crt" |
| `backend_tls.key_path` | String | `""` | Path to client private key file (for client authentication)<br/>Like "/path/to/client.key" |
| `backend_tls.ca_cert_path` | String | `""` | Path to CA certificate file (for server certificate verification)<br/>Required when using custom CAs or self-signed certificates<br/>Leave empty to use system root certificates only<br/>Like "/path/to/ca.crt" |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.bind_addr` | String | `127.0.0.1:3002` | The address to bind the gRPC server. |
| `grpc.server_addr` | String | `127.0.0.1:3002` | The communication server address for the frontend and datanode to connect to metasrv.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `bind_addr`. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `procedure` | -- | -- | Procedure storage options. |
| `procedure.max_retry_times` | Integer | `12` | Procedure max retry times. |
| `procedure.retry_delay` | String | `500ms` | Initial retry delay of procedures, increases exponentially. |
| `procedure.max_metadata_value_size` | String | `1500KiB` | Automatically split large values.<br/>GreptimeDB procedure uses etcd as the default metadata storage backend.<br/>etcd limits the maximum size of any request to 1.5 MiB.<br/>1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of key)<br/>Comment out `max_metadata_value_size` to disable splitting large values (no limit). |
| `procedure.max_running_procedures` | Integer | `128` | Max running procedures.<br/>The maximum number of procedures that can be running at the same time.<br/>If the number of running procedures exceeds this limit, the procedure will be rejected. |
| `failure_detector` | -- | -- | -- |
| `failure_detector.threshold` | Float | `8.0` | Maximum acceptable φ before the peer is treated as failed.<br/>Lower values react faster but yield more false positives. |
| `failure_detector.min_std_deviation` | String | `100ms` | The minimum standard deviation of the heartbeat intervals,<br/>so tiny variations don't make φ explode. Prevents hypersensitivity when heartbeat intervals barely vary. |
| `failure_detector.acceptable_heartbeat_pause` | String | `10000ms` | The acceptable pause duration between heartbeats.<br/>An additional grace period on top of the learned mean interval before φ rises, absorbing temporary network hiccups or GC pauses. |
| `failure_detector.first_heartbeat_estimate` | String | `1000ms` | The initial estimate of the heartbeat interval used by the failure detector. |
| `datanode` | -- | -- | Datanode options. |
| `datanode.client` | -- | -- | Datanode client options. |
| `datanode.client.timeout` | String | `10s` | Operation timeout. |
| `datanode.client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
| `wal` | -- | -- | -- |
| `wal.provider` | String | `raft_engine` | -- |
| `wal.broker_endpoints` | Array | -- | The broker endpoints of the Kafka cluster.<br/><br/>**It's only used when the provider is `kafka`**. |
| `wal.auto_create_topics` | Bool | `true` | Automatically create topics for WAL.<br/>Set to `true` to automatically create topics for WAL.<br/>Otherwise, use topics named `topic_name_prefix_[0..num_topics)`.<br/>**It's only used when the provider is `kafka`**. |
| `wal.auto_prune_interval` | String | `30m` | Interval of automatic WAL pruning.<br/>Set to `0s` to disable automatic WAL pruning, which periodically deletes unused remote WAL entries.<br/>**It's only used when the provider is `kafka`**. |
| `wal.flush_trigger_size` | String | `512MB` | Estimated size threshold to trigger a flush when using Kafka remote WAL.<br/>Since multiple regions may share a Kafka topic, the estimated size is calculated as:<br/> (latest_entry_id - flushed_entry_id) * avg_record_size<br/>MetaSrv triggers a flush for a region when this estimated size exceeds `flush_trigger_size`.<br/>- `latest_entry_id`: The latest entry ID in the topic.<br/>- `flushed_entry_id`: The last flushed entry ID for the region.<br/>Set to "0" to let the system decide the flush trigger size.<br/>**It's only used when the provider is `kafka`**. |
| `wal.checkpoint_trigger_size` | String | `128MB` | Estimated size threshold to trigger a checkpoint when using Kafka remote WAL.<br/>The estimated size is calculated as:<br/> (latest_entry_id - last_checkpoint_entry_id) * avg_record_size<br/>MetaSrv triggers a checkpoint for a region when this estimated size exceeds `checkpoint_trigger_size`.<br/>Set to "0" to let the system decide the checkpoint trigger size.<br/>**It's only used when the provider is `kafka`**. |
| `wal.auto_prune_parallelism` | Integer | `10` | Concurrent task limit for automatic WAL pruning.<br/>**It's only used when the provider is `kafka`**. |
| `wal.num_topics` | Integer | `64` | Number of topics used for remote WAL.<br/>**It's only used when the provider is `kafka`**. |
| `wal.selector_type` | String | `round_robin` | Topic selector type.<br/>Available selector types:<br/>- `round_robin` (default)<br/>**It's only used when the provider is `kafka`**. |
| `wal.topic_name_prefix` | String | `greptimedb_wal_topic` | A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.<br/>Only accepts strings that match the following regular expression pattern:<br/>[a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*<br/>e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.<br/>**It's only used when the provider is `kafka`**. |
| `wal.replication_factor` | Integer | `1` | Expected number of replicas of each partition.<br/>**It's only used when the provider is `kafka`**. |
| `wal.create_topic_timeout` | String | `30s` | The timeout for creating a Kafka topic.<br/>**It's only used when the provider is `kafka`**. |
| `event_recorder` | -- | -- | Configuration options for the event recorder. |
| `event_recorder.ttl` | String | `90d` | TTL for the events table that will be used to store the events. Default is `90d`. |
| `stats_persistence` | -- | -- | Configuration options for the stats persistence. |
| `stats_persistence.ttl` | String | `0s` | TTL for the stats table that will be used to store the stats.<br/>Set to `0s` to disable stats persistence.<br/>Default is `0s`.<br/>If you want to enable stats persistence, set the TTL to a value greater than 0.<br/>It is recommended to set a small value, e.g., `3h`. |
| `stats_persistence.interval` | String | `10m` | The interval to persist the stats. Default is `10m`.<br/>The minimum value is `10m`, if the value is less than `10m`, it will be overridden to `10m`. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4318/v1/traces` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `logging.slow_query` | -- | -- | The slow query log options. |
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `export_metrics` | -- | -- | The metasrv can export its metrics and send them to a Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.<br/>You must create the database before enabling it. |
| `export_metrics.self_import.db` | String | Unset | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
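
Taken together, a typical metasrv deployment only overrides a handful of these keys. Below is a minimal sketch of a metasrv TOML file that switches the metadata store from the default `etcd_store` to `postgres_store` and verifies the server certificate against a custom CA; the connection string, schema name, and certificate path are placeholders, and every option omitted here keeps the defaults listed above.

```toml
# Use PostgreSQL instead of the default etcd_store as the metadata store.
backend = "postgres_store"
# libpq-style connection string; host, user, and password are placeholders.
store_addrs = ["host=10.0.0.5 port=5432 user=greptime password=<PASSWORD> dbname=postgres"]
meta_table_name = "greptime_metakv"
meta_schema_name = "greptime_schema"

selector = "round_robin"
# Region failover needs remote WAL and shared storage (e.g. S3); left disabled here.
enable_region_failover = false

# TLS for the metadata store backend; overrides any TLS settings in store_addrs.
[backend_tls]
mode = "verify_ca"
ca_cert_path = "/path/to/ca.crt"

[grpc]
bind_addr = "127.0.0.1:3002"
server_addr = "127.0.0.1:3002"
```
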
### Datanode
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `node_id` | Integer | Unset | The datanode identifier and should be unique in the cluster. |
| `default_column_prefix` | String | Unset | The default column prefix for auto-created time index and value columns. |
| `require_lease_before_startup` | Bool | `false` | Start services after regions have obtained leases.<br/>It will block the datanode start if it can't receive leases in the heartbeat from metasrv. |
| `init_regions_in_background` | Bool | `false` | Initialize all regions in the background during the startup.<br/>By default, it provides services after all regions have been initialized. |
| `init_regions_parallelism` | Integer | `16` | Parallelism of initializing regions. |
| `max_concurrent_queries` | Integer | `0` | The maximum number of concurrent queries allowed to be executed. Zero means unlimited.<br/>NOTE: This setting affects scan_memory_limit's privileged tier allocation.<br/>When set, 70% of queries get privileged memory access (full scan_memory_limit).<br/>The remaining 30% get standard tier access (70% of scan_memory_limit). |
| `enable_telemetry` | Bool | `true` | Enable telemetry to collect anonymous usage data. Enabled by default. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.bind_addr` | String | `127.0.0.1:3001` | The address to bind the gRPC server. |
| `grpc.server_addr` | String | `127.0.0.1:3001` | The address advertised to the metasrv, and used for connections from outside the host.<br/>If left empty or unset, the server will automatically use the IP address of the first network interface<br/>on the host, with the same port number as the one specified in `grpc.bind_addr`. |
| `grpc.runtime_size` | Integer | `8` | The number of server worker threads. |
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
| `grpc.flight_compression` | String | `arrow_ipc` | Compression mode for datanode side Arrow IPC service. Available options:<br/>- `none`: disable all compression<br/>- `transport`: only enable gRPC transport compression (zstd)<br/>- `arrow_ipc`: only enable Arrow IPC compression (lz4)<br/>- `all`: enable all compression.<br/>Default to `none` |
| `grpc.tls` | -- | -- | gRPC server TLS options, see `mysql.tls` section. |
| `grpc.tls.mode` | String | `disable` | TLS mode. |
| `grpc.tls.cert_path` | String | Unset | Certificate file path. |
| `meta_client` | -- | -- | The metasrv client options. |
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
| `meta_client.timeout` | String | `3s` | Operation timeout. |
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
| `meta_client.metadata_cache_ttl` | String | `10m` | TTL of the metadata cache. |
| `meta_client.metadata_cache_tti` | String | `5m` | -- |
| `wal` | -- | -- | The WAL options. |
| `wal.provider` | String | `raft_engine` | The provider of the WAL.<br/>- `raft_engine`: the WAL is stored in the local file system by raft-engine.<br/>- `kafka`: remote WAL whose data is stored in Kafka.<br/>- `noop`: a no-op WAL provider that does not store any WAL data.<br/>**Notes: any unflushed data will be lost when the datanode is shut down.** |
| `wal.dir` | String | Unset | The directory to store the WAL files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.file_size` | String | `128MB` | The size of the WAL segment file.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_threshold` | String | `1GB` | The threshold of the WAL size to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.purge_interval` | String | `1m` | The interval to trigger a purge.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.read_batch_size` | Integer | `128` | The read batch size.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.sync_write` | Bool | `false` | Whether to use sync write.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.enable_log_recycle` | Bool | `true` | Whether to reuse logically truncated log files.<br/>**It's only used when the provider is `raft_engine`**. |
| `wal.broker_endpoints` | Array | -- | The Kafka broker endpoints.<br/>**It's only used when the provider is `kafka`**. |
| `wal.max_batch_bytes` | String | `1MB` | The max size of a single producer batch.<br/>Warning: Kafka has a default limit of 1MB per message in a topic.<br/>**It's only used when the provider is `kafka`**. |
| `wal.consumer_wait_timeout` | String | `100ms` | The consumer wait timeout.<br/>**It's only used when the provider is `kafka`**. |
| `wal.create_index` | Bool | `true` | Whether to enable WAL index creation.<br/>**It's only used when the provider is `kafka`**. |
| `wal.dump_index_interval` | String | `60s` | The interval for dumping WAL indexes.<br/>**It's only used when the provider is `kafka`**. |
| `wal.overwrite_entry_start_id` | Bool | `false` | Ignore missing entries during read WAL.<br/>**It's only used when the provider is `kafka`**.<br/><br/>This option ensures that when Kafka messages are deleted, the system<br/>can still successfully replay memtable data without throwing an<br/>out-of-range error.<br/>However, enabling this option might lead to unexpected data loss,<br/>as the system will skip over missing entries instead of treating<br/>them as critical errors. |
| `query` | -- | -- | The query engine options. |
| `query.parallelism` | Integer | `0` | Parallelism of the query engine.<br/>Default to 0, which means the number of CPU cores. |
| `query.memory_pool_size` | String | `50%` | Memory pool size for query execution operators (aggregation, sorting, join).<br/>Supports absolute size (e.g., "2GB", "4GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit (unbounded, default behavior).<br/>When this limit is reached, queries will fail with ResourceExhausted error.<br/>NOTE: This does NOT limit memory used by table scans. |
| `storage` | -- | -- | The data storage options. |
| `storage.data_home` | String | `./greptimedb_data` | The working home directory. |
| `storage.type` | String | `File` | The storage type used to store the data.<br/>- `File`: the data is stored in the local file system.<br/>- `S3`: the data is stored in the S3 object storage.<br/>- `Gcs`: the data is stored in the Google Cloud Storage.<br/>- `Azblob`: the data is stored in the Azure Blob Storage.<br/>- `Oss`: the data is stored in the Aliyun OSS. |
| `storage.cache_path` | String | Unset | Read cache configuration for object storage such as 'S3', configured by default when using object storage. It is recommended to configure it when using object storage for better performance.<br/>A local file directory, defaults to `{data_home}`. An empty string means disabling. |
| `storage.enable_read_cache` | Bool | `true` | Whether to enable read cache. If not set, the read cache will be enabled by default when using object storage. |
| `storage.cache_capacity` | String | Unset | The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. |
| `storage.bucket` | String | Unset | The S3 bucket name.<br/>**It's only used when the storage type is `S3`, `Oss` and `Gcs`**. |
| `storage.root` | String | Unset | The S3 data will be stored in the specified prefix, for example, `s3://${bucket}/${root}`.<br/>**It's only used when the storage type is `S3`, `Oss` and `Azblob`**. |
| `storage.access_key_id` | String | Unset | The access key id of the aws account.<br/>It's **highly recommended** to use AWS IAM roles instead of hardcoding the access key id and secret key.<br/>**It's only used when the storage type is `S3` and `Oss`**. |
| `storage.sas_token` | String | Unset | The sas token of the azure account.<br/>**It's only used when the storage type is `Azblob`**. |
| `storage.endpoint` | String | Unset | The endpoint of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.region` | String | Unset | The region of the S3 service.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.http_client` | -- | -- | The http client options to the storage.<br/>**It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**. |
| `storage.http_client.pool_max_idle_per_host` | Integer | `1024` | The maximum idle connection per host allowed in the pool. |
| `storage.http_client.connect_timeout` | String | `30s` | The timeout for only the connect phase of a http client. |
| `storage.http_client.timeout` | String | `30s` | The total request timeout, applied from when the request starts connecting until the response body has finished.<br/>Also considered a total deadline. |
| `storage.http_client.pool_idle_timeout` | String | `90s` | The timeout for idle sockets being kept-alive. |
| `storage.http_client.skip_ssl_validation` | Bool | `false` | To skip the ssl verification<br/>**Security Notice**: Setting `skip_ssl_validation = true` disables certificate verification, making connections vulnerable to man-in-the-middle attacks. Only use this in development or trusted private networks. |
| `[[region_engine]]` | -- | -- | The region engine options. You can configure multiple region engines. |
| `region_engine.mito` | -- | -- | The Mito engine options. |
| `region_engine.mito.num_workers` | Integer | `8` | Number of region workers. |
| `region_engine.mito.worker_channel_size` | Integer | `128` | Request channel size of each worker. |
| `region_engine.mito.worker_request_batch_size` | Integer | `64` | Max batch size for a worker to handle requests. |
| `region_engine.mito.manifest_checkpoint_distance` | Integer | `10` | Number of meta action updated to trigger a new checkpoint for the manifest. |
| `region_engine.mito.experimental_manifest_keep_removed_file_count` | Integer | `256` | Number of removed files to keep in manifest's `removed_files` field before also<br/>remove them from `removed_files`. Mostly for debugging purpose.<br/>If set to 0, it will only use `keep_removed_file_ttl` to decide when to remove files<br/>from `removed_files` field. |
| `region_engine.mito.experimental_manifest_keep_removed_file_ttl` | String | `1h` | How long to keep removed files in the `removed_files` field of manifest<br/>after they are removed from manifest.<br/>files will only be removed from `removed_files` field<br/>if both `keep_removed_file_count` and `keep_removed_file_ttl` is reached. |
| `region_engine.mito.compress_manifest` | Bool | `false` | Whether to compress manifest and checkpoint file by gzip (default false). |
| `region_engine.mito.max_background_flushes` | Integer | Auto | Max number of running background flush jobs (default: 1/2 of cpu cores). |
| `region_engine.mito.max_background_compactions` | Integer | Auto | Max number of running background compaction jobs (default: 1/4 of cpu cores). |
| `region_engine.mito.vector_cache_size` | String | Auto | Cache size for vectors and arrow arrays. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.page_cache_size` | String | Auto | Cache size for pages of SST row groups. Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/8 of OS memory. |
| `region_engine.mito.selector_result_cache_size` | String | Auto | Cache size for time series selector (e.g. `last_value()`). Setting it to 0 to disable the cache.<br/>If not set, it's default to 1/16 of OS memory with a max limitation of 512MB. |
| `region_engine.mito.enable_write_cache` | Bool | `false` | Whether to enable the write cache, it's enabled by default when using object storage. It is recommended to enable it when using object storage for better performance. |
| `region_engine.mito.write_cache_path` | String | `""` | File system path for write cache, defaults to `{data_home}`. |
| `region_engine.mito.write_cache_size` | String | `5GiB` | Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. |
| `region_engine.mito.write_cache_ttl` | String | Unset | TTL for write cache. |
| `region_engine.mito.preload_index_cache` | Bool | `true` | Preload index (puffin) files into cache on region open (default: true).<br/>When enabled, index files are loaded into the write cache during region initialization,<br/>which can improve query performance at the cost of longer startup times. |
| `region_engine.mito.index_cache_percent` | Integer | `20` | Percentage of write cache capacity allocated for index (puffin) files (default: 20).<br/>The remaining capacity is used for data (parquet) files.<br/>Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,<br/>1GiB is reserved for index files and 4GiB for data files. |
| `region_engine.mito.sst_write_buffer_size` | String | `8MB` | Buffer size for SST writing. |
| `region_engine.mito.parallel_scan_channel_size` | Integer | `32` | Capacity of the channel to send data from parallel scan tasks to the main task. |
| `region_engine.mito.max_concurrent_scan_files` | Integer | `384` | Maximum number of SST files to scan concurrently. |
| `region_engine.mito.allow_stale_entries` | Bool | `false` | Whether to allow stale WAL entries read during replay. |
| `region_engine.mito.scan_memory_limit` | String | `50%` | Memory limit for table scans across all queries.<br/>Supports absolute size (e.g., "2GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit.<br/>NOTE: Works with max_concurrent_queries for tiered memory allocation.<br/>- If max_concurrent_queries is set: 70% of queries get full access, 30% get 70% access.<br/>- If max_concurrent_queries is 0 (unlimited): first 20 queries get full access, rest get 70% access. |
| `region_engine.mito.min_compaction_interval` | String | `0m` | Minimum time interval between two compactions.<br/>To align with the old behavior, the default value is 0 (no restrictions). |
| `region_engine.mito.default_experimental_flat_format` | Bool | `false` | Whether to enable experimental flat format as the default format. |
| `region_engine.mito.index` | -- | -- | The options for index in Mito engine. |
| `region_engine.mito.index.aux_path` | String | `""` | Auxiliary directory path for the index in filesystem, used to store intermediate files for<br/>creating the index and staging files for searching the index, defaults to `{data_home}/index_intermediate`.<br/>The default name for this directory is `index_intermediate` for backward compatibility.<br/><br/>This path contains two subdirectories:<br/>- `__intm`: for storing intermediate files used during creating index.<br/>- `staging`: for storing staging files used during searching index. |
| `region_engine.mito.index.staging_size` | String | `2GB` | The max capacity of the staging directory. |
| `region_engine.mito.index.staging_ttl` | String | `7d` | The TTL of the staging directory.<br/>Defaults to 7 days.<br/>Setting it to "0s" to disable TTL. |
| `region_engine.mito.index.metadata_cache_size` | String | `64MiB` | Cache size for inverted index metadata. |
| `region_engine.mito.index.content_cache_size` | String | `128MiB` | Cache size for inverted index content. |
| `region_engine.mito.index.content_cache_page_size` | String | `64KiB` | Page size for inverted index content cache. |
| `region_engine.mito.index.result_cache_size` | String | `128MiB` | Cache size for index result. |
| `region_engine.mito.inverted_index` | -- | -- | The options for inverted index in Mito engine. |
| `region_engine.mito.inverted_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.inverted_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.fulltext_index.mem_threshold_on_create` | String | `auto` | Memory threshold for index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.bloom_filter_index` | -- | -- | The options for bloom filter index in Mito engine. |
| `region_engine.mito.bloom_filter_index.create_on_flush` | String | `auto` | Whether to create the index on flush.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.bloom_filter_index.create_on_compaction` | String | `auto` | Whether to create the index on compaction.<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.bloom_filter_index.apply_on_query` | String | `auto` | Whether to apply the index on query<br/>- `auto`: automatically (default)<br/>- `disable`: never |
| `region_engine.mito.bloom_filter_index.mem_threshold_on_create` | String | `auto` | Memory threshold for the index creation.<br/>- `auto`: automatically determine the threshold based on the system memory size (default)<br/>- `unlimited`: no memory limit<br/>- `[size]` e.g. `64MB`: fixed memory threshold |
| `region_engine.mito.memtable` | -- | -- | -- |
| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
| `region_engine.file` | -- | -- | Enable the file engine. |
| `region_engine.metric` | -- | -- | Metric engine options. |
| `region_engine.metric.sparse_primary_key_encoding` | Bool | `true` | Whether to use sparse primary key encoding. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4318/v1/traces` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. |
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `logging.slow_query` | -- | -- | The slow query log options. |
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.<br/>This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself.<br/>You must create the database before enabling it. |
| `export_metrics.self_import.db` | String | Unset | -- |
| `export_metrics.remote_write` | -- | -- | -- |
| `export_metrics.remote_write.url` | String | `""` | The url the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
| `tracing` | -- | -- | The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
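
To show how the dotted keys above map onto an actual datanode TOML file, here is a minimal sketch that keeps the raft-engine WAL and stores data on S3 with the Mito write cache enabled; the bucket, region, credentials, and local paths are placeholders rather than recommended values.

```toml
node_id = 42

[wal]
provider = "raft_engine"
dir = "/data/greptimedb/wal"        # placeholder path
file_size = "128MB"
purge_threshold = "1GB"
purge_interval = "1m"

[storage]
type = "S3"
bucket = "my-greptimedb-bucket"     # placeholder
root = "greptimedb"
region = "us-east-1"                # placeholder
access_key_id = "<ACCESS_KEY_ID>"   # prefer IAM roles over hardcoded keys
cache_capacity = "20GB"             # read cache; sized to available disk

[[region_engine]]
[region_engine.mito]
enable_write_cache = true
write_cache_size = "5GiB"
```
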
### Flownode
| Key | Type | Default | Descriptions |
| --- | -----| ------- | ----------- |
| `mode` | String | `distributed` | The running mode of the flownode. It can be `standalone` or `distributed`. |
| `node_id` | Integer | Unset | The flownode identifier and should be unique in the cluster. |
| `flow` | -- | -- | flow engine options. |
| `flow.num_workers` | Integer | `0` | The number of flow workers in the flownode.<br/>Not setting this value (or setting it to 0) will use the number of CPU cores divided by 2. |
| `flow.batching_mode` | -- | -- | -- |
| `flow.batching_mode.query_timeout` | String | `600s` | The default batching engine query timeout is 10 minutes. |
| `flow.batching_mode.slow_query_threshold` | String | `60s` | Outputs a warn log for any query that runs longer than this threshold |
| `flow.batching_mode.experimental_min_refresh_duration` | String | `5s` | The minimum duration between two queries execution by batching mode task |
| `flow.batching_mode.grpc_conn_timeout` | String | `5s` | The gRPC connection timeout |
| `flow.batching_mode.experimental_grpc_max_retries` | Integer | `3` | The gRPC max retry number |
| `flow.batching_mode.experimental_frontend_scan_timeout` | String | `30s` | Timeout for the flownode to wait for an available frontend.<br/>If no available frontend is found after `frontend_scan_timeout` elapses, an error is returned,<br/>which prevents the flownode from starting |
| `flow.batching_mode.experimental_frontend_activity_timeout` | String | `60s` | Frontend activity timeout.<br/>If a frontend is down (not sending heartbeats) for more than `frontend_activity_timeout`,<br/>it will be removed from the list that the flownode uses to connect |
| `flow.batching_mode.experimental_max_filter_num_per_query` | Integer | `20` | Maximum number of filters allowed in a single query |
| `flow.batching_mode.experimental_time_window_merge_threshold` | Integer | `3` | Time window merge distance |
| `flow.batching_mode.read_preference` | String | `Leader` | Read preference of the Frontend client. |
| `flow.batching_mode.frontend_tls` | -- | -- | -- |
| `flow.batching_mode.frontend_tls.enabled` | Bool | `false` | Whether to enable TLS for client. |
| `flow.batching_mode.frontend_tls.server_ca_cert_path` | String | Unset | Server Certificate file path. |
| `flow.batching_mode.frontend_tls.client_cert_path` | String | Unset | Client Certificate file path. |
| `flow.batching_mode.frontend_tls.client_key_path` | String | Unset | Client Private key file path. |
| `grpc` | -- | -- | The gRPC server options. |
| `grpc.bind_addr` | String | `127.0.0.1:6800` | The address to bind the gRPC server. |
| `grpc.server_addr` | String | `127.0.0.1:6800` | The address advertised to the metasrv,<br/>and used for connections from outside the host |
| `grpc.runtime_size` | Integer | `2` | The number of server worker threads. |
| `grpc.max_recv_message_size` | String | `512MB` | The maximum receive message size for gRPC server. |
| `grpc.max_send_message_size` | String | `512MB` | The maximum send message size for gRPC server. |
| `http` | -- | -- | The HTTP server options. |
| `http.addr` | String | `127.0.0.1:4000` | The address to bind the HTTP server. |
| `http.timeout` | String | `0s` | HTTP request timeout. Set to 0 to disable timeout. |
| `http.body_limit` | String | `64MB` | HTTP request body limit.<br/>The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.<br/>Set to 0 to disable limit. |
| `meta_client` | -- | -- | The metasrv client options. | | `meta_client` | -- | -- | The metasrv client options. |
| `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. | | `meta_client.metasrv_addrs` | Array | -- | The addresses of the metasrv. |
| `meta_client.timeout` | String | `3s` | Operation timeout. | | `meta_client.timeout` | String | `3s` | Operation timeout. |
| `meta_client.heartbeat_timeout` | String | `500ms` | Heartbeat timeout. |
| `meta_client.ddl_timeout` | String | `10s` | DDL timeout. | | `meta_client.ddl_timeout` | String | `10s` | DDL timeout. |
| `meta_client.connect_timeout` | String | `1s` | Connect server timeout. | | `meta_client.connect_timeout` | String | `1s` | Connect server timeout. |
| `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. | | `meta_client.tcp_nodelay` | Bool | `true` | `TCP_NODELAY` option for accepted connections. |
@@ -637,21 +530,18 @@
| `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. | | `heartbeat.interval` | String | `3s` | Interval for sending heartbeat messages to the metasrv. |
| `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. | | `heartbeat.retry_interval` | String | `3s` | Interval for retrying to send heartbeat messages to the metasrv. |
| `logging` | -- | -- | The logging options. | | `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `./greptimedb_data/logs` | The directory to store the log files. If set to empty, logs will not be written to files. | | `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. If set to empty, logs will not be written to files. |
| `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. | | `logging.level` | String | Unset | The log level. Can be `info`/`debug`/`warn`/`error`. |
| `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. | | `logging.enable_otlp_tracing` | Bool | `false` | Enable OTLP tracing. |
| `logging.otlp_endpoint` | String | `http://localhost:4318/v1/traces` | The OTLP tracing endpoint. | | `logging.otlp_endpoint` | String | `http://localhost:4317` | The OTLP tracing endpoint. |
| `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. | | `logging.append_stdout` | Bool | `true` | Whether to append logs to stdout. |
| `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. | | `logging.log_format` | String | `text` | The log format. Can be `text`/`json`. |
| `logging.max_log_files` | Integer | `720` | The maximum amount of log files. | | `logging.max_log_files` | Integer | `720` | The maximum amount of log files. |
| `logging.otlp_export_protocol` | String | `http` | The OTLP tracing export protocol. Can be `grpc`/`http`. | | `logging.tracing_sample_ratio` | -- | -- | The percentage of tracing will be sampled and exported.<br/>Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.<br/>ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of traces that will be sampled and exported.<br/>Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default value is 1.<br/>Ratios > 1 are treated as 1; fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- | | `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
| `logging.slow_query` | -- | -- | The slow query log options. |
| `logging.slow_query.enable` | Bool | `false` | Whether to enable slow query log. |
| `logging.slow_query.threshold` | String | Unset | The threshold of slow query. |
| `logging.slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. | | `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. | | `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `query` | -- | -- | -- |
| `query.parallelism` | Integer | `1` | Parallelism of the query engine for queries sent by the flownode.<br/>Defaults to 1, so it won't use too much CPU or memory |
| `query.memory_pool_size` | String | `50%` | Memory pool size for query execution operators (aggregation, sorting, join).<br/>Supports absolute size (e.g., "1GB", "2GB") or percentage of system memory (e.g., "20%").<br/>Setting it to 0 disables the limit (unbounded, default behavior).<br/>When this limit is reached, queries will fail with ResourceExhausted error.<br/>NOTE: This does NOT limit memory used by table scans. |
| `memory` | -- | -- | The memory options. |
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.<br/>When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable<br/>is set to "prof:true,prof_active:false". The official image adds this env variable.<br/>Default is true. |
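Taken together, the options above can be combined into a small flownode configuration. The following is a minimal sketch assembled from the documented keys; the concrete addresses and sizes are illustrative assumptions, not recommended values.

```toml
# Minimal flownode configuration sketch (illustrative values only).
node_id = 14

[grpc]
bind_addr = "127.0.0.1:6800"    # where the gRPC server listens
server_addr = "127.0.0.1:6800"  # address advertised to the metasrv

[meta_client]
metasrv_addrs = ["127.0.0.1:3002"]

[query]
parallelism = 1            # keep flownode queries lightweight
memory_pool_size = "50%"   # or an absolute size such as "1GB"
```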


@@ -1,11 +1,10 @@
## The running mode of the datanode. It can be `standalone` or `distributed`.
mode = "standalone"
## The datanode identifier and should be unique in the cluster. ## The datanode identifier and should be unique in the cluster.
## @toml2docs:none-default ## @toml2docs:none-default
node_id = 42 node_id = 42
## The default column prefix for auto-created time index and value columns.
## @toml2docs:none-default
default_column_prefix = "greptime"
## Start services after regions have obtained leases. ## Start services after regions have obtained leases.
## It will block the datanode start if it can't receive leases in the heartbeat from metasrv. ## It will block the datanode start if it can't receive leases in the heartbeat from metasrv.
require_lease_before_startup = false require_lease_before_startup = false
@@ -14,24 +13,42 @@ require_lease_before_startup = false
## By default, it provides services after all regions have been initialized. ## By default, it provides services after all regions have been initialized.
init_regions_in_background = false init_regions_in_background = false
## Enable telemetry to collect anonymous usage data.
enable_telemetry = true
## Parallelism of initializing regions. ## Parallelism of initializing regions.
init_regions_parallelism = 16 init_regions_parallelism = 16
## The maximum number of concurrent queries allowed to be executed. Zero means unlimited. ## The maximum number of concurrent queries allowed to be executed. Zero means unlimited.
## NOTE: This setting affects scan_memory_limit's privileged tier allocation.
## When set, 70% of queries get privileged memory access (full scan_memory_limit).
## The remaining 30% get standard tier access (70% of scan_memory_limit).
max_concurrent_queries = 0 max_concurrent_queries = 0
## Enable telemetry to collect anonymous usage data. Enabled by default. ## Deprecated, use `grpc.addr` instead.
#+ enable_telemetry = true ## @toml2docs:none-default
rpc_addr = "127.0.0.1:3001"
## Deprecated, use `grpc.hostname` instead.
## @toml2docs:none-default
rpc_hostname = "127.0.0.1"
## Deprecated, use `grpc.runtime_size` instead.
## @toml2docs:none-default
rpc_runtime_size = 8
## Deprecated, use `grpc.rpc_max_recv_message_size` instead.
## @toml2docs:none-default
rpc_max_recv_message_size = "512MB"
## Deprecated, use `grpc.rpc_max_send_message_size` instead.
## @toml2docs:none-default
rpc_max_send_message_size = "512MB"
## The HTTP server options. ## The HTTP server options.
[http] [http]
## The address to bind the HTTP server. ## The address to bind the HTTP server.
addr = "127.0.0.1:4000" addr = "127.0.0.1:4000"
## HTTP request timeout. Set to 0 to disable timeout. ## HTTP request timeout. Set to 0 to disable timeout.
timeout = "0s" timeout = "30s"
## HTTP request body limit. ## HTTP request body limit.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`. ## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit. ## Set to 0 to disable limit.
@@ -40,24 +57,16 @@ body_limit = "64MB"
## The gRPC server options. ## The gRPC server options.
[grpc] [grpc]
## The address to bind the gRPC server. ## The address to bind the gRPC server.
bind_addr = "127.0.0.1:3001" addr = "127.0.0.1:3001"
## The address advertised to the metasrv, and used for connections from outside the host. ## The hostname advertised to the metasrv,
## If left empty or unset, the server will automatically use the IP address of the first network interface ## and used for connections from outside the host
## on the host, with the same port number as the one specified in `grpc.bind_addr`. hostname = "127.0.0.1"
server_addr = "127.0.0.1:3001"
## The number of server worker threads. ## The number of server worker threads.
runtime_size = 8 runtime_size = 8
## The maximum receive message size for gRPC server. ## The maximum receive message size for gRPC server.
max_recv_message_size = "512MB" max_recv_message_size = "512MB"
## The maximum send message size for gRPC server. ## The maximum send message size for gRPC server.
max_send_message_size = "512MB" max_send_message_size = "512MB"
## Compression mode for datanode side Arrow IPC service. Available options:
## - `none`: disable all compression
## - `transport`: only enable gRPC transport compression (zstd)
## - `arrow_ipc`: only enable Arrow IPC compression (lz4)
## - `all`: enable all compression.
## Default to `none`
flight_compression = "arrow_ipc"
## gRPC server TLS options, see `mysql.tls` section. ## gRPC server TLS options, see `mysql.tls` section.
[grpc.tls] [grpc.tls]
@@ -99,6 +108,9 @@ metasrv_addrs = ["127.0.0.1:3002"]
## Operation timeout. ## Operation timeout.
timeout = "3s" timeout = "3s"
## Heartbeat timeout.
heartbeat_timeout = "500ms"
## DDL timeout. ## DDL timeout.
ddl_timeout = "10s" ddl_timeout = "10s"
@@ -122,25 +134,24 @@ metadata_cache_tti = "5m"
## The provider of the WAL. ## The provider of the WAL.
## - `raft_engine`: the wal is stored in the local file system by raft-engine. ## - `raft_engine`: the wal is stored in the local file system by raft-engine.
## - `kafka`: it's remote wal that data is stored in Kafka. ## - `kafka`: it's remote wal that data is stored in Kafka.
## - `noop`: a no-op WAL provider that does not store any WAL data.<br/>**Note: any unflushed data will be lost when the datanode is shut down.**
provider = "raft_engine" provider = "raft_engine"
## The directory to store the WAL files. ## The directory to store the WAL files.
## **It's only used when the provider is `raft_engine`**. ## **It's only used when the provider is `raft_engine`**.
## @toml2docs:none-default ## @toml2docs:none-default
dir = "./greptimedb_data/wal" dir = "/tmp/greptimedb/wal"
## The size of the WAL segment file. ## The size of the WAL segment file.
## **It's only used when the provider is `raft_engine`**. ## **It's only used when the provider is `raft_engine`**.
file_size = "128MB" file_size = "256MB"
## The threshold of the WAL size to trigger a purge. ## The threshold of the WAL size to trigger a flush.
## **It's only used when the provider is `raft_engine`**. ## **It's only used when the provider is `raft_engine`**.
purge_threshold = "1GB" purge_threshold = "4GB"
## The interval to trigger a purge. ## The interval to trigger a flush.
## **It's only used when the provider is `raft_engine`**. ## **It's only used when the provider is `raft_engine`**.
purge_interval = "1m" purge_interval = "10m"
## The read batch size. ## The read batch size.
## **It's only used when the provider is `raft_engine`**. ## **It's only used when the provider is `raft_engine`**.
@@ -178,6 +189,22 @@ max_batch_bytes = "1MB"
## **It's only used when the provider is `kafka`**. ## **It's only used when the provider is `kafka`**.
consumer_wait_timeout = "100ms" consumer_wait_timeout = "100ms"
## The initial backoff delay.
## **It's only used when the provider is `kafka`**.
backoff_init = "500ms"
## The maximum backoff delay.
## **It's only used when the provider is `kafka`**.
backoff_max = "10s"
## The exponential backoff rate, i.e. next backoff = base * current backoff.
## **It's only used when the provider is `kafka`**.
backoff_base = 2
## The deadline of retries.
## **It's only used when the provider is `kafka`**.
backoff_deadline = "5mins"
## Whether to enable WAL index creation. ## Whether to enable WAL index creation.
## **It's only used when the provider is `kafka`**. ## **It's only used when the provider is `kafka`**.
create_index = true create_index = true
@@ -224,7 +251,6 @@ overwrite_entry_start_id = false
# secret_access_key = "123456" # secret_access_key = "123456"
# endpoint = "https://s3.amazonaws.com" # endpoint = "https://s3.amazonaws.com"
# region = "us-west-2" # region = "us-west-2"
# enable_virtual_host_style = false
# Example of using Oss as the storage. # Example of using Oss as the storage.
# [storage] # [storage]
@@ -255,23 +281,10 @@ overwrite_entry_start_id = false
# credential = "base64-credential" # credential = "base64-credential"
# endpoint = "https://storage.googleapis.com" # endpoint = "https://storage.googleapis.com"
## The query engine options.
[query]
## Parallelism of the query engine.
## Defaults to 0, which means the number of CPU cores.
parallelism = 0
## Memory pool size for query execution operators (aggregation, sorting, join).
## Supports absolute size (e.g., "2GB", "4GB") or percentage of system memory (e.g., "20%").
## Setting it to 0 disables the limit (unbounded, default behavior).
## When this limit is reached, queries will fail with ResourceExhausted error.
## NOTE: This does NOT limit memory used by table scans.
memory_pool_size = "50%"
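As a sketch of the accepted forms described above (the specific values are assumptions for illustration, not recommendations), `memory_pool_size` can be given either as a percentage of system memory or as an absolute size, and setting it to 0 removes the limit:

```toml
# Percentage of total system memory:
# memory_pool_size = "25%"
# Absolute size:
# memory_pool_size = "4GB"
# Setting it to 0 disables the limit (queries are then unbounded).
```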
## The data storage options. ## The data storage options.
[storage] [storage]
## The working home directory. ## The working home directory.
data_home = "./greptimedb_data" data_home = "/tmp/greptimedb/"
## The storage type used to store the data. ## The storage type used to store the data.
## - `File`: the data is stored in the local file system. ## - `File`: the data is stored in the local file system.
@@ -281,17 +294,14 @@ data_home = "./greptimedb_data"
## - `Oss`: the data is stored in the Aliyun OSS. ## - `Oss`: the data is stored in the Aliyun OSS.
type = "File" type = "File"
## Read cache configuration for object storage such as 'S3' etc. It's configured by default when using object storage, and configuring it is recommended for better performance. ## Cache configuration for object storage such as 'S3' etc.
## A local file directory, defaults to `{data_home}`. An empty string means disabling. ## The local file cache directory.
## @toml2docs:none-default ## @toml2docs:none-default
#+ cache_path = "" cache_path = "/path/local_cache"
## Whether to enable read cache. If not set, the read cache will be enabled by default when using object storage. ## The local file cache capacity in bytes.
#+ enable_read_cache = true
## The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger.
## @toml2docs:none-default ## @toml2docs:none-default
cache_capacity = "5GiB" cache_capacity = "256MB"
## The S3 bucket name. ## The S3 bucket name.
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**. ## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
@@ -365,27 +375,6 @@ endpoint = "https://s3.amazonaws.com"
## @toml2docs:none-default ## @toml2docs:none-default
region = "us-west-2" region = "us-west-2"
## The http client options to the storage.
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
[storage.http_client]
## The maximum idle connection per host allowed in the pool.
pool_max_idle_per_host = 1024
## The timeout for only the connect phase of a http client.
connect_timeout = "30s"
## The total request timeout, applied from when the request starts connecting until the response body has finished.
## Also considered a total deadline.
timeout = "30s"
## The timeout for idle sockets being kept-alive.
pool_idle_timeout = "90s"
## To skip the ssl verification
## **Security Notice**: Setting `skip_ssl_validation = true` disables certificate verification, making connections vulnerable to man-in-the-middle attacks. Only use this in development or trusted private networks.
skip_ssl_validation = false
# Custom storage options # Custom storage options
# [[storage.providers]] # [[storage.providers]]
# name = "S3" # name = "S3"
@@ -424,19 +413,6 @@ worker_request_batch_size = 64
## Number of meta action updated to trigger a new checkpoint for the manifest. ## Number of meta action updated to trigger a new checkpoint for the manifest.
manifest_checkpoint_distance = 10 manifest_checkpoint_distance = 10
## Number of removed files to keep in the manifest's `removed_files` field before also
## removing them from `removed_files`. Mostly for debugging purposes.
## If set to 0, only `keep_removed_file_ttl` is used to decide when to remove files
## from the `removed_files` field.
experimental_manifest_keep_removed_file_count = 256
## How long to keep removed files in the `removed_files` field of the manifest
## after they are removed from the manifest.
## Files will only be removed from the `removed_files` field
## if both `keep_removed_file_count` and `keep_removed_file_ttl` are reached.
experimental_manifest_keep_removed_file_ttl = "1h"
## Whether to compress manifest and checkpoint file by gzip (default false). ## Whether to compress manifest and checkpoint file by gzip (default false).
compress_manifest = false compress_manifest = false
@@ -483,57 +459,38 @@ auto_flush_interval = "1h"
## @toml2docs:none-default="Auto" ## @toml2docs:none-default="Auto"
#+ selector_result_cache_size = "512MB" #+ selector_result_cache_size = "512MB"
## Whether to enable the write cache. It's enabled by default when using object storage, and enabling it is recommended for better performance. ## Whether to enable the experimental write cache.
enable_write_cache = false enable_experimental_write_cache = false
## File system path for write cache, defaults to `{data_home}`. ## File system path for write cache, defaults to `{data_home}/write_cache`.
write_cache_path = "" experimental_write_cache_path = ""
## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. ## Capacity for write cache.
write_cache_size = "5GiB" experimental_write_cache_size = "512MB"
## TTL for write cache. ## TTL for write cache.
## @toml2docs:none-default ## @toml2docs:none-default
write_cache_ttl = "8h" experimental_write_cache_ttl = "8h"
## Preload index (puffin) files into cache on region open (default: true).
## When enabled, index files are loaded into the write cache during region initialization,
## which can improve query performance at the cost of longer startup times.
preload_index_cache = true
## Percentage of write cache capacity allocated for index (puffin) files (default: 20).
## The remaining capacity is used for data (parquet) files.
## Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,
## 1GiB is reserved for index files and 4GiB for data files.
index_cache_percent = 20
## Buffer size for SST writing. ## Buffer size for SST writing.
sst_write_buffer_size = "8MB" sst_write_buffer_size = "8MB"
## Parallelism to scan a region (default: 1/4 of cpu cores).
## - `0`: using the default value (1/4 of cpu cores).
## - `1`: scan in current thread.
## - `n`: scan in parallelism n.
scan_parallelism = 0
## Capacity of the channel to send data from parallel scan tasks to the main task. ## Capacity of the channel to send data from parallel scan tasks to the main task.
parallel_scan_channel_size = 32 parallel_scan_channel_size = 32
## Maximum number of SST files to scan concurrently.
max_concurrent_scan_files = 384
## Whether to allow stale WAL entries read during replay. ## Whether to allow stale WAL entries read during replay.
allow_stale_entries = false allow_stale_entries = false
## Memory limit for table scans across all queries.
## Supports absolute size (e.g., "2GB") or percentage of system memory (e.g., "20%").
## Setting it to 0 disables the limit.
## NOTE: Works with max_concurrent_queries for tiered memory allocation.
## - If max_concurrent_queries is set: 70% of queries get full access, 30% get 70% access.
## - If max_concurrent_queries is 0 (unlimited): first 20 queries get full access, rest get 70% access.
scan_memory_limit = "50%"
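A back-of-the-envelope illustration of the tiering described above (the numbers are assumptions for the example, not defaults): on a host with 32 GiB of memory and `scan_memory_limit = "50%"`, the scan budget is about 16 GiB. With `max_concurrent_queries = 10`, roughly the first 7 queries (70%) may use the full 16 GiB budget, while the remaining 3 are capped at about 11.2 GiB (70% of the budget).

```toml
# Illustrative pairing on a 32 GiB host (not a recommendation):
max_concurrent_queries = 10   # 7 privileged slots, 3 standard slots

[region_engine.mito]
scan_memory_limit = "50%"     # ~16 GiB scan budget on this host
```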
## Minimum time interval between two compactions. ## Minimum time interval between two compactions.
## To align with the old behavior, the default value is 0 (no restrictions). ## To align with the old behavior, the default value is 0 (no restrictions).
min_compaction_interval = "0m" min_compaction_interval = "0m"
## Whether to enable experimental flat format as the default format.
default_experimental_flat_format = false
## The options for index in Mito engine. ## The options for index in Mito engine.
[region_engine.mito.index] [region_engine.mito.index]
@@ -549,23 +506,6 @@ aux_path = ""
## The max capacity of the staging directory. ## The max capacity of the staging directory.
staging_size = "2GB" staging_size = "2GB"
## The TTL of the staging directory.
## Defaults to 7 days.
## Set it to "0s" to disable TTL.
staging_ttl = "7d"
## Cache size for inverted index metadata.
metadata_cache_size = "64MiB"
## Cache size for inverted index content.
content_cache_size = "128MiB"
## Page size for inverted index content cache.
content_cache_page_size = "64KiB"
## Cache size for index result.
result_cache_size = "128MiB"
## The options for inverted index in Mito engine. ## The options for inverted index in Mito engine.
[region_engine.mito.inverted_index] [region_engine.mito.inverted_index]
@@ -617,30 +557,6 @@ apply_on_query = "auto"
## - `[size]` e.g. `64MB`: fixed memory threshold ## - `[size]` e.g. `64MB`: fixed memory threshold
mem_threshold_on_create = "auto" mem_threshold_on_create = "auto"
## The options for bloom filter index in Mito engine.
[region_engine.mito.bloom_filter_index]
## Whether to create the index on flush.
## - `auto`: automatically (default)
## - `disable`: never
create_on_flush = "auto"
## Whether to create the index on compaction.
## - `auto`: automatically (default)
## - `disable`: never
create_on_compaction = "auto"
## Whether to apply the index on query
## - `auto`: automatically (default)
## - `disable`: never
apply_on_query = "auto"
## Memory threshold for the index creation.
## - `auto`: automatically determine the threshold based on the system memory size (default)
## - `unlimited`: no memory limit
## - `[size]` e.g. `64MB`: fixed memory threshold
mem_threshold_on_create = "auto"
[region_engine.mito.memtable] [region_engine.mito.memtable]
## Memtable type. ## Memtable type.
## - `time_series`: time-series memtable ## - `time_series`: time-series memtable
@@ -663,16 +579,10 @@ fork_dictionary_bytes = "1GiB"
## Enable the file engine. ## Enable the file engine.
[region_engine.file] [region_engine.file]
[[region_engine]]
## Metric engine options.
[region_engine.metric]
## Whether to use sparse primary key encoding.
sparse_primary_key_encoding = true
## The logging options. ## The logging options.
[logging] [logging]
## The directory to store the log files. If set to empty, logs will not be written to files. ## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "./greptimedb_data/logs" dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`. ## The log level. Can be `info`/`debug`/`warn`/`error`.
## @toml2docs:none-default ## @toml2docs:none-default
@@ -682,7 +592,7 @@ level = "info"
enable_otlp_tracing = false enable_otlp_tracing = false
## The OTLP tracing endpoint. ## The OTLP tracing endpoint.
otlp_endpoint = "http://localhost:4318/v1/traces" otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout. ## Whether to append logs to stdout.
append_stdout = true append_stdout = true
@@ -693,32 +603,50 @@ log_format = "text"
## The maximum amount of log files. ## The maximum amount of log files.
max_log_files = 720 max_log_files = 720
## The OTLP tracing export protocol. Can be `grpc`/`http`.
otlp_export_protocol = "http"
## Additional OTLP headers, only valid when using OTLP http
[logging.otlp_headers]
## @toml2docs:none-default
#Authorization = "Bearer my-token"
## @toml2docs:none-default
#Database = "My database"
## The percentage of traces that will be sampled and exported. ## The percentage of traces that will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1. ## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0 ## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
[logging.tracing_sample_ratio] [logging.tracing_sample_ratio]
default_ratio = 1.0 default_ratio = 1.0
## The slow query log options.
[logging.slow_query]
## Whether to enable slow query log.
enable = false
## The threshold of slow query.
## @toml2docs:none-default
threshold = "10s"
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
## @toml2docs:none-default
sample_ratio = 1.0
## The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. to `greptimedb` itself) via the remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from Prometheus scraping.
[export_metrics]
## Whether to enable exporting metrics.
enable = false
## The interval of exporting metrics.
write_interval = "30s"
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
## You must create the database before enabling it.
[export_metrics.self_import]
## @toml2docs:none-default
db = "greptime_metrics"
[export_metrics.remote_write]
## The URL the metrics are sent to. For example: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers to carry in Prometheus remote-write requests.
headers = { }
headers = { }
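For instance, a datanode could push its metrics to another GreptimeDB instance via remote write. This sketch reuses the URL shape documented above; the target database name and the extra header are illustrative assumptions.

```toml
[export_metrics]
enable = true
write_interval = "30s"

[export_metrics.remote_write]
# Write into a `greptime_metrics` database on another GreptimeDB endpoint.
url = "http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics"
# Optional extra HTTP headers (hypothetical token shown here).
headers = { Authorization = "Bearer my-token" }
```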
## The tracing options. Only effect when compiled with `tokio-console` feature. ## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing] #+ [tracing]
## The tokio console address. ## The tokio console address.
## @toml2docs:none-default ## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1" #+ tokio_console_addr = "127.0.0.1"
## The memory options.
[memory]
## Whether to enable heap profiling activation during startup.
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
## is set to "prof:true,prof_active:false". The official image adds this env variable.
## Default is true.
enable_heap_profiling = true


@@ -1,57 +1,17 @@
## The running mode of the flownode. It can be `standalone` or `distributed`.
mode = "distributed"
## The flownode identifier and should be unique in the cluster. ## The flownode identifier and should be unique in the cluster.
## @toml2docs:none-default ## @toml2docs:none-default
node_id = 14 node_id = 14
## flow engine options.
[flow]
## The number of flow workers in the flownode.
## If not set (or set to 0), the number of CPU cores divided by 2 is used.
#+num_workers=0
[flow.batching_mode]
## The default batching engine query timeout is 10 minutes.
#+query_timeout="600s"
## Outputs a warning log for any query that runs for longer than this threshold
#+slow_query_threshold="60s"
## The minimum duration between two query executions of a batching mode task
#+experimental_min_refresh_duration="5s"
## The gRPC connection timeout
#+grpc_conn_timeout="5s"
## The gRPC max retry number
#+experimental_grpc_max_retries=3
## Timeout for the flownode to find an available frontend.
## If no available frontend is found after frontend_scan_timeout elapses, an error is returned,
## which prevents the flownode from starting
#+experimental_frontend_scan_timeout="30s"
## Frontend activity timeout.
## If a frontend is down (not sending heartbeats) for more than frontend_activity_timeout,
## it is removed from the list of frontends the flownode connects to
#+experimental_frontend_activity_timeout="60s"
## Maximum number of filters allowed in a single query
#+experimental_max_filter_num_per_query=20
## Time window merge distance
#+experimental_time_window_merge_threshold=3
## Read preference of the Frontend client.
#+read_preference="Leader"
[flow.batching_mode.frontend_tls]
## Whether to enable TLS for client.
#+enabled=false
## Server Certificate file path.
## @toml2docs:none-default
#+server_ca_cert_path=""
## Client Certificate file path.
## @toml2docs:none-default
#+client_cert_path=""
## Client Private key file path.
## @toml2docs:none-default
#+client_key_path=""
## The gRPC server options. ## The gRPC server options.
[grpc] [grpc]
## The address to bind the gRPC server. ## The address to bind the gRPC server.
bind_addr = "127.0.0.1:6800" addr = "127.0.0.1:6800"
## The address advertised to the metasrv, ## The hostname advertised to the metasrv,
## and used for connections from outside the host ## and used for connections from outside the host
server_addr = "127.0.0.1:6800" hostname = "127.0.0.1"
## The number of server worker threads. ## The number of server worker threads.
runtime_size = 2 runtime_size = 2
## The maximum receive message size for gRPC server. ## The maximum receive message size for gRPC server.
@@ -59,16 +19,6 @@ max_recv_message_size = "512MB"
## The maximum send message size for gRPC server. ## The maximum send message size for gRPC server.
max_send_message_size = "512MB" max_send_message_size = "512MB"
## The HTTP server options.
[http]
## The address to bind the HTTP server.
addr = "127.0.0.1:4000"
## HTTP request timeout. Set to 0 to disable timeout.
timeout = "0s"
## HTTP request body limit.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB"
## The metasrv client options. ## The metasrv client options.
[meta_client] [meta_client]
@@ -78,6 +28,9 @@ metasrv_addrs = ["127.0.0.1:3002"]
## Operation timeout. ## Operation timeout.
timeout = "3s" timeout = "3s"
## Heartbeat timeout.
heartbeat_timeout = "500ms"
## DDL timeout. ## DDL timeout.
ddl_timeout = "10s" ddl_timeout = "10s"
@@ -107,7 +60,7 @@ retry_interval = "3s"
## The logging options. ## The logging options.
[logging] [logging]
## The directory to store the log files. If set to empty, logs will not be written to files. ## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "./greptimedb_data/logs" dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`. ## The log level. Can be `info`/`debug`/`warn`/`error`.
## @toml2docs:none-default ## @toml2docs:none-default
@@ -117,7 +70,7 @@ level = "info"
enable_otlp_tracing = false enable_otlp_tracing = false
## The OTLP tracing endpoint. ## The OTLP tracing endpoint.
otlp_endpoint = "http://localhost:4318/v1/traces" otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout. ## Whether to append logs to stdout.
append_stdout = true append_stdout = true
@@ -128,44 +81,28 @@ log_format = "text"
## The maximum amount of log files. ## The maximum amount of log files.
max_log_files = 720 max_log_files = 720
## The OTLP tracing export protocol. Can be `grpc`/`http`.
otlp_export_protocol = "http"
## Additional OTLP headers, only valid when using OTLP http
[logging.otlp_headers]
## @toml2docs:none-default
#Authorization = "Bearer my-token"
## @toml2docs:none-default
#Database = "My database"
## The percentage of traces that will be sampled and exported. ## The percentage of traces that will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1. ## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0 ## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
[logging.tracing_sample_ratio] [logging.tracing_sample_ratio]
default_ratio = 1.0 default_ratio = 1.0
## The slow query log options.
[logging.slow_query]
## Whether to enable slow query log.
enable = false
## The threshold of slow query.
## @toml2docs:none-default
threshold = "10s"
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
## @toml2docs:none-default
sample_ratio = 1.0
## The tracing options. Only effect when compiled with `tokio-console` feature. ## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing] #+ [tracing]
## The tokio console address. ## The tokio console address.
## @toml2docs:none-default ## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1" #+ tokio_console_addr = "127.0.0.1"
[query]
## Parallelism of the query engine for queries sent by the flownode.
## Defaults to 1, so it won't use too much CPU or memory
parallelism = 1
## Memory pool size for query execution operators (aggregation, sorting, join).
## Supports absolute size (e.g., "1GB", "2GB") or percentage of system memory (e.g., "20%").
## Setting it to 0 disables the limit (unbounded, default behavior).
## When this limit is reached, queries will fail with ResourceExhausted error.
## NOTE: This does NOT limit memory used by table scans.
memory_pool_size = "50%"
## The memory options.
[memory]
## Whether to enable heap profiling activation during startup.
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
## is set to "prof:true,prof_active:false". The official image adds this env variable.
## Default is true.
enable_heap_profiling = true


@@ -2,14 +2,6 @@
## @toml2docs:none-default ## @toml2docs:none-default
default_timezone = "UTC" default_timezone = "UTC"
## The default column prefix for auto-created time index and value columns.
## @toml2docs:none-default
default_column_prefix = "greptime"
## The maximum in-flight write bytes.
## @toml2docs:none-default
#+ max_in_flight_write_bytes = "500MB"
## The runtime options. ## The runtime options.
#+ [runtime] #+ [runtime]
## The number of threads to execute the runtime for global read operations. ## The number of threads to execute the runtime for global read operations.
@@ -30,54 +22,21 @@ retry_interval = "3s"
## The address to bind the HTTP server. ## The address to bind the HTTP server.
addr = "127.0.0.1:4000" addr = "127.0.0.1:4000"
## HTTP request timeout. Set to 0 to disable timeout. ## HTTP request timeout. Set to 0 to disable timeout.
timeout = "0s" timeout = "30s"
## HTTP request body limit. ## HTTP request body limit.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`. ## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit. ## Set to 0 to disable limit.
body_limit = "64MB" body_limit = "64MB"
## Maximum total memory for all concurrent HTTP request bodies.
## Set to 0 to disable the limit. Default: "0" (unlimited)
## @toml2docs:none-default
#+ max_total_body_memory = "1GB"
## HTTP CORS support; it's turned on by default
## This allows browsers to access HTTP APIs without CORS restrictions
enable_cors = true
## Customize allowed origins for HTTP CORS.
## @toml2docs:none-default
cors_allowed_origins = ["https://example.com"]
## Whether to enable validation for Prometheus remote write requests.
## Available options:
## - strict: deny invalid UTF-8 strings (default).
## - lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).
## - unchecked: do not validate strings.
prom_validation_mode = "strict"
## The gRPC server options. ## The gRPC server options.
[grpc] [grpc]
## The address to bind the gRPC server. ## The address to bind the gRPC server.
bind_addr = "127.0.0.1:4001" addr = "127.0.0.1:4001"
## The address advertised to the metasrv, and used for connections from outside the host. ## The hostname advertised to the metasrv,
## If left empty or unset, the server will automatically use the IP address of the first network interface ## and used for connections from outside the host
## on the host, with the same port number as the one specified in `grpc.bind_addr`. hostname = "127.0.0.1"
server_addr = "127.0.0.1:4001"
## The number of server worker threads. ## The number of server worker threads.
runtime_size = 8 runtime_size = 8
## Maximum total memory for all concurrent gRPC request messages.
## Set to 0 to disable the limit. Default: "0" (unlimited)
## @toml2docs:none-default
#+ max_total_message_memory = "1GB"
## Compression mode for frontend side Arrow IPC service. Available options:
## - `none`: disable all compression
## - `transport`: only enable gRPC transport compression (zstd)
## - `arrow_ipc`: only enable Arrow IPC compression (lz4)
## - `all`: enable all compression.
## Default to `none`
flight_compression = "arrow_ipc"
## The maximum connection age for gRPC connection.
## The value can be a human-readable time string. For example: `10m` for ten minutes or `1h` for one hour.
## Refer to https://grpc.io/docs/guides/keepalive/ for more details.
## @toml2docs:none-default
#+ max_connection_age = "10m"
## gRPC server TLS options, see `mysql.tls` section. ## gRPC server TLS options, see `mysql.tls` section.
[grpc.tls] [grpc.tls]
@@ -96,42 +55,6 @@ key_path = ""
## For now, gRPC tls config does not support auto reload. ## For now, gRPC tls config does not support auto reload.
watch = false watch = false
## The internal gRPC server options. Internal gRPC port for nodes inside cluster to access frontend.
[internal_grpc]
## The address to bind the gRPC server.
bind_addr = "127.0.0.1:4010"
## The address advertised to the metasrv, and used for connections from outside the host.
## If left empty or unset, the server will automatically use the IP address of the first network interface
## on the host, with the same port number as the one specified in `grpc.bind_addr`.
server_addr = "127.0.0.1:4010"
## The number of server worker threads.
runtime_size = 8
## Compression mode for frontend side Arrow IPC service. Available options:
## - `none`: disable all compression
## - `transport`: only enable gRPC transport compression (zstd)
## - `arrow_ipc`: only enable Arrow IPC compression (lz4)
## - `all`: enable all compression.
## Default to `none`
flight_compression = "arrow_ipc"
## internal gRPC server TLS options, see `mysql.tls` section.
[internal_grpc.tls]
## TLS mode.
mode = "disable"
## Certificate file path.
## @toml2docs:none-default
cert_path = ""
## Private key file path.
## @toml2docs:none-default
key_path = ""
## Watch for Certificate and key file change and auto reload.
## For now, gRPC tls config does not support auto reload.
watch = false
## MySQL server options. ## MySQL server options.
[mysql] [mysql]
## Whether to enable. ## Whether to enable.
@@ -140,11 +63,6 @@ enable = true
addr = "127.0.0.1:4002" addr = "127.0.0.1:4002"
## The number of server worker threads. ## The number of server worker threads.
runtime_size = 2 runtime_size = 2
## Server-side keep-alive time.
## Set to 0 (default) to disable.
keep_alive = "0s"
## Maximum entries in the MySQL prepared statement cache; default is 10,000.
prepared_stmt_cache_size = 10000
# MySQL server TLS options. # MySQL server TLS options.
[mysql.tls] [mysql.tls]
@@ -176,9 +94,6 @@ enable = true
addr = "127.0.0.1:4003" addr = "127.0.0.1:4003"
## The number of server worker threads. ## The number of server worker threads.
runtime_size = 2 runtime_size = 2
## Server-side keep-alive time.
## Set to 0 (default) to disable.
keep_alive = "0s"
## PostgresSQL server TLS options, see `mysql.tls` section. ## PostgresSQL server TLS options, see `mysql.tls` section.
[postgres.tls] [postgres.tls]
@@ -206,11 +121,6 @@ enable = true
## Whether to enable InfluxDB protocol in HTTP API. ## Whether to enable InfluxDB protocol in HTTP API.
enable = true enable = true
## Jaeger protocol options.
[jaeger]
## Whether to enable Jaeger protocol in HTTP API.
enable = true
## Prometheus remote storage options ## Prometheus remote storage options
[prom_store] [prom_store]
## Whether to enable Prometheus remote write and read in HTTP API. ## Whether to enable Prometheus remote write and read in HTTP API.
@@ -226,6 +136,9 @@ metasrv_addrs = ["127.0.0.1:3002"]
## Operation timeout. ## Operation timeout.
timeout = "3s" timeout = "3s"
## Heartbeat timeout.
heartbeat_timeout = "500ms"
## DDL timeout. ## DDL timeout.
ddl_timeout = "10s" ddl_timeout = "10s"
@@ -244,22 +157,6 @@ metadata_cache_ttl = "10m"
# TTI of the metadata cache. # TTI of the metadata cache.
metadata_cache_tti = "5m" metadata_cache_tti = "5m"
## The query engine options.
[query]
## Parallelism of the query engine.
## Defaults to 0, which means the number of CPU cores.
parallelism = 0
## Whether to allow query fallback when push-down optimization fails.
## Defaults to false, meaning an error message is returned when push-down optimization fails
allow_query_fallback = false
## Memory pool size for query execution operators (aggregation, sorting, join).
## Supports absolute size (e.g., "4GB", "8GB") or percentage of system memory (e.g., "30%").
## Setting it to 0 disables the limit (unbounded, default behavior).
## When this limit is reached, queries will fail with ResourceExhausted error.
## NOTE: This does NOT limit memory used by table scans (only applies to datanodes).
memory_pool_size = "50%"
## Datanode options. ## Datanode options.
[datanode] [datanode]
## Datanode client options. ## Datanode client options.
@@ -270,7 +167,7 @@ tcp_nodelay = true
## The logging options. ## The logging options.
[logging] [logging]
## The directory to store the log files. If set to empty, logs will not be written to files. ## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "./greptimedb_data/logs" dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`. ## The log level. Can be `info`/`debug`/`warn`/`error`.
## @toml2docs:none-default ## @toml2docs:none-default
@@ -280,7 +177,7 @@ level = "info"
enable_otlp_tracing = false enable_otlp_tracing = false
## The OTLP tracing endpoint. ## The OTLP tracing endpoint.
otlp_endpoint = "http://localhost:4318/v1/traces" otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout. ## Whether to append logs to stdout.
append_stdout = true append_stdout = true
@@ -291,16 +188,6 @@ log_format = "text"
## The maximum amount of log files. ## The maximum amount of log files.
max_log_files = 720 max_log_files = 720
## The OTLP tracing export protocol. Can be `grpc`/`http`.
otlp_export_protocol = "http"
## Additional OTLP headers, only valid when using OTLP http
[logging.otlp_headers]
## @toml2docs:none-default
#Authorization = "Bearer my-token"
## @toml2docs:none-default
#Database = "My database"
## The percentage of traces that will be sampled and exported. ## The percentage of traces that will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1. ## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## ratio > 1 are treated as 1. Fractions < 0 are treated as 0 ## ratio > 1 are treated as 1. Fractions < 0 are treated as 0
@@ -308,39 +195,43 @@ otlp_export_protocol = "http"
default_ratio = 1.0 default_ratio = 1.0
## The slow query log options. ## The slow query log options.
[slow_query] [logging.slow_query]
## Whether to enable slow query log. ## Whether to enable slow query log.
enable = true enable = false
## The record type of slow queries. It can be `system_table` or `log`. ## The threshold of slow query.
## If `system_table` is selected, the slow queries will be recorded in a system table `greptime_private.slow_queries`. ## @toml2docs:none-default
## If `log` is selected, the slow queries will be logged in a log file `greptimedb-slow-queries.*`. threshold = "10s"
record_type = "system_table"
## The threshold of slow query. It can be human readable time string, for example: `10s`, `100ms`, `1s`. ## The sampling ratio of slow query log. The value should be in the range of (0, 1].
threshold = "30s" ## @toml2docs:none-default
## The sampling ratio of slow query log. The value should be in the range of (0, 1]. For example, `0.1` means 10% of the slow queries will be logged and `1.0` means all slow queries will be logged.
sample_ratio = 1.0 sample_ratio = 1.0
## The TTL of the `slow_queries` system table. Default is `90d` when `record_type` is `system_table`. ## The datanode can export its metrics and send to Prometheus compatible service (e.g. send to `greptimedb` itself) from remote-write API.
ttl = "90d" ## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
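As a sketch of the alternative `log` record type described above, slow queries can be written to the `greptimedb-slow-queries.*` log files instead of the system table; the threshold and sampling values below are illustrative assumptions.

```toml
[slow_query]
enable = true
# Log slow queries to `greptimedb-slow-queries.*` files instead of the system table.
record_type = "log"
# Any query slower than 5 seconds is recorded.
threshold = "5s"
# Sample 10% of slow queries to keep log volume low.
sample_ratio = 0.1
```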
[export_metrics]
## Whether to enable exporting metrics.
enable = false
## The interval of export metrics.
write_interval = "30s"
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
## You must create the database before enabling it.
[export_metrics.self_import]
## @toml2docs:none-default
db = "greptime_metrics"
[export_metrics.remote_write]
## The URL the metrics are sent to. For example: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers to carry in Prometheus remote-write requests.
headers = { }
## The tracing options. Only effect when compiled with `tokio-console` feature. ## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing] #+ [tracing]
## The tokio console address. ## The tokio console address.
## @toml2docs:none-default ## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1" #+ tokio_console_addr = "127.0.0.1"
## The memory options.
[memory]
## Whether to enable heap profiling activation during startup.
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
## is set to "prof:true,prof_active:false". The official image adds this env variable.
## Default is true.
enable_heap_profiling = true
## Configuration options for the event recorder.
[event_recorder]
## TTL for the events table that will be used to store the events. Default is `90d`.
ttl = "90d"


@@ -1,47 +1,14 @@
## The working home directory. ## The working home directory.
data_home = "./greptimedb_data" data_home = "/tmp/metasrv/"
## Store server address(es). The format depends on the selected backend. ## The bind address of metasrv.
## bind_addr = "127.0.0.1:3002"
## For etcd: a list of "host:port" endpoints.
## e.g. ["192.168.1.1:2379", "192.168.1.2:2379"]
##
## For PostgreSQL: a connection string in libpq format or URI.
## e.g.
## - "host=localhost port=5432 user=postgres password=<PASSWORD> dbname=postgres"
## - "postgresql://user:password@localhost:5432/mydb?connect_timeout=10"
## The detail see: https://docs.rs/tokio-postgres/latest/tokio_postgres/config/struct.Config.html
##
## For mysql store, the format is a MySQL connection URL.
## e.g. "mysql://user:password@localhost:3306/greptime_meta?ssl-mode=VERIFY_CA&ssl-ca=/path/to/ca.pem"
store_addrs = ["127.0.0.1:2379"]
## If it's not empty, the metasrv will store all data with this key prefix. ## The communication server address for frontend and datanode to connect to metasrv, "127.0.0.1:3002" by default for localhost.
store_key_prefix = "" server_addr = "127.0.0.1:3002"
## The datastore for meta server. ## Store server address default to etcd store.
## Available values: store_addr = "127.0.0.1:2379"
## - `etcd_store` (default value)
## - `memory_store`
## - `postgres_store`
## - `mysql_store`
backend = "etcd_store"
## Table name in RDS to store metadata. Effective when using an RDS kvbackend.
## **Only used when backend is `postgres_store`.**
meta_table_name = "greptime_metakv"
## Optional PostgreSQL schema for metadata table and election table name qualification.
## When PostgreSQL public schema is not writable (e.g., PostgreSQL 15+ with restricted public),
## set this to a writable schema. GreptimeDB will use `meta_schema_name`.`meta_table_name`.
## GreptimeDB will NOT create the schema automatically; please ensure it exists or the user has permission.
## **Only used when backend is `postgres_store`.**
meta_schema_name = "greptime_schema"
## Advisory lock id in PostgreSQL for election. Effective when using PostgreSQL as the kvbackend
## Only used when backend is `postgres_store`.
meta_election_lock_id = 1
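Putting the backend-related keys together, a metasrv backed by PostgreSQL might look like the following sketch; the connection string, schema, and table names are illustrative, following the formats documented above.

```toml
# Metasrv metadata stored in PostgreSQL instead of etcd (illustrative values).
backend = "postgres_store"
store_addrs = ["postgresql://user:password@localhost:5432/greptime_meta?connect_timeout=10"]
store_key_prefix = ""
meta_table_name = "greptime_metakv"
meta_schema_name = "greptime_schema"
meta_election_lock_id = 1
```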
## Datanode selector type. ## Datanode selector type.
## - `round_robin` (default value) ## - `round_robin` (default value)
@@ -53,26 +20,20 @@ selector = "round_robin"
## Store data in memory. ## Store data in memory.
use_memory_store = false use_memory_store = false
## Whether to enable greptimedb telemetry.
enable_telemetry = true
## If it's not empty, the metasrv will store all data with this key prefix.
store_key_prefix = ""
## Whether to enable region failover. ## Whether to enable region failover.
## This feature is only available on GreptimeDB running on cluster mode and ## This feature is only available on GreptimeDB running on cluster mode and
## - Using Remote WAL ## - Using Remote WAL
## - Using shared storage (e.g., s3). ## - Using shared storage (e.g., s3).
enable_region_failover = false enable_region_failover = false
## The delay before starting region failure detection. ## The datastore for meta server.
## This delay helps prevent Metasrv from triggering unnecessary region failovers before all Datanodes are fully started. backend = "EtcdStore"
## Especially useful when the cluster is not deployed with GreptimeDB Operator and maintenance mode is not enabled.
region_failure_detector_initialization_delay = '10m'
## Whether to allow region failover on local WAL.
## **This option is not recommended to be set to true, because it may lead to data loss during failover.**
allow_region_failover_on_local_wal = false
## Max allowed idle time before removing node info from metasrv memory.
node_max_idle_time = "24hours"
## Whether to enable greptimedb telemetry. Enabled by default.
#+ enable_telemetry = true
## The runtime options. ## The runtime options.
#+ [runtime] #+ [runtime]
@@ -81,60 +42,6 @@ node_max_idle_time = "24hours"
## The number of threads to execute the runtime for global write operations. ## The number of threads to execute the runtime for global write operations.
#+ compact_rt_size = 4 #+ compact_rt_size = 4
## TLS configuration for kv store backend (applicable for etcd, PostgreSQL, and MySQL backends)
## When using etcd, PostgreSQL, or MySQL as metadata store, you can configure TLS here
##
## Note: if TLS is configured in both this section and the `store_addrs` connection string, the
## settings here will override the TLS settings in `store_addrs`.
[backend_tls]
## TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html
## - "disable" - No TLS
## - "prefer" (default) - Try TLS, fallback to plain
## - "require" - Require TLS
## - "verify_ca" - Require TLS and verify CA
## - "verify_full" - Require TLS and verify hostname
mode = "prefer"
## Path to client certificate file (for client authentication)
## Like "/path/to/client.crt"
cert_path = ""
## Path to client private key file (for client authentication)
## Like "/path/to/client.key"
key_path = ""
## Path to CA certificate file (for server certificate verification)
## Required when using custom CAs or self-signed certificates
## Leave empty to use system root certificates only
## Like "/path/to/ca.crt"
ca_cert_path = ""
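For a fully verified connection to the metadata store, the section above could be filled in along these lines; the paths are placeholders, not defaults.

```toml
[backend_tls]
# Require TLS and verify both the CA and the server hostname.
mode = "verify_full"
cert_path = "/etc/greptimedb/tls/client.crt"
key_path = "/etc/greptimedb/tls/client.key"
ca_cert_path = "/etc/greptimedb/tls/ca.crt"
```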
## The gRPC server options.
[grpc]
## The address to bind the gRPC server.
bind_addr = "127.0.0.1:3002"
## The communication server address for the frontend and datanode to connect to metasrv.
## If left empty or unset, the server will automatically use the IP address of the first network interface
## on the host, with the same port number as the one specified in `bind_addr`.
server_addr = "127.0.0.1:3002"
## The number of server worker threads.
runtime_size = 8
## The maximum receive message size for gRPC server.
max_recv_message_size = "512MB"
## The maximum send message size for gRPC server.
max_send_message_size = "512MB"
## The HTTP server options.
[http]
## The address to bind the HTTP server.
addr = "127.0.0.1:4000"
## HTTP request timeout. Set to 0 to disable timeout.
timeout = "0s"
## HTTP request body limit.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit.
body_limit = "64MB"
## Procedure storage options. ## Procedure storage options.
[procedure] [procedure]
@@ -151,24 +58,21 @@ retry_delay = "500ms"
## Comment out `max_metadata_value_size` to avoid splitting large values (no limit). ## Comment out `max_metadata_value_size` to avoid splitting large values (no limit).
max_metadata_value_size = "1500KiB" max_metadata_value_size = "1500KiB"
## Max running procedures.
## The maximum number of procedures that can be running at the same time.
## If the number of running procedures exceeds this limit, the procedure will be rejected.
max_running_procedures = 128
# Failure detectors options. # Failure detectors options.
# GreptimeDB uses the Phi Accrual Failure Detector algorithm to detect datanode failures.
[failure_detector] [failure_detector]
## Maximum acceptable φ before the peer is treated as failed.
## Lower values react faster but yield more false positives. ## The threshold value used by the failure detector to determine failure conditions.
threshold = 8.0 threshold = 8.0
## The minimum standard deviation of the heartbeat intervals.
## So tiny variations don't make φ explode. Prevents hypersensitivity when heartbeat intervals barely vary. ## The minimum standard deviation of the heartbeat intervals, used to calculate acceptable variations.
min_std_deviation = "100ms" min_std_deviation = "100ms"
## The acceptable pause duration between heartbeats.
## An extra grace period added to the learned mean interval before φ rises, absorbing temporary network hiccups or GC pauses. ## The acceptable pause duration between heartbeats, used to determine if a heartbeat interval is acceptable.
acceptable_heartbeat_pause = "10000ms" acceptable_heartbeat_pause = "10000ms"
## The initial estimate of the heartbeat interval used by the failure detector.
first_heartbeat_estimate = "1000ms"
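For context, the Phi Accrual detector estimates, from the observed heartbeat interval distribution, the probability that the next heartbeat is merely late rather than lost, and reports φ as -log10 of that probability; with `threshold = 8.0`, a peer is suspected once that probability drops below roughly 10^-8. (This is the standard formulation of the algorithm, not something stated in this file.) A more tolerant tuning for flaky networks might look like the sketch below; the values are assumptions, not recommendations.

```toml
[failure_detector]
# Suspect peers later (fewer false positives, slower detection).
threshold = 10.0
min_std_deviation = "200ms"
# Absorb longer network hiccups or GC pauses before φ rises.
acceptable_heartbeat_pause = "20000ms"
first_heartbeat_estimate = "1000ms"
```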
## Datanode options. ## Datanode options.
[datanode] [datanode]
@@ -190,70 +94,44 @@ tcp_nodelay = true
# - `kafka`: metasrv **has to be** configured with kafka wal config when using kafka wal provider in datanode. # - `kafka`: metasrv **has to be** configured with kafka wal config when using kafka wal provider in datanode.
provider = "raft_engine" provider = "raft_engine"
# Kafka wal config.
## The broker endpoints of the Kafka cluster. ## The broker endpoints of the Kafka cluster.
##
## **It's only used when the provider is `kafka`**.
broker_endpoints = ["127.0.0.1:9092"] broker_endpoints = ["127.0.0.1:9092"]
## Automatically create topics for WAL. ## Automatically create topics for WAL.
## Set to `true` to automatically create topics for WAL. ## Set to `true` to automatically create topics for WAL.
## Otherwise, use topics named `topic_name_prefix_[0..num_topics)` ## Otherwise, use topics named `topic_name_prefix_[0..num_topics)`
## **It's only used when the provider is `kafka`**.
auto_create_topics = true auto_create_topics = true
## Interval of automatic WAL pruning. ## Number of topics.
## Set to `0s` to disable automatic WAL pruning, which periodically deletes unused remote WAL entries.
## **It's only used when the provider is `kafka`**.
auto_prune_interval = "30m"
## Estimated size threshold to trigger a flush when using Kafka remote WAL.
## Since multiple regions may share a Kafka topic, the estimated size is calculated as:
## (latest_entry_id - flushed_entry_id) * avg_record_size
## MetaSrv triggers a flush for a region when this estimated size exceeds `flush_trigger_size`.
## - `latest_entry_id`: The latest entry ID in the topic.
## - `flushed_entry_id`: The last flushed entry ID for the region.
## Set to "0" to let the system decide the flush trigger size.
## **It's only used when the provider is `kafka`**.
flush_trigger_size = "512MB"
## Estimated size threshold to trigger a checkpoint when using Kafka remote WAL.
## The estimated size is calculated as:
## (latest_entry_id - last_checkpoint_entry_id) * avg_record_size
## MetaSrv triggers a checkpoint for a region when this estimated size exceeds `checkpoint_trigger_size`.
## Set to "0" to let the system decide the checkpoint trigger size.
## **It's only used when the provider is `kafka`**.
checkpoint_trigger_size = "128MB"
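For illustration, a hedged Rust sketch of the estimate described above (the function and constant names are assumptions; the real bookkeeping lives in metasrv):

    // Sketch of the estimate above: multiple regions share a topic, so the size is
    // approximated from entry-id deltas rather than measured exactly.
    fn estimated_replay_size(latest_entry_id: u64, baseline_entry_id: u64, avg_record_size: u64) -> u64 {
        latest_entry_id.saturating_sub(baseline_entry_id) * avg_record_size
    }

    fn should_flush(latest: u64, flushed_entry_id: u64, avg_record_size: u64) -> bool {
        const FLUSH_TRIGGER_SIZE: u64 = 512 * 1024 * 1024; // flush_trigger_size = "512MB"
        estimated_replay_size(latest, flushed_entry_id, avg_record_size) > FLUSH_TRIGGER_SIZE
    }

    fn should_checkpoint(latest: u64, last_checkpoint_entry_id: u64, avg_record_size: u64) -> bool {
        const CHECKPOINT_TRIGGER_SIZE: u64 = 128 * 1024 * 1024; // checkpoint_trigger_size = "128MB"
        estimated_replay_size(latest, last_checkpoint_entry_id, avg_record_size) > CHECKPOINT_TRIGGER_SIZE
    }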
## Concurrent task limit for automatic WAL pruning.
## **It's only used when the provider is `kafka`**.
auto_prune_parallelism = 10
## Number of topics used for remote WAL.
## **It's only used when the provider is `kafka`**.
num_topics = 64 num_topics = 64
## Topic selector type. ## Topic selector type.
## Available selector types: ## Available selector types:
## - `round_robin` (default) ## - `round_robin` (default)
## **It's only used when the provider is `kafka`**.
selector_type = "round_robin" selector_type = "round_robin"
## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`. ## A Kafka topic is constructed by concatenating `topic_name_prefix` and `topic_id`.
## Only accepts strings that match the following regular expression pattern:
## [a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*
## e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1. ## e.g., greptimedb_wal_topic_0, greptimedb_wal_topic_1.
## **It's only used when the provider is `kafka`**.
topic_name_prefix = "greptimedb_wal_topic" topic_name_prefix = "greptimedb_wal_topic"
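For illustration, a small Rust sketch (assuming the `regex` crate; not the actual metasrv code) that checks a prefix against the pattern above and expands it into `num_topics` names:

    use regex::Regex; // regex = "1" (assumed dependency for this sketch)

    // Validate a prefix against the accepted pattern (anchored here for a full match)
    // and expand it into `num_topics` names: prefix + "_" + topic id.
    fn wal_topic_names(prefix: &str, num_topics: usize) -> Result<Vec<String>, String> {
        let pattern = Regex::new(r"^[a-zA-Z_:-][a-zA-Z0-9_:\-\.@#]*$").expect("valid pattern");
        if !pattern.is_match(prefix) {
            return Err(format!("invalid topic_name_prefix: {prefix}"));
        }
        Ok((0..num_topics).map(|id| format!("{prefix}_{id}")).collect())
    }

    // wal_topic_names("greptimedb_wal_topic", 64) -> greptimedb_wal_topic_0 ... greptimedb_wal_topic_63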
## Expected number of replicas of each partition. ## Expected number of replicas of each partition.
## **It's only used when the provider is `kafka`**.
replication_factor = 1 replication_factor = 1
## The timeout for creating a Kafka topic. ## Above which a topic creation operation will be cancelled.
## **It's only used when the provider is `kafka`**.
create_topic_timeout = "30s" create_topic_timeout = "30s"
## The initial backoff for kafka clients.
backoff_init = "500ms"
## The maximum backoff for kafka clients.
backoff_max = "10s"
## Exponential backoff rate, i.e. next backoff = base * current backoff.
backoff_base = 2
## Stop reconnecting if the total wait time reaches the deadline. If this config is missing, reconnection attempts never terminate.
backoff_deadline = "5mins"
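For illustration, a Rust sketch of the schedule these four settings describe (an assumption-based sketch, not the Kafka client's code):

    use std::time::Duration;

    // Sketch of the retry schedule produced by the settings above:
    // 500ms, 1s, 2s, 4s, 8s, then capped at `backoff_max` (10s) until the total
    // wait reaches `backoff_deadline` (5 minutes).
    fn backoff_schedule(init: Duration, max: Duration, base: u32, deadline: Duration) -> Vec<Duration> {
        let mut delays = Vec::new();
        let mut current = init;
        let mut total = Duration::ZERO;
        while total + current <= deadline {
            delays.push(current);
            total += current;
            // next backoff = base * current backoff
            current = (current * base).min(max);
        }
        delays
    }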
# The Kafka SASL configuration. # The Kafka SASL configuration.
# **It's only used when the provider is `kafka`**. # **It's only used when the provider is `kafka`**.
@@ -273,27 +151,10 @@ create_topic_timeout = "30s"
# client_cert_path = "/path/to/client_cert" # client_cert_path = "/path/to/client_cert"
# client_key_path = "/path/to/key" # client_key_path = "/path/to/key"
## Configuration options for the event recorder.
[event_recorder]
## TTL for the events table that will be used to store the events. Default is `90d`.
ttl = "90d"
## Configuration options for the stats persistence.
[stats_persistence]
## TTL for the stats table that will be used to store the stats.
## Set to `0s` to disable stats persistence.
## Default is `0s`.
## If you want to enable stats persistence, set the TTL to a value greater than 0.
## It is recommended to set a small value, e.g., `3h`.
ttl = "0s"
## The interval to persist the stats. Default is `10m`.
## The minimum value is `10m`, if the value is less than `10m`, it will be overridden to `10m`.
interval = "10m"
## The logging options. ## The logging options.
[logging] [logging]
## The directory to store the log files. If set to empty, logs will not be written to files. ## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "./greptimedb_data/logs" dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`. ## The log level. Can be `info`/`debug`/`warn`/`error`.
## @toml2docs:none-default ## @toml2docs:none-default
@@ -303,7 +164,7 @@ level = "info"
enable_otlp_tracing = false enable_otlp_tracing = false
## The OTLP tracing endpoint. ## The OTLP tracing endpoint.
otlp_endpoint = "http://localhost:4318/v1/traces" otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout. ## Whether to append logs to stdout.
append_stdout = true append_stdout = true
@@ -314,33 +175,50 @@ log_format = "text"
## The maximum amount of log files. ## The maximum amount of log files.
max_log_files = 720 max_log_files = 720
## The OTLP tracing export protocol. Can be `grpc`/`http`.
otlp_export_protocol = "http"
## Additional OTLP headers, only valid when using OTLP http
[logging.otlp_headers]
## @toml2docs:none-default
#Authorization = "Bearer my-token"
## @toml2docs:none-default
#Database = "My database"
## The percentage of traces that will be sampled and exported. ## The percentage of traces that will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1. ## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## Ratios > 1 are treated as 1; ratios < 0 are treated as 0. ## Ratios > 1 are treated as 1; ratios < 0 are treated as 0.
[logging.tracing_sample_ratio] [logging.tracing_sample_ratio]
default_ratio = 1.0 default_ratio = 1.0
## The slow query log options.
[logging.slow_query]
## Whether to enable slow query log.
enable = false
## The threshold of slow query.
## @toml2docs:none-default
threshold = "10s"
## The sampling ratio of slow query log. The value should be in the range of (0, 1].
## @toml2docs:none-default
sample_ratio = 1.0
## The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from Prometheus scrape.
[export_metrics]
## Whether to enable export metrics.
enable = false
## The interval of export metrics.
write_interval = "30s"
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
## You must create the database before enabling it.
[export_metrics.self_import]
## @toml2docs:none-default
db = "greptime_metrics"
[export_metrics.remote_write]
## The URL to send the metrics to. For example: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers to carry in Prometheus remote-write requests.
headers = { }
## The tracing options. Only effect when compiled with `tokio-console` feature. ## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing] #+ [tracing]
## The tokio console address. ## The tokio console address.
## @toml2docs:none-default ## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1" #+ tokio_console_addr = "127.0.0.1"
## The memory options.
[memory]
## Whether to enable heap profiling activation during startup.
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
## is set to "prof:true,prof_active:false". The official image adds this env variable.
## Default is true.
enable_heap_profiling = true
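For illustration, a hedged Rust sketch of the startup gate described above; `activate_heap_profiling` is a made-up stand-in for the jemalloc control call:

    use std::env;

    // Sketch of the startup gate: profiling is only activated when jemalloc was started
    // with profiling built in but not yet active.
    fn maybe_activate_heap_profiling(enable_heap_profiling: bool) {
        let malloc_conf = env::var("MALLOC_CONF").unwrap_or_default();
        if enable_heap_profiling
            && malloc_conf.contains("prof:true")
            && malloc_conf.contains("prof_active:false")
        {
            activate_heap_profiling();
        }
    }

    fn activate_heap_profiling() {
        // In a real build this would flip jemalloc's "prof.active" switch.
        println!("heap profiling activated");
    }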
@@ -1,11 +1,13 @@
## The running mode of the datanode. It can be `standalone` or `distributed`.
mode = "standalone"
## Enable telemetry to collect anonymous usage data.
enable_telemetry = true
## The default timezone of the server. ## The default timezone of the server.
## @toml2docs:none-default ## @toml2docs:none-default
default_timezone = "UTC" default_timezone = "UTC"
## The default column prefix for auto-created time index and value columns.
## @toml2docs:none-default
default_column_prefix = "greptime"
## Initialize all regions in the background during the startup. ## Initialize all regions in the background during the startup.
## By default, it provides services after all regions have been initialized. ## By default, it provides services after all regions have been initialized.
init_regions_in_background = false init_regions_in_background = false
@@ -14,18 +16,8 @@ init_regions_in_background = false
init_regions_parallelism = 16 init_regions_parallelism = 16
## The maximum concurrent queries allowed to be executed. Zero means unlimited. ## The maximum concurrent queries allowed to be executed. Zero means unlimited.
## NOTE: This setting affects scan_memory_limit's privileged tier allocation.
## When set, 70% of queries get privileged memory access (full scan_memory_limit).
## The remaining 30% get standard tier access (70% of scan_memory_limit).
max_concurrent_queries = 0 max_concurrent_queries = 0
## Enable telemetry to collect anonymous usage data. Enabled by default.
#+ enable_telemetry = true
## The maximum in-flight write bytes.
## @toml2docs:none-default
#+ max_in_flight_write_bytes = "500MB"
## The runtime options. ## The runtime options.
#+ [runtime] #+ [runtime]
## The number of threads to execute the runtime for global read operations. ## The number of threads to execute the runtime for global read operations.
@@ -38,44 +30,18 @@ max_concurrent_queries = 0
## The address to bind the HTTP server. ## The address to bind the HTTP server.
addr = "127.0.0.1:4000" addr = "127.0.0.1:4000"
## HTTP request timeout. Set to 0 to disable timeout. ## HTTP request timeout. Set to 0 to disable timeout.
timeout = "0s" timeout = "30s"
## HTTP request body limit. ## HTTP request body limit.
## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`. ## The following units are supported: `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`.
## Set to 0 to disable limit. ## Set to 0 to disable limit.
body_limit = "64MB" body_limit = "64MB"
## Maximum total memory for all concurrent HTTP request bodies.
## Set to 0 to disable the limit. Default: "0" (unlimited)
## @toml2docs:none-default
#+ max_total_body_memory = "1GB"
## HTTP CORS support; it's turned on by default.
## This allows browsers to access HTTP APIs without CORS restrictions.
enable_cors = true
## Customize allowed origins for HTTP CORS.
## @toml2docs:none-default
cors_allowed_origins = ["https://example.com"]
## Whether to enable validation for Prometheus remote write requests.
## Available options:
## - strict: deny invalid UTF-8 strings (default).
## - lossy: allow invalid UTF-8 strings, replace invalid characters with REPLACEMENT_CHARACTER(U+FFFD).
## - unchecked: do not validate strings.
prom_validation_mode = "strict"
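For illustration, a Rust sketch of what the three modes mean for an incoming string (an assumption-based sketch, not the server's actual code path):

    // Sketch of how the three modes treat a label value from a remote write request.
    fn decode_label(bytes: &[u8], mode: &str) -> Result<String, String> {
        match mode {
            // strict: deny invalid UTF-8.
            "strict" => String::from_utf8(bytes.to_vec()).map_err(|e| e.to_string()),
            // lossy: keep the request, replacing invalid sequences with U+FFFD.
            "lossy" => Ok(String::from_utf8_lossy(bytes).into_owned()),
            // unchecked: skip validation entirely (assumes the client sent valid UTF-8).
            "unchecked" => Ok(unsafe { String::from_utf8_unchecked(bytes.to_vec()) }),
            other => Err(format!("unknown prom_validation_mode: {other}")),
        }
    }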
## The gRPC server options. ## The gRPC server options.
[grpc] [grpc]
## The address to bind the gRPC server. ## The address to bind the gRPC server.
bind_addr = "127.0.0.1:4001" addr = "127.0.0.1:4001"
## The number of server worker threads. ## The number of server worker threads.
runtime_size = 8 runtime_size = 8
## Maximum total memory for all concurrent gRPC request messages.
## Set to 0 to disable the limit. Default: "0" (unlimited)
## @toml2docs:none-default
#+ max_total_message_memory = "1GB"
## The maximum connection age for gRPC connection.
## The value can be a human-readable time string. For example: `10m` for ten minutes or `1h` for one hour.
## Refer to https://grpc.io/docs/guides/keepalive/ for more details.
## @toml2docs:none-default
#+ max_connection_age = "10m"
## gRPC server TLS options, see `mysql.tls` section. ## gRPC server TLS options, see `mysql.tls` section.
[grpc.tls] [grpc.tls]
@@ -102,11 +68,7 @@ enable = true
addr = "127.0.0.1:4002" addr = "127.0.0.1:4002"
## The number of server worker threads. ## The number of server worker threads.
runtime_size = 2 runtime_size = 2
## Server-side keep-alive time.
## Set to 0 (default) to disable.
keep_alive = "0s"
## Maximum entries in the MySQL prepared statement cache; default is 10,000.
prepared_stmt_cache_size = 10000
# MySQL server TLS options. # MySQL server TLS options.
[mysql.tls] [mysql.tls]
@@ -137,9 +99,6 @@ enable = true
addr = "127.0.0.1:4003" addr = "127.0.0.1:4003"
## The number of server worker threads. ## The number of server worker threads.
runtime_size = 2 runtime_size = 2
## Server-side keep-alive time.
## Set to 0 (default) to disable.
keep_alive = "0s"
## PostgresSQL server TLS options, see `mysql.tls` section. ## PostgresSQL server TLS options, see `mysql.tls` section.
[postgres.tls] [postgres.tls]
@@ -167,11 +126,6 @@ enable = true
## Whether to enable InfluxDB protocol in HTTP API. ## Whether to enable InfluxDB protocol in HTTP API.
enable = true enable = true
## Jaeger protocol options.
[jaeger]
## Whether to enable Jaeger protocol in HTTP API.
enable = true
## Prometheus remote storage options ## Prometheus remote storage options
[prom_store] [prom_store]
## Whether to enable Prometheus remote write and read in HTTP API. ## Whether to enable Prometheus remote write and read in HTTP API.
@@ -189,19 +143,19 @@ provider = "raft_engine"
## The directory to store the WAL files. ## The directory to store the WAL files.
## **It's only used when the provider is `raft_engine`**. ## **It's only used when the provider is `raft_engine`**.
## @toml2docs:none-default ## @toml2docs:none-default
dir = "./greptimedb_data/wal" dir = "/tmp/greptimedb/wal"
## The size of the WAL segment file. ## The size of the WAL segment file.
## **It's only used when the provider is `raft_engine`**. ## **It's only used when the provider is `raft_engine`**.
file_size = "128MB" file_size = "256MB"
## The threshold of the WAL size to trigger a purge. ## The threshold of the WAL size to trigger a flush.
## **It's only used when the provider is `raft_engine`**. ## **It's only used when the provider is `raft_engine`**.
purge_threshold = "1GB" purge_threshold = "4GB"
## The interval to trigger a purge. ## The interval to trigger a flush.
## **It's only used when the provider is `raft_engine`**. ## **It's only used when the provider is `raft_engine`**.
purge_interval = "1m" purge_interval = "10m"
## The read batch size. ## The read batch size.
## **It's only used when the provider is `raft_engine`**. ## **It's only used when the provider is `raft_engine`**.
@@ -267,6 +221,22 @@ max_batch_bytes = "1MB"
## **It's only used when the provider is `kafka`**. ## **It's only used when the provider is `kafka`**.
consumer_wait_timeout = "100ms" consumer_wait_timeout = "100ms"
## The initial backoff delay.
## **It's only used when the provider is `kafka`**.
backoff_init = "500ms"
## The maximum backoff delay.
## **It's only used when the provider is `kafka`**.
backoff_max = "10s"
## The exponential backoff rate, i.e. next backoff = base * current backoff.
## **It's only used when the provider is `kafka`**.
backoff_base = 2
## The deadline of retries.
## **It's only used when the provider is `kafka`**.
backoff_deadline = "5mins"
## Ignore missing entries during read WAL. ## Ignore missing entries during read WAL.
## **It's only used when the provider is `kafka`**. ## **It's only used when the provider is `kafka`**.
## ##
@@ -298,12 +268,10 @@ overwrite_entry_start_id = false
## Metadata storage options. ## Metadata storage options.
[metadata_store] [metadata_store]
## The size of the metadata store log file. ## Kv file size in bytes.
file_size = "64MB" file_size = "256MB"
## The threshold of the metadata store size to trigger a purge. ## Kv purge threshold.
purge_threshold = "256MB" purge_threshold = "4GB"
## The interval of the metadata store to trigger a purge.
purge_interval = "1m"
## Procedure storage options. ## Procedure storage options.
[procedure] [procedure]
@@ -311,16 +279,6 @@ purge_interval = "1m"
max_retry_times = 3 max_retry_times = 3
## Initial retry delay of procedures, increases exponentially ## Initial retry delay of procedures, increases exponentially
retry_delay = "500ms" retry_delay = "500ms"
## Max running procedures.
## The maximum number of procedures that can be running at the same time.
## If the number of running procedures exceeds this limit, the procedure will be rejected.
max_running_procedures = 128
## Flow engine options.
[flow]
## The number of flow workers in the flownode.
## If not set (or set to 0), the number of CPU cores divided by 2 is used.
#+ num_workers = 0
# Example of using S3 as the storage. # Example of using S3 as the storage.
# [storage] # [storage]
@@ -331,7 +289,6 @@ max_running_procedures = 128
# secret_access_key = "123456" # secret_access_key = "123456"
# endpoint = "https://s3.amazonaws.com" # endpoint = "https://s3.amazonaws.com"
# region = "us-west-2" # region = "us-west-2"
# enable_virtual_host_style = false
# Example of using Oss as the storage. # Example of using Oss as the storage.
# [storage] # [storage]
@@ -362,23 +319,10 @@ max_running_procedures = 128
# credential = "base64-credential" # credential = "base64-credential"
# endpoint = "https://storage.googleapis.com" # endpoint = "https://storage.googleapis.com"
## The query engine options.
[query]
## Parallelism of the query engine.
## Default to 0, which means the number of CPU cores.
parallelism = 0
## Memory pool size for query execution operators (aggregation, sorting, join).
## Supports absolute size (e.g., "2GB", "4GB") or percentage of system memory (e.g., "20%").
## Setting it to 0 disables the limit (unbounded, default behavior).
## When this limit is reached, queries will fail with ResourceExhausted error.
## NOTE: This does NOT limit memory used by table scans.
memory_pool_size = "50%"
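For illustration, a Rust sketch of how a "size or percentage" value like this can be resolved against system memory (assumed helper, simplified unit handling):

    // Sketch of resolving a "size or percentage" value such as `memory_pool_size = "50%"`.
    fn resolve_memory_limit(value: &str, system_memory_bytes: u64) -> Option<u64> {
        let value = value.trim();
        if value == "0" {
            return None; // unbounded
        }
        if let Some(pct) = value.strip_suffix('%') {
            let pct: u64 = pct.trim().parse().ok()?;
            return Some(system_memory_bytes * pct / 100);
        }
        // Minimal absolute-size handling for the examples above ("2GB", "4GB").
        if let Some(gb) = value.strip_suffix("GB") {
            let gb: u64 = gb.trim().parse().ok()?;
            return Some(gb * 1024 * 1024 * 1024);
        }
        None
    }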
## The data storage options. ## The data storage options.
[storage] [storage]
## The working home directory. ## The working home directory.
data_home = "./greptimedb_data" data_home = "/tmp/greptimedb/"
## The storage type used to store the data. ## The storage type used to store the data.
## - `File`: the data is stored in the local file system. ## - `File`: the data is stored in the local file system.
@@ -388,17 +332,14 @@ data_home = "./greptimedb_data"
## - `Oss`: the data is stored in the Aliyun OSS. ## - `Oss`: the data is stored in the Aliyun OSS.
type = "File" type = "File"
## Whether to enable read cache. If not set, the read cache will be enabled by default when using object storage. ## Cache configuration for object storage such as 'S3' etc.
#+ enable_read_cache = true ## The local file cache directory.
## Read cache configuration for object storage such as 'S3'. It's configured by default when using object storage, and configuring it is recommended for better performance.
## A local file directory, defaults to `{data_home}`. An empty string disables the read cache.
## @toml2docs:none-default ## @toml2docs:none-default
#+ cache_path = "" cache_path = "/path/local_cache"
## The local file cache capacity in bytes. If your disk space is sufficient, it is recommended to set it larger. ## The local file cache capacity in bytes.
## @toml2docs:none-default ## @toml2docs:none-default
cache_capacity = "5GiB" cache_capacity = "256MB"
## The S3 bucket name. ## The S3 bucket name.
## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**. ## **It's only used when the storage type is `S3`, `Oss` and `Gcs`**.
@@ -472,27 +413,6 @@ endpoint = "https://s3.amazonaws.com"
## @toml2docs:none-default ## @toml2docs:none-default
region = "us-west-2" region = "us-west-2"
## The http client options to the storage.
## **It's only used when the storage type is `S3`, `Oss`, `Gcs` and `Azblob`**.
[storage.http_client]
## The maximum idle connection per host allowed in the pool.
pool_max_idle_per_host = 1024
## The timeout for only the connect phase of a http client.
connect_timeout = "30s"
## The total request timeout, applied from when the request starts connecting until the response body has finished.
## Also considered a total deadline.
timeout = "30s"
## The timeout for idle sockets being kept-alive.
pool_idle_timeout = "90s"
## To skip the ssl verification
## **Security Notice**: Setting `skip_ssl_validation = true` disables certificate verification, making connections vulnerable to man-in-the-middle attacks. Only use this in development or trusted private networks.
skip_ssl_validation = false
# Custom storage options # Custom storage options
# [[storage.providers]] # [[storage.providers]]
# name = "S3" # name = "S3"
@@ -577,57 +497,38 @@ auto_flush_interval = "1h"
## @toml2docs:none-default="Auto" ## @toml2docs:none-default="Auto"
#+ selector_result_cache_size = "512MB" #+ selector_result_cache_size = "512MB"
## Whether to enable the write cache. It's enabled by default when using object storage, and enabling it is recommended for better performance. ## Whether to enable the experimental write cache.
enable_write_cache = false enable_experimental_write_cache = false
## File system path for write cache, defaults to `{data_home}`. ## File system path for write cache, defaults to `{data_home}/write_cache`.
write_cache_path = "" experimental_write_cache_path = ""
## Capacity for write cache. If your disk space is sufficient, it is recommended to set it larger. ## Capacity for write cache.
write_cache_size = "5GiB" experimental_write_cache_size = "512MB"
## TTL for write cache. ## TTL for write cache.
## @toml2docs:none-default ## @toml2docs:none-default
write_cache_ttl = "8h" experimental_write_cache_ttl = "8h"
## Preload index (puffin) files into cache on region open (default: true).
## When enabled, index files are loaded into the write cache during region initialization,
## which can improve query performance at the cost of longer startup times.
preload_index_cache = true
## Percentage of write cache capacity allocated for index (puffin) files (default: 20).
## The remaining capacity is used for data (parquet) files.
## Must be between 0 and 100 (exclusive). For example, with a 5GiB write cache and 20% allocation,
## 1GiB is reserved for index files and 4GiB for data files.
index_cache_percent = 20
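For illustration, the split described above in a tiny Rust sketch (assumed helper name):

    // Sketch of the split: with a 5GiB cache and 20 percent for index files,
    // 1GiB goes to puffin files and 4GiB to parquet files.
    fn write_cache_split(total_bytes: u64, index_cache_percent: u64) -> (u64, u64) {
        assert!(index_cache_percent > 0 && index_cache_percent < 100);
        let index_bytes = total_bytes * index_cache_percent / 100;
        (index_bytes, total_bytes - index_bytes)
    }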
## Buffer size for SST writing. ## Buffer size for SST writing.
sst_write_buffer_size = "8MB" sst_write_buffer_size = "8MB"
## Parallelism to scan a region (default: 1/4 of cpu cores).
## - `0`: using the default value (1/4 of cpu cores).
## - `1`: scan in current thread.
## - `n`: scan in parallelism n.
scan_parallelism = 0
## Capacity of the channel to send data from parallel scan tasks to the main task. ## Capacity of the channel to send data from parallel scan tasks to the main task.
parallel_scan_channel_size = 32 parallel_scan_channel_size = 32
## Maximum number of SST files to scan concurrently.
max_concurrent_scan_files = 384
## Whether to allow stale WAL entries read during replay. ## Whether to allow stale WAL entries read during replay.
allow_stale_entries = false allow_stale_entries = false
## Memory limit for table scans across all queries.
## Supports absolute size (e.g., "2GB") or percentage of system memory (e.g., "20%").
## Setting it to 0 disables the limit.
## NOTE: Works with max_concurrent_queries for tiered memory allocation.
## - If max_concurrent_queries is set: 70% of queries get full access, 30% get 70% access.
## - If max_concurrent_queries is 0 (unlimited): first 20 queries get full access, rest get 70% access.
scan_memory_limit = "50%"
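For illustration, a Rust sketch of the tiering rule spelled out above (assumed helper; the actual accounting is internal to the engine):

    // Sketch of the per-query scan memory quota under the tiering rule above.
    fn scan_quota(scan_memory_limit: u64, max_concurrent_queries: usize, query_slot: usize) -> u64 {
        let privileged_slots = if max_concurrent_queries > 0 {
            max_concurrent_queries * 70 / 100 // 70% of queries are privileged
        } else {
            20 // unlimited concurrency: the first 20 queries are privileged
        };
        if query_slot < privileged_slots {
            scan_memory_limit // full access
        } else {
            scan_memory_limit * 70 / 100 // standard tier
        }
    }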
## Minimum time interval between two compactions. ## Minimum time interval between two compactions.
## To align with the old behavior, the default value is 0 (no restrictions). ## To align with the old behavior, the default value is 0 (no restrictions).
min_compaction_interval = "0m" min_compaction_interval = "0m"
## Whether to enable experimental flat format as the default format.
default_experimental_flat_format = false
## The options for index in Mito engine. ## The options for index in Mito engine.
[region_engine.mito.index] [region_engine.mito.index]
@@ -643,23 +544,6 @@ aux_path = ""
## The max capacity of the staging directory. ## The max capacity of the staging directory.
staging_size = "2GB" staging_size = "2GB"
## The TTL of the staging directory.
## Defaults to 7 days.
## Setting it to "0s" to disable TTL.
staging_ttl = "7d"
## Cache size for inverted index metadata.
metadata_cache_size = "64MiB"
## Cache size for inverted index content.
content_cache_size = "128MiB"
## Page size for inverted index content cache.
content_cache_page_size = "64KiB"
## Cache size for index result.
result_cache_size = "128MiB"
## The options for inverted index in Mito engine. ## The options for inverted index in Mito engine.
[region_engine.mito.inverted_index] [region_engine.mito.inverted_index]
@@ -687,6 +571,12 @@ mem_threshold_on_create = "auto"
## Deprecated, use `region_engine.mito.index.aux_path` instead. ## Deprecated, use `region_engine.mito.index.aux_path` instead.
intermediate_path = "" intermediate_path = ""
## Cache size for inverted index metadata.
metadata_cache_size = "64MiB"
## Cache size for inverted index content.
content_cache_size = "128MiB"
## The options for full-text index in Mito engine. ## The options for full-text index in Mito engine.
[region_engine.mito.fulltext_index] [region_engine.mito.fulltext_index]
@@ -711,30 +601,6 @@ apply_on_query = "auto"
## - `[size]` e.g. `64MB`: fixed memory threshold ## - `[size]` e.g. `64MB`: fixed memory threshold
mem_threshold_on_create = "auto" mem_threshold_on_create = "auto"
## The options for bloom filter in Mito engine.
[region_engine.mito.bloom_filter_index]
## Whether to create the bloom filter on flush.
## - `auto`: automatically (default)
## - `disable`: never
create_on_flush = "auto"
## Whether to create the bloom filter on compaction.
## - `auto`: automatically (default)
## - `disable`: never
create_on_compaction = "auto"
## Whether to apply the bloom filter on query
## - `auto`: automatically (default)
## - `disable`: never
apply_on_query = "auto"
## Memory threshold for bloom filter creation.
## - `auto`: automatically determine the threshold based on the system memory size (default)
## - `unlimited`: no memory limit
## - `[size]` e.g. `64MB`: fixed memory threshold
mem_threshold_on_create = "auto"
[region_engine.mito.memtable] [region_engine.mito.memtable]
## Memtable type. ## Memtable type.
## - `time_series`: time-series memtable ## - `time_series`: time-series memtable
@@ -757,16 +623,10 @@ fork_dictionary_bytes = "1GiB"
## Enable the file engine. ## Enable the file engine.
[region_engine.file] [region_engine.file]
[[region_engine]]
## Metric engine options.
[region_engine.metric]
## Whether to use sparse primary key encoding.
sparse_primary_key_encoding = true
## The logging options. ## The logging options.
[logging] [logging]
## The directory to store the log files. If set to empty, logs will not be written to files. ## The directory to store the log files. If set to empty, logs will not be written to files.
dir = "./greptimedb_data/logs" dir = "/tmp/greptimedb/logs"
## The log level. Can be `info`/`debug`/`warn`/`error`. ## The log level. Can be `info`/`debug`/`warn`/`error`.
## @toml2docs:none-default ## @toml2docs:none-default
@@ -776,7 +636,7 @@ level = "info"
enable_otlp_tracing = false enable_otlp_tracing = false
## The OTLP tracing endpoint. ## The OTLP tracing endpoint.
otlp_endpoint = "http://localhost:4318/v1/traces" otlp_endpoint = "http://localhost:4317"
## Whether to append logs to stdout. ## Whether to append logs to stdout.
append_stdout = true append_stdout = true
@@ -787,16 +647,6 @@ log_format = "text"
## The maximum amount of log files. ## The maximum amount of log files.
max_log_files = 720 max_log_files = 720
## The OTLP tracing export protocol. Can be `grpc`/`http`.
otlp_export_protocol = "http"
## Additional OTLP headers, only valid when using OTLP http
[logging.otlp_headers]
## @toml2docs:none-default
#Authorization = "Bearer my-token"
## @toml2docs:none-default
#Database = "My database"
## The percentage of traces that will be sampled and exported. ## The percentage of traces that will be sampled and exported.
## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1. ## Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
## Ratios > 1 are treated as 1; ratios < 0 are treated as 0. ## Ratios > 1 are treated as 1; ratios < 0 are treated as 0.
@@ -804,32 +654,43 @@ otlp_export_protocol = "http"
default_ratio = 1.0 default_ratio = 1.0
## The slow query log options. ## The slow query log options.
[slow_query] [logging.slow_query]
## Whether to enable slow query log. ## Whether to enable slow query log.
#+ enable = false enable = false
## The record type of slow queries. It can be `system_table` or `log`.
## @toml2docs:none-default
#+ record_type = "system_table"
## The threshold of slow query. ## The threshold of slow query.
## @toml2docs:none-default ## @toml2docs:none-default
#+ threshold = "10s" threshold = "10s"
## The sampling ratio of slow query log. The value should be in the range of (0, 1]. ## The sampling ratio of slow query log. The value should be in the range of (0, 1].
## @toml2docs:none-default ## @toml2docs:none-default
#+ sample_ratio = 1.0 sample_ratio = 1.0
## The datanode can export its metrics and send them to a Prometheus-compatible service (e.g. `greptimedb` itself) via the remote-write API.
## This is only used for `greptimedb` to export its own metrics internally. It's different from Prometheus scrape.
[export_metrics]
## Whether to enable export metrics.
enable = false
## The interval of export metrics.
write_interval = "30s"
## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
## You must create the database before enabling it.
[export_metrics.self_import]
## @toml2docs:none-default
db = "greptime_metrics"
[export_metrics.remote_write]
## The URL to send the metrics to. For example: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
url = ""
## HTTP headers to carry in Prometheus remote-write requests.
headers = { }
## The tracing options. Only effect when compiled with `tokio-console` feature. ## The tracing options. Only effect when compiled with `tokio-console` feature.
#+ [tracing] #+ [tracing]
## The tokio console address. ## The tokio console address.
## @toml2docs:none-default ## @toml2docs:none-default
#+ tokio_console_addr = "127.0.0.1" #+ tokio_console_addr = "127.0.0.1"
## The memory options.
[memory]
## Whether to enable heap profiling activation during startup.
## When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
## is set to "prof:true,prof_active:false". The official image adds this env variable.
## Default is true.
enable_heap_profiling = true
@@ -1,156 +0,0 @@
/*
* Copyright 2023 Greptime Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as core from "@actions/core";
import {obtainClient} from "@/common";
interface RepoConfig {
tokenEnv: string;
repo: string;
workflowLogic: (version: string) => [string, string] | null;
}
const REPO_CONFIGS: Record<string, RepoConfig> = {
website: {
tokenEnv: "WEBSITE_REPO_TOKEN",
repo: "website",
workflowLogic: (version: string) => {
// Skip nightly versions for website
if (version.includes('nightly')) {
console.log('Nightly version detected for website, skipping workflow trigger.');
return null;
}
return ['bump-patch-version.yml', version];
}
},
demo: {
tokenEnv: "DEMO_REPO_TOKEN",
repo: "demo-scene",
workflowLogic: (version: string) => {
// Skip nightly versions for demo
if (version.includes('nightly')) {
console.log('Nightly version detected for demo, skipping workflow trigger.');
return null;
}
return ['bump-patch-version.yml', version];
}
},
docs: {
tokenEnv: "DOCS_REPO_TOKEN",
repo: "docs",
workflowLogic: (version: string) => {
// Check if it's a nightly version
if (version.includes('nightly')) {
return ['bump-nightly-version.yml', version];
}
const parts = version.split('.');
if (parts.length !== 3) {
throw new Error('Invalid version format');
}
// If patch version (last number) is 0, it's a major version
// Return only major.minor version
if (parts[2] === '0') {
return ['bump-version.yml', `${parts[0]}.${parts[1]}`];
}
// Otherwise it's a patch version, use full version
return ['bump-patch-version.yml', version];
}
}
};
async function triggerWorkflow(repoConfig: RepoConfig, workflowId: string, version: string) {
const client = obtainClient(repoConfig.tokenEnv);
try {
await client.rest.actions.createWorkflowDispatch({
owner: "GreptimeTeam",
repo: repoConfig.repo,
workflow_id: workflowId,
ref: "main",
inputs: {
version,
},
});
console.log(`Successfully triggered ${workflowId} workflow for ${repoConfig.repo} with version ${version}`);
} catch (error) {
core.setFailed(`Failed to trigger workflow for ${repoConfig.repo}: ${error.message}`);
throw error;
}
}
async function processRepo(repoName: string, version: string) {
const repoConfig = REPO_CONFIGS[repoName];
if (!repoConfig) {
throw new Error(`Unknown repository: ${repoName}`);
}
try {
const workflowResult = repoConfig.workflowLogic(version);
if (workflowResult === null) {
// Skip this repo (e.g., nightly version for website)
return;
}
const [workflowId, apiVersion] = workflowResult;
await triggerWorkflow(repoConfig, workflowId, apiVersion);
} catch (error) {
core.setFailed(`Error processing ${repoName} with version ${version}: ${error.message}`);
throw error;
}
}
async function main() {
const version = process.env.VERSION;
if (!version) {
core.setFailed("VERSION environment variable is required");
process.exit(1);
}
// Remove 'v' prefix if exists
const cleanVersion = version.startsWith('v') ? version.slice(1) : version;
// Get target repositories from environment variable
// Default to both if not specified
const targetRepos = process.env.TARGET_REPOS?.split(',').map(repo => repo.trim()) || ['website', 'docs'];
console.log(`Processing version ${cleanVersion} for repositories: ${targetRepos.join(', ')}`);
const errors: string[] = [];
// Process each repository
for (const repo of targetRepos) {
try {
await processRepo(repo, cleanVersion);
} catch (error) {
errors.push(`${repo}: ${error.message}`);
}
}
if (errors.length > 0) {
core.setFailed(`Failed to process some repositories: ${errors.join('; ')}`);
process.exit(1);
}
console.log('All repositories processed successfully');
}
// Execute main function
main().catch((error) => {
core.setFailed(`Unexpected error: ${error.message}`);
process.exit(1);
});
@@ -55,25 +55,12 @@ async function main() {
await client.rest.issues.addLabels({ await client.rest.issues.addLabels({
owner, repo, issue_number: number, labels: [labelDocsRequired], owner, repo, issue_number: number, labels: [labelDocsRequired],
}) })
// Get available assignees for the docs repo
const assigneesResponse = await docsClient.rest.issues.listAssignees({
owner: 'GreptimeTeam',
repo: 'docs',
})
const validAssignees = assigneesResponse.data.map(assignee => assignee.login)
core.info(`Available assignees: ${validAssignees.join(', ')}`)
// Check if the actor is a valid assignee, otherwise fallback to fengjiachun
const assignee = validAssignees.includes(actor) ? actor : 'fengjiachun'
core.info(`Assigning issue to: ${assignee}`)
await docsClient.rest.issues.create({ await docsClient.rest.issues.create({
owner: 'GreptimeTeam', owner: 'GreptimeTeam',
repo: 'docs', repo: 'docs',
title: `Update docs for ${title}`, title: `Update docs for ${title}`,
body: `A document change request is generated from ${html_url}`, body: `A document change request is generated from ${html_url}`,
assignee: assignee, assignee: actor,
}).then((res) => { }).then((res) => {
core.info(`Created issue ${res.data}`) core.info(`Created issue ${res.data}`)
}) })
@@ -1,10 +1,10 @@
FROM centos:7 AS builder FROM centos:7 as builder
ARG CARGO_PROFILE ARG CARGO_PROFILE
ARG FEATURES ARG FEATURES
ARG OUTPUT_DIR ARG OUTPUT_DIR
ENV LANG=en_US.utf8 ENV LANG en_US.utf8
WORKDIR /greptimedb WORKDIR /greptimedb
# Install dependencies # Install dependencies
@@ -13,6 +13,8 @@ RUN yum install -y epel-release \
openssl \ openssl \
openssl-devel \ openssl-devel \
centos-release-scl \ centos-release-scl \
rh-python38 \
rh-python38-python-devel \
which which
# Install protoc # Install protoc
@@ -22,7 +24,7 @@ RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
# Install Rust # Install Rust
SHELL ["/bin/bash", "-c"] SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH=/usr/local/bin:/root/.cargo/bin/:$PATH ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
# Build the project in release mode. # Build the project in release mode.
RUN --mount=target=.,rw \ RUN --mount=target=.,rw \
@@ -33,7 +35,7 @@ RUN --mount=target=.,rw \
TARGET_DIR=/out/target TARGET_DIR=/out/target
# Export the binary to the clean image. # Export the binary to the clean image.
FROM centos:7 AS base FROM centos:7 as base
ARG OUTPUT_DIR ARG OUTPUT_DIR
@@ -41,12 +43,12 @@ RUN yum install -y epel-release \
openssl \ openssl \
openssl-devel \ openssl-devel \
centos-release-scl \ centos-release-scl \
rh-python38 \
rh-python38-python-devel \
which which
WORKDIR /greptime WORKDIR /greptime
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/ COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
ENV PATH=/greptime/bin/:$PATH ENV PATH /greptime/bin/:$PATH
ENV MALLOC_CONF="prof:true,prof_active:false"
ENTRYPOINT ["greptime"] ENTRYPOINT ["greptime"]
@@ -1,65 +0,0 @@
FROM ubuntu:22.04 AS builder
ARG CARGO_PROFILE
ARG FEATURES
ARG OUTPUT_DIR
ENV LANG=en_US.utf8
WORKDIR /greptimedb
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common
# Install dependencies.
RUN --mount=type=cache,target=/var/cache/apt \
apt-get update && apt-get install -y \
libssl-dev \
protobuf-compiler \
curl \
git \
build-essential \
pkg-config
# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH=/root/.cargo/bin/:$PATH
# Build the project in release mode.
RUN --mount=target=. \
--mount=type=cache,target=/root/.cargo/registry \
make build \
CARGO_PROFILE=${CARGO_PROFILE} \
FEATURES=${FEATURES} \
TARGET_DIR=/out/target
FROM ubuntu:22.04 AS libs
ARG TARGETARCH
# Copy required library dependencies based on architecture
RUN if [ "$TARGETARCH" = "amd64" ]; then \
cp /lib/x86_64-linux-gnu/libz.so.1.2.11 /lib/x86_64-linux-gnu/libz.so.1; \
elif [ "$TARGETARCH" = "arm64" ]; then \
cp /lib/aarch64-linux-gnu/libz.so.1.2.11 /lib/aarch64-linux-gnu/libz.so.1; \
else \
echo "Unsupported architecture: $TARGETARCH" && exit 1; \
fi
# Export the binary to the clean distroless image.
FROM gcr.io/distroless/cc-debian12:latest AS base
ARG OUTPUT_DIR
ARG TARGETARCH
# Copy required library dependencies
COPY --from=libs /lib /lib
COPY --from=busybox:stable /bin/busybox /bin/busybox
WORKDIR /greptime
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/greptime
ENV PATH=/greptime/bin/:$PATH
ENV MALLOC_CONF="prof:true,prof_active:false"
ENTRYPOINT ["greptime"]
@@ -1,14 +1,16 @@
FROM ubuntu:22.04 AS builder FROM ubuntu:20.04 as builder
ARG CARGO_PROFILE ARG CARGO_PROFILE
ARG FEATURES ARG FEATURES
ARG OUTPUT_DIR ARG OUTPUT_DIR
ENV LANG=en_US.utf8 ENV LANG en_US.utf8
WORKDIR /greptimedb WORKDIR /greptimedb
# Add PPA for Python 3.10.
RUN apt-get update && \ RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
add-apt-repository ppa:deadsnakes/ppa -y
# Install dependencies. # Install dependencies.
RUN --mount=type=cache,target=/var/cache/apt \ RUN --mount=type=cache,target=/var/cache/apt \
@@ -18,12 +20,15 @@ RUN --mount=type=cache,target=/var/cache/apt \
curl \ curl \
git \ git \
build-essential \ build-essential \
pkg-config pkg-config \
python3.10 \
python3.10-dev \
python3-pip
# Install Rust. # Install Rust.
SHELL ["/bin/bash", "-c"] SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH=/root/.cargo/bin/:$PATH ENV PATH /root/.cargo/bin/:$PATH
# Build the project in release mode. # Build the project in release mode.
RUN --mount=target=. \ RUN --mount=target=. \
@@ -35,18 +40,23 @@ RUN --mount=target=. \
# Export the binary to the clean image. # Export the binary to the clean image.
# TODO(zyy17): Maybe should use the more secure container image. # TODO(zyy17): Maybe should use the more secure container image.
FROM ubuntu:22.04 AS base FROM ubuntu:22.04 as base
ARG OUTPUT_DIR ARG OUTPUT_DIR
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get \ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get \
-y install ca-certificates \ -y install ca-certificates \
python3.10 \
python3.10-dev \
python3-pip \
curl curl
COPY ./docker/python/requirements.txt /etc/greptime/requirements.txt
RUN python3 -m pip install -r /etc/greptime/requirements.txt
WORKDIR /greptime WORKDIR /greptime
COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/ COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
ENV PATH=/greptime/bin/:$PATH ENV PATH /greptime/bin/:$PATH
ENV MALLOC_CONF="prof:true,prof_active:false"
ENTRYPOINT ["greptime"] ENTRYPOINT ["greptime"]
@@ -7,14 +7,14 @@ RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
RUN yum install -y epel-release \ RUN yum install -y epel-release \
openssl \ openssl \
openssl-devel \ openssl-devel \
centos-release-scl centos-release-scl \
rh-python38 \
rh-python38-python-devel
ARG TARGETARCH ARG TARGETARCH
ADD $TARGETARCH/greptime /greptime/bin/ ADD $TARGETARCH/greptime /greptime/bin/
ENV PATH=/greptime/bin/:$PATH ENV PATH /greptime/bin/:$PATH
ENV MALLOC_CONF="prof:true,prof_active:false"
ENTRYPOINT ["greptime"] ENTRYPOINT ["greptime"]
@@ -1,40 +0,0 @@
FROM ubuntu:22.04 AS libs
ARG TARGETARCH
# Copy required library dependencies based on architecture
# TARGETARCH values: amd64, arm64
# Ubuntu library paths: x86_64-linux-gnu, aarch64-linux-gnu
RUN if [ "$TARGETARCH" = "amd64" ]; then \
mkdir -p /output/x86_64-linux-gnu && \
cp /lib/x86_64-linux-gnu/libz.so.1.2.11 /output/x86_64-linux-gnu/libz.so.1; \
elif [ "$TARGETARCH" = "arm64" ]; then \
mkdir -p /output/aarch64-linux-gnu && \
cp /lib/aarch64-linux-gnu/libz.so.1.2.11 /output/aarch64-linux-gnu/libz.so.1; \
else \
echo "Unsupported architecture: $TARGETARCH" && exit 1; \
fi
FROM gcr.io/distroless/cc-debian12:latest
# The root path under which contains all the dependencies to build this Dockerfile.
ARG DOCKER_BUILD_ROOT=.
# The binary name of GreptimeDB executable.
# Defaults to "greptime", but sometimes in other projects it might be different.
ARG TARGET_BIN=greptime
ARG TARGETARCH
# Copy required library dependencies
COPY --from=libs /output /lib
COPY --from=busybox:stable /bin/busybox /bin/busybox
ADD $TARGETARCH/$TARGET_BIN /greptime/bin/
ENV PATH=/greptime/bin/:$PATH
ENV TARGET_BIN=$TARGET_BIN
ENV MALLOC_CONF="prof:true,prof_active:false"
ENTRYPOINT ["greptime"]
@@ -8,16 +8,21 @@ ARG TARGET_BIN=greptime
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
ca-certificates \ ca-certificates \
python3.10 \
python3.10-dev \
python3-pip \
curl curl
COPY $DOCKER_BUILD_ROOT/docker/python/requirements.txt /etc/greptime/requirements.txt
RUN python3 -m pip install -r /etc/greptime/requirements.txt
ARG TARGETARCH ARG TARGETARCH
ADD $TARGETARCH/$TARGET_BIN /greptime/bin/ ADD $TARGETARCH/$TARGET_BIN /greptime/bin/
ENV PATH=/greptime/bin/:$PATH ENV PATH /greptime/bin/:$PATH
ENV TARGET_BIN=$TARGET_BIN ENV TARGET_BIN=$TARGET_BIN
ENV MALLOC_CONF="prof:true,prof_active:false"
ENTRYPOINT ["sh", "-c", "exec $TARGET_BIN \"$@\"", "--"] ENTRYPOINT ["sh", "-c", "exec $TARGET_BIN \"$@\"", "--"]
@@ -1,4 +1,4 @@
FROM ubuntu:latest FROM ubuntu:22.04
# The binary name of GreptimeDB executable. # The binary name of GreptimeDB executable.
# Defaults to "greptime", but sometimes in other projects it might be different. # Defaults to "greptime", but sometimes in other projects it might be different.
@@ -9,21 +9,16 @@ RUN cp ${NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/lib64/clang/14.0.7/lib/
# Install dependencies. # Install dependencies.
RUN apt-get update && apt-get install -y \ RUN apt-get update && apt-get install -y \
libssl-dev \ libssl-dev \
protobuf-compiler \
curl \ curl \
git \ git \
unzip \
build-essential \ build-essential \
pkg-config \ pkg-config \
openssh-client python3 \
python3-dev \
# Install protoc python3-pip \
ARG PROTOBUF_VERSION=29.3 && pip3 install --upgrade pip \
&& pip3 install pyarrow
RUN curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d protoc3;
RUN mv protoc3/bin/* /usr/local/bin/
RUN mv protoc3/include/* /usr/local/include/
# Trust workdir # Trust workdir
RUN git config --global --add safe.directory /greptimedb RUN git config --global --add safe.directory /greptimedb
@@ -12,21 +12,18 @@ RUN yum install -y epel-release \
openssl \ openssl \
openssl-devel \ openssl-devel \
centos-release-scl \ centos-release-scl \
rh-python38 \
rh-python38-python-devel \
which which
# Install protoc # Install protoc
ARG PROTOBUF_VERSION=29.3 RUN curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip
RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
RUN curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d protoc3;
RUN mv protoc3/bin/* /usr/local/bin/
RUN mv protoc3/include/* /usr/local/include/
# Install Rust # Install Rust
SHELL ["/bin/bash", "-c"] SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /usr/local/bin:/root/.cargo/bin/:$PATH ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
# Install Rust toolchains. # Install Rust toolchains.
ARG RUST_TOOLCHAIN ARG RUST_TOOLCHAIN
@@ -1,4 +1,4 @@
FROM ubuntu:22.04 FROM ubuntu:20.04
# The root path under which contains all the dependencies to build this Dockerfile. # The root path under which contains all the dependencies to build this Dockerfile.
ARG DOCKER_BUILD_ROOT=. ARG DOCKER_BUILD_ROOT=.
@@ -6,34 +6,38 @@ ARG DOCKER_BUILD_ROOT=.
ENV LANG en_US.utf8 ENV LANG en_US.utf8
WORKDIR /greptimedb WORKDIR /greptimedb
# Add PPA for Python 3.10.
RUN apt-get update && \ RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
add-apt-repository ppa:deadsnakes/ppa -y
# Install dependencies. # Install dependencies.
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
libssl-dev \ libssl-dev \
tzdata \ tzdata \
protobuf-compiler \
curl \ curl \
unzip \
ca-certificates \ ca-certificates \
git \ git \
build-essential \ build-essential \
pkg-config pkg-config \
python3.10 \
python3.10-dev
ARG TARGETPLATFORM # https://github.com/GreptimeTeam/greptimedb/actions/runs/10935485852/job/30357457188#step:3:7106
RUN echo "target platform: $TARGETPLATFORM" # `aws-lc-sys` require gcc >= 10.3.0 to work, hence alias to use gcc-10
RUN apt-get remove -y gcc-9 g++-9 cpp-9 && \
apt-get install -y gcc-10 g++-10 cpp-10 make cmake && \
ln -sf /usr/bin/gcc-10 /usr/bin/gcc && ln -sf /usr/bin/g++-10 /usr/bin/g++ && \
ln -sf /usr/bin/gcc-10 /usr/bin/cc && \
ln -sf /usr/bin/g++-10 /usr/bin/cpp && ln -sf /usr/bin/g++-10 /usr/bin/c++ && \
cc --version && gcc --version && g++ --version && cpp --version && c++ --version
ARG PROTOBUF_VERSION=29.3 # Remove Python 3.8 and install pip.
RUN apt-get -y purge python3.8 && \
# Install protobuf, because the one in the apt is too old (v3.12). apt-get -y autoremove && \
RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \ ln -s /usr/bin/python3.10 /usr/bin/python3 && \
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-aarch_64.zip && \ curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10
unzip protoc-${PROTOBUF_VERSION}-linux-aarch_64.zip -d protoc3; \
elif [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d protoc3; \
fi
RUN mv protoc3/bin/* /usr/local/bin/
RUN mv protoc3/include/* /usr/local/include/
# Silence all `safe.directory` warnings, to avoid the "detect dubious repository" error when building with submodules. # Silence all `safe.directory` warnings, to avoid the "detect dubious repository" error when building with submodules.
# Disabling the safe directory check here won't pose extra security issues, because in our usage for this dev build # Disabling the safe directory check here won't pose extra security issues, because in our usage for this dev build
@@ -45,7 +49,11 @@ RUN mv protoc3/include/* /usr/local/include/
# wildcard here. However, that requires the git's config files and the submodules all owned by the very same user. # wildcard here. However, that requires the git's config files and the submodules all owned by the very same user.
# It's troublesome to do this since the dev build runs in Docker, which is under user "root"; while outside the Docker, # It's troublesome to do this since the dev build runs in Docker, which is under user "root"; while outside the Docker,
# it can be a different user that have prepared the submodules. # it can be a different user that have prepared the submodules.
RUN git config --global --add safe.directory '*' RUN git config --global --add safe.directory *
# Install Python dependencies.
COPY $DOCKER_BUILD_ROOT/docker/python/requirements.txt /etc/greptime/requirements.txt
RUN python3 -m pip install -r /etc/greptime/requirements.txt
# Install Rust. # Install Rust.
SHELL ["/bin/bash", "-c"] SHELL ["/bin/bash", "-c"]
@@ -0,0 +1,51 @@
# Use the legacy glibc 2.28.
FROM ubuntu:18.10
ENV LANG en_US.utf8
WORKDIR /greptimedb
# Use old-releases.ubuntu.com to avoid 404s: https://help.ubuntu.com/community/EOLUpgrades.
RUN echo "deb http://old-releases.ubuntu.com/ubuntu/ cosmic main restricted universe multiverse\n\
deb http://old-releases.ubuntu.com/ubuntu/ cosmic-updates main restricted universe multiverse\n\
deb http://old-releases.ubuntu.com/ubuntu/ cosmic-security main restricted universe multiverse" > /etc/apt/sources.list
# Install dependencies.
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
libssl-dev \
tzdata \
curl \
ca-certificates \
git \
build-essential \
unzip \
pkg-config
# Install protoc.
ENV PROTOC_VERSION=25.1
RUN if [ "$(uname -m)" = "x86_64" ]; then \
PROTOC_ZIP=protoc-${PROTOC_VERSION}-linux-x86_64.zip; \
elif [ "$(uname -m)" = "aarch64" ]; then \
PROTOC_ZIP=protoc-${PROTOC_VERSION}-linux-aarch_64.zip; \
else \
echo "Unsupported architecture"; exit 1; \
fi && \
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/${PROTOC_ZIP} && \
unzip -o ${PROTOC_ZIP} -d /usr/local bin/protoc && \
unzip -o ${PROTOC_ZIP} -d /usr/local 'include/*' && \
rm -f ${PROTOC_ZIP}
# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /root/.cargo/bin/:$PATH
# Install Rust toolchains.
ARG RUST_TOOLCHAIN
RUN rustup toolchain install ${RUST_TOOLCHAIN}
# Install cargo-binstall with a specific version to adapt the current rust toolchain.
# Note: if we use the latest version, we may encounter the following `use of unstable library feature 'io_error_downcast'` error.
RUN cargo install cargo-binstall --version 1.6.6 --locked
# Install nextest.
RUN cargo binstall cargo-nextest --no-confirm
@@ -1,66 +0,0 @@
FROM ubuntu:20.04
# The root path under which contains all the dependencies to build this Dockerfile.
ARG DOCKER_BUILD_ROOT=.
ENV LANG en_US.utf8
WORKDIR /greptimedb
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common
# Install dependencies.
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
libssl-dev \
tzdata \
curl \
unzip \
ca-certificates \
git \
build-essential \
pkg-config
ARG TARGETPLATFORM
RUN echo "target platform: $TARGETPLATFORM"
ARG PROTOBUF_VERSION=29.3
# Install protobuf, because the one in the apt is too old (v3.12).
RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-aarch_64.zip && \
unzip protoc-${PROTOBUF_VERSION}-linux-aarch_64.zip -d protoc3; \
elif [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d protoc3; \
fi
RUN mv protoc3/bin/* /usr/local/bin/
RUN mv protoc3/include/* /usr/local/include/
# Silence all `safe.directory` warnings, to avoid the "detect dubious repository" error when building with submodules.
# Disabling the safe directory check here won't pose extra security issues, because in our usage for this dev build
# image, we use it solely on our own environment (that github action's VM, or ECS created dynamically by ourselves),
# and the repositories are pulled from trusted sources (still us, of course). Doing so does not violate the intention
# of the Git's addition to the "safe.directory" at the first place (see the commit message here:
# https://github.com/git/git/commit/8959555cee7ec045958f9b6dd62e541affb7e7d9).
# There's also another solution to this, that we add the desired submodules to the safe directory, instead of using
# wildcard here. However, that requires the git's config files and the submodules all owned by the very same user.
# It's troublesome to do this since the dev build runs in Docker, which is under user "root"; while outside the Docker,
# it can be a different user that have prepared the submodules.
RUN git config --global --add safe.directory '*'
# Install Rust.
SHELL ["/bin/bash", "-c"]
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
ENV PATH /root/.cargo/bin/:$PATH
# Install Rust toolchains.
ARG RUST_TOOLCHAIN
RUN rustup toolchain install ${RUST_TOOLCHAIN}
# Install cargo-binstall with a specific version to adapt the current rust toolchain.
# Note: if we use the latest version, we may encounter the following `use of unstable library feature 'io_error_downcast'` error.
# compile from source take too long, so we use the precompiled binary instead
COPY $DOCKER_BUILD_ROOT/docker/dev-builder/binstall/pull_binstall.sh /usr/local/bin/pull_binstall.sh
RUN chmod +x /usr/local/bin/pull_binstall.sh && /usr/local/bin/pull_binstall.sh
# Install nextest.
RUN cargo binstall cargo-nextest --no-confirm

View File

@@ -25,7 +25,7 @@ services:
- --initial-cluster-state=new - --initial-cluster-state=new
- *etcd_initial_cluster_token - *etcd_initial_cluster_token
volumes: volumes:
- ./greptimedb-cluster-docker-compose/etcd0:/var/lib/etcd - /tmp/greptimedb-cluster-docker-compose/etcd0:/var/lib/etcd
healthcheck: healthcheck:
test: [ "CMD", "etcdctl", "--endpoints=http://etcd0:2379", "endpoint", "health" ] test: [ "CMD", "etcdctl", "--endpoints=http://etcd0:2379", "endpoint", "health" ]
interval: 5s interval: 5s
@@ -34,63 +34,19 @@ services:
networks: networks:
- greptimedb - greptimedb
etcd-tls:
<<: *etcd_common_settings
container_name: etcd-tls
ports:
- 2378:2378
- 2381:2381
command:
- --name=etcd-tls
- --data-dir=/var/lib/etcd
- --initial-advertise-peer-urls=https://etcd-tls:2381
- --listen-peer-urls=https://0.0.0.0:2381
- --listen-client-urls=https://0.0.0.0:2378
- --advertise-client-urls=https://etcd-tls:2378
- --heartbeat-interval=250
- --election-timeout=1250
- --initial-cluster=etcd-tls=https://etcd-tls:2381
- --initial-cluster-state=new
- --initial-cluster-token=etcd-tls-cluster
- --cert-file=/certs/server.crt
- --key-file=/certs/server-key.pem
- --peer-cert-file=/certs/server.crt
- --peer-key-file=/certs/server-key.pem
- --trusted-ca-file=/certs/ca.crt
- --peer-trusted-ca-file=/certs/ca.crt
- --client-cert-auth
- --peer-client-cert-auth
volumes:
- ./greptimedb-cluster-docker-compose/etcd-tls:/var/lib/etcd
- ./greptimedb-cluster-docker-compose/certs:/certs:ro
environment:
- ETCDCTL_API=3
- ETCDCTL_CACERT=/certs/ca.crt
- ETCDCTL_CERT=/certs/server.crt
- ETCDCTL_KEY=/certs/server-key.pem
healthcheck:
test: [ "CMD", "etcdctl", "--endpoints=https://etcd-tls:2378", "--cacert=/certs/ca.crt", "--cert=/certs/server.crt", "--key=/certs/server-key.pem", "endpoint", "health" ]
interval: 10s
timeout: 5s
retries: 5
networks:
- greptimedb
metasrv: metasrv:
image: *greptimedb_image image: *greptimedb_image
container_name: metasrv container_name: metasrv
ports: ports:
- 3002:3002 - 3002:3002
- 3000:3000
command: command:
- metasrv - metasrv
- start - start
- --rpc-bind-addr=0.0.0.0:3002 - --bind-addr=0.0.0.0:3002
- --rpc-server-addr=metasrv:3002 - --server-addr=metasrv:3002
- --store-addrs=etcd0:2379 - --store-addrs=etcd0:2379
- --http-addr=0.0.0.0:3000
healthcheck: healthcheck:
test: [ "CMD", "curl", "-f", "http://metasrv:3000/health" ] test: [ "CMD", "curl", "-f", "http://metasrv:3002/health" ]
interval: 5s interval: 5s
timeout: 3s timeout: 3s
retries: 5 retries: 5
@@ -110,18 +66,17 @@ services:
- datanode - datanode
- start - start
- --node-id=0 - --node-id=0
- --data-home=/greptimedb_data - --rpc-addr=0.0.0.0:3001
- --rpc-bind-addr=0.0.0.0:3001 - --rpc-hostname=datanode0:3001
- --rpc-server-addr=datanode0:3001
- --metasrv-addrs=metasrv:3002 - --metasrv-addrs=metasrv:3002
- --http-addr=0.0.0.0:5000 - --http-addr=0.0.0.0:5000
volumes: volumes:
- ./greptimedb-cluster-docker-compose/datanode0:/greptimedb_data - /tmp/greptimedb-cluster-docker-compose/datanode0:/tmp/greptimedb
healthcheck: healthcheck:
test: [ "CMD", "curl", "-fv", "http://datanode0:5000/health" ] test: [ "CMD", "curl", "-f", "http://datanode0:5000/health" ]
interval: 5s interval: 5s
timeout: 3s timeout: 3s
retries: 10 retries: 5
depends_on: depends_on:
metasrv: metasrv:
condition: service_healthy condition: service_healthy
@@ -141,7 +96,7 @@ services:
- start - start
- --metasrv-addrs=metasrv:3002 - --metasrv-addrs=metasrv:3002
- --http-addr=0.0.0.0:4000 - --http-addr=0.0.0.0:4000
- --rpc-bind-addr=0.0.0.0:4001 - --rpc-addr=0.0.0.0:4001
- --mysql-addr=0.0.0.0:4002 - --mysql-addr=0.0.0.0:4002
- --postgres-addr=0.0.0.0:4003 - --postgres-addr=0.0.0.0:4003
healthcheck: healthcheck:
@@ -160,23 +115,16 @@ services:
container_name: flownode0 container_name: flownode0
ports: ports:
- 4004:4004 - 4004:4004
- 4005:4005
command: command:
- flownode - flownode
- start - start
- --node-id=0 - --node-id=0
- --metasrv-addrs=metasrv:3002 - --metasrv-addrs=metasrv:3002
- --rpc-bind-addr=0.0.0.0:4004 - --rpc-addr=0.0.0.0:4004
- --rpc-server-addr=flownode0:4004 - --rpc-hostname=flownode0:4004
- --http-addr=0.0.0.0:4005
depends_on: depends_on:
frontend0: frontend0:
condition: service_healthy condition: service_healthy
healthcheck:
test: [ "CMD", "curl", "-f", "http://flownode0:4005/health" ]
interval: 5s
timeout: 3s
retries: 5
networks: networks:
- greptimedb - greptimedb

View File

@@ -0,0 +1,5 @@
numpy>=1.24.2
pandas>=1.5.3
pyarrow>=11.0.0
requests>=2.28.2
scipy>=1.10.1

Binary file not shown. (Before: 173 KiB)

View File

@@ -48,4 +48,4 @@ Please refer to [SQL query](./query.sql) for GreptimeDB and Clickhouse, and [que
## Addition ## Addition
- You can tune GreptimeDB's configuration to get better performance. - You can tune GreptimeDB's configuration to get better performance.
- You can setup GreptimeDB to use S3 as storage, see [here](https://docs.greptime.com/user-guide/deployments-administration/configuration#storage-options). - You can setup GreptimeDB to use S3 as storage, see [here](https://docs.greptime.com/user-guide/deployments/configuration#storage-options).

View File

@@ -1,40 +0,0 @@
# TSBS benchmark - v0.12.0
## Environment
### Amazon EC2
| | |
|---------|-------------------------|
| Machine | c5d.2xlarge |
| CPU | 8 core |
| Memory | 16GB |
| Disk | 100GB (GP3) |
| OS | Ubuntu Server 24.04 LTS |
## Write performance
| Environment | Ingest rate (rows/s) |
|-----------------|----------------------|
| EC2 c5d.2xlarge | 326839.28 |
## Query performance
| Query type | EC2 c5d.2xlarge (ms) |
|-----------------------|----------------------|
| cpu-max-all-1 | 12.46 |
| cpu-max-all-8 | 24.20 |
| double-groupby-1 | 673.08 |
| double-groupby-5 | 963.99 |
| double-groupby-all | 1330.05 |
| groupby-orderby-limit | 952.46 |
| high-cpu-1 | 5.08 |
| high-cpu-all | 4638.57 |
| lastpoint | 591.02 |
| single-groupby-1-1-1 | 4.06 |
| single-groupby-1-1-12 | 4.73 |
| single-groupby-1-8-1 | 8.23 |
| single-groupby-5-1-1 | 4.61 |
| single-groupby-5-1-12 | 5.61 |
| single-groupby-5-8-1 | 9.74 |

View File

@@ -4,28 +4,13 @@
example: example:
```bash ```bash
curl --data "trace,flow=debug" 127.0.0.1:4000/debug/log_level curl --data "trace;flow=debug" 127.0.0.1:4000/debug/log_level
``` ```
And database will reply with something like: And database will reply with something like:
```bash ```bash
Log Level changed from Some("info") to "trace,flow=debug"% Log Level changed from Some("info") to "trace;flow=debug"%
``` ```
The data is a string in the format of `global_level,module1=level1,module2=level2,...` that follows the same rule of `RUST_LOG`. The data is a string in the format of `global_level;module1=level1;module2=level2;...` that follow the same rule of `RUST_LOG`.
The module is the module name of the log, and the level is the log level. The log level can be one of the following: `trace`, `debug`, `info`, `warn`, `error`, `off`(case insensitive). The module is the module name of the log, and the level is the log level. The log level can be one of the following: `trace`, `debug`, `info`, `warn`, `error`, `off`(case insensitive).
# Enable/Disable Trace on the Fly
## HTTP API
example:
```bash
curl --data "true" 127.0.0.1:4000/debug/enable_trace
```
And database will reply with something like:
```
trace enabled%
```
Possible values are "true" or "false".

View File

@@ -14,7 +14,7 @@ impl SqlQueryHandler for Instance {
``` ```
Normally, when a SQL query arrives at GreptimeDB, the `do_query` method will be called. After some parsing work, the SQL Normally, when a SQL query arrives at GreptimeDB, the `do_query` method will be called. After some parsing work, the SQL
will be fed into `StatementExecutor`: will be feed into `StatementExecutor`:
```rust ```rust
// in Frontend Instance: // in Frontend Instance:
@@ -27,7 +27,7 @@ an example.
Now, what if the statements should be handled differently for GreptimeDB Standalone and Cluster? You can see there's Now, what if the statements should be handled differently for GreptimeDB Standalone and Cluster? You can see there's
a `SqlStatementExecutor` field in `StatementExecutor`. Each GreptimeDB Standalone and Cluster has its own implementation a `SqlStatementExecutor` field in `StatementExecutor`. Each GreptimeDB Standalone and Cluster has its own implementation
of `SqlStatementExecutor`. If you are going to implement the statements differently in the two modes ( of `SqlStatementExecutor`. If you are going to implement the statements differently in the two mode (
like `CREATE TABLE`), you have to implement them in their own `SqlStatementExecutor`s. like `CREATE TABLE`), you have to implement them in their own `SqlStatementExecutor`s.
Summarize as the diagram below: Summarize as the diagram below:

View File

@@ -3,7 +3,7 @@
## HTTP API ## HTTP API
Sample at 99 Hertz, for 5 seconds, output report in [protobuf format](https://github.com/google/pprof/blob/master/proto/profile.proto). Sample at 99 Hertz, for 5 seconds, output report in [protobuf format](https://github.com/google/pprof/blob/master/proto/profile.proto).
```bash ```bash
curl -X POST -s '0:4000/debug/prof/cpu' > /tmp/pprof.out curl -s '0:4000/debug/prof/cpu' > /tmp/pprof.out
``` ```
Then you can use `pprof` command with the protobuf file. Then you can use `pprof` command with the protobuf file.
@@ -13,10 +13,10 @@ go tool pprof -top /tmp/pprof.out
Sample at 99 Hertz, for 60 seconds, output report in flamegraph format. Sample at 99 Hertz, for 60 seconds, output report in flamegraph format.
```bash ```bash
curl -X POST -s '0:4000/debug/prof/cpu?seconds=60&output=flamegraph' > /tmp/pprof.svg curl -s '0:4000/debug/prof/cpu?seconds=60&output=flamegraph' > /tmp/pprof.svg
``` ```
Sample at 49 Hertz, for 10 seconds, output report in text format. Sample at 49 Hertz, for 10 seconds, output report in text format.
```bash ```bash
curl -X POST -s '0:4000/debug/prof/cpu?seconds=10&frequency=49&output=text' > /tmp/pprof.txt curl -s '0:4000/debug/prof/cpu?seconds=10&frequency=49&output=text' > /tmp/pprof.txt
``` ```

View File

@@ -1,19 +1,9 @@
# Profile memory usage of GreptimeDB # Profile memory usage of GreptimeDB
This crate provides an easy approach to dump memory profiling info. A set of ready to use scripts is provided in [docs/how-to/memory-profile-scripts](./memory-profile-scripts/scripts). This crate provides an easy approach to dump memory profiling info.
## Prerequisites ## Prerequisites
### jemalloc ### jemalloc
jeprof is already compiled in the target directory of GreptimeDB. You can find the binary and use it.
```
# find jeprof binary
find . -name 'jeprof'
# add executable permission
chmod +x <path_to_jeprof>
```
The path is usually under `./target/${PROFILE}/build/tikv-jemalloc-sys-${HASH}/out/build/bin/jeprof`.
The default version of jemalloc installed from the package manager may not have the `--collapsed` option.
You may need to check whether the `jeprof` version is >= `5.3.0` if you want to install it from the package manager.
```bash ```bash
# for macOS # for macOS
brew install jemalloc brew install jemalloc
@@ -30,68 +20,16 @@ curl https://raw.githubusercontent.com/brendangregg/FlameGraph/master/flamegraph
## Profiling ## Profiling
### Enable memory profiling for greptimedb binary
Start GreptimeDB instance with environment variables: Start GreptimeDB instance with environment variables:
```bash ```bash
# for Linux MALLOC_CONF=prof:true,lg_prof_interval:28 ./target/debug/greptime standalone start
MALLOC_CONF=prof:true ./target/debug/greptime standalone start
# for macOS
_RJEM_MALLOC_CONF=prof:true ./target/debug/greptime standalone start
``` ```
### Memory profiling for greptimedb docker image
We have memory profiling enabled and activated by default in our official docker
image.
This behavior is controlled by configuration `enable_heap_profiling`:
```toml
[memory]
# Whether to enable heap profiling activation during startup.
# Default is true.
enable_heap_profiling = true
```
To disable memory profiling, set `enable_heap_profiling` to `false`.
### Memory profiling control
You can control heap profiling activation using the new HTTP APIs:
```bash
# Check current profiling status
curl -X GET localhost:4000/debug/prof/mem/status
# Activate heap profiling (if not already active)
curl -X POST localhost:4000/debug/prof/mem/activate
# Deactivate heap profiling
curl -X POST localhost:4000/debug/prof/mem/deactivate
# Activate gdump feature that dumps memory profiling data every time virtual memory usage exceeds previous maximum value.
curl -X POST localhost:4000/debug/prof/mem/gdump -d 'activate=true'
# Deactivate gdump.
curl -X POST localhost:4000/debug/prof/mem/gdump -d 'activate=false'
# Retrieve current gdump status.
curl -X GET localhost:4000/debug/prof/mem/gdump
```
### Dump memory profiling data
Dump memory profiling data through HTTP API: Dump memory profiling data through HTTP API:
```bash ```bash
curl -X POST localhost:4000/debug/prof/mem > greptime.hprof curl localhost:4000/debug/prof/mem > greptime.hprof
# or output flamegraph directly
curl -X POST "localhost:4000/debug/prof/mem?output=flamegraph" > greptime.svg
# or output pprof format
curl -X POST "localhost:4000/debug/prof/mem?output=proto" > greptime.pprof
``` ```
You can periodically dump profiling data and compare them to find the delta memory usage. You can periodically dump profiling data and compare them to find the delta memory usage.
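For example, assuming `jeprof` and `flamegraph.pl` are available in the current directory (see the memory-profile scripts elsewhere in this repository) and using illustrative file names, a delta flame graph between two dumps can be produced like this:
```bash
# Take two dumps a few minutes apart.
curl -X POST localhost:4000/debug/prof/mem > greptime-1.hprof
sleep 300
curl -X POST localhost:4000/debug/prof/mem > greptime-2.hprof

# Use the first dump as the baseline; only the allocations that grew remain.
./jeprof ./target/debug/greptime --base greptime-1.hprof greptime-2.hprof --collapse | ./flamegraph.pl > delta.svg
```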

View File

@@ -0,0 +1,72 @@
Currently, our query engine is based on DataFusion, so all aggregate functions are executed by DataFusion through its UDAF interface. You can find DataFusion's UDAF example [here](https://github.com/apache/arrow-datafusion/blob/arrow2/datafusion-examples/examples/simple_udaf.rs). Basically, we provide the same way as DataFusion to write aggregate functions: both are centered on a struct called "Accumulator" that accumulates states along the way during aggregation.
However, DataFusion's UDAF implementation has a huge restriction: it requires the user to provide a concrete "Accumulator". Take the `Median` aggregate function for example: to aggregate a `u32` column, you have to write a `MedianU32` and use `SELECT MEDIANU32(x)` in SQL, and `MedianU32` cannot be used to aggregate an `i32` column. Alternatively, you can use a special type that can hold all kinds of data (like our `Value` enum or Arrow's `ScalarValue`) and `match` all the way through the aggregate calculations. That works, though it is rather tedious. (But I think it's DataFusion's preferred way to write a UDAF.)
So is there a way to make an aggregate function that automatically matches the input data's type? For example, a `Median` aggregator that works on both `u32` and `i32` columns? The answer is yes, because we found a way to bypass DataFusion's restriction, namely that DataFusion simply doesn't pass the input data's type when creating an Accumulator.
> There's an example in `my_sum_udaf_example.rs`; take it as a quick start.
# 1. Impl `AggregateFunctionCreator` trait for your accumulator creator.
You must first define a struct that will be used to create your accumulator. For example,
```Rust
#[as_aggr_func_creator]
#[derive(Debug, AggrFuncTypeStore)]
struct MySumAccumulatorCreator {}
```
The attribute macro `#[as_aggr_func_creator]` and the derive macro `#[derive(Debug, AggrFuncTypeStore)]` must both be applied to the struct. They work together to provide storage for the aggregate function's input data types, which is needed for creating a generic accumulator later.
> Note that the `as_aggr_func_creator` macro adds fields to the struct, so the struct cannot be defined as a fieldless struct like `struct Foo;`, nor as a newtype like `struct Foo(bar)`.
Then impl the `AggregateFunctionCreator` trait on it. The definition of the trait is:
```Rust
pub trait AggregateFunctionCreator: Send + Sync + Debug {
fn creator(&self) -> AccumulatorCreatorFunction;
fn output_type(&self) -> ConcreteDataType;
fn state_types(&self) -> Vec<ConcreteDataType>;
}
```
You can use the input data's types in the methods that return the output type and state types (just invoke `input_types()`).
The output type is the data type of the aggregate function's output. For example, the `SUM` aggregate function's output type is `u64` for a `u32` column. The state types are the types of the accumulator's internal states. Take the `AVG` aggregate function on an `i32` column as an example: its state types are `i64` (for the sum) and `u64` (for the count).
The `creator` function is where you define how an accumulator (that will be used in DataFusion) is created. You define "how" to create the accumulator (instead of "what" to create), using the input data's types as arguments. With the input data types known, you can create the accumulator generically.
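To make this concrete, below is a minimal sketch of what such an impl could look like for the `MySumAccumulatorCreator` above. It is not the exact code from `my_sum_udaf_example.rs`: the closure behind `AccumulatorCreatorFunction`, the hypothetical `widen_for_sum` helper, and the assumption that `input_types()` returns the types directly are all illustrative.
```Rust
impl AggregateFunctionCreator for MySumAccumulatorCreator {
    fn creator(&self) -> AccumulatorCreatorFunction {
        // Return a closure that receives the concrete input types and builds
        // the matching accumulator, e.g. a u64-summing accumulator for u32
        // input and an i64-summing one for i32 input. The exact closure type
        // behind `AccumulatorCreatorFunction` is omitted in this sketch.
        todo!("see my_sum_udaf_example.rs for the real closure")
    }

    fn output_type(&self) -> ConcreteDataType {
        // `input_types()` is the storage provided by `#[as_aggr_func_creator]`
        // and `AggrFuncTypeStore` (it may return a `Result` in the real API).
        let input_type = &self.input_types()[0];
        // Hypothetical helper: map the input type to its widened counterpart,
        // e.g. u32 -> u64, i32 -> i64.
        widen_for_sum(input_type)
    }

    fn state_types(&self) -> Vec<ConcreteDataType> {
        // SUM's only internal state is the running total, which has the same
        // type as the output.
        vec![self.output_type()]
    }
}
```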
# 2. Impl `Accumulator` trait for your accumulator.
The accumulator is where you store the aggregate calculation state and evaluate a result. You must impl the `Accumulator` trait for it. The trait's definition is:
```Rust
pub trait Accumulator: Send + Sync + Debug {
fn state(&self) -> Result<Vec<Value>>;
fn update_batch(&mut self, values: &[VectorRef]) -> Result<()>;
fn merge_batch(&mut self, states: &[VectorRef]) -> Result<()>;
fn evaluate(&self) -> Result<Value>;
}
```
DataFusion basically executes an aggregation like this:
1. Partition all input data for the aggregation, and create an accumulator for each partition.
2. Call `update_batch` on each accumulator with its partition's data, letting you update your aggregate calculation.
3. Call `state` to get each accumulator's internal state, the intermediate calculation result.
4. Call `merge_batch` to merge all accumulators' internal states into one.
5. Execute `evaluate` on the chosen one to get the final calculation result.
Once you know the meaning of each method, you can easily write your accumulator. You can refer to the `Median` accumulator or the `SUM` accumulator defined in `my_sum_udaf_example.rs` for more details; a rough sketch is also shown below.
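For illustration only, a sum accumulator over a `u32` column might look like the sketch below. It is not the code from `my_sum_udaf_example.rs`; in particular, the way the input vectors are iterated (`len()`/`get(i)`) and how `Value`s are constructed are assumptions.
```Rust
#[derive(Debug, Default)]
struct MySumAccumulator {
    sum: u64,
}

impl Accumulator for MySumAccumulator {
    // Step 3: expose the internal state (just the running total for SUM).
    fn state(&self) -> Result<Vec<Value>> {
        Ok(vec![Value::from(self.sum)])
    }

    // Step 2: fold one batch of raw input values into the running total.
    fn update_batch(&mut self, values: &[VectorRef]) -> Result<()> {
        let column = &values[0];
        for i in 0..column.len() {
            // Assumed vector API: `get(i)` yields the `Value` at row `i`.
            if let Value::UInt32(v) = column.get(i) {
                self.sum += v as u64;
            }
        }
        Ok(())
    }

    // Step 4: merge states produced by the other partitions' accumulators.
    fn merge_batch(&mut self, states: &[VectorRef]) -> Result<()> {
        let column = &states[0];
        for i in 0..column.len() {
            if let Value::UInt64(v) = column.get(i) {
                self.sum += v;
            }
        }
        Ok(())
    }

    // Step 5: produce the final result.
    fn evaluate(&self) -> Result<Value> {
        Ok(Value::from(self.sum))
    }
}
```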
# 3. Register your aggregate function to our query engine.
You can call the `register_aggregate_function` method on the query engine to register your aggregate function. To do that, you have to create an instance of the struct `AggregateFunctionMeta`. The struct has three fields. The first is your aggregate function's name. The function name is case-sensitive due to DataFusion's restriction, and we strongly recommend using a lowercase name. If you have to use an uppercase name, wrap your aggregate function in quotation marks. For example, if you define an aggregate function named "my_aggr", you can use "`SELECT MY_AGGR(x)`"; if you define "my_AGGR", you have to use "`SELECT "my_AGGR"(x)`".
The second field is `arg_counts`, the count of the arguments. Take the `percentile` accumulator, which calculates the p-number (percentile) of a column, as an example: it needs both the column values and the value of p to do the calculation, so its argument count is two.
The third field is a function that creates the accumulator creator you defined in step 1 above. Creating a creator is a bit convoluted, but it is how we make DataFusion use a newly created aggregate function each time it executes a SQL statement, preventing the stored input types from affecting each other. A good starting point for the key details is the `get_aggregate_meta` method of our `DfContextProviderAdapter` struct. A hedged sketch of the registration call is shown below.
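A hedged sketch of the registration call, assuming a constructor shaped like `AggregateFunctionMeta::new(name, args_count, creator_factory)`; check the query engine code for the actual signature:
```Rust
query_engine.register_aggregate_function(AggregateFunctionMeta::new(
    // The (lowercase) function name used in SQL.
    "my_sum",
    // Number of arguments the function takes.
    1,
    // Factory that builds a fresh accumulator creator (from step 1) per query.
    Arc::new(|| Arc::new(MySumAccumulatorCreator::default())),
));
```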
# (Optional) 4. Make your aggregate function automatically registered.
If you've written a great aggregate function and want to let everyone use it, you can make it automatically registered in our query engine at start time. It's quite simple: just refer to the `AggregateFunctions::register` function in `common/function/src/scalars/aggregate/mod.rs`.

View File

@@ -3,7 +3,7 @@
This document introduces how to write fuzz tests in GreptimeDB. This document introduces how to write fuzz tests in GreptimeDB.
## What is a fuzz test ## What is a fuzz test
Fuzz test is tool that leverages deterministic random generation to assist in finding bugs. The goal of fuzz tests is to identify inputs generated by the fuzzer that cause system panics, crashes, or unexpected behaviors to occur. And we are using the [cargo-fuzz](https://github.com/rust-fuzz/cargo-fuzz) to run our fuzz test targets. Fuzz test is tool that leverage deterministic random generation to assist in finding bugs. The goal of fuzz tests is to identify inputs generated by the fuzzer that cause system panics, crashes, or unexpected behaviors to occur. And we are using the [cargo-fuzz](https://github.com/rust-fuzz/cargo-fuzz) to run our fuzz test targets.
## Why we need them ## Why we need them
- Find bugs by leveraging random generation - Find bugs by leveraging random generation

View File

@@ -1,52 +0,0 @@
# Memory Analysis Process
This section will guide you through the process of analyzing memory usage for greptimedb.
1. Get the `jeprof` tool script, see the next section("Getting the `jeprof` tool") for details.
2. After starting `greptimedb`(with env var `MALLOC_CONF=prof:true`), execute the `dump.sh` script with the PID of the `greptimedb` process as an argument. This continuously monitors memory usage and captures profiles when exceeding thresholds (e.g. +20MB within 10 minutes). Outputs `greptime-{timestamp}.gprof` files.
3. With 2-3 gprof files, run `gen_flamegraph.sh` in the same environment to generate flame graphs showing memory allocation call stacks.
4. **NOTE:** The `gen_flamegraph.sh` script requires `jeprof` and, optionally, `flamegraph.pl` to be in the current directory. If you need to generate flame graphs right away, run the `get_flamegraph_tool.sh` script, which downloads the flame graph generation tool `flamegraph.pl` to the current directory.
The usage of `gen_flamegraph.sh` is:
`Usage: ./gen_flamegraph.sh <binary_path> <gprof_directory>`
where `<binary_path>` is the path to the greptimedb binary, `<gprof_directory>` is the directory containing the gprof files(the directory `dump.sh` is dumping profiles to).
Example call: `./gen_flamegraph.sh ./greptime .`
Generating the flame graph might take a few minutes. The generated flame graphs are located in the `<gprof_directory>/flamegraphs` directory. Or if no `flamegraph.pl` is found, it will only contain `.collapse` files which is also fine.
5. You can send the generated flame graphs(the entire folder of `<gprof_directory>/flamegraphs`) to developers for further analysis.
## Getting the `jeprof` tool
There are three ways to get `jeprof`, listed here from simple to complex. Using any one of them is fine, as long as it is done in the same environment that `greptimedb` will be running on:
1. If you are compiling greptimedb from source, then `jeprof` is already produced during compilation. After running `cargo build`, execute `find_compiled_jeprof.sh`. This will copy `jeprof` to the current directory.
2. Or, if you have the Rust toolchain installed locally, simply follow these commands:
```bash
cargo new get_jeprof
cd get_jeprof
```
Then add this line to `Cargo.toml`:
```toml
[dependencies]
tikv-jemalloc-ctl = { version = "0.6", features = ["use_std", "stats"] }
```
then run:
```bash
cargo build
```
After that, the `jeprof` tool is produced. Now run `find_compiled_jeprof.sh` in the current directory; it will copy the `jeprof` tool to the current directory.
3. Compile jemalloc from source
You can first clone this repo and check out this commit:
```bash
git clone https://github.com/tikv/jemalloc.git
cd jemalloc
git checkout e13ca993e8ccb9ba9847cc330696e02839f328f7
```
then run:
```bash
./configure
make
```
and `jeprof` is in `.bin/` directory. Copy it to the current directory.

View File

@@ -1,78 +0,0 @@
#!/bin/bash
# Monitors greptime process memory usage every 10 minutes
# Triggers memory profile capture via `curl -X POST localhost:4000/debug/prof/mem > greptime-{timestamp}.gprof`
# when memory increases by more than 20MB since last check
# Generated profiles can be analyzed using flame graphs as described in `how-to-profile-memory.md`
# (jeprof is compiled with the database - see documentation)
# Alternative: Share binaries + profiles for analysis (Docker images preferred)
# Threshold in Kilobytes (20 MB)
threshold_kb=$((20 * 1024))
sleep_interval=$((10 * 60))
# Variable to store the last measured memory usage in KB
last_mem_kb=0
echo "Starting memory monitoring for 'greptime' process..."
while true; do
# Check if PID is provided as an argument
if [ -z "$1" ]; then
echo "$(date): PID must be provided as a command-line argument."
exit 1
fi
pid="$1"
# Validate that the PID is a number
if ! [[ "$pid" =~ ^[0-9]+$ ]]; then
echo "$(date): Invalid PID: '$pid'. PID must be a number."
exit 1
fi
# Get the current Resident Set Size (RSS) in Kilobytes
current_mem_kb=$(ps -o rss= -p "$pid")
# Check if ps command was successful and returned a number
if ! [[ "$current_mem_kb" =~ ^[0-9]+$ ]]; then
echo "$(date): Failed to get memory usage for PID $pid. Skipping check."
# Keep last_mem_kb to avoid false positives if the process briefly becomes unreadable.
continue
fi
echo "$(date): Current memory usage for PID $pid: ${current_mem_kb} KB"
# Compare with the last measurement
# if it's the first run, also do a baseline dump just to make sure we can dump
diff_kb=$((current_mem_kb - last_mem_kb))
echo "$(date): Memory usage change since last check: ${diff_kb} KB"
if [ "$diff_kb" -gt "$threshold_kb" ]; then
echo "$(date): Memory increase (${diff_kb} KB) exceeded threshold (${threshold_kb} KB). Dumping profile..."
timestamp=$(date +%Y%m%d%H%M%S)
profile_file="greptime-${timestamp}.gprof"
# Execute curl and capture output to file
if curl -sf -X POST localhost:4000/debug/prof/mem > "$profile_file"; then
echo "$(date): Memory profile saved to $profile_file"
else
echo "$(date): Failed to dump memory profile (curl exit code: $?)."
# Remove the potentially empty/failed profile file
rm -f "$profile_file"
fi
else
echo "$(date): Memory increase (${diff_kb} KB) is within the threshold (${threshold_kb} KB)."
fi
# Update the last memory usage
last_mem_kb=$current_mem_kb
# Wait for the next check interval (10 minutes)
echo "$(date): Sleeping for $sleep_interval seconds..."
sleep $sleep_interval
done
echo "Memory monitoring script stopped." # This line might not be reached in normal operation

View File

@@ -1,15 +0,0 @@
#!/bin/bash
# Locates compiled jeprof binary (memory analysis tool) after cargo build
# Copies it to current directory from target/ build directories
JPROF_PATH=$(find . -name 'jeprof' -print -quit)
if [ -n "$JPROF_PATH" ]; then
echo "Found jeprof at $JPROF_PATH"
cp "$JPROF_PATH" .
chmod +x jeprof
echo "Copied jeprof to current directory and made it executable."
else
echo "jeprof not found"
exit 1
fi

View File

@@ -1,89 +0,0 @@
#!/bin/bash
# Generate flame graphs from a series of `.gprof` files
# First argument: Path to the binary executable
# Second argument: Path to directory containing gprof files
# Requires `jeprof` and `flamegraph.pl` in current directory
# What this script essentially does is:
# ./jeprof <binary> <gprof> --collapse | ./flamegraph.pl > <output>
# For differential analysis between consecutive profiles:
# ./jeprof <binary> --base <gprof1> <gprof2> --collapse | ./flamegraph.pl > <output_diff>
set -e # Exit immediately if a command exits with a non-zero status.
# Check for required tools
if [ ! -f "./jeprof" ]; then
echo "Error: jeprof not found in the current directory."
exit 1
fi
if [ ! -f "./flamegraph.pl" ]; then
echo "Error: flamegraph.pl not found in the current directory."
exit 1
fi
# Check arguments
if [ "$#" -ne 2 ]; then
echo "Usage: $0 <binary_path> <gprof_directory>"
exit 1
fi
BINARY_PATH=$1
GPROF_DIR=$2
OUTPUT_DIR="${GPROF_DIR}/flamegraphs" # Store outputs in a subdirectory
if [ ! -f "$BINARY_PATH" ]; then
echo "Error: Binary file not found at $BINARY_PATH"
exit 1
fi
if [ ! -d "$GPROF_DIR" ]; then
echo "Error: gprof directory not found at $GPROF_DIR"
exit 1
fi
mkdir -p "$OUTPUT_DIR"
echo "Generating flamegraphs in $OUTPUT_DIR"
# Find and sort gprof files
# Use find + sort -V for natural sort of version numbers if present in filenames
# Use null-terminated strings for safety with find/xargs/sort
mapfile -d $'\0' gprof_files < <(find "$GPROF_DIR" -maxdepth 1 -name '*.gprof' -print0 | sort -zV)
if [ ${#gprof_files[@]} -eq 0 ]; then
echo "No .gprof files found in $GPROF_DIR"
exit 0
fi
prev_gprof=""
# Generate flamegraphs
for gprof_file in "${gprof_files[@]}"; do
# Skip empty entries if any
if [ -z "$gprof_file" ]; then
continue
fi
filename=$(basename "$gprof_file" .gprof)
output_collapse="${OUTPUT_DIR}/${filename}.collapse"
output_svg="${OUTPUT_DIR}/${filename}.svg"
echo "Generating collapse file for $gprof_file -> $output_collapse"
./jeprof "$BINARY_PATH" "$gprof_file" --collapse > "$output_collapse"
echo "Generating flamegraph for $gprof_file -> $output_svg"
./flamegraph.pl "$output_collapse" > "$output_svg" || true
# Generate diff flamegraph if not the first file
if [ -n "$prev_gprof" ]; then
prev_filename=$(basename "$prev_gprof" .gprof)
diff_output_collapse="${OUTPUT_DIR}/${prev_filename}_vs_${filename}_diff.collapse"
diff_output_svg="${OUTPUT_DIR}/${prev_filename}_vs_${filename}_diff.svg"
echo "Generating diff collapse file for $prev_gprof vs $gprof_file -> $diff_output_collapse"
./jeprof "$BINARY_PATH" --base "$prev_gprof" "$gprof_file" --collapse > "$diff_output_collapse"
echo "Generating diff flamegraph for $prev_gprof vs $gprof_file -> $diff_output_svg"
./flamegraph.pl "$diff_output_collapse" > "$diff_output_svg" || true
fi
prev_gprof="$gprof_file"
done
echo "Flamegraph generation complete."

View File

@@ -1,44 +0,0 @@
#!/bin/bash
# Generate flame graphs from .collapse files
# Argument: Path to directory containing collapse files
# Requires `flamegraph.pl` in current directory
# Check if flamegraph.pl exists
if [ ! -f "./flamegraph.pl" ]; then
echo "Error: flamegraph.pl not found in the current directory."
exit 1
fi
# Check if directory argument is provided
if [ -z "$1" ]; then
echo "Usage: $0 <collapse_directory>"
exit 1
fi
COLLAPSE_DIR=$1
# Check if the provided argument is a directory
if [ ! -d "$COLLAPSE_DIR" ]; then
echo "Error: '$COLLAPSE_DIR' is not a valid directory."
exit 1
fi
echo "Generating flame graphs from collapse files in '$COLLAPSE_DIR'..."
# Find and process each .collapse file
find "$COLLAPSE_DIR" -maxdepth 1 -name "*.collapse" -print0 | while IFS= read -r -d $'\0' collapse_file; do
if [ -f "$collapse_file" ]; then
# Construct the output SVG filename
svg_file="${collapse_file%.collapse}.svg"
echo "Generating $svg_file from $collapse_file..."
./flamegraph.pl "$collapse_file" > "$svg_file"
if [ $? -ne 0 ]; then
echo "Error generating flame graph for $collapse_file"
else
echo "Successfully generated $svg_file"
fi
fi
done
echo "Flame graph generation complete."

View File

@@ -1,6 +0,0 @@
#!/bin/bash
# Download flamegraph.pl to current directory - this is the flame graph generation tool script
curl https://raw.githubusercontent.com/brendangregg/FlameGraph/master/flamegraph.pl > ./flamegraph.pl
chmod +x ./flamegraph.pl

Some files were not shown because too many files have changed in this diff.